##############################################################################
# Copyright (c) Members of the EGEE Collaboration. 2004.
# See http://www.eu-egee.org/partners/ for details on the copyright
# holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# NAME :        site-info.def
#
# DESCRIPTION : This is the main configuration file needed to execute the
#               yaim command. It contains a list of the variables needed to
#               configure a service.
#
# AUTHORS :     yaim-contact@cern.ch
#
# NOTES :       - site-info.def contains the list of variables common to
#                 multiple node types. Node type specific variables are
#                 distributed by their corresponding modules, although a
#                 unique site-info.def can still be used at configuration time.
#
#               - Service specific variables are distributed under
#                 /opt/glite/yaim/examples/siteinfo/services/
#                 Copy the relevant file under your siteinfo/services directory,
#                 or copy the variables manually into site-info.def.
#
#               - site-info.pre and site-info.post contain default variables.
#                 When sys admins want to set their own values, they can just
#                 define the variable in site-info.def and that will overwrite
#                 the value in site-info.pre/post.
#
#               - VO variables for LCG VOs are currently distributed with
#                 example values. For up to date information on any VO,
#                 please check the CIC portal VO ID Card information:
#                 http://cic.in2p3.fr/
#
#               - For more information on YAIM, please check:
#                 https://twiki.cern.ch/twiki/bin/view/EGEE/YAIM
#
#               - For a detailed description of site-info.def variables,
#                 please check:
#                 https://twiki.cern.ch/twiki/bin/view/LCG/Site-info_configuration_variables#site_info_def
#
# YAIM MODULE:  glite-yaim-core
#
##############################################################################

###################################
# General configuration variables #
###################################

# List of the batch node hostnames and, optionally, the subcluster ID the
# WN belongs to. An example file is available in
# ${YAIM_ROOT}/glite/yaim/examples/wn-list.conf
# Change the path according to your site settings.
WN_LIST=/root/siteinfo_dir/wn-list.conf

# List of unix users to be created in the service nodes.
# The format is as follows:
# UID:LOGIN:GID1,GID2,...:GROUP1,GROUP2,...:VO:FLAG:
# An example file is available in ${YAIM_ROOT}/glite/yaim/examples/users.conf
# Change the path according to your site settings.
# For more information please check ${YAIM_ROOT}/glite/yaim/examples/users.conf.README
USERS_CONF=/root/siteinfo_dir/users.conf

# List of the local accounts which a user should be mapped to.
# The format is as follows:
# "VOMS_FQAN":GROUP:GID:FLAG:[VO]
# An example file is available in ${YAIM_ROOT}/glite/yaim/examples/groups.conf
# Change the path according to your site settings.
# For more information please check ${YAIM_ROOT}/glite/yaim/examples/groups.conf.README
# NOTE: comment out this variable if you want to specify a groups.conf per VO
# under the group.d/ directory.
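# For illustration only (YAIM does not read these lines, and the UID, GIDs and
# account names are hypothetical examples): a users.conf entry and a groups.conf
# entry following the formats described above could look like
#   20001:dteam001:20010:dteam:dteam::
#   "/dteam/ROLE=lcgadmin":::sgm:
# Please check the users.conf.README and groups.conf.README files referenced
# above for the authoritative format description.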
GROUPS_CONF=/root/siteinfo_dir/groups.conf

# Uncomment this variable if you want to specify a local groups.conf.
# It is similar to GROUPS_CONF but used to specify a separate file
# where local accounts specific to the site are defined.
# LOCAL_GROUPS_CONF=my_local_groups.conf

# Uncomment this variable if you are installing a mysql server.
# It is the MySQL admin password.
MYSQL_PASSWORD=mysql_pass

# Uncomment this variable if you want to explicitly use pool
# accounts for special users when generating the grid-mapfile.
# If not defined, YAIM will decide automatically whether to use
# special pool accounts or not.
# SPECIAL_POOL_ACCOUNTS=yes or no

################################
# Site configuration variables #
################################

# Human-readable name of your site
SITE_NAME=GridKA_glite_srv

# The contact e-mail of your site.
# A comma separated list of email addresses.
SITE_EMAIL="bertocco@pd.infn.it"

# The position of your site north or south of the equator,
# measured from -90. to 90. with positive values going north and
# negative values going south.
SITE_LAT=0.0

# The position of the site east or west of Greenwich, England,
# measured from -180. to 180. with positive values going east and
# negative values going west.
SITE_LONG=0.0

# Uncomment this variable if your site has an http proxy
# in order to reduce the load on the CA host.
# SITE_HTTP_PROXY="http-proxy.my.domain"

#########################################
# ARGUS authorisation framework control #
#########################################

# Set USE_ARGUS to yes to enable the configuration of ARGUS
USE_ARGUS=no

# In case ARGUS is to be used the following should be set.
# The ARGUS service PEPD endpoints as a space separated list:
ARGUS_PEPD_ENDPOINTS="https://pepd.example.org:8154/authz"

# ARGUS resource identities: the resource ID can be set
# for the cream CE, WMS and other nodes respectively.
# If a resource ID is left unset the ARGUS configuration
# will be skipped on the associated node.
# CREAM_PEPC_RESOURCEID=urn:mysitename.org:resource:ce
# WMS_PEPC_RESOURCEID=urn:mysitename.org:resource:wms
# GENERAL_PEPC_RESOURCEID=urn:mysitename.org:resource:other

################################
# User configuration variables #
################################

# Uncomment the following variables if you want to create system user
# accounts under a HOME directory different from /home.
# Note: it is recommended to use /var/lib/user_name as the HOME directory
# for system users.
# EDG_HOME_DIR=/var/lib/edguser
# EDGINFO_HOME_DIR=/var/lib/edginfo
# BDII_HOME_DIR=/var/lib/edguser

##############################
# CE configuration variables #
##############################

# Optional variable to define the path of a shared directory
# available for application data.
# Typically a POSIX accessible transient disk space shared
# between the Worker Nodes. It may be used by MPI applications,
# to store intermediate files that need further processing by
# local jobs, or as a staging area, especially if the Worker Nodes
# have no internet connectivity.
# CE_DATADIR=/mypath

# Hostname of the CE
CE_HOST=gks-028.$MY_DOMAIN

############################
# SubCluster configuration #
############################
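# A minimal sketch of how the values below could be collected on a
# representative WN (assumes a Linux node exposing /proc/cpuinfo and, for
# RedHat-like systems, /etc/redhat-release; adapt to your platform):
#   grep -m1 'model name' /proc/cpuinfo   # -> CE_CPU_MODEL
#   grep -m1 'vendor_id'  /proc/cpuinfo   # -> CE_CPU_VENDOR
#   grep -m1 'cpu MHz'    /proc/cpuinfo   # -> CE_CPU_SPEED (MHz)
#   cat /etc/redhat-release               # -> CE_OS, CE_OS_RELEASE, CE_OS_VERSION
#   uname -m                              # -> CE_OS_ARCH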
# Name of the processor model as defined by the vendor
# for the Worker Nodes in a SubCluster.
# `cat /proc/cpuinfo` (model name)
#CE_CPU_MODEL=cpu_model
CE_CPU_MODEL=Xeon

# Name of the processor vendor
# for the Worker Nodes in a SubCluster.
# `cat /proc/cpuinfo` (vendor_id)
CE_CPU_VENDOR=Intel

# Processor clock speed expressed in MHz
# for the Worker Nodes in a SubCluster.
# `cat /proc/cpuinfo` (cpu MHz)
CE_CPU_SPEED=2330

# For the following variables please check:
# http://goc.grid.sinica.edu.tw/gocwiki/How_to_publish_the_OS_name
#
# Operating system name used on the Worker Nodes
# part of the SubCluster.
# `cat /etc/redhat-release`
CE_OS="Scientific Linux SL"

# Operating system release used on the Worker Nodes
# part of the SubCluster.
# `cat /etc/redhat-release`
#CE_OS_RELEASE=x.y.z
CE_OS_RELEASE=5.7

# Operating system version used on the Worker Nodes
# part of the SubCluster.
# `cat /etc/redhat-release`
CE_OS_VERSION="Boron"

# Platform type of the WNs in the SubCluster.
# Check: http://goc.grid.sinica.edu.tw/gocwiki/How_to_publish_my_machine_architecture
# `uname -a`
CE_OS_ARCH=x86_64

# Total physical memory of a WN in the SubCluster
# expressed in Megabytes.
CE_MINPHYSMEM=16000

# Total virtual memory of a WN in the SubCluster
# expressed in Megabytes.
# [SARA] The swap space size plus RAM size is the total amount of virtual memory.
#CE_MINVIRTMEM=number
CE_MINVIRTMEM=23000

# Total number of real CPUs/physical chips in
# the SubCluster, including the nodes that are part of the
# SubCluster but temporarily down or offline.
CE_PHYSCPU=8

# Total number of cores/hyperthreaded CPUs in
# the SubCluster, including the nodes that are part of the
# SubCluster but temporarily down or offline.
CE_LOGCPU=8

# Number of logical CPUs (cores) of a WN in the
# SubCluster.
CE_SMPSIZE=8

# Performance index of your fabric in SpecInt 2000.
# System administrators MUST set this variable as indicated on page 5 of
# https://twiki.cern.ch/twiki/pub/LCG/WLCGCommonComputingReadinessChallenges/WLCG_GlueSchemaUsage-1.8.pdf
# For some examples of Spec values see http://www.specbench.org/osg/cpu2000/results/cint2000.html
#CE_SI00=number
CE_SI00=100

# Performance index of your fabric in SpecFloat 2000.
# For some examples of Spec values see http://www.specbench.org/osg/cpu2000/results/cint2000.html
#CE_SF00=number
CE_SF00=100

# Set this variable to either TRUE or FALSE to express
# the permission for direct outbound connectivity
# for the WNs in the SubCluster.
CE_OUTBOUNDIP=FALSE

# Set this variable to either TRUE or FALSE to express
# the permission for inbound connectivity
# for the WNs in the SubCluster.
#CE_INBOUNDIP=FALSE
CE_INBOUNDIP=TRUE

# Space separated list of software tags supported by the site,
# e.g. CE_RUNTIMEENV="LCG-2 LCG-2_1_0 LCG-2_1_1 LCG-2_2_0 GLITE-3_0_0 GLITE-3_1_0 R-GMA"
#CE_RUNTIMEENV="tag1 [tag2 [...]]"
CE_RUNTIMEENV="LCG EMI"

# For the following variable, please check more detailed information in:
# https://twiki.cern.ch/twiki/bin/view/LCG/Site-info_configuration_variables#site_info_def
# This YAIM variable is a blank separated list and is used to set the GlueCECapability
# attribute, where:
# 1) CPUScalingReferenceSI00=<value>: this is the reference CPU SI00 that has to be
#    calculated in two possible ways: a) If the batch system scales the published CPU
#    time limit (GlueCEPolicyMaxCPUTime) to a reference CPU power, then
#    CPUScalingReferenceSI00 should be the SI00 rating for that reference; b) If the
#    batch system does not scale the time limit, then CPUScalingReferenceSI00 should
#    be the SI00 rating of the least powerful core in the cluster. Sites which have
#    moved to the HEP-SPEC benchmark should use it but converted to SI00 units using
#    the scaling factor of 250, i.e. SI00 = 250*HEP-SPEC.
# 2) Share=<vo-name>:<value>: this value is used to express VO fairshare targets.
#    If there is no special share, this value MUST NOT be published. <value> can
#    assume values between 1 and 100 (it represents a percentage). Please note
#    that the sum of the shares over all WLCG VOs MUST BE less than or equal to 100.
# If the worker nodes behind the CE provide the glexec facility (for WLCG VOs),
# an extra capability glexec must be added to the list.
#
# The following values must be defined by the sys admin:
# - CPUScalingReferenceSI00=<value>
# - Share=<vo-name>:<value> (optional, multiple definitions)
#CE_CAPABILITY="CPUScalingReferenceSI00=value [Share=vo-name1:value [Share=vo-name2:value [...]]]"
CE_CAPABILITY="CPUScalingReferenceSI00=20"

# For the following variable, please check more detailed information in:
# https://twiki.cern.ch/twiki/bin/view/LCG/Site-info_configuration_variables#site_info_def
# This YAIM variable is used to set the GlueHostProcessorOtherDescription attribute.
# The value of this variable MUST be set to:
# Cores=<value>[,Benchmark=<value>-HEP-SPEC06]
# where the Cores value is equal to the number of cores per CPU of a typical
# Worker Node in a SubCluster. The second value of this attribute MUST be published
# only in the case the CPU power of the SubCluster is computed using the Benchmark
# HEP-SPEC06.
# The following values must be defined by the sys admin:
# - Cores=<value>
# - Benchmark=<value>-HEP-SPEC06 (only when HEP-SPEC06 is used), where <value>
#   is the CPU power computed using the HEP-SPEC06 benchmark
# More information on how to calculate and set these values
# can be found at https://wiki.egi.eu/wiki/HEP_SPEC06
#CE_OTHERDESCR="Cores=value[,Benchmark=value-HEP-SPEC06]"
CE_OTHERDESCR="Cores=1,Benchmark=1024"
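# A worked example of the conversion described above (all HEP-SPEC06 figures
# are hypothetical and for illustration only): if the least powerful core in
# the cluster rates 8 HEP-SPEC06, then
#   CPUScalingReferenceSI00 = 250 * 8 = 2000
# and a SubCluster of 4-core CPUs benchmarked at 8 HEP-SPEC06 per core might
# publish something like
#   CE_CAPABILITY="CPUScalingReferenceSI00=2000"
#   CE_OTHERDESCR="Cores=4,Benchmark=8-HEP-SPEC06"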
########################################
# Batch server configuration variables #
########################################

# Hostname of the Batch server.
# Change this if your batch server is not installed
# on the same host as the CE.
BATCH_SERVER=$CE_HOST

# Jobmanager specific settings. Please define one of:
# lcgpbs, lcglsf, lcgsge or lcgcondor
#JOB_MANAGER=my_job_manager
JOB_MANAGER=pbs

# torque, lsf, sge or condor
#CE_BATCH_SYS=my_batch_system
CE_BATCH_SYS=pbs

#BATCH_LOG_DIR=my_batch_system_log_directory
BATCH_LOG_DIR=/var/torque

#BATCH_VERSION=my_batch_system_version
# `rpm -qa | grep torque`
BATCH_VERSION=2.5.7-7

################################
# APEL configuration variables #
################################

# Database password for the APEL DB.
#APEL_DB_PASSWORD="APELDB_PWD"
APEL_DB_PASSWORD="APELDB_PWD"
APEL_MYSQL_HOST=fake_host.domain

###############################
# WMS configuration variables #
###############################

# Hostname of the WMS
#WMS_HOST=my-wms.$MY_DOMAIN

###################################
# myproxy configuration variables #
###################################

# Hostname of the PX
#PX_HOST=my-px.$MY_DOMAIN

################################
# RGMA configuration variables #
################################

# Hostname of the RGMA server
#MON_HOST=my-mon.$MY_DOMAIN

###############################
# FTS configuration variables #
###############################

# FTS endpoint
#FTS_SERVER_URL="https://fts.${MY_DOMAIN}:8443/path/glite-data-transfer-fts"

###############################
# DPM configuration variables #
###############################

# Hostname of the DPM head node
#DPM_HOST="my-dpm.$MY_DOMAIN"

########################
# SE general variables #
########################

# Space separated list of SE hostnames
#SE_LIST="SE1 SE2 SE3"
SE_LIST="emi2rc-sl5-dpm.cern.ch"

# Space separated list of SE hosts from SE_LIST containing
# the export directory from the Storage Element and the
# mount directory common to the worker nodes that are part
# of the Computing Element. If any of the SEs in SE_LIST
# does not support the mount concept, do not define
# anything for that SE in this variable. If this is the case
# for all the SEs in SE_LIST then put the value "none".
#SE_MOUNT_INFO_LIST="[SE1:export_dir1,mount_dir1 [SE2:export_dir2,mount_dir2 [...]]|none]"
SE_MOUNT_INFO_LIST="none"
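# For illustration only (the SE name and paths below are hypothetical): a site
# whose SE "se01.example.org" exports /storage and whose WNs mount it under
# /data would instead set
#   SE_MOUNT_INFO_LIST="se01.example.org:/storage,/data"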
# Variable necessary to configure the Gridview service client on the SEs.
# It sets the location and filename of the gridftp server logfile on
# different types of SEs. The gridftp logfile needed by Gridview is the
# netlogger file, which contains info for each transfer (created with the
# -Z/-log-transfer option of globus-gridftp-server).
# Ex: DATE=20071206082249.108921 HOST=hostname.cern.ch PROG=globus-gridftp-server
#     NL.EVNT=FTP_INFO START=20071206082248.831173 USER=atlas102 FILE=/storage/atlas/
#     BUFFER=0 BLOCK=262144 NBYTES=330 VOLUME=/ STREAMS=1 STRIPES=1 DEST=[127.0.0.1]
#     TYPE=LIST CODE=226
# Default locations for DPM: /var/log/dpm-gsiftp/dpm-gsiftp.log
# and SE_classic: /var/log/globus-gridftp.log
#SE_GRIDFTP_LOGFILE=path_to_gridftp_logfile.log

################################
# BDII configuration variables #
################################

# Hostname of the top level BDII
#BDII_HOST=my-bdii.$MY_DOMAIN
BDII_HOST=certtbrc-bdii-site.cern.ch

# Hostname of the site BDII
SITE_BDII_HOST=gks-028.$MY_DOMAIN

# Uncomment this variable if you want to define a list of
# top level BDIIs to support the automatic failover in the GFAL clients.
# BDII_LIST=my-bdii1.$MY_DOMAIN:port1[,my-bdii22.$MY_DOMAIN:port2[...]]

##############################
# VO configuration variables #
##############################

# If you are configuring a DNS-like VO, please check
# the following URL: https://twiki.cern.ch/twiki/bin/view/LCG/YaimGuide400#vo_d_directory

# Space separated list of VOs supported by your site
#VOS="vo1 [vo2 [...]]"
VOS="dteam dech"

# Prefix of the experiment software directory in your CE
VO_SW_DIR=/opt/exp_soft

# Space separated list of queues configured in your CE
QUEUES="dteam_q dech_q cert"

# For each queue defined in QUEUES, define a <queue-name>_GROUP_ENABLE variable,
# which is a space separated list of VO names and VOMS FQANs:
# Ex.: MYQUEUE_GROUP_ENABLE="ops atlas cms /cms/Higgs /cms/ROLE=production"
# In queue names containing dots and dashes, replace them with underscores:
# Ex.: QUEUES="my.test-queue"
#      MY_TEST_QUEUE_GROUP_ENABLE="ops atlas"
#<queue-name>_GROUP_ENABLE="fqan1 [fqan2 [...]]"
DTEAM_Q_GROUP_ENABLE="dteam"
DECH_Q_GROUP_ENABLE="dech"
CERT_GROUP_ENABLE="dteam dech"

# Optional variable to define the default SE used by the VO.
# Define the SE hostname if you want a specific SE to be the default one.
# If this variable is not defined, the first SE in SE_LIST will be used
# as the default one.
# VO_<vo-name>_DEFAULT_SE=vo-default-se

# Optional variable to define a list of LBs used by the VO.
# Define a space separated list of LB hostnames.
# If this variable is not defined LB_HOST will be used.
# VO_<vo-name>_LB_HOSTS="vo-lb1 [vo-lb2 [...]]"

# Optional variable to automatically add wildcards per FQAN
# in the LCMAPS gridmap file and groupmap file. Set it to 'yes'
# if you want to add the wildcards in your VO. Do not define it
# or set it to 'no' if you do not want to configure wildcards in your VO.
# VO_<vo-name>_MAP_WILDCARDS=no

# Optional variable to define the Myproxy server supported by the VO.
# Define the Myproxy hostname if you want a specific Myproxy server.
# If this variable is not defined PX_HOST will be used.
# VO_<vo-name>_PX_HOST=vo-myproxy

# Optional variable to define a list of RBs used by the VO.
# Define a space separated list of RB hostnames.
# If this variable is not defined RB_HOST will be used.
# VO_<vo-name>_RBS="vo-rb1 [vo-rb2 [...]]"

# Area on the WN for the installation of the experiment software.
# If a predefined shared area has been mounted on the WNs where
# VO managers can pre-install software, then this variable
# should point to this area. If instead there is no shared
# area and each job must install the software, then this variable
# should contain a dot ( . ). Anyway, the mounting of shared areas,
# as well as the local installation of VO software, is not managed
# by yaim and should be handled locally by Site Administrators.
#VO_<vo-name>_SW_DIR=wn_exp_soft_dir

# This variable contains the vomses file parameters needed
# to contact a VOMS server. Multiple VOMS servers can be given
# if the parameters are enclosed in single quotes.
#VO_<vo-name>_VOMSES="'vo_name voms_server_hostname port voms_server_host_cert_dn vo_name' ['...']"

# DN of the CA that signs the VOMS server certificate.
# Multiple values can be given if enclosed in single quotes.
# Note that there must be as many entries as in the VO_<vo-name>_VOMSES variable.
# There is a one to one relationship between the elements of both lists,
# so the order must be respected.
#VO_<vo-name>_VOMS_CA_DN="'voms_server_ca_dn' ['...']"

# A list of the VOMS servers used to create the DN grid-map file.
# Multiple values can be given if enclosed in single quotes.
#VO_<vo-name>_VOMS_SERVERS="'vomss://<voms-server-hostname>:8443/voms/<vo-name>?/<vo-name>/' ['...']"

# Optional variable to define a list of WMSs used by the VO.
# Define a space separated list of WMS hostnames.
# If this variable is not defined WMS_HOST will be used.
# VO_<vo-name>_WMS_HOSTS="vo-wms1 [vo-wms2 [...]]"

# Optional variable to create a grid-mapfile with mappings to ordinary
# pool accounts, not containing mappings to special users.
# - UNPRIVILEGED_MKGRIDMAP=no or undefined: the grid-mapfile will contain
#   special users if defined in groups.conf
# - UNPRIVILEGED_MKGRIDMAP=yes: a grid-mapfile containing only mappings
#   to ordinary pool accounts will be created.
# VO_<vo-name>_UNPRIVILEGED_MKGRIDMAP=no

# gLite pool account home directory for the user accounts specified in USERS_CONF.
# Define this variable if you would like to use a directory different than /home.
# VO_<vo-name>_USER_HOME_PREFIX=/pool_account_home_dir

# Examples for the following VOs are included below:
#
# atlas
# alice
# lhcb
# cms
# dteam
# biomed
# ops
#
# VOs should check the CIC portal http://cic.in2p3.fr for the VO ID card information
#

#########
# atlas #
#########
# VO_ATLAS_SW_DIR=$VO_SW_DIR/atlas
# VO_ATLAS_DEFAULT_SE=$SE_HOST
# VO_ATLAS_STORAGE_DIR=$CLASSIC_STORAGE_DIR/atlas
# VO_ATLAS_VOMS_SERVERS='vomss://voms.cern.ch:8443/voms/atlas?/atlas/'
# VO_ATLAS_VOMSES="\
# 'atlas lcg-voms.cern.ch 15001 \
# /DC=ch/DC=cern/OU=computers/CN=lcg-voms.cern.ch atlas 24' \
# 'atlas voms.cern.ch 15001 \
# /DC=ch/DC=cern/OU=computers/CN=voms.cern.ch atlas 24' \
# 'atlas vo.racf.bnl.gov 15003 \
# /DC=org/DC=doegrids/OU=Services/CN=vo.racf.bnl.gov atlas 24' \
# "
# VO_ATLAS_VOMS_CA_DN="\
# '/DC=ch/DC=cern/CN=CERN Trusted Certification Authority' \
# '/DC=ch/DC=cern/CN=CERN Trusted Certification Authority' \
# '/DC=org/DC=DOEGrids/OU=Certificate Authorities/CN=DOEGrids CA 1' \
# "

#########
# alice #
#########
# VO_ALICE_SW_DIR=$VO_SW_DIR/alice
# VO_ALICE_DEFAULT_SE=$SE_HOST
# VO_ALICE_STORAGE_DIR=$CLASSIC_STORAGE_DIR/alice
# VO_ALICE_VOMS_SERVERS='vomss://voms.cern.ch:8443/voms/alice?/alice/'
# VO_ALICE_VOMSES="\
# 'alice lcg-voms.cern.ch 15000 \
# /DC=ch/DC=cern/OU=computers/CN=lcg-voms.cern.ch alice 24' \
# 'alice voms.cern.ch 15000 \
# /DC=ch/DC=cern/OU=computers/CN=voms.cern.ch alice 24' \
# "
# VO_ALICE_VOMS_CA_DN="\
# '/DC=ch/DC=cern/CN=CERN Trusted Certification Authority' \
# '/DC=ch/DC=cern/CN=CERN Trusted Certification Authority' \
# "

#######
# cms #
#######
# VO_CMS_SW_DIR=$VO_SW_DIR/cms
# VO_CMS_DEFAULT_SE=$SE_HOST
# VO_CMS_STORAGE_DIR=$CLASSIC_STORAGE_DIR/cms
# VO_CMS_VOMS_SERVERS='vomss://voms.cern.ch:8443/voms/cms?/cms/'
# VO_CMS_VOMSES="\
# 'cms lcg-voms.cern.ch 15002 \
# /DC=ch/DC=cern/OU=computers/CN=lcg-voms.cern.ch cms 24' \
# 'cms voms.cern.ch 15002 \
# /DC=ch/DC=cern/OU=computers/CN=voms.cern.ch cms 24' \
# 'cms voms.fnal.gov 15015 \
# /DC=org/DC=doegrids/OU=Services/CN=http/voms.fnal.gov cms 24' \
# "
# VO_CMS_VOMS_CA_DN="\
# '/DC=ch/DC=cern/CN=CERN Trusted Certification Authority' \
# '/DC=ch/DC=cern/CN=CERN Trusted Certification Authority' \
# '/DC=org/DC=DOEGrids/OU=Certificate Authorities/CN=DOEGrids CA 1' \
# "

########
# lhcb #
########
# VO_LHCB_SW_DIR=$VO_SW_DIR/lhcb
# VO_LHCB_DEFAULT_SE=$SE_HOST
# VO_LHCB_STORAGE_DIR=$CLASSIC_STORAGE_DIR/lhcb
# VO_LHCB_VOMS_SERVERS='vomss://voms.cern.ch:8443/voms/lhcb?/lhcb/'
# VO_LHCB_VOMSES="\
# 'lhcb lcg-voms.cern.ch 15003 \
# /DC=ch/DC=cern/OU=computers/CN=lcg-voms.cern.ch lhcb 24' \
# 'lhcb voms.cern.ch 15003 \
# /DC=ch/DC=cern/OU=computers/CN=voms.cern.ch lhcb 24' \
# "
# VO_LHCB_VOMS_CA_DN="\
# '/DC=ch/DC=cern/CN=CERN Trusted Certification Authority' \
# '/DC=ch/DC=cern/CN=CERN Trusted Certification Authority' \
# "

#########
# dteam #
#########
VO_DTEAM_SW_DIR=$VO_SW_DIR/dteam
VO_DTEAM_DEFAULT_SE=$SE_HOST
VO_DTEAM_STORAGE_DIR=$CLASSIC_STORAGE_DIR/dteam
VO_DTEAM_VOMS_SERVERS='vomss://voms.hellasgrid.gr:8443/voms/dteam?/dteam/'
VO_DTEAM_VOMSES="\
'dteam voms.hellasgrid.gr 15004 \
/C=GR/O=HellasGrid/OU=hellasgrid.gr/CN=voms.hellasgrid.gr dteam 24' \
'dteam voms2.hellasgrid.gr 15004 \
/C=GR/O=HellasGrid/OU=hellasgrid.gr/CN=voms2.hellasgrid.gr dteam 24' \
"
VO_DTEAM_VOMS_CA_DN="\
'/C=GR/O=HellasGrid/OU=Certification Authorities/CN=HellasGrid CA 2006' \
'/C=GR/O=HellasGrid/OU=Certification Authorities/CN=HellasGrid CA 2006' \
"

##########
# biomed #
##########
# VO_BIOMED_SW_DIR=$VO_SW_DIR/biomed
# VO_BIOMED_DEFAULT_SE=$SE_HOST
# VO_BIOMED_STORAGE_DIR=$CLASSIC_STORAGE_DIR/biomed
# VO_BIOMED_VOMS_SERVERS="vomss://cclcgvomsli01.in2p3.fr:8443/voms/biomed?/biomed/"
# VO_BIOMED_VOMSES="\
# 'biomed cclcgvomsli01.in2p3.fr 15000 \
# /O=GRID-FR/C=FR/O=CNRS/OU=CC-IN2P3/CN=cclcgvomsli01.in2p3.fr biomed 24' \
# "
# VO_BIOMED_VOMS_CA_DN="\
# '/C=FR/O=CNRS/CN=GRID2-FR' \
# "

#######
# ops #
#######
# VO_OPS_SW_DIR=$VO_SW_DIR/ops
# VO_OPS_DEFAULT_SE=$SE_HOST
# VO_OPS_STORAGE_DIR=$CLASSIC_STORAGE_DIR/ops
# VO_OPS_VOMS_SERVERS="vomss://voms.cern.ch:8443/voms/ops?/ops/"
# VO_OPS_VOMSES="\
# 'ops lcg-voms.cern.ch 15009 \
# /DC=ch/DC=cern/OU=computers/CN=lcg-voms.cern.ch ops 24' \
# 'ops voms.cern.ch 15009 \
# /DC=ch/DC=cern/OU=computers/CN=voms.cern.ch ops 24' \
# "
# VO_OPS_VOMS_CA_DN="\
# '/DC=ch/DC=cern/CN=CERN Trusted Certification Authority' \
# '/DC=ch/DC=cern/CN=CERN Trusted Certification Authority' \
# "

########
# dech #
########
VO_DECH_SW_DIR=$VO_SW_DIR/dech
VO_DECH_VOMS_CA_DN="'/C=DE/O=GermanGrid/CN=GridKa-CA'"
VO_DECH_VOMSES="'dech glite-io.scai.fraunhofer.de 15000 /C=DE/O=GermanGrid/OU=Fraunhofer SCAI/CN=host/glite-io.scai.fraunhofer.de dech'"