Line: 1 to 1 | ||||||||
---|---|---|---|---|---|---|---|---|
################################### # General configuration variables # ################################### # List of the batch nodes hostnames and optionally the subcluster ID the # WN belongs to. An example file is available in # ${INSTALL_ROOT}/glite/yaim/examples/wn-list.conf # Change the path according to your site settings. #WN_LIST=${INSTALL_ROOT}/glite/yaim/examples/wn-list.conf WN_LIST=/root/wn-list.conf # List of unix users to be created in the service nodes. # The format is as follows: # UID:LOGIN:GID1,GID2,...:GROUP1,GROUP2,...:VO:FLAG: # An example file is available in ${INSTALL_ROOT}/glite/yaim/examples/ig-users.conf # Change the path according to your site settings. # For more information please check ${INSTALL_ROOT}/glite/yaim/examples/users.conf.README USERS_CONF=${YAIM_ROOT}/glite/yaim/examples/ig-users.conf # List of the local accounts which a user should be mapped to. # The format is as follows: # "VOMS_FQAN":GROUP:GID:FLAG:[VO] # An example file is available in ${INSTALL_ROOT}/glite/yaim/examples/ig-groups.conf # Change the path according to your site settings. # For more information please check ${INSTALL_ROOT}/glite/yaim/examples/groups.conf.README # NOTE: comment out this variable if you want to specify a groups.conf per VO # under the group.d/ directory. GROUPS_CONF=${YAIM_ROOT}/glite/yaim/examples/ig-groups.conf # Uncomment this variable if you want to specify a local groups.conf # It is similar to GROUPS_CONF but used to specify a separate file # where local accounts specific to the site are defined. # LOCAL_GROUPS_CONF=my_local_groups.conf # Uncomment this variable if you are installing a mysql server # It is the MySQL admin password. MYSQL_PASSWORD=infncnaf # Uncomment this variable if you want to explicitely use pool # accounts for special users when generating the grid-mapfile. 
# If not defined, YAIM will decide whether to use special # pool accounts or not automatically # SPECIAL_POOL_ACCOUNTS=yes or no # INFN-GRID: Keep in site-info.def # WARNING! This variable will be removed in a future release JAVA_LOCATION="/usr/java/latest" ################################ # Site configuration variables # ################################ # Human-readable name of your site SITE_NAME=INFN-CNAF ######################################### # ARGUS authorisation framework control # ######################################### # Set USE_ARGUS to yes to enable the configuration of ARGUS USE_ARGUS=yes # In case ARGUS is to be used the following should be set # The ARGUS service PEPD endpoints as a space separated list: #ARGUS_PEPD_ENDPOINTS="http://pepd.example.org:8154/authz" ARGUS_PEPD_ENDPOINTS="https://vgrid06.cnaf.infn.it:8154/authz" # ARGUS resource identities: The resource ID can be set # for the cream CE, WMS and other nodes respectively. # If a resource ID is left unset the ARGUS configuration # will be skipped on the associated node. # CREAM_PEPC_RESOURCEID=urn:mysitename.org:resource:ce # WMS_PEPC_RESOURCEID=urn:mysitename.org:resource:wms # GENERAL_PEPC_RESOURCEID=urn:mysitename.org:resource:other CREAM_PEPC_RESOURCEID="http://cnaf.infn.it/cremino" # INFN-GRID: space separed list of the IP addresses of the NTP servers # (preferably set a local ntp server and a public one, ex. 
pool.ntp.org) #NTP_HOSTS_IP="131.154.1.53 131.154.1.103 pool.ntp.org" NTP_HOSTS_IP="ripe.cnaf.infn.it ntp-3.infn.it ntp-1.infn.it" #--------------------------# # Private network settings # #--------------------------# # INFN-GRID: Set PRIVATE_NETWORK=true to use WN on private network PRIVATE_NETWORK=false # INFN-GRID: If PRIVATE_NETWORK=true, uncomment and write the internal domain name #MY_INT_DOMAIN=your_internal_domain # INFN-GRID: If PRIVATE_NETWORK=true, uncomment and write your internal network #INT_NET=your_internal_network # INFN-GRID: If PRIVATE_NETWORK=true, uncomment and write the internal FQDN hostname of CE #CE_INT_HOST=your_internal_ce_host.$MY_INT_DOMAIN # INFN-GRID: If PRIVATE_NETWORK=true, uncomment and write the internal hostname # of the host exporting the directory used to install the application software #INT_HOST_SW_DIR=$CE_INT_HOST ############################## # CE configuration variables # ############################## # Hostname of the CE CE_HOST=cremoso.$MY_DOMAIN ############################ # SubCluster configuration # ############################ # Name of the processor model as defined by the vendor # for the Worker Nodes in a SubCluster. CE_CPU_MODEL=Opteron # Name of the processor vendor # for the Worker Nodes in a SubCluster CE_CPU_VENDOR=AuthenticAMD # Processor clock speed expressed in MHz # for the Worker Nodes in a SubCluster. CE_CPU_SPEED=2589 # For the following variables please check: # https://wiki.egi.eu/wiki/Operations/HOWTO05 # # Operating system name used on the Worker Nodes # part of the SubCluster. CE_OS="ScientificSL" # Operating system release used on the Worker Nodes # part of the SubCluster. CE_OS_RELEASE="5.5" # Operating system version used on the Worker Nodes # part of the SubCluster. CE_OS_VERSION="Boron" # Platform Type of the WN in the SubCluster # Check: https://wiki.egi.eu/wiki/Operations/HOWTO06 CE_OS_ARCH=x86_64 # Total physical memory of a WN in the SubCluster # expressed in Megabytes. 
CE_MINPHYSMEM=4096 # Total virtual memory of a WN in the SubCluster # expressed in Megabytes. CE_MINVIRTMEM=1024 # Total number of real CPUs/physical chips in # the SubCluster, including the nodes part of the # SubCluster that are temporary down or offline. CE_PHYSCPU=0 # Total number of cores/hyperthreaded CPUs in # the SubCluster, including the nodes part of the # SubCluster that are temporary down or offline CE_LOGCPU=0 # Number of Logical CPUs (cores) of the WN in the # SubCluster CE_SMPSIZE=2 # Performance index of your fabric in SpecInt 2000 CE_SI00=1039 # Performance index of your fabric in SpecFloat 2000 CE_SF00=951 # Set this variable to either TRUE or FALSE to express # the permission for direct outbound connectivity # for the WNs in the SubCluster CE_OUTBOUNDIP=TRUE # Set this variable to either TRUE or FALSE to express # the permission for inbound connectivity # for the WNs in the SubCluster CE_INBOUNDIP=FALSE # Space separated list of software tags supported by the site # e.g. CE_RUNTIMEENV="LCG-2 LCG-2_1_0 LCG-2_1_1 LCG-2_2_0 GLITE-3_0_0 GLITE-3_1_0 R-GMA" # INFN-GRID: Add the following tags to runtime environment: # * If your site belongs to INFN write CITY in capitals (ex. PADOVA) # otherwise INSTITUTE-CITY in capitals (ex. 
SNS-PISA) # * Write the average value of SpecInt2000 and SpecFloat2000 for your WNs; # please note that now a '_' is used as separator in place of '=' # (see at http://grid-it.cnaf.infn.it/fileadmin/Certification/MetricHowTo.pdf) # SI00MeanPerCPU_<your_value> # SF00MeanPerCPU_<your_value> #CE_RUNTIMEENV="tag1 [tag2 [...]]" CE_RUNTIMEENV=" CNAF SI00MeanPerCPU_1039 SF00MeanPerCPU_951 GLITE-3_0_0 GLITE-3_1_0 GLITE-3_2_0 " # For the following variables, please check more detailed information in: # https://twiki.cern.ch/twiki/bin/view/LCG/Site-info_configuration_variables#site_info_def # # The following values must be defined by the sys admin: # - CPUScalingReferenceSI00=<referenceCPU-SI00> # - Share=<vo-name>:<vo-share> (optional, multiple definitons) #CE_CAPABILITY="CPUScalingReferenceSI00=value [Share=vo-name1:value [Share=vo-name2:value [...]]]" CE_CAPABILITY="CPUScalingReferenceSI00=1039 glexec" # The following values must be defined by the sys admin: # - Cores=value # - value-HEP-SPEC06 (optional), where value is the CPU power computed # using the HEP-SPEC06 benchmark #CE_OTHERDESCR="Cores=value[,Benchmark=value-HEP-SPEC06]" CE_OTHERDESCR="Cores=1,Benchmark=4.156-HEP-SPEC06" ######################################## # Batch server configuration variables # ######################################## #Set it to 'no' if you want to disable the maui configuration in YAIM #Default value: yes | ||||||||
Changed: | ||||||||
< < | CONFIG_MAUI="no" | |||||||
> > | #CONFIG_MAUI="yes" | |||||||
#Set it to 'no' if you want to disable the /var/torque/server_priv/nodes configuration in YAIM #Default value: yes | ||||||||
Changed: | ||||||||
< < | CONFIG_TORQUE_NODES="no" | |||||||
> > | #CONFIG_TORQUE_NODES="yes" | |||||||
# Hostname of the Batch server
# Change this if your batch server is not installed
# in the same host of the CE
# INFN-GRID: Set this variable to the hostname of your batch master server,
# which could be different from the current CE (for example in the case of
# sites with two CEs). Do not comment it!
BATCH_SERVER=cremino.cnaf.infn.it
# Jobmanager specific settings. Please, define:
# lcgpbs, lcglsf, lcgsge or lcgcondor
# INFN-GRID: for CREAM CE write it without 'lcg' prefix: pbs, lsf, sge, condor
JOB_MANAGER=pbs
# torque, lsf, sge or condor
# INFN-GRID: write here 'pbs' even if you are using torque (for compatibility
# with MPI support on Globus), write 'lsf' if you are using LSF.
CE_BATCH_SYS=pbs
# INFN-GRID: only for torque/pbs; path for batch-system log files
# BATCH_LOG_DIR=/var/torque/
BATCH_LOG_DIR=/var/torque/
BATCH_VERSION=2.5.7
# INFN-GRID: only for LSF; path for the batch-system bin files
#BATCH_BIN_DIR=my_batch_system_bin_dir
# INFN-GRID: only for LSF; path where lsf.conf is located
#BATCH_CONF_DIR=lsf_install_path/conf
#Path of a file containing the munge key. Munge is required since Torque version 2.5.7. This file will be copied to /etc/munge/munge.key
MUNGE_KEY_FILE=/etc/munge/munge.key
################################
# APEL configuration variables #
################################
# Database password for the APEL DB.
APEL_DB_PASSWORD=macchevvoi
MON_HOST=$CE_HOST
########################
# SE general variables #
########################
STORM_HOST=sunstorm.$MY_DOMAIN
STORM2_HOST=darkstorm.$MY_DOMAIN
# Space separated list of SEs hostnames
SE_LIST="$STORM_HOST $STORM2_HOST"
# Space separated list of SE hosts from SE_LIST containing
# the export directory from the Storage Element and the
# mount directory common to the worker nodes that are part
# of the Computing Element. If any of the SEs in SE_LIST
# does not support the mount concept, do not define
# anything for that SE in this variable. If this is the case
# for all the SEs in SE_LIST then put the value "none"
#SE_MOUNT_INFO_LIST="[SE1:export_dir1,mount_dir1 [SE2:export_dir2,mount_dir2 [...]]|none]"
SE_MOUNT_INFO_LIST="none"
################################
# BDII configuration variables #
################################
# Hostname of the top level BDII
# INFN-GRID: Default BDII Top for Italy (if you do not have your own)
BDII_HOST=egee-bdii.cnaf.infn.it
##############################
# VO configuration variables #
##############################
# If you are configuring a DNS-like VO, please check
# the following URL: https://twiki.cern.ch/twiki/bin/view/LCG/YaimGuide400#vo_d_directory
# Space separated list of VOs supported by your site
# INFN-GRID: Available VOs (alphabetically sorted - count 44):
# alice ams02.cern.ch argo atlas babar
# bio biomed cdf cms compassit compchem cyclops
# comput-er.it d4science.research-infrastructures.eu dteam enea
# enmr.eu esr eticsproject.eu euasia euchina euindia
# eumed gilda geant4 gear glast.org
# gridit inaf infngrid lhcb
# libi lights.infn.it magic omiieurope ops
# pacs.infn.it pamela planck superbvo.org theophys
# tps.infn.it virgo zeus
# INFN-GRID: Your site must support the following certification VOs:
# dteam infngrid ops
#VOS="vo1 [vo2 [...]]"
VOS="comput-er.it dteam glast.org infngrid ops gridit enmr.eu"
# Prefix of the experiment software directory in your CE
# INFN-GRID: Agreement on software directory name
VO_SW_DIR=/opt/exp_soft
# Space separated list of queues configured in your CE
# INFN-GRID: Your site must configure a queue that support certification VOs
#QUEUES="q1 [q2 [...]]"
QUEUES="cert prod cloudtf"
# For each queue defined in QUEUES, define a _GROUP_ENABLE variable
# which is a space separated list of VO names and VOMS FQANs:
# Ex.: MYQUEUE_GROUP_ENABLE="ops atlas cms /cms/Higgs /cms/ROLE=production"
# In QUEUE names containing dots and dashes replace them with underscore:
# Ex.: QUEUES="my.test-queue"
# MY_TEST_QUEUE_GROUP_ENABLE="ops atlas"
# INFN-GRID: Here an example of the settings for certification queue:
# CERT_GROUP_ENABLE="dteam infngrid ops"
#<queue-name>_GROUP_ENABLE="fqan1 [fqan2 [...]]"
CERT_GROUP_ENABLE="dteam infngrid ops /dteam/ROLE=lcgadmin /dteam/ROLE=production /ops/ROLE=lcgadmin /ops/ROLE=pilot /infngrid/ROLE=SoftwareManager /infngrid/ROLE=pilot"
PROD_GROUP_ENABLE="comput-er.it gridit glast.org /comput-er.it/ROLE=SoftwareManager /gridit/ROLE=SoftwareManager /glast.org/ROLE=SoftwareManager /glast.org/ROLE=prod"
CLOUDTF_GROUP_ENABLE="dteam /dteam/ROLE=lcgadmin /dteam/ROLE=production enmr.eu /enmr.eu/ROLE=SoftwareManager"
# Optional variable to define the default SE used by the VO.
# Define the SE hostname if you want a specific SE to be the default one.
# If this variable is not defined, the first SE in SE_LIST will be used
# as the default one.
# VO_ |