Difference: NotesAboutInstallationAndConfigurationOfAWMSAndLB (4 vs. 5)

Revision 52012-01-24 - AndreaCristofori

Line: 1 to 1
 
META TOPICPARENT name="WebHome"

Notes about Installation and Configuration of a WMS

Line: 1631 to 1631
 
Changed:
<
<
Please not to enable only the VOs that you need to support on your WMS. If more users/groups are needed they can be configured by copying the files
>
>
Please enable only the VOs (variable VOS) that you need to support on your WMS. If more users/groups are needed they can be configured by copying the files:
 
/opt/glite/yaim/examples/groups.conf
/opt/glite/yaim/examples/users.conf
into the same directory as your siteinfo.def and modify them as needed.
Added:
>
>
The siteinfo.def itself should look similar to the following one (which is suitable for a co-hosted WMS and LB supporting the VOs cms, ops and dteam)
 
Added:
>
>
# YAIM example site configuration file - adapt it to your site!
#ACTIVEMQ_TOPIC = /WMSMonitor 

# DNS domain appended to the unqualified host names defined below.
MY_DOMAIN=cnaf.infn.it

# Node names
# Note: - SE_HOST -->  Removed, see CLASSIC_HOST, DCACHE_ADMIN, DPM_HOST below
#       - REG_HOST --> There is only 1 central registry for the time being.


# DN of the host certificate of the WMS allowed to talk to this LB.
GLITE_LB_WMS_DN="/C=IT/O=INFN/OU=Host/L=CNAF/CN=wms003.cnaf.infn.it"
# Comma-separated list of user DNs granted LB administrative privileges.
GLITE_LB_SUPER_USERS="/C=IT/O=INFN/OU=Personal Certificate/L=CNAF/CN=Alessandro Paolini,/C=IT/O=INFN/OU=Personal Certificate/L=CNAF/CN=Andrea Cristofori,/C=IT/O=INFN/OU=Personal Certificate/L=Padova/CN=Sergio Traldi"
# Regular expression matching the DNs allowed to register jobs;
# ".*" accepts any authenticated DN.
GLITE_LB_AUTHZ_REGISTER_JOBS=".*"


CE_HOST=my-ce.$MY_DOMAIN
RB_HOST=my-rb.$MY_DOMAIN

# WMS and LB are co-hosted on wms003; the LB contact string carries
# the LB server port (9000) explicitly.
WMS_HOST=wms003.$MY_DOMAIN
LB_HOST=wms003.$MY_DOMAIN:9000
# NOTE(review): "both" presumably selects the combined LB server/proxy
# mode used for a co-hosted WMS+LB - confirm against the LB admin guide.
GLITE_LB_TYPE=both
PX_HOST=myproxy.$MY_DOMAIN
BDII_HOST=egee-bdii.$MY_DOMAIN
#BDII_HOST=certtbpre-bdii.cern.ch


MON_HOST=my-mon.$MY_DOMAIN

# debugging information.
# Possible values: NONE, ABORT, ERROR, WARNING, INFO, DEBUG
YAIM_LOGGING_LEVEL=INFO


# Set this to "yes" if your site provides an X509toKERBEROS Authentication Server 
# Only for sites with Experiment Software Area under AFS 
GSSKLOG=no
GSSKLOG_SERVER=my-gssklog.$MY_DOMAIN

# File listing the worker nodes of the site, one host name per line.
WN_LIST=/opt/glite/yaim/etc/wn-list.conf
#USERS_CONF=/root/users.conf
#GROUPS_CONF=/root/groups.conf
# Pool-account and group definitions read by YAIM.  The example files
# shipped with YAIM are referenced directly here; copy and adapt them
# for a production deployment.
USERS_CONF=/opt/glite/yaim/examples/users.conf
GROUPS_CONF=/opt/glite/yaim/examples/groups.conf
FUNCTIONS_DIR=/opt/glite/yaim/functions

# TCP port range used by Globus/GridFTP for data connections.
GLOBUS_TCP_PORT_RANGE="20000,25000"

# Choose a good password ! And be sure that this file cannot be read by
# any grid job !
# NOTE(review): both values below are placeholders - replace them before
# running YAIM on a production host.
MYSQL_PASSWORD=d3pass
APEL_DB_PASSWORD="APELDB_PWD"


# GRID_TRUSTED_BROKERS: DNs of services (RBs) allowed to renew/retrieve
# credentials from/at the myproxy server. Put single quotes around each trusted DN !!!

GRID_TRUSTED_BROKERS="
'broker one'
'broker two'
"

# The RB now uses the DLI by default; set VOs here which should use RLS
RB_RLS="" # "atlas cms"

# GridIce server host name (usually run on the MON node).
GRIDICE_SERVER_HOST=$MON_HOST

# Site-wide settings 
SITE_EMAIL=root@localhost
SITE_CRON_EMAIL=$SITE_EMAIL  # not yet used will appear in a later release
SITE_SUPPORT_EMAIL=$SITE_EMAIL
SITE_NAME=INFN-CNAF
SITE_LOC="City, Country"
SITE_LAT=0.0 # -90 to 90 degrees
SITE_LONG=0.0 # -180 to 180 degrees
SITE_WEB="http://www.my-site.org"
SITE_TIER="TIER 2"
SITE_SUPPORT_SITE="my-bigger-site.their_domain"
#SITE_HTTP_PROXY="myproxy.my.domain"


# BDII/GIP specific settings
BDII_SITE_TIMEOUT=120
# Each inner layer must answer 5 seconds sooner than the layer querying
# it.  Shell arithmetic expansion replaces the obsolete backticks +
# external `expr` call (same values: 115 and 110, without forking a
# process for a simple subtraction).
BDII_RESOURCE_TIMEOUT=$(( BDII_SITE_TIMEOUT - 5 ))
GIP_RESPONSE=$(( BDII_RESOURCE_TIMEOUT - 5 ))
GIP_FRESHNESS=60
GIP_CACHE_TTL=300
GIP_TIMEOUT=150

# URL of the list of sites used to populate the top-level BDII.
BDII_HTTP_URL="http://grid-deployment.web.cern.ch/grid-deployment/gis/lcg2-bdii/dteam/lcg2-all-sites.conf"

# The Freedom of Choice of Resources service allows a top-level BDII
# to be instructed to remove VO-specific access control lines for
# resources that do not meet the VO requirements 
BDII_FCR=http://lcg-fcr.cern.ch:8083/fcr-data/exclude.ldif 

# Ex.: BDII_REGIONS="CE SE RB PX VOBOX"
BDII_REGIONS="CE SE"    # list of the services provided by the site
# LDAP contact URLs for each publishable service type.
# NOTE(review): CLASSIC_HOST, LFC_HOST, VOBOX_HOST and FTS_HOST are not
# defined in this file, so those URLs expand with an empty host -
# presumably harmless for regions not listed in BDII_REGIONS; verify.
BDII_CE_URL="ldap://$CE_HOST:2135/mds-vo-name=local,o=grid"
BDII_SE_URL="ldap://$CLASSIC_HOST:2135/mds-vo-name=local,o=grid"
BDII_RB_URL="ldap://$RB_HOST:2135/mds-vo-name=local,o=grid"
BDII_PX_URL="ldap://$PX_HOST:2135/mds-vo-name=local,o=grid"
BDII_LFC_URL="ldap://$LFC_HOST:2135/mds-vo-name=local,o=grid"
BDII_VOBOX_URL="ldap://$VOBOX_HOST:2135/mds-vo-name=local,o=grid"
BDII_FTS_URL="ldap://$FTS_HOST:2170/mds-vo-name=resource,o=grid"


# E2EMONIT specific settings
# This specifies the location to download the host specific configuration file
E2EMONIT_LOCATION=grid-deployment.web.cern.ch/grid-deployment/e2emonit/production

#
# Replace this with the siteid supplied by the person setting up the networking 
# topology.
E2EMONIT_SITEID=my.siteid

# VOS="atlas alice gridit lhcb cms dteam biomed"
# Space separated list of supported VOs by your site

#VOS="ops dteam cms gridit glast.org superbvo.org t2k.org vo.londongrid.ac.uk testers.eu-emi.eu"
VOS="ops dteam cms"
# One queue per supported VO.
QUEUES=${VOS}

# For each queue define a _GROUP_ENABLE variable which is a list
# of VO names and VOMS FQANs
# Ex.: MYQUEUE_GROUP_ENABLE="ops atlas cms /VO=cms/GROUP=/cms/Susy"
# In DNS like VO names dots and dashes should be replaced with underscore:
# Ex.: MYQUEUE_GROUP_ENABLE="my.test-queue"
#      MY_TEST_QUEUE_GROUP_ENABLE="ops atlas"

OPS_GROUP_ENABLE="ops"
DTEAM_GROUP_ENABLE="dteam"
CMS_GROUP_ENABLE="cms"

# Base directory for the per-VO experiment software areas defined below.
VO_SW_DIR=/opt/exp_soft

# Set this if you want a scratch directory for jobs
EDG_WL_SCRATCH=""

# VO specific settings. For help see: https://lcg-sft.cern.ch/yaimtool/yaimtool.py
# NOTE(review): atlas, alice and lhcb are not listed in VOS above
# (VOS="ops dteam cms"), so the entries below appear to be kept only
# for reference - confirm before pruning.
VO_ATLAS_SW_DIR=$VO_SW_DIR/atlas
VO_ATLAS_DEFAULT_SE=$CLASSIC_HOST
VO_ATLAS_STORAGE_DIR=$CLASSIC_STORAGE_DIR/atlas

VO_ATLAS_VOMS_POOL_PATH="/lcg1"
VO_ATLAS_VOMS_SERVERS='vomss://voms.cern.ch:8443/voms/atlas?/atlas/'
#VO_ATLAS_VOMS_EXTRA_MAPS="'Role=production production' 'usatlas .usatlas'"
VO_ATLAS_VOMSES="'atlas lcg-voms.cern.ch 15001 /DC=ch/DC=cern/OU=computers/CN=lcg-voms.cern.ch atlas' 'atlas voms.cern.ch 15001 /DC=ch/DC=cern/OU=computers/CN=voms.cern.ch atlas'"
#VO_ATLAS_RBS="atlasrb1.cern.ch atlasrb2.cern.ch"

VO_ALICE_SW_DIR=$VO_SW_DIR/alice
VO_ALICE_DEFAULT_SE=$CLASSIC_HOST
VO_ALICE_STORAGE_DIR=$CLASSIC_STORAGE_DIR/alice

VO_ALICE_VOMS_SERVERS='vomss://voms.cern.ch:8443/voms/alice?/alice/'
VO_ALICE_VOMSES="'alice lcg-voms.cern.ch 15000 /DC=ch/DC=cern/OU=computers/CN=lcg-voms.cern.ch  alice' 'alice voms.cern.ch 15000 /DC=ch/DC=cern/OU=computers/CN=voms.cern.ch alice'"

# cms is in VOS above, so this section is active.
VO_CMS_SW_DIR=$VO_SW_DIR/cms
VO_CMS_DEFAULT_SE=$CLASSIC_HOST
VO_CMS_STORAGE_DIR=$CLASSIC_STORAGE_DIR/cms

VO_CMS_VOMS_SERVERS='vomss://voms.cern.ch:8443/voms/cms?/cms/'
VO_CMS_VOMSES="'cms lcg-voms.cern.ch 15002 /DC=ch/DC=cern/OU=computers/CN=lcg-voms.cern.ch cms' 'cms voms.cern.ch 15002 /DC=ch/DC=cern/OU=computers/CN=voms.cern.ch cms'"
VO_CMS_VOMS_CA_DN="'/DC=ch/DC=cern/CN=CERN Trusted Certification Authority' '/DC=ch/DC=cern/CN=CERN Trusted Certification Authority'"


VO_LHCB_SW_DIR=$VO_SW_DIR/lhcb
VO_LHCB_DEFAULT_SE=$CLASSIC_HOST
VO_LHCB_STORAGE_DIR=$CLASSIC_STORAGE_DIR/lhcb

VO_LHCB_VOMS_SERVERS='vomss://voms.cern.ch:8443/voms/lhcb?/lhcb/'
VO_LHCB_VOMS_EXTRA_MAPS="lcgprod lhcbprod"
VO_LHCB_VOMSES="'lhcb lcg-voms.cern.ch 15003 /DC=ch/DC=cern/OU=computers/CN=lcg-voms.cern.ch lhcb' 'lhcb voms.cern.ch 15003 /DC=ch/DC=cern/OU=computers/CN=voms.cern.ch lhcb'"


# dteam VO (listed in VOS above, so this section is active).
VO_DTEAM_SW_DIR=$VO_SW_DIR/dteam
VO_DTEAM_DEFAULT_SE=$CLASSIC_HOST
VO_DTEAM_STORAGE_DIR=$CLASSIC_STORAGE_DIR/dteam
# Fixed: the variable name was truncated to "M_VOMS_SERVERS", so YAIM
# never saw the dteam VOMS server list.  The correct key follows the
# VO_<VO-NAME>_ convention used by every other entry in this file.
VO_DTEAM_VOMS_SERVERS='vomss://voms.hellasgrid.gr:8443/voms/dteam?/dteam/'
VO_DTEAM_VOMSES="'dteam lcg-voms.cern.ch 15004 /DC=ch/DC=cern/OU=computers/CN=lcg-voms.cern.ch dteam 24' 'dteam voms.cern.ch 15004 /DC=ch/DC=cern/OU=computers/CN=voms.cern.ch dteam 24' 'dteam voms.hellasgrid.gr 15004 /C=GR/O=HellasGrid/OU=hellasgrid.gr/CN=voms.hellasgrid.gr dteam 24' 'dteam voms2.hellasgrid.gr 15004 /C=GR/O=HellasGrid/OU=hellasgrid.gr/CN=voms2.hellasgrid.gr dteam 24'"
VO_DTEAM_VOMS_CA_DN="'/DC=ch/DC=cern/CN=CERN Trusted Certification Authority' '/DC=ch/DC=cern/CN=CERN Trusted Certification Authority' '/C=GR/O=HellasGrid/OU=Certification Authorities/CN=HellasGrid CA 2006' '/C=GR/O=HellasGrid/OU=Certification Authorities/CN=HellasGrid CA 2006'"

# NOTE(review): biomed is not listed in VOS above - presumably kept
# only for reference; confirm before pruning.
VO_BIOMED_SW_DIR=$VO_SW_DIR/biomed
VO_BIOMED_DEFAULT_SE=$CLASSIC_HOST
VO_BIOMED_STORAGE_DIR=$CLASSIC_STORAGE_DIR/biomed

VO_BIOMED_VOMS_SERVERS="vomss://cclcgvomsli01.in2p3.fr:8443/voms/biomed?/biomed/"
VO_BIOMED_VOMSES="biomed cclcgvomsli01.in2p3.fr 15000 /O=GRID-FR/C=FR/O=CNRS/OU=CC-LYON/CN=cclcgvomsli01.in2p3.fr biomed"




# ops VO (listed in VOS above, so this section is active).
VO_OPS_SW_DIR=$VO_SW_DIR/ops
VO_OPS_DEFAULT_SE=$CLASSIC_HOST
VO_OPS_STORAGE_DIR=$CLASSIC_STORAGE_DIR/ops
VO_OPS_VOMS_SERVERS="vomss://voms.cern.ch:8443/voms/ops?/ops/"
# NOTE(review): port 15004 for ops on voms.cern.ch looks like a
# copy-paste from the dteam entry (the lcg-voms.cern.ch entry and the
# commented alternative below use 15009 for ops) - verify the port.
VO_OPS_VOMSES="'ops lcg-voms.cern.ch 15009 /DC=ch/DC=cern/OU=computers/CN=lcg-voms.cern.ch ops 24' 'ops voms.cern.ch 15004 /DC=ch/DC=cern/OU=computers/CN=voms.cern.ch ops 24'"
VO_OPS_VOMS_CA_DN="'/DC=ch/DC=cern/CN=CERN Trusted Certification Authority' '/DC=ch/DC=cern/CN=CERN Trusted Certification Authority'"



#VO_OPS_VOMSES="'ops lcg-voms.cern.ch 15009 /DC=ch/DC=cern/OU=computers/CN=lcg-voms.cern.ch ops' 'ops voms.cern.ch 15004 /DC=ch/DC=cern/OU=computers/CN=voms.cern.ch ops'"


##########
# gridit #
##########
# NOTE(review): gridit is not listed in VOS above; also, this entry uses
# $SE_HOST, which the header comment documents as removed (other VOs use
# $CLASSIC_HOST) - confirm before enabling this VO.
VO_GRIDIT_SW_DIR=$VO_SW_DIR/gridit
VO_GRIDIT_DEFAULT_SE=$SE_HOST
VO_GRIDIT_STORAGE_DIR=$CLASSIC_STORAGE_DIR/gridit
VO_GRIDIT_VOMS_SERVERS="'vomss://voms.cnaf.infn.it:8443/voms/gridit?/gridit' 'vomss://voms-01.pd.infn.it:8443/voms/gridit?/gridit'"
VO_GRIDIT_VOMSES="'gridit voms.cnaf.infn.it 15008 /C=IT/O=INFN/OU=Host/L=CNAF/CN=voms.cnaf.infn.it gridit' 'gridit voms-01.pd.infn.it 15008 /C=IT/O=INFN/OU=Host/L=Padova/CN=voms-01.pd.infn.it gridit'"
VO_GRIDIT_VOMS_CA_DN="'/C=IT/O=INFN/CN=INFN CA' '/C=IT/O=INFN/CN=INFN CA'"



###############
# EMI testbed #
###############
# Fixed: the "VO_TESTERS_EU_EMI_EU_" prefix was missing from every
# variable in this section, so the bare names (SW_DIR, VOMSES, ...)
# were not valid YAIM VO keys.  For DNS-like VO names dots and dashes
# are replaced with underscores (see the MYQUEUE example above):
# testers.eu-emi.eu -> TESTERS_EU_EMI_EU.
VO_TESTERS_EU_EMI_EU_SW_DIR=$VO_SW_DIR/testers.eu-emi.eu
# NOTE(review): $SE_HOST is documented above as removed; other VOs use
# $CLASSIC_HOST - confirm.
VO_TESTERS_EU_EMI_EU_DEFAULT_SE=$SE_HOST
VO_TESTERS_EU_EMI_EU_STORAGE_DIR=$CLASSIC_STORAGE_DIR/testers.eu-emi.eu
VO_TESTERS_EU_EMI_EU_VOMS_SERVERS="'vomss://emitestbed07.cnaf.infn.it:8443/voms/testers.eu-emi.eu?/testers.eu-emi.eu' 'vomss://emitestbed01.cnaf.infn.it:8443/voms/testers.eu-emi.eu?/testers.eu-emi.eu'"
VO_TESTERS_EU_EMI_EU_VOMSES="'testers.eu-emi.eu emitestbed07.cnaf.infn.it 15002 /C=IT/O=INFN/OU=Host/L=CNAF/CN=emitestbed07.cnaf.infn.it testers.eu-emi.eu' 'testers.eu-emi.eu emitestbed01.cnaf.infn.it 15002 /C=IT/O=INFN/OU=Host/L=CNAF/CN=emitestbed01.cnaf.infn.it testers.eu-emi.eu'"
VO_TESTERS_EU_EMI_EU_VOMS_CA_DN="'/C=IT/O=INFN/CN=INFN CA' '/C=IT/O=INFN/CN=INFN CA'"

Once the configuration is ready issue the command:

 
[root@wms003 ~]# /opt/glite/yaim/bin/yaim -c -s /root/site-info_wms003.def -n WMS -n LB 2>&1 | tee /root/siteinfo-wms003_conf_WMS-LB.`hostname -s`.`date +%Y%m%d-%H%M%S`.log
Line: 1830 to 2061
 
INFO
Configuration Complete. [ OK ]
INFO
YAIM terminated succesfully.
Added:
>
>

After a successful configuration it is advisable to comment out the line:

*/5 * * * * root . /usr/libexec/grid-env.sh ; sh /usr/libexec/glite-wms-check-daemons.sh > /dev/null 2>&1

in /etc/cron.d/glite-wms-check-daemons.cron Stop the services with:

[root@wms3 ~]# service gLite stop
*** glite-lb-bkserverd:
Stopping glite-lb-notif-interlogd (16109) ... done
Stopping glite-lb-bkserverd (16056) ... done
glite-jp-importer not running
Stopping glite-lb-interlogd (16146) ... done

*** glite-lb-locallogger:
Stopping glite-lb-logd ... done
Stopping glite-lb-interlogd ... done

*** glite-proxy-renewald:
Stopping ProxyRenewal Daemon: glite-proxy-renewd ... done

*** glite-wms-ice:
stopping ICE... ok

*** glite-wms-jc:
Stopping JobController daemon(s)
        Stopping JobController...                          [  OK  ]
A stale lock file still exists, removing it..              [  OK  ]
        Stopping CondorG...                                [  OK  ]

*** glite-wms-lm:
Stopping LogMonitor...                                     [  OK  ]
A stale lock file still exists, removing it..              [  OK  ]

*** glite-wms-wm:
stopping workload manager... ok

*** glite-wms-wmproxy:

*** globus-gridftp:
Stopping globus-gridftp-server:                            [  OK  ]

Change the variable in /etc/glite-wms/glite_wms.conf

    MaxOutputSandboxSize  =  -1;

with something like:

    MaxOutputSandboxSize  =  55000000;
to limit the maximum output sandbox size to about 50 MB. Restart the services:

[root@wms3 ~]# service gLite start
*** globus-gridftp:
Starting globus-gridftp-server:                            [  OK  ]

*** glite-wms-wmproxy:

*** glite-wms-wm:
starting workload manager... ok

*** glite-wms-lm:
Starting LogMonitor...                                     [  OK  ]

*** glite-wms-jc:
Starting JobController daemon(s)
        Starting JobController...Create jobdir queue object.
                                                           [  OK  ]
        Starting CondorG...                                [  OK  ]

*** glite-wms-ice:
starting ICE... ok

*** glite-proxy-renewald:
Starting ProxyRenewal Daemon: glite-proxy-renewd ... done

*** glite-lb-locallogger:
Starting glite-lb-logd ...This is LocalLogger, part of Workload Management System in EU DataGrid & EGEE.
 done
Starting glite-lb-interlogd ... done

*** glite-lb-bkserverd:
Starting glite-lb-bkserver ... done
Starting glite-lb-notif-interlogd ... done
Starting glite-lb-interlog for proxy ... done
 </>
<--/twistyPlugin-->

-- AndreaCristofori - 2012-01-23 \ No newline at end of file

 
This site is powered by the TWiki collaboration platformCopyright © 2008-2020 by the contributing authors. All material on this collaboration platform is the property of the contributing authors.
Ideas, requests, problems regarding TWiki? Send feedback