Testing the Installation
To check the installation, run the foamInstallationTest script (located in the OpenFOAM-2.0.1/bin directory).
If no problems are reported, proceed to getting started with
OpenFOAM; otherwise, go back and check you have installed the software correctly
and/or contact your system administrator.
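For example, once the OpenFOAM etc/bashrc profile has been sourced, the script should already be on the PATH and can be run directly (otherwise call it with its full path under the installation directory):
$ foamInstallationTest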
Getting Started
Create a project directory within the $HOME/OpenFOAM directory named <user>-2.0.1 (where <user> is your login name) and create a directory named run within it. With the OpenFOAM environment sourced, the $FOAM_RUN variable already points to this run directory, so it can be created by typing:
$ mkdir -p $FOAM_RUN
Copy the tutorials directory from the OpenFOAM distribution to the run directory. If the OpenFOAM environment variables are set correctly, the following command will do this:
$ cp -r $FOAM_TUTORIALS $FOAM_RUN
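If in doubt, you can verify that these environment variables are actually defined (they are set when the OpenFOAM etc/bashrc profile is sourced); the exact paths depend on your installation:
$ echo $FOAM_TUTORIALS
$ echo $FOAM_RUN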
Run the first example case of incompressible laminar flow in a cavity:
$ cd $FOAM_RUN/tutorials/incompressible/icoFoam/cavity
$ blockMesh
$ icoFoam
$ paraFoam
Running OpenFOAM in Parallel
Create a new test case
The results from the previous example were generated on a fairly coarse mesh. In this new case we demonstrate the parallel processing capability of OpenFOAM, using access to multiple processors.
$ cd $FOAM_RUN/tutorials/incompressible/icoFoam/cavity/system
$ cat decomposeParDict
/*--------------------------------*- C++ -*----------------------------------*\
| =========                 |                                                 |
| \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox           |
|  \\    /   O peration     | Version:  1.6                                   |
|   \\  /    A nd           | Web:      www.OpenFOAM.org                      |
|    \\/     M anipulation  |                                                 |
\*---------------------------------------------------------------------------*/
FoamFile
{
    version     2.0;
    format      ascii;
    class       dictionary;
    location    "system";
    object      decomposeParDict;
}

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
numberOfSubdomains  4;        // <== Change Here!

method              scotch;

simpleCoeffs
{
    n               ( 2 2 1 );
    delta           0.001;
}

hierarchicalCoeffs
{
    n               ( 1 1 1 );
    delta           0.001;
    order           xyz;
}

scotchCoeffs
{
    processorWeights
    (
        1                     // <== Add Here!
        1                     // <== Add Here!
        1                     // <== Add Here!
        1                     // <== Add Here!
    );
}

manualCoeffs
{
    dataFile        "";
}

distributed         no;

roots               ( );
// ************************************************************************* //
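With four subdomains defined, the case can also be decomposed and run in parallel on the local machine. A minimal sketch, assuming Open MPI's mpirun is on the PATH (decomposePar and reconstructPar are standard OpenFOAM utilities); note that this adds processor* and time directories to the case, so do it on a copy if you want the archive created in the next step to stay clean:
$ cd $FOAM_RUN/tutorials/incompressible/icoFoam/cavity
$ decomposePar
$ mpirun -np 4 icoFoam -parallel
$ reconstructPar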
From the directory containing the cavity case (e.g. $FOAM_RUN/tutorials/incompressible/icoFoam), create a tar archive of the test case:
$ tar zcvf cavity.tar.gz cavity
cavity/
cavity/system/
cavity/system/fvSchemes
cavity/system/fvSolution
cavity/system/controlDict
cavity/system/decomposeParDict
cavity/constant/
cavity/constant/transportProperties
cavity/constant/polyMesh/
cavity/constant/polyMesh/boundary
cavity/constant/polyMesh/blockMeshDict
cavity/0/
cavity/0/U
cavity/0/p
Testing OpenFOAM in a Grid Infrastructure
This section provides some hints for testing OpenFOAM jobs on the GRIDIT infrastructure.
Post-configuration on the LSF master node and client nodes
Add the following line to the /etc/bashrc file to source the OpenFOAM bashrc profile:
. /opt/exp_soft/gridit/OpenFOAM/OpenFOAM-2.0.1/etc/bashrc
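To verify that the profile is picked up on the worker nodes, a quick check is to make sure the OpenFOAM binaries resolve there; wn001 below is only a placeholder for one of your LSF client nodes:
$ ssh wn001 'source /etc/bashrc; which icoFoam'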
Creation of a new MPI wrapper
Add the definition of a new MPI wrapper for OpenFOAM-2.0.1 in /opt/i2g/etc/mpi-start/. This wrapper has to be replicated on all the LSF client nodes.
$ cat /opt/i2g/etc/mpi-start/openmpi_openfoam.mpi
#!/bin/sh
#
# Copyright (c) 2006-2007 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# (c) 2009 Instituto de Fisica de Cantabria - CSIC.
#
# specifies where Open MPI is installed
export MPI_OPENMPI_PATH=/opt/exp_soft/gridit/OpenFOAM/ThirdParty-2.0.1/platforms/linux64Gcc44/openmpi-1.5.3/
export MPI_OPENMPI_MPIEXEC=/opt/exp_soft/gridit/OpenFOAM/ThirdParty-2.0.1/platforms/linux64Gcc44/openmpi-1.5.3/bin/mpiexec
if test "x$I2G_OPENMPI_PREFIX" = "x" ; then
if test "x$MPI_OPENMPI_PATH" = "x" ; then
if test "x$MPI_START_MPI_PREFIX" != "x" ; then
export I2G_OPENMPI_PREFIX=$MPI_START_MPI_PREFIX
else
export I2G_OPENMPI_PREFIX=/opt/i2g/openmpi
debug_msg "use default installtion : $I2G_OPENMPI_PREFIX"
fi
else
export I2G_OPENMPI_PREFIX=$MPI_OPENMPI_PATH
debug_msg "use user provided prefix : $MPI_OPENMPI_PATH"
fi
else
debug_msg "use user provided prefix : $I2G_OPENMPI_PREFIX"
fi
# activate MPI
mpi_start_activate_mpi $I2G_OPENMPI_PREFIX "$MPI_START_MPI_MODULE"
# add necessary PATH to the environment variables
#debug_msg "prepend Open MPI to PATH and LD_LIBRARY_PATH"
export PATH=$I2G_OPENMPI_PREFIX/bin:$PATH
export LD_LIBRARY_PATH=$I2G_OPENMPI_PREFIX/lib:$LD_LIBRARY_PATH
if test "x$I2G_MPI_TYPE" != "xopenmpi" ; then
# we are not the primary MPI
# fall back to save settings that should work always
debug_msg ""
debug_msg "disable PBS, SGE"
OPENMPI_PARAMS="-mca pls ^tm,gridengine -mca ras ^tm,gridengine "
#OPENMPI_PARAMS="$OPENMPI_PARAMS -x PACX_DEBUG_NODE=$PACX_DEBUG_NODE"
debug_msg "export GLOBUS_TCP_PORT_RANGE : $GLOBUS_TCP_PORT_RANGE"
OPENMPI_PARAMS="$OPENMPI_PARAMS -x GLOBUS_TCP_PORT_RANGE "
fi
#
# start an mpi job
#
mpi_exec () {
    # handle Open MPI 1.2.2 + PBS bug
    if test "x$PBS_NODEFILE" = "x" ; then
        debug_msg "found openmpi and a non-PBS batch system, set machinefile and np parameters"
        export I2G_MACHINEFILE_AND_NP="-machinefile $MPI_START_MACHINEFILE -np $I2G_MPI_NP"
    else
        debug_msg "found openmpi and PBS, don't set machinefile"
        export I2G_MACHINEFILE_AND_NP="-np $I2G_MPI_NP"
    fi
    # set the parameters to be always used with Open MPI:
    MPI_SPECIFIC_PARAMS="-wdir $PWD "
    # check for Marmot
    if test "x$I2G_USE_MARMOT" = "x1" ; then
        debug_msg "export LD_PRELOAD for Open MPI"
        MPI_SPECIFIC_PARAMS="$MPI_SPECIFIC_PARAMS -x LD_PRELOAD -x MARMOT_MAX_TIMEOUT_DEADLOCK -x MARMOT_LOGFILE_PATH"
    fi
    #if test "x$I2G_USE_MPITRACE" = "x1" ; then
    #    MPI_SPECIFIC_PARAMS="-x MPITRACE_ON -x MPTRACE_DIR"
    #fi
    # check for user supplied mpiexec
    MPIEXEC=`which mpiexec`
    if test "x$MPI_OPENMPI_MPIEXEC" != "x" ; then
        MPIEXEC="$MPI_OPENMPI_MPIEXEC $I2G_MPI_MPIEXEC_PARAMS"
        debug_msg "using user supplied startup : '$MPIEXEC'"
        MPI_SPECIFIC_PARAMS="$MPI_SPECIFIC_PARAMS -x X509_USER_PROXY --prefix $I2G_OPENMPI_PREFIX $OPENMPI_PARAMS"
        . $MPI_START_PREFIX/../etc/mpi-start/generic_mpiexec.sh
        generic_mpiexec
    elif test "x$MPI_OPENMPI_MPIRUN" != "x" ; then
        MPIEXEC="$MPI_OPENMPI_MPIRUN $MPI_OPENMPI_MPIRUN_PARAMS"
        debug_msg "using user supplied startup : '$MPIEXEC'"
        MPI_SPECIFIC_PARAMS="$MPI_SPECIFIC_PARAMS -x X509_USER_PROXY --prefix $I2G_OPENMPI_PREFIX $OPENMPI_PARAMS"
        . $MPI_START_PREFIX/../etc/mpi-start/generic_mpiexec.sh
        generic_mpiexec
    else
        MPI_SPECIFIC_PARAMS="$MPI_SPECIFIC_PARAMS -x X509_USER_PROXY --prefix $I2G_OPENMPI_PREFIX $OPENMPI_PARAMS"
        . $MPI_START_PREFIX/../etc/mpi-start/generic_mpiexec.sh
        generic_mpiexec
    fi
    return $?
}
mpi_start () {
    . $MPI_START_PREFIX/../etc/mpi-start/generic_mpi_start.sh
    generic_mpi_start
    return $?
}
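mpi-start loads its MPI plugin from a file named after the requested flavour, so the flavour passed to the job (OPENMPI_OPENFOAM in the parallel JDL below, lower-cased by the wrapper script) must match the file name openmpi_openfoam.mpi. A quick sanity check on a client node, assuming the plugin directory used above:
$ ls /opt/i2g/etc/mpi-start/openmpi_openfoam.mpi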
JDL & script files (sequential mode)
This is an example of a JDL file that can be used to test OpenFOAM in sequential mode:
[
Type = "Job";
JobType = "Normal";
Executable = "/bin/bash";
Arguments = "start_openfoam.sh";
StdOutput = "log.out";
StdError = "log.err";
InputSandbox = {"start_openfoam.sh"};
OutputSandbox = {"log.err","log.out","openfoam.log","openfoam.err"};
Requirements = Member("VO-gridit-OpenFoam-2.0.1",other.GlueHostApplicationSoftwareRunTimeEnvironment);
Rank = (other.GlueCEStateWaitingJobs == 0 ? other.GlueCEStateFreeCPUs : -other.GlueCEStateWaitingJobs);
]
This is the bash script sent via the InputSandbox together with the JDL file:
#!/bin/sh
echo "+ Running OpenFoam-2.0.1 on "`hostname -f` as `whoami`
echo;echo "+ Copying the OpenFoam example..."
cp -R $FOAM_TUTORIALS/incompressible/icoFoam/cavity $PWD
cd $PWD/cavity
echo; echo "+ Starting at "`date`
blockMesh
icoFoam >./openfoam.log 2>./openfoam.err
cp openfoam.* ../
# Testing the scratch area
cp openfoam.* $VO_GRIDIT_SW_DIR/scratch
chmod a+w $VO_GRIDIT_SW_DIR/scratch/openfoam.*
echo "+ Done at "`date`
Submission
$ glite-wms-job-submit -a -r grid012.ct.infn.it:2119/jobmanager-lcglsf-short \
    -e https://wmsdecide.dir.garr.it:7443/glite_wms_wmproxy_server \
    -o jobID \
    openfoam-2.0.1.jdl
Connecting to the service https://wmsdecide.dir.garr.it:7443/glite_wms_wmproxy_server
===================== glite-wms-job-submit Success =====================
The job has been successfully submitted to the WMProxy
Your job identifier is:
https://lb-4.dir.garr.it:9000/vUAJk3Sq3xBnviahz7HSqg
The job identifier has been saved in the following file:
/home/larocca/OpenFoam-2.0.1/jobID
=====================================================================
Checking Status
$ glite-wms-job-status --noint -i jobID
====================== glite-wms-job-status Success ====================
BOOKKEEPING INFORMATION:
Status info for the Job : https://lb-4.dir.garr.it:9000/vUAJk3Sq3xBnviahz7HSqg
Current Status: Done (Success)
Exit code: 0
Status Reason: Job terminated successfully
Destination: grid012.ct.infn.it:2119/jobmanager-lcglsf-short
Submitted: Fri Dec 2 16:59:56 2011 CET
=====================================================================
Downloading Results
$ glite-wms-job-output --dir . --noint -i jobID
Connecting to the service https://wmsdecide.dir.garr.it:7443/glite_wms_wmproxy_server
===============================================================================
JOB GET OUTPUT OUTCOME
Output sandbox files for the job:
https://lb-4.dir.garr.it:9000/vUAJk3Sq3xBnviahz7HSqg
have been successfully retrieved and stored in the directory:
/home/larocca/OpenFoam-2.0.1/larocca_vUAJk3Sq3xBnviahz7HSqg
================================================================================
JDL & script files (parallel mode)
This is an example of a JDL file that can be used to test OpenFOAM in parallel mode:
[
JobType = "NORMAL";
CPUNumber = 4;
Executable = "mpi-start-wrapper.sh";
Arguments = "icoFoam OPENMPI_OPENFOAM";
StdOutput = "mpi-start.out";
StdError = "mpi-start.err";
InputSandbox = {"mpi-start-wrapper.sh","bei-hooks.sh","cavity.tar.gz"};
OutputSandbox = {"mpi-start.err","mpi-start.out","results.tgz"};
Environment = {"I2G_MPI_PRE_RUN_HOOK=./bei-hooks.sh","I2G_MPI_POST_RUN_HOOK=./bei-hooks.sh"};
Requirements = Member("VO-gridit-OpenFoam-2.0.1",other.GlueHostApplicationSoftwareRunTimeEnvironment);
]
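Note that CPUNumber in the JDL should match numberOfSubdomains in the decomposeParDict packed inside cavity.tar.gz; a quick check before (re)creating the archive:
$ grep numberOfSubdomains cavity/system/decomposeParDict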
The mpi-start wrapper script and the hooks script are shown below:
$ cat mpi-start-wrapper.sh
#!/bin/bash
# Pull in the arguments.
MY_EXECUTABLE=$1
MPI_FLAVOR=$2
# Convert flavor to lowercase for passing to mpi-start.
MPI_FLAVOR_LOWER=`echo $MPI_FLAVOR | tr '[:upper:]' '[:lower:]'`
# Pull out the correct paths for the requested flavor.
eval MPI_PATH=`printenv MPI_${MPI_FLAVOR}_PATH`
# Ensure the prefix is correctly set. Don't rely on the defaults.
#eval I2G_${MPI_FLAVOR}_PREFIX=$MPI_PATH
#export I2G_${MPI_FLAVOR}_PREFIX
# Touch the executable. It must exist for the shared file system check.
# If it does not, then mpi-start may try to distribute the executable
# when it shouldn't.
touch $MY_EXECUTABLE
# Setup for mpi-start.
export I2G_MPI_APPLICATION=$MY_EXECUTABLE
export I2G_MPI_APPLICATION_ARGS="-parallel"
export I2G_MPI_TYPE=$MPI_FLAVOR_LOWER
export I2G_MPI_PRE_RUN_HOOK=$PWD/bei-hooks.sh
export I2G_MPI_POST_RUN_HOOK=$PWD/bei-hooks.sh
export I2G_MPI_START_HOOKS_LOCAL=bei-hooks.sh
export I2G_MPI_FILE_DIST="ssh"
# If these are set then you will get more debugging information.
export I2G_MPI_START_VERBOSE=1
export I2G_MPI_START_TRACE=1
export I2G_MPI_START_DEBUG=1
# Invoke mpi-start.
$I2G_MPI_START
source $I2G_MPI_START_HOOKS_LOCAL
$ cat bei-hooks.sh
#!/bin/sh
export OUTPUT_DIR=cavity
export OUTPUT_ARCHIVE=results.tgz
export OUTPUT_SE=lfn:/grid/gridit/ebogdan
export OUTPUT_VO=gridit
export STORAGE_HOST=atlasse.lnf.infn.it
export JOBID=$(echo ${GRID_JOBID} | awk '{ l=length($1); s=substr($1,l-21); print s }')
export GLOBUS_TCP_PORT_RANGE=20000,25000
export MPI_START_SHARED_FS=0
pre_run_hook () {
    echo; echo "=============================================================="
    echo "- Starting OpenFoam (ver.2.0.1) ~ the Open Source CFD toolbox "
    echo
    echo "OpenFOAM (Open Source Field Operation and Manipulation) is a C++"
    echo "toolbox for the development of customized numerical solvers and"
    echo "pre-/post-processing utilities for the solution of continuum"
    echo "mechanics problems, including computational fluid dynamics (CFD)."
    echo "Official OpenFOAM web site http://www.openfoam.com/"
    echo
    echo "- [Job settings]"
    echo "- JOBID="$JOBID
    echo "- MPI_START_SHARED_FS="$MPI_START_SHARED_FS
    echo "- MPI_SHARED_HOME="$MPI_SHARED_HOME
    echo "- MPI_SHARED_HOME_PATH="$MPI_SHARED_HOME_PATH
    echo "- OUTPUT_DIR="$OUTPUT_DIR
    echo "- OUTPUT_ARCHIVE="$OUTPUT_ARCHIVE
    echo "- OUTPUT_VO="$OUTPUT_VO
    echo "- STORAGE_HOST="$STORAGE_HOST
    echo
    echo "=[PRE_RUN_HOOK] Started"
    echo "- Downloading tar archive from SE..."
    #echo lcg-cp --vo $OUTPUT_VO $OUTPUT_SE/pitz.tgz file:$PWD/pitz.tgz
    #lcg-cp --vo $OUTPUT_VO $OUTPUT_SE/pitz.tgz file:$PWD/pitz.tgz
    tar zxf cavity.tar.gz
    echo; echo "- Summarizing disk usage of each FILE, recursively for directories"
    du -h cavity
    cd $OUTPUT_DIR
    echo; echo "=[blockMesh] ==========================================="
    blockMesh
    echo; echo "=[decomposePar] ========================================"
    decomposePar
    echo "=[PRE_RUN_HOOK] Finished"
    return 0
}
# the first parameter is the name of a host in the machine file
copy_from_remote_node() {
    if [[ $1 == `hostname -a` || $1 == `hostname -f` || $1 == "localhost" ]]; then
        echo "I skip the local host: " $1
        return 1
    fi
    # copy the results data to the master node
    CMD="scp -r $1:\"$PWD\" .."
    #CMD="scp -r $1:\"$PWD\" ./$1"
    echo "- Copying file from remote host [" $1 "]"
    echo $CMD
    $CMD
}
post_run_hook () {
    echo "=[POST_RUN_HOOK] Started"
    #echo "post_run_hook called"
    if [ "x$MPI_START_SHARED_FS" = "x0" ] ; then
        echo "- Gathering output results from remote hosts [FS=0]"
        mpi_start_foreach_host copy_from_remote_node
    fi
    echo; echo "- Working directory => "$PWD
    echo "- Listing data from the master [ "`hostname -f`" ] node..."
    ls -al $PWD
    echo; echo "- Summarizing disk usage of each FILE, recursively for directories"
    du -h $PWD
    echo; echo "- Listing the contents of directories in a tree-like format"
    tree -L 3
    echo "- Packing the final results data"
    #cd $OUTPUT_DIR
    cd ..
    tar czf $OUTPUT_ARCHIVE $OUTPUT_DIR
    if [ $? -eq 0 ] ; then
        echo; echo "- Uploading the data to the grid SE"
        echo lcg-cr --vo $OUTPUT_VO -d $STORAGE_HOST -l $OUTPUT_SE/results_$JOBID.tgz file:$PWD/$OUTPUT_ARCHIVE
        lcg-cr --vo $OUTPUT_VO -d $STORAGE_HOST -l $OUTPUT_SE/results_$JOBID.tgz file:$PWD/$OUTPUT_ARCHIVE
    else
        echo "=[ERROR] Some problems occurred while packing the data. Please check the log file for more details"
    fi
    echo "=[POST_RUN_HOOK] Finished"
    return 0
}
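Once the parallel job has finished, the packed results can be retrieved from the storage element with lcg-cp. A sketch, where <JOBID> stands for the short job identifier printed by the hooks (the last part of the grid job ID):
$ lcg-cp --vo gridit lfn:/grid/gridit/ebogdan/results_<JOBID>.tgz file:$PWD/results.tgz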