Project

General

Profile

osg-example_root.sh

Yen-Chu Chen, 02/27/2018 12:49 PM

 
1
#!/bin/bash
#
# osg-example_root.sh
#
# OSG worker-node job script for the SeaQuest (E906) experiment:
#   * sets up the experiment software stack and ifdh client from CVMFS,
#   * picks an input ROOT file from a run list using this job's $PROCESS id,
#   * runs the AnaMain.C ROOT macro on it,
#   * copies the job log and output histogram back to dCache scratch via ifdh.
#
# Environment expected from jobsub/HTCondor glidein:
#   CLUSTER, PROCESS, _CONDOR_SCRATCH_DIR, X509_USER_PROXY
#   EXPERIMENT and GRID_USER are used when set, with fallbacks below.
#
# Exit codes: 71 bad scratch dir cd, 73 log copy-back failed,
#             74 output directory unreadable, 0 otherwise.
set -x

echo Start  "$(date)"
echo "Site:${GLIDEIN_ResourceName}"
echo "the worker node is " "$(hostname)" "OS: " "$(uname -a)"
echo "You are running as user $(whoami)"

# Always cd to the scratch area!
cd "$_CONDOR_SCRATCH_DIR" || exit 71

IFDH_OPTION=""

# Set group based on the EXPERIMENT environment variable set by jobsub;
# fall back to the unix group of the running user.
GROUP=$EXPERIMENT
if [ -z "$GROUP" ]; then
    GROUP=$(id -gn)
fi

# Per-experiment output area in dCache. If $GROUP matches neither arm,
# SCRATCH_DIR stays empty and the copy-back is skipped below.
case $GROUP in
    seaquest|e906)
        SCRATCH_DIR="/pnfs/e906/scratch/users"
        #IFDH_OPTION="--force=cpn"
        ;;
    fermilab)
        SCRATCH_DIR="/pnfs/fermilab/volatile"
        #SCRATCH_DIR="/grid/data"
        export CPN_LOCK_GROUP=gpcf
        ;;
esac

voms-proxy-info --all

### Force use of SLF6 versions for systems with 3.x kernels
case $(uname -r) in
    3.*) export UPS_OVERRIDE="-h Linux64bit+2.6-2.12";;
esac

# All diagnostic output is appended to one log file that is copied back
# to dCache at the end of the job.
LOGFILE="job_output_${CLUSTER}.${PROCESS}.log"

echo "This job is using worker node : " >> "$LOGFILE"
hostname >> "$LOGFILE"

# Set up UPS products and the ifdh data-handling client from CVMFS.
source /cvmfs/fermilab.opensciencegrid.org/products/common/etc/setups
source /cvmfs/fermilab.opensciencegrid.org/products/larsoft/setup
setup ifdhc v2_2_3 -z /cvmfs/fermilab.opensciencegrid.org/products/common/db

export EXPERIMENT="seaquest"
export PROGRAM="AnaMain.C"

echo "setup seaquest software " >> "$LOGFILE"
source /cvmfs/seaquest.opensciencegrid.org/seaquest/software/current/Distribution/setup/setup.sh

# To use root V5.34.28
source "${SEAQUEST_INSTALL_ROOT}/externals/root/root-5.34.28/bin/thisroot.sh"

export LD_LIBRARY_PATH="/cvmfs/seaquest.opensciencegrid.org/seaquest/users/chenyc/:$LD_LIBRARY_PATH"

echo "Here is the your environment in this job: " >> "$LOGFILE"
env >> "$LOGFILE"

# Fetch the run list and select the input file whose first column equals
# PROCESS+1 (i.e. line N of the list is handled by job process N-1).
echo "Getting the data file to process:"
ifdh cp -D /pnfs/e906/scratch/users/chenyc/run_list.txt .

inFile=$(awk -v iN="${PROCESS}" '$1 == iN + 1 {print $2}' run_list.txt)
echo "Input file for process ${PROCESS} is $inFile" >> "$LOGFILE"

echo "Length of input file name is ${#inFile} ." >> "$LOGFILE"

# Only fetch the input when a real file name was found.
# (The original compared the length with the string operator '>', which
# sorts lexicographically, and broke for multi-digit lengths.)
if [ "${#inFile}" -ge 2 ]; then
    echo "Getting $inFile" >> "$LOGFILE"
    ifdh cp -D "/pnfs/e906/scratch/users/chenyc/roots/$inFile" .
else
    echo "No input file defined!!! " >> "$LOGFILE"
fi

# Analysis macro, sources and pre-built shared library.
ifdh cp -D /pnfs/e906/scratch/users/chenyc/AnaBackground.C .
ifdh cp -D /pnfs/e906/scratch/users/chenyc/AnaBackground.h .
ifdh cp -D /pnfs/e906/scratch/users/chenyc/AnaBackground_C.so .
ifdh cp -D "/pnfs/e906/scratch/users/chenyc/$PROGRAM" .

echo "Print the program to log file." >> "$LOGFILE"
echo "===========================================================================" >> "$LOGFILE"
if ! cat "$PROGRAM" >> "$LOGFILE"; then
    echo "Error trying to print the program."
    ls -l "$PROGRAM" >> "$LOGFILE"
fi
echo "===========================================================================" >> "$LOGFILE"

# To run the program
echo "Checking the working directory: " >> "$LOGFILE"
ls -l * >> "$LOGFILE"

echo "" >> "$LOGFILE"
echo "which root ?" >> "$LOGFILE"
which root >> "$LOGFILE"

echo ""
echo "Before running the job ... " >> "$LOGFILE"
# Run the ROOT macro only if the input file actually arrived.
# NB: the original unquoted test '[ -f $inFile ]' was always TRUE when
# $inFile was empty (one-argument string test), so ROOT ran on nothing.
if [ -n "$inFile" ] && [ -f "$inFile" ]; then
    echo "Running ..." >> "$LOGFILE"
    root -l -q -b -n "AnaMain.C(\"$inFile\")" >> "$LOGFILE"
    echo "Problem with running the $PROGRAM ? $?" >> "$LOGFILE"
else
    echo "No input file to run on!"
fi

# Check what happened.
echo "After running the job ... " >> "$LOGFILE"
ls -l * >> "$LOGFILE"

echo "group = $GROUP"

# If GRID_USER is not set for some reason, try to get it from the proxy
# file name (jobsub proxies are named like x509up_<user>_... ).
if [ -z "${GRID_USER}" ]; then
    GRID_USER=$(basename "$X509_USER_PROXY" | cut -d "_" -f 2)
fi

echo "GRID_USER = $GRID_USER"

# let's try an ldd on ifdh
ldd "$(which ifdh)"

# Stagger the copy-back of concurrently-finishing jobs by 1-10 minutes
# so the dCache door is not hit by all of them at once.
sleep "$(( (RANDOM % 10) + 1 ))m"

umask 002

if [ -z "$SCRATCH_DIR" ]; then
    echo "Invalid scratch directory, not copying back"
    echo "I am going to dump the log file to the main job stdout in this case."
    cat "$LOGFILE"
else
    # Very useful for debugging problems with copies
    export IFDH_DEBUG=1

    # First do an ifdh ls to check if the output directory exists.
    # A non-zero exit value probably means it doesn't exist yet, or does
    # not have group write permission, so send a useful message saying
    # that is probably the issue.
    ifdh ls "${SCRATCH_DIR}/$GRID_USER"
    ls_status=$?
    # NB: the original '[ $? -ne 0 && -z "$IFDH_OPTION" ]' was a syntax
    # error ('[' has no '&&'), so this check never actually ran.
    if [ "$ls_status" -ne 0 ] && [ -z "$IFDH_OPTION" ]; then
        echo "Unable to read ${SCRATCH_DIR}/$GRID_USER. Make sure that you have created this directory and given it group write permission (chmod g+w ${SCRATCH_DIR}/$GRID_USER)."
        exit 74
    else
        # Directory already exists, so let's copy the log back.
        # $IFDH_OPTION is deliberately unquoted: it is empty or a single
        # option flag, and an empty quoted "" would become a bogus argument.
        ifdh cp -D $IFDH_OPTION "$LOGFILE" "${SCRATCH_DIR}/${GRID_USER}/logs"
        cp_status=$?
        if [ "$cp_status" -ne 0 ]; then
            # Report the real ifdh exit code (the original printed $? of
            # the preceding test, which was always 0 here).
            echo "Error $cp_status when copying to dCache scratch area!"
            echo "If you created ${SCRATCH_DIR}/${GRID_USER} yourself,"
            echo " make sure that it has group write permission."
            exit 73
        fi

        # Copy back the output histogram, tagged with cluster/process ids.
        if [ -f "massBackground.root" ]; then
            mv massBackground.root "massBackground_${CLUSTER}.${PROCESS}.root"
            ifdh cp -D "massBackground_${CLUSTER}.${PROCESS}.root" "${SCRATCH_DIR}/${GRID_USER}/roots_out"
        fi
    fi
fi

echo "End $(date)"

exit 0