
osg-example_Ana.sh

Modified for general analysis to run on the grid - Yen-Chu Chen, 02/16/2018 02:04 PM
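This script is meant to be submitted with the FIFE jobsub tools rather than run by hand: jobsub sets the EXPERIMENT, CLUSTER and PROCESS variables used below, and GLIDEIN_ResourceName is defined by the glidein on the worker node. A minimal submission might look like the sketch below (the group, job count and resource options are illustrative assumptions, not part of this page):

  jobsub_submit -G seaquest -N 1 --resource-provides=usage_model=OPPORTUNISTIC file:///path/to/osg-example_Ana.sh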

 
#!/bin/bash
set -x
echo Start  `date`
echo Site:${GLIDEIN_ResourceName}
echo "the worker node is " `hostname` "OS: "  `uname -a`
echo "You are running as user `whoami`"

# Always cd to the scratch area!
cd $_CONDOR_SCRATCH_DIR

IFDH_OPTION=""

# set group based on the EXPERIMENT environment variable set by jobsub
GROUP=$EXPERIMENT

if [ -z "$GROUP" ]; then
    # try to figure out what group the user is in
    GROUP=`id -gn`
fi

SCRATCH_DIR="/pnfs/e906/scratch/users"
#IFDH_OPTION="--force=cpn"

voms-proxy-info --all

### Force use of SLF6 versions for systems with 3.x kernels
case `uname -r` in
    3.*) export UPS_OVERRIDE="-h Linux64bit+2.6-2.12";;
esac

echo "This job is using worker node : " >> job_output_${CLUSTER}.${PROCESS}.log
hostname >> job_output_${CLUSTER}.${PROCESS}.log

#ls /cvmfs/fermilab.opensciencegrid.org/products/common/etc/* >> job_output_${CLUSTER}.${PROCESS}.log
#ls /cvmfs/fermilab.opensciencegrid.org/products/larsoft/* >> job_output_${CLUSTER}.${PROCESS}.log
source /cvmfs/fermilab.opensciencegrid.org/products/common/etc/setups
source /cvmfs/fermilab.opensciencegrid.org/products/larsoft/setup
setup ifdhc v2_2_3 -z /cvmfs/fermilab.opensciencegrid.org/products/common/db
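
# The CVMFS "setups" script sourced above provides the UPS "setup" command;
# ifdhc is the ifdh data-handling client used for the dCache copies below.
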
export EXPERIMENT="seaquest"
export PROGRAM="AnaBackground"
#export PROGRAM="hello"

export DATA="run_015780_R007_tgtDump_1_mix1.root"

echo "setup seaquest software " >> job_output_${CLUSTER}.${PROCESS}.log

source /cvmfs/seaquest.opensciencegrid.org/seaquest/software/current/Distribution/setup/setup.sh

echo "Here is your environment in this job: " >> job_output_${CLUSTER}.${PROCESS}.log
env >> job_output_${CLUSTER}.${PROCESS}.log
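
# Fetch the input ROOT file, the analysis source code, and the executable from
# the user's dCache scratch area into the job's working directory; "ifdh cp -D"
# treats the last argument as a destination directory.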
ifdh cp -D /pnfs/e906/scratch/users/chenyc/roots/$DATA .
ifdh cp -D /pnfs/e906/scratch/users/chenyc/$PROGRAM.cc .

ifdh cp -D /pnfs/e906/scratch/users/chenyc/$PROGRAM .
chmod a+x $PROGRAM

echo "Print the program to log file." >> job_output_${CLUSTER}.${PROCESS}.log
echo "===========================================================================" >> job_output_${CLUSTER}.${PROCESS}.log
cat "$PROGRAM.cc" >> job_output_${CLUSTER}.${PROCESS}.log
if [ $? -ne 0 ]; then
    echo "Error trying to print the program."
    ls -l "$PROGRAM.cc" >> job_output_${CLUSTER}.${PROCESS}.log
fi
echo "===========================================================================" >> job_output_${CLUSTER}.${PROCESS}.log

# To run the program
echo "Checking the working directory: " >> job_output_${CLUSTER}.${PROCESS}.log
ls -l * >> job_output_${CLUSTER}.${PROCESS}.log

echo "" >> job_output_${CLUSTER}.${PROCESS}.log

echo ""
echo "Before running the job ... " >> job_output_${CLUSTER}.${PROCESS}.log

echo "Running ..." >> job_output_${CLUSTER}.${PROCESS}.log
./$PROGRAM $DATA >> job_output_${CLUSTER}.${PROCESS}.log
RC=$?
if [ $RC -ne 0 ]; then
    echo "Problem with running $PROGRAM, exit code: $RC" >> job_output_${CLUSTER}.${PROCESS}.log
fi

# Check what happened.
echo "After running the job ... " >> job_output_${CLUSTER}.${PROCESS}.log
ls -l * >> job_output_${CLUSTER}.${PROCESS}.log

echo "group = $GROUP"
#id

# If GRID_USER is not set for some reason, try to get it from the proxy
if [ -z "${GRID_USER}" ]; then
    GRID_USER=`basename $X509_USER_PROXY | cut -d "_" -f 2`
fi

echo "GRID_USER = $GRID_USER"

# let's try an ldd on ifdh
ldd `which ifdh`

# sleep for a random 1-10 minutes before copying the results back
sleep $(( (RANDOM % 10) + 1 ))m

umask 002

if [ -z "$SCRATCH_DIR" ]; then
    echo "Invalid scratch directory, not copying back"
    echo "I am going to dump the log file to the main job stdout in this case."
    cat job_output_${CLUSTER}.${PROCESS}.log
else

    # Very useful for debugging problems with copies
    export IFDH_DEBUG=1

    # first do ifdh ls to check if the directory exists
    ifdh ls ${SCRATCH_DIR}/$GRID_USER
    # A non-zero exit value probably means it doesn't exist yet, or does not have group write permission,
    # so send a useful message saying that is probably the issue
    if [ $? -ne 0 ] && [ -z "$IFDH_OPTION" ]; then
        echo "Unable to read ${SCRATCH_DIR}/$GRID_USER. Make sure that you have created this directory and given it group write permission (chmod g+w ${SCRATCH_DIR}/$GRID_USER)."
        exit 74
    else
        # directory already exists, so let's copy
        ifdh cp -D $IFDH_OPTION job_output_${CLUSTER}.${PROCESS}.log ${SCRATCH_DIR}/${GRID_USER}/logs
        RC=$?
        if [ $RC -ne 0 ]; then
            echo "Error $RC when copying to dCache scratch area!"
            echo "If you created ${SCRATCH_DIR}/${GRID_USER} yourself,"
            echo " make sure that it has group write permission."
            exit 73
        fi

        ifdh cp -D $IFDH_OPTION roots/* ${SCRATCH_DIR}/${GRID_USER}/roots

        ifdh cp -D $IFDH_OPTION csvs/* ${SCRATCH_DIR}/${GRID_USER}/csvs

        if [ -f "massBackground.root" ]; then
            mv massBackground.root massBackground_${CLUSTER}.${PROCESS}.root
            ifdh cp -D massBackground_${CLUSTER}.${PROCESS}.root ${SCRATCH_DIR}/${GRID_USER}/roots_out
        fi
    fi
fi

echo "End `date`"

exit 0
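
When the job finishes, the log file lands in ${SCRATCH_DIR}/${GRID_USER}/logs and the renamed massBackground_${CLUSTER}.${PROCESS}.root (if produced) in ${SCRATCH_DIR}/${GRID_USER}/roots_out, so those subdirectories (and roots/csvs if used) need to exist and be group-writable before submitting. A quick check from a machine where /pnfs is mounted might look like this (illustrative only; replace <username> with your grid username):

  ls -l /pnfs/e906/scratch/users/<username>/logs
  ls -l /pnfs/e906/scratch/users/<username>/roots_out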