osg-example.sh

Yen-Chu Chen, 01/24/2018 03:32 PM

 
#!/bin/bash
set -x
echo Start `date`
echo Site:${GLIDEIN_ResourceName}
echo "the worker node is " `hostname` "OS: " `uname -a`
echo "You are running as user `whoami`"

# Always cd to the scratch area!
cd $_CONDOR_SCRATCH_DIR
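# _CONDOR_SCRATCH_DIR is the per-job local scratch directory that HTCondor creates
# on the worker node; everything the job writes should land here and then be copied
# out explicitly (with ifdh, further down) before the job ends.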

IFDH_OPTION=""

# set group based on the EXPERIMENT environment variable set by jobsub
GROUP=$EXPERIMENT

if [ -z "$GROUP" ]; then
    # try to figure out what group the user is in
    GROUP=`id -gn`
fi

case $GROUP in
    e938)
        SCRATCH_DIR=/pnfs/minerva/scratch/users
        ;;
    minerva)
        SCRATCH_DIR=/pnfs/minerva/scratch/users
        ;;
    e875)
        # need to check this
        SCRATCH_DIR=/pnfs/minos/scratch/users
        ;;
    minos)
        # need to check this
        SCRATCH_DIR=/pnfs/minos/scratch/users
        ;;
    mars)
        SCRATCH_DIR=""
        ;;
    lbnemars)
        SCRATCH_DIR="/lbne/data/lbnemars/users/"
        ;;
    marslbne)
        SCRATCH_DIR="/lbne/data/marslbne/users/"
        ;;
    marsmu2e)
        SCRATCH_DIR=""
        ;;
    marsgm2)
        SCRATCH_DIR=""
        ;;
    marsaccel)
        SCRATCH_DIR=""
        ;;
    larrand)
        # pnfs/scene? probably not....
        SCRATCH_DIR=""
        ;;
    nova)
        SCRATCH_DIR=/pnfs/nova/scratch/users
        ;;
    t-962)
        SCRATCH_DIR="/argoneut/data/users"
        ;;
    argoneut)
        SCRATCH_DIR="/argoneut/data/users"
        ;;
    mu2e)
        SCRATCH_DIR=/pnfs/mu2e/scratch/users
        ;;
    microboone)
        SCRATCH_DIR=/pnfs/uboone/scratch/users
        ;;
    uboone)
        SCRATCH_DIR=/pnfs/uboone/scratch/users
        ;;
    lbne)
        SCRATCH_DIR=/pnfs/lbne/scratch/users
        ;;
    seaquest|e906)
        SCRATCH_DIR="/pnfs/e906/scratch/users"
        #IFDH_OPTION="--force=cpn"
        ;;
    coupp)
        SCRATCH_DIR=""
        ;;
    gm2)
        # g-2 does not allow the gm2ana user to write to pnfs, so we have to use blueArc for now
        #SCRATCH_DIR=/gm2/data/users
        SCRATCH_DIR=/pnfs/GM2/scratch/users
        ;;
    t-1034)
        # lariat... no pnfs yet
        SCRATCH_DIR=/pnfs/lariat/scratch/users
        ;;
    lariat)
        SCRATCH_DIR=/pnfs/lariat/scratch/users
        ;;
    darkside)
        SCRATCH_DIR="/pnfs/darkside/scratch/users"
        ;;
    lar1nd)
        SCRATCH_DIR="/pnfs/lar1nd/scratch/users"
        ;;
    lsst)
        SCRATCH_DIR="/pnfs/lsst/scratch/users"
        ;;
    annie)
        SCRATCH_DIR=""
        ;;
    numix)
        SCRATCH_DIR=""
        ;;
    fermilab)
        SCRATCH_DIR="/pnfs/fermilab/volatile"
        #SCRATCH_DIR="/grid/data"
        export CPN_LOCK_GROUP=gpcf
        ;;
esac
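# Note: groups that are not listed above (or whose SCRATCH_DIR is set to "") end up
# with an empty SCRATCH_DIR, so the log file is simply dumped to the job stdout
# further down instead of being copied back.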

voms-proxy-info --all
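# Print the grid proxy and its VOMS attributes; useful when debugging
# authentication or file-permission problems.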

### Force use of SLF6 versions for systems with 3.x kernels
case `uname -r` in
    3.*) export UPS_OVERRIDE="-h Linux64bit+2.6-2.12";;
esac

#source /cvmfs/oasis.opensciencegrid.org/fermilab/products/common/etc/setup
#source /cvmfs/oasis.opensciencegrid.org/fermilab/products/larsoft/setup
#setup ifdhc v1_5_1a -z /cvmfs/oasis.opensciencegrid.org/fermilab/products/common/db

source /cvmfs/fermilab.opensciencegrid.org/products/common/etc/setups
source /cvmfs/fermilab.opensciencegrid.org/products/larsoft/setup
#setup ifdhc v1_8_4 -z /cvmfs/fermilab.opensciencegrid.org/products/common/db
setup ifdhc v2_2_3 -z /cvmfs/fermilab.opensciencegrid.org/products/common/db
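# The ifdhc product provides the ifdh command used below for the ls/cp calls;
# it is set up here from the UPS products area distributed over CVMFS.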

export EXPERIMENT="seaquest"

echo "Here is your environment in this job: " > job_output_${CLUSTER}.${PROCESS}.log
env >> job_output_${CLUSTER}.${PROCESS}.log

echo "group = $GROUP"
#id

# If GRID_USER is not set for some reason, try to get it from the proxy
if [ -z "${GRID_USER}" ]; then
    GRID_USER=`basename $X509_USER_PROXY | cut -d "_" -f 2`
fi

echo "GRID_USER = $GRID_USER"

# let's try an ldd on ifdh
ldd `which ifdh`

sleep $(( ( $RANDOM % 10 ) + 1 ))m
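# The random 1-10 minute sleep above is there to stagger the output copies, so that
# a large batch of jobs finishing at the same time does not all hit the storage
# element at once.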

umask 002

if [ -z "$SCRATCH_DIR" ]; then
    echo "Invalid scratch directory, not copying back"
    echo "I am going to dump the log file to the main job stdout in this case."
    cat job_output_${CLUSTER}.${PROCESS}.log
else
    # Very useful for debugging problems with copies
    export IFDH_DEBUG=1

    # first do an ifdh ls to check if the directory exists
    ifdh ls ${SCRATCH_DIR}/$GRID_USER
    # A non-zero exit value probably means it doesn't exist yet, or does not have
    # group write permission, so send a useful message saying that is probably the issue
    if [ $? -ne 0 ] && [ -z "$IFDH_OPTION" ]; then
        echo "Unable to read ${SCRATCH_DIR}/$GRID_USER. Make sure that you have created this directory and given it group write permission (chmod g+w ${SCRATCH_DIR}/$GRID_USER)."
        exit 74
    else
        # directory already exists, so let's copy
        # (ifdh cp -D treats the last argument as a destination directory)
        ifdh cp -D $IFDH_OPTION job_output_${CLUSTER}.${PROCESS}.log ${SCRATCH_DIR}/${GRID_USER}
        CP_STATUS=$?
        if [ $CP_STATUS -ne 0 ]; then
            echo "Error $CP_STATUS when copying to dCache scratch area!"
            echo "If you created ${SCRATCH_DIR}/${GRID_USER} yourself,"
            echo " make sure that it has group write permission."
            exit 73
        fi
        # copy back the other output files (rootlocation, pythoninfo and mysqldbinfo
        # are assumed to have been produced by an earlier part of the job)
        ifdh cp -D $IFDH_OPTION rootlocation ${SCRATCH_DIR}/${GRID_USER}
        ifdh cp -D $IFDH_OPTION pythoninfo ${SCRATCH_DIR}/${GRID_USER}
        ifdh cp -D $IFDH_OPTION mysqldbinfo ${SCRATCH_DIR}/${GRID_USER}
    fi
fi

echo "End `date`"

exit 0
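
A script like this is normally handed to the grid through the jobsub client rather than run by hand. The lines below are a rough, illustrative sketch only: the group name, job count, lifetime and path are placeholders, and the exact options depend on your experiment and jobsub version.

# submit one copy of the script to run offsite (illustrative only)
jobsub_submit -G seaquest -N 1 --resource-provides=usage_model=OFFSITE --expected-lifetime=2h file:///path/to/osg-example.sh

# check the job and retrieve its logs afterwards (again, illustrative)
jobsub_q --user=$USER
jobsub_fetchlog --jobid=<jobid>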