osg-example.sh

Ken Herner's test script for the OSG - Markus Diefenthaler, 09/08/2015 12:27 PM

 
#!/bin/bash
set -x
echo Start `date`
echo Site:${GLIDEIN_ResourceName}
echo "the worker node is " `hostname` "OS: " `uname -a`
echo "You are running as user `whoami`"

# Always cd to the scratch area!
cd $_CONDOR_SCRATCH_DIR

IFDH_OPTION=""

# Set the group based on the EXPERIMENT environment variable set by jobsub
GROUP=$EXPERIMENT

if [ -z "$GROUP" ]; then
    # try to figure out what group the user is in
    GROUP=`id -gn`
fi

case $GROUP in
    e938)
        SCRATCH_DIR=/pnfs/minerva/scratch/users
        ;;
    minerva)
        SCRATCH_DIR=/pnfs/minerva/scratch/users
        ;;
    e875)
        # need to check this
        SCRATCH_DIR=/pnfs/minos/scratch/users
        ;;
    minos)
        # need to check this
        SCRATCH_DIR=/pnfs/minos/scratch/users
        ;;
    mars)
        SCRATCH_DIR=""
        ;;
    lbnemars)
        SCRATCH_DIR="/lbne/data/lbnemars/users/"
        ;;
    marslbne)
        SCRATCH_DIR="/lbne/data/marslbne/users/"
        ;;
    marsmu2e)
        SCRATCH_DIR=""
        ;;
    marsgm2)
        SCRATCH_DIR=""
        ;;
    marsaccel)
        SCRATCH_DIR=""
        ;;
    larrand)
        # pnfs/scene? probably not....
        SCRATCH_DIR=""
        ;;
    nova)
        SCRATCH_DIR=/pnfs/nova/scratch/users
        ;;
    t-962)
        SCRATCH_DIR="/argoneut/data/users"
        ;;
    argoneut)
        SCRATCH_DIR="/argoneut/data/users"
        ;;
    mu2e)
        SCRATCH_DIR=/pnfs/mu2e/scratch/users
        ;;
    microboone)
        SCRATCH_DIR=/pnfs/uboone/scratch/users
        ;;
    uboone)
        SCRATCH_DIR=/pnfs/uboone/scratch/users
        ;;
    lbne)
        SCRATCH_DIR=/pnfs/lbne/scratch/users
        ;;
    seaquest|e906)
        SCRATCH_DIR="/pnfs/e906/scratch/users"
        #IFDH_OPTION="--force=cpn"
        ;;
    coupp)
        SCRATCH_DIR=""
        ;;
    gm2)
        # g-2 does not allow the gm2ana user to write to pnfs so we have to use blueArc for now
        #SCRATCH_DIR=/gm2/data/users
        SCRATCH_DIR=/pnfs/GM2/scratch/users
        ;;
    t-1034)
        # lariat... no pnfs yet
        SCRATCH_DIR=/pnfs/lariat/scratch/users
        ;;
    lariat)
        SCRATCH_DIR=/pnfs/lariat/scratch/users
        ;;
    darkside)
        SCRATCH_DIR="/pnfs/darkside/scratch/users"
        ;;
    lar1nd)
        SCRATCH_DIR="/pnfs/lar1nd/scratch/users"
        ;;
    lsst)
        SCRATCH_DIR="/pnfs/lsst/scratch/users"
        ;;
    annie)
        SCRATCH_DIR=""
        ;;
    numix)
        SCRATCH_DIR=""
        ;;
    fermilab)
        SCRATCH_DIR="/pnfs/fermilab/volatile"
        #SCRATCH_DIR="/grid/data"
        export CPN_LOCK_GROUP=gpcf
        ;;
esac

voms-proxy-info --all

### Force use of SLF6 versions for systems with 3.x kernels
case `uname -r` in
    3.*) export UPS_OVERRIDE="-h Linux64bit+2.6-2.12";;
esac

#source /cvmfs/oasis.opensciencegrid.org/fermilab/products/common/etc/setup
#source /cvmfs/oasis.opensciencegrid.org/fermilab/products/larsoft/setup
#setup ifdhc v1_5_1a -z /cvmfs/oasis.opensciencegrid.org/fermilab/products/common/db

source /cvmfs/fermilab.opensciencegrid.org/products/common/etc/setups
source /cvmfs/fermilab.opensciencegrid.org/products/larsoft/setup
setup ifdhc v1_8_4 -z /cvmfs/fermilab.opensciencegrid.org/products/common/db

echo "Here is your environment in this job: " > job_output_${CLUSTER}.${PROCESS}.log
env >> job_output_${CLUSTER}.${PROCESS}.log

echo "group = $GROUP"

# If GRID_USER is not set for some reason, try to get it from the proxy
if [ -z "${GRID_USER}" ]; then
    GRID_USER=`basename $X509_USER_PROXY | cut -d "_" -f 2`
fi

echo "GRID_USER = $GRID_USER"

# let's try an ldd on ifdh
ldd `which ifdh`

# Sleep a random 1-10 minutes before copying the output back
sleep $(( (RANDOM % 10) + 1 ))m

umask 002

if [ -z "$SCRATCH_DIR" ]; then
    echo "Invalid scratch directory, not copying back"
    echo "I am going to dump the log file to the main job stdout in this case."
    cat job_output_${CLUSTER}.${PROCESS}.log
else
    # Very useful for debugging problems with copies
    export IFDH_DEBUG=1

    # first do an ifdh ls to check if the directory exists
    ifdh ls ${SCRATCH_DIR}/$GRID_USER
    # A non-zero exit value probably means it doesn't exist yet, or does not have group write permission,
    # so send a useful message saying that is probably the issue
    if [ $? -ne 0 ] && [ -z "$IFDH_OPTION" ]; then
        echo "Unable to read ${SCRATCH_DIR}/$GRID_USER. Make sure that you have created this directory and given it group write permission (chmod g+w ${SCRATCH_DIR}/$GRID_USER)."
        exit 74
    else
        # directory already exists, so let's copy
        ifdh cp -D $IFDH_OPTION job_output_${CLUSTER}.${PROCESS}.log ${SCRATCH_DIR}/${GRID_USER}
        # save the copy's exit status before testing it, so the error message reports the right value
        IFDH_RESULT=$?
        if [ $IFDH_RESULT -ne 0 ]; then
            echo "Error $IFDH_RESULT when copying to dCache scratch area!"
            echo "If you created ${SCRATCH_DIR}/${GRID_USER} yourself,"
            echo " make sure that it has group write permission."
            exit 73
        fi
    fi
fi

echo "End `date`"

exit 0
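
Usage sketch (the experiment, username, and submission options below are illustrative assumptions, not taken from the script): the copy-back stage at the end requires that the user's scratch directory already exists and is group-writable, so it would typically be prepared once from an interactive node, for example

    # hypothetical example: NOvA scratch area, user jdoe
    mkdir -p /pnfs/nova/scratch/users/jdoe
    chmod g+w /pnfs/nova/scratch/users/jdoe

The script itself is intended to run as a grid job submitted through the jobsub client (which sets EXPERIMENT and GRID_USER), along the lines of

    # group and options are assumptions; adjust for your experiment
    jobsub_submit -G nova file://osg-example.sh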