#!/bin/bash
#
# This is a resource file for the scripts, in which paths, variables
# and setups are defined.
#
# This setup file is for the machine 'data' in La Palma.
# It is also used for the other machines which share the home directory of 'data'.

# setup to use ftools
export HEADAS=/opt/heasoft-6.11/x86_64-unknown-linux-gnu-libc2.13-0/
export HEADASPROMPT=/dev/null

# setup to use ROOT
root=/opt/root_v5.34.36/bin/thisroot.sh
source $root

# software versions
export factpath=/home/fact/operation # path where the programs of FACT++ are linked
export mars=/home/fact/SW.automatic.processing/Mars.svn.2014.05.26

# site
processingsite=lp
sitekey=3
storagesite=wue

# logging and setup
logpath=/home/fact/logs.automatic.processing/autologs
lockpath=/home/fact/logs.automatic.processing/locks
#setuppath=/magic/simulated/setup

# data paths
anapath=/data1/analysis # on newdata
#anapath=/newdaq/analysis_bu # temporarily on newdaq
drstimepath=$anapath/drs_time_calib
# the *_for_sed variants have all sed metacharacters escaped,
# so the paths can be embedded safely in sed expressions
auxdata=/loc_data/aux
auxdata_for_sed=$(printf "%s\n" "$auxdata" | sed 's/[][\.*^$(){}?+|/]/\\&/g')
rawdata=/loc_data/raw
rawdata_for_sed=$(printf "%s\n" "$rawdata" | sed 's/[][\.*^$(){}?+|/]/\\&/g')
ziprawdata=/loc_data/zipraw
ziprawdata_for_sed=$(printf "%s\n" "$ziprawdata" | sed 's/[][\.*^$(){}?+|/]/\\&/g')
flarealertspath="/home/fact/flare.alerts"

# paths for the Mars macros and scripts
macrospath=$mars/datacenter/macros
scriptspath=$mars/datacenter/scripts

# rc-files
# dependencies of the processing steps
steps=$mars/resources/steps_fact.rc
# files with DB information
sqlrc=$mars/sql.rc
sqlpw=/home/fact/.mysql.pw

# addresses to which information about full disks is sent
deladrs="shift@fact-project.org"
# addresses to which errors are sent
erradrs="dorner@astro.uni-wuerzburg.de"
# addresses to which changes are sent
adrs="dorner@astro.uni-wuerzburg.de"

#
# setup QLA
#

# setup for Step1.sh
# disklimitnewdata: transfer and QLA do not start when less than this
# is available on the disk; with more than 90% disk usage the
# data taking is affected
#disklimitnewdata=90000000
disklimitnewdata=5000000
disklimitnewdata2=500000000
# bandwidth limit for the transfer from newdaq to daq
bwlimit=90000
# number of RunCallisto.sh processes started on daq
numruncallistos=20
#numruncallistos=8

# setup for RunCallisto.sh
# maximum number of callisto.C processes running on daq
# (while this limit is reached, RunCallisto.sh sleeps)
numcallistos=10
#numcallistos=5
# seconds to wait before checking the number of callistos again
callistowait=60

# setup for ZipRawData.sh
numprocpigz=3 # number of processes used for pigz
limitpigz="90M" # limit for the transfer from daq to data

# setup for FillNumEvts.sh and Step3.sh
resulttable1="AnalysisResultsRunLP"
resulttable2="AnalysisResultsNightLP"
firstnight=20121213

# setup for warnings/errors
# print warnings/errors (e.g. for missing files) only after a delay of a
# few days to account for the transfer
transferdelay=3 # days
checknight=$(date +%Y%m%d --date="-${transferdelay}day")
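
# ------------------------------------------------------------------
# Illustrative example (not part of the original setup, kept commented
# out so sourcing this file stays side-effect free): the *_for_sed
# variables are intended to be embedded in sed expressions without
# their slashes or other metacharacters breaking the pattern. A
# hypothetical mapping of a raw file to its zipped counterpart (the
# file name below is invented for the example) could look like this:
#
#   rawfile=/loc_data/raw/2014/05/26/20140526_042.fits
#   zipfile=$(echo "$rawfile" | sed -e "s/^${rawdata_for_sed}/${ziprawdata_for_sed}/")
#   # zipfile is now /loc_data/zipraw/2014/05/26/20140526_042.fits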
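#
# ------------------------------------------------------------------
# Illustrative example (not part of the original setup): checknight
# holds the last night for which warnings/errors are reported. With
# transferdelay=3 and a current date of 2014-05-26, the command
#
#   date +%Y%m%d --date="-3day"
#
# prints 20140523, so data from the three most recent nights are not
# yet reported as missing while their transfer may still be running.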