diff --git a/Full/run b/Full/run index 62c1f4ba9c56e6e09e24ca42582f9fd8022640df..a0bb28b98d9c39ce6a7881934b5089cffdc7a88b 100755 --- a/Full/run +++ b/Full/run @@ -1,7 +1,7 @@ #!/bin/zsh -nevents=$1 -id=$2 +nevents=1000 +id=$1 cd $id @@ -13,10 +13,15 @@ py=${cfi}_GEN_SIM.py ls $py -echo "process.RandomNumberGeneratorService.generator.initialSeed = cms.untracked.uint32($id)" >> $py +echo "process.RandomNumberGeneratorService.generator.initialSeed = cms.untracked.uint32($((id+1)))" >> $py cmsRun $py cmsDriver.py step2 --conditions auto:run2_mc -s DIGI:pdigi_valid,L1,DIGI2RAW,HLT:@relval2016 --datatier GEN-SIM-DIGI-RAW-HLTDEBUG -n $nevents --era Run2_2016 --eventcontent FEVTDEBUGHLT --filein file:step1.root --fileout file:step2.root #> step2_TTbar_13+TTbar_13+DIGIUP15+RECOUP15+HARVESTUP15+ALCATTUP15.log 2>&1 + + +cmsDriver.py step2 --conditions auto:run2_mc -s DIGI:pdigi_valid,L1,DIGI2RAW,HLT:@relval2016 --datatier GEN-SIM-DIGI-RAW-HLTDEBUG -n 1000 --era Run2_2016 --eventcontent FEVTDEBUGHLT --filein file:step1.root --fileout file:step2.root > step2_TTbar_13+TTbar_13+DIGIUP15+RECOUP15+HARVESTUP15+ALCATTUP15.log 2>&1 + + cmsDriver.py step3 --runUnscheduled --conditions auto:run2_mc -s RAW2DIGI,L1Reco,RECO,RECOSIM,EI,PAT,VALIDATION:@standardValidation+@miniAODValidation,DQM:@standardDQM+@ExtraHLT+@miniAODDQM --datatier GEN-SIM-RECO,AODSIM,MINIAODSIM,DQMIO -n $nevents --era Run2_2016 --eventcontent RECOSIM,AODSIM,MINIAODSIM,DQM --filein file:step2.root --fileout file:step3.root #> step3_TTbar_13+TTbar_13+DIGIUP15+RECOUP15+HARVESTUP15+ALCATTUP15.log 2>&1 diff --git a/README.md b/README.md index 90fea188c72944b77eb1f9cc070a8458b554af8b..846636e6917c867062ad2f17a09446de1b718f3e 100644 --- a/README.md +++ b/README.md @@ -17,9 +17,17 @@ cmsrel CMSSW_10_6_22 Finally clone this repository in `$CMSSW_BASE/src`. +### In case of change of CMSSW version + +The version of CMSSW may change in the future. 
+To track down the releases for the local architecture, just enter: +``` +ls -1 -d /cvmfs/cms.cern.ch/*/cms/cmssw/CMSSW_10_6_22/src +``` + ## Execution -Load the environement (necessary each time you open a new shell): +Start a new shell (to avoid possible conflicts), and load the environment: ``` source init ``` @@ -46,7 +54,8 @@ Just do: ./parallel ``` No option is necessary. -You can change the number of events +You can change the number of events in the script itself. +Beware that each time you run this command, the former root files are removed. This approach can be useful to ensure that different seed are used for each job (the second option of `run`, which we previously ignored). Always check the occupancy of the local machine with `htop`, and don't go for this option if too many people are on this machine. @@ -58,6 +67,9 @@ Similar, just another script: ./submit ``` This should be the privileged approach for large-scale production. +Here too, remember that each time you rerun the command, you actually remove the former run. + +In case you want to extend the statistics of some existing sample, just clone this repo and run it from scratch. #### Troubleshooting @@ -70,8 +82,16 @@ If your job is on hold and you want to know more: ``` condor_q -global -better-analyze JOBID ``` +(You get the job id when running `condor_q`.) + +If one or several jobs were put on hold and you could fix the issue, you can release them as follows: +``` +condor_release -all +``` If you want to kill all your jobs: ``` condor_rm -all ``` + +Otherwise, consult the [official documentation](https://htcondor.readthedocs.io/en/latest/man-pages/index.html). 
diff --git a/parallel b/parallel index 85fd6d7fe0a605fda684b5b2235cb9a5ae6e75e3..160f63472583f6edfa060c5ac247db802a58e895 100755 --- a/parallel +++ b/parallel @@ -4,8 +4,9 @@ export NJOBS=2 for i in {1..$NJOBS} do - rm -rf $i - mkdir $i - ./run $i & + j=$((i-1)) + rm -rf $j + mkdir $j + ./run $j & done wait diff --git a/submit b/submit index 20151dbbfbc8a3b49695b008564cd50e4d3648cf..338c81f5d128e0991365fee264e26f2891122199 100755 --- a/submit +++ b/submit @@ -1,16 +1,17 @@ #!/bin/zsh -eval `/usr/bin/modulecmd zsh use -a /afs/desy.de/group/cms/modulefiles/` -eval `/usr/bin/modulecmd zsh load cmssw` -eval `scramv1 runtime -sh` +#eval `/usr/bin/modulecmd zsh use -a /afs/desy.de/group/cms/modulefiles/` +#eval `/usr/bin/modulecmd zsh load cmssw` +#eval `scramv1 runtime -sh` export LD_LIBRARY_PATH_STORED=$LD_LIBRARY_PATH export NJOBS=1000 for i in {1..$NJOBS} do - rm -rf $i - mkdir $i + j=$((i-1)) + rm -rf $j + mkdir $j done -condor_submit job +condor_submit -batch-name ${PWD##*/} job