| 57 | Options: |
| 58 | -h, --help show this help message and exit |
| 59 | --split=SPLIT Number of sub-jobs to which a job is split |
| 60 | --nFilesPerJob=NFILESPERJOB |
| 61 | Number of files on which each sub-job runs |
| 62 | --nEventsPerJob=NEVENTSPERJOB |
| 63 | Number of events on which each sub-job runs |
| 64 | --nEventsPerFile=NEVENTSPERFILE |
| 65 | Number of events per file |
| 66 | --site=SITE Site name where jobs are sent |
| 67 | (default:ANALY_BNL_ATLAS_1) |
| 68 | --inDS=INDS Name of an input dataset |
| 69 | --minDS=MINDS Dataset name for minimum bias stream |
| 70 | --nMin=NMIN Number of minimum bias files per one signal file |
| 71 | --cavDS=CAVDS Dataset name for cavern stream |
| 72 | --nCav=NCAV Number of cavern files per one signal file |
| 73 | --libDS=LIBDS Name of a library dataset |
| 74 | --beamHaloADS=BEAMHALOADS |
| 75 | Dataset name for beam halo A-side |
| 76 | --beamHaloCDS=BEAMHALOCDS |
| 77 | Dataset name for beam halo C-side |
| 78 | --nBeamHaloA=NBEAMHALOA |
| 79 | Number of beam halo files for A-side per sub job |
| 80 | --nBeamHaloC=NBEAMHALOC |
| 81 | Number of beam halo files for C-side per sub job |
| 82 | --beamGasHDS=BEAMGASHDS |
| 83 | Dataset name for beam gas Hydrogen |
| 84 | --beamGasCDS=BEAMGASCDS |
| 85 | Dataset name for beam gas Carbon |
| 86 | --beamGasODS=BEAMGASODS |
| 87 | Dataset name for beam gas Oxygen |
| 88 | --nBeamGasH=NBEAMGASH |
| 89 | Number of beam gas files for Hydrogen per sub job |
| 90 | --nBeamGasC=NBEAMGASC |
| 91 | Number of beam gas files for Carbon per sub job |
| 92 | --nBeamGasO=NBEAMGASO |
| 93 | Number of beam gas files for Oxygen per sub job |
| 94 | --outDS=OUTDS Name of an output dataset. OUTDS will contain all |
| 95 | output files |
| 96 | --destSE=DESTSE Destination storage element. All outputs go to DESTSE |
| 97 | (default:BNL_ATLAS_2) |
| 98 | --nFiles=NFILES, --nfiles=NFILES |
| 99 | Use a limited number of files in the input dataset |
| 100 | --nSkipFiles=NSKIPFILES |
| 101 | Skip N files in the input dataset |
| 102 | -v Verbose |
| 103 | -l, --long Send job to a long queue |
| 104 | --blong Send build job to a long queue |
| 105 | --cloud=CLOUD cloud where jobs are submitted (default:US) |
| 106 | --noBuild Skip buildJob |
| 107 | --individualOutDS Create individual output dataset for each data-type. |
| 108 | By default, all output files are added to one output |
| 109 | dataset |
| 110 | --noRandom Enter random seeds manually |
| 111 | --memory=MEMORY Required memory size |
| 112 | --official Produce official dataset |
| 113 | --extFile=EXTFILE pathena exports files with some special extensions |
| 114 | (.C, .dat, .py, .xml) in the current directory. If you |
| 115 | want to add other files, specify their names, e.g., |
| 116 | data1.root,data2.doc |
| 117 | --extOutFile=EXTOUTFILE |
| 118 | define extra output files, e.g., |
| 119 | output1.txt,output2.dat |
| 120 | --supStream=SUPSTREAM |
| 121 | suppress some output streams. e.g., ESD,TAG |
| 122 | --noSubmit Don't submit jobs |
| 123 | --generalInput Read input files with general format except |
| 124 | POOL,ROOT,ByteStream |
| 125 | --tmpDir=TMPDIR Temporary directory in which an archive file is |
| 126 | created |
| 127 | --shipInput Ship input files to remote WNs |
| 128 | --noLock Don't create a lock for local database access |
| 129 | --fileList=FILELIST List of files in the input dataset to be run |
| 130 | --myproxy=MYPROXY Name of the myproxy server |
| 131 | --dbRelease=DBRELEASE |
| 132 | DBRelease or CDRelease (DatasetName:FileName). e.g., d |
| 133 | do.000001.Atlas.Ideal.DBRelease.v050101:DBRelease-5.1. |
| 134 | 1.tar.gz |
| 135 | --addPoolFC=ADDPOOLFC |
| 136 | file names to be inserted into PoolFileCatalog.xml |
| 137 | except input files. e.g., MyCalib1.root,MyGeom2.root |
| 138 | --skipScan Skip LRC/LFC lookup at job submission |
| 139 | --inputFileList=INPUTFILELIST |
| 140 | name of file which contains a list of files to be run |
| 141 | in the input dataset |
| 142 | --removeFileList=REMOVEFILELIST |
| 143 | name of file which contains a list of files to be |
| 144 | removed from the input dataset |
| 145 | --corCheck Enable a checker to skip corrupted files |
| 146 | --prestage EXPERIMENTAL : Enable prestager. Make sure that you |
| 147 | are authorized |
| 148 | --novoms don't use VOMS extensions |
| 149 | --useNextEvent Set this option if your jobO uses theApp.nextEvent() |
| 150 | e.g. for G4 |
| 151 | --ara use Athena ROOT Access |
| 152 | --ares use Athena ROOT Access + PyAthena, i.e., use athena.py |
| 153 | instead of python on WNs |
| 154 | --araOutFile=ARAOUTFILE |
| 155 | define output files for ARA, e.g., |
| 156 | output1.root,output2.root |
| 157 | --trf=TRF run transformation, e.g. --trf "csc_atlfast_trf.py %IN |
| 158 | %OUT.AOD.root %OUT.ntuple.root -1 0" |
| 159 | --spaceToken=SPACETOKEN |
| 160 | spacetoken for outputs. e.g., ATLASLOCALGROUPDISK |
| 161 | --notSkipMissing If input files are not read from SE, they will be |
| 162 | skipped by default. This option disables the |
| 163 | functionality |
| 164 | --burstSubmit=BURSTSUBMIT |
| 165 | Please don't use this option. Only for site validation |
| 166 | by experts |
| 167 | --devSrv Please don't use this option. Only for developers to |
| 168 | use the dev panda server |
| 169 | --useAIDA use AIDA |
| 170 | --inputType=INPUTTYPE |
| 171 | File type in input dataset which contains multiple |
| 172 | file types |
| 173 | --mcData=MCDATA Create a symlink with linkName to .dat which is |
| 174 | contained in input file |
| 175 | --pfnList=PFNLIST Name of file which contains a list of input PFNs. |
| 176 | Those files can be un-registered in DDM |
| 177 | --useExperimental use experimental features |
| 178 | -c COMMAND One-liner, runs before any jobOs |
| 179 | -p BOOTSTRAP location of bootstrap file |
| 180 | |
| 181 | }}} |
| 182 | ' |