source: fact/tools/pyscripts/pyfact/pyfact.py@ 13512

Last change on this file since 13512 was 13512, checked in by neise, 12 years ago
using now class FactFits instead of class fits
  • Property svn:executable set to *
File size: 30.0 KB
Line 
#!/usr/bin/python -tt
#
# Werner Lustermann, Dominik Neise
# ETH Zurich, TU Dortmund
#
# --- standard library ---
import ctypes  # needed: SlowData uses ctypes.c_ulong, ctypes._SimpleCData etc.
from ctypes import *
import pprint # for SlowData

# --- third party ---
import numpy as np
from scipy import signal

# get the ROOT stuff + my shared libs
from ROOT import gSystem
# factfits_h.so is made from factfits.h and is used to access the data
# make sure the location of factfits_h.so is in LD_LIBRARY_PATH.
# having it in PYTHONPATH is *not* sufficient
gSystem.Load('factfits_h.so')
from ROOT import *
18
class RawDataFeeder( object ):
    """ Wrapper class for RawData class
    capable of iterating over multiple RawData Files
    """

    def __init__(self, filelist):
        """ *filelist* list of files to iterate over
        the list should contain tuples, or sublists of two filenames
        the first should be a data file (\*.fits.gz)
        the second should be an amplitude calibration file(\*.drs.fits.gz)
        """
        # sanity check for input
        if not isinstance(filelist, list):
            raise TypeError('filelist should be a list')
        for entry in filelist:
            if len(entry) != 2:
                raise TypeError('the entries of filelist should have length == 2')
            for path in entry:
                if not isinstance(path, str):
                    raise TypeError('the entries of filelist should be path, i.e. of type str()')
                #todo check if 'path' is a valid path
                # else: throw an Exception, or Warning?

        # work on a copy, so the caller's list is not emptied as a side effect
        # (was: 'del filelist[0]' mutated the caller's list)
        self.filelist = list(filelist)
        self._current_RawData = RawData(self.filelist[0][0], self.filelist[0][1], return_dict=True)
        del self.filelist[0]

    def __iter__(self):
        return self

    def next(self):
        """ Method being called by the iterator.
        Since the RawData Objects are simply looped over, the event_id from the
        RawData object will not be unique.
        Each RawData obejct will start with event_id = 1 as usual.

        (fixed: 'self' parameter was missing; 'filelist' was referenced
        instead of 'self.filelist'; after opening the next file no event
        was returned, so the caller received None)
        """
        try:
            return self._current_RawData.next()
        except StopIteration:
            # current_RawData was completely processed
            # delete it (I hope this calls the destructor of the fits file and/or closes it)
            del self._current_RawData
            # and remake it, if possible
            if len(self.filelist) > 0:
                self._current_RawData = RawData(self.filelist[0][0], self.filelist[0][1], return_dict=True)
                del self.filelist[0]
                # deliver the first event of the freshly opened file
                return self._current_RawData.next()
            else:
                raise
67
68
69
70class RawData( object ):
71 """ raw data access and calibration
72
73 - open raw data file and drs calibration file
74 - performs amplitude calibration
75 - performs baseline substraction if wanted
76 - provides all data in an array:
77 row = number of pixel
78 col = length of region of interest
79
80 """
81
82
83 def __init__(self, data_file_name, calib_file_name,
84 user_action_calib=lambda acal_data, data, blm, tom, gm, scells, nroi: None,
85 baseline_file_name='',
86 return_dict = None,
87 do_calibration = True):
88 """ initialize object
89
90 open data file and calibration data file
91 get basic information about the data in data_file_name
92 allocate buffers for data access
93
94 data_file_name : fits or fits.gz file of the data including the path
95 calib_file_name : fits or fits.gz file containing DRS calibration data
96 baseline_file_name : npy file containing the baseline values
97 """
98 self.__module__='pyfact'
99 # manual implementation of default value, but I need to find out
100 # if the user of this class is aware of the new option
101 if return_dict == None:
102 print 'Warning: Rawdata.__init__() has a new option "return_dict"'
103 print 'the default value of this option is False, so nothing changes for you at the moment.'
104 print
105 print 'you probably want, to get a dictionary out of the next() method anyway'
106 print ' so please change your scripts and set this option to True, for the moment'
107 print 'e.g. like this: run = RawData(data_filename, calib_filename, return_dict = True)'
108 print "after a while, the default value, will turn to True .. so you don't have to give the option anymore"
109 print 'and some time later, the option will not be supported anymore'
110 return_dict = False
111 self.return_dict = return_dict
112
113 self.do_calibration = do_calibration
114
115 self.data_file_name = data_file_name
116 self.calib_file_name = calib_file_name
117 self.baseline_file_name = baseline_file_name
118
119 self.user_action_calib = user_action_calib
120
121 # baseline correction: True / False
122 if len(baseline_file_name) == 0:
123 self.correct_baseline = False
124 else:
125 self.correct_baseline = True
126
127 # access data file
128 try:
129 data_file = FactFits(self.data_file_name)
130 except IOError:
131 print 'problem accessing data file: ', data_file_name
132 raise # stop ! no data
133 #: data file (fits object)
134 self.data_file = data_file
135
136 # get basic information about the data file
137 #: region of interest (number of DRS slices read)
138 self.nroi = data_file.GetUInt('NROI')
139 #: number of pixels (should be 1440)
140 self.npix = data_file.GetUInt('NPIX')
141 #: number of events in the data run
142 self.nevents = data_file.GetNumRows()
143
144 # allocate the data memories
145 self.event_id = c_ulong()
146 self.trigger_type = c_ushort()
147 #: 1D array with raw data
148 self.data = np.zeros( self.npix * self.nroi, np.int16 ).reshape(self.npix ,self.nroi)
149 #: slice where drs readout started
150 self.start_cells = np.zeros( self.npix, np.int16 )
151 #: time when the FAD was triggered, in some strange units...
152 self.board_times = np.zeros( 40, np.int32 )
153
154 # set the pointers to the data++
155 data_file.SetPtrAddress('EventNum', self.event_id)
156 data_file.SetPtrAddress('TriggerType', self.trigger_type)
157 data_file.SetPtrAddress('StartCellData', self.start_cells)
158 data_file.SetPtrAddress('Data', self.data)
159 data_file.SetPtrAddress('BoardTime', self.board_times)
160
161 # open the calibration file
162 try:
163 calib_file = FactFits(self.calib_file_name)
164 except IOError:
165 print 'problem accessing calibration file: ', calib_file_name
166 raise
167 #: drs calibration file
168 self.calib_file = calib_file
169
170 baseline_mean = calib_file.GetN('BaselineMean')
171 gain_mean = calib_file.GetN('GainMean')
172 trigger_offset_mean = calib_file.GetN('TriggerOffsetMean')
173
174 self.Nblm = baseline_mean / self.npix
175 self.Ngm = gain_mean / self.npix
176 self.Ntom = trigger_offset_mean / self.npix
177
178 self.blm = np.zeros(baseline_mean, np.float32).reshape(self.npix , self.Nblm)
179 self.gm = np.zeros(gain_mean, np.float32).reshape(self.npix , self.Ngm)
180 self.tom = np.zeros(trigger_offset_mean, np.float32).reshape(self.npix , self.Ntom)
181
182 calib_file.SetPtrAddress('BaselineMean', self.blm)
183 calib_file.SetPtrAddress('GainMean', self.gm)
184 calib_file.SetPtrAddress('TriggerOffsetMean', self.tom)
185 calib_file.GetRow(0)
186
187 # make calibration constants double, so we never need to roll
188 self.blm = np.hstack((self.blm, self.blm))
189 self.gm = np.hstack((self.gm, self.gm))
190 self.tom = np.hstack((self.tom, self.tom))
191
192 self.v_bsl = np.zeros(self.npix) # array of baseline values (all ZERO)
193
194 def __iter__(self):
195 """ iterator """
196 return self
197
198 def __add__(self, jump_over):
199 self.data_file.GetRow(jump_over)
200 return self
201
202 def next(self):
203 """ used by __iter__ """
204 if self.data_file.GetNextRow() == False:
205 raise StopIteration
206 else:
207 if self.do_calibration == True:
208 self.calibrate_drs_amplitude()
209
210 #print 'nevents = ', self.nevents, 'event_id = ', self.event_id.value
211 if self.return_dict:
212 return self.__dict__
213 else:
214 return self.acal_data, self.start_cells, self.trigger_type.value
215
216 def next_event(self):
217 """ load the next event from disk and calibrate it
218 """
219 self.data_file.GetNextRow()
220 self.calibrate_drs_amplitude()
221
222 def calibrate_drs_amplitude(self):
223 """ perform the drs amplitude calibration of the event data
224
225 """
226 # shortcuts
227 blm = self.blm
228 gm = self.gm
229 tom = self.tom
230
231 to_mV = 2000./4096.
232 #: 2D array with amplitude calibrated dat in mV
233 acal_data = self.data * to_mV # convert ADC counts to mV
234
235
236 for pixel in range( self.npix ):
237 #shortcuts
238 sc = self.start_cells[pixel]
239 roi = self.nroi
240 # rotate the pixel baseline mean to the Data startCell
241 acal_data[pixel,:] -= blm[pixel,sc:sc+roi]
242 # the 'trigger offset mean' does not need to be rolled
243 # on the contrary, it seems there is an offset in the DRS data,
244 # which is related to its distance to the startCell, not to its
245 # distance to the beginning of the physical pipeline in the DRS chip
246 acal_data[pixel,:] -= tom[pixel,0:roi]
247 # rotate the pixel gain mean to the Data startCell
248 acal_data[pixel,:] /= gm[pixel,sc:sc+roi]
249
250
251 self.acal_data = acal_data * 1907.35
252
253 self.user_action_calib( self.acal_data,
254 np.reshape(self.data, (self.npix, self.nroi) ), blm, tom, gm, self.start_cells, self.nroi)
255
256
257 def baseline_read_values(self, file, bsl_hist='bsl_sum/hplt_mean'):
258 """
259
260 open ROOT file with baseline histogram and read baseline values
261 file name of the root file
262 bsl_hist path to the histogram containing the basline values
263
264 """
265
266 try:
267 f = TFile(file)
268 except:
269 print 'Baseline data file could not be read: ', file
270 return
271
272 h = f.Get(bsl_hist)
273
274 for i in range(self.npix):
275 self.v_bsl[i] = h.GetBinContent(i+1)
276
277 f.Close()
278
279 def baseline_correct(self):
280 """ subtract baseline from the data
281
282 """
283
284 for pixel in range(self.npix):
285 self.acal_data[pixel,:] -= self.v_bsl[pixel]
286
287 def info(self):
288 """ print run information
289
290 """
291
292 print 'data file: ', data_file_name
293 print 'calib file: ', calib_file_name
294 print 'calibration file'
295 print 'N baseline_mean: ', self.Nblm
296 print 'N gain mean: ', self.Ngm
297 print 'N TriggeroffsetMean: ', self.Ntom
298
299# -----------------------------------------------------------------------------
class RawDataFake( object ):
    """ raw data FAKE access similar to real RawData access

    generates a noisy sine trace per pixel instead of reading from disk;
    intended for testing analysis code without data files.
    """


    def __init__(self, data_file_name, calib_file_name,
                 user_action_calib=lambda acal_data, data, blm, tom, gm, scells, nroi: None,
                 baseline_file_name=''):
        self.__module__='pyfact'

        # remember the (otherwise unused) file names so info() can report them
        self.data_file_name = data_file_name
        self.calib_file_name = calib_file_name

        self.nroi = 300
        self.npix = 9
        self.nevents = 1000

        self.simulator = None

        # sampling intervals: 1024 cells, 0.5 ns each
        self.time = np.ones(1024) * 0.5

        self.event_id = c_ulong(0)
        self.trigger_type = c_ushort(4)
        self.data = np.zeros( self.npix * self.nroi, np.int16 ).reshape(self.npix ,self.nroi)
        self.start_cells = np.zeros( self.npix, np.int16 )
        self.board_times = np.zeros( 40, np.int32 )

    def __iter__(self):
        """ iterator """
        return self

    def next(self):
        """ used by __iter__; returns self.__dict__,
        like RawData with return_dict=True
        """
        self.event_id = c_ulong(self.event_id.value + 1)
        self.board_times = self.board_times + 42

        if self.event_id.value >= self.nevents:
            raise StopIteration
        else:
            self._make_event_data()

        return self.__dict__

    def _make_event_data(self):
        """ fill self.data with one fake event """
        # (was: 'time[0]' -> NameError; must be self.time[0])
        sample_times = self.time.cumsum() - self.time[0]

        # random start cell, the same for every pixel
        self.start_cells = np.ones( self.npix, np.int16 ) * np.random.randint(0,1024)

        starttime = self.start_cells[0]

        signal = self._std_sinus_simu(sample_times, starttime)

        # one copy of the trace per pixel
        # (was: a vstack loop that produced npix+1 rows for npix pixels)
        self.data = np.tile(signal, (self.npix, 1))

    def _std_sinus_simu(self, times, starttime):
        """ noisy sine wave, phase-shifted by *starttime* """
        period = 10 # in ns

        # give a jitter on starttime
        # (was: typo 'startime' -> NameError)
        starttime = np.random.normal(starttime, 0.05)

        phase = 0.0
        signal = 10 * np.sin(times * 2*np.pi/period + starttime + phase)

        # add some gaussian noise
        noise = np.random.normal(0.0, 0.5, signal.shape)
        signal += noise
        return signal

    def info(self):
        """ print run information
        (was: referenced undefined names data_file_name / self.Nblm ...)
        """
        # single-argument prints work identically under Python 2 and 3
        print('data file:  %s' % self.data_file_name)
        print('calib file: %s' % self.calib_file_name)
        print('nroi: %d  npix: %d  nevents: %d' % (self.nroi, self.npix, self.nevents))
381
382# -----------------------------------------------------------------------------
383
384class SlowData( FactFits ):
385 """ -Fact SlowData File-
386 A Python wrapper for the fits-class implemented in pyfits.h
387 provides easy access to the fits file meta data.
388 * dictionary of file metadata - self.meta
389 * dict of table metadata - self.columns
390 * variable table column access, thus possibly increased speed while looping
391 """
392 def __init__(self, path):
393 """ creates meta and columns dictionaries
394 """
395 self.path = path
396 try:
397 FactFits.__init__(self,path)
398 except IOError:
399 print 'problem accessing data file: ', data_file_name
400 raise # stop ! no data
401
402 self.meta = self._make_meta_dict()
403 self.columns = self._make_columns_dict()
404
405 self.treat_meta_dict()
406
407
408 # list of columns, which are already registered
409 # see method register()
410 self._registered_cols = []
411 # dict of column data, this is used, in order to be able to remove
412 # the ctypes of
413 self._table_cols = {}
414
415 # I need to count the rows, since the normal loop mechanism seems not to work.
416 self._current_row = 0
417
418 self.stacked_cols = {}
419
420 def _make_meta_dict(self):
421 """ This method retrieves meta information about the fits file and
422 stores this information in a dict
423 return: dict
424 key: string - all capital letters
425 value: tuple( numerical value, string comment)
426 """
427 # intermediate variables for file metadata dict generation
428 keys=self.GetPy_KeyKeys()
429 values=self.GetPy_KeyValues()
430 comments=self.GetPy_KeyComments()
431 types=self.GetPy_KeyTypes()
432
433 if len(keys) != len(values):
434 raise TypeError('len(keys)',len(keys),' != len(values)', len(values))
435 if len(keys) != len(types):
436 raise TypeError('len(keys)',len(keys),' != len(types)', len(types))
437 if len(keys) != len(comments):
438 raise TypeError('len(keys)',len(keys),' != len(comments)', len(comments))
439
440 meta_dict = {}
441 for i in range(len(keys)):
442 type = types[i]
443 if type == 'I':
444 value = int(values[i])
445 elif type == 'F':
446 value = float(values[i])
447 elif type == 'B':
448 if values[i] == 'T':
449 value = True
450 elif values[i] == 'F':
451 value = False
452 else:
453 raise TypeError("meta-type is 'B', but meta-value is neither 'T' nor 'F'. meta-value:",values[i])
454 elif type == 'T':
455 value = values[i]
456 else:
457 raise TypeError("unknown meta-type: known meta types are: I,F,B and T. meta-type:",type)
458 meta_dict[keys[i]]=(value, comments[i])
459 return meta_dict
460
461
462 def _make_columns_dict(self):
463 """ This method retrieves information about the columns
464 stored inside the fits files internal binary table.
465 returns: dict
466 key: string column name -- all capital letters
467 values: tuple(
468 number of elements in table field - integer
469 size of element in bytes -- this is not really interesting for any user
470 might be ommited in future versions
471 type - a single character code -- should be translated into
472 a comrehensible word
473 unit - string like 'mV' or 'ADC count'
474 """
475 # intermediate variables for file table-metadata dict generation
476 keys=self.GetPy_ColumnKeys()
477 #offsets=self.GetPy_ColumnOffsets() #not needed on python level...
478 nums=self.GetPy_ColumnNums()
479 sizes=self.GetPy_ColumnSizes()
480 types=self.GetPy_ColumnTypes()
481 units=self.GetPy_ColumnUnits()
482
483 # zip the values
484 values = zip(nums,sizes,types,units)
485 # create the columns dictionary
486 columns = dict(zip(keys ,values))
487 return columns
488
489 def stack(self, on=True):
490 self.next()
491 for col in self._registered_cols:
492 if isinstance( self.dict[col], type(np.array('')) ):
493 self.stacked_cols[col] = self.dict[col]
494 else:
495# elif isinstance(self.dict[col], ctypes._SimpleCData):
496 self.stacked_cols[col] = np.array(self.dict[col])
497# else:
498# raise TypeError("I don't know how to stack "+col+". It is of type: "+str(type(self.dict[col])))
499
500 def register(self, input_str):
501 columns = self.columns
502 if input_str.lower() == 'all':
503 for col in columns:
504 self._register(col)
505 else:
506 #check if colname is in columns:
507 if input_str not in columns:
508 error_msg = 'colname:'+ input_str +' is not a column in the binary table.\n'
509 error_msg+= 'possible colnames are\n'
510 for key in columns:
511 error_msg += key+'\n'
512 raise KeyError(error_msg)
513 else:
514 self._register(input_str)
515
516 # 'private' method, do not use
517 def _register( self, colname):
518 columns = self.columns
519 local = None
520
521 number_of_elements = int(columns[colname][0])
522 size_of_elements_in_bytes = int(columns[colname][1])
523 ctypecode_of_elements = columns[colname][2]
524 physical_unit_of_elements = columns[colname][3]
525
526 # snippet from the C++ source code, or header file to be precise:
527 #case 'L': gLog << "bool(8)"; break;
528 #case 'B': gLog << "byte(8)"; break;
529 #case 'I': gLog << "short(16)"; break;
530 #case 'J': gLog << "int(32)"; break;
531 #case 'K': gLog << "int(64)"; break;
532 #case 'E': gLog << "float(32)"; break;
533 #case 'D': gLog << "double(64)"; break;
534
535
536
537 # the fields inside the columns can either contain single numbers,
538 # or whole arrays of numbers as well.
539 # we treat single elements differently...
540 if number_of_elements == 1:
541 # allocate some memory for a single number according to its type
542 if ctypecode_of_elements == 'J': # J is for a 4byte int, i.e. an unsigned long
543 local = ctypes.c_ulong()
544 un_c_type = long
545 elif ctypecode_of_elements == 'I': # I is for a 2byte int, i.e. an unsinged int
546 local = ctypes.c_ushort()
547 un_c_type = int
548 elif ctypecode_of_elements == 'B': # B is for a byte
549 local = ctypes.c_ubyte()
550 un_c_type = int
551 elif ctypecode_of_elements == 'D':
552 local = ctypes.c_double()
553 un_c_type = float
554 elif ctypecode_of_elements == 'E':
555 local = ctypes.c_float()
556 un_c_type = float
557 elif ctypecode_of_elements == 'A':
558 local = ctypes.c_uchar()
559 un_c_type = chr
560 elif ctypecode_of_elements == 'K':
561 local = ctypes.c_ulonglong()
562 un_c_type = long
563 else:
564 raise TypeError('unknown ctypecode_of_elements:',ctypecode_of_elements)
565 else:
566 if ctypecode_of_elements == 'B': # B is for a byte
567 nptype = np.int8
568 elif ctypecode_of_elements == 'A': # A is for a char .. but I don't know how to handle it
569 nptype = np.int8
570 elif ctypecode_of_elements == 'I': # I is for a 2byte int
571 nptype = np.int16
572 elif ctypecode_of_elements == 'J': # J is for a 4byte int
573 nptype = np.int32
574 elif ctypecode_of_elements == 'K': # B is for a byte
575 nptype = np.int64
576 elif ctypecode_of_elements == 'E': # B is for a byte
577 nptype = np.float32
578 elif ctypecode_of_elements == 'D': # B is for a byte
579 nptype = np.float64
580 else:
581 raise TypeError('unknown ctypecode_of_elements:',ctypecode_of_elements)
582 local = np.zeros( number_of_elements, nptype)
583
584 # Set the Pointer Address
585 self.SetPtrAddress(colname, local)
586 self._table_cols[colname] = local
587 if number_of_elements > 1:
588 self.__dict__[colname] = local
589 self.dict[colname] = local
590 else:
591 # remove any traces of ctypes:
592 self.__dict__[colname] = local.value
593 self.dict[colname] = local.value
594 self._registered_cols.append(colname)
595
596
597 def treat_meta_dict(self):
598 """make 'interesting' meta information available like normal members.
599 non interesting are:
600 TFORM, TUNIT, and TTYPE
601 since these are available via the columns dict.
602 """
603
604 self.number_of_rows = self.meta['NAXIS2'][0]
605 self.number_of_columns = self.meta['TFIELDS'][0]
606
607 # there are some information in the meta dict, which are alsways there:
608 # there are regarded as not interesting:
609 uninteresting_meta = {}
610 uninteresting_meta['arraylike'] = {}
611 uninteresting = ['NAXIS', 'NAXIS1', 'NAXIS2',
612 'TFIELDS',
613 'XTENSION','EXTNAME','EXTREL',
614 'BITPIX', 'PCOUNT', 'GCOUNT',
615 'ORIGIN',
616 'PACKAGE', 'COMPILED', 'CREATOR',
617 'TELESCOP','TIMESYS','TIMEUNIT','VERSION']
618 for key in uninteresting:
619 if key in self.meta:
620 uninteresting_meta[key]=self.meta[key]
621 del self.meta[key]
622
623 # the table meta data contains
624
625
626 # shortcut to access the meta dict. But this needs to
627 # be cleaned up quickly!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
628 meta = self.meta
629
630 # loop over keys:
631 # * try to find array-like keys
632 arraylike = {}
633 singlelike = []
634 for key in self.meta:
635 stripped = key.rstrip('1234567890')
636 if stripped == key:
637 singlelike.append(key)
638 else:
639 if stripped not in arraylike:
640 arraylike[stripped] = 0
641 else:
642 arraylike[stripped] += 1
643 newmeta = {}
644 for key in singlelike:
645 newmeta[key.lower()] = meta[key]
646 for key in arraylike:
647 uninteresting_meta['arraylike'][key.lower()] = []
648 for i in range(arraylike[key]+1):
649 if key+str(i) in meta:
650 uninteresting_meta['arraylike'][key.lower()].append(meta[key+str(i)])
651 self.ui_meta = uninteresting_meta
652 # make newmeta self
653 for key in newmeta:
654 self.__dict__[key]=newmeta[key]
655
656 dict = self.__dict__.copy()
657 del dict['meta']
658 del dict['ui_meta']
659 self.dict = dict
660
661 def __iter__(self):
662 """ iterator """
663 return self
664
665 def next(self):
666 """ used by __iter__ """
667 # Here one might check, if looping makes any sense, and if not
668 # one could stop looping or so...
669 # like this:
670 #
671 # if len(self._registered_cols) == 0:
672 # print 'warning: looping without any registered columns'
673 if self._current_row < self.number_of_rows:
674 if self.GetNextRow() == False:
675 raise StopIteration
676 for col in self._registered_cols:
677 if isinstance(self._table_cols[col], ctypes._SimpleCData):
678 self.__dict__[col] = self._table_cols[col].value
679 self.dict[col] = self._table_cols[col].value
680
681 for col in self.stacked_cols:
682 if isinstance(self.dict[col], type(np.array(''))):
683 self.stacked_cols[col] = np.vstack( (self.stacked_cols[col],self.dict[col]) )
684 else:
685 self.stacked_cols[col] = np.vstack( (self.stacked_cols[col],np.array(self.dict[col])) )
686 self._current_row += 1
687 else:
688 raise StopIteration
689 return self
690
691 def show(self):
692 pprint.pprint(self.dict)
693
694
695
696
class fnames( object ):
    """ organize file names of a FACT data run
    """

    def __init__(self, specifier = ['012', '023', '2011', '11', '24'],
                 rpath = '/scratch_nfs/res/bsl/',
                 zipped = True):
        """
        specifier : list of strings defined as:
            [ 'DRS calibration file', 'Data file', 'YYYY', 'MM', 'DD']

        rpath : directory path for the results; YYYYMMDD will be appended to rpath
        zipped : use zipped (True) or unzipped (False) raw files
        """

        self.specifier = specifier
        self.rpath = rpath
        self.zipped = zipped

        self.make( self.specifier, self.rpath, self.zipped )


    def make( self, specifier, rpath, zipped ):
        """ create (make) the filenames

        fills self.names, a dict with the tags { 'data', 'drscal', 'results' }:
            data    : name of the data file
            drscal  : name of the drs calibration file
            results : radical of file name(s) for results (to be completed by suffixes)
        the entries are also available as self.data, self.drscal, self.results
        """

        self.specifier = specifier

        # zipped and unzipped raw files live on different disks
        if zipped:
            dpath = '/data00/fact-construction/raw/'
            ext = '.fits.gz'
        else:
            dpath = '/data03/fact-construction/raw/'
            ext = '.fits'

        year = specifier[2]
        month = specifier[3]
        day = specifier[4]

        yyyymmdd = year + month + day
        dfile = specifier[1]  # data run number
        cfile = specifier[0]  # drs calibration run number

        # results go into one sub-directory per day
        rpath = rpath + yyyymmdd + '/'
        self.rpath = rpath
        self.names = {}

        tmp = dpath + year + '/' + month + '/' + day + '/' + yyyymmdd + '_'
        self.names['data'] = tmp + dfile + ext
        self.names['drscal'] = tmp + cfile + '.drs' + ext
        self.names['results'] = rpath + yyyymmdd + '_' + dfile + '_' + cfile

        self.data = self.names['data']
        self.drscal = self.names['drscal']
        self.results = self.names['results']

    def info( self ):
        """ print complete filenames """
        # single-argument prints work identically under Python 2 and 3
        print('file names:')
        print('data:     %s' % self.names['data'])
        print('drs-cal:  %s' % self.names['drscal'])
        print('results:  %s' % self.names['results'])
769
770# end of class definition: fnames( object )
771
772def _test_SlowData( filename ):
773 file = FactFits(filename)
774 print '-'*70
775 print "opened :", filename, " as 'file'"
776 print
777 print '-'*70
778 print 'type file.show() to look at its contents'
779 print "type file.register( columnname ) or file.register('all') in order to register columns"
780 print
781 print " due column-registration you declare, that you would like to retrieve the contents of one of the columns"
782 print " after column-registration, the 'file' has new member variables, they are named like the columns"
783 print " PLEASE NOTE: immediatly after registration, the members exist, but they are empty."
784 print " the values are assigned only, when you call file.next() or when you loop over the 'file'"
785 print
786 print "in order to loop over it, just go like this:"
787 print "for row in file:"
788 print " print row.columnname_one, row.columnname_two"
789 print
790 print ""
791 print '-'*70
792
793
794
def _test_iter( nevents ):
    """ test for function __iter__

    opens a hard-coded raw-data/calibration file pair, loops over the
    events printing a one-line summary each, and stops after *nevents*
    events.  NOTE(review): the paths below only exist on the FACT
    analysis machines -- adjust them for local testing.
    """

    data_file_name = '/data00/fact-construction/raw/2011/11/24/20111124_117.fits.gz'
    calib_file_name = '/data00/fact-construction/raw/2011/11/24/20111124_114.drs.fits.gz'
#    data_file_name = '/home/luster/win7/FACT/data/raw/20120114/20120114_028.fits.gz'
#    calib_file_name = '/home/luster/win7/FACT/data/raw/20120114/20120114_022.drs.fits.gz'
    run = RawData( data_file_name, calib_file_name , return_dict=True)

    # with return_dict=True each *event* is run.__dict__, so the event
    # fields are accessed by name
    for event in run:
        print 'ev ', event['event_id'].value, 'data[0,0] = ', event['acal_data'][0,0], 'start_cell[0] = ', event['start_cells'][0], 'trigger type = ', event['trigger_type']
        if run.event_id.value == nevents:
            break
808
if __name__ == '__main__':
    """ tests """
    # NOTE(review): the string above is a no-op statement, not a real
    # docstring; kept as-is.
    import sys
    # no argument: run the RawData iterator test (needs the hard-coded
    # files of _test_iter); one argument: treat it as a slow-data fits
    # file and run the SlowData test
    if len(sys.argv) == 1:
        print 'showing test of iterator of RawData class'
        print 'in order to test the SlowData classe please use:', sys.argv[0], 'fits-file-name'
        _test_iter(10)
    else:
        print 'showing test of SlowData class'
        print 'in case you wanted to test the RawData class, please give no commandline arguments'
        _test_SlowData(sys.argv[1])
Note: See TracBrowser for help on using the repository browser.