source: fact/tools/pyscripts/pyfact/pyfact.py@ 13644

Last change on this file since 13644 was 13644, checked in by neise, 12 years ago
RawData.board_times and RawData.start_cells are now numpy arrays. They were of type ROOT.PyShortBuffer before, which is not so easy to handle. Since it is not time critical, I decided to create them this way. RawData.info() is not giving a sufficient amount of info, I think; it should be reworked. A RawData object has several members which make no sense in all cases, e.g. some members related to the baseline correction. These members should only exist in case the user wants to perform baseline correction.
  • Property svn:executable set to *
File size: 31.6 KB
Line 
1#!/usr/bin/python -tt
2#
3# Werner Lustermann, Dominik Neise
4# ETH Zurich, TU Dortmund
5#
import ctypes
from ctypes import *
import pprint # for SlowData

import numpy as np
from scipy import signal
10
11# get the ROOT stuff + my shared libs
12from ROOT import gSystem
13# factfits_h.so is made from factfits.h and is used to access the data
14# make sure the location of factfits_h.so is in LD_LIBRARY_PATH.
15# having it in PYTHONPATH is *not* sufficient
16gSystem.Load('factfits_h.so')
17gSystem.Load('calfactfits_h.so')
18from ROOT import *
19
class RawDataFeeder( object ):
    """ Wrapper class for RawData class
        capable of iterating over multiple RawData Files
    """

    def __init__(self, filelist):
        """ *filelist* list of files to iterate over
        the list should contain tuples, or sublists of two filenames
        the first should be a data file (\*.fits.gz)
        the second should be an amplitude calibration file(\*.drs.fits.gz)
        """
        # sanity check for input
        if type(filelist) != type(list()):
            raise TypeError('filelist should be a list')
        for entry in filelist:
            if len(entry) != 2:
                raise TypeError('the entries of filelist should have length == 2')
            for path in entry:
                if type(path) != type(str()):
                    raise TypeError('the entries of filelist should be path, i.e. of type str()')
                #todo check if 'path' is a valid path
                # else: throw an Exception, or Warning?

        # work on a copy, so the caller's list is not emptied as a
        # side effect (the original deleted from the caller's list)
        self.filelist = list(filelist)
        self._current_RawData = RawData(self.filelist[0][0], self.filelist[0][1], return_dict=True)
        del self.filelist[0]

    def __iter__(self):
        return self

    def next(self):
        """ Method being called by the iterator.
        Since the RawData Objects are simply looped over, the event_id from the
        RawData object will not be unique.
        Each RawData object will start with event_id = 1 as usual.

        raises StopIteration once all files of the filelist are exhausted.
        """
        # bug fix: this method was defined without 'self' and referenced
        # the (undefined) name 'filelist' instead of 'self.filelist'
        try:
            return self._current_RawData.next()
        except StopIteration:
            # current_RawData was completely processed
            # delete it (I hope this calls the destructor of the fits file and/or closes it)
            del self._current_RawData
            # and remake it, if possible
            if len(self.filelist) > 0:
                self._current_RawData = RawData(self.filelist[0][0], self.filelist[0][1], return_dict=True)
                del self.filelist[0]
            else:
                raise
69
70
class RawData( object ):
    """ raw data access and calibration

    - open raw data file and drs calibration file
    - performs amplitude calibration
    - performs baseline subtraction if wanted
    - provides all data in an array:
        row = number of pixel
        col = length of region of interest

    """


    def __init__(self, data_file_name, calib_file_name,
                 user_action_calib=lambda acal_data, data, blm, tom, gm, scells, nroi: None,
                 baseline_file_name='',
                 return_dict = True,
                 do_calibration = True,
                 use_CalFactFits = True):
        """ initialize object

        open data file and calibration data file
        get basic information about the data in data_file_name
        allocate buffers for data access

        data_file_name     : fits or fits.gz file of the data including the path
        calib_file_name    : fits or fits.gz file containing DRS calibration data
        user_action_calib  : callback invoked at the end of
                             calibrate_drs_amplitude() (default: no-op)
        baseline_file_name : npy file containing the baseline values;
                             empty string switches baseline correction off
        return_dict        : if True, next() returns self.__dict__, else a
                             (acal_data, start_cells, trigger_type) tuple
        do_calibration     : if True, next() performs amplitude calibration
                             (only relevant when use_CalFactFits is False)
        use_CalFactFits    : if True, access data through CalFactFits, which
                             performs the amplitude calibration in C++;
                             otherwise plain FactFits access plus python
                             calibration in calibrate_drs_amplitude()
        """
        self.__module__='pyfact'
        # manual implementation of default value, but I need to find out
        # if the user of this class is aware of the new option
        if return_dict == False:
            print 'DEPRECATION WARNING:'
            print 'you are using RawData in a way, which is nor supported anymore.'
            print ' Please set: return_dict = True, in the __init__ call'
        self.return_dict = return_dict
        self.use_CalFactFits = use_CalFactFits

        self.do_calibration = do_calibration

        self.data_file_name = data_file_name
        self.calib_file_name = calib_file_name
        self.baseline_file_name = baseline_file_name

        self.user_action_calib = user_action_calib

        # baseline correction: True / False
        if len(baseline_file_name) == 0:
            self.correct_baseline = False
        else:
            self.correct_baseline = True

        # access data file
        if use_CalFactFits:
            try:
                data_file = CalFactFits(data_file_name, calib_file_name)
            except IOError:
                print 'problem accessing data file: ', data_file_name
                raise # stop ! no data

            #: CalFactFits (or FactFits, see else-branch) object
            self.data_file = data_file
            # allocate 1D so CalFactFits can use the buffer directly;
            # the reshape below is a view on the same memory
            self.data = np.empty( data_file.npix * data_file.nroi, np.float64)
            data_file.SetNpcaldataPtr(self.data)
            self.data = self.data.reshape( data_file.npix, data_file.nroi )
            #: amplitude calibrated data -- here an alias of self.data,
            #  since CalFactFits delivers calibrated data already
            self.acal_data = self.data

            self.nroi = data_file.nroi
            self.npix = data_file.npix
            self.nevents = data_file.nevents

            # Data per event
            self.event_id = None
            self.trigger_type = None
            #self.start_cells = None
            #self.board_times = None
            self.start_cells = np.zeros( self.npix, np.int16 )
            self.board_times = np.zeros( 40, np.int32 )

            # data_file is a CalFactFits object
            # data_file.datafile is one of the two FactFits objects hold by a CalFactFits.
            # sorry for the strange naming ..
            data_file.datafile.SetPtrAddress('StartCellData', self.start_cells)
            data_file.datafile.SetPtrAddress('BoardTime', self.board_times)

        else:
            try:
                data_file = FactFits(self.data_file_name)
            except IOError:
                print 'problem accessing data file: ', data_file_name
                raise # stop ! no data

            self.data_file = data_file

            # get basic information about the data file
            #: region of interest (number of DRS slices read)
            self.nroi = data_file.GetUInt('NROI')
            #: number of pixels (should be 1440)
            self.npix = data_file.GetUInt('NPIX')
            #: number of events in the data run
            self.nevents = data_file.GetNumRows()

            # allocate the data memories
            self.event_id = c_ulong()
            self.trigger_type = c_ushort()
            #: 2D array with raw data (npix x nroi)
            self.data = np.zeros( self.npix * self.nroi, np.int16 ).reshape(self.npix ,self.nroi)
            #: slice where drs readout started
            self.start_cells = np.zeros( self.npix, np.int16 )
            #: time when the FAD was triggered, in some strange units...
            self.board_times = np.zeros( 40, np.int32 )

            # set the pointers to the data++
            data_file.SetPtrAddress('EventNum', self.event_id)
            data_file.SetPtrAddress('TriggerType', self.trigger_type)
            data_file.SetPtrAddress('StartCellData', self.start_cells)
            data_file.SetPtrAddress('Data', self.data)
            data_file.SetPtrAddress('BoardTime', self.board_times)

            # open the calibration file
            try:
                calib_file = FactFits(self.calib_file_name)
            except IOError:
                print 'problem accessing calibration file: ', calib_file_name
                raise
            #: drs calibration file
            self.calib_file = calib_file

            baseline_mean = calib_file.GetN('BaselineMean')
            gain_mean = calib_file.GetN('GainMean')
            trigger_offset_mean = calib_file.GetN('TriggerOffsetMean')

            # number of calibration slices per pixel
            self.Nblm = baseline_mean / self.npix
            self.Ngm = gain_mean / self.npix
            self.Ntom = trigger_offset_mean / self.npix

            self.blm = np.zeros(baseline_mean, np.float32).reshape(self.npix , self.Nblm)
            self.gm = np.zeros(gain_mean, np.float32).reshape(self.npix , self.Ngm)
            self.tom = np.zeros(trigger_offset_mean, np.float32).reshape(self.npix , self.Ntom)

            calib_file.SetPtrAddress('BaselineMean', self.blm)
            calib_file.SetPtrAddress('GainMean', self.gm)
            calib_file.SetPtrAddress('TriggerOffsetMean', self.tom)
            # read the (single) calibration row into blm/gm/tom
            calib_file.GetRow(0)

            # make calibration constants double, so we never need to roll
            self.blm = np.hstack((self.blm, self.blm))
            self.gm = np.hstack((self.gm, self.gm))
            self.tom = np.hstack((self.tom, self.tom))

        self.v_bsl = np.zeros(self.npix) # array of baseline values (all ZERO)

    def __iter__(self):
        """ iterator """
        return self

    def next(self):
        """ used by __iter__

        returns self.__dict__ if return_dict is True, else the tuple
        (acal_data, start_cells, trigger_type.value);
        raises StopIteration when the file is exhausted.
        """
        if self.use_CalFactFits:
            if self.data_file.GetCalEvent() == False:
                raise StopIteration
            else:
                self.event_id = self.data_file.event_id
                self.trigger_type = self.data_file.event_triggertype
                #self.start_cells = self.data_file.event_offset
                #self.board_times = self.data_file.event_boardtimes
                #self.acal_data = self.data.copy().reshape(self.data_file.npix, self.data_file.nroi)
        else:
            if self.data_file.GetNextRow() == False:
                raise StopIteration
            else:
                if self.do_calibration == True:
                    self.calibrate_drs_amplitude()

        #print 'nevents = ', self.nevents, 'event_id = ', self.event_id.value
        if self.return_dict:
            return self.__dict__
        else:
            return self.acal_data, self.start_cells, self.trigger_type.value

    def next_event(self):
        """ load the next event from disk and calibrate it

        unlike next(), this neither raises StopIteration nor returns anything
        """
        if self.use_CalFactFits:
            self.data_file.GetCalEvent()
        else:
            self.data_file.GetNextRow()
            self.calibrate_drs_amplitude()

    def calibrate_drs_amplitude(self):
        """ perform the drs amplitude calibration of the event data

        converts ADC counts to mV, subtracts the baseline mean (rolled to
        the event start cell) and the trigger offset mean, divides by the
        gain mean and stores the result in self.acal_data
        """
        # shortcuts
        blm = self.blm
        gm = self.gm
        tom = self.tom

        to_mV = 2000./4096.
        # 2D array with amplitude calibrated data in mV
        acal_data = self.data * to_mV # convert ADC counts to mV


        for pixel in range( self.npix ):
            #shortcuts
            sc = self.start_cells[pixel]
            roi = self.nroi
            # rotate the pixel baseline mean to the Data startCell
            # (blm/gm were doubled via hstack, so sc:sc+roi never wraps)
            acal_data[pixel,:] -= blm[pixel,sc:sc+roi]
            # the 'trigger offset mean' does not need to be rolled
            # on the contrary, it seems there is an offset in the DRS data,
            # which is related to its distance to the startCell, not to its
            # distance to the beginning of the physical pipeline in the DRS chip
            acal_data[pixel,:] -= tom[pixel,0:roi]
            # rotate the pixel gain mean to the Data startCell
            acal_data[pixel,:] /= gm[pixel,sc:sc+roi]


        # NOTE(review): 1907.35 looks like a fixed overall gain scale;
        # its origin is not documented here -- TODO confirm
        self.acal_data = acal_data * 1907.35

        self.user_action_calib( self.acal_data,
            np.reshape(self.data, (self.npix, self.nroi) ), blm, tom, gm, self.start_cells, self.nroi)


    def baseline_read_values(self, file, bsl_hist='bsl_sum/hplt_mean'):
        """ open ROOT file with baseline histogram and read baseline values

        file     : name of the root file
        bsl_hist : path to the histogram containing the baseline values

        fills self.v_bsl (one value per pixel); returns nothing
        """

        try:
            f = TFile(file)
        except:
            print 'Baseline data file could not be read: ', file
            return

        h = f.Get(bsl_hist)

        # histogram bins are 1-based
        for i in range(self.npix):
            self.v_bsl[i] = h.GetBinContent(i+1)

        f.Close()

    def baseline_correct(self):
        """ subtract baseline from the data

        requires baseline_read_values() to have filled self.v_bsl first,
        otherwise the values are all zero
        """

        for pixel in range(self.npix):
            self.acal_data[pixel,:] -= self.v_bsl[pixel]

    def info(self):
        """ print run information

        NOTE(review): prints only the file names so far
        """
        print 'data file: ', self.data_file_name
        print 'calib file: ', self.calib_file_name
        print '... we need more information printed here ... '
332
333# -----------------------------------------------------------------------------
class RawDataFake( object ):
    """ raw data FAKE access similar to real RawData access

    generates noisy sine-wave dummy events instead of reading files;
    no ROOT / FactFits machinery is needed.
    """


    def __init__(self, data_file_name, calib_file_name,
                 user_action_calib=lambda acal_data, data, blm, tom, gm, scells, nroi: None,
                 baseline_file_name=''):
        """ set up the fake run

        the file-name arguments are accepted (and stored) only for
        interface compatibility with RawData; no file is ever opened.
        """
        self.__module__='pyfact'

        # stored so info() can report them (they were undefined there before)
        self.data_file_name = data_file_name
        self.calib_file_name = calib_file_name
        self.user_action_calib = user_action_calib
        self.baseline_file_name = baseline_file_name

        self.nroi = 300
        self.npix = 9
        self.nevents = 1000

        self.simulator = None

        # sampling intervals in ns; cumsum() of this gives the sample times
        self.time = np.ones(1024) * 0.5


        self.event_id = c_ulong(0)
        self.trigger_type = c_ushort(4)
        self.data = np.zeros( self.npix * self.nroi, np.int16 ).reshape(self.npix ,self.nroi)
        self.start_cells = np.zeros( self.npix, np.int16 )
        self.board_times = np.zeros( 40, np.int32 )

    def __iter__(self):
        """ iterator """
        return self

    def next(self):
        """ used by __iter__

        returns self.__dict__ with a freshly simulated event;
        raises StopIteration after self.nevents events
        """
        self.event_id = c_ulong(self.event_id.value + 1)
        self.board_times = self.board_times + 42

        if self.event_id.value >= self.nevents:
            raise StopIteration
        else:
            self._make_event_data()

        return self.__dict__

    def _make_event_data(self):
        """ fill self.data with one simulated noisy sine-wave event """
        # bug fix: referenced bare 'time' instead of 'self.time' (NameError)
        sample_times = self.time.cumsum() - self.time[0]

        # random start cell (the same for all pixels)
        self.start_cells = np.ones( self.npix, np.int16 ) * np.random.randint(0,1024)

        starttime = self.start_cells[0]

        waveform = self._std_sinus_simu(sample_times, starttime)

        # one identical waveform per pixel
        # bug fix: the old vstack loop always produced 10 rows regardless of npix
        self.data = np.tile(waveform, (self.npix, 1))

    def _std_sinus_simu(self, times, starttime):
        """ return a 10 ns period sine of amplitude 10 with gaussian noise """
        period = 10 # in ns

        # give a jitter on starttime
        # bug fix: 'starttime' was misspelled 'startime' here (NameError)
        starttime = np.random.normal(starttime, 0.05)

        phase = 0.0
        signal = 10 * np.sin(times * 2*np.pi/period + starttime + phase)

        # add some noise
        noise = np.random.normal(0.0, 0.5, signal.shape)
        signal += noise
        return signal

    def info(self):
        """ print run information

        bug fix: used to reference undefined names and members that only
        exist on RawData (Nblm, Ngm, Ntom)
        """
        print('data file:  %s' % self.data_file_name)
        print('calib file: %s' % self.calib_file_name)
        print('nroi: %d  npix: %d  nevents: %d' % (self.nroi, self.npix, self.nevents))
415
416# -----------------------------------------------------------------------------
417
class SlowData( FactFits ):
    """ -Fact SlowData File-
    A Python wrapper for the fits-class implemented in pyfits.h
    provides easy access to the fits file meta data.
        * dictionary of file metadata - self.meta
        * dict of table metadata - self.columns
        * variable table column access, thus possibly increased speed while looping
    """

    # fits type code -> ctypes scalar constructor, for single-element fields
    # (snippet from the C++ header:
    #  'L' bool(8), 'B' byte(8), 'I' short(16), 'J' int(32),
    #  'K' int(64), 'E' float(32), 'D' double(64))
    _CTYPES_SCALAR = {
        'J' : ctypes.c_ulong,     # 4 byte int
        'I' : ctypes.c_ushort,    # 2 byte int
        'B' : ctypes.c_ubyte,     # single byte
        'D' : ctypes.c_double,
        'E' : ctypes.c_float,
        'A' : ctypes.c_uchar,     # char
        'K' : ctypes.c_ulonglong, # 8 byte int
    }
    # fits type code -> numpy dtype, for array fields
    _NUMPY_TYPE = {
        'B' : np.int8,
        'A' : np.int8,    # char .. no better handling known
        'I' : np.int16,
        'J' : np.int32,
        'K' : np.int64,
        'E' : np.float32,
        'D' : np.float64,
    }

    def __init__(self, path):
        """ creates meta and columns dictionaries

        path : path of the fits(.gz) file to open
        """
        self.path = path
        try:
            FactFits.__init__(self,path)
        except IOError:
            # bug fix: used to print the undefined name 'data_file_name'
            print('problem accessing data file: %s' % path)
            raise # stop ! no data

        self.meta = self._make_meta_dict()
        self.columns = self._make_columns_dict()

        self.treat_meta_dict()


        # list of columns, which are already registered
        # see method register()
        self._registered_cols = []
        # dict of column data; kept so the ctypes scalars can be unwrapped
        # into plain python values on each row (see next())
        self._table_cols = {}

        # I need to count the rows, since the normal loop mechanism seems not to work.
        self._current_row = 0

        # columns accumulated row by row via stack() (colname -> ndarray)
        self.stacked_cols = {}

    def _make_meta_dict(self):
        """ This method retrieves meta information about the fits file and
        stores this information in a dict
        return: dict
            key: string - all capital letters
            value: tuple( numerical value, string comment)
        """
        # intermediate variables for file metadata dict generation
        keys=self.GetPy_KeyKeys()
        values=self.GetPy_KeyValues()
        comments=self.GetPy_KeyComments()
        types=self.GetPy_KeyTypes()

        if len(keys) != len(values):
            raise TypeError('len(keys)',len(keys),' != len(values)', len(values))
        if len(keys) != len(types):
            raise TypeError('len(keys)',len(keys),' != len(types)', len(types))
        if len(keys) != len(comments):
            raise TypeError('len(keys)',len(keys),' != len(comments)', len(comments))

        meta_dict = {}
        for i in range(len(keys)):
            # renamed from 'type' so the builtin is not shadowed
            key_type = types[i]
            if key_type == 'I':
                value = int(values[i])
            elif key_type == 'F':
                value = float(values[i])
            elif key_type == 'B':
                if values[i] == 'T':
                    value = True
                elif values[i] == 'F':
                    value = False
                else:
                    raise TypeError("meta-type is 'B', but meta-value is neither 'T' nor 'F'. meta-value:",values[i])
            elif key_type == 'T':
                value = values[i]
            else:
                raise TypeError("unknown meta-type: known meta types are: I,F,B and T. meta-type:",key_type)
            meta_dict[keys[i]]=(value, comments[i])
        return meta_dict


    def _make_columns_dict(self):
        """ This method retrieves information about the columns
        stored inside the fits files internal binary table.
        returns: dict
            key: string column name -- all capital letters
            values: tuple(
                number of elements in table field - integer
                size of element in bytes -- this is not really interesting for any user
                    might be omitted in future versions
                type - a single character code -- should be translated into
                    a comprehensible word
                unit - string like 'mV' or 'ADC count'
        """
        # intermediate variables for file table-metadata dict generation
        keys=self.GetPy_ColumnKeys()
        #offsets=self.GetPy_ColumnOffsets() #not needed on python level...
        nums=self.GetPy_ColumnNums()
        sizes=self.GetPy_ColumnSizes()
        types=self.GetPy_ColumnTypes()
        units=self.GetPy_ColumnUnits()

        # zip the values and create the columns dictionary
        return dict(zip(keys, zip(nums,sizes,types,units)))

    def stack(self, on=True):
        """ fetch one row and start accumulating the registered columns

        initializes self.stacked_cols; next() then vstacks every further row
        """
        self.next()
        for col in self._registered_cols:
            if isinstance( self.dict[col], np.ndarray ):
                self.stacked_cols[col] = self.dict[col]
            else:
                self.stacked_cols[col] = np.array(self.dict[col])

    def register(self, input_str):
        """ register a column (or all columns, input_str='all') for retrieval

        raises KeyError listing the valid column names when input_str is
        not a column of the binary table
        """
        columns = self.columns
        if input_str.lower() == 'all':
            for col in columns:
                self._register(col)
        else:
            #check if colname is in columns:
            if input_str not in columns:
                error_msg = 'colname:'+ input_str +' is not a column in the binary table.\n'
                error_msg+= 'possible colnames are\n'
                for key in columns:
                    error_msg += key+'\n'
                raise KeyError(error_msg)
            else:
                self._register(input_str)

    # 'private' method, do not use
    def _register( self, colname):
        """ allocate a buffer for *colname* and hand its address to cfitsio

        single-element fields get a ctypes scalar (unwrapped to a plain
        python value in next()), array fields get a numpy array.
        """
        columns = self.columns

        number_of_elements = int(columns[colname][0])
        ctypecode_of_elements = columns[colname][2]

        # the fields inside the columns can either contain single numbers,
        # or whole arrays of numbers as well.
        # we treat single elements differently...
        if number_of_elements == 1:
            # allocate some memory for a single number according to its type
            try:
                local = self._CTYPES_SCALAR[ctypecode_of_elements]()
            except KeyError:
                raise TypeError('unknown ctypecode_of_elements:',ctypecode_of_elements)
        else:
            try:
                nptype = self._NUMPY_TYPE[ctypecode_of_elements]
            except KeyError:
                raise TypeError('unknown ctypecode_of_elements:',ctypecode_of_elements)
            local = np.zeros( number_of_elements, nptype)

        # Set the Pointer Address
        self.SetPtrAddress(colname, local)
        self._table_cols[colname] = local
        if number_of_elements > 1:
            self.__dict__[colname] = local
            self.dict[colname] = local
        else:
            # remove any traces of ctypes:
            self.__dict__[colname] = local.value
            self.dict[colname] = local.value
        self._registered_cols.append(colname)


    def treat_meta_dict(self):
        """make 'interesting' meta information available like normal members.
        non interesting are:
        TFORM, TUNIT, and TTYPE
        since these are available via the columns dict.
        """

        self.number_of_rows = self.meta['NAXIS2'][0]
        self.number_of_columns = self.meta['TFIELDS'][0]

        # there is some information in the meta dict, which is always there:
        # it is regarded as not interesting:
        uninteresting_meta = {}
        uninteresting_meta['arraylike'] = {}
        uninteresting = ['NAXIS', 'NAXIS1', 'NAXIS2',
                         'TFIELDS',
                         'XTENSION','EXTNAME','EXTREL',
                         'BITPIX', 'PCOUNT', 'GCOUNT',
                         'ORIGIN',
                         'PACKAGE', 'COMPILED', 'CREATOR',
                         'TELESCOP','TIMESYS','TIMEUNIT','VERSION']
        for key in uninteresting:
            if key in self.meta:
                uninteresting_meta[key]=self.meta[key]
                del self.meta[key]

        # shortcut to access the meta dict. But this needs to
        # be cleaned up quickly!!!
        meta = self.meta

        # loop over keys:
        # * try to find array-like keys (trailing digits, e.g. TFORM1..TFORMn)
        arraylike = {}
        singlelike = []
        for key in self.meta:
            stripped = key.rstrip('1234567890')
            if stripped == key:
                singlelike.append(key)
            else:
                if stripped not in arraylike:
                    arraylike[stripped] = 0
                else:
                    arraylike[stripped] += 1
        newmeta = {}
        for key in singlelike:
            newmeta[key.lower()] = meta[key]
        for key in arraylike:
            uninteresting_meta['arraylike'][key.lower()] = []
            # indices are 1-based, and arraylike[key] counts occurrences
            # minus one -- '+2' so the last index is not dropped
            # (was an off-by-one: range(arraylike[key]+1))
            for i in range(arraylike[key]+2):
                if key+str(i) in meta:
                    uninteresting_meta['arraylike'][key.lower()].append(meta[key+str(i)])
        self.ui_meta = uninteresting_meta
        # make newmeta self
        for key in newmeta:
            self.__dict__[key]=newmeta[key]

        # renamed from 'dict' so the builtin is not shadowed
        snapshot = self.__dict__.copy()
        del snapshot['meta']
        del snapshot['ui_meta']
        self.dict = snapshot

    def __iter__(self):
        """ iterator """
        return self

    def next(self):
        """ used by __iter__

        advances to the next table row, unwraps the registered ctypes
        scalars into plain python values and grows the stacked columns;
        raises StopIteration at the end of the table.
        """
        # Here one might check, if looping makes any sense, and if not
        # one could stop looping or so...
        # like this:
        #
        # if len(self._registered_cols) == 0:
        #   print 'warning: looping without any registered columns'
        if self._current_row < self.number_of_rows:
            if self.GetNextRow() == False:
                raise StopIteration
            for col in self._registered_cols:
                if isinstance(self._table_cols[col], ctypes._SimpleCData):
                    self.__dict__[col] = self._table_cols[col].value
                    self.dict[col] = self._table_cols[col].value

            for col in self.stacked_cols:
                if isinstance(self.dict[col], np.ndarray):
                    self.stacked_cols[col] = np.vstack( (self.stacked_cols[col],self.dict[col]) )
                else:
                    self.stacked_cols[col] = np.vstack( (self.stacked_cols[col],np.array(self.dict[col])) )
            self._current_row += 1
        else:
            raise StopIteration
        return self

    def show(self):
        """ pretty-print the accessible-members snapshot """
        pprint.pprint(self.dict)
729
730
class fnames( object ):
    """ organize file names of a FACT data run

    builds the full paths of the raw data file, the drs calibration file
    and the results file radical from a 5-element specifier.
    """

    # default specifier: [drs-calib run, data run, YYYY, MM, DD]
    _DEFAULT_SPECIFIER = ('012', '023', '2011', '11', '24')

    def __init__(self, specifier = None,
                 rpath = '/scratch_nfs/res/bsl/',
                 zipped = True):
        """
        specifier : list of strings defined as:
            [ 'DRS calibration file', 'Data file', 'YYYY', 'MM', 'DD']
            (default: ['012', '023', '2011', '11', '24'])

        rpath : directory path for the results; YYYYMMDD will be appended to rpath
        zipped : use zipped (True) or unzipped (False) raw files
        """
        # None-sentinel instead of a mutable default argument
        if specifier is None:
            specifier = list(self._DEFAULT_SPECIFIER)

        self.specifier = specifier
        self.rpath = rpath
        self.zipped = zipped

        self.make( self.specifier, self.rpath, self.zipped )


    def make( self, specifier, rpath, zipped ):
        """ create (make) the filenames

        names : dictionary of filenames, tags { 'data', 'drscal', 'results' }
        data : name of the data file
        drscal : name of the drs calibration file
        results : radical of file name(s) for results (to be completed by suffixes)
        """

        self.specifier = specifier

        if zipped:
            dpath = '/data00/fact-construction/raw/'
            ext = '.fits.gz'
        else:
            dpath = '/data03/fact-construction/raw/'
            ext = '.fits'

        cfile, dfile, year, month, day = specifier

        yyyymmdd = year + month + day

        # NOTE: the date is appended to rpath on every call; calling make()
        # again with self.rpath would append it twice
        rpath = rpath + yyyymmdd + '/'
        self.rpath = rpath
        self.names = {}

        tmp = dpath + year + '/' + month + '/' + day + '/' + yyyymmdd + '_'
        self.names['data'] = tmp + dfile + ext
        self.names['drscal'] = tmp + cfile + '.drs' + ext
        self.names['results'] = rpath + yyyymmdd + '_' + dfile + '_' + cfile

        self.data = self.names['data']
        self.drscal = self.names['drscal']
        self.results = self.names['results']

    def info( self ):
        """ print complete filenames """
        print('file names:')
        print('data:     %s' % self.names['data'])
        print('drs-cal:  %s' % self.names['drscal'])
        print('results:  %s' % self.names['results'])
803
804# end of class definition: fnames( object )
805
def _test_SlowData( filename ):
    """ print a short interactive usage guide for the SlowData class

    filename : path of the fits file (only echoed; the file itself is
               assumed to already be open as the variable 'file')
    """
    print '-'*70
    print "opened :", filename, " as 'file'"
    print
    print '-'*70
    print 'type file.show() to look at its contents'
    print "type file.register( columnname ) or file.register('all') in order to register columns"
    print
    print " due column-registration you declare, that you would like to retrieve the contents of one of the columns"
    print " after column-registration, the 'file' has new member variables, they are named like the columns"
    print " PLEASE NOTE: immediatly after registration, the members exist, but they are empty."
    print " the values are assigned only, when you call file.next() or when you loop over the 'file'"
    print
    print "in order to loop over it, just go like this:"
    print "for row in file:"
    print " print row.columnname_one, row.columnname_two"
    print
    print ""
    print '-'*70
825
826
827
def _test_iter( nevents ):
    """ test for function __iter__

    nevents : number of events to loop over before breaking

    NOTE(review): depends on hard-coded raw-file paths below; only works
    on a machine where those files exist
    """

    data_file_name = '/data00/fact-construction/raw/2011/11/24/20111124_117.fits.gz'
    calib_file_name = '/data00/fact-construction/raw/2011/11/24/20111124_114.drs.fits.gz'
#    data_file_name = '/home/luster/win7/FACT/data/raw/20120114/20120114_028.fits.gz'
#    calib_file_name = '/home/luster/win7/FACT/data/raw/20120114/20120114_022.drs.fits.gz'
    run = RawData( data_file_name, calib_file_name , return_dict=True)

    # each 'event' is run.__dict__ (return_dict=True)
    for event in run:
        print 'ev ', event['event_id'], 'data[0,0] = ', event['acal_data'][0,0], 'start_cell[0] = ', event['start_cells'][0], 'trigger type = ', event['trigger_type']
        if run.event_id == nevents:
            break
841
if __name__ == '__main__':
    """ tests """
    # no arguments: demo the RawData iterator (needs the hard-coded raw
    # files from _test_iter); one argument: open it with SlowData
    import sys
    if len(sys.argv) == 1:
        print 'showing test of iterator of RawData class'
        print 'in order to test the SlowData classe please use:', sys.argv[0], 'fits-file-name'
        _test_iter(10)


    else:
        print 'showing test of SlowData class'
        print 'in case you wanted to test the RawData class, please give no commandline arguments'
        # NOTE: 'file' shadows the builtin; kept for interactive use
        file = SlowData(sys.argv[1])
        _test_SlowData(sys.argv[1])
Note: See TracBrowser for help on using the repository browser.