source: fact/tools/pyscripts/pyfact/pyfact.py@ 13591

Last change on this file since 13591 was 13518, checked in by neise, 12 years ago
RawData uses CalFactFits as default, see __init__() parameter use_CalFactFits
  • Property svn:executable set to *
File size: 31.8 KB
Line 
1#!/usr/bin/python -tt
2#
3# Werner Lustermann, Dominik Neise
4# ETH Zurich, TU Dortmund
5#
import ctypes  # module name itself is needed for ctypes.c_* / ctypes._SimpleCData in SlowData
from ctypes import *
import numpy as np
import pprint # for SlowData
from scipy import signal

# get the ROOT stuff + my shared libs
from ROOT import gSystem
# factfits_h.so is made from factfits.h and is used to access the data
# make sure the location of factfits_h.so is in LD_LIBRARY_PATH.
# having it in PYTHONPATH is *not* sufficient
gSystem.Load('factfits_h.so')
gSystem.Load('calfactfits_h.so')
from ROOT import *
19
class RawDataFeeder( object ):
    """ Wrapper class for RawData class
        capable of iterating over multiple RawData Files
    """

    def __init__(self, filelist):
        """ *filelist* list of files to iterate over
            the list should contain tuples, or sublists of two filenames
            the first should be a data file (*.fits.gz)
            the second should be an amplitude calibration file (*.drs.fits.gz)

            raises TypeError on malformed input, ValueError on an empty list
        """
        # sanity check for input
        if not isinstance(filelist, list):
            raise TypeError('filelist should be a list')
        for entry in filelist:
            if len(entry) != 2:
                raise TypeError('the entries of filelist should have length == 2')
            for path in entry:
                if not isinstance(path, str):
                    raise TypeError('the entries of filelist should be path, i.e. of type str()')
                # todo: check if 'path' is a valid path
                #       else: throw an Exception, or Warning?
        if len(filelist) == 0:
            raise ValueError('filelist must not be empty')

        # work on a copy, so the caller's list is not emptied as a side effect
        # (the original deleted entries from the caller's list)
        self.filelist = list(filelist)
        first_pair = self.filelist.pop(0)
        self._current_RawData = RawData(first_pair[0], first_pair[1], return_dict=True)

    def __iter__(self):
        return self

    def next(self):
        """ Method being called by the iterator.
        Since the RawData Objects are simply looped over, the event_id from the
        RawData object will not be unique.
        Each RawData object will start with event_id = 1 as usual.

        (fixed: the original was missing 'self', referenced the undefined
        global 'filelist', and returned None once at every file boundary)
        """
        try:
            return self._current_RawData.next()
        except StopIteration:
            # current_RawData was completely processed:
            # delete it (I hope this calls the destructor of the fits file
            # and/or closes it) and remake it, if possible
            del self._current_RawData
            if self.filelist:
                next_pair = self.filelist.pop(0)
                self._current_RawData = RawData(next_pair[0], next_pair[1], return_dict=True)
                # immediately deliver the first event of the new file
                return self.next()
            else:
                raise
68
69
70
class RawData( object ):
    """ raw data access and calibration

    - open raw data file and drs calibration file
    - performs amplitude calibration
    - performs baseline subtraction if wanted
    - provides all data in an array:
        row = number of pixel
        col = length of region of interest
    """

    def __init__(self, data_file_name, calib_file_name,
                 user_action_calib=lambda acal_data, data, blm, tom, gm, scells, nroi: None,
                 baseline_file_name='',
                 return_dict = None,
                 do_calibration = True,
                 use_CalFactFits = True):
        """ initialize object

        open data file and calibration data file
        get basic information about the data in data_file_name
        allocate buffers for data access

        data_file_name     : fits or fits.gz file of the data including the path
        calib_file_name    : fits or fits.gz file containing DRS calibration data
        user_action_calib  : callback invoked after each python-side calibration
        baseline_file_name : npy file containing the baseline values
        return_dict        : if True, next() returns self.__dict__, else a tuple
        do_calibration     : if True, next() calibrates each event (FactFits mode)
        use_CalFactFits    : if True, events are calibrated by CalFactFits (C++),
                             else by calibrate_drs_amplitude() in python
        """
        self.__module__ = 'pyfact'
        # manual implementation of default value, but I need to find out
        # if the user of this class is aware of the new option
        if return_dict is None:
            print('Warning: Rawdata.__init__() has a new option "return_dict"')
            print('the default value of this option is False, so nothing changes for you at the moment.')
            print('')
            print('you probably want, to get a dictionary out of the next() method anyway')
            print(" so please change your scripts and set this option to True, for the moment")
            print('e.g. like this: run = RawData(data_filename, calib_filename, return_dict = True)')
            print("after a while, the default value, will turn to True .. so you don't have to give the option anymore")
            print('and some time later, the option will not be supported anymore')
            return_dict = False
        self.return_dict = return_dict
        self.use_CalFactFits = use_CalFactFits

        self.do_calibration = do_calibration

        self.data_file_name = data_file_name
        self.calib_file_name = calib_file_name
        self.baseline_file_name = baseline_file_name

        self.user_action_calib = user_action_calib

        # baseline correction is requested via a non-empty baseline file name
        self.correct_baseline = len(baseline_file_name) > 0

        # access the data (and calibration) file
        if use_CalFactFits:
            self._setup_cal_fact_fits()
        else:
            self._setup_fact_fits()

        # array of per-pixel baseline values; stays all ZERO until
        # baseline_read_values() is called
        self.v_bsl = np.zeros(self.npix)

    def _setup_cal_fact_fits(self):
        """ open the data file via CalFactFits; amplitude calibration is done in C++ """
        try:
            data_file = CalFactFits(self.data_file_name, self.calib_file_name)
        except IOError:
            print('problem accessing data file:  ' + self.data_file_name)
            raise  # stop ! no data

        self.data_file = data_file
        # CalFactFits writes each calibrated event into this buffer;
        # reshape() returns a view, so the registered pointer stays valid
        self.data = np.empty(data_file.npix * data_file.nroi, np.float64)
        data_file.SetNpcaldataPtr(self.data)
        self.data = self.data.reshape(data_file.npix, data_file.nroi)
        # in this mode the data is already amplitude calibrated
        self.acal_data = self.data

        self.nroi = data_file.nroi
        self.npix = data_file.npix
        self.nevents = data_file.nevents

        # data per event, filled in next()
        self.event_id = None
        self.trigger_type = None
        self.start_cells = None
        self.board_times = None

    def _setup_fact_fits(self):
        """ open data + calibration file via FactFits; calibration is done in python """
        try:
            data_file = FactFits(self.data_file_name)
        except IOError:
            print('problem accessing data file:  ' + self.data_file_name)
            raise  # stop ! no data

        self.data_file = data_file

        # get basic information about the data file
        self.nroi = data_file.GetUInt('NROI')    # region of interest (number of DRS slices read)
        self.npix = data_file.GetUInt('NPIX')    # number of pixels (should be 1440)
        self.nevents = data_file.GetNumRows()    # number of events in the data run

        # allocate the data memories
        self.event_id = c_ulong()
        self.trigger_type = c_ushort()
        # 2D array with raw data (one row per pixel)
        self.data = np.zeros(self.npix * self.nroi, np.int16).reshape(self.npix, self.nroi)
        # slice where drs readout started
        self.start_cells = np.zeros(self.npix, np.int16)
        # time when the FAD was triggered, in some strange units...
        self.board_times = np.zeros(40, np.int32)

        # set the pointers to the data
        data_file.SetPtrAddress('EventNum', self.event_id)
        data_file.SetPtrAddress('TriggerType', self.trigger_type)
        data_file.SetPtrAddress('StartCellData', self.start_cells)
        data_file.SetPtrAddress('Data', self.data)
        data_file.SetPtrAddress('BoardTime', self.board_times)

        # open the drs calibration file
        try:
            calib_file = FactFits(self.calib_file_name)
        except IOError:
            print('problem accessing calibration file:  ' + self.calib_file_name)
            raise
        self.calib_file = calib_file

        baseline_mean = calib_file.GetN('BaselineMean')
        gain_mean = calib_file.GetN('GainMean')
        trigger_offset_mean = calib_file.GetN('TriggerOffsetMean')

        # number of calibration constants per pixel
        # (explicit integer division; the totals are multiples of npix)
        self.Nblm = baseline_mean // self.npix
        self.Ngm = gain_mean // self.npix
        self.Ntom = trigger_offset_mean // self.npix

        self.blm = np.zeros(baseline_mean, np.float32).reshape(self.npix, self.Nblm)
        self.gm = np.zeros(gain_mean, np.float32).reshape(self.npix, self.Ngm)
        self.tom = np.zeros(trigger_offset_mean, np.float32).reshape(self.npix, self.Ntom)

        calib_file.SetPtrAddress('BaselineMean', self.blm)
        calib_file.SetPtrAddress('GainMean', self.gm)
        calib_file.SetPtrAddress('TriggerOffsetMean', self.tom)
        calib_file.GetRow(0)

        # make calibration constants double, so we never need to roll
        self.blm = np.hstack((self.blm, self.blm))
        self.gm = np.hstack((self.gm, self.gm))
        self.tom = np.hstack((self.tom, self.tom))

    def __iter__(self):
        """ iterator """
        return self

    def next(self):
        """ used by __iter__ ; load (and in FactFits mode calibrate) the next event

        returns self.__dict__ if return_dict is set, else the tuple
        (acal_data, start_cells, trigger_type.value)
        NOTE(review): the tuple return path uses trigger_type.value and so only
        works in FactFits mode, where trigger_type is a ctypes number — as in
        the original code.
        """
        if self.use_CalFactFits:
            if not self.data_file.GetCalEvent():
                raise StopIteration
            self.event_id = self.data_file.event_id
            self.trigger_type = self.data_file.event_triggertype
            self.start_cells = self.data_file.event_offset
            self.board_times = self.data_file.event_boardtimes
        else:
            if not self.data_file.GetNextRow():
                raise StopIteration
            if self.do_calibration:
                self.calibrate_drs_amplitude()

        if self.return_dict:
            return self.__dict__
        else:
            return self.acal_data, self.start_cells, self.trigger_type.value

    def next_event(self):
        """ load the next event from disk and calibrate it
        """
        if self.use_CalFactFits:
            self.data_file.GetCalEvent()
        else:
            self.data_file.GetNextRow()
            self.calibrate_drs_amplitude()

    def calibrate_drs_amplitude(self):
        """ perform the drs amplitude calibration of the event data

        fills self.acal_data (2D array, pixel x roi, amplitude calibrated)
        and finally hands everything to self.user_action_calib
        """
        # shortcuts
        blm = self.blm
        gm = self.gm
        tom = self.tom

        to_mV = 2000. / 4096.
        # 2D array with amplitude calibrated data in mV
        acal_data = self.data * to_mV  # convert ADC counts to mV

        for pixel in range(self.npix):
            # shortcuts
            sc = self.start_cells[pixel]
            roi = self.nroi
            # rotate the pixel baseline mean to the Data startCell
            acal_data[pixel, :] -= blm[pixel, sc:sc + roi]
            # the 'trigger offset mean' does not need to be rolled
            # on the contrary, it seems there is an offset in the DRS data,
            # which is related to its distance to the startCell, not to its
            # distance to the beginning of the physical pipeline in the DRS chip
            acal_data[pixel, :] -= tom[pixel, 0:roi]
            # rotate the pixel gain mean to the Data startCell
            acal_data[pixel, :] /= gm[pixel, sc:sc + roi]

        # scale factor taken over from the original code;
        # its physical meaning is not documented here
        self.acal_data = acal_data * 1907.35

        self.user_action_calib(self.acal_data,
                               np.reshape(self.data, (self.npix, self.nroi)),
                               blm, tom, gm, self.start_cells, self.nroi)

    def baseline_read_values(self, file, bsl_hist='bsl_sum/hplt_mean'):
        """ open ROOT file with baseline histogram and read baseline values

        file      : name of the root file
        bsl_hist  : path to the histogram containing the baseline values
        """
        try:
            f = TFile(file)
        except Exception:  # narrowed from a bare except; stays best-effort
            print('Baseline data file could not be read:  ' + file)
            return

        h = f.Get(bsl_hist)

        for i in range(self.npix):
            self.v_bsl[i] = h.GetBinContent(i + 1)

        f.Close()

    def baseline_correct(self):
        """ subtract the per-pixel baseline from the calibrated data
        """
        for pixel in range(self.npix):
            self.acal_data[pixel, :] -= self.v_bsl[pixel]

    def info(self):
        """ print run information

        (fixed: the original referenced the undefined local names
        data_file_name / calib_file_name and unconditionally printed the
        N* counters, which only exist in FactFits mode)
        """
        print('data file:  ' + self.data_file_name)
        print('calib file:  ' + self.calib_file_name)
        print('calibration file')
        if not self.use_CalFactFits:
            print('N baseline_mean:  ' + str(self.Nblm))
            print('N gain mean:  ' + str(self.Ngm))
            print('N TriggeroffsetMean:  ' + str(self.Ntom))
334
335# -----------------------------------------------------------------------------
class RawDataFake( object ):
    """ raw data FAKE access similar to real RawData access

    generates noisy sine traces instead of reading a fits file;
    useful for testing code that consumes RawData-like events
    """

    def __init__(self, data_file_name, calib_file_name,
                 user_action_calib=lambda acal_data, data, blm, tom, gm, scells, nroi: None,
                 baseline_file_name=''):
        """ file name arguments are accepted for interface compatibility with
        RawData; they are stored but no file is ever opened
        """
        self.__module__ = 'pyfact'

        # stored so info() can report them (the original never kept them)
        self.data_file_name = data_file_name
        self.calib_file_name = calib_file_name
        self.user_action_calib = user_action_calib
        self.baseline_file_name = baseline_file_name

        self.nroi = 300
        self.npix = 9
        self.nevents = 1000

        self.simulator = None

        # constant sampling interval of 0.5 (presumably ns — TODO confirm)
        self.time = np.ones(1024) * 0.5

        self.event_id = c_ulong(0)
        self.trigger_type = c_ushort(4)
        self.data = np.zeros(self.npix * self.nroi, np.int16).reshape(self.npix, self.nroi)
        self.start_cells = np.zeros(self.npix, np.int16)
        self.board_times = np.zeros(40, np.int32)

    def __iter__(self):
        """ iterator """
        return self

    def next(self):
        """ used by __iter__ ; simulate and return the next fake event """
        self.event_id = c_ulong(self.event_id.value + 1)
        self.board_times = self.board_times + 42

        if self.event_id.value >= self.nevents:
            raise StopIteration
        self._make_event_data()

        return self.__dict__

    def _make_event_data(self):
        """ simulate one event: identical noisy sine trace in every pixel """
        # sample times relative to the first sample
        # (fixed: the original referenced the undefined name 'time')
        sample_times = self.time.cumsum() - self.time[0]

        # random start cell, identical for all pixels
        self.start_cells = np.ones(self.npix, np.int16) * np.random.randint(0, 1024)

        starttime = self.start_cells[0]

        # renamed from 'signal', which shadowed the scipy.signal import
        pulse = self._std_sinus_simu(sample_times, starttime)

        # one row per pixel
        # (fixed: the original stacked 10 rows although npix == 9)
        self.data = np.tile(pulse, (self.npix, 1))

    def _std_sinus_simu(self, times, starttime):
        """ noisy sine wave of fixed period, phase shifted by a jittered starttime """
        period = 10  # in ns

        # give a jitter on starttime
        # (fixed: the original read the misspelled, undefined name 'startime')
        starttime = np.random.normal(starttime, 0.05)

        phase = 0.0
        trace = 10 * np.sin(times * 2 * np.pi / period + starttime + phase)

        # add some noise
        noise = np.random.normal(0.0, 0.5, trace.shape)
        trace += noise
        return trace

    def info(self):
        """ print run information

        (fixed: the original referenced undefined names and calibration
        counters that RawDataFake never sets)
        """
        print('data file:  ' + str(self.data_file_name))
        print('calib file:  ' + str(self.calib_file_name))
        print('nroi: ' + str(self.nroi) + '  npix: ' + str(self.npix) + '  nevents: ' + str(self.nevents))
417
418# -----------------------------------------------------------------------------
419
class SlowData( FactFits ):
    """ -Fact SlowData File-
    A Python wrapper for the fits-class implemented in pyfits.h
    provides easy access to the fits file meta data.
    * dictionary of file metadata - self.meta
    * dict of table metadata - self.columns
    * variable table column access, thus possibly increased speed while looping
    """

    def __init__(self, path):
        """ creates meta and columns dictionaries

        path : path of the fits file to open
        """
        self.path = path
        try:
            FactFits.__init__(self, path)
        except IOError:
            # (fixed: the original printed the undefined name 'data_file_name')
            print('problem accessing data file:  ' + path)
            raise  # stop ! no data

        self.meta = self._make_meta_dict()
        self.columns = self._make_columns_dict()

        self.treat_meta_dict()

        # list of columns, which are already registered
        # see method register()
        self._registered_cols = []
        # dict of column data; kept so the ctypes wrappers of scalar
        # columns can be unwrapped before exposing their values
        self._table_cols = {}

        # I need to count the rows, since the normal loop mechanism seems not to work.
        self._current_row = 0

        self.stacked_cols = {}

    def _make_meta_dict(self):
        """ This method retrieves meta information about the fits file and
        stores this information in a dict
        return: dict
        key: string - all capital letters
        value: tuple( numerical value, string comment)
        """
        # intermediate variables for file metadata dict generation
        keys = self.GetPy_KeyKeys()
        values = self.GetPy_KeyValues()
        comments = self.GetPy_KeyComments()
        types = self.GetPy_KeyTypes()

        if len(keys) != len(values):
            raise TypeError('len(keys)', len(keys), ' != len(values)', len(values))
        if len(keys) != len(types):
            raise TypeError('len(keys)', len(keys), ' != len(types)', len(types))
        if len(keys) != len(comments):
            raise TypeError('len(keys)', len(keys), ' != len(comments)', len(comments))

        meta_dict = {}
        for i in range(len(keys)):
            # renamed from 'type' (shadowed the builtin)
            key_type = types[i]
            if key_type == 'I':
                value = int(values[i])
            elif key_type == 'F':
                value = float(values[i])
            elif key_type == 'B':
                if values[i] == 'T':
                    value = True
                elif values[i] == 'F':
                    value = False
                else:
                    raise TypeError("meta-type is 'B', but meta-value is neither 'T' nor 'F'. meta-value:", values[i])
            elif key_type == 'T':
                value = values[i]
            else:
                raise TypeError("unknown meta-type: known meta types are: I,F,B and T. meta-type:", key_type)
            meta_dict[keys[i]] = (value, comments[i])
        return meta_dict

    def _make_columns_dict(self):
        """ This method retrieves information about the columns
        stored inside the fits files internal binary table.
        returns: dict
        key: string column name -- all capital letters
        values: tuple(
            number of elements in table field - integer
            size of element in bytes -- this is not really interesting for any user
                might be omitted in future versions
            type - a single character code -- should be translated into
                a comprehensible word
            unit - string like 'mV' or 'ADC count'
            )
        """
        # intermediate variables for file table-metadata dict generation
        keys = self.GetPy_ColumnKeys()
        # offsets = self.GetPy_ColumnOffsets()  # not needed on python level...
        nums = self.GetPy_ColumnNums()
        sizes = self.GetPy_ColumnSizes()
        types = self.GetPy_ColumnTypes()
        units = self.GetPy_ColumnUnits()

        # zip the values and map them onto the column names
        values = zip(nums, sizes, types, units)
        columns = dict(zip(keys, values))
        return columns

    def stack(self, on=True):
        """ take a snapshot of all registered columns as the first row of
        self.stacked_cols; subsequent next() calls append further rows
        ('on' is accepted for interface compatibility; it is not evaluated,
        as in the original)
        """
        self.next()
        for col in self._registered_cols:
            current = self.dict[col]
            if isinstance(current, np.ndarray):
                self.stacked_cols[col] = current
            else:
                self.stacked_cols[col] = np.array(current)

    def register(self, input_str):
        """ register a single column by name, or all columns with 'all'

        raises KeyError (listing the valid names) for an unknown column
        """
        columns = self.columns
        if input_str.lower() == 'all':
            for col in columns:
                self._register(col)
        else:
            # check if colname is in columns
            if input_str not in columns:
                error_msg = 'colname:' + input_str + ' is not a column in the binary table.\n'
                error_msg += 'possible colnames are\n'
                for key in columns:
                    error_msg += key + '\n'
                raise KeyError(error_msg)
            else:
                self._register(input_str)

    # 'private' method, do not use
    def _register(self, colname):
        """ allocate a read buffer for *colname* and register it with the
        fits reader; scalar columns are exposed as plain python values
        """
        columns = self.columns

        number_of_elements = int(columns[colname][0])
        ctypecode_of_elements = columns[colname][2]
        # columns[colname][1] (element size in bytes) and columns[colname][3]
        # (physical unit) are not needed here

        # type codes, from the C++ source code:
        # 'L' bool(8), 'B' byte(8), 'I' short(16), 'J' int(32),
        # 'K' int(64), 'E' float(32), 'D' double(64), 'A' char

        # the fields inside the columns can either contain single numbers,
        # or whole arrays of numbers as well.
        # we treat single elements differently...
        if number_of_elements == 1:
            # allocate some memory for a single number according to its type
            scalar_ctypes = {
                'J': ctypes.c_ulong,       # 4 byte int
                'I': ctypes.c_ushort,      # 2 byte int
                'B': ctypes.c_ubyte,       # byte
                'D': ctypes.c_double,
                'E': ctypes.c_float,
                'A': ctypes.c_uchar,       # char
                'K': ctypes.c_ulonglong,   # 8 byte int
            }
            if ctypecode_of_elements not in scalar_ctypes:
                raise TypeError('unknown ctypecode_of_elements:', ctypecode_of_elements)
            local = scalar_ctypes[ctypecode_of_elements]()
        else:
            # allocate a numpy buffer of matching dtype
            np_types = {
                'B': np.int8,
                'A': np.int8,     # char; treated as int8 here
                'I': np.int16,
                'J': np.int32,
                'K': np.int64,
                'E': np.float32,
                'D': np.float64,
            }
            if ctypecode_of_elements not in np_types:
                raise TypeError('unknown ctypecode_of_elements:', ctypecode_of_elements)
            local = np.zeros(number_of_elements, np_types[ctypecode_of_elements])

        # Set the Pointer Address
        self.SetPtrAddress(colname, local)
        self._table_cols[colname] = local
        if number_of_elements > 1:
            self.__dict__[colname] = local
            self.dict[colname] = local
        else:
            # remove any traces of ctypes:
            self.__dict__[colname] = local.value
            self.dict[colname] = local.value
        self._registered_cols.append(colname)

    def treat_meta_dict(self):
        """make 'interesting' meta information available like normal members.
        non interesting are:
        TFORM, TUNIT, and TTYPE
        since these are available via the columns dict.
        """

        self.number_of_rows = self.meta['NAXIS2'][0]
        self.number_of_columns = self.meta['TFIELDS'][0]

        # there is some information in the meta dict, which is always there:
        # it is regarded as not interesting:
        uninteresting_meta = {}
        uninteresting_meta['arraylike'] = {}
        uninteresting = ['NAXIS', 'NAXIS1', 'NAXIS2',
                         'TFIELDS',
                         'XTENSION', 'EXTNAME', 'EXTREL',
                         'BITPIX', 'PCOUNT', 'GCOUNT',
                         'ORIGIN',
                         'PACKAGE', 'COMPILED', 'CREATOR',
                         'TELESCOP', 'TIMESYS', 'TIMEUNIT', 'VERSION']
        for key in uninteresting:
            if key in self.meta:
                uninteresting_meta[key] = self.meta[key]
                del self.meta[key]

        # shortcut to access the meta dict
        meta = self.meta

        # loop over keys:
        # * try to find array-like keys (i.e. keys with a numeric suffix)
        arraylike = {}
        singlelike = []
        for key in self.meta:
            stripped = key.rstrip('1234567890')
            if stripped == key:
                singlelike.append(key)
            else:
                if stripped not in arraylike:
                    arraylike[stripped] = 0
                else:
                    arraylike[stripped] += 1
        newmeta = {}
        for key in singlelike:
            newmeta[key.lower()] = meta[key]
        for key in arraylike:
            uninteresting_meta['arraylike'][key.lower()] = []
            # +2 so suffixes starting at 1 reach their highest index too;
            # the membership test below guards against absent keys
            # (the original's +1 missed the last entry for 1-based suffixes)
            for i in range(arraylike[key] + 2):
                if key + str(i) in meta:
                    uninteresting_meta['arraylike'][key.lower()].append(meta[key + str(i)])
        self.ui_meta = uninteresting_meta
        # make newmeta entries available as normal members
        for key in newmeta:
            self.__dict__[key] = newmeta[key]

        # 'dict' view of the members, without the two meta dicts themselves
        # (local renamed: the original shadowed the builtin 'dict')
        member_view = self.__dict__.copy()
        del member_view['meta']
        del member_view['ui_meta']
        self.dict = member_view

    def __iter__(self):
        """ iterator """
        return self

    def next(self):
        """ used by __iter__ ; advance one row and refresh registered members """
        # Here one might check, if looping makes any sense, and if not
        # one could stop looping or so...
        # like this:
        #
        # if len(self._registered_cols) == 0:
        #     print('warning: looping without any registered columns')
        if self._current_row < self.number_of_rows:
            if self.GetNextRow() == False:
                raise StopIteration
            for col in self._registered_cols:
                # scalar columns: unwrap the ctypes value
                if isinstance(self._table_cols[col], ctypes._SimpleCData):
                    self.__dict__[col] = self._table_cols[col].value
                    self.dict[col] = self._table_cols[col].value

            for col in self.stacked_cols:
                if isinstance(self.dict[col], np.ndarray):
                    self.stacked_cols[col] = np.vstack((self.stacked_cols[col], self.dict[col]))
                else:
                    self.stacked_cols[col] = np.vstack((self.stacked_cols[col], np.array(self.dict[col])))
            self._current_row += 1
        else:
            raise StopIteration
        return self

    def show(self):
        """ pretty-print the 'dict' view of this file """
        pprint.pprint(self.dict)
729
730
731
732
class fnames( object ):
    """ organize file names of a FACT data run
    """

    def __init__(self, specifier=None,
                 rpath='/scratch_nfs/res/bsl/',
                 zipped=True):
        """
        specifier : list of strings defined as:
                    [ 'DRS calibration file', 'Data file', 'YYYY', 'MM', 'DD']
                    defaults to ['012', '023', '2011', '11', '24']
                    (None-sentinel instead of a mutable default argument)

        rpath : directory path for the results; YYYYMMDD will be appended to rpath
        zipped : use zipped (True) or unzipped (Data)
        """
        if specifier is None:
            specifier = ['012', '023', '2011', '11', '24']

        self.specifier = specifier
        self.rpath = rpath
        self.zipped = zipped

        self.make(self.specifier, self.rpath, self.zipped)

    def make(self, specifier, rpath, zipped):
        """ create (make) the filenames

        names : dictionary of filenames, tags { 'data', 'drscal', 'results' }
        data : name of the data file
        drscal : name of the drs calibration file
        results : radical of file name(s) for results (to be completed by suffixes)
        """
        self.specifier = specifier

        if zipped:
            dpath = '/data00/fact-construction/raw/'
            ext = '.fits.gz'
        else:
            dpath = '/data03/fact-construction/raw/'
            ext = '.fits'

        year = specifier[2]
        month = specifier[3]
        day = specifier[4]

        yyyymmdd = year + month + day
        dfile = specifier[1]
        cfile = specifier[0]

        # results go below rpath/YYYYMMDD/
        rpath = rpath + yyyymmdd + '/'
        self.rpath = rpath
        self.names = {}

        tmp = dpath + year + '/' + month + '/' + day + '/' + yyyymmdd + '_'
        self.names['data'] = tmp + dfile + ext
        self.names['drscal'] = tmp + cfile + '.drs' + ext
        self.names['results'] = rpath + yyyymmdd + '_' + dfile + '_' + cfile

        self.data = self.names['data']
        self.drscal = self.names['drscal']
        self.results = self.names['results']

    def info(self):
        """ print complete filenames
        """
        print('file names:')
        print('data:  ' + self.names['data'])
        print('drs-cal:  ' + self.names['drscal'])
        print('results:  ' + self.names['results'])
805
806# end of class definition: fnames( object )
807
def _test_SlowData( filename ):
    """ print a short interactive usage guide for a freshly opened SlowData file """
    ruler = '-' * 70
    print(ruler)
    print("opened : " + filename + "  as 'file'")
    print('')
    print(ruler)
    print('type file.show() to look at its contents')
    print("type file.register( columnname ) or file.register('all') in order to register columns")
    print('')
    print(" due column-registration you declare, that you would like to retrieve the contents of one of the columns")
    print(" after column-registration, the 'file' has new member variables, they are named like the columns")
    print(" PLEASE NOTE: immediatly after registration, the members exist, but they are empty.")
    print(" the values are assigned only, when you call file.next() or when you loop over the 'file'")
    print('')
    print("in order to loop over it, just go like this:")
    print("for row in file:")
    print("    print row.columnname_one, row.columnname_two")
    print('')
    print('')
    print(ruler)
828
829
830def _test_iter( nevents ):
831 """ test for function __iter__ """
832
833 data_file_name = '/data00/fact-construction/raw/2011/11/24/20111124_117.fits.gz'
834 calib_file_name = '/data00/fact-construction/raw/2011/11/24/20111124_114.drs.fits.gz'
835# data_file_name = '/home/luster/win7/FACT/data/raw/20120114/20120114_028.fits.gz'
836# calib_file_name = '/home/luster/win7/FACT/data/raw/20120114/20120114_022.drs.fits.gz'
837 run = RawData( data_file_name, calib_file_name , return_dict=True)
838
839 for event in run:
840 print 'ev ', event['event_id'], 'data[0,0] = ', event['acal_data'][0,0], 'start_cell[0] = ', event['start_cells'][0], 'trigger type = ', event['trigger_type']
841 if run.event_id == nevents:
842 break
843
844if __name__ == '__main__':
845 """ tests """
846 import sys
847 if len(sys.argv) == 1:
848 print 'showing test of iterator of RawData class'
849 print 'in order to test the SlowData classe please use:', sys.argv[0], 'fits-file-name'
850 _test_iter(10)
851
852
853 else:
854 print 'showing test of SlowData class'
855 print 'in case you wanted to test the RawData class, please give no commandline arguments'
856 file = SlowData(sys.argv[1])
857 _test_SlowData(sys.argv[1])
Note: See TracBrowser for help on using the repository browser.