1 | #!/usr/bin/python -tt
|
---|
2 | #
|
---|
3 | # Werner Lustermann, Dominik Neise
|
---|
4 | # ETH Zurich, TU Dortmund
|
---|
5 | #
|
---|
6 | import sys
|
---|
7 | from ctypes import *
|
---|
8 | import numpy as np
|
---|
9 | import pprint # for SlowData
|
---|
10 | from scipy import signal
|
---|
11 |
|
---|
12 | # get the ROOT stuff + my shared libs
|
---|
13 | import ROOT
|
---|
14 | # factfits_h.so is made from factfits.h and is used to access the data
|
---|
15 | # make sure the location of factfits_h.so is in LD_LIBRARY_PATH.
|
---|
16 | # having it in PYTHONPATH is *not* sufficient
|
---|
# The hostname decides which libz.so path to load below.
hostname = ROOT.gSystem.HostName()

# map: (substring of) hostname -> absolute path of libz.so on that machine
libz_path_dict = {
    # hostname : /path/to/libz.so
    'isdc' : "/usr/lib64/libz.so",
    'neiseLenovo' : "/usr/lib/libz.so",
    'factcontrol' : "/usr/lib/libz.so",
    "max-K50AB" : "/usr/lib/x86_64-linux-gnu/libz.so",
    "watz" : "/usr/lib/x86_64-linux-gnu/libz.so",
    "fact03" : "/usr/lib/x86_64-linux-gnu/libz.so",
    "grolsch" : "/usr/lib/i386-linux-gnu/libz.so",
}
# substring match, so e.g. key 'isdc' also matches hosts like 'isdc-in04';
# NOTE: if several keys match, each matching path is loaded
libz_loaded = False
for my_hostname in libz_path_dict:
    if my_hostname in hostname:
        ROOT.gSystem.Load(libz_path_dict[my_hostname])
        libz_loaded = True
if not libz_loaded:
    print """Warning - Warning - Warning - Warning - Warning - Warning - Warning
I most probably need to load libz.so but I don't know where it is.

Please edit pyfact.py around line 16-24 and insert your hostname and your
path to your libz.so
Sorry for the inconvenience.
"""
    sys.exit(-1)


#NOTE: This part has to be adapted for gcc > 4.7, where -std=c++11 can/should(?) be used.
# make ACLiC compile the headers below with C++0x support and zlib enabled
root_make_string = ROOT.gSystem.GetMakeSharedLib()
if not "-std=c++0x" in root_make_string:
    root_make_string = root_make_string.replace('$Opt', '$Opt -std=c++0x -D HAVE_ZLIB')
ROOT.gSystem.SetMakeSharedLib(root_make_string)

# compile the fits access classes via ROOT/ACLiC ('+O' = build optimized if needed)
ROOT.gROOT.ProcessLine(".L extern_Mars_mcore/izstream.h+O")
ROOT.gROOT.ProcessLine(".L extern_Mars_mcore/fits.h+O")
ROOT.gROOT.ProcessLine(".L extern_Mars_mcore/zfits.h+O")
ROOT.gROOT.ProcessLine(".L extern_Mars_mcore/factfits.h+O")
ROOT.gROOT.ProcessLine(".L calfactfits.h+O")

# dictionaries for the STL map types exposed by the fits class interface
ROOT.gInterpreter.GenerateDictionary("map<string,fits::Entry>","map;string;extern_Mars_mcore/fits.h")
ROOT.gInterpreter.GenerateDictionary("pair<string,fits::Entry>","map;string;extern_Mars_mcore/fits.h")
ROOT.gInterpreter.GenerateDictionary("map<string,fits::Table::Column>","map;string;extern_Mars_mcore/fits.h")
ROOT.gInterpreter.GenerateDictionary("pair<string,fits::Table::Column>","map;string;extern_Mars_mcore/fits.h")

#ROOT.gSystem.Load('my_string_h.so')
ROOT.gSystem.Load('extern_Mars_mcore/fits_h.so')
ROOT.gSystem.Load('extern_Mars_mcore/izstream_h.so')
ROOT.gSystem.Load('extern_Mars_mcore/zfits_h.so')
ROOT.gSystem.Load('extern_Mars_mcore/factfits_h.so')
ROOT.gSystem.Load('calfactfits_h.so')
# pull the compiled classes (factfits, CalFactFits, TFile, ...) into this namespace
from ROOT import *
class RawDataFeeder( object ):
    """ Wrapper class for RawData class
    capable of iterating over multiple RawData Files
    """

    def __init__(self, filelist):
        """ *filelist* list of files to iterate over
        the list should contain tuples, or sublists of two filenames
        the first should be a data file (\*.fits.gz)
        the second should be an amplitude calibration file(\*.drs.fits.gz)

        Raises TypeError if *filelist* is malformed.
        """

        self.__module__ = 'pyfact'

        # sanity check for input
        if not isinstance(filelist, list):
            raise TypeError('filelist should be a list')
        for entry in filelist:
            if len(entry) != 2:
                raise TypeError('the entries of filelist should have length == 2')
            for path in entry:
                if not isinstance(path, str):
                    raise TypeError('the entries of filelist should be path, i.e. of type str()')
                #todo check if 'path' is a valid path
                # else: throw an Exception, or Warning?

        # NOTE: self.filelist aliases the caller's list, so the 'del' below
        # consumes the first entry from both views on purpose.
        self.filelist = filelist
        self._current_RawData = RawData(filelist[0][0], filelist[0][1], return_dict=True)
        del filelist[0]

    def __iter__(self):
        return self

    # BUGFIX: was 'def next():' without self -> TypeError on iteration,
    # and referenced bare 'filelist' instead of 'self.filelist' -> NameError.
    def next(self):
        """ Method being called by the iterator.
        Since the RawData Objects are simply looped over, the event_id from the
        RawData object will not be unique.
        Each RawData object will start with event_id = 1 as usual.
        """
        try:
            return self._current_RawData.next()
        except StopIteration:
            # current_RawData was completely processed
            # delete it (I hope this calls the destructor of the fits file and/or closes it)
            del self._current_RawData
            # and remake it, if possible
            if len(self.filelist) > 0:
                self._current_RawData = RawData(self.filelist[0][0], self.filelist[0][1], return_dict=True)
                del self.filelist[0]
            else:
                raise
123 | class RawData( object ):
|
---|
124 | """ raw data access and calibration
|
---|
125 |
|
---|
126 | class is **iterable**
|
---|
127 |
|
---|
128 | - open raw data file and drs calibration file
|
---|
129 | - performs amplitude calibration
|
---|
130 | - performs baseline substraction if wanted
|
---|
131 | - provides all data in an array:
|
---|
132 | row = number of pixel
|
---|
133 | col = length of region of interest
|
---|
134 |
|
---|
135 | """
|
---|
136 |
|
---|
137 |
|
---|
138 | def __init__(self, data_file_name, calib_file_name,
|
---|
139 | baseline_file_name='',
|
---|
140 | return_dict = True,
|
---|
141 | use_CalFactFits = True,
|
---|
142 | do_calibration = True,
|
---|
143 | user_action_calib=lambda acal_data, data, blm, tom, gm, scells, nroi: None):
|
---|
144 | """ -constructor-
|
---|
145 |
|
---|
146 | - open data file and calibration data file
|
---|
147 | - get basic information about the data in data_file_name
|
---|
148 | - allocate buffers for data access
|
---|
149 |
|
---|
150 | *data_file_name* : fits or fits.gz file of the data including the path
|
---|
151 |
|
---|
152 | *calib_file_name* : fits or fits.gz file containing DRS calibration data
|
---|
153 |
|
---|
154 | *baseline_file_name* : npy file containing the baseline values
|
---|
155 |
|
---|
156 | *return_dict* : this option will be removed in future releases.
|
---|
157 | formerly the next() method returned only a subset of (important) event information,
|
---|
158 | and it was not transparent how to retrieve the other (less important) information.
|
---|
159 | Nowadays next() returns self.__dict__ which contains everything we were able to find in the fits file.
|
---|
160 |
|
---|
161 | *use_CalFactFits* : formerly the DRS amplitude calibration was
|
---|
162 | implemented in python. But for performance reasons this was now moved into
|
---|
163 | a C++ class called CalFactFits. For test purposes, this option can be set to
|
---|
164 | False, but this is not really maintained anymore. If DRS the DRS calibration algorithm is
|
---|
165 | beeing updated in C++ it may not be updated in the python implementation.
|
---|
166 |
|
---|
167 | *do_calibration* : In case *use_CalFactFits* is False, one may choose
|
---|
168 | not to calibrate the data at all, thus safe quite some time.
|
---|
169 | This is imho only needed in case one is interesting in learning something about the
|
---|
170 | calibration algorithm itself.
|
---|
171 |
|
---|
172 | *user_action_calib* : callback function, intended for tests of the DRS calibration algorithm.
|
---|
173 | but since this is not done in the Python regime anymore, this function is never called.
|
---|
174 | (depending on *use_CalFactFits* of course)
|
---|
175 | """
|
---|
176 | self.__module__='pyfact'
|
---|
177 | # manual implementation of default value, but I need to find out
|
---|
178 | # if the user of this class is aware of the new option
|
---|
179 | if return_dict == False:
|
---|
180 | print 'DEPRECATION WARNING:'
|
---|
181 | print 'you are using RawData in a way, which is nor supported anymore.'
|
---|
182 | print ' Please set: return_dict = True, in the __init__ call'
|
---|
183 | self.return_dict = return_dict
|
---|
184 | self.use_CalFactFits = use_CalFactFits
|
---|
185 |
|
---|
186 | self.do_calibration = do_calibration
|
---|
187 |
|
---|
188 | self.data_file_name = data_file_name
|
---|
189 | self.calib_file_name = calib_file_name
|
---|
190 | self.baseline_file_name = baseline_file_name
|
---|
191 |
|
---|
192 | self.user_action_calib = user_action_calib
|
---|
193 |
|
---|
194 | # baseline correction: True / False
|
---|
195 | if len(baseline_file_name) == 0:
|
---|
196 | self.correct_baseline = False
|
---|
197 | else:
|
---|
198 | self.correct_baseline = True
|
---|
199 |
|
---|
200 |
|
---|
201 | # access data file
|
---|
202 | if use_CalFactFits:
|
---|
203 | try:
|
---|
204 | data_file = CalFactFits(data_file_name, calib_file_name)
|
---|
205 | except IOError:
|
---|
206 | print 'problem accessing data file: ', data_file_name
|
---|
207 | raise # stop ! no data
|
---|
208 |
|
---|
209 | #: either CalFactFits object or FactFits object, depending on *use_CalFactFits*
|
---|
210 | self.data_file = data_file
|
---|
211 | #: 1440x300 nparray containing the event data. pixel sorted according to CHID
|
---|
212 | self.data = np.empty( data_file.npix * data_file.nroi, np.float64)
|
---|
213 | data_file.SetNpcaldataPtr(self.data)
|
---|
214 | self.data = self.data.reshape( data_file.npix, data_file.nroi )
|
---|
215 | #: copy of data. here for historical reasons
|
---|
216 | self.acal_data = self.data
|
---|
217 | #: region of interest. (number of DRS slices read).
|
---|
218 | # for FACT data mostly 300. for special runs sometimes 1024.
|
---|
219 | self.nroi = data_file.nroi
|
---|
220 | #: number of Pixel in FACT. should be 1440
|
---|
221 | self.npix = data_file.npix
|
---|
222 | #: the total number of events in the data_file
|
---|
223 | self.nevents = data_file.nevents
|
---|
224 |
|
---|
225 | # Data per event
|
---|
226 | #: starting at 1
|
---|
227 | self.event_id = None
|
---|
228 |
|
---|
229 | #: data=4 ; the rest I don't know by heart .. should be documented here :-)
|
---|
230 | self.trigger_type = None
|
---|
231 | #self.start_cells = None
|
---|
232 | #self.board_times = None
|
---|
233 | #: slice where drs readout started for all DRS chips (160) .. but enlarged to the size of 1440 pixel. thus there are always 9 equal numbers inside.
|
---|
234 | self.start_cells = np.zeros( self.npix, np.int16 )
|
---|
235 | #: each FAD has an onboard clock running from startup time. Currently I don't know the time unit. However this is an array of 40 times, since we have 40 boards.
|
---|
236 | self.board_times = np.zeros( 40, np.int32 )
|
---|
237 | self._unixtime_tuple = np.zeros( 2, np.int32 )
|
---|
238 | self.unixtime = None
|
---|
239 |
|
---|
240 | # data_file is a CalFactFits object
|
---|
241 | # data_file.datafile is one of the two FactFits objects hold by a CalFactFits.
|
---|
242 | # sorry for the strange naming ..
|
---|
243 | data_file.datafile.SetPtrAddress('StartCellData', self.start_cells)
|
---|
244 | data_file.datafile.SetPtrAddress('BoardTime', self.board_times)
|
---|
245 | data_file.datafile.SetPtrAddress('UnixTimeUTC', self._unixtime_tuple)
|
---|
246 |
|
---|
247 |
|
---|
248 | else:
|
---|
249 | try:
|
---|
250 | data_file = factfits(self.data_file_name)
|
---|
251 | except IOError:
|
---|
252 | print 'problem accessing data file: ', data_file_name
|
---|
253 | raise # stop ! no data
|
---|
254 |
|
---|
255 | self.data_file = data_file
|
---|
256 |
|
---|
257 | # get basic information about the data file
|
---|
258 | self.nroi = data_file.GetUInt('NROI')
|
---|
259 | self.npix = data_file.GetUInt('NPIX')
|
---|
260 | self.nevents = data_file.GetNumRows()
|
---|
261 |
|
---|
262 | # allocate the data memories
|
---|
263 | self.event_id = c_ulong()
|
---|
264 | self.trigger_type = c_ushort()
|
---|
265 | self.data = np.zeros( self.npix * self.nroi, np.int16 ).reshape(self.npix ,self.nroi)
|
---|
266 | self.start_cells = np.zeros( self.npix, np.int16 )
|
---|
267 | self.board_times = np.zeros( 40, np.int32 )
|
---|
268 | self._unixtime_tuple = np.zeros(2, np.int32 )
|
---|
269 |
|
---|
270 | # set the pointers to the data++
|
---|
271 | data_file.SetPtrAddress('EventNum', self.event_id)
|
---|
272 | data_file.SetPtrAddress('TriggerType', self.trigger_type)
|
---|
273 | data_file.SetPtrAddress('StartCellData', self.start_cells)
|
---|
274 | data_file.SetPtrAddress('Data', self.data)
|
---|
275 | data_file.SetPtrAddress('BoardTime', self.board_times)
|
---|
276 | data_file.SetPtrAddress('UnixTimeUTC', self._unixtime_tuple)
|
---|
277 |
|
---|
278 | # open the calibration file
|
---|
279 | try:
|
---|
280 | calib_file = factfits(self.calib_file_name)
|
---|
281 | except IOError:
|
---|
282 | print 'problem accessing calibration file: ', calib_file_name
|
---|
283 | raise
|
---|
284 | #: drs calibration file
|
---|
285 | self.calib_file = calib_file
|
---|
286 |
|
---|
287 | baseline_mean = calib_file.GetN('BaselineMean')
|
---|
288 | gain_mean = calib_file.GetN('GainMean')
|
---|
289 | trigger_offset_mean = calib_file.GetN('TriggerOffsetMean')
|
---|
290 |
|
---|
291 | self.Nblm = baseline_mean / self.npix
|
---|
292 | self.Ngm = gain_mean / self.npix
|
---|
293 | self.Ntom = trigger_offset_mean / self.npix
|
---|
294 |
|
---|
295 | self.blm = np.zeros(baseline_mean, np.float32).reshape(self.npix , self.Nblm)
|
---|
296 | self.gm = np.zeros(gain_mean, np.float32).reshape(self.npix , self.Ngm)
|
---|
297 | self.tom = np.zeros(trigger_offset_mean, np.float32).reshape(self.npix , self.Ntom)
|
---|
298 |
|
---|
299 | calib_file.SetPtrAddress('BaselineMean', self.blm)
|
---|
300 | calib_file.SetPtrAddress('GainMean', self.gm)
|
---|
301 | calib_file.SetPtrAddress('TriggerOffsetMean', self.tom)
|
---|
302 | calib_file.GetRow(0)
|
---|
303 |
|
---|
304 | # make calibration constants double, so we never need to roll
|
---|
305 | self.blm = np.hstack((self.blm, self.blm))
|
---|
306 | self.gm = np.hstack((self.gm, self.gm))
|
---|
307 | self.tom = np.hstack((self.tom, self.tom))
|
---|
308 |
|
---|
309 | self.v_bsl = np.zeros(self.npix) # array of baseline values (all ZERO)
|
---|
310 |
|
---|
311 | def __iter__(self):
|
---|
312 | """ iterator """
|
---|
313 | return self
|
---|
314 |
|
---|
315 | def next(self):
|
---|
316 | """ used by __iter__
|
---|
317 |
|
---|
318 | returns self.__dict__
|
---|
319 | """
|
---|
320 | if self.use_CalFactFits:
|
---|
321 | if self.data_file.GetCalEvent() == False:
|
---|
322 | raise StopIteration
|
---|
323 | else:
|
---|
324 | self.event_id = self.data_file.event_id
|
---|
325 | self.trigger_type = self.data_file.event_triggertype
|
---|
326 | #self.start_cells = self.data_file.event_offset
|
---|
327 | #self.board_times = self.data_file.event_boardtimes
|
---|
328 | #self.acal_data = self.data.copy().reshape(self.data_file.npix, self.data_file.nroi)
|
---|
329 |
|
---|
330 | self.unixtime = self._unixtime_tuple[0] + self._unixtime_tuple[1]/1.e6
|
---|
331 |
|
---|
332 | else:
|
---|
333 | if self.data_file.GetNextRow() == False:
|
---|
334 | raise StopIteration
|
---|
335 | else:
|
---|
336 | if self.do_calibration == True:
|
---|
337 | self.calibrate_drs_amplitude()
|
---|
338 |
|
---|
339 | #print 'nevents = ', self.nevents, 'event_id = ', self.event_id.value
|
---|
340 | if self.return_dict:
|
---|
341 | return self.__dict__
|
---|
342 | else:
|
---|
343 | return self.acal_data, self.start_cells, self.trigger_type.value
|
---|
344 |
|
---|
345 | def next_event(self):
|
---|
346 | """ ---- DEPRICATED ----
|
---|
347 |
|
---|
348 | load the next event from disk and calibrate it
|
---|
349 | """
|
---|
350 | if self.use_CalFactFits:
|
---|
351 | self.data_file.GetCalEvent()
|
---|
352 | else:
|
---|
353 | self.data_file.GetNextRow()
|
---|
354 | self.calibrate_drs_amplitude()
|
---|
355 |
|
---|
356 | def calibrate_drs_amplitude(self):
|
---|
357 | """ --- DEPRICATED ---
|
---|
358 |
|
---|
359 | since the DRS calibration is done by the C++ class CalFactFits
|
---|
360 |
|
---|
361 | perform the drs amplitude calibration of the event data
|
---|
362 | """
|
---|
363 | # shortcuts
|
---|
364 | blm = self.blm
|
---|
365 | gm = self.gm
|
---|
366 | tom = self.tom
|
---|
367 |
|
---|
368 | to_mV = 2000./4096.
|
---|
369 | #: 2D array with amplitude calibrated dat in mV
|
---|
370 | acal_data = self.data * to_mV # convert ADC counts to mV
|
---|
371 |
|
---|
372 |
|
---|
373 | for pixel in range( self.npix ):
|
---|
374 | #shortcuts
|
---|
375 | sc = self.start_cells[pixel]
|
---|
376 | roi = self.nroi
|
---|
377 | # rotate the pixel baseline mean to the Data startCell
|
---|
378 | acal_data[pixel,:] -= blm[pixel,sc:sc+roi]
|
---|
379 | # the 'trigger offset mean' does not need to be rolled
|
---|
380 | # on the contrary, it seems there is an offset in the DRS data,
|
---|
381 | # which is related to its distance to the startCell, not to its
|
---|
382 | # distance to the beginning of the physical pipeline in the DRS chip
|
---|
383 | acal_data[pixel,:] -= tom[pixel,0:roi]
|
---|
384 | # rotate the pixel gain mean to the Data startCell
|
---|
385 | acal_data[pixel,:] /= gm[pixel,sc:sc+roi]
|
---|
386 |
|
---|
387 |
|
---|
388 | self.acal_data = acal_data * 1907.35
|
---|
389 |
|
---|
390 | self.user_action_calib( self.acal_data,
|
---|
391 | np.reshape(self.data, (self.npix, self.nroi) ), blm, tom, gm, self.start_cells, self.nroi)
|
---|
392 |
|
---|
393 |
|
---|
394 | def baseline_read_values(self, file, bsl_hist='bsl_sum/hplt_mean'):
|
---|
395 | """
|
---|
396 | open ROOT file with baseline histogram and read baseline values
|
---|
397 |
|
---|
398 | *file* : name of the root file
|
---|
399 |
|
---|
400 | *bsl_hist* : path to the histogram containing the basline values
|
---|
401 | """
|
---|
402 |
|
---|
403 | try:
|
---|
404 | f = TFile(file)
|
---|
405 | except:
|
---|
406 | print 'Baseline data file could not be read: ', file
|
---|
407 | return
|
---|
408 |
|
---|
409 | h = f.Get(bsl_hist)
|
---|
410 |
|
---|
411 | for i in range(self.npix):
|
---|
412 | self.v_bsl[i] = h.GetBinContent(i+1)
|
---|
413 |
|
---|
414 | f.Close()
|
---|
415 |
|
---|
416 | def baseline_correct(self):
|
---|
417 | """ subtract baseline from the data
|
---|
418 |
|
---|
419 | DN 08.06.2011: I didn't use this function at all so far... don't know how well it works.
|
---|
420 | """
|
---|
421 |
|
---|
422 | for pixel in range(self.npix):
|
---|
423 | self.acal_data[pixel,:] -= self.v_bsl[pixel]
|
---|
424 |
|
---|
425 | def info(self):
|
---|
426 | """ print run information
|
---|
427 |
|
---|
428 | not very well implemented ... we need more info here.
|
---|
429 | """
|
---|
430 | print 'data file: ', self.data_file_name
|
---|
431 | print 'calib file: ', self.calib_file_name
|
---|
432 | print '... we need more information printed here ... '
|
---|
433 |
|
---|
434 | # -----------------------------------------------------------------------------
|
---|
class RawDataFake( object ):
    """ raw data FAKE access similar to real RawData access

    Generates noisy-sine dummy events instead of reading files.
    DO NOT USE ... its not working
    """


    def __init__(self, data_file_name, calib_file_name,
                 user_action_calib=lambda acal_data, data, blm, tom, gm, scells, nroi: None,
                 baseline_file_name=''):
        """ fake constructor: stores the arguments, opens nothing.

        The signature mirrors RawData.__init__ so both are interchangeable.
        """
        self.__module__='pyfact'

        # BUGFIX: the constructor arguments used to be discarded; info()
        # then crashed on undefined names. Store them.
        self.data_file_name = data_file_name
        self.calib_file_name = calib_file_name
        self.baseline_file_name = baseline_file_name
        self.user_action_calib = user_action_calib

        #: fake region of interest
        self.nroi = 300
        #: fake number of pixels (real FACT has 1440)
        self.npix = 9
        #: number of fake events delivered before StopIteration
        self.nevents = 1000

        self.simulator = None

        #: fake DRS sampling intervals: 1024 cells of 0.5 ns each
        self.time = np.ones(1024) * 0.5


        self.event_id = c_ulong(0)
        self.trigger_type = c_ushort(4)
        self.data = np.zeros( self.npix * self.nroi, np.int16 ).reshape(self.npix ,self.nroi)
        self.start_cells = np.zeros( self.npix, np.int16 )
        self.board_times = np.zeros( 40, np.int32 )

    def __iter__(self):
        """ iterator """
        return self

    def next(self):
        """ used by __iter__; returns self.__dict__ """
        self.event_id = c_ulong(self.event_id.value + 1)
        self.board_times = self.board_times + 42

        if self.event_id.value >= self.nevents:
            raise StopIteration
        else:
            self._make_event_data()

        return self.__dict__

    def _make_event_data(self):
        """ generate one fake event and store it in self.data """
        # BUGFIX: was 'time[0]' (NameError) -- the sample times derive
        # from self.time
        sample_times = self.time.cumsum() - self.time[0]

        # random start cell
        self.start_cells = np.ones( self.npix, np.int16 ) * np.random.randint(0,1024)

        starttime = self.start_cells[0]

        # local renamed from 'signal' to avoid shadowing scipy.signal
        waveform = self._std_sinus_simu(sample_times, starttime)

        data = np.vstack( (waveform, waveform) )
        for i in range(8):
            data = np.vstack( (data, waveform) )

        self.data = data

    def _std_sinus_simu(self, times, starttime):
        """ return a noisy sine waveform sampled at *times* """
        period = 10 # in ns

        # give a jitter on starttime
        # BUGFIX: was 'startime' (NameError typo)
        starttime = np.random.normal(starttime, 0.05)

        phase = 0.0
        waveform = 10 * np.sin(times * 2*np.pi/period + starttime + phase)

        # add some noise
        noise = np.random.normal(0.0, 0.5, waveform.shape)
        waveform += noise
        return waveform

    def info(self):
        """ print run information """
        # BUGFIX: used to reference undefined globals (data_file_name) and
        # attributes this class never defines (Nblm, Ngm, Ntom); report
        # only what actually exists.
        print('data file: ' + str(self.data_file_name))
        print('calib file: ' + str(self.calib_file_name))
519 | # -----------------------------------------------------------------------------
|
---|
520 | import ctypes
|
---|
521 |
|
---|
522 | class SlowData( object ):
|
---|
523 | """ -Fact SlowData File-
|
---|
524 |
|
---|
525 | A Python wrapper for the fits-class implemented in factfits.h
|
---|
526 | provides easy access to the fits file meta data.
|
---|
527 |
|
---|
528 | * dictionary of file metadata - self.meta
|
---|
529 | * dict of table metadata - self.columns
|
---|
530 | * variable table column access, thus possibly increased speed while looping
|
---|
531 | """
|
---|
532 | def __del__(self):
|
---|
533 | del self.f
|
---|
534 |
|
---|
535 | def __init__(self, path):
|
---|
536 | """ creates meta and columns dictionaries
|
---|
537 | """
|
---|
538 | import os
|
---|
539 |
|
---|
540 | if not os.path.exists(path):
|
---|
541 | raise IOError(path+' was not found')
|
---|
542 | self.path = path
|
---|
543 | self.__module__ = 'pyfact'
|
---|
544 | try:
|
---|
545 | self.f = factfits(path)
|
---|
546 | except IOError:
|
---|
547 | print 'problem accessing data file: ', data_file_name
|
---|
548 | raise # stop ! no data
|
---|
549 |
|
---|
550 | self.meta = self._make_meta_dict()
|
---|
551 | self.columns = self._make_columns_dict()
|
---|
552 |
|
---|
553 | self._treat_meta_dict()
|
---|
554 |
|
---|
555 |
|
---|
556 | # list of columns, which are already registered
|
---|
557 | # see method register()
|
---|
558 | self._registered_cols = []
|
---|
559 | # dict of column data, this is used, in order to be able to remove
|
---|
560 | # the ctypes of
|
---|
561 | self._table_cols = {}
|
---|
562 |
|
---|
563 | # I need to count the rows, since the normal loop mechanism seems not to work.
|
---|
564 | self._current_row = 0
|
---|
565 |
|
---|
566 | self.stacked_cols = {}
|
---|
567 |
|
---|
568 | def _make_meta_dict__old(self):
|
---|
569 | """ This method retrieves meta information about the fits file and
|
---|
570 | stores this information in a dict
|
---|
571 | return: dict
|
---|
572 | key: string - all capital letters
|
---|
573 | value: tuple( numerical value, string comment)
|
---|
574 | """
|
---|
575 | # abbreviation
|
---|
576 | f = self.f
|
---|
577 |
|
---|
578 | # intermediate variables for file metadata dict generation
|
---|
579 |
|
---|
580 | keys=f.GetPy_KeyKeys()
|
---|
581 | values=f.GetPy_KeyValues()
|
---|
582 | comments=f.GetPy_KeyComments()
|
---|
583 | types=f.GetPy_KeyTypes()
|
---|
584 |
|
---|
585 | if len(keys) != len(values):
|
---|
586 | raise TypeError('len(keys)',len(keys),' != len(values)', len(values))
|
---|
587 | if len(keys) != len(types):
|
---|
588 | raise TypeError('len(keys)',len(keys),' != len(types)', len(types))
|
---|
589 | if len(keys) != len(comments):
|
---|
590 | raise TypeError('len(keys)',len(keys),' != len(comments)', len(comments))
|
---|
591 |
|
---|
592 | meta_dict = {}
|
---|
593 | for i in range(len(keys)):
|
---|
594 | type = types[i]
|
---|
595 | if type == 'I':
|
---|
596 | value = int(values[i])
|
---|
597 | elif type == 'F':
|
---|
598 | value = float(values[i])
|
---|
599 | elif type == 'B':
|
---|
600 | if values[i] == 'T':
|
---|
601 | value = True
|
---|
602 | elif values[i] == 'F':
|
---|
603 | value = False
|
---|
604 | else:
|
---|
605 | raise TypeError("meta-type is 'B', but meta-value is neither 'T' nor 'F'. meta-value:",values[i])
|
---|
606 | elif type == 'T':
|
---|
607 | value = values[i]
|
---|
608 | else:
|
---|
609 | raise TypeError("unknown meta-type: known meta types are: I,F,B and T. meta-type:",type)
|
---|
610 | meta_dict[keys[i]]=(value, comments[i])
|
---|
611 | return meta_dict
|
---|
612 |
|
---|
613 | def _make_meta_dict(self):
|
---|
614 | meta_dict = {}
|
---|
615 | for key,entry in self.f.GetKeys():
|
---|
616 | type = entry.type
|
---|
617 | fitsString = entry.fitsString # the original 80-char line from the FITS header
|
---|
618 | comment = entry.comment
|
---|
619 | value = entry.value
|
---|
620 |
|
---|
621 | if type == 'I':
|
---|
622 | value = int(value)
|
---|
623 | elif type == 'F':
|
---|
624 | value = float(value)
|
---|
625 | elif type == 'B':
|
---|
626 | if value == 'T':
|
---|
627 | value = True
|
---|
628 | elif value == 'F':
|
---|
629 | value = False
|
---|
630 | else:
|
---|
631 | raise TypeError("meta-type is 'B', but meta-value is neither 'T' nor 'F'. meta-value:",value)
|
---|
632 | elif type == 'T':
|
---|
633 | value = value
|
---|
634 | else:
|
---|
635 | raise TypeError("unknown meta-type: known meta types are: I,F,B and T. meta-type:",type)
|
---|
636 | meta_dict[key]=(value, comment)
|
---|
637 | return meta_dict
|
---|
638 |
|
---|
639 |
|
---|
640 |
|
---|
641 | def _make_columns_dict(self):
|
---|
642 | """ This method retrieves information about the columns
|
---|
643 | stored inside the fits files internal binary table.
|
---|
644 | returns: dict
|
---|
645 | key: string column name -- all capital letters
|
---|
646 | values: tuple(
|
---|
647 | number of elements in table field - integer
|
---|
648 | size of element in bytes -- this is not really interesting for any user
|
---|
649 | might be ommited in future versions
|
---|
650 | type - a single character code -- should be translated into
|
---|
651 | a comrehensible word
|
---|
652 | unit - string like 'mV' or 'ADC count'
|
---|
653 | """
|
---|
654 | ## abbreviation
|
---|
655 | #f = self.f
|
---|
656 | #
|
---|
657 | ## intermediate variables for file table-metadata dict generation
|
---|
658 | #keys=f.GetPy_ColumnKeys()
|
---|
659 | ##offsets=self.GetPy_ColumnOffsets() #not needed on python level...
|
---|
660 | #nums=f.GetPy_ColumnNums()
|
---|
661 | #sizes=f.GetPy_ColumnSizes()
|
---|
662 | #types=f.GetPy_ColumnTypes()
|
---|
663 | #units=f.GetPy_ColumnUnits()
|
---|
664 |
|
---|
665 | ## zip the values
|
---|
666 | #values = zip(nums,sizes,types,units)
|
---|
667 | ## create the columns dictionary
|
---|
668 | #columns = dict(zip(keys ,values))
|
---|
669 |
|
---|
670 |
|
---|
671 | columns = {}
|
---|
672 | for key,col in self.f.GetColumns():
|
---|
673 | columns[key]=( col.num, col.size, col.type, col.unit)
|
---|
674 | return columns
|
---|
675 |
|
---|
676 | def stack(self, on=True):
|
---|
677 | self.next()
|
---|
678 | for col in self._registered_cols:
|
---|
679 | if isinstance( self.dict[col], type(np.array('')) ):
|
---|
680 | self.stacked_cols[col] = self.dict[col]
|
---|
681 | else:
|
---|
682 | # elif isinstance(self.dict[col], ctypes._SimpleCData):
|
---|
683 | self.stacked_cols[col] = np.array(self.dict[col])
|
---|
684 | # else:
|
---|
685 | # raise TypeError("I don't know how to stack "+col+". It is of type: "+str(type(self.dict[col])))
|
---|
686 |
|
---|
687 | def register(self, col_name):
|
---|
688 | """ register for a column in the fits file
|
---|
689 |
|
---|
690 | after the call, this SlowData object will have a new member variable
|
---|
691 | self.col_name, if col_name is a key in self.colums
|
---|
692 |
|
---|
693 | the value will be updated after each call of next(), or while iterating over self.
|
---|
694 | NB: the initial value is zero(s)
|
---|
695 |
|
---|
696 | *col_name* : name of a key in self.columns, or 'all' to choose all.
|
---|
697 | """
|
---|
698 | columns = self.columns
|
---|
699 | if col_name.lower() == 'all':
|
---|
700 | for col in columns:
|
---|
701 | self._register(col)
|
---|
702 | else:
|
---|
703 | #check if colname is in columns:
|
---|
704 | if col_name not in columns:
|
---|
705 | error_msg = 'colname:'+ col_name +' is not a column in the binary table.\n'
|
---|
706 | error_msg+= 'possible colnames are\n'
|
---|
707 | for key in columns:
|
---|
708 | error_msg += key+' '
|
---|
709 | raise KeyError(error_msg)
|
---|
710 | else:
|
---|
711 | self._register(col_name)
|
---|
712 |
|
---|
713 | # 'private' method, do not use
|
---|
    def _register( self, colname):
        """Allocate a read buffer for column *colname* and register it.

        Looks up the column description in self.columns, allocates either a
        single ctypes object (one element per row) or a numpy array (several
        elements per row) of a type matching the FITS type code, and hands
        the buffer to the underlying reader via f.SetPtrAddress(), which
        fills it on every GetNextRow() call.

        The buffer is stored in self._table_cols[colname] and also exposed
        as a member (self.<colname>) and in self.dict; scalar ctypes buffers
        are unwrapped to plain python values there.

        Raises TypeError for an unknown type code.
        """
        columns = self.columns
        f = self.f            # the underlying fits reader (ROOT binding)
        local = None

        # column description tuple: (#elements, element size, type code, unit)
        number_of_elements = int(columns[colname][0])
        size_of_elements_in_bytes = int(columns[colname][1])   # NOTE(review): currently unused
        ctypecode_of_elements = columns[colname][2]
        physical_unit_of_elements = columns[colname][3]        # NOTE(review): currently unused

        # FITS type codes, snippet from the C++ source code (header file):
        #case 'L': gLog << "bool(8)"; break;
        #case 'B': gLog << "byte(8)"; break;
        #case 'I': gLog << "short(16)"; break;
        #case 'J': gLog << "int(32)"; break;
        #case 'K': gLog << "int(64)"; break;
        #case 'E': gLog << "float(32)"; break;
        #case 'D': gLog << "double(64)"; break;

        # the fields inside the columns can either contain single numbers,
        # or whole arrays of numbers as well.
        # we treat single elements differently...
        if number_of_elements == 0:
            # empty column: nothing to allocate or register
            return
        if number_of_elements == 1:
            # allocate some memory for a single number according to its type
            # NOTE(review): the ctypes buffers here are unsigned while the
            # FITS codes denote signed integers (and the array branch below
            # uses signed numpy types) - confirm against the reader.
            # un_c_type is assigned in every branch but never used afterwards.
            if ctypecode_of_elements == 'J': # J is a 4 byte int
                local = ctypes.c_ulong()
                un_c_type = long
            elif ctypecode_of_elements == 'I': # I is a 2 byte int
                local = ctypes.c_ushort()
                un_c_type = int
            elif ctypecode_of_elements == 'B': # B is a byte
                local = ctypes.c_ubyte()
                un_c_type = int
            elif ctypecode_of_elements == 'D': # D is a double
                local = ctypes.c_double()
                un_c_type = float
            elif ctypecode_of_elements == 'E': # E is a float
                local = ctypes.c_float()
                un_c_type = float
            elif ctypecode_of_elements == 'A': # A is a char
                local = ctypes.c_uchar()
                un_c_type = chr
            elif ctypecode_of_elements == 'K': # K is an 8 byte int
                local = ctypes.c_ulonglong()
                un_c_type = long
            else:
                raise TypeError('unknown ctypecode_of_elements:',ctypecode_of_elements)
        else:
            # array-valued column: allocate a numpy buffer of matching dtype
            if ctypecode_of_elements == 'B': # B is a byte
                nptype = np.int8
            elif ctypecode_of_elements == 'A': # A is for a char .. but I don't know how to handle it
                nptype = np.int8
            elif ctypecode_of_elements == 'I': # I is a 2 byte int
                nptype = np.int16
            elif ctypecode_of_elements == 'J': # J is a 4 byte int
                nptype = np.int32
            elif ctypecode_of_elements == 'K': # K is an 8 byte int
                nptype = np.int64
            elif ctypecode_of_elements == 'E': # E is a float
                nptype = np.float32
            elif ctypecode_of_elements == 'D': # D is a double
                nptype = np.float64
            else:
                raise TypeError('unknown ctypecode_of_elements:',ctypecode_of_elements)
            local = np.zeros( number_of_elements, nptype)

        # Set the Pointer Address
        try:
            f.SetPtrAddress(colname, local)
        except TypeError:
            print 'something was wrong with SetPtrAddress()'
            print 'Type of colname', type(colname)
            print 'colname:', colname
            print 'Type of local', type(local)
            print 'length of local', len(local)
            print 'local should be alle zeros, since "local = np.zeros( number_of_elements, nptype)" '
            raise

        self._table_cols[colname] = local
        if number_of_elements > 1:
            # arrays are shared: the member always reflects the current row
            self.__dict__[colname] = local
            self.dict[colname] = local
        else:
            # remove any traces of ctypes:
            self.__dict__[colname] = local.value
            self.dict[colname] = local.value
        self._registered_cols.append(colname)
|
---|
806 |
|
---|
807 |
|
---|
808 | def _treat_meta_dict(self):
|
---|
809 | """make 'interesting' meta information available like normal members.
|
---|
810 | non interesting are:
|
---|
811 | TFORM, TUNIT, and TTYPE
|
---|
812 | since these are available via the columns dict.
|
---|
813 | """
|
---|
814 |
|
---|
815 | self.number_of_rows = self.meta['NAXIS2'][0]
|
---|
816 | self.number_of_columns = self.meta['TFIELDS'][0]
|
---|
817 |
|
---|
818 | # there are some information in the meta dict, which are alsways there:
|
---|
819 | # there are regarded as not interesting:
|
---|
820 | uninteresting_meta = {}
|
---|
821 | uninteresting_meta['arraylike'] = {}
|
---|
822 | uninteresting = ['NAXIS', 'NAXIS1', 'NAXIS2',
|
---|
823 | 'TFIELDS',
|
---|
824 | 'XTENSION','EXTNAME','EXTREL',
|
---|
825 | 'BITPIX', 'PCOUNT', 'GCOUNT',
|
---|
826 | 'ORIGIN',
|
---|
827 | 'PACKAGE', 'COMPILED', 'CREATOR',
|
---|
828 | 'TELESCOP','TIMESYS','TIMEUNIT','VERSION']
|
---|
829 | for key in uninteresting:
|
---|
830 | if key in self.meta:
|
---|
831 | uninteresting_meta[key]=self.meta[key]
|
---|
832 | del self.meta[key]
|
---|
833 |
|
---|
834 | # the table meta data contains
|
---|
835 |
|
---|
836 |
|
---|
837 | # shortcut to access the meta dict. But this needs to
|
---|
838 | # be cleaned up quickly!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
---|
839 | meta = self.meta
|
---|
840 |
|
---|
841 | # loop over keys:
|
---|
842 | # * try to find array-like keys
|
---|
843 | arraylike = {}
|
---|
844 | singlelike = []
|
---|
845 | for key in self.meta:
|
---|
846 | stripped = key.rstrip('1234567890')
|
---|
847 | if stripped == key:
|
---|
848 | singlelike.append(key)
|
---|
849 | else:
|
---|
850 | if stripped not in arraylike:
|
---|
851 | arraylike[stripped] = 0
|
---|
852 | else:
|
---|
853 | arraylike[stripped] += 1
|
---|
854 | newmeta = {}
|
---|
855 | for key in singlelike:
|
---|
856 | newmeta[key.lower()] = meta[key]
|
---|
857 | for key in arraylike:
|
---|
858 | uninteresting_meta['arraylike'][key.lower()] = []
|
---|
859 | for i in range(arraylike[key]+1):
|
---|
860 | if key+str(i) in meta:
|
---|
861 | uninteresting_meta['arraylike'][key.lower()].append(meta[key+str(i)])
|
---|
862 | self.ui_meta = uninteresting_meta
|
---|
863 | # make newmeta self
|
---|
864 | for key in newmeta:
|
---|
865 | self.__dict__[key]=newmeta[key]
|
---|
866 |
|
---|
867 | dict = self.__dict__.copy()
|
---|
868 | del dict['meta']
|
---|
869 | del dict['ui_meta']
|
---|
870 | self.dict = dict
|
---|
871 |
|
---|
    def __iter__(self):
        """Return the iterator object itself.

        The file object is its own iterator: the current position is kept
        in self._current_row and advanced by next().
        """
        return self
|
---|
875 |
|
---|
876 | def next(self):
|
---|
877 | """ use to iterate over the file
|
---|
878 |
|
---|
879 | do not forget to call register() before iterating over the file
|
---|
880 | call show() in order to find out, what parameters register() accepts.
|
---|
881 | or just call register('all') in case you are unsure.
|
---|
882 |
|
---|
883 | returns self
|
---|
884 | """
|
---|
885 | # abbreviaition
|
---|
886 | f = self.f
|
---|
887 |
|
---|
888 | # Here one might check, if looping makes any sense, and if not
|
---|
889 | # one could stop looping or so...
|
---|
890 | # like this:
|
---|
891 | #
|
---|
892 | # if len(self._registered_cols) == 0:
|
---|
893 | # print 'warning: looping without any registered columns'
|
---|
894 | if self._current_row < self.number_of_rows:
|
---|
895 | if f.GetNextRow() == False:
|
---|
896 | raise StopIteration
|
---|
897 | for col in self._registered_cols:
|
---|
898 | if isinstance(self._table_cols[col], ctypes._SimpleCData):
|
---|
899 | self.__dict__[col] = self._table_cols[col].value
|
---|
900 | self.dict[col] = self._table_cols[col].value
|
---|
901 |
|
---|
902 | for col in self.stacked_cols:
|
---|
903 | if isinstance(self.dict[col], type(np.array(''))):
|
---|
904 | self.stacked_cols[col] = np.vstack( (self.stacked_cols[col],self.dict[col]) )
|
---|
905 | else:
|
---|
906 | self.stacked_cols[col] = np.vstack( (self.stacked_cols[col],np.array(self.dict[col])) )
|
---|
907 | self._current_row += 1
|
---|
908 | else:
|
---|
909 | raise StopIteration
|
---|
910 | return self
|
---|
911 |
|
---|
    def show(self):
        """Pretty-print the current contents of self.dict.

        self.dict maps member names (meta keys and registered column
        names) to their current values, so this gives an overview of what
        can be registered and what the current row holds.
        """
        pprint.pprint(self.dict)
|
---|
916 |
|
---|
917 |
|
---|
918 |
|
---|
919 |
|
---|
class fnames( object ):
    """Organize the file names of a FACT data run.

    From a run specifier (drs calibration run id, data run id and date)
    this builds the full paths of the raw data file, the drs calibration
    file and a basename ('radical') for result files.
    """

    def __init__(self, specifier = ('012', '023', '2011', '11', '24'),
                 rpath = '/scratch_nfs/res/bsl/',
                 zipped = True):
        """
        specifier : sequence of strings defined as:
            [ 'DRS calibration file', 'Data file', 'YYYY', 'MM', 'DD']
        rpath : directory path for the results; YYYYMMDD will be appended to rpath
        zipped : use zipped (True) or unzipped (False) raw files

        NOTE: the default *specifier* used to be a mutable list (shared
        between all calls in python); it is an immutable tuple now.
        """
        self.specifier = specifier
        self.rpath = rpath
        self.zipped = zipped

        self.make( self.specifier, self.rpath, self.zipped )


    def make( self, specifier, rpath, zipped ):
        """Create (make) the filenames.

        Fills self.names, a dictionary of filenames with tags
        { 'data', 'drscal', 'results' }:
          data    : name of the data file
          drscal  : name of the drs calibration file
          results : radical of file name(s) for results (to be completed by suffixes)
        and mirrors them in self.data, self.drscal and self.results.
        """
        self.specifier = specifier

        # zipped and unzipped raw data live on different data disks
        if zipped:
            dpath = '/data00/fact-construction/raw/'
            ext = '.fits.gz'
        else:
            dpath = '/data03/fact-construction/raw/'
            ext = '.fits'

        year = specifier[2]
        month = specifier[3]
        day = specifier[4]

        yyyymmdd = year + month + day
        dfile = specifier[1]    # data run id
        cfile = specifier[0]    # drs calibration run id

        # results go into a per-day subdirectory of rpath
        rpath = rpath + yyyymmdd + '/'
        self.rpath = rpath
        self.names = {}

        tmp = dpath + year + '/' + month + '/' + day + '/' + yyyymmdd + '_'
        self.names['data'] = tmp + dfile + ext
        self.names['drscal'] = tmp + cfile + '.drs' + ext
        self.names['results'] = rpath + yyyymmdd + '_' + dfile + '_' + cfile

        self.data = self.names['data']
        self.drscal = self.names['drscal']
        self.results = self.names['results']

    def info( self ):
        """Print the complete filenames.

        (Single-argument print(...) calls behave identically under
        python 2 and 3; the output bytes are unchanged.)
        """
        print('file names:')
        print('data:  %s' % self.names['data'])
        print('drs-cal:  %s' % self.names['drscal'])
        print('results:  %s' % self.names['results'])
|
---|
992 |
|
---|
993 | # end of class definition: fnames( object )
|
---|
994 |
|
---|
def _test_SlowData( filename ):
    """Print an interactive usage guide for a SlowData 'file'.

    NOTE(review): despite taking *filename*, this function does not open
    the file itself; it only prints instructions referring to it -
    presumably the caller has already created the SlowData instance.
    """
    print '-'*70
    print "opened :", filename, " as 'file'"
    print
    print '-'*70
    print 'type file.show() to look at its contents'
    print "type file.register( columnname ) or file.register('all') in order to register columns"
    print
    print " due column-registration you declare, that you would like to retrieve the contents of one of the columns"
    print " after column-registration, the 'file' has new member variables, they are named like the columns"
    print " PLEASE NOTE: immediatly after registration, the members exist, but they are empty."
    print " the values are assigned only, when you call file.next() or when you loop over the 'file'"
    print
    print "in order to loop over it, just go like this:"
    print "for row in file:"
    print " print row.columnname_one, row.columnname_two"
    print
    print ""
    print '-'*70
|
---|
1014 |
|
---|
1015 |
|
---|
1016 |
|
---|
def _test_iter( nevents ):
    """ test for function __iter__

    Iterates over a hard-coded raw-data / drs-calibration file pair and
    prints a one-line summary per event, stopping after event id
    *nevents*. Requires the RawData class (defined elsewhere in this
    module) and the hard-coded files below to exist.
    """
    data_file_name = '/fact/raw/2011/11/24/20111124_117.fits.gz'
    calib_file_name = '/fact/raw/2011/11/24/20111124_114.drs.fits.gz'
    print 'the files for this test are:'
    print 'data file:', data_file_name
    print 'calib file:', calib_file_name
    # return_dict=True: events are delivered as dicts
    run = RawData( data_file_name, calib_file_name , return_dict=True)

    for event in run:
        print 'ev ', event['event_id'], 'data[0,0] = ', event['acal_data'][0,0], 'start_cell[0] = ', event['start_cells'][0], 'trigger type = ', event['trigger_type']
        # NOTE(review): the stop condition compares the *event id* of the
        # last event read with nevents, not a count of events processed.
        if run.event_id == nevents:
            break
|
---|
1031 |
|
---|
if __name__ == '__main__':
    """ tests """
    # open the file given on the command line with the low-level
    # 'fits' ROOT binding
    f = fits(sys.argv[1])
    # instantiate the templated std::map wrappers once, apparently to
    # check that the ROOT dictionaries were generated correctly
    test_m1 = ROOT.std.map(str,ROOT.fits.Entry)()
    test_m2 = ROOT.std.map(str,ROOT.fits.Table.Column)()
    print "len(test_m1)", len(test_m1)
    print "len(test_m2)", len(test_m2)

    # NOTE(review): these loops merely exhaust the iterators; the prints
    # after them show only the *last* key / column (leftover debugging?)
    for k1 in f.GetKeys():
        pass
    print k1
    for k2 in f.GetColumns():
        pass
    print k2

    sd = SlowData(sys.argv[1])
|
---|