#!/usr/bin/python -tt
#
# calculation of DRS time calibration constants
# initial implementation by Remo Dietlicher and Werner Lustermann (ETH Zurich)
# based on C++ classes by Oliver Grimm (ETH Zurich)
#
# this python implementation was coded by
# D. Neise (TU Dortmund) April 2012
#
import numpy as np
from pyfact import RawData
from drs_spikes import DRSSpikes
from extractor import ZeroXing


class CalculateDrsTimeCalibrationConstants(object):
    """ basic analysis class for the calculation of DRS time aperture jitter calibration constants
    """

    def __init__(self, dpath, cpath):
        """
        *dpath*: data file path; the file should contain a periodic signal in each 9th channel
        *cpath*: path to the standard DRS amplitude calibration file
        """
        #dfname = '/data00/fact-construction/raw/2011/11/24/20111124_113.fits.gz'
        #calfname = '/data00/fact-construction/raw/2011/11/24/20111124_111.drs.fits.gz'
        self.data_path = dpath
        self.calib_path = cpath
        self.run = RawData(dpath, cpath, return_dict=True)

        if self.run.nroi != 1024:
            raise TypeError('time calibration data must have ROI=1024; ROI of ' + dpath + ' is: ' + str(self.run.nroi))

        self.despike = DRSSpikes()
        self.zero_crossing_finder = ZeroXing(slope=-1)  # slope = -1 means falling edge
        self.fsampling = 2.             # sampling frequency in GHz
        self.freq = 250.                # test signal frequency in MHz
        self.P_nom = 1000. / self.freq  # nominal period of the test signal in ns
        self.DAMPING = 0.02             # damping factor applied to each correction

        # this ndarray will contain the best estimate of the true width of each slice after a __call__,
        # but it is also used as the current best estimate inside the __call__,
        # since the algorithm is implemented iteratively
        #self.time_calib = np.linspace( 0. , 512, num=1024, endpoint=False).repeat(run.npix/9).reshape( (1024,run.npix/9) ).transpose()
        self.time_calib = np.ones((self.run.npix / 9, 1024)) / self.fsampling
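        # i.e. initially every cell is assumed to have the nominal width
        # 1/fsampling = 0.5 ns; the widths are then refined event by event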

    def __call__(self):
        """ the class-call method performs the actual calculation based on the input data

        returns: a tuple of length 160 (one per DRS chip) containing tuples of length 1024
        with the width of each cell in ns.
        after the call, the result is also available as self.result.
        """
        # loop over events
        counter = 0
        for event in self.run:
            print 'counter', counter
            counter += 1
            # in the next() method, which is called during looping,
            # the data is automatically amplitude calibrated.
            # we only need every 9th pixel, so this is a place
            # for optimization
            data = event['acal_data']

            # slice out every 9th channel
            niners_ids = range(8, self.run.npix, 9)  # [8, 17, 26, ..., 1430, 1439]
            data = data[niners_ids, :]
            start_cell = event['start_cells'][niners_ids, :]

            data = self.despike(data)

            # shift data down by the mean (possible in one line; see note below)
            # calculate the mean
            mean = data.mean(1)
            # bring the mean into the same shape as data, for elementwise subtraction
            mean = mean.repeat(data.shape[1]).reshape(data.shape)
            # subtract
            data = data - mean
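            # note: the three steps above collapse to one line with numpy
            # broadcasting: data = data - data.mean(1)[:, np.newaxis]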

            # find zero crossings with falling edge
            # the output is a list of lists of slice indices.
            # each index marks a zero crossing: in fact, it is the slice where
            # the data is still positive ... in the next slice it is negative (or exactly zero).
            # This was called 'MeanXing' before
            all_crossings = self.zero_crossing_finder(data)
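            # e.g. all_crossings[chip_id] might look like [3, 515, 1008]
            # (values made up): the indices of the last still-positive sample
            # before each falling-edge crossing on that chip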

            # now we want to calculate the exact time of the crossings,
            # but in units of nanoseconds, not in units of slices.
            # in order to transform slices into nanoseconds we already use self.time_calib,
            # rolled appropriately and summed up, in order to have the integral timing calibration
            #
            # the list 'time_of_all_crossings' will contain a sublist for each chip,
            # which in turn will contain the times, in ns, of each zero crossing
            time_of_all_crossings = []
            # the list 'float_slice_of_all_crossings'
            # is similar to the list above, but it just contains the positions of the crossings
            # in the pipeline; more precise than 'all_crossings': the positions are linearly interpolated
            float_slice_of_all_crossings = []

            for chip_id, crossings in enumerate(all_crossings):
                time_of_all_crossings.append([])
                float_slice_of_all_crossings.append([])
                time = np.roll(self.time_calib[chip_id], -start_cell[chip_id]).cumsum()
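                # time[k] is now the integrated width in ns from the physical
                # start cell up to readout slice k; e.g. nominal widths
                # [0.5, 0.5, 0.5, ...] give times [0.5, 1.0, 1.5, ...]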
                for crossing in crossings:
                    # perform linear interpolation of the time of the zero crossing:
                    # what fraction of a slice is the interpolated crossing away from the integer slice id?
                    fraction_of_slice = data[chip_id, crossing] / (data[chip_id, crossing] - data[chip_id, crossing+1])
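                    # e.g. data = +3 at 'crossing' and -1 at 'crossing+1'
                    # gives fraction_of_slice = 3 / (3 - (-1)) = 0.75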
                    float_slice_of_all_crossings[-1].append(fraction_of_slice + crossing)
                    slope = time[crossing+1] - time[crossing]
                    time_of_crossing = time[crossing] + slope * fraction_of_slice
                    time_of_all_crossings[-1].append(time_of_crossing)

            # now one can use those two lists, which were just created,
            # to calculate the (float) number of slices between two consecutive crossings
            # as well as the *measured* time between two crossings.
            # on the other hand, the period of the test signal is known to be self.P_nom,
            # so in case the measured time is longer or shorter than the known period,
            # the associated time of all involved slices
            # is assumed to be a bit longer or shorter than 1./self.fsampling

            # first we convert the sublists of both lists into numpy arrays
            for i in range(len(float_slice_of_all_crossings)):  # both lists have equal length
                float_slice_of_all_crossings[i] = np.array(float_slice_of_all_crossings[i])
                time_of_all_crossings[i] = np.array(time_of_all_crossings[i])

            # now we can easily calculate the measured periods using np.diff()
            all_measured_periods = []
            for chip_times in time_of_all_crossings:
                all_measured_periods.append(np.diff(chip_times))

            for chip, chip_periods in enumerate(all_measured_periods):

                for i, period in enumerate(chip_periods):
                    # shortcuts to the involved slices
                    start = all_crossings[chip][i]
                    end = all_crossings[chip][i+1]
                    interval = end - start
                    rest = 1024 - interval

                    delta_period = (self.P_nom - period) * self.DAMPING
                    # nominal number of slices in one period
                    nom_num_slices = period * self.fsampling

                    # now the delta_period is distributed over all slices
                    # I do not exactly understand the +1 here ... error prone!!!
                    # the following explanation assumes delta_period is positive:
                    # the slices between start and end get a positive correction value,
                    # given by delta_period / nom_num_slices
                    pos_fraction = delta_period / nom_num_slices
                    # the total positive correction sums up to
                    total_positive_correction = interval * pos_fraction
                    # the total negative correction must sum up to the same value
                    total_negative_correction = rest * (interval * (pos_fraction / rest))
                    # so we can call the factor following 'rest' the 'neg_fraction'
                    # (note the explicit float: plain interval/rest would be integer division)
                    neg_fraction = -1. * interval / rest * pos_fraction
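                    # e.g. interval = 500, rest = 524: the 500 in-period slices
                    # each get +pos_fraction, the 524 remaining slices each get
                    # -500/524 * pos_fraction, so the net correction is zero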
                    #print '1. should be zero', total_positive_correction - total_negative_correction
                    #assert total_positive_correction - total_negative_correction == 0

                    correction = np.zeros(1024)
                    correction[start+1:end+1] = pos_fraction
                    correction[:start+1] = neg_fraction
                    correction[end+1:] = neg_fraction
                    #print '2. should be zero', correction.sum()
                    #assert correction.sum() == 0

                    # now we add these correction values to self.time_calib,
                    # rolled back into physical-cell order
                    self.time_calib[chip, :] += np.roll(correction, start_cell[chip])
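
if __name__ == '__main__':
    # minimal usage sketch; the file paths are hypothetical examples,
    # taken from the commented-out names in __init__ above
    data_file = '/data00/fact-construction/raw/2011/11/24/20111124_113.fits.gz'
    calib_file = '/data00/fact-construction/raw/2011/11/24/20111124_111.drs.fits.gz'
    calc = CalculateDrsTimeCalibrationConstants(data_file, calib_file)
    calc()
    # after the call the constants are available as calc.time_calib,
    # an ndarray of shape (number_of_chips, 1024) with each cell width in ns
    print 'mean cell width [ns]:', calc.time_calib.mean()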