#!/usr/bin/python -tt
#
# calculation of DRS Time Calibration constants
#
# initial implementation by Remo Dietlicher and Werner Lustermann (ETH Zurich)
# based on C++ classes by Oliver Grimm (ETH Zurich)
#
# this python implementation was coded by
# D. Neise (TU Dortmund) April 2012
#
import numpy as np
from pyfact import RawData
from drs_spikes import DRSSpikes
from extractor import ZeroXing

class CalculateDrsTimeCalibrationConstants(object):
    """ basic analysis class for the calculation of DRS time aperture jitter calibration constants
    """

    def __init__(self, dpath, cpath):
        """
        *dpath*: data file path; the file should contain a periodic signal in each 9th channel
        *cpath*: path to the standard DRS amplitude calibration file
        """
        #dfname = '/data00/fact-construction/raw/2011/11/24/20111124_113.fits.gz'
        #calfname = '/data00/fact-construction/raw/2011/11/24/20111124_111.drs.fits.gz'
        self.data_path = dpath
        self.calib_path = cpath
        self.run = RawData(dpath, cpath, return_dict=True)

        if self.run.nroi != 1024:
            raise TypeError('Time Calibration Data must have ROI=1024, ROI of ' + dpath + ' is: ' + str(self.run.nroi))

        self.despike = DRSSpikes()
        self.zero_crossing_finder = ZeroXing(slope=-1)  # -1 means falling edge
        self.fsampling = 2.              # sampling frequency in GHz
        self.freq = 250.                 # test signal frequency in MHz
        self.P_nom = 1000. / self.freq   # nominal period of the test signal in ns
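        # e.g. with freq = 250. MHz the nominal period is 1000./250. = 4 ns,
        # i.e. roughly 8 slices at the 2 GHz sampling frequency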
        self.DAMPING = 0.02              # damping applied to the correction

        # this ndarray will contain the best estimate of the true width of each slice after a __call__,
        # but it also holds the current best estimate *during* a __call__,
        # since the algorithm is implemented iteratively
        #self.time_calib = np.linspace( 0. , 512, num=1024, endpoint=False).repeat(run.npix/9).reshape( (1024,run.npix/9) ).transpose()
        self.time_calib = np.ones((self.run.npix/9, 1024)) / self.fsampling

    def __call__(self):
        """ the class-call method performs the actual calculation based on the input data
        returns: tuple of length 160 containing tuples of length 1024, holding the length of each cell in ns
        the instance contains self.result after the call.
        """
        # loop over events
        counter = 0
        for event in self.run:
            print 'counter', counter
            counter += 1
            # in the next() method, which is called during looping,
            # the data is automatically amplitude calibrated.
            # we just need every 9th pixel, so this is a place
            # for optimization
            data = event['acal_data']

            # slice out every 9th channel
            niners_ids = range(8, self.run.npix, 9)   # [8, 17, 26, ..., 1430, 1439]
            data = data[niners_ids, :]
            start_cell = event['start_cells'][niners_ids, :]

            data = self.despike(data)

            # shift data down by the mean (possible in one line, see the note below)
            # calculate mean
            mean = data.mean(1)
            # bring mean into the same shape as data, for elementwise subtraction
            mean = mean.repeat(data.shape[1]).reshape(data.shape)
            # subtraction
            data = data - mean
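            # note (a sketch, assuming numpy broadcasting semantics): the three
            # steps above collapse into the mentioned one-liner
            #   data = data - data.mean(1)[:, np.newaxis]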

            # find zero crossings with falling edge
            # the output is a list of lists of slice indices.
            # each index marks a zero crossing: it is the slice where
            # the data is still positive ... in the next slice it is negative (or exactly zero).
            # This was called 'MeanXing' before
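            # illustrative example (made-up trace): for a single channel
            # [3., 1., -2., -1., 4., ...] a falling-edge finder of this kind
            # would report index 1, since the data is positive in slice 1
            # and non-positive in slice 2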
            all_crossings = self.zero_crossing_finder(data)

            # now we have to calculate the exact times of the crossings,
            # but in units of nanoseconds; therefore we use self.time_calib,
            # rolled appropriately and summed up, in order to have the integral timing calibration
            #
            # the list 'time_of_all_crossings' will contain sublists,
            # which in turn contain the times, in ns, of each zero crossing
            time_of_all_crossings = []
            # the list 'float_slice_of_all_crossings'
            # is similar to the list above, but it just contains the positions of the crossings
            # in the pipeline, more precisely than 'all_crossings': the positions are linearly interpolated
            float_slice_of_all_crossings = []

            for chip_id, crossings in enumerate(all_crossings):
                time_of_all_crossings.append([])
                float_slice_of_all_crossings.append([])
                time = np.roll(self.time_calib[chip_id], -start_cell[chip_id]).cumsum()
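                # time[i] is now the current estimate of the time, in ns, of
                # readout slice i relative to the start of the readout window
                # (the roll aligns the per-cell widths with this event's start cell)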
                for crossing in crossings:
                    # perform linear interpolation of the time of the zero crossing:
                    # 'fraction_of_slice' is how far (as a fraction of one slice) the
                    # interpolated crossing lies past the integral slice id
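                    # worked example with made-up numbers: for data values
                    # 6.0 at 'crossing' and -2.0 at 'crossing+1', the fraction is
                    # 6.0 / (6.0 - (-2.0)) = 0.75, i.e. the signal crosses zero
                    # three quarters of the way into the slice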
                    fraction_of_slice = data[chip_id, crossing] / (data[chip_id, crossing] - data[chip_id, crossing+1])
                    float_slice_of_all_crossings[-1].append(fraction_of_slice + crossing)
                    slope = time[crossing+1] - time[crossing]
                    time_of_crossing = time[crossing] + slope * fraction_of_slice
                    time_of_all_crossings[-1].append(time_of_crossing)

            # Now one can use these two lists, which were just created,
            # to calculate the (float) number of slices between two consecutive crossings
            # as well as the *measured* time between two crossings.
            # On the other hand, the period of the test signal is known to be self.P_nom,
            # so in case the measured time is longer or shorter than the known period,
            # the associated time of all involved slices
            # is assumed to be a bit longer or shorter than 1./self.fsampling

            # first we convert the sublists of both lists into numpy arrays
            for i in range(len(float_slice_of_all_crossings)):  # both lists have equal length
                float_slice_of_all_crossings[i] = np.array(float_slice_of_all_crossings[i])
                time_of_all_crossings[i] = np.array(time_of_all_crossings[i])

            # now we can easily calculate the measured periods using np.diff()
            all_measured_periods = []
            for chip_times in time_of_all_crossings:
                all_measured_periods.append(np.diff(chip_times))
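            # (e.g. crossing times [0.0, 4.1, 8.0] ns yield measured
            #  periods [4.1, 3.9] ns)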

            for chip, chip_periods in enumerate(all_measured_periods):

                for i, period in enumerate(chip_periods):
                    # shortcuts to the involved slices
                    start = all_crossings[chip][i]
                    end = all_crossings[chip][i+1]
                    interval = end - start
                    rest = 1024 - interval

                    delta_period = (self.P_nom - period) * self.DAMPING
                    # nominal number of slices covered by this period
                    nom_num_slices = period * self.fsampling
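                    # e.g. a measured period of 4.1 ns gives
                    # delta_period = (4.0 - 4.1) * 0.02 = -0.002 ns, to be
                    # distributed over nom_num_slices = 4.1 * 2. = 8.2 slices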

                    # now the delta_period is distributed over all slices
                    # I do not exactly understand the +1 here ... error prone!!!
                    # the following explanation assumes delta_period is positive:
                    # the slices between start and end get a positive correction value.
                    # This correction value is given by delta_period / nom_num_slices
                    pos_fraction = delta_period / nom_num_slices
                    # the total positive correction sums up to
                    total_positive_correction = interval * pos_fraction
                    # the total negative correction must sum up to the same value
                    total_negative_correction = rest * (interval * (pos_fraction/rest))
                    # so we can call the factor following 'rest' the 'neg_fraction';
                    # interval and rest are integers, so cast to float to avoid
                    # integer division under python 2
                    neg_fraction = -1 * float(interval)/rest * pos_fraction
                    #print '1. should be zero', total_positive_correction - total_negative_correction
                    #assert total_positive_correction - total_negative_correction == 0

                    correction = np.zeros(1024)
                    correction[start+1:end+1] = pos_fraction
                    correction[:start+1] = neg_fraction
                    correction[end+1:] = neg_fraction
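                    # resulting layout (in readout-window indices), e.g. for
                    # start=3, end=7:
                    #   slice:      0   1   2   3  | 4   5   6   7  | 8   ...
                    #   correction: neg neg neg neg| pos pos pos pos| neg ...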
                    #print '2. should be zero', correction.sum()
                    #assert correction.sum() == 0

                    # now we have to add these correction values to self.time_calib;
                    # the roll shifts the correction from readout-window indices
                    # back to absolute cell indices
                    self.time_calib[chip,:] += np.roll(correction, start_cell[chip])
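
# a minimal usage sketch (not part of the original analysis code); the file
# names below are placeholders, and __call__ fills self.time_calib with the
# estimated slice widths in ns
if __name__ == '__main__':
    calc = CalculateDrsTimeCalibrationConstants(
        '20111124_113.fits.gz',      # hypothetical time calibration data run
        '20111124_111.drs.fits.gz')  # hypothetical DRS amplitude calibration file
    calc()
    np.save('drs_time_calib.npy', calc.time_calib)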