#!/usr/bin/python -tti
#
# calculation of DRS Time Calibration constants
# initial implementation by Remo Dietlicher and Werner Lustermann (ETH Zurich)
# based on C++ classes by Oliver Grimm (ETH Zurich)
#
# this python implementation was coded by
# D. Neise (TU Dortmund) April 2012
#
import numpy as np
from pyfact import RawData
from drs_spikes import DRSSpikes
from extractors import ZeroXing

class CalculateDrsTimeCalibrationConstants(object):
    """ basic analysis class for the calculation of
        DRS time aperture jitter calibration constants
    """
    def __init__(self, dpath, cpath):
        """
        *dpath*: data file path; the file should contain a periodic signal
            in each 9th channel
        *cpath*: standard DRS amplitude calibration path
        """
        #dfname = '/data00/fact-construction/raw/2011/11/24/20111124_113.fits.gz'
        #calfname = '/data00/fact-construction/raw/2011/11/24/20111124_111.drs.fits.gz'
        self.data_path = dpath
        self.calib_path = cpath
        self.run = RawData(dpath, cpath, return_dict=True)
        if self.run.nroi != 1024:
            raise TypeError('Time calibration data must have ROI=1024, '
                            'ROI of ' + dpath + ' is: ' + str(self.run.nroi))

        self.despike = DRSSpikes()
        self.zero_crossing_finder = ZeroXing(slope=-1)  # -1 means falling edge
        self.fsampling = 2.             # sampling frequency in GHz
        self.freq = 250.                # test signal frequency in MHz
        self.P_nom = 1000. / self.freq  # nominal period of the test signal in ns
        self.DAMPING = 0.02             # damping applied to each correction

        # This ndarray will contain the best estimate of the true width of
        # each slice after a __call__, but it is also used to determine the
        # current best estimate inside the __call__, since the algorithm is
        # implemented iteratively.
        #self.time_calib = np.linspace( 0. , 512, num=1024, endpoint=False).repeat(run.npix/9).reshape( (1024,run.npix/9) ).transpose()
        self.time_calib = np.ones((self.run.npix / 9, 1024)) / self.fsampling

    def __call__(self):
        """ The class-call method performs the actual calculation
            based on the input data.

            The result, an array of shape (160, 1024) holding the best
            estimate of the width of each cell in ns, is accumulated
            in self.time_calib.
        """
        # loop over events
        for event in self.run:
            # In the next() method, which is called during looping, the data
            # is automatically amplitude calibrated. We only need every 9th
            # pixel, so this is a place for optimization.
            data = event['acal_data']
            # slice out every 9th channel
            niners_ids = range(8, self.run.npix, 9)  # [8, 17, 26, ..., 1430, 1439]
            data = data[niners_ids, :]
            start_cell = event['start_cells'][niners_ids]
            data = self.despike(data)
            # Shift the data down by its mean:
            # calculate the mean of each channel,
            mean = data.mean(1)
            # bring it into the same shape as data for elementwise subtraction,
            mean = mean.repeat(data.shape[1]).reshape(data.shape)
            # and subtract.
            data = data - mean
            # Find the zero crossings with falling edge.
            # The output is a list of lists of slice indices.
            # Each index marks a zero crossing; in fact it is the slice where
            # the data is still positive, while in the next slice it is
            # negative (or exactly zero).
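            # A tiny illustration with hypothetical numbers (not taken from a
            # real run): if the mean-subtracted data of one chip started like
            #   data[chip] = [ 5.,  2., -3., -4.,  2.,  6.,  1., -2., ...]
            # the falling-edge finder described above would report the
            # crossings at slice 1 (2. -> -3.) and slice 6 (1. -> -2.).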
            # This was called 'MeanXing' before.
            all_crossings = self.zero_crossing_finder(data)

            # Now we calculate the exact time of the crossings, in units of
            # nanoseconds. Therefore we use self.time_calib, rolled
            # appropriately and summed up, in order to have the integral
            # timing calibration.
            #
            # The list 'time_of_all_crossings' will contain sublists, which
            # in turn contain the times, in ns, of each zero crossing.
            time_of_all_crossings = []
            # The list 'float_slice_of_all_crossings' is similar to the list
            # above, but it contains the positions of the crossings in the
            # pipeline. More precisely than 'all_crossings': the position is
            # linearly interpolated.
            float_slice_of_all_crossings = []
            for chip_id, crossings in enumerate(all_crossings):
                time_of_all_crossings.append([])
                float_slice_of_all_crossings.append([])
                time = np.roll(self.time_calib[chip_id], -start_cell[chip_id]).cumsum()
                for crossing in crossings:
                    # Linear interpolation of the time of the zero crossing:
                    # what fraction of a slice is the interpolated crossing
                    # away from the integral slice index?
                    fraction_of_slice = data[chip_id, crossing] / (data[chip_id, crossing] - data[chip_id, crossing + 1])
                    float_slice_of_all_crossings[-1].append(fraction_of_slice + crossing)
                    slice_width = time[crossing + 1] - time[crossing]
                    time_of_crossing = time[crossing] + slice_width * fraction_of_slice
                    time_of_all_crossings[-1].append(time_of_crossing)

            # Now one can use those two lists to calculate the number (float)
            # of slices between two consecutive crossings, as well as the
            # *measured* time between two crossings. On the other hand, the
            # period of the test signal is known to be self.P_nom, so in case
            # the measured time is longer or shorter than the known period,
            # the associated width of all involved slices is assumed to be a
            # bit longer or shorter than 1./self.fsampling.

            # first we turn the sublists into numpy arrays
            for i in range(len(float_slice_of_all_crossings)):  # both lists have equal length
                float_slice_of_all_crossings[i] = np.array(float_slice_of_all_crossings[i])
                time_of_all_crossings[i] = np.array(time_of_all_crossings[i])

            # now we can easily calculate the measured periods using np.diff()
            all_measured_periods = []
            for chip_times in time_of_all_crossings:
                all_measured_periods.append(np.diff(chip_times))

            for chip, chip_periods in enumerate(all_measured_periods):
                for i, period in enumerate(chip_periods):
                    # shortcuts to the involved slices
                    start = all_crossings[chip][i]
                    end = all_crossings[chip][i + 1]
                    interval = end - start
                    rest = 1024 - interval
                    delta_period = (self.P_nom - period) * self.DAMPING
                    # nominal number of slices in one period
                    nom_num_slices = period * self.fsampling
                    # Now delta_period is distributed over all slices,
                    # as illustrated in the worked example below.
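                    # Worked example with hypothetical numbers: the test
                    # signal has P_nom = 1000./250. = 4 ns. Suppose the
                    # measured period is 4.1 ns, i.e. the slices in between
                    # are currently assumed too wide. Then
                    #   delta_period   = (4. - 4.1) * 0.02 = -0.002 ns
                    #   nom_num_slices = 4.1 * 2.          = 8.2
                    # With e.g. interval = 8 slices between the two crossings
                    # and rest = 1016, each of the 8 inner slices is shortened
                    # by 0.002/8.2 ~ 2.4e-4 ns, while each of the 1016 outer
                    # slices is lengthened by 8/1016 of that amount, so the
                    # sum of all 1024 corrections is zero and the total length
                    # of the ring buffer is preserved.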
                    # The following explanation assumes delta_period is
                    # positive: the slices between start and end get a
                    # positive correction value, given by
                    # delta_period / nom_num_slices.
                    pos_fraction = delta_period / nom_num_slices
                    # the total positive correction sums up to
                    total_positive_correction = interval * pos_fraction
                    # the total negative correction must sum up to the same value
                    total_negative_correction = rest * (interval * (pos_fraction / rest))
                    # so each slice outside the interval gets the 'neg_fraction'
                    neg_fraction = -1. * interval / rest * pos_fraction
                    # use a float-safe comparison instead of exact equality
                    assert np.allclose(total_positive_correction, total_negative_correction)

                    # I do not exactly understand the +1 here ... error prone!!!
                    correction = np.zeros(1024)
                    correction[start + 1:end + 1] = pos_fraction
                    correction[:start + 1] = neg_fraction
                    correction[end + 1:] = neg_fraction
                    assert np.allclose(correction.sum(), 0.)

                    # Now add these correction values to self.time_calib,
                    # rolled back to physical cell order using the start cell.
                    self.time_calib[chip, :] += np.roll(correction, start_cell[chip])
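
# A minimal usage sketch; the file names below are placeholders modeled on the
# commented-out paths above, and a real periodic-signal run plus its matching
# DRS amplitude calibration file are required for this to actually run.
if __name__ == '__main__':
    calc = CalculateDrsTimeCalibrationConstants(
        '20111124_113.fits.gz',       # run containing the 250 MHz test signal
        '20111124_111.drs.fits.gz')   # DRS amplitude calibration of that run
    calc()                            # one damped iteration over all events
    print calc.time_calib             # (160, 1024) array of cell widths in ns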