Index: /fact/tools/pyscripts/sandbox/dneise/timecal.py
===================================================================
--- /fact/tools/pyscripts/sandbox/dneise/timecal.py	(revision 13403)
+++ /fact/tools/pyscripts/sandbox/dneise/timecal.py	(revision 13404)
@@ -8,15 +8,5 @@
 # D. Neise (TU Dortmund) April 2012
 #
-import pyfact
-from myhisto import *
-from hist import *
 import numpy as np
-import numpy.random as rnd
-from scipy import interpolate as ip
-from ROOT import *
-from time import time
-from optparse import OptionParser
-
-
 from pyfact     import RawData
 from drs_spikes import DRSSpikes
@@ -36,15 +26,20 @@
         self.calib_path = cpath
         self.run = RawData(dpath, cpath, return_dict = True)
+        
+        if self.run.nroi != 1024:
+            raise TypeError('Time Calibration Data must have ROI=1024, ROI of '+dpath+' is: '+str(self.run.nroi))
+        
         self.despike = DRSSpikes()
-        self.zX = ZeroXing()
+        self.zero_crossing_finder = ZeroXing(slope = -1) # -1 means falling edge
         self.fsampling = 2.         # sampling frequency
         self.freq = 250.            # testfrequency
-        self.P_nom = 1000./freq     # nominal Period due to testfrequency
+        self.P_nom = 1000./self.freq # nominal period of the test frequency
         self.DAMPING = 0.02         # Damping applied to correction
-        self.NChip = 1              # Number of Chips
-        self.NEvents = 300          # Number of Events to Process
         
-        self.t_nom = np.ones([run.npix, run.nroi])/fsampling
-
+        # this ndarray will contain the best estimate of the true width of each slice after a __call__
+        # but it is used to determine the current best estimate inside the __call__ as well,
+        # since the algorithm is iteratively implemented
+        #self.time_calib = np.linspace( 0. , 512, num=1024, endpoint=False).repeat(run.npix/9).reshape( (1024,run.npix/9) ).transpose()
+        self.time_calib = np.ones( (self.run.npix/9, 1024) )/self.fsampling
     def __call__(self):
         """ the class-call method performs the actual calculation based on the input data
@@ -54,283 +49,108 @@
         # loop over events
         for event in self.run:
-            data = despike( event['acal_data'] )
-            start_cells = event['start_cells']
-            MeanXing, TimeXing, NumXing = Calculate_Crossing(data, event['start_cells'])
-
-
-    def Calculate_Crossing(self, data, start_cells):
-        TimeXing = np.zeros(self.run.nroi)
-        MeanXing = np.zeros(self.run.nroi)
-        NumXing = 0
-        CellTime = CellTimef(np.roll(t_nom, -Start))
-        
-        for i in range(rd.NROI-1):
-            if (Data[i] > Mean) & (Data[i+1] < Mean):
-                MeanXing[NumXing] = i
+            # in the next() method, which is called during looping,
+            # the data is automatically amplitude calibrated.
+            # we just need every 9th pixel, so this is a place 
+            # for optimization
+            data = event['acal_data']
+            
+            # slice out every 9th channel
+            niners_ids = range(8,self.run.npix,9) # [8, 17, 26, ..., 1430, 1439]
+            data = data[niners_ids,:]
+            start_cell = event['start_cells'][niners_ids,:] # NOTE(review): assumes start_cells is 2-D; if it is 1-D this must be [niners_ids] -- confirm
+            
+            data = self.despike( data )
+            
+            # shift data down by the mean           # possible in one line
+                # calculate mean
+            mean = data.mean(1)
+                # make mean in the same shape as data, for elementwise subtraction
+            mean = mean.repeat(data.shape[1]).reshape(data.shape)
+                # subtraction
+            data = data - mean
+            
+            # find zero crossings with falling edge
+            # the output is a list, of lists, of slices.
+            # each slice marks a zero crossing; in fact, it is the slice where
+            # the data is still positive ... in the next slice it is negative, (or just zero).
+            # This was called 'MeanXing' before
+            all_crossings = self.zero_crossing_finder(data)
+            
+            # now we have to calculate the exact time of the crossings
+            # but in units of nanoseconds; therefore we use self.time_calib
+            # rolled appropriately, and summed up in order to have the integral timing calibration
+            #
+            #the list, 'time_of_all_crossings' will contain sublists,
+            # which in turn will contain the times, in ns, for each zero-crossing
+            time_of_all_crossings = []
+            # the list, 'float_slice_of_all_crossings'
+            # is similar to the list above, but it just contains the position of the crossings
+            # in the pipeline, but more precisely than 'all_crossings', the position is linear interpolated
+            float_slice_of_all_crossings = []
+            
+            for chip_id,crossings in enumerate(all_crossings):
+                time_of_all_crossings.append([])
+                float_slice_of_all_crossings.append([])
+                time = np.roll(self.time_calib[chip_id], -start_cell[chip_id]).cumsum()
+                for crossing in crossings:
+                    # performing linear interpolation of time of zero crossing
+                    # what fraction of a slice is the interpolated crossing away from the integral slice id
+                    fraction_of_slice = data[chip_id,crossing] / (data[chip_id,crossing]-data[chip_id,crossing+1])
+                    float_slice_of_all_crossings[-1].append(fraction_of_slice + crossing)
+                    slope = time[crossing+1] - time[crossing]
+                    time_of_crossing = time[crossing] + slope * fraction_of_slice
+                    time_of_all_crossings[-1].append(time_of_crossing)
+            
+            # Now one can use those two lists, which were just created
+            # to calculate the number(float) of slices between two consecutive crossings
+            # as well as the *measured* time between two crossings.
+            # on the other hand, the period of the test signal is known to be self.P_nom
+            # so in case the measured time is longer or shorter than the known Period
+            # the associated time of all involved slices 
+            # is assumed to be a bit longer or short than 1./self.fsampling
+            
+            # first we make the sublists of the lists to be numpy.arrays 
+            for i in range(len(float_slice_of_all_crossings)): # both lists have equal length
+                float_slice_of_all_crossings[i] = np.array(float_slice_of_all_crossings[i])
+                time_of_all_crossings[i] = np.array(time_of_all_crossings[i])
+            
+            # now we can easily calculate the measured periods using np.diff()
+            all_measured_periods = []
+            for chip_times in time_of_all_crossings:
+                all_measured_periods.append(np.diff(chip_times))
+            
+            for chid,chip_periods in enumerate(all_measured_periods):
                 
-                FirstCell = CellTime[i]
-                SecondCell = CellTime[i+1]
-                
-                TimeXing[NumXing] = FirstCell+(SecondCell-FirstCell)/(1.-Data[i+1]/(Data[i]))*(1.-Mean/Data[i])
-                NumXing += 1
-        
-        # calculate the frequency of the testwave
-        freqp = fsampling*NumXing*1000./1024
-        if(np.abs(freqp-freq) > 20):
-            print "This is not a timecalibration run!!"
-            raise SystemExit
-                            
-        return MeanXing, TimeXing, NumXing
-
-        
-        for Event in range( NEvents ):
-            print "Event ", Event
-            Chip = 0
-
-            # loop over every 9th channel in Chip
-            for Pix in range(NPIX)[8::9]:
-                print "Pix ", Pix
-                
-                # calculate t_nom for each pixel
-                MeanXing, TimeXing, NumXing = Crossing(rd.acalData[Pix], np.average(rd.acalData[Pix]), t_nom[Pix], rd.startCells[Pix], Chip)
-                h.list[Chip].dict["numxing"].Fill(NumXing)
-                #h.list[Chip].dict["start"].Fill(rd.startCells[Pix])
-                t_nom[Pix] = Corr(MeanXing, NumXing, TimeXing, t_nom[Pix], rd.startCells[Pix], Chip)
-                
-                Chip += 1
-                
-            rd.next()
-
-        t_nom = t_nom[8::9]
-
-        if(save == "yes"):	
-            np.save(str(dfname[-20:-8])+"_"+str(NEvents)+"x"+str(NChip), t_nom)
-            np.savetxt(str(dfname[-20:-8])+"_"+str(NEvents)+"x"+str(NChip), t_nom)
-
-        for Chip in range(NChip):
-            for i in range(rd.NROI):
-                h.list[Chip].dict["int_dev"].SetBinContent(i+1, np.sum(t_nom[Chip][:i])-i*1/fsampling)
-                h.list[Chip].dict["diff_dev"].SetBinContent(i+1, np.sum(t_nom[Chip][i] - 1/fsampling))
-            
-        pyfact.SaveHistograms(h.list, str(dfname[-20:-8])+"_"+str(NEvents)+"x"+str(NChip)+".root", "RECREATE")
-
-
-        t_f = time()
-
-
-
-        print "time ellapsed		= ",  (t_f - t_s)/60., " min."	
-        print "Speichern		= ", save
-        print "NChip			= ", NChip
-        print "NEvents			= ", NEventsp
-        print "Histo saved as		= ", str(dfname[-20:-8])+str(NEvents)+"x"+str(NChip)+".root"
-        if(save == "yes"):
-            print "CellTime saved as	= ", str(dfname[-20:-8])+str(NEvents)+"x"+str(NChip)+".npy"
-
-
-
-parser = OptionParser()
-parser.add_option('-e', '--extended', action = 'store_true', dest='extend', default = False )
-(options, args) = parser.parse_args()
-
-save = "yes"
-
-if(options.extend):
-
-	NChip = int(raw_input("Wieviele Chips? ([1, ... , "+str(rd.NPIX/9)+"]): "))
-	NEventsp = int(raw_input("Wieviele Events? ([1, ... , "+str(rd.NEvents)+"]) "))
-	save = raw_input("Soll CellTime gespeichert werden? (yes/no) ")
-	
-	
-h = hist_list(tcalHistograms, NChip, "Chip")
-
-
-# Functions needed for calibration
-
-# find negative-going crossings of mean value
-def Crossing(Data, Mean, t_nom, Start, Chip):
-	TimeXing = np.zeros(rd.NROI)
-	MeanXing = np.zeros(rd.NROI)
-	NumXing = 0
-	CellTime = CellTimef(np.roll(t_nom, -Start))
-	
-	for i in range(rd.NROI-1):
-		if ((Data[i] > Mean) & (Data[i+1] < Mean)):
-			MeanXing[NumXing] = i
-			
-			FirstCell = CellTime[i]
-			SecondCell = CellTime[i+1]
-			
-			TimeXing[NumXing] = FirstCell+(SecondCell-FirstCell)/(1.-Data[i+1]/(Data[i]))*(1.-Mean/(Data[i]))
-			NumXing += 1
-	
-	# calculate the frequency of the testwave
-	freqp = fsampling*NumXing*1000./1024
-	if(np.abs(freqp-freq) > 20):
-		print "This is not a timecalibration run!!"
-		raise SystemExit
-						
-	return MeanXing, TimeXing, NumXing
-		
-def CrossingOliver(Data, Mean, t_nom, Start, Chip):
-	TimeXing = np.zeros(lpipe)
-	MeanXing = np.zeros(lpipe)
-	NumXing = 0
-	CellTime = CellTimef(t_nom)
-	
-	
-	for i in range(lpipe-1):
-		if ((Data[i] > Mean) & (Data[i+1] < Mean)):
-			MeanXing[NumXing] = np.mod(i+Start, lpipe)
-			
-			FirstCell = CellTime[np.mod(i+Start,lpipe)]
-			SecondCell = CellTime[np.mod(i+1+Start, lpipe)]
-			
-			if(SecondCell < FirstCell): SecondCell =+ lpipe/fsampl
-			
-			TimeXing[NumXing] = FirstCell+(SecondCell-FirstCell)/(1.-Data[i+1]/(Data[i]))*(1.-Mean/(Data[i]))
-			NumXing += 1
-			
-				
-	return MeanXing, TimeXing, NumXing
-
-# Determine time between crossings and apply correction
-def Corr(MeanXing, NumXing, TimeXing, t_nom, Start, Chip):
-
-	# loop over mean crossings
-	for i in range(int(NumXing-1)):
-		I1 = MeanXing[i]
-		I2 = MeanXing[i+1]
-		Period = TimeXing[i+1] - TimeXing[i]
-		
-		
-		if(np.abs(1-Period/P_nom) > 0.2):
-			#print "Skipping zero crossing due to too large deviation of period."
-			h.list[Chip].dict["loleftout"].Fill(MeanXing[i])
-			#h.list[Chip].dict["leftout"].Fill(np.mod((MeanXing[i]+Start), rd.NROI))
-			continue
-			
-		#h.list[Chip].dict["lomeanxing"].Fill(MeanXing[i])
-		h.list[Chip].dict["meanxing"].Fill(np.mod((MeanXing[i]+Start), rd.NROI))
-		
-		# calculate the frequency of the testwave
-		freqp = 1000./Period
-		h.list[Chip].dict["freq"].Fill(freqp)
-			
-		C = (P_nom-Period)*DAMPING
-		numnom = Period*fsampling
-		Correction = np.zeros(rd.NROI)
-		Correction[I1+1:I2+1] += C/numnom
-		Correction[:I1+1] -= (I2-I1)*C/(numnom*(rd.NROI-(I2-I1)))
-		Correction[I2+1:] -= (I2-I1)*C/(numnom*(rd.NROI-(I2-I1)))
-		
-		Correction = np.roll(Correction, Start)
-		t_nom += Correction
-	#h.list[Chip].dict["lomeanxing"].Fill(MeanXing[-1])
-	h.list[Chip].dict["meanxing"].Fill(np.mod((MeanXing[-1]+Start), rd.NROI))
-			
-	return t_nom
-	
-def CorrOliver(MeanXing, NumXing, TimeXing, t_nom, Start, Chip):
-	
-	CellTime = CellTimef(t_nom)
-	for i in range(int(NumXing-1)):
-		I1 = MeanXing[i]
-		I2 = MeanXing[i+1]
-		Period = TimeXing[i+1] - TimeXing[i]
-		if(Period<0): Period += rd.NROI/fsampl
-		
-		if(np.abs(1-Period/P_nom) > 0.2):
-			#print "Skipping zero crossing due to too large deviation of period."
-			continue
-			
-		Correction = (P_nom-Period)*DAMPING
-		
-		# Positive correction from MeanXing[i] to MeanXing[i+1]
-		# and negative to all others
-		Time = 0
-		for j in range(rd.NROI):
-			diff = CellTime[j+1] - CellTime[j]
-			if ((I2>I1 and j>I1 and j<=I2) or (I2<I1 and (j<=I2 or j>I1))):
-				diff += Correction/np.mod(I2-I1+rd.NROI, rd.NROI)
-			else:
-				diff -= Correction/(rd.NROI-np.mod(I2-I1+rd.NROI, rd.NROI))
-			CellTime[j] = Time
-			Time += diff
-		CellTime[rd.NROI] = Time
-	
-	print CellTime[-1]	
-	t_nom = t_nomf(CellTime)
-	
-	return t_nom
-	
-
-	
-# Summing up the nominal times for each cell to the total cell time.
-def CellTimef(t_nom):
-	CellTime = np.zeros(rd.NROI+1)
-	for i in range(rd.NROI):
-		CellTime[i+1] = CellTime[i] + t_nom[i]
-		
-	return CellTime
-	
-# getting t_nom from CellTime
-def t_nomf(CellTime):
-	t_nom = np.zeros(rd.NROI)
-	for i in range(rd.NROI):
-		t_nom[i] = CellTime[i+1]-CellTime[i]
-		
-	return t_nom
-	
-
-rd.next()
-NPIX = NChip*9
-NEvents = NEventsp
-
-# create array to put the calibrated times for each cell in
-t_nom = np.ones([NPIX, rd.NROI])/fsampling
-# loop over events
-for Event in range( NEvents ):
-	print "Event ", Event
-	Chip = 0
-
-	# loop over every 9th channel in Chip
-	for Pix in range(NPIX)[8::9]:
-		print "Pix ", Pix
-		
-		# calculate t_nom for each pixel
-		MeanXing, TimeXing, NumXing = Crossing(rd.acalData[Pix], np.average(rd.acalData[Pix]), t_nom[Pix], rd.startCells[Pix], Chip)
-		h.list[Chip].dict["numxing"].Fill(NumXing)
-		#h.list[Chip].dict["start"].Fill(rd.startCells[Pix])
-		t_nom[Pix] = Corr(MeanXing, NumXing, TimeXing, t_nom[Pix], rd.startCells[Pix], Chip)
-		
-		Chip += 1
-		
-	rd.next()
-
-t_nom = t_nom[8::9]
-
-if(save == "yes"):	
-	np.save(str(dfname[-20:-8])+"_"+str(NEvents)+"x"+str(NChip), t_nom)
-	np.savetxt(str(dfname[-20:-8])+"_"+str(NEvents)+"x"+str(NChip), t_nom)
-
-for Chip in range(NChip):
-	for i in range(rd.NROI):
-		h.list[Chip].dict["int_dev"].SetBinContent(i+1, np.sum(t_nom[Chip][:i])-i*1/fsampling)
-		h.list[Chip].dict["diff_dev"].SetBinContent(i+1, np.sum(t_nom[Chip][i] - 1/fsampling))
-	
-pyfact.SaveHistograms(h.list, str(dfname[-20:-8])+"_"+str(NEvents)+"x"+str(NChip)+".root", "RECREATE")
-
-
-t_f = time()
-
-
-
-print "time ellapsed		= ",  (t_f - t_s)/60., " min."	
-print "Speichern		= ", save
-print "NChip			= ", NChip
-print "NEvents			= ", NEventsp
-print "Histo saved as		= ", str(dfname[-20:-8])+str(NEvents)+"x"+str(NChip)+".root"
-if(save == "yes"):
-	print "CellTime saved as	= ", str(dfname[-20:-8])+str(NEvents)+"x"+str(NChip)+".npy"
-
+                for i,period in enumerate(chip_periods):
+                    #shortcuts to the involved slices
+                    start = all_crossings[chid][i]
+                    end = all_crossings[chid][i+1]
+                    interval = end-start
+                    rest = 1024-interval 
+                    
+                    delta_period = (self.P_nom - period) * self.DAMPING
+                    # nominal_number_of_slices 
+                    nom_num_slices = period * self.fsampling
+                    
+                    # now the delta_period is distributed to all slices
+                        # I do not exactly understand the +1 here ... error prone!!!
+                    # the following explanation assumes delta_period is positive
+                    # the slices between start and end get a positive correction value
+                    # This correction value is given by delta_period / nom_num_slices
+                    pos_fraction = delta_period / nom_num_slices
+                    # the total positive correction sums up to
+                    total_positive_correction = interval * pos_fraction
+                    # the total negative correction must sum up to the same value
+                    total_negative_correction = rest * (interval * (pos_fraction/rest))
+                    # so we can call the product following 'rest' the 'neg_fraction'
+                    neg_fraction = -1 * interval/rest * pos_fraction
+                    assert abs(total_positive_correction - total_negative_correction) < 1e-9
+                    
+                    correction = np.zeros(1024)
+                    correction[start+1:end+1] = pos_fraction
+                    correction[:start+1]      = neg_fraction
+                    correction[end+1:]        = neg_fraction
+                    assert abs(correction.sum()) < 1e-9
+                    
+                    # now we have to add these correction values to self.time_calib
+                    self.time_calib[chid,:] += np.roll(correction, start_cell[chid])
