Changeset 13415
- Timestamp: 04/23/12 13:28:06
- Location: fact/tools/pyscripts
- Files: 3 edited, 1 copied
fact/tools/pyscripts/map_dn.txt
r12869 → r13415:

-# CHID Y X x_euk y_euk y_h x_h softID hardID
+# CHID Y X y_euk x_euk y_h x_h softID hardID
 0 -7 21 18.1865334794732 3 3 21 1348 0
 1 -7 22 19.0525588832576 3.5 4 22 1419 1
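The only change here is the header: the x_euk and y_euk labels are swapped so that the header matches the actual column contents. Because the header line starts with '#', np.loadtxt skips it as a comment, so nothing enforces that readers of the file unpack the columns in this order; the companion fix in plotters.py below has to be kept in sync by hand. A minimal consistency check could look like the sketch below (the helper name, the path default and the expected-order list are illustrative, not part of the changeset):

{{{#!python
import numpy as np

# Column order of map_dn.txt as of r13415 (assumed fixed from here on).
EXPECTED = ['CHID', 'Y', 'X', 'y_euk', 'x_euk', 'y_h', 'x_h', 'softID', 'hardID']

def load_map(path='map_dn.txt'):
    # Parse the '#' header ourselves and refuse to load if the order changed,
    # since loadtxt ignores the header and unpacks purely by column position.
    with open(path) as f:
        header = f.readline().lstrip('#').split()
    if header != EXPECTED:
        raise ValueError('unexpected column order: %r' % (header,))
    return np.loadtxt(path, unpack=True)
}}}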
fact/tools/pyscripts/pyfact/plotters.py
r13402 → r13415:

         plt.ion()

-        chid, y,x,xe,ye,yh,xh,softid,hardid = np.loadtxt(map_file_path ,unpack=True)
+        chid, y,x,ye,xe,yh,xh,softid,hardid = np.loadtxt(map_file_path ,unpack=True)

-        # -ye in order to correct for the sign difference between my mapping file
-        # and FACTmap111030.txt
-        self.ye = -ye
+        self.xe = xe
+        self.ye = ye

         self.H = (6,0,30./180.*3.1415926)
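With unpack=True, np.loadtxt returns one array per column in file order, so the names on the left-hand side must follow the file's column order exactly; listing them in the wrong order silently swaps the data instead of raising an error, which is what the reordering above corrects now that map_dn.txt declares y_euk before x_euk. The explicit sign flip on ye is dropped as well, presumably because the mapping data now follows the FACTmap111030.txt sign convention directly. A small self-contained illustration of the unpack-order pitfall (the two-column data is made up):

{{{#!python
import io
import numpy as np

# Two columns, 'a' and 'b', in that order.
data = b"# a b\n1 10\n2 20\n"

a, b = np.loadtxt(io.BytesIO(data), unpack=True)              # matches file order
wrong_b, wrong_a = np.loadtxt(io.BytesIO(data), unpack=True)  # deliberately swapped

# No error is raised for the swapped version; wrong_b simply holds column 'a'.
assert (a == [1., 2.]).all() and (b == [10., 20.]).all()
assert (wrong_b == a).all()
}}}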
fact/tools/pyscripts/sandbox/dneise/timecal.py
r13404 → r13415:

 from pyfact import RawData
 from drs_spikes import DRSSpikes
-from extractors import ZeroXing
+from extractor import ZeroXing
 class CalculateDrsTimeCalibrationConstants( object ):
     """ basic analysis class for the calculation of DRS time aperture jitter calibration constants
...
         self.fsampling = 2. # sampling frequency
         self.freq = 250. # testfrequency
-        self.P_nom = 1000./freq # nominal Period due to testfrequency
+        self.P_nom = 1000./self.freq # nominal Period due to testfrequency
         self.DAMPING = 0.02 # Damping applied to correction

...
         """
         # loop over events
+        counter = 0
         for event in self.run:
+            print 'counter', counter
+            counter +=1
             # in the next() method, which is called during looping,
             # the data is automatically amplitude calibrated.
...
             start_cell = event['start_cells'][niners_ids,:]

-            data = despike( data )
+            data = self.despike( data )

             # shift data down by the mean # possible in one line
...

         # first we make the sublists of the lists to be numpy.arrays
-        for i in len(float_slice_of_all_crossings): #both list have equal length
+        for i in range(len(float_slice_of_all_crossings)): #both list have equal length
             float_slice_of_all_crossings[i] = np.array(float_slice_of_all_crossings[i])
             time_of_all_crossings[i] = np.array(time_of_all_crossings[i])
...
             all_measured_periods.append(np.diff(chip_times))

-        for chid,chip_periods in enumerate(all_measured_periods):
+        for chip,chip_periods in enumerate(all_measured_periods):

             for i,period in enumerate(chip_periods):
...
                 total_negative_correction = rest * (interval * (pos_fraction/rest))
                 # so we can call the product following 'rest' the 'neg_fraction
-                neg_fraction = -1 * interval/rest * important
-                assert total_positive_correction - total_negative_correction == 0
+                neg_fraction = -1 * interval/rest * pos_fraction
+                #print '1. should be zero', total_positive_correction - total_negative_correction
+                #assert total_positive_correction - total_negative_correction == 0

                 correction = np.zeros(1024)
...
                 correction[:start+1] = neg_fraction
                 correction[end+1:] = neg_fraction
-                assert correction.sum() == 0
+                #print '2. should be zero', correction.sum()
+                #assert correction.sum() == 0

                 # now we have to add these correction values to self.time_calib
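Most of these hunks fix names that are apparently not defined in the scope where they are used: freq becomes self.freq, despike becomes self.despike, the stray name important becomes pos_fraction, and the loop variable chid is renamed to chip, presumably because it enumerates chips rather than channels here. The iteration fix is similar in spirit: len(...) returns an int, which is not iterable, so the loop needs range(len(...)). With freq = 250, presumably in MHz, the corrected line gives P_nom = 1000./250. = 4.0, i.e. a 4 ns nominal period of the test signal. The last two hunks replace exact-equality asserts on floating-point sums with commented-out diagnostic prints: the correction array is built so that the positive correction inside [start, end] is balanced by the negative correction on all remaining cells, which makes its sum zero only up to rounding, so correction.sum() == 0 can fail even when the construction is right. The sketch below rebuilds that zero-sum construction with made-up values and uses a tolerance check instead of '== 0' (the pos_fraction value and the start/end cells are invented for illustration):

{{{#!python
import numpy as np

# Made-up values; in timecal.py they come from the measured zero-crossing periods.
start, end = 100, 140
interval = end - start            # cells that receive the positive correction
rest = 1024 - interval            # all other cells share the negative correction
pos_fraction = 0.3
neg_fraction = -1. * interval / rest * pos_fraction

correction = np.zeros(1024)
correction[start + 1:end + 1] = pos_fraction   # assumed positive region
correction[:start + 1] = neg_fraction          # negative regions as in the changeset
correction[end + 1:] = neg_fraction

print(correction.sum())                 # tiny, but rarely exactly 0.0
assert abs(correction.sum()) < 1e-9     # tolerance check instead of '== 0'
}}}

Checking the invariant with an absolute tolerance (or numpy.isclose) keeps the sanity check without tripping over float rounding, which is presumably why the strict asserts were demoted to commented-out prints in this changeset.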