#!/usr/bin/python -tt
#
#
from pyfact import RawData
from drs_spikes import DRSSpikes
from fir_filter import CFD
from fir_filter import SlidingAverage

import sys
import numpy as np

data_filename = 'data/20111017_010.fits.gz'
calib_filename = 'data/20111017_006.drs.fits.gz'

run = RawData(data_filename, calib_filename, return_dict=True, do_calibration=True)
despike = DRSSpikes()
smooth = SlidingAverage(7)
cfd = CFD()

thr = 3
filter_delay = 3
search_window_size = 12
# shortcut
sws = search_window_size

#plt.ion()
#fig = plt.figure()
#fig.hold(True)

# We try to determine the:
#  * position
#  * and height (in two versions: filtered and unfiltered data)
# of dark count peaks in:
#  a) all events
#  b) all pixels
#
# We limit ourselves to 10 peaks per pixel max!
# and 1000 events max!
#
# So in order to store this stuff, we prepare an n-dim array.
PEAKS_MAX = 10
# TODO: this operation might under certain circumstances need way too much memory.
# I would like to solve this using try/except and, in case of a MemoryError,
# reduce the number of events: process the events in chunks of e.g. 1000 and
# save the results, so the user will find a certain number of ndarrays in the
# output npz file.  But this should not be a major problem.
# (A sketch of such chunked processing is appended at the end of this file.)
result_peak_positions = np.ones(
    (run.nevents, run.npix, PEAKS_MAX), dtype=np.int16) * -1
result_peak_unfiltered_height = np.ones(
    (run.nevents, run.npix, PEAKS_MAX), dtype=np.float32) * -np.inf
result_peak_smoothed_height = np.ones(
    (run.nevents, run.npix, PEAKS_MAX), dtype=np.float32) * -np.inf

for event in run:
    event_id = event['event_id'].value - 1
    data = event['acal_data']
    data = despike(data)
    data_orig = data.copy()
    data = smooth(data)
    filtered = cfd(data)
    filtered = smooth(filtered)

    # this is a loop over all pixels of this event
    pixel_id = -1
    for dat, fil, orig in zip(data, filtered, data_orig):
        pixel_id += 1
        print event_id, pixel_id
        # plt.cla()

        # sign changes of neighbouring samples mark zero crossings of the CFD output
        prod = fil[:-1] * fil[1:]
        cand = np.where(prod <= 0)[0]
        if len(cand) == 0:
            continue
        # zero crossing with rising edge
        cross = cand[np.where(fil[cand] < 0)[0]]
        if len(cross) == 0:
            continue
        # keep only crossings where the smoothed signal a few samples earlier
        # is above threshold
        over_thr = cross[np.where(dat[cross-4] > thr)[0]]

        # Now that we have these candidates, throw away all those that are
        # probably sitting on the falling edge of their predecessor
        # (i.e. closer than 100 samples to it).
        dover = np.diff(over_thr)
        if len(dover) == 0:
            good = over_thr
        else:
            good = []
            good.append(over_thr[0])
            for i in range(len(dover)):
                if dover[-i-1] > 100:
                    good.append(over_thr[-i-1])
            good = np.array(sorted(good))

        # Keep at most PEAKS_MAX peaks per pixel, as promised above; otherwise
        # the assignment into the result arrays below would fail.
        good = good[:PEAKS_MAX]

        # The positions we just found do not point exactly to the maximum of a
        # peak; the peak will be on the left side of it.
        # We use the smoothed data to find the position of the local maximum
        # and then store this position and the value of both
        # the smoothed data and the original data.
        max_pos = np.zeros(good.shape, dtype=int)   # integer dtype: used as index below
        max_smoothed = np.zeros(good.shape)
        max_orig = np.zeros(good.shape)
        for i in range(len(good)):
            # We search for a local maximum in a window of sws (= 12) samples
            # left of the zero crossing.
            if len(dat[good[i]-sws:good[i]]) > 0:
                max_pos[i] = good[i]-sws + np.argmax(dat[good[i]-sws:good[i]])
                max_smoothed[i] = dat[max_pos[i]]
                max_orig[i] = orig[max_pos[i]-filter_delay]

        result_peak_positions[event_id, pixel_id, :len(max_pos)] = max_pos
        result_peak_unfiltered_height[event_id, pixel_id, :len(max_pos)] = max_orig
        result_peak_smoothed_height[event_id, pixel_id, :len(max_pos)] = max_smoothed

        # plt.plot(max_pos, max_smoothed, 'ro')
        # plt.plot(max_pos, max_orig, 'bo')
        # plt.plot(np.arange(len(dat)), dat, 'k:')

        # ret = raw_input('pixel-loop?')
        # if ret == 'q':
        #     break

    # ret = raw_input('event-loop?')
    # if ret == 'q':
    #     break

np.savez('20111017_010-006.npz',
         result_peak_positions=result_peak_positions,
         result_peak_unfiltered_height=result_peak_unfiltered_height,
         result_peak_smoothed_height=result_peak_smoothed_height)

#output = open(out_filename, 'wb')
#cPickle.dump(data_filename, output, -1)
#cPickle.dump(calib_filename, output, -1)
#cPickle.dump(peak_list, output, -1)
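
# Illustrative only (not part of the original script): the saved results can
# later be read back with np.load.  Unused peak slots are padded with -1
# (positions) and -inf (heights), so they can be masked out, e.g.:
#
#   arrays = np.load('20111017_010-006.npz')
#   pos = arrays['result_peak_positions']            # (nevents, npix, PEAKS_MAX)
#   height = arrays['result_peak_smoothed_height']
#   valid = pos >= 0                                 # True where a real peak was stored
#   print height[valid]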
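
# ---------------------------------------------------------------------------
# A minimal sketch of the chunked processing mentioned in the TODO above.
# This is an illustrative assumption, not part of the original analysis, and
# it is never called: `save_in_chunks`, `out_filename` and `chunk_size` are
# made-up names.  The idea is to allocate the result arrays per chunk of
# events instead of for the whole run, and to store one set of arrays per
# chunk in the same npz file.
def save_in_chunks(run, out_filename, chunk_size=1000):
    arrays = {}
    chunk_id = 0
    done = 0
    while done < run.nevents:
        n = min(chunk_size, run.nevents - done)
        # per-chunk result array, same layout and padding as above
        peak_positions = np.ones((n, run.npix, PEAKS_MAX), dtype=np.int16) * -1
        # ... fill peak_positions (and the two height arrays) with the same
        # per-event peak search as in the main loop above ...
        arrays['result_peak_positions_%03d' % chunk_id] = peak_positions
        done += n
        chunk_id += 1
    # the user then finds one ndarray per chunk in the output npz file
    np.savez(out_filename, **arrays)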