Index: /fact/tools/pyscripts/pyfact/FITS.h
===================================================================
--- /fact/tools/pyscripts/pyfact/FITS.h	(revision 17690)
+++ /fact/tools/pyscripts/pyfact/FITS.h	(revision 17690)
@@ -0,0 +1,112 @@
+/*
+ *
+ * FITS.h
+ *
+ * Global fits header
+ *
+ * Author: lyard
+ *
+ */
+
+#ifndef MARS_FITS
+#define MARS_FITS
+
+#include <stdint.h>
+
+namespace FITS
+{
+
+    // Identifiers of the compression scheme processes
+    enum CompressionProcess_t
+    {
+        kFactRaw       = 0x0,
+        kFactSmoothing = 0x1,
+        kFactHuffman16 = 0x2
+    };
+
+    //ordering of the columns / rows
+    enum RowOrdering_t
+    {
+        kOrderByCol = 'C',
+        kOrderByRow = 'R'
+    };
+
+#ifdef __CINT__
+    struct TileHeader;
+    struct BlockHeader;
+#else
+    //Structure helper for tiles headers
+    struct TileHeader
+    {
+      char     id[4];
+      uint32_t numRows;
+      uint64_t size;
+
+      TileHeader() {}
+
+      TileHeader(uint32_t nRows,
+                 uint64_t s) : id({'T', 'I', 'L', 'E'}),
+                                 numRows(nRows),
+                                 size(s)
+      { };
+    } __attribute__((__packed__));
+
+    // Structure helper for block headers and compression schemes
+    struct BlockHeader
+    {
+        uint64_t      size;
+        char          ordering;
+        unsigned char numProcs;
+        uint16_t      processings[];
+
+        BlockHeader(uint64_t      s=0,
+                    char          o=kOrderByRow,
+                    unsigned char n=1) : size(s),
+                                         ordering(o),
+                                         numProcs(n)
+        {}
+    } __attribute__((__packed__));
+#endif
+
+    //Helper structure to simplify the initialization and handling of compressed blocks headers
+    struct Compression
+    {
+        BlockHeader header;
+        std::vector<uint16_t> sequence;
+
+        Compression(const std::vector<uint16_t> &seq, const RowOrdering_t &order=kOrderByCol)
+            : header(0, order, seq.size()), sequence(seq)
+        {
+
+        }
+
+        Compression(const CompressionProcess_t &compression=kFactRaw, const RowOrdering_t &order=kOrderByCol)
+            : header(0, order, 1), sequence(1)
+        {
+            sequence[0] = compression;
+        }
+
+#ifdef __MARS__ // needed for CINT
+        Compression(const int &compression)
+            : header(0, kOrderByCol, 1), sequence(1)
+        {
+            sequence[0] = compression;
+        }
+#endif
+
+        RowOrdering_t getOrdering() const { return RowOrdering_t(header.ordering); }
+        uint32_t getSizeOnDisk() const { return sizeof(BlockHeader) + sizeof(uint16_t)*header.numProcs; }
+        CompressionProcess_t getProc(uint32_t i) const { return CompressionProcess_t(sequence[i]); }
+        uint16_t getNumProcs() const { return header.numProcs; }
+
+        void SetBlockSize(uint64_t size) { header.size = size; }
+        void Memcpy(char *dest) const
+        {
+            memcpy(dest, &header, sizeof(BlockHeader));
+            memcpy(dest+sizeof(BlockHeader), sequence.data(), header.numProcs*sizeof(uint16_t));
+        }
+    };
+};
+
+#endif // MARS_FITS
+
Index: /fact/tools/pyscripts/pyfact/clean.sh
===================================================================
--- /fact/tools/pyscripts/pyfact/clean.sh	(revision 17690)
+++ /fact/tools/pyscripts/pyfact/clean.sh	(revision 17690)
@@ -0,0 +1,2 @@
+rm *.so *.d
+rm extern_Mars_mcore/*.so extern_Mars_mcore/*.d
Index: /fact/tools/pyscripts/pyfact/factfits.h
===================================================================
--- /fact/tools/pyscripts/pyfact/factfits.h	(revision 17690)
+++ /fact/tools/pyscripts/pyfact/factfits.h	(revision 17690)
@@ -0,0 +1,206 @@
+/*
+ * factfits.h
+ *
+ *  Created on: May 26, 2013
+ *      Author: lyard
+ */
+
+#ifndef MARS_FACTFITS
+#define MARS_FACTFITS
+
+#include "zfits.h"
+
+class factfits : public zfits
+{
+public:
+    // Default constructor
+    factfits(const std::string& fname, const std::string& tableName="", bool force=false) :
+        zfits(fname, tableName, force),
+        fOffsetCalibration(0),
+        fOffsetStartCellData(0),
+        fOffsetData(0),
+        fNumRoi(0)
+    {
+        if (init())
+            readDrsCalib(fname);
+    }
+
+    // Alternative constructor
+    factfits(const std::string& fname, const std::string& fout, const std::string& tableName, bool force=false) :
+        zfits(fname, fout, tableName, force),
+        fOffsetCalibration(0),
+        fOffsetStartCellData(0),
+        fOffsetData(0),
+        fNumRoi(0)
+    {
+        if (init())
+            readDrsCalib(fname);
+    }
+
+private:
+
+    void StageRow(size_t row, char* dest)
+    {
+        zfits::StageRow(row, dest);
+
+        // This file does not contain fact data or no calibration to be applied
+        if (fOffsetCalibration.empty())
+            return;
+
+        //re-get the pointer to the data to access the offsets
+        const uint8_t offset = (row*fTable.bytes_per_row)%4;
+
+        int16_t *startCell = reinterpret_cast<int16_t*>(fBufferRow.data() + offset + fOffsetStartCellData);
+        int16_t *data      = reinterpret_cast<int16_t*>(fBufferRow.data() + offset + fOffsetData);
+
+         /*
+         for (uint32_t i=0; i<1440*1024; i+=1024, startCell++)
+         {
+             if (*startCell < 0)
+             {
+                 data += fNumRoi;
+                 continue;
+             }
+             for (uint32_t j=0; j<fNumRoi; j++, data++)
+                 *data += fOffsetCalibration[i + (*startCell+j)%1024];
+         }
+         */
+
+        // This version is faster because the compiler's optimization
+        // is not biased by the evaluation of %1024
+        for (int ch=0; ch<1440; ch++)
+        {
+            if (startCell[ch]<0)
+            {
+                data += fNumRoi;
+                continue;
+            }
+
+            const int16_t modStart = startCell[ch] % 1024;
+            const int16_t *off = fOffsetCalibration.data() + ch*1024;
+
+            const int16_t *cal = off+modStart;
+            const int16_t *end_stride = data+fNumRoi;
+
+            if (modStart+fNumRoi>1024)
+            {
+                while (cal<off+1024)
+                    *data++ += *cal++;
+
+                cal = off;
+            }
+            while (data<end_stride)
+                *data++ += *cal++;
+        }
+
+    }
+
+    bool init()
+    {
+        if (!HasKey("NPIX") || !HasKey("NROI"))
+            return false;
+
+        if (Get<uint16_t>("NPIX")!=1440)
+            return false;
+
+        fNumRoi = Get<uint16_t>("NROI");
+        if (fNumRoi>1024)
+            return false;
+
+        // check column details for Data
+        const Table::Columns::const_iterator it = fTable.cols.find("Data");
+        if (it==fTable.cols.end() || it->second.num!=1440*fNumRoi || it->second.type!='I')
+            return false;
+
+        // check column details for StartCellData
+        const Table::Columns::const_iterator is = fTable.cols.find("StartCellData");
+        if (is==fTable.cols.end() || is->second.num!=1440 || is->second.type!='I')
+            return false;
+
+        fOffsetStartCellData = is->second.offset;
+        fOffsetData          = it->second.offset;
+
+        return true;
+    }
+
+    // Read the DRS calibration data from the 'ZDrsCellOffsets' table, if present
+    void readDrsCalib(const std::string& fileName)
+    {
+        // Not mandatory, but improves performance a lot when reading uncompressed, gzipped files
+        if (!IsCompressedFITS())
+            return;
+
+        zfits calib(fileName, "ZDrsCellOffsets");
+
+        if (calib.bad())
+        {
+            clear(rdstate()|std::ios::badbit);
+            return;
+        }
+
+        if (calib.eof())
+            return;
+
+        // Check correct size and format of table
+        if (calib.GetNumRows() != 1)
+        {
+            clear(rdstate()|std::ios::badbit);
+#ifdef __EXCEPTIONS
+            throw std::runtime_error("Table 'ZDrsCellOffsets' found, but not with one row as expected");
+#else
+            gLog << ___err___ << "ERROR - Table 'ZDrsCellOffsets' found, but not with one row as expected" << std::endl;
+            return;
+#endif
+        }
+        if (calib.GetStr("TTYPE1") != "OffsetCalibration")
+        {
+            clear(rdstate()|std::ios::badbit);
+#ifdef __EXCEPTIONS
+            throw std::runtime_error("Table 'ZDrsCellOffsets' found, but first column is not the one expected");
+#else
+            gLog << ___err___ << "ERROR - Table 'ZDrsCellOffsets' found, but first column is not the one expected" << std::endl;
+            return;
+#endif
+        }
+        bool isColumnPresent = false;
+        if (calib.HasKey("TFORM1") && calib.GetStr("TFORM1") == "1474560I") isColumnPresent = true;
+        if (calib.HasKey("ZFORM1") && calib.GetStr("ZFORM1") == "1474560I") isColumnPresent = true;
+        if (!isColumnPresent)  // 1024*1440
+        {
+            clear(rdstate()|std::ios::badbit);
+#ifdef __EXCEPTIONS
+            throw std::runtime_error("Table 'ZDrsCellOffsets' has wrong column format (TFORM1)");
+#else
+            gLog << ___err___ << "ERROR - Table 'ZDrsCellOffsets' has wrong column format (TFORM1)" << std::endl;
+            return;
+#endif
+        }
+
+        fOffsetCalibration.resize(1024*1440);
+
+        calib.SetPtrAddress("OffsetCalibration", fOffsetCalibration.data());
+        if (calib.GetNextRow())
+            return;
+
+        clear(rdstate()|std::ios::badbit);
+
+#ifdef __EXCEPTIONS
+        throw std::runtime_error("Reading column 'OffsetCalibration' failed.");
+#else
+        gLog << ___err___ << "ERROR - Reading column 'OffsetCalibration' failed." << std::endl;
+#endif
+
+    }
+
+    std::vector<int16_t> fOffsetCalibration; ///< integer values of the drs calibration used for compression
+
+    size_t fOffsetStartCellData;
+    size_t fOffsetData;
+
+    uint16_t fNumRoi;
+
+#warning Time marker channels currently unhandled
+
+}; //class factfits
+
+#endif
Index: /fact/tools/pyscripts/pyfact/huffman.h
===================================================================
--- /fact/tools/pyscripts/pyfact/huffman.h	(revision 17690)
+++ /fact/tools/pyscripts/pyfact/huffman.h	(revision 17690)
@@ -0,0 +1,435 @@
+#ifndef FACT_huffman
+#define FACT_huffman
+
+#include <string.h>
+#include <stdint.h>
+
+#include <set>
+#include <string>
+#include <vector>
+
+#define MAX_SYMBOLS (1<<16)
+
+// ================================================================
+
+namespace Huffman
+{
+    static unsigned long numbytes_from_numbits(unsigned long numbits)
+    {
+        return numbits / 8 + (numbits % 8 ? 1 : 0);
+    }
+
+    struct TreeNode
+    {
+        TreeNode *parent;
+        union
+        {
+            struct
+            {
+                TreeNode *zero, *one;
+            };
+            uint16_t symbol;
+        };
+
+        size_t count;
+        bool isLeaf;
+
+        TreeNode(uint16_t sym, size_t cnt=0) : parent(0), isLeaf(true)
+        {
+            symbol = sym;
+            count  = cnt;
+        }
+
+        TreeNode(TreeNode *n0=0, TreeNode *n1=0) : parent(0), isLeaf(false)
+        {
+            count = n0 && n1 ?  n0->count + n1->count : 0;
+            zero  = n0 && n1 ? (n0->count > n1->count ? n0 : n1) : NULL;
+            one   = n0 && n1 ? (n0->count > n1->count ? n1 : n0) : NULL;
+
+            if (n0)
+                n0->parent = this;
+
+            if (n1)
+                n1->parent = this;
+        }
+
+        ~TreeNode()
+        {
+            if (isLeaf)
+                return;
+
+            if (zero)
+                delete zero;
+            if (one)
+                delete one;
+        }
+
+        bool operator() (const TreeNode *hn1, const TreeNode *hn2) const
+        {
+            return hn1->count < hn2->count;
+        }
+    };
+
+
+    struct Encoder
+    {
+        struct Code
+        {
+            size_t bits;
+            uint8_t numbits;
+
+            Code() : numbits(0) { }
+        };
+
+        size_t count;
+        Code lut[1<<16];
+
+        bool CreateEncoder(const TreeNode *n, size_t bits=0, uint8_t nbits=0)
+        {
+            if (n->isLeaf)
+            {
+#ifdef __EXCEPTIONS
+                if (nbits>sizeof(size_t)*8)
+                    throw std::runtime_error("Too many different symbols - this should not happen!");
+#else
+                if (nbits>sizeof(size_t)*8)
+                {
+                    count = 0;
+                    return false;
+                }
+#endif
+                lut[n->symbol].bits    = bits;
+                lut[n->symbol].numbits = nbits==0 ? 1 : nbits;
+                count++;
+                return true;
+            }
+
+            // Recurse: zero-branch appends a 0-bit, one-branch a 1-bit (size_t-wide shift avoids int overflow for nbits>=31)
+            return
+                CreateEncoder(n->zero, bits,                      nbits+1) &&
+                CreateEncoder(n->one,  bits | (size_t(1)<<nbits), nbits+1);
+        }
+
+        void WriteCodeTable(std::string &out) const
+        {
+            out.append((char*)&count, sizeof(size_t));
+
+            for (uint32_t i=0; i<MAX_SYMBOLS; i++)
+            {
+                const Code &n = lut[i];
+                if (n.numbits==0)
+                    continue;
+
+                // Write the 2 byte symbol.
+                out.append((char*)&i, sizeof(uint16_t));
+                if (count==1)
+                    return;
+
+                // Write the 1 byte code bit length.
+                out.append((char*)&n.numbits, sizeof(uint8_t));
+
+                // Write the code bytes.
+                uint32_t numbytes = numbytes_from_numbits(n.numbits);
+                out.append((char*)&n.bits, numbytes);
+            }
+        }
+
+        void Encode(std::string &out, const uint16_t *bufin, size_t bufinlen) const
+        {
+            if (count==1)
+                return;
+
+            uint8_t curbyte = 0;
+            uint8_t curbit  = 0;
+
+            for (uint32_t i=0; i<bufinlen; ++i)
+            {
+                const uint16_t &symbol = bufin[i];
+
+                const Code *code = lut+symbol;
+
+                uint8_t nbits = code->numbits;
+                const uint8_t *bits = (uint8_t*)&code->bits;
+
+                while (nbits>0)
+                {
+                    // Number of bits available in the current byte
+                    const uint8_t free_bits = 8 - curbit;
+
+                    // Write bits to current byte
+                    curbyte |= *bits<<curbit;
+
+                    // If the byte has been filled, put it into the output buffer
+                    // If the bits exceed the current byte step to the next byte
+                    // and fill it properly
+                    if (nbits>=free_bits)
+                    {
+                        out += curbyte;
+                        curbyte = *bits>>free_bits;
+
+                        bits++;
+                    }
+
+                    // Adapt the number of available bits, the number of consumed bits
+                    // and the bit-pointer accordingly
+                    const uint8_t consumed = nbits>8 ? 8 : nbits;
+                    nbits  -= consumed;
+                    curbit += consumed;
+                    curbit %= 8;
+                }
+            }
+
+            // If the buffer-byte is half-full, also add it to the output buffer
+            if (curbit>0)
+                out += curbyte;
+        }
+
+        Encoder(const uint16_t *bufin, size_t bufinlen) : count(0)
+        {
+            uint64_t counts[MAX_SYMBOLS];
+            memset(counts, 0, sizeof(uint64_t)*MAX_SYMBOLS);
+
+            // Count occurrences
+            for (const uint16_t *p=bufin; p<bufin+bufinlen; p++)
+                counts[*p]++;
+
+            // Copy all occurring symbols into a sorted list
+            std::multiset<TreeNode*, TreeNode> set;
+            for (int i=0; i<MAX_SYMBOLS; i++)
+                if (counts[i])
+                    set.insert(new TreeNode(i, counts[i]));
+
+            // Create the tree bottom-up
+            while (set.size()>1)
+            {
+                auto it = set.begin();
+
+                auto it1 = it++;
+                auto it2 = it;
+
+                TreeNode *nn = new TreeNode(*it1, *it2);
+
+                set.erase(it1, ++it2);
+
+                set.insert(nn);
+            }
+
+            // get the root of the tree
+            const TreeNode *root = *set.begin();
+
+            CreateEncoder(root);
+
+            // This will delete the whole tree
+            delete root;
+        }
+
+    };
+
+
+
+    struct Decoder
+    {
+        uint16_t symbol;
+        uint8_t nbits;
+        bool isLeaf;
+
+        Decoder *lut;
+
+        Decoder() : isLeaf(false), lut(NULL)
+        {
+        }
+
+        ~Decoder()
+        {
+            if (lut)
+                delete [] lut;
+        }
+
+        void Set(uint16_t sym, uint8_t n=0, size_t bits=0)
+        {
+            if (!lut)
+                lut = new Decoder[256];
+
+            if (n>8)
+            {
+                lut[bits&0xff].Set(sym, n-8, bits>>8);
+                return;
+            }
+
+            const int nn = 1<<(8-n);
+
+            for (int i=0; i<nn; i++)
+            {
+                const uint8_t key = bits | (i<<n);
+
+                lut[key].symbol = sym;
+                lut[key].isLeaf = true;
+                lut[key].nbits  = n;
+            }
+        }
+
+        void Build(const TreeNode &p, uint64_t bits=0, uint8_t n=0)
+        {
+            if (p.isLeaf)
+            {
+                Set(p.symbol, n, bits);
+                return;
+            }
+            // Recurse: one-branch sets bit n (64-bit shift avoids int overflow for n>=31)
+            Build(*p.zero, bits,                      n+1);
+            Build(*p.one,  bits | (uint64_t(1)<<n),   n+1);
+        }
+
+        Decoder(const TreeNode &p) : symbol(0), isLeaf(false), lut(NULL)
+        {
+            Build(p);
+        }
+
+        const uint8_t *Decode(const uint8_t *in_ptr, const uint8_t *in_end,
+                              uint16_t *out_ptr, const uint16_t *out_end) const
+        {
+            Decoder const *p = this;
+
+            if (in_ptr==in_end)
+            {
+                while (out_ptr < out_end)
+                    *out_ptr++ = p->lut->symbol;
+                return in_ptr;
+            }
+
+            uint8_t curbit = 0;
+            while (in_ptr<in_end && out_ptr<out_end)
+            {
+                const uint16_t *two = (uint16_t*)in_ptr;
+
+                const uint8_t curbyte = (*two >> curbit);
+
+#ifdef __EXCEPTIONS
+                if (!p->lut)
+                    throw std::runtime_error("Unknown bitcode in stream!");
+#else
+                if (!p->lut)
+                    return NULL;
+#endif
+
+                p = p->lut + curbyte;
+                if (!p->isLeaf)
+                {
+                    in_ptr++;
+                    continue;
+                }
+
+                *out_ptr++ = p->symbol;
+                curbit += p->nbits;
+
+                p = this;
+
+                if (curbit>=8)
+                {
+                    curbit %= 8;
+                    in_ptr++;
+                }
+
+            }
+
+            return curbit ? in_ptr+1 : in_ptr;
+        }
+
+        Decoder(const uint8_t* bufin, int64_t &pindex) : isLeaf(false), lut(NULL)
+        {
+            // FIXME: Sanity check for size missing....
+
+            // Read the number of entries.
+            size_t count=0;
+            memcpy(&count, bufin + pindex, sizeof(count));
+            pindex += sizeof(count);
+
+            // Read the entries.
+            for (size_t i=0; i<count; i++)
+            {
+                uint16_t sym;
+                memcpy(&sym, bufin + pindex, sizeof(uint16_t));
+                pindex += sizeof(uint16_t);
+
+                if (count==1)
+                {
+                    Set(sym);
+                    break;
+                }
+
+                uint8_t numbits;
+                memcpy(&numbits, bufin + pindex, sizeof(uint8_t));
+                pindex += sizeof(uint8_t);
+
+                const uint8_t numbytes = numbytes_from_numbits(numbits);
+
+#ifdef __EXCEPTIONS
+                if (numbytes>sizeof(size_t))
+                    throw std::runtime_error("Number of bytes for a single symbol exceeds maximum.");
+#else
+                if (numbytes>sizeof(size_t))
+                {
+                    pindex = -1;
+                    return;
+                }
+#endif
+                size_t bits=0;
+                memcpy(&bits, bufin+pindex, numbytes);
+                pindex += numbytes;
+
+                Set(sym, numbits, bits);
+            }
+        }
+    };
+
+    inline bool Encode(std::string &bufout, const uint16_t *bufin, size_t bufinlen)
+    {
+        const Encoder encoder(bufin, bufinlen);
+
+#ifndef __EXCEPTIONS
+        if (encoder.count==0)
+            return false;
+#endif
+
+        bufout.append((char*)&bufinlen, sizeof(size_t));
+        encoder.WriteCodeTable(bufout);
+        encoder.Encode(bufout, bufin, bufinlen);
+
+        return true;
+    }
+
+    inline int64_t Decode(const uint8_t *bufin,
+                          size_t         bufinlen,
+                          std::vector<uint16_t> &pbufout)
+    {
+        int64_t i = 0;
+
+        // Read the number of data bytes this encoding represents.
+        size_t data_count = 0;
+        memcpy(&data_count, bufin, sizeof(size_t));
+        i += sizeof(size_t);
+
+
+        pbufout.resize(data_count);
+
+        const Decoder decoder(bufin, i);
+
+#ifndef __EXCEPTIONS
+        if (i==-1)
+            return -1;
+#endif
+
+        const uint8_t *in_ptr =
+            decoder.Decode(bufin+i, bufin+bufinlen,
+                           pbufout.data(), pbufout.data()+data_count);
+
+#ifndef __EXCEPTIONS
+        if (!in_ptr)
+            return -1;
+#endif
+
+        return in_ptr-bufin;
+    }
+};
+
+#endif
Index: /fact/tools/pyscripts/pyfact/izstream.h
===================================================================
--- /fact/tools/pyscripts/pyfact/izstream.h	(revision 17690)
+++ /fact/tools/pyscripts/pyfact/izstream.h	(revision 17690)
@@ -0,0 +1,170 @@
+#ifndef MARS_izstream
+#define MARS_izstream
+
+#include <string.h>
+
+#include <istream>
+#include <streambuf>
+
+#ifdef __CINT__
+typedef void *gzFile;
+#else
+#include <zlib.h>
+#endif
+
+class izstream : public std::streambuf, public std::istream
+{
+private:
+    static const int fgBufferSize = 2048*1024*2;
+
+    gzFile fFile;   // file handle for compressed file
+    char  *fBuffer; // data buffer
+
+    int underflow()
+    {
+        if (gptr() && gptr()<egptr())
+            return * reinterpret_cast<unsigned char *>(gptr());
+
+        if (!is_open())
+            return EOF;
+
+        // gptr()-eback(): if more than four bytes are already flushed
+        const int iputback = gptr()-eback()>4 ? 4 : gptr()-eback();
+
+        // Copy the last four bytes flushed into the putback area
+        memcpy(fBuffer+(4-iputback), gptr()-iputback, iputback);
+
+        // Fill the buffer starting at the current file position and reset buffer
+        // pointers by calling setg
+        const int num = gzread(fFile, fBuffer+4, fgBufferSize-4);
+        if (num <= 0) // ERROR or EOF
+            return EOF;
+
+        // reset buffer pointers
+        setg(fBuffer+(4-iputback), fBuffer+4, fBuffer+4+num);
+
+        // return next character
+        return *reinterpret_cast<unsigned char *>(gptr());
+    }
+
+
+public:
+    izstream() : std::istream(this), fFile(0)
+    {
+        fBuffer = new char[fgBufferSize];
+        setg(fBuffer+4, fBuffer+4, fBuffer+4);
+    }
+    izstream(const char *name) : std::istream(this), fFile(0)
+    {
+        fBuffer = new char[fgBufferSize];
+        setg(fBuffer+4, fBuffer+4, fBuffer+4);
+        open(name);
+    }
+    ~izstream() { izstream::close(); delete [] fBuffer; }
+
+    int is_open() { return fFile!=0; }
+
+    // --------------------------------------------------------------------------
+    //
+    // Open a file by name. Test if it is open like for an ifstream
+    // It doesn't matter whether the file is gzip compressed or not.
+    //
+    void open(const char* name)
+    {
+        if (is_open())
+        {
+            clear(rdstate()|std::ios::failbit);
+            return;
+        }
+
+        fFile = gzopen(name, "rb");
+        if (fFile == 0)
+        {
+            clear(rdstate()|std::ios::failbit);
+            return;
+        }
+    }
+    // --------------------------------------------------------------------------
+    //
+    // Close an open file.
+    //
+    void close()
+    {
+        if (!is_open())
+            return;
+
+        if (gzclose(fFile) != Z_OK)
+            clear(rdstate()|std::ios::failbit);
+
+        fFile = 0;
+    }
+
+    std::streambuf::pos_type seekoff(std::streambuf::off_type offset, std::ios_base::seekdir dir,
+                                     std::ios_base::openmode = std::ios_base::in)
+    {
+        // Using a switch instead results in:
+        //  In member function `virtual std::streampos izstream::seekoff(long int, std::_Ios_Seekdir, std::_Ios_Openmode)':
+        //  warning: enumeration value `_M_ios_seekdir_end' not handled in switch
+        //  warning: case value `0' not in enumerated type `_Ios_Seekdir'
+        //  warning: case value `1' not in enumerated type `_Ios_Seekdir'
+        //  warning: case value `2' not in enumerated type `_Ios_Seekdir'
+
+        if (dir==std::ios::end)
+        {
+            clear(rdstate()|std::ios::failbit);
+            return EOF;
+        }
+
+        // We only do relative seeking to avoid unnecessary decompression
+        // of the whole file
+        if (dir==std::ios::beg)
+            offset -= tellg();
+
+        // Calculate future position in streambuffer
+        const char *ptr = gptr()+offset;
+
+        // This is the number of bytes still available in the buffer
+        const size_t sbuf = egptr()-gptr();
+
+        // Check if the new position will still be in the buffer
+        // In this case the target data was already decompressed.
+        if (ptr>=eback() && ptr<egptr())
+        {
+            // Absolute position in z-stream
+            const z_off_t zpos = gztell(fFile)-sbuf; //gzseek(fFile, 0, SEEK_CUR);
+
+            gbump(offset);
+
+            return zpos+offset;
+        }
+
+        const streampos pos = gzseek(fFile, offset-sbuf, SEEK_CUR);
+
+        // Buffer is empty - force refilling
+        setg(fBuffer+4, fBuffer+4, fBuffer+4);
+
+        return pos<0 ? streampos(EOF) : pos;
+
+        /*
+         // SEEK_END not supported by zlib
+         if (dir==ios::end)
+         {
+             // Position in z-stream
+             const z_off_t zpos = gzseek(fFile, offset, SEEK_END);
+             if (zpos<0)
+                 return EOF;
+
+             return fill_buffer()==EOF ? EOF : zpos;
+         }
+         */
+        return EOF;
+    }
+
+    std::streambuf::pos_type seekpos(std::streambuf::pos_type pos,
+                                     std::ios_base::openmode = std::ios_base::in)
+    {
+        return seekoff(pos, std::ios::beg);
+    }
+};
+
+#endif
Index: /fact/tools/pyscripts/pyfact/makelibs.C
===================================================================
--- /fact/tools/pyscripts/pyfact/makelibs.C	(revision 17689)
+++ /fact/tools/pyscripts/pyfact/makelibs.C	(revision 17690)
@@ -26,8 +26,8 @@
         gSystem->Load("/usr/lib/x86_64-linux-gnu/libz.so");
     }
-    gROOT->ProcessLine(".L extern_Mars_mcore/izstream.h+O");
+    gROOT->ProcessLine(".L izstream.h+O");
     gROOT->ProcessLine(".L fits.h+O");
-    gROOT->ProcessLine(".L extern_Mars_mcore/zfits.h+O");
-    gROOT->ProcessLine(".L extern_Mars_mcore/factfits.h+O");
+    gROOT->ProcessLine(".L zfits.h+O");
+    gROOT->ProcessLine(".L factfits.h+O");
     gROOT->ProcessLine(".L calfactfits.h+O");
 }
Index: /fact/tools/pyscripts/pyfact/pyfact.py
===================================================================
--- /fact/tools/pyscripts/pyfact/pyfact.py	(revision 17689)
+++ /fact/tools/pyscripts/pyfact/pyfact.py	(revision 17690)
@@ -15,7 +15,7 @@
 # having it in PYTHONPATH is *not* sufficient
 gSystem.Load('fits_h.so')
-gSystem.Load('extern_Mars_mcore/izstream_h.so')
-gSystem.Load('extern_Mars_mcore/zfits_h.so')
-gSystem.Load('extern_Mars_mcore/factfits_h.so')
+gSystem.Load('izstream_h.so')
+gSystem.Load('zfits_h.so')
+gSystem.Load('factfits_h.so')
 gSystem.Load('calfactfits_h.so')
 from ROOT import *
Index: /fact/tools/pyscripts/pyfact/zfits.h
===================================================================
--- /fact/tools/pyscripts/pyfact/zfits.h	(revision 17690)
+++ /fact/tools/pyscripts/pyfact/zfits.h	(revision 17690)
@@ -0,0 +1,697 @@
+/*
+ * zfits.h
+ *
+ *  Created on: May 16, 2013
+ *      Author: lyard
+ */
+
+#ifndef MARS_zfits
+#define MARS_zfits
+
+#include "fits.h"
+#include "huffman.h"
+
+#include "FITS.h"
+
+class zfits : public fits
+{
+public:
+
+    // Basic constructor: opens file `fname` and parses table `tableName`
+    // via the base-class Constructor(). `force` is forwarded to the base
+    // class (presumably to tolerate a damaged file -- confirm in fits.h).
+    // NOTE: compression structures are initialized lazily on the first
+    // ReadBinaryRow() call, hence the commented-out InitCompressionReading().
+    zfits(const std::string& fname, const std::string& tableName="", bool force=false)
+        : fCatalogInitialized(false), fNumTiles(0), fNumRowsPerTile(0), fCurrentRow(-1), fHeapOff(0), fTileSize(0)
+    {
+        open(fname.c_str());
+        Constructor(fname, "", tableName, force);
+//        InitCompressionReading();
+    }
+
+    // Alternative constructor: same as above, but additionally passes an
+    // output file name `fout` to the base class (used to write a copy of
+    // the data being read -- see fCopy usage below).
+    // Compression structures are again initialized lazily.
+    zfits(const std::string& fname, const std::string& fout, const std::string& tableName, bool force=false)
+        : fCatalogInitialized(false), fNumTiles(0), fNumRowsPerTile(0), fCurrentRow(-1), fHeapOff(0), fTileSize(0)
+    {
+        open(fname.c_str());
+        Constructor(fname, fout, tableName, force);
+//        InitCompressionReading();
+    }
+
+    //  Skip the next row without staging any data.
+    //  For a compressed table only the row counter needs to be advanced;
+    //  uncompressed tables are delegated to the base-class implementation.
+    bool SkipNextRow()
+    {
+        if (fTable.is_compressed)
+        {
+            ++fRow;
+            return true;
+        }
+
+        return fits::SkipNextRow();
+    }
+
+    // File integrity check: in addition to the base-class checks, compare
+    // the checksum of the raw (uncompressed) data accumulated while
+    // reading (fRawsum) against the RAWSUM header keyword, when present.
+    virtual bool IsFileOk() const
+    {
+        bool rawsum = true;
+
+        if (HasKey("RAWSUM"))
+        {
+                std::ostringstream str;
+                str << fRawsum.val();
+                rawsum = (GetStr("RAWSUM") == str.str());
+        }
+
+        return fits::IsFileOk() && rawsum;
+    };
+
+    // Number of table rows. For a compressed table the real row count is
+    // stored in ZNAXIS2 (NAXIS2 then counts the catalog rows, i.e. tiles);
+    // plain tables use NAXIS2.
+    size_t GetNumRows() const
+    {
+        return fTable.Get<size_t>(fTable.is_compressed ? "ZNAXIS2" : "NAXIS2");
+    }
+    // Width of one uncompressed row in bytes: ZNAXIS1 for compressed
+    // tables, NAXIS1 otherwise.
+    size_t GetBytesPerRow() const
+    {
+        return fTable.Get<size_t>(fTable.is_compressed ? "ZNAXIS1" : "NAXIS1");
+    }
+
+protected:
+
+    //  Stage the requested row into the internal buffer `dest`.
+    //  Uncompressed tables are delegated to fits::StageRow(); compressed
+    //  tables go through the tile-based ReadBinaryRow() path.
+    //  Does NOT return data to users.
+    virtual void StageRow(size_t row, char* dest)
+    {
+        if (!fTable.is_compressed)
+        {
+            fits::StageRow(row, dest);
+            return;
+        }
+        ReadBinaryRow(row, dest);
+    }
+
+private:
+
+    // Do what it takes to initialize the compressed structures:
+    // verify that only the FACT compression scheme is used, read the
+    // tiling keywords, load the tile catalog and allocate the
+    // decompression buffers. Called lazily from ReadBinaryRow().
+    void InitCompressionReading()
+    {
+        fCatalogInitialized = true;
+
+        if (!fTable.is_compressed)
+            return;
+
+        //The constructor may have failed
+        if (!good())
+            return;
+
+        // From here on the table is known to be compressed, so the
+        // original repeated is_compressed checks were redundant.
+        for (auto it=fTable.sorted_cols.cbegin(); it!= fTable.sorted_cols.cend(); it++)
+        {
+            if (it->comp == kCompFACT)
+                continue;
+
+            clear(rdstate()|std::ios::badbit);
+#ifdef __EXCEPTIONS
+            throw std::runtime_error("Only the FACT compression scheme is handled by this reader.");
+#else
+            gLog << ___err___ << "ERROR - Only the FACT compression scheme is handled by this reader." << std::endl;
+            return;
+#endif
+        }
+
+        fColumnOrdering.resize(fTable.sorted_cols.size(), FITS::kOrderByRow);
+
+        //Get compressed specific keywords
+        fNumTiles       = GetInt("NAXIS2");   // one catalog row per tile
+        fNumRowsPerTile = GetInt("ZTILELEN");
+
+        //read the file's catalog
+        ReadCatalog();
+
+        //give it some space for uncompressing
+        AllocateBuffers();
+    }
+
+    // Copy decompressed column data to the location requested by the user.
+    // Compressed tables need a plain byte copy only; uncompressed tables
+    // are delegated to the base-class conversion routine.
+    void MoveColumnDataToUserSpace(char* dest, const char* src, const Table::Column& c)
+    {
+        if (fTable.is_compressed)
+        {
+            memcpy(dest, src, c.num*c.size);
+            return;
+        }
+
+        fits::MoveColumnDataToUserSpace(dest, src, c);
+    }
+
+    bool  fCatalogInitialized;           ///< Has InitCompressionReading() already run? (lazy-init flag)
+
+    std::vector<char> fBuffer;           ///< Store the uncompressed rows of the current tile
+    std::vector<char> fTransposedBuffer; ///< Intermediate buffer to transpose the rows
+    std::vector<char> fCompressedBuffer; ///< Compressed rows as read from disk
+    std::vector<char> fColumnOrdering;   ///< Ordering of the column's rows. Can change from tile to tile.
+
+    size_t fNumTiles;       ///< Total number of tiles
+    size_t fNumRowsPerTile; ///< Number of rows per compressed tile
+    int64_t fCurrentRow;    ///< Current row in memory; signed because we need -1 as "nothing read yet"
+    size_t fShrinkFactor;   ///< Catalog shrink factor (ZSHRINK keyword; sub-tiles per cataloged super tile)
+
+    streamoff fHeapOff;           ///< Offset from the beginning of the file of the binary data
+    streamoff fHeapFromDataStart; ///< Offset from the beginning of the data table
+
+    std::vector<std::vector<std::pair<int64_t, int64_t>>> fCatalog;     ///< Catalog, i.e. the main table that points to the compressed data: (size, offset) per tile and column.
+    std::vector<size_t>                                   fTileSize;    ///< Size in bytes of each compressed tile
+    std::vector<std::vector<size_t>>                      fTileOffsets; ///< Offset from start of tile of a given compressed column
+
+    Checksum fRawsum;   ///< Checksum of the uncompressed, raw data
+
+    // Get buffer space for one tile worth of data. The plain and the
+    // transposed buffers hold fNumRowsPerTile uncompressed rows; the
+    // compressed buffer gets extra headroom for block/tile headers and
+    // checksum padding. All sizes are rounded up to a multiple of 4
+    // (the checksum is computed over this buffer).
+    void AllocateBuffers()
+    {
+        uint32_t buffer_size = fTable.bytes_per_row*fNumRowsPerTile;
+        uint32_t compressed_buffer_size = fTable.bytes_per_row*fNumRowsPerTile +
+            //use a bit more memory for block headers. 256 char coding the compression sequence max.
+            fTable.num_cols*(sizeof(FITS::BlockHeader)+256) +
+            //a bit more for the tile headers
+            sizeof(FITS::TileHeader) +
+            //and a bit more for checksuming
+            8;
+
+        if (buffer_size % 4 != 0)
+            buffer_size += 4 - (buffer_size%4);
+
+        if (compressed_buffer_size % 4 != 0)
+            compressed_buffer_size += 4 - (compressed_buffer_size%4);
+
+        fBuffer.resize(buffer_size);
+
+        fTransposedBuffer.resize(buffer_size);
+        fCompressedBuffer.resize(compressed_buffer_size);
+    }
+
+    // Read catalog data, i.e. the addresses of the compressed data inside
+    // the heap. Also derives the heap offsets, the shrink factor and the
+    // per-tile sizes/offsets, and mirrors catalog plus heap gap to the
+    // copy file if one is open.
+    // FIX: guard against a corrupt ZSHRINK==0 keyword, which previously
+    // caused a division by zero in the modulo check below.
+    void ReadCatalog()
+    {
+        std::vector<char> readBuf(16);
+        fCatalog.resize(fNumTiles);
+
+        const streampos catalogStart = tellg();
+
+        fChkData.reset();
+
+        //do the actual reading
+        for (uint32_t i=0;i<fNumTiles;i++)
+            for (uint32_t j=0;j<fTable.num_cols;j++)
+            {
+                read(readBuf.data(), 2*sizeof(int64_t));
+                fChkData.add(readBuf);
+                //swap the bytes (catalog values are stored big-endian)
+                int64_t tempValues[2] = {0,0};
+                revcpy<8>(reinterpret_cast<char*>(tempValues), readBuf.data(), 2);
+                if (tempValues[0] < 0 || tempValues[1] < 0)
+                {
+                    clear(rdstate()|std::ios::badbit);
+#ifdef __EXCEPTIONS
+                    throw std::runtime_error("Negative value in the catalog");
+#else
+                    gLog << ___err___ << "ERROR - negative value in the catalog" << std::endl;
+                    return;
+#endif
+                }
+                //add catalog entry: (size, offset) of column j in tile i
+                fCatalog[i].emplace_back(tempValues[0], tempValues[1]);
+            }
+
+        //see if there is a gap before heap data
+        fHeapOff = tellg()+fTable.GetHeapShift();
+        fHeapFromDataStart = fNumTiles*fTable.num_cols*2*sizeof(int64_t) + fTable.GetHeapShift();
+
+        //check if the catalog has been shrinked
+        fShrinkFactor = HasKey("ZSHRINK") ? GetUInt("ZSHRINK") : 1;
+
+        //a corrupt ZSHRINK==0 would trigger a division by zero below:
+        //treat it as "not shrinked"
+        if (fShrinkFactor == 0)
+            fShrinkFactor = 1;
+
+        if (fNumRowsPerTile%fShrinkFactor)
+        {
+            clear(rdstate()|std::ios::badbit);
+#ifdef __EXCEPTIONS
+            throw std::runtime_error("Rows per tile and shrink factor do not match");
+#else
+            gLog << ___err___ << "ERROR - Rows per tile and shrink factor do not match" << std::endl;
+            return;
+#endif
+        }
+
+        //fShrinkFactor is guaranteed >=1 here, so divide unconditionally
+        fNumRowsPerTile /= fShrinkFactor;
+
+        //compute the total size of each compressed tile
+        fTileSize.resize(fNumTiles);
+        fTileOffsets.resize(fNumTiles);
+        for (uint32_t i=0;i<fNumTiles;i++)
+        {
+            fTileSize[i] = 0;
+            for (uint32_t j=0;j<fTable.num_cols;j++)
+            {
+                fTileSize[i] += fCatalog[i][j].first;
+                fTileOffsets[i].emplace_back(fCatalog[i][j].second - fCatalog[i][0].second);
+            }
+        }
+
+        if (!fCopy.is_open())
+            return;
+
+        //write catalog and heap gap to target file
+        seekg(catalogStart);
+
+        const size_t catSize = fTable.GetHeapShift() + fTable.total_bytes;
+
+        std::vector<char> buf(catSize);
+        read(buf.data(), catSize);
+
+        fCopy.write(buf.data(), catSize);
+        if (!fCopy)
+            clear(rdstate()|std::ios::badbit);
+    }
+
+    // Overrides the fits.h copy-to-file hook: the actual writing of the
+    // compressed data is done in ReadBinaryRow() because it requires
+    // transient data only available there. Here we only accumulate the
+    // raw-data checksum, and only while rows are read strictly
+    // sequentially (row == fRow+1).
+    virtual void WriteRowToCopyFile(size_t row)
+    {
+        if (row == fRow+1)
+            fRawsum.add(fBufferRow);
+    }
+
+    // Compressed version of the read row, even files with shrunk catalogs
+    // can be read fully sequentially so that streaming, e.g. through
+    // stdout/stdin, is possible.
+    // Returns false if rowNum is out of range, otherwise the stream state.
+    // NOTE(review): assumes fNumRowsPerTile and fShrinkFactor are non-zero
+    // (set up by InitCompressionReading/ReadCatalog) -- confirm for the
+    // case of a corrupt header.
+    bool ReadBinaryRow(const size_t &rowNum, char *bufferToRead)
+    {
+        if (rowNum >= GetNumRows())
+            return false;
+
+        // Lazy initialization of catalog and buffers on first access
+        if (!fCatalogInitialized)
+            InitCompressionReading();
+
+        // Book keeping, where are we?
+        const int64_t requestedTile      = rowNum        / fNumRowsPerTile;
+        const int64_t currentTile        = fCurrentRow   / fNumRowsPerTile;
+
+        const int64_t requestedSuperTile = requestedTile / fShrinkFactor;
+        //const int64_t currentSuperTile   = currentTile   / fShrinkFactor;
+
+        const int64_t requestedSubTile   = requestedTile % fShrinkFactor;
+        //const int64_t currentSubTile     = currentTile   % fShrinkFactor;
+
+        // Is this the first tile we read at all?
+        const bool isFirstTile = fCurrentRow<0;
+
+        // Is this just the next tile in the sequence?
+        const bool isNextTile = requestedTile==currentTile+1 || isFirstTile;
+
+        fCurrentRow = rowNum;
+
+        // Do we have to read a new tile from disk?
+        if (requestedTile!=currentTile || isFirstTile)
+        {
+            //skip to the beginning of the tile
+            const int64_t superTileStart = fCatalog[requestedSuperTile][0].second - sizeof(FITS::TileHeader);
+
+            std::vector<size_t> offsets = fTileOffsets[requestedSuperTile];
+
+            // If this is a sub tile we might have to step forward a bit and
+            // seek for the sub tile. If we were just reading the previous one
+            // we can skip that.
+            if (!isNextTile || isFirstTile)
+            {
+                // step to the beginning of the super tile
+                seekg(fHeapOff+superTileStart);
+
+                // If there are sub tiles we might have to seek through the super tile
+                // (requestedSubTile is non-negative, so the signed/unsigned
+                // comparison below is safe)
+                for (uint32_t k=0; k<requestedSubTile; k++)
+                {
+                    // Read header
+                    FITS::TileHeader header;
+                    read((char*)&header, sizeof(FITS::TileHeader));
+
+                    // Skip to the next header
+                    seekg(header.size-sizeof(FITS::TileHeader), cur);
+                }
+            }
+
+            // this is now the beginning of the sub-tile we want to read
+            const int64_t subTileStart = tellg() - fHeapOff;
+            // calculate the 32 bits offset of the current tile.
+            const uint32_t offset = (subTileStart + fHeapFromDataStart)%4;
+
+            // start of destination buffer (padding comes later)
+            char *destBuffer = fCompressedBuffer.data()+offset;
+
+            // Store the current tile size once known
+            size_t currentTileSize = 0;
+
+            // If this is a request for a sub tile which is not cataloged
+            // recalculate the offsets from the buffer, once read
+            if (requestedSubTile>0)
+            {
+                // Read header
+                read(destBuffer, sizeof(FITS::TileHeader));
+
+                // Get size of tile
+                currentTileSize = reinterpret_cast<FITS::TileHeader*>(destBuffer)->size;
+
+                // now read the remaining bytes of this tile
+                read(destBuffer+sizeof(FITS::TileHeader), currentTileSize-sizeof(FITS::TileHeader));
+
+                // Calculate the offsets recursively
+                offsets[0] = 0;
+
+                //skip through the columns
+                for (size_t i=0; i<fTable.num_cols-1; i++)
+                {
+                    //zero sized column do not have headers. Skip it
+                    if (fTable.sorted_cols[i].num == 0)
+                    {
+                        offsets[i+1] = offsets[i];
+                        continue;
+                    }
+
+                    const char *pos = destBuffer + offsets[i] + sizeof(FITS::TileHeader);
+                    offsets[i+1] = offsets[i] + reinterpret_cast<const FITS::BlockHeader*>(pos)->size;
+                }
+            }
+            else
+            {
+                // If we are reading the first tile of a super tile, all information
+                // is already available.
+                currentTileSize = fTileSize[requestedSuperTile] + sizeof(FITS::TileHeader);
+                read(destBuffer, currentTileSize);
+            }
+
+
+            // If we are reading sequentially, calculate checksum
+            if (isNextTile)
+            {
+                // Padding for checksum calculation
+                memset(fCompressedBuffer.data(),   0, offset);
+                memset(destBuffer+currentTileSize, 0, fCompressedBuffer.size()-currentTileSize-offset);
+                fChkData.add(fCompressedBuffer);
+            }
+
+            // Check if we are writing a copy of the file; a copy can only be
+            // produced while reading strictly sequentially
+            if (isNextTile && fCopy.is_open() && fCopy.good())
+            {
+                fCopy.write(fCompressedBuffer.data()+offset, currentTileSize);
+                if (!fCopy)
+                    clear(rdstate()|std::ios::badbit);
+            }
+            else
+                if (fCopy.is_open())
+                    clear(rdstate()|std::ios::badbit);
+
+
+            // uncompress the buffer; the last tile may hold fewer rows
+            const uint32_t thisRoundNumRows = (GetNumRows()<fCurrentRow + fNumRowsPerTile) ? GetNumRows()%fNumRowsPerTile : fNumRowsPerTile;
+            UncompressBuffer(offsets, thisRoundNumRows, offset+sizeof(FITS::TileHeader));
+
+            // pointer to column (source buffer)
+            const char *src = fTransposedBuffer.data();
+
+            uint32_t i=0;
+            for (auto it=fTable.sorted_cols.cbegin(); it!=fTable.sorted_cols.cend(); it++, i++)
+            {
+                char *buffer = fBuffer.data() + it->offset; // pointer to column (destination buffer)
+
+                switch (fColumnOrdering[i])
+                {
+                case FITS::kOrderByRow:
+                    // regular, "semi-transposed" copy
+                    for (char *dest=buffer; dest<buffer+thisRoundNumRows*fTable.bytes_per_row; dest+=fTable.bytes_per_row) // row-by-row
+                    {
+                        memcpy(dest, src, it->bytes);
+                        src += it->bytes;  // next column
+                    }
+                    break;
+
+                case FITS::kOrderByCol:
+                    // transposed copy
+                    for (char *elem=buffer; elem<buffer+it->bytes; elem+=it->size) // element-by-element (arrays)
+                    {
+                        for (char *dest=elem; dest<elem+thisRoundNumRows*fTable.bytes_per_row; dest+=fTable.bytes_per_row) // row-by-row
+                        {
+                                memcpy(dest, src, it->size);
+                                src += it->size; // next element
+                        }
+                    }
+                    break;
+
+                default:
+                    clear(rdstate()|std::ios::badbit);
+#ifdef __EXCEPTIONS
+                    throw std::runtime_error("Unkown column ordering scheme found");
+#else
+                    gLog << ___err___ << "ERROR - unkown column ordering scheme" << std::endl;
+                    return false;
+#endif
+                    break;
+                };
+            }
+        }
+
+        //Data loaded and uncompressed. Copy it to destination
+        memcpy(bufferToRead, fBuffer.data()+fTable.bytes_per_row*(fCurrentRow%fNumRowsPerTile), fTable.bytes_per_row);
+        return good();
+    }
+
+    // Read a bunch of uncompressed data: a straight copy of numElems
+    // elements of sizeOfElems bytes each from src to dest.
+    // Returns the number of bytes copied.
+    uint32_t UncompressUNCOMPRESSED(char*       dest,
+                                    const char* src,
+                                    uint32_t    numElems,
+                                    uint32_t    sizeOfElems)
+    {
+        const uint32_t numBytes = numElems*sizeOfElems;
+        memcpy(dest, src, numBytes);
+        return numBytes;
+    }
+
+    // Read a bunch of data compressed with the Huffman algorithm.
+    // Layout of `src`: numChunks uint32 compressed sizes (one per chunk),
+    // followed by the concatenated compressed chunks themselves.
+    // Returns the total number of bytes written to `dest`.
+    // NOTE(review): relies on Huffman::Decode() (huffman.h) filling the
+    // `uncompressed` vector -- its exact semantics are not visible here.
+    uint32_t UncompressHUFFMAN16(char*       dest,
+                                 const char* src,
+                                 uint32_t    numChunks)
+    {
+        std::vector<uint16_t> uncompressed;
+
+        //read compressed sizes (one per row)
+        const uint32_t* compressedSizes = reinterpret_cast<const uint32_t*>(src);
+        src += sizeof(uint32_t)*numChunks;
+
+        //uncompress the rows, one by one
+        uint32_t sizeWritten = 0;
+        for (uint32_t j=0;j<numChunks;j++)
+        {
+            Huffman::Decode(reinterpret_cast<const unsigned char*>(src), compressedSizes[j], uncompressed);
+
+            memcpy(dest, uncompressed.data(), uncompressed.size()*sizeof(uint16_t));
+
+            sizeWritten += uncompressed.size()*sizeof(uint16_t);
+            dest        += uncompressed.size()*sizeof(uint16_t);
+            src         += compressedSizes[j];
+        }
+        return sizeWritten;
+    }
+
+    // Apply the inverse transform of the integer smoothing, in place:
+    // from the third sample on, add back half the sum of the two
+    // (already restored) preceding samples.
+    // Returns the number of bytes covered (numElems 16-bit samples).
+    uint32_t UnApplySMOOTHING(int16_t*   data,
+                              uint32_t   numElems)
+    {
+        for (uint32_t idx=2; idx<numElems; idx++)
+            data[idx] += (data[idx-1] + data[idx-2])/2;
+
+        return numElems*sizeof(uint16_t);
+    }
+
+    // Data has been read from disk. Uncompress it !
+    // Decompresses one tile from fCompressedBuffer into fTransposedBuffer,
+    // column by column. `offsets[i]` is the start of column i relative to
+    // `offset` (start of the block data inside the buffer); the processings
+    // listed in each BlockHeader are undone in reverse order, and only the
+    // last one (j==0) advances the destination pointer.
+    void UncompressBuffer(const std::vector<size_t> &offsets,
+                          const uint32_t &thisRoundNumRows,
+                          const uint32_t offset)
+    {
+        char *dest = fTransposedBuffer.data();
+
+        //uncompress column by column
+        for (uint32_t i=0; i<fTable.sorted_cols.size(); i++)
+        {
+            const fits::Table::Column &col = fTable.sorted_cols[i];
+            if (col.num == 0)
+                continue;
+
+            //get the compression flag
+            const int64_t compressedOffset = offsets[i]+offset;
+
+            const FITS::BlockHeader* head = reinterpret_cast<FITS::BlockHeader*>(&fCompressedBuffer[compressedOffset]);
+
+            //remember the ordering: it may change from tile to tile
+            fColumnOrdering[i] = head->ordering;
+
+            const uint32_t numRows = (head->ordering==FITS::kOrderByRow) ? thisRoundNumRows : col.num;
+            const uint32_t numCols = (head->ordering==FITS::kOrderByCol) ? thisRoundNumRows : col.num;
+
+            //payload starts after the header and its variable-length processings list
+            const char *src = fCompressedBuffer.data()+compressedOffset+sizeof(FITS::BlockHeader)+sizeof(uint16_t)*head->numProcs;
+
+            for (int32_t j=head->numProcs-1;j >= 0; j--)
+            {
+                uint32_t sizeWritten=0;
+
+                switch (head->processings[j])
+                {
+                case FITS::kFactRaw:
+                    sizeWritten = UncompressUNCOMPRESSED(dest, src, numRows*numCols, col.size);
+                    break;
+
+                case FITS::kFactSmoothing:
+                    sizeWritten = UnApplySMOOTHING(reinterpret_cast<int16_t*>(dest), numRows*numCols);
+                    break;
+
+                case FITS::kFactHuffman16:
+                    sizeWritten = UncompressHUFFMAN16(dest, src, numRows);
+                    break;
+
+                default:
+                    clear(rdstate()|std::ios::badbit);
+
+                    std::ostringstream str;
+                    str << "Unkown processing applied to data. Col " << i << " proc " << j << " out of " << (int)head->numProcs;
+#ifdef __EXCEPTIONS
+                    throw std::runtime_error(str.str());
+#else
+                    gLog << ___err___ << "ERROR - Unknown processing applied to data. Aborting" << std::endl;
+                    return;
+#endif
+                }
+                //increment destination counter only when processing done.
+                if (j==0)
+                    dest+= sizeWritten;
+            }
+        }
+    }
+
+    // Walk the whole heap tile by tile, rebuilding the catalog from the
+    // TILE/block headers actually present on disk, and verify that it
+    // agrees with the header-derived fCatalog. With update_catalog=true
+    // the reconstructed catalog replaces fCatalog instead of being
+    // compared. Sets badbit (or throws, with __EXCEPTIONS) on mismatch.
+    void CheckIfFileIsConsistent(bool update_catalog=false)
+    {
+        //goto start of heap
+        const streamoff whereAreWe = tellg();
+        seekg(fHeapOff);
+
+        //init number of rows to zero
+        uint64_t numRows = 0;
+
+        //get number of columns from header
+        const size_t numCols = fTable.num_cols;
+
+        std::vector<std::vector<std::pair<int64_t, int64_t> > > catalog;
+
+        FITS::TileHeader tileHead;
+        FITS::BlockHeader columnHead;
+
+        streamoff offsetInHeap = 0;
+        //skip through the heap
+        while (true)
+        {
+            read((char*)(&tileHead), sizeof(FITS::TileHeader));
+            //end of file
+            if (!good())
+                break;
+
+            //padding or corrupt data
+            if (memcmp(tileHead.id, "TILE", 4))
+            {
+                clear(rdstate()|std::ios::badbit);
+                break;
+            }
+
+            //a new tile begins here; stored offsets point just past the
+            //tile header, matching the on-disk catalog read in ReadCatalog()
+            catalog.emplace_back();
+            offsetInHeap += sizeof(FITS::TileHeader);
+
+            //skip through the columns
+            for (size_t i=0;i<numCols;i++)
+            {
+                //zero sized column do not have headers. Skip it
+                if (fTable.sorted_cols[i].num == 0)
+                {
+                    catalog.back().emplace_back(0,0);
+                    continue;
+                }
+
+                //read column header
+                read((char*)(&columnHead), sizeof(FITS::BlockHeader));
+
+                //corrupted tile
+                if (!good())
+                    break;
+
+                catalog.back().emplace_back((int64_t)(columnHead.size),offsetInHeap);
+                offsetInHeap += columnHead.size;
+                seekg(fHeapOff+offsetInHeap);
+            }
+
+            //if we ain't good, this means that something went wrong inside the current tile.
+            if (!good())
+            {
+                catalog.pop_back();
+                break;
+            }
+            //current tile is complete. Add rows
+            numRows += tileHead.numRows;
+        }
+
+        if (numRows != fTable.num_rows)
+        {
+            clear(rdstate()|std::ios::badbit);
+            std::ostringstream str;
+            str << "Heap data does not agree with header: " << numRows << " calculated vs " << fTable.num_rows << " from header.";
+#ifdef __EXCEPTIONS
+                    throw std::runtime_error(str.str());
+#else
+                    gLog << ___err___ << "ERROR - " << str.str() << std::endl;
+                    return;
+#endif
+        }
+
+        if (update_catalog)
+        {
+            fCatalog = catalog;
+            //clear the bad bit before seeking back (we hit eof)
+            clear();
+            seekg(whereAreWe);
+            return;
+        }
+
+        //compare the reconstructed catalog against the one from the header
+        if (catalog.size() != fCatalog.size())
+        {
+                    clear(rdstate()|std::ios::badbit);
+#ifdef __EXCEPTIONS
+                    throw std::runtime_error("Heap data does not agree with header.");
+#else
+                    gLog << ___err___ << "ERROR - Heap data does not agree with header." << std::endl;
+                    return;
+#endif
+        }
+
+        for (uint32_t i=0;i<catalog.size(); i++)
+            for (uint32_t j=0;j<numCols;j++)
+            {
+                if (catalog[i][j].first  != fCatalog[i][j].first ||
+                    catalog[i][j].second != fCatalog[i][j].second)
+                {
+                    clear(rdstate()|std::ios::badbit);
+#ifdef __EXCEPTIONS
+                    throw std::runtime_error("Heap data does not agree with header.");
+#else
+                    gLog << ___err___ << "ERROR - Heap data does not agree with header." << std::endl;
+                    return;
+#endif
+                }
+            }
+        //go back to start of heap
+        //clear the bad bit before seeking back (we hit eof)
+        clear();
+        seekg(whereAreWe);
+    }
+
+};//class zfits
+
+#endif 
