Index: /trunk/Mars/mcore/zfits.h
===================================================================
--- /trunk/Mars/mcore/zfits.h	(revision 17401)
+++ /trunk/Mars/mcore/zfits.h	(revision 17402)
@@ -20,5 +20,5 @@
     // Basic constructor
     zfits(const std::string& fname, const std::string& tableName="", bool force=false)
-        : fCatalogInitialized(false), fNumTiles(0), fNumRowsPerTile(0), fCurrentRow(-1), fHeapOff(0), fTileSize(0)
+        : fCatalogInitialized(false), fNumTiles(0), fNumRowsPerTile(0), fCurrentRow(-2), fHeapOff(0), fTileSize(0)
     {
         open(fname.c_str());
@@ -29,5 +29,5 @@
     // Alternative contstructor
     zfits(const std::string& fname, const std::string& fout, const std::string& tableName, bool force=false)
-        : fCatalogInitialized(false), fNumTiles(0), fNumRowsPerTile(0), fCurrentRow(-1), fHeapOff(0), fTileSize(0)
+        : fCatalogInitialized(false), fNumTiles(0), fNumRowsPerTile(0), fCurrentRow(-2), fHeapOff(0), fTileSize(0)
     {
         open(fname.c_str());
@@ -129,7 +129,4 @@
         //give it some space for uncompressing
         AllocateBuffers();
-
-        //check that heap agrees with head
-        //CheckIfFileIsConsistent();
     }
 
@@ -156,4 +153,5 @@
     size_t fNumRowsPerTile; ///< Number of rows per compressed tile
     int64_t fCurrentRow;    ///< current row in memory signed because we need -1
+    size_t fShrinkFactor;   ///< shrink factor
 
     streamoff fHeapOff;           ///< offset from the beginning of the file of the binary data
@@ -228,14 +226,19 @@
 
         //check if the catalog has been shrinked
-        uint32_t shrink_factor = 1;
-        if (HasKey("ZSHRINK"))
-                shrink_factor = GetInt("ZSHRINK");
-
-        if (shrink_factor != 1)
-        {
-            CheckIfFileIsConsistent(true);
-            fNumTiles = fCatalog.size();
-            fNumRowsPerTile /= shrink_factor;
-        }
+        fShrinkFactor = HasKey("ZSHRINK") ? GetUInt("ZSHRINK") : 1;
+
+        if (fNumRowsPerTile%fShrinkFactor)
+        {
+            clear(rdstate()|std::ios::badbit);
+#ifdef __EXCEPTIONS
+            throw std::runtime_error("Rows per tile and shrink factor do not match");
+#else
+            gLog << ___err___ << "ERROR - Rows per tile and shrink factor do not match" << std::endl;
+            return;
+#endif
+        }
+
+        if (fShrinkFactor>0)
+            fNumRowsPerTile /= fShrinkFactor;
 
         //compute the total size of each compressed tile
@@ -276,5 +279,7 @@
     }
 
-    // Compressed version of the read row
+    // Compressed version of the read row, even files with shrunk catalogs
+    // can be read fully sequentially so that streaming, e.g. through
+    // stdout/stdin, is possible.
     bool ReadBinaryRow(const size_t &rowNum, char *bufferToRead)
     {
@@ -285,41 +290,111 @@
             InitCompressionReading();
 
-        const uint32_t requestedTile = rowNum/fNumRowsPerTile;
-        const uint32_t currentTile   = fCurrentRow/fNumRowsPerTile;
-
-        bool addCheckSum = ((requestedTile == currentTile+1) || (fCurrentRow == -1));
+        // Bookkeeping: where are we?
+        const int64_t requestedTile      = rowNum        / fNumRowsPerTile;
+        const int64_t currentTile        = fCurrentRow   / fNumRowsPerTile;
+
+        const int64_t requestedSuperTile = requestedTile / fShrinkFactor;
+        const int64_t currentSuperTile   = currentTile   / fShrinkFactor;
+
+        const int64_t requestedSubTile   = requestedTile % fShrinkFactor;
+        const int64_t currentSubTile     = currentTile   % fShrinkFactor;
 
         fCurrentRow = rowNum;
-        //should we read yet another chunk of data ?
-        if (requestedTile != currentTile)
-        {
-            //read yet another chunk from the file
-            const int64_t sizeToRead = fTileSize[requestedTile] + sizeof(FITS::TileHeader);
-
+
+        // Is this just the next tile in the sequence?
+        const bool isNextTile = requestedTile==currentTile+1;
+
+        // Do we have to read a new tile from disk?
+        if (requestedTile!=currentTile)
+        {
             //skip to the beginning of the tile
-            const int64_t tileStart =  fCatalog[requestedTile][0].second - sizeof(FITS::TileHeader);
-
-            seekg(fHeapOff+tileStart);
-
-            //calculate the 32 bits offset of the current tile.
-            const uint32_t offset = (tileStart + fHeapFromDataStart)%4;
-
-            //point the tile header where it should be
-            //we ain't checking the header now
-//            TileHeader* tHead = reinterpret_cast<TileHeader*>(fCompressedBuffer.data()+offset);
-
-            ZeroBufferForChecksum(fCompressedBuffer, fCompressedBuffer.size()-(sizeToRead+offset+8));
-
-            //read one tile from disk
-            read(fCompressedBuffer.data()+offset, sizeToRead);
-
-            if (addCheckSum)
+            const int64_t superTileStart = fCatalog[requestedSuperTile][0].second - sizeof(FITS::TileHeader);
+
+            std::vector<size_t> offsets = fTileOffsets[requestedSuperTile];
+
+            // If this is a sub tile we might have to step forward a bit and
+            // seek for the sub tile. If we were just reading the previous one
+            // we can skip that.
+            if (!isNextTile)
+            {
+                // step to the beginning of the super tile
+                seekg(fHeapOff+superTileStart);
+
+                // If there are sub tiles we might have to seek through the super tile
+                for (uint32_t k=0; k<requestedSubTile; k++)
+                {
+                    // Read header
+                    FITS::TileHeader header;
+                    read((char*)&header, sizeof(FITS::TileHeader));
+
+                    // Skip to the next header
+                    seekg(header.size-sizeof(FITS::TileHeader), cur);
+                }
+            }
+
+            // this is now the beginning of the sub-tile we want to read
+            const int64_t subTileStart = tellg() - fHeapOff;
+
+            // calculate the 32 bits offset of the current tile.
+            const uint32_t offset = (subTileStart + fHeapFromDataStart)%4;
+
+            // start of destination buffer (padding comes later)
+            char *destBuffer = fCompressedBuffer.data()+offset;
+
+            // Store the current tile size once known
+            size_t currentTileSize = 0;
+
+            // If this is a request for a sub tile which is not cataloged
+            // recalculate the offsets from the buffer, once read
+            if (requestedSubTile>0)
+            {
+                // Read header
+                read(destBuffer, sizeof(FITS::TileHeader));
+
+                // Get size of tile
+                currentTileSize = reinterpret_cast<FITS::TileHeader*>(destBuffer)->size;
+
+                // now read the remaining bytes of this tile
+                read(destBuffer+sizeof(FITS::TileHeader), currentTileSize-sizeof(FITS::TileHeader));
+
+                // Calculate the offsets recursively
+                offsets[0] = 0;
+
+                //skip through the columns
+                for (size_t i=0; i<fTable.num_cols-1; i++)
+                {
+                    //zero sized column do not have headers. Skip it
+                    if (fTable.sorted_cols[i].num == 0)
+                    {
+                        offsets[i+1] = offsets[i];
+                        continue;
+                    }
+
+                    const char *pos = destBuffer + offsets[i] + sizeof(FITS::TileHeader);
+                    offsets[i+1] = offsets[i] + reinterpret_cast<const FITS::BlockHeader*>(pos)->size;
+                }
+            }
+            else
+            {
+                // If we are reading the first tile of a super tile, all information
+                // is already available.
+                currentTileSize = fTileSize[requestedSuperTile] + sizeof(FITS::TileHeader);
+                read(destBuffer, currentTileSize);
+            }
+
+
+            // If we are reading sequentially, calculate the checksum
+            if (isNextTile)
+            {
+                // Padding for checksum calculation
+                memset(fCompressedBuffer.data(),   0, offset);
+                memset(destBuffer+currentTileSize, 0, fCompressedBuffer.size()-currentTileSize-offset);
                 fChkData.add(fCompressedBuffer);
-
-            if (requestedTile == currentTile+1 &&
-                fCopy.is_open() &&
-                fCopy.good())
-            {
-                fCopy.write(fCompressedBuffer.data()+offset, sizeToRead);
+            }
+
+            // Check if we are writing a copy of the file
+            if (isNextTile && fCopy.is_open() && fCopy.good())
+            {
+                fCopy.write(fCompressedBuffer.data()+offset, currentTileSize);
                 if (!fCopy)
                     clear(rdstate()|std::ios::badbit);
@@ -329,8 +404,8 @@
                     clear(rdstate()|std::ios::badbit);
 
+
+            // Uncompress the buffer
             const uint32_t thisRoundNumRows = (GetNumRows()<fCurrentRow + fNumRowsPerTile) ? GetNumRows()%fNumRowsPerTile : fNumRowsPerTile;
-
-            //uncompress it
-            UncompressBuffer(requestedTile, thisRoundNumRows, offset+sizeof(FITS::TileHeader));
+            UncompressBuffer(offsets, thisRoundNumRows, offset+sizeof(FITS::TileHeader));
 
             // pointer to column (source buffer)
@@ -431,5 +506,5 @@
 
     // Data has been read from disk. Uncompress it !
-    void UncompressBuffer(const uint32_t &catalogCurrentRow,
+    void UncompressBuffer(const std::vector<size_t> &offsets,
                           const uint32_t &thisRoundNumRows,
                           const uint32_t offset)
@@ -445,5 +520,5 @@
 
             //get the compression flag
-            const int64_t compressedOffset = fTileOffsets[catalogCurrentRow][i]+offset;
+            const int64_t compressedOffset = offsets[i]+offset;
 
             const FITS::BlockHeader* head = reinterpret_cast<FITS::BlockHeader*>(&fCompressedBuffer[compressedOffset]);
@@ -496,5 +571,5 @@
     {
         //goto start of heap
-        streamoff whereAreWe = tellg();
+        const streamoff whereAreWe = tellg();
         seekg(fHeapOff);
 
@@ -503,5 +578,5 @@
 
         //get number of columns from header
-        size_t numCols = fTable.num_cols;
+        const size_t numCols = fTable.num_cols;
 
         std::vector<std::vector<std::pair<int64_t, int64_t> > > catalog;
@@ -527,5 +602,5 @@
 
             //a new tile begins here
-            catalog.emplace_back(std::vector<std::pair<int64_t, int64_t> >(0));
+            catalog.emplace_back();
             offsetInHeap += sizeof(FITS::TileHeader);
 
