Index: trunk/FACT++/src/fitsCompressor.cc
===================================================================
--- trunk/FACT++/src/fitsCompressor.cc	(revision 16423)
+++ trunk/FACT++/src/fitsCompressor.cc	(revision 16443)
@@ -633,5 +633,5 @@
     {
         _transposedBuffer[i] = new char[_rowWidth*_numRowsPerTile];
-        _compressedBuffer[i] = new char[_rowWidth*_numRowsPerTile + 1024*1024]; //use a bit more memory, in case the compression algorithms uses more
+        _compressedBuffer[i] = new char[_rowWidth*_numRowsPerTile + _columns.size()]; //use a bit more memory for compression flags
         if (_transposedBuffer[i] == NULL || _compressedBuffer[i] == NULL)
             return false;
@@ -736,5 +736,5 @@
     header.push_back(HeaderEntry("CHECKSUM", "'0000000000000000'  ", "Checksum for the whole HDU"));
     header.push_back(HeaderEntry("DATASUM" ,  "         0"         , "Checksum for the data block"));
-    header.push_back(HeaderEntry("EXTNAME" , "'DrsCalib'          ", "name of this binary table extension"));
+    header.push_back(HeaderEntry("EXTNAME" , "'IntCalibration'    ", "name of this binary table extension"));
     header.push_back(HeaderEntry("TTYPE1"  , "'OffsetCalibration' ", "label for field   1"));
     header.push_back(HeaderEntry("TFORM1"  , "'1474560I'          ", "data format of field: 2-byte INTEGER"));
@@ -1071,5 +1071,5 @@
         setHeaderKey(HeaderEntry("TFIELDS", _columns.size(), "number of fields in each row"));
         int64_t heapSize = 0;
-        uint32_t compressedOffset = 0;
+        int64_t compressedOffset = 0;
         for (uint32_t i=0;i<_catalog.size();i++)
             for (uint32_t j=0;j<_catalog[i].size();j++)
@@ -1077,4 +1077,5 @@
                 heapSize += _catalog[i][j].first;
                 //set the catalog offsets to their actual values
+                if (compressedOffset < 0) return false;
                 _catalog[i][j].second = compressedOffset;
                 compressedOffset += _catalog[i][j].first;
@@ -1198,6 +1199,11 @@
         previousHuffmanSize = huffmanOutput.size();
     }
-    memcpy(&dest[huffmanOffset], huffmanOutput.data(), huffmanOutput.size());
-    return huffmanOutput.size() + huffmanOffset;
+    const size_t totalSize = huffmanOutput.size() + huffmanOffset;
+
+    //only copy if the compressed result is strictly smaller than the uncompressed size
+    if (totalSize < numRows*sizeOfElems*numRowElems)
+        memcpy(&dest[huffmanOffset], huffmanOutput.data(), huffmanOutput.size());
+
+    return totalSize;
 }
 
@@ -1731,5 +1737,6 @@
             for (int j=0;j<1440;j++)
             {
-                int thisStartCell = reinterpret_cast<int16_t*>(&buffer[startCellOffset])[j];
+                const int thisStartCell = reinterpret_cast<int16_t*>(&buffer[startCellOffset])[j];
+                if (thisStartCell > -1)
                 for (int k=0;k<numSlices;k++)
                     reinterpret_cast<int16_t*>(&buffer[dataOffset])[numSlices*j + k] -= drsCalib16[1024*j + (thisStartCell+k)%1024];
@@ -1745,5 +1752,10 @@
 
     inFile.close();
-    outFile.close();
+    if (!outFile.close())
+    {
+        cout << "Something went wrong while writing the catalog: negative index" << endl;
+        return false;
+    }
+
     delete[] drsCalib16;
 
@@ -1762,5 +1774,8 @@
 
     //get a compressed reader
-    factfits verifyFile(fileNameOut, tableName, false);
+//TEMP try to copy the file too
+//    string copyName("/scratch/copyFile.fz");
+    string copyName("");
+    factfits verifyFile(fileNameOut, copyName, tableName, false);
 
     //and the header of the compressed file
