/*
 * zofits.h
 *
 * FACT native compressed FITS writer
 * Author: lyard
 */

#include "ofits.h"
#include "zfits.h"
#include "Queue.h"
#include "MemoryManager.h"

#ifdef USE_BOOST_THREADS
#include <boost/thread.hpp>
#endif

#ifndef __MARS__
namespace std
{
#else
using namespace std;
#endif

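// Rough usage sketch (illustration only, not part of the original file; the column layout,
// file name and loop below are made up, and the exact call order may need adjusting to the
// surrounding ofits API):
//
//     zofits writer(1000, 100);                  // pre-reserved tiles, rows per tile
//     writer.SetNumThreads(2);                   // optional: compress in worker threads
//     writer.open("example.fits.fz");
//     writer.AddColumn(1,    'J', "EventNum", "",       "Event identifier");
//     writer.AddColumn(1440, 'I', "Data",     "counts", "Example payload column");
//     writer.WriteTableHeader("DATA");
//     std::vector<char> row(writer.GetBytesPerRow());
//     for (uint32_t i=0; i<num_events; i++)      // fill 'row' with one event, then:
//         writer.WriteRow(row.data(), row.size());
//     writer.close();
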
class zofits : public ofits
{
    /// Override of begin() so that it returns the smallest item in the list instead of the true begin
    template<class S>
    struct QueueMin : std::list<S>
    {
        typename std::list<S>::iterator begin()
        {
            return min_element(std::list<S>::begin(), std::list<S>::end());
        }
    };

    /// Parameters required to write a tile to disk
    struct WriteTarget
    {
        bool operator < (const WriteTarget& other)
        {
            return tile_num < other.tile_num;
        }

        uint32_t tile_num; ///< Tile index of the data (to make sure that tiles are written in the correct order)
        uint32_t size;     ///< Size to write
        shared_ptr<MemoryChunk> data; ///< Memory block to write
    };

    /// Parameters required to compress a tile of data
    struct CompressionTarget
    {
        shared_ptr<MemoryChunk> src;            ///< Original data
        shared_ptr<MemoryChunk> transposed_src; ///< Transposed data
        WriteTarget target;                     ///< Write-out parameters for the compressed data
        uint32_t num_rows;                      ///< Number of rows to compress
    };

public:
    /// constructors
    /// @param numTiles how many data groups should be pre-reserved?
    /// @param rowPerTile how many rows will be grouped together in a single tile
    /// @param maxUsableMem how many bytes of memory can be used by the compression buffers
    zofits(uint32_t numTiles = fgNumTiles,
           uint32_t rowPerTile = fgRowPerTile,
           uint64_t maxUsableMem = fgMaxUsableMem) : ofits(),
        fMemPool(0, maxUsableMem),
        fWriteToDiskQueue(bind(&zofits::WriteBufferToDisk, this, placeholders::_1), false)
    {
        InitMemberVariables(numTiles, rowPerTile, maxUsableMem);
        SetNumThreads(fgNumQueues);
    }

    /// @param fname the target filename
    /// @param numTiles how many data groups should be pre-reserved?
    /// @param rowPerTile how many rows will be grouped together in a single tile
    /// @param maxUsableMem how many bytes of memory can be used by the compression buffers
    zofits(const char* fname,
           uint32_t numTiles = fgNumTiles,
           uint32_t rowPerTile = fgRowPerTile,
           uint64_t maxUsableMem = fgMaxUsableMem) : ofits(fname),
        fMemPool(0, maxUsableMem),
        fWriteToDiskQueue(bind(&zofits::WriteBufferToDisk, this, placeholders::_1), false)
    {
        InitMemberVariables(numTiles, rowPerTile, maxUsableMem);
        SetNumThreads(fgNumQueues);
    }

    /// destructor
    virtual ~zofits()
    {
    }

    /// initialization of member variables
    /// @param nt number of tiles
    /// @param rpt number of rows per tile
    /// @param maxUsableMem max amount of RAM to be used by the compression buffers
    void InitMemberVariables(const uint32_t nt=0, const uint32_t rpt=0, const uint64_t maxUsableMem=0)
    {
        if (nt == 0)
            throw runtime_error("There must be at least 1 tile of data (0 specified). This is required by the FITS standard. Please try again with num_tile >= 1.");

        fCheckOffset = 0;
        fNumQueues   = 0;

        fNumTiles       = nt;
        fNumRowsPerTile = rpt;

        fBuffer           = NULL;
        fRealRowWidth     = 0;
        fCatalogExtraRows = 0;
        fCatalogOffset    = 0;

        fMaxUsableMem = maxUsableMem;
#ifdef __EXCEPTIONS
        fThreadsException = exception_ptr();
#endif
    }

    /// write the header of the binary table
    /// @param name the name of the table to be created
    /// @return the state of the file
    virtual bool WriteTableHeader(const char* name="DATA")
    {
        reallocateBuffers();

        ofits::WriteTableHeader(name);

        if (fNumQueues != 0)
        {
            //start the compression queues
            for (auto it=fCompressionQueues.begin(); it!=fCompressionQueues.end(); it++)
                it->start();

            //start the disk writer
            fWriteToDiskQueue.start();
        }

        //mark that no tile has been written so far
        fLatestWrittenTile = -1;

        return good();
    }

    /// open a new file.
    /// @param filename the name of the file
    /// @param addEXTNAMEKey whether or not the name of the extension should be added
    void open(const char* filename, bool addEXTNAMEKey=true)
    {
        ofits::open(filename, addEXTNAMEKey);

        //add compression-related header entries
        SetBool( "ZTABLE",   true,            "Table is compressed");
        SetInt(  "ZNAXIS1",  0,               "Width of uncompressed rows");
        SetInt(  "ZNAXIS2",  0,               "Number of uncompressed rows");
        SetInt(  "ZPCOUNT",  0,               "");
        SetInt(  "ZHEAPPTR", 0,               "");
        SetInt(  "ZTILELEN", fNumRowsPerTile, "Number of rows per tile");
        SetInt(  "THEAP",    0,               "");
        SetStr(  "RAWSUM",   " 0",            "Checksum of raw little endian data");
        SetFloat("ZRATIO",   0,               "Compression ratio");

        fCatalogExtraRows = 0;
        fRawSum.reset();
    }

    /// Super method. Does nothing as zofits does not know about DrsOffsets
    /// @return the state of the file
    virtual bool WriteDrsOffsetsTable()
    {
        return good();
    }

    /// Returns the number of bytes per uncompressed row
    /// @return number of bytes per uncompressed row
    uint32_t GetBytesPerRow() const
    {
        return fRealRowWidth;
    }

    /// Write the data catalog
    /// @return the state of the file
    bool WriteCatalog()
    {
        const uint32_t one_catalog_row_size = fTable.num_cols*2*sizeof(uint64_t);
        const uint32_t total_catalog_size   = fCatalog.size()*one_catalog_row_size;

        // swap the catalog bytes before writing
        vector<char> swapped_catalog(total_catalog_size);
        uint32_t shift = 0;
        for (auto it=fCatalog.begin(); it!=fCatalog.end(); it++)
        {
            revcpy<sizeof(uint64_t)>(swapped_catalog.data() + shift, (char*)(it->data()), fTable.num_cols*2);
            shift += one_catalog_row_size;
        }

        // first time writing? remember where we are
        if (fCatalogOffset == 0)
            fCatalogOffset = tellp();

        // remember where we came from
        const off_t where_are_we = tellp();

        // write to disk
        seekp(fCatalogOffset);
        write(swapped_catalog.data(), total_catalog_size);
        if (where_are_we != fCatalogOffset)
            seekp(where_are_we);

        // update the checksum
        fCatalogSum.reset();
        fCatalogSum.add(swapped_catalog.data(), total_catalog_size);

        return good();
    }

    /// Applies the DrsOffsets calibration to the data. Does nothing as zofits knows nothing about drsoffsets.
    virtual void DrsOffsetCalibrate(char* )
    {

    }

    /// Grows the catalog in case not enough rows were allocated
    void GrowCatalog()
    {
        uint32_t orig_catalog_size = fCatalog.size();

        fCatalog.resize(fCatalog.size()*2);
        for (uint32_t i=orig_catalog_size; i<fCatalog.size(); i++)
        {
            fCatalog[i].resize(fTable.num_cols);
            for (auto it=fCatalog[i].begin(); it!=fCatalog[i].end(); it++)
                *it = CatalogEntry(0,0);
        }

        fCatalogExtraRows += orig_catalog_size;
        fNumTiles         += orig_catalog_size;
    }

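    // Overview of the data flow implemented by the methods below (a summary of this class,
    // not additional behaviour): WriteRow() copies each incoming row into fBuffer; once
    // fNumRowsPerTile rows have accumulated, SetNextCompression() packages the buffers into
    // a CompressionTarget. With fNumQueues == 0 the tile is transposed, compressed and
    // written in-line; otherwise the target is posted to the least loaded compression queue,
    // CompressBuffer() runs in that worker thread and posts a WriteTarget to
    // fWriteToDiskQueue, whose QueueMin ordering lets WriteBufferToDisk() write the tiles
    // strictly in tile_num order.
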
    /// write one row of data
    /// @param ptr the source buffer
    /// @param cnt the number of bytes to write
    /// @return the state of the file. WARNING: with multithreading, this will most likely be the state of the file before the data is actually written
    bool WriteRow(const void* ptr, size_t cnt, bool = true)
    {
        if (cnt != fRealRowWidth)
        {
#ifdef __EXCEPTIONS
            throw runtime_error("Wrong size of row given to WriteRow");
#else
            gLog << ___err___ << "ERROR - Wrong size of row given to WriteRow" << endl;
            return false;
#endif
        }

        if (fTable.num_rows >= fNumRowsPerTile*fNumTiles)
        {
            // GrowCatalog();
#ifdef __EXCEPTIONS
            throw runtime_error("Maximum number of rows exceeded for this file");
#else
            gLog << ___err___ << "ERROR - Maximum number of rows exceeded for this file" << endl;
            return false;
#endif
        }

        //copy the current row to the pool of rows waiting for compression
        char* target_location = fBuffer + fRealRowWidth*(fTable.num_rows%fNumRowsPerTile);
        memcpy(target_location, ptr, fRealRowWidth);

        //for now, make an extra copy of the data for the RAWSUM checksumming.
        //Ideally this should be moved to the threads.
        //However, because the RAWSUM must be calculated before the tile is transposed, I am not sure whether
        //one extra memcpy per row written is worse than 100 rows checksummed when the tile is full....
        const uint32_t rawOffset = (fTable.num_rows*fRealRowWidth)%4;
        char* buffer = fRawSumBuffer.data() + rawOffset;
        auto ib = fRawSumBuffer.begin();
        auto ie = fRawSumBuffer.rbegin();
        *ib++ = 0;
        *ib++ = 0;
        *ib++ = 0;
        *ib   = 0;

        *ie++ = 0;
        *ie++ = 0;
        *ie++ = 0;
        *ie   = 0;

        memcpy(buffer, ptr, fRealRowWidth);

        fRawSum.add(fRawSumBuffer, false);

        DrsOffsetCalibrate(target_location);

        fTable.num_rows++;

        if (fTable.num_rows % fNumRowsPerTile == 0)
        {
            CompressionTarget compress_target;
            SetNextCompression(compress_target);

            if (fNumQueues == 0)
            { //no worker threads: do everything in-line
                uint64_t size_to_write = CompressBuffer(compress_target);

                WriteTarget write_target;
                write_target.size     = size_to_write;
                write_target.data     = compress_target.target.data;
                write_target.tile_num = compress_target.target.tile_num;

                return WriteBufferToDisk(write_target);
            }
            else
            {
                //post the tile to the least loaded queue; if all queues are empty, queue 0 is used
                uint32_t min_index     = 0;
                uint32_t min_size      = numeric_limits<uint32_t>::max();
                uint32_t current_index = 0;

                for (auto it=fCompressionQueues.begin(); it!=fCompressionQueues.end(); it++)
                {
                    if (it->size() < min_size)
                    {
                        min_index = current_index;
                        min_size  = it->size();
                    }
                    current_index++;
                }

                if (!fCompressionQueues[min_index].post(compress_target))
                    throw runtime_error("The compression queues are not started. Did you close the file before writing this row?");
            }
        }

        return good();
    }

    /// update the real number of rows
    void FlushNumRows()
    {
        SetInt("NAXIS2", (fTable.num_rows + fNumRowsPerTile-1)/fNumRowsPerTile);
        SetInt("ZNAXIS2", fTable.num_rows);
        FlushHeader();
    }

    /// Set up the environment to compress yet another tile of data
    /// @param target the struct that receives the parameters of the next compression
    void SetNextCompression(CompressionTarget& target)
    {
        //get space for the transposed data
        shared_ptr<MemoryChunk> transposed_data = fMemPool.malloc();

        //fill up the write-to-disk target
        WriteTarget write_target;
        write_target.tile_num = (fTable.num_rows-1)/fNumRowsPerTile;
        write_target.size     = 0;
        write_target.data     = fMemPool.malloc();

        //fill up the compression target
        target.src            = fSmartBuffer;
        target.transposed_src = transposed_data;
        target.target         = write_target;
        target.num_rows       = fTable.num_rows;

        //get a new buffer to host the incoming data
        fSmartBuffer = fMemPool.malloc();
        fBuffer      = fSmartBuffer.get()->get();
    }

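    // My reading of the shrinking strategy below: when GrowCatalog() has added entries beyond
    // the space reserved at the beginning of the file, every group of shrink_factor consecutive
    // tile entries is merged into a single catalog row (per-column sizes summed, first offset
    // kept), and ZTILELEN, NAXIS2 and THEAP are updated so that the shrunk catalog still
    // describes the heap written so far.
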
    /// Shrinks a catalog that is too long to fit into the reserved space at the beginning of the file.
    void ShrinkCatalog()
    {
        //did we write more rows than the catalog can host?
        if (fCatalogExtraRows != 0)
        {
            //how many rows can the regular catalog host?
            const uint32_t max_regular_rows = (fCatalog.size() - fCatalogExtraRows)*fNumRowsPerTile;
            //what's the shrink factor to be applied?
            const uint32_t shrink_factor = fTable.num_rows/max_regular_rows + ((fTable.num_rows%max_regular_rows) ? 1 : 0);

            //shrink the catalog!
            for (uint32_t i=0; i<fTable.num_rows/fNumRowsPerTile; i+=shrink_factor)
            {//add the elements one by one, so that the empty ones at the end (i.e. fTable.num_rows%shrink_factor) do not create havoc
                const uint32_t target_catalog_row = i/shrink_factor;
                //move data from the current row (i) to the target row
                for (uint32_t j=0; j<fTable.num_cols; j++)
                {
                    fCatalog[target_catalog_row][j].second = fCatalog[i][j].second;
                    fCatalog[target_catalog_row][j].first  = 0;
                    uint64_t last_size   = fCatalog[i][j].first;
                    uint64_t last_offset = fCatalog[i][j].second;

                    for (uint32_t k=1; k<shrink_factor; k++)
                    {
                        if (fCatalog[i+k][j].second != 0)
                        {
                            fCatalog[target_catalog_row][j].first += fCatalog[i+k][j].second - last_offset;
                        }
                        else
                        {
                            fCatalog[target_catalog_row][j].first += last_size;
                            break;
                        }
                        last_size   = fCatalog[i+k][j].first;
                        last_offset = fCatalog[i+k][j].second;
                    }
                }
            }

            fCatalog.resize(fCatalog.size() - fCatalogExtraRows);

            //update the header keywords
            const uint32_t new_num_rows_per_tiles = fNumRowsPerTile*shrink_factor;
            const uint32_t new_num_tiles_written  = (fTable.num_rows + new_num_rows_per_tiles-1)/new_num_rows_per_tiles;
            SetInt("THEAP",    new_num_tiles_written*2*sizeof(int64_t)*fTable.num_cols);
            SetInt("NAXIS2",   new_num_tiles_written);
            SetInt("ZTILELEN", new_num_rows_per_tiles);
            cout << "New num rows per tiles: " << new_num_rows_per_tiles << " shrink factor: " << shrink_factor << endl;
            cout << "Num tiles written: " << new_num_tiles_written << endl;
        }
    }

    /// close an open file.
    /// @return the state of the file
    bool close()
    {
        // stop compression and write threads
        for (auto it=fCompressionQueues.begin(); it!=fCompressionQueues.end(); it++)
            it->wait();

        fWriteToDiskQueue.wait();

        if (tellp() < 0)
        {
#ifdef __EXCEPTIONS
            throw runtime_error("Looks like the file has been closed already");
#else
            return false;
#endif
        }

#ifdef __EXCEPTIONS
        //check if something happened while the compression threads were working
        if (fThreadsException != exception_ptr())
        {
            //if so, re-throw the exception that was generated
            rethrow_exception(fThreadsException);
        }
#endif

        //write the last tile of data (if any)
        if (fTable.num_rows%fNumRowsPerTile != 0)
        {
            CompressionTarget compress_target;
            SetNextCompression(compress_target);

            //set the number of queues to zero before calling CompressBuffer so that it runs in-line
            int32_t backup_num_queues = fNumQueues;
            fNumQueues = 0;
            uint64_t size_to_write = CompressBuffer(compress_target);
            fNumQueues = backup_num_queues;

            WriteTarget write_target;
            write_target.size     = size_to_write;
            write_target.data     = compress_target.target.data;
            write_target.tile_num = compress_target.target.tile_num;

            if (!WriteBufferToDisk(write_target))
                throw runtime_error("Something went wrong while writing the last tile...");
        }

        AlignTo2880Bytes();

        //update the header keywords
        SetInt("ZNAXIS1", fRealRowWidth);
        SetInt("ZNAXIS2", fTable.num_rows);

        SetInt("ZHEAPPTR", fCatalog.size()*fTable.num_cols*sizeof(uint64_t)*2);

        const uint32_t total_num_tiles_written = (fTable.num_rows + fNumRowsPerTile-1)/fNumRowsPerTile;
        const uint32_t total_catalog_width     = 2*sizeof(int64_t)*fTable.num_cols;

        SetInt("THEAP",  total_num_tiles_written*total_catalog_width);
        SetInt("NAXIS1", total_catalog_width);
        SetInt("NAXIS2", total_num_tiles_written);

        ostringstream str;
        str << fRawSum.val();
        SetStr("RAWSUM", str.str());

        int64_t heap_size         = 0;
        int64_t compressed_offset = 0;

        for (uint32_t i=0; i<total_num_tiles_written; i++)
        {
            compressed_offset += sizeof(TileHeader);
            heap_size         += sizeof(TileHeader);
            for (uint32_t j=0; j<fCatalog[i].size(); j++)
            {
                heap_size += fCatalog[i][j].first;
                fCatalog[i][j].second = compressed_offset;
                compressed_offset += fCatalog[i][j].first;
                if (fCatalog[i][j].first == 0)
                    fCatalog[i][j].second = 0;
            }
        }

        const float compression_ratio = (float)(fRealRowWidth*fTable.num_rows)/(float)heap_size;
        SetFloat("ZRATIO", compression_ratio);

        //add to the heap size the size of the gap between the catalog and the actual heap
        heap_size += (fCatalog.size() - total_num_tiles_written)*fTable.num_cols*sizeof(uint64_t)*2;

        SetInt("PCOUNT", heap_size, "size of special data area");

        //just for updating the fCatalogSum value
        WriteCatalog();

        fDataSum += fCatalogSum;

        const Checksum checksm = UpdateHeaderChecksum();

        ofstream::close();

        if ((checksm+fDataSum).valid())
            return true;

        ostringstream sout;
        sout << "Checksum (" << std::hex << checksm.val() << ") invalid.";
#ifdef __EXCEPTIONS
        throw runtime_error(sout.str());
#else
        gLog << ___err___ << "ERROR - " << sout.str() << endl;
        return false;
#endif
    }

    /// Overload of the ofits method. Just calls the zofits-specific one with the default, uncompressed options for this column
    bool AddColumn(uint32_t cnt, char typechar, const string& name, const string& unit, const string& comment="", bool addHeaderKeys=true)
    {
        return AddColumn(kFactRaw, cnt, typechar, name, unit, comment, addHeaderKeys);
    }

    /// Overload of the simplified compressed version
    bool AddColumn(const FITS::Compression &comp, uint32_t cnt, char typechar, const string& name, const string& unit, const string& comment="", bool addHeaderKeys=true)
    {
        if (!ofits::AddColumn(1, 'Q', name, unit, comment, addHeaderKeys))
            return false;

        Table::Column col;
        size_t size = SizeFromType(typechar);

        col.name   = name;
        col.type   = typechar;
        col.num    = cnt;
        col.size   = size;
        col.offset = fRealRowWidth;

        fRealRowWidth += size*cnt;

        fRealColumns.emplace_back(CompressedColumn(col, comp));

        ostringstream strKey, strVal, strCom;
        strKey << "ZFORM" << fRealColumns.size();
        strVal << cnt << typechar;
        strCom << "format of " << name << " [" << CommentFromType(typechar);
        SetStr(strKey.str(), strVal.str(), strCom.str());

        strKey.str("");
        strVal.str("");
        strCom.str("");
        strKey << "ZCTYP" << fRealColumns.size();
        strVal << "FACT";
        strCom << "Compression type FACT";
        SetStr(strKey.str(), strVal.str(), strCom.str());

        return true;
    }

    /// static setters for the default parameters of future objects. For the number of threads, -1 means all available physical cores
    static void SetDefaultNumThreads(int32_t num)     { fgNumQueues    = num;}
    static void SetDefaultNumTiles(uint32_t num)      { fgNumTiles     = num;}
    static void SetDefaultNumRowPerTile(uint32_t num) { fgRowPerTile   = num;}
    static void SetDefaultMaxUsableMem(uint64_t size) { fgMaxUsableMem = size;}

    static int32_t  GetDefaultNumThreads()    { return fgNumQueues;}
    static uint32_t GetDefaultNumTiles()      { return fgNumTiles;}
    static uint32_t GetDefaultNumRowPerTile() { return fgRowPerTile;}
    static uint64_t GetDefaulMaxUsableMem()   { return fgMaxUsableMem;}

    /// Get and set the actual number of threads for this object
    int32_t GetNumThreads() const { return fNumQueues;}
    bool SetNumThreads(int32_t num)
    {
        if (is_open())
        {
#ifdef __EXCEPTIONS
            throw runtime_error("File must be closed before changing the number of compression threads");
#else
            gLog << ___err___ << "ERROR - File must be closed before changing the number of compression threads" << endl;
#endif
            return false;
        }

        //get the number of physically available threads
#ifdef USE_BOOST_THREADS
        int32_t num_available_cores = boost::thread::hardware_concurrency();
#else
        int32_t num_available_cores = thread::hardware_concurrency();
#endif

        // could not detect the number of available cores from system properties...
        // assume that 5 cores are available (4 compression, 1 write)
        if (num_available_cores == 0)
            num_available_cores = 5;

        // warn (or throw, if exceptions are enabled) if more threads than cores are requested
        if (num > num_available_cores)
        {
            ostringstream str;
            str << "You will be using more threads (" << num << ") than available cores (" << num_available_cores << "). Expect sub-optimal performance";
#ifdef __EXCEPTIONS
            throw runtime_error(str.str());
#else
            gLog << ___err___ << "WARNING - " << str.str() << endl;
#endif
        }

        if (num == -1)
            num = num_available_cores-2; // 1 for writing, 1 for the main thread

        if (fCompressionQueues.size() == (uint32_t)num)
            return true;

        //cannot be const, as resize does not want it that way
        Queue<CompressionTarget> queue(bind(&zofits::CompressBuffer, this, placeholders::_1), false);

        //shrink if required
        if ((uint32_t)num < fCompressionQueues.size())
        {
            fCompressionQueues.resize(num, queue);
            return true;
        }

        //grow if required
        fCompressionQueues.resize(num, queue);

        fNumQueues = num;

        return true;
    }

protected:

    /// Allocates the required objects.
    void reallocateBuffers()
    {
        const size_t chunk_size = fRealRowWidth*fNumRowsPerTile + fRealColumns.size()*sizeof(BlockHeader) + sizeof(TileHeader) + 8; //+8 for checksumming
        fMemPool.setChunkSize(chunk_size);

        fSmartBuffer = fMemPool.malloc();
        fBuffer      = fSmartBuffer.get()->get();

        fRawSumBuffer.resize(fRealRowWidth + 4-fRealRowWidth%4); //for checksumming

        //give the catalog enough space
        fCatalog.resize(fNumTiles);
        for (uint32_t i=0; i<fNumTiles; i++)
        {
            fCatalog[i].resize(fRealColumns.size());
            for (auto it=fCatalog[i].begin(); it!=fCatalog[i].end(); it++)
                *it = CatalogEntry(0,0);
        }
    }

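    // Note on the checksum bookkeeping below (my reading of the code): the FITS checksum is
    // accumulated over 4-byte words aligned to the start of the data unit, but a compressed
    // tile rarely ends on a 4-byte boundary. fCheckOffset records the misalignment left over
    // by the previous tile; the next tile is therefore checksummed starting fCheckOffset bytes
    // earlier (the 4 spare bytes in front of each buffer are zeroed for this) and zero-padded
    // on the right to the next multiple of 4, so that the running fDataSum stays word-aligned.
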
    /// Actually does the writing to disk (and checksumming)
    /// @param src the buffer to write
    /// @param sizeToWrite how many bytes should be written
    /// @return the state of the file
    bool writeCompressedDataToDisk(char* src, const uint32_t sizeToWrite)
    {
        char* checkSumPointer = src+4;
        int32_t extraBytes = 0;
        uint32_t sizeToChecksum = sizeToWrite;
        if (fCheckOffset != 0)
        {//should we extend the array to the left?
            sizeToChecksum  += fCheckOffset;
            checkSumPointer -= fCheckOffset;
            memset(checkSumPointer, 0, fCheckOffset);
        }
        if (sizeToChecksum%4 != 0)
        {//should we extend the array to the right?
            extraBytes = 4 - (sizeToChecksum%4);
            memset(checkSumPointer+sizeToChecksum, 0, extraBytes);
            sizeToChecksum += extraBytes;
        }

        //do the checksum
        fDataSum.add(checkSumPointer, sizeToChecksum);

        fCheckOffset = (4 - extraBytes)%4;

        //write the data to disk
        write(src+4, sizeToWrite);

        return good();
    }

    /// Compress a given buffer based on the target. This is the method executed by the threads
    /// @param target the struct hosting the parameters of the compression
    /// @return number of bytes of the compressed data, or always 1 when used by the Queues
    uint32_t CompressBuffer(const CompressionTarget& target)
    {
        uint64_t compressed_size = 0;
#ifdef __EXCEPTIONS
        try
        {
#endif
            //transpose the original data
            copyTransposeTile(target.src.get()->get(), target.transposed_src.get()->get());

            //compress the buffer
            compressed_size = compressBuffer(target.target.data.get()->get(), target.transposed_src.get()->get(), target.num_rows);
#ifdef __EXCEPTIONS
        }
        catch (...)
        {
            fThreadsException = current_exception();
            if (fNumQueues == 0)
                rethrow_exception(fThreadsException);
        }
#endif

        if (fNumQueues == 0)
            return compressed_size;

        //post the result to the writing queue
        //get a copy so that it becomes non-const
        WriteTarget wt;
        wt.tile_num = target.target.tile_num;
        wt.size     = compressed_size;
        wt.data     = target.target.data;

        fWriteToDiskQueue.post(wt);

        // when used by the queue, always return 1 (the return value is not used in that case)
        return 1;
    }

    /// Write one compressed tile to disk. This is the method executed by the writing thread
    /// @param target the struct hosting the write parameters
    bool WriteBufferToDisk(const WriteTarget& target)
    {
        //is this the tile we're supposed to write?
        if (target.tile_num != (uint32_t)(fLatestWrittenTile+1))
            return false;

        fLatestWrittenTile++;

#ifdef __EXCEPTIONS
        try
        {
#endif
            if (!writeCompressedDataToDisk(target.data.get()->get(), target.size))
            {//could not write the data to disk
                ostringstream str;
                str << "An error occurred while writing to disk: ";
                if (eof())
                    str << "End-Of-File";
                if (failbit)
                    str << "Logical error on i/o operation";
                if (badbit)
                    str << "Writing error on i/o operation";
#ifdef __EXCEPTIONS
                throw runtime_error(str.str());
#else
                gLog << ___err___ << "ERROR - " << str.str() << endl;
#endif
            }
#ifdef __EXCEPTIONS
        }
        catch(...)
        {
            fThreadsException = current_exception();
            if (fNumQueues == 0)
                rethrow_exception(fThreadsException);
        }
#endif
        return true;
    }

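    // Sketch of the tile layout produced by compressBuffer() below, as I read the code (the
    // first 4 bytes are only scratch space for the checksum alignment and are not written out):
    //
    //   [4 bytes checksum padding][TileHeader][block header col 0][compressed data col 0]
    //                                         [block header col 1][compressed data col 1] ...
    //
    // For every column, fCatalog[tile][col] records the size of the block (first) and its
    // offset within the tile (second); close() later rewrites the offsets relative to the heap.
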
    /// Compress a given buffer based on its source and destination
    //src cannot be const, as applySMOOTHING is done in place
    /// @param dest the buffer hosting the compressed data
    /// @param src the buffer hosting the transposed data
    /// @param num_rows the number of uncompressed rows in the transposed buffer
    /// @return the number of bytes of the compressed data
    uint64_t compressBuffer(char* dest, char* src, uint32_t num_rows)
    {
        const uint32_t thisRoundNumRows  = (num_rows%fNumRowsPerTile) ? num_rows%fNumRowsPerTile : fNumRowsPerTile;
        const uint32_t currentCatalogRow = (num_rows-1)/fNumRowsPerTile;
        uint32_t offset = 0;

        //skip the checksum reserved area
        dest += 4;

        //skip the 'TILE' marker and tile size entry
        uint64_t compressedOffset = sizeof(TileHeader);

        //now compress each column one by one by calling compression on arrays
        for (uint32_t i=0; i<fRealColumns.size(); i++)
        {
            fCatalog[currentCatalogRow][i].second = compressedOffset;

            if (fRealColumns[i].col.num == 0) continue;

            Compression& head = fRealColumns[i].block_head;

            //remember where this column's block starts, so that the block header can be written (or the block redone uncompressed) later
            const uint64_t previousOffset = compressedOffset;

            //skip the header data
            compressedOffset += head.getSizeOnDisk();

            for (uint32_t j=0; j<head.getNumProcs(); j++)//sequence.size(); j++)
            {
                switch (head.getProc(j))
                {
                case kFactRaw:
                    compressedOffset += compressUNCOMPRESSED(dest + compressedOffset, src + offset, thisRoundNumRows*fRealColumns[i].col.size*fRealColumns[i].col.num);
                    break;
                case kFactSmoothing:
                    applySMOOTHING(src + offset, thisRoundNumRows*fRealColumns[i].col.num);
                    break;
                case kFactHuffman16:
                    if (head.getOrdering() == kOrderByCol)
                        compressedOffset += compressHUFFMAN16(dest + compressedOffset, src + offset, thisRoundNumRows, fRealColumns[i].col.size, fRealColumns[i].col.num);
                    else
                        compressedOffset += compressHUFFMAN16(dest + compressedOffset, src + offset, fRealColumns[i].col.num, fRealColumns[i].col.size, thisRoundNumRows);
                    break;
                }
            }

            //check if the compressed size is larger than the uncompressed one
            if ((head.getProc(0) != kFactRaw) && (compressedOffset - previousOffset > fRealColumns[i].col.size*fRealColumns[i].col.num*thisRoundNumRows+head.getSizeOnDisk()))// && two)
            {//if so, set the raw flag and redo it uncompressed
                // cout << "Redoing uncompressed ! " << endl;
                //de-smooth !
                if (head.getProc(0) == kFactSmoothing)
                    UnApplySMOOTHING(src+offset, fRealColumns[i].col.num*thisRoundNumRows);

                Compression he;

                compressedOffset  = previousOffset + he.getSizeOnDisk();
                compressedOffset += compressUNCOMPRESSED(dest + compressedOffset, src + offset, thisRoundNumRows*fRealColumns[i].col.size*fRealColumns[i].col.num);

                he.SetBlockSize(compressedOffset - previousOffset);
                he.Memcpy(dest+previousOffset);

                offset += thisRoundNumRows*fRealColumns[i].col.size*fRealColumns[i].col.num;

                fCatalog[currentCatalogRow][i].first = compressedOffset - fCatalog[currentCatalogRow][i].second;
                continue;
            }

            head.SetBlockSize(compressedOffset - previousOffset);
            head.Memcpy(dest + previousOffset);

            offset += thisRoundNumRows*fRealColumns[i].col.size*fRealColumns[i].col.num;
            fCatalog[currentCatalogRow][i].first = compressedOffset - fCatalog[currentCatalogRow][i].second;
        }

        TileHeader tile_head(thisRoundNumRows, compressedOffset);
        memcpy(dest, &tile_head, sizeof(TileHeader));

        return compressedOffset;
    }

    /// Transpose a tile to a new buffer
    /// @param src buffer hosting the regular, row-ordered data
    /// @param dest the target buffer that will receive the transposed data
    void copyTransposeTile(const char* src, char* dest)
    {
        const uint32_t thisRoundNumRows = (fTable.num_rows%fNumRowsPerTile) ? fTable.num_rows%fNumRowsPerTile : fNumRowsPerTile;

        //copy the tile and transpose it
        for (uint32_t i=0; i<fRealColumns.size(); i++)
        {
            switch (fRealColumns[i].block_head.getOrdering())
            {
            case kOrderByRow:
                for (uint32_t k=0; k<thisRoundNumRows; k++)
                {//regular, "semi-transposed" copy
                    memcpy(dest, src+k*fRealRowWidth+fRealColumns[i].col.offset, fRealColumns[i].col.size*fRealColumns[i].col.num);
                    dest += fRealColumns[i].col.size*fRealColumns[i].col.num;
                }
                break;

            case kOrderByCol:
                for (uint32_t j=0; j<fRealColumns[i].col.num; j++)
                    for (uint32_t k=0; k<thisRoundNumRows; k++)
                    {//transposed copy
                        memcpy(dest, src+k*fRealRowWidth+fRealColumns[i].col.offset+fRealColumns[i].col.size*j, fRealColumns[i].col.size);
                        dest += fRealColumns[i].col.size;
                    }
                break;
            }
        }
    }

    /// Specific compression functions
    /// @param dest the target buffer
    /// @param src the source buffer
    /// @param size number of bytes to copy
    /// @return number of bytes written
    uint32_t compressUNCOMPRESSED(char* dest, const char* src, uint32_t size)
    {
        memcpy(dest, src, size);
        return size;
    }

    /// Do huffman encoding
    /// @param dest the buffer that will receive the compressed data
    /// @param src the buffer hosting the transposed data
    /// @param numRows number of rows of data in the transposed buffer
    /// @param sizeOfElems size in bytes of one data element
    /// @param numRowElems number of elements on each row
    /// @return number of bytes written
    uint32_t compressHUFFMAN16(char* dest, const char* src, uint32_t numRows, uint32_t sizeOfElems, uint32_t numRowElems)
    {
        string huffmanOutput;
        uint32_t previousHuffmanSize = 0;
        if (numRows < 2)
        {//if we have less than 2 elems to compress, the Huffman encoder does not work (and has no point). Just return a size larger than the uncompressed one to trigger raw storage.
            return numRows*sizeOfElems*numRowElems + 1000;
        }
        if (sizeOfElems < 2)
        {
#ifdef __EXCEPTIONS
            throw runtime_error("HUFFMAN16 can only encode columns with 16-bit or longer types");
#else
            gLog << ___err___ << "ERROR - HUFFMAN16 can only encode columns with 16-bit or longer types" << endl;
            return 0;
#endif
        }
        uint32_t huffmanOffset = 0;
        for (uint32_t j=0; j<numRowElems; j++)
        {
            Huffman::Encode(huffmanOutput,
                            reinterpret_cast<const uint16_t*>(&src[j*sizeOfElems*numRows]),
                            numRows*(sizeOfElems/2));
            reinterpret_cast<uint32_t*>(&dest[huffmanOffset])[0] = huffmanOutput.size() - previousHuffmanSize;
            huffmanOffset       += sizeof(uint32_t);
            previousHuffmanSize  = huffmanOutput.size();
        }
        const size_t totalSize = huffmanOutput.size() + huffmanOffset;

        //only copy if not larger than the uncompressed size
        if (totalSize < numRows*sizeOfElems*numRowElems)
            memcpy(&dest[huffmanOffset], huffmanOutput.data(), huffmanOutput.size());

        return totalSize;
    }

    /// Applies Thomas' DRS4 smoothing
    /// @param data where to apply it
    /// @param numElems how many elements of type int16_t are stored in the buffer
    /// @return number of bytes modified
    uint32_t applySMOOTHING(char* data, uint32_t numElems)
    {
        int16_t* short_data = reinterpret_cast<int16_t*>(data);
        for (int j=numElems-1; j>1; j--)
            short_data[j] = short_data[j] - (short_data[j-1]+short_data[j-2])/2;

        return numElems*sizeof(int16_t);
    }

    /// Apply the inverse transform of the integer smoothing
    /// @param data where to apply it
    /// @param numElems how many elements of type int16_t are stored in the buffer
    /// @return number of bytes modified
    uint32_t UnApplySMOOTHING(char* data, uint32_t numElems)
    {
        int16_t* short_data = reinterpret_cast<int16_t*>(data);
        //un-do the integer smoothing
        for (uint32_t j=2; j<numElems; j++)
            short_data[j] = short_data[j] + (short_data[j-1]+short_data[j-2])/2;

        return numElems*sizeof(int16_t);
    }
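
    // A small worked example of the two transforms above (my own illustration, not part of
    // the original code): starting from {10, 12, 15, 20}, applySMOOTHING processes the array
    // from the back and yields {10, 12, 4, 7} (15 - (12+10)/2 = 4, 20 - (15+12)/2 = 7);
    // UnApplySMOOTHING walks forward and restores {10, 12, 15, 20} exactly, because the two
    // predictor values are already back to their original values when each element is undone.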


    //thread related stuff
    MemoryManager fMemPool;         ///< Actual memory manager, providing memory for the compression buffers
    static int32_t  fgNumQueues;    ///< Default number of threads to be used by the objects
    static uint32_t fgNumTiles;     ///< Default number of reserved tiles
    static uint32_t fgRowPerTile;   ///< Default number of rows per tile
    static uint64_t fgMaxUsableMem; ///< Default usable memory PER OBJECT
    int32_t fNumQueues;             ///< Current number of threads that will be used by this object
    uint64_t fMaxUsableMem;         ///< Maximum number of bytes that can be allocated by the memory manager
    int32_t fLatestWrittenTile;     ///< Index of the last tile written to disk (for correct ordering while using several threads)

    vector<Queue<CompressionTarget>> fCompressionQueues;         ///< Processing queues (=threads)
    Queue<WriteTarget, QueueMin<WriteTarget>> fWriteToDiskQueue; ///< Writing queue (=thread)

    // catalog related stuff
    struct CatalogEntry
    {
        CatalogEntry(int64_t f=0, int64_t s=0) : first(f), second(s) {};
        int64_t first;  ///< Size of this column in the tile
        int64_t second; ///< Offset of this column in the tile, from the start of the heap area
    } __attribute__((__packed__));

    typedef vector<CatalogEntry> CatalogRow;
    typedef vector<CatalogRow>   CatalogType;
    CatalogType fCatalog;       ///< Catalog for this file
//    uint32_t fCatalogSize;    ///< Actual catalog size (.size() is slow on large lists)
    uint32_t fNumTiles;         ///< Number of pre-reserved tiles
    uint32_t fNumRowsPerTile;   ///< Number of rows per tile
    off_t fCatalogOffset;       ///< Offset of the catalog from the beginning of the file
    uint32_t fCatalogExtraRows; ///< Number of extra rows written on top of the initial capacity of the file

    // checksum related stuff
    Checksum fCatalogSum;  ///< Checksum of the catalog
    Checksum fRawSum;      ///< Raw sum (specific to FACT)
    int32_t fCheckOffset;  ///< Offset to the data pointer to calculate the checksum

    // data layout related stuff
    /// Regular columns augmented with compression information
    struct CompressedColumn
    {
        CompressedColumn(const Table::Column& c, const Compression& h) : col(c),
                                                                         block_head(h)
        {}
        Table::Column col;      ///< the regular column entry
        Compression block_head; ///< the compression data associated with that column
    };
    vector<CompressedColumn> fRealColumns; ///< Vector hosting the columns of the file
    uint32_t fRealRowWidth;                ///< Width in bytes of one uncompressed row
    shared_ptr<MemoryChunk> fSmartBuffer;  ///< Smart pointer to the buffer where the incoming rows are written
    char* fBuffer;                         ///< Regular version of fSmartBuffer
    vector<char> fRawSumBuffer;            ///< Buffer used for checksumming the incoming data, before compression

#ifdef __EXCEPTIONS
    exception_ptr fThreadsException;       ///< Exception pointer to store exceptions coming from the threads
#endif

};

int32_t  zofits::fgNumQueues    = 0;
uint32_t zofits::fgNumTiles     = 1000;
uint32_t zofits::fgRowPerTile   = 100;
uint64_t zofits::fgMaxUsableMem = 1073741824; // one gigabyte

#ifndef __MARS__
} //namespace std
#endif