mirror of
				https://github.com/facebook/zstd.git
				synced 2025-10-31 16:47:48 +02:00 
			
		
		
		
	added ability to split input files for dictionary training, using command -B#.
This is the same behavior as the benchmark module, which can also split input into arbitrary-size blocks using -B#.
This commit is contained in:
		| @@ -845,6 +845,35 @@ size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long | ||||
|     </b>/* advanced parameters - may not remain available after API update */<b> | ||||
|     ZSTD_p_forceMaxWindow=1100, </b>/* Force back-reference distances to remain < windowSize,<b> | ||||
|                               * even when referencing into Dictionary content (default:0) */ | ||||
|     ZSTD_p_enableLongDistanceMatching=1200,  </b>/* Enable long distance matching.<b> | ||||
|                                          * This parameter is designed to improve the compression | ||||
|                                          * ratio for large inputs with long distance matches. | ||||
|                                          * This increases the memory usage as well as window size. | ||||
|                                          * Note: setting this parameter sets all the LDM parameters | ||||
|                                          * as well as ZSTD_p_windowLog. It should be set after | ||||
|                                          * ZSTD_p_compressionLevel and before ZSTD_p_windowLog and | ||||
|                                          * other LDM parameters. Setting the compression level | ||||
|                                          * after this parameter overrides the window log, though LDM | ||||
|                                          * will remain enabled until explicitly disabled. */ | ||||
|     ZSTD_p_ldmHashLog,   </b>/* Size of the table for long distance matching, as a power of 2.<b> | ||||
|                           * Larger values increase memory usage and compression ratio, but decrease | ||||
|                           * compression speed. | ||||
|                           * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX | ||||
|                           * (default: 20). */ | ||||
|     ZSTD_p_ldmMinMatch,  </b>/* Minimum size of searched matches for long distance matcher.<b> | ||||
|                           * Larger/too small values usually decrease compression ratio. | ||||
|                           * Must be clamped between ZSTD_LDM_MINMATCH_MIN | ||||
|                           * and ZSTD_LDM_MINMATCH_MAX (default: 64). */ | ||||
|     ZSTD_p_ldmBucketSizeLog,  </b>/* Log size of each bucket in the LDM hash table for collision resolution.<b> | ||||
|                                * Larger values usually improve collision resolution but may decrease | ||||
|                                * compression speed. | ||||
|                                * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX (default: 3). */ | ||||
|     ZSTD_p_ldmHashEveryLog,  </b>/* Frequency of inserting/looking up entries in the LDM hash table.<b> | ||||
|                               * The default is MAX(0, (windowLog - ldmHashLog)) to | ||||
|                               * optimize hash table usage. | ||||
|                               * Larger values improve compression speed. Deviating far from the | ||||
|                               * default value will likely result in a decrease in compression ratio. | ||||
|                               * Must be clamped between 0 and ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN. */ | ||||
|  | ||||
| } ZSTD_cParameter; | ||||
| </b></pre><BR> | ||||
|   | ||||
							
								
								
									
										139
									
								
								programs/dibio.c
									
									
									
									
									
								
							
							
						
						
									
										139
									
								
								programs/dibio.c
									
									
									
									
									
								
							| @@ -53,13 +53,12 @@ static const size_t g_maxMemory = (sizeof(size_t) == 4) ? (2 GB - 64 MB) : ((siz | ||||
| *  Console display | ||||
| ***************************************/ | ||||
| #define DISPLAY(...)         fprintf(stderr, __VA_ARGS__) | ||||
| #define DISPLAYLEVEL(l, ...) if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); } | ||||
| static int g_displayLevel = 0;   /* 0 : no display;   1: errors;   2: default;  4: full information */ | ||||
| #define DISPLAYLEVEL(l, ...) if (displayLevel>=l) { DISPLAY(__VA_ARGS__); } | ||||
|  | ||||
| #define DISPLAYUPDATE(l, ...) if (g_displayLevel>=l) { \ | ||||
|             if ((DIB_clockSpan(g_time) > refreshRate) || (g_displayLevel>=4)) \ | ||||
| #define DISPLAYUPDATE(l, ...) if (displayLevel>=l) { \ | ||||
|             if ((DIB_clockSpan(g_time) > refreshRate) || (displayLevel>=4)) \ | ||||
|             { g_time = clock(); DISPLAY(__VA_ARGS__); \ | ||||
|             if (g_displayLevel>=4) fflush(stderr); } } | ||||
|             if (displayLevel>=4) fflush(stderr); } } | ||||
| static const clock_t refreshRate = CLOCKS_PER_SEC * 2 / 10; | ||||
| static clock_t g_time = 0; | ||||
|  | ||||
| @@ -76,9 +75,9 @@ static clock_t DIB_clockSpan(clock_t nPrevious) { return clock() - nPrevious; } | ||||
| #define EXM_THROW(error, ...)                                             \ | ||||
| {                                                                         \ | ||||
|     DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \ | ||||
|     DISPLAYLEVEL(1, "Error %i : ", error);                                \ | ||||
|     DISPLAYLEVEL(1, __VA_ARGS__);                                         \ | ||||
|     DISPLAYLEVEL(1, "\n");                                                \ | ||||
|     DISPLAY("Error %i : ", error);                                        \ | ||||
|     DISPLAY(__VA_ARGS__);                                                 \ | ||||
|     DISPLAY("\n");                                                        \ | ||||
|     exit(error);                                                          \ | ||||
| } | ||||
|  | ||||
| @@ -102,30 +101,42 @@ const char* DiB_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCo | ||||
|  * @return : nb of files effectively loaded into `buffer` | ||||
|  * *bufferSizePtr is modified, it provides the amount data loaded within buffer */ | ||||
| static unsigned DiB_loadFiles(void* buffer, size_t* bufferSizePtr, | ||||
|                               size_t* fileSizes, | ||||
|                               const char** fileNamesTable, unsigned nbFiles) | ||||
|                               size_t* chunkSizes, | ||||
|                               const char** fileNamesTable, unsigned nbFiles, size_t targetChunkSize, | ||||
|                               unsigned displayLevel) | ||||
| { | ||||
|     char* const buff = (char*)buffer; | ||||
|     size_t pos = 0; | ||||
|     unsigned n; | ||||
|     unsigned nbLoadedChunks = 0, fileIndex; | ||||
|  | ||||
|     for (n=0; n<nbFiles; n++) { | ||||
|         const char* const fileName = fileNamesTable[n]; | ||||
|     for (fileIndex=0; fileIndex<nbFiles; fileIndex++) { | ||||
|         const char* const fileName = fileNamesTable[fileIndex]; | ||||
|         unsigned long long const fs64 = UTIL_getFileSize(fileName); | ||||
|         size_t const fileSize = (size_t) MIN(fs64, SAMPLESIZE_MAX); | ||||
|         if (fileSize > *bufferSizePtr-pos) break; | ||||
|         {   FILE* const f = fopen(fileName, "rb"); | ||||
|             if (f==NULL) EXM_THROW(10, "zstd: dictBuilder: %s %s ", fileName, strerror(errno)); | ||||
|             DISPLAYUPDATE(2, "Loading %s...       \r", fileName); | ||||
|             { size_t const readSize = fread(buff+pos, 1, fileSize, f); | ||||
|               if (readSize != fileSize) EXM_THROW(11, "Pb reading %s", fileName); | ||||
|               pos += readSize; } | ||||
|             fileSizes[n] = fileSize; | ||||
|             fclose(f); | ||||
|     }   } | ||||
|         unsigned long long remainingToLoad = fs64; | ||||
|         U32 const nbChunks = targetChunkSize ? (U32)((fs64 + (targetChunkSize-1)) / targetChunkSize) : 1; | ||||
|         U64 const chunkSize = targetChunkSize ? MIN(targetChunkSize, fs64) : fs64; | ||||
|         size_t const maxChunkSize = MIN(chunkSize, SAMPLESIZE_MAX); | ||||
|         U32 cnb; | ||||
|         FILE* const f = fopen(fileName, "rb"); | ||||
|         if (f==NULL) EXM_THROW(10, "zstd: dictBuilder: %s %s ", fileName, strerror(errno)); | ||||
|         DISPLAYUPDATE(2, "Loading %s...       \r", fileName); | ||||
|         for (cnb=0; cnb<nbChunks; cnb++) { | ||||
|             size_t const toLoad = MIN(maxChunkSize, remainingToLoad); | ||||
|             if (toLoad > *bufferSizePtr-pos) break; | ||||
|             {   size_t const readSize = fread(buff+pos, 1, toLoad, f); | ||||
|                 if (readSize != toLoad) EXM_THROW(11, "Pb reading %s", fileName); | ||||
|                 pos += readSize; | ||||
|                 chunkSizes[nbLoadedChunks++] = toLoad; | ||||
|                 remainingToLoad -= targetChunkSize; | ||||
|                 if (toLoad < targetChunkSize) { | ||||
|                     fseek(f, (targetChunkSize - toLoad), SEEK_CUR); | ||||
|         }   }   } | ||||
|         fclose(f); | ||||
|     } | ||||
|     DISPLAYLEVEL(2, "\r%79s\r", ""); | ||||
|     *bufferSizePtr = pos; | ||||
|     return n; | ||||
|     DISPLAYLEVEL(4, "loaded : %u KB \n", (U32)(pos >> 10)) | ||||
|     return nbLoadedChunks; | ||||
| } | ||||
|  | ||||
| #define DiB_rotl32(x,r) ((x << r) | (x >> (32 - r))) | ||||
| @@ -207,18 +218,28 @@ static void DiB_saveDict(const char* dictFileName, | ||||
| } | ||||
|  | ||||
|  | ||||
| static int g_tooLargeSamples = 0; | ||||
| static U64 DiB_totalCappedFileSize(const char** fileNamesTable, unsigned nbFiles) | ||||
| typedef struct { | ||||
|     U64 totalSizeToLoad; | ||||
|     unsigned oneSampleTooLarge; | ||||
|     unsigned nbChunks; | ||||
| } fileStats; | ||||
|  | ||||
| static fileStats DiB_fileStats(const char** fileNamesTable, unsigned nbFiles, size_t chunkSize, unsigned displayLevel) | ||||
| { | ||||
|     U64 total = 0; | ||||
|     fileStats fs; | ||||
|     unsigned n; | ||||
|     memset(&fs, 0, sizeof(fs)); | ||||
|     for (n=0; n<nbFiles; n++) { | ||||
|         U64 const fileSize = UTIL_getFileSize(fileNamesTable[n]); | ||||
|         U64 const cappedFileSize = MIN(fileSize, SAMPLESIZE_MAX); | ||||
|         total += cappedFileSize; | ||||
|         g_tooLargeSamples |= (fileSize > 2*SAMPLESIZE_MAX); | ||||
|         U32 const nbChunks = (U32)(chunkSize ? (fileSize + (chunkSize-1)) / chunkSize : 1); | ||||
|         U64 const chunkToLoad = chunkSize ? MIN(chunkSize, fileSize) : fileSize; | ||||
|         size_t const cappedChunkSize = MIN(chunkToLoad, SAMPLESIZE_MAX); | ||||
|         fs.totalSizeToLoad += cappedChunkSize * nbChunks; | ||||
|         fs.oneSampleTooLarge |= (chunkSize > 2*SAMPLESIZE_MAX); | ||||
|         fs.nbChunks += nbChunks; | ||||
|     } | ||||
|     return total; | ||||
|     DISPLAYLEVEL(4, "Preparing to load : %u KB \n", (U32)(fs.totalSizeToLoad >> 10)); | ||||
|     return fs; | ||||
| } | ||||
|  | ||||
|  | ||||
| @@ -235,63 +256,65 @@ size_t ZDICT_trainFromBuffer_unsafe_legacy(void* dictBuffer, size_t dictBufferCa | ||||
|  | ||||
|  | ||||
| int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize, | ||||
|                        const char** fileNamesTable, unsigned nbFiles, | ||||
|                        const char** fileNamesTable, unsigned nbFiles, size_t chunkSize, | ||||
|                        ZDICT_legacy_params_t *params, ZDICT_cover_params_t *coverParams, | ||||
|                        int optimizeCover) | ||||
| { | ||||
|     unsigned displayLevel = params ? params->zParams.notificationLevel : | ||||
|                             coverParams ? coverParams->zParams.notificationLevel : | ||||
|                             0;   /* should never happen */ | ||||
|     void* const dictBuffer = malloc(maxDictSize); | ||||
|     size_t* const fileSizes = (size_t*)malloc(nbFiles * sizeof(size_t)); | ||||
|     unsigned long long const totalSizeToLoad = DiB_totalCappedFileSize(fileNamesTable, nbFiles); | ||||
|     fileStats const fs = DiB_fileStats(fileNamesTable, nbFiles, chunkSize, displayLevel); | ||||
|     size_t* const chunkSizes = (size_t*)malloc(fs.nbChunks * sizeof(size_t)); | ||||
|     size_t const memMult = params ? MEMMULT : COVER_MEMMULT; | ||||
|     size_t const maxMem =  DiB_findMaxMem(totalSizeToLoad * memMult) / memMult; | ||||
|     size_t benchedSize = (size_t) MIN ((unsigned long long)maxMem, totalSizeToLoad); | ||||
|     void* const srcBuffer = malloc(benchedSize+NOISELENGTH); | ||||
|     size_t const maxMem =  DiB_findMaxMem(fs.totalSizeToLoad * memMult) / memMult; | ||||
|     size_t loadedSize = (size_t) MIN ((unsigned long long)maxMem, fs.totalSizeToLoad); | ||||
|     void* const srcBuffer = malloc(loadedSize+NOISELENGTH); | ||||
|     int result = 0; | ||||
|  | ||||
|     /* Checks */ | ||||
|     if (params) g_displayLevel = params->zParams.notificationLevel; | ||||
|     else if (coverParams) g_displayLevel = coverParams->zParams.notificationLevel; | ||||
|     else EXM_THROW(13, "Neither dictionary algorithm selected");   /* should not happen */ | ||||
|     if ((!fileSizes) || (!srcBuffer) || (!dictBuffer)) | ||||
|     if ((!chunkSizes) || (!srcBuffer) || (!dictBuffer)) | ||||
|         EXM_THROW(12, "not enough memory for DiB_trainFiles");   /* should not happen */ | ||||
|     if (g_tooLargeSamples) { | ||||
|         DISPLAYLEVEL(2, "!  Warning : some samples are very large \n"); | ||||
|         DISPLAYLEVEL(2, "!  Note that dictionary is only useful for small files or beginning of large files. \n"); | ||||
|         DISPLAYLEVEL(2, "!  As a consequence, only the first %u bytes of each file are loaded \n", SAMPLESIZE_MAX); | ||||
|     if (fs.oneSampleTooLarge) { | ||||
|         DISPLAYLEVEL(2, "!  Warning : some sample(s) are very large \n"); | ||||
|         DISPLAYLEVEL(2, "!  Note that dictionary is only useful for small samples. \n"); | ||||
|         DISPLAYLEVEL(2, "!  As a consequence, only the first %u bytes of each sample are loaded \n", SAMPLESIZE_MAX); | ||||
|     } | ||||
|     if ((nbFiles < 5) || (totalSizeToLoad < 9 * (unsigned long long)maxDictSize)) { | ||||
|     if (fs.nbChunks < 5) { | ||||
|         DISPLAYLEVEL(2, "!  Warning : nb of samples too low for proper processing ! \n"); | ||||
|         DISPLAYLEVEL(2, "!  Please provide _one file per sample_. \n"); | ||||
|         DISPLAYLEVEL(2, "!  Do not concatenate samples together into a single file, \n"); | ||||
|         DISPLAYLEVEL(2, "!  as dictBuilder will be unable to find the beginning of each sample, \n"); | ||||
|         DISPLAYLEVEL(2, "!  resulting in poor dictionary quality. \n"); | ||||
|         EXM_THROW(14, "nb of samples too low");   /* we now clearly forbid this case */ | ||||
|     } | ||||
|     if (fs.totalSizeToLoad < (unsigned long long)(8 * maxDictSize)) { | ||||
|         DISPLAYLEVEL(2, "!  Warning : data size of samples too small for target dictionary size \n"); | ||||
|         DISPLAYLEVEL(2, "!  Samples should be about 100x larger than target dictionary size \n"); | ||||
|     } | ||||
|  | ||||
|     /* init */ | ||||
|     if (benchedSize < totalSizeToLoad) | ||||
|         DISPLAYLEVEL(1, "Not enough memory; training on %u MB only...\n", (unsigned)(benchedSize >> 20)); | ||||
|     if (loadedSize < fs.totalSizeToLoad) | ||||
|         DISPLAYLEVEL(1, "Not enough memory; training on %u MB only...\n", (unsigned)(loadedSize >> 20)); | ||||
|  | ||||
|     /* Load input buffer */ | ||||
|     DISPLAYLEVEL(3, "Shuffling input files\n"); | ||||
|     DiB_shuffle(fileNamesTable, nbFiles); | ||||
|     nbFiles = DiB_loadFiles(srcBuffer, &benchedSize, fileSizes, fileNamesTable, nbFiles); | ||||
|     nbFiles = DiB_loadFiles(srcBuffer, &loadedSize, chunkSizes, fileNamesTable, nbFiles, chunkSize, displayLevel); | ||||
|  | ||||
|     {   size_t dictSize; | ||||
|         if (params) { | ||||
|             DiB_fillNoise((char*)srcBuffer + benchedSize, NOISELENGTH);   /* guard band, for end of buffer condition */ | ||||
|             DiB_fillNoise((char*)srcBuffer + loadedSize, NOISELENGTH);   /* guard band, for end of buffer condition */ | ||||
|             dictSize = ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, maxDictSize, | ||||
|                                                            srcBuffer, fileSizes, nbFiles, | ||||
|                                                            srcBuffer, chunkSizes, fs.nbChunks, | ||||
|                                                            *params); | ||||
|         } else if (optimizeCover) { | ||||
|             dictSize = ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, maxDictSize, | ||||
|                                                            srcBuffer, fileSizes, nbFiles, | ||||
|                                                            srcBuffer, chunkSizes, fs.nbChunks, | ||||
|                                                            coverParams); | ||||
|             if (!ZDICT_isError(dictSize)) { | ||||
|                 DISPLAYLEVEL(2, "k=%u\nd=%u\nsteps=%u\n", coverParams->k, coverParams->d, coverParams->steps); | ||||
|             } | ||||
|         } else { | ||||
|             dictSize = ZDICT_trainFromBuffer_cover(dictBuffer, maxDictSize, srcBuffer, | ||||
|                                                    fileSizes, nbFiles, *coverParams); | ||||
|                                                    chunkSizes, fs.nbChunks, *coverParams); | ||||
|         } | ||||
|         if (ZDICT_isError(dictSize)) { | ||||
|             DISPLAYLEVEL(1, "dictionary training failed : %s \n", ZDICT_getErrorName(dictSize));   /* should not happen */ | ||||
| @@ -306,7 +329,7 @@ int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize, | ||||
|     /* clean up */ | ||||
| _cleanup: | ||||
|     free(srcBuffer); | ||||
|     free(chunkSizes); | ||||
|     free(dictBuffer); | ||||
|     free(fileSizes); | ||||
|     return result; | ||||
| } | ||||
|   | ||||
| @@ -32,7 +32,7 @@ | ||||
|     @return : 0 == ok. Any other : error. | ||||
| */ | ||||
| int DiB_trainFromFiles(const char* dictFileName, unsigned maxDictSize, | ||||
|                        const char** fileNamesTable, unsigned nbFiles, | ||||
|                        const char** fileNamesTable, unsigned nbFiles, size_t chunkSize, | ||||
|                        ZDICT_legacy_params_t *params, ZDICT_cover_params_t *coverParams, | ||||
|                        int optimizeCover); | ||||
|  | ||||
|   | ||||
| @@ -184,6 +184,8 @@ Typical gains range from 10% (at 64KB) to x5 better (at <1KB). | ||||
|     Dictionary saved into `file` (default name: dictionary). | ||||
| * `--maxdict=#`: | ||||
|     Limit dictionary to specified size (default: 112640). | ||||
| * `-B#`: | ||||
|     Split input files into blocks of size # (default: no split) | ||||
| * `--dictID=#`: | ||||
|     A dictionary ID is a locally unique ID that a decoder can use to verify it is | ||||
|     using the right dictionary. | ||||
| @@ -373,7 +375,7 @@ The list of available _options_: | ||||
|     default value will likely result in a decrease in compression ratio. | ||||
|  | ||||
|     The default value is `wlog - ldmhlog`. | ||||
|   | ||||
|  | ||||
| ### -B#: | ||||
| Select the size of each compression job. | ||||
| This parameter is available only when multi-threading is enabled. | ||||
|   | ||||
| @@ -759,13 +759,13 @@ int main(int argCount, const char* argv[]) | ||||
|             int const optimize = !coverParams.k || !coverParams.d; | ||||
|             coverParams.nbThreads = nbThreads; | ||||
|             coverParams.zParams = zParams; | ||||
|             operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, NULL, &coverParams, optimize); | ||||
|             operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, blockSize, NULL, &coverParams, optimize); | ||||
|         } else { | ||||
|             ZDICT_legacy_params_t dictParams; | ||||
|             memset(&dictParams, 0, sizeof(dictParams)); | ||||
|             dictParams.selectivityLevel = dictSelect; | ||||
|             dictParams.zParams = zParams; | ||||
|             operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, &dictParams, NULL, 0); | ||||
|             operationResult = DiB_trainFromFiles(outFileName, maxDictSize, filenameTable, filenameIdx, blockSize, &dictParams, NULL, 0); | ||||
|         } | ||||
| #endif | ||||
|         goto _end; | ||||
|   | ||||
		Reference in New Issue
	
	Block a user