size_t ZBUFF_compressInit(ZBUFF_CCtx* zbc, int compressionLevel)
{
    return ZBUFF_compressInit_advanced(zbc, NULL, 0, ZSTD_getParams(compressionLevel, 0));
}
size_t ZSTDMT_initCStream(ZSTDMT_CCtx* zcs, int compressionLevel)
{
    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, 0);
    return ZSTDMT_initCStream_internal(zcs, NULL, 0, 1, params, 0);
}
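/* Usage sketch (editor's addition, not part of the library) : a minimal
 * streaming-compression loop over the ZSTDMT API. It assumes the public
 * entry points declared in zstdmt_compress.h (ZSTDMT_createCCtx,
 * ZSTDMT_compressStream, ZSTDMT_endStream, ZSTDMT_freeCCtx), and that
 * ZSTDMT_endStream follows the ZSTD_endStream convention of returning
 * the number of bytes still to flush (0 once the frame is complete). */
static size_t example_compressStream_mt(void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize,
                                        unsigned nbThreads, int compressionLevel)
{
    ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(nbThreads);
    ZSTD_inBuffer  input  = { src, srcSize, 0 };
    ZSTD_outBuffer output = { dst, dstCapacity, 0 };
    if (mtctx == NULL) return ERROR(memory_allocation);

    ZSTDMT_initCStream(mtctx, compressionLevel);
    while (input.pos < input.size) {   /* feed the whole source */
        size_t const r = ZSTDMT_compressStream(mtctx, &output, &input);
        if (ZSTD_isError(r)) { ZSTDMT_freeCCtx(mtctx); return r; }
    }
    for (;;) {   /* flush remaining data and write the frame epilogue */
        size_t const toFlush = ZSTDMT_endStream(mtctx, &output);
        if (ZSTD_isError(toFlush)) { ZSTDMT_freeCCtx(mtctx); return toFlush; }
        if (toFlush == 0) break;
    }
    ZSTDMT_freeCCtx(mtctx);
    return output.pos;   /* compressed size */
}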
ZSTDLIB_API size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* zbc,
                                                const void* dict, size_t dictSize,
                                                int compressionLevel)
{
    return ZBUFF_compressInit_advanced(zbc, dict, dictSize, ZSTD_getParams(compressionLevel, 0));
}
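/* Usage sketch (editor's addition) : one-shot compression through the
 * ZBUFF streaming interface initialized above. It assumes the classic
 * zbuff.h entry points (ZBUFF_createCCtx, ZBUFF_compressContinue,
 * ZBUFF_compressEnd, ZBUFF_freeCCtx), where in/out sizes are passed by
 * pointer and updated to the amounts actually consumed/written, and
 * ZBUFF_compressEnd returns the number of bytes still held in the
 * internal buffer (0 once fully flushed). */
static size_t example_compress_zbuff(void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize,
                                     int compressionLevel)
{
    ZBUFF_CCtx* const zbc = ZBUFF_createCCtx();
    size_t dstPos = 0, srcPos = 0;
    if (zbc == NULL) return ERROR(memory_allocation);

    ZBUFF_compressInit(zbc, compressionLevel);
    while (srcPos < srcSize) {
        size_t dstChunk = dstCapacity - dstPos;   /* in : room left ; out : bytes written */
        size_t srcChunk = srcSize - srcPos;       /* in : bytes offered ; out : bytes consumed */
        size_t const r = ZBUFF_compressContinue(zbc, (char*)dst + dstPos, &dstChunk,
                                                     (const char*)src + srcPos, &srcChunk);
        if (ZBUFF_isError(r)) { ZBUFF_freeCCtx(zbc); return r; }
        dstPos += dstChunk; srcPos += srcChunk;
    }
    for (;;) {   /* finalize the frame, flushing until the internal buffer is empty */
        size_t dstChunk = dstCapacity - dstPos;
        size_t const remaining = ZBUFF_compressEnd(zbc, (char*)dst + dstPos, &dstChunk);
        if (ZBUFF_isError(remaining)) { ZBUFF_freeCCtx(zbc); return remaining; }
        dstPos += dstChunk;
        if (remaining == 0) break;
    }
    ZBUFF_freeCCtx(zbc);
    return dstPos;   /* compressed size */
}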
size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
                           void* dst, size_t dstCapacity,
                           const void* src, size_t srcSize,
                           int compressionLevel)
{
    ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);
    U32 const overlapLog = (compressionLevel >= ZSTD_maxCLevel()) ? 0 : 3;
    size_t const overlapSize = (size_t)1 << (params.cParams.windowLog - overlapLog);
    size_t const chunkTargetSize = (size_t)1 << (params.cParams.windowLog + 2);
    unsigned const nbChunksMax = (unsigned)(srcSize / chunkTargetSize) + 1;
    unsigned nbChunks = MIN(nbChunksMax, mtctx->nbThreads);
    size_t const proposedChunkSize = (srcSize + (nbChunks-1)) / nbChunks;
    size_t const avgChunkSize = ((proposedChunkSize & 0x1FFFF) < 0xFFFF) ?
                                proposedChunkSize + 0xFFFF : proposedChunkSize;   /* avoid too small last block */
    size_t remainingSrcSize = srcSize;
    const char* const srcStart = (const char*)src;
    unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ?
                                       nbChunks : (unsigned)(dstCapacity / ZSTD_compressBound(avgChunkSize));   /* presumes avgChunkSize >= 256 KB, which should be the case */
    size_t frameStartPos = 0, dstBufferPos = 0;

    DEBUGLOG(3, "windowLog : %2u => chunkTargetSize : %u bytes", params.cParams.windowLog, (U32)chunkTargetSize);
    DEBUGLOG(2, "nbChunks  : %2u  (chunkSize : %u bytes)", nbChunks, (U32)avgChunkSize);
    params.fParams.contentSizeFlag = 1;

    if (nbChunks==1) {   /* fallback to single-thread mode */
        ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];
        return ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
    }

    {   unsigned u;
        for (u=0; u<nbChunks; u++) {
            size_t const chunkSize = MIN(remainingSrcSize, avgChunkSize);
            size_t const dstBufferCapacity = ZSTD_compressBound(chunkSize);
            buffer_t const dstAsBuffer = { (char*)dst + dstBufferPos, dstBufferCapacity };
            buffer_t const dstBuffer = u < compressWithinDst ?
                                       dstAsBuffer : ZSTDMT_getBuffer(mtctx->buffPool, dstBufferCapacity);
            ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(mtctx->cctxPool);
            size_t dictSize = u ? overlapSize : 0;

            if ((cctx==NULL) || (dstBuffer.start==NULL)) {
                mtctx->jobs[u].cSize = ERROR(memory_allocation);   /* job result */
                mtctx->jobs[u].jobCompleted = 1;
                nbChunks = u+1;
                break;   /* let's wait for previous jobs to complete, but don't start new ones */
            }

            mtctx->jobs[u].srcStart = srcStart + frameStartPos - dictSize;
            mtctx->jobs[u].dictSize = dictSize;
            mtctx->jobs[u].srcSize = chunkSize;
            mtctx->jobs[u].fullFrameSize = srcSize;
            mtctx->jobs[u].params = params;
            mtctx->jobs[u].dstBuff = dstBuffer;
            mtctx->jobs[u].cctx = cctx;
            mtctx->jobs[u].firstChunk = (u==0);
            mtctx->jobs[u].lastChunk = (u==nbChunks-1);
            mtctx->jobs[u].jobCompleted = 0;
            mtctx->jobs[u].jobCompleted_mutex = &mtctx->jobCompleted_mutex;
            mtctx->jobs[u].jobCompleted_cond = &mtctx->jobCompleted_cond;

            DEBUGLOG(3, "posting job %u  (%u bytes)", u, (U32)chunkSize);
            DEBUG_PRINTHEX(3, mtctx->jobs[u].srcStart, 12);
            POOL_add(mtctx->factory, ZSTDMT_compressChunk, &mtctx->jobs[u]);

            frameStartPos += chunkSize;
            dstBufferPos += dstBufferCapacity;
            remainingSrcSize -= chunkSize;
    }   }
    /* note : since nbChunks <= nbThreads, all jobs should be running immediately in parallel */

    {   unsigned chunkID;
        size_t error = 0, dstPos = 0;
        for (chunkID=0; chunkID<nbChunks; chunkID++) {
            DEBUGLOG(3, "waiting for chunk %u", chunkID);
            PTHREAD_MUTEX_LOCK(&mtctx->jobCompleted_mutex);
            while (mtctx->jobs[chunkID].jobCompleted==0) {
                DEBUGLOG(4, "waiting for jobCompleted signal from chunk %u", chunkID);
                pthread_cond_wait(&mtctx->jobCompleted_cond, &mtctx->jobCompleted_mutex);
            }
            pthread_mutex_unlock(&mtctx->jobCompleted_mutex);
            DEBUGLOG(3, "ready to write chunk %u", chunkID);

            ZSTDMT_releaseCCtx(mtctx->cctxPool, mtctx->jobs[chunkID].cctx);
            mtctx->jobs[chunkID].cctx = NULL;
            mtctx->jobs[chunkID].srcStart = NULL;
            {   size_t const cSize = mtctx->jobs[chunkID].cSize;
                if (ZSTD_isError(cSize)) error = cSize;
                if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall);
                if (chunkID) {   /* note : chunk 0 is already written directly into dst */
                    if (!error)
                        memmove((char*)dst + dstPos, mtctx->jobs[chunkID].dstBuff.start, cSize);   /* may overlap if chunk was compressed within dst */
                    if (chunkID >= compressWithinDst)   /* otherwise, the chunk was compressed directly within dst, and there is no pool buffer to release */
                        ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[chunkID].dstBuff);
                    mtctx->jobs[chunkID].dstBuff = g_nullBuffer;
                }
                dstPos += cSize;
        }   }
        if (!error) DEBUGLOG(3, "compressed size : %u", (U32)dstPos);
        return error ? error : dstPos;
    }
}
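/* Usage sketch (editor's addition) : one-shot multi-threaded compression
 * through ZSTDMT_compressCCtx above. Sizing dst at ZSTD_compressBound(srcSize)
 * also lets every chunk compress directly within dst (compressWithinDst ==
 * nbChunks above), avoiding intermediate pool buffers. For scale : with
 * windowLog = 21 and a non-maximal level, chunkTargetSize is 1 << 23 = 8 MiB
 * and overlapSize is 1 << 18 = 256 KiB per chunk. ZSTDMT_createCCtx /
 * ZSTDMT_freeCCtx are assumed from zstdmt_compress.h, and malloc/free
 * from <stdlib.h>. */
static size_t example_compress_mt_oneshot(const void* src, size_t srcSize,
                                          void** dstPtr, size_t* dstCapacityPtr,
                                          unsigned nbThreads, int compressionLevel)
{
    size_t const dstCapacity = ZSTD_compressBound(srcSize);
    void* const dst = malloc(dstCapacity);
    ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(nbThreads);
    size_t cSize;
    if ((dst == NULL) || (mtctx == NULL)) {
        free(dst);
        if (mtctx != NULL) ZSTDMT_freeCCtx(mtctx);
        return ERROR(memory_allocation);
    }
    cSize = ZSTDMT_compressCCtx(mtctx, dst, dstCapacity, src, srcSize, compressionLevel);
    ZSTDMT_freeCCtx(mtctx);
    if (ZSTD_isError(cSize)) { free(dst); return cSize; }
    *dstPtr = dst;                   /* caller owns and frees */
    *dstCapacityPtr = dstCapacity;
    return cSize;                    /* compressed size */
}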