/**
 * Decode a single RemoteFX color component (Y, Cb or Cr) into a 64x64
 * coefficient tile.
 *
 * Pipeline: RLGR entropy decode -> differential decode of the final
 * sub-band -> dequantization -> inverse 2D DWT.  Each stage is wrapped in
 * profiler markers.  A scratch buffer for the DWT is borrowed from the
 * context's buffer pool and returned before exit.
 *
 * Fix: the pool allocation was never checked; a NULL scratch buffer would
 * previously have been handed to the DWT stage.
 *
 * @param quantization_values  per-sub-band quantization table
 * @param data / size          RLGR-encoded component bit stream
 * @param buffer               output: 4096 INT16 coefficients (64x64)
 */
static void rfx_decode_component(RFX_CONTEXT* context, const UINT32* quantization_values,
                                 const BYTE* data, int size, INT16* buffer)
{
	INT16* dwt_buffer;

	dwt_buffer = BufferPool_Take(context->priv->BufferPool, -1); /* dwt_buffer */

	/* bail out instead of passing a failed allocation into the DWT */
	if (!dwt_buffer)
		return;

	PROFILER_ENTER(context->priv->prof_rfx_decode_component);

	PROFILER_ENTER(context->priv->prof_rfx_rlgr_decode);
	context->rlgr_decode(context->mode, data, size, buffer, 4096);
	PROFILER_EXIT(context->priv->prof_rfx_rlgr_decode);

	PROFILER_ENTER(context->priv->prof_rfx_differential_decode);
	/* the last 64 coefficients are differentially coded */
	rfx_differential_decode(buffer + 4032, 64);
	PROFILER_EXIT(context->priv->prof_rfx_differential_decode);

	PROFILER_ENTER(context->priv->prof_rfx_quantization_decode);
	context->quantization_decode(buffer, quantization_values);
	PROFILER_EXIT(context->priv->prof_rfx_quantization_decode);

	PROFILER_ENTER(context->priv->prof_rfx_dwt_2d_decode);
	context->dwt_2d_decode(buffer, dwt_buffer);
	PROFILER_EXIT(context->priv->prof_rfx_dwt_2d_decode);

	PROFILER_EXIT(context->priv->prof_rfx_decode_component);

	BufferPool_Return(context->priv->BufferPool, dwt_buffer);
}
/* Release a RFX_MESSAGE and everything it owns: the rectangle array (when
 * the message owns it), each tile's YCbCr scratch buffer and the tile
 * itself (both handed back to their pools), the tile pointer array, and
 * finally the message structure unless it is embedded in an array. */
void rfx_message_free(RFX_CONTEXT* context, RFX_MESSAGE* message)
{
	if (!message)
		return;

	if (message->rects && message->freeRects)
		free(message->rects);

	if (message->tiles)
	{
		int index;

		for (index = 0; index < message->numTiles; index++)
		{
			RFX_TILE* tile = message->tiles[index];

			if (!tile)
				continue;

			if (tile->YCbCrData)
			{
				/* hand the YCbCr scratch buffer back to the pool */
				BufferPool_Return(context->priv->BufferPool, tile->YCbCrData);
				tile->YCbCrData = NULL;
			}

			ObjectPool_Return(context->priv->TilePool, (void*) tile);
		}

		free(message->tiles);
	}

	/* messages living inside an array are freed by the array's owner */
	if (!message->freeArray)
		free(message);
}
/* Drop one reference to a PresentationContext and destroy it when the
 * count reaches zero.  Teardown order is deliberate: the mapped geometry's
 * callbacks are detached before it is unreferenced so no notification can
 * land on a half-destroyed presentation, and the surface is deleted before
 * its backing pixel buffer is returned to the pool. */
static void PresentationContext_unref(PresentationContext *presentation)
{
	VideoClientContextPriv *priv;
	MAPPED_GEOMETRY *geometry;

	if (!presentation)
		return;

	/* only the last reference performs the teardown */
	if (InterlockedDecrement(&presentation->refCounter) != 0)
		return;

	geometry = presentation->geometry;
	if (geometry)
	{
		/* detach callbacks first so the geometry can no longer call back
		 * into this presentation */
		geometry->MappedGeometryUpdate = NULL;
		geometry->MappedGeometryClear = NULL;
		geometry->custom = NULL;
		mappedGeometryUnref(geometry);
	}

	priv = presentation->video->priv;
	h264_context_free(presentation->h264);
	Stream_Free(presentation->currentSample, TRUE);
	/* delete the surface before returning its pixel data to the pool */
	presentation->video->deleteSurface(presentation->video, presentation->surface);
	BufferPool_Return(priv->surfacePool, presentation->surfaceData);
	yuv_context_free(presentation->yuv);
	free(presentation);
}
/* Exercise a synchronized, variable-size wBufferPool: take buffers of a
 * default and an explicit size, verify the reported buffer sizes, return
 * one buffer and verify the pool usage count, then clear and free.
 * Returns 0 on success, -1 on failure.
 *
 * Fix: every failure path previously leaked the pool (and any buffers
 * still taken from it); all error exits now funnel through one cleanup
 * label that frees the pool. */
int TestBufferPool(int argc, char* argv[])
{
	DWORD PoolSize;
	int BufferSize;
	wBufferPool* pool;
	BYTE* Buffers[10];
	DWORD DefaultSize = 1234;

	pool = BufferPool_New(TRUE, -1, 16);
	if (!pool)
		return -1;

	Buffers[0] = BufferPool_Take(pool, DefaultSize);
	Buffers[1] = BufferPool_Take(pool, DefaultSize);
	Buffers[2] = BufferPool_Take(pool, 2048);

	if (!Buffers[0] || !Buffers[1] || !Buffers[2])
		goto fail;

	BufferSize = BufferPool_GetBufferSize(pool, Buffers[0]);
	if (BufferSize != DefaultSize)
	{
		printf("BufferPool_GetBufferSize failure: Actual: %d Expected: %"PRIu32"\n", BufferSize, DefaultSize);
		goto fail;
	}

	BufferSize = BufferPool_GetBufferSize(pool, Buffers[1]);
	if (BufferSize != DefaultSize)
	{
		printf("BufferPool_GetBufferSize failure: Actual: %d Expected: %"PRIu32"\n", BufferSize, DefaultSize);
		goto fail;
	}

	BufferSize = BufferPool_GetBufferSize(pool, Buffers[2]);
	if (BufferSize != 2048)
	{
		printf("BufferPool_GetBufferSize failure: Actual: %d Expected: 2048\n", BufferSize);
		goto fail;
	}

	BufferPool_Return(pool, Buffers[1]);

	PoolSize = BufferPool_GetPoolSize(pool);
	if (PoolSize != 2)
	{
		printf("BufferPool_GetPoolSize failure: Actual: %"PRIu32" Expected: 2\n", PoolSize);
		goto fail;
	}

	BufferPool_Clear(pool);
	BufferPool_Free(pool);
	return 0;

fail:
	/* BufferPool_Free also releases any buffers still owned by the pool */
	BufferPool_Free(pool);
	return -1;
}
/* Destroy a scheduled VideoFrame: drop its geometry reference, return the
 * RGB surface buffer to the pool, release the owning presentation, free
 * the frame and NULL the caller's pointer.
 *
 * Fix: guard against a NULL pointer or an already-freed frame so the
 * function is safe to call from cleanup paths that may not hold one. */
static void VideoFrame_free(VideoFrame **pframe)
{
	VideoFrame *frame;

	if (!pframe || !(frame = *pframe))
		return;

	mappedGeometryUnref(frame->geometry);
	BufferPool_Return(frame->presentation->video->priv->surfacePool, frame->surfaceData);
	PresentationContext_unref(frame->presentation);
	free(frame);
	*pframe = NULL;
}
/**
 * Decode the first-pass data of a progressive RemoteFX tile.
 *
 * Looks up the Y/Cb/Cr quantization tables and the progressive quality
 * table referenced by the tile, then decodes the three color components
 * into scratch buffers taken from the codec's buffer pool.
 *
 * Fix: a failed BufferPool_Take was previously dereferenced; it now
 * returns -1 like the other error paths.
 *
 * @return 1 on success, -1 on invalid quantizer/quality indices or
 *         allocation failure.
 */
int progressive_decompress_tile_first(PROGRESSIVE_CONTEXT* progressive, RFX_PROGRESSIVE_TILE* tile)
{
	BYTE* pBuffer;
	INT16* pSrcDst[3];
	PROGRESSIVE_BLOCK_REGION* region;
	RFX_COMPONENT_CODEC_QUANT* quantY;
	RFX_COMPONENT_CODEC_QUANT* quantCb;
	RFX_COMPONENT_CODEC_QUANT* quantCr;
	RFX_PROGRESSIVE_CODEC_QUANT* quantProgVal;

	printf("ProgressiveTileFirst: quantIdx Y: %d Cb: %d Cr: %d xIdx: %d yIdx: %d flags: %d quality: %d yLen: %d cbLen: %d crLen: %d tailLen: %d\n",
			tile->quantIdxY, tile->quantIdxCb, tile->quantIdxCr,
			tile->xIdx, tile->yIdx, tile->flags, tile->quality,
			tile->yLen, tile->cbLen, tile->crLen, tile->tailLen);

	region = &(progressive->region);

	/* validate every quantizer index before indexing the tables */
	if (tile->quantIdxY >= region->numQuant)
		return -1;
	quantY = &(region->quantVals[tile->quantIdxY]);

	if (tile->quantIdxCb >= region->numQuant)
		return -1;
	quantCb = &(region->quantVals[tile->quantIdxCb]);

	if (tile->quantIdxCr >= region->numQuant)
		return -1;
	quantCr = &(region->quantVals[tile->quantIdxCr]);

	if (tile->quality == 0xFF)
	{
		/* 0xFF selects the full-quality progressive table */
		quantProgVal = &(progressive->quantProgValFull);
	}
	else
	{
		if (tile->quality >= region->numProgQuant)
			return -1;
		quantProgVal = &(region->quantProgVals[tile->quality]);
	}
	/* NOTE(review): quantProgVal is validated here but not used in this
	 * function body - presumably consumed by a later pass; confirm. */

	pBuffer = (BYTE*) BufferPool_Take(progressive->bufferPool, -1);
	if (!pBuffer)
		return -1;

	/* three component buffers of 8192 bytes each, with 32 bytes of
	 * headroom and a 16-byte offset for alignment */
	pSrcDst[0] = (INT16*)((BYTE*)(&pBuffer[((8192 + 32) * 0) + 16])); /* Y/R buffer */
	pSrcDst[1] = (INT16*)((BYTE*)(&pBuffer[((8192 + 32) * 1) + 16])); /* Cb/G buffer */
	pSrcDst[2] = (INT16*)((BYTE*)(&pBuffer[((8192 + 32) * 2) + 16])); /* Cr/B buffer */

	progressive_rfx_decode_component(progressive, quantY, tile->yData, tile->yLen, pSrcDst[0]); /* Y */
	progressive_rfx_decode_component(progressive, quantCb, tile->cbData, tile->cbLen, pSrcDst[1]); /* Cb */
	progressive_rfx_decode_component(progressive, quantCr, tile->crData, tile->crLen, pSrcDst[2]); /* Cr */

	BufferPool_Return(progressive->bufferPool, pBuffer);

	return 1;
}
/**
 * Decode one 64x64 RemoteFX tile into an RGB pixel buffer.
 * stride is bytes between rows in the output buffer.
 *
 * Fix: the three scratch buffers taken from the pool were never checked
 * for allocation failure; a NULL result is now handled through a common
 * cleanup path that returns whatever was taken and keeps the profiler
 * enter/exit balanced.
 *
 * @return TRUE on success, FALSE on allocation failure.
 */
BOOL rfx_decode_rgb(RFX_CONTEXT* context, RFX_TILE* tile, BYTE* rgb_buffer, int stride)
{
	BOOL rc = FALSE;
	int i;
	BYTE* pBuffer[3] = { NULL, NULL, NULL };
	INT16* pSrcDst[3];
	UINT32 *y_quants, *cb_quants, *cr_quants;
	static const prim_size_t roi_64x64 = { 64, 64 };
	const primitives_t *prims = primitives_get();

	PROFILER_ENTER(context->priv->prof_rfx_decode_rgb);

	/* each quantization table holds 10 sub-band entries */
	y_quants = context->quants + (tile->quantIdxY * 10);
	cb_quants = context->quants + (tile->quantIdxCb * 10);
	cr_quants = context->quants + (tile->quantIdxCr * 10);

	for (i = 0; i < 3; i++)
	{
		pBuffer[i] = (BYTE*) BufferPool_Take(context->priv->BufferPool, -1);

		if (!pBuffer[i])
			goto cleanup;

		/* skip 16 bytes so coefficient data is 16-byte aligned */
		pSrcDst[i] = (INT16*)(pBuffer[i] + 16); /* y_r / cb_g / cr_b buffer */
	}

	rfx_decode_component(context, y_quants, tile->YData, tile->YLen, pSrcDst[0]); /* YData */
	rfx_decode_component(context, cb_quants, tile->CbData, tile->CbLen, pSrcDst[1]); /* CbData */
	rfx_decode_component(context, cr_quants, tile->CrData, tile->CrLen, pSrcDst[2]); /* CrData */

	PROFILER_ENTER(context->priv->prof_rfx_ycbcr_to_rgb);
	prims->yCbCrToRGB_16s16s_P3P3((const INT16**) pSrcDst, 64 * sizeof(INT16),
			pSrcDst, 64 * sizeof(INT16), &roi_64x64);
	PROFILER_EXIT(context->priv->prof_rfx_ycbcr_to_rgb);

	PROFILER_ENTER(context->priv->prof_rfx_decode_format_rgb);
	rfx_decode_format_rgb(pSrcDst[0], pSrcDst[1], pSrcDst[2],
			context->pixel_format, rgb_buffer, stride);
	PROFILER_EXIT(context->priv->prof_rfx_decode_format_rgb);

	rc = TRUE;

cleanup:
	PROFILER_EXIT(context->priv->prof_rfx_decode_rgb);

	for (i = 0; i < 3; i++)
	{
		if (pBuffer[i])
			BufferPool_Return(context->priv->BufferPool, pBuffer[i]);
	}

	return rc;
}
/* Exercise a synchronized, fixed-default-size wBufferPool: take default
 * sized and oversized buffers, verify pool usage counts and reported
 * buffer sizes, return one buffer, clear, and free the pool.
 * Returns 0 on success, -1 on failure.
 *
 * Fixes: the results of BufferPool_New and BufferPool_Take were never
 * checked (inconsistent with the variable-size pool test), and every
 * failure path leaked the pool; error exits now funnel through one
 * cleanup label. */
int TestBufferPool(int argc, char* argv[])
{
	int PoolSize;
	int BufferSize;
	int DefaultSize;
	wBufferPool* pool;
	BYTE* Buffers[10];

	DefaultSize = 1234;

	pool = BufferPool_New(TRUE, DefaultSize, 16);
	if (!pool)
		return -1;

	Buffers[0] = BufferPool_Take(pool, -1);
	Buffers[1] = BufferPool_Take(pool, 0);
	Buffers[2] = BufferPool_Take(pool, 2048);

	if (!Buffers[0] || !Buffers[1] || !Buffers[2])
		goto fail;

	PoolSize = BufferPool_GetPoolSize(pool);
	if (PoolSize != 3)
	{
		printf("BufferPool_GetPoolSize failure: Actual: %d Expected: %d\n", PoolSize, 3);
		goto fail;
	}

	BufferSize = BufferPool_GetBufferSize(pool, Buffers[0]);
	if (BufferSize != DefaultSize)
	{
		printf("BufferPool_GetBufferSize failure: Actual: %d Expected: %d\n", BufferSize, DefaultSize);
		goto fail;
	}

	BufferSize = BufferPool_GetBufferSize(pool, Buffers[1]);
	if (BufferSize != DefaultSize)
	{
		printf("BufferPool_GetBufferSize failure: Actual: %d Expected: %d\n", BufferSize, DefaultSize);
		goto fail;
	}

	BufferSize = BufferPool_GetBufferSize(pool, Buffers[2]);
	if (BufferSize != 2048)
	{
		printf("BufferPool_GetBufferSize failure: Actual: %d Expected: %d\n", BufferSize, 2048);
		goto fail;
	}

	BufferPool_Return(pool, Buffers[1]);

	PoolSize = BufferPool_GetPoolSize(pool);
	if (PoolSize != 2)
	{
		printf("BufferPool_GetPoolSize failure: Actual: %d Expected: %d\n", PoolSize, 2);
		goto fail;
	}

	BufferPool_Clear(pool);

	PoolSize = BufferPool_GetPoolSize(pool);
	if (PoolSize != 0)
	{
		printf("BufferPool_GetPoolSize failure: Actual: %d Expected: %d\n", PoolSize, 0);
		goto fail;
	}

	BufferPool_Free(pool);
	return 0;

fail:
	/* BufferPool_Free also releases any buffers still owned by the pool */
	BufferPool_Free(pool);
	return -1;
}
/* Handle a TSMM_VIDEO_DATA PDU: accumulate the fragments of the current
 * presentation's sample into a stream and, once the last fragment has
 * arrived, H.264-decode it and either display the frame immediately or
 * enqueue it for later presentation.
 *
 * Returns CHANNEL_RC_OK even when the packet is ignored (no or mismatched
 * presentation, decoder error) so the channel keeps running; only memory
 * exhaustion is reported as CHANNEL_RC_NO_MEMORY. */
static UINT video_VideoData(VideoClientContext* context, TSMM_VIDEO_DATA *data)
{
	VideoClientContextPriv *priv = context->priv;
	PresentationContext *presentation;
	int status;

	presentation = priv->currentPresentation;
	if (!presentation)
	{
		WLog_ERR(TAG, "no current presentation");
		return CHANNEL_RC_OK;
	}

	if (presentation->PresentationId != data->PresentationId)
	{
		WLog_ERR(TAG, "current presentation id=%d doesn't match data id=%d",
				presentation->PresentationId, data->PresentationId);
		return CHANNEL_RC_OK;
	}

	/* append this fragment to the sample being accumulated */
	if (!Stream_EnsureRemainingCapacity(presentation->currentSample, data->cbSample))
	{
		WLog_ERR(TAG, "unable to expand the current packet");
		return CHANNEL_RC_NO_MEMORY;
	}

	Stream_Write(presentation->currentSample, data->pSample, data->cbSample);

	/* last fragment of the sample -> decode and present */
	if (data->CurrentPacketIndex == data->PacketsInSample)
	{
		H264_CONTEXT *h264 = presentation->h264;
		UINT64 startTime = GetTickCount64(), timeAfterH264;
		MAPPED_GEOMETRY *geom = presentation->geometry;

		Stream_SealLength(presentation->currentSample);
		Stream_SetPosition(presentation->currentSample, 0);

		status = h264->subsystem->Decompress(h264, Stream_Pointer(presentation->currentSample),
				Stream_Length(presentation->currentSample));
		if (status == 0)
			return CHANNEL_RC_OK;
		/* decoder errors are swallowed to keep the channel alive */
		if (status < 0)
			return CHANNEL_RC_OK;

		timeAfterH264 = GetTickCount64();
		if (data->SampleNumber == 1)
		{
			/* first sample of the presentation anchors the publish clock */
			presentation->lastPublishTime = startTime;
		}

		/* hnsDuration is in 100ns units; convert to milliseconds */
		presentation->lastPublishTime += (data->hnsDuration / 10000);
		if (presentation->lastPublishTime <= timeAfterH264 + 10)
		{
			int dropped = 0;

			/* if the frame is to be published in less than 10 ms, let's consider it's now */
			yuv_to_rgb(presentation, presentation->surfaceData);
			context->showSurface(context, presentation->surface);
			priv->publishedFrames++;

			/* cleanup previously scheduled frames */
			EnterCriticalSection(&priv->framesLock);
			while (Queue_Count(priv->frames) > 0)
			{
				VideoFrame *frame = Queue_Dequeue(priv->frames);
				if (frame)
				{
					priv->droppedFrames++;
					VideoFrame_free(&frame);
					dropped++;
				}
			}
			LeaveCriticalSection(&priv->framesLock);

			if (dropped)
				WLog_DBG(TAG, "showing frame (%d dropped)", dropped);
		}
		else
		{
			/* frame is due later: convert now, enqueue for the timer */
			BOOL enqueueResult;
			VideoFrame *frame = calloc(1, sizeof(*frame));
			if (!frame)
			{
				WLog_ERR(TAG, "unable to create frame");
				return CHANNEL_RC_NO_MEMORY;
			}

			mappedGeometryRef(geom);

			frame->presentation = presentation;
			frame->publishTime = presentation->lastPublishTime;
			frame->geometry = geom;
			frame->w = presentation->SourceWidth;
			frame->h = presentation->SourceHeight;

			/* 4 bytes per pixel RGB backing store for the frame */
			frame->surfaceData = BufferPool_Take(priv->surfacePool, frame->w * frame->h * 4);
			if (!frame->surfaceData)
			{
				WLog_ERR(TAG, "unable to allocate frame data");
				mappedGeometryUnref(geom);
				free(frame);
				return CHANNEL_RC_NO_MEMORY;
			}

			if (!yuv_to_rgb(presentation, frame->surfaceData))
			{
				WLog_ERR(TAG, "error during YUV->RGB conversion");
				BufferPool_Return(priv->surfacePool, frame->surfaceData);
				mappedGeometryUnref(geom);
				free(frame);
				return CHANNEL_RC_NO_MEMORY;
			}

			/* the queued frame holds a reference on the presentation;
			 * released by VideoFrame_free */
			InterlockedIncrement(&presentation->refCounter);

			EnterCriticalSection(&priv->framesLock);
			enqueueResult = Queue_Enqueue(priv->frames, frame);
			LeaveCriticalSection(&priv->framesLock);

			if (!enqueueResult)
			{
				WLog_ERR(TAG, "unable to enqueue frame");
				VideoFrame_free(&frame);
				return CHANNEL_RC_NO_MEMORY;
			}

			WLog_DBG(TAG, "scheduling frame in %"PRIu32" ms", (frame->publishTime-startTime));
		}
	}

	return CHANNEL_RC_OK;
}
/* Allocate and initialize a PresentationContext: H.264 decoder, sample
 * accumulation stream, pooled surface pixel buffer, display surface and
 * YUV decoder.  On any failure the already-acquired resources are
 * released in reverse order via the goto chain and NULL is returned.
 * The new context starts with a reference count of 1. */
static PresentationContext *PresentationContext_new(VideoClientContext *video, BYTE PresentationId,
		UINT32 x, UINT32 y, UINT32 width, UINT32 height)
{
	VideoClientContextPriv *priv = video->priv;
	PresentationContext *presentation = calloc(1, sizeof(*presentation));

	if (!presentation)
		return NULL;

	presentation->video = video;
	presentation->PresentationId = PresentationId;

	presentation->h264 = h264_context_new(FALSE);
	if (!presentation->h264)
	{
		WLog_ERR(TAG, "unable to create a h264 context");
		goto fail_h264;
	}
	h264_context_reset(presentation->h264, width, height);

	presentation->currentSample = Stream_New(NULL, 4096);
	if (!presentation->currentSample)
	{
		WLog_ERR(TAG, "unable to create current packet stream");
		goto fail_stream;
	}

	/* 4 bytes per pixel backing store for the surface */
	presentation->surfaceData = BufferPool_Take(priv->surfacePool, width * height * 4);
	if (!presentation->surfaceData)
	{
		WLog_ERR(TAG, "unable to allocate surfaceData");
		goto fail_surface_data;
	}

	presentation->surface = video->createSurface(video, presentation->surfaceData,
			x, y, width, height);
	if (!presentation->surface)
	{
		WLog_ERR(TAG, "unable to create surface");
		goto fail_surface;
	}

	presentation->yuv = yuv_context_new(FALSE);
	if (!presentation->yuv)
	{
		WLog_ERR(TAG, "unable to create YUV decoder");
		goto fail_yuv;
	}
	yuv_context_reset(presentation->yuv, width, height);

	presentation->refCounter = 1;
	return presentation;

	/* unwind in strict reverse order of acquisition */
fail_yuv:
	video->deleteSurface(video, presentation->surface);
fail_surface:
	BufferPool_Return(priv->surfacePool, presentation->surfaceData);
fail_surface_data:
	Stream_Free(presentation->currentSample, TRUE);
fail_stream:
	h264_context_free(presentation->h264);
fail_h264:
	free(presentation);
	return NULL;
}
/* stride is bytes between rows in the output buffer. */ BOOL rfx_decode_rgb(RFX_CONTEXT* context, wStream* data_in, int y_size, const UINT32* y_quants, int cb_size, const UINT32* cb_quants, int cr_size, const UINT32* cr_quants, BYTE* rgb_buffer, int stride) { INT16* pSrcDst[3]; static const prim_size_t roi_64x64 = { 64, 64 }; const primitives_t *prims = primitives_get(); PROFILER_ENTER(context->priv->prof_rfx_decode_rgb); pSrcDst[0] = (INT16*)((BYTE*)BufferPool_Take(context->priv->BufferPool, -1) + 16); /* y_r_buffer */ pSrcDst[1] = (INT16*)((BYTE*)BufferPool_Take(context->priv->BufferPool, -1) + 16); /* cb_g_buffer */ pSrcDst[2] = (INT16*)((BYTE*)BufferPool_Take(context->priv->BufferPool, -1) + 16); /* cr_b_buffer */ #if 0 if (context->priv->UseThreads) { PTP_WORK work_objects[3]; RFX_COMPONENT_WORK_PARAM params[3]; params[0].context = context; params[0].quantization_values = y_quants; params[0].buffer = stream_get_tail(data_in); params[0].capacity = y_size; params[0].buffer = pSrcDst[0]; stream_seek(data_in, y_size); params[1].context = context; params[1].quantization_values = cb_quants; params[1].buffer = stream_get_tail(data_in); params[1].capacity = cb_size; params[1].buffer = pSrcDst[1]; stream_seek(data_in, cb_size); params[2].context = context; params[2].quantization_values = cr_quants; params[2].buffer = stream_get_tail(data_in); params[2].capacity = cr_size; params[2].buffer = pSrcDst[2]; stream_seek(data_in, cr_size); work_objects[0] = CreateThreadpoolWork((PTP_WORK_CALLBACK) rfx_decode_component_work_callback, (void*) ¶ms[0], &context->priv->ThreadPoolEnv); work_objects[1] = CreateThreadpoolWork((PTP_WORK_CALLBACK) rfx_decode_component_work_callback, (void*) ¶ms[1], &context->priv->ThreadPoolEnv); work_objects[2] = CreateThreadpoolWork((PTP_WORK_CALLBACK) rfx_decode_component_work_callback, (void*) ¶ms[2], &context->priv->ThreadPoolEnv); SubmitThreadpoolWork(work_objects[0]); SubmitThreadpoolWork(work_objects[1]); SubmitThreadpoolWork(work_objects[2]); 
WaitForThreadpoolWorkCallbacks(work_objects[0], FALSE); WaitForThreadpoolWorkCallbacks(work_objects[1], FALSE); WaitForThreadpoolWorkCallbacks(work_objects[2], FALSE); } else #endif { if (stream_get_left(data_in) < y_size+cb_size+cr_size) { DEBUG_WARN("rfx_decode_rgb: packet too small for y_size+cb_size+cr_size"); return FALSE; } rfx_decode_component(context, y_quants, stream_get_tail(data_in), y_size, pSrcDst[0]); /* YData */ stream_seek(data_in, y_size); rfx_decode_component(context, cb_quants, stream_get_tail(data_in), cb_size, pSrcDst[1]); /* CbData */ stream_seek(data_in, cb_size); rfx_decode_component(context, cr_quants, stream_get_tail(data_in), cr_size, pSrcDst[2]); /* CrData */ stream_seek(data_in, cr_size); } prims->yCbCrToRGB_16s16s_P3P3((const INT16**) pSrcDst, 64 * sizeof(INT16), pSrcDst, 64 * sizeof(INT16), &roi_64x64); PROFILER_ENTER(context->priv->prof_rfx_decode_format_rgb); rfx_decode_format_rgb(pSrcDst[0], pSrcDst[1], pSrcDst[2], context->pixel_format, rgb_buffer, stride); PROFILER_EXIT(context->priv->prof_rfx_decode_format_rgb); PROFILER_EXIT(context->priv->prof_rfx_decode_rgb); BufferPool_Return(context->priv->BufferPool, (BYTE*)pSrcDst[0] - 16); BufferPool_Return(context->priv->BufferPool, (BYTE*)pSrcDst[1] - 16); BufferPool_Return(context->priv->BufferPool, (BYTE*)pSrcDst[2] - 16); return TRUE; }