/*
 *  ======== call ========
 *  Skeleton-side dispatcher for the VIDDEC2FRONT codec class.
 *
 *  Runs on the remote core: decodes the marshalled VISA message and invokes
 *  the corresponding local VIDDEC2FRONT_process()/VIDDEC2FRONT_control()
 *  call.  All cache maintenance in this skeleton is currently disabled
 *  (the #if 0 regions below are retained for reference).
 *
 *  Parameters:
 *      visaHandle - VISA handle; cast to a VIDDEC2FRONT_Handle.
 *      visaMsg    - marshalled message; cast to a _VIDDEC2FRONT_Msg.
 *
 *  Returns VISA_EOK always; the per-call codec status is carried back to
 *  the stub in msg->visa.status.
 */
static VISA_Status call(VISA_Handle visaHandle, VISA_Msg visaMsg)
{
    _VIDDEC2FRONT_Msg *msg = (_VIDDEC2FRONT_Msg *)visaMsg;
    VIDDEC2FRONT_Handle handle = (VIDDEC2FRONT_Handle)visaHandle;

    /* These locals are only needed by the disabled (#if 0) cache-management
     * code below; they're commented out to avoid unused-variable warnings.
     */
//    Int i, j;
//    XDM1_BufDesc inBufs;
//    XDM_BufDesc outBufs;
    IVIDDEC2FRONT_OutArgs *pOutArgs;
    IVIDDEC2FRONT_Status *pStatus;
//    IVIDDEC2_CodecClassConfig *codecClassConfig;
//    Int numBufs;

#if 0
    /* get stub/skeleton config data; can be NULL (for old codecs) */
    codecClassConfig = (IVIDDEC2_CodecClassConfig *)
        VISA_getCodecClassConfig( visaHandle );
#endif

    /* perform the requested VIDDEC2 operation by parsing message. */
    switch (msg->visa.cmd) {
        case _VIDDEC2FRONT_CPROCESS: {
#if 0
            /*
             * Disabled cache maintenance, retained for reference:
             * invalidate every in/out buffer before the algorithm reads
             * or writes it, unless the codec class config opts a given
             * buffer out.
             */
            /* unmarshall inBufs and outBufs */
            inBufs = msg->cmd.process.inBufs;
            outBufs.bufs = msg->cmd.process.outBufs;
            outBufs.numBufs = msg->cmd.process.numOutBufs;
            outBufs.bufSizes = msg->cmd.process.outBufSizes;

            /* invalidate cache for all input buffers */
            for (i = 0, numBufs = 0; i < XDM_MAX_IO_BUFFERS; i++) {
                if (inBufs.descs[i].buf != NULL) {
                    /* valid member of sparse array,
                     * invalidate it unless user configured it not to
                     */
                    if (codecClassConfig != NULL &&
                            codecClassConfig->manageInBufsCache[i] == FALSE) {
                        /* do nothing, i.e. don't invalidate */
                    }
                    else {
                        Memory_cacheInv(inBufs.descs[i].buf,
                                inBufs.descs[i].bufSize);
                    }
                    /* early exit once every valid (sparse) buf was seen */
                    if (++numBufs == inBufs.numBufs) {
                        break;
                    }
                }
            }

            /* invalidate cache for all output buffers */
            for (i = 0, numBufs = 0; i < XDM_MAX_IO_BUFFERS; i++) {
                if (outBufs.bufs[i] != NULL) {
                    /* valid member of sparse array,
                     * invalidate it unless user configured it not to
                     */
                    if (codecClassConfig != NULL &&
                            codecClassConfig->manageOutBufsCache[i] == FALSE) {
                        /* do nothing, i.e. don't invalidate */
                    }
                    else {
                        Memory_cacheInv(outBufs.bufs[i], outBufs.bufSizes[i]);
                    }
                    if (++numBufs == outBufs.numBufs) {
                        break;
                    }
                }
            }
#endif
            /* unmarshall outArgs based on the "size" of inArgs: outArgs is
             * marshalled immediately after the variable-sized inArgs.
             */
            pOutArgs = (IVIDDEC2FRONT_OutArgs *)((UInt)(&(msg->cmd.process.inArgs)) +
                    msg->cmd.process.inArgs.size);

            /*
             * Note, there's no need to invalidate cache for
             * pOutArgs->decodedBuf bufs nor pOutArgs->displayBufs
             * bufs as the app doesn't provide OUT buffers to the
             * algorithm via these fields.
             */

            /* make the process call */
            msg->visa.status = VIDDEC2FRONT_process(handle,
                    &(msg->cmd.process.inArgs), &(msg->cmd.process.context),
                    pOutArgs);

#if 0
            /*
             * Disabled, retained for reference:
             * Writeback cache for all output buffers:
             *   - .decodedBufs
             *   - .displayBufs
             *   - .mbDataBuf
             */
            for (i = 0; (i < pOutArgs->decodedBufs.numBufs) &&
                    (i < IVIDEO_MAX_YUV_BUFFERS); i++) {
                if ((pOutArgs->decodedBufs.bufDesc[i].buf != NULL) &&
                        (XDM_ISACCESSMODE_WRITE(
                                pOutArgs->decodedBufs.bufDesc[i].accessMask))) {
                    Memory_cacheWb(pOutArgs->decodedBufs.bufDesc[i].buf,
                            pOutArgs->decodedBufs.bufDesc[i].bufSize);
                }
                /*
                 * Since we've cacheWb this buffer, we arguably should
                 * reflect this cache state and clear the WRITE bit in
                 * the .accessMask field.  However, we know the stub
                 * doesn't propogate this field to the calling app, so
                 * this extra buffer management detail isn't necessary:
                 *
                 * XDM_CLEARACCESSMODE_WRITE(
                 *     outArgs->decodedBufs.bufDesc[i].accessMask);
                 */
            }

            /* NOTE(review): outputID[i] is read *before* the
             * i < IVIDDEC2_MAX_IO_BUFFERS bound is tested; if this code is
             * ever re-enabled, swap the two tests to avoid an out-of-bounds
             * read when every entry is non-zero.
             */
            for (i = 0; (pOutArgs->outputID[i] != 0) &&
                    (i < IVIDDEC2_MAX_IO_BUFFERS); i++) {
                for (j = 0; j < pOutArgs->displayBufs[i].numBufs; j++) {
                    if ((pOutArgs->displayBufs[i].bufDesc[j].buf != NULL) &&
                            (XDM_ISACCESSMODE_WRITE(
                                pOutArgs->displayBufs[i].bufDesc[j].accessMask))) {
                        Memory_cacheWb(pOutArgs->displayBufs[i].bufDesc[j].buf,
                                pOutArgs->displayBufs[i].bufDesc[j].bufSize);
                        /*
                         * Since we've cacheWb this buffer, we arguably should
                         * reflect this cache state and clear the WRITE bit in
                         * the .accessMask field.  However, we know the stub
                         * doesn't propogate this field to the calling app, so
                         * this extra buffer management detail isn't necessary:
                         *
                         * XDM_CLEARACCESSMODE_WRITE(
                         *     outArgs->displayBufs.bufDesc[i].accessMask);
                         */
                    }
                }
            }

            if ((pOutArgs->outputMbDataID != 0) &&
                    (pOutArgs->mbDataBuf.buf != NULL) &&
                    (XDM_ISACCESSMODE_WRITE(pOutArgs->mbDataBuf.accessMask))) {
                Memory_cacheWb(pOutArgs->mbDataBuf.buf,
                        pOutArgs->mbDataBuf.bufSize);
                /*
                 * Since we've cacheWb this buffer, we arguably should
                 * reflect this cache state and clear the WRITE bit in
                 * the .accessMask field.  However, we know the stub
                 * doesn't propogate this field to the calling app, so
                 * this extra buffer management detail isn't necessary:
                 *
                 * XDM_CLEARACCESSMODE_WRITE(outArgs->mbDataBuf.accessMask);
                 */
            }
#endif
            /*
             * Note that any changes to individual outBufs[i] values made by
             * the codec will automatically update msg->cmd.process.outBufs
             * as we pass the outBufs array by reference.
             */
            break;
        }

        case _VIDDEC2FRONT_CCONTROL: {
            /* unmarshall status based on the "size" of params: status is
             * marshalled immediately after the variable-sized params.
             */
            pStatus = (IVIDDEC2FRONT_Status *)((UInt)(&(msg->cmd.control.params)) +
                    msg->cmd.control.params.size);

#if 0
            /* invalidate data buffer */
            if (pStatus->data.buf != NULL) {
                Memory_cacheInv(pStatus->data.buf, pStatus->data.bufSize);
            }
#endif

            msg->visa.status = VIDDEC2FRONT_control(handle, msg->cmd.control.id,
                    &(msg->cmd.control.params), &(msg->cmd.control.context),
                    pStatus);

#if 0
            /* writeback data buffer */
            if ((pStatus->data.buf != NULL) &&
                    XDM_ISACCESSMODE_WRITE(pStatus->data.accessMask)) {
                Memory_cacheWb(pStatus->data.buf, pStatus->data.bufSize);
                /*
                 * Since we've cacheWb this buffer, we arguably should
                 * reflect this cache state and clear the WRITE bit in
                 * the .accessMask field.  However, we know the stub
                 * doesn't propogate this field to the calling app, so
                 * this extra buffer management detail isn't necessary:
                 *
                 * XDM_CLEARACCESSMODE_WRITE(pStatus->data.accessMask);
                 */
            }
#endif
            break;
        }

        default: {
            /* unknown command - report failure to the stub */
            msg->visa.status = VISA_EFAIL;
            break;
        }
    }

    return (VISA_EOK);
}
/*
 *  ======== call ========
 *  Skeleton-side dispatcher for the VIDTRANSCODE codec class.
 *
 *  Unmarshals the VISA message, performs cache maintenance on the shared
 *  buffers according to SKEL_cachingPolicy, and invokes the local
 *  VIDTRANSCODE_process()/VIDTRANSCODE_control() call.
 *
 *  Parameters:
 *      visaHandle - VISA handle; cast to a VIDTRANSCODE_Handle.
 *      visaMsg    - marshalled message; cast to a _VIDTRANSCODE_Msg.
 *
 *  Returns VISA_EOK always; the per-call codec status is carried back to
 *  the stub in msg->visa.status.
 */
static VISA_Status call(VISA_Handle visaHandle, VISA_Msg visaMsg)
{
    _VIDTRANSCODE_Msg *msg = (_VIDTRANSCODE_Msg *)visaMsg;
    VIDTRANSCODE_Handle handle = (VIDTRANSCODE_Handle)visaHandle;
    Int i;
    XDM1_BufDesc inBufs;
    XDM_BufDesc outBufs;
    IVIDTRANSCODE_OutArgs *pOutArgs;
    IVIDTRANSCODE_Status *pStatus;
    Int numBufs;

    /* perform the requested VIDTRANSCODE operation by parsing message. */
    switch (msg->visa.cmd) {
        case _VIDTRANSCODE_CPROCESS: {
            /* unmarshal inBufs and outBufs */
            inBufs = msg->cmd.process.inBufs;
            outBufs.bufs = msg->cmd.process.outBufs;
            outBufs.numBufs = msg->cmd.process.numOutBufs;
            outBufs.bufSizes = msg->cmd.process.outBufSizes;

            if (SKEL_cachingPolicy == SKEL_LOCALBUFFERINVWB) {
                /* invalidate cache for all input buffers */
                for (i = 0, numBufs = 0; i < XDM_MAX_IO_BUFFERS; i++) {
                    if (inBufs.descs[i].buf != NULL) {
                        /* valid member of sparse array, manage it */
                        Memory_cacheInv(inBufs.descs[i].buf,
                                inBufs.descs[i].bufSize);
                        /* early exit once all valid bufs have been seen */
                        if (++numBufs == inBufs.numBufs) {
                            break;
                        }
                    }
                }

                /* invalidate cache for all output buffers */
                for (i = 0, numBufs = 0; i < XDM_MAX_IO_BUFFERS; i++) {
                    if (outBufs.bufs[i] != NULL) {
                        /* valid member of sparse array, manage it */
                        Memory_cacheInv(outBufs.bufs[i], outBufs.bufSizes[i]);
                        if (++numBufs == outBufs.numBufs) {
                            break;
                        }
                    }
                }
            }

            /* unmarshall outArgs based on the "size" of inArgs: outArgs is
             * marshalled immediately after the variable-sized inArgs.
             */
            pOutArgs = (IVIDTRANSCODE_OutArgs *)((UInt)(&(msg->cmd.process.inArgs)) +
                    msg->cmd.process.inArgs.size);

            /*
             * Note, there's no need to invalidate cache for
             * pOutArgs->encodedBuf bufs as they're
             * not _really_ OUT buffers.  Rather they're references to
             * the _real_ OUT buffers that are provided in outBufs - which
             * were already invalidated above.
             */

            /* make the process call */
            msg->visa.status = VIDTRANSCODE_process(handle,
                    &inBufs, &outBufs, &(msg->cmd.process.inArgs), pOutArgs);

            if (SKEL_cachingPolicy == SKEL_WBINVALL) {
                Memory_cacheWbInvAll();
            }
            else if (SKEL_cachingPolicy == SKEL_LOCALBUFFERINVWB) {
                /* writeback cache for encoded buffers */
                for (i = 0; i < IVIDTRANSCODE_MAXOUTSTREAMS; i++) {
                    /*
                     * Fix: test this stream's own outputID[i] (was
                     * outputID[0], which skipped the writeback of streams
                     * 1..N - leaving stale cache contents - whenever stream
                     * 0 produced no output).  Per xDM, outputID[i]
                     * corresponds to encodedBuf[i].
                     */
                    if ((pOutArgs->outputID[i] != 0) &&
                            (pOutArgs->encodedBuf[i].buf != NULL) &&
                            XDM_ISACCESSMODE_WRITE(
                                    pOutArgs->encodedBuf[i].accessMask)) {
                        Memory_cacheWb(pOutArgs->encodedBuf[i].buf,
                                pOutArgs->encodedBuf[i].bufSize);

                        /*
                         * Since we've cacheWb this buffer, we arguably should
                         * reflect this cache state and clear the WRITE bit in
                         * the .accessMask field.  However, we know the stub
                         * doesn't propogate this field to the calling app, so
                         * this extra buffer management detail isn't necessary:
                         *
                         * XDM_CLEARACCESSMODE_WRITE(
                         *     pOutArgs.encodedBuf[i].accessMask);
                         */
                    }
                }
            }

            /*
             * Note that any changes to individual outBufs[i] values made by
             * the codec will automatically update msg->cmd.process.outBufs
             * as we pass the outBufs array by reference.
             */
            break;
        }

        case _VIDTRANSCODE_CCONTROL: {
            /* unmarshall status based on the "size" of params: status is
             * marshalled immediately after the variable-sized dynParams.
             */
            pStatus = (IVIDTRANSCODE_Status *)((UInt)(&(msg->cmd.control.dynParams)) +
                    msg->cmd.control.dynParams.size);

            /* invalidate data buffer */
            if (pStatus->data.buf != NULL) {
                Memory_cacheInv(pStatus->data.buf, pStatus->data.bufSize);
            }

            msg->visa.status = VIDTRANSCODE_control(handle, msg->cmd.control.id,
                    &(msg->cmd.control.dynParams), pStatus);

            /* writeback data buffer */
            if ((pStatus->data.buf != NULL) &&
                    XDM_ISACCESSMODE_WRITE(pStatus->data.accessMask)) {
                Memory_cacheWb(pStatus->data.buf, pStatus->data.bufSize);

                /* The WRITE bit in .accessMask is deliberately left set;
                 * the stub doesn't propagate .accessMask back to the app.
                 */
            }
            break;
        }

        default: {
            /* unknown command - report failure to the stub */
            msg->visa.status = VISA_EFAIL;
            break;
        }
    }

    return (VISA_EOK);
}
/*
 *  ======== encode_decode ========
 *  Read raw frames from 'in', run each through the VIDENC1 encoder and
 *  then the VIDDEC2 decoder, and write the decoded result to 'out'.
 *
 *  Uses the file-scope buffers inBuf/encodedBuf/outBuf/versionBuf and the
 *  size constants NSAMPLES/IFRAMESIZE/EFRAMESIZE/OFRAMESIZE/MAXVERSIONSIZE
 *  (declared elsewhere in this file).  Validates the codecs' buffer
 *  requirements via XDM_GETSTATUS before processing; returns early on any
 *  control failure or requirement conflict.
 */
static Void encode_decode(VIDENC1_Handle enc, VIDDEC2_Handle dec, FILE *in,
        FILE *out)
{
    Int n;
    Int32 status;

    VIDDEC2_InArgs decInArgs;
    VIDDEC2_OutArgs decOutArgs;
    VIDDEC2_DynamicParams decDynParams;
    VIDDEC2_Status decStatus;

    VIDENC1_InArgs encInArgs;
    VIDENC1_OutArgs encOutArgs;
    VIDENC1_DynamicParams encDynParams;
    VIDENC1_Status encStatus;

    IVIDEO1_BufDescIn encInBufDesc;
    XDM_BufDesc encOutBufDesc;
    XDAS_Int8 *encoded[XDM_MAX_IO_BUFFERS];
    XDAS_Int32 encBufSizes[XDM_MAX_IO_BUFFERS];

    XDM1_BufDesc decInBufDesc;
    XDM_BufDesc decOutBufDesc;
    XDAS_Int8 *dst[XDM_MAX_IO_BUFFERS];
    XDAS_Int32 outBufSizes[XDM_MAX_IO_BUFFERS];

    /* clear and initialize the buffer descriptors */
    memset(encoded, 0, sizeof(encoded[0]) * XDM_MAX_IO_BUFFERS);
    memset(dst, 0, sizeof(dst[0]) * XDM_MAX_IO_BUFFERS);

    /* single-buffer pipeline: inBuf -> encodedBuf -> outBuf */
    encoded[0] = encodedBuf;
    dst[0] = outBuf;

    encInBufDesc.numBufs = encOutBufDesc.numBufs = decInBufDesc.numBufs =
            decOutBufDesc.numBufs = 1;

    encOutBufDesc.bufSizes = encBufSizes;
    decOutBufDesc.bufSizes = outBufSizes;

    /* NOTE(review): all four sizes are set to NSAMPLES here, while the
     * requirement checks below compare against IFRAMESIZE/EFRAMESIZE/
     * OFRAMESIZE - presumably these constants are all sized consistently;
     * confirm against their definitions.
     */
    encInBufDesc.bufDesc[0].bufSize = encBufSizes[0] =
            decInBufDesc.descs[0].bufSize = outBufSizes[0] = NSAMPLES;

    encInBufDesc.bufDesc[0].buf = inBuf;
    encOutBufDesc.bufs = encoded;
    decInBufDesc.descs[0].buf = encoded[0];
    decOutBufDesc.bufs = dst;

    encInBufDesc.frameWidth = 0;   /* TODO */
    encInBufDesc.frameHeight = 0;  /* TODO */
    encInBufDesc.framePitch = 0;   /* TODO */

    /* initialize all "sized" fields (XDM binary-compatibility convention) */
    encInArgs.size = sizeof(encInArgs);
    decInArgs.size = sizeof(decInArgs);
    encOutArgs.size = sizeof(encOutArgs);
    decOutArgs.size = sizeof(decOutArgs);
    encDynParams.size = sizeof(encDynParams);
    decDynParams.size = sizeof(decDynParams);
    encStatus.size = sizeof(encStatus);
    decStatus.size = sizeof(decStatus);

    /*
     * Note that we use versionBuf in both the encoder and decoder.  In this
     * application, this is okay, as there is always only one user of
     * the buffer.  Not all applications can make this assumption.
     */
    encStatus.data.buf = decStatus.data.buf = versionBuf;
    encStatus.data.bufSize = decStatus.data.bufSize = MAXVERSIONSIZE;

    /* if the codecs support it, dump their versions */
    status = VIDDEC2_control(dec, XDM_GETVERSION, &decDynParams, &decStatus);
    Log_print1(Diags_USER1, "[+1] Decoder version: %s",
            (IArg)((status == VIDDEC2_EOK ?
                    ((char *)decStatus.data.buf) : "[unknown]")));

    status = VIDENC1_control(enc, XDM_GETVERSION, &encDynParams, &encStatus);
    Log_print1(Diags_USER1, "[+1] Encoder version: %s",
            (IArg)((status == VIDENC1_EOK ?
                    ((char *)encStatus.data.buf) : "[unknown]")));

    /*
     * This app expects the encoder to provide 1 buf in and get 1 buf out,
     * and the buf sizes of the in and out buffer must be able to handle
     * NSAMPLES bytes of data.
     */
    status = VIDENC1_control(enc, XDM_GETSTATUS, &encDynParams, &encStatus);
    if (status != VIDENC1_EOK) {
        /* failure, report error and exit */
        Log_print1(Diags_USER7, "[+7] encode control status = %ld",
                (IArg)status);
        return;
    }

    /* Validate this encoder codec will meet our buffer requirements */
    if ((encInBufDesc.numBufs < encStatus.bufInfo.minNumInBufs) ||
            (IFRAMESIZE < encStatus.bufInfo.minInBufSize[0]) ||
            (encOutBufDesc.numBufs < encStatus.bufInfo.minNumOutBufs) ||
            (EFRAMESIZE < encStatus.bufInfo.minOutBufSize[0])) {
        /* failure, report error and exit */
        Log_print0(Diags_USER7,
                "[+7] Error:  encoder codec feature conflict");
        return;
    }

    status = VIDDEC2_control(dec, XDM_GETSTATUS, &decDynParams, &decStatus);
    if (status != VIDDEC2_EOK) {
        /* failure, report error and exit */
        Log_print1(Diags_USER7, "[+7] decode control status = %ld",
                (IArg)status);
        return;
    }

    /* Validate this decoder codec will meet our buffer requirements */
    if ((decInBufDesc.numBufs < decStatus.bufInfo.minNumInBufs) ||
            (EFRAMESIZE < decStatus.bufInfo.minInBufSize[0]) ||
            (decOutBufDesc.numBufs < decStatus.bufInfo.minNumOutBufs) ||
            (OFRAMESIZE < decStatus.bufInfo.minOutBufSize[0])) {
        /* failure, report error and exit */
        Log_print0(Diags_USER7, "[+7] App-> ERROR: "
                "decoder does not meet buffer requirements.");
        return;
    }

    /*
     * Read complete frames from in, encode, decode, and write to out.
     */
    for (n = 0; fread(inBuf, IFRAMESIZE, 1, in) == 1; n++) {

#ifdef CACHE_ENABLED
#if defined(xdc_target__isaCompatible_64P) || \
    defined(xdc_target__isaCompatible_64T)
        /*
         * fread() on this processor is implemented using CCS's stdio, which
         * is known to write into the cache, not physical memory.  To meet
         * XDAIS DMA Rule 7, we must writeback the cache into physical
         * memory.  Also, per DMA Rule 7, we must invalidate the buffer's
         * cache before providing it to any xDAIS algorithm.
         */
        Memory_cacheWbInv(inBuf, IFRAMESIZE);
#else
#error Unvalidated config - add appropriate fread-related cache maintenance
#endif

        /* Per DMA Rule 7, our output buffer cache lines must be cleaned */
        Memory_cacheInv(encodedBuf, EFRAMESIZE);
#endif

        Log_print1(Diags_USER1, "[+1] App-> Processing frame %d...", (IArg)n);

        /*
         * Encode the frame.
         *
         * Note, inputID == 0 is an error.  This example doesn't account
         * for the case where 'n + 1' wraps to zero.
         */
        encInArgs.inputID = n + 1;
        status = VIDENC1_process(enc, &encInBufDesc, &encOutBufDesc,
                &encInArgs, &encOutArgs);

        Log_print2(Diags_USER2,
                "[+2] App-> Encoder frame %d process returned - 0x%x)",
                (IArg)n, (IArg)status);

        if (status != VIDENC1_EOK) {
            Log_print3(Diags_USER7,
                    "[+7] App-> Encoder frame %d processing FAILED, status = 0x%x, "
                    "extendedError = 0x%x",
                    (IArg)n, (IArg)status, (IArg)(encOutArgs.extendedError));
            break;
        }

        /*
         * So far, so good.  Validate our assumption that the encoder
         * provided encodedBuf as it's encOutArgs->encodedBuf.buf.  If
         * that's not the case, we may be dealing with a codec that's
         * giving us out of order frames... and this simple app
         * doesn't support that.
         */
        if (encOutArgs.encodedBuf.buf != encodedBuf) {
            Log_print0(Diags_USER7,
                    "[+7] App-> Internal error.  Unsupported encoder");
            break;
        }

#ifdef CACHE_ENABLED
        /*
         * Conditionally writeback the encoded buf from the previous
         * call.  Also, as encodedBuf is an inBuf to the next process
         * call, conditionally invalidate it as well.
         */
        if (XDM_ISACCESSMODE_WRITE(encOutArgs.encodedBuf.accessMask)) {
            Memory_cacheWbInv(encodedBuf, EFRAMESIZE);
        }

        /*
         * Per DMA Rule 7, our output buffer cache lines (for the
         * upcoming decoder) must be cleaned.
         */
        Memory_cacheInv(outBuf, OFRAMESIZE);
#endif

        /* decode the frame */
        decInArgs.numBytes = EFRAMESIZE;
        decInArgs.inputID = 1;  /* typically this varies by each frame */

        status = VIDDEC2_process(dec, &decInBufDesc, &decOutBufDesc,
                &decInArgs, &decOutArgs);

        Log_print2(Diags_USER2,
                "[+2] App-> Decoder frame %d process returned - 0x%x)",
                (IArg)n, (IArg)status);

        if (status != VIDDEC2_EOK) {
            Log_print2(Diags_USER7,
                    "[+7] App-> Decoder frame %d processing FAILED, status ="
                    " 0x%x",
                    (IArg)n, (IArg)status);
            break;
        }

        /* again, validate our assumption that we don't get out-of-order bufs */
        if (decOutArgs.decodedBufs.bufDesc[0].buf != outBuf) {
            Log_print0(Diags_USER7,
                    "[+7] App-> Internal error.  Unsupported decoder");
            break;
        }

#ifdef CACHE_ENABLED
        /* Conditionally writeback the decoded buf */
        if (XDM_ISACCESSMODE_WRITE(
                decOutArgs.decodedBufs.bufDesc[0].accessMask)) {
            Memory_cacheWb(outBuf, OFRAMESIZE);
        }
#endif

        /* write to file */
        fwrite(dst[0], OFRAMESIZE, 1, out);
    }

    Log_print1(Diags_USER1, "[+1] %d frames encoded/decoded", (IArg)n);
}
/*
 *  ======== processLoop ========
 *  Frame loop: read frames from 'in', run them through the UNIVERSAL
 *  algorithm, and write the processed output to 'out'.
 *
 *  inBuf/outBuf hold one frame each; versionBuf receives the algorithm's
 *  version string (dumped once before the loop starts).
 */
static Void processLoop(UNIVERSAL_Handle hUniversal, FILE *in, FILE *out,
        XDAS_Int8 *inBuf, XDAS_Int8 *outBuf, XDAS_Int8 *versionBuf)
{
    Int frame;
    Int32 retVal;
    UNIVERSAL_InArgs procInArgs;
    UNIVERSAL_OutArgs procOutArgs;
    UNIVERSAL_DynamicParams dynParams;
    UNIVERSAL_Status algStatus;
    XDM1_BufDesc inDesc;
    XDM1_BufDesc outDesc;
    char *version;

    /* one buffer each direction, each sized to hold a full frame */
    inDesc.numBufs = 1;
    outDesc.numBufs = 1;
    inDesc.descs[0].bufSize = NSAMPLES;
    outDesc.descs[0].bufSize = NSAMPLES;
    inDesc.descs[0].buf = inBuf;
    outDesc.descs[0].buf = outBuf;

    /* every XDM struct carries its own size (binary compat convention) */
    procInArgs.size = sizeof(procInArgs);
    procOutArgs.size = sizeof(procOutArgs);
    dynParams.size = sizeof(dynParams);
    algStatus.size = sizeof(algStatus);

    /* give the alg a single buffer to drop its version string into */
    algStatus.data.numBufs = 1;
    algStatus.data.descs[0].buf = versionBuf;
    algStatus.data.descs[0].bufSize = MAXVERSIONSIZE;
    algStatus.data.descs[1].buf = NULL;

#ifdef CACHE_ENABLED
    /* invalidate versionBuf it before the alg fills it */
    Memory_cacheInv(versionBuf, MAXVERSIONSIZE);
#endif
    retVal = UNIVERSAL_control(hUniversal, XDM_GETVERSION, &dynParams,
            &algStatus);
    if (retVal == UNIVERSAL_EOK) {
        version = (char *)algStatus.data.descs[0].buf;
    }
    else {
        version = "[unknown]";
    }
    Log_print1(Diags_USER1, "[+1] Alg version: %s", (IArg)version);

    /*
     * Process frames until the input file is exhausted (or a frame fails).
     */
    for (frame = 0; fread(inBuf, IFRAMESIZE, 1, in) == 1; frame++) {

#ifdef CACHE_ENABLED
#if defined(xdc_target__isaCompatible_64P) || \
    defined(xdc_target__isaCompatible_64T)
        /*
         * fread() on this processor goes through CCS's stdio, which writes
         * into the cache rather than physical memory.  XDAIS DMA Rule 7
         * therefore requires a writeback to physical memory, plus an
         * invalidate before handing the buffer to any XDAIS algorithm.
         */
        Memory_cacheWbInv(inBuf, IFRAMESIZE);
#else
#error Unvalidated config - add appropriate fread-related cache maintenance
#endif

        /* Per DMA Rule 7, the output buffer's cache lines must be cleaned */
        Memory_cacheInv(outBuf, OFRAMESIZE);
#endif

        Log_print1(Diags_USER1, "[+1] App-> Processing frame %d...",
                (IArg)frame);

        /* run one frame through the algorithm */
        retVal = UNIVERSAL_process(hUniversal, &inDesc, &outDesc, NULL,
                &procInArgs, &procOutArgs);

        Log_print2(Diags_USER2,
                "[+2] App-> Alg frame %d process returned - 0x%x",
                (IArg)frame, (IArg)retVal);

        if (retVal != UNIVERSAL_EOK) {
            Log_print3(Diags_USER7,
                    "[+7] App-> Alg frame %d processing FAILED, status = 0x%x, "
                    "extendedError = 0x%x",
                    (IArg)frame, (IArg)retVal,
                    (IArg)(procOutArgs.extendedError));
            break;
        }

#ifdef CACHE_ENABLED
        /*
         * Pessimistically write back the processed buf before fwrite():
         * we don't know whether the subsequent access of outBuf goes via
         * CPU/cache or DMA/physical memory.  A system that knows outBuf's
         * access mode may be able to drop this writeback.
         */
        if (XDM_ISACCESSMODE_WRITE(outDesc.descs[0].accessMask)) {
            Memory_cacheWb(outBuf, OFRAMESIZE);
        }
#endif

        /* write to file */
        fwrite(outBuf, OFRAMESIZE, 1, out);
    }

    Log_print1(Diags_USER1, "[+1] %d frames processed", (IArg)frame);
}
/*
 *  ======== analyze ========
 *  Read frames from 'in', run each through the VIDANALYTICS algorithm,
 *  and write the analysis output to 'out'.
 *
 *  Uses the file-scope buffers inBuf/outBuf/versionBuf and the size
 *  constants NSAMPLES/IFRAMESIZE/OFRAMESIZE/MAXVERSIONSIZE.  Validates the
 *  analyzer's buffer requirements via XDM_GETBUFINFO before processing;
 *  returns early (with a GT_7CLASS trace) on any control failure or
 *  requirement conflict.
 */
static Void analyze(VIDANALYTICS_Handle analyzer, FILE *in, FILE *out)
{
    Int n;
    Int32 result;
    VIDANALYTICS_InArgs inArgs;
    VIDANALYTICS_OutArgs outArgs;
    VIDANALYTICS_DynamicParams dynParams;
    VIDANALYTICS_Status status;
    XDM1_BufDesc inBufDesc;
    XDM1_BufDesc outBufDesc;

    /* clear and initialize the buffer descriptors */
    inBufDesc.numBufs = outBufDesc.numBufs = 1;
    inBufDesc.descs[0].bufSize = outBufDesc.descs[0].bufSize = NSAMPLES;
    inBufDesc.descs[0].buf = inBuf;
    outBufDesc.descs[0].buf = outBuf;

    /* initialize all "sized" fields (XDM binary-compat convention) */
    inArgs.size = sizeof(inArgs);
    outArgs.size = sizeof(outArgs);
    dynParams.size = sizeof(dynParams);
    status.size = sizeof(status);

    /* if the codecs support it, dump their versions */
    status.data.numBufs = 1;
    status.data.descs[0].buf = versionBuf;
    status.data.descs[0].bufSize = MAXVERSIONSIZE;
    result = VIDANALYTICS_control(analyzer, XDM_GETVERSION, &dynParams,
            &status);
    GT_1trace(curMask, GT_1CLASS, "Analyzer version:  %s\n",
            (result == VIDANALYTICS_EOK ?
                    ((char *)status.data.descs[0].buf) : "[unknown]"));

    /*
     * This app expects the analyzer to accept 1 buf in and provide 1 buf out,
     * and the buf sizes of the in and out buffer must be able to handle
     * NSAMPLES bytes of data.
     */
    status.data.numBufs = 0;
    status.data.descs[0].buf = NULL;
    result = VIDANALYTICS_control(analyzer, XDM_GETBUFINFO, &dynParams,
            &status);
    if (result != VIDANALYTICS_EOK) {
        /*
         * failure, report error and exit.
         *
         * Fix: trace the Int32 'result', not the VIDANALYTICS_Status struct
         * 'status' - passing a struct as a %ld varargs argument is undefined
         * behavior and printed garbage.
         */
        GT_1trace(curMask, GT_7CLASS, "VIDANALYTICS control status = %ld\n",
                result);
        return;
    }

    /* Validate this analyzer will meet our buffer requirements */
    if ((inBufDesc.numBufs < status.bufInfo.minNumInBufs) ||
            (IFRAMESIZE < status.bufInfo.minInBufSize[0]) ||
            (outBufDesc.numBufs < status.bufInfo.minNumOutBufs) ||
            (OFRAMESIZE < status.bufInfo.minOutBufSize[0])) {
        /* failure, report error and exit */
        GT_0trace(curMask, GT_7CLASS,
                "Error:  analyzer codec feature conflict\n");
        return;
    }

    /*
     * Read complete frames from in, analyze, and write to out.
     */
    for (n = 0; fread(inBuf, IFRAMESIZE, 1, in) == 1; n++) {

#ifdef CACHE_ENABLED
#ifdef xdc_target__isaCompatible_64P
        /*
         * fread() on this processor is implemented using CCS's stdio, which
         * is known to write into the cache, not physical memory.  To meet
         * xDAIS DMA Rule 7, we must writeback the cache into physical
         * memory.  Also, per DMA Rule 7, we must invalidate the buffer's
         * cache before providing it to any xDAIS algorithm.
         */
        Memory_cacheWbInv(inBuf, IFRAMESIZE);
#else
#error Unvalidated config - add appropriate fread-related cache maintenance
#endif

        /* Per DMA Rule 7, our output buffer cache lines must be cleaned */
        Memory_cacheInv(outBuf, OFRAMESIZE);
#endif

        GT_1trace(curMask, GT_1CLASS, "App-> Processing frame %d...\n", n);

        /*
         * Analyze the frame.
         */
        result = VIDANALYTICS_process(analyzer, &inBufDesc, &outBufDesc,
                &inArgs, &outArgs);

        GT_2trace(curMask, GT_2CLASS,
                "App-> Analyzer frame %d process returned - 0x%x)\n",
                n, result);

        if (result != VIDANALYTICS_EOK) {
            GT_3trace(curMask, GT_7CLASS,
                    "App-> Analyzer frame %d processing FAILED, result = 0x%x, "
                    "extendedError = 0x%x\n",
                    n, result, outArgs.extendedError);
            break;
        }

#ifdef CACHE_ENABLED
        /*
         * Conditionally writeback the analyzed buf from the previous
         * call.
         */
        if (XDM_ISACCESSMODE_WRITE(outBufDesc.descs[0].accessMask)) {
            Memory_cacheWb(outBuf, OFRAMESIZE);
        }
#endif

        /* write to file */
        fwrite(outBufDesc.descs[0].buf, OFRAMESIZE, 1, out);
    }

    GT_1trace(curMask, GT_1CLASS, "%d frames analyzed\n", n);
}
/*
 *  ======== writebackVideo2BufDesc ========
 *  Write back to physical memory every CPU-written, RAW-memory plane
 *  (image and metadata) of the given IVIDEO2_BufDesc.
 *
 *  Returns FALSE (after logging a USER7 error) when the descriptor's plane
 *  counts violate the spec limits; TRUE otherwise.
 */
static Bool writebackVideo2BufDesc(IVIDEO2_BufDesc *pBufDesc)
{
    Int i;

    /* Reject descriptors whose plane counts exceed the spec limits -
     * probably should be an assert!
     */
    if ((pBufDesc->numPlanes >= 3) || (pBufDesc->numMetaPlanes >= 3)) {
        Log_print3(Diags_USER7,
                "[+7] ERROR> pBufDesc (0x%x) has invalid .numPlanes (0x%x) and/or "
                ".numMetaPlanes (0x%x) fields!", (IArg)pBufDesc,
                pBufDesc->numPlanes, pBufDesc->numMetaPlanes);

        return (FALSE);
    }

    /* image planes */
    for (i = 0; i < pBufDesc->numPlanes; i++) {
        if ((pBufDesc->planeDesc[i].buf == NULL) ||
                !(XDM_ISACCESSMODE_WRITE(pBufDesc->planeDesc[i].accessMask))) {
            /* empty slot, or the algorithm never CPU-wrote this plane */
            continue;
        }

        if (pBufDesc->planeDesc[i].memType == XDM_MEMTYPE_RAW) {
            Memory_cacheWb(pBufDesc->planeDesc[i].buf,
                    pBufDesc->planeDesc[i].bufSize.bytes);
        }
        /* else: tiled memory - TODO:H are tiled buffers cacheable? */

        /*
         * The WRITE bit in .accessMask is deliberately left set even though
         * the buffer was just written back; the stub never propagates
         * .accessMask to the calling app, so clearing it buys nothing:
         *
         * XDM_CLEARACCESSMODE_WRITE(pBufDesc->planeDesc[i].accessMask);
         */
    }

    /* metadata planes - same treatment as the image planes above */
    for (i = 0; i < pBufDesc->numMetaPlanes; i++) {
        if ((pBufDesc->metadataPlaneDesc[i].buf == NULL) ||
                !(XDM_ISACCESSMODE_WRITE(
                        pBufDesc->metadataPlaneDesc[i].accessMask))) {
            continue;
        }

        if (pBufDesc->metadataPlaneDesc[i].memType == XDM_MEMTYPE_RAW) {
            Memory_cacheWb(pBufDesc->metadataPlaneDesc[i].buf,
                    pBufDesc->metadataPlaneDesc[i].bufSize.bytes);
        }
        /* else: tiled memory - TODO:H are tiled buffers cacheable? */

        /*
         * As above, the WRITE bit is intentionally not cleared:
         *
         * XDM_CLEARACCESSMODE_WRITE(
         *     pBufDesc->metadataPlaneDesc[i].accessMask);
         */
    }

    return (TRUE);
}
/*
 *  ======== call ========
 *  Skeleton-side dispatcher for the VIDDEC3 codec class.
 *
 *  Unmarshals the VISA message, manages cache coherency for the shared
 *  in/out buffers (honoring the optional codec class config), and invokes
 *  the local VIDDEC3_process()/VIDDEC3_control() call.
 *
 *  Returns VISA_EOK normally (the codec's own status is carried back to
 *  the stub in msg->visa.status), or VISA_EFAIL when an outArgs buffer
 *  descriptor is malformed (see writebackVideo2BufDesc()).
 */
static VISA_Status call(VISA_Handle visaHandle, VISA_Msg visaMsg)
{
    _VIDDEC3_Msg *msg = (_VIDDEC3_Msg *)visaMsg;
    VIDDEC3_Handle handle = (VIDDEC3_Handle)visaHandle;
    Int i;
    XDM2_BufDesc inBufs;
    XDM2_BufDesc outBufs;
    IVIDDEC3_OutArgs *pOutArgs;
    IVIDDEC3_Status *pStatus;
    IVIDDEC3_CodecClassConfig *codecClassConfig;
    Int numBufs;
    Bool success;

    /* get stub/skeleton config data; can be NULL (for old codecs) */
    codecClassConfig =
            (IVIDDEC3_CodecClassConfig *)VISA_getCodecClassConfig(visaHandle);

    /* perform the requested VIDDEC3 operation by parsing message. */
    switch (msg->visa.cmd) {
        case _VIDDEC3_CPROCESS: {
            /* unmarshall inBufs and outBufs */
            inBufs = msg->cmd.process.inBufs;
            outBufs = msg->cmd.process.outBufs;

            /* invalidate cache for all input buffers */
            for (i = 0, numBufs = 0; i < XDM_MAX_IO_BUFFERS; i++) {
                if (inBufs.descs[i].buf != NULL) {
                    /* valid member of sparse array,
                     * invalidate it unless user configured it not to
                     */
                    if (codecClassConfig != NULL &&
                            codecClassConfig->manageInBufsCache[i] == FALSE) {
                        /* do nothing, i.e. don't invalidate */
                    }
                    else {
                        if (inBufs.descs[i].memType == XDM_MEMTYPE_RAW) {
                            Memory_cacheInv(inBufs.descs[i].buf,
                                    inBufs.descs[i].bufSize.bytes);
                        }
                        else {
                            /* TODO:H are tiled buffers cacheable? */
                        }
                    }
                    /* early exit once every valid (sparse) buf was seen */
                    if (++numBufs == inBufs.numBufs) {
                        break;
                    }
                }
            }

            /* invalidate cache for all output buffers */
            for (i = 0, numBufs = 0; i < XDM_MAX_IO_BUFFERS; i++) {
                if (outBufs.descs[i].buf != NULL) {
                    /* valid member of sparse array,
                     * invalidate it unless user configured it not to
                     */
                    if (codecClassConfig != NULL &&
                            codecClassConfig->manageOutBufsCache[i] == FALSE) {
                        /* do nothing, i.e. don't invalidate */
                    }
                    else {
                        if (outBufs.descs[i].memType == XDM_MEMTYPE_RAW) {
                            Memory_cacheInv(outBufs.descs[i].buf,
                                    outBufs.descs[i].bufSize.bytes);
                        }
                        else {
                            /* TODO:H are tiled buffers cacheable? */
                        }
                    }
                    if (++numBufs == outBufs.numBufs) {
                        break;
                    }
                }
            }

            /* unmarshall outArgs based on the "size" of inArgs: outArgs is
             * marshalled immediately after the variable-sized inArgs.
             */
            pOutArgs = (IVIDDEC3_OutArgs *)((UInt)(&(msg->cmd.process.inArgs)) +
                    msg->cmd.process.inArgs.size);

            /*
             * Note, there's no need to invalidate cache for
             * pOutArgs->decodedBuf bufs nor pOutArgs->displayBufs
             * bufs as the app doesn't provide OUT buffers to the
             * algorithm via these fields.
             */

            /* make the process call */
            msg->visa.status = VIDDEC3_process(handle, &inBufs, &outBufs,
                    &(msg->cmd.process.inArgs), pOutArgs);

            /*
             * We probably should only be doing this if msg->visa.status
             * is IVIDDEC3_EOK or _EFAIL and .extendedError is non-fatal.
             */

            /*
             * Writeback cache for all output buffers:
             *   - .decodedBufs
             *   - .displayBufs
             */

            /*
             * ======== .decodedBufs ========
             */
            success = writebackVideo2BufDesc(&pOutArgs->decodedBufs);
            if (!success) {
                return (VISA_EFAIL);
            }

            /*
             * ======== .displayBufs ========
             *
             * Fix (both loops below): test the index bound _before_ reading
             * outputID[i].  The original order evaluated
             * pOutArgs->outputID[i] first, reading one element past the end
             * of outputID[] whenever all entries were non-zero.
             */
            /* identify which mode the displayBufs are returned as */
            if (pOutArgs->displayBufsMode == IVIDDEC3_DISPLAYBUFS_EMBEDDED) {
                /* the display buffers are embedded in the outArgs struct */
                for (i = 0; (i < IVIDEO2_MAX_IO_BUFFERS) &&
                        (pOutArgs->outputID[i] != 0); i++) {
                    success = writebackVideo2BufDesc(
                            &(pOutArgs->displayBufs.bufDesc[i]));
                    if (!success) {
                        return (VISA_EFAIL);
                    }
                }
            }
            else {
                /* the display buffers are pointed to in the outArgs struct */
                for (i = 0; (i < IVIDEO2_MAX_IO_BUFFERS) &&
                        (pOutArgs->outputID[i] != 0); i++) {
                    success = writebackVideo2BufDesc(
                            pOutArgs->displayBufs.pBufDesc[i]);
                    if (!success) {
                        return (VISA_EFAIL);
                    }
                }
            }

            /*
             * Note that any changes to individual outBufs[i] values made by
             * the codec will automatically update msg->cmd.process.outBufs
             * as we pass the outBufs array by reference.
             */
            break;
        }

        case _VIDDEC3_CCONTROL: {
            /* unmarshall status based on the "size" of params: status is
             * marshalled immediately after the variable-sized params.
             */
            pStatus = (IVIDDEC3_Status *)((UInt)(&(msg->cmd.control.params)) +
                    msg->cmd.control.params.size);

            /* invalidate data buffer */
            if (pStatus->data.buf != NULL) {
                Memory_cacheInv(pStatus->data.buf, pStatus->data.bufSize);
            }

            msg->visa.status = VIDDEC3_control(handle, msg->cmd.control.id,
                    &(msg->cmd.control.params), pStatus);

            /* writeback data buffer */
            if ((pStatus->data.buf != NULL) &&
                    XDM_ISACCESSMODE_WRITE(pStatus->data.accessMask)) {
                Memory_cacheWb(pStatus->data.buf, pStatus->data.bufSize);

                /* The WRITE bit in .accessMask is deliberately left set;
                 * the stub doesn't propagate .accessMask back to the app.
                 */
            }
            break;
        }

        default: {
            /* unknown command - report failure to the stub */
            msg->visa.status = VISA_EFAIL;
            break;
        }
    }

    return (VISA_EOK);
}
/*
 *  ======== call ========
 *  Skeleton-side dispatcher for the AUDENC1 codec class.
 *
 *  Unmarshals the VISA message, performs cache maintenance on the shared
 *  buffers (including inArgs.ancData) according to SKEL_cachingPolicy, and
 *  invokes the local AUDENC1_process()/AUDENC1_control() call.
 *
 *  Returns VISA_EOK always; the per-call codec status is carried back to
 *  the stub in msg->visa.status.
 */
static VISA_Status call(VISA_Handle visaHandle, VISA_Msg visaMsg)
{
    _AUDENC1_Msg *msg = (_AUDENC1_Msg *)visaMsg;
    AUDENC1_Handle handle = (AUDENC1_Handle)visaHandle;
    Int i;
    XDM1_BufDesc inBufs, outBufs;
    IAUDENC1_OutArgs *pOutArgs;
    IAUDENC1_Status *pStatus;
    Int numBufs;

    /* perform the requested AUDENC1 operation by parsing message. */
    switch (msg->visa.cmd) {
        case _AUDENC1_CPROCESS: {
            /* unmarshal inBufs and outBufs */
            inBufs = msg->cmd.process.inBufs;
            outBufs = msg->cmd.process.outBufs;

            if (SKEL_cachingPolicy == SKEL_LOCALBUFFERINVWB) {
                /* invalidate cache for all input buffers */
                for (i = 0, numBufs = 0; i < XDM_MAX_IO_BUFFERS; i++) {
                    if (inBufs.descs[i].buf != NULL) {
                        /* valid member of sparse array, manage it */
                        Memory_cacheInv(inBufs.descs[i].buf,
                                inBufs.descs[i].bufSize);
                        /* early exit once all valid bufs have been seen */
                        if (++numBufs == inBufs.numBufs) {
                            break;
                        }
                    }
                }

                /* invalidate cache for buffers in inArgs */
                if (msg->cmd.process.inArgs.ancData.buf != NULL) {
                    Memory_cacheInv(msg->cmd.process.inArgs.ancData.buf,
                            msg->cmd.process.inArgs.ancData.bufSize);
                }

                /* invalidate cache for all output buffers */
                for (i = 0, numBufs = 0; i < XDM_MAX_IO_BUFFERS; i++) {
                    if (outBufs.descs[i].buf != NULL) {
                        /* valid member of sparse array, manage it */
                        Memory_cacheInv(outBufs.descs[i].buf,
                                outBufs.descs[i].bufSize);
                        if (++numBufs == outBufs.numBufs) {
                            break;
                        }
                    }
                }
            }

            /* unmarshall outArgs based on the "size" of inArgs: outArgs is
             * marshalled immediately after the variable-sized inArgs.
             */
            pOutArgs = (IAUDENC1_OutArgs *)((UInt)(&(msg->cmd.process.inArgs)) +
                    msg->cmd.process.inArgs.size);

            /* make the process call */
            msg->visa.status = AUDENC1_process(handle, &inBufs, &outBufs,
                    &(msg->cmd.process.inArgs), pOutArgs);

            if (SKEL_cachingPolicy == SKEL_WBINVALL) {
                Memory_cacheWbInvAll();
            }
            else if (SKEL_cachingPolicy == SKEL_LOCALBUFFERINVWB) {
                /* writeback cache for all output buffers */
                for (i = 0, numBufs = 0; i < XDM_MAX_IO_BUFFERS; i++) {
                    if (outBufs.descs[i].buf != NULL) {
                        /*
                         * Fix: count every valid member of the sparse array
                         * toward the early-exit test.  Previously numBufs
                         * was only incremented for CPU-written buffers, so
                         * the loop scanned all XDM_MAX_IO_BUFFERS entries
                         * whenever any valid outBuf lacked the WRITE access
                         * bit (the invalidate loops above count this way).
                         * The set of buffers written back is unchanged.
                         */
                        if (XDM_ISACCESSMODE_WRITE(
                                outBufs.descs[i].accessMask)) {
                            /* written to via CPU, so write it back */
                            Memory_cacheWb(outBufs.descs[i].buf,
                                    outBufs.descs[i].bufSize);

                            /*
                             * Since we've cacheWb this buffer, we arguably
                             * should reflect this cache state and clear the
                             * WRITE bit in the .accessMask field.  However,
                             * we know the stub doesn't propogate this field
                             * to the calling app, so this extra buffer
                             * management detail isn't necessary:
                             *
                             * XDM_CLEARACCESSMODE_WRITE(
                             *     outBufs.descs[i].accessMask);
                             */
                        }
                        if (++numBufs == outBufs.numBufs) {
                            break;
                        }
                    }
                }
            }

            /*
             * Note that any changes to individual outBufs[i] values made by
             * the codec will automatically update msg->cmd.process.outBufs
             * as we pass the outBufs array by reference.
             */
            break;
        }

        case _AUDENC1_CCONTROL: {
            /* unmarshall status based on the "size" of params: status is
             * marshalled immediately after the variable-sized params.
             */
            pStatus = (IAUDENC1_Status *)((UInt)(&(msg->cmd.control.params)) +
                    msg->cmd.control.params.size);

            /* invalidate data buffer */
            if (pStatus->data.buf != NULL) {
                Memory_cacheInv(pStatus->data.buf, pStatus->data.bufSize);
            }

            msg->visa.status = AUDENC1_control(handle, msg->cmd.control.id,
                    &(msg->cmd.control.params), pStatus);

            /* writeback data buffer */
            if ((pStatus->data.buf != NULL) &&
                    XDM_ISACCESSMODE_WRITE(pStatus->data.accessMask)) {
                Memory_cacheWb(pStatus->data.buf, pStatus->data.bufSize);

                /* The WRITE bit in .accessMask is deliberately left set;
                 * the stub doesn't propagate .accessMask back to the app.
                 */
            }
            break;
        }

        default: {
            /* unknown command - report failure to the stub */
            msg->visa.status = VISA_EFAIL;
            break;
        }
    }

    return (VISA_EOK);
}
/*
 * ======== do_transcodeFrame ========
 * Copy one source frame into the codec engine's shared input buffer, run
 * VIDTRANSCODE_process() on it, and copy the transcoded result back out.
 *
 * Returns 0 on success, otherwise an errno-style code:
 *   ENOTCONN - codec buffers not allocated yet
 *   EINVAL   - NULL frame pointer supplied
 *   ENOSPC   - frame larger than the corresponding codec buffer
 *   EILSEQ   - the codec reported a processing error
 */
static int do_transcodeFrame(CodecEngine* _ce,
                             const void* _srcFramePtr, size_t _srcFrameSize,
                             void* _dstFramePtr, size_t _dstFrameSize,
                             size_t* _dstFrameUsed)
{
  if (_ce->m_srcBuffer == NULL || _ce->m_dstBuffer == NULL)
    return ENOTCONN;
  if (_srcFramePtr == NULL || _dstFramePtr == NULL)
    return EINVAL;
  if (_srcFrameSize > _ce->m_srcBufferSize || _dstFrameSize > _ce->m_dstBufferSize)
    return ENOSPC;

  VIDTRANSCODE_InArgs tcInArgs;
  memset(&tcInArgs, 0, sizeof(tcInArgs));
  tcInArgs.size = sizeof(tcInArgs);
  tcInArgs.numBytes = _srcFrameSize;
  tcInArgs.inputID = 1; // must be non-zero, otherwise caching issues appear

  VIDTRANSCODE_OutArgs tcOutArgs;
  memset(&tcOutArgs, 0, sizeof(tcOutArgs));
  tcOutArgs.size = sizeof(tcOutArgs);

  XDM1_BufDesc tcInBufDesc;
  memset(&tcInBufDesc, 0, sizeof(tcInBufDesc));
  tcInBufDesc.numBufs = 1;
  tcInBufDesc.descs[0].buf = _ce->m_srcBuffer;
  tcInBufDesc.descs[0].bufSize = _srcFrameSize;

  XDM_BufDesc tcOutBufDesc;
  memset(&tcOutBufDesc, 0, sizeof(tcOutBufDesc));
  XDAS_Int8* tcOutBufDesc_bufs[1];
  XDAS_Int32 tcOutBufDesc_bufSizes[1];
  tcOutBufDesc.numBufs = 1;
  tcOutBufDesc.bufs = tcOutBufDesc_bufs;
  tcOutBufDesc.bufs[0] = _ce->m_dstBuffer;
  tcOutBufDesc.bufSizes = tcOutBufDesc_bufSizes;
  tcOutBufDesc.bufSizes[0] = _dstFrameSize;

  memcpy(_ce->m_srcBuffer, _srcFramePtr, _srcFrameSize);
  // invalidate and flush *whole* buffer, not only written portion, just in case
  Memory_cacheWbInv(_ce->m_srcBuffer, _ce->m_srcBufferSize);
  // invalidate *whole* buffer, not only expected portion, just in case
  Memory_cacheInv(_ce->m_dstBuffer, _ce->m_dstBufferSize);

  XDAS_Int32 processResult = VIDTRANSCODE_process(_ce->m_vidtranscodeHandle,
                                                  &tcInBufDesc, &tcOutBufDesc,
                                                  &tcInArgs, &tcOutArgs);
  if (processResult != IVIDTRANSCODE_EOK)
  {
    fprintf(stderr, "VIDTRANSCODE_process(%zu -> %zu) failed: %"PRIi32"/%"PRIi32"\n",
            _srcFrameSize, _dstFrameSize, processResult, tcOutArgs.extendedError);
    return EILSEQ;
  }

#warning Remove me after a while if everything is fine
#if 0 // does not seems to be needed according to notes in CE skeletons (Wb is done by DSP side, not ARM)
  if (XDM_ISACCESSMODE_WRITE(tcOutArgs.encodedBuf[0].accessMask))
    Memory_cacheWb(_ce->m_dstBuffer, _ce->m_dstBufferSize);
#endif

  /*
   * BUGFIX: check for a negative size *before* comparing against
   * _dstFrameSize.  encodedBuf[0].bufSize is a signed XDAS_Int32; in the
   * previous ordering the comparison `bufSize > _dstFrameSize` promoted a
   * negative bufSize to a huge unsigned value (usual arithmetic conversions
   * against size_t), so negative sizes were misreported as "too large" and
   * the negative-size branch was unreachable dead code.
   */
  if (tcOutArgs.encodedBuf[0].bufSize < 0)
  {
    *_dstFrameUsed = 0;
    fprintf(stderr, "VIDTRANSCODE_process(%zu -> %zu) returned negative buffer size\n",
            _srcFrameSize, _dstFrameSize);
  }
  else if ((size_t)tcOutArgs.encodedBuf[0].bufSize > _dstFrameSize)
  {
    *_dstFrameUsed = _dstFrameSize;
    fprintf(stderr, "VIDTRANSCODE_process(%zu -> %zu) returned too large buffer %zu, truncated\n",
            _srcFrameSize, _dstFrameSize, *_dstFrameUsed);
  }
  else
    *_dstFrameUsed = (size_t)tcOutArgs.encodedBuf[0].bufSize;

  memcpy(_dstFramePtr, _ce->m_dstBuffer, *_dstFrameUsed);

  return 0;
}
/*
 *  ======== call ========
 *  Skeleton-side dispatcher for VIDENC2: unmarshals the VISA message,
 *  performs cache maintenance on shared buffers (honoring any per-buffer
 *  overrides in the codec class config), and invokes the codec.
 */
static VISA_Status call(VISA_Handle visaHandle, VISA_Msg visaMsg)
{
    _VIDENC2_Msg *msg = (_VIDENC2_Msg *)visaMsg;
    VIDENC2_Handle handle = (VIDENC2_Handle)visaHandle;
    Int idx;
    IVIDEO2_BufDesc inBufs;
    XDM2_BufDesc outBufs;
    IVIDENC2_OutArgs *pOutArgs;
    IVIDENC2_Status *pStatus;
    IVIDENC2_CodecClassConfig *codecClassConfig;
    Int validBufs;

    /* get stub/skeleton config data; can be NULL (for old codecs) */
    codecClassConfig = (IVIDENC2_CodecClassConfig *)
            VISA_getCodecClassConfig(visaHandle);

    /* perform the requested VIDENC2 operation by parsing the message */
    switch (msg->visa.cmd) {
        case _VIDENC2_CPROCESS:
        {
            /* unmarshal inBufs and outBufs */
            inBufs = msg->cmd.process.inBufs;
            outBufs = msg->cmd.process.outBufs;

            /* Should assert inBufs.numPlanes and inBufs.numMetaPlanes
             * are < 3.
             */

            /* invalidate cache for all input plane buffers */
            for (idx = 0; idx < inBufs.numPlanes; idx++) {
                if (inBufs.planeDesc[idx].buf == NULL) {
                    continue;
                }
                /* honor a per-buffer "don't manage" override, if any */
                if ((codecClassConfig == NULL) ||
                        (codecClassConfig->
                        manageInBufsPlaneDescCache[idx] != FALSE)) {
                    if (inBufs.planeDesc[idx].memType == XDM_MEMTYPE_ROW) {
                        Memory_cacheInv(inBufs.planeDesc[idx].buf,
                                inBufs.planeDesc[idx].bufSize.bytes);
                    }
                    /* else: TODO:H are tiled buffers cacheable? */
                }
            }

            /* invalidate cache for all input meta plane buffers */
            for (idx = 0; idx < inBufs.numMetaPlanes; idx++) {
                if (inBufs.metadataPlaneDesc[idx].buf == NULL) {
                    continue;
                }
                if ((codecClassConfig == NULL) ||
                        (codecClassConfig->
                        manageInBufsMetaPlaneDescCache[idx] != FALSE)) {
                    if (inBufs.metadataPlaneDesc[idx].memType ==
                            XDM_MEMTYPE_ROW) {
                        Memory_cacheInv(inBufs.metadataPlaneDesc[idx].buf,
                                inBufs.metadataPlaneDesc[idx].bufSize.bytes);
                    }
                    /* else: TODO:H are tiled buffers cacheable? */
                }
            }

            /* invalidate cache for all output buffers (sparse array) */
            validBufs = 0;
            for (idx = 0; idx < XDM_MAX_IO_BUFFERS; idx++) {
                if (outBufs.descs[idx].buf == NULL) {
                    continue;
                }
                if ((codecClassConfig == NULL) ||
                        (codecClassConfig->manageOutBufsCache[idx] !=
                        FALSE)) {
                    if (outBufs.descs[idx].memType == XDM_MEMTYPE_ROW) {
                        Memory_cacheInv(outBufs.descs[idx].buf,
                                outBufs.descs[idx].bufSize.bytes);
                    }
                    /* else: TODO:H are tiled buffers cacheable? */
                }
                if (++validBufs == outBufs.numBufs) {
                    break;
                }
            }

            /* unmarshal outArgs based on the "size" of inArgs */
            pOutArgs = (IVIDENC2_OutArgs *)
                    ((UInt)(&(msg->cmd.process.inArgs)) +
                    msg->cmd.process.inArgs.size);

            /*
             * No need to invalidate cache for pOutArgs->encodedBuf bufs
             * nor pOutArgs->reconBufs bufs: they're references to the
             * _real_ OUT buffers provided in outBufs, which were already
             * invalidated above.
             */

            /* make the process call */
            msg->visa.status = VIDENC2_process(handle, &inBufs, &outBufs,
                    &(msg->cmd.process.inArgs), pOutArgs);

#if 0
            /* TODO! writeback cache for recon buffers */
            for (idx = 0; ((idx < pOutArgs->reconBufs.numBufs) &&
                    (idx < IVIDEO_MAX_YUV_BUFFERS)); idx++) {
                if ((pOutArgs->reconBufs.bufDesc[idx].buf != NULL) &&
                        (XDM_ISACCESSMODE_WRITE(
                        pOutArgs->reconBufs.bufDesc[idx].accessMask))) {
                    Memory_cacheWb(pOutArgs->reconBufs.bufDesc[idx].buf,
                            pOutArgs->reconBufs.bufDesc[idx].bufSize);

                    /*
                     * We could clear the WRITE bit in .accessMask here,
                     * but the stub doesn't propagate this field to the
                     * calling app, so it isn't necessary:
                     *
                     * XDM_CLEARACCESSMODE_WRITE(
                     *         pOutArgs->reconBufs.bufDesc[idx].accessMask);
                     */
                }
            }
#endif /* TODO! */

            /*
             * Any changes the codec made to individual outBufs[] entries
             * automatically update msg->cmd.process.outBufs, since the
             * outBufs array is passed by reference.
             */
            break;
        }

        case _VIDENC2_CCONTROL:
        {
            /* unmarshal status based on the "size" of params */
            pStatus = (IVIDENC2_Status *)
                    ((UInt)(&(msg->cmd.control.params)) +
                    msg->cmd.control.params.size);

            /* invalidate data buffer */
            if (pStatus->data.buf != NULL) {
                Memory_cacheInv(pStatus->data.buf, pStatus->data.bufSize);
            }

            msg->visa.status = VIDENC2_control(handle, msg->cmd.control.id,
                    &(msg->cmd.control.params), pStatus);

            /* writeback data buffer if the CPU wrote it */
            if ((pStatus->data.buf != NULL) &&
                    XDM_ISACCESSMODE_WRITE(pStatus->data.accessMask)) {
                Memory_cacheWb(pStatus->data.buf, pStatus->data.bufSize);

                /*
                 * Clearing the WRITE bit in .accessMask isn't needed:
                 * the stub doesn't propagate this field to the app.
                 *
                 * XDM_CLEARACCESSMODE_WRITE(pStatus->data.accessMask);
                 */
            }
            break;
        }

        default:
        {
            msg->visa.status = VISA_EFAIL;
            break;
        }
    }

    return (VISA_EOK);
}
/*
 *  ======== call ========
 *  Skeleton-side dispatcher for UNIVERSAL: unmarshals the VISA message,
 *  manages cache for the in/out/inOut buffer sets per SKEL_cachingPolicy,
 *  and invokes the codec.  An empty buffer set is passed to the codec as
 *  NULL.
 */
static VISA_Status call(VISA_Handle visaHandle, VISA_Msg visaMsg)
{
    _UNIVERSAL_Msg *msg = (_UNIVERSAL_Msg *)visaMsg;
    UNIVERSAL_Handle handle = (UNIVERSAL_Handle)visaHandle;
    Int k;
    XDM1_BufDesc inBufs, *pInBufs = &inBufs;
    XDM1_BufDesc outBufs, *pOutBufs = &outBufs;
    XDM1_BufDesc inOutBufs, *pInOutBufs = &inOutBufs;
    IUNIVERSAL_OutArgs *pOutArgs;
    IUNIVERSAL_Status *pStatus;
    Int count;

    /* perform the requested UNIVERSAL operation by parsing the message */
    switch (msg->visa.cmd) {
        case _UNIVERSAL_CPROCESS:
        {
            /* unmarshal input buffers */
            if (msg->cmd.process.inBufs.numBufs == 0) {
                pInBufs = NULL;
            }
            else {
                inBufs = msg->cmd.process.inBufs;
                if (SKEL_cachingPolicy == SKEL_LOCALBUFFERINVWB) {
                    /* invalidate cache for all input buffers */
                    count = 0;
                    for (k = 0; k < XDM_MAX_IO_BUFFERS; k++) {
                        if (inBufs.descs[k].buf == NULL) {
                            continue;   /* hole in the sparse array */
                        }
                        Memory_cacheInv(inBufs.descs[k].buf,
                                inBufs.descs[k].bufSize);
                        if (++count == inBufs.numBufs) {
                            break;
                        }
                    }
                }
            }

            /* unmarshal output buffers */
            if (msg->cmd.process.outBufs.numBufs == 0) {
                pOutBufs = NULL;
            }
            else {
                outBufs = msg->cmd.process.outBufs;
                if (SKEL_cachingPolicy == SKEL_LOCALBUFFERINVWB) {
                    /* invalidate cache for all output buffers */
                    count = 0;
                    for (k = 0; k < XDM_MAX_IO_BUFFERS; k++) {
                        if (outBufs.descs[k].buf == NULL) {
                            continue;
                        }
                        Memory_cacheInv(outBufs.descs[k].buf,
                                outBufs.descs[k].bufSize);
                        if (++count == outBufs.numBufs) {
                            break;
                        }
                    }
                }
            }

            /* unmarshal in/out buffers */
            if (msg->cmd.process.inOutBufs.numBufs == 0) {
                pInOutBufs = NULL;
            }
            else {
                inOutBufs = msg->cmd.process.inOutBufs;
                if (SKEL_cachingPolicy == SKEL_LOCALBUFFERINVWB) {
                    /* invalidate cache for all in/out buffers */
                    count = 0;
                    for (k = 0; k < XDM_MAX_IO_BUFFERS; k++) {
                        if (inOutBufs.descs[k].buf == NULL) {
                            continue;
                        }
                        Memory_cacheInv(inOutBufs.descs[k].buf,
                                inOutBufs.descs[k].bufSize);
                        if (++count == inOutBufs.numBufs) {
                            break;
                        }
                    }
                }
            }

            /* unmarshal outArgs based on the "size" of inArgs */
            pOutArgs = (IUNIVERSAL_OutArgs *)
                    ((UInt)(&(msg->cmd.process.inArgs)) +
                    msg->cmd.process.inArgs.size);

            /* make the process call */
            msg->visa.status = UNIVERSAL_process(handle, pInBufs, pOutBufs,
                    pInOutBufs, &(msg->cmd.process.inArgs), pOutArgs);

            if (SKEL_cachingPolicy == SKEL_WBINVALL) {
                Memory_cacheWbInvAll();
            }
            else if (SKEL_cachingPolicy == SKEL_LOCALBUFFERINVWB) {
                /* writeback cache for output buffers the CPU wrote */
                if (pOutBufs != NULL) {
                    count = 0;
                    for (k = 0; k < XDM_MAX_IO_BUFFERS; k++) {
                        if (outBufs.descs[k].buf == NULL) {
                            continue;
                        }
                        if (XDM_ISACCESSMODE_WRITE(
                                outBufs.descs[k].accessMask)) {
                            Memory_cacheWb(outBufs.descs[k].buf,
                                    outBufs.descs[k].bufSize);

                            /*
                             * Clearing the WRITE bit in .accessMask isn't
                             * needed: the stub doesn't propagate this
                             * field back to the calling app.
                             *
                             * XDM_CLEARACCESSMODE_WRITE(
                             *         outBufs.descs[k].accessMask);
                             */
                        }
                        if (++count == outBufs.numBufs) {
                            break;
                        }
                    }
                }

                /* writeback cache for in/out buffers the CPU wrote */
                if (pInOutBufs != NULL) {
                    count = 0;
                    for (k = 0; k < XDM_MAX_IO_BUFFERS; k++) {
                        if (inOutBufs.descs[k].buf == NULL) {
                            continue;
                        }
                        if (XDM_ISACCESSMODE_WRITE(
                                inOutBufs.descs[k].accessMask)) {
                            Memory_cacheWb(inOutBufs.descs[k].buf,
                                    inOutBufs.descs[k].bufSize);

                            /*
                             * As above, no need to clear the WRITE bit:
                             *
                             * XDM_CLEARACCESSMODE_WRITE(
                             *         inOutBufs.descs[k].accessMask);
                             */
                        }
                        if (++count == inOutBufs.numBufs) {
                            break;
                        }
                    }
                }
            }
            break;
        }

        case _UNIVERSAL_CCONTROL:
        {
            /* unmarshal status based on the "size" of dynParams */
            pStatus = (IUNIVERSAL_Status *)
                    ((UInt)(&(msg->cmd.control.dynParams)) +
                    msg->cmd.control.dynParams.size);

            /* invalidate data buffers */
            count = 0;
            for (k = 0; k < XDM_MAX_IO_BUFFERS; k++) {
                if (pStatus->data.descs[k].buf == NULL) {
                    continue;   /* hole in the sparse array */
                }
                Memory_cacheInv(pStatus->data.descs[k].buf,
                        pStatus->data.descs[k].bufSize);
                if (++count == pStatus->data.numBufs) {
                    break;
                }
            }

            msg->visa.status = UNIVERSAL_control(handle,
                    msg->cmd.control.id, &(msg->cmd.control.dynParams),
                    pStatus);

            /* writeback data buffers the CPU wrote */
            count = 0;
            for (k = 0; k < XDM_MAX_IO_BUFFERS; k++) {
                if (pStatus->data.descs[k].buf == NULL) {
                    continue;
                }
                if (XDM_ISACCESSMODE_WRITE(
                        pStatus->data.descs[k].accessMask)) {
                    Memory_cacheWb(pStatus->data.descs[k].buf,
                            pStatus->data.descs[k].bufSize);

                    /*
                     * Clearing the WRITE bit in .accessMask isn't needed:
                     * the stub doesn't propagate this field to the app.
                     *
                     * XDM_CLEARACCESSMODE_WRITE(
                     *         pStatus->data.descs[k].accessMask);
                     */
                }
                if (++count == pStatus->data.numBufs) {
                    break;
                }
            }
            break;
        }

        default:
        {
            msg->visa.status = VISA_EFAIL;
            break;
        }
    }

    return (VISA_EOK);
}
/*
 *  ======== processLoop ========
 *  Read fixed-size frames from 'in', run each through the UNIVERSAL
 *  algorithm, and write the results to 'out'.  Queries and prints the
 *  algorithm version before entering the loop.
 */
static Void processLoop(UNIVERSAL_Handle hUniversal, FILE *in, FILE *out)
{
    Int frame;
    Int32 status;
    UNIVERSAL_InArgs universalInArgs;
    UNIVERSAL_OutArgs universalOutArgs;
    UNIVERSAL_DynamicParams universalDynParams;
    UNIVERSAL_Status universalStatus;
    XDM1_BufDesc universalInBufDesc;
    XDM1_BufDesc universalOutBufDesc;

    /* initialize bufDescs: a single buffer each for input and output */
    universalInBufDesc.numBufs = 1;
    universalOutBufDesc.numBufs = 1;
    universalInBufDesc.descs[0].bufSize = NSAMPLES;
    universalOutBufDesc.descs[0].bufSize = NSAMPLES;
    universalInBufDesc.descs[0].buf = inBuf;
    universalOutBufDesc.descs[0].buf = outBuf;

    /* initialize all "sized" fields */
    universalInArgs.size = sizeof(universalInArgs);
    universalOutArgs.size = sizeof(universalOutArgs);
    universalDynParams.size = sizeof(universalDynParams);
    universalStatus.size = sizeof(universalStatus);

    /* if the codecs support it, dump their versions */
    universalStatus.data.numBufs = 1;
    universalStatus.data.descs[0].buf = versionBuf;
    universalStatus.data.descs[0].bufSize = MAXVERSIONSIZE;
    universalStatus.data.descs[1].buf = NULL;   /* terminate sparse array */

    status = UNIVERSAL_control(hUniversal, XDM_GETVERSION,
            &universalDynParams, &universalStatus);

    /* NOTE(review): assumes the alg NUL-terminates versionBuf - confirm */
    GT_1trace(curMask, GT_1CLASS, "Alg version: %s\n",
            (status == UNIVERSAL_EOK ?
            ((char *)universalStatus.data.descs[0].buf) : "[unknown]"));

    /*
     * Read complete frames from in, process them, and write to out.
     */
    for (frame = 0; fread(inBuf, IFRAMESIZE, 1, in) == 1; frame++) {
#ifdef CACHE_ENABLED
#ifdef xdc_target__isaCompatible_64P
        /*
         * fread() on this processor is implemented using CCS's stdio,
         * which is known to write into the cache, not physical memory.
         * To meet xDAIS DMA Rule 7, we must writeback the cache into
         * physical memory.  Also, per DMA Rule 7, we must invalidate the
         * buffer's cache before providing it to any xDAIS algorithm.
         */
        Memory_cacheWbInv(inBuf, IFRAMESIZE);
#else
#error Unvalidated config - add appropriate fread-related cache maintenance
#endif

        /* Per DMA Rule 7, our output buffer cache lines must be cleaned */
        Memory_cacheInv(outBuf, OFRAMESIZE);
#endif

        GT_1trace(curMask, GT_1CLASS, "App-> Processing frame %d...\n",
                frame);

        /* process the frame; this alg takes no inOut buffers */
        status = UNIVERSAL_process(hUniversal, &universalInBufDesc,
                &universalOutBufDesc, NULL, &universalInArgs,
                &universalOutArgs);

        GT_2trace(curMask, GT_2CLASS,
                "App-> Alg frame %d process returned - 0x%x)\n",
                frame, status);

        if (status != UNIVERSAL_EOK) {
            GT_3trace(curMask, GT_7CLASS,
                    "App-> Alg frame %d processing FAILED, status = 0x%x, "
                    "extendedError = 0x%x\n",
                    frame, status, universalOutArgs.extendedError);
            break;
        }

#ifdef CACHE_ENABLED
        /*
         * Conditionally writeback the processed buf from the previous
         * call.
         */
        if (XDM_ISACCESSMODE_WRITE(
                universalOutBufDesc.descs[0].accessMask)) {
            Memory_cacheWb(outBuf, OFRAMESIZE);
        }
#endif

        /* write to file */
        fwrite(outBuf, OFRAMESIZE, 1, out);
    }

    GT_1trace(curMask, GT_1CLASS, "%d frames processed\n", frame);
}