Code Example #1
File: main_sfm.cpp  Project: neariot/sfm
int main(int argc, char* argv[])
{
    try
    {
        nvxio::Application &app = nvxio::Application::get();

        //
        // Parse command line arguments
        //

        std::string sourceUri = "/home/px4/test.mp4";
        std::string configFile = app.findSampleFilePath("sfm/sfm_config.ini");
        bool fullPipeline = false;
        std::string maskFile;
        bool noLoop = false;

        app.setDescription("This sample demonstrates Structure from Motion (SfM) algorithm");
        app.addOption(0, "mask", "Optional mask", nvxio::OptionHandler::string(&maskFile));
        app.addBooleanOption('f', "fullPipeline", "Run full SfM pipeline without using IMU data", &fullPipeline);
        app.addBooleanOption('n', "noLoop", "Run sample without loop", &noLoop);

        app.init(argc, argv);

        nvx_module_version_t sfmVersion;
        nvxSfmGetVersion(&sfmVersion);
        std::cout << "VisionWorks SFM version: " << sfmVersion.major << "." << sfmVersion.minor
                  << "." << sfmVersion.patch << sfmVersion.suffix << std::endl;

        std::string imuDataFile;
        std::string frameDataFile;
        if (!fullPipeline)
        {
            imuDataFile = app.findSampleFilePath("sfm/imu_data.txt");
            frameDataFile = app.findSampleFilePath("sfm/images_timestamps.txt");
        }

        if (app.getPreferredRenderName() != "default")
        {
            std::cerr << "The sample uses custom Render for GUI. --nvxio_render option is not supported!" << std::endl;
            return nvxio::Application::APP_EXIT_CODE_NO_RENDER;
        }

        //
        // Read SfMParams
        //

        nvx::SfM::SfMParams params;

        std::string msg;
        if (!read(configFile, params, msg))
        {
            std::cout << msg << std::endl;
            return nvxio::Application::APP_EXIT_CODE_INVALID_VALUE;
        }

        //
        // Create OpenVX context
        //

        nvxio::ContextGuard context;

        //
        // Messages generated by the OpenVX framework will be processed by nvxio::stdoutLogCallback
        //

        vxRegisterLogCallback(context, &nvxio::stdoutLogCallback, vx_false_e);

        //
        // Add SfM kernels
        //

        NVXIO_SAFE_CALL(nvxSfmRegisterKernels(context));

        //
        // Create a Frame Source
        //

        std::unique_ptr<nvxio::FrameSource> source(
             nvxio::createDefaultFrameSource(context, sourceUri));

        if (!source || !source->open())
        {
            std::cout << "Can't open source file: " << sourceUri << std::endl;
            return nvxio::Application::APP_EXIT_CODE_NO_RESOURCE;
        }

        nvxio::FrameSource::Parameters sourceParams = source->getConfiguration();

        //
        // Create OpenVX Image to hold frames from video source
        //

        vx_image frame = vxCreateImage(context,
                                       sourceParams.frameWidth, sourceParams.frameHeight, sourceParams.format);
        NVXIO_CHECK_REFERENCE(frame);

        //
        // Load mask image if needed
        //

        vx_image mask = NULL;
        if (!maskFile.empty())
        {
            mask = nvxio::loadImageFromFile(context, maskFile, VX_DF_IMAGE_U8);

            vx_uint32 mask_width = 0, mask_height = 0;
            vxQueryImage(mask, VX_IMAGE_ATTRIBUTE_WIDTH, &mask_width, sizeof(mask_width));
            vxQueryImage(mask, VX_IMAGE_ATTRIBUTE_HEIGHT, &mask_height, sizeof(mask_height));

            if (mask_width != sourceParams.frameWidth || mask_height != sourceParams.frameHeight)
            {
                std::cerr << "The mask must have the same size as the input source." << std::endl;
                return nvxio::Application::APP_EXIT_CODE_INVALID_DIMENSIONS;
            }
        }

        //
        // Create 3D Render instance
        //
        std::unique_ptr<nvxio::Render3D> render3D(nvxio::createDefaultRender3D(context, 0, 0,
            "SfM Point Cloud", sourceParams.frameWidth, sourceParams.frameHeight));

        nvxio::Render::TextBoxStyle style = {{255, 255, 255, 255}, {0, 0, 0, 255}, {10, 10}};

        if (!render3D)
        {
            std::cerr << "Can't create a renderer" << std::endl;
            return nvxio::Application::APP_EXIT_CODE_NO_RENDER;
        }

        // pinhole model: vertical FOV (radians) = 2 * atan(h / (2 * f_y)), converted to degrees below
        float fovYinRad = 2.f * atanf(sourceParams.frameHeight / 2.f / params.pFy);
        render3D->setDefaultFOV(180.f / nvxio::PI_F * fovYinRad);

        EventData eventData;
        render3D->setOnKeyboardEventCallback(eventCallback, &eventData);

        //
        // Create SfM class instance
        //

        std::unique_ptr<nvx::SfM> sfm(nvx::SfM::createSfM(context, params));

        //
        // Create FenceDetectorWithKF class instance
        //
        FenceDetectorWithKF fenceDetector;


        nvxio::FrameSource::FrameStatus frameStatus;
        do
        {
            frameStatus = source->fetch(frame);
        }
        while (frameStatus == nvxio::FrameSource::TIMEOUT);

        if (frameStatus == nvxio::FrameSource::CLOSED)
        {
            std::cerr << "Source has no frames" << std::endl;
            return nvxio::Application::APP_EXIT_CODE_NO_FRAMESOURCE;
        }

        vx_status status = sfm->init(frame, mask, imuDataFile, frameDataFile);
        if (status != VX_SUCCESS)
        {
            std::cerr << "Failed to initialize the algorithm" << std::endl;
            return nvxio::Application::APP_EXIT_CODE_ERROR;
        }

        const vx_size maxNumOfPoints = 2000;
        const vx_size maxNumOfPlanesVertices = 2000;
        vx_array filteredPoints = vxCreateArray(context, NVX_TYPE_POINT3F, maxNumOfPoints);
        vx_array planesVertices = vxCreateArray(context, NVX_TYPE_POINT3F, maxNumOfPlanesVertices);

        //
        // Run processing loop
        //

        vx_matrix model = vxCreateMatrix(context, VX_TYPE_FLOAT32, 4, 4);
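        // fill the model matrix with the 4x4 identity, so geometry is rendered without an extra transform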
        float eye_data[4*4] = {1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1};
        vxWriteMatrix(model, eye_data);

        nvxio::Render3D::PointCloudStyle pcStyle = {0, 12};
        nvxio::Render3D::PlaneStyle fStyle = {0, 10};

        GroundPlaneSmoother groundPlaneSmoother(7);

        nvx::Timer totalTimer;
        totalTimer.tic();
        double proc_ms = 0;
        float yGroundPlane = 0;
        while (!eventData.shouldStop)
        {
            if (!eventData.pause)
            {
                frameStatus = source->fetch(frame);

                if (frameStatus == nvxio::FrameSource::TIMEOUT)
                {
                    continue;
                }
                if (frameStatus == nvxio::FrameSource::CLOSED)
                {
                    if (noLoop) break;

                    if (!source->open())
                    {
                        std::cerr << "Failed to reopen the source" << std::endl;
                        break;
                    }

                    do
                    {
                        frameStatus = source->fetch(frame);
                    }
                    while (frameStatus == nvxio::FrameSource::TIMEOUT);

                    sfm->init(frame, mask, imuDataFile, frameDataFile);

                    fenceDetector.reset();

                    continue;
                }

                // Process
                nvx::Timer procTimer;
                procTimer.tic();
                sfm->track(frame, mask);
                proc_ms = procTimer.toc();
            }

            // Print performance results
            sfm->printPerfs();

            if (!eventData.showPointCloud)
            {
                render3D->disableDefaultKeyboardEventCallback();
                render3D->putImage(frame);
            }
            else
            {
                render3D->enableDefaultKeyboardEventCallback();
            }

            filterPoints(sfm->getPointCloud(), filteredPoints);
            render3D->putPointCloud(filteredPoints, model, pcStyle);

            if (eventData.showFences)
            {
                fenceDetector.getFencePlaneVertices(filteredPoints, planesVertices);
                render3D->putPlanes(planesVertices, model, fStyle);
            }

            if (fullPipeline && eventData.showGP)
            {
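                // a quad over x in [-1.5, 1.5], z in [1, 4] at the smoothed ground-plane height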
                const float x1(-1.5), x2(1.5), z1(1), z2(4);

                vx_matrix gp = sfm->getGroundPlane();
                yGroundPlane = groundPlaneSmoother.getSmoothedY(gp, x1, z1);

                nvx_point3f_t pt[4] = {{x1, yGroundPlane, z1},
                                       {x1, yGroundPlane, z2},
                                       {x2, yGroundPlane, z2},
                                       {x2, yGroundPlane, z1}};

                vx_array gpPoints = vxCreateArray(context, NVX_TYPE_POINT3F, 4);
                vxAddArrayItems(gpPoints, 4, pt, sizeof(pt[0]));

                render3D->putPlanes(gpPoints, model, fStyle);
                vxReleaseArray(&gpPoints);
            }

            double total_ms = totalTimer.toc();

            // Add a delay to limit frame rate
            app.sleepToLimitFPS(total_ms);

            total_ms = totalTimer.toc();
            totalTimer.tic();

            std::string state = createInfo(fullPipeline, proc_ms, total_ms, eventData);
            render3D->putText(state.c_str(), style);

            if (!render3D->flush())
            {
                eventData.shouldStop = true;
            }
        }

        //
        // Release all objects
        //
        vxReleaseImage(&frame);
        vxReleaseImage(&mask);
        vxReleaseMatrix(&model);
        vxReleaseArray(&filteredPoints);
        vxReleaseArray(&planesVertices);
    }
    catch (const std::exception& e)
    {
        std::cerr << "Error: " << e.what() << std::endl;
        return nvxio::Application::APP_EXIT_CODE_ERROR;
    }

    return nvxio::Application::APP_EXIT_CODE_SUCCESS;
}
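The sample relies on an EventData struct and an eventCallback helper that are defined elsewhere in main_sfm.cpp. A minimal sketch consistent with the flags used above might look like the following; the key bindings and the callback signature (NVXIO's usual context/key/x/y convention) are assumptions, not the sample's actual definitions.

// Hypothetical sketch -- the real definitions live elsewhere in main_sfm.cpp.
struct EventData
{
    EventData() : shouldStop(false), pause(false),
                  showPointCloud(false), showFences(true), showGP(true) {}

    bool shouldStop;      // quit the processing loop
    bool pause;           // stop fetching frames but keep rendering
    bool showPointCloud;  // switch between video overlay and point-cloud view
    bool showFences;      // toggle fence-plane rendering
    bool showGP;          // toggle ground-plane rendering (full pipeline only)
};

static void eventCallback(void* context, vx_char key, vx_uint32 /*x*/, vx_uint32 /*y*/)
{
    EventData* data = static_cast<EventData*>(context);

    switch (key)
    {
    case 27:  data->shouldStop = true;                       break; // Esc
    case ' ': data->pause = !data->pause;                    break;
    case 'p': data->showPointCloud = !data->showPointCloud;  break;
    case 'f': data->showFences = !data->showFences;          break;
    case 'g': data->showGP = !data->showGP;                  break;
    }
}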
Code Example #2
FrameSource::FrameStatus GStreamerBaseFrameSourceImpl::fetch(vx_image image, vx_uint32 /*timeout*/)
{
    if (end)
    {
        close();
        return FrameSource::CLOSED;
    }

    handleGStreamerMessages();

    if (gst_app_sink_is_eos(GST_APP_SINK(sink)))
    {
        close();
        return FrameSource::CLOSED;
    }

    if ((lastFrameTimestamp.toc()/1000.0) > Application::get().getSourceDefaultTimeout())
    {
        close();
        return FrameSource::CLOSED;
    }

    lastFrameTimestamp.tic();

#if GST_VERSION_MAJOR == 0
    std::unique_ptr<GstBuffer, GStreamerObjectDeleter> bufferHolder(
        gst_app_sink_pull_buffer(GST_APP_SINK(sink)));
    GstBuffer* buffer = bufferHolder.get();
#else
    std::unique_ptr<GstSample, GStreamerObjectDeleter> sample(gst_app_sink_pull_sample(GST_APP_SINK(sink)));

    if (!sample)
    {
        close();
        return FrameSource::CLOSED;
    }

    GstBuffer* buffer = gst_sample_get_buffer(sample.get());
#endif

    gint          width;
    gint          height;

#if GST_VERSION_MAJOR == 0
    std::unique_ptr<GstCaps, GStreamerObjectDeleter> bufferCapsHolder(gst_buffer_get_caps(buffer));
    GstCaps* bufferCaps = bufferCapsHolder.get();
#else
    GstCaps* bufferCaps = gst_sample_get_caps(sample.get());
#endif
    // the buffer is expected to carry exactly one caps structure
    assert(gst_caps_get_size(bufferCaps) == 1);
    GstStructure* structure = gst_caps_get_structure(bufferCaps, 0);

    // bail out if width or height are missing
    if (!gst_structure_get_int(structure, "width", &width) ||
            !gst_structure_get_int(structure, "height", &height))
    {
        close();
        return FrameSource::CLOSED;
    }

    int depth = 3;
#if GST_VERSION_MAJOR > 0
    depth = 0;
    const gchar* name = gst_structure_get_name(structure);
    const gchar* format = gst_structure_get_string(structure, "format");

    if (!name || !format)
    {
        close();
        return FrameSource::CLOSED;
    }

    // we support 2 types of data:
    //     video/x-raw, format=RGB   -> 8bit, 3 channels
    //     video/x-raw, format=GRAY8 -> 8bit, 1 channel
    if (strcasecmp(name, "video/x-raw") == 0)
    {
        if (strcasecmp(format, "RGB") == 0)
        {
            depth = 3;
        }
        else if(strcasecmp(format, "GRAY8") == 0)
        {
            depth = 1;
        }
    }
#endif
    if (depth == 0)
    {
        close();
        return FrameSource::CLOSED;
    }

    vx_imagepatch_addressing_t decodedImageAddr;
    decodedImageAddr.dim_x = width;
    decodedImageAddr.dim_y = height;
    decodedImageAddr.stride_x = depth;
    // GStreamer uses a stride equal to width*depth rounded up to the nearest multiple of 4
    decodedImageAddr.stride_y = ((width*depth+3)/4)*4;
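    // e.g. width = 641, depth = 3: 641*3 = 1923 bytes, rounded up to 1924 per row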
    decodedImageAddr.scale_x = 1;
    decodedImageAddr.scale_y = 1;
    vx_image decodedImage = NULL;
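    // maps bytes-per-pixel to an OpenVX format: 1 -> U8, 3 -> RGB, 4 -> RGBX
    // (entries 0 and 2 are unused placeholders)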
    vx_df_image_e vx_type_map[5] = { VX_DF_IMAGE_VIRT, VX_DF_IMAGE_U8,
                                     VX_DF_IMAGE_VIRT, VX_DF_IMAGE_RGB, VX_DF_IMAGE_RGBX };

    // query the destination image size and format
    vx_uint32 actual_width, actual_height;
    vx_df_image_e actual_format;
    NVXIO_SAFE_CALL( vxQueryImage(image, VX_IMAGE_ATTRIBUTE_WIDTH, (void *)&actual_width, sizeof(actual_width)) );
    NVXIO_SAFE_CALL( vxQueryImage(image, VX_IMAGE_ATTRIBUTE_HEIGHT, (void *)&actual_height, sizeof(actual_height)) );
    NVXIO_SAFE_CALL( vxQueryImage(image, VX_IMAGE_ATTRIBUTE_FORMAT, (void *)&actual_format, sizeof(actual_format)) );
    bool needScale = width != (int)configuration.frameWidth || height != (int)configuration.frameHeight;

    // configured and actual image sizes must be the same!
    if ((actual_height != configuration.frameHeight) ||
            (actual_width != configuration.frameWidth) ||
            (actual_format != configuration.format))
    {
        close();

        NVXIO_THROW_EXCEPTION("Actual image [ " << actual_width << " x " << actual_height <<
                              " ] does not equal configuration one [ " << configuration.frameWidth
                              << " x " << configuration.frameHeight << " ]");
    }

    // we assume the decoded image has no more than 3 channels per pixel
    if (!devMem)
    {
        NVXIO_ASSERT( cudaSuccess == cudaMallocPitch(&devMem, &devMemPitch, width * 3, height) );
    }

    // check if decoded image format has changed
    if (scaledImage)
    {
        vx_df_image_e scaled_format;
        NVXIO_SAFE_CALL( vxQueryImage(scaledImage, VX_IMAGE_ATTRIBUTE_FORMAT, (void *)&scaled_format, sizeof(scaled_format)) );

        if (scaled_format != vx_type_map[depth])
        {
            vxReleaseImage(&scaledImage);
            scaledImage = NULL;
        }
    }

    if (needScale && !scaledImage)
    {
        scaledImage = vxCreateImage(vxContext, configuration.frameWidth,
                                    configuration.frameHeight, vx_type_map[depth]);
        NVXIO_CHECK_REFERENCE( scaledImage );
    }

#if GST_VERSION_MAJOR == 0
    bool needConvert = configuration.format != VX_DF_IMAGE_RGB;
    void * decodedPtr = GST_BUFFER_DATA(buffer);
#else
    GstMapInfo info;

    gboolean success = gst_buffer_map(buffer, &info, (GstMapFlags)GST_MAP_READ);
    if (!success)
    {
        printf("GStreamer: unable to map buffer\n");
        close();
        return FrameSource::CLOSED;
    }

    bool needConvert = configuration.format != vx_type_map[depth];
    void * decodedPtr = info.data;
#endif

    if (!needConvert && !needScale)
    {
        decodedImage = vxCreateImageFromHandle(vxContext, vx_type_map[depth], &decodedImageAddr,
                                               &decodedPtr, VX_IMPORT_TYPE_HOST);
        NVXIO_CHECK_REFERENCE( decodedImage );
        NVXIO_SAFE_CALL( nvxuCopyImage(vxContext, decodedImage, image) );
    }
    else
    {
        // 1. upload the decoded image to the CUDA buffer
        NVXIO_ASSERT( cudaSuccess == cudaMemcpy2D(devMem, devMemPitch,
                                                  decodedPtr, decodedImageAddr.stride_y,
                                                  decodedImageAddr.dim_x * depth, decodedImageAddr.dim_y,
                                                  cudaMemcpyHostToDevice) );

        // 2. create vx_image wrapper for decoded buffer
        decodedImageAddr.stride_y = static_cast<vx_int32>(devMemPitch);
        decodedImage = vxCreateImageFromHandle(vxContext, vx_type_map[depth], &decodedImageAddr,
                                               &devMem, NVX_IMPORT_TYPE_CUDA);
        NVXIO_CHECK_REFERENCE( decodedImage );

        if (needScale)
        {
            // 3. scale image
            NVXIO_SAFE_CALL( vxuScaleImage(vxContext, decodedImage, scaledImage, VX_INTERPOLATION_TYPE_BILINEAR) );

            // 4. convert to dst image
            NVXIO_SAFE_CALL( vxuColorConvert(vxContext, scaledImage, image) );
        }
        else
        {
            // 3. convert to dst image
            NVXIO_SAFE_CALL( vxuColorConvert(vxContext, decodedImage, image) );
        }
    }

#if GST_VERSION_MAJOR != 0
    gst_buffer_unmap(buffer, &info);
#endif

    NVXIO_SAFE_CALL( vxReleaseImage(&decodedImage) );

    return FrameSource::OK;
}
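fetch() manages GStreamer objects through std::unique_ptr with a GStreamerObjectDeleter that NVXIO defines elsewhere. A plausible sketch for the GStreamer 1.x path, assuming the standard gst_*_unref release functions, is:

// Hypothetical sketch; the real deleter is defined elsewhere in NVXIO.
// Each overload drops one reference on the corresponding GStreamer object.
struct GStreamerObjectDeleter
{
    void operator()(GstSample* sample) const { gst_sample_unref(sample); }
    void operator()(GstBuffer* buffer) const { gst_buffer_unref(buffer); }
    void operator()(GstCaps*   caps)   const { gst_caps_unref(caps); }
};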
Code Example #3
int main(int argc, char* argv[])
{
    try
    {
        nvxio::Application &app = nvxio::Application::get();

        //
        // Parse command line arguments
        //

        std::string sourceUri = app.findSampleFilePath("cars.mp4");
        std::string configFile = app.findSampleFilePath("feature_tracker_demo_config.ini");

        app.setDescription("This demo demonstrates Feature Tracker algorithm");
        app.addOption('s', "source", "Source URI", nvxio::OptionHandler::string(&sourceUri));
        app.addOption('c', "config", "Config file path", nvxio::OptionHandler::string(&configFile));

#if defined USE_OPENCV || defined USE_GSTREAMER
        std::string maskFile;
        app.addOption('m', "mask", "Optional mask", nvxio::OptionHandler::string(&maskFile));
#endif

        app.init(argc, argv);

        //
        // Create OpenVX context
        //

        nvxio::ContextGuard context;

        //
        // Reads and checks input parameters
        //

        nvx::FeatureTracker::HarrisPyrLKParams params;
        std::string error;
        if (!read(configFile, params, error))
        {
            std::cout<<error;
            return nvxio::Application::APP_EXIT_CODE_INVALID_VALUE;
        }

        //
        // Create a Frame Source
        //

        std::unique_ptr<nvxio::FrameSource> source(
            nvxio::createDefaultFrameSource(context, sourceUri));

        if (!source || !source->open())
        {
            std::cerr << "Can't open source URI " << sourceUri << std::endl;
            return nvxio::Application::APP_EXIT_CODE_NO_RESOURCE;
        }

        if (source->getSourceType() == nvxio::FrameSource::SINGLE_IMAGE_SOURCE)
        {
            std::cerr << "Can't work on a single image." << std::endl;
            return nvxio::Application::APP_EXIT_CODE_INVALID_FORMAT;
        }

        nvxio::FrameSource::Parameters sourceParams = source->getConfiguration();

        //
        // Create a Render
        //

        std::unique_ptr<nvxio::Render> renderer(nvxio::createDefaultRender(
            context, "Feature Tracker Demo", sourceParams.frameWidth, sourceParams.frameHeight));

        if (!renderer)
        {
            std::cerr << "Can't create a renderer" << std::endl;
            return nvxio::Application::APP_EXIT_CODE_NO_RENDER;
        }

        EventData eventData;
        renderer->setOnKeyboardEventCallback(eventCallback, &eventData);

        //
        // Messages generated by the OpenVX framework will be processed by nvxio::stdoutLogCallback
        //

        vxRegisterLogCallback(context, &nvxio::stdoutLogCallback, vx_false_e);

        //
        // Create OpenVX Image to hold frames from video source
        //

        vx_image frameExemplar = vxCreateImage(context,
            sourceParams.frameWidth, sourceParams.frameHeight, sourceParams.format);
        NVXIO_CHECK_REFERENCE(frameExemplar);
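        // a 2-slot delay object acts as a ring buffer: index 0 holds the current
        // frame and index -1 the previous one; vxAgeDelay() rotates the slots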
        vx_delay frame_delay = vxCreateDelay(context, (vx_reference)frameExemplar, 2);
        NVXIO_CHECK_REFERENCE(frame_delay);
        vxReleaseImage(&frameExemplar);

        vx_image prevFrame = (vx_image)vxGetReferenceFromDelay(frame_delay, -1);
        vx_image frame = (vx_image)vxGetReferenceFromDelay(frame_delay, 0);

        //
        // Load mask image if needed
        //

        vx_image mask = NULL;

#if defined USE_OPENCV || defined USE_GSTREAMER
        if (!maskFile.empty())
        {
            mask = nvxio::loadImageFromFile(context, maskFile, VX_DF_IMAGE_U8);

            vx_uint32 mask_width = 0, mask_height = 0;
            NVXIO_SAFE_CALL( vxQueryImage(mask, VX_IMAGE_ATTRIBUTE_WIDTH, &mask_width, sizeof(mask_width)) );
            NVXIO_SAFE_CALL( vxQueryImage(mask, VX_IMAGE_ATTRIBUTE_HEIGHT, &mask_height, sizeof(mask_height)) );

            if (mask_width != sourceParams.frameWidth || mask_height != sourceParams.frameHeight)
            {
                std::cerr << "The mask must have the same size as the input source." << std::endl;
                return nvxio::Application::APP_EXIT_CODE_INVALID_DIMENSIONS;
            }
        }
#endif

        //
        // Create FeatureTracker instance
        //

        std::unique_ptr<nvx::FeatureTracker> tracker(nvx::FeatureTracker::createHarrisPyrLK(context, params));

        nvxio::FrameSource::FrameStatus frameStatus;

        do
        {
            frameStatus = source->fetch(frame);
        } while (frameStatus == nvxio::FrameSource::TIMEOUT);

        if (frameStatus == nvxio::FrameSource::CLOSED)
        {
            std::cerr << "Source has no frames" << std::endl;
            return nvxio::Application::APP_EXIT_CODE_NO_FRAMESOURCE;
        }

        tracker->init(frame, mask);

        vxAgeDelay(frame_delay);

        //
        // Run processing loop
        //

        nvx::Timer totalTimer;
        totalTimer.tic();
        double proc_ms = 0;
        while (!eventData.shouldStop)
        {
            if (!eventData.pause)
            {
                frameStatus = source->fetch(frame);

                if (frameStatus == nvxio::FrameSource::TIMEOUT) {
                    continue;
                }
                if (frameStatus == nvxio::FrameSource::CLOSED) {
                    if (!source->open()) {
                        std::cerr << "Failed to reopen the source" << std::endl;
                        break;
                    }
                    continue;
                }

                //
                // Process
                //

                nvx::Timer procTimer;
                procTimer.tic();

                tracker->track(frame, mask);

                proc_ms = procTimer.toc();

                //
                // Print performance results
                //

                tracker->printPerfs();
            }

            //
            // show the previous frame
            //
            renderer->putImage(prevFrame);

            //
            // Draw arrows & state
            //

            drawArrows(renderer.get(), tracker->getPrevFeatures(), tracker->getCurrFeatures());

            double total_ms = totalTimer.toc();

            std::cout << "Display Time : " << total_ms << " ms" << std::endl << std::endl;

            //
            // Add a delay to limit frame rate
            //

            app.sleepToLimitFPS(total_ms);

            total_ms = totalTimer.toc();

            totalTimer.tic();

            displayState(renderer.get(), sourceParams, proc_ms, total_ms);

            if (!renderer->flush())
            {
                eventData.shouldStop = true;
            }

            if (!eventData.pause)
            {
                vxAgeDelay(frame_delay);
            }
        }

        //
        // Release all objects
        //

        vxReleaseImage(&mask);
        vxReleaseDelay(&frame_delay);
    }
    catch (const std::exception& e)
    {
        std::cerr << "Error: " << e.what() << std::endl;
        return nvxio::Application::APP_EXIT_CODE_ERROR;
    }

    return nvxio::Application::APP_EXIT_CODE_SUCCESS;
}
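drawArrows and displayState are sample-local helpers not shown on this page. A sketch of drawArrows, assuming nvxio::Render exposes a putArrows primitive taking a LineStyle of {color, thickness}, might look like:

// Hypothetical sketch; the demo's actual helper is defined elsewhere.
static void drawArrows(nvxio::Render* renderer, vx_array old_points, vx_array new_points)
{
    // green, 1 px wide arrows from each previous feature to its tracked position
    nvxio::Render::LineStyle arrowStyle = { {0, 255, 0, 255}, 1 };
    renderer->putArrows(old_points, new_points, arrowStyle);
}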
Code Example #4
File: ConvertFrame.cpp  Project: neariot/sfm
void convertFrame(vx_context vxContext,
                  vx_image frame,
                  const FrameSource::Parameters & configuration,
                  vx_imagepatch_addressing_t & decodedImageAddr,
                  void * decodedPtr,
                  bool is_cuda,
                  void *& devMem,
                  size_t & devMemPitch,
                  vx_image & scaledImage
                  )
{
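    // the bytes-per-pixel value in stride_x selects the OpenVX format:
    // 1 -> U8, 3 -> RGB, 4 -> RGBX (entries 0 and 2 are unused placeholders)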
    vx_df_image_e vx_type_map[5] = { VX_DF_IMAGE_VIRT, VX_DF_IMAGE_U8,
                                     VX_DF_IMAGE_VIRT, VX_DF_IMAGE_RGB, VX_DF_IMAGE_RGBX };
    vx_df_image_e decodedFormat = vx_type_map[decodedImageAddr.stride_x];

    // query the destination image size and format
    vx_uint32 frameWidth, frameHeight;
    vx_df_image_e frameFormat;
    NVXIO_SAFE_CALL( vxQueryImage(frame, VX_IMAGE_ATTRIBUTE_WIDTH, (void *)&frameWidth, sizeof(frameWidth)) );
    NVXIO_SAFE_CALL( vxQueryImage(frame, VX_IMAGE_ATTRIBUTE_HEIGHT, (void *)&frameHeight, sizeof(frameHeight)) );
    NVXIO_SAFE_CALL( vxQueryImage(frame, VX_IMAGE_ATTRIBUTE_FORMAT, (void *)&frameFormat, sizeof(frameFormat)) );
    bool needScale = frameWidth != decodedImageAddr.dim_x ||
                     frameHeight != decodedImageAddr.dim_y;
    bool needConvert = frameFormat != decodedFormat;

    // configured and actual image sizes must be the same!
    if ((frameWidth != configuration.frameWidth) ||
            (frameHeight != configuration.frameHeight))
    {
        NVXIO_THROW_EXCEPTION("Actual image [ " << frameWidth << " x " << frameHeight <<
                              " ] is not equal to configuration one [ " << configuration.frameWidth
                              << " x " << configuration.frameHeight << " ]");
    }

    // allocate CUDA memory to copy decoded image to
    if (!is_cuda)
    {
        if (!devMem)
        {
            // we assume that decoded image will have no more than 4 channels per pixel
            NVXIO_ASSERT( cudaSuccess == cudaMallocPitch(&devMem, &devMemPitch, decodedImageAddr.dim_x * 4,
                                                         decodedImageAddr.dim_y) );
        }
    }

    // check if decoded image format has changed
    if (scaledImage)
    {
        vx_df_image_e scaledFormat;
        NVXIO_SAFE_CALL( vxQueryImage(scaledImage, VX_IMAGE_ATTRIBUTE_FORMAT, (void *)&scaledFormat, sizeof(scaledFormat)) );

        if (scaledFormat != decodedFormat)
        {
            NVXIO_SAFE_CALL( vxReleaseImage(&scaledImage) );
            scaledImage = NULL;
        }
    }

    if (needScale && !scaledImage)
    {
        scaledImage = vxCreateImage(vxContext, frameWidth, frameHeight, decodedFormat);
        NVXIO_CHECK_REFERENCE( scaledImage );
    }

    vx_image decodedImage = NULL;

    // 1. create vx_image wrapper
    if (is_cuda)
    {
        // a. create vx_image wrapper from CUDA pointer
        decodedImage = vxCreateImageFromHandle(vxContext, decodedFormat, &decodedImageAddr,
                                               &decodedPtr, NVX_IMPORT_TYPE_CUDA);
    }
    else
    {
        // a. upload decoded image to CUDA buffer
        NVXIO_ASSERT( cudaSuccess == cudaMemcpy2D(devMem, devMemPitch,
                                                  decodedPtr, decodedImageAddr.stride_y,
                                                  decodedImageAddr.dim_x * decodedImageAddr.stride_x,
                                                  decodedImageAddr.dim_y, cudaMemcpyHostToDevice) );

        // b. create vx_image wrapper for decoded buffer
        decodedImageAddr.stride_y = static_cast<vx_int32>(devMemPitch);
        decodedImage = vxCreateImageFromHandle(vxContext, decodedFormat, &decodedImageAddr,
                                               &devMem, NVX_IMPORT_TYPE_CUDA);
    }
    NVXIO_CHECK_REFERENCE( decodedImage );

    // 2. scale if necessary
    if (needScale)
    {
        // a. scale image
        NVXIO_SAFE_CALL( vxuScaleImage(vxContext, decodedImage, scaledImage, VX_INTERPOLATION_TYPE_BILINEAR) );
    }
    else
    {
        scaledImage = decodedImage;
    }

    // 3. convert / copy to dst image
    if (needConvert)
    {
        NVXIO_SAFE_CALL( vxuColorConvert(vxContext, scaledImage, frame) );
    }
    else
    {
        NVXIO_SAFE_CALL( nvxuCopyImage(vxContext, scaledImage, frame) );
    }

    if (!needScale)
        scaledImage = NULL;

    NVXIO_SAFE_CALL( vxReleaseImage(&decodedImage) );
}
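Code example #2 performs these upload/scale/convert steps inline; convertFrame factors them out. A hypothetical caller for a host-side RGB buffer, sketched under the same conventions (deliverDecodedFrame and its parameters are illustrative, not part of the project), could look like:

// Hypothetical usage sketch for convertFrame().
void deliverDecodedFrame(vx_context vxContext, vx_image frame,
                         const FrameSource::Parameters& configuration,
                         void* hostPixels, vx_uint32 width, vx_uint32 height,
                         void*& devMem, size_t& devMemPitch, vx_image& scaledImage)
{
    vx_imagepatch_addressing_t addr;
    addr.dim_x    = width;
    addr.dim_y    = height;
    addr.stride_x = 3;                         // bytes per pixel -> VX_DF_IMAGE_RGB
    addr.stride_y = ((width * 3 + 3) / 4) * 4; // rows padded to a multiple of 4
    addr.scale_x  = 1;
    addr.scale_y  = 1;

    // is_cuda = false: convertFrame uploads the host buffer into devMem itself
    convertFrame(vxContext, frame, configuration, addr,
                 hostPixels, false, devMem, devMemPitch, scaledImage);
}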