VdpStatus
softVdpPresentationQueueBlockUntilSurfaceIdle(VdpPresentationQueue presentation_queue,
                                              VdpOutputSurface surface,
                                              VdpTime *first_presentation_time)
{
    if (!first_presentation_time)
        return VDP_STATUS_INVALID_POINTER;
    VdpPresentationQueueData *pqData =
        handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE);
    if (NULL == pqData)
        return VDP_STATUS_INVALID_HANDLE;
    handle_release(presentation_queue);

    VdpOutputSurfaceData *surfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE);
    if (NULL == surfData)
        return VDP_STATUS_INVALID_HANDLE;

    // TODO: use locking instead of busy loop
    while (surfData->status != VDP_PRESENTATION_QUEUE_STATUS_IDLE) {
        handle_release(surface);
        usleep(1000);
        surfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE);
        if (!surfData)
            return VDP_STATUS_ERROR;
    }

    *first_presentation_time = surfData->first_presentation_time;
    handle_release(surface);
    return VDP_STATUS_OK;
}
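/*
 * A minimal sketch of the locking-based wait the TODO above asks for, instead
 * of the 1 ms polling loop. It assumes a hypothetical idle_mutex/idle_cond
 * pair added to VdpOutputSurfaceData and signalled by the presentation thread
 * whenever it flips a surface to VDP_PRESENTATION_QUEUE_STATUS_IDLE; neither
 * field exists in the code in this listing.
 */
static VdpStatus
block_until_idle_sketch(VdpOutputSurfaceData *surfData, VdpTime *first_presentation_time)
{
    pthread_mutex_lock(&surfData->idle_mutex);          // hypothetical field
    while (surfData->status != VDP_PRESENTATION_QUEUE_STATUS_IDLE)
        pthread_cond_wait(&surfData->idle_cond,         // hypothetical field
                          &surfData->idle_mutex);
    *first_presentation_time = surfData->first_presentation_time;
    pthread_mutex_unlock(&surfData->idle_mutex);
    return VDP_STATUS_OK;
}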
VdpStatus
softVdpPresentationQueueCreate(VdpDevice device,
                               VdpPresentationQueueTarget presentation_queue_target,
                               VdpPresentationQueue *presentation_queue)
{
    if (!presentation_queue)
        return VDP_STATUS_INVALID_POINTER;
    VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE);
    if (NULL == deviceData)
        return VDP_STATUS_INVALID_HANDLE;

    VdpPresentationQueueTargetData *targetData =
        handle_acquire(presentation_queue_target, HANDLETYPE_PRESENTATION_QUEUE_TARGET);
    if (NULL == targetData) {
        handle_release(device);
        return VDP_STATUS_INVALID_HANDLE;
    }

    VdpPresentationQueueData *data = calloc(1, sizeof(VdpPresentationQueueData));
    if (NULL == data) {
        handle_release(device);
        handle_release(presentation_queue_target);
        return VDP_STATUS_RESOURCES;
    }

    data->type = HANDLETYPE_PRESENTATION_QUEUE;
    data->device = deviceData;
    data->target = targetData;
    data->bg_color.red = 0.0;
    data->bg_color.green = 0.0;
    data->bg_color.blue = 0.0;
    data->bg_color.alpha = 0.0;

    deviceData->refcount ++;
    targetData->refcount ++;
    *presentation_queue = handle_insert(data);

    // initialize queue
    data->queue.head = -1;
    data->queue.used = 0;
    for (unsigned int k = 0; k < PRESENTATION_QUEUE_LENGTH; k ++) {
        data->queue.item[k].next = -1;
        // other fields are zero due to calloc
    }
    for (unsigned int k = 0; k < PRESENTATION_QUEUE_LENGTH - 1; k ++)
        data->queue.freelist[k] = k + 1;
    data->queue.freelist[PRESENTATION_QUEUE_LENGTH - 1] = -1;
    data->queue.firstfree = 0;

    pthread_mutex_init(&data->queue_mutex, NULL);
    pthread_cond_init(&data->new_work_available, NULL);

    // launch worker thread
    pthread_create(&data->worker_thread, NULL, presentation_thread,
                   (void *)(size_t)(*presentation_queue));

    handle_release(device);
    handle_release(presentation_queue_target);
    return VDP_STATUS_OK;
}
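/*
 * The queue fields touched above suggest the following layout (a
 * reconstruction for reference; the real struct is not part of this listing):
 * a fixed array of items chained by index, with a parallel index-based
 * freelist, so no allocation happens on the display path.
 */
struct presentation_queue_sketch {
    int head;                                   // earliest queued item, -1 if empty
    int used;                                   // number of queued items
    int firstfree;                              // head of the freelist, -1 if full
    int freelist[PRESENTATION_QUEUE_LENGTH];    // freelist chain links
    struct {
        VdpTime          t;                     // earliest presentation time
        int              next;                  // next item in time order, -1 terminates
        uint32_t         clip_width;
        uint32_t         clip_height;
        VdpOutputSurface surface;
    } item[PRESENTATION_QUEUE_LENGTH];
};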
VdpStatus
softVdpPresentationQueueQuerySurfaceStatus(VdpPresentationQueue presentation_queue,
                                           VdpOutputSurface surface,
                                           VdpPresentationQueueStatus *status,
                                           VdpTime *first_presentation_time)
{
    if (!status || !first_presentation_time)
        return VDP_STATUS_INVALID_POINTER;
    VdpPresentationQueueData *pqData =
        handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE);
    if (NULL == pqData)
        return VDP_STATUS_INVALID_HANDLE;
    VdpOutputSurfaceData *surfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE);
    if (NULL == surfData) {
        handle_release(presentation_queue);
        return VDP_STATUS_INVALID_HANDLE;
    }

    *status = surfData->status;
    *first_presentation_time = surfData->first_presentation_time;

    handle_release(presentation_queue);
    handle_release(surface);

    return VDP_STATUS_OK;
}
VdpStatus
softVdpDecoderRender(VdpDecoder decoder, VdpVideoSurface target,
                     VdpPictureInfo const *picture_info, uint32_t bitstream_buffer_count,
                     VdpBitstreamBuffer const *bitstream_buffers)
{
    VdpStatus err_code;
    if (!picture_info || !bitstream_buffers)
        return VDP_STATUS_INVALID_POINTER;
    VdpDecoderData *decoderData = handle_acquire(decoder, HANDLETYPE_DECODER);
    VdpVideoSurfaceData *dstSurfData = handle_acquire(target, HANDLETYPE_VIDEO_SURFACE);
    if (NULL == decoderData || NULL == dstSurfData) {
        err_code = VDP_STATUS_INVALID_HANDLE;
        goto quit;
    }

    if (VDP_DECODER_PROFILE_H264_BASELINE == decoderData->profile ||
        VDP_DECODER_PROFILE_H264_MAIN ==     decoderData->profile ||
        VDP_DECODER_PROFILE_H264_HIGH ==     decoderData->profile)
    {
        err_code = softVdpDecoderRender_h264(decoder, decoderData, dstSurfData, picture_info,
                                             bitstream_buffer_count, bitstream_buffers);
        if (VDP_STATUS_OK != err_code)
            goto quit;
    } else {
        traceError("error (softVdpDecoderRender): no implementation for profile %s\n",
                   reverse_decoder_profile(decoderData->profile));
        err_code = VDP_STATUS_NO_IMPLEMENTATION;
        goto quit;
    }

    err_code = VDP_STATUS_OK;
quit:
    handle_release(decoder);
    handle_release(target);
    return err_code;
}
VdpStatus
softVdpPresentationQueueTargetDestroy(VdpPresentationQueueTarget presentation_queue_target)
{
    VdpPresentationQueueTargetData *pqTargetData =
        handle_acquire(presentation_queue_target, HANDLETYPE_PRESENTATION_QUEUE_TARGET);
    if (NULL == pqTargetData)
        return VDP_STATUS_INVALID_HANDLE;
    VdpDeviceData *deviceData = pqTargetData->device;

    if (0 != pqTargetData->refcount) {
        traceError("warning (softVdpPresentationQueueTargetDestroy): non-zero reference"
                   "count (%d)\n", pqTargetData->refcount);
        handle_release(presentation_queue_target);
        return VDP_STATUS_ERROR;
    }

    // the drawable may already be destroyed, so activate the global context instead
    glx_context_push_thread_local(deviceData);
    glXDestroyContext(deviceData->display, pqTargetData->glc);

    GLenum gl_error = glGetError();
    glx_context_pop();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (VdpPresentationQueueTargetDestroy): gl error %d\n", gl_error);
        handle_release(presentation_queue_target);
        return VDP_STATUS_ERROR;
    }

    deviceData->refcount --;
    handle_expunge(presentation_queue_target);
    free(pqTargetData);
    return VDP_STATUS_OK;
}
VdpStatus
vdpVideoSurfaceDestroy(VdpVideoSurface surface)
{
    VdpVideoSurfaceData *videoSurfData = handle_acquire(surface, HANDLETYPE_VIDEO_SURFACE);
    if (NULL == videoSurfData)
        return VDP_STATUS_INVALID_HANDLE;
    VdpDeviceData *deviceData = videoSurfData->deviceData;

    glx_ctx_push_thread_local(deviceData);
    glDeleteTextures(1, &videoSurfData->tex_id);
    GLenum gl_error = glGetError();
    glx_ctx_pop();

    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        handle_release(surface);
        return VDP_STATUS_ERROR;
    }

    if (deviceData->va_available) {
        // return VA surface to the free list
        if (videoSurfData->decoder != VDP_INVALID_HANDLE) {
            VdpDecoderData *dd = handle_acquire(videoSurfData->decoder, HANDLETYPE_DECODER);
            if (NULL != dd) {
                free_list_push(dd->free_list, &dd->free_list_head, videoSurfData->rt_idx);
                handle_release(videoSurfData->decoder);
            }
        }
        // .va_surf will be freed in VdpDecoderDestroy
    }

    if (videoSurfData->y_plane)
        free(videoSurfData->y_plane);
    if (videoSurfData->u_plane)
        free(videoSurfData->u_plane);
    // do not free videoSurfData->v_plane, it's just pointer into the middle of u_plane

    unref_device(deviceData);
    handle_expunge(surface);
    free(videoSurfData);
    return VDP_STATUS_OK;
}
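/*
 * free_list_push()/free_list_pop() are used above and in softVdpDecoderCreate
 * below but are not shown in this listing. A minimal sketch consistent with
 * that usage: free_list is an int array chained through its own values, with
 * -1 terminating the list.
 */
static void
free_list_push(int *free_list, int *free_list_head, int value)
{
    free_list[value] = *free_list_head;     // new node points at the old head
    *free_list_head = value;                // value becomes the new head
}

static int
free_list_pop(int *free_list, int *free_list_head)
{
    const int value = *free_list_head;
    if (-1 == value)
        return -1;                          // list exhausted
    *free_list_head = free_list[value];     // unlink the head
    return value;
}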
VdpStatus
softVdpPresentationQueueGetBackgroundColor(VdpPresentationQueue presentation_queue,
                                           VdpColor *background_color)
{
    if (!background_color)
        return VDP_STATUS_INVALID_POINTER;
    VdpPresentationQueueData *pqData =
        handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE);
    if (NULL == pqData)
        return VDP_STATUS_INVALID_HANDLE;

    *background_color = pqData->bg_color;

    handle_release(presentation_queue);
    return VDP_STATUS_OK;
}
VdpStatus
softVdpDecoderGetParameters(VdpDecoder decoder, VdpDecoderProfile *profile,
                            uint32_t *width, uint32_t *height)
{
    if (!profile || !width || !height)
        return VDP_STATUS_INVALID_POINTER;
    VdpDecoderData *decoderData = handle_acquire(decoder, HANDLETYPE_DECODER);
    if (!decoderData)
        return VDP_STATUS_INVALID_HANDLE;

    *profile = decoderData->profile;
    *width   = decoderData->width;
    *height  = decoderData->height;

    handle_release(decoder);
    return VDP_STATUS_OK;
}
VdpStatus
vdpVideoSurfaceGetParameters(VdpVideoSurface surface, VdpChromaType *chroma_type,
                             uint32_t *width, uint32_t *height)
{
    if (!chroma_type || !width || !height)
        return VDP_STATUS_INVALID_POINTER;
    VdpVideoSurfaceData *videoSurf = handle_acquire(surface, HANDLETYPE_VIDEO_SURFACE);
    if (NULL == videoSurf)
        return VDP_STATUS_INVALID_HANDLE;

    *chroma_type = videoSurf->chroma_type;
    *width       = videoSurf->width;
    *height      = videoSurf->height;

    handle_release(surface);
    return VDP_STATUS_OK;
}
VdpStatus
softVdpPresentationQueueTargetCreateX11(VdpDevice device, Drawable drawable,
                                        VdpPresentationQueueTarget *target)
{
    if (!target)
        return VDP_STATUS_INVALID_POINTER;
    VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE);
    if (NULL == deviceData)
        return VDP_STATUS_INVALID_HANDLE;

    VdpPresentationQueueTargetData *data = calloc(1, sizeof(VdpPresentationQueueTargetData));
    if (NULL == data) {
        handle_release(device);
        return VDP_STATUS_RESOURCES;
    }

    data->type = HANDLETYPE_PRESENTATION_QUEUE_TARGET;
    data->device = deviceData;
    data->drawable = drawable;
    data->refcount = 0;

    pthread_mutex_lock(&global.glx_ctx_stack_mutex);
    GLint att[] = { GLX_RGBA, GLX_DEPTH_SIZE, 24, GLX_DOUBLEBUFFER, None };
    XVisualInfo *vi;
    vi = glXChooseVisual(deviceData->display, deviceData->screen, att);
    if (NULL == vi) {
        traceError("error (softVdpPresentationQueueTargetCreateX11): glXChooseVisual failed\n");
        free(data);
        pthread_mutex_unlock(&global.glx_ctx_stack_mutex);
        handle_release(device);
        return VDP_STATUS_ERROR;
    }

    // create context for displaying the result (shares display lists with deviceData->root_glc)
    data->glc = glXCreateContext(deviceData->display, vi, deviceData->root_glc, GL_TRUE);
    deviceData->refcount ++;
    *target = handle_insert(data);
    pthread_mutex_unlock(&global.glx_ctx_stack_mutex);

    handle_release(device);
    return VDP_STATUS_OK;
}
VdpStatus
softVdpDecoderDestroy(VdpDecoder decoder)
{
    VdpDecoderData *decoderData = handle_acquire(decoder, HANDLETYPE_DECODER);
    if (NULL == decoderData)
        return VDP_STATUS_INVALID_HANDLE;
    VdpDeviceData *deviceData = decoderData->device;

    if (deviceData->va_available) {
        VADisplay va_dpy = deviceData->va_dpy;
        vaDestroySurfaces(va_dpy, decoderData->render_targets, decoderData->num_render_targets);
        vaDestroyContext(va_dpy, decoderData->context_id);
        vaDestroyConfig(va_dpy, decoderData->config_id);
    }

    handle_expunge(decoder);
    deviceData->refcount --;
    free(decoderData);
    return VDP_STATUS_OK;
}
VdpStatus
softVdpPresentationQueueSetBackgroundColor(VdpPresentationQueue presentation_queue,
                                           VdpColor *const background_color)
{
    VdpPresentationQueueData *pqData =
        handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE);
    if (NULL == pqData)
        return VDP_STATUS_INVALID_HANDLE;

    if (background_color) {
        pqData->bg_color = *background_color;
    } else {
        pqData->bg_color.red = 0.0;
        pqData->bg_color.green = 0.0;
        pqData->bg_color.blue = 0.0;
        pqData->bg_color.alpha = 0.0;
    }

    handle_release(presentation_queue);
    return VDP_STATUS_OK;
}
VdpStatus
softVdpPresentationQueueDestroy(VdpPresentationQueue presentation_queue)
{
    VdpPresentationQueueData *pqData =
        handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE);
    if (NULL == pqData)
        return VDP_STATUS_INVALID_HANDLE;

    pthread_cancel(pqData->worker_thread);

    if (0 != pthread_join(pqData->worker_thread, NULL)) {
        traceError("VdpPresentationQueueDestroy: failed to stop worker thread");
        handle_release(presentation_queue);
        return VDP_STATUS_ERROR;
    }

    handle_expunge(presentation_queue);
    pqData->device->refcount --;
    pqData->target->refcount --;

    free(pqData);
    return VDP_STATUS_OK;
}
VdpStatus
vdpDeviceDestroy(VdpDevice device)
{
    VdpStatus err_code;
    VdpDeviceData *data = handle_acquire(device, HANDLETYPE_DEVICE);
    if (NULL == data)
        return VDP_STATUS_INVALID_HANDLE;

    if (0 != data->refcount) {
        // Buggy client forgot to destroy dependent objects, or assumed that destroying
        // the VdpDevice destroys all child objects. Try to mitigate and prevent leakage.
        traceError("warning (%s): non-zero reference count (%d). Trying to free child objects.\n",
                   __func__, data->refcount);
        void *parent_object = data;
        handle_execute_for_all(destroy_child_objects, parent_object);
    }

    if (0 != data->refcount) {
        traceError("error (%s): still non-zero reference count (%d)\n", __func__, data->refcount);
        traceError("Here is the list of objects:\n");
        struct {
            int cnt;
            int total_cnt;
            VdpDeviceData *deviceData;
        } state = { .cnt = 0, .total_cnt = 0, .deviceData = data };

        handle_execute_for_all(print_handle_type, &state);
        traceError("Objects leaked: %d\n", state.cnt);
        traceError("Objects visited during scan: %d\n", state.total_cnt);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    // clean up libva
    if (data->va_available)
        vaTerminate(data->va_dpy);

    glx_ctx_push_thread_local(data);
    glDeleteTextures(1, &data->watermark_tex_id);
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
    destroy_shaders(data);
    glx_ctx_pop();

    glx_ctx_lock();
    glXMakeCurrent(data->display, None, NULL);
    glx_ctx_unlock();

    glx_ctx_unref_glc_hash_table(data->display);

    handle_xdpy_unref(data->display_orig);
    handle_expunge(device);
    pthread_mutex_destroy(&data->refcount_mutex);
    free(data);

    GLenum gl_error = glGetError();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        err_code = VDP_STATUS_ERROR;
        goto quit_skip_release;
    }

    return VDP_STATUS_OK;

quit:
    handle_release(device);
quit_skip_release:
    return err_code;
}
VdpStatus
softVdpDecoderQueryCapabilities(VdpDevice device, VdpDecoderProfile profile, VdpBool *is_supported,
                                uint32_t *max_level, uint32_t *max_macroblocks,
                                uint32_t *max_width, uint32_t *max_height)
{
    VdpStatus err_code;
    if (!is_supported || !max_level || !max_macroblocks || !max_width || !max_height)
        return VDP_STATUS_INVALID_POINTER;
    VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE);
    if (NULL == deviceData)
        return VDP_STATUS_INVALID_HANDLE;

    *max_level = 0;
    *max_macroblocks = 0;
    *max_width = 0;
    *max_height = 0;

    if (!deviceData->va_available) {
        *is_supported = 0;
        err_code = VDP_STATUS_OK;
        goto quit;
    }

    VAProfile *va_profile_list = malloc(sizeof(VAProfile) * vaMaxNumProfiles(deviceData->va_dpy));
    if (NULL == va_profile_list) {
        err_code = VDP_STATUS_RESOURCES;
        goto quit;
    }

    int num_profiles;
    VAStatus status = vaQueryConfigProfiles(deviceData->va_dpy, va_profile_list, &num_profiles);
    if (VA_STATUS_SUCCESS != status) {
        free(va_profile_list);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    struct {
        int mpeg2_simple;
        int mpeg2_main;
        int h264_baseline;
        int h264_main;
        int h264_high;
        int vc1_simple;
        int vc1_main;
        int vc1_advanced;
    } available_profiles = {0, 0, 0, 0, 0, 0, 0, 0};

    for (int k = 0; k < num_profiles; k ++) {
        switch (va_profile_list[k]) {
        case VAProfileMPEG2Main:
            available_profiles.mpeg2_main = 1;
            /* fall through */
        case VAProfileMPEG2Simple:
            available_profiles.mpeg2_simple = 1;
            break;

        case VAProfileH264High:
            available_profiles.h264_high = 1;
            /* fall through */
        case VAProfileH264Main:
            available_profiles.h264_main = 1;
            /* fall through */
        case VAProfileH264Baseline:
            available_profiles.h264_baseline = 1;
            /* fall through */
        case VAProfileH264ConstrainedBaseline:
            break;

        case VAProfileVC1Advanced:
            available_profiles.vc1_advanced = 1;
            /* fall through */
        case VAProfileVC1Main:
            available_profiles.vc1_main = 1;
            /* fall through */
        case VAProfileVC1Simple:
            available_profiles.vc1_simple = 1;
            break;

        // unhandled profiles
        case VAProfileH263Baseline:
        case VAProfileJPEGBaseline:
        default:
            // do nothing
            break;
        }
    }
    free(va_profile_list);

    *is_supported = 0;
    // TODO: How to determine max width and height with libva?
    *max_width = 2048;
    *max_height = 2048;
    *max_macroblocks = 16384;
    switch (profile) {
    case VDP_DECODER_PROFILE_MPEG2_SIMPLE:
        *is_supported = available_profiles.mpeg2_simple;
        *max_level = VDP_DECODER_LEVEL_MPEG2_HL;
        break;
    case VDP_DECODER_PROFILE_MPEG2_MAIN:
        *is_supported = available_profiles.mpeg2_main;
        *max_level = VDP_DECODER_LEVEL_MPEG2_HL;
        break;

    case VDP_DECODER_PROFILE_H264_BASELINE:
        *is_supported = available_profiles.h264_baseline;
        // TODO: Does the underlying libva really support 5.1?
        *max_level = VDP_DECODER_LEVEL_H264_5_1;
        break;
    case VDP_DECODER_PROFILE_H264_MAIN:
        *is_supported = available_profiles.h264_main;
        *max_level = VDP_DECODER_LEVEL_H264_5_1;
        break;
    case VDP_DECODER_PROFILE_H264_HIGH:
        *is_supported = available_profiles.h264_high;
        *max_level = VDP_DECODER_LEVEL_H264_5_1;
        break;

    case VDP_DECODER_PROFILE_VC1_SIMPLE:
        *is_supported = available_profiles.vc1_simple;
        *max_level = VDP_DECODER_LEVEL_VC1_SIMPLE_MEDIUM;
        break;
    case VDP_DECODER_PROFILE_VC1_MAIN:
        *is_supported = available_profiles.vc1_main;
        *max_level = VDP_DECODER_LEVEL_VC1_MAIN_HIGH;
        break;
    case VDP_DECODER_PROFILE_VC1_ADVANCED:
        *is_supported = available_profiles.vc1_advanced;
        *max_level = VDP_DECODER_LEVEL_VC1_ADVANCED_L4;
        break;

    // unsupported
    case VDP_DECODER_PROFILE_MPEG1:
    case VDP_DECODER_PROFILE_MPEG4_PART2_SP:
    case VDP_DECODER_PROFILE_MPEG4_PART2_ASP:
    case VDP_DECODER_PROFILE_DIVX4_QMOBILE:
    case VDP_DECODER_PROFILE_DIVX4_MOBILE:
    case VDP_DECODER_PROFILE_DIVX4_HOME_THEATER:
    case VDP_DECODER_PROFILE_DIVX4_HD_1080P:
    case VDP_DECODER_PROFILE_DIVX5_QMOBILE:
    case VDP_DECODER_PROFILE_DIVX5_MOBILE:
    case VDP_DECODER_PROFILE_DIVX5_HOME_THEATER:
    case VDP_DECODER_PROFILE_DIVX5_HD_1080P:
    default:
        break;
    }

    err_code = VDP_STATUS_OK;
quit:
    handle_release(device);
    return err_code;
}
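/*
 * A hedged usage sketch: how a client might probe H.264 High support through
 * the entry point above. `device` is assumed to be a valid VdpDevice obtained
 * elsewhere; assumes <stdio.h>.
 */
static void
probe_h264_high(VdpDevice device)
{
    VdpBool supported;
    uint32_t max_level, max_macroblocks, max_width, max_height;

    VdpStatus st = softVdpDecoderQueryCapabilities(device, VDP_DECODER_PROFILE_H264_HIGH,
                                                   &supported, &max_level, &max_macroblocks,
                                                   &max_width, &max_height);
    if (VDP_STATUS_OK == st && supported)
        printf("H.264 High: max level %u, up to %ux%u\n",
               (unsigned)max_level, (unsigned)max_width, (unsigned)max_height);
}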
static
VdpStatus
h264_translate_reference_frames(VdpVideoSurfaceData *dstSurfData, VdpDecoder decoder,
                                VdpDecoderData *decoderData,
                                VAPictureParameterBufferH264 *pic_param,
                                const VdpPictureInfoH264 *vdppi)
{
    // take new VA surface from buffer if needed
    if (VA_INVALID_SURFACE == dstSurfData->va_surf) {
        int idx = free_list_pop(decoderData->free_list, &decoderData->free_list_head);
        if (-1 == idx)
            return VDP_STATUS_RESOURCES;
        dstSurfData->decoder = decoder;
        dstSurfData->va_surf = decoderData->render_targets[idx];
        dstSurfData->rt_idx  = idx;
    }

    // current frame
    pic_param->CurrPic.picture_id   = dstSurfData->va_surf;
    pic_param->CurrPic.frame_idx    = vdppi->frame_num;
    pic_param->CurrPic.flags  = vdppi->is_reference ? VA_PICTURE_H264_SHORT_TERM_REFERENCE : 0;
    if (vdppi->field_pic_flag) {
        pic_param->CurrPic.flags |=
            vdppi->bottom_field_flag ? VA_PICTURE_H264_BOTTOM_FIELD : VA_PICTURE_H264_TOP_FIELD;
    }

    pic_param->CurrPic.TopFieldOrderCnt     = vdppi->field_order_cnt[0];
    pic_param->CurrPic.BottomFieldOrderCnt  = vdppi->field_order_cnt[1];

    // preliminarily mark all reference pictures invalid
    for (int k = 0; k < 16; k ++)
        reset_va_picture_h264(&pic_param->ReferenceFrames[k]);

    // reference frames
    for (int k = 0; k < vdppi->num_ref_frames; k ++) {
        if (VDP_INVALID_HANDLE == vdppi->referenceFrames[k].surface) {
            reset_va_picture_h264(&pic_param->ReferenceFrames[k]);
            continue;
        }

        VdpReferenceFrameH264 const *vdp_ref = &(vdppi->referenceFrames[k]);
        VdpVideoSurfaceData *vdpSurfData =
            handle_acquire(vdp_ref->surface, HANDLETYPE_VIDEO_SURFACE);
        VAPictureH264 *va_ref = &(pic_param->ReferenceFrames[k]);
        if (NULL == vdpSurfData) {
            traceError("error (h264_translate_reference_frames): NULL == vdpSurfData");
            return VDP_STATUS_ERROR;
        }

        // take new VA surface from buffer if needed
        if (VA_INVALID_SURFACE == vdpSurfData->va_surf) {
            int idx = free_list_pop(decoderData->free_list, &decoderData->free_list_head);
            if (-1 == idx) {
                handle_release(vdp_ref->surface);
                return VDP_STATUS_RESOURCES;
            }
            vdpSurfData->decoder = decoder;
            vdpSurfData->va_surf = decoderData->render_targets[idx];
            vdpSurfData->rt_idx  = idx;
        }

        va_ref->picture_id = vdpSurfData->va_surf;
        va_ref->frame_idx = vdp_ref->frame_idx;
        va_ref->flags = vdp_ref->is_long_term ? VA_PICTURE_H264_LONG_TERM_REFERENCE
                                              : VA_PICTURE_H264_SHORT_TERM_REFERENCE;

        if (vdp_ref->top_is_reference && vdp_ref->bottom_is_reference) {
            // Full frame. This block intentionally left blank. No flags set.
        } else {
            if (vdp_ref->top_is_reference)
                va_ref->flags |= VA_PICTURE_H264_TOP_FIELD;
            else
                va_ref->flags |= VA_PICTURE_H264_BOTTOM_FIELD;
        }

        va_ref->TopFieldOrderCnt    = vdp_ref->field_order_cnt[0];
        va_ref->BottomFieldOrderCnt = vdp_ref->field_order_cnt[1];
        handle_release(vdp_ref->surface);
    }

    return VDP_STATUS_OK;
}
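/*
 * reset_va_picture_h264() is called above but not shown. A minimal sketch of
 * what it presumably does: mark a VAPictureH264 slot as unused, using the
 * standard libva invalid markers.
 */
static void
reset_va_picture_h264(VAPictureH264 *va_pic)
{
    va_pic->picture_id          = VA_INVALID_SURFACE;
    va_pic->frame_idx           = 0;
    va_pic->flags               = VA_PICTURE_H264_INVALID;
    va_pic->TopFieldOrderCnt    = 0;
    va_pic->BottomFieldOrderCnt = 0;
}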
VdpStatus
softVdpDecoderCreate(VdpDevice device, VdpDecoderProfile profile, uint32_t width, uint32_t height,
                     uint32_t max_references, VdpDecoder *decoder)
{
    VdpStatus err_code;
    if (!decoder)
        return VDP_STATUS_INVALID_POINTER;
    VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE);
    if (NULL == deviceData)
        return VDP_STATUS_INVALID_HANDLE;
    if (!deviceData->va_available) {
        err_code = VDP_STATUS_INVALID_DECODER_PROFILE;
        goto quit;
    }
    VADisplay va_dpy = deviceData->va_dpy;

    VdpDecoderData *data = calloc(1, sizeof(VdpDecoderData));
    if (NULL == data) {
        err_code = VDP_STATUS_RESOURCES;
        goto quit;
    }

    data->type = HANDLETYPE_DECODER;
    data->device = deviceData;
    data->profile = profile;
    data->width = width;
    data->height = height;
    data->max_references = max_references;

    // initialize free_list; initially, all entries are free
    data->free_list_head = -1;
    for (int k = 0; k < MAX_RENDER_TARGETS; k ++) {
        free_list_push(data->free_list, &data->free_list_head, k);
    }

    VAProfile va_profile;
    VAStatus status;
    int final_try = 0;
    VdpDecoderProfile next_profile = profile;

    // Try to create a decoder for the requested profile. On failure, try a more advanced one
    while (! final_try) {
        profile = next_profile;
        switch (profile) {
        case VDP_DECODER_PROFILE_H264_BASELINE:
            va_profile = VAProfileH264Baseline;
            data->num_render_targets = NUM_RENDER_TARGETS_H264;
            next_profile = VDP_DECODER_PROFILE_H264_MAIN;
            break;
        case VDP_DECODER_PROFILE_H264_MAIN:
            va_profile = VAProfileH264Main;
            data->num_render_targets = NUM_RENDER_TARGETS_H264;
            next_profile = VDP_DECODER_PROFILE_H264_HIGH;
            break;
        case VDP_DECODER_PROFILE_H264_HIGH:
            va_profile = VAProfileH264High;
            data->num_render_targets = NUM_RENDER_TARGETS_H264;
            // there is no more advanced profile, so it's final try
            final_try = 1;
            break;
        default:
            traceError("error (softVdpDecoderCreate): decoder %s not implemented\n",
                       reverse_decoder_profile(profile));
            err_code = VDP_STATUS_INVALID_DECODER_PROFILE;
            goto quit_free_data;
        }

        status = vaCreateConfig(va_dpy, va_profile, VAEntrypointVLD, NULL, 0, &data->config_id);
        if (VA_STATUS_SUCCESS == status)        // break loop if decoder created
            break;
    }

    if (VA_STATUS_SUCCESS != status) {
        err_code = VDP_STATUS_ERROR;
        goto quit_free_data;
    }

    // Create surfaces. All video surfaces are created here rather than in VdpVideoSurfaceCreate:
    // VA-API requires surfaces to be bound to a context at creation time, while VDPAU allows
    // binding them later. So here is the trick: VDPAU video surfaces get their va_surf
    // dynamically, in DecoderRender.

    // TODO: check format of surfaces created
#if VA_CHECK_VERSION(0, 34, 0)
    status = vaCreateSurfaces(va_dpy, VA_RT_FORMAT_YUV420, width, height,
        data->render_targets, data->num_render_targets, NULL, 0);
#else
    status = vaCreateSurfaces(va_dpy, width, height, VA_RT_FORMAT_YUV420,
        data->num_render_targets, data->render_targets);
#endif
    if (VA_STATUS_SUCCESS != status) {
        err_code = VDP_STATUS_ERROR;
        goto quit_free_data;
    }

    status = vaCreateContext(va_dpy, data->config_id, width, height, VA_PROGRESSIVE,
        data->render_targets, data->num_render_targets, &data->context_id);
    if (VA_STATUS_SUCCESS != status) {
        err_code = VDP_STATUS_ERROR;
        goto quit_free_data;
    }

    deviceData->refcount ++;
    *decoder = handle_insert(data);

    err_code = VDP_STATUS_OK;
    goto quit;

quit_free_data:
    free(data);
quit:
    handle_release(device);
    return err_code;
}
static
void *
presentation_thread(void *param)
{
    pthread_mutex_t cond_mutex = PTHREAD_MUTEX_INITIALIZER;
    pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
    VdpPresentationQueue presentation_queue = (VdpPresentationQueue)(size_t)param;
    VdpPresentationQueueData *pqData =
        handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE);
    if (NULL == pqData)
        return NULL;

    pthread_mutex_lock(&cond_mutex);
    while (1) {
        struct timespec now;
        clock_gettime(CLOCK_REALTIME, &now);
        struct timespec target_time = now;

        while (1) {
            int ret;
            handle_release(presentation_queue);
            pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
            ret = pthread_cond_timedwait(&pqData->new_work_available, &cond_mutex, &target_time);
            if (ret != 0 && ret != ETIMEDOUT) {
                traceError("%s: pthread_cond_timedwait failed with code %d\n", __func__, ret);
                goto quit;
            }

            struct timespec now;
            clock_gettime(CLOCK_REALTIME, &now);
            pqData = handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE);
            if (!pqData)
                goto quit;
            pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
            pthread_mutex_lock(&pqData->queue_mutex);
            if (pqData->queue.head != -1) {
                struct timespec ht = vdptime2timespec(pqData->queue.item[pqData->queue.head].t);
                if (now.tv_sec > ht.tv_sec ||
                    (now.tv_sec == ht.tv_sec && now.tv_nsec > ht.tv_nsec))
                {
                    // break loop and process event
                    pthread_mutex_unlock(&pqData->queue_mutex);
                    break;
                } else {
                    // sleep until next event
                    target_time = ht;
                }
            } else {
                // queue empty, no work to do. Wait for next event
                target_time = now;
                target_time.tv_sec += 1;
            }
            pthread_mutex_unlock(&pqData->queue_mutex);
        }

        // do event processing; queue_mutex was already unlocked before leaving the wait loop
        do_presentation_queue_display(pqData);
    }

quit:
    return NULL;
}
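/*
 * timespec2vdptime()/vdptime2timespec() are used throughout but not shown.
 * A minimal sketch, assuming VdpTime follows the VDPAU convention of
 * nanoseconds carried in a 64-bit integer:
 */
static inline VdpTime
timespec2vdptime(struct timespec ts)
{
    return (VdpTime)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

static inline struct timespec
vdptime2timespec(VdpTime t)
{
    struct timespec ts = {
        .tv_sec  = (time_t)(t / 1000000000),
        .tv_nsec = (long)(t % 1000000000),
    };
    return ts;
}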
static
VdpStatus
vdpVideoSurfacePutBitsYCbCr_glsl(VdpVideoSurface surface, VdpYCbCrFormat source_ycbcr_format,
                                 void const *const *source_data, uint32_t const *source_pitches)
{
    VdpStatus err_code;
    if (!source_data || !source_pitches)
        return VDP_STATUS_INVALID_POINTER;
    // TODO: implement VDP_YCBCR_FORMAT_UYVY
    // TODO: implement VDP_YCBCR_FORMAT_YUYV
    // TODO: implement VDP_YCBCR_FORMAT_Y8U8V8A8
    // TODO: implement VDP_YCBCR_FORMAT_V8U8Y8A8

    VdpVideoSurfaceData *dstSurfData = handle_acquire(surface, HANDLETYPE_VIDEO_SURFACE);
    if (NULL == dstSurfData)
        return VDP_STATUS_INVALID_HANDLE;
    VdpDeviceData *deviceData = dstSurfData->deviceData;

    switch (source_ycbcr_format) {
    case VDP_YCBCR_FORMAT_NV12:
    case VDP_YCBCR_FORMAT_YV12:
        /* do nothing */
        break;
    case VDP_YCBCR_FORMAT_UYVY:
    case VDP_YCBCR_FORMAT_YUYV:
    case VDP_YCBCR_FORMAT_Y8U8V8A8:
    case VDP_YCBCR_FORMAT_V8U8Y8A8:
    default:
        traceError("error (%s): not implemented source YCbCr format '%s'\n", __func__,
                   reverse_ycbcr_format(source_ycbcr_format));
        err_code = VDP_STATUS_INVALID_Y_CB_CR_FORMAT;
        goto err;
    }

    glx_ctx_push_thread_local(deviceData);
    glBindFramebuffer(GL_FRAMEBUFFER, dstSurfData->fbo_id);

    GLuint tex_id[2];
    glGenTextures(2, tex_id);
    glEnable(GL_TEXTURE_2D);

    switch (source_ycbcr_format) {
    case VDP_YCBCR_FORMAT_NV12:
        glActiveTexture(GL_TEXTURE1);
        glBindTexture(GL_TEXTURE_2D, tex_id[1]);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        // UV plane
        glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[1]);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, dstSurfData->width/2, dstSurfData->height/2, 0,
                     GL_RG, GL_UNSIGNED_BYTE, source_data[1]);

        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, tex_id[0]);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        // Y plane
        glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[0]);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, dstSurfData->width, dstSurfData->height, 0, GL_RED,
                     GL_UNSIGNED_BYTE, source_data[0]);
        break;
    case VDP_YCBCR_FORMAT_YV12:
        glActiveTexture(GL_TEXTURE1);
        glBindTexture(GL_TEXTURE_2D, tex_id[1]);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, dstSurfData->width/2, dstSurfData->height, 0,
                     GL_RED, GL_UNSIGNED_BYTE, NULL);
        // U plane
        glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[2]);
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, dstSurfData->width/2, dstSurfData->height/2, GL_RED,
                        GL_UNSIGNED_BYTE, source_data[2]);
        // V plane
        glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[1]);
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, dstSurfData->height/2, dstSurfData->width/2,
                        dstSurfData->height/2, GL_RED, GL_UNSIGNED_BYTE, source_data[1]);

        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, tex_id[0]);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        // Y plane
        glPixelStorei(GL_UNPACK_ROW_LENGTH, source_pitches[0]);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, dstSurfData->width, dstSurfData->height, 0, GL_RED,
                     GL_UNSIGNED_BYTE, source_data[0]);
        break;
    }
    glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);

    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, dstSurfData->width, 0, dstSurfData->height, -1.0f, 1.0f);
    glViewport(0, 0, dstSurfData->width, dstSurfData->height);

    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();

    glMatrixMode(GL_TEXTURE);
    glLoadIdentity();

    glDisable(GL_BLEND);

    switch (source_ycbcr_format) {
    case VDP_YCBCR_FORMAT_NV12:
        glUseProgram(deviceData->shaders[glsl_NV12_RGBA].program);
        glUniform1i(deviceData->shaders[glsl_NV12_RGBA].uniform.tex_0, 0);
        glUniform1i(deviceData->shaders[glsl_NV12_RGBA].uniform.tex_1, 1);
        break;
    case VDP_YCBCR_FORMAT_YV12:
        glUseProgram(deviceData->shaders[glsl_YV12_RGBA].program);
        glUniform1i(deviceData->shaders[glsl_YV12_RGBA].uniform.tex_0, 0);
        glUniform1i(deviceData->shaders[glsl_YV12_RGBA].uniform.tex_1, 1);
        break;
    }

    glBegin(GL_QUADS);
        glTexCoord2f(0, 0); glVertex2f(0, 0);
        glTexCoord2f(1, 0); glVertex2f(dstSurfData->width, 0);
        glTexCoord2f(1, 1); glVertex2f(dstSurfData->width, dstSurfData->height);
        glTexCoord2f(0, 1); glVertex2f(0, dstSurfData->height);
    glEnd();

    glUseProgram(0);
    glFinish();
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
    glDeleteTextures(2, tex_id);

    GLenum gl_error = glGetError();
    glx_ctx_pop();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        err_code = VDP_STATUS_ERROR;
        goto err;
    }

    err_code = VDP_STATUS_OK;
err:
    handle_release(surface);
    return err_code;
}
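/*
 * A hedged caller sketch for the NV12 path above: a full-resolution Y plane
 * plus an interleaved UV plane at half vertical resolution. The surface
 * handle and plane buffers are assumed to come from elsewhere; this is an
 * illustration, not part of the implementation.
 */
static VdpStatus
upload_nv12_sketch(VdpVideoSurface surface, uint32_t width,
                   const uint8_t *y_plane, const uint8_t *uv_plane)
{
    const void *const planes[2] = { y_plane, uv_plane };
    // NV12 UV pitch equals the Y pitch: width/2 samples of U and V per row
    const uint32_t pitches[2] = { width, width };

    return vdpVideoSurfacePutBitsYCbCr_glsl(surface, VDP_YCBCR_FORMAT_NV12,
                                            planes, pitches);
}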
static
VdpStatus
vdpVideoSurfacePutBitsYCbCr_swscale(VdpVideoSurface surface, VdpYCbCrFormat source_ycbcr_format,
                                    void const *const *source_data, uint32_t const *source_pitches)
{
    VdpStatus err_code;
    // TODO: implement this
    VdpVideoSurfaceData *dstSurfData = handle_acquire(surface, HANDLETYPE_VIDEO_SURFACE);
    // TODO: remove following (void)'s
    (void)vdpau_ycbcr_to_av_pixfmt;
    (void)source_pitches;
    (void)source_data;

    if (NULL == dstSurfData)
        return VDP_STATUS_INVALID_HANDLE;

    // sanity check
    switch (source_ycbcr_format) {
    case VDP_YCBCR_FORMAT_NV12:
        // fall through
    case VDP_YCBCR_FORMAT_YV12:
        if (dstSurfData->chroma_type != VDP_CHROMA_TYPE_420) {
            err_code = VDP_STATUS_INVALID_Y_CB_CR_FORMAT;
            goto err;
        }
        break;
    case VDP_YCBCR_FORMAT_UYVY:
        // fall through
    case VDP_YCBCR_FORMAT_YUYV:
        if (dstSurfData->chroma_type != VDP_CHROMA_TYPE_422) {
            err_code = VDP_STATUS_INVALID_Y_CB_CR_FORMAT;
            goto err;
        }
        break;
    case VDP_YCBCR_FORMAT_Y8U8V8A8:
        // fall through
    case VDP_YCBCR_FORMAT_V8U8Y8A8:
        if (dstSurfData->chroma_type != VDP_CHROMA_TYPE_444) {
            err_code = VDP_STATUS_INVALID_Y_CB_CR_FORMAT;
            goto err;
        }
        break;
    default:
        err_code = VDP_STATUS_INVALID_Y_CB_CR_FORMAT;
        goto err;
    }

    _video_surface_ensure_allocated(dstSurfData);
    dstSurfData->format = source_ycbcr_format;
    switch (source_ycbcr_format) {
    case VDP_YCBCR_FORMAT_NV12:       // 420
    case VDP_YCBCR_FORMAT_YV12:       // 420
    case VDP_YCBCR_FORMAT_UYVY:       // 422
    case VDP_YCBCR_FORMAT_YUYV:       // 422
    case VDP_YCBCR_FORMAT_Y8U8V8A8:   // 444
    case VDP_YCBCR_FORMAT_V8U8Y8A8:   // 444
        // TODO: actual conversion is not implemented yet
        break;
    }

    err_code = VDP_STATUS_OK;
err:
    handle_release(surface);
    return err_code;
}
static
void
do_presentation_queue_display(VdpPresentationQueueData *pqData)
{
    pthread_mutex_lock(&pqData->queue_mutex);
    assert(pqData->queue.used > 0);

    const int entry = pqData->queue.head;
    VdpDeviceData *deviceData = pqData->device;
    VdpOutputSurface surface = pqData->queue.item[entry].surface;
    const uint32_t clip_width = pqData->queue.item[entry].clip_width;
    const uint32_t clip_height = pqData->queue.item[entry].clip_height;

    // remove first entry from queue
    pqData->queue.used --;
    pqData->queue.freelist[pqData->queue.head] = pqData->queue.firstfree;
    pqData->queue.firstfree = pqData->queue.head;
    pqData->queue.head = pqData->queue.item[pqData->queue.head].next;
    pthread_mutex_unlock(&pqData->queue_mutex);

    VdpOutputSurfaceData *surfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE);
    if (surfData == NULL)
        return;

    glx_context_push_global(deviceData->display, pqData->target->drawable, pqData->target->glc);

    const uint32_t target_width  = (clip_width > 0)  ? clip_width  : surfData->width;
    const uint32_t target_height = (clip_height > 0) ? clip_height : surfData->height;

    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, target_width, target_height, 0, -1.0, 1.0);
    glViewport(0, 0, target_width, target_height);

    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();

    glMatrixMode(GL_TEXTURE);
    glLoadIdentity();
    glScalef(1.0f/surfData->width, 1.0f/surfData->height, 1.0f);

    glEnable(GL_TEXTURE_2D);
    glDisable(GL_BLEND);
    glBindTexture(GL_TEXTURE_2D, surfData->tex_id);
    glColor4f(1, 1, 1, 1);
    glBegin(GL_QUADS);
        glTexCoord2i(0, 0);                        glVertex2i(0, 0);
        glTexCoord2i(target_width, 0);             glVertex2i(target_width, 0);
        glTexCoord2i(target_width, target_height); glVertex2i(target_width, target_height);
        glTexCoord2i(0, target_height);            glVertex2i(0, target_height);
    glEnd();

    if (global.quirks.show_watermark) {
        glEnable(GL_BLEND);
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
        glBlendEquation(GL_FUNC_ADD);
        glBindTexture(GL_TEXTURE_2D, deviceData->watermark_tex_id);

        glMatrixMode(GL_TEXTURE);
        glLoadIdentity();

        glColor3f(0.8, 0.08, 0.35);
        glBegin(GL_QUADS);
            glTexCoord2i(0, 0);
            glVertex2i(target_width - watermark_width, target_height - watermark_height);

            glTexCoord2i(1, 0);
            glVertex2i(target_width, target_height - watermark_height);

            glTexCoord2i(1, 1);
            glVertex2i(target_width, target_height);

            glTexCoord2i(0, 1);
            glVertex2i(target_width - watermark_width, target_height);
        glEnd();
    }

    glXSwapBuffers(deviceData->display, pqData->target->drawable);

    struct timespec now;
    clock_gettime(CLOCK_REALTIME, &now);
    surfData->first_presentation_time = timespec2vdptime(now);
    surfData->status = VDP_PRESENTATION_QUEUE_STATUS_IDLE;

    if (global.quirks.log_pq_delay) {
        const int64_t delta = timespec2vdptime(now) - surfData->queued_at;
        const struct timespec delta_ts = vdptime2timespec(delta);
        traceInfo("pqdelay %d.%09d %d.%09d\n", (int)now.tv_sec, (int)now.tv_nsec,
                  (int)delta_ts.tv_sec, (int)delta_ts.tv_nsec);
    }

    GLenum gl_error = glGetError();
    glx_context_pop();
    handle_release(surface);

    if (GL_NO_ERROR != gl_error) {
        traceError("error (VdpPresentationQueueDisplay): gl error %d\n", gl_error);
    }
}
VdpStatus
vdpVideoSurfaceCreate(VdpDevice device, VdpChromaType chroma_type, uint32_t width, uint32_t height,
                      VdpVideoSurface *surface)
{
    VdpStatus err_code;
    if (!surface)
        return VDP_STATUS_INVALID_POINTER;
    if (chroma_type != VDP_CHROMA_TYPE_420 &&
        chroma_type != VDP_CHROMA_TYPE_422 &&
        chroma_type != VDP_CHROMA_TYPE_444)
    {
        return VDP_STATUS_INVALID_CHROMA_TYPE;
    }

    VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE);
    if (NULL == deviceData)
        return VDP_STATUS_INVALID_HANDLE;

    VdpVideoSurfaceData *data = calloc(1, sizeof(VdpVideoSurfaceData));
    if (NULL == data) {
        err_code = VDP_STATUS_RESOURCES;
        goto quit;
    }

    data->type = HANDLETYPE_VIDEO_SURFACE;
    data->device = device;
    data->deviceData = deviceData;
    data->chroma_type = chroma_type;
    data->width = width;
    data->height = height;

    switch (chroma_type) {
    case VDP_CHROMA_TYPE_420:
        data->chroma_width = ((width + 1) & (~1u)) / 2;
        data->chroma_height = ((height + 1) & (~1u)) / 2;
        data->stride = (width + 0xfu) & (~0xfu);
        break;
    case VDP_CHROMA_TYPE_422:
        data->chroma_width = ((width + 1) & (~1u)) / 2;
        data->chroma_height = height;
        data->stride = (width + 2 * data->chroma_width + 0xfu) & (~0xfu);
        break;
    case VDP_CHROMA_TYPE_444:
        data->chroma_width = width;
        data->chroma_height = height;
        data->stride = (4 * width + 0xfu) & (~0xfu);
        break;
    }
    data->chroma_stride = (data->chroma_width + 0xfu) & (~0xfu);

    data->va_surf = VA_INVALID_SURFACE;
    data->tex_id = 0;
    data->sync_va_to_glx = 0;
    data->decoder = VDP_INVALID_HANDLE;
    data->y_plane = NULL;
    data->u_plane = NULL;
    data->v_plane = NULL;

    glx_ctx_push_thread_local(deviceData);
    glGenTextures(1, &data->tex_id);
    glBindTexture(GL_TEXTURE_2D, data->tex_id);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, data->width, data->height, 0,
                 GL_BGRA, GL_UNSIGNED_BYTE, NULL);

    glGenFramebuffers(1, &data->fbo_id);
    glBindFramebuffer(GL_FRAMEBUFFER, data->fbo_id);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, data->tex_id, 0);
    GLenum gl_status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
    if (GL_FRAMEBUFFER_COMPLETE != gl_status) {
        traceError("error (%s): framebuffer not ready, %d, %s\n", __func__, gl_status,
                   gluErrorString(gl_status));
        glx_ctx_pop();
        free(data);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }
    glFinish();

    GLenum gl_error = glGetError();
    glx_ctx_pop();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        free(data);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    // no VA surface creation here. Actual pool of VA surfaces should be allocated already
    // by VdpDecoderCreate. VdpDecoderCreate will update ->va_surf field as needed.

    ref_device(deviceData);
    *surface = handle_insert(data);

    err_code = VDP_STATUS_OK;
quit:
    handle_release(device);
    return err_code;
}
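/*
 * A worked example of the geometry math above, for a 1920x1080 surface with
 * VDP_CHROMA_TYPE_420: chroma_width = ((1920 + 1) & ~1u) / 2 = 960,
 * chroma_height = ((1080 + 1) & ~1u) / 2 = 540, stride = (1920 + 0xf) & ~0xfu
 * = 1920 (already 16-byte aligned), chroma_stride = (960 + 0xf) & ~0xfu = 960.
 * The rounding matters for odd sizes: width 1921 gives chroma_width 961,
 * stride 1936 and chroma_stride 976.
 */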
VdpStatus
softVdpPresentationQueueDisplay(VdpPresentationQueue presentation_queue, VdpOutputSurface surface,
                                uint32_t clip_width, uint32_t clip_height,
                                VdpTime earliest_presentation_time)
{
    VdpPresentationQueueData *pqData =
        handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE);
    if (NULL == pqData)
        return VDP_STATUS_INVALID_HANDLE;

    // push work to queue
    pthread_mutex_lock(&pqData->queue_mutex);
    while (pqData->queue.used >= PRESENTATION_QUEUE_LENGTH) {
        // wait while the queue is full
        // TODO: check for deadlock here
        // TODO: is there a way to drop pqData->queue_mutex, and use only pqData->lock?
        pthread_mutex_unlock(&pqData->queue_mutex);
        handle_release(presentation_queue);
        usleep(10*1000);
        pqData = handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE);
        if (NULL == pqData)
            return VDP_STATUS_ERROR;
        pthread_mutex_lock(&pqData->queue_mutex);
    }

    VdpOutputSurfaceData *surfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE);
    if (NULL == surfData) {
        pthread_mutex_unlock(&pqData->queue_mutex);
        handle_release(presentation_queue);
        return VDP_STATUS_INVALID_HANDLE;
    }
    if (pqData->device != surfData->device) {
        handle_release(surface);
        handle_release(presentation_queue);
        return VDP_STATUS_HANDLE_DEVICE_MISMATCH;
    }

    pqData->queue.used ++;
    int new_item = pqData->queue.firstfree;
    assert(new_item != -1);
    pqData->queue.firstfree = pqData->queue.freelist[new_item];

    pqData->queue.item[new_item].t = earliest_presentation_time;
    pqData->queue.item[new_item].clip_width = clip_width;
    pqData->queue.item[new_item].clip_height = clip_height;
    pqData->queue.item[new_item].surface = surface;
    surfData->first_presentation_time = 0;
    surfData->status = VDP_PRESENTATION_QUEUE_STATUS_QUEUED;

    // keep queue sorted
    if (pqData->queue.head == -1 ||
        earliest_presentation_time < pqData->queue.item[pqData->queue.head].t)
    {
        pqData->queue.item[new_item].next = pqData->queue.head;
        pqData->queue.head = new_item;
    } else {
        int ptr = pqData->queue.head;
        int prev = ptr;
        while (ptr != -1 && pqData->queue.item[ptr].t <= earliest_presentation_time) {
            prev = ptr;
            ptr = pqData->queue.item[ptr].next;
        }
        pqData->queue.item[new_item].next = ptr;
        pqData->queue.item[prev].next = new_item;
    }

    pthread_mutex_unlock(&pqData->queue_mutex);

    if (global.quirks.log_pq_delay) {
        struct timespec now;
        clock_gettime(CLOCK_REALTIME, &now);
        surfData->queued_at = timespec2vdptime(now);
    }

    pthread_cond_broadcast(&pqData->new_work_available);

    handle_release(presentation_queue);
    handle_release(surface);

    return VDP_STATUS_OK;
}
VdpStatus
vdpVideoSurfaceGetBitsYCbCr(VdpVideoSurface surface, VdpYCbCrFormat destination_ycbcr_format,
                            void *const *destination_data, uint32_t const *destination_pitches)
{
    VdpStatus err_code;
    if (!destination_data || !destination_pitches)
        return VDP_STATUS_INVALID_POINTER;
    VdpVideoSurfaceData *srcSurfData = handle_acquire(surface, HANDLETYPE_VIDEO_SURFACE);
    if (NULL == srcSurfData)
        return VDP_STATUS_INVALID_HANDLE;
    VdpDeviceData *deviceData = srcSurfData->deviceData;
    VADisplay va_dpy = deviceData->va_dpy;

    if (deviceData->va_available) {
        VAImage q;
        vaDeriveImage(va_dpy, srcSurfData->va_surf, &q);
        if (VA_FOURCC('N', 'V', '1', '2') == q.format.fourcc &&
            VDP_YCBCR_FORMAT_NV12 == destination_ycbcr_format)
        {
            uint8_t *img_data;
            vaMapBuffer(va_dpy, q.buf, (void **)&img_data);
            if (destination_pitches[0] == q.pitches[0] &&
                destination_pitches[1] == q.pitches[1])
            {
                const uint32_t sz = (uint32_t)q.width * (uint32_t)q.height;
                memcpy(destination_data[0], img_data + q.offsets[0], sz);
                memcpy(destination_data[1], img_data + q.offsets[1], sz / 2);
            } else {
                uint8_t *src = img_data + q.offsets[0];
                uint8_t *dst = destination_data[0];
                for (unsigned int y = 0; y < q.height; y ++) {  // Y plane
                    memcpy (dst, src, q.width);
                    src += q.pitches[0];
                    dst += destination_pitches[0];
                }
                src = img_data + q.offsets[1];
                dst = destination_data[1];
                for (unsigned int y = 0; y < q.height / 2; y ++) {  // UV plane
                    memcpy(dst, src, q.width);  // q.width/2 samples of U and V each, hence q.width
                    src += q.pitches[1];
                    dst += destination_pitches[1];
                }
            }
            vaUnmapBuffer(va_dpy, q.buf);
        } else if (VA_FOURCC('N', 'V', '1', '2') == q.format.fourcc &&
                   VDP_YCBCR_FORMAT_YV12 == destination_ycbcr_format)
        {
            uint8_t *img_data;
            vaMapBuffer(va_dpy, q.buf, (void **)&img_data);

            // Y plane
            if (destination_pitches[0] == q.pitches[0]) {
                const uint32_t sz = (uint32_t)q.width * (uint32_t)q.height;
                memcpy(destination_data[0], img_data + q.offsets[0], sz);
            } else {
                uint8_t *src = img_data + q.offsets[0];
                uint8_t *dst = destination_data[0];
                for (unsigned int y = 0; y < q.height; y ++) {
                    memcpy (dst, src, q.width);
                    src += q.pitches[0];
                    dst += destination_pitches[0];
                }
            }

            // unpack mixed UV to separate planes
            for (unsigned int y = 0; y < q.height/2; y ++) {
                uint8_t *src = img_data + q.offsets[1] + y * q.pitches[1];
                uint8_t *dst_u = destination_data[1] + y * destination_pitches[1];
                uint8_t *dst_v = destination_data[2] + y * destination_pitches[2];

                for (unsigned int x = 0; x < q.width/2; x++) {
                    *dst_v++ = *src++;
                    *dst_u++ = *src++;
                }
            }

            vaUnmapBuffer(va_dpy, q.buf);
        } else {
            const char *c = (const char *)&q.format.fourcc;
            traceError("error (%s): not implemented conversion VA FOURCC %c%c%c%c -> %s\n",
                       __func__, *c, *(c+1), *(c+2), *(c+3),
                       reverse_ycbcr_format(destination_ycbcr_format));
            vaDestroyImage(va_dpy, q.image_id);
            err_code = VDP_STATUS_INVALID_Y_CB_CR_FORMAT;
            goto quit;
        }
        vaDestroyImage(va_dpy, q.image_id);
    } else {
        // software fallback
        traceError("error (%s): not implemented software fallback\n", __func__);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    GLenum gl_error = glGetError();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    err_code = VDP_STATUS_OK;
quit:
    handle_release(surface);
    return err_code;
}