コード例 #1
0
VdpStatus
softVdpPresentationQueueTargetDestroy(VdpPresentationQueueTarget presentation_queue_target)
{
    // Destroy a presentation queue target: refuse while still referenced,
    // tear down its GLX context, and drop the handle.
    VdpPresentationQueueTargetData *pqTargetData =
        handle_acquire(presentation_queue_target, HANDLETYPE_PRESENTATION_QUEUE_TARGET);
    if (NULL == pqTargetData)
        return VDP_STATUS_INVALID_HANDLE;
    VdpDeviceData *deviceData = pqTargetData->device;

    if (0 != pqTargetData->refcount) {
        // Fix: the two string-literal halves previously concatenated to
        // "referencecount" — a separator space was missing.
        traceError("warning (softVdpPresentationQueueTargetDestroy): non-zero reference "
                   "count (%d)\n", pqTargetData->refcount);
        handle_release(presentation_queue_target);
        return VDP_STATUS_ERROR;
    }

    // drawable may be destroyed already, so one should activate global context
    glx_context_push_thread_local(deviceData);
    glXDestroyContext(deviceData->display, pqTargetData->glc);

    GLenum gl_error = glGetError();
    glx_context_pop();
    if (GL_NO_ERROR != gl_error) {
        traceError("error (VdpPresentationQueueTargetDestroy): gl error %d\n", gl_error);
        handle_release(presentation_queue_target);
        return VDP_STATUS_ERROR;
    }

    deviceData->refcount --;
    handle_expunge(presentation_queue_target);
    free(pqTargetData);
    return VDP_STATUS_OK;
}
コード例 #2
0
ファイル: n2n_net.c プロジェクト: ggbg/n2n_v3
/**
 * Create a UDP socket bound to local_port.
 *
 * @param local_port  port to bind (host byte order)
 * @param bind_any    non-zero: bind INADDR_ANY; zero: bind loopback only
 * @return socket descriptor on success, -1 on error
 */
SOCKET open_socket(int local_port, int bind_any)
{
    SOCKET sock_fd;
    struct sockaddr_in local_address;
    int sockopt = 1;

    if ((sock_fd = socket(PF_INET, SOCK_DGRAM, 0)) < 0)
    {
        traceError("Unable to create socket [%s][%d]\n", strerror(errno), sock_fd);
        return (-1);
    }

#ifndef WIN32
    /* fcntl(sock_fd, F_SETFL, O_NONBLOCK); */
#endif

    setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, (char *) &sockopt, sizeof(sockopt));

    memset(&local_address, 0, sizeof(local_address));
    local_address.sin_family = AF_INET;
    local_address.sin_port = htons(local_port);
    local_address.sin_addr.s_addr = htonl(bind_any ? INADDR_ANY : INADDR_LOOPBACK);
    if (bind(sock_fd, (struct sockaddr*) &local_address, sizeof(local_address)) == -1)
    {
        traceError("Bind error [%s]\n", strerror(errno));
        /* fix: the descriptor used to leak on bind failure */
#ifdef WIN32
        closesocket(sock_fd);
#else
        close(sock_fd);
#endif
        return (-1);
    }

    return (sock_fd);
}
コード例 #3
0
ファイル: n2n_utils.c プロジェクト: ggbg/n2n_v3
/* Create the argv vector */
/* Split linebuffer on spaces into a freshly-allocated, execv-style
 * (NULL-terminated) argv vector stored in effective_args. */
static void buildargv(const char * const linebuffer, effective_args_t *effective_args)
{
    const int INITIAL_MAXARGC = 16; /* Number of args + NULL in initial argv */
    int maxargc;
    int argc = 0;
    char **argv;
    char *buffer, *buff;

    /* +2 plus calloc's zero fill guarantees a NUL-terminated working copy */
    buffer = (char *) calloc(1, strlen(linebuffer) + 2);
    if (!buffer)
    {
        traceError("Unable to allocate memory");
        exit(1);
    }
    strncpy(buffer, linebuffer, strlen(linebuffer));

    maxargc = INITIAL_MAXARGC;
    argv = (char **) malloc(maxargc * sizeof(char*));
    if (argv == NULL)
    {
        traceError("Unable to allocate memory");
        exit(1);
    }
    buff = buffer;
    while (buff)
    {
        char *p = strchr(buff, ' ');
        if (p)
        {
            *p = '\0';
            argv[argc++] = strdup(buff);
            /* skip any run of separator spaces
             * (the old `&& *p != '\0'` clause was redundant: ' ' != '\0') */
            while (*++p == ' ')
                ;
            buff = p;
            if (argc >= maxargc)
            {
                maxargc *= 2;
                argv = (char **) realloc(argv, maxargc * sizeof(char*));
                if (argv == NULL)
                {
                    traceError("Unable to re-allocate memory");
                    free(buffer);
                    exit(1);
                }
            }
        }
        else
        {
            argv[argc++] = strdup(buff);
            break;
        }
    }
    free(buffer);

    /* Fix: the terminating NULL promised by the INITIAL_MAXARGC comment was
     * never written. Grow by one slot if the vector is exactly full. */
    if (argc >= maxargc)
    {
        maxargc += 1;
        argv = (char **) realloc(argv, maxargc * sizeof(char*));
        if (argv == NULL)
        {
            traceError("Unable to re-allocate memory");
            exit(1);
        }
    }
    argv[argc] = NULL;

    effective_args->argc = argc;
    effective_args->argv = argv;
}
コード例 #4
0
ファイル: n2n_utils.c プロジェクト: ggbg/n2n_v3
/* Flatten argv (expanding @file config references) into a single command
 * line, then re-split it into effective_args via buildargv(). */
void build_effective_args(int argc, char *argv[], effective_args_t *effective_args)
{
    int i;

    char *linebuffer = (char *) malloc(MAX_CMDLINE_BUFFER_LENGTH);
    if (!linebuffer)
    {
        traceError("Unable to allocate memory");
        exit(1);
    }

    /* start with the program name */
    snprintf(linebuffer, MAX_CMDLINE_BUFFER_LENGTH, "%s", argv[0]);

#ifdef WIN32
    /* normalize Windows path separators */
    for (i = 0; i < (int) strlen(linebuffer); i++)
        if (linebuffer[i] == '\\')
            linebuffer[i] = '/';
#endif

    for (i = 1; i < argc; ++i)
    {
        if (argv[i][0] == '@')
        {
            /* @file: splice options read from the named file */
            if (readConfFile(&argv[i][1], linebuffer) < 0)
                exit(1); /* <<<<----- check */
        }
        else if ((strlen(linebuffer) + strlen(argv[i]) + 2) < MAX_CMDLINE_BUFFER_LENGTH)
        {
            /* The capacity check above makes plain strcat safe here.
             * Fix: strncat(dst, " ", 1) misused the bound argument as a
             * copy count; strncat's n must be the remaining space. */
            strcat(linebuffer, " ");
            strcat(linebuffer, argv[i]);
        }
        else
        {
            traceError("too many argument");
            exit(1);
        }
    }
    /* strip trailing spaces */
    while (strlen(linebuffer) && linebuffer[ strlen(linebuffer) - 1 ] == ' ')
        linebuffer[ strlen(linebuffer) - 1 ] = '\0';

    /* build the new argv from the linebuffer */
    buildargv(linebuffer, effective_args);

    if (linebuffer)
    {
        free(linebuffer);
        linebuffer = NULL;
    }

    /* {int k;for(k=0;k<effectiveargc;++k)  printf("%s\n",effectiveargv[k]);} */
}
コード例 #5
0
ファイル: PhilSock.cpp プロジェクト: pmlt/comp445
// Construct a server-side socket and bind it to the given local address.
// The base Socket ctor creates `winsocket`; if bind fails we throw, and the
// fully-constructed base subobject is destroyed (closing the socket).
//
// NOTE(review): `throw new SocketException(...)` throws a raw *pointer*, so
// every catch site must catch `SocketException*` and delete it or it leaks.
// Idiomatic C++ is throw-by-value / catch-by-const&, but changing the thrown
// type here would break existing catch clauses — confirm callers first.
net::ServerSocket::ServerSocket(int af, int protocol, bool trace, const struct sockaddr_in * name, int namelen):
    Socket(af, protocol, trace) {
    if (SOCKET_ERROR == ::bind(winsocket, (const sockaddr*)name, namelen)) {
        traceError(WSAGetLastError(), "SOCKET_ERROR while binding");
        throw new SocketException("Could not bind server socket!");
    }
}
コード例 #6
0
ファイル: n2n_net.c プロジェクト: ggbg/n2n_v3
/**
 * Send a datagram to a socket defined by a n2n_sock_t.
 *
 * @return -1 on error otherwise number of bytes sent
 */
ssize_t sendto_sock(int sock_fd,
                    const void *pktbuf, size_t pktsize,
                    const n2n_sock_t *dest)
{
    n2n_sock_str_t sockbuf;
    struct sockaddr_storage dst_addr;
    ssize_t sent;

    sock2sockaddr(&dst_addr, dest);

    //traceDebug("sendto_sock %lu to [%s]", pktsize, sock2str(sockbuf, dest));

    sent = sendto(sock_fd,
                  pktbuf, pktsize,
                  0 /* flags */,
                  (const struct sockaddr *) &dst_addr,
                  sizeof(struct sockaddr_in));

    if (sent < 0)
    {
        char *c = strerror(errno);
        traceError("sendto failed (%d) %s", errno, c);
    }
    else
    {
        traceDebug("sendto sent=%d", (signed int) sent);
    }

    return sent;
}
コード例 #7
0
VdpStatus
softVdpDecoderRender(VdpDecoder decoder, VdpVideoSurface target,
                     VdpPictureInfo const *picture_info, uint32_t bitstream_buffer_count,
                     VdpBitstreamBuffer const *bitstream_buffers)
{
    // Decode one frame into `target`. Only H.264 profiles are implemented.
    VdpStatus err_code;
    if (!picture_info || !bitstream_buffers)
        return VDP_STATUS_INVALID_POINTER;
    VdpDecoderData *decoderData = handle_acquire(decoder, HANDLETYPE_DECODER);
    VdpVideoSurfaceData *dstSurfData = handle_acquire(target, HANDLETYPE_VIDEO_SURFACE);
    if (NULL == decoderData || NULL == dstSurfData) {
        err_code = VDP_STATUS_INVALID_HANDLE;
        goto quit;
    }

    if (VDP_DECODER_PROFILE_H264_BASELINE == decoderData->profile ||
        VDP_DECODER_PROFILE_H264_MAIN ==     decoderData->profile ||
        VDP_DECODER_PROFILE_H264_HIGH ==     decoderData->profile)
    {
        // Fix: the renderer's status used to be discarded (old TODO);
        // propagate it to the caller instead of always reporting OK.
        err_code = softVdpDecoderRender_h264(decoder, decoderData, dstSurfData, picture_info,
                                             bitstream_buffer_count, bitstream_buffers);
    } else {
        traceError("error (softVdpDecoderRender): no implementation for profile %s\n",
                   reverse_decoder_profile(decoderData->profile));
        err_code = VDP_STATUS_NO_IMPLEMENTATION;
    }

quit:
    handle_release(decoder);
    handle_release(target);
    return err_code;
}
コード例 #8
0
ファイル: n2n_net.c プロジェクト: ggbg/n2n_v3
/* Parse str_orig as an IPv6 address into out->addr.v6.
 * Returns 0 on success, 1 on parse failure (after logging). */
static int extract_ipv6(n2n_sock_t *out, const char* str_orig)
{
    if (1 == inet_pton(AF_INET6, str_orig, out->addr.v6))
        return 0;

    traceError("Error extracting IPv6 address: %s", str_orig);
    return 1;
}
コード例 #9
0
void
print_handle_type(int handle, void *item, void *p)
{
    VdpGenericData *gh = item;
    struct {
        int cnt;
        int total_cnt;
        VdpDeviceData *deviceData;
    } *pp = p;
    pp->total_cnt ++;

    if (gh) {
        if (pp->deviceData == gh->deviceData) {
            traceError("handle %d type = %d\n", handle, gh->type);
            pp->cnt ++;
        }
    }
}
コード例 #10
0
VdpStatus
softVdpPresentationQueueTargetCreateX11(VdpDevice device, Drawable drawable,
                                        VdpPresentationQueueTarget *target)
{
    // Create a presentation queue target wrapping an X11 drawable, with its
    // own GLX context sharing display lists with the device's root context.
    if (!target)
        return VDP_STATUS_INVALID_POINTER;
    VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE);
    if (NULL == deviceData)
        return VDP_STATUS_INVALID_HANDLE;

    VdpPresentationQueueTargetData *data = calloc(1, sizeof(VdpPresentationQueueTargetData));
    if (NULL == data) {
        handle_release(device);
        return VDP_STATUS_RESOURCES;
    }

    data->type = HANDLETYPE_PRESENTATION_QUEUE_TARGET;
    data->device = deviceData;
    data->drawable = drawable;
    data->refcount = 0;

    pthread_mutex_lock(&global.glx_ctx_stack_mutex);
    GLint att[] = { GLX_RGBA, GLX_DEPTH_SIZE, 24, GLX_DOUBLEBUFFER, None };
    XVisualInfo *vi;
    vi = glXChooseVisual(deviceData->display, deviceData->screen, att);
    if (NULL == vi) {
        traceError("error (softVdpPresentationQueueTargetCreateX11): glXChooseVisual failed\n");
        free(data);
        pthread_mutex_unlock(&global.glx_ctx_stack_mutex);
        handle_release(device);
        return VDP_STATUS_ERROR;
    }

    // create context for displaying result (can share display lists with deviceData->glc)
    data->glc = glXCreateContext(deviceData->display, vi, deviceData->root_glc, GL_TRUE);
    XFree(vi);  // fix: glXChooseVisual's XVisualInfo must be XFree'd (was leaked)
    if (NULL == data->glc) {
        // fix: context creation failure used to go unnoticed
        traceError("error (softVdpPresentationQueueTargetCreateX11): glXCreateContext failed\n");
        free(data);
        pthread_mutex_unlock(&global.glx_ctx_stack_mutex);
        handle_release(device);
        return VDP_STATUS_ERROR;
    }

    deviceData->refcount ++;
    *target = handle_insert(data);
    pthread_mutex_unlock(&global.glx_ctx_stack_mutex);

    handle_release(device);
    return VDP_STATUS_OK;
}
コード例 #11
0
VdpStatus
vdpVideoSurfaceDestroy(VdpVideoSurface surface)
{
    // Destroy a video surface: delete its GL texture, return its VA surface
    // slot to the owning decoder's free list, and release the handle.
    VdpVideoSurfaceData *videoSurfData = handle_acquire(surface, HANDLETYPE_VIDEO_SURFACE);
    if (NULL == videoSurfData)
        return VDP_STATUS_INVALID_HANDLE;
    VdpDeviceData *deviceData = videoSurfData->deviceData;

    glx_ctx_push_thread_local(deviceData);
    glDeleteTextures(1, &videoSurfData->tex_id);
    GLenum gl_error = glGetError();
    glx_ctx_pop();

    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        handle_release(surface);
        return VDP_STATUS_ERROR;
    }

    if (deviceData->va_available) {
        // return VA surface to the free list
        if (videoSurfData->decoder != VDP_INVALID_HANDLE) {
            VdpDecoderData *dd = handle_acquire(videoSurfData->decoder, HANDLETYPE_DECODER);
            if (NULL != dd) {
                free_list_push(dd->free_list, &dd->free_list_head, videoSurfData->rt_idx);
                handle_release(videoSurfData->decoder);
            }
        }
        // .va_surf will be freed in VdpDecoderDestroy
    }

    // free(NULL) is a no-op per C standard, so the former NULL guards were redundant
    free(videoSurfData->y_plane);
    free(videoSurfData->u_plane);
    // do not free videoSurfData->v_plane, it's just pointer into the middle of u_plane

    unref_device(deviceData);
    handle_expunge(surface);
    free(videoSurfData);
    return VDP_STATUS_OK;
}
コード例 #12
0
ファイル: n2n_net.c プロジェクト: ggbg/n2n_v3
/**
 * Find the address and IP mode for the tuntap device.
 *
 *  s is one of these forms:
 *
 *  <host> := <hostname> | A.B.C.D
 *
 *  <host> | static:<host> | dhcp:<host>
 *
 *  If the mode is present (colon required) then fill ip_mode with that value
 *  otherwise do not change ip_mode. Fill ip_mode with everything after the
 *  colon if it is present; or s if colon is not present.(TODO - update)
 *
 *  return 0 on success and -1 on error
 */
int scan_address(uint32_t *ip_addr, ip_mode_t *ip_mode, const char *s)
{
    /* Parse "<host>" or "<mode>:<host>" where <mode> is "static" or "dhcp".
     * Fills *ip_addr always; fills *ip_mode only when a mode prefix exists
     * and ip_mode is non-NULL. Returns 0 on success, -1 on error. */
    char *p;

    if ((NULL == s) || (NULL == ip_addr))
    {
        return -1;
    }

    p = strchr(s, ':');

    if (p)
    {
        /* colon is present */
        size_t host_off = p - s;

        if (ip_mode)
        {
            /* Fix: the old strncmp(s, "...", host_off) only compared the
             * first host_off characters, so any prefix of a keyword (even
             * "s:" or ":") matched "static". Require the exact length too. */
            if (host_off == strlen("static") && 0 == strncmp(s, "static", host_off))
                *ip_mode = N2N_IPM_STATIC;

            else if (host_off == strlen("dhcp") && 0 == strncmp(s, "dhcp", host_off))
                *ip_mode = N2N_IPM_DHCP;

            else
            {
                *ip_mode = N2N_IPM_NONE;
                /* fix: %.*s expects an int precision, not size_t */
                traceError("Unknown IP mode: %.*s\n", (int) host_off, s);
                return -1;
            }
        }

        /* move to IP position */
        s = p + 1;
    }

    *ip_addr = inet_addr(s);//TODO use a wrapping function

    return 0;
}
コード例 #13
0
ファイル: n2n_net.c プロジェクト: ggbg/n2n_v3
/* Format a n2n_sock_t as "a.b.c.d:port" or "[v6]:port" into `out`.
 * Returns `out`, or NULL when out is NULL or the address is unprintable. */
extern char *sock2str(n2n_sock_str_t out, const n2n_sock_t *sock)
{
    ipstr_t ipstr;

    if (NULL == out)
        return NULL;

    /* fix: pass the real buffer size instead of the hard-coded 32 (old TODO) */
    if (NULL == inet_ntop(sock->family, &sock->addr, ipstr, sizeof(ipstr)))
    {
        traceError("inet_ntop() [%s]\n", strerror(errno));
        return NULL;
    }

    /* snprintf truncates safely on overflow; result length is not needed,
     * so the formerly unused `r` variable has been dropped */
    if (AF_INET6 == sock->family)
        snprintf(out, N2N_SOCKBUF_SIZE, "[%s]:%hu", ipstr, ntohs(sock->port));
    else
        snprintf(out, N2N_SOCKBUF_SIZE, "%s:%hu", ipstr, ntohs(sock->port));

    return out;
}
コード例 #14
0
static
void
destroy_child_objects(int handle, void *item, void *p)
{
    // Iteration callback: destroy every handle whose owner is the device
    // passed through `p`, dispatching on the handle's type.
    const void *parent = p;
    VdpGenericData *gh = item;
    if (gh) {
        if (parent == gh->deviceData) {
            switch (gh->type) {
            case HANDLETYPE_DEVICE:
                // do nothing
                break;
            case HANDLETYPE_PRESENTATION_QUEUE_TARGET:
                // Fix: this case previously called vdpPresentationQueueDestroy,
                // which acquires the handle as HANDLETYPE_PRESENTATION_QUEUE and
                // therefore fails for a target handle, leaking the target.
                vdpPresentationQueueTargetDestroy(handle);
                break;
            case HANDLETYPE_PRESENTATION_QUEUE:
                vdpPresentationQueueDestroy(handle);
                break;
            case HANDLETYPE_VIDEO_MIXER:
                vdpVideoMixerDestroy(handle);
                break;
            case HANDLETYPE_OUTPUT_SURFACE:
                vdpOutputSurfaceDestroy(handle);
                break;
            case HANDLETYPE_VIDEO_SURFACE:
                vdpVideoSurfaceDestroy(handle);
                break;
            case HANDLETYPE_BITMAP_SURFACE:
                vdpBitmapSurfaceDestroy(handle);
                break;
            case HANDLETYPE_DECODER:
                vdpDecoderDestroy(handle);
                break;
            default:
                traceError("warning (%s): unknown handle type %d\n", __func__, gh->type);
                break;
            }
        }
    }
}
コード例 #15
0
ファイル: ctx-stack.c プロジェクト: Dangku/libvdpau-va-gl
void
glx_ctx_ref_glc_hash_table(Display *dpy, int screen)
{
    // Take a reference on the shared GLX-context hash table, creating the
    // table and the root GLX context on the first call.
    glx_ctx_lock();
    if (0 == glc_hash_table_ref_count) {
        // first reference: build the table and the root context
        glc_hash_table = g_hash_table_new_full(g_direct_hash, g_direct_equal,
                                               NULL, value_destroy_func);
        glc_hash_table_ref_count = 1;

        GLint att[] = { GLX_RGBA, GLX_DEPTH_SIZE, 24, GLX_DOUBLEBUFFER, None };
        root_vi = glXChooseVisual(dpy, screen, att);
        if (NULL == root_vi) {
            // NOTE(review): on this error path ref_count is already 1 and the
            // hash table exists, but root_glc is never created — later callers
            // see a "referenced" table with no root context. Consider undoing
            // the ref / table here; confirm against the unref counterpart.
            traceError("error (%s): glXChooseVisual failed\n", __func__);
            glx_ctx_unlock();
            return;
        }
        // NOTE(review): glXCreateContext result is not checked; root_glc may
        // be NULL on failure — verify downstream users tolerate that.
        root_glc = glXCreateContext(dpy, root_vi, NULL, GL_TRUE);
    } else {
        // table already exists: just bump the reference count
        glc_hash_table_ref_count ++;
    }
    glx_ctx_unlock();
}
コード例 #16
0
VdpStatus
softVdpPresentationQueueDestroy(VdpPresentationQueue presentation_queue)
{
    // Stop the queue's worker thread, then drop the handle and its
    // references on the owning device and target.
    VdpPresentationQueueData *data =
        handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE);

    if (NULL == data)
        return VDP_STATUS_INVALID_HANDLE;

    // request cancellation, then wait for the worker to actually exit
    pthread_cancel(data->worker_thread);
    if (pthread_join(data->worker_thread, NULL) != 0) {
        traceError("VdpPresentationQueueDestroy: failed to stop worker thread");
        handle_release(presentation_queue);
        return VDP_STATUS_ERROR;
    }

    handle_expunge(presentation_queue);
    data->device->refcount--;
    data->target->refcount--;
    free(data);

    return VDP_STATUS_OK;
}
コード例 #17
0
static
VdpStatus
h264_translate_reference_frames(VdpVideoSurfaceData *dstSurfData, VdpDecoder decoder,
                                VdpDecoderData *decoderData,
                                VAPictureParameterBufferH264 *pic_param,
                                const VdpPictureInfoH264 *vdppi)
{
    // Fill pic_param's CurrPic and ReferenceFrames from VDPAU picture info,
    // lazily assigning VA surfaces from the decoder's free list.

    // take new VA surface from buffer if needed
    if (VA_INVALID_SURFACE == dstSurfData->va_surf) {
        int idx = free_list_pop(decoderData->free_list, &decoderData->free_list_head);
        if (-1 == idx)
            return VDP_STATUS_RESOURCES;
        dstSurfData->decoder = decoder;
        dstSurfData->va_surf = decoderData->render_targets[idx];
        dstSurfData->rt_idx  = idx;
    }

    // current frame
    pic_param->CurrPic.picture_id   = dstSurfData->va_surf;
    pic_param->CurrPic.frame_idx    = vdppi->frame_num;
    pic_param->CurrPic.flags  = vdppi->is_reference ? VA_PICTURE_H264_SHORT_TERM_REFERENCE : 0;
    if (vdppi->field_pic_flag) {
        pic_param->CurrPic.flags |=
            vdppi->bottom_field_flag ? VA_PICTURE_H264_BOTTOM_FIELD : VA_PICTURE_H264_TOP_FIELD;
    }

    pic_param->CurrPic.TopFieldOrderCnt     = vdppi->field_order_cnt[0];
    pic_param->CurrPic.BottomFieldOrderCnt  = vdppi->field_order_cnt[1];

    // mark all pictures invalid preliminary
    for (int k = 0; k < 16; k ++)
        reset_va_picture_h264(&pic_param->ReferenceFrames[k]);

    // reference frames
    for (int k = 0; k < vdppi->num_ref_frames; k ++) {
        if (VDP_INVALID_HANDLE == vdppi->referenceFrames[k].surface) {
            reset_va_picture_h264(&pic_param->ReferenceFrames[k]);
            continue;
        }

        VdpReferenceFrameH264 const *vdp_ref = &(vdppi->referenceFrames[k]);
        VdpVideoSurfaceData *vdpSurfData =
            handle_acquire(vdp_ref->surface, HANDLETYPE_VIDEO_SURFACE);
        VAPictureH264 *va_ref = &(pic_param->ReferenceFrames[k]);
        if (NULL == vdpSurfData) {
            traceError("error (h264_translate_reference_frames): NULL == vdpSurfData");
            return VDP_STATUS_ERROR;
        }

        // take new VA surface from buffer if needed
        if (VA_INVALID_SURFACE == vdpSurfData->va_surf) {
            int idx = free_list_pop(decoderData->free_list, &decoderData->free_list_head);
            if (-1 == idx) {
                // fix: release the handle acquired above before bailing out
                handle_release(vdp_ref->surface);
                return VDP_STATUS_RESOURCES;
            }
            // fix: these three assignments previously wrote to dstSurfData
            // (copy-paste from the block above), leaving the reference
            // surface without a VA surface and corrupting the target's.
            vdpSurfData->decoder = decoder;
            vdpSurfData->va_surf = decoderData->render_targets[idx];
            vdpSurfData->rt_idx  = idx;
        }

        va_ref->picture_id = vdpSurfData->va_surf;
        va_ref->frame_idx = vdp_ref->frame_idx;
        va_ref->flags = vdp_ref->is_long_term ? VA_PICTURE_H264_LONG_TERM_REFERENCE
                                              : VA_PICTURE_H264_SHORT_TERM_REFERENCE;

        if (vdp_ref->top_is_reference && vdp_ref->bottom_is_reference) {
            // Full frame. This block intentionally left blank. No flags set.
        } else {
            if (vdp_ref->top_is_reference)
                va_ref->flags |= VA_PICTURE_H264_TOP_FIELD;
            else
                va_ref->flags |= VA_PICTURE_H264_BOTTOM_FIELD;
        }

        va_ref->TopFieldOrderCnt    = vdp_ref->field_order_cnt[0];
        va_ref->BottomFieldOrderCnt = vdp_ref->field_order_cnt[1];
        handle_release(vdp_ref->surface);
    }

    return VDP_STATUS_OK;
}
コード例 #18
0
VdpStatus
softVdpDecoderCreate(VdpDevice device, VdpDecoderProfile profile, uint32_t width, uint32_t height,
                     uint32_t max_references, VdpDecoder *decoder)
{
    // Create a VA-API-backed decoder. Falls back to progressively more
    // advanced H.264 profiles if the requested one is unavailable.
    VdpStatus err_code;
    if (!decoder)
        return VDP_STATUS_INVALID_POINTER;
    VdpDeviceData *deviceData = handle_acquire(device, HANDLETYPE_DEVICE);
    if (NULL == deviceData)
        return VDP_STATUS_INVALID_HANDLE;
    if (!deviceData->va_available) {
        err_code = VDP_STATUS_INVALID_DECODER_PROFILE;
        goto quit;
    }
    VADisplay va_dpy = deviceData->va_dpy;

    VdpDecoderData *data = calloc(1, sizeof(VdpDecoderData));
    if (NULL == data) {
        err_code = VDP_STATUS_RESOURCES;
        goto quit;
    }

    data->type = HANDLETYPE_DECODER;
    data->device = deviceData;
    data->profile = profile;
    data->width = width;
    data->height = height;
    data->max_references = max_references;

    // initialize free_list. Initially they all free
    data->free_list_head = -1;
    for (int k = 0; k < MAX_RENDER_TARGETS; k ++) {
        free_list_push(data->free_list, &data->free_list_head, k);
    }

    VAProfile va_profile;
    VAStatus status;
    int final_try = 0;
    VdpDecoderProfile next_profile = profile;

    // Try to create decoder for asked profile. On failure try to create more advanced one
    while (! final_try) {
        profile = next_profile;
        switch (profile) {
        case VDP_DECODER_PROFILE_H264_BASELINE:
            va_profile = VAProfileH264Baseline;
            data->num_render_targets = NUM_RENDER_TARGETS_H264;
            next_profile = VDP_DECODER_PROFILE_H264_MAIN;
            break;
        case VDP_DECODER_PROFILE_H264_MAIN:
            va_profile = VAProfileH264Main;
            data->num_render_targets = NUM_RENDER_TARGETS_H264;
            next_profile = VDP_DECODER_PROFILE_H264_HIGH;
            break;
        case VDP_DECODER_PROFILE_H264_HIGH:
            va_profile = VAProfileH264High;
            data->num_render_targets = NUM_RENDER_TARGETS_H264;
            // there is no more advanced profile, so it's final try
            final_try = 1;
            break;
        default:
            traceError("error (softVdpDecoderCreate): decoder %s not implemented\n",
                       reverse_decoder_profile(profile));
            err_code = VDP_STATUS_INVALID_DECODER_PROFILE;
            goto quit_free_data;
        }

        status = vaCreateConfig(va_dpy, va_profile, VAEntrypointVLD, NULL, 0, &data->config_id);
        if (VA_STATUS_SUCCESS == status)        // break loop if decoder created
            break;
    }

    if (VA_STATUS_SUCCESS != status) {
        err_code = VDP_STATUS_ERROR;
        goto quit_free_data;
    }

    // Create surfaces. All video surfaces created here, rather than in VdpVideoSurfaceCreate.
    // VAAPI requires surfaces to be bound with context on its creation time, while VDPAU allows
    // to do it later. So here is a trick: VDP video surfaces get their va_surf dynamically in
    // DecoderRender.

    // TODO: check format of surfaces created
#if VA_CHECK_VERSION(0, 34, 0)
    status = vaCreateSurfaces(va_dpy, VA_RT_FORMAT_YUV420, width, height,
        data->render_targets, data->num_render_targets, NULL, 0);
#else
    status = vaCreateSurfaces(va_dpy, width, height, VA_RT_FORMAT_YUV420,
        data->num_render_targets, data->render_targets);
#endif
    if (VA_STATUS_SUCCESS != status) {
        err_code = VDP_STATUS_ERROR;
        goto quit_destroy_config;       // fix: the VA config used to leak here
    }

    status = vaCreateContext(va_dpy, data->config_id, width, height, VA_PROGRESSIVE,
        data->render_targets, data->num_render_targets, &data->context_id);
    if (VA_STATUS_SUCCESS != status) {
        err_code = VDP_STATUS_ERROR;
        goto quit_destroy_surfaces;     // fix: surfaces and config used to leak here
    }

    deviceData->refcount ++;
    *decoder = handle_insert(data);

    err_code = VDP_STATUS_OK;
    goto quit;

quit_destroy_surfaces:
    vaDestroySurfaces(va_dpy, data->render_targets, data->num_render_targets);
quit_destroy_config:
    vaDestroyConfig(va_dpy, data->config_id);
quit_free_data:
    free(data);
quit:
    handle_release(device);
    return err_code;
}
コード例 #19
0
ファイル: n2n_keyfile.c プロジェクト: ggbg/n2n_v3
/* Read key control file and return the number of specs stored or a negative
 * error code.
 *
 * As the specs are read in the from and until time values are compared to
 * present time. Only those keys which are valid are stored.
 */
/* Read key control file and return the number of specs stored or a negative
 * error code.
 *
 * As the specs are read in the from and until time values are compared to
 * present time. Only those keys which are valid are stored.
 */
int n2n_read_keyfile(n2n_cipherspec_t *specs,    /* fill out this array of cipherspecs */
                     size_t numspecs,            /* number of slots in the array. */
                     const char *ctrlfile_path)  /* path to control file */
{
    /* Each line contains one cipherspec. */

    int       retval = 0;
    FILE     *fp = NULL;
    size_t    idx = 0;
    time_t    now = time(NULL);

    traceDebug("Reading '%s'\n", ctrlfile_path);

    fp = fopen(ctrlfile_path, "r");
    if (fp)
    {
        /* Read the file a line a time with fgets. */
        char line[N2N_KEYFILE_LINESIZE];
        size_t lineNum = 0;

        while (idx < numspecs)
        {
            n2n_cipherspec_t *k = &(specs[idx]);
            /* Fix: fgets return was unchecked — on an empty file the first
             * iteration read `line` uninitialized. Stop on EOF/read error. */
            if (NULL == fgets(line, N2N_KEYFILE_LINESIZE, fp))
            {
                break;
            }
            ++lineNum;

            if (strlen(line) > 1)
            {
                if (0 == parseKeyLine(k, line))
                {
                    if (k->valid_until > now)
                    {
                        /* fix: size_t must not be passed for %u — cast */
                        traceInfo(" --> [%u] from %lu, until %lu, transform=%hu, data=%s\n",
                                   (unsigned int) idx, k->valid_from, k->valid_until,
                                   k->t, k->opaque);

                        ++retval;
                        ++idx;
                    }
                    else
                    {
                        traceInfo(" --X [%u] from %lu, until %lu, transform=%hu, data=%s\n",
                                   (unsigned int) idx, k->valid_from, k->valid_until,
                                   k->t, k->opaque);

                    }
                }
                else
                {
                    traceWarning("Failed to decode line %u\n", (unsigned int) lineNum);
                }
            }

            if (feof(fp))
            {
                break;
            }

            line[0] = 0; /* this line has been consumed */
        }

        fclose(fp);
        fp = NULL;
    }
    else
    {
        traceError("Failed to open '%s'\n", ctrlfile_path);
        retval = -1;
    }

    return retval;
}
コード例 #20
0
ファイル: tuntap_linux.c プロジェクト: ggbg/n2n_v3
/** @brief  Open and configure the TAP device for packet read/write.
 *
 *  This routine creates the interface via the tuntap driver then uses ifconfig
 *  to configure address/mask and MTU.
 *
 *  @param device      - [inout] a device info holder object
 *  @param dev         - user-defined name for the new iface, 
 *                       if NULL system will assign a name
 *  @param device_ip   - address of iface
 *  @param device_mask - netmask for device_ip
 *  @param mtu         - MTU for device_ip
 *
 *  @return - negative value on error
 *          - non-negative file-descriptor on success
 */
int tuntap_open(tuntap_dev_t *device, ip_mode_t ip_mode)
                //char *dev, /* user-definable interface name, eg. edge0 */
                //const char *address_mode, /* static or dhcp */
                //char *device_ip,
                //char *device_mask,
                //const char *device_mac,
                //int mtu)
{
    // Open /dev/net/tun, request a TAP (layer-2) interface, then shell out
    // to ifconfig to set MAC/address/netmask/MTU and bring the iface up.
    char *tuntap_device = "/dev/net/tun";
#define N2N_LINUX_SYSTEMCMD_SIZE 128
    char buf[N2N_LINUX_SYSTEMCMD_SIZE];
    struct ifreq ifr;
    int rc;

    //TODO
    ipstr_t ipstr;

    device->fd = open(tuntap_device, O_RDWR);
    if (device->fd < 0)
    {
        traceEvent(TRACE_ERROR, "Unable to open TUN/TAP device: ioctl() [%s][%d]\n", strerror(errno), errno);
        return -1;
    }

    traceEvent(TRACE_NORMAL, "Succesfully open %s\n", tuntap_device);
    memset(&ifr, 0, sizeof(ifr));
    ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* Want a TAP device for layer 2 frames. */
    // NOTE(review): strncpy may leave ifr_name unterminated when dev_name is
    // exactly IFNAMSIZ long — confirm dev_name is always shorter.
    strncpy(ifr.ifr_name, device->dev_name, IFNAMSIZ);
    rc = ioctl(device->fd, TUNSETIFF, (void *) &ifr);

    if (rc < 0)
    {
        traceError("ioctl() [%s][%d]\n", strerror(errno), rc);
        close(device->fd);
        return -1;
    }

    /* Store the device name for later reuse */
    strncpy(device->dev_name, ifr.ifr_name, MIN(IFNAMSIZ, N2N_IFNAMSIZ));

    if ( !is_empty_mac(device->mac_addr) )
    {
        /* Set the hw address before bringing the if up. */
        macstr_t macstr;
        snprintf(buf, sizeof(buf), "/sbin/ifconfig %s hw ether %s",
                 ifr.ifr_name, mac2str(macstr, device->mac_addr));
        // NOTE(review): system() return value is ignored here and below —
        // a failed ifconfig goes unnoticed by the caller.
        system(buf);
        traceInfo("Setting MAC: %s", buf);
    }


    ipv4_to_str(ipstr, sizeof(ipstr_t), (const uint8_t *) &device->ip_addr);//TODO make array

    if (ip_mode == N2N_IPM_DHCP)
    {
        // DHCP mode: no netmask on the command line
        snprintf(buf, sizeof(buf), "/sbin/ifconfig %s %s mtu %d up",
                 ifr.ifr_name, ipstr, device->mtu);
    }
    else
    {
        // static mode: include the netmask rendered via inet_ntoa
        ipstr_t maskstr;
        strcpy((char *) maskstr, inet_ntoa( *(  (struct in_addr *) &device->device_mask)  ));//TODO
        //intoa(device->device_mask, maskstr, sizeof(maskstr));

        snprintf(buf, sizeof(buf), "/sbin/ifconfig %s %s netmask %s mtu %d up",
                 ifr.ifr_name, ipstr, maskstr, device->mtu);
    }

    traceInfo("Bringing up: %s", buf);
    system(buf);

    //device->ip_addr = inet_addr(device_ip);
    //device->device_mask = inet_addr(device_mask);
    read_mac(device->dev_name, device->mac_addr);
    return (device->fd);
}
コード例 #21
0
ファイル: n2n_utils.c プロジェクト: ggbg/n2n_v3
/* parse the configuration file */
/* Parse the configuration file `filename`, appending each non-comment,
 * trimmed line to `linebuffer`. Returns 0 on success, -1 on error. */
static int readConfFile(const char *filename, char * const linebuffer)
{
    struct stat stats;
    FILE *fd;
    char *buffer = NULL;

    buffer = (char *) malloc(MAX_CONFFILE_LINE_LENGTH);
    if (!buffer)
    {
        traceError("Unable to allocate memory");
        return -1;
    }

    if (stat(filename, &stats))
    {
        if (errno == ENOENT)
            traceError("parameter file %s not found/unable to access\n", filename);
        else
            traceError("cannot stat file %s, errno=%d\n", filename, errno);
        free(buffer);
        return -1;
    }

    fd = fopen(filename, "rb");
    if (!fd)
    {
        traceError("Unable to open parameter file '%s' (%d)...\n", filename, errno);
        free(buffer);
        return -1;
    }
    while (fgets(buffer, MAX_CONFFILE_LINE_LENGTH, fd))
    {
        char *p = NULL;

        /* strip out comments */
        p = strchr(buffer, '#');
        if (p)
            *p = '\0';

        /* remove \n */
        p = strchr(buffer, '\n');
        if (p)
            *p = '\0';

        /* strip out heading spaces */
        p = buffer;
        while (*p == ' ' && *p != '\0')
            ++p;
        if (p != buffer)
            /* fix: src and dst overlap here, which is undefined behavior
             * for strncpy/strcpy; memmove handles overlap correctly */
            memmove(buffer, p, strlen(p) + 1);

        /* strip out trailing spaces */
        while (strlen(buffer) && buffer[strlen(buffer) - 1] == ' ')
            buffer[strlen(buffer) - 1] = '\0';

        /* check for nested @file option */
        if (strchr(buffer, '@'))
        {
            traceError("@file in file nesting is not supported\n");
            free(buffer);
            fclose(fd);     /* fix: the FILE* used to leak on this path */
            return -1;
        }
        if ((strlen(linebuffer) + strlen(buffer) + 2) < MAX_CMDLINE_BUFFER_LENGTH)
        {
            strncat(linebuffer, " ", 1);
            strncat(linebuffer, buffer, strlen(buffer));
        }
        else
        {
            traceError("too many argument");
            free(buffer);
            fclose(fd);     /* fix: the FILE* used to leak on this path */
            return -1;
        }
    }

    free(buffer);
    fclose(fd);

    return 0;
}
コード例 #22
0
VdpStatus
vdpDeviceCreateX11(Display *display_orig, int screen, VdpDevice *device,
                   VdpGetProcAddress **get_proc_address)
{
    // Create a VDPAU device object: private X connection, GLX root context,
    // optional VA-API initialization, shaders, and the watermark texture.
    if (!display_orig || !device)
        return VDP_STATUS_INVALID_POINTER;

    // Let's get own connection to the X server
    Display *display = handle_xdpy_ref(display_orig);
    if (NULL == display)
        return VDP_STATUS_ERROR;

    if (global.quirks.buggy_XCloseDisplay) {
        // XCloseDisplay could segfault on fglrx. To avoid calling XCloseDisplay,
        // make one more reference to xdpy copy.
        handle_xdpy_ref(display_orig);
    }

    VdpDeviceData *data = calloc(1, sizeof(VdpDeviceData));
    if (NULL == data)
        return VDP_STATUS_RESOURCES;

    glx_ctx_lock(); // use glx lock to serialize X calls
    data->type = HANDLETYPE_DEVICE;
    data->display = display;
    data->display_orig = display_orig;   // save supplied pointer too
    data->screen = screen;
    data->refcount = 0;
    pthread_mutex_init(&data->refcount_mutex, NULL);
    data->root = DefaultRootWindow(display);

    XWindowAttributes wnd_attrs;
    XGetWindowAttributes(display, data->root, &wnd_attrs);
    data->color_depth = wnd_attrs.depth;

    data->fn.glXBindTexImageEXT =
        (PFNGLXBINDTEXIMAGEEXTPROC)glXGetProcAddress((GLubyte *)"glXBindTexImageEXT");
    data->fn.glXReleaseTexImageEXT =
        (PFNGLXRELEASETEXIMAGEEXTPROC)glXGetProcAddress((GLubyte *)"glXReleaseTexImageEXT");
    glx_ctx_unlock();

    if (!data->fn.glXBindTexImageEXT || !data->fn.glXReleaseTexImageEXT) {
        // Fix: the format string has a %s but no argument was supplied,
        // which is undefined behavior — pass __func__ like the other sites.
        traceError("error (%s): can't get glXBindTexImageEXT address\n", __func__);
        free(data);
        return VDP_STATUS_RESOURCES;
    }

    // create master GLX context to share data between further created ones
    glx_ctx_ref_glc_hash_table(display, screen);
    data->root_glc = glx_ctx_get_root_context();

    glx_ctx_push_thread_local(data);

    glClearColor(0.0f, 0.0f, 0.0f, 0.0f);

    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();

    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();

    // initialize VAAPI
    if (global.quirks.avoid_va) {
        // pretend there is no VA-API available
        data->va_available = 0;
    } else {
        data->va_dpy = vaGetDisplay(display);
        data->va_available = 0;

        VAStatus status = vaInitialize(data->va_dpy, &data->va_version_major,
                                       &data->va_version_minor);
        if (VA_STATUS_SUCCESS == status) {
            data->va_available = 1;
            traceInfo("libva (version %d.%d) library initialized\n",
                      data->va_version_major, data->va_version_minor);
        } else {
            data->va_available = 0;
            traceInfo("warning: failed to initialize libva. "
                      "No video decode acceleration available.\n");
        }
    }

    compile_shaders(data);

    glGenTextures(1, &data->watermark_tex_id);
    glBindTexture(GL_TEXTURE_2D, data->watermark_tex_id);

    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, watermark_width, watermark_height, 0, GL_BGRA,
                 GL_UNSIGNED_BYTE, watermark_data);
    glFinish();

    *device = handle_insert(data);
    if (get_proc_address)
        *get_proc_address = &vdpGetProcAddress;

    GLenum gl_error = glGetError();
    glx_ctx_pop();

    if (GL_NO_ERROR != gl_error) {
        // NOTE(review): at this point the handle is already inserted and
        // `data` is reachable through it, so it is not freed here — confirm
        // callers treat the device as unusable after this error.
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        return VDP_STATUS_ERROR;
    }

    return VDP_STATUS_OK;
}
コード例 #23
0
static
void *
presentation_thread(void *param)
{
    // Worker thread for a presentation queue: sleeps until the head entry's
    // target time arrives, then displays it. `param` smuggles the
    // VdpPresentationQueue handle through the void pointer.
    pthread_mutex_t cond_mutex = PTHREAD_MUTEX_INITIALIZER;
    pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
    VdpPresentationQueue presentation_queue = (VdpPresentationQueue)(size_t)param;
    VdpPresentationQueueData *pqData =
        handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE);
    if (NULL == pqData)
        return NULL;

    pthread_mutex_lock(&cond_mutex);
    while (1) {
        struct timespec now;
        clock_gettime(CLOCK_REALTIME, &now);
        struct timespec target_time = now;

        while (1) {
            int ret;
            handle_release(presentation_queue);
            // Cancellation is enabled only while blocked on the condvar —
            // never while a handle or the queue mutex is held.
            pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
            ret = pthread_cond_timedwait(&pqData->new_work_available, &cond_mutex, &target_time);
            if (ret != 0 && ret != ETIMEDOUT) {
                traceError("%s: pthread_cond_timedwait failed with code %d\n", __func__, ret);
                goto quit;
            }

            struct timespec now;    // intentionally shadows outer `now`: fresh timestamp
            clock_gettime(CLOCK_REALTIME, &now);
            pqData = handle_acquire(presentation_queue, HANDLETYPE_PRESENTATION_QUEUE);
            if (!pqData)
                goto quit;
            pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
            pthread_mutex_lock(&pqData->queue_mutex);
            if (pqData->queue.head != -1) {
                struct timespec ht = vdptime2timespec(pqData->queue.item[pqData->queue.head].t);
                if (now.tv_sec > ht.tv_sec ||
                    (now.tv_sec == ht.tv_sec && now.tv_nsec > ht.tv_nsec))
                {
                    // head entry is due: leave the inner loop with queue_mutex
                    // still held; it is unlocked exactly once below.
                    break;
                } else {
                    // sleep until next event
                    target_time = ht;
                }
            } else {
                // queue empty, no work to do. Wait for next event
                target_time = now;
                target_time.tv_sec += 1;
            }
            pthread_mutex_unlock(&pqData->queue_mutex);
        }

        // do event processing.
        // Fix: the mutex used to be unlocked both at the break site above and
        // here — unlocking a non-recursive mutex twice is undefined behavior.
        // Now the break path keeps the lock and this is the single unlock.
        pthread_mutex_unlock(&pqData->queue_mutex);
        do_presentation_queue_display(pqData);
    }

quit:
    return NULL;
}
コード例 #24
0
static
VdpStatus
compile_shaders(VdpDeviceData *deviceData)
{
    // Compiles and links every fragment shader in glsl_shaders[], storing the
    // resulting GL objects in deviceData->shaders[]. Returns VDP_STATUS_OK on
    // success, VDP_STATUS_ERROR if any shader fails to compile or link.
    // Requires a current GL context.
    VdpStatus retval = VDP_STATUS_ERROR;

    for (int k = 0; k < SHADER_COUNT; k ++) {
        struct shader_s *s = &glsl_shaders[k];
        GLint errmsg_len;
        GLuint f_shader, program;
        int ok;


        f_shader = glCreateShader(GL_FRAGMENT_SHADER);
        glShaderSource(f_shader, 1, &s->body, &s->len);
        glCompileShader(f_shader);
        glGetShaderiv(f_shader, GL_COMPILE_STATUS, &ok);
        if (!ok) {
            glGetShaderiv(f_shader, GL_INFO_LOG_LENGTH, &errmsg_len);
            // Fix: malloc result was previously passed to GL/traceError
            // unchecked; a NULL here would have crashed the error path.
            char *errmsg = malloc(errmsg_len);
            if (errmsg != NULL) {
                glGetShaderInfoLog(f_shader, errmsg_len, NULL, errmsg);
                traceError("error (%s): compilation of shader #%d failed with '%s'\n", __func__, k,
                           errmsg);
                free(errmsg);
            } else {
                traceError("error (%s): compilation of shader #%d failed\n", __func__, k);
            }
            glDeleteShader(f_shader);
            goto err;
        }

        program = glCreateProgram();
        glAttachShader(program, f_shader);
        glLinkProgram(program);
        glGetProgramiv(program, GL_LINK_STATUS, &ok);
        if (!ok) {
            glGetProgramiv(program, GL_INFO_LOG_LENGTH, &errmsg_len);
            char *errmsg = malloc(errmsg_len);
            if (errmsg != NULL) {
                glGetProgramInfoLog(program, errmsg_len, NULL, errmsg);
                traceError("error (%s): linking of shader #%d failed with '%s'\n", __func__, k,
                           errmsg);
                free(errmsg);
            } else {
                traceError("error (%s): linking of shader #%d failed\n", __func__, k);
            }
            glDeleteProgram(program);
            glDeleteShader(f_shader);
            goto err;
        }

        deviceData->shaders[k].f_shader = f_shader;
        deviceData->shaders[k].program = program;

        // shaders that sample more than one texture need their sampler
        // uniform locations resolved up front
        switch (k) {
        case glsl_YV12_RGBA:
        case glsl_NV12_RGBA:
            deviceData->shaders[k].uniform.tex_0 = glGetUniformLocation(program, "tex[0]");
            deviceData->shaders[k].uniform.tex_1 = glGetUniformLocation(program, "tex[1]");
            break;
        case glsl_red_to_alpha_swizzle:
            deviceData->shaders[k].uniform.tex_0 = glGetUniformLocation(program, "tex_0");
            break;
        }
    }

    retval = VDP_STATUS_OK;
err:
    return retval;
}
コード例 #25
0
static
VdpStatus
softVdpDecoderRender_h264(VdpDecoder decoder, VdpDecoderData *decoderData,
                          VdpVideoSurfaceData *dstSurfData, VdpPictureInfo const *picture_info,
                          uint32_t bitstream_buffer_count,
                          VdpBitstreamBuffer const *bitstream_buffers)
{
    // Decodes one H.264 picture via VA-API: translates VDPAU picture info to
    // VA picture parameters / IQ matrix, merges all bitstream buffers into one
    // contiguous allocation, then submits the picture slice by slice.
    // Returns VDP_STATUS_OK on success, VDP_STATUS_RESOURCES when out of
    // surfaces/memory, VDP_STATUS_ERROR on any VA-API failure.
    VdpDeviceData *deviceData = decoderData->device;
    VADisplay va_dpy = deviceData->va_dpy;
    VAStatus status;
    VdpStatus vs, err_code;
    VdpPictureInfoH264 const *vdppi = (void *)picture_info;

    // Fix: declared here (NULL-initialized) so every `goto quit` path frees it
    // unconditionally. Previously the buffer leaked on all error paths taken
    // after its allocation.
    uint8_t *merged_bitstream = NULL;

    // TODO: figure out where to get level
    uint32_t level = 41;

    // preparing picture parameters and IQ matrix
    VABufferID pic_param_buf, iq_matrix_buf;
    VAPictureParameterBufferH264 pic_param;
    VAIQMatrixBufferH264 iq_matrix;

    vs = h264_translate_reference_frames(dstSurfData, decoder, decoderData, &pic_param, vdppi);
    if (VDP_STATUS_OK != vs) {
        if (VDP_STATUS_RESOURCES == vs) {
            traceError("error (softVdpDecoderRender): no surfaces left in buffer\n");
            err_code = VDP_STATUS_RESOURCES;
        } else {
            err_code = VDP_STATUS_ERROR;
        }
        goto quit;
    }

    h264_translate_pic_param(&pic_param, decoderData->width, decoderData->height, vdppi, level);
    h264_translate_iq_matrix(&iq_matrix, vdppi);

    // NOTE(review): on the VA error paths below, already-created VA buffers
    // (pic_param_buf, iq_matrix_buf) are not vaDestroyBuffer'd — presumably
    // reclaimed with the context, but verify against the VA-API contract.
    glx_context_lock();
    status = vaCreateBuffer(va_dpy, decoderData->context_id, VAPictureParameterBufferType,
        sizeof(VAPictureParameterBufferH264), 1, &pic_param, &pic_param_buf);
    if (VA_STATUS_SUCCESS != status) {
        glx_context_unlock();
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    status = vaCreateBuffer(va_dpy, decoderData->context_id, VAIQMatrixBufferType,
        sizeof(VAIQMatrixBufferH264), 1, &iq_matrix, &iq_matrix_buf);
    if (VA_STATUS_SUCCESS != status) {
        glx_context_unlock();
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    // send data to decoding hardware
    status = vaBeginPicture(va_dpy, decoderData->context_id, dstSurfData->va_surf);
    if (VA_STATUS_SUCCESS != status) {
        glx_context_unlock();
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }
    status = vaRenderPicture(va_dpy, decoderData->context_id, &pic_param_buf, 1);
    if (VA_STATUS_SUCCESS != status) {
        glx_context_unlock();
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }
    status = vaRenderPicture(va_dpy, decoderData->context_id, &iq_matrix_buf, 1);
    if (VA_STATUS_SUCCESS != status) {
        glx_context_unlock();
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    vaDestroyBuffer(va_dpy, pic_param_buf);
    vaDestroyBuffer(va_dpy, iq_matrix_buf);
    glx_context_unlock();

    // merge bitstream buffers
    int total_bitstream_bytes = 0;
    for (unsigned int k = 0; k < bitstream_buffer_count; k ++)
        total_bitstream_bytes += bitstream_buffers[k].bitstream_bytes;

    merged_bitstream = malloc(total_bitstream_bytes);
    if (NULL == merged_bitstream) {
        err_code = VDP_STATUS_RESOURCES;
        goto quit;
    }

    do {
        unsigned char *ptr = merged_bitstream;
        for (unsigned int k = 0; k < bitstream_buffer_count; k ++) {
            memcpy(ptr, bitstream_buffers[k].bitstream, bitstream_buffers[k].bitstream_bytes);
            ptr += bitstream_buffers[k].bitstream_bytes;
        }
    } while(0);

    // Slice parameters

    // All slice data have been merged into one continuous buffer. But we must supply
    // slices one by one to the hardware decoder, so we need to delimit them. VDPAU
    // requires bitstream buffers to include slice start code (0x00 0x00 0x01). Those
    // will be used to calculate offsets and sizes of slice data in code below.

    rbsp_state_t st_g;      // reference, global state
    rbsp_attach_buffer(&st_g, merged_bitstream, total_bitstream_bytes);
    int nal_offset = rbsp_navigate_to_nal_unit(&st_g);
    if (nal_offset < 0) {
        traceError("error (softVdpDecoderRender): no NAL header\n");
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    do {
        VASliceParameterBufferH264 sp_h264;
        memset(&sp_h264, 0, sizeof(VASliceParameterBufferH264));

        // make a copy of global rbsp state for using in slice header parser
        rbsp_state_t st = rbsp_copy_state(&st_g);
        rbsp_reset_bit_counter(&st);
        int nal_offset_next = rbsp_navigate_to_nal_unit(&st_g);

        // calculate end of current slice. Note (-3). It's slice start code length.
        const unsigned int end_pos = (nal_offset_next > 0) ? (nal_offset_next - 3)
                                                           : total_bitstream_bytes;
        sp_h264.slice_data_size     = end_pos - nal_offset;
        sp_h264.slice_data_offset   = 0;
        sp_h264.slice_data_flag     = VA_SLICE_DATA_FLAG_ALL;

        // TODO: this may be not entirely true for YUV444
        // but if we limiting to YUV420, that's ok
        int ChromaArrayType = pic_param.seq_fields.bits.chroma_format_idc;

        // parse slice header and use its data to fill slice parameter buffer
        parse_slice_header(&st, &pic_param, ChromaArrayType, vdppi->num_ref_idx_l0_active_minus1,
                           vdppi->num_ref_idx_l1_active_minus1, &sp_h264);

        VABufferID slice_parameters_buf;
        glx_context_lock();
        status = vaCreateBuffer(va_dpy, decoderData->context_id, VASliceParameterBufferType,
            sizeof(VASliceParameterBufferH264), 1, &sp_h264, &slice_parameters_buf);
        if (VA_STATUS_SUCCESS != status) {
            glx_context_unlock();
            err_code = VDP_STATUS_ERROR;
            goto quit;
        }
        status = vaRenderPicture(va_dpy, decoderData->context_id, &slice_parameters_buf, 1);
        if (VA_STATUS_SUCCESS != status) {
            glx_context_unlock();
            err_code = VDP_STATUS_ERROR;
            goto quit;
        }

        VABufferID slice_buf;
        status = vaCreateBuffer(va_dpy, decoderData->context_id, VASliceDataBufferType,
            sp_h264.slice_data_size, 1, merged_bitstream + nal_offset, &slice_buf);
        if (VA_STATUS_SUCCESS != status) {
            glx_context_unlock();
            err_code = VDP_STATUS_ERROR;
            goto quit;
        }

        status = vaRenderPicture(va_dpy, decoderData->context_id, &slice_buf, 1);
        if (VA_STATUS_SUCCESS != status) {
            glx_context_unlock();
            err_code = VDP_STATUS_ERROR;
            goto quit;
        }

        vaDestroyBuffer(va_dpy, slice_parameters_buf);
        vaDestroyBuffer(va_dpy, slice_buf);
        glx_context_unlock();

        if (nal_offset_next < 0)        // nal_offset_next equals -1 when there is no slice
            break;                      // start code found. Thus that was the final slice.
        nal_offset = nal_offset_next;
    } while (1);

    glx_context_lock();
    status = vaEndPicture(va_dpy, decoderData->context_id);
    glx_context_unlock();
    if (VA_STATUS_SUCCESS != status) {
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    err_code = VDP_STATUS_OK;
quit:
    // free(NULL) is a no-op, so this is safe on the early error paths too
    free(merged_bitstream);
    return err_code;
}
コード例 #26
0
ファイル: PhilSock.cpp プロジェクト: pmlt/comp445
// Client-side connection setup over UDP: binds the socket to a fixed local
// port, then performs a three-way handshake (SYN -> SYNACK -> ACK) with the
// server at `name`, retransmitting the SYN whenever the SYNACK wait times out.
// On success, sndr_seqno/recv_seqno hold the next sequence numbers for each
// direction. Throws SocketException* (by pointer — the convention used
// throughout this codebase; presumably catch sites delete it, verify) on
// bind/send/recv failure.
net::ClientSocket::ClientSocket(int af, int protocol, bool trace, int local_port, const struct sockaddr_in * name, int namelen) :
    Socket(af, protocol, trace) {

    local_addr.sin_family = AF_INET;
    local_addr.sin_port = htons(local_port);
    local_addr.sin_addr.s_addr = htonl(INADDR_ANY);
    // This is dumb, but the router requires that the client socket binds to a static port.
    if (SOCKET_ERROR == ::bind(winsocket, (const sockaddr*)&local_addr, sizeof(local_addr))) {
        traceError(WSAGetLastError(), "SOCKET_ERROR while binding");
        throw new SocketException("Could not bind client socket!");
    }

    // remember the peer address; all datagrams go to / come from it
    dest = *name;
    dest_len = namelen;

    // Three-way handshake.
    // Step 1: Send the sequence number to the server
    dgram _syn;
    syn(_syn);
    if (trace) tracefile << "CLIENT: Sending SYN message with Seq No " << _syn.seqno << "\n";
    int sentbytes = send_dgram(_syn);
    if (sentbytes == SOCKET_ERROR) {
        traceError(WSAGetLastError(), "SOCKET_ERROR while sendto");
        throw new SocketException("Could not send SYN");
    }

    // our next outgoing sequence number follows the SYN's
    sndr_seqno = nextSeqNo(_syn.seqno);

    // Step 2: Wait for SYNACK
    if (trace) tracefile << "CLIENT: Waiting for SYNACK message\n";
    while (true) {

        dgram _synack;
        int recvbytes = recv_dgram(_synack);
        if (recvbytes == SOCKET_ERROR) {
            if (WSAGetLastError() == WSAETIMEDOUT) {
                // Timed out, re-send syn
                send_dgram(_syn);
                continue; // Timed out, try again
            }
            // Some other error occured
            traceError(WSAGetLastError(), "SOCKET_ERROR while recv");
            throw new SocketException("Could not receive SYNACK message from server!");
        }
        if (recvbytes < sizeof(dgram)) continue; // Throw away unexpected packet
        if (_synack.type != SYNACK) continue; //Throw away unexpected packet

        if (trace) tracefile << "CLIENT: Received SYNACK with Seq No " << _synack.seqno << "\n";

        // the server's next sequence number follows its SYNACK's
        recv_seqno = nextSeqNo(_synack.seqno);

        // Step 3: Send ACK
        if (trace) tracefile << "CLIENT: Sending ACK in response to SYNACK\n";
        // Simulate loss of packet!
        // NOTE(review): despite the comment above, the ACK is sent
        // unconditionally here — the loss-simulation hook was likely removed;
        // confirm against send_ack's implementation.
        send_ack(_synack);

        break;
    }
    if (trace) {
        tracefile << "CLIENT: Connection established!\n";
        tracefile << "Next sequence numbers will be:\n";
        tracefile << "  Client: " << sndr_seqno << "\n";
        tracefile << "  Server: " << recv_seqno << "\n";
    }
}
コード例 #27
0
ファイル: PhilSock.cpp プロジェクト: pmlt/comp445
// Server side of the three-way handshake: blocks until a SYN arrives, replies
// with a SYNACK (retransmitted on timeout), and waits for the matching ACK.
// On return, recv_seqno/sndr_seqno hold the next expected sequence numbers.
// `backlog` is accepted for signature compatibility but not used here.
// Throws SocketException* (by pointer, per codebase convention) on fatal
// receive errors.
void net::ServerSocket::listen(int backlog) {
    // Wait for a SYN
    dgram _syn, _synack, _ack;
    if (trace) tracefile << "SERVER: Waiting for SYN message\n";
    while (true) {
        int recvbytes = recv_dgram(_syn, true);
        if (recvbytes == SOCKET_ERROR) {
            if (WSAGetLastError() == WSAETIMEDOUT) continue; // Timed out, try again
            // Some other error occured
            traceError(WSAGetLastError(), "SOCKET_ERROR while recv");
            throw new SocketException("Could not receive SYN from a client!");
        }
        if (recvbytes < sizeof(dgram)) continue; // Throw away unexpected packet
        if (_syn.type != SYN) {
            if (_syn.type == DATA) {
                // Last packet of previous connection was dropped or delayed
                send_ack(_syn);
            }
            continue; //Throw away unexpected packet
        }

        break;
    }
    recv_seqno = nextSeqNo(_syn.seqno);

    if (trace) tracefile << "SERVER: Received SYN with Seq No " << _syn.seqno << "\n";

    // Complete the connection handshake.
    // Send a SYNACK
    synack(_synack, _syn);
    if (trace) tracefile << "SERVER: Sending SYNACK with Seq No " << _synack.seqno << "\n";
    send_dgram(_synack);

    if (trace) tracefile << "SERVER: Waiting for acknowledgement of SYNACK\n";
    while (true) {

        // Wait for ACK
        int recvbytes = recv_dgram(_ack);
        if (recvbytes == SOCKET_ERROR) {
            if (WSAGetLastError() == WSAETIMEDOUT) {
                // Timed out, re-send packet
                if (trace) tracefile << "SERVER: Timed-out, re-sending SYNACK\n";
                send_dgram(_synack);
                continue;
            }
            // Some other error occured
            traceError(WSAGetLastError(), "SOCKET_ERROR while recv");
            throw new SocketException("Could not receive SYNACK from client!");
        }
        if (recvbytes < sizeof(dgram)) continue; // Throw away unexpected packet
        if (_ack.type != ACK) continue; //Throw away unexpected packet
        if (_ack.seqno != _synack.seqno) {
            continue; // This ACK is not for our SYNACK!
        }

        if (trace) tracefile << "SERVER: Received ACK with Seq No " << _ack.seqno << "\n";
        break;
    }

    // NOTE(review): htons is used here to display the peer port; ntohs would
    // express the intent (network-to-host) though the two are byte-identical
    // operations on common platforms.
    std::cout << "Accepted connection from " << inet_ntoa(dest.sin_addr) << ":"
              << std::hex << htons(dest.sin_port) << std::dec << std::endl;

    sndr_seqno = nextSeqNo(_synack.seqno);

    if (trace) {
        tracefile << "SERVER: Received SYNACK acknowledgement, connection established!\n";
        tracefile << "Next sequence numbers will be:\n";
        tracefile << "  Client: " << recv_seqno << "\n";
        tracefile << "  Server: " << sndr_seqno << "\n";
    }
}
コード例 #28
0
VdpStatus
vdpDeviceDestroy(VdpDevice device)
{
    // Destroys a VdpDevice: tries to reap leaked child objects, tears down
    // libva and GL state, and frees the device data. Returns
    // VDP_STATUS_INVALID_HANDLE for an unknown handle, VDP_STATUS_ERROR if
    // children could not be reaped or a GL error was recorded, VDP_STATUS_OK
    // otherwise.
    VdpStatus err_code;
    VdpDeviceData *data = handle_acquire(device, HANDLETYPE_DEVICE);
    if (NULL == data)
        return VDP_STATUS_INVALID_HANDLE;

    if (0 != data->refcount) {
        // Buggy client forgot to destroy dependend objects or decided that destroying
        // VdpDevice destroys all child object. Let's try to mitigate and prevent leakage.
        traceError("warning (%s): non-zero reference count (%d). Trying to free child objects.\n",
                   __func__, data->refcount);
        void *parent_object = data;
        handle_execute_for_all(destroy_child_objects, parent_object);
    }

    if (0 != data->refcount) {
        traceError("error (%s): still non-zero reference count (%d)\n", __func__, data->refcount);
        traceError("Here is the list of objects:\n");
        struct {
            int cnt;
            int total_cnt;
            VdpDeviceData *deviceData;
        } state = { .cnt = 0, .total_cnt = 0, .deviceData = data };

        handle_execute_for_all(print_handle_type, &state);
        traceError("Objects leaked: %d\n", state.cnt);
        traceError("Objects visited during scan: %d\n", state.total_cnt);
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    // cleaup libva
    if (data->va_available)
        vaTerminate(data->va_dpy);

    glx_ctx_push_thread_local(data);
    glDeleteTextures(1, &data->watermark_tex_id);
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
    destroy_shaders(data);
    // Fix: query the GL error while the context is still current. The
    // original code called glGetError() only after glx_ctx_pop() and
    // glXMakeCurrent(..., None, NULL), where no context is current and the
    // result is unreliable.
    GLenum gl_error = glGetError();
    glx_ctx_pop();

    glx_ctx_lock();
    glXMakeCurrent(data->display, None, NULL);
    glx_ctx_unlock();

    glx_ctx_unref_glc_hash_table(data->display);

    handle_xdpy_unref(data->display_orig);
    handle_expunge(device);
    pthread_mutex_destroy(&data->refcount_mutex);
    free(data);

    // teardown is complete either way; report the error recorded above
    if (GL_NO_ERROR != gl_error) {
        traceError("error (%s): gl error %d\n", __func__, gl_error);
        err_code = VDP_STATUS_ERROR;
        goto quit_skip_release;
    }

    return VDP_STATUS_OK;

quit:
    handle_release(device);
quit_skip_release:
    return err_code;
}
コード例 #29
0
static
void
do_presentation_queue_display(VdpPresentationQueueData *pqData)
{
    // Pops the head entry of the presentation queue and renders its output
    // surface (plus an optional watermark) to the target drawable, then
    // updates the surface's presentation status/timestamp. Caller must ensure
    // the queue is non-empty (asserted below).
    pthread_mutex_lock(&pqData->queue_mutex);
    assert(pqData->queue.used > 0);

    const int entry = pqData->queue.head;
    VdpDeviceData *deviceData = pqData->device;
    VdpOutputSurface surface = pqData->queue.item[entry].surface;
    const uint32_t clip_width = pqData->queue.item[entry].clip_width;
    const uint32_t clip_height = pqData->queue.item[entry].clip_height;

    // remove first entry from queue (head moves on, slot goes to freelist)
    pqData->queue.used --;
    pqData->queue.freelist[pqData->queue.head] = pqData->queue.firstfree;
    pqData->queue.firstfree = pqData->queue.head;
    pqData->queue.head = pqData->queue.item[pqData->queue.head].next;
    pthread_mutex_unlock(&pqData->queue_mutex);

    VdpOutputSurfaceData *surfData = handle_acquire(surface, HANDLETYPE_OUTPUT_SURFACE);
    if (surfData == NULL)
        return;

    glx_context_push_global(deviceData->display, pqData->target->drawable, pqData->target->glc);

    // zero clip dimensions mean "use the whole surface"
    const uint32_t target_width  = (clip_width > 0)  ? clip_width  : surfData->width;
    const uint32_t target_height = (clip_height > 0) ? clip_height : surfData->height;

    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, target_width, target_height, 0, -1.0, 1.0);
    glViewport(0, 0, target_width, target_height);

    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();

    // texture matrix maps pixel coordinates to [0,1] texture space
    glMatrixMode(GL_TEXTURE);
    glLoadIdentity();
    glScalef(1.0f/surfData->width, 1.0f/surfData->height, 1.0f);

    glEnable(GL_TEXTURE_2D);
    glDisable(GL_BLEND);
    glBindTexture(GL_TEXTURE_2D, surfData->tex_id);
    glColor4f(1, 1, 1, 1);
    glBegin(GL_QUADS);
        glTexCoord2i(0, 0);                        glVertex2i(0, 0);
        glTexCoord2i(target_width, 0);             glVertex2i(target_width, 0);
        glTexCoord2i(target_width, target_height); glVertex2i(target_width, target_height);
        glTexCoord2i(0, target_height);            glVertex2i(0, target_height);
    glEnd();

    if (global.quirks.show_watermark) {
        // blend the watermark into the bottom-right corner
        glEnable(GL_BLEND);
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
        glBlendEquation(GL_FUNC_ADD);
        glBindTexture(GL_TEXTURE_2D, deviceData->watermark_tex_id);

        // watermark texture uses plain [0,1] coordinates
        glMatrixMode(GL_TEXTURE);
        glLoadIdentity();

        glColor3f(0.8, 0.08, 0.35);
        glBegin(GL_QUADS);
            glTexCoord2i(0, 0);
            glVertex2i(target_width - watermark_width, target_height - watermark_height);

            glTexCoord2i(1, 0);
            glVertex2i(target_width, target_height - watermark_height);

            glTexCoord2i(1, 1);
            glVertex2i(target_width, target_height);

            glTexCoord2i(0, 1);
            glVertex2i(target_width - watermark_width, target_height);
        glEnd();
    }

    glXSwapBuffers(deviceData->display, pqData->target->drawable);

    struct timespec now;
    clock_gettime(CLOCK_REALTIME, &now);
    surfData->first_presentation_time = timespec2vdptime(now);
    surfData->status = VDP_PRESENTATION_QUEUE_STATUS_IDLE;

    if (global.quirks.log_pq_delay) {
            const int64_t delta = timespec2vdptime(now) - surfData->queued_at;
            const struct timespec delta_ts = vdptime2timespec(delta);
            // Fix: delta_ts fields are time_t/long; passing them to %d without
            // a cast is a format-string mismatch (the `now` fields were
            // already cast). Cast all four for consistency.
            traceInfo("pqdelay %d.%09d %d.%09d\n", (int)now.tv_sec, (int)now.tv_nsec,
                      (int)delta_ts.tv_sec, (int)delta_ts.tv_nsec);
    }

    GLenum gl_error = glGetError();
    glx_context_pop();
    handle_release(surface);

    if (GL_NO_ERROR != gl_error) {
        traceError("error (VdpPresentationQueueDisplay): gl error %d\n", gl_error);
    }
}
コード例 #30
0
ファイル: PhilSock.cpp プロジェクト: pmlt/comp445
void net::Socket::wait(size_t &recv, size_t &sent, statistics * stats) {
    dgram *sent_pkt, recv_pkt;
    bool *sndr_acked, *recv_acked;
    int *timeouts;

    sent_pkt = (dgram*)calloc(sizeof(dgram), params.window_size);
    sndr_acked = (bool*)calloc(sizeof(bool), params.window_size);
    recv_acked = (bool*)calloc(sizeof(bool), params.window_size);
    timeouts = (int*)calloc(sizeof(int), params.window_size);

    // Initialize to 0
    recv = 0;
    sent = 0;

    // Initialize
    for (int i = 0; i < params.window_size; i++) {
        data(sent_pkt[i], -1, 0, NULL);
        recv_acked[i] = false;
        sndr_acked[i] = false;
        timeouts[i] = 0;
    }

    if (stats != NULL) {
        stats->packets_required = (sndr_len / params.payload_size);
        if ((sndr_len % params.payload_size) > 0) stats->packets_required++;
        stats->transfer_time = clock();
        stats->packets_sent = 0;
    }

    //std::cout << "INIT JOBS: Sender(" << sndr_len << ", #" << sndr_seqno << "), Receiver(" << recv_len << ", #" << recv_seqno << ")\n";

    // Loop until there is no job left.
    while (sndr_len > 0 || recv_len > 0) {

        if (sndr_len > 0) {
            // We have some outstanding bytes to send.
            for (int i = 0; i < params.window_size; i++) {
                if (timeouts[i] > 0) continue; // Already sent, waiting for ACK
                if (sndr_acked[i]) continue; // Already ACKed
                if (sndr_len > i * params.payload_size) {
                    sent_pkt[i].size = min(params.payload_size, sndr_len - (i * params.payload_size));
                }
                else {
                    sent_pkt[i].size = 0;
                }
                if (sent_pkt[i].size == 0) continue; // Not a real packet (beyond our send buffer)
                data(sent_pkt[i], (sndr_seqno + i) % SEQNO_MAX, sent_pkt[i].size, sndr_buf + (i*params.payload_size));
                if (trace) tracefile << "SENDER: Sending packet #" << sent_pkt[i].seqno << " of size " << sent_pkt[i].size << "\n";
                send_dgram(sent_pkt[i]);
                timeouts[i] = params.timeout;
                if (stats != NULL) stats->packets_sent++;
            }
        }

        // Wait for a packet.
        clock_t begin_time = clock();
        int status = recv_dgram(recv_pkt);
        int recv_elapsed = (clock() - begin_time) / (CLOCKS_PER_SEC / 1000);

        if (status == SOCKET_ERROR) {
            if (WSAGetLastError() == WSAETIMEDOUT) {
                // Complete silence on the wire, try sending ALL packets again
                for (int i = 0; i < params.window_size; i++) {
                    timeouts[i] = 0;
                }
                continue;
            }
            // This is another type of error.
            traceError(WSAGetLastError(), "SOCKET_ERROR while waiting for packet.");
            throw new SocketException("Could not receive packet!");
        }

        // Update timeouts according to time elapsed during recv
        for (int i = 0; i < params.window_size; i++) {
            timeouts[i] -= recv_elapsed;
        }

        // We have something; how we react depends on the type of packet received.
        switch (recv_pkt.type) {
        case SYNACK:
            // Last ACK of connection handshake was lost. Re-ACK it.
            send_ack(recv_pkt);
            break; // Go back to waiting.
        case ACK:
            // Were we waiting for one of these?
            if (sndr_len > 0) {
                // Yes. Is this one of the ones we were waiting for?
                for (int i = 0; i < params.window_size; i++) {
                    if (sent_pkt[i].size < 0) continue; // Not a real packet.
                    if (sndr_acked[i]) continue; // Already ACKed, so whatever
                    if (recv_pkt.seqno == sent_pkt[i].seqno) {
                        if (trace) tracefile << "SENDER: Received ACK for packet #" << recv_pkt.seqno << "\n";
                        // Yes. Packet was acknowledged.
                        sndr_acked[i] = true;
                    }
                }
                // Slide the sender window until we have a non-acked packet heading it
                while (sndr_acked[0]) {
                    // Update sender state
                    sndr_seqno = nextSeqNo(sndr_seqno);
                    sndr_buf += sent_pkt[0].size;
                    sndr_len -= sent_pkt[0].size;
                    sent += sent_pkt[0].size;

                    // Slide window
                    for (int j = 0; j < params.window_size - 1; j++) {
                        sent_pkt[j] = sent_pkt[j + 1];
                        timeouts[j] = timeouts[j + 1];
                        sndr_acked[j] = sndr_acked[j + 1];
                    }
                    // The new "last packet" is automatically timed out because it has never been sent yet
                    data(sent_pkt[params.window_size - 1], -1, 0, NULL);
                    timeouts[params.window_size - 1] = 0;
                    sndr_acked[params.window_size - 1] = false;
                }
            }
            // In all other cases, simply discard.
            break;
        case DATA:
            // Were we waiting for one of these?
            if (recv_len > 0) {
                for (int i = 0; i < params.window_size; i++) {
                    size_t buf_offset = (i * params.payload_size);
                    if (buf_offset >= recv_len) continue; // Ignore window entries beyond buffer size
                    int seqno = (recv_seqno + i) % SEQNO_MAX;
                    if (recv_pkt.seqno == seqno) {
                        // Packet falls within window.
                        if (!recv_acked[i]) {
                            // Check that packet size matches expected size
                            size_t expected_size = min(params.payload_size, recv_len - (i * params.payload_size));
                            if (expected_size != recv_pkt.size) {
                                // Problematic case: we received a DATA packet with the correct SeqNo but wrong number of bytes
                                if (trace) tracefile << "RECEIVER: Packet #" << recv_pkt.seqno << " has unexpected size " << recv_pkt.size << " (expected " << expected_size << ").\n";
                                // This is DEFINITELY an un-recoverable error.
                                throw new SocketException("RECEIVER: DATA packet has correct sequence number but wrong size!");
                            }
                            // This is the first time we receive this packet.
                            if (trace) tracefile << "RECEIVER: Received expected DATA packet #" << recv_pkt.seqno << " of size " << recv_pkt.size << "\n";
                            memcpy(recv_buf + buf_offset, recv_pkt.payload, recv_pkt.size); // Copy payload bytes into buffer
                            recv_acked[i] = true;
                        }
                    }
                }
                // ACK the packet we have received
                if (trace) tracefile << "RECEIVER: Acknowledged packet #" << recv_pkt.seqno << "\n";
                send_ack(recv_pkt);
                // Slide the receiver window, if necessary.
                while (recv_acked[0]) {
                    // Update receiver state
                    size_t pktsize = min(params.payload_size, recv_len);

                    recv_seqno = nextSeqNo(recv_seqno);
                    recv_len -= pktsize;
                    recv_buf += pktsize;
                    recv += pktsize;

                    // Slide window
                    for (int j = 0; j < params.window_size - 1; j++) {
                        recv_acked[j] = recv_acked[j + 1];
                    }
                    recv_acked[params.window_size - 1] = false;
                }
            }
            else {
                // Problematic case: we received a DATA packet while we were not expecting any
                if (recv_seqno == recv_pkt.seqno) {
                    // This is the 1000$ bug that's not covered in the textbook
                    // Other side has switched mode and is sending early.
                    // Do NOT acknowledge this packet.
                    if (trace) tracefile << "RECEIVER: Unexpected packet #" << recv_pkt.seqno << " with size " << recv_pkt.size << " will NOT be acknowledged.\n";
                }
                else {
                    // This is probably an old DATA packet, acknowledge it
                    if (trace) tracefile << "RECEIVER: Acknowledging unexpected packet #" << recv_pkt.seqno << " with " << recv_pkt.size << " bytes.\n";
                    send_ack(recv_pkt);
                }
            }
            break;

        default:
            break; // Default is to discard packet.
        }
    }
    if (stats != NULL) {
        stats->transfer_time = (clock() - stats->transfer_time) / (CLOCKS_PER_SEC / 1000);
    }
    //std::cout << "FINISH JOBS: Sender(" << sndr_len << ", #" << sndr_seqno << "), Receiver(" << recv_len << ", #" << recv_seqno << ")\n";
    free(sent_pkt);
    free(recv_acked);
    free(sndr_acked);
    free(timeouts);
}