Example #1
static int map_image(struct gl_hwdec *hw, struct mp_image *hw_image,
                     GLuint *out_textures)
{
    struct priv *p = hw->priv;
    GL *gl = hw->gl;
    VAStatus status;
    VAImage *va_image = &p->current_image;

    unref_image(hw);

    mp_image_setrefp(&p->current_ref, hw_image);

    va_lock(p->ctx);

    status = vaDeriveImage(p->display, va_surface_id(hw_image), va_image);
    if (!CHECK_VA_STATUS(p, "vaDeriveImage()"))
        goto err;

    int mpfmt = va_fourcc_to_imgfmt(va_image->format.fourcc);
    if (mpfmt != IMGFMT_NV12 && mpfmt != IMGFMT_420P) {
        MP_FATAL(p, "unsupported VA image format %s\n",
                 mp_tag_str(va_image->format.fourcc));
        goto err;
    }

    if (!hw->converted_imgfmt) {
        MP_VERBOSE(p, "format: %s %s\n", mp_tag_str(va_image->format.fourcc),
                   mp_imgfmt_to_name(mpfmt));
        hw->converted_imgfmt = mpfmt;
    }

    if (hw->converted_imgfmt != mpfmt) {
        MP_FATAL(p, "mid-stream hwdec format change (%s -> %s) not supported\n",
                 mp_imgfmt_to_name(hw->converted_imgfmt), mp_imgfmt_to_name(mpfmt));
        goto err;
    }

    VABufferInfo buffer_info = {.mem_type = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME};
    status = vaAcquireBufferHandle(p->display, va_image->buf, &buffer_info);
    if (!CHECK_VA_STATUS(p, "vaAcquireBufferHandle()"))
        goto err;
    p->buffer_acquired = true;

    struct mp_image layout = {0};
    mp_image_set_params(&layout, &hw_image->params);
    mp_image_setfmt(&layout, mpfmt);

    // (it would be nice if we could use EGL_IMAGE_INTERNAL_FORMAT_EXT)
    int drm_fmts[4] = {MP_FOURCC('R', '8', ' ', ' '),   // DRM_FORMAT_R8
                       MP_FOURCC('G', 'R', '8', '8'),   // DRM_FORMAT_GR88
                       MP_FOURCC('R', 'G', '2', '4'),   // DRM_FORMAT_RGB888
                       MP_FOURCC('R', 'A', '2', '4')};  // DRM_FORMAT_RGBA8888

    for (int n = 0; n < layout.num_planes; n++) {
        int attribs[20] = {EGL_NONE};
        int num_attribs = 0;

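        // Pick the DRM fourcc whose bytes-per-pixel matches this plane.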
        ADD_ATTRIB(EGL_LINUX_DRM_FOURCC_EXT, drm_fmts[layout.fmt.bytes[n] - 1]);
        ADD_ATTRIB(EGL_WIDTH, mp_image_plane_w(&layout, n));
        ADD_ATTRIB(EGL_HEIGHT, mp_image_plane_h(&layout, n));
        ADD_ATTRIB(EGL_DMA_BUF_PLANE0_FD_EXT, buffer_info.handle);
        ADD_ATTRIB(EGL_DMA_BUF_PLANE0_OFFSET_EXT, va_image->offsets[n]);
        ADD_ATTRIB(EGL_DMA_BUF_PLANE0_PITCH_EXT, va_image->pitches[n]);

        p->images[n] = p->CreateImageKHR(eglGetCurrentDisplay(),
            EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, attribs);
        if (!p->images[n])
            goto err;

        gl->BindTexture(GL_TEXTURE_2D, p->gl_textures[n]);
        p->EGLImageTargetTexture2DOES(GL_TEXTURE_2D, p->images[n]);

        out_textures[n] = p->gl_textures[n];
    }
    gl->BindTexture(GL_TEXTURE_2D, 0);

    if (va_image->format.fourcc == VA_FOURCC_YV12)
        MPSWAP(GLuint, out_textures[1], out_textures[2]);

    va_unlock(p->ctx);
    return 0;

err:
    va_unlock(p->ctx);
    MP_FATAL(p, "mapping VAAPI EGL image failed\n");
    unref_image(hw);
    return -1;
}
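
Note: the attribute-setup loop above depends on a local ADD_ATTRIB helper that is not part of this excerpt. A minimal sketch of such a macro (an assumption about its shape, not necessarily the project's exact definition) that appends a name/value pair to the local attribs array and keeps the list EGL_NONE-terminated:

// Hypothetical sketch: append one attribute pair and re-terminate the list.
#define ADD_ATTRIB(name, value)              \
    do {                                     \
        attribs[num_attribs++] = (name);     \
        attribs[num_attribs++] = (value);    \
        attribs[num_attribs] = EGL_NONE;     \
    } while (0)

With six attributes per plane (12 ints plus the terminator) this stays well within the int attribs[20] array declared in the loop.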
Example #2
static int dvd_read_sector(stream_t *stream, dvd_priv_t *d, unsigned char *data)
{
  int len;

  if(d->packs_left==0) {
    /**
     * If we're not at the end of this cell, we can determine the next
     * VOBU to display using the VOBU_SRI information section of the
     * DSI.  Using this value correctly follows the current angle,
     * avoiding the doubled scenes in The Matrix, and makes our life
     * really happy.
     *
     * Otherwise, we set our next address past the end of this cell to
     * force the code above to go to the next cell in the program.
     */
    if(d->dsi_pack.vobu_sri.next_vobu != SRI_END_OF_CELL) {
       d->cur_pack= d->dsi_pack.dsi_gi.nv_pck_lbn + ( d->dsi_pack.vobu_sri.next_vobu & 0x7fffffff );
       MP_DBG(stream, "Navi  new pos=0x%X  \n",d->cur_pack);
    } else {
      // end of cell! find next cell!
      MP_VERBOSE(stream, "--- END OF CELL !!! ---\n");
      d->cur_pack=d->cell_last_pack+1;
    }
  }

read_next:
  if(d->cur_pack>d->cell_last_pack) {
    // end of cell!
    int next=dvd_next_cell(stream, d);
    if(next>=0) {
      d->cur_cell=next;
      // if( d->cur_pgc->cell_playback[d->cur_cell].block_type
      // == BLOCK_TYPE_ANGLE_BLOCK ) d->cur_cell+=dvd_angle-1;
      d->cur_pack = d->cur_pgc->cell_playback[ d->cur_cell ].first_sector;
      d->cell_last_pack=d->cur_pgc->cell_playback[ d->cur_cell ].last_sector;
      MP_VERBOSE(stream, "DVD next cell: %d  pack: 0x%X-0x%X  \n",d->cur_cell,d->cur_pack,d->cell_last_pack);
    } else
        return -1; // EOF
  }

  len = DVDReadBlocks(d->title, d->cur_pack, 1, data);
  // only == 0 should indicate an error, but some dvdread versions are buggy when used with dvdcss
  if(len <= 0) return -1; //error

  if(data[38]==0 && data[39]==0 && data[40]==1 && data[41]==0xBF &&
    data[1024]==0 && data[1025]==0 && data[1026]==1 && data[1027]==0xBF) {
       // found a Navi packet!!!
#if DVDREAD_VERSION >= LIBDVDREAD_VERSION(0,9,0)
    navRead_DSI(&d->dsi_pack, &(data[ DSI_START_BYTE ]));
#else
    navRead_DSI(&d->dsi_pack, &(data[ DSI_START_BYTE ]), sizeof(dsi_t));
#endif
    if(d->cur_pack != d->dsi_pack.dsi_gi.nv_pck_lbn ) {
      MP_VERBOSE(stream, "Invalid NAVI packet! lba=0x%X  navi=0x%X  \n",
        d->cur_pack,d->dsi_pack.dsi_gi.nv_pck_lbn);
    } else {
      // process!
      d->packs_left = d->dsi_pack.dsi_gi.vobu_ea;
      MP_DBG(stream, "Found NAVI packet! lba=0x%X  len=%d  \n",d->cur_pack,d->packs_left);
      //navPrint_DSI(&d->dsi_pack);
      MP_TRACE(stream, "\r### CELL %d: Navi: %d/%d  IFO: %d/%d   \n",d->cur_cell,
        d->dsi_pack.dsi_gi.vobu_c_idn,d->dsi_pack.dsi_gi.vobu_vob_idn,
        d->cur_pgc->cell_position[d->cur_cell].cell_nr,
        d->cur_pgc->cell_position[d->cur_cell].vob_id_nr);

      if(d->angle_seek) {
        int i,skip=0;
        for(i=0;i<9;i++)        // check if all values zero:
          if((skip=d->dsi_pack.sml_agli.data[i].address)!=0) break;
        if(skip && skip!=0x7fffffff) {
          // sml_agli table has valid data (at least one non-zero):
         d->cur_pack=d->dsi_pack.dsi_gi.nv_pck_lbn+
         d->dsi_pack.sml_agli.data[d->dvd_angle-1].address;
         d->angle_seek=0;
         d->cur_pack--;
         MP_VERBOSE(stream, "Angle-seek synced using sml_agli map!  new_lba=0x%X  \n",d->cur_pack);
        } else {
          // check if we're in the right cell, jump otherwise:
          if( (d->dsi_pack.dsi_gi.vobu_c_idn==d->cur_pgc->cell_position[d->cur_cell].cell_nr) &&
            (d->dsi_pack.dsi_gi.vobu_vob_idn==d->cur_pgc->cell_position[d->cur_cell].vob_id_nr) ){
            d->angle_seek=0;
            MP_VERBOSE(stream, "Angle-seek synced by cell/vob IDN search!  \n");
          } else {
            // wrong angle, skip this vobu:
            d->cur_pack=d->dsi_pack.dsi_gi.nv_pck_lbn+
            d->dsi_pack.dsi_gi.vobu_ea;
            d->angle_seek=2; // DEBUG
          }
        }
      }
    }
    ++d->cur_pack;
    goto read_next;
  }

  ++d->cur_pack;
  if(d->packs_left>=0) --d->packs_left;

  if(d->angle_seek) {
    if(d->angle_seek==2) MP_VERBOSE(stream, "!!! warning! reading packet while angle_seek !!!\n");
    goto read_next; // searching for Navi packet
  }

  return d->cur_pack-1;
}
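
Note: the byte test above looks for the MPEG private-stream-2 start code (00 00 01 BF) at the two offsets where a DVD NAV pack carries its PCI and DSI packets. A small hypothetical helper expressing the same check, assuming a full 2048-byte sector in data:

#include <stdbool.h>

// Hypothetical helper: true if this sector looks like a NAV pack, i.e. it has
// private-stream-2 start codes at the PCI (offset 38) and DSI (offset 1024)
// packet positions checked in dvd_read_sector() above.
static bool is_nav_pack(const unsigned char *data)
{
    return data[38] == 0 && data[39] == 0 && data[40] == 1 && data[41] == 0xBF &&
           data[1024] == 0 && data[1025] == 0 && data[1026] == 1 && data[1027] == 0xBF;
}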
Example #3
static int recreate_video_proc(struct vf_instance *vf)
{
    struct vf_priv_s *p = vf->priv;
    HRESULT hr;

    destroy_video_proc(vf);

    D3D11_VIDEO_PROCESSOR_CONTENT_DESC vpdesc = {
        .InputFrameFormat = p->d3d_frame_format,
        .InputWidth = p->c_w,
        .InputHeight = p->c_h,
        .OutputWidth = p->params.w,
        .OutputHeight = p->params.h,
    };
    hr = ID3D11VideoDevice_CreateVideoProcessorEnumerator(p->video_dev, &vpdesc,
                                                          &p->vp_enum);
    if (FAILED(hr))
        goto fail;

    D3D11_VIDEO_PROCESSOR_CAPS caps;
    hr = ID3D11VideoProcessorEnumerator_GetVideoProcessorCaps(p->vp_enum, &caps);
    if (FAILED(hr))
        goto fail;

    MP_VERBOSE(vf, "Found %d rate conversion caps.\n",
               (int)caps.RateConversionCapsCount);

    int rindex = -1;
    for (int n = 0; n < caps.RateConversionCapsCount; n++) {
        D3D11_VIDEO_PROCESSOR_RATE_CONVERSION_CAPS rcaps;
        hr = ID3D11VideoProcessorEnumerator_GetVideoProcessorRateConversionCaps
                (p->vp_enum, n, &rcaps);
        if (FAILED(hr))
            goto fail;
        MP_VERBOSE(vf, "  - %d: 0x%08x\n", n, (unsigned)rcaps.ProcessorCaps);
        if (rcaps.ProcessorCaps & D3D11_VIDEO_PROCESSOR_PROCESSOR_CAPS_DEINTERLACE_BOB)
        {
            MP_VERBOSE(vf, "       (matching)\n");
            if (rindex < 0)
                rindex = n;
        }
    }

    if (rindex < 0) {
        MP_WARN(vf, "No video deinterlacing processor found.\n");
        rindex = 0;
    }

    hr = ID3D11VideoDevice_CreateVideoProcessor(p->video_dev, p->vp_enum, rindex,
                                                &p->video_proc);
    if (FAILED(hr)) {
        MP_ERR(vf, "Failed to create D3D11 video processor.\n");
        goto fail;
    }

    // Note: libavcodec does not support cropping left/top with hwaccel.
    RECT src_rc = {
        .right = p->params.w,
        .bottom = p->params.h,
    };
    ID3D11VideoContext_VideoProcessorSetStreamSourceRect(p->video_ctx,
                                                         p->video_proc,
                                                         0, TRUE, &src_rc);

    // This is supposed to stop drivers from messing up the video quality.
    ID3D11VideoContext_VideoProcessorSetStreamAutoProcessingMode(p->video_ctx,
                                                                 p->video_proc,
                                                                 0, FALSE);

    ID3D11VideoContext_VideoProcessorSetStreamOutputRate(p->video_ctx,
                                                         p->video_proc,
                                                         0,
                                                         D3D11_VIDEO_PROCESSOR_OUTPUT_RATE_NORMAL,
                                                         FALSE, 0);

    D3D11_VIDEO_PROCESSOR_COLOR_SPACE csp = {
        .YCbCr_Matrix = p->params.colorspace != MP_CSP_BT_601,
        .Nominal_Range = p->params.colorlevels == MP_CSP_LEVELS_TV ? 1 : 2,
    };
    ID3D11VideoContext_VideoProcessorSetStreamColorSpace(p->video_ctx,
                                                         p->video_proc,
                                                         0, &csp);
    if (p->out_rgb) {
        if (p->params.colorspace != MP_CSP_BT_601 &&
            p->params.colorspace != MP_CSP_BT_709)
        {
            MP_WARN(vf, "Unsupported video colorspace (%s/%s). Consider "
                    "disabling hardware decoding, or using "
                    "--hwdec=d3d11va-copy to get correct output.\n",
                    m_opt_choice_str(mp_csp_names, p->params.colorspace),
                    m_opt_choice_str(mp_csp_levels_names, p->params.colorlevels));
        }
    } else {
        ID3D11VideoContext_VideoProcessorSetOutputColorSpace(p->video_ctx,
                                                             p->video_proc,
                                                             &csp);
    }

    return 0;
fail:
    destroy_video_proc(vf);
    return -1;
}

static int render(struct vf_instance *vf)
{
    struct vf_priv_s *p = vf->priv;
    int res = -1;
    HRESULT hr;
    ID3D11VideoProcessorInputView *in_view = NULL;
    ID3D11VideoProcessorOutputView *out_view = NULL;
    struct mp_image *in = NULL, *out = NULL;
    out = mp_image_pool_get(p->pool, p->out_params.imgfmt, p->params.w, p->params.h);
    if (!out)
        goto cleanup;

    ID3D11Texture2D *d3d_out_tex = (void *)out->planes[1];

    in = mp_refqueue_get(p->queue, 0);
    if (!in)
        goto cleanup;
    ID3D11Texture2D *d3d_tex = (void *)in->planes[1];
    int d3d_subindex = (intptr_t)in->planes[2];

    mp_image_copy_attributes(out, in);

    D3D11_VIDEO_FRAME_FORMAT d3d_frame_format;
    if (!mp_refqueue_is_interlaced(p->queue)) {
        d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_PROGRESSIVE;
    } else if (mp_refqueue_top_field_first(p->queue)) {
        d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_TOP_FIELD_FIRST;
    } else {
        d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_BOTTOM_FIELD_FIRST;
    }

    D3D11_TEXTURE2D_DESC texdesc;
    ID3D11Texture2D_GetDesc(d3d_tex, &texdesc);
    if (!p->video_proc || p->c_w != texdesc.Width || p->c_h != texdesc.Height ||
        p->d3d_frame_format != d3d_frame_format)
    {
        p->c_w = texdesc.Width;
        p->c_h = texdesc.Height;
        p->d3d_frame_format = d3d_frame_format;
        if (recreate_video_proc(vf) < 0)
            goto cleanup;
    }

    if (!mp_refqueue_is_interlaced(p->queue)) {
        d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_PROGRESSIVE;
    } else if (mp_refqueue_is_top_field(p->queue)) {
        d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_TOP_FIELD_FIRST;
    } else {
        d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_BOTTOM_FIELD_FIRST;
    }

    ID3D11VideoContext_VideoProcessorSetStreamFrameFormat(p->video_ctx,
                                                          p->video_proc,
                                                          0, d3d_frame_format);

    D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC indesc = {
        .ViewDimension = D3D11_VPIV_DIMENSION_TEXTURE2D,
        .Texture2D = {
            .ArraySlice = d3d_subindex,
        },
    };
    hr = ID3D11VideoDevice_CreateVideoProcessorInputView(p->video_dev,
                                                         (ID3D11Resource *)d3d_tex,
                                                         p->vp_enum, &indesc,
                                                         &in_view);
    if (FAILED(hr)) {
        MP_ERR(vf, "Could not create ID3D11VideoProcessorInputView\n");
        goto cleanup;
    }

    D3D11_VIDEO_PROCESSOR_OUTPUT_VIEW_DESC outdesc = {
        .ViewDimension = D3D11_VPOV_DIMENSION_TEXTURE2D,
    };
    hr = ID3D11VideoDevice_CreateVideoProcessorOutputView(p->video_dev,
                                                          (ID3D11Resource *)d3d_out_tex,
                                                          p->vp_enum, &outdesc,
                                                          &out_view);
    if (FAILED(hr))
        goto cleanup;

    D3D11_VIDEO_PROCESSOR_STREAM stream = {
        .Enable = TRUE,
        .pInputSurface = in_view,
    };
    int frame = mp_refqueue_is_second_field(p->queue);
    hr = ID3D11VideoContext_VideoProcessorBlt(p->video_ctx, p->video_proc,
                                              out_view, frame, 1, &stream);
    if (FAILED(hr)) {
        MP_ERR(vf, "VideoProcessorBlt failed.\n");
        goto cleanup;
    }

    // Make sure the texture is updated correctly on the shared context.
    // (I'm not sure if this is correct, though it won't harm.)
    if (p->out_shared)
        ID3D11DeviceContext_Flush(p->device_ctx);

    res = 0;
cleanup:
    if (in_view)
        ID3D11VideoProcessorInputView_Release(in_view);
    if (out_view)
        ID3D11VideoProcessorOutputView_Release(out_view);
    if (res >= 0) {
        vf_add_output_frame(vf, out);
    } else {
        talloc_free(out);
    }
    mp_refqueue_next_field(p->queue);
    return res;
}

static int filter_out(struct vf_instance *vf)
{
    struct vf_priv_s *p = vf->priv;

    if (!mp_refqueue_has_output(p->queue))
        return 0;

    // no filtering
    if (!mp_refqueue_should_deint(p->queue) && !p->require_filtering) {
        struct mp_image *in = mp_refqueue_get(p->queue, 0);
        vf_add_output_frame(vf, mp_image_new_ref(in));
        mp_refqueue_next(p->queue);
        return 0;
    }

    return render(vf);
}

static int reconfig(struct vf_instance *vf, struct mp_image_params *in,
                    struct mp_image_params *out)
{
    struct vf_priv_s *p = vf->priv;

    flush_frames(vf);
    talloc_free(p->pool);
    p->pool = NULL;

    destroy_video_proc(vf);

    *out = *in;

    if (vf_next_query_format(vf, IMGFMT_D3D11VA) ||
        vf_next_query_format(vf, IMGFMT_D3D11NV12))
    {
        out->imgfmt = vf_next_query_format(vf, IMGFMT_D3D11VA)
                    ? IMGFMT_D3D11VA : IMGFMT_D3D11NV12;
        out->hw_subfmt = IMGFMT_NV12;
        p->out_format = DXGI_FORMAT_NV12;
        p->out_shared = false;
        p->out_rgb = false;
    } else {
        out->imgfmt = IMGFMT_D3D11RGB;
        out->hw_subfmt = IMGFMT_RGB0;
        p->out_format = DXGI_FORMAT_B8G8R8A8_UNORM;
        p->out_shared = true;
        p->out_rgb = true;
    }

    p->require_filtering = in->hw_subfmt != out->hw_subfmt;

    p->params = *in;
    p->out_params = *out;

    p->pool = mp_image_pool_new(20);
    mp_image_pool_set_allocator(p->pool, alloc_pool, vf);
    mp_image_pool_set_lru(p->pool);

    return 0;
}

static void uninit(struct vf_instance *vf)
{
    struct vf_priv_s *p = vf->priv;

    destroy_video_proc(vf);

    flush_frames(vf);
    mp_refqueue_free(p->queue);
    talloc_free(p->pool);

    if (p->video_ctx)
        ID3D11VideoContext_Release(p->video_ctx);

    if (p->video_dev)
        ID3D11VideoDevice_Release(p->video_dev);

    if (p->device_ctx)
        ID3D11DeviceContext_Release(p->device_ctx);

    if (p->vo_dev)
        ID3D11Device_Release(p->vo_dev);
}

static int query_format(struct vf_instance *vf, unsigned int imgfmt)
{
    if (imgfmt == IMGFMT_D3D11VA ||
        imgfmt == IMGFMT_D3D11NV12 ||
        imgfmt == IMGFMT_D3D11RGB)
    {
        return vf_next_query_format(vf, IMGFMT_D3D11VA) ||
               vf_next_query_format(vf, IMGFMT_D3D11NV12) ||
               vf_next_query_format(vf, IMGFMT_D3D11RGB);
    }
    return 0;
}

static bool test_conversion(int in, int out)
{
    return (in == IMGFMT_D3D11VA ||
            in == IMGFMT_D3D11NV12 ||
            in == IMGFMT_D3D11RGB) &&
           (out == IMGFMT_D3D11VA ||
            out == IMGFMT_D3D11NV12 ||
            out == IMGFMT_D3D11RGB);
}

static int control(struct vf_instance *vf, int request, void* data)
{
    struct vf_priv_s *p = vf->priv;
    switch (request){
    case VFCTRL_GET_DEINTERLACE:
        *(int*)data = !!p->deint_enabled;
        return true;
    case VFCTRL_SET_DEINTERLACE:
        p->deint_enabled = !!*(int*)data;
        return true;
    case VFCTRL_SEEK_RESET:
        flush_frames(vf);
        return true;
    default:
        return CONTROL_UNKNOWN;
    }
}

static int vf_open(vf_instance_t *vf)
{
    struct vf_priv_s *p = vf->priv;

    vf->reconfig = reconfig;
    vf->filter_ext = filter_ext;
    vf->filter_out = filter_out;
    vf->query_format = query_format;
    vf->uninit = uninit;
    vf->control = control;

    p->queue = mp_refqueue_alloc();

    p->vo_dev = hwdec_devices_load(vf->hwdec_devs, HWDEC_D3D11VA);
    if (!p->vo_dev)
        return 0;

    ID3D11Device_AddRef(p->vo_dev);

    HRESULT hr;

    hr = ID3D11Device_QueryInterface(p->vo_dev, &IID_ID3D11VideoDevice,
                                     (void **)&p->video_dev);
    if (FAILED(hr))
        goto fail;

    ID3D11Device_GetImmediateContext(p->vo_dev, &p->device_ctx);
    if (!p->device_ctx)
        goto fail;
    hr = ID3D11DeviceContext_QueryInterface(p->device_ctx, &IID_ID3D11VideoContext,
                                            (void **)&p->video_ctx);
    if (FAILED(hr))
        goto fail;

    return 1;

fail:
    uninit(vf);
    return 0;
}

#define OPT_BASE_STRUCT struct vf_priv_s
static const m_option_t vf_opts_fields[] = {
    OPT_FLAG("deint", deint_enabled, 0),
    OPT_FLAG("interlaced-only", interlaced_only, 0),
    {0}
};

const vf_info_t vf_info_d3d11vpp = {
    .description = "D3D11 Video Post-Process Filter",
    .name = "d3d11vpp",
    .test_conversion = test_conversion,
    .open = vf_open,
    .priv_size = sizeof(struct vf_priv_s),
    .priv_defaults = &(const struct vf_priv_s) {
        .deint_enabled = 1,
        .interlaced_only = 1,
    },
    .options = vf_opts_fields,
};
Example #4
File: vo_xv.c Project: 2ion/mpv
static int preinit(struct vo *vo)
{
    XvPortID xv_p;
    int busy_ports = 0;
    unsigned int i;
    struct xvctx *ctx = vo->priv;
    int xv_adaptor = ctx->cfg_xv_adaptor;

    if (!vo_x11_init(vo))
        return -1;

    if (!vo_x11_create_vo_window(vo, NULL, "xv"))
        goto error;

    struct vo_x11_state *x11 = vo->x11;

    /* check for Xvideo extension */
    unsigned int ver, rel, req, ev, err;
    if (Success != XvQueryExtension(x11->display, &ver, &rel, &req, &ev, &err)) {
        MP_ERR(vo, "Xv not supported by this X11 version/driver\n");
        goto error;
    }

    /* check for Xvideo support */
    if (Success !=
        XvQueryAdaptors(x11->display, DefaultRootWindow(x11->display),
                        &ctx->adaptors, &ctx->ai)) {
        MP_ERR(vo, "XvQueryAdaptors failed.\n");
        goto error;
    }

    /* check adaptors */
    if (ctx->xv_port) {
        int port_found;

        for (port_found = 0, i = 0; !port_found && i < ctx->adaptors; i++) {
            if ((ctx->ai[i].type & XvInputMask)
                && (ctx->ai[i].type & XvImageMask)) {
                for (xv_p = ctx->ai[i].base_id;
                     xv_p < ctx->ai[i].base_id + ctx->ai[i].num_ports;
                     ++xv_p) {
                    if (xv_p == ctx->xv_port) {
                        port_found = 1;
                        break;
                    }
                }
            }
        }
        if (port_found) {
            if (XvGrabPort(x11->display, ctx->xv_port, CurrentTime))
                ctx->xv_port = 0;
        } else {
            MP_WARN(vo, "Invalid port parameter, overriding with port 0.\n");
            ctx->xv_port = 0;
        }
    }

    for (i = 0; i < ctx->adaptors && ctx->xv_port == 0; i++) {
        /* check if adaptor number has been specified */
        if (xv_adaptor != -1 && xv_adaptor != i)
            continue;

        if ((ctx->ai[i].type & XvInputMask) && (ctx->ai[i].type & XvImageMask)) {
            for (xv_p = ctx->ai[i].base_id;
                 xv_p < ctx->ai[i].base_id + ctx->ai[i].num_ports; ++xv_p)
                if (!XvGrabPort(x11->display, xv_p, CurrentTime)) {
                    ctx->xv_port = xv_p;
                    MP_VERBOSE(vo, "Using Xv Adapter #%d (%s)\n",
                               i, ctx->ai[i].name);
                    break;
                } else {
                    MP_WARN(vo, "Could not grab port %i.\n", (int) xv_p);
                    ++busy_ports;
                }
        }
    }
    if (!ctx->xv_port) {
        if (busy_ports)
            MP_ERR(vo, "Xvideo ports busy.\n");
        else
            MP_ERR(vo, "No Xvideo support found.\n");
        goto error;
    }

    if (!xv_init_colorkey(vo)) {
        goto error;             // bail out, colorkey setup failed
    }
    xv_enable_vsync(vo);
    xv_get_max_img_dim(vo, &ctx->max_width, &ctx->max_height);

    ctx->fo = XvListImageFormats(x11->display, ctx->xv_port,
                                 (int *) &ctx->formats);

    MP_WARN(vo, "Warning: this legacy VO has bad quality and performance, "
                "and will in particular result in blurry OSD and subtitles. "
                "You should fix your graphic drivers, or not force the xv VO.\n");
    return 0;

  error:
    uninit(vo);                 // free resources
    return -1;
}
Example #5
File: ao_lavc.c Project: Floens/mpv
// this should round samples down to frame sizes
// return: number of samples played
static int play(struct ao *ao, void **data, int samples, int flags)
{
    struct priv *ac = ao->priv;
    struct encode_lavc_context *ectx = ao->encode_lavc_ctx;
    int bufpos = 0;
    double nextpts;
    double outpts;
    int orig_samples = samples;

    pthread_mutex_lock(&ectx->lock);

    if (!encode_lavc_start(ectx)) {
        MP_WARN(ao, "not ready yet for encoding audio\n");
        pthread_mutex_unlock(&ectx->lock);
        return 0;
    }

    double pts = ectx->last_audio_in_pts;
    pts += ectx->samples_since_last_pts / (double)ao->samplerate;

    size_t num_planes = af_fmt_is_planar(ao->format) ? ao->channels.num : 1;

    void *tempdata = NULL;
    void *padded[MP_NUM_CHANNELS];

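    // For the final chunk, pad the buffer with silence so the trailing
    // partial frame can still be encoded as one full codec frame.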
    if ((flags & AOPLAY_FINAL_CHUNK) && (samples % ac->aframesize)) {
        tempdata = talloc_new(NULL);
        size_t bytelen = samples * ao->sstride;
        size_t extralen = (ac->aframesize - 1) * ao->sstride;
        for (int n = 0; n < num_planes; n++) {
            padded[n] = talloc_size(tempdata, bytelen + extralen);
            memcpy(padded[n], data[n], bytelen);
            af_fill_silence((char *)padded[n] + bytelen, extralen, ao->format);
        }
        data = padded;
        samples = (bytelen + extralen) / ao->sstride;
    }

    if (pts == MP_NOPTS_VALUE) {
        MP_WARN(ao, "frame without pts, please report; synthesizing pts instead\n");
        // synthesize pts from previous expected next pts
        pts = ac->expected_next_pts;
    }

    if (ac->worst_time_base.den == 0) {
        //if (ac->codec->time_base.num / ac->codec->time_base.den >= ac->stream->time_base.num / ac->stream->time_base.den)
        if (ac->codec->time_base.num * (double) ac->stream->time_base.den >=
                ac->stream->time_base.num * (double) ac->codec->time_base.den) {
            MP_VERBOSE(ao, "NOTE: using codec time base (%d/%d) for pts "
                       "adjustment; the stream base (%d/%d) is not worse.\n",
                       (int)ac->codec->time_base.num,
                       (int)ac->codec->time_base.den,
                       (int)ac->stream->time_base.num,
                       (int)ac->stream->time_base.den);
            ac->worst_time_base = ac->codec->time_base;
            ac->worst_time_base_is_stream = 0;
        } else {
            MP_WARN(ao, "NOTE: not using codec time base (%d/%d) for pts "
                    "adjustment; the stream base (%d/%d) is worse.\n",
                    (int)ac->codec->time_base.num,
                    (int)ac->codec->time_base.den,
                    (int)ac->stream->time_base.num,
                    (int)ac->stream->time_base.den);
            ac->worst_time_base = ac->stream->time_base;
            ac->worst_time_base_is_stream = 1;
        }

        // NOTE: we use the following "axiom" of av_rescale_q:
        // if time base A is worse than time base B, then
        //   av_rescale_q(av_rescale_q(x, A, B), B, A) == x
        // this can be proven as long as av_rescale_q rounds to nearest, which
        // it currently does

        // av_rescale_q(x, A, B) * B = "round x*A to nearest multiple of B"
        // and:
        //    av_rescale_q(av_rescale_q(x, A, B), B, A) * A
        // == "round av_rescale_q(x, A, B)*B to nearest multiple of A"
        // == "round 'round x*A to nearest multiple of B' to nearest multiple of A"
        //
        // assume this fails. Then there is a value of x*A, for which the
        // nearest multiple of B is outside the range [(x-0.5)*A, (x+0.5)*A[.
        // Absurd, as this range MUST contain at least one multiple of B.
    }

    // Fix and apply the discontinuity pts offset.
    if (!ectx->options->rawts && ectx->options->copyts) {
        // fix the discontinuity pts offset
        nextpts = pts;
        if (ectx->discontinuity_pts_offset == MP_NOPTS_VALUE) {
            ectx->discontinuity_pts_offset = ectx->next_in_pts - nextpts;
        }
        else if (fabs(nextpts + ectx->discontinuity_pts_offset - ectx->next_in_pts) > 30) {
            MP_WARN(ao, "detected an unexpected discontinuity (pts jumped by "
                    "%f seconds)\n",
                    nextpts + ectx->discontinuity_pts_offset - ectx->next_in_pts);
            ectx->discontinuity_pts_offset = ectx->next_in_pts - nextpts;
        }

        outpts = pts + ectx->discontinuity_pts_offset;
    }
    else {
        outpts = pts;
    }

    // Shift pts by the pts offset first.
    outpts += encode_lavc_getoffset(ectx, ac->codec);

    while (samples - bufpos >= ac->aframesize) {
        void *start[MP_NUM_CHANNELS] = {0};
        for (int n = 0; n < num_planes; n++)
            start[n] = (char *)data[n] + bufpos * ao->sstride;
        encode(ao, outpts + bufpos / (double) ao->samplerate, start);
        bufpos += ac->aframesize;
    }

    // Calculate expected pts of next audio frame (input side).
    ac->expected_next_pts = pts + bufpos / (double) ao->samplerate;

    // Set next allowed input pts value (input side).
    if (!ectx->options->rawts && ectx->options->copyts) {
        nextpts = ac->expected_next_pts + ectx->discontinuity_pts_offset;
        if (nextpts > ectx->next_in_pts)
            ectx->next_in_pts = nextpts;
    }

    talloc_free(tempdata);

    int taken = FFMIN(bufpos, orig_samples);
    ectx->samples_since_last_pts += taken;

    pthread_mutex_unlock(&ectx->lock);

    if (flags & AOPLAY_FINAL_CHUNK) {
        if (bufpos < orig_samples) {
            MP_ERR(ao, "did not write enough data at the end\n");
        }
    } else {
        if (bufpos > orig_samples) {
            MP_ERR(ao, "audio buffer overflow (should never happen)\n");
        }
    }

    return taken;
}
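
Note: the long comment above relies on the round-trip behavior of libavutil's av_rescale_q() when converting between the "worse" (coarser) and "better" (finer) time base. A small standalone sketch that exercises the property numerically; the chosen time bases (NTSC frame ticks vs. milliseconds) are illustrative assumptions:

#include <stdio.h>
#include <stdint.h>
#include <libavutil/rational.h>
#include <libavutil/mathematics.h>

int main(void)
{
    AVRational coarse = {1001, 30000}; // "worse" time base (NTSC frame ticks)
    AVRational fine   = {1, 1000};     // "better" time base (milliseconds)
    for (int64_t x = 0; x < 100000; x++) {
        // Rescale to the finer base and back; with nearest rounding this
        // should reproduce x exactly, as argued in the comment above.
        int64_t y = av_rescale_q(av_rescale_q(x, coarse, fine), fine, coarse);
        if (y != x)
            printf("round-trip mismatch: %lld -> %lld\n", (long long)x, (long long)y);
    }
    return 0;
}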
Example #6
int reinit_video_chain(struct MPContext *mpctx)
{
    struct MPOpts *opts = mpctx->opts;
    assert(!(mpctx->initialized_flags & INITIALIZED_VCODEC));
    assert(!mpctx->d_video);
    struct track *track = mpctx->current_track[0][STREAM_VIDEO];
    struct sh_stream *sh = track ? track->stream : NULL;
    if (!sh)
        goto no_video;

    MP_VERBOSE(mpctx, "[V] fourcc:0x%X  size:%dx%d  fps:%5.3f\n",
               sh->format,
               sh->video->disp_w, sh->video->disp_h,
               sh->video->fps);

    //================== Init VIDEO (codec & libvo) ==========================
    if (!opts->fixed_vo || !(mpctx->initialized_flags & INITIALIZED_VO)) {
        mpctx->video_out = init_best_video_out(mpctx->global, mpctx->input,
                                               mpctx->osd,
                                               mpctx->encode_lavc_ctx);
        if (!mpctx->video_out) {
            MP_FATAL(mpctx, "Error opening/initializing "
                    "the selected video_out (-vo) device.\n");
            goto err_out;
        }
        mpctx->mouse_cursor_visible = true;
        mpctx->initialized_flags |= INITIALIZED_VO;
    }

    update_window_title(mpctx, true);

    struct dec_video *d_video = talloc_zero(NULL, struct dec_video);
    mpctx->d_video = d_video;
    d_video->global = mpctx->global;
    d_video->log = mp_log_new(d_video, mpctx->log, "!vd");
    d_video->opts = mpctx->opts;
    d_video->header = sh;
    d_video->fps = sh->video->fps;
    d_video->vo = mpctx->video_out;
    mpctx->initialized_flags |= INITIALIZED_VCODEC;

    vo_control(mpctx->video_out, VOCTRL_GET_HWDEC_INFO, &d_video->hwdec_info);

    recreate_video_filters(mpctx);

    if (!video_init_best_codec(d_video, opts->video_decoders))
        goto err_out;

    bool saver_state = opts->pause || !opts->stop_screensaver;
    vo_control(mpctx->video_out, saver_state ? VOCTRL_RESTORE_SCREENSAVER
                                             : VOCTRL_KILL_SCREENSAVER, NULL);

    vo_control(mpctx->video_out, mpctx->paused ? VOCTRL_PAUSE
                                               : VOCTRL_RESUME, NULL);

    mpctx->sync_audio_to_video = !sh->attached_picture;
    mpctx->vo_pts_history_seek_ts++;

    // If we switch on video again, ensure audio position matches up.
    if (mpctx->d_audio)
        mpctx->audio_status = STATUS_SYNCING;

    reset_video_state(mpctx);
    reset_subtitle_state(mpctx);

    if (opts->force_fps) {
        d_video->fps = opts->force_fps;
        MP_INFO(mpctx, "FPS forced to be %5.3f.\n", d_video->fps);
    }
    if (!sh->video->fps && !opts->force_fps && !opts->correct_pts) {
        MP_ERR(mpctx, "FPS not specified in the "
               "header or invalid, use the -fps option.\n");
    }
    update_fps(mpctx);

    return 1;

err_out:
no_video:
    uninit_player(mpctx, INITIALIZED_VCODEC | (opts->force_vo ? 0 : INITIALIZED_VO));
    mp_deselect_track(mpctx, track);
    handle_force_window(mpctx, true);
    MP_INFO(mpctx, "Video: no video\n");
    return 0;
}
Example #7
void write_video(struct MPContext *mpctx, double endpts)
{
    struct MPOpts *opts = mpctx->opts;
    struct vo *vo = mpctx->video_out;

    if (!mpctx->d_video)
        return;

    update_fps(mpctx);

    // Whether there's still at least 1 video frame that can be shown.
    // If false, it means we can reconfig the VO if needed (normally, this
    // would disrupt playback, so only do it on !still_playing).
    bool still_playing = vo_has_next_frame(vo, true);
    // For the last frame case (frame is being displayed).
    still_playing |= mpctx->playing_last_frame;
    still_playing |= mpctx->last_frame_duration > 0;

    double frame_time = 0;
    int r = update_video(mpctx, endpts, !still_playing, &frame_time);
    MP_TRACE(mpctx, "update_video: %d (still_playing=%d)\n", r, still_playing);

    if (r == VD_WAIT) // Demuxer will wake us up for more packets to decode.
        return;

    if (r < 0) {
        MP_FATAL(mpctx, "Could not initialize video chain.\n");
        int uninit = INITIALIZED_VCODEC;
        if (!opts->force_vo)
            uninit |= INITIALIZED_VO;
        uninit_player(mpctx, uninit);
        if (!mpctx->current_track[STREAM_AUDIO])
            mpctx->stop_play = PT_NEXT_ENTRY;
        mpctx->error_playing = true;
        handle_force_window(mpctx, true);
        return; // restart loop
    }

    if (r == VD_EOF) {
        if (!mpctx->playing_last_frame && mpctx->last_frame_duration > 0) {
            mpctx->time_frame += mpctx->last_frame_duration;
            mpctx->last_frame_duration = 0;
            mpctx->playing_last_frame = true;
            MP_VERBOSE(mpctx, "showing last frame\n");
        }
    }

    if (r == VD_NEW_FRAME) {
        MP_TRACE(mpctx, "frametime=%5.3f\n", frame_time);

        if (mpctx->video_status > STATUS_PLAYING)
            mpctx->video_status = STATUS_PLAYING;

        if (mpctx->video_status >= STATUS_READY) {
            mpctx->time_frame += frame_time / opts->playback_speed;
            adjust_sync(mpctx, frame_time);
        }
    } else if (r == VD_EOF && mpctx->playing_last_frame) {
        // Let video timing code continue displaying.
        mpctx->video_status = STATUS_DRAINING;
        MP_VERBOSE(mpctx, "still showing last frame\n");
    } else if (r <= 0) {
        // EOF or error
        mpctx->delay = 0;
        mpctx->last_av_difference = 0;
        mpctx->video_status = STATUS_EOF;
        MP_VERBOSE(mpctx, "video EOF\n");
        return;
    } else {
        if (mpctx->video_status > STATUS_PLAYING)
            mpctx->video_status = STATUS_PLAYING;

        // Decode more in next iteration.
        mpctx->sleeptime = 0;
        MP_TRACE(mpctx, "filtering more video\n");
    }

    // Actual playback starts when both audio and video are ready.
    if (mpctx->video_status == STATUS_READY)
        return;

    if (mpctx->paused && mpctx->video_status >= STATUS_READY)
        return;

    mpctx->time_frame -= get_relative_time(mpctx);
    double audio_pts = playing_audio_pts(mpctx);
    if (!mpctx->sync_audio_to_video || mpctx->video_status < STATUS_READY) {
        mpctx->time_frame = 0;
    } else if (mpctx->audio_status == STATUS_PLAYING &&
               mpctx->video_status == STATUS_PLAYING)
    {
        double buffered_audio = ao_get_delay(mpctx->ao);
        MP_TRACE(mpctx, "audio delay=%f\n", buffered_audio);

        if (opts->autosync) {
            /* Smooth reported playback position from AO by averaging
             * it with the value expected based on the previous value and
             * time elapsed since then. May help smooth video timing
             * with audio outputs that have inaccurate position reporting.
             * This is badly implemented; the behavior of the smoothing
             * now undesirably depends on how often this code runs
             * (mainly depends on video frame rate). */
            float predicted = (mpctx->delay / opts->playback_speed +
                                mpctx->time_frame);
            float difference = buffered_audio - predicted;
            buffered_audio = predicted + difference / opts->autosync;
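            // e.g. with autosync=30, predicted=0.100 s and a reported delay
            // of 0.130 s, the smoothed value is 0.100 + 0.030 / 30 = 0.101 s.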
        }

        mpctx->time_frame = (buffered_audio -
                                mpctx->delay / opts->playback_speed);
    } else {
        /* If we're more than 200 ms behind the right playback
         * position, don't try to speed up display of following
         * frames to catch up; continue with default speed from
         * the current frame instead.
         * If untimed is set always output frames immediately
         * without sleeping.
         */
        if (mpctx->time_frame < -0.2 || opts->untimed || vo->untimed)
            mpctx->time_frame = 0;
    }

    double vsleep = mpctx->time_frame - vo->flip_queue_offset;
    if (vsleep > 0.050) {
        mpctx->sleeptime = MPMIN(mpctx->sleeptime, vsleep - 0.040);
        return;
    }
    mpctx->sleeptime = 0;
    mpctx->playing_last_frame = false;

    // last frame case
    if (r != VD_NEW_FRAME)
        return;

    //=================== FLIP PAGE (VIDEO BLT): ======================


    mpctx->video_pts = mpctx->video_next_pts;
    mpctx->last_vo_pts = mpctx->video_pts;
    mpctx->playback_pts = mpctx->video_pts;

    update_subtitles(mpctx);
    update_osd_msg(mpctx);

    MP_STATS(mpctx, "vo draw frame");

    vo_new_frame_imminent(vo);

    MP_STATS(mpctx, "vo sleep");

    mpctx->time_frame -= get_relative_time(mpctx);
    mpctx->time_frame -= vo->flip_queue_offset;
    if (mpctx->time_frame > 0.001)
        mpctx->time_frame = timing_sleep(mpctx, mpctx->time_frame);
    mpctx->time_frame += vo->flip_queue_offset;

    int64_t t2 = mp_time_us();
    /* When playback speed is modified, it's possible to get pathological
     * cases with mpctx->time_frame negative enough to cause an
     * overflow in pts_us calculation, thus the MPMAX. */
    double time_frame = MPMAX(mpctx->time_frame, -1);
    int64_t pts_us = mpctx->last_time + time_frame * 1e6;
    int duration = -1;
    double pts2 = vo_get_next_pts(vo, 0); // this is the next frame PTS
    if (mpctx->video_pts != MP_NOPTS_VALUE && pts2 == MP_NOPTS_VALUE) {
        // Make up a frame duration. Using the frame rate is not a good
        // choice, since the frame rate could be unset/broken/random.
        float fps = mpctx->d_video->fps;
        double frame_duration = fps > 0 ? 1.0 / fps : 0;
        pts2 = mpctx->video_pts + MPCLAMP(frame_duration, 0.0, 5.0);
    }
    if (pts2 != MP_NOPTS_VALUE) {
        // expected A/V sync correction is ignored
        double diff = (pts2 - mpctx->video_pts);
        diff /= opts->playback_speed;
        if (mpctx->time_frame < 0)
            diff += mpctx->time_frame;
        if (diff < 0)
            diff = 0;
        if (diff > 10)
            diff = 10;
        duration = diff * 1e6;
        mpctx->last_frame_duration = diff;
    }
    if (mpctx->video_status != STATUS_PLAYING)
        duration = -1;

    MP_STATS(mpctx, "start flip");
    vo_flip_page(vo, pts_us | 1, duration);
    MP_STATS(mpctx, "end flip");

    if (audio_pts != MP_NOPTS_VALUE)
        MP_STATS(mpctx, "value %f ptsdiff", mpctx->video_pts - audio_pts);

    mpctx->last_vo_flip_duration = (mp_time_us() - t2) * 0.000001;
    if (vo->driver->flip_page_timed) {
        // No need to adjust sync based on flip speed
        mpctx->last_vo_flip_duration = 0;
        // For print_status - VO call finishing early is OK for sync
        mpctx->time_frame -= get_relative_time(mpctx);
    }
    mpctx->shown_vframes++;
    if (mpctx->video_status < STATUS_PLAYING)
        mpctx->video_status = STATUS_READY;
    update_avsync(mpctx);
    screenshot_flip(mpctx);

    mp_notify(mpctx, MPV_EVENT_TICK, NULL);

    if (!mpctx->sync_audio_to_video)
        mpctx->video_status = STATUS_EOF;
}
Example #8
static bool recreate_dispmanx(struct ra_ctx *ctx)
{
    struct priv *p = ctx->priv;
    int display_nr = 0;
    int layer = 0;

    MP_VERBOSE(ctx, "Recreating DISPMANX state...\n");

    destroy_dispmanx(ctx);

    p->display = vc_dispmanx_display_open(display_nr);
    p->update = vc_dispmanx_update_start(0);
    if (!p->display || !p->update) {
        MP_FATAL(ctx, "Could not get DISPMANX objects.\n");
        goto fail;
    }

    uint32_t dispw, disph;
    if (graphics_get_display_size(0, &dispw, &disph) < 0) {
        MP_FATAL(ctx, "Could not get display size.\n");
        goto fail;
    }
    p->w = dispw;
    p->h = disph;

    if (ctx->vo->opts->fullscreen) {
        p->x = p->y = 0;
    } else {
        struct vo_win_geometry geo;
        struct mp_rect screenrc = {0, 0, p->w, p->h};

        vo_calc_window_geometry(ctx->vo, &screenrc, &geo);

        mp_rect_intersection(&geo.win, &screenrc);

        p->x = geo.win.x0;
        p->y = geo.win.y0;
        p->w = geo.win.x1 - geo.win.x0;
        p->h = geo.win.y1 - geo.win.y0;
    }

    // dispmanx is like a neanderthal version of Wayland - you can add an
    // overlay any place on the screen.
    VC_RECT_T dst = {.x = p->x, .y = p->y, .width = p->w, .height = p->h};
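    // The source rect is given in 16.16 fixed point, hence the << 16.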
    VC_RECT_T src = {.width = p->w << 16, .height = p->h << 16};
    VC_DISPMANX_ALPHA_T alpha = {
        .flags = DISPMANX_FLAGS_ALPHA_FROM_SOURCE,
        .opacity = 0xFF,
    };
    p->window = vc_dispmanx_element_add(p->update, p->display, layer, &dst, 0,
                                        &src, DISPMANX_PROTECTION_NONE, &alpha,
                                        0, 0);
    if (!p->window) {
        MP_FATAL(ctx, "Could not add DISPMANX element.\n");
        goto fail;
    }

    vc_dispmanx_update_submit_sync(p->update);
    p->update = vc_dispmanx_update_start(0);

    p->egl_window = (EGL_DISPMANX_WINDOW_T){
        .element = p->window,
        .width = p->w,
        .height = p->h,
    };
    p->egl_surface = eglCreateWindowSurface(p->egl_display, p->egl_config,
                                            &p->egl_window, NULL);

    if (p->egl_surface == EGL_NO_SURFACE) {
        MP_FATAL(ctx, "Could not create EGL surface!\n");
        goto fail;
    }

    if (!eglMakeCurrent(p->egl_display, p->egl_surface, p->egl_surface,
                        p->egl_context))
    {
        MP_FATAL(ctx, "Failed to set context!\n");
        goto fail;
    }

    p->display_fps = 0;
    TV_GET_STATE_RESP_T tvstate;
    TV_DISPLAY_STATE_T tvstate_disp;
    if (!vc_tv_get_state(&tvstate) && !vc_tv_get_display_state(&tvstate_disp)) {
        if (tvstate_disp.state & (VC_HDMI_HDMI | VC_HDMI_DVI)) {
            p->display_fps = tvstate_disp.display.hdmi.frame_rate;

            HDMI_PROPERTY_PARAM_T param = {
                .property = HDMI_PROPERTY_PIXEL_CLOCK_TYPE,
            };
            if (!vc_tv_hdmi_get_property(&param) &&
                param.param1 == HDMI_PIXEL_CLOCK_TYPE_NTSC)
                p->display_fps = p->display_fps / 1.001;
        } else {
            p->display_fps = tvstate_disp.display.sdtv.frame_rate;
        }
    }

    p->win_params[0] = display_nr;
    p->win_params[1] = layer;
    p->win_params[2] = p->x;
    p->win_params[3] = p->y;

    ctx->vo->dwidth = p->w;
    ctx->vo->dheight = p->h;
    ra_gl_ctx_resize(ctx->swapchain, p->w, p->h, 0);

    ctx->vo->want_redraw = true;

    vo_event(ctx->vo, VO_EVENT_WIN_STATE);
    return true;

fail:
    destroy_dispmanx(ctx);
    return false;
}

static void rpi_swap_buffers(struct ra_ctx *ctx)
{
    struct priv *p = ctx->priv;
    eglSwapBuffers(p->egl_display, p->egl_surface);
}

static bool rpi_init(struct ra_ctx *ctx)
{
    struct priv *p = ctx->priv = talloc_zero(ctx, struct priv);

    bcm_host_init();

    vc_tv_register_callback(tv_callback, ctx);

    p->egl_display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    if (!eglInitialize(p->egl_display, NULL, NULL)) {
        MP_FATAL(ctx, "EGL failed to initialize.\n");
        goto fail;
    }

    if (!mpegl_create_context(ctx, p->egl_display, &p->egl_context, &p->egl_config))
        goto fail;

    if (!recreate_dispmanx(ctx))
        goto fail;

    mpegl_load_functions(&p->gl, ctx->log);

    struct ra_gl_ctx_params params = {
        .swap_buffers = rpi_swap_buffers,
        .native_display_type = "MPV_RPI_WINDOW",
        .native_display = p->win_params,
    };

    if (!ra_gl_ctx_init(ctx, &p->gl, params))
        goto fail;

    return true;

fail:
    rpi_uninit(ctx);
    return false;
}

static bool rpi_reconfig(struct ra_ctx *ctx)
{
    return recreate_dispmanx(ctx);
}

static struct mp_image *take_screenshot(struct ra_ctx *ctx)
{
    struct priv *p = ctx->priv;

    if (!p->display)
        return NULL;

    struct mp_image *img = mp_image_alloc(IMGFMT_BGR0, p->w, p->h);
    if (!img)
        return NULL;

    DISPMANX_RESOURCE_HANDLE_T resource =
        vc_dispmanx_resource_create(VC_IMAGE_ARGB8888,
                                    img->w | ((img->w * 4) << 16), img->h,
                                    &(int32_t){0});
    if (!resource)
        goto fail;

    if (vc_dispmanx_snapshot(p->display, resource, 0))
        goto fail;

    VC_RECT_T rc = {.width = img->w, .height = img->h};
    if (vc_dispmanx_resource_read_data(resource, &rc, img->planes[0], img->stride[0]))
        goto fail;

    vc_dispmanx_resource_delete(resource);
    return img;

fail:
    vc_dispmanx_resource_delete(resource);
    talloc_free(img);
    return NULL;
}

static int rpi_control(struct ra_ctx *ctx, int *events, int request, void *arg)
{
    struct priv *p = ctx->priv;

    switch (request) {
    case VOCTRL_SCREENSHOT_WIN:
        *(struct mp_image **)arg = take_screenshot(ctx);
        return VO_TRUE;
    case VOCTRL_FULLSCREEN:
        recreate_dispmanx(ctx);
        return VO_TRUE;
    case VOCTRL_CHECK_EVENTS:
        if (atomic_fetch_and(&p->reload_display, 0)) {
            MP_WARN(ctx, "Recovering from display mode switch...\n");
            recreate_dispmanx(ctx);
        }
        return VO_TRUE;
    case VOCTRL_GET_DISPLAY_FPS:
        *(double *)arg = p->display_fps;
        return VO_TRUE;
    }

    return VO_NOTIMPL;
}

const struct ra_ctx_fns ra_ctx_rpi = {
    .type           = "opengl",
    .name           = "rpi",
    .reconfig       = rpi_reconfig,
    .control        = rpi_control,
    .init           = rpi_init,
    .uninit         = rpi_uninit,
};
Example #9
int dvb_set_channel(stream_t *stream, int card, int n)
{
    dvb_channels_list *new_list;
    dvb_channel_t *channel;
    dvb_priv_t *priv = stream->priv;
    char buf[4096];
    dvb_config_t *conf = (dvb_config_t *) priv->config;
    int devno;
    int i;

    if ((card < 0) || (card > conf->count)) {
        MP_ERR(stream, "dvb_set_channel: INVALID CARD NUMBER: %d vs %d, abort\n",
               card, conf->count);
        return 0;
    }

    devno = conf->cards[card].devno;
    new_list = conf->cards[card].list;
    if ((n > new_list->NUM_CHANNELS) || (n < 0)) {
        MP_ERR(stream, "dvb_set_channel: INVALID CHANNEL NUMBER: %d, for "
               "card %d, abort\n", n, card);
        return 0;
    }
    channel = &(new_list->channels[n]);

    if (priv->is_on) {  //the fds are already open and we have to stop the demuxers
        for (i = 0; i < priv->demux_fds_cnt; i++)
            dvb_demux_stop(priv->demux_fds[i]);

        priv->retry = 0;
        //empty both the stream's and driver's buffer
        while (dvb_streaming_read(stream, buf, 4096) > 0) {}
        if (priv->card != card) {
            dvbin_close(stream);
            if (!dvb_open_devices(priv, devno, channel->pids_cnt)) {
                MP_ERR(stream, "DVB_SET_CHANNEL, COULDN'T OPEN DEVICES OF "
                       "CARD: %d, EXIT\n", card);
                return 0;
            }
        } else {
            // close all demux_fds with pos > pids required for the new channel
            // or open other demux_fds if we have too few
            if (!dvb_fix_demuxes(priv, channel->pids_cnt))
                return 0;
        }
    } else {
        if (!dvb_open_devices(priv, devno, channel->pids_cnt)) {
            MP_ERR(stream, "DVB_SET_CHANNEL2, COULDN'T OPEN DEVICES OF "
                   "CARD: %d, EXIT\n", card);
            return 0;
        }
    }

    priv->card = card;
    priv->list = new_list;
    priv->retry = 5;
    new_list->current = n;
    priv->fd = priv->dvr_fd;
    MP_VERBOSE(stream, "DVB_SET_CHANNEL: new channel name=%s, card: %d, "
               "channel %d\n", channel->name, card, n);

    stream_drop_buffers(stream);

    if (channel->freq != priv->last_freq) {
        if (!dvb_tune(priv, channel->freq, channel->pol, channel->srate,
                      channel->diseqc, channel->tone,
                      channel->is_dvb_s2, channel->stream_id, channel->inv,
                      channel->mod, channel->gi,
                      channel->trans, channel->bw, channel->cr, channel->cr_lp,
                      channel->hier, priv->cfg_timeout))
            return 0;
    }

    priv->last_freq = channel->freq;
    priv->is_on = 1;

    if (channel->service_id != -1) {
        /* We need the PMT-PID in addition.
           If it has not yet been resolved, do it now. */
        for (i = 0; i < channel->pids_cnt; i++) {
            if (channel->pids[i] == -1) {
                MP_VERBOSE(stream, "DVB_SET_CHANNEL: PMT-PID for service %d "
                           "not resolved yet, parsing PAT...\n",
                           channel->service_id);
                int pmt_pid = dvb_get_pmt_pid(priv, card, channel->service_id);
                MP_VERBOSE(stream, "DVB_SET_CHANNEL: Found PMT-PID: %d\n",
                           pmt_pid);
                channel->pids[i] = pmt_pid;
            }
        }
    }

    // set demux filters and restart the stream
    for (i = 0; i < channel->pids_cnt; i++) {
        if (channel->pids[i] == -1) {
            // In case PMT was not resolved, skip it here.
            MP_ERR(stream, "DVB_SET_CHANNEL: PMT-PID not found, "
                           "teletext-decoding may fail.\n");
        } else {
            if (!dvb_set_ts_filt(priv, priv->demux_fds[i], channel->pids[i],
                                 DMX_PES_OTHER))
                return 0;
        }
    }

    return 1;
}
Example #10
int dvb_set_channel(stream_t *stream, int card, int n)
{
        dvb_channels_list *new_list;
        dvb_channel_t *channel;
        dvb_priv_t *priv = stream->priv;
        char buf[4096];
        dvb_config_t *conf = (dvb_config_t *) priv->config;
        int devno;
        int i;

        if((card < 0) || (card > conf->count))
        {
                MP_ERR(stream, "dvb_set_channel: INVALID CARD NUMBER: %d vs %d, abort\n", card, conf->count);
                return 0;
        }

        devno = conf->cards[card].devno;
        new_list = conf->cards[card].list;
        if((n > new_list->NUM_CHANNELS) || (n < 0))
        {
                MP_ERR(stream, "dvb_set_channel: INVALID CHANNEL NUMBER: %d, for card %d, abort\n", n, card);
                return 0;
        }
        channel = &(new_list->channels[n]);

        if(priv->is_on) //the fds are already open and we have to stop the demuxers
        {
                for(i = 0; i < priv->demux_fds_cnt; i++)
                        dvb_demux_stop(priv->demux_fds[i]);

                priv->retry = 0;
                while(dvb_streaming_read(stream, buf, 4096) > 0) {}     //empty both the stream's and driver's buffer
                if(priv->card != card)
                {
                        dvbin_close(stream);
                        if(! dvb_open_devices(priv, devno, channel->pids_cnt))
                        {
                                MP_ERR(stream, "DVB_SET_CHANNEL, COULDN'T OPEN DEVICES OF CARD: %d, EXIT\n", card);
                                return 0;
                        }
                }
                else    //close all demux_fds with pos > pids required for the new channel or open other demux_fds if we have too few
                {
                        if(! dvb_fix_demuxes(priv, channel->pids_cnt))
                                return 0;
                }
        }
        else
        {
                if(! dvb_open_devices(priv, devno, channel->pids_cnt))
                {
                        MP_ERR(stream, "DVB_SET_CHANNEL2, COULDN'T OPEN DEVICES OF CARD: %d, EXIT\n", card);
                        return 0;
                }
        }

        priv->card = card;
        priv->list = new_list;
        priv->retry = 5;
        new_list->current = n;
        priv->fd = priv->dvr_fd;
        MP_VERBOSE(stream, "DVB_SET_CHANNEL: new channel name=%s, card: %d, channel %d\n", channel->name, card, n);

        stream->buf_pos = stream->buf_len = 0;
        stream->pos = 0;

        if(channel->freq != priv->last_freq)
                if (! dvb_tune(priv, channel->freq, channel->pol, channel->srate, channel->diseqc, channel->tone,
                        channel->inv, channel->mod, channel->gi, channel->trans, channel->bw, channel->cr, channel->cr_lp, channel->hier, priv->cfg_timeout))
                        return 0;

        priv->last_freq = channel->freq;
        priv->is_on = 1;

        //set demux filters and restart the stream
        for(i = 0; i < channel->pids_cnt; i++)
        {
                if(! dvb_set_ts_filt(priv,priv->demux_fds[i], channel->pids[i], DMX_PES_OTHER))
                        return 0;
        }

        return 1;
}
Example #11
File: x11.c Project: raeisi/mpv
static bool config_window_x11(struct MPGLContext *ctx, int flags)
{
    struct vo *vo = ctx->vo;
    struct glx_context *glx_ctx = ctx->priv;

    int glx_major, glx_minor;

    if (!glXQueryVersion(vo->x11->display, &glx_major, &glx_minor)) {
        MP_ERR(vo, "GLX not found.\n");
        return false;
    }
    // FBConfigs were added in GLX version 1.3.
    if (MPGL_VER(glx_major, glx_minor) <  MPGL_VER(1, 3)) {
        MP_ERR(vo, "GLX version older than 1.3.\n");
        return false;
    }

    int glx_attribs[] = {
        GLX_X_RENDERABLE, True,
        GLX_X_VISUAL_TYPE, GLX_TRUE_COLOR,
        GLX_RED_SIZE, 1,
        GLX_GREEN_SIZE, 1,
        GLX_BLUE_SIZE, 1,
        GLX_ALPHA_SIZE, 0,
        GLX_DOUBLEBUFFER, True,
        None
    };
    GLXFBConfig fbc = NULL;
    if (flags & VOFLAG_ALPHA) {
        set_glx_attrib(glx_attribs, GLX_ALPHA_SIZE, 1);
        fbc = select_fb_config(vo, glx_attribs, flags);
        if (!fbc) {
            set_glx_attrib(glx_attribs, GLX_ALPHA_SIZE, 0);
            flags &= ~VOFLAG_ALPHA;
        }
    }
    if (!fbc)
        fbc = select_fb_config(vo, glx_attribs, flags);
    if (!fbc) {
        MP_ERR(vo, "no GLX support present\n");
        return false;
    }
    MP_VERBOSE(vo, "GLX chose FB config with ID 0x%x\n", (int)(intptr_t)fbc);

    glx_ctx->fbc = fbc;
    glx_ctx->vinfo = glXGetVisualFromFBConfig(vo->x11->display, fbc);
    if (glx_ctx->vinfo) {
        MP_VERBOSE(vo, "GLX chose visual with ID 0x%x\n",
                   (int)glx_ctx->vinfo->visualid);
    } else {
        MP_WARN(vo, "Selected GLX FB config has no associated X visual\n");
    }


    glXGetFBConfigAttrib(vo->x11->display, fbc, GLX_RED_SIZE, &ctx->depth_r);
    glXGetFBConfigAttrib(vo->x11->display, fbc, GLX_GREEN_SIZE, &ctx->depth_g);
    glXGetFBConfigAttrib(vo->x11->display, fbc, GLX_BLUE_SIZE, &ctx->depth_b);

    if (!vo_x11_create_vo_window(vo, glx_ctx->vinfo, "gl"))
        return false;

    bool success = false;
    if (!(flags & VOFLAG_GLES)) {
        success = create_context_x11_gl3(ctx, flags, 300, false);
        if (!success)
            success = create_context_x11_old(ctx);
    }
    if (!success) // try ES
        success = create_context_x11_gl3(ctx, flags, 200, true);
    if (success && !glXIsDirect(vo->x11->display, glx_ctx->context))
        ctx->gl->mpgl_caps |= MPGL_CAP_SW;
    return success;
}
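
Note: set_glx_attrib() and select_fb_config() are helpers outside this excerpt. A plausible sketch of set_glx_attrib() (an assumption about its shape, not necessarily the project's exact definition): scan the name/value pairs and overwrite the value for the given attribute.

// Hypothetical sketch: attribute lists are laid out as {name, value, ..., None}.
static void set_glx_attrib(int *attribs, int name, int value)
{
    for (int n = 0; attribs[n * 2 + 0] != None; n++) {
        if (attribs[n * 2 + 0] == name) {
            attribs[n * 2 + 1] = value;
            break;
        }
    }
}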
Example #12
File: ao_alsa.c Project: DZW314/mpv
static int control(struct ao *ao, enum aocontrol cmd, void *arg)
{
    struct priv *p = ao->priv;
    snd_mixer_t *handle = NULL;
    switch (cmd) {
    case AOCONTROL_GET_MUTE:
    case AOCONTROL_SET_MUTE:
    case AOCONTROL_GET_VOLUME:
    case AOCONTROL_SET_VOLUME:
    {
        int err;
        snd_mixer_elem_t *elem;
        snd_mixer_selem_id_t *sid;

        long pmin, pmax;
        long get_vol, set_vol;
        float f_multi;

        if (!af_fmt_is_pcm(ao->format))
            return CONTROL_FALSE;

        snd_mixer_selem_id_alloca(&sid);

        snd_mixer_selem_id_set_index(sid, p->cfg_mixer_index);
        snd_mixer_selem_id_set_name(sid, p->cfg_mixer_name);

        err = snd_mixer_open(&handle, 0);
        CHECK_ALSA_ERROR("Mixer open error");

        err = snd_mixer_attach(handle, p->cfg_mixer_device);
        CHECK_ALSA_ERROR("Mixer attach error");

        err = snd_mixer_selem_register(handle, NULL, NULL);
        CHECK_ALSA_ERROR("Mixer register error");

        err = snd_mixer_load(handle);
        CHECK_ALSA_ERROR("Mixer load error");

        elem = snd_mixer_find_selem(handle, sid);
        if (!elem) {
            MP_VERBOSE(ao, "Unable to find simple control '%s',%i.\n",
                       snd_mixer_selem_id_get_name(sid),
                       snd_mixer_selem_id_get_index(sid));
            goto alsa_error;
        }

        snd_mixer_selem_get_playback_volume_range(elem, &pmin, &pmax);
        f_multi = (100 / (float)(pmax - pmin));

        switch (cmd) {
        case AOCONTROL_SET_VOLUME: {
            ao_control_vol_t *vol = arg;
            set_vol = vol->left / f_multi + pmin + 0.5;

            err = snd_mixer_selem_set_playback_volume
                    (elem, SND_MIXER_SCHN_FRONT_LEFT, set_vol);
            CHECK_ALSA_ERROR("Error setting left channel");
            MP_DBG(ao, "left=%li, ", set_vol);

            set_vol = vol->right / f_multi + pmin + 0.5;

            err = snd_mixer_selem_set_playback_volume
                    (elem, SND_MIXER_SCHN_FRONT_RIGHT, set_vol);
            CHECK_ALSA_ERROR("Error setting right channel");
            MP_DBG(ao, "right=%li, pmin=%li, pmax=%li, mult=%f\n",
                   set_vol, pmin, pmax, f_multi);
            break;
        }
        case AOCONTROL_GET_VOLUME: {
            ao_control_vol_t *vol = arg;
            snd_mixer_selem_get_playback_volume
                (elem, SND_MIXER_SCHN_FRONT_LEFT, &get_vol);
            vol->left = (get_vol - pmin) * f_multi;
            snd_mixer_selem_get_playback_volume
                (elem, SND_MIXER_SCHN_FRONT_RIGHT, &get_vol);
            vol->right = (get_vol - pmin) * f_multi;
            MP_DBG(ao, "left=%f, right=%f\n", vol->left, vol->right);
            break;
        }
        case AOCONTROL_SET_MUTE: {
            bool *mute = arg;
            if (!snd_mixer_selem_has_playback_switch(elem))
                goto alsa_error;
            if (!snd_mixer_selem_has_playback_switch_joined(elem)) {
                snd_mixer_selem_set_playback_switch
                    (elem, SND_MIXER_SCHN_FRONT_RIGHT, !*mute);
            }
            snd_mixer_selem_set_playback_switch
                (elem, SND_MIXER_SCHN_FRONT_LEFT, !*mute);
            break;
        }
        case AOCONTROL_GET_MUTE: {
            bool *mute = arg;
            if (!snd_mixer_selem_has_playback_switch(elem))
                goto alsa_error;
            int tmp = 1;
            snd_mixer_selem_get_playback_switch
                (elem, SND_MIXER_SCHN_FRONT_LEFT, &tmp);
            *mute = !tmp;
            if (!snd_mixer_selem_has_playback_switch_joined(elem)) {
                snd_mixer_selem_get_playback_switch
                    (elem, SND_MIXER_SCHN_FRONT_RIGHT, &tmp);
                *mute &= !tmp;
            }
            break;
        }
        }
        snd_mixer_close(handle);
        return CONTROL_OK;
    }

    } //end switch
    return CONTROL_UNKNOWN;

alsa_error:
    if (handle)
        snd_mixer_close(handle);
    return CONTROL_ERROR;
}
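
The volume branches above map ALSA's raw range [pmin, pmax] onto a 0-100 scale through f_multi = 100 / (pmax - pmin). Below is a self-contained sketch of that arithmetic; the helper names and the example mixer range are illustrative, not ALSA or mpv API.

#include <stdio.h>

/* Illustrative only: the same percent <-> raw mapping used above, with
 * + 0.5 rounding to the nearest raw step when setting the volume. */
static long percent_to_raw(double percent, long pmin, long pmax)
{
    double f_multi = 100.0 / (double)(pmax - pmin);
    return (long)(percent / f_multi + pmin + 0.5);
}

static double raw_to_percent(long raw, long pmin, long pmax)
{
    double f_multi = 100.0 / (double)(pmax - pmin);
    return (raw - pmin) * f_multi;
}

int main(void)
{
    long pmin = -10239, pmax = 0;   // an example mixer range, chosen arbitrarily
    long raw = percent_to_raw(50.0, pmin, pmax);
    printf("50%% -> raw %ld -> %.1f%%\n", raw, raw_to_percent(raw, pmin, pmax));
    return 0;
}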
Example #13
0
static int glx_init(struct MPGLContext *ctx, int flags)
{
    struct vo *vo = ctx->vo;
    struct glx_context *glx_ctx = ctx->priv;

    if (!vo_x11_init(ctx->vo))
        goto uninit;

    int glx_major, glx_minor;

    if (!glXQueryVersion(vo->x11->display, &glx_major, &glx_minor)) {
        MP_ERR(vo, "GLX not found.\n");
        goto uninit;
    }
    // FBConfigs were added in GLX version 1.3.
    if (MPGL_VER(glx_major, glx_minor) <  MPGL_VER(1, 3)) {
        MP_ERR(vo, "GLX version older than 1.3.\n");
        goto uninit;
    }

    int glx_attribs[] = {
        GLX_X_RENDERABLE, True,
        GLX_X_VISUAL_TYPE, GLX_TRUE_COLOR,
        GLX_RED_SIZE, 1,
        GLX_GREEN_SIZE, 1,
        GLX_BLUE_SIZE, 1,
        GLX_ALPHA_SIZE, 0,
        GLX_DOUBLEBUFFER, True,
        None
    };
    GLXFBConfig fbc = NULL;
    if (flags & VOFLAG_ALPHA) {
        set_glx_attrib(glx_attribs, GLX_ALPHA_SIZE, 1);
        fbc = select_fb_config(vo, glx_attribs, flags);
        if (!fbc) {
            set_glx_attrib(glx_attribs, GLX_ALPHA_SIZE, 0);
            flags &= ~VOFLAG_ALPHA;
        }
    }
    if (!fbc)
        fbc = select_fb_config(vo, glx_attribs, flags);
    if (!fbc) {
        MP_ERR(vo, "no GLX support present\n");
        goto uninit;
    }

    int fbid = -1;
    if (!glXGetFBConfigAttrib(vo->x11->display, fbc, GLX_FBCONFIG_ID, &fbid))
        MP_VERBOSE(vo, "GLX chose FB config with ID 0x%x\n", fbid);

    glx_ctx->fbc = fbc;
    glx_ctx->vinfo = glXGetVisualFromFBConfig(vo->x11->display, fbc);
    if (glx_ctx->vinfo) {
        MP_VERBOSE(vo, "GLX chose visual with ID 0x%x\n",
                   (int)glx_ctx->vinfo->visualid);
    } else {
        MP_WARN(vo, "Selected GLX FB config has no associated X visual\n");
    }

    if (!vo_x11_create_vo_window(vo, glx_ctx->vinfo, "gl"))
        goto uninit;

    bool success = false;
    if (!(flags & VOFLAG_GLES)) {
        for (int n = 0; mpgl_preferred_gl_versions[n]; n++) {
            int version = mpgl_preferred_gl_versions[n];
            MP_VERBOSE(vo, "Creating OpenGL %d.%d context...\n",
                       MPGL_VER_P(version));
            if (version >= 300) {
                success = create_context_x11_gl3(ctx, flags, version, false);
            } else {
                success = create_context_x11_old(ctx);
            }
            if (success)
                break;
        }
    }
    if (!success) // try ES
        success = create_context_x11_gl3(ctx, flags, 200, true);
    if (success && !glXIsDirect(vo->x11->display, glx_ctx->context))
        ctx->gl->mpgl_caps |= MPGL_CAP_SW;
    if (!success)
        goto uninit;

    return 0;

uninit:
    glx_uninit(ctx);
    return -1;
}
Example #14
0
static bool wayland_vk_init(struct ra_ctx *ctx)
{
    struct priv *p = ctx->priv = talloc_zero(ctx, struct priv);
    struct mpvk_ctx *vk = &p->vk;
    int msgl = ctx->opts.probing ? MSGL_V : MSGL_ERR;

    if (!mpvk_instance_init(vk, ctx->log, VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
                            ctx->opts.debug))
        goto error;

    if (!vo_wayland_init(ctx->vo))
        goto error;

    VkWaylandSurfaceCreateInfoKHR wlinfo = {
         .sType   = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR,
         .display = ctx->vo->wl->display,
         .surface = ctx->vo->wl->surface,
    };

    VkResult res = vkCreateWaylandSurfaceKHR(vk->inst, &wlinfo, MPVK_ALLOCATOR,
                                             &vk->surf);
    if (res != VK_SUCCESS) {
        MP_MSG(ctx, msgl, "Failed creating Wayland surface: %s\n", vk_err(res));
        goto error;
    }

    /* Because Wayland clients render whenever they receive a callback from
     * the compositor, and the compositor usually stops sending callbacks once
     * the surface is no longer visible, using FIFO here would mean the entire
     * player blocks on acquiring swapchain images. Hence, use MAILBOX to
     * guarantee that there will always be a swapchain image available and the
     * player won't block waiting on one. */
    if (!ra_vk_ctx_init(ctx, vk, VK_PRESENT_MODE_MAILBOX_KHR))
        goto error;

    return true;

error:
    wayland_vk_uninit(ctx);
    return false;
}
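
The comment above motivates VK_PRESENT_MODE_MAILBOX_KHR over FIFO. A minimal sketch of the usual selection pattern, assuming a hypothetical helper name (pick_present_mode is not part of mpv): prefer MAILBOX if the surface advertises it, otherwise fall back to FIFO, which the Vulkan spec requires every surface to support.

#include <vulkan/vulkan.h>

/* Hypothetical helper (not mpv code): query the surface's present modes and
 * prefer MAILBOX, falling back to the always-available FIFO. */
static VkPresentModeKHR pick_present_mode(VkPhysicalDevice phys, VkSurfaceKHR surf)
{
    uint32_t count = 0;
    vkGetPhysicalDeviceSurfacePresentModesKHR(phys, surf, &count, NULL);
    VkPresentModeKHR modes[16];
    if (count > 16)
        count = 16;                      // keep the sketch allocation-free
    vkGetPhysicalDeviceSurfacePresentModesKHR(phys, surf, &count, modes);
    for (uint32_t i = 0; i < count; i++) {
        if (modes[i] == VK_PRESENT_MODE_MAILBOX_KHR)
            return VK_PRESENT_MODE_MAILBOX_KHR;
    }
    return VK_PRESENT_MODE_FIFO_KHR;
}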

static void resize(struct ra_ctx *ctx)
{
    struct vo_wayland_state *wl = ctx->vo->wl;

    MP_VERBOSE(wl, "Handling resize on the vk side\n");

    const int32_t width = wl->scaling*mp_rect_w(wl->geometry);
    const int32_t height = wl->scaling*mp_rect_h(wl->geometry);

    wl_surface_set_buffer_scale(wl->surface, wl->scaling);

    wl->vo->dwidth  = width;
    wl->vo->dheight = height;
}

static bool wayland_vk_reconfig(struct ra_ctx *ctx)
{
    if (!vo_wayland_reconfig(ctx->vo))
        return false;

    return true;
}

static int wayland_vk_control(struct ra_ctx *ctx, int *events, int request, void *arg)
{
    int ret = vo_wayland_control(ctx->vo, events, request, arg);
    if (*events & VO_EVENT_RESIZE) {
        resize(ctx);
        if (ra_vk_ctx_resize(ctx->swapchain, ctx->vo->dwidth, ctx->vo->dheight))
            return VO_ERROR;
    }
    return ret;
}
Example #15
0
static bool egl_create_context(struct vo_wayland_state *wl,
                               MPGLContext *ctx,
                               bool enable_alpha)
{
    EGLint major, minor, n;

    GL *gl = ctx->gl;
    const char *eglstr = "";

    if (!(wl->egl_context.egl.dpy = eglGetDisplay(wl->display.display)))
        return false;

    EGLint config_attribs[] = {
        EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
        EGL_RED_SIZE, 1,
        EGL_GREEN_SIZE, 1,
        EGL_BLUE_SIZE, 1,
        EGL_ALPHA_SIZE, enable_alpha,
        EGL_DEPTH_SIZE, 1,
        EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT,
        EGL_NONE
    };

    /* major and minor here returns the supported EGL version (e.g.: 1.4) */
    if (eglInitialize(wl->egl_context.egl.dpy, &major, &minor) != EGL_TRUE)
        return false;

    MP_VERBOSE(wl, "EGL version %d.%d\n", major, minor);

    EGLint context_attribs[] = {
        EGL_CONTEXT_MAJOR_VERSION_KHR,
        MPGL_VER_GET_MAJOR(ctx->requested_gl_version),
        EGL_NONE
    };

    if (eglBindAPI(EGL_OPENGL_API) != EGL_TRUE)
        return false;

    eglChooseConfig(wl->egl_context.egl.dpy, config_attribs,
                    &wl->egl_context.egl.conf, 1, &n);

    wl->egl_context.egl.ctx = eglCreateContext(wl->egl_context.egl.dpy,
                                               wl->egl_context.egl.conf,
                                               EGL_NO_CONTEXT,
                                               context_attribs);
    if (!wl->egl_context.egl.ctx) {
        /* fallback to any GL version */
        MP_WARN(wl, "can't create context for requested OpenGL version: "
                    "fall back to any version available\n");
        context_attribs[0] = EGL_NONE;
        wl->egl_context.egl.ctx = eglCreateContext(wl->egl_context.egl.dpy,
                                                   wl->egl_context.egl.conf,
                                                   EGL_NO_CONTEXT,
                                                   context_attribs);

        if (!wl->egl_context.egl.ctx)
            return false;
    }

    eglMakeCurrent(wl->egl_context.egl.dpy, NULL, NULL, wl->egl_context.egl.ctx);

    eglstr = eglQueryString(wl->egl_context.egl.dpy, EGL_EXTENSIONS);

    mpgl_load_functions(gl, (void*(*)(const GLubyte*))eglGetProcAddress, eglstr,
                        wl->log);

    return true;
}
Example #16
0
static int dvb_open(stream_t *stream)
{
    // I don't force the file format because, although it's almost always TS,
    // there are some providers that stream an IP multicast with M$ Mpeg4 inside
    dvb_priv_t *priv = stream->priv;
    priv->log = stream->log;
    dvb_priv_t *p = priv;
    char *progname;
    int tuner_type = 0, i;

    priv->fe_fd = priv->sec_fd = priv->dvr_fd = -1;
    priv->config = dvb_get_config(stream);
    if (priv->config == NULL) {
        MP_ERR(stream, "DVB CONFIGURATION IS EMPTY, exit\n");
        return STREAM_ERROR;
    }

    priv->card = -1;
    for (i = 0; i < priv->config->count; i++) {
        if (priv->config->cards[i].devno + 1 == p->cfg_card) {
            priv->card = i;
            break;
        }
    }

    if (priv->card == -1) {
        MP_ERR(stream, "NO CONFIGURATION FOUND FOR CARD N. %d, exit\n",
               p->cfg_card);
        return STREAM_ERROR;
    }
    priv->timeout = p->cfg_timeout;

    tuner_type = priv->config->cards[priv->card].type;

    if (tuner_type == 0) {
        MP_VERBOSE(stream,
                   "OPEN_DVB: UNKNOWN OR UNDETECTABLE TUNER TYPE, EXIT\n");
        return STREAM_ERROR;
    }

    priv->tuner_type = tuner_type;

    MP_VERBOSE(stream, "OPEN_DVB: prog=%s, card=%d, type=%d\n",
               p->cfg_prog, priv->card + 1, priv->tuner_type);

    priv->list = priv->config->cards[priv->card].list;

    if ((!strcmp(p->cfg_prog, "")) && (priv->list != NULL)) {
        progname = priv->list->channels[0].name;
    } else {
        progname = p->cfg_prog;
    }


    if (!dvb_streaming_start(stream, tuner_type, progname))
        return STREAM_ERROR;

    stream->type = STREAMTYPE_DVB;
    stream->fill_buffer = dvb_streaming_read;
    stream->close = dvbin_close;
    stream->control = dvbin_stream_control;
    stream->streaming = true;

    stream->demuxer = "lavf";
    stream->lavf_type = "mpegts";

    return STREAM_OK;
}
Example #17
0
// Initialization and runtime control
static int control(struct af_instance *af, int cmd, void *arg)
{
    af_scaletempo_t *s = af->priv;
    switch (cmd) {
    case AF_CONTROL_REINIT: {
        struct mp_audio *data = (struct mp_audio *)arg;
        float srate = data->rate / 1000;
        int nch = data->nch;
        int use_int = 0;

        MP_VERBOSE(af, "[scaletempo] %.3f speed * %.3f scale_nominal = %.3f\n",
               s->speed, s->scale_nominal, s->scale);

        mp_audio_force_interleaved_format(data);
        mp_audio_copy_config(af->data, data);

        if (s->scale == 1.0) {
            if (s->speed_tempo && s->speed_pitch)
                return AF_DETACH;
            af->delay = 0;
            af->mul = 1;
            return af_test_output(af, data);
        }

        if (data->format == AF_FORMAT_S16) {
            use_int = 1;
        } else {
            mp_audio_set_format(af->data, AF_FORMAT_FLOAT);
        }
        int bps = af->data->bps;

        s->frames_stride        = srate * s->ms_stride;
        s->bytes_stride         = s->frames_stride * bps * nch;
        s->frames_stride_scaled = s->scale * s->frames_stride;
        s->frames_stride_error  = 0;
        af->mul = 1.0 / s->scale;
        af->delay = 0;

        int frames_overlap = s->frames_stride * s->percent_overlap;
        if (frames_overlap <= 0) {
            s->bytes_standing   = s->bytes_stride;
            s->samples_standing = s->bytes_standing / bps;
            s->output_overlap   = NULL;
            s->bytes_overlap    = 0;
        } else {
            s->samples_overlap  = frames_overlap * nch;
            s->bytes_overlap    = frames_overlap * nch * bps;
            s->bytes_standing   = s->bytes_stride - s->bytes_overlap;
            s->samples_standing = s->bytes_standing / bps;
            s->buf_overlap      = realloc(s->buf_overlap, s->bytes_overlap);
            s->table_blend      = realloc(s->table_blend, s->bytes_overlap * 4);
            if (!s->buf_overlap || !s->table_blend) {
                MP_FATAL(af, "[scaletempo] Out of memory\n");
                return AF_ERROR;
            }
            memset(s->buf_overlap, 0, s->bytes_overlap);
            if (use_int) {
                int32_t *pb = s->table_blend;
                int64_t blend = 0;
                for (int i = 0; i < frames_overlap; i++) {
                    int32_t v = blend / frames_overlap;
                    for (int j = 0; j < nch; j++)
                        *pb++ = v;
                    blend += 65536; // 2^16
                }
                s->output_overlap = output_overlap_s16;
            } else {
                float *pb = s->table_blend;
                for (int i = 0; i < frames_overlap; i++) {
                    float v = i / (float)frames_overlap;
                    for (int j = 0; j < nch; j++)
                        *pb++ = v;
                }
                s->output_overlap = output_overlap_float;
            }
        }

        s->frames_search = (frames_overlap > 1) ? srate * s->ms_search : 0;
        if (s->frames_search <= 0)
            s->best_overlap_offset = NULL;
        else {
            if (use_int) {
                int64_t t = frames_overlap;
                int32_t n = 8589934588LL / (t * t); // 4 * (2^31 - 1) / t^2
                s->buf_pre_corr = realloc(s->buf_pre_corr,
                                          s->bytes_overlap * 2 + UNROLL_PADDING);
                s->table_window = realloc(s->table_window,
                                          s->bytes_overlap * 2 - nch * bps * 2);
                if (!s->buf_pre_corr || !s->table_window) {
                    MP_FATAL(af, "[scaletempo] Out of memory\n");
                    return AF_ERROR;
                }
                memset((char *)s->buf_pre_corr + s->bytes_overlap * 2, 0,
                       UNROLL_PADDING);
                int32_t *pw = s->table_window;
                for (int i = 1; i < frames_overlap; i++) {
                    int32_t v = (i * (t - i) * n) >> 15;
                    for (int j = 0; j < nch; j++)
                        *pw++ = v;
                }
                s->best_overlap_offset = best_overlap_offset_s16;
            } else {
                s->buf_pre_corr = realloc(s->buf_pre_corr, s->bytes_overlap);
                s->table_window = realloc(s->table_window,
                                          s->bytes_overlap - nch * bps);
                if (!s->buf_pre_corr || !s->table_window) {
                    MP_FATAL(af, "[scaletempo] Out of memory\n");
                    return AF_ERROR;
                }
                float *pw = s->table_window;
                for (int i = 1; i < frames_overlap; i++) {
                    float v = i * (frames_overlap - i);
                    for (int j = 0; j < nch; j++)
                        *pw++ = v;
                }
                s->best_overlap_offset = best_overlap_offset_float;
            }
        }

        s->bytes_per_frame = bps * nch;
        s->num_channels    = nch;

        s->bytes_queue = (s->frames_search + s->frames_stride + frames_overlap)
                         * bps * nch;
        s->buf_queue = realloc(s->buf_queue, s->bytes_queue + UNROLL_PADDING);
        if (!s->buf_queue) {
            MP_FATAL(af, "[scaletempo] Out of memory\n");
            return AF_ERROR;
        }

        s->bytes_queued = 0;
        s->bytes_to_slide = 0;

        MP_DBG(af, "[scaletempo] "
               "%.2f stride_in, %i stride_out, %i standing, "
               "%i overlap, %i search, %i queue, %s mode\n",
               s->frames_stride_scaled,
               (int)(s->bytes_stride / nch / bps),
               (int)(s->bytes_standing / nch / bps),
               (int)(s->bytes_overlap / nch / bps),
               s->frames_search,
               (int)(s->bytes_queue / nch / bps),
               (use_int ? "s16" : "float"));

        return af_test_output(af, (struct mp_audio *)arg);
    }
    case AF_CONTROL_SET_PLAYBACK_SPEED: {
        if (s->speed_tempo) {
            if (s->speed_pitch)
                break;
            s->speed = *(double *)arg;
            s->scale = s->speed * s->scale_nominal;
        } else {
            if (s->speed_pitch) {
                s->speed = 1 / *(double *)arg;
                s->scale = s->speed * s->scale_nominal;
                break;
            }
        }
        return AF_OK;
    }
    case AF_CONTROL_RESET:
        s->bytes_queued = 0;
        s->bytes_to_slide = 0;
        s->frames_stride_error = 0;
        memset(s->buf_overlap, 0, s->bytes_overlap);
    }
Example #18
0
File: vo_xv.c Project: andre-d/mpv
/*
 * connect to server, create and map window,
 * allocate colors and (shared) memory
 */
static int config(struct vo *vo, uint32_t width, uint32_t height,
                  uint32_t d_width, uint32_t d_height, uint32_t flags,
                  uint32_t format)
{
    struct vo_x11_state *x11 = vo->x11;
    struct xvctx *ctx = vo->priv;
    int i;

    mp_image_unrefp(&ctx->original_image);

    ctx->image_height = height;
    ctx->image_width = width;
    ctx->image_format = format;

    if ((ctx->max_width != 0 && ctx->max_height != 0)
        && (ctx->image_width > ctx->max_width
            || ctx->image_height > ctx->max_height)) {
        MP_ERR(vo, "Source image dimensions are too high: %ux%u (maximum is %ux%u)\n",
               ctx->image_width, ctx->image_height, ctx->max_width,
               ctx->max_height);
        return -1;
    }

    /* check image formats */
    ctx->xv_format = 0;
    for (i = 0; i < ctx->formats; i++) {
        MP_VERBOSE(vo, "Xvideo image format: 0x%x (%4.4s) %s\n",
                   ctx->fo[i].id, (char *) &ctx->fo[i].id,
                   (ctx->fo[i].format == XvPacked) ? "packed" : "planar");
        if (ctx->fo[i].id == find_xv_format(format))
            ctx->xv_format = ctx->fo[i].id;
    }
    if (!ctx->xv_format)
        return -1;

    vo_x11_config_vo_window(vo, NULL, vo->dx, vo->dy, vo->dwidth,
                            vo->dheight, flags, "xv");

    if (ctx->xv_ck_info.method == CK_METHOD_BACKGROUND)
        XSetWindowBackground(x11->display, x11->window, ctx->xv_colorkey);

    MP_VERBOSE(vo, "using Xvideo port %d for hw scaling\n", ctx->xv_port);

    // In case config has been called before
    for (i = 0; i < ctx->num_buffers; i++)
        deallocate_xvimage(vo, i);

    ctx->num_buffers = 2;

    for (i = 0; i < ctx->num_buffers; i++) {
        if (!allocate_xvimage(vo, i)) {
            MP_FATAL(vo, "could not allocate Xv image data\n");
            return -1;
        }
    }

    ctx->current_buf = 0;
    ctx->current_ip_buf = 0;


    resize(vo);

    return 0;
}
Example #19
0
// Fill the VO buffer with a newly filtered or decoded image.
// returns VD_* code
static int video_output_image(struct MPContext *mpctx, double endpts,
                              bool reconfig_ok)
{
    struct vf_chain *vf = mpctx->d_video->vfilter;
    struct vo *vo = mpctx->video_out;

    // Already enough video buffered in VO?
    // (This implies vo_has_next_frame(vo, false/true) returns true.)
    if (!vo_needs_new_image(vo) && vo->params)
        return 1;

    // Filter a new frame.
    int r = video_decode_and_filter(mpctx);
    if (r < 0)
        return r; // error

    vf_output_frame(vf, false);
    if (vf->output) {
        double pts = vf->output->pts;

        // Always add these; they make backstepping after seeking faster.
        add_frame_pts(mpctx, pts);

        bool drop = false;
        bool hrseek = mpctx->hrseek_active && mpctx->video_status == STATUS_SYNCING
                      && !mpctx->d_video->header->attached_picture;
        if (hrseek && pts < mpctx->hrseek_pts - .005)
            drop = true;
        if (endpts != MP_NOPTS_VALUE && pts >= endpts) {
            drop = true;
            r = VD_EOF;
        }
        if (drop) {
            talloc_free(vf->output);
            vf->output = NULL;
            return r;
        }
    }

    // Filter output is different from VO input?
    bool need_vo_reconfig = !vo->params  ||
        !mp_image_params_equal(&vf->output_params, vo->params);

    if (need_vo_reconfig) {
        // Draining VO buffers.
        if (vo_has_next_frame(vo, true))
            return 0; // EOF so that caller displays remaining VO frames

        // There was no decoded image yet - must not signal fake EOF.
        // Likewise, if there's no filtered frame yet, don't reconfig yet.
        if (!vf->output_params.imgfmt || !vf->output)
            return r;

        // Force draining.
        if (!reconfig_ok)
            return 0;

        struct mp_image_params p = vf->output_params;

        const struct vo_driver *info = mpctx->video_out->driver;
        MP_INFO(mpctx, "VO: [%s] %dx%d => %dx%d %s\n",
                info->name, p.w, p.h, p.d_w, p.d_h, vo_format_name(p.imgfmt));
        MP_VERBOSE(mpctx, "VO: Description: %s\n", info->description);

        int vo_r = vo_reconfig(vo, &p, 0);
        if (vo_r < 0) {
            vf->initialized = -1;
            return VD_ERROR;
        }
        init_vo(mpctx);
        // Display the frame queued after this immediately.
        // (Neutralizes frame time calculation in update_video.)
        mpctx->video_next_pts = MP_NOPTS_VALUE;
    }

    // Queue new frame, if there's one.
    struct mp_image *img = vf_read_output_frame(vf);
    if (img) {
        vo_queue_image(vo, img);
        return VD_PROGRESS;
    }

    return r; // includes the true EOF case
}
Example #20
0
File: vo_xv.c Project: andre-d/mpv
static void draw_osd(struct vo *vo, struct osd_state *osd)
{
    struct xvctx *ctx = vo->priv;

    struct mp_image img = get_xv_buffer(vo, ctx->current_buf);

    struct mp_osd_res res = {
        .w = ctx->image_width,
        .h = ctx->image_height,
        .display_par = 1.0 / vo->aspdat.par,
    };

    osd_draw_on_image(osd, res, osd->vo_pts, 0, &img);
}

static void wait_for_completion(struct vo *vo, int max_outstanding)
{
#if HAVE_SHM && HAVE_XEXT
    struct xvctx *ctx = vo->priv;
    struct vo_x11_state *x11 = vo->x11;
    if (ctx->Shmem_Flag) {
        while (x11->ShmCompletionWaitCount > max_outstanding) {
            if (!ctx->Shm_Warned_Slow) {
                MP_WARN(vo, "X11 can't keep up! Waiting"
                        " for XShm completion events...\n");
                ctx->Shm_Warned_Slow = 1;
            }
            mp_sleep_us(1000);
            vo_x11_check_events(vo);
        }
    }
#endif
}

static void flip_page(struct vo *vo)
{
    struct xvctx *ctx = vo->priv;
    put_xvimage(vo, ctx->xvimage[ctx->current_buf]);

    /* remember the currently visible buffer */
    ctx->current_buf = (ctx->current_buf + 1) % ctx->num_buffers;

    if (!ctx->Shmem_Flag)
        XSync(vo->x11->display, False);
}

static mp_image_t *get_screenshot(struct vo *vo)
{
    struct xvctx *ctx = vo->priv;
    if (!ctx->original_image)
        return NULL;

    return mp_image_new_ref(ctx->original_image);
}

// Note: redraw_frame() can call this with NULL.
static void draw_image(struct vo *vo, mp_image_t *mpi)
{
    struct xvctx *ctx = vo->priv;

    wait_for_completion(vo, ctx->num_buffers - 1);

    struct mp_image xv_buffer = get_xv_buffer(vo, ctx->current_buf);
    if (mpi) {
        mp_image_copy(&xv_buffer, mpi);
    } else {
        mp_image_clear(&xv_buffer, 0, 0, xv_buffer.w, xv_buffer.h);
    }

    mp_image_setrefp(&ctx->original_image, mpi);
}

static int redraw_frame(struct vo *vo)
{
    struct xvctx *ctx = vo->priv;

    draw_image(vo, ctx->original_image);
    return true;
}

static int query_format(struct vo *vo, uint32_t format)
{
    struct xvctx *ctx = vo->priv;
    uint32_t i;
    int flag = VFCAP_CSP_SUPPORTED | VFCAP_CSP_SUPPORTED_BY_HW;

    int fourcc = find_xv_format(format);
    if (fourcc) {
        for (i = 0; i < ctx->formats; i++) {
            if (ctx->fo[i].id == fourcc)
                return flag;
        }
    }
    return 0;
}

static void uninit(struct vo *vo)
{
    struct xvctx *ctx = vo->priv;
    int i;

    talloc_free(ctx->original_image);

    if (ctx->ai)
        XvFreeAdaptorInfo(ctx->ai);
    ctx->ai = NULL;
    if (ctx->fo) {
        XFree(ctx->fo);
        ctx->fo = NULL;
    }
    for (i = 0; i < ctx->num_buffers; i++)
        deallocate_xvimage(vo, i);
    // uninit() shouldn't get called unless initialization went past vo_init()
    vo_x11_uninit(vo);
}

static int preinit(struct vo *vo)
{
    XvPortID xv_p;
    int busy_ports = 0;
    unsigned int i;
    struct xvctx *ctx = vo->priv;
    int xv_adaptor = ctx->cfg_xv_adaptor;

    if (!vo_x11_init(vo))
        return -1;

    struct vo_x11_state *x11 = vo->x11;

    /* check for Xvideo extension */
    unsigned int ver, rel, req, ev, err;
    if (Success != XvQueryExtension(x11->display, &ver, &rel, &req, &ev, &err)) {
        MP_ERR(vo, "Xv not supported by this X11 version/driver\n");
        goto error;
    }

    /* check for Xvideo support */
    if (Success !=
        XvQueryAdaptors(x11->display, DefaultRootWindow(x11->display),
                        &ctx->adaptors, &ctx->ai)) {
        MP_ERR(vo, "XvQueryAdaptors failed.\n");
        goto error;
    }

    /* check adaptors */
    if (ctx->xv_port) {
        int port_found;

        for (port_found = 0, i = 0; !port_found && i < ctx->adaptors; i++) {
            if ((ctx->ai[i].type & XvInputMask)
                && (ctx->ai[i].type & XvImageMask)) {
                for (xv_p = ctx->ai[i].base_id;
                     xv_p < ctx->ai[i].base_id + ctx->ai[i].num_ports;
                     ++xv_p) {
                    if (xv_p == ctx->xv_port) {
                        port_found = 1;
                        break;
                    }
                }
            }
        }
        if (port_found) {
            if (XvGrabPort(x11->display, ctx->xv_port, CurrentTime))
                ctx->xv_port = 0;
        } else {
            MP_WARN(vo, "Invalid port parameter, overriding with port 0.\n");
            ctx->xv_port = 0;
        }
    }

    for (i = 0; i < ctx->adaptors && ctx->xv_port == 0; i++) {
        /* check if adaptor number has been specified */
        if (xv_adaptor != -1 && xv_adaptor != i)
            continue;

        if ((ctx->ai[i].type & XvInputMask) && (ctx->ai[i].type & XvImageMask)) {
            for (xv_p = ctx->ai[i].base_id;
                 xv_p < ctx->ai[i].base_id + ctx->ai[i].num_ports; ++xv_p)
                if (!XvGrabPort(x11->display, xv_p, CurrentTime)) {
                    ctx->xv_port = xv_p;
                    MP_VERBOSE(vo, "Using Xv Adapter #%d (%s)\n",
                               i, ctx->ai[i].name);
                    break;
                } else {
                    MP_WARN(vo, "Could not grab port %i.\n", (int) xv_p);
                    ++busy_ports;
                }
        }
    }
    if (!ctx->xv_port) {
        if (busy_ports)
            MP_ERR(vo,
                   "Could not find free Xvideo port - maybe another process is already\n"\
                   "using it. Close all video applications, and try again. If that does\n"\
                   "not help, see 'mpv -vo help' for other (non-xv) video out drivers.\n");
        else
            MP_ERR(vo,
                   "It seems there is no Xvideo support for your video card available.\n"\
                   "Run 'xvinfo' to verify its Xv support and read\n"\
                   "DOCS/HTML/en/video.html#xv!\n"\
                   "See 'mpv -vo help' for other (non-xv) video out drivers.\n"\
                   "Try -vo x11.\n");
        goto error;
    }

    if (!xv_init_colorkey(vo)) {
        goto error;             // bail out, colorkey setup failed
    }
    xv_enable_vsync(vo);
    xv_get_max_img_dim(vo, &ctx->max_width, &ctx->max_height);

    ctx->fo = XvListImageFormats(x11->display, ctx->xv_port,
                                 (int *) &ctx->formats);

    return 0;

  error:
    uninit(vo);                 // free resources
    return -1;
}
Example #21
0
File: vo_xv.c Project: 2ion/mpv
/*
 * create and map window,
 * allocate colors and (shared) memory
 */
static int reconfig(struct vo *vo, struct mp_image_params *params)
{
    struct vo_x11_state *x11 = vo->x11;
    struct xvctx *ctx = vo->priv;
    int i;

    mp_image_unrefp(&ctx->original_image);

    ctx->image_height = params->h;
    ctx->image_width  = params->w;
    ctx->image_format = params->imgfmt;

    if ((ctx->max_width != 0 && ctx->max_height != 0)
        && (ctx->image_width > ctx->max_width
            || ctx->image_height > ctx->max_height)) {
        MP_ERR(vo, "Source image dimensions are too high: %ux%u (maximum is %ux%u)\n",
               ctx->image_width, ctx->image_height, ctx->max_width,
               ctx->max_height);
        return -1;
    }

    /* check image formats */
    ctx->xv_format = 0;
    for (i = 0; i < ctx->formats; i++) {
        MP_VERBOSE(vo, "Xvideo image format: 0x%x (%4.4s) %s\n",
                   ctx->fo[i].id, (char *) &ctx->fo[i].id,
                   (ctx->fo[i].format == XvPacked) ? "packed" : "planar");
        if (ctx->fo[i].id == find_xv_format(ctx->image_format))
            ctx->xv_format = ctx->fo[i].id;
    }
    if (!ctx->xv_format)
        return -1;

    vo_x11_config_vo_window(vo);

    if (!ctx->f_gc && !ctx->vo_gc) {
        ctx->f_gc = XCreateGC(x11->display, x11->window, 0, 0);
        ctx->vo_gc = XCreateGC(x11->display, x11->window, 0, NULL);
        XSetForeground(x11->display, ctx->f_gc, 0);
    }

    if (ctx->xv_ck_info.method == CK_METHOD_BACKGROUND)
        XSetWindowBackground(x11->display, x11->window, ctx->xv_colorkey);

    MP_VERBOSE(vo, "using Xvideo port %d for hw scaling\n", ctx->xv_port);

    // In case config has been called before
    for (i = 0; i < ctx->num_buffers; i++)
        deallocate_xvimage(vo, i);

    ctx->num_buffers = ctx->cfg_buffers;

    for (i = 0; i < ctx->num_buffers; i++) {
        if (!allocate_xvimage(vo, i)) {
            MP_FATAL(vo, "could not allocate Xv image data\n");
            return -1;
        }
    }

    ctx->current_buf = 0;
    ctx->current_ip_buf = 0;

    int is_709 = params->colorspace == MP_CSP_BT_709;
    xv_set_eq(vo, ctx->xv_port, "bt_709", is_709 * 200 - 100);
    read_xv_csp(vo);

    resize(vo);

    return 0;
}
Example #22
0
static void get_disc_lang(struct stream *stream, struct sh_stream *sh)
{
    struct stream_lang_req req = {.type = sh->type, .id = sh->demuxer_id};
    if (stream->uncached_type == STREAMTYPE_DVD && sh->type == STREAM_SUB)
        req.id = req.id & 0x1F; // mpeg ID to index
    stream_control(stream, STREAM_CTRL_GET_LANG, &req);
    if (req.name[0])
        sh->lang = talloc_strdup(sh, req.name);
}

static void add_dvd_streams(demuxer_t *demuxer)
{
    struct priv *p = demuxer->priv;
    struct stream *stream = demuxer->stream;
    if (stream->uncached_type != STREAMTYPE_DVD)
        return;
    struct stream_dvd_info_req info;
    if (stream_control(stream, STREAM_CTRL_GET_DVD_INFO, &info) > 0) {
        for (int n = 0; n < MPMIN(32, info.num_subs); n++) {
            struct sh_stream *sh = new_sh_stream(demuxer, STREAM_SUB);
            if (!sh)
                break;
            sh->demuxer_id = n + 0x20;
            sh->codec = "dvd_subtitle";
            get_disc_lang(stream, sh);
            // p->streams _must_ match with p->slave->streams, so we can't add
            // it yet - it has to be done when the real stream appears, which
            // could be right on start, or any time later.
            p->dvd_subs[n] = sh;

            // emulate the extradata
            struct mp_csp_params csp = MP_CSP_PARAMS_DEFAULTS;
            csp.int_bits_in = 8;
            csp.int_bits_out = 8;
            float cmatrix[3][4];
            mp_get_yuv2rgb_coeffs(&csp, cmatrix);

            char *s = talloc_strdup(sh, "");
            s = talloc_asprintf_append(s, "palette: ");
            for (int i = 0; i < 16; i++) {
                int color = info.palette[i];
                int c[3] = {(color >> 16) & 0xff, (color >> 8) & 0xff, color & 0xff};
                mp_map_int_color(cmatrix, 8, c);
                color = (c[2] << 16) | (c[1] << 8) | c[0];

                if (i != 0)
                    s = talloc_asprintf_append(s, ", ");
                s = talloc_asprintf_append(s, "%06x", color);
            }
            s = talloc_asprintf_append(s, "\n");

            sh->sub->extradata = s;
            sh->sub->extradata_len = strlen(s);
        }
    }
}

static void add_streams(demuxer_t *demuxer)
{
    struct priv *p = demuxer->priv;

    for (int n = p->num_streams; n < p->slave->num_streams; n++) {
        struct sh_stream *src = p->slave->streams[n];
        if (src->sub) {
            struct sh_stream *sub = NULL;
            if (src->demuxer_id >= 0x20 && src->demuxer_id <= 0x3F)
                sub = p->dvd_subs[src->demuxer_id - 0x20];
            if (sub) {
                assert(p->num_streams == n); // directly mapped
                MP_TARRAY_APPEND(p, p->streams, p->num_streams, sub);
                continue;
            }
        }
        struct sh_stream *sh = new_sh_stream(demuxer, src->type);
        if (!sh)
            break;
        assert(p->num_streams == n); // directly mapped
        MP_TARRAY_APPEND(p, p->streams, p->num_streams, sh);
        // Copy all stream fields that might be relevant
        sh->codec = talloc_strdup(sh, src->codec);
        sh->format = src->format;
        sh->lav_headers = src->lav_headers;
        sh->demuxer_id = src->demuxer_id;
        if (src->video) {
            double ar;
            if (stream_control(demuxer->stream, STREAM_CTRL_GET_ASPECT_RATIO, &ar)
                                == STREAM_OK)
                sh->video->aspect = ar;
        }
        if (src->audio)
            sh->audio = src->audio;
        get_disc_lang(demuxer->stream, sh);
    }
    reselect_streams(demuxer);
}

static void d_seek(demuxer_t *demuxer, double rel_seek_secs, int flags)
{
    struct priv *p = demuxer->priv;

    if (demuxer->stream->uncached_type == STREAMTYPE_CDDA) {
        demux_seek(p->slave, rel_seek_secs, flags);
        return;
    }

    double pts = p->seek_pts;
    if (flags & SEEK_ABSOLUTE)
        pts = 0.0f;

    if (flags & SEEK_FACTOR) {
        double tmp = 0;
        stream_control(demuxer->stream, STREAM_CTRL_GET_TIME_LENGTH, &tmp);
        pts += tmp * rel_seek_secs;
    } else {
        pts += rel_seek_secs;
    }

    MP_VERBOSE(demuxer, "seek to: %f\n", pts);

    stream_control(demuxer->stream, STREAM_CTRL_SEEK_TO_TIME, &pts);
    demux_control(p->slave, DEMUXER_CTRL_RESYNC, NULL);

    p->seek_pts = pts;
    p->seek_reinit = true;
}

static void reset_pts(demuxer_t *demuxer)
{
    struct priv *p = demuxer->priv;

    double base;
    if (stream_control(demuxer->stream, STREAM_CTRL_GET_CURRENT_TIME, &base) < 1)
        base = 0;

    MP_VERBOSE(demuxer, "reset to time: %f\n", base);

    p->base_dts = p->last_dts = MP_NOPTS_VALUE;
    p->base_time = base;
    p->seek_reinit = false;
}

static int d_fill_buffer(demuxer_t *demuxer)
{
    struct priv *p = demuxer->priv;

    struct demux_packet *pkt = demux_read_any_packet(p->slave);
    if (!pkt)
        return 0;

    demux_update(p->slave);

    if (p->seek_reinit)
        reset_pts(demuxer);

    add_streams(demuxer);
    if (pkt->stream >= p->num_streams) { // out of memory?
        talloc_free(pkt);
        return 0;
    }

    struct sh_stream *sh = p->streams[pkt->stream];
    if (!demux_stream_is_selected(sh)) {
        talloc_free(pkt);
        return 1;
    }

    MP_TRACE(demuxer, "ipts: %d %f %f\n", sh->type, pkt->pts, pkt->dts);

    if (sh->type == STREAM_SUB) {
        if (p->base_dts == MP_NOPTS_VALUE)
            MP_WARN(demuxer, "subtitle packet along PTS reset\n");
    } else if (pkt->dts != MP_NOPTS_VALUE) {
        // Use the very first DTS to rebase the start time of the MPEG stream
        // to the playback time.
        if (p->base_dts == MP_NOPTS_VALUE)
            p->base_dts = pkt->dts;

        if (p->last_dts == MP_NOPTS_VALUE)
            p->last_dts = pkt->dts;

        if (fabs(p->last_dts - pkt->dts) >= DTS_RESET_THRESHOLD) {
            MP_WARN(demuxer, "PTS discontinuity: %f->%f\n", p->last_dts, pkt->dts);
            p->base_time += p->last_dts - p->base_dts;
            p->base_dts = pkt->dts - pkt->duration;
        }
        p->last_dts = pkt->dts;
    }

    if (p->base_dts != MP_NOPTS_VALUE) {
        double delta = -p->base_dts + p->base_time;
        if (pkt->pts != MP_NOPTS_VALUE)
            pkt->pts += delta;
        if (pkt->dts != MP_NOPTS_VALUE)
            pkt->dts += delta;
    }

    MP_TRACE(demuxer, "opts: %d %f %f\n", sh->type, pkt->pts, pkt->dts);

    if (pkt->pts != MP_NOPTS_VALUE)
        p->seek_pts = pkt->pts;

    demux_add_packet(sh, pkt);
    return 1;
}
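
d_fill_buffer() above rebases the MPEG timeline onto playback time using the first DTS and restarts the base on large discontinuities. A standalone sketch of that logic under stated assumptions: NOPTS and RESET_THRESHOLD stand in for mpv's MP_NOPTS_VALUE and DTS_RESET_THRESHOLD, and struct rebase is hypothetical.

#include <math.h>

#define NOPTS (-1e300)          // stand-in for MP_NOPTS_VALUE
#define RESET_THRESHOLD 5.0     // assumed discontinuity threshold, in seconds

struct rebase {
    double base_dts;   // first DTS of the current segment
    double base_time;  // playback time that segment should start at
    double last_dts;   // DTS of the previous packet
};

// Returns the offset to add to a packet's PTS/DTS, updating the state when
// a discontinuity of at least RESET_THRESHOLD seconds is seen.
static double rebase_offset(struct rebase *r, double dts, double duration)
{
    if (dts != NOPTS) {
        if (r->base_dts == NOPTS)
            r->base_dts = dts;
        if (r->last_dts == NOPTS)
            r->last_dts = dts;
        if (fabs(r->last_dts - dts) >= RESET_THRESHOLD) {
            // Fold the time already played into base_time, then restart the
            // base so the new segment continues where the old one stopped.
            r->base_time += r->last_dts - r->base_dts;
            r->base_dts = dts - duration;
        }
        r->last_dts = dts;
    }
    return r->base_dts != NOPTS ? r->base_time - r->base_dts : 0;
}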

static void add_stream_chapters(struct demuxer *demuxer)
{
    int num = 0;
    if (stream_control(demuxer->stream, STREAM_CTRL_GET_NUM_CHAPTERS, &num) < 1)
        return;
    for (int n = 0; n < num; n++) {
        double p = n;
        if (stream_control(demuxer->stream, STREAM_CTRL_GET_CHAPTER_TIME, &p) < 1)
            continue;
        demuxer_add_chapter(demuxer, bstr0(""), p * 1e9, 0, 0);
    }
}
Example #23
0
static int preinit(struct vo *vo)
{
    struct gl_priv *p = vo->priv;
    p->vo = vo;
    p->log = vo->log;

    int vo_flags = 0;

    if (p->renderer_opts->alpha_mode == 1)
        vo_flags |= VOFLAG_ALPHA;

    if (p->use_gl_debug)
        vo_flags |= VOFLAG_GL_DEBUG;

    if (p->es == 1)
        vo_flags |= VOFLAG_GLES;
    if (p->es == -1)
        vo_flags |= VOFLAG_NO_GLES;

    if (p->allow_sw)
        vo_flags |= VOFLAG_SW;

    p->glctx = mpgl_init(vo, p->backend, vo_flags);
    if (!p->glctx)
        goto err_out;
    p->gl = p->glctx->gl;

    p->glctx->dwm_flush_opt = p->dwm_flush;

    if (p->gl->SwapInterval) {
        p->gl->SwapInterval(p->swap_interval);
    } else {
        MP_VERBOSE(vo, "swap_control extension missing.\n");
    }

    p->renderer = gl_video_init(p->gl, vo->log, vo->global);
    if (!p->renderer)
        goto err_out;
    gl_video_set_osd_source(p->renderer, vo->osd);
    gl_video_set_options(p->renderer, p->renderer_opts);
    gl_video_configure_queue(p->renderer, vo);

    get_and_update_icc_profile(p);

    vo->hwdec_devs = hwdec_devices_create();

    hwdec_devices_set_loader(vo->hwdec_devs, call_request_hwdec_api, vo);

    int hwdec = vo->opts->hwdec_preload_api;
    if (hwdec == HWDEC_NONE)
        hwdec = vo->global->opts->hwdec_api;
    if (hwdec != HWDEC_NONE) {
        p->hwdec = gl_hwdec_load_api(p->vo->log, p->gl, vo->global,
                                     vo->hwdec_devs, hwdec);
        gl_video_set_hwdec(p->renderer, p->hwdec);
    }

    p->original_opts = m_sub_options_copy(p, &opengl_conf, p);

    return 0;

err_out:
    uninit(vo);
    return -1;
}
Example #24
0
/*
 * open device and setup parameters
 * return: 0=success -1=fail
 */
static int init(struct ao *ao)
{
    struct priv *p = ao->priv;

    struct af_to_par {
        int format, bits, sig;
    };
    static const struct af_to_par af_to_par[] = {
        {AF_FORMAT_U8,   8, 0},
        {AF_FORMAT_S16, 16, 1},
        {AF_FORMAT_S24, 24, 1},
        {AF_FORMAT_S32, 32, 1},
    };
    const struct af_to_par *ap;
    int i;

    p->hdl = sio_open(SIO_DEVANY, SIO_PLAY, 0);
    if (p->hdl == NULL) {
        MP_ERR(ao, "can't open sndio %s\n", SIO_DEVANY);
        goto error;
    }

    ao->format = af_fmt_from_planar(ao->format);

    sio_initpar(&p->par);
    for (i = 0, ap = af_to_par;; i++, ap++) {
        if (i == sizeof(af_to_par) / sizeof(struct af_to_par)) {
            MP_VERBOSE(ao, "unsupported format\n");
            p->par.bits = 16;
            p->par.sig = 1;
            p->par.le = SIO_LE_NATIVE;
            break;
        }
        if (ap->format == ao->format) {
            p->par.bits = ap->bits;
            p->par.sig = ap->sig;
            if (ap->bits > 8)
                p->par.le = SIO_LE_NATIVE;
            if (ap->bits != SIO_BPS(ap->bits))
                p->par.bps = ap->bits / 8;
            break;
        }
    }
    p->par.rate = ao->samplerate;

    struct mp_chmap_sel sel = {0};
    for (int n = 0; n < MP_NUM_CHANNELS+1; n++)
        mp_chmap_sel_add_map(&sel, &sndio_layouts[n]);

    if (!ao_chmap_sel_adjust(ao, &sel, &ao->channels))
        goto error;

    p->par.pchan = ao->channels.num;
    p->par.appbufsz = p->par.rate * 250 / 1000;    /* 250ms buffer */
    p->par.round = p->par.rate * 10 / 1000;    /*  10ms block size */
    if (!sio_setpar(p->hdl, &p->par)) {
        MP_ERR(ao, "couldn't set params\n");
        goto error;
    }
    if (!sio_getpar(p->hdl, &p->par)) {
        MP_ERR(ao, "couldn't get params\n");
        goto error;
    }
    if (p->par.bps > 1 && p->par.le != SIO_LE_NATIVE) {
        MP_ERR(ao, "swapped endian output not supported\n");
        goto error;
    }
    if (p->par.bits == 8 && p->par.bps == 1 && !p->par.sig) {
        ao->format = AF_FORMAT_U8;
    } else if (p->par.bits == 16 && p->par.bps == 2 && p->par.sig) {
        ao->format = AF_FORMAT_S16;
    } else if ((p->par.bits == 24 || p->par.msb) && p->par.bps == 3 && p->par.sig) {
        ao->format = AF_FORMAT_S24;
    } else if ((p->par.bits == 32 || p->par.msb) && p->par.bps == 4 && p->par.sig) {
        ao->format = AF_FORMAT_S32;
    } else {
        MP_ERR(ao, "couldn't set format\n");
        goto error;
    }

    p->havevol = sio_onvol(p->hdl, volcb, p);
    sio_onmove(p->hdl, movecb, p);
    if (!sio_start(p->hdl))
        MP_ERR(ao, "init: couldn't start\n");

    p->pfd = calloc (sio_nfds(p->hdl), sizeof (struct pollfd));
    if (!p->pfd)
        goto error;

    return 0;

error:
    if (p->hdl)
      sio_close(p->hdl);

    return -1;
}
Example #25
0
File: discnav.c Project: Nikoli/mpv
void mp_handle_nav(struct MPContext *mpctx)
{
    struct mp_nav_state *nav = mpctx->nav_state;
    if (!nav)
        return;
    while (1) {
        struct mp_nav_event *ev = NULL;
        stream_control(mpctx->stream, STREAM_CTRL_GET_NAV_EVENT, &ev);
        if (!ev)
            break;
        switch (ev->event) {
        case MP_NAV_EVENT_DRAIN: {
            nav->nav_draining = true;
            MP_VERBOSE(nav, "drain requested\n");
            break;
        }
        case MP_NAV_EVENT_RESET_ALL: {
            mpctx->stop_play = PT_RELOAD_DEMUXER;
            MP_VERBOSE(nav, "reload\n");
            // return immediately.
            // other events should be handled after reloaded.
            talloc_free(ev);
            return;
        }
        case MP_NAV_EVENT_RESET: {
            nav->nav_still_frame = 0;
            break;
        }
        case MP_NAV_EVENT_EOF:
            nav->nav_eof = true;
            break;
        case MP_NAV_EVENT_STILL_FRAME: {
            int len = ev->u.still_frame.seconds;
            MP_VERBOSE(nav, "wait for %d seconds\n", len);
            if (len > 0 && nav->nav_still_frame == 0)
                nav->nav_still_frame = len;
            break;
        }
        case MP_NAV_EVENT_MENU_MODE:
            nav->nav_menu = ev->u.menu_mode.enable;
            if (nav->nav_menu) {
                mp_input_enable_section(mpctx->input, "discnav-menu",
                                        MP_INPUT_ON_TOP);
            } else {
                mp_input_disable_section(mpctx->input, "discnav-menu");
            }
            break;
        case MP_NAV_EVENT_HIGHLIGHT: {
            MP_VERBOSE(nav, "highlight: %d %d %d - %d %d\n",
                       ev->u.highlight.display,
                       ev->u.highlight.sx, ev->u.highlight.sy,
                       ev->u.highlight.ex, ev->u.highlight.ey);
            osd_set_nav_highlight(mpctx->osd, NULL);
            nav->highlight[0] = ev->u.highlight.sx;
            nav->highlight[1] = ev->u.highlight.sy;
            nav->highlight[2] = ev->u.highlight.ex;
            nav->highlight[3] = ev->u.highlight.ey;
            nav->hi_visible = ev->u.highlight.display;
            update_resolution(mpctx);
            osd_set_nav_highlight(mpctx->osd, mpctx);
            break;
        }
        case MP_NAV_EVENT_OVERLAY: {
            osd_set_nav_highlight(mpctx->osd, NULL);
            for (int i = 0; i < 2; i++) {
                if (nav->overlays[i])
                    talloc_free(nav->overlays[i]);
                nav->overlays[i] = talloc_steal(nav, ev->u.overlay.images[i]);
            }
            update_resolution(mpctx);
            osd_set_nav_highlight(mpctx->osd, mpctx);
            break;
        }
        default: ; // ignore
        }
        talloc_free(ev);
    }
    if (mpctx->stop_play == AT_END_OF_FILE) {
        if (nav->nav_still_frame > 0) {
            // gross hack
            mpctx->time_frame += nav->nav_still_frame;
            mpctx->playing_last_frame = true;
            nav->nav_still_frame = -2;
        } else if (nav->nav_still_frame == -2) {
            struct mp_nav_cmd inp = {MP_NAV_CMD_SKIP_STILL};
            stream_control(mpctx->stream, STREAM_CTRL_NAV_CMD, &inp);
        }
    }
    if (nav->nav_draining && mpctx->stop_play == AT_END_OF_FILE) {
        MP_VERBOSE(nav, "execute drain\n");
        struct mp_nav_cmd inp = {MP_NAV_CMD_DRAIN_OK};
        stream_control(mpctx->stream, STREAM_CTRL_NAV_CMD, &inp);
        nav->nav_draining = false;
        stream_control(mpctx->stream, STREAM_CTRL_RESUME_CACHE, NULL);
    }
    // E.g. keep displaying still frames
    if (mpctx->stop_play == AT_END_OF_FILE && !nav->nav_eof)
        mpctx->stop_play = KEEP_PLAYING;
}
Example #26
0
int video_reconfig_filters(struct dec_video *d_video,
                           const struct mp_image_params *params)
{
    struct MPOpts *opts = d_video->opts;
    struct mp_image_params p = *params;
    struct sh_video *sh = d_video->header->video;

    MP_VERBOSE(d_video, "VIDEO:  %dx%d  %5.3f fps  %5.1f kbps (%4.1f kB/s)\n",
               p.w, p.h, sh->fps, sh->bitrate / 1000.0,
               sh->bitrate / 8000.0);

    MP_VERBOSE(d_video, "VDec: vo config request - %d x %d (%s)\n",
               p.w, p.h, vo_format_name(p.imgfmt));

    float decoder_aspect = p.d_w / (float)p.d_h;
    if (d_video->initial_decoder_aspect == 0)
        d_video->initial_decoder_aspect = decoder_aspect;

    // We normally prefer the container aspect, unless the decoder aspect
    // changes at least once.
    if (d_video->initial_decoder_aspect == decoder_aspect) {
        if (sh->aspect > 0)
            vf_set_dar(&p.d_w, &p.d_h, p.w, p.h, sh->aspect);
    } else {
        // Even if the aspect switches back, don't use container aspect again.
        d_video->initial_decoder_aspect = -1;
    }

    float force_aspect = opts->movie_aspect;
    if (force_aspect > 0.0 && d_video->stream_aspect != 0.0)
        force_aspect = d_video->stream_aspect;

    if (force_aspect >= 0.0)
        vf_set_dar(&p.d_w, &p.d_h, p.w, p.h, force_aspect);

    if (abs(p.d_w - p.w) >= 4 || abs(p.d_h - p.h) >= 4) {
        MP_VERBOSE(d_video, "Aspect ratio is %.2f:1 - "
                   "scaling to correct movie aspect.\n", sh->aspect);
    } else {
        p.d_w = p.w;
        p.d_h = p.h;
    }

    // Apply user overrides
    if (opts->requested_colorspace != MP_CSP_AUTO)
        p.colorspace = opts->requested_colorspace;
    if (opts->requested_input_range != MP_CSP_LEVELS_AUTO)
        p.colorlevels = opts->requested_input_range;
    p.outputlevels = opts->requested_output_range;

    // Detect colorspace from resolution.
    // Make sure the user-overrides are consistent (no RGB csp for YUV, etc.).
    mp_image_params_guess_csp(&p);

    // Time to config libvo!
    MP_VERBOSE(d_video, "VO Config (%dx%d->%dx%d,0x%X)\n",
               p.w, p.h, p.d_w, p.d_h, p.imgfmt);

    if (vf_reconfig(d_video->vfilter, params, &p) < 0) {
        MP_FATAL(d_video, "Cannot initialize video filters.\n");
        return -1;
    }

    return 0;
}
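
vf_set_dar() above turns a display aspect ratio into display dimensions. A hypothetical stand-in (set_dar_sketch, not mpv's implementation) showing one common way to derive them: keep the stored frame size and stretch only the axis that is too small for the requested DAR.

/* Hypothetical stand-in for vf_set_dar(): compute display dimensions for a
 * stored w x h frame and a requested display aspect ratio (dar). */
static void set_dar_sketch(int *d_w, int *d_h, int w, int h, double dar)
{
    if (dar <= 0) {
        *d_w = w;
        *d_h = h;
        return;
    }
    if (dar >= (double)w / h) {
        *d_w = (int)(h * dar + 0.5);   // wider than stored: stretch the width
        *d_h = h;
    } else {
        *d_w = w;
        *d_h = (int)(w / dar + 0.5);   // narrower than stored: stretch the height
    }
}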
Example #27
0
static int open_s_internal(stream_t *stream)
{
  int k;
  dvd_priv_t *d = stream->priv;

  struct dvd_opts *opts =
    mp_get_config_group(stream, stream->global, &dvd_conf);

  d->dvd_angle = opts->angle;

  MP_VERBOSE(stream, "URL: %s\n", stream->url);
  d->dvd_title = d->cfg_title + 1;
  if(1){
    //int ret,ret2;
    int ttn,pgc_id,pgn;
    dvd_reader_t *dvd;
    dvd_file_t *title;
    ifo_handle_t *vmg_file;
    tt_srpt_t *tt_srpt;
    ifo_handle_t *vts_file;
    pgc_t *pgc;
    /**
     * Open the disc.
     */
    if(d->cfg_device && d->cfg_device[0])
      d->dvd_device_current = d->cfg_device;
    else if(opts->device && opts->device[0])
      d->dvd_device_current = talloc_strdup(stream, opts->device);
    else
      d->dvd_device_current = DEFAULT_DVD_DEVICE;
    d->dvd_speed = opts->speed;
    dvd_set_speed(stream,d->dvd_device_current, d->dvd_speed);
#if defined(__APPLE__) || defined(__DARWIN__)
    /* Dynamic DVD drive selection on Darwin */
    if(!strcmp(d->dvd_device_current, "/dev/rdiskN")) {
      int i;
      size_t len = strlen(d->dvd_device_current)+1;
      char *temp_device = malloc(len);

      for (i = 1; i < 10; i++) {
        snprintf(temp_device, len, "/dev/rdisk%d", i);
        dvd = DVDOpen(temp_device);
        if(!dvd) {
          MP_ERR(stream, "Couldn't open DVD device: %s (%s)\n",temp_device,
                 mp_strerror(errno));
        } else {
#if DVDREAD_VERSION <= LIBDVDREAD_VERSION(0,9,4)
          dvd_file_t *dvdfile = DVDOpenFile(dvd,d->dvd_title,DVD_READ_INFO_FILE);
          if(!dvdfile) {
            MP_ERR(stream, "Couldn't open DVD device: %s (%s)\n",temp_device,
                   mp_strerror(errno));
            DVDClose(dvd);
            continue;
          }
          DVDCloseFile(dvdfile);
#endif
          break;
        }
      }
      free(temp_device);

      if(!dvd) {
        return STREAM_UNSUPPORTED;
      }
    } else
#endif /* defined(__APPLE__) || defined(__DARWIN__) */
    {
        dvd = DVDOpen(d->dvd_device_current);
        if(!dvd) {
          MP_ERR(stream, "Couldn't open DVD device: %s (%s)\n",
                 d->dvd_device_current, mp_strerror(errno));
          return STREAM_UNSUPPORTED;
        }
    }

    MP_VERBOSE(stream, "Reading disc structure, please wait...\n");

    /**
     * Load the video manager to find out the information about the titles on
     * this disc.
     */
    vmg_file = ifoOpen(dvd, 0);
    if(!vmg_file) {
      MP_ERR(stream, "Can't open VMG info!\n");
      DVDClose( dvd );
      return STREAM_UNSUPPORTED;
    }
    tt_srpt = vmg_file->tt_srpt;
    /**
     * Make sure our title number is valid.
     */
    MP_INFO(stream, "There are %d titles on this DVD.\n", tt_srpt->nr_of_srpts );
    if(d->dvd_title < 1 || d->dvd_title > tt_srpt->nr_of_srpts) {
      MP_ERR(stream, "Invalid DVD title number: %d\n", d->dvd_title);
      ifoClose( vmg_file );
      DVDClose( dvd );
      return STREAM_UNSUPPORTED;
    }
    --(d->dvd_title); // remap 1.. -> 0..
    /**
     * Make sure the angle number is valid for this title.
     */
    MP_INFO(stream, "There are %d angles in this DVD title.\n", tt_srpt->title[d->dvd_title].nr_of_angles);
    if(d->dvd_angle<1 || d->dvd_angle>tt_srpt->title[d->dvd_title].nr_of_angles) {
      MP_ERR(stream, "Invalid DVD angle number: %d\n", d->dvd_angle);
      goto fail;
    }

    ttn = tt_srpt->title[d->dvd_title].vts_ttn - 1;
    /**
     * Load the VTS information for the title set our title is in.
     */
    vts_file = ifoOpen( dvd, tt_srpt->title[d->dvd_title].title_set_nr );
    if(!vts_file) {
      MP_ERR(stream, "Cannot open the IFO file for DVD title %d.\n", tt_srpt->title[d->dvd_title].title_set_nr );
      goto fail;
    }
    /**
     * We've got enough info, time to open the title set data.
     */
    title = DVDOpenFile(dvd, tt_srpt->title[d->dvd_title].title_set_nr, DVD_READ_TITLE_VOBS);
    if(!title) {
      MP_ERR(stream, "Cannot open title VOBS (VTS_%02d_1.VOB).\n", tt_srpt->title[d->dvd_title].title_set_nr);
      ifoClose( vts_file );
      goto fail;
    }

    MP_VERBOSE(stream, "DVD successfully opened.\n");
    // store data
    d->dvd=dvd;
    d->title=title;
    d->vmg_file=vmg_file;
    d->tt_srpt=tt_srpt;
    d->vts_file=vts_file;
    d->cur_title = d->dvd_title;

    pgc = vts_file->vts_pgcit ? vts_file->vts_pgcit->pgci_srp[ttn].pgc : NULL;
    /**
     * Check number of audio channels and types
     */
    {
      d->nr_of_channels=0;
      if(vts_file->vts_pgcit) {
        int i;
        for(i=0;i<8;i++)
          if(pgc->audio_control[i] & 0x8000) {
            audio_attr_t * audio = &vts_file->vtsi_mat->vts_audio_attr[i];
            int language = 0;
            char tmp[] = "unknown";
            stream_language_t *audio_stream = &d->audio_streams[d->nr_of_channels];

            if(audio->lang_type == 1) {
              language=audio->lang_code;
              tmp[0]=language>>8;
              tmp[1]=language&0xff;
              tmp[2]=0;
            }

            audio_stream->language=language;
            audio_stream->id=pgc->audio_control[i] >> 8 & 7;
            switch(audio->audio_format) {
              case 0: // ac3
                audio_stream->id+=FIRST_AC3_AID;
                break;
              case 6: // dts
                audio_stream->id+=FIRST_DTS_AID;
                break;
              case 2: // mpeg layer 1/2/3
              case 3: // mpeg2 ext
                audio_stream->id+=FIRST_MPG_AID;
                break;
              case 4: // lpcm
                audio_stream->id+=FIRST_PCM_AID;
                break;
           }

           audio_stream->type=audio->audio_format;
           // Pontscho: to my mind, the channels:
           //  1 - stereo
           //  5 - 5.1
           audio_stream->channels=audio->channels;
           MP_INFO(stream, "audio stream: %d format: %s (%s) language: %s aid: %d.\n",
             d->nr_of_channels,
             dvd_audio_stream_types[ audio->audio_format ],
             dvd_audio_stream_channels[ audio->channels ],
             tmp,
             audio_stream->id
           );

           d->nr_of_channels++;
         }
Example #28
0
static int control(struct ao *ao, enum aocontrol cmd, void *arg)
{
    struct wasapi_state *state = ao->priv;
    ao_control_vol_t *vol = arg;
    BOOL mute;

    switch (cmd) {
    case AOCONTROL_GET_VOLUME:
        if (state->opt_exclusive)
            IAudioEndpointVolume_GetMasterVolumeLevelScalar(state->pEndpointVolumeProxy,
                                                            &state->audio_volume);
        else
            ISimpleAudioVolume_GetMasterVolume(state->pAudioVolumeProxy,
                                               &state->audio_volume);

        /* Check whether the user manually changed the volume through the mixer;
           this information is used in exclusive mode to restore the mixer volume on uninit. */
        if (state->audio_volume != state->previous_volume) {
            MP_VERBOSE(state, "Mixer difference: %.2g now, expected %.2g\n",
                       state->audio_volume, state->previous_volume);
            state->initial_volume = state->audio_volume;
        }

        vol->left = vol->right = 100.0f * state->audio_volume;
        return CONTROL_OK;
    case AOCONTROL_SET_VOLUME:
        state->audio_volume = vol->left / 100.f;
        if (state->opt_exclusive)
            IAudioEndpointVolume_SetMasterVolumeLevelScalar(state->pEndpointVolumeProxy,
                                                            state->audio_volume, NULL);
        else
            ISimpleAudioVolume_SetMasterVolume(state->pAudioVolumeProxy,
                                               state->audio_volume, NULL);

        state->previous_volume = state->audio_volume;
        return CONTROL_OK;
    case AOCONTROL_GET_MUTE:
        if (state->opt_exclusive)
            IAudioEndpointVolume_GetMute(state->pEndpointVolumeProxy, &mute);
        else
            ISimpleAudioVolume_GetMute(state->pAudioVolumeProxy, &mute);
        *(bool*)arg = mute;

        return CONTROL_OK;
    case AOCONTROL_SET_MUTE:
        mute = *(bool*)arg;
        if (state->opt_exclusive)
            IAudioEndpointVolume_SetMute(state->pEndpointVolumeProxy, mute, NULL);
        else
            ISimpleAudioVolume_SetMute(state->pAudioVolumeProxy, mute, NULL);

        return CONTROL_OK;
    case AOCONTROL_HAS_PER_APP_VOLUME:
        return CONTROL_TRUE;
    case AOCONTROL_UPDATE_STREAM_TITLE: {
        MP_VERBOSE(state, "Updating stream title to \"%s\"\n", (char*)arg);
        wchar_t *title = mp_from_utf8(NULL, (char*)arg);

        wchar_t *tmp = NULL;

        /* There is a weird race condition in IAudioSessionControl itself --
           it seems that *sometimes* SetDisplayName does not take effect and the
           old title is still reported. Use this loop to retry until the new
           title sticks. */
        do {
            IAudioSessionControl_SetDisplayName(state->pSessionControlProxy, title, NULL);

            SAFE_RELEASE(tmp, CoTaskMemFree(tmp));
            IAudioSessionControl_GetDisplayName(state->pSessionControlProxy, &tmp);
        } while (lstrcmpW(title, tmp));
        SAFE_RELEASE(tmp, CoTaskMemFree(tmp));
        talloc_free(title);

        return CONTROL_OK;
    }
    default:
        return CONTROL_UNKNOWN;
    }
}
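A minimal sketch (editorial addition, not part of the example above) of the same retry idea with a bounded number of attempts, so a persistently stale GetDisplayName() result cannot spin forever; the helper name and the attempt limit are assumptions:

#define COBJMACROS
#include <windows.h>
#include <audiopolicy.h>
#include <stdbool.h>

// Sketch: set the session display name, but give up after a fixed number of
// verification attempts instead of looping indefinitely.
static bool set_title_bounded(IAudioSessionControl *sc, const wchar_t *title)
{
    for (int attempt = 0; attempt < 10; attempt++) {
        IAudioSessionControl_SetDisplayName(sc, title, NULL);

        wchar_t *current = NULL;
        IAudioSessionControl_GetDisplayName(sc, &current);
        bool ok = current && !lstrcmpW(title, current);
        if (current)
            CoTaskMemFree(current); // GetDisplayName allocates with CoTaskMemAlloc
        if (ok)
            return true;            // the new title is visible now
    }
    return false;                   // give up rather than loop forever
}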
Example #29
0
File: cache.c Project: Deadsign/mpv
// Runs in the cache thread.
// Returns true if reading was attempted, and the mutex was temporarily unlocked.
static bool cache_fill(struct priv *s)
{
    int64_t read = s->read_filepos;
    int len = 0;

    // Drop the cache contents only when seeking backward or too far forward.
    // The same policy applies to on-disk files, because dropping loses the
    // backseek cache, which in turn can cause a major bandwidth increase and
    // performance issues with e.g. mov or badly interleaved files.
    if (read < s->min_filepos || read > s->max_filepos + s->seek_limit) {
        MP_VERBOSE(s, "Dropping cache at pos %"PRId64", "
                   "cached range: %"PRId64"-%"PRId64".\n", read,
                   s->min_filepos, s->max_filepos);
        cache_drop_contents(s);
    }

    if (stream_tell(s->stream) != s->max_filepos && s->seekable) {
        MP_VERBOSE(s, "Seeking underlying stream: %"PRId64" -> %"PRId64"\n",
                   stream_tell(s->stream), s->max_filepos);
        stream_seek(s->stream, s->max_filepos);
        if (stream_tell(s->stream) != s->max_filepos)
            goto done;
    }

    // number of buffer bytes which should be preserved in backwards direction
    int64_t back = mp_clipi64(read - s->min_filepos, 0, s->back_size);

    // number of buffer bytes that are valid and can be read
    int64_t newb = FFMAX(s->max_filepos - read, 0);

    // max. number of bytes that can be written (starting from max_filepos)
    int64_t space = s->buffer_size - (newb + back);

    // offset into the buffer that maps to max_filepos
    int pos = s->max_filepos - s->offset;
    if (pos >= s->buffer_size)
        pos -= s->buffer_size; // wrap-around
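    // Worked example with illustrative numbers (not from the source): with
    // buffer_size = 1000, back_size = 200, min_filepos = 300, max_filepos = 900,
    // offset = 0 and read = 800: back = 200, newb = 100, space = 700 and
    // pos = 900. The clamp below then reduces space to buffer_size - pos = 100,
    // so this read does not run past the end of the ring buffer.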

    if (space < FILL_LIMIT) {
        s->idle = true;
        s->reads++; // don't leave the main thread stuck waiting
        return false;
    }

    // limit to end of buffer (without wrapping)
    if (pos + space >= s->buffer_size)
        space = s->buffer_size - pos;

    // limit the read size (otherwise it could block and read the entire buffer in one call)
    space = FFMIN(space, s->stream->read_chunk);

    // back+newb+space <= buffer_size
    int64_t back2 = s->buffer_size - (space + newb); // max back size
    if (s->min_filepos < (read - back2))
        s->min_filepos = read - back2;

    // The read call might take a long time and block, so drop the lock.
    pthread_mutex_unlock(&s->mutex);
    len = stream_read_partial(s->stream, &s->buffer[pos], space);
    pthread_mutex_lock(&s->mutex);

    double pts;
    if (stream_control(s->stream, STREAM_CTRL_GET_CURRENT_TIME, &pts) <= 0)
        pts = MP_NOPTS_VALUE;
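    // Tag every BYTE_META_CHUNK_SIZE-sized chunk touched by this read with the
    // current stream pts (or MP_NOPTS_VALUE), presumably so byte positions in
    // the cache can later be mapped back to an approximate timestamp.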
    for (int64_t b_pos = pos; b_pos < pos + len + BYTE_META_CHUNK_SIZE;
         b_pos += BYTE_META_CHUNK_SIZE)
    {
        s->bm[b_pos / BYTE_META_CHUNK_SIZE] = (struct byte_meta){.stream_pts = pts};
    }

    s->max_filepos += len;
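    // If the write ended exactly at the end of the ring buffer, advance offset
    // by a full buffer_size so that future pos computations (max_filepos - offset)
    // stay within one buffer length.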
    if (pos + len == s->buffer_size)
        s->offset += s->buffer_size; // wrap...

done:
    s->eof = len <= 0;
    s->idle = s->eof;
    s->reads++;
    if (s->eof)
        MP_VERBOSE(s, "EOF reached.\n");

    pthread_cond_signal(&s->wakeup);

    return true;
}
Example #30
0
static int create(struct gl_hwdec *hw)
{
    GL *gl = hw->gl;

    struct priv *p = talloc_zero(hw, struct priv);
    hw->priv = p;
    p->current_image.buf = p->current_image.image_id = VA_INVALID_ID;
    p->log = hw->log;

    if (hw->hwctx)
        return -1;
    if (!eglGetCurrentDisplay())
        return -1;

    const char *exts = eglQueryString(eglGetCurrentDisplay(), EGL_EXTENSIONS);
    if (!exts)
        return -1;

    if (!strstr(exts, "EXT_image_dma_buf_import") ||
        !strstr(exts, "EGL_KHR_image_base") ||
        !strstr(gl->extensions, "GL_OES_EGL_image") ||
        !(gl->mpgl_caps & MPGL_CAP_TEX_RG))
        return -1;

    // EGL_KHR_image_base
    p->CreateImageKHR = (void *)eglGetProcAddress("eglCreateImageKHR");
    p->DestroyImageKHR = (void *)eglGetProcAddress("eglDestroyImageKHR");
    // GL_OES_EGL_image
    p->EGLImageTargetTexture2DOES =
        (void *)eglGetProcAddress("glEGLImageTargetTexture2DOES");

    if (!p->CreateImageKHR || !p->DestroyImageKHR ||
        !p->EGLImageTargetTexture2DOES)
        return -1;

    p->display = create_native_va_display(gl);
    if (!p->display)
        return -1;

    p->ctx = va_initialize(p->display, p->log, true);
    if (!p->ctx) {
        vaTerminate(p->display);
        return -1;
    }

    if (hw->probing && va_guess_if_emulated(p->ctx)) {
        destroy(hw);
        return -1;
    }

    MP_VERBOSE(p, "using VAAPI EGL interop\n");

    insane_hack(hw);
    if (!test_format(hw)) {
        destroy(hw);
        return -1;
    }

    hw->hwctx = &p->ctx->hwctx;
    return 0;
}
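The extension checks in create() use plain strstr(), which can in principle match one extension name inside another (e.g. searching for "EGL_KHR_image" would also match "EGL_KHR_image_base"). A minimal sketch of a stricter, space-delimited token match, assuming the usual space-separated extension string; the helper name is an assumption and not part of the original code:

#include <stdbool.h>
#include <string.h>

// Return true if 'ext' occurs in 'exts' as a complete, space-delimited token.
static bool has_ext_token(const char *exts, const char *ext)
{
    size_t len = strlen(ext);
    const char *p = exts;
    while ((p = strstr(p, ext)) != NULL) {
        bool starts_token = (p == exts) || (p[-1] == ' ');
        bool ends_token   = (p[len] == ' ') || (p[len] == '\0');
        if (starts_token && ends_token)
            return true;
        p += len; // partial match; keep scanning the rest of the string
    }
    return false;
}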