Example 1
RetCode decode_slice(PictureDecoderData* pdd, nal_unit_t* nalu)
{
  RetCode r;
  slice_header_t* sh;
  seq_parameter_set_rbsp_t* sps;
  pic_parameter_set_rbsp_t* pps;
  Picture* pic;

  if (RET_SUCCESS != (r=parse_slice_header(nalu, &sh)))
    return r;

  sps = sh->sps;
  pps = sh->pps;

  if (is_new_picture(pdd->prev_sh, sh, sps))
  {
    // Execute actions for starting a new picture decoding
    start_picture(pdd, sh, sps, pps);
  }

  // Fetch the current picture unconditionally so pic is also valid for
  // slices that continue an already-started picture
  pic = pdd->pic;

  pic->field_sh[pic->slice_num] = sh;

  // Store the slice header to compare it with the next slice
  pdd->prev_sh = sh;

  unsigned int entropy_coding_mode_flag = pps->entropy_coding_mode_flag;
  if (RET_SUCCESS != (r=decode_slice_data(pdd, nalu, sh, sps, pps, entropy_coding_mode_flag)))
    return r;

  // Keep the parse outside the assert so it still runs when asserts are
  // compiled out
  r = parse_rbsp_slice_trailing_bits(nalu, entropy_coding_mode_flag);
  LUD_DEBUG_ASSERT(RET_SUCCESS == r);

  return RET_SUCCESS;
}
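
For context, an is_new_picture check like the one above typically follows the H.264 rules for detecting the first VCL NAL unit of a new primary coded picture (spec section 7.4.1.2.4): the slice headers of consecutive slices are compared field by field. Below is a simplified sketch; the field names on slice_header_t are assumptions for illustration, not the original API, and several spec conditions (e.g. nal_ref_idc and the pic_order_cnt_type == 1 deltas) are omitted.

// Simplified sketch of a new-picture test per H.264 7.4.1.2.4.
// Field names on slice_header_t are assumed for illustration.
static int is_new_picture(const slice_header_t* prev, const slice_header_t* cur,
                          const seq_parameter_set_rbsp_t* sps)
{
  if (!prev)
    return 1;  // first slice of the stream
  if (cur->frame_num != prev->frame_num)
    return 1;
  if (cur->pic_parameter_set_id != prev->pic_parameter_set_id)
    return 1;
  if (cur->field_pic_flag != prev->field_pic_flag)
    return 1;
  if (cur->field_pic_flag && cur->bottom_field_flag != prev->bottom_field_flag)
    return 1;
  // For IDR pictures, a change of idr_pic_id starts a new picture
  if (cur->idr_pic_flag && prev->idr_pic_flag &&
      cur->idr_pic_id != prev->idr_pic_id)
    return 1;
  // Picture order count changes (for pic_order_cnt_type == 0)
  if (sps->pic_order_cnt_type == 0 &&
      (cur->pic_order_cnt_lsb != prev->pic_order_cnt_lsb ||
       cur->delta_pic_order_cnt_bottom != prev->delta_pic_order_cnt_bottom))
    return 1;
  return 0;
}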
Example 2
static int decode_iaxc_slice(struct iaxc_video_codec * c, int inlen,
		char * in, int * outlen, char * out)
{
	struct decoder_ctx *d = (struct decoder_ctx *) c->decstate;
	struct slice_header_t * sh_saved = &d->slice_header;
	struct slice_header_t sh_this;
	char * inp;
	int ret;

	inp = parse_slice_header(in, &sh_this);

	if ( !inp )
		return -1;

	inlen -= inp - in;

	if ( sh_this.source_id == sh_saved->source_id )
	{
		unsigned char frame_delta;

		frame_delta = sh_this.frame_index - sh_saved->frame_index;

		if ( frame_delta > 20 )
		{
			/* Deltas over 20 mean the 8-bit frame index
			 * wrapped around, i.e. this slice belongs to an
			 * old frame. It's too late, we ignore it.
			 */
			return 1;
		}
		else if ( frame_delta > 0 )
		{
			/* This slice belongs to a future frame */
			if ( sh_saved->slice_index > 0 )
			{
				/* We have received slices for a previous
				 * frame (e.g. the one we were previously
				 * working on), so we go ahead and send this
				 * partial frame to the decoder and get setup
				 * for the new frame.
				 */

				ret = pass_frame_to_decoder(d->avctx, d->picture,
						d->frame_size, d->frame_buf,
						outlen, out);

				reset_decoder_frame_state(d);

				if ( ret )
					return -1;
			}

			sh_saved->frame_index = sh_this.frame_index;
		}
	}
	else
	{
		sh_saved->source_id = sh_this.source_id;
		sh_saved->frame_index = sh_this.frame_index;
		sh_saved->slice_index = 0;
		d->frame_size = 0;
	}

	if ( c->fragsize * sh_this.slice_index + inlen > d->frame_buf_len )
	{
		fprintf(stderr,
			"codec_ffmpeg: decode: slice overflows decoder frame buffer\n");
		return -1;
	}

	memcpy(d->frame_buf + c->fragsize * sh_this.slice_index,
			inp, inlen);
	sh_saved->slice_index++;
	d->frame_size = c->fragsize * sh_this.slice_index + inlen;

	if ( sh_saved->slice_index < sh_this.num_slices )
	{
		/* Do not decode yet, there are more slices coming for
		 * this frame.
		 */
		return 1;
	}

	ret = pass_frame_to_decoder(d->avctx, d->picture, d->frame_size,
			d->frame_buf, outlen, out);

	reset_decoder_frame_state(d);

	if ( ret )
		return -1;

	return 0;
}
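
The frame_delta test above relies on modulo-256 arithmetic: both frame indices live in an unsigned char, so a slice from a past frame yields a large delta after wraparound (e.g. 255 for one frame back), while a slice from a near-future frame yields a small one, and 20 is the cutoff between the two. A self-contained sketch of the same trick (the function name is illustrative):

#include <stdio.h>

/* Classify an incoming frame index against the frame currently being
 * assembled, using the same modulo-256 delta trick as above.
 * Returns -1 for "old, drop it", 0 for "current frame", 1 for "future frame".
 */
static int classify_frame(unsigned char current, unsigned char incoming)
{
	unsigned char delta = incoming - current;  /* wraps modulo 256 */

	if ( delta == 0 )
		return 0;       /* same frame */
	if ( delta > 20 )
		return -1;      /* wrapped around: incoming is in the past */
	return 1;               /* small positive delta: a future frame */
}

int main(void)
{
	printf("%d\n", classify_frame(10, 10));   /* 0: same frame */
	printf("%d\n", classify_frame(10, 12));   /* 1: future frame */
	printf("%d\n", classify_frame(10, 9));    /* -1: delta is 255, old */
	printf("%d\n", classify_frame(250, 3));   /* 1: wrapped forward, delta 9 */
	return 0;
}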
Example 3
static
VdpStatus
softVdpDecoderRender_h264(VdpDecoder decoder, VdpDecoderData *decoderData,
                          VdpVideoSurfaceData *dstSurfData, VdpPictureInfo const *picture_info,
                          uint32_t bitstream_buffer_count,
                          VdpBitstreamBuffer const *bitstream_buffers)
{
    VdpDeviceData *deviceData = decoderData->device;
    VADisplay va_dpy = deviceData->va_dpy;
    VAStatus status;
    VdpStatus vs, err_code;
    VdpPictureInfoH264 const *vdppi = (void *)picture_info;

    // TODO: figure out where to get level
    uint32_t level = 41;

    // preparing picture parameters and IQ matrix
    VABufferID pic_param_buf, iq_matrix_buf;
    VAPictureParameterBufferH264 pic_param;
    VAIQMatrixBufferH264 iq_matrix;

    // merged bitstream buffer; declared up front and NULL-initialized so
    // that the cleanup at quit: can free it unconditionally on every path
    uint8_t *merged_bitstream = NULL;

    vs = h264_translate_reference_frames(dstSurfData, decoder, decoderData, &pic_param, vdppi);
    if (VDP_STATUS_OK != vs) {
        if (VDP_STATUS_RESOURCES == vs) {
            traceError("error (softVdpDecoderRender): no surfaces left in buffer\n");
            err_code = VDP_STATUS_RESOURCES;
        } else {
            err_code = VDP_STATUS_ERROR;
        }
        goto quit;
    }

    h264_translate_pic_param(&pic_param, decoderData->width, decoderData->height, vdppi, level);
    h264_translate_iq_matrix(&iq_matrix, vdppi);

    glx_context_lock();
    status = vaCreateBuffer(va_dpy, decoderData->context_id, VAPictureParameterBufferType,
        sizeof(VAPictureParameterBufferH264), 1, &pic_param, &pic_param_buf);
    if (VA_STATUS_SUCCESS != status) {
        glx_context_unlock();
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    status = vaCreateBuffer(va_dpy, decoderData->context_id, VAIQMatrixBufferType,
        sizeof(VAIQMatrixBufferH264), 1, &iq_matrix, &iq_matrix_buf);
    if (VA_STATUS_SUCCESS != status) {
        glx_context_unlock();
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    // send data to decoding hardware
    status = vaBeginPicture(va_dpy, decoderData->context_id, dstSurfData->va_surf);
    if (VA_STATUS_SUCCESS != status) {
        glx_context_unlock();
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }
    status = vaRenderPicture(va_dpy, decoderData->context_id, &pic_param_buf, 1);
    if (VA_STATUS_SUCCESS != status) {
        glx_context_unlock();
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }
    status = vaRenderPicture(va_dpy, decoderData->context_id, &iq_matrix_buf, 1);
    if (VA_STATUS_SUCCESS != status) {
        glx_context_unlock();
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    vaDestroyBuffer(va_dpy, pic_param_buf);
    vaDestroyBuffer(va_dpy, iq_matrix_buf);
    glx_context_unlock();

    // merge bitstream buffers
    int total_bitstream_bytes = 0;
    for (unsigned int k = 0; k < bitstream_buffer_count; k ++)
        total_bitstream_bytes += bitstream_buffers[k].bitstream_bytes;

    merged_bitstream = malloc(total_bitstream_bytes);
    if (NULL == merged_bitstream) {
        err_code = VDP_STATUS_RESOURCES;
        goto quit;
    }

    unsigned char *ptr = merged_bitstream;
    for (unsigned int k = 0; k < bitstream_buffer_count; k ++) {
        memcpy(ptr, bitstream_buffers[k].bitstream, bitstream_buffers[k].bitstream_bytes);
        ptr += bitstream_buffers[k].bitstream_bytes;
    }

    // Slice parameters

    // All slice data has been merged into one contiguous buffer, but we must
    // supply slices to the hardware decoder one by one, so we need to delimit
    // them. VDPAU requires bitstream buffers to include the slice start code
    // (0x00 0x00 0x01); these start codes are used below to calculate the
    // offsets and sizes of the slice data.

    rbsp_state_t st_g;      // reference, global state
    rbsp_attach_buffer(&st_g, merged_bitstream, total_bitstream_bytes);
    int nal_offset = rbsp_navigate_to_nal_unit(&st_g);
    if (nal_offset < 0) {
        traceError("error (softVdpDecoderRender): no NAL header\n");
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    do {
        VASliceParameterBufferH264 sp_h264;
        memset(&sp_h264, 0, sizeof(VASliceParameterBufferH264));

        // make a copy of the global rbsp state for use in the slice header
        // parser; st_g itself is then advanced to the next NAL unit so we
        // can tell where the current slice ends
        rbsp_state_t st = rbsp_copy_state(&st_g);
        rbsp_reset_bit_counter(&st);
        int nal_offset_next = rbsp_navigate_to_nal_unit(&st_g);

        // calculate the end of the current slice. The -3 accounts for the
        // length of the slice start code (0x00 0x00 0x01).
        const unsigned int end_pos = (nal_offset_next > 0) ? (nal_offset_next - 3)
                                                           : total_bitstream_bytes;
        sp_h264.slice_data_size     = end_pos - nal_offset;
        sp_h264.slice_data_offset   = 0;
        sp_h264.slice_data_flag     = VA_SLICE_DATA_FLAG_ALL;

        // TODO: this may not be entirely true for YUV444,
        // but as long as we limit ourselves to YUV420, that's ok
        int ChromaArrayType = pic_param.seq_fields.bits.chroma_format_idc;

        // parse slice header and use its data to fill slice parameter buffer
        parse_slice_header(&st, &pic_param, ChromaArrayType, vdppi->num_ref_idx_l0_active_minus1,
                           vdppi->num_ref_idx_l1_active_minus1, &sp_h264);

        VABufferID slice_parameters_buf;
        glx_context_lock();
        status = vaCreateBuffer(va_dpy, decoderData->context_id, VASliceParameterBufferType,
            sizeof(VASliceParameterBufferH264), 1, &sp_h264, &slice_parameters_buf);
        if (VA_STATUS_SUCCESS != status) {
            glx_context_unlock();
            err_code = VDP_STATUS_ERROR;
            goto quit;
        }
        status = vaRenderPicture(va_dpy, decoderData->context_id, &slice_parameters_buf, 1);
        if (VA_STATUS_SUCCESS != status) {
            glx_context_unlock();
            err_code = VDP_STATUS_ERROR;
            goto quit;
        }

        VABufferID slice_buf;
        status = vaCreateBuffer(va_dpy, decoderData->context_id, VASliceDataBufferType,
            sp_h264.slice_data_size, 1, merged_bitstream + nal_offset, &slice_buf);
        if (VA_STATUS_SUCCESS != status) {
            glx_context_unlock();
            err_code = VDP_STATUS_ERROR;
            goto quit;
        }

        status = vaRenderPicture(va_dpy, decoderData->context_id, &slice_buf, 1);
        if (VA_STATUS_SUCCESS != status) {
            glx_context_unlock();
            err_code = VDP_STATUS_ERROR;
            goto quit;
        }

        vaDestroyBuffer(va_dpy, slice_parameters_buf);
        vaDestroyBuffer(va_dpy, slice_buf);
        glx_context_unlock();

        if (nal_offset_next < 0)        // nal_offset_next is -1 when no further
            break;                      // start code exists: that was the final slice
        nal_offset = nal_offset_next;
    } while (1);

    glx_context_lock();
    status = vaEndPicture(va_dpy, decoderData->context_id);
    glx_context_unlock();
    if (VA_STATUS_SUCCESS != status) {
        err_code = VDP_STATUS_ERROR;
        goto quit;
    }

    err_code = VDP_STATUS_OK;
quit:
    free(merged_bitstream);     // free(NULL) is a no-op, so this is safe on all paths
    return err_code;
}
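
The slice loop above leans on rbsp_navigate_to_nal_unit to find Annex B start codes (0x00 0x00 0x01), and on the fact that the next start code minus 3 marks the end of the current slice. A minimal scanner in the same spirit (illustrative only, not the library's actual implementation):

#include <stddef.h>
#include <stdint.h>

/* Find the next 0x00 0x00 0x01 start code at or after *pos and advance
 * *pos past it. Returns the offset of the first byte after the start
 * code (i.e. the start of the NAL unit), or -1 if none is found.
 * Illustrative sketch; rbsp_navigate_to_nal_unit keeps its own state.
 */
static int find_next_nal_unit(const uint8_t *buf, size_t len, size_t *pos)
{
    size_t i = *pos;
    while (i + 3 <= len) {
        if (buf[i] == 0x00 && buf[i + 1] == 0x00 && buf[i + 2] == 0x01) {
            *pos = i + 3;
            return (int)(i + 3);
        }
        i++;
    }
    return -1;
}

Two consecutive calls reproduce the end_pos computation in the loop above: the current slice runs from the first returned offset up to the second returned offset minus 3, or to the end of the buffer when the second call returns -1.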