Example #1
static int rv10_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MpegEncContext *s = avctx->priv_data;
    int i;
    AVFrame *pict = data;
    int slice_count;
    const uint8_t *slices_hdr = NULL;

    av_dlog(avctx, "*****frame %d size=%d\n", avctx->frame_number, buf_size);

    /* no supplementary picture */
    if (buf_size == 0) {
        return 0;
    }

    if(!avctx->slice_count){
        slice_count = (*buf++) + 1;
        slices_hdr = buf + 4;
        buf += 8 * slice_count;
    }else
        slice_count = avctx->slice_count;

    for(i=0; i<slice_count; i++){
        int offset= get_slice_offset(avctx, slices_hdr, i);
        int size, size2;

        if(i+1 == slice_count)
            size= buf_size - offset;
        else
            size= get_slice_offset(avctx, slices_hdr, i+1) - offset;

        if(i+2 >= slice_count)
            size2= buf_size - offset;
        else
            size2= get_slice_offset(avctx, slices_hdr, i+2) - offset;

        if(rv10_decode_packet(avctx, buf+offset, size, size2) > 8*size)
            i++;
    }

    if(s->current_picture_ptr != NULL && s->mb_y>=s->mb_height){
        ff_er_frame_end(s);
        MPV_frame_end(s);

        if (s->pict_type == FF_B_TYPE || s->low_delay) {
            *pict= *(AVFrame*)s->current_picture_ptr;
        } else if (s->last_picture_ptr != NULL) {
            *pict= *(AVFrame*)s->last_picture_ptr;
        }

        if(s->last_picture_ptr || s->low_delay){
            *data_size = sizeof(AVFrame);
            ff_print_debug_info(s, pict);
        }
        s->current_picture_ptr= NULL; // so we can detect if frame_end wasn't called (find some nicer solution...)
    }

    return buf_size;
}
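
The header that rv10_decode_frame skips over before looping can be restated on its own. The sketch below is an assumption pieced together from the pointer arithmetic above (get_slice_offset() itself is not shown): one count byte, then slice_count 8-byte table entries whose 32-bit little-endian offset field starts 4 bytes into each entry. rd_le32() and parse_rv10_slice_table() are hypothetical names, not decoder helpers.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper standing in for AV_RL32: 32-bit little-endian read. */
uint32_t rd_le32(const uint8_t *p)
{
    return (uint32_t)p[0]       | (uint32_t)p[1] << 8 |
           (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* Returns the slice count, or -1 if the buffer is too short. */
int parse_rv10_slice_table(const uint8_t *buf, int buf_size,
                           uint32_t *offsets, int max_slices)
{
    const uint8_t *slices_hdr;
    int slice_count, i;

    if (buf_size < 1)
        return -1;
    slice_count = buf[0] + 1;                 /* the (*buf++) + 1 above */
    if (slice_count > max_slices || buf_size < 1 + 8 * slice_count)
        return -1;

    slices_hdr = buf + 1 + 4;                 /* count byte, then skip 4 */
    for (i = 0; i < slice_count; i++)
        offsets[i] = rd_le32(slices_hdr + 8 * i);
    return slice_count;
}

int main(void)
{
    /* one slice whose data starts at byte 9, right after the table */
    const uint8_t pkt[9] = { 0x00, 0, 0, 0, 0, 0x09, 0x00, 0x00, 0x00 };
    uint32_t offsets[8];
    int n = parse_rv10_slice_table(pkt, sizeof(pkt), offsets, 8);

    printf("%d slice(s), first offset %u\n", n, offsets[0]);
    return 0;
}
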
Example #2
static int decode_slice(MpegEncContext *s){
    const int part_mask= s->partitioned_frame ? (ER_AC_END|ER_AC_ERROR) : 0x7F;
    const int mb_size = 16;
    s->last_resync_gb= s->gb;
    s->first_slice_line= 1;

    s->resync_mb_x= s->mb_x;
    s->resync_mb_y= s->mb_y;

    ff_set_qscale(s, s->qscale);

    if (s->avctx->hwaccel) {
        const uint8_t *start= s->gb.buffer + get_bits_count(&s->gb)/8;
        const uint8_t *end  = ff_h263_find_resync_marker(start + 1, s->gb.buffer_end);
        skip_bits_long(&s->gb, 8*(end - start));
        return s->avctx->hwaccel->decode_slice(s->avctx, start, end - start);
    }

    if(s->partitioned_frame){
        const int qscale= s->qscale;

        if(CONFIG_MPEG4_DECODER && s->codec_id==AV_CODEC_ID_MPEG4){
            if(ff_mpeg4_decode_partitions(s) < 0)
                return -1;
        }

        /* restore variables which were modified */
        s->first_slice_line=1;
        s->mb_x= s->resync_mb_x;
        s->mb_y= s->resync_mb_y;
        ff_set_qscale(s, qscale);
    }

    for(; s->mb_y < s->mb_height; s->mb_y++) {
        /* per-row end of slice checks */
        if(s->msmpeg4_version){
            if(s->resync_mb_y + s->slice_height == s->mb_y){
                ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);

                return 0;
            }
        }

        if(s->msmpeg4_version==1){
            s->last_dc[0]=
            s->last_dc[1]=
            s->last_dc[2]= 128;
        }

        ff_init_block_index(s);
        for(; s->mb_x < s->mb_width; s->mb_x++) {
            int ret = 0; /* the av_dlog() below reads ret before decode_mb() sets it */

            ff_update_block_index(s);

            if(s->resync_mb_x == s->mb_x && s->resync_mb_y+1 == s->mb_y){
                s->first_slice_line=0;
            }

            /* DCT & quantize */

            s->mv_dir = MV_DIR_FORWARD;
            s->mv_type = MV_TYPE_16X16;
//            s->mb_skipped = 0;
            av_dlog(s, "%d %d %06X\n",
                    ret, get_bits_count(&s->gb), show_bits(&s->gb, 24));
            ret= s->decode_mb(s, s->block);

            if (s->pict_type!=AV_PICTURE_TYPE_B)
                ff_h263_update_motion_val(s);

            if(ret<0){
                const int xy= s->mb_x + s->mb_y*s->mb_stride;
                if(ret==SLICE_END){
                    ff_MPV_decode_mb(s, s->block);
                    if(s->loop_filter)
                        ff_h263_loop_filter(s);

                    ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_MB_END&part_mask);

                    s->padding_bug_score--;

                    if(++s->mb_x >= s->mb_width){
                        s->mb_x=0;
                        ff_draw_horiz_band(s, s->mb_y*mb_size, mb_size);
                        ff_MPV_report_decode_progress(s);
                        s->mb_y++;
                    }
                    return 0;
                }else if(ret==SLICE_NOEND){
                    av_log(s->avctx, AV_LOG_ERROR, "Slice mismatch at MB: %d\n", xy);
                    ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x+1, s->mb_y, ER_MB_END&part_mask);
                    return -1;
                }
                av_log(s->avctx, AV_LOG_ERROR, "Error at MB: %d\n", xy);
                ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR&part_mask);

                return -1;
            }

            ff_MPV_decode_mb(s, s->block);
            if(s->loop_filter)
                ff_h263_loop_filter(s);
        }

        ff_draw_horiz_band(s, s->mb_y*mb_size, mb_size);
        ff_MPV_report_decode_progress(s);

        s->mb_x= 0;
    }

    assert(s->mb_x==0 && s->mb_y==s->mb_height);

    if(s->codec_id==AV_CODEC_ID_MPEG4
       && (s->workaround_bugs&FF_BUG_AUTODETECT)
       && get_bits_left(&s->gb) >= 48
       && show_bits(&s->gb, 24)==0x4010
       && !s->data_partitioning)
        s->padding_bug_score+=32;

    /* try to detect the padding bug */
    if(      s->codec_id==AV_CODEC_ID_MPEG4
       &&   (s->workaround_bugs&FF_BUG_AUTODETECT)
       &&    get_bits_left(&s->gb) >=0
       &&    get_bits_left(&s->gb) < 48
//       &&   !s->resync_marker
       &&   !s->data_partitioning){

        const int bits_count= get_bits_count(&s->gb);
        const int bits_left = s->gb.size_in_bits - bits_count;

        if(bits_left==0){
            s->padding_bug_score+=16;
        } else if(bits_left != 1){
            int v= show_bits(&s->gb, 8);
            v|= 0x7F >> (7-(bits_count&7));

            if(v==0x7F && bits_left<=8)
                s->padding_bug_score--;
            else if(v==0x7F && ((get_bits_count(&s->gb)+8)&8) && bits_left<=16)
                s->padding_bug_score+= 4;
            else
                s->padding_bug_score++;
        }
    }
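
The padding-bug test at the end of this function is compact enough to be worth restating in isolation. Below is a minimal sketch (an assumption: a standalone rewrite, not a helper the decoder actually has) of the masking trick: MPEG-4 stuffing is a single 0 bit followed by 1 bits up to the byte boundary, so after forcing the bits that belong to the next byte to 1, a correctly stuffed stream shows exactly 0x7F.

#include <stdint.h>

/* next8bits is what show_bits(&s->gb, 8) returned; bits_count is the
 * current bit position, as in the code above. */
int looks_like_mpeg4_stuffing(unsigned next8bits, int bits_count)
{
    unsigned v = next8bits & 0xFF;

    /* the low (bits_count & 7) bits shown here already belong to the
     * following byte, so force them to 1 before comparing */
    v |= 0x7F >> (7 - (bits_count & 7));
    return v == 0x7F;
}
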
Example #3
static int load_ipmovie_packet(IPMVEContext *s, AVIOContext *pb,
    AVPacket *pkt) {

    int chunk_type;

    if (s->audio_chunk_offset) {

        /* adjust for PCM audio by skipping chunk header */
        if (s->audio_type != CODEC_ID_INTERPLAY_DPCM) {
            s->audio_chunk_offset += 6;
            s->audio_chunk_size -= 6;
        }

        avio_seek(pb, s->audio_chunk_offset, SEEK_SET);
        s->audio_chunk_offset = 0;

        if (s->audio_chunk_size != av_get_packet(pb, pkt, s->audio_chunk_size))
            return CHUNK_EOF;

        pkt->stream_index = s->audio_stream_index;
        pkt->pts = s->audio_frame_count;

        /* audio frame maintenance */
        if (s->audio_type != CODEC_ID_INTERPLAY_DPCM)
            s->audio_frame_count +=
            (s->audio_chunk_size / s->audio_channels / (s->audio_bits / 8));
        else
            s->audio_frame_count +=
                (s->audio_chunk_size - 6) / s->audio_channels;

        av_dlog(NULL, "sending audio frame with pts %"PRId64" (%d audio frames)\n",
                pkt->pts, s->audio_frame_count);

        chunk_type = CHUNK_VIDEO;

    } else if (s->decode_map_chunk_offset) {

        /* send both the decode map and the video data together */

        if (av_new_packet(pkt, s->decode_map_chunk_size + s->video_chunk_size))
            return CHUNK_NOMEM;

        if (s->has_palette) {
            uint8_t *pal;

            pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
                                          AVPALETTE_SIZE);
            if (pal) {
                memcpy(pal, s->palette, AVPALETTE_SIZE);
                s->has_palette = 0;
            }
        }

        pkt->pos= s->decode_map_chunk_offset;
        avio_seek(pb, s->decode_map_chunk_offset, SEEK_SET);
        s->decode_map_chunk_offset = 0;

        if (avio_read(pb, pkt->data, s->decode_map_chunk_size) !=
            s->decode_map_chunk_size) {
            av_free_packet(pkt);
            return CHUNK_EOF;
        }

        avio_seek(pb, s->video_chunk_offset, SEEK_SET);
        s->video_chunk_offset = 0;

        if (avio_read(pb, pkt->data + s->decode_map_chunk_size,
            s->video_chunk_size) != s->video_chunk_size) {
            av_free_packet(pkt);
            return CHUNK_EOF;
        }

        pkt->stream_index = s->video_stream_index;
        pkt->pts = s->video_pts;

        av_dlog(NULL, "sending video frame with pts %"PRId64"\n", pkt->pts);

        s->video_pts += s->frame_pts_inc;

        chunk_type = CHUNK_VIDEO;

    } else {

        avio_seek(pb, s->next_chunk_offset, SEEK_SET);
        chunk_type = CHUNK_DONE;

    }

    return chunk_type;
}
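
The audio frame-count bookkeeping above subtracts the 6-byte chunk header at two different points depending on the codec path. The sketch below (an assumption: a hypothetical helper, not part of the demuxer) does the same arithmetic in one place.

/* Frames carried by one Interplay audio chunk, including its 6-byte
 * chunk header.  PCM stores channels * (bits / 8) bytes per frame;
 * Interplay DPCM stores one byte per sample. */
unsigned ipmovie_chunk_frames(unsigned raw_chunk_size, unsigned channels,
                              unsigned bits_per_sample, int is_dpcm)
{
    unsigned payload = raw_chunk_size - 6;   /* drop the chunk header */

    if (is_dpcm)
        return payload / channels;
    return payload / channels / (bits_per_sample / 8);
}
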
Example #4
/**
 * Parse the header of an LPCM frame read from an MPEG-TS stream
 * @param avctx the codec context
 * @param header pointer to the first four bytes of the data packet
 */
static int pcm_bluray_parse_header(AVCodecContext *avctx,
                                   const uint8_t *header)
{
	static const uint8_t bits_per_samples[4] = { 0, 16, 20, 24 };
	static const uint32_t channel_layouts[16] =
	{
		0, AV_CH_LAYOUT_MONO, 0, AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_SURROUND,
		AV_CH_LAYOUT_2_1, AV_CH_LAYOUT_4POINT0, AV_CH_LAYOUT_2_2, AV_CH_LAYOUT_5POINT0,
		AV_CH_LAYOUT_5POINT1, AV_CH_LAYOUT_7POINT0, AV_CH_LAYOUT_7POINT1, 0, 0, 0, 0
	};
	static const uint8_t channels[16] =
	{
		0, 1, 0, 2, 3, 3, 4, 4, 5, 6, 7, 8, 0, 0, 0, 0
	};
	uint8_t channel_layout = header[2] >> 4;

	if (avctx->debug & FF_DEBUG_PICT_INFO)
		av_dlog(avctx, "pcm_bluray_parse_header: header = %02x%02x%02x%02x\n",
		        header[0], header[1], header[2], header[3]);

	/* get the sample depth and derive the sample format from it */
	avctx->bits_per_coded_sample = bits_per_samples[header[3] >> 6];
	if (!avctx->bits_per_coded_sample)
	{
		av_log(avctx, AV_LOG_ERROR, "unsupported sample depth (0)\n");
		return -1;
	}
	avctx->sample_fmt = avctx->bits_per_coded_sample == 16 ? AV_SAMPLE_FMT_S16 :
	                    AV_SAMPLE_FMT_S32;

	/* get the sample rate. Not all values are known or exist. */
	switch (header[2] & 0x0f)
	{
	case 1:
		avctx->sample_rate = 48000;
		break;
	case 4:
		avctx->sample_rate = 96000;
		break;
	case 5:
		avctx->sample_rate = 192000;
		break;
	default:
		avctx->sample_rate = 0;
		av_log(avctx, AV_LOG_ERROR, "unsupported sample rate (%d)\n",
		       header[2] & 0x0f);
		return -1;
	}

	/*
	 * get the channel number (and mapping). Not all values are known or exist.
	 * It must be noted that the number of channels in the MPEG stream can
	 * differ from the actual meaningful number, e.g. mono audio still has two
	 * channels, one being empty.
	 */
	avctx->channel_layout  = channel_layouts[channel_layout];
	avctx->channels        =        channels[channel_layout];
	if (!avctx->channels)
	{
		av_log(avctx, AV_LOG_ERROR, "unsupported channel configuration (%d)\n",
		       channel_layout);
		return -1;
	}

	avctx->bit_rate = avctx->channels * avctx->sample_rate *
	                  avctx->bits_per_coded_sample;

	if (avctx->debug & FF_DEBUG_PICT_INFO)
		av_dlog(avctx,
		        "pcm_bluray_parse_header: %d channels, %d bits per sample, %d kHz, %d kbit\n",
		        avctx->channels, avctx->bits_per_coded_sample,
		        avctx->sample_rate, avctx->bit_rate);
	return 0;
}
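
A minimal driver for the header format parsed above, with the same lookup tables reduced to the fields it prints (renamed locally). The header bytes are an assumption chosen to decode cleanly: layout code 3 = stereo, rate code 1 = 48 kHz, depth code 1 = 16 bits.

#include <stdint.h>
#include <stdio.h>

static const uint8_t bps_tab[4]   = { 0, 16, 20, 24 };
static const uint8_t ch_tab[16]   = { 0, 1, 0, 2, 3, 3, 4, 4, 5, 6, 7, 8, 0, 0, 0, 0 };
static const int     rate_tab[16] = { 0, 48000, 0, 0, 96000, 192000 }; /* only codes 1, 4, 5 are known */

int main(void)
{
    const uint8_t header[4] = { 0x00, 0x00, 0x31, 0x40 };

    int layout = header[2] >> 4;             /* high nibble: channel configuration */
    int rate   = rate_tab[header[2] & 0x0f]; /* low nibble: sample rate code */
    int bits   = bps_tab[header[3] >> 6];    /* top two bits: sample depth code */

    printf("channels=%d rate=%d bits=%d\n", ch_tab[layout], rate, bits);
    return 0;
}
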
Example #5
File: nsvdec.c Project: komh/kmp
static int nsv_parse_NSVf_header(AVFormatContext *s)
{
    NSVContext *nsv = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned int av_unused file_size;
    unsigned int size;
    int64_t duration;
    int strings_size;
    int table_entries;
    int table_entries_used;

    av_dlog(s, "%s()\n", __FUNCTION__);

    nsv->state = NSV_UNSYNC; /* in case we fail */

    size = avio_rl32(pb);
    if (size < 28)
        return -1;
    nsv->NSVf_end = size;

    //s->file_size = (uint32_t)avio_rl32(pb);
    file_size = (uint32_t)avio_rl32(pb);
    av_dlog(s, "NSV NSVf chunk_size %u\n", size);
    av_dlog(s, "NSV NSVf file_size %u\n", file_size);

    nsv->duration = duration = avio_rl32(pb); /* in ms */
    av_dlog(s, "NSV NSVf duration %"PRId64" ms\n", duration);
    // XXX: store it in AVStreams

    strings_size = avio_rl32(pb);
    table_entries = avio_rl32(pb);
    table_entries_used = avio_rl32(pb);
    av_dlog(s, "NSV NSVf info-strings size: %d, table entries: %d, bis %d\n",
            strings_size, table_entries, table_entries_used);
    if (url_feof(pb))
        return -1;

    av_dlog(s, "NSV got header; filepos %"PRId64"\n", avio_tell(pb));

    if (strings_size > 0) {
        char *strings; /* last byte will be '\0' to play safe with str*() */
        char *p, *endp;
        char *token, *value;
        char quote;

        p = strings = av_mallocz((size_t)strings_size + 1);
        if (!p)
            return AVERROR(ENOMEM);
        endp = strings + strings_size;
        avio_read(pb, strings, strings_size);
        while (p < endp) {
            while (*p == ' ')
                p++; /* strip out spaces */
            if (p >= endp-2)
                break;
            token = p;
            p = strchr(p, '=');
            if (!p || p >= endp-2)
                break;
            *p++ = '\0';
            quote = *p++;
            value = p;
            p = strchr(p, quote);
            if (!p || p >= endp)
                break;
            *p++ = '\0';
            av_dlog(s, "NSV NSVf INFO: %s='%s'\n", token, value);
            av_dict_set(&s->metadata, token, value, 0);
        }
        av_free(strings);
    }
    if (url_feof(pb))
        return -1;

    av_dlog(s, "NSV got infos; filepos %"PRId64"\n", avio_tell(pb));

    if (table_entries_used > 0) {
        int i;
        nsv->index_entries = table_entries_used;
        if((unsigned)table_entries_used >= UINT_MAX / sizeof(uint32_t))
            return -1;
        nsv->nsvs_file_offset = av_malloc((unsigned)table_entries_used * sizeof(uint32_t));
        if (!nsv->nsvs_file_offset)
            return AVERROR(ENOMEM);

        for(i=0;i<table_entries_used;i++)
            nsv->nsvs_file_offset[i] = avio_rl32(pb) + size;

        if(table_entries > table_entries_used &&
           avio_rl32(pb) == MKTAG('T','O','C','2')) {
            nsv->nsvs_timestamps = av_malloc((unsigned)table_entries_used*sizeof(uint32_t));
            if (!nsv->nsvs_timestamps)
                return AVERROR(ENOMEM);
            for(i=0;i<table_entries_used;i++) {
                nsv->nsvs_timestamps[i] = avio_rl32(pb);
            }
        }
    }

    av_dlog(s, "NSV got index; filepos %"PRId64"\n", avio_tell(pb));

#ifdef DEBUG_DUMP_INDEX
#define V(v) ((v<0x20 || v > 127)?'.':v)
    /* dump index */
    int i;
    av_dlog(s, "NSV %d INDEX ENTRIES:\n", table_entries);
    av_dlog(s, "NSV [dataoffset][fileoffset]\n");
    for (i = 0; i < table_entries; i++) {
        unsigned char b[8];
        avio_seek(pb, size + nsv->nsvs_file_offset[i], SEEK_SET);
        avio_read(pb, b, 8);
        av_dlog(s, "NSV [0x%08lx][0x%08lx]: %02x %02x %02x %02x %02x %02x %02x %02x"
           "%c%c%c%c%c%c%c%c\n",
           nsv->nsvs_file_offset[i], size + nsv->nsvs_file_offset[i],
           b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
           V(b[0]), V(b[1]), V(b[2]), V(b[3]), V(b[4]), V(b[5]), V(b[6]), V(b[7]) );
    }
    //avio_seek(pb, size, SEEK_SET); /* go back to end of header */
#undef V
#endif

    avio_seek(pb, nsv->base_offset + size, SEEK_SET); /* required for dumbdriving-271.nsv (2 extra bytes) */

    if (url_feof(pb))
        return -1;
    nsv->state = NSV_HAS_READ_NSVF;
    return 0;
}
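
The info-string loop in the middle of nsv_parse_NSVf_header is a small self-contained tokenizer for space-separated token=<quote>value<quote> pairs, where the quote character is whatever byte follows '='. A standalone sketch of the same loop (an assumption: rewritten outside libavformat, printing instead of filling a metadata dictionary):

#include <stdio.h>
#include <string.h>

void parse_nsv_info(char *p, char *endp)
{
    while (p < endp) {
        char *token, *value, quote;

        while (*p == ' ')
            p++;                      /* strip out spaces */
        if (p >= endp - 2)
            break;
        token = p;
        p = strchr(p, '=');
        if (!p || p >= endp - 2)
            break;
        *p++ = '\0';                  /* terminate the token */
        quote = *p++;                 /* opening quote: " or ' */
        value = p;
        p = strchr(p, quote);
        if (!p || p >= endp)
            break;
        *p++ = '\0';                  /* terminate the value */
        printf("%s = %s\n", token, value);
    }
}

int main(void)
{
    char buf[] = "Title=\"demo clip\" Author='someone' ";

    parse_nsv_info(buf, buf + sizeof(buf) - 1);
    return 0;
}
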
Example #6
static int pcm_bluray_decode_frame(AVCodecContext *avctx, void *data,
                                   int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame     = data;
    const uint8_t *src = avpkt->data;
    int buf_size = avpkt->size;
    GetByteContext gb;
    int num_source_channels, channel, retval;
    int sample_size, samples;
    int16_t *dst16;
    int32_t *dst32;

    if (buf_size < 4) {
        av_log(avctx, AV_LOG_ERROR, "PCM packet too small\n");
        return AVERROR_INVALIDDATA;
    }

    if ((retval = pcm_bluray_parse_header(avctx, src)))
        return retval;
    src += 4;
    buf_size -= 4;

    bytestream2_init(&gb, src, buf_size);

    /* There's always an even number of channels in the source */
    num_source_channels = FFALIGN(avctx->channels, 2);
    sample_size = (num_source_channels *
                   (avctx->sample_fmt == AV_SAMPLE_FMT_S16 ? 16 : 24)) >> 3;
    samples = buf_size / sample_size;

    /* get output buffer */
    frame->nb_samples = samples;
    if ((retval = ff_get_buffer(avctx, frame, 0)) < 0)
        return retval;
    dst16 = (int16_t *)frame->data[0];
    dst32 = (int32_t *)frame->data[0];

    if (samples) {
        switch (avctx->channel_layout) {
            /* cases with same number of source and coded channels */
        case AV_CH_LAYOUT_STEREO:
        case AV_CH_LAYOUT_4POINT0:
        case AV_CH_LAYOUT_2_2:
            samples *= num_source_channels;
            if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt) {
#if HAVE_BIGENDIAN
                bytestream2_get_buffer(&gb, dst16, buf_size);
#else
                do {
                    *dst16++ = bytestream2_get_be16u(&gb);
                } while (--samples);
#endif
            } else {
                do {
                    *dst32++ = bytestream2_get_be24u(&gb) << 8;
                } while (--samples);
            }
            break;
        /* cases where number of source channels = coded channels + 1 */
        case AV_CH_LAYOUT_MONO:
        case AV_CH_LAYOUT_SURROUND:
        case AV_CH_LAYOUT_2_1:
        case AV_CH_LAYOUT_5POINT0:
            if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt) {
                do {
#if HAVE_BIGENDIAN
                    bytestream2_get_buffer(&gb, dst16, avctx->channels * 2);
                    dst16 += avctx->channels;
#else
                    channel = avctx->channels;
                    do {
                        *dst16++ = bytestream2_get_be16u(&gb);
                    } while (--channel);
#endif
                    bytestream2_skip(&gb, 2);
                } while (--samples);
            } else {
                do {
                    channel = avctx->channels;
                    do {
                        *dst32++ = bytestream2_get_be24u(&gb) << 8;
                    } while (--channel);
                    bytestream2_skip(&gb, 3);
                } while (--samples);
            }
            break;
            /* remapping: L, R, C, LBack, RBack, LF */
        case AV_CH_LAYOUT_5POINT1:
            if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt) {
                do {
                    dst16[0] = bytestream2_get_be16u(&gb);
                    dst16[1] = bytestream2_get_be16u(&gb);
                    dst16[2] = bytestream2_get_be16u(&gb);
                    dst16[4] = bytestream2_get_be16u(&gb);
                    dst16[5] = bytestream2_get_be16u(&gb);
                    dst16[3] = bytestream2_get_be16u(&gb);
                    dst16 += 6;
                } while (--samples);
            } else {
                do {
                    dst32[0] = bytestream2_get_be24u(&gb) << 8;
                    dst32[1] = bytestream2_get_be24u(&gb) << 8;
                    dst32[2] = bytestream2_get_be24u(&gb) << 8;
                    dst32[4] = bytestream2_get_be24u(&gb) << 8;
                    dst32[5] = bytestream2_get_be24u(&gb) << 8;
                    dst32[3] = bytestream2_get_be24u(&gb) << 8;
                    dst32 += 6;
                } while (--samples);
            }
            break;
            /* remapping: L, R, C, LSide, LBack, RBack, RSide, <unused> */
        case AV_CH_LAYOUT_7POINT0:
            if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt) {
                do {
                    dst16[0] = bytestream2_get_be16u(&gb);
                    dst16[1] = bytestream2_get_be16u(&gb);
                    dst16[2] = bytestream2_get_be16u(&gb);
                    dst16[5] = bytestream2_get_be16u(&gb);
                    dst16[3] = bytestream2_get_be16u(&gb);
                    dst16[4] = bytestream2_get_be16u(&gb);
                    dst16[6] = bytestream2_get_be16u(&gb);
                    dst16 += 7;
                    bytestream2_skip(&gb, 2);
                } while (--samples);
            } else {
                do {
                    dst32[0] = bytestream2_get_be24u(&gb) << 8;
                    dst32[1] = bytestream2_get_be24u(&gb) << 8;
                    dst32[2] = bytestream2_get_be24u(&gb) << 8;
                    dst32[5] = bytestream2_get_be24u(&gb) << 8;
                    dst32[3] = bytestream2_get_be24u(&gb) << 8;
                    dst32[4] = bytestream2_get_be24u(&gb) << 8;
                    dst32[6] = bytestream2_get_be24u(&gb) << 8;
                    dst32 += 7;
                    bytestream2_skip(&gb, 3);
                } while (--samples);
            }
            break;
            /* remapping: L, R, C, LSide, LBack, RBack, RSide, LF */
        case AV_CH_LAYOUT_7POINT1:
            if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt) {
                do {
                    dst16[0] = bytestream2_get_be16u(&gb);
                    dst16[1] = bytestream2_get_be16u(&gb);
                    dst16[2] = bytestream2_get_be16u(&gb);
                    dst16[6] = bytestream2_get_be16u(&gb);
                    dst16[4] = bytestream2_get_be16u(&gb);
                    dst16[5] = bytestream2_get_be16u(&gb);
                    dst16[7] = bytestream2_get_be16u(&gb);
                    dst16[3] = bytestream2_get_be16u(&gb);
                    dst16 += 8;
                } while (--samples);
            } else {
                do {
                    dst32[0] = bytestream2_get_be24u(&gb) << 8;
                    dst32[1] = bytestream2_get_be24u(&gb) << 8;
                    dst32[2] = bytestream2_get_be24u(&gb) << 8;
                    dst32[6] = bytestream2_get_be24u(&gb) << 8;
                    dst32[4] = bytestream2_get_be24u(&gb) << 8;
                    dst32[5] = bytestream2_get_be24u(&gb) << 8;
                    dst32[7] = bytestream2_get_be24u(&gb) << 8;
                    dst32[3] = bytestream2_get_be24u(&gb) << 8;
                    dst32 += 8;
                } while (--samples);
            }
            break;
        }
    }

    *got_frame_ptr = 1;

    retval = bytestream2_tell(&gb);
    if (avctx->debug & FF_DEBUG_BITSTREAM)
        av_dlog(avctx, "pcm_bluray_decode_frame: decoded %d -> %d bytes\n",
                retval, buf_size);
    return retval + 4;
}
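
All of the 24-bit branches above rely on the same widening step: bytestream2_get_be24u(&gb) << 8 left-justifies a big-endian 24-bit sample in a 32-bit slot, which also puts the sign bit in the right place. The sketch below (an assumption: a standalone restatement) spells it out.

#include <stdint.h>

int32_t be24_to_s32(const uint8_t *src)
{
    uint32_t u = (uint32_t)src[0] << 16 | (uint32_t)src[1] << 8 | src[2];

    return (int32_t)(u << 8);   /* left-justify; the low byte stays zero */
}
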
Example #7
static int decode(AVCodecContext *avctx, void *data, int *data_size,
                  AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;

    const uint8_t *buf_end;
    uint8_t       segment_type;
    int           segment_length;
    int i, ret;

    av_dlog(avctx, "PGS sub packet:\n");

    for (i = 0; i < buf_size; i++) {
        av_dlog(avctx, "%02x ", buf[i]);
        if (i % 16 == 15)
            av_dlog(avctx, "\n");
    }

    if (i & 15)
        av_dlog(avctx, "\n");

    *data_size = 0;

    /* Ensure that we have received at least a segment code and segment length */
    if (buf_size < 3)
        return -1;

    buf_end = buf + buf_size;

    /* Step through buffer to identify segments */
    while (buf < buf_end) {
        segment_type   = bytestream_get_byte(&buf);
        segment_length = bytestream_get_be16(&buf);

        av_dlog(avctx, "Segment Length %d, Segment Type %x\n", segment_length, segment_type);

        if (segment_type != DISPLAY_SEGMENT && segment_length > buf_end - buf)
            break;

        switch (segment_type) {
        case PALETTE_SEGMENT:
            parse_palette_segment(avctx, buf, segment_length);
            break;
        case PICTURE_SEGMENT:
            parse_picture_segment(avctx, buf, segment_length);
            break;
        case PRESENTATION_SEGMENT:
            ret = parse_presentation_segment(avctx, buf, segment_length, avpkt->pts);
            if (ret < 0)
                return ret;
            break;
        case WINDOW_SEGMENT:
            /*
             * Window Segment Structure (No new information provided):
             *     2 bytes: Unknown,
             *     2 bytes: X position of subtitle,
             *     2 bytes: Y position of subtitle,
             *     2 bytes: Width of subtitle,
             *     2 bytes: Height of subtitle.
             */
            break;
        case DISPLAY_SEGMENT:
            *data_size = display_end_segment(avctx, data, buf, segment_length);
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unknown subtitle segment type 0x%x, length %d\n",
                   segment_type, segment_length);
            break;
        }

        buf += segment_length;
    }

    return buf_size;
}
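
The framing the loop above walks is plain type/length records: a 1-byte segment type followed by a 2-byte big-endian payload length. A simplified standalone sketch (an assumption: it only prints the records, and applies the truncation check to every segment):

#include <stdint.h>
#include <stdio.h>

void walk_segments(const uint8_t *buf, int buf_size)
{
    const uint8_t *end = buf + buf_size;

    while (end - buf >= 3) {
        int type = *buf++;
        int len  = buf[0] << 8 | buf[1];   /* big-endian 16-bit length */
        buf += 2;

        if (len > end - buf)               /* truncated segment: stop */
            break;
        printf("segment type 0x%02x, %d payload bytes\n", type, len);
        buf += len;                        /* skip to the next record */
    }
}

int main(void)
{
    /* two records: type 0x16 with 2 payload bytes, type 0x80 with none */
    const uint8_t pkt[] = { 0x16, 0x00, 0x02, 0xab, 0xcd, 0x80, 0x00, 0x00 };

    walk_segments(pkt, sizeof(pkt));
    return 0;
}
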
Example #8
static double modify_qscale(MpegEncContext *s, RateControlEntry *rce,
                            double q, int frame_num)
{
    RateControlContext *rcc  = &s->rc_context;
    const double buffer_size = s->avctx->rc_buffer_size;
    const double fps         = 1 / av_q2d(s->avctx->time_base);
    const double min_rate    = s->avctx->rc_min_rate / fps;
    const double max_rate    = s->avctx->rc_max_rate / fps;
    const int pict_type      = rce->new_pict_type;
    int qmin, qmax;

    get_qminmax(&qmin, &qmax, s, pict_type);

    /* modulation */
    if (s->rc_qmod_freq &&
        frame_num % s->rc_qmod_freq == 0 &&
        pict_type == AV_PICTURE_TYPE_P)
        q *= s->rc_qmod_amp;

    /* buffer overflow/underflow protection */
    if (buffer_size) {
        double expected_size = rcc->buffer_index;
        double q_limit;

        if (min_rate) {
            double d = 2 * (buffer_size - expected_size) / buffer_size;
            if (d > 1.0)
                d = 1.0;
            else if (d < 0.0001)
                d = 0.0001;
            q *= pow(d, 1.0 / s->rc_buffer_aggressivity);

            q_limit = bits2qp(rce,
                              FFMAX((min_rate - buffer_size + rcc->buffer_index) *
                                    s->avctx->rc_min_vbv_overflow_use, 1));

            if (q > q_limit) {
                if (s->avctx->debug & FF_DEBUG_RC)
                    av_log(s->avctx, AV_LOG_DEBUG,
                           "limiting QP %f -> %f\n", q, q_limit);
                q = q_limit;
            }
        }

        if (max_rate) {
            double d = 2 * expected_size / buffer_size;
            if (d > 1.0)
                d = 1.0;
            else if (d < 0.0001)
                d = 0.0001;
            q /= pow(d, 1.0 / s->rc_buffer_aggressivity);

            q_limit = bits2qp(rce,
                              FFMAX(rcc->buffer_index *
                                    s->avctx->rc_max_available_vbv_use,
                                    1));
            if (q < q_limit) {
                if (s->avctx->debug & FF_DEBUG_RC)
                    av_log(s->avctx, AV_LOG_DEBUG,
                           "limiting QP %f -> %f\n", q, q_limit);
                q = q_limit;
            }
        }
    }
    av_dlog(s, "q:%f max:%f min:%f size:%f index:%f agr:%f\n",
            q, max_rate, min_rate, buffer_size, rcc->buffer_index,
            s->rc_buffer_aggressivity);
    if (s->rc_qsquish == 0.0 || qmin == qmax) {
        if (q < qmin)
            q = qmin;
        else if (q > qmax)
            q = qmax;
    } else {
        double min2 = log(qmin);
        double max2 = log(qmax);

        q  = log(q);
        q  = (q - min2) / (max2 - min2) - 0.5;
        q *= -4.0;
        q  = 1.0 / (1.0 + exp(q));
        q  = q * (max2 - min2) + min2;

        q = exp(q);
    }

    return q;
}
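
The qsquish branch at the bottom of modify_qscale is worth isolating: instead of hard clipping, it maps the quantizer's position in log space through a logistic curve, so values drift smoothly toward [qmin, qmax] rather than slamming into the bounds. A sketch of just that mapping (an assumption: lifted out on its own):

#include <math.h>
#include <stdio.h>

double squish_qscale(double q, int qmin, int qmax)
{
    double min2 = log(qmin);
    double max2 = log(qmax);

    q  = log(q);
    q  = (q - min2) / (max2 - min2) - 0.5;  /* center on the range */
    q *= -4.0;
    q  = 1.0 / (1.0 + exp(q));              /* logistic: maps into (0, 1) */
    q  = q * (max2 - min2) + min2;          /* back to the log range */
    return exp(q);
}

int main(void)
{
    printf("%f %f %f\n",
           squish_qscale(1.0, 2, 31),       /* below range: pulled up */
           squish_qscale(8.0, 2, 31),       /* inside range: nearly unchanged */
           squish_qscale(100.0, 2, 31));    /* above range: pulled down */
    return 0;
}
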
Example #9
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
{
    float q;
    int qmin, qmax;
    float br_compensation;
    double diff;
    double short_term_q;
    double fps;
    int picture_number = s->picture_number;
    int64_t wanted_bits;
    RateControlContext *rcc = &s->rc_context;
    AVCodecContext *a       = s->avctx;
    RateControlEntry local_rce, *rce;
    double bits;
    double rate_factor;
    int var;
    const int pict_type = s->pict_type;
    Picture * const pic = &s->current_picture;
    emms_c();

#if CONFIG_LIBXVID
    if ((s->flags & CODEC_FLAG_PASS2) &&
        s->avctx->rc_strategy == FF_RC_STRATEGY_XVID)
        return ff_xvid_rate_estimate_qscale(s, dry_run);
#endif

    get_qminmax(&qmin, &qmax, s, pict_type);

    fps = 1 / av_q2d(s->avctx->time_base);
    /* update predictors */
    if (picture_number > 2 && !dry_run) {
        const int last_var = s->last_pict_type == AV_PICTURE_TYPE_I ? rcc->last_mb_var_sum
                                                                    : rcc->last_mc_mb_var_sum;
        update_predictor(&rcc->pred[s->last_pict_type],
                         rcc->last_qscale,
                         sqrt(last_var), s->frame_bits);
    }

    if (s->flags & CODEC_FLAG_PASS2) {
        assert(picture_number >= 0);
        assert(picture_number < rcc->num_entries);
        rce         = &rcc->entry[picture_number];
        wanted_bits = rce->expected_bits;
    } else {
        Picture *dts_pic;
        rce = &local_rce;

        /* FIXME add a dts field to AVFrame and ensure it is set and use it
         * here instead of reordering but the reordering is simpler for now
         * until H.264 B-pyramid must be handled. */
        if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
            dts_pic = s->current_picture_ptr;
        else
            dts_pic = s->last_picture_ptr;

        if (!dts_pic || dts_pic->f->pts == AV_NOPTS_VALUE)
            wanted_bits = (uint64_t)(s->bit_rate * (double)picture_number / fps);
        else
            wanted_bits = (uint64_t)(s->bit_rate * (double)dts_pic->f->pts / fps);
    }

    diff = s->total_bits - wanted_bits;
    br_compensation = (a->bit_rate_tolerance - diff) / a->bit_rate_tolerance;
    if (br_compensation <= 0.0)
        br_compensation = 0.001;

    var = pict_type == AV_PICTURE_TYPE_I ? pic->mb_var_sum : pic->mc_mb_var_sum;

    short_term_q = 0; /* avoid warning */
    if (s->flags & CODEC_FLAG_PASS2) {
        if (pict_type != AV_PICTURE_TYPE_I)
            assert(pict_type == rce->new_pict_type);

        q = rce->new_qscale / br_compensation;
        av_dlog(s, "%f %f %f last:%d var:%d type:%d//\n", q, rce->new_qscale,
                br_compensation, s->frame_bits, var, pict_type);
    } else {
        rce->pict_type     =
        rce->new_pict_type = pict_type;
        rce->mc_mb_var_sum = pic->mc_mb_var_sum;
        rce->mb_var_sum    = pic->mb_var_sum;
        rce->qscale        = FF_QP2LAMBDA * 2;
        rce->f_code        = s->f_code;
        rce->b_code        = s->b_code;
        rce->misc_bits     = 1;

        bits = predict_size(&rcc->pred[pict_type], rce->qscale, sqrt(var));
        if (pict_type == AV_PICTURE_TYPE_I) {
            rce->i_count    = s->mb_num;
            rce->i_tex_bits = bits;
            rce->p_tex_bits = 0;
            rce->mv_bits    = 0;
        } else {
            rce->i_count    = 0;    // FIXME we do know this approx
            rce->i_tex_bits = 0;
            rce->p_tex_bits = bits * 0.9;
            rce->mv_bits    = bits * 0.1;
        }
        rcc->i_cplx_sum[pict_type]  += rce->i_tex_bits * rce->qscale;
        rcc->p_cplx_sum[pict_type]  += rce->p_tex_bits * rce->qscale;
        rcc->mv_bits_sum[pict_type] += rce->mv_bits;
        rcc->frame_count[pict_type]++;

        bits        = rce->i_tex_bits + rce->p_tex_bits;
        rate_factor = rcc->pass1_wanted_bits /
                      rcc->pass1_rc_eq_output_sum * br_compensation;

        q = get_qscale(s, rce, rate_factor, picture_number);
        if (q < 0)
            return -1;

        assert(q > 0.0);
        q = get_diff_limited_q(s, rce, q);
        assert(q > 0.0);

        // FIXME type dependent blur like in 2-pass
        if (pict_type == AV_PICTURE_TYPE_P || s->intra_only) {
            rcc->short_term_qsum   *= a->qblur;
            rcc->short_term_qcount *= a->qblur;

            rcc->short_term_qsum += q;
            rcc->short_term_qcount++;
            q = short_term_q = rcc->short_term_qsum / rcc->short_term_qcount;
        }
        assert(q > 0.0);

        q = modify_qscale(s, rce, q, picture_number);

        rcc->pass1_wanted_bits += s->bit_rate / fps;

        assert(q > 0.0);
    }

    if (s->avctx->debug & FF_DEBUG_RC) {
        av_log(s->avctx, AV_LOG_DEBUG,
               "%c qp:%d<%2.1f<%d %d want:%d total:%d comp:%f st_q:%2.2f "
               "size:%d var:%d/%d br:%d fps:%d\n",
               av_get_picture_type_char(pict_type),
               qmin, q, qmax, picture_number,
               (int)wanted_bits / 1000, (int)s->total_bits / 1000,
               br_compensation, short_term_q, s->frame_bits,
               pic->mb_var_sum, pic->mc_mb_var_sum,
               s->bit_rate / 1000, (int)fps);
    }

    if (q < qmin)
        q = qmin;
    else if (q > qmax)
        q = qmax;

    if (s->adaptive_quant)
        adaptive_quantization(s, q);
    else
        q = (int)(q + 0.5);

    if (!dry_run) {
        rcc->last_qscale        = q;
        rcc->last_mc_mb_var_sum = pic->mc_mb_var_sum;
        rcc->last_mb_var_sum    = pic->mb_var_sum;
    }
    return q;
}
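
One piece of ff_rate_estimate_qscale that generalizes well is the drift compensation: the encoder compares the bits actually spent against bit_rate * time and scales the quantizer target by a factor derived from bit_rate_tolerance. A sketch of that arithmetic alone (an assumption: a hypothetical helper, including the same 0.001 floor):

double bitrate_compensation(double total_bits, double wanted_bits,
                            double tolerance)
{
    double diff = total_bits - wanted_bits;   /* > 0 means over budget */
    double comp = (tolerance - diff) / tolerance;

    /* over budget gives comp < 1, which raises q and spends fewer bits */
    return comp > 0.0 ? comp : 0.001;
}
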
Example #10
int attribute_align_arg avresample_convert(AVAudioResampleContext *avr,
        uint8_t **output, int out_plane_size,
        int out_samples, uint8_t **input,
        int in_plane_size, int in_samples)
{
    AudioData input_buffer;
    AudioData output_buffer;
    AudioData *current_buffer;
    int ret, direct_output;

    /* reset internal buffers */
    if (avr->in_buffer) {
        avr->in_buffer->nb_samples = 0;
        ff_audio_data_set_channels(avr->in_buffer,
                                   avr->in_buffer->allocated_channels);
    }
    if (avr->resample_out_buffer) {
        avr->resample_out_buffer->nb_samples = 0;
        ff_audio_data_set_channels(avr->resample_out_buffer,
                                   avr->resample_out_buffer->allocated_channels);
    }
    if (avr->out_buffer) {
        avr->out_buffer->nb_samples = 0;
        ff_audio_data_set_channels(avr->out_buffer,
                                   avr->out_buffer->allocated_channels);
    }

    av_dlog(avr, "[start conversion]\n");

    /* initialize output_buffer with output data */
    direct_output = output && av_audio_fifo_size(avr->out_fifo) == 0;
    if (output) {
        ret = ff_audio_data_init(&output_buffer, output, out_plane_size,
                                 avr->out_channels, out_samples,
                                 avr->out_sample_fmt, 0, "output");
        if (ret < 0)
            return ret;
        output_buffer.nb_samples = 0;
    }

    if (input) {
        /* initialize input_buffer with input data */
        ret = ff_audio_data_init(&input_buffer, input, in_plane_size,
                                 avr->in_channels, in_samples,
                                 avr->in_sample_fmt, 1, "input");
        if (ret < 0)
            return ret;
        current_buffer = &input_buffer;

        if (avr->upmix_needed && !avr->in_convert_needed && !avr->resample_needed &&
                !avr->out_convert_needed && direct_output && out_samples >= in_samples) {
            /* in some rare cases we can copy input to output and upmix
               directly in the output buffer */
            av_dlog(avr, "[copy] %s to output\n", current_buffer->name);
            ret = ff_audio_data_copy(&output_buffer, current_buffer,
                                     avr->remap_point == REMAP_OUT_COPY ?
                                     &avr->ch_map_info : NULL);
            if (ret < 0)
                return ret;
            current_buffer = &output_buffer;
        } else if (avr->remap_point == REMAP_OUT_COPY &&
                   (!direct_output || out_samples < in_samples)) {
            /* if remapping channels during output copy, we may need to
             * use an intermediate buffer in order to remap before adding
             * samples to the output fifo */
            av_dlog(avr, "[copy] %s to out_buffer\n", current_buffer->name);
            ret = ff_audio_data_copy(avr->out_buffer, current_buffer,
                                     &avr->ch_map_info);
            if (ret < 0)
                return ret;
            current_buffer = avr->out_buffer;
        } else if (avr->in_copy_needed || avr->in_convert_needed) {
            /* if needed, copy or convert input to in_buffer, and downmix if
               applicable */
            if (avr->in_convert_needed) {
                ret = ff_audio_data_realloc(avr->in_buffer,
                                            current_buffer->nb_samples);
                if (ret < 0)
                    return ret;
                av_dlog(avr, "[convert] %s to in_buffer\n", current_buffer->name);
                ret = ff_audio_convert(avr->ac_in, avr->in_buffer,
                                       current_buffer);
                if (ret < 0)
                    return ret;
            } else {
                av_dlog(avr, "[copy] %s to in_buffer\n", current_buffer->name);
                ret = ff_audio_data_copy(avr->in_buffer, current_buffer,
                                         avr->remap_point == REMAP_IN_COPY ?
                                         &avr->ch_map_info : NULL);
                if (ret < 0)
                    return ret;
            }
            ff_audio_data_set_channels(avr->in_buffer, avr->in_channels);
            if (avr->downmix_needed) {
                av_dlog(avr, "[downmix] in_buffer\n");
                ret = ff_audio_mix(avr->am, avr->in_buffer);
                if (ret < 0)
                    return ret;
            }
            current_buffer = avr->in_buffer;
        }
    } else {
        /* flush resampling buffer and/or output FIFO if input is NULL */
        if (!avr->resample_needed)
            return handle_buffered_output(avr, output ? &output_buffer : NULL,
                                          NULL);
        current_buffer = NULL;
    }

    if (avr->resample_needed) {
        AudioData *resample_out;

        if (!avr->out_convert_needed && direct_output && out_samples > 0)
            resample_out = &output_buffer;
        else
            resample_out = avr->resample_out_buffer;
        av_dlog(avr, "[resample] %s to %s\n",
                current_buffer ? current_buffer->name : "null",
                resample_out->name);
        ret = ff_audio_resample(avr->resample, resample_out,
                                current_buffer);
        if (ret < 0)
            return ret;

        /* if resampling did not produce any samples, just return 0 */
        if (resample_out->nb_samples == 0) {
            av_dlog(avr, "[end conversion]\n");
            return 0;
        }

        current_buffer = resample_out;
    }

    if (avr->upmix_needed) {
        av_dlog(avr, "[upmix] %s\n", current_buffer->name);
        ret = ff_audio_mix(avr->am, current_buffer);
        if (ret < 0)
            return ret;
    }

    /* if we resampled or upmixed directly to output, return here */
    if (current_buffer == &output_buffer) {
        av_dlog(avr, "[end conversion]\n");
        return current_buffer->nb_samples;
    }

    if (avr->out_convert_needed) {
        if (direct_output && out_samples >= current_buffer->nb_samples) {
            /* convert directly to output */
            av_dlog(avr, "[convert] %s to output\n", current_buffer->name);
            ret = ff_audio_convert(avr->ac_out, &output_buffer, current_buffer);
            if (ret < 0)
                return ret;

            av_dlog(avr, "[end conversion]\n");
            return output_buffer.nb_samples;
        } else {
            ret = ff_audio_data_realloc(avr->out_buffer,
                                        current_buffer->nb_samples);
            if (ret < 0)
                return ret;
            av_dlog(avr, "[convert] %s to out_buffer\n", current_buffer->name);
            ret = ff_audio_convert(avr->ac_out, avr->out_buffer,
                                   current_buffer);
            if (ret < 0)
                return ret;
            current_buffer = avr->out_buffer;
        }
    }

    return handle_buffered_output(avr, output ? &output_buffer : NULL,
                                  current_buffer);
}
Example #11
int avresample_open(AVAudioResampleContext *avr)
{
    int ret;

    if (avresample_is_open(avr)) {
        av_log(avr, AV_LOG_ERROR, "The resampling context is already open.\n");
        return AVERROR(EINVAL);
    }

    /* set channel mixing parameters */
    avr->in_channels = av_get_channel_layout_nb_channels(avr->in_channel_layout);
    if (avr->in_channels <= 0 || avr->in_channels > AVRESAMPLE_MAX_CHANNELS) {
        av_log(avr, AV_LOG_ERROR, "Invalid input channel layout: %"PRIu64"\n",
               avr->in_channel_layout);
        return AVERROR(EINVAL);
    }
    avr->out_channels = av_get_channel_layout_nb_channels(avr->out_channel_layout);
    if (avr->out_channels <= 0 || avr->out_channels > AVRESAMPLE_MAX_CHANNELS) {
        av_log(avr, AV_LOG_ERROR, "Invalid output channel layout: %"PRIu64"\n",
               avr->out_channel_layout);
        return AVERROR(EINVAL);
    }
    avr->resample_channels = FFMIN(avr->in_channels, avr->out_channels);
    avr->downmix_needed    = avr->in_channels  > avr->out_channels;
    avr->upmix_needed      = avr->out_channels > avr->in_channels ||
                             (!avr->downmix_needed && (avr->mix_matrix ||
                                     avr->in_channel_layout != avr->out_channel_layout));
    avr->mixing_needed     = avr->downmix_needed || avr->upmix_needed;

    /* set resampling parameters */
    avr->resample_needed   = avr->in_sample_rate != avr->out_sample_rate ||
                             avr->force_resampling;

    /* select internal sample format if not specified by the user */
    if (avr->internal_sample_fmt == AV_SAMPLE_FMT_NONE &&
            (avr->mixing_needed || avr->resample_needed)) {
        enum AVSampleFormat  in_fmt = av_get_planar_sample_fmt(avr->in_sample_fmt);
        enum AVSampleFormat out_fmt = av_get_planar_sample_fmt(avr->out_sample_fmt);
        int max_bps = FFMAX(av_get_bytes_per_sample(in_fmt),
                            av_get_bytes_per_sample(out_fmt));
        if (max_bps <= 2) {
            avr->internal_sample_fmt = AV_SAMPLE_FMT_S16P;
        } else if (avr->mixing_needed) {
            avr->internal_sample_fmt = AV_SAMPLE_FMT_FLTP;
        } else {
            if (max_bps <= 4) {
                if (in_fmt  == AV_SAMPLE_FMT_S32P ||
                        out_fmt == AV_SAMPLE_FMT_S32P) {
                    if (in_fmt  == AV_SAMPLE_FMT_FLTP ||
                            out_fmt == AV_SAMPLE_FMT_FLTP) {
                        /* if one is s32 and the other is flt, use dbl */
                        avr->internal_sample_fmt = AV_SAMPLE_FMT_DBLP;
                    } else {
                        /* if one is s32 and the other is s32, s16, or u8, use s32 */
                        avr->internal_sample_fmt = AV_SAMPLE_FMT_S32P;
                    }
                } else {
                    /* if one is flt and the other is flt, s16 or u8, use flt */
                    avr->internal_sample_fmt = AV_SAMPLE_FMT_FLTP;
                }
            } else {
                /* if either is dbl, use dbl */
                avr->internal_sample_fmt = AV_SAMPLE_FMT_DBLP;
            }
        }
        av_log(avr, AV_LOG_DEBUG, "Using %s as internal sample format\n",
               av_get_sample_fmt_name(avr->internal_sample_fmt));
    }

    /* treat all mono as planar for easier comparison */
    if (avr->in_channels == 1)
        avr->in_sample_fmt = av_get_planar_sample_fmt(avr->in_sample_fmt);
    if (avr->out_channels == 1)
        avr->out_sample_fmt = av_get_planar_sample_fmt(avr->out_sample_fmt);

    /* we may need to add an extra conversion in order to remap channels if
       the output format is not planar */
    if (avr->use_channel_map && !avr->mixing_needed && !avr->resample_needed &&
            !av_sample_fmt_is_planar(avr->out_sample_fmt)) {
        avr->internal_sample_fmt = av_get_planar_sample_fmt(avr->out_sample_fmt);
    }

    /* set sample format conversion parameters */
    if (avr->resample_needed || avr->mixing_needed)
        avr->in_convert_needed = avr->in_sample_fmt != avr->internal_sample_fmt;
    else
        avr->in_convert_needed = avr->use_channel_map &&
                                 !av_sample_fmt_is_planar(avr->out_sample_fmt);

    if (avr->resample_needed || avr->mixing_needed || avr->in_convert_needed)
        avr->out_convert_needed = avr->internal_sample_fmt != avr->out_sample_fmt;
    else
        avr->out_convert_needed = avr->in_sample_fmt != avr->out_sample_fmt;

    avr->in_copy_needed = !avr->in_convert_needed && (avr->mixing_needed ||
                          (avr->use_channel_map && avr->resample_needed));

    if (avr->use_channel_map) {
        if (avr->in_copy_needed) {
            avr->remap_point = REMAP_IN_COPY;
            av_dlog(avr, "remap channels during in_copy\n");
        } else if (avr->in_convert_needed) {
            avr->remap_point = REMAP_IN_CONVERT;
            av_dlog(avr, "remap channels during in_convert\n");
        } else if (avr->out_convert_needed) {
            avr->remap_point = REMAP_OUT_CONVERT;
            av_dlog(avr, "remap channels during out_convert\n");
        } else {
            avr->remap_point = REMAP_OUT_COPY;
            av_dlog(avr, "remap channels during out_copy\n");
        }

#ifdef DEBUG
        {
            int ch;
            av_dlog(avr, "output map: ");
            if (avr->ch_map_info.do_remap)
                for (ch = 0; ch < avr->in_channels; ch++)
                    av_dlog(avr, " % 2d", avr->ch_map_info.channel_map[ch]);
            else
                av_dlog(avr, "n/a");
            av_dlog(avr, "\n");
            av_dlog(avr, "copy map:   ");
            if (avr->ch_map_info.do_copy)
                for (ch = 0; ch < avr->in_channels; ch++)
                    av_dlog(avr, " % 2d", avr->ch_map_info.channel_copy[ch]);
            else
                av_dlog(avr, "n/a");
            av_dlog(avr, "\n");
            av_dlog(avr, "zero map:   ");
            if (avr->ch_map_info.do_zero)
                for (ch = 0; ch < avr->in_channels; ch++)
                    av_dlog(avr, " % 2d", avr->ch_map_info.channel_zero[ch]);
            else
                av_dlog(avr, "n/a");
            av_dlog(avr, "\n");
            av_dlog(avr, "input map:  ");
            for (ch = 0; ch < avr->in_channels; ch++)
                av_dlog(avr, " % 2d", avr->ch_map_info.input_map[ch]);
            av_dlog(avr, "\n");
        }
#endif
    } else
        avr->remap_point = REMAP_NONE;

    /* allocate buffers */
    if (avr->in_copy_needed || avr->in_convert_needed) {
        avr->in_buffer = ff_audio_data_alloc(FFMAX(avr->in_channels, avr->out_channels),
                                             0, avr->internal_sample_fmt,
                                             "in_buffer");
        if (!avr->in_buffer) {
            ret = AVERROR(EINVAL);
            goto error;
        }
    }
    if (avr->resample_needed) {
        avr->resample_out_buffer = ff_audio_data_alloc(avr->out_channels,
                                   1024, avr->internal_sample_fmt,
                                   "resample_out_buffer");
        if (!avr->resample_out_buffer) {
            ret = AVERROR(EINVAL);
            goto error;
        }
    }
    if (avr->out_convert_needed) {
        avr->out_buffer = ff_audio_data_alloc(avr->out_channels, 0,
                                              avr->out_sample_fmt, "out_buffer");
        if (!avr->out_buffer) {
            ret = AVERROR(EINVAL);
            goto error;
        }
    }
    avr->out_fifo = av_audio_fifo_alloc(avr->out_sample_fmt, avr->out_channels,
                                        1024);
    if (!avr->out_fifo) {
        ret = AVERROR(ENOMEM);
        goto error;
    }

    /* setup contexts */
    if (avr->in_convert_needed) {
        avr->ac_in = ff_audio_convert_alloc(avr, avr->internal_sample_fmt,
                                            avr->in_sample_fmt, avr->in_channels,
                                            avr->in_sample_rate,
                                            avr->remap_point == REMAP_IN_CONVERT);
        if (!avr->ac_in) {
            ret = AVERROR(ENOMEM);
            goto error;
        }
    }
    if (avr->out_convert_needed) {
        enum AVSampleFormat src_fmt;
        if (avr->in_convert_needed)
            src_fmt = avr->internal_sample_fmt;
        else
            src_fmt = avr->in_sample_fmt;
        avr->ac_out = ff_audio_convert_alloc(avr, avr->out_sample_fmt, src_fmt,
                                             avr->out_channels,
                                             avr->out_sample_rate,
                                             avr->remap_point == REMAP_OUT_CONVERT);
        if (!avr->ac_out) {
            ret = AVERROR(ENOMEM);
            goto error;
        }
    }
    if (avr->resample_needed) {
        avr->resample = ff_audio_resample_init(avr);
        if (!avr->resample) {
            ret = AVERROR(ENOMEM);
            goto error;
        }
    }
    if (avr->mixing_needed) {
        avr->am = ff_audio_mix_alloc(avr);
        if (!avr->am) {
            ret = AVERROR(ENOMEM);
            goto error;
        }
    }

    return 0;

error:
    avresample_close(avr);
    return ret;
}
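
Taken together, Examples #10 and #11 cover the whole lifecycle of a libavresample context. Below is a minimal caller sketch under stated assumptions (the standard AVOption names on the context, and a fixed stereo 48 kHz s16 input downmixed to mono 44.1 kHz); it is illustrative, not a drop-in.

#include <libavresample/avresample.h>
#include <libavutil/channel_layout.h>
#include <libavutil/error.h>
#include <libavutil/opt.h>
#include <libavutil/samplefmt.h>

int convert_once(uint8_t **output, int out_plane_size, int out_samples,
                 uint8_t **input, int in_plane_size, int in_samples)
{
    AVAudioResampleContext *avr = avresample_alloc_context();
    int ret;

    if (!avr)
        return AVERROR(ENOMEM);

    /* the parameters avresample_open() validates above */
    av_opt_set_int(avr, "in_channel_layout",  AV_CH_LAYOUT_STEREO, 0);
    av_opt_set_int(avr, "out_channel_layout", AV_CH_LAYOUT_MONO,   0);
    av_opt_set_int(avr, "in_sample_rate",     48000,               0);
    av_opt_set_int(avr, "out_sample_rate",    44100,               0);
    av_opt_set_int(avr, "in_sample_fmt",      AV_SAMPLE_FMT_S16,   0);
    av_opt_set_int(avr, "out_sample_fmt",     AV_SAMPLE_FMT_S16,   0);

    ret = avresample_open(avr);
    if (ret < 0)
        goto done;

    /* number of samples written to output, or a negative error code */
    ret = avresample_convert(avr, output, out_plane_size, out_samples,
                             input, in_plane_size, in_samples);

done:
    avresample_free(&avr);   /* closes and frees the context */
    return ret;
}
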
Example #12
int
ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVIOContext *pb,
                           AVStream *st, RMStream *rst, int codec_data_size, const uint8_t *mime)
{
    unsigned int v;
    int size;
    int64_t codec_pos;
    int ret;

    avpriv_set_pts_info(st, 64, 1, 1000);
    codec_pos = avio_tell(pb);
    v = avio_rb32(pb);
    if (v == MKTAG(0xfd, 'a', 'r', '.')) {
        /* ra type header */
        if (rm_read_audio_stream_info(s, pb, st, rst, 0))
            return -1;
    } else if (v == MKBETAG('L', 'S', 'D', ':')) {
        avio_seek(pb, -4, SEEK_CUR);
        if ((ret = rm_read_extradata(pb, st->codec, codec_data_size)) < 0)
            return ret;

        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_tag  = AV_RL32(st->codec->extradata);
        st->codec->codec_id   = ff_codec_get_id(ff_rm_codec_tags,
                                                st->codec->codec_tag);
    } else if(mime && !strcmp(mime, "logical-fileinfo")){
        int stream_count, rule_count, property_count, i;
        ff_free_stream(s, st);
        if (avio_rb16(pb) != 0) {
            av_log(s, AV_LOG_WARNING, "Unsupported version\n");
            goto skip;
        }
        stream_count = avio_rb16(pb);
        avio_skip(pb, 6*stream_count);
        rule_count = avio_rb16(pb);
        avio_skip(pb, 2*rule_count);
        property_count = avio_rb16(pb);
        for(i=0; i<property_count; i++){
            uint8_t name[128], val[128];
            avio_rb32(pb);
            if (avio_rb16(pb) != 0) {
                av_log(s, AV_LOG_WARNING, "Unsupported Name value property version\n");
                goto skip; //FIXME skip just this one
            }
            get_str8(pb, name, sizeof(name));
            switch(avio_rb32(pb)) {
            case 2: get_strl(pb, val, sizeof(val), avio_rb16(pb));
                av_dict_set(&s->metadata, name, val, 0);
                break;
            default: avio_skip(pb, avio_rb16(pb));
            }
        }
    } else {
        int fps;
        if (avio_rl32(pb) != MKTAG('V', 'I', 'D', 'O')) {
        fail1:
            av_log(s, AV_LOG_WARNING, "Unsupported stream type %08x\n", v);
            goto skip;
        }
        st->codec->codec_tag = avio_rl32(pb);
        st->codec->codec_id  = ff_codec_get_id(ff_rm_codec_tags,
                                               st->codec->codec_tag);
        av_dlog(s, "%X %X\n", st->codec->codec_tag, MKTAG('R', 'V', '2', '0'));
        if (st->codec->codec_id == AV_CODEC_ID_NONE)
            goto fail1;
        st->codec->width  = avio_rb16(pb);
        st->codec->height = avio_rb16(pb);
        avio_skip(pb, 2); // looks like bits per sample
        avio_skip(pb, 4); // always zero?
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
        fps = avio_rb32(pb);

        if ((ret = rm_read_extradata(pb, st->codec, codec_data_size - (avio_tell(pb) - codec_pos))) < 0)
            return ret;

        av_reduce(&st->avg_frame_rate.den, &st->avg_frame_rate.num,
                  0x10000, fps, (1 << 30) - 1);
#if FF_API_R_FRAME_RATE
        st->r_frame_rate = st->avg_frame_rate;
#endif
    }

skip:
    /* skip codec info */
    size = avio_tell(pb) - codec_pos;
    avio_skip(pb, codec_data_size - size);

    return 0;
}
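
The tag comparisons above pair avio_rl32() with MKTAG and avio_rb32() with MKBETAG: both macros spell the FourCC in reading order and fold the byte order into the constant. The one exception, avio_rb32() against MKTAG(0xfd, 'a', 'r', '.'), works because it spells the ".ra\xfd" magic reversed. A standalone demonstration with local copies of the two macros as FFmpeg defines them:

#include <stdint.h>
#include <stdio.h>

/* MKTAG packs the first character into the low byte (matches a
 * little-endian read); MKBETAG packs it into the high byte (matches a
 * big-endian read). */
#define MKTAG(a,b,c,d)   ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))
#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))

int main(void)
{
    printf("MKTAG('V','I','D','O')   = 0x%08X\n", MKTAG('V', 'I', 'D', 'O'));   /* 0x4F444956 */
    printf("MKBETAG('L','S','D',':') = 0x%08X\n", MKBETAG('L', 'S', 'D', ':')); /* 0x4C53443A */
    return 0;
}
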
Example #13
/** End a hardware decoding based frame. */
static int end_frame(AVCodecContext *avctx)
{
  H264Context * const h = avctx->priv_data;
  MpegEncContext * const s = &h->s;
  struct xvba_render_state *render;
  XVBAPictureDescriptor *pic_descriptor;
  XVBAQuantMatrixAvc *iq_matrix;

  render = (struct xvba_render_state *)s->current_picture_ptr->data[0];
  assert(render);

  if (render->picture_descriptor == 0 || render->iq_matrix == 0)
    return -1;

  pic_descriptor = render->picture_descriptor;
  iq_matrix = render->iq_matrix;

  av_dlog(avctx, "end_frame()\n");

  /* Fill in Picture Parameters */
  pic_descriptor->profile                                     = ff_xvba_translate_profile(avctx->profile);
  pic_descriptor->level                                       = avctx->level;
  pic_descriptor->width_in_mb                                 = s->mb_width;
  pic_descriptor->height_in_mb                                = s->mb_height;
  pic_descriptor->picture_structure                           = s->picture_structure;
  pic_descriptor->chroma_format                               = s->chroma_format ? s->chroma_format : 1;
  pic_descriptor->avc_intra_flag                              = (h->slice_type == FF_I_TYPE) ? 1 : 0;
  pic_descriptor->avc_reference                               = (s->current_picture_ptr->reference & 3) ? 1 : 0;

  pic_descriptor->avc_bit_depth_luma_minus8                   = h->sps.bit_depth_luma - 8;
  pic_descriptor->avc_bit_depth_chroma_minus8                 = h->sps.bit_depth_chroma - 8;
  pic_descriptor->avc_log2_max_frame_num_minus4               = h->sps.log2_max_frame_num -4;
  pic_descriptor->avc_pic_order_cnt_type                      = h->sps.poc_type;
  pic_descriptor->avc_log2_max_pic_order_cnt_lsb_minus4       = h->sps.log2_max_poc_lsb - 4;
  pic_descriptor->avc_num_ref_frames                          = h->sps.ref_frame_count;
  pic_descriptor->avc_reserved_8bit                           = 0;

  pic_descriptor->avc_num_slice_groups_minus1                 = h->pps.slice_group_count - 1;
  pic_descriptor->avc_num_ref_idx_l0_active_minus1            = h->pps.ref_count[0] - 1;
  pic_descriptor->avc_num_ref_idx_l1_active_minus1            = h->pps.ref_count[1] - 1;

  pic_descriptor->avc_pic_init_qp_minus26                     = h->pps.init_qp - 26;
  pic_descriptor->avc_pic_init_qs_minus26                     = h->pps.init_qs - 26;
  pic_descriptor->avc_chroma_qp_index_offset                  = h->pps.chroma_qp_index_offset[0];
  pic_descriptor->avc_second_chroma_qp_index_offset           = h->pps.chroma_qp_index_offset[1];
  pic_descriptor->avc_slice_group_change_rate_minus1          = 0; // not implemented in ffmpeg
  pic_descriptor->avc_reserved_16bit                          = 0; // must be 0
  memset(pic_descriptor->avc_field_order_cnt_list,0,sizeof(pic_descriptor->avc_field_order_cnt_list)); // must be 0
  memset(pic_descriptor->avc_slice_group_map,0,sizeof(pic_descriptor->avc_slice_group_map)); // must be 0

  // sps
  pic_descriptor->sps_info.avc.delta_pic_always_zero_flag     = h->sps.delta_pic_order_always_zero_flag;
  pic_descriptor->sps_info.avc.direct_8x8_inference_flag      = h->sps.direct_8x8_inference_flag;
  pic_descriptor->sps_info.avc.frame_mbs_only_flag            = h->sps.frame_mbs_only_flag;
  pic_descriptor->sps_info.avc.gaps_in_frame_num_value_allowed_flag = h->sps.gaps_in_frame_num_allowed_flag;
  pic_descriptor->sps_info.avc.mb_adaptive_frame_field_flag   = h->sps.mb_aff;
  pic_descriptor->sps_info.avc.residual_colour_transform_flag = h->sps.residual_color_transform_flag;
  pic_descriptor->sps_info.avc.xvba_avc_sps_reserved          = 0;

  // pps
  pic_descriptor->pps_info.avc.entropy_coding_mode_flag       = h->pps.cabac;
  pic_descriptor->pps_info.avc.pic_order_present_flag         = h->pps.pic_order_present;
  pic_descriptor->pps_info.avc.weighted_pred_flag             = h->pps.weighted_pred;
  pic_descriptor->pps_info.avc.weighted_bipred_idc            = h->pps.weighted_bipred_idc;
  pic_descriptor->pps_info.avc.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
  pic_descriptor->pps_info.avc.constrained_intra_pred_flag    = h->pps.constrained_intra_pred;
  pic_descriptor->pps_info.avc.redundant_pic_cnt_present_flag = h->pps.redundant_pic_cnt_present;
  pic_descriptor->pps_info.avc.transform_8x8_mode_flag        = h->pps.transform_8x8_mode;
  pic_descriptor->pps_info.avc.xvba_avc_pps_reserved          = 0; // must be 0

  memcpy(iq_matrix->bScalingLists4x4, h->pps.scaling_matrix4, sizeof(iq_matrix->bScalingLists4x4));
  memcpy(iq_matrix->bScalingLists8x8, h->pps.scaling_matrix8, sizeof(iq_matrix->bScalingLists8x8));

  // Wait for an I-frame before starting to decode; workaround for ATI UVD and UVD+ GPUs
  if (!h->got_first_iframe) {
      if (h->slice_type != FF_I_TYPE && h->slice_type != FF_SI_TYPE)
          return -1;
      h->got_first_iframe = 1;
  }

  ff_draw_horiz_band(s, 0, s->avctx->height);

  return 0;
}
Example #14
static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIContext *avi = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned char tag[5];
    unsigned int flags=0;
    const int stream_index= pkt->stream_index;
    AVIStream *avist= s->streams[stream_index]->priv_data;
    AVCodecContext *enc= s->streams[stream_index]->codec;
    int size= pkt->size;

    av_dlog(s, "dts:%s packet_count:%d stream_index:%d\n", av_ts2str(pkt->dts), avist->packet_count, stream_index);
    while(enc->block_align==0 && pkt->dts != AV_NOPTS_VALUE && pkt->dts > avist->packet_count && enc->codec_id != AV_CODEC_ID_XSUB && avist->packet_count){
        AVPacket empty_packet;

        if(pkt->dts - avist->packet_count > 60000){
            av_log(s, AV_LOG_ERROR, "Too large number of skipped frames %"PRId64" > 60000\n", pkt->dts - avist->packet_count);
            return AVERROR(EINVAL);
        }

        av_init_packet(&empty_packet);
        empty_packet.size= 0;
        empty_packet.data= NULL;
        empty_packet.stream_index= stream_index;
        avi_write_packet(s, &empty_packet);
        av_dlog(s, "dup dts:%s packet_count:%d\n", av_ts2str(pkt->dts), avist->packet_count);
    }
    avist->packet_count++;

    // Make sure to put an OpenDML chunk when the file size exceeds the limits
    if (pb->seekable &&
        (avio_tell(pb) - avi->riff_start > AVI_MAX_RIFF_SIZE)) {

        avi_write_ix(s);
        ff_end_tag(pb, avi->movi_list);

        if (avi->riff_id == 1)
            avi_write_idx1(s);

        ff_end_tag(pb, avi->riff_start);
        avi->movi_list = avi_start_new_riff(s, pb, "AVIX", "movi");
    }

    avi_stream2fourcc(tag, stream_index, enc->codec_type);
    if(pkt->flags&AV_PKT_FLAG_KEY)
        flags = 0x10;
    if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
       avist->audio_strm_length += size;
    }

    if (s->pb->seekable) {
        AVIIndex* idx = &avist->indexes;
        int cl = idx->entry / AVI_INDEX_CLUSTER_SIZE;
        int id = idx->entry % AVI_INDEX_CLUSTER_SIZE;
        if (idx->ents_allocated <= idx->entry) {
            idx->cluster = av_realloc_f(idx->cluster, sizeof(void*), cl+1);
            if (!idx->cluster)
                return AVERROR(ENOMEM);
            idx->cluster[cl] = av_malloc(AVI_INDEX_CLUSTER_SIZE*sizeof(AVIIentry));
            if (!idx->cluster[cl])
                return AVERROR(ENOMEM);
            idx->ents_allocated += AVI_INDEX_CLUSTER_SIZE;
        }

        idx->cluster[cl][id].flags = flags;
        idx->cluster[cl][id].pos = avio_tell(pb) - avi->movi_list;
        idx->cluster[cl][id].len = size;
        idx->entry++;
    }

    avio_write(pb, tag, 4);
    avio_wl32(pb, size);
    avio_write(pb, pkt->data, size);
    if (size & 1)
        avio_w8(pb, 0);

    return 0;
}
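
The final avio_w8(pb, 0) applies the RIFF alignment rule: chunk data occupies an even number of bytes on disk while the size field records the true payload length. A minimal sketch of the resulting on-disk cost of a chunk:

#include <stdio.h>

/* bytes a RIFF chunk occupies: fourcc (4) + size field (4) + data + pad */
static long riff_chunk_disk_bytes(long payload)
{
    return 8 + payload + (payload & 1);
}

int main(void)
{
    printf("%ld\n", riff_chunk_disk_bytes(13)); /* 22: one pad byte added */
    printf("%ld\n", riff_chunk_disk_bytes(14)); /* 22: already even, no pad */
    return 0;
}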
Example #15
static int gif_read_image(GifState *s, AVFrame *frame)
{
    int left, top, width, height, bits_per_pixel, code_size, flags;
    int is_interleaved, has_local_palette, y, pass, y1, linesize, pal_size;
    uint32_t *ptr, *pal, *px, *pr, *ptr1;
    int ret;
    uint8_t *idx;

    /* At least 9 bytes of Image Descriptor. */
    if (bytestream2_get_bytes_left(&s->gb) < 9)
        return AVERROR_INVALIDDATA;

    left   = bytestream2_get_le16u(&s->gb);
    top    = bytestream2_get_le16u(&s->gb);
    width  = bytestream2_get_le16u(&s->gb);
    height = bytestream2_get_le16u(&s->gb);
    flags  = bytestream2_get_byteu(&s->gb);
    is_interleaved = flags & 0x40;
    has_local_palette = flags & 0x80;
    bits_per_pixel = (flags & 0x07) + 1;

    av_dlog(s->avctx, "image x=%d y=%d w=%d h=%d\n", left, top, width, height);

    if (has_local_palette) {
        pal_size = 1 << bits_per_pixel;

        if (bytestream2_get_bytes_left(&s->gb) < pal_size * 3)
            return AVERROR_INVALIDDATA;

        gif_read_palette(s, s->local_palette, pal_size);
        pal = s->local_palette;
    } else {
        if (!s->has_global_palette) {
            av_log(s->avctx, AV_LOG_ERROR, "picture doesn't have either global or local palette.\n");
            return AVERROR_INVALIDDATA;
        }

        pal = s->global_palette;
    }

    if (s->keyframe) {
        if (s->transparent_color_index == -1 && s->has_global_palette) {
            /* transparency wasn't set before the first frame, fill with background color */
            gif_fill(frame, s->bg_color);
        } else {
            /* otherwise fill with the transparent color;
             * necessary since by default the picture is filled with 0x80808080. */
            gif_fill(frame, s->trans_color);
        }
    }

    /* verify that the whole image is inside the screen dimensions */
    if (left + width > s->screen_width ||
        top + height > s->screen_height) {
        av_log(s->avctx, AV_LOG_ERROR, "image is outside the screen dimensions.\n");
        return AVERROR_INVALIDDATA;
    }
    if (width <= 0 || height <= 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Invalid image dimensions.\n");
        return AVERROR_INVALIDDATA;
    }

    /* process disposal method */
    if (s->gce_prev_disposal == GCE_DISPOSAL_BACKGROUND) {
        gif_fill_rect(frame, s->stored_bg_color, s->gce_l, s->gce_t, s->gce_w, s->gce_h);
    } else if (s->gce_prev_disposal == GCE_DISPOSAL_RESTORE) {
        gif_copy_img_rect(s->stored_img, (uint32_t *)frame->data[0],
            frame->linesize[0] / sizeof(uint32_t), s->gce_l, s->gce_t, s->gce_w, s->gce_h);
    }

    s->gce_prev_disposal = s->gce_disposal;

    if (s->gce_disposal != GCE_DISPOSAL_NONE) {
        s->gce_l = left;  s->gce_t = top;
        s->gce_w = width; s->gce_h = height;

        if (s->gce_disposal == GCE_DISPOSAL_BACKGROUND) {
            if (s->transparent_color_index >= 0)
                s->stored_bg_color = s->trans_color;
            else
                s->stored_bg_color = s->bg_color;
        } else if (s->gce_disposal == GCE_DISPOSAL_RESTORE) {
            av_fast_malloc(&s->stored_img, &s->stored_img_size, frame->linesize[0] * frame->height);
            if (!s->stored_img)
                return AVERROR(ENOMEM);

            gif_copy_img_rect((uint32_t *)frame->data[0], s->stored_img,
                frame->linesize[0] / sizeof(uint32_t), left, top, width, height);
        }
    }

    /* Expect at least 2 bytes: 1 for lzw code size and 1 for block size. */
    if (bytestream2_get_bytes_left(&s->gb) < 2)
        return AVERROR_INVALIDDATA;

    /* now get the image data */
    code_size = bytestream2_get_byteu(&s->gb);
    if ((ret = ff_lzw_decode_init(s->lzw, code_size, s->gb.buffer,
                                  bytestream2_get_bytes_left(&s->gb), FF_LZW_GIF)) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "LZW init failed\n");
        return ret;
    }

    /* read the whole image */
    linesize = frame->linesize[0] / sizeof(uint32_t);
    ptr1 = (uint32_t *)frame->data[0] + top * linesize + left;
    ptr = ptr1;
    pass = 0;
    y1 = 0;
    for (y = 0; y < height; y++) {
        if (ff_lzw_decode(s->lzw, s->idx_line, width) == 0)
            goto decode_tail;

        pr = ptr + width;

        for (px = ptr, idx = s->idx_line; px < pr; px++, idx++) {
            if (*idx != s->transparent_color_index)
                *px = pal[*idx];
        }

        if (is_interleaved) {
            switch(pass) {
            default:
            case 0:
            case 1:
                y1 += 8;
                ptr += linesize * 8;
                if (y1 >= height) {
                    y1 = pass ? 2 : 4;
                    ptr = ptr1 + linesize * y1;
                    pass++;
                }
                break;
            case 2:
                y1 += 4;
                ptr += linesize * 4;
                if (y1 >= height) {
                    y1 = 1;
                    ptr = ptr1 + linesize;
                    pass++;
                }
                break;
            case 3:
                y1 += 2;
                ptr += linesize * 2;
                break;
            }
        } else {
            ptr += linesize;
        }
    }

 decode_tail:
    /* read the garbage data until end marker is found */
    ff_lzw_decode_tail(s->lzw);

    /* Graphic Control Extension's scope is single frame.
     * Remove its influence. */
    s->transparent_color_index = -1;
    s->gce_disposal = GCE_DISPOSAL_NONE;

    return 0;
}
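
The interleaved branch above walks the four standard GIF passes, rows 0,8,16,... then 4,12,... then 2,6,... then 1,3,... A minimal sketch that prints the same row order for a small image:

#include <stdio.h>

int main(void)
{
    static const int start[4] = { 0, 4, 2, 1 };
    static const int step[4]  = { 8, 8, 4, 2 };
    int height = 10, pass, y;

    for (pass = 0; pass < 4; pass++)
        for (y = start[pass]; y < height; y += step[pass])
            printf("pass %d -> row %d\n", pass, y);
    return 0;
}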
Example #16
static int init_pass2(MpegEncContext *s)
{
    RateControlContext *rcc = &s->rc_context;
    AVCodecContext *a       = s->avctx;
    int i, toobig;
    double fps             = 1 / av_q2d(s->avctx->time_base);
    double complexity[5]   = { 0 }; // approximate bits at quant=1
    uint64_t const_bits[5] = { 0 }; // quantizer independent bits
    uint64_t all_const_bits;
    uint64_t all_available_bits = (uint64_t)(s->bit_rate *
                                             (double)rcc->num_entries / fps);
    double rate_factor          = 0;
    double step;
    const int filter_size = (int)(a->qblur * 4) | 1;
    double expected_bits;
    double *qscale, *blurred_qscale, qscale_sum;

    /* find complexity & const_bits & decide the pict_types */
    for (i = 0; i < rcc->num_entries; i++) {
        RateControlEntry *rce = &rcc->entry[i];

        rce->new_pict_type                = rce->pict_type;
        rcc->i_cplx_sum[rce->pict_type]  += rce->i_tex_bits * rce->qscale;
        rcc->p_cplx_sum[rce->pict_type]  += rce->p_tex_bits * rce->qscale;
        rcc->mv_bits_sum[rce->pict_type] += rce->mv_bits;
        rcc->frame_count[rce->pict_type]++;

        complexity[rce->new_pict_type] += (rce->i_tex_bits + rce->p_tex_bits) *
                                          (double)rce->qscale;
        const_bits[rce->new_pict_type] += rce->mv_bits + rce->misc_bits;
    }

    all_const_bits = const_bits[AV_PICTURE_TYPE_I] +
                     const_bits[AV_PICTURE_TYPE_P] +
                     const_bits[AV_PICTURE_TYPE_B];

    if (all_available_bits < all_const_bits) {
        av_log(s->avctx, AV_LOG_ERROR, "requested bitrate is too low\n");
        return -1;
    }

    qscale         = av_malloc(sizeof(double) * rcc->num_entries);
    blurred_qscale = av_malloc(sizeof(double) * rcc->num_entries);
    if (!qscale || !blurred_qscale) {
        av_free(qscale);
        av_free(blurred_qscale);
        return -1;
    }
    toobig = 0;

    for (step = 256 * 256; step > 0.0000001; step *= 0.5) {
        expected_bits = 0;
        rate_factor  += step;

        rcc->buffer_index = s->avctx->rc_buffer_size / 2;

        /* find qscale */
        for (i = 0; i < rcc->num_entries; i++) {
            RateControlEntry *rce = &rcc->entry[i];

            qscale[i] = get_qscale(s, &rcc->entry[i], rate_factor, i);
            rcc->last_qscale_for[rce->pict_type] = qscale[i];
        }
        assert(filter_size % 2 == 1);

        /* fixed I/B QP relative to P mode */
        for (i = rcc->num_entries - 1; i >= 0; i--) {
            RateControlEntry *rce = &rcc->entry[i];

            qscale[i] = get_diff_limited_q(s, rce, qscale[i]);
        }

        /* smooth curve */
        for (i = 0; i < rcc->num_entries; i++) {
            RateControlEntry *rce = &rcc->entry[i];
            const int pict_type   = rce->new_pict_type;
            int j;
            double q = 0.0, sum = 0.0;

            for (j = 0; j < filter_size; j++) {
                int index    = i + j - filter_size / 2;
                double d     = index - i;
                double coeff = a->qblur == 0 ? 1.0 : exp(-d * d / (a->qblur * a->qblur));

                if (index < 0 || index >= rcc->num_entries)
                    continue;
                if (pict_type != rcc->entry[index].new_pict_type)
                    continue;
                q   += qscale[index] * coeff;
                sum += coeff;
            }
            blurred_qscale[i] = q / sum;
        }

        /* find expected bits */
        for (i = 0; i < rcc->num_entries; i++) {
            RateControlEntry *rce = &rcc->entry[i];
            double bits;

            rce->new_qscale = modify_qscale(s, rce, blurred_qscale[i], i);

            bits  = qp2bits(rce, rce->new_qscale) + rce->mv_bits + rce->misc_bits;
            bits += 8 * ff_vbv_update(s, bits);

            rce->expected_bits = expected_bits;
            expected_bits     += bits;
        }

        av_dlog(s->avctx,
                "expected_bits: %f all_available_bits: %d rate_factor: %f\n",
                expected_bits, (int)all_available_bits, rate_factor);
        if (expected_bits > all_available_bits) {
            rate_factor -= step;
            ++toobig;
        }
    }
    av_free(qscale);
    av_free(blurred_qscale);

    /* check bitrate calculations and print info */
    qscale_sum = 0.0;
    for (i = 0; i < rcc->num_entries; i++) {
        av_dlog(s->avctx, "[lavc rc] entry[%d].new_qscale = %.3f  qp = %.3f\n",
                i,
                rcc->entry[i].new_qscale,
                rcc->entry[i].new_qscale / FF_QP2LAMBDA);
        qscale_sum += av_clip(rcc->entry[i].new_qscale / FF_QP2LAMBDA,
                              s->avctx->qmin, s->avctx->qmax);
    }
    assert(toobig <= 40);
    av_log(s->avctx, AV_LOG_DEBUG,
           "[lavc rc] requested bitrate: %d bps  expected bitrate: %d bps\n",
           s->bit_rate,
           (int)(expected_bits / ((double)all_available_bits / s->bit_rate)));
    av_log(s->avctx, AV_LOG_DEBUG,
           "[lavc rc] estimated target average qp: %.3f\n",
           (float)qscale_sum / rcc->num_entries);
    if (toobig == 0) {
        av_log(s->avctx, AV_LOG_INFO,
               "[lavc rc] Using all of requested bitrate is not "
               "necessary for this video with these parameters.\n");
    } else if (toobig == 40) {
        av_log(s->avctx, AV_LOG_ERROR,
               "[lavc rc] Error: bitrate too low for this video "
               "with these parameters.\n");
        return -1;
    } else if (fabs(expected_bits / all_available_bits - 1.0) > 0.01) {
        av_log(s->avctx, AV_LOG_ERROR,
               "[lavc rc] Error: 2pass curve failed to converge\n");
        return -1;
    }

    return 0;
}
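
The smoothing loop above is a Gaussian blur of the first-pass qscale curve: a neighbour at distance d is weighted by exp(-d*d/(qblur*qblur)) and the result is renormalized by the summed weights. A minimal sketch of the same kernel on a toy curve (build with -lm); qblur = 0.5 is a hypothetical setting:

#include <math.h>
#include <stdio.h>

int main(void)
{
    const double qblur = 0.5;                     /* hypothetical setting */
    const int filter_size = (int)(qblur * 4) | 1; /* = 3, always odd */
    double q[8] = { 2, 2, 8, 2, 2, 2, 2, 2 };     /* toy curve with a spike */
    int i, j;

    for (i = 0; i < 8; i++) {
        double acc = 0.0, sum = 0.0;
        for (j = 0; j < filter_size; j++) {
            int index    = i + j - filter_size / 2;
            double d     = index - i;
            double coeff = exp(-d * d / (qblur * qblur));

            if (index < 0 || index >= 8)
                continue;
            acc += q[index] * coeff;
            sum += coeff;
        }
        printf("blurred[%d] = %.3f\n", i, acc / sum);
    }
    return 0;
}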
Example #17
static int h261_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    H261Context *h     = avctx->priv_data;
    MpegEncContext *s  = &h->s;
    int ret;
    AVFrame *pict = data;

    av_dlog(avctx, "*****frame %d size=%d\n", avctx->frame_number, buf_size);
    av_dlog(avctx, "bytes=%x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]);
    s->flags  = avctx->flags;
    s->flags2 = avctx->flags2;

    h->gob_start_code_skipped = 0;

retry:
    init_get_bits(&s->gb, buf, buf_size * 8);

    if (!s->context_initialized)
        // we need the IDCT permutation for reading a custom matrix
        if (ff_MPV_common_init(s) < 0)
            return -1;

    /* We need to set current_picture_ptr before reading the header,
     * otherwise we cannot store anything in there. */
    if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
        int i = ff_find_unused_picture(s, 0);
        if (i < 0)
            return i;
        s->current_picture_ptr = &s->picture[i];
    }

    ret = h261_decode_picture_header(h);

    /* skip if the header was trashed */
    if (ret < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "header damaged\n");
        return -1;
    }

    if (s->width != avctx->coded_width || s->height != avctx->coded_height) {
        ParseContext pc = s->parse_context; // FIXME move this demuxing hack to libavformat
        s->parse_context.buffer = 0;
        ff_MPV_common_end(s);
        s->parse_context = pc;
    }
    if (!s->context_initialized) {
        avcodec_set_dimensions(avctx, s->width, s->height);

        goto retry;
    }

    // for skipping the frame
    s->current_picture.f.pict_type = s->pict_type;
    s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
        (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
         avctx->skip_frame >= AVDISCARD_ALL)
        return get_consumed_bytes(s, buf_size);

    if (ff_MPV_frame_start(s, avctx) < 0)
        return -1;

    ff_mpeg_er_frame_start(s);

    /* decode each macroblock */
    s->mb_x = 0;
    s->mb_y = 0;

    while (h->gob_number < (s->mb_height == 18 ? 12 : 5)) {
        if (ff_h261_resync(h) < 0)
            break;
        h261_decode_gob(h);
    }
    ff_MPV_frame_end(s);

    av_assert0(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
    av_assert0(s->current_picture.f.pict_type == s->pict_type);

    if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
        return ret;
    ff_print_debug_info(s, s->current_picture_ptr, pict);

    *got_frame = 1;

    return get_consumed_bytes(s, buf_size);
}
Example #18
static int gif_read_image(GifState *s, AVFrame *frame)
{
    int left, top, width, height, bits_per_pixel, code_size, flags, pw;
    int is_interleaved, has_local_palette, y, pass, y1, linesize, pal_size;
    uint32_t *ptr, *pal, *px, *pr, *ptr1;
    int ret;
    uint8_t *idx;

    /* At least 9 bytes of Image Descriptor. */
    if (bytestream2_get_bytes_left(&s->gb) < 9)
        return AVERROR_INVALIDDATA;

    left   = bytestream2_get_le16u(&s->gb);
    top    = bytestream2_get_le16u(&s->gb);
    width  = bytestream2_get_le16u(&s->gb);
    height = bytestream2_get_le16u(&s->gb);
    flags  = bytestream2_get_byteu(&s->gb);
    is_interleaved = flags & 0x40;
    has_local_palette = flags & 0x80;
    bits_per_pixel = (flags & 0x07) + 1;

    av_dlog(s->avctx, "image x=%d y=%d w=%d h=%d\n", left, top, width, height);

    if (has_local_palette) {
        pal_size = 1 << bits_per_pixel;

        if (bytestream2_get_bytes_left(&s->gb) < pal_size * 3)
            return AVERROR_INVALIDDATA;

        gif_read_palette(s, s->local_palette, pal_size);
        pal = s->local_palette;
    } else {
        if (!s->has_global_palette) {
            av_log(s->avctx, AV_LOG_ERROR, "picture doesn't have either global or local palette.\n");
            return AVERROR_INVALIDDATA;
        }

        pal = s->global_palette;
    }

    if (s->keyframe) {
        if (s->transparent_color_index == -1 && s->has_global_palette) {
            /* transparency wasn't set before the first frame, fill with background color */
            gif_fill(frame, s->bg_color);
        } else {
            /* otherwise fill with the transparent color;
             * necessary since by default the picture is filled with 0x80808080. */
            gif_fill(frame, s->trans_color);
        }
    }

    /* verify that the whole image is inside the screen dimensions */
    if (!width || width > s->screen_width || left >= s->screen_width) {
        av_log(s->avctx, AV_LOG_ERROR, "Invalid image width.\n");
        return AVERROR_INVALIDDATA;
    }
    if (!height || height > s->screen_height || top >= s->screen_height) {
        av_log(s->avctx, AV_LOG_ERROR, "Invalid image height.\n");
        return AVERROR_INVALIDDATA;
    }
    if (left + width > s->screen_width) {
        /* width must be kept around to avoid lzw vs line desync */
        pw = s->screen_width - left;
        av_log(s->avctx, AV_LOG_WARNING, "Image too wide by %d, truncating.\n",
               left + width - s->screen_width);
    } else {
        pw = width;
    }
    if (top + height > s->screen_height) {
        /* we don't care about the extra invisible lines */
        av_log(s->avctx, AV_LOG_WARNING, "Image too high by %d, truncating.\n",
               top + height - s->screen_height);
        height = s->screen_height - top;
    }

    /* process disposal method */
    if (s->gce_prev_disposal == GCE_DISPOSAL_BACKGROUND) {
        gif_fill_rect(frame, s->stored_bg_color, s->gce_l, s->gce_t, s->gce_w, s->gce_h);
    } else if (s->gce_prev_disposal == GCE_DISPOSAL_RESTORE) {
        gif_copy_img_rect(s->stored_img, (uint32_t *)frame->data[0],
            frame->linesize[0] / sizeof(uint32_t), s->gce_l, s->gce_t, s->gce_w, s->gce_h);
    }

    s->gce_prev_disposal = s->gce_disposal;

    if (s->gce_disposal != GCE_DISPOSAL_NONE) {
        s->gce_l = left;  s->gce_t = top;
        s->gce_w = pw;    s->gce_h = height;

        if (s->gce_disposal == GCE_DISPOSAL_BACKGROUND) {
            if (s->transparent_color_index >= 0)
                s->stored_bg_color = s->trans_color;
            else
                s->stored_bg_color = s->bg_color;
        } else if (s->gce_disposal == GCE_DISPOSAL_RESTORE) {
            av_fast_malloc(&s->stored_img, &s->stored_img_size, frame->linesize[0] * frame->height);
            if (!s->stored_img)
                return AVERROR(ENOMEM);

            gif_copy_img_rect((uint32_t *)frame->data[0], s->stored_img,
                frame->linesize[0] / sizeof(uint32_t), left, top, pw, height);
        }
    }

    /* Expect at least 2 bytes: 1 for lzw code size and 1 for block size. */
    if (bytestream2_get_bytes_left(&s->gb) < 2)
        return AVERROR_INVALIDDATA;

    /* now get the image data */
    code_size = bytestream2_get_byteu(&s->gb);
    if ((ret = ff_lzw_decode_init(s->lzw, code_size, s->gb.buffer,
                                  bytestream2_get_bytes_left(&s->gb), FF_LZW_GIF)) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "LZW init failed\n");
        return ret;
    }

    /* read the whole image */
    linesize = frame->linesize[0] / sizeof(uint32_t);
    ptr1 = (uint32_t *)frame->data[0] + top * linesize + left;
    ptr = ptr1;
    pass = 0;
    y1 = 0;
    for (y = 0; y < height; y++) {
        int count = ff_lzw_decode(s->lzw, s->idx_line, width);
        if (count != width) {
            if (count)
                av_log(s->avctx, AV_LOG_ERROR, "LZW decode failed\n");
            goto decode_tail;
        }

        pr = ptr + pw;

        for (px = ptr, idx = s->idx_line; px < pr; px++, idx++) {
            if (*idx != s->transparent_color_index)
                *px = pal[*idx];
        }

        if (is_interleaved) {
            switch(pass) {
            default:
            case 0:
            case 1:
                y1 += 8;
                ptr += linesize * 8;
                break;
            case 2:
                y1 += 4;
                ptr += linesize * 4;
                break;
            case 3:
                y1 += 2;
                ptr += linesize * 2;
                break;
            }
            while (y1 >= height) {
                y1 = 4 >> pass;
                ptr = ptr1 + linesize * y1;
                pass++;
            }
        } else {
            ptr += linesize;
        }
    }

 decode_tail:
    /* read the garbage data until end marker is found */
    ff_lzw_decode_tail(s->lzw);

    /* Graphic Control Extension's scope is single frame.
     * Remove its influence. */
    s->transparent_color_index = -1;
    s->gce_disposal = GCE_DISPOSAL_NONE;

    return 0;
}
Example #19
/**
 * Parse the presentation segment packet.
 *
 * The presentation segment contains details on the video
 * width, video height, x & y subtitle position.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @todo Implement cropping
 * @todo Implement forcing of subtitles
 */
static int parse_presentation_segment(AVCodecContext *avctx,
                                      const uint8_t *buf, int buf_size,
                                      int64_t pts)
{
    PGSSubContext *ctx = avctx->priv_data;

    int x, y, ret;

    int w = bytestream_get_be16(&buf);
    int h = bytestream_get_be16(&buf);

    ctx->presentation.pts = pts;

    av_dlog(avctx, "Video Dimensions %dx%d\n",
            w, h);
    ret = ff_set_dimensions(avctx, w, h);
    if (ret < 0)
        return ret;

    /* Skip 1 byte of unknown, frame rate? */
    buf++;

    ctx->presentation.id_number = bytestream_get_be16(&buf);

    /*
     * Skip 3 bytes of unknown:
     *     state
     *     palette_update_flag (0x80),
     *     palette_id_to_use,
     */
    buf += 3;

    ctx->presentation.object_number = bytestream_get_byte(&buf);
    ctx->presentation.composition_flag = 0;
    if (!ctx->presentation.object_number)
        return 0;

    /*
     * Skip 3 bytes of unknown:
     *     object_id_ref (2 bytes),
     *     window_id_ref,
     */
    buf += 3;
    ctx->presentation.composition_flag = bytestream_get_byte(&buf);

    x = bytestream_get_be16(&buf);
    y = bytestream_get_be16(&buf);

    /* TODO If cropping, cropping_x, cropping_y, cropping_width, cropping_height (all 2 bytes).*/

    av_dlog(avctx, "Subtitle Placement x=%d, y=%d\n", x, y);

    if (x > avctx->width || y > avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "Subtitle out of video bounds. x = %d, y = %d, video width = %d, video height = %d.\n",
               x, y, avctx->width, avctx->height);
        x = 0; y = 0;
    }

    /* Fill in dimensions */
    ctx->presentation.x = x;
    ctx->presentation.y = y;

    return 0;
}
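
bytestream_get_be16() above reads two bytes big-endian and advances the pointer. A minimal equivalent reader, in case the helper is unfamiliar; the sample bytes are hypothetical:

#include <stdint.h>
#include <stdio.h>

static unsigned get_be16(const uint8_t **p)
{
    unsigned v = ((*p)[0] << 8) | (*p)[1];
    *p += 2;
    return v;
}

int main(void)
{
    static const uint8_t seg[4] = { 0x07, 0x80, 0x04, 0x38 };
    const uint8_t *buf = seg;
    unsigned w = get_be16(&buf);
    unsigned h = get_be16(&buf);

    printf("w=%u h=%u\n", w, h); /* -> w=1920 h=1080 */
    return 0;
}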
Example #20
/**
 * Decode LSE block with initialization parameters
 */
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
{
    int id;
    int tid, wt, maxtab, i, j;

    int len = get_bits(&s->gb, 16);
    id = get_bits(&s->gb, 8);

    switch (id) {
    case 1:
        if (len < 13)
            return AVERROR_INVALIDDATA;

        s->maxval = get_bits(&s->gb, 16);
        s->t1     = get_bits(&s->gb, 16);
        s->t2     = get_bits(&s->gb, 16);
        s->t3     = get_bits(&s->gb, 16);
        s->reset  = get_bits(&s->gb, 16);

        if(s->avctx->debug & FF_DEBUG_PICT_INFO) {
            av_log(s->avctx, AV_LOG_DEBUG, "Coding parameters maxval:%d T1:%d T2:%d T3:%d reset:%d\n",
                   s->maxval, s->t1, s->t2, s->t3, s->reset);
        }

//        ff_jpegls_reset_coding_parameters(s, 0);
        //FIXME quant table?
        break;
    case 2:
        s->palette_index = 0;
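        /* fall through: LSE id 2 starts a new mapping table, id 3 continues it */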
    case 3:
        tid= get_bits(&s->gb, 8);
        wt = get_bits(&s->gb, 8);

        if (len < 5)
            return AVERROR_INVALIDDATA;

        if (wt < 1 || wt > MAX_COMPONENTS) {
            avpriv_request_sample(s->avctx, "wt %d", wt);
            return AVERROR_PATCHWELCOME;
        }

        if (!s->maxval)
            maxtab = 255;
        else if ((5 + wt*(s->maxval+1)) < 65535)
            maxtab = s->maxval;
        else
            maxtab = 65530/wt - 1;

        if(s->avctx->debug & FF_DEBUG_PICT_INFO) {
            av_log(s->avctx, AV_LOG_DEBUG, "LSE palette %d tid:%d wt:%d maxtab:%d\n", id, tid, wt, maxtab);
        }
        if (maxtab >= 256) {
            avpriv_request_sample(s->avctx, ">8bit palette");
            return AVERROR_PATCHWELCOME;
        }
        maxtab = FFMIN(maxtab, (len - 5) / wt + s->palette_index);

        if (s->palette_index > maxtab)
            return AVERROR_INVALIDDATA;

        if ((s->avctx->pix_fmt == AV_PIX_FMT_GRAY8 || s->avctx->pix_fmt == AV_PIX_FMT_PAL8) &&
            (s->picture_ptr->format == AV_PIX_FMT_GRAY8 || s->picture_ptr->format == AV_PIX_FMT_PAL8)) {
            uint32_t *pal = s->picture_ptr->data[1];
            s->picture_ptr->format =
            s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
            for (i=s->palette_index; i<=maxtab; i++) {
                pal[i] = 0;
                for (j=0; j<wt; j++) {
                    pal[i] |= get_bits(&s->gb, 8) << (8*(wt-j-1));
                }
            }
            s->palette_index = i;
        }
        break;
    case 4:
        avpriv_request_sample(s->avctx, "oversize image");
        return AVERROR(ENOSYS);
    default:
        av_log(s->avctx, AV_LOG_ERROR, "invalid id %d\n", id);
        return AVERROR_INVALIDDATA;
    }
    av_dlog(s->avctx, "ID=%i, T=%i,%i,%i\n", id, s->t1, s->t2, s->t3);

    return 0;
}
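
The palette branch above packs wt component bytes per entry, most significant first, via the (wt - j - 1) shift. A minimal sketch of that packing for one entry; wt = 3 (an RGB table) and the component values are hypothetical:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint8_t comp[3] = { 0x11, 0x22, 0x33 }; /* hypothetical R,G,B bytes */
    int wt = 3, j;
    uint32_t entry = 0;

    for (j = 0; j < wt; j++)
        entry |= (uint32_t)comp[j] << (8 * (wt - j - 1));
    printf("pal entry = 0x%06X\n", entry); /* -> 0x112233 */
    return 0;
}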
Example #21
static int pcm_bluray_decode_frame(AVCodecContext *avctx,
                                   void *data,
                                   int *data_size,
                                   AVPacket *avpkt)
{
	const uint8_t *src = avpkt->data;
	int buf_size = avpkt->size;
	int num_source_channels, channel, retval;
	int sample_size, samples, output_size;
	int16_t *dst16 = data;
	int32_t *dst32 = data;

	if (buf_size < 4)
	{
		av_log(avctx, AV_LOG_ERROR, "PCM packet too small\n");
		return -1;
	}

	if (pcm_bluray_parse_header(avctx, src))
		return -1;
	src += 4;
	buf_size -= 4;

	/* There's always an even number of channels in the source */
	num_source_channels = FFALIGN(avctx->channels, 2);
	sample_size = (num_source_channels * avctx->bits_per_coded_sample) >> 3;
	samples = buf_size / sample_size;

	output_size = samples * avctx->channels *
	              (avctx->sample_fmt == AV_SAMPLE_FMT_S32 ? 4 : 2);
	if (output_size > *data_size)
	{
		av_log(avctx, AV_LOG_ERROR,
		       "Insufficient output buffer space (%d bytes, needed %d bytes)\n",
		       *data_size, output_size);
		return -1;
	}
	*data_size = output_size;

	if (samples)
	{
		switch (avctx->channel_layout)
		{
			/* cases with same number of source and coded channels */
		case AV_CH_LAYOUT_STEREO:
		case AV_CH_LAYOUT_4POINT0:
		case AV_CH_LAYOUT_2_2:
			samples *= num_source_channels;
			if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt)
			{
#if HAVE_BIGENDIAN
				memcpy(dst16, src, output_size);
#else
				do
				{
					*dst16++ = bytestream_get_be16(&src);
				}
				while (--samples);
#endif
			}
			else
			{
				do
				{
					*dst32++ = bytestream_get_be24(&src) << 8;
				}
				while (--samples);
			}
			break;
			/* cases where number of source channels = coded channels + 1 */
		case AV_CH_LAYOUT_MONO:
		case AV_CH_LAYOUT_SURROUND:
		case AV_CH_LAYOUT_2_1:
		case AV_CH_LAYOUT_5POINT0:
			if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt)
			{
				do
				{
#if HAVE_BIGENDIAN
					memcpy(dst16, src, avctx->channels * 2);
					dst16 += avctx->channels;
					src += sample_size;
#else
					channel = avctx->channels;
					do
					{
						*dst16++ = bytestream_get_be16(&src);
					}
					while (--channel);
					src += 2;
#endif
				}
				while (--samples);
			}
			else
			{
				do
				{
					channel = avctx->channels;
					do
					{
						*dst32++ = bytestream_get_be24(&src) << 8;
					}
					while (--channel);
					src += 3;
				}
				while (--samples);
			}
			break;
			/* remapping: L, R, C, LBack, RBack, LF */
		case AV_CH_LAYOUT_5POINT1:
			if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt)
			{
				do
				{
					dst16[0] = bytestream_get_be16(&src);
					dst16[1] = bytestream_get_be16(&src);
					dst16[2] = bytestream_get_be16(&src);
					dst16[4] = bytestream_get_be16(&src);
					dst16[5] = bytestream_get_be16(&src);
					dst16[3] = bytestream_get_be16(&src);
					dst16 += 6;
				}
				while (--samples);
			}
			else
			{
				do
				{
					dst32[0] = bytestream_get_be24(&src) << 8;
					dst32[1] = bytestream_get_be24(&src) << 8;
					dst32[2] = bytestream_get_be24(&src) << 8;
					dst32[4] = bytestream_get_be24(&src) << 8;
					dst32[5] = bytestream_get_be24(&src) << 8;
					dst32[3] = bytestream_get_be24(&src) << 8;
					dst32 += 6;
				}
				while (--samples);
			}
			break;
			/* remapping: L, R, C, LSide, LBack, RBack, RSide, <unused> */
		case AV_CH_LAYOUT_7POINT0:
			if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt)
			{
				do
				{
					dst16[0] = bytestream_get_be16(&src);
					dst16[1] = bytestream_get_be16(&src);
					dst16[2] = bytestream_get_be16(&src);
					dst16[5] = bytestream_get_be16(&src);
					dst16[3] = bytestream_get_be16(&src);
					dst16[4] = bytestream_get_be16(&src);
					dst16[6] = bytestream_get_be16(&src);
					dst16 += 7;
					src += 2;
				}
				while (--samples);
			}
			else
			{
				do
				{
					dst32[0] = bytestream_get_be24(&src) << 8;
					dst32[1] = bytestream_get_be24(&src) << 8;
					dst32[2] = bytestream_get_be24(&src) << 8;
					dst32[5] = bytestream_get_be24(&src) << 8;
					dst32[3] = bytestream_get_be24(&src) << 8;
					dst32[4] = bytestream_get_be24(&src) << 8;
					dst32[6] = bytestream_get_be24(&src) << 8;
					dst32 += 7;
					src += 3;
				}
				while (--samples);
			}
			break;
			/* remapping: L, R, C, LSide, LBack, RBack, RSide, LF */
		case AV_CH_LAYOUT_7POINT1:
			if (AV_SAMPLE_FMT_S16 == avctx->sample_fmt)
			{
				do
				{
					dst16[0] = bytestream_get_be16(&src);
					dst16[1] = bytestream_get_be16(&src);
					dst16[2] = bytestream_get_be16(&src);
					dst16[6] = bytestream_get_be16(&src);
					dst16[4] = bytestream_get_be16(&src);
					dst16[5] = bytestream_get_be16(&src);
					dst16[7] = bytestream_get_be16(&src);
					dst16[3] = bytestream_get_be16(&src);
					dst16 += 8;
				}
				while (--samples);
			}
			else
			{
				do
				{
					dst32[0] = bytestream_get_be24(&src) << 8;
					dst32[1] = bytestream_get_be24(&src) << 8;
					dst32[2] = bytestream_get_be24(&src) << 8;
					dst32[6] = bytestream_get_be24(&src) << 8;
					dst32[4] = bytestream_get_be24(&src) << 8;
					dst32[5] = bytestream_get_be24(&src) << 8;
					dst32[7] = bytestream_get_be24(&src) << 8;
					dst32[3] = bytestream_get_be24(&src) << 8;
					dst32 += 8;
				}
				while (--samples);
			}
			break;
		}
	}

	retval = src - avpkt->data;
	if (avctx->debug & FF_DEBUG_BITSTREAM)
		av_dlog(avctx, "pcm_bluray_decode_frame: decoded %d -> %d bytes\n",
		        retval, *data_size);
	return retval;
}
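
Every 24-bit path above widens a sample with bytestream_get_be24(&src) << 8, leaving the value in the top 24 bits of a signed 32-bit sample. A minimal standalone equivalent (assumes the usual two's-complement representation):

#include <stdint.h>
#include <stdio.h>

/* big-endian 24-bit PCM -> s32: value in the high 24 bits, low byte zero */
static int32_t be24_to_s32(const uint8_t *p)
{
    return (int32_t)(((uint32_t)p[0] << 24) |
                     ((uint32_t)p[1] << 16) |
                     ((uint32_t)p[2] <<  8));
}

int main(void)
{
    static const uint8_t smp[3] = { 0xFF, 0xFF, 0xFF }; /* -1 in 24-bit */
    printf("%d\n", be24_to_s32(smp)); /* -> -256, i.e. -1 << 8 */
    return 0;
}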
Example #22
static av_always_inline void decode_line(FFV1Context *s, int w,
                                         int16_t *sample[2],
                                         int plane_index, int bits)
{
    PlaneContext *const p = &s->plane[plane_index];
    RangeCoder *const c   = &s->c;
    int x;
    int run_count = 0;
    int run_mode  = 0;
    int run_index = s->run_index;

    if (s->slice_coding_mode == 1) {
        int i;
        for (x = 0; x < w; x++) {
            int v = 0;
            for (i=0; i<bits; i++) {
                uint8_t state = 128;
                v += v + get_rac(c, &state);
            }
            sample[1][x] = v;
        }
        return;
    }

    for (x = 0; x < w; x++) {
        int diff, context, sign;

        context = get_context(p, sample[1] + x, sample[0] + x, sample[1] + x);
        if (context < 0) {
            context = -context;
            sign    = 1;
        } else
            sign = 0;

        av_assert2(context < p->context_count);

        if (s->ac) {
            diff = get_symbol_inline(c, p->state[context], 1);
        } else {
            if (context == 0 && run_mode == 0)
                run_mode = 1;

            if (run_mode) {
                if (run_count == 0 && run_mode == 1) {
                    if (get_bits1(&s->gb)) {
                        run_count = 1 << ff_log2_run[run_index];
                        if (x + run_count <= w)
                            run_index++;
                    } else {
                        if (ff_log2_run[run_index])
                            run_count = get_bits(&s->gb, ff_log2_run[run_index]);
                        else
                            run_count = 0;
                        if (run_index)
                            run_index--;
                        run_mode = 2;
                    }
                }
                run_count--;
                if (run_count < 0) {
                    run_mode  = 0;
                    run_count = 0;
                    diff      = get_vlc_symbol(&s->gb, &p->vlc_state[context],
                                               bits);
                    if (diff >= 0)
                        diff++;
                } else
                    diff = 0;
            } else
                diff = get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);

            av_dlog(s->avctx, "count:%d index:%d, mode:%d, x:%d pos:%d\n",
                    run_count, run_index, run_mode, x, get_bits_count(&s->gb));
        }

        if (sign)
            diff = -diff;

        sample[1][x] = (predict(sample[1] + x, sample[0] + x) + diff) &
                       ((1 << bits) - 1);
    }
    s->run_index = run_index;
}
Example #23
static int vaapi_vc1_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
{
    VC1Context * const v = avctx->priv_data;
    MpegEncContext * const s = &v->s;
    struct vaapi_context * const vactx = avctx->hwaccel_context;
    VAPictureParameterBufferVC1 *pic_param;

    av_dlog(avctx, "vaapi_vc1_start_frame()\n");

    vactx->slice_param_size = sizeof(VASliceParameterBufferVC1);

    /* Fill in VAPictureParameterBufferVC1 */
    pic_param = ff_vaapi_alloc_pic_param(vactx, sizeof(VAPictureParameterBufferVC1));
    if (!pic_param)
        return -1;
    pic_param->forward_reference_picture                            = VA_INVALID_ID;
    pic_param->backward_reference_picture                           = VA_INVALID_ID;
    pic_param->inloop_decoded_picture                               = VA_INVALID_ID;
    pic_param->sequence_fields.value                                = 0; /* reset all bits */
    pic_param->sequence_fields.bits.pulldown                        = v->broadcast;
    pic_param->sequence_fields.bits.interlace                       = v->interlace;
    pic_param->sequence_fields.bits.tfcntrflag                      = v->tfcntrflag;
    pic_param->sequence_fields.bits.finterpflag                     = v->finterpflag;
    pic_param->sequence_fields.bits.psf                             = v->psf;
    pic_param->sequence_fields.bits.multires                        = v->multires;
    pic_param->sequence_fields.bits.overlap                         = v->overlap;
    pic_param->sequence_fields.bits.syncmarker                      = v->resync_marker;
    pic_param->sequence_fields.bits.rangered                        = v->rangered;
    pic_param->sequence_fields.bits.max_b_frames                    = s->avctx->max_b_frames;
#if VA_CHECK_VERSION(0,32,0)
    pic_param->sequence_fields.bits.profile                         = v->profile;
#endif
    pic_param->coded_width                                          = s->avctx->coded_width;
    pic_param->coded_height                                         = s->avctx->coded_height;
    pic_param->entrypoint_fields.value                              = 0; /* reset all bits */
    pic_param->entrypoint_fields.bits.broken_link                   = v->broken_link;
    pic_param->entrypoint_fields.bits.closed_entry                  = v->closed_entry;
    pic_param->entrypoint_fields.bits.panscan_flag                  = v->panscanflag;
    pic_param->entrypoint_fields.bits.loopfilter                    = s->loop_filter;
    pic_param->conditional_overlap_flag                             = v->condover;
    pic_param->fast_uvmc_flag                                       = v->fastuvmc;
    pic_param->range_mapping_fields.value                           = 0; /* reset all bits */
    pic_param->range_mapping_fields.bits.luma_flag                  = v->range_mapy_flag;
    pic_param->range_mapping_fields.bits.luma                       = v->range_mapy;
    pic_param->range_mapping_fields.bits.chroma_flag                = v->range_mapuv_flag;
    pic_param->range_mapping_fields.bits.chroma                     = v->range_mapuv;
    pic_param->b_picture_fraction                                   = v->bfraction_lut_index;
    pic_param->cbp_table                                            = v->cbpcy_vlc ? v->cbpcy_vlc - ff_vc1_cbpcy_p_vlc : 0;
    pic_param->mb_mode_table                                        = 0; /* XXX: interlaced frame */
    pic_param->range_reduction_frame                                = v->rangeredfrm;
    pic_param->rounding_control                                     = v->rnd;
    pic_param->post_processing                                      = v->postproc;
    pic_param->picture_resolution_index                             = v->respic;
    pic_param->luma_scale                                           = v->lumscale;
    pic_param->luma_shift                                           = v->lumshift;
    pic_param->picture_fields.value                                 = 0; /* reset all bits */
    pic_param->picture_fields.bits.picture_type                     = vc1_get_PTYPE(v);
    pic_param->picture_fields.bits.frame_coding_mode                = v->fcm;
    pic_param->picture_fields.bits.top_field_first                  = v->tff;
    pic_param->picture_fields.bits.is_first_field                   = v->fcm == 0; /* XXX: interlaced frame */
    pic_param->picture_fields.bits.intensity_compensation           = v->mv_mode == MV_PMODE_INTENSITY_COMP;
    pic_param->raw_coding.value                                     = 0; /* reset all bits */
    pic_param->raw_coding.flags.mv_type_mb                          = v->mv_type_is_raw;
    pic_param->raw_coding.flags.direct_mb                           = v->dmb_is_raw;
    pic_param->raw_coding.flags.skip_mb                             = v->skip_is_raw;
    pic_param->raw_coding.flags.field_tx                            = 0; /* XXX: interlaced frame */
    pic_param->raw_coding.flags.forward_mb                          = 0; /* XXX: interlaced frame */
    pic_param->raw_coding.flags.ac_pred                             = v->acpred_is_raw;
    pic_param->raw_coding.flags.overflags                           = v->overflg_is_raw;
    pic_param->bitplane_present.value                               = 0; /* reset all bits */
    pic_param->bitplane_present.flags.bp_mv_type_mb                 = vc1_has_MVTYPEMB_bitplane(v);
    pic_param->bitplane_present.flags.bp_direct_mb                  = vc1_has_DIRECTMB_bitplane(v);
    pic_param->bitplane_present.flags.bp_skip_mb                    = vc1_has_SKIPMB_bitplane(v);
    pic_param->bitplane_present.flags.bp_field_tx                   = 0; /* XXX: interlaced frame */
    pic_param->bitplane_present.flags.bp_forward_mb                 = 0; /* XXX: interlaced frame */
    pic_param->bitplane_present.flags.bp_ac_pred                    = vc1_has_ACPRED_bitplane(v);
    pic_param->bitplane_present.flags.bp_overflags                  = vc1_has_OVERFLAGS_bitplane(v);
    pic_param->reference_fields.value                               = 0; /* reset all bits */
    pic_param->reference_fields.bits.reference_distance_flag        = v->refdist_flag;
    pic_param->reference_fields.bits.reference_distance             = 0; /* XXX: interlaced frame */
    pic_param->reference_fields.bits.num_reference_pictures         = 0; /* XXX: interlaced frame */
    pic_param->reference_fields.bits.reference_field_pic_indicator  = 0; /* XXX: interlaced frame */
    pic_param->mv_fields.value                                      = 0; /* reset all bits */
    pic_param->mv_fields.bits.mv_mode                               = vc1_get_MVMODE(v);
    pic_param->mv_fields.bits.mv_mode2                              = vc1_get_MVMODE2(v);
    pic_param->mv_fields.bits.mv_table                              = s->mv_table_index;
    pic_param->mv_fields.bits.two_mv_block_pattern_table            = 0; /* XXX: interlaced frame */
    pic_param->mv_fields.bits.four_mv_switch                        = 0; /* XXX: interlaced frame */
    pic_param->mv_fields.bits.four_mv_block_pattern_table           = 0; /* XXX: interlaced frame */
    pic_param->mv_fields.bits.extended_mv_flag                      = v->extended_mv;
    pic_param->mv_fields.bits.extended_mv_range                     = v->mvrange;
    pic_param->mv_fields.bits.extended_dmv_flag                     = v->extended_dmv;
    pic_param->mv_fields.bits.extended_dmv_range                    = 0; /* XXX: interlaced frame */
    pic_param->pic_quantizer_fields.value                           = 0; /* reset all bits */
    pic_param->pic_quantizer_fields.bits.dquant                     = v->dquant;
    pic_param->pic_quantizer_fields.bits.quantizer                  = v->quantizer_mode;
    pic_param->pic_quantizer_fields.bits.half_qp                    = v->halfpq;
    pic_param->pic_quantizer_fields.bits.pic_quantizer_scale        = v->pq;
    pic_param->pic_quantizer_fields.bits.pic_quantizer_type         = v->pquantizer;
    pic_param->pic_quantizer_fields.bits.dq_frame                   = v->dquantfrm;
    pic_param->pic_quantizer_fields.bits.dq_profile                 = v->dqprofile;
    pic_param->pic_quantizer_fields.bits.dq_sb_edge                 = v->dqprofile == DQPROFILE_SINGLE_EDGE  ? v->dqsbedge : 0;
    pic_param->pic_quantizer_fields.bits.dq_db_edge                 = v->dqprofile == DQPROFILE_DOUBLE_EDGES ? v->dqsbedge : 0;
    pic_param->pic_quantizer_fields.bits.dq_binary_level            = v->dqbilevel;
    pic_param->pic_quantizer_fields.bits.alt_pic_quantizer          = v->altpq;
    pic_param->transform_fields.value                               = 0; /* reset all bits */
    pic_param->transform_fields.bits.variable_sized_transform_flag  = v->vstransform;
    pic_param->transform_fields.bits.mb_level_transform_type_flag   = v->ttmbf;
    pic_param->transform_fields.bits.frame_level_transform_type     = vc1_get_TTFRM(v);
    pic_param->transform_fields.bits.transform_ac_codingset_idx1    = v->c_ac_table_index;
    pic_param->transform_fields.bits.transform_ac_codingset_idx2    = v->y_ac_table_index;
    pic_param->transform_fields.bits.intra_transform_dc_table       = v->s.dc_table_index;

    switch (s->pict_type) {
    case AV_PICTURE_TYPE_B:
        pic_param->backward_reference_picture = ff_vaapi_get_surface_id(s->next_picture.f);
        // fall-through
    case AV_PICTURE_TYPE_P:
        pic_param->forward_reference_picture = ff_vaapi_get_surface_id(s->last_picture.f);
        break;
    }

    if (pic_param->bitplane_present.value) {
        uint8_t *bitplane;
        const uint8_t *ff_bp[3];
        int x, y, n;

        switch (s->pict_type) {
        case AV_PICTURE_TYPE_P:
            ff_bp[0] = pic_param->bitplane_present.flags.bp_direct_mb  ? v->direct_mb_plane    : NULL;
            ff_bp[1] = pic_param->bitplane_present.flags.bp_skip_mb    ? s->mbskip_table       : NULL;
            ff_bp[2] = pic_param->bitplane_present.flags.bp_mv_type_mb ? v->mv_type_mb_plane   : NULL;
            break;
        case AV_PICTURE_TYPE_B:
            if (!v->bi_type) {
                ff_bp[0] = pic_param->bitplane_present.flags.bp_direct_mb ? v->direct_mb_plane : NULL;
                ff_bp[1] = pic_param->bitplane_present.flags.bp_skip_mb   ? s->mbskip_table    : NULL;
                ff_bp[2] = NULL; /* XXX: interlaced frame (FORWARD plane) */
                break;
            }
            /* fall-through (BI-type) */
        case AV_PICTURE_TYPE_I:
            ff_bp[0] = NULL; /* XXX: interlaced frame (FIELDTX plane) */
            ff_bp[1] = pic_param->bitplane_present.flags.bp_ac_pred    ? v->acpred_plane       : NULL;
            ff_bp[2] = pic_param->bitplane_present.flags.bp_overflags  ? v->over_flags_plane   : NULL;
            break;
        default:
            ff_bp[0] = NULL;
            ff_bp[1] = NULL;
            ff_bp[2] = NULL;
            break;
        }

        bitplane = ff_vaapi_alloc_bitplane(vactx, (s->mb_width * s->mb_height + 1) / 2);
        if (!bitplane)
            return -1;

        n = 0;
        for (y = 0; y < s->mb_height; y++)
            for (x = 0; x < s->mb_width; x++, n++)
                vc1_pack_bitplanes(bitplane, n, ff_bp, x, y, s->mb_stride);
        if (n & 1) /* move last nibble to the high order */
            bitplane[n/2] <<= 4;
    }
    return 0;
}
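
vc1_pack_bitplanes() itself is not shown, but the trailing fix-up for an odd macroblock count implies two macroblocks per byte with the earlier one in the high nibble. A minimal sketch consistent with that; pack_nibble() is a hypothetical stand-in:

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in: shift the byte and OR in a 4-bit value, so the
 * earlier macroblock of each pair ends up in the high nibble */
static void pack_nibble(uint8_t *bp, int n, uint8_t v)
{
    bp[n / 2] = (uint8_t)((bp[n / 2] << 4) | (v & 0xF));
}

int main(void)
{
    uint8_t bitplane[3] = { 0 };
    int n, count = 5;            /* odd, like an odd mb_width * mb_height */

    for (n = 0; n < count; n++)
        pack_nibble(bitplane, n, (uint8_t)(n + 1));
    if (count & 1)               /* move last nibble to the high order */
        bitplane[count / 2] <<= 4;
    printf("%02X %02X %02X\n", bitplane[0], bitplane[1], bitplane[2]);
    /* -> 12 34 50 */
    return 0;
}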
Example #24
static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
                               const int data_size, AVCodecContext *avctx)
{
    int hdr_size, width, height, flags;
    int version;
    const uint8_t *ptr;

    hdr_size = AV_RB16(buf);
    av_dlog(avctx, "header size %d\n", hdr_size);
    if (hdr_size > data_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong header size\n");
        return AVERROR_INVALIDDATA;
    }

    version = AV_RB16(buf + 2);
    av_dlog(avctx, "%.4s version %d\n", buf+4, version);
    if (version > 1) {
        av_log(avctx, AV_LOG_ERROR, "unsupported version: %d\n", version);
        return AVERROR_PATCHWELCOME;
    }

    width  = AV_RB16(buf + 8);
    height = AV_RB16(buf + 10);
    if (width != avctx->width || height != avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "picture resolution change: %dx%d -> %dx%d\n",
               avctx->width, avctx->height, width, height);
        return AVERROR_PATCHWELCOME;
    }

    ctx->frame_type = (buf[12] >> 2) & 3;
    ctx->alpha_info = buf[17] & 0xf;

    if (ctx->alpha_info > 2) {
        av_log(avctx, AV_LOG_ERROR, "Invalid alpha mode %d\n", ctx->alpha_info);
        return AVERROR_INVALIDDATA;
    }
    if (avctx->skip_alpha) ctx->alpha_info = 0;

    av_dlog(avctx, "frame type %d\n", ctx->frame_type);

    if (ctx->frame_type == 0) {
        ctx->scan = ctx->progressive_scan; // permuted
    } else {
        ctx->scan = ctx->interlaced_scan; // permuted
        ctx->frame->interlaced_frame = 1;
        ctx->frame->top_field_first = ctx->frame_type == 1;
    }

    if (ctx->alpha_info) {
        avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUVA444P10 : AV_PIX_FMT_YUVA422P10;
    } else {
        avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_YUV422P10;
    }

    ptr   = buf + 20;
    flags = buf[19];
    av_dlog(avctx, "flags %x\n", flags);

    if (flags & 2) {
        if (buf + data_size - ptr < 64) {
            av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
            return AVERROR_INVALIDDATA;
        }
        permute(ctx->qmat_luma, ctx->prodsp.idct_permutation, ptr);
        ptr += 64;
    } else {
        memset(ctx->qmat_luma, 4, 64);
    }

    if (flags & 1) {
        if (buf + data_size - ptr < 64) {
            av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
            return AVERROR_INVALIDDATA;
        }
        permute(ctx->qmat_chroma, ctx->prodsp.idct_permutation, ptr);
    } else {
        memset(ctx->qmat_chroma, 4, 64);
    }

    return hdr_size;
}
Example #25
int rtp_check_and_send_back_rr(RTPDemuxContext *s, int count)
{
    AVIOContext *pb;
    uint8_t *buf;
    int len;
    int rtcp_bytes;
    RTPStatistics *stats= &s->statistics;
    uint32_t lost;
    uint32_t extended_max;
    uint32_t expected_interval;
    uint32_t received_interval;
    uint32_t lost_interval;
    uint32_t expected;
    uint32_t fraction;
    uint64_t ntp_time= s->last_rtcp_ntp_time; // TODO: Get local ntp time?

    if (!s->rtp_ctx || (count < 1))
        return -1;

    /* TODO: I think this is way too often; RFC 1889 has an algorithm for this */
    /* XXX: mpeg pts hardcoded. RTCP send every 0.5 seconds */
    s->octet_count += count;
    rtcp_bytes = ((s->octet_count - s->last_octet_count) * RTCP_TX_RATIO_NUM) /
        RTCP_TX_RATIO_DEN;
    rtcp_bytes /= 50; // mmu_man: that's enough for me... VLC sends much less btw !?
    if (rtcp_bytes < 28)
        return -1;
    s->last_octet_count = s->octet_count;

    if (avio_open_dyn_buf(&pb) < 0)
        return -1;

    // Receiver Report
    avio_w8(pb, (RTP_VERSION << 6) + 1); /* 1 report block */
    avio_w8(pb, RTCP_RR);
    avio_wb16(pb, 7); /* length in words - 1 */
    // our own SSRC: we use the server's SSRC + 1 to avoid conflicts
    avio_wb32(pb, s->ssrc + 1);
    avio_wb32(pb, s->ssrc); // server SSRC
    // some placeholders we should really fill...
    // RFC 1889/p64
    extended_max= stats->cycles + stats->max_seq;
    expected= extended_max - stats->base_seq + 1;
    lost= expected - stats->received;
    lost= FFMIN(lost, 0xffffff); // clamp it since it's only 24 bits...
    expected_interval= expected - stats->expected_prior;
    stats->expected_prior= expected;
    received_interval= stats->received - stats->received_prior;
    stats->received_prior= stats->received;
    lost_interval= expected_interval - received_interval;
    if (expected_interval == 0 || lost_interval <= 0)
        fraction = 0;
    else
        fraction = (lost_interval << 8) / expected_interval;

    fraction= (fraction<<24) | lost;

    avio_wb32(pb, fraction); /* 8 bits of fraction, 24 bits of total packets lost */
    avio_wb32(pb, extended_max); /* max sequence received */
    avio_wb32(pb, stats->jitter>>4); /* jitter */

    if(s->last_rtcp_ntp_time==AV_NOPTS_VALUE)
    {
        avio_wb32(pb, 0); /* last SR timestamp */
        avio_wb32(pb, 0); /* delay since last SR */
    } else {
        uint32_t middle_32_bits= s->last_rtcp_ntp_time>>16; // this is valid, right? do we need to handle 64-bit values specially?
        uint32_t delay_since_last= ntp_time - s->last_rtcp_ntp_time;

        avio_wb32(pb, middle_32_bits); /* last SR timestamp */
        avio_wb32(pb, delay_since_last); /* delay since last SR */
    }

    // CNAME
    avio_w8(pb, (RTP_VERSION << 6) + 1); /* 1 report block */
    avio_w8(pb, RTCP_SDES);
    len = strlen(s->hostname);
    avio_wb16(pb, (6 + len + 3) / 4); /* length in words - 1 */
    avio_wb32(pb, s->ssrc);
    avio_w8(pb, 0x01);
    avio_w8(pb, len);
    avio_write(pb, s->hostname, len);
    // padding
    for (len = (6 + len) % 4; len % 4; len++) {
        avio_w8(pb, 0);
    }

    avio_flush(pb);
    len = avio_close_dyn_buf(pb, &buf);
    if ((len > 0) && buf) {
        int av_unused result;
        av_dlog(s->ic, "sending %d bytes of RR\n", len);
        result= ffurl_write(s->rtp_ctx, buf, len);
        av_dlog(s->ic, "result from ffurl_write: %d\n", result);
        av_free(buf);
    }
    return 0;
}
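
The loss fields above follow the receiver-report math of RFC 1889 (appendix A.3 in its successor, RFC 3550): the 8-bit fraction lost is the interval loss scaled by 256, packed in front of the 24-bit cumulative loss. A minimal sketch of just that packing:

#include <stdint.h>
#include <stdio.h>

/* pack the RR "fraction lost" byte and 24-bit cumulative loss into one word */
static uint32_t rr_loss_word(uint32_t expected_interval, uint32_t received_interval,
                             uint32_t cumulative_lost)
{
    uint32_t fraction = 0;
    int32_t lost_interval = (int32_t)(expected_interval - received_interval);

    if (expected_interval && lost_interval > 0)
        fraction = ((uint32_t)lost_interval << 8) / expected_interval;
    if (cumulative_lost > 0xffffff)
        cumulative_lost = 0xffffff; /* clamp to the 24-bit field */
    return (fraction << 24) | cumulative_lost;
}

int main(void)
{
    /* hypothetical interval: 200 expected, 190 received -> 5% ~ 12/256 */
    printf("0x%08X\n", rr_loss_word(200, 190, 10)); /* -> 0x0C00000A */
    return 0;
}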
Example #26
int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size)
{
  if(pc->overread){
    av_dlog(NULL, "overread %d, state:%X next:%d index:%d o_index:%d\n",
            pc->overread, pc->state, next, pc->index, pc->overread_index);
    av_dlog(NULL, "%X %X %X %X\n", (*buf)[0], (*buf)[1], (*buf)[2], (*buf)[3]);
  }

  /* Copy overread bytes from last frame into buffer. */
  for(; pc->overread>0; pc->overread--){
    pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
  }

  /* flush remaining if EOF */
  if(!*buf_size && next == END_NOT_FOUND){
    next= 0;
  }

  pc->last_index= pc->index;

  /* copy into buffer and return */
  if(next == END_NOT_FOUND){
    void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);

    if(!new_buffer)
      return AVERROR(ENOMEM);
    pc->buffer = new_buffer;
    memcpy(&pc->buffer[pc->index], *buf, *buf_size);
    pc->index += *buf_size;
    return -1;
  }

  *buf_size=
    pc->overread_index= pc->index + next;

  /* append to buffer */
  if(pc->index){
    void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);

    if(!new_buffer)
      return AVERROR(ENOMEM);
    pc->buffer = new_buffer;
    if (next > -FF_INPUT_BUFFER_PADDING_SIZE)
      memcpy(&pc->buffer[pc->index], *buf,
             next + FF_INPUT_BUFFER_PADDING_SIZE);
    pc->index = 0;
    *buf= pc->buffer;
  }

  /* store overread bytes */
  for(;next < 0; next++){
    pc->state = (pc->state<<8) | pc->buffer[pc->last_index + next];
    pc->state64 = (pc->state64<<8) | pc->buffer[pc->last_index + next];
    pc->overread++;
  }

  if(pc->overread){
    av_dlog(NULL, "overread %d, state:%X next:%d index:%d o_index:%d\n",
            pc->overread, pc->state, next, pc->index, pc->overread_index);
    av_dlog(NULL, "%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
  }

  return 0;
}
Example #27
static int rtsp_read_setup(AVFormatContext *s, char* host, char *controlurl)
{
    RTSPState *rt             = s->priv_data;
    RTSPMessageHeader request = { 0 };
    int ret                   = 0;
    char url[1024];
    RTSPStream *rtsp_st;
    char responseheaders[1024];
    int localport    = -1;
    int transportidx = 0;
    int streamid     = 0;

    ret = rtsp_read_request(s, &request, "SETUP");
    if (ret)
        return ret;
    rt->seq++;
    if (!request.nb_transports) {
        av_log(s, AV_LOG_ERROR, "No transport defined in SETUP\n");
        return AVERROR_INVALIDDATA;
    }
    for (transportidx = 0; transportidx < request.nb_transports;
         transportidx++) {
        if (!request.transports[transportidx].mode_record ||
            (request.transports[transportidx].lower_transport !=
             RTSP_LOWER_TRANSPORT_UDP &&
             request.transports[transportidx].lower_transport !=
             RTSP_LOWER_TRANSPORT_TCP)) {
            av_log(s, AV_LOG_ERROR, "mode=record/receive not set or transport"
                   " protocol not supported (yet)\n");
            return AVERROR_INVALIDDATA;
        }
    }
    if (request.nb_transports > 1)
        av_log(s, AV_LOG_WARNING, "More than one transport not supported, "
               "using the first one\n");
    for (streamid = 0; streamid < rt->nb_rtsp_streams; streamid++) {
        if (!strcmp(rt->rtsp_streams[streamid]->control_url,
                    controlurl))
            break;
    }
    if (streamid == rt->nb_rtsp_streams) {
        av_log(s, AV_LOG_ERROR, "Unable to find requested track\n");
        return AVERROR_STREAM_NOT_FOUND;
    }
    rtsp_st   = rt->rtsp_streams[streamid];
    localport = rt->rtp_port_min;

    if (request.transports[0].lower_transport == RTSP_LOWER_TRANSPORT_TCP) {
        rt->lower_transport = RTSP_LOWER_TRANSPORT_TCP;
        if ((ret = ff_rtsp_open_transport_ctx(s, rtsp_st))) {
            rtsp_send_reply(s, RTSP_STATUS_TRANSPORT, NULL, request.seq);
            return ret;
        }
        rtsp_st->interleaved_min = request.transports[0].interleaved_min;
        rtsp_st->interleaved_max = request.transports[0].interleaved_max;
        snprintf(responseheaders, sizeof(responseheaders), "Transport: "
                 "RTP/AVP/TCP;unicast;mode=receive;interleaved=%d-%d"
                 "\r\n", request.transports[0].interleaved_min,
                 request.transports[0].interleaved_max);
    } else {
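        /* UDP: probe even local ports from rtp_port_min upwards until an
         * RTP/RTCP pair can be opened or the port range is exhausted. */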
        do {
            ff_url_join(url, sizeof(url), "rtp", NULL, host, localport, NULL);
            av_dlog(s, "Opening: %s", url);
            ret = ffurl_open(&rtsp_st->rtp_handle, url, AVIO_FLAG_READ_WRITE,
                             &s->interrupt_callback, NULL);
            if (ret)
                localport += 2;
        } while (ret && localport <= rt->rtp_port_max);
        if (localport > rt->rtp_port_max) {
            rtsp_send_reply(s, RTSP_STATUS_TRANSPORT, NULL, request.seq);
            return ret;
        }

        av_dlog(s, "Listening on: %d",
                ff_rtp_get_local_rtp_port(rtsp_st->rtp_handle));
        if ((ret = ff_rtsp_open_transport_ctx(s, rtsp_st))) {
            rtsp_send_reply(s, RTSP_STATUS_TRANSPORT, NULL, request.seq);
            return ret;
        }

        localport = ff_rtp_get_local_rtp_port(rtsp_st->rtp_handle);
        snprintf(responseheaders, sizeof(responseheaders), "Transport: "
                 "RTP/AVP/UDP;unicast;mode=receive;source=%s;"
                 "client_port=%d-%d;server_port=%d-%d\r\n",
                 host, request.transports[0].client_port_min,
                 request.transports[0].client_port_max, localport,
                 localport + 1);
    }

    /* Establish sessionid if not previously set */
    /* Put this in a function? */
    /* RFC 2326: session id must be at least 8 digits */
    while (strlen(rt->session_id) < 8)
        av_strlcatf(rt->session_id, 512, "%u", av_get_random_seed());

    av_strlcatf(responseheaders, sizeof(responseheaders), "Session: %s\r\n",
                rt->session_id);
    /* Send Reply */
    rtsp_send_reply(s, RTSP_STATUS_OK, responseheaders, request.seq);

    rt->state = RTSP_STATE_PAUSED;
    return 0;
}
Example #28
static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
                                      void *data, int *got_frame,
                                      const uint8_t *buf, int buf_size)
{
    /* Note: the only difference between the 15Bpp and 16Bpp formats
     * is the pixel format; the packets are processed the same way. */
    FlicDecodeContext *s = avctx->priv_data;

    GetByteContext g2;
    int pixel_ptr;
    unsigned char palette_idx1;

    unsigned int frame_size;
    int num_chunks;

    unsigned int chunk_size;
    int chunk_type;

    int i, j, ret;

    int lines;
    int compressed_lines;
    signed short line_packets;
    int y_ptr;
    int byte_run;
    int pixel_skip;
    int pixel_countdown;
    unsigned char *pixels;
    int pixel;
    unsigned int pixel_limit;

    bytestream2_init(&g2, buf, buf_size);

    if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0)
        return ret;

    pixels = s->frame.data[0];
    pixel_limit = s->avctx->height * s->frame.linesize[0];

    frame_size = bytestream2_get_le32(&g2);
    bytestream2_skip(&g2, 2);  /* skip the magic number */
    num_chunks = bytestream2_get_le16(&g2);
    bytestream2_skip(&g2, 8);  /* skip padding */
    if (frame_size > buf_size)
        frame_size = buf_size;

    frame_size -= 16;

    /* iterate through the chunks */
    while ((frame_size > 0) && (num_chunks > 0)) {
        int stream_ptr_after_chunk;
        chunk_size = bytestream2_get_le32(&g2);
        if (chunk_size > frame_size) {
            av_log(avctx, AV_LOG_WARNING,
                   "Invalid chunk_size = %u > frame_size = %u\n", chunk_size, frame_size);
            chunk_size = frame_size;
        }
        stream_ptr_after_chunk = bytestream2_tell(&g2) - 4 + chunk_size;
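        /* first byte past this chunk; chunk_size is counted from the
         * chunk's own size field, hence the -4 */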

        chunk_type = bytestream2_get_le16(&g2);


        switch (chunk_type) {
        case FLI_256_COLOR:
        case FLI_COLOR:
            /* For some reason, it seems that non-palettized flics do
             * include one of these chunks in their first frame.
             * Why I do not know, it seems rather extraneous. */
            av_dlog(avctx,
                    "Unexpected Palette chunk %d in non-palettized FLC\n",
                    chunk_type);
            bytestream2_skip(&g2, chunk_size - 6);
            break;

        case FLI_DELTA:
        case FLI_DTA_LC:
            y_ptr = 0;
            compressed_lines = bytestream2_get_le16(&g2);
            while (compressed_lines > 0) {
                if (bytestream2_tell(&g2) + 2 > stream_ptr_after_chunk)
                    break;
                line_packets = bytestream2_get_le16(&g2);
                if (line_packets < 0) {
                    line_packets = -line_packets;
                    y_ptr += line_packets * s->frame.linesize[0];
                } else {
                    compressed_lines--;
                    pixel_ptr = y_ptr;
                    CHECK_PIXEL_PTR(0);
                    pixel_countdown = s->avctx->width;
                    for (i = 0; i < line_packets; i++) {
                        /* account for the skip bytes */
                        if (bytestream2_tell(&g2) + 2 > stream_ptr_after_chunk)
                            break;
                        pixel_skip = bytestream2_get_byte(&g2);
                        pixel_ptr += (pixel_skip*2); /* Pixel is 2 bytes wide */
                        pixel_countdown -= pixel_skip;
                        byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
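                        /* negative run: replicate one 16-bit pixel;
                         * positive run: copy literal pixels */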
                        if (byte_run < 0) {
                            byte_run = -byte_run;
                            pixel    = bytestream2_get_le16(&g2);
                            CHECK_PIXEL_PTR(2 * byte_run);
                            for (j = 0; j < byte_run; j++, pixel_countdown -= 2) {
                                *((signed short*)(&pixels[pixel_ptr])) = pixel;
                                pixel_ptr += 2;
                            }
                        } else {
                            if (bytestream2_tell(&g2) + 2*byte_run > stream_ptr_after_chunk)
                                break;
                            CHECK_PIXEL_PTR(2 * byte_run);
                            for (j = 0; j < byte_run; j++, pixel_countdown--) {
                                *((signed short*)(&pixels[pixel_ptr])) = bytestream2_get_le16(&g2);
                                pixel_ptr += 2;
                            }
                        }
                    }

                    y_ptr += s->frame.linesize[0];
                }
            }
            break;

        case FLI_LC:
            av_log(avctx, AV_LOG_ERROR, "Unexpected FLI_LC chunk in non-palettized FLC\n");
            bytestream2_skip(&g2, chunk_size - 6);
            break;

        case FLI_BLACK:
            /* set the whole frame to 0x0000 which is black in both 15Bpp and 16Bpp modes. */
            memset(pixels, 0x0000,
                   s->frame.linesize[0] * s->avctx->height);
            break;

        case FLI_BRUN:
            y_ptr = 0;
            for (lines = 0; lines < s->avctx->height; lines++) {
                pixel_ptr = y_ptr;
                /* disregard the line packets; instead, iterate through all
                 * pixels on a row */
                bytestream2_skip(&g2, 1);
                pixel_countdown = (s->avctx->width * 2);

                while (pixel_countdown > 0) {
                    if (bytestream2_tell(&g2) + 1 > stream_ptr_after_chunk)
                        break;
                    byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
                    if (byte_run > 0) {
                        palette_idx1 = bytestream2_get_byte(&g2);
                        CHECK_PIXEL_PTR(byte_run);
                        for (j = 0; j < byte_run; j++) {
                            pixels[pixel_ptr++] = palette_idx1;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n",
                                       pixel_countdown, lines);
                        }
                    } else {  /* copy bytes if byte_run < 0 */
                        byte_run = -byte_run;
                        if (bytestream2_tell(&g2) + byte_run > stream_ptr_after_chunk)
                            break;
                        CHECK_PIXEL_PTR(byte_run);
                        for (j = 0; j < byte_run; j++) {
                            palette_idx1 = bytestream2_get_byte(&g2);
                            pixels[pixel_ptr++] = palette_idx1;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n",
                                       pixel_countdown, lines);
                        }
                    }
                }

                /* Now FLX is strange, in that it is "byte" as opposed to "pixel" run length compressed.
                 * This does not give us any good opportunity to perform word endian conversion
                 * during decompression. So if it is required (i.e., this is not a LE target), we do
                 * a second pass over the line here, swapping the bytes.
                 */
#if HAVE_BIGENDIAN
                pixel_ptr = y_ptr;
                pixel_countdown = s->avctx->width;
                while (pixel_countdown > 0) {
                    /* swap in place: read the stored LE bytes, write a native short */
                    *((signed short*)(&pixels[pixel_ptr])) = AV_RL16(&pixels[pixel_ptr]);
                    pixel_ptr += 2;
                    pixel_countdown--;
                }
#endif
                y_ptr += s->frame.linesize[0];
            }
            break;

        case FLI_DTA_BRUN:
            y_ptr = 0;
            for (lines = 0; lines < s->avctx->height; lines++) {
                pixel_ptr = y_ptr;
                /* disregard the line packets; instead, iterate through all
                 * pixels on a row */
                bytestream2_skip(&g2, 1);
                pixel_countdown = s->avctx->width; /* Width is in pixels, not bytes */

                while (pixel_countdown > 0) {
                    if (bytestream2_tell(&g2) + 1 > stream_ptr_after_chunk)
                        break;
                    byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
                    if (byte_run > 0) {
                        pixel    = bytestream2_get_le16(&g2);
                        CHECK_PIXEL_PTR(2 * byte_run);
                        for (j = 0; j < byte_run; j++) {
                            *((signed short*)(&pixels[pixel_ptr])) = pixel;
                            pixel_ptr += 2;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n",
                                       pixel_countdown);
                        }
                    } else {  /* copy pixels if byte_run < 0 */
                        byte_run = -byte_run;
                        if (bytestream2_tell(&g2) + 2 * byte_run > stream_ptr_after_chunk)
                            break;
                        CHECK_PIXEL_PTR(2 * byte_run);
                        for (j = 0; j < byte_run; j++) {
                            *((signed short*)(&pixels[pixel_ptr])) = bytestream2_get_le16(&g2);
                            pixel_ptr  += 2;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n",
                                       pixel_countdown);
                        }
                    }
                }

                y_ptr += s->frame.linesize[0];
            }
            break;

        case FLI_COPY:
        case FLI_DTA_COPY:
            /* copy the chunk (uncompressed frame) */
            if (chunk_size - 6 > (unsigned int)(s->avctx->width * s->avctx->height)*2) {
                av_log(avctx, AV_LOG_ERROR, "In chunk FLI_COPY : source data (%d bytes) " \
                       "bigger than image, skipping chunk\n", chunk_size - 6);
                bytestream2_skip(&g2, chunk_size - 6);
            } else {

                for (y_ptr = 0; y_ptr < s->frame.linesize[0] * s->avctx->height;
                     y_ptr += s->frame.linesize[0]) {

                    pixel_countdown = s->avctx->width;
                    pixel_ptr = 0;
                    while (pixel_countdown > 0) {
                      *((signed short*)(&pixels[y_ptr + pixel_ptr])) = bytestream2_get_le16(&g2);
                      pixel_ptr += 2;
                      pixel_countdown--;
                    }
                }
            }
            break;

        case FLI_MINI:
            /* some sort of a thumbnail? disregard this chunk... */
            bytestream2_skip(&g2, chunk_size - 6);
            break;

        default:
            av_log(avctx, AV_LOG_ERROR, "Unrecognized chunk type: %d\n", chunk_type);
            break;
        }

        frame_size -= chunk_size;
        num_chunks--;
    }

    /* by the end of the chunk, the stream ptr should equal the frame
     * size (minus 1, possibly); if it doesn't, issue a warning */
    if ((bytestream2_get_bytes_left(&g2) != 0) && (bytestream2_get_bytes_left(&g2) != 1))
        av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \
               "and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2));

    if ((ret = av_frame_ref(data, &s->frame)) < 0)
        return ret;

    *got_frame = 1;

    return buf_size;
}
Example #29
/* This function loads and processes a single chunk in an IP movie file.
 * It returns the type of chunk that was processed. */
static int process_ipmovie_chunk(IPMVEContext *s, AVIOContext *pb,
    AVPacket *pkt)
{
    unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE];
    int chunk_type;
    int chunk_size;
    unsigned char opcode_preamble[OPCODE_PREAMBLE_SIZE];
    unsigned char opcode_type;
    unsigned char opcode_version;
    int opcode_size;
    unsigned char scratch[1024];
    int i, j;
    int first_color, last_color;
    int audio_flags;
    unsigned char r, g, b;

    /* see if there are any pending packets */
    chunk_type = load_ipmovie_packet(s, pb, pkt);
    if (chunk_type != CHUNK_DONE)
        return chunk_type;

    /* read the next chunk, wherever the file happens to be pointing */
    if (url_feof(pb))
        return CHUNK_EOF;
    if (avio_read(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) !=
        CHUNK_PREAMBLE_SIZE)
        return CHUNK_BAD;
    chunk_size = AV_RL16(&chunk_preamble[0]);
    chunk_type = AV_RL16(&chunk_preamble[2]);
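    /* the 4-byte chunk preamble holds a little-endian 16-bit payload size
     * followed by a 16-bit chunk type */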

    av_dlog(NULL, "chunk type 0x%04X, 0x%04X bytes: ", chunk_type, chunk_size);

    switch (chunk_type) {

    case CHUNK_INIT_AUDIO:
        av_dlog(NULL, "initialize audio\n");
        break;

    case CHUNK_AUDIO_ONLY:
        av_dlog(NULL, "audio only\n");
        break;

    case CHUNK_INIT_VIDEO:
        av_dlog(NULL, "initialize video\n");
        break;

    case CHUNK_VIDEO:
        av_dlog(NULL, "video (and audio)\n");
        break;

    case CHUNK_SHUTDOWN:
        av_dlog(NULL, "shutdown\n");
        break;

    case CHUNK_END:
        av_dlog(NULL, "end\n");
        break;

    default:
        av_dlog(NULL, "invalid chunk\n");
        chunk_type = CHUNK_BAD;
        break;

    }

    while ((chunk_size > 0) && (chunk_type != CHUNK_BAD)) {

        /* read the next chunk, wherever the file happens to be pointing */
        if (url_feof(pb)) {
            chunk_type = CHUNK_EOF;
            break;
        }
        if (avio_read(pb, opcode_preamble, CHUNK_PREAMBLE_SIZE) !=
            CHUNK_PREAMBLE_SIZE) {
            chunk_type = CHUNK_BAD;
            break;
        }

        opcode_size = AV_RL16(&opcode_preamble[0]);
        opcode_type = opcode_preamble[2];
        opcode_version = opcode_preamble[3];
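        /* opcode preamble: 16-bit payload size, 8-bit type, 8-bit version */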

        chunk_size -= OPCODE_PREAMBLE_SIZE;
        chunk_size -= opcode_size;
        if (chunk_size < 0) {
            av_dlog(NULL, "chunk_size countdown just went negative\n");
            chunk_type = CHUNK_BAD;
            break;
        }

        av_dlog(NULL, "  opcode type %02X, version %d, 0x%04X bytes: ",
                opcode_type, opcode_version, opcode_size);
        switch (opcode_type) {

        case OPCODE_END_OF_STREAM:
            av_dlog(NULL, "end of stream\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_END_OF_CHUNK:
            av_dlog(NULL, "end of chunk\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_CREATE_TIMER:
            av_dlog(NULL, "create timer\n");
            if ((opcode_version > 0) || (opcode_size > 6)) {
                av_dlog(NULL, "bad create_timer opcode\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (avio_read(pb, scratch, opcode_size) !=
                opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }
            s->frame_pts_inc = ((uint64_t)AV_RL32(&scratch[0])) * AV_RL16(&scratch[4]);
            av_dlog(NULL, "  %.2f frames/second (timer div = %d, subdiv = %d)\n",
                    1000000.0 / s->frame_pts_inc, AV_RL32(&scratch[0]),
                    AV_RL16(&scratch[4]));
            break;

        case OPCODE_INIT_AUDIO_BUFFERS:
            av_dlog(NULL, "initialize audio buffers\n");
            if ((opcode_version > 1) || (opcode_size > 10)) {
                av_dlog(NULL, "bad init_audio_buffers opcode\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (avio_read(pb, scratch, opcode_size) !=
                opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }
            s->audio_sample_rate = AV_RL16(&scratch[4]);
            audio_flags = AV_RL16(&scratch[2]);
            /* bit 0 of the flags: 0 = mono, 1 = stereo */
            s->audio_channels = (audio_flags & 1) + 1;
            /* bit 1 of the flags: 0 = 8 bit, 1 = 16 bit */
            s->audio_bits = (((audio_flags >> 1) & 1) + 1) * 8;
            /* bit 2 indicates compressed audio in version 1 opcode */
            if ((opcode_version == 1) && (audio_flags & 0x4))
                s->audio_type = CODEC_ID_INTERPLAY_DPCM;
            else if (s->audio_bits == 16)
                s->audio_type = CODEC_ID_PCM_S16LE;
            else
                s->audio_type = CODEC_ID_PCM_U8;
            av_dlog(NULL, "audio: %d bits, %d Hz, %s, %s format\n",
                    s->audio_bits, s->audio_sample_rate,
                    (s->audio_channels == 2) ? "stereo" : "mono",
                    (s->audio_type == CODEC_ID_INTERPLAY_DPCM) ?
                    "Interplay audio" : "PCM");
            break;

        case OPCODE_START_STOP_AUDIO:
            av_dlog(NULL, "start/stop audio\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_INIT_VIDEO_BUFFERS:
            av_dlog(NULL, "initialize video buffers\n");
            if ((opcode_version > 2) || (opcode_size > 8)) {
                av_dlog(NULL, "bad init_video_buffers opcode\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (avio_read(pb, scratch, opcode_size) !=
                opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }
            s->video_width = AV_RL16(&scratch[0]) * 8;
            s->video_height = AV_RL16(&scratch[2]) * 8;
            if (opcode_version < 2 || !AV_RL16(&scratch[6])) {
                s->video_bpp = 8;
            } else {
                s->video_bpp = 16;
            }
            av_dlog(NULL, "video resolution: %d x %d\n",
                    s->video_width, s->video_height);
            break;

        case OPCODE_UNKNOWN_06:
        case OPCODE_UNKNOWN_0E:
        case OPCODE_UNKNOWN_10:
        case OPCODE_UNKNOWN_12:
        case OPCODE_UNKNOWN_13:
        case OPCODE_UNKNOWN_14:
        case OPCODE_UNKNOWN_15:
            av_dlog(NULL, "unknown (but documented) opcode %02X\n", opcode_type);
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_SEND_BUFFER:
            av_dlog(NULL, "send buffer\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_AUDIO_FRAME:
            av_dlog(NULL, "audio frame\n");

            /* log position and move on for now */
            s->audio_chunk_offset = avio_tell(pb);
            s->audio_chunk_size = opcode_size;
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_SILENCE_FRAME:
            av_dlog(NULL, "silence frame\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_INIT_VIDEO_MODE:
            av_dlog(NULL, "initialize video mode\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_CREATE_GRADIENT:
            av_dlog(NULL, "create gradient\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_SET_PALETTE:
            av_dlog(NULL, "set palette\n");
            /* check for the logical maximum palette size
             * (3 * 256 + 4 bytes) */
            if (opcode_size > 0x304) {
                av_dlog(NULL, "demux_ipmovie: set_palette opcode too large\n");
                chunk_type = CHUNK_BAD;
                break;
            }
            if (avio_read(pb, scratch, opcode_size) != opcode_size) {
                chunk_type = CHUNK_BAD;
                break;
            }

            /* load the palette into internal data structure */
            first_color = AV_RL16(&scratch[0]);
            last_color = first_color + AV_RL16(&scratch[2]) - 1;
            /* sanity check (since they are 16 bit values) */
            if ((first_color > 0xFF) || (last_color > 0xFF)) {
                av_dlog(NULL, "demux_ipmovie: set_palette indexes out of range (%d -> %d)\n",
                    first_color, last_color);
                chunk_type = CHUNK_BAD;
                break;
            }
            j = 4;  /* offset of first palette data */
            for (i = first_color; i <= last_color; i++) {
                /* the palette is stored as a 6-bit VGA palette, thus each
                 * component is shifted up to a 8-bit range */
                r = scratch[j++] * 4;
                g = scratch[j++] * 4;
                b = scratch[j++] * 4;
                s->palette[i] = (r << 16) | (g << 8) | (b);
            }
            s->has_palette = 1;
            break;

        case OPCODE_SET_PALETTE_COMPRESSED:
            av_dlog(NULL, "set palette compressed\n");
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_SET_DECODING_MAP:
            av_dlog(NULL, "set decoding map\n");

            /* log position and move on for now */
            s->decode_map_chunk_offset = avio_tell(pb);
            s->decode_map_chunk_size = opcode_size;
            avio_skip(pb, opcode_size);
            break;

        case OPCODE_VIDEO_DATA:
            av_dlog(NULL, "set video data\n");

            /* log position and move on for now */
            s->video_chunk_offset = avio_tell(pb);
            s->video_chunk_size = opcode_size;
            avio_skip(pb, opcode_size);
            break;

        default:
            av_dlog(NULL, "*** unknown opcode type\n");
            chunk_type = CHUNK_BAD;
            break;

        }
    }

    /* make a note of where the stream is sitting */
    s->next_chunk_offset = avio_tell(pb);

    /* dispatch the first of any pending packets */
    if ((chunk_type == CHUNK_VIDEO) || (chunk_type == CHUNK_AUDIO_ONLY))
        chunk_type = load_ipmovie_packet(s, pb, pkt);

    return chunk_type;
}
Example #30
static int rv10_decode_packet(AVCodecContext *avctx,
                              const uint8_t *buf, int buf_size, int buf_size2)
{
    MpegEncContext *s = avctx->priv_data;
    int mb_count, mb_pos, left, start_mb_x;

    init_get_bits(&s->gb, buf, buf_size*8);
    if(s->codec_id ==CODEC_ID_RV10)
        mb_count = rv10_decode_picture_header(s);
    else
        mb_count = rv20_decode_picture_header(s);
    if (mb_count < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "HEADER ERROR\n");
        return -1;
    }

    if (s->mb_x >= s->mb_width ||
        s->mb_y >= s->mb_height) {
        av_log(s->avctx, AV_LOG_ERROR, "POS ERROR %d %d\n", s->mb_x, s->mb_y);
        return -1;
    }
    mb_pos = s->mb_y * s->mb_width + s->mb_x;
    left = s->mb_width * s->mb_height - mb_pos;
    if (mb_count > left) {
        av_log(s->avctx, AV_LOG_ERROR, "COUNT ERROR\n");
        return -1;
    }

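    /* A slice starting at MB (0,0), or arriving with no picture in
     * progress, begins a new picture; finish the previous one first. */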
    if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) {
        if(s->current_picture_ptr){ //FIXME write parser so we always have complete frames?
            ff_er_frame_end(s);
            MPV_frame_end(s);
            s->mb_x= s->mb_y = s->resync_mb_x = s->resync_mb_y= 0;
        }
        if(MPV_frame_start(s, avctx) < 0)
            return -1;
        ff_er_frame_start(s);
    }

    av_dlog(avctx, "qscale=%d\n", s->qscale);

    /* default quantization values */
    if(s->codec_id== CODEC_ID_RV10){
        if(s->mb_y==0) s->first_slice_line=1;
    }else{
        s->first_slice_line=1;
        s->resync_mb_x= s->mb_x;
    }
    start_mb_x= s->mb_x;
    s->resync_mb_y= s->mb_y;
    if(s->h263_aic){
        s->y_dc_scale_table=
        s->c_dc_scale_table= ff_aic_dc_scale_table;
    }else{
        s->y_dc_scale_table=
        s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
    }

    if(s->modified_quant)
        s->chroma_qscale_table= ff_h263_chroma_qscale_table;

    ff_set_qscale(s, s->qscale);

    s->rv10_first_dc_coded[0] = 0;
    s->rv10_first_dc_coded[1] = 0;
    s->rv10_first_dc_coded[2] = 0;
    s->block_wrap[0]=
    s->block_wrap[1]=
    s->block_wrap[2]=
    s->block_wrap[3]= s->b8_stride;
    s->block_wrap[4]=
    s->block_wrap[5]= s->mb_stride;
    ff_init_block_index(s);
    /* decode each macroblock */

    for(s->mb_num_left= mb_count; s->mb_num_left>0; s->mb_num_left--) {
        int ret;
        ff_update_block_index(s);
        av_dlog(avctx, "**mb x=%d y=%d\n", s->mb_x, s->mb_y);

        s->mv_dir = MV_DIR_FORWARD;
        s->mv_type = MV_TYPE_16X16;
        ret=ff_h263_decode_mb(s, s->block);

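        /* If the bit reader overran this slice but buf_size2 (which extends
         * into the next slice) still covers the position, widen the window. */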
        if (ret != SLICE_ERROR && s->gb.size_in_bits < get_bits_count(&s->gb) && 8*buf_size2 >= get_bits_count(&s->gb)){
            av_log(avctx, AV_LOG_DEBUG, "update size from %d to %d\n", s->gb.size_in_bits, 8*buf_size2);
            s->gb.size_in_bits= 8*buf_size2;
            ret= SLICE_OK;
        }

        if (ret == SLICE_ERROR || s->gb.size_in_bits < get_bits_count(&s->gb)) {
            av_log(s->avctx, AV_LOG_ERROR, "ERROR at MB %d %d\n", s->mb_x, s->mb_y);
            return -1;
        }
        if(s->pict_type != FF_B_TYPE)
            ff_h263_update_motion_val(s);
        MPV_decode_mb(s, s->block);
        if(s->loop_filter)
            ff_h263_loop_filter(s);

        if (++s->mb_x == s->mb_width) {
            s->mb_x = 0;
            s->mb_y++;
            ff_init_block_index(s);
        }
        if(s->mb_x == s->resync_mb_x)
            s->first_slice_line=0;
        if(ret == SLICE_END) break;
    }

    ff_er_add_slice(s, start_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END);

    return s->gb.size_in_bits;
}