Example #1
/* Open the HTTP connection, handling proxies, authentication retries and
 * redirects; returns non-zero on error. */
static int http_open_cnx(URLContext *h)
{
    const char *path, *proxy_path;
    char hostname[1024], hoststr[1024];
    char auth[1024];
    char path1[1024];
    char buf[1024];
    int port, use_proxy, err, location_changed = 0, redirects = 0;
    HTTPAuthType cur_auth_type;
    HTTPContext *s = h->priv_data;
    URLContext *hd = NULL;

    s->init = 1;
    proxy_path = getenv("http_proxy");
    use_proxy = (proxy_path != NULL) && !getenv("no_proxy") &&
        av_strstart(proxy_path, "http://", NULL);

    /* fill the dest addr */
 redo:
    /* needed in any case to build the host string */
    ff_url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
                 path1, sizeof(path1), s->location);
    ff_url_join(hoststr, sizeof(hoststr), NULL, NULL, hostname, port, NULL);

    if (use_proxy) {
        ff_url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
                     NULL, 0, proxy_path);
        path = s->location;
    } else {
        if (path1[0] == '\0')
            path = "/";
        else
            path = path1;
    }
    if (port < 0)
        port = 80;

    ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL);
    err = url_open(&hd, buf, URL_RDWR);
    if (err < 0)
        goto fail;

    s->hd = hd;
    cur_auth_type = s->auth_state.auth_type;
    if (http_connect(h, path, hoststr, auth, &location_changed) < 0)
        goto fail;
    if (s->http_code == 401) {
        if (cur_auth_type == HTTP_AUTH_NONE && s->auth_state.auth_type != HTTP_AUTH_NONE) {
            url_close(hd);
            goto redo;
        } else
            goto fail;
    }
    if ((s->http_code == 302 || s->http_code == 303) && location_changed == 1) {
        /* url moved, get next */
        url_close(hd);
        if (redirects++ >= MAX_REDIRECTS)
            return AVERROR(EIO);
        location_changed = 0;
        goto redo;
    }
    return 0;
 fail:
    if (hd)
        url_close(hd);
    s->hd = NULL;
    return AVERROR(EIO);
}
Example #2
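/* Parse a RealAudio (.ra) stream header, versions 3, 4 and 5: fill codec
 * parameters and extradata on the stream and set up the interleaver state
 * in 'ast'. */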
static int rm_read_audio_stream_info(AVFormatContext *s, AVIOContext *pb,
                                     AVStream *st, RMStream *ast, int read_all)
{
    char buf[256];
    uint32_t version;
    int ret;

    /* ra type header */
    version = avio_rb16(pb); /* version */
    if (version == 3) {
        unsigned bytes_per_minute;
        int header_size = avio_rb16(pb);
        int64_t startpos = avio_tell(pb);
        avio_skip(pb, 8);
        bytes_per_minute = avio_rb16(pb);
        avio_skip(pb, 4);
        rm_read_metadata(s, pb, 0);
        if ((startpos + header_size) >= avio_tell(pb) + 2) {
            // fourcc (should always be "lpcJ")
            avio_r8(pb);
            get_str8(pb, buf, sizeof(buf));
        }
        // Skip extra header crap (this should never happen)
        if ((startpos + header_size) > avio_tell(pb))
            avio_skip(pb, header_size + startpos - avio_tell(pb));
        if (bytes_per_minute)
            st->codec->bit_rate = 8LL * bytes_per_minute / 60;
        st->codec->sample_rate = 8000;
        st->codec->channels = 1;
        st->codec->channel_layout = AV_CH_LAYOUT_MONO;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = AV_CODEC_ID_RA_144;
        ast->deint_id = DEINT_ID_INT0;
    } else {
        int flavor, sub_packet_h, coded_framesize, sub_packet_size;
        int codecdata_length;
        unsigned bytes_per_minute;
        /* old version (4) */
        avio_skip(pb, 2); /* unused */
        avio_rb32(pb); /* .ra4 */
        avio_rb32(pb); /* data size */
        avio_rb16(pb); /* version2 */
        avio_rb32(pb); /* header size */
        flavor= avio_rb16(pb); /* add codec info / flavor */
        ast->coded_framesize = coded_framesize = avio_rb32(pb); /* coded frame size */
        avio_rb32(pb); /* ??? */
        bytes_per_minute = avio_rb32(pb);
        if (version == 4) {
            if (bytes_per_minute)
                st->codec->bit_rate = 8LL * bytes_per_minute / 60;
        }
        avio_rb32(pb); /* ??? */
        ast->sub_packet_h = sub_packet_h = avio_rb16(pb); /* 1 */
        st->codec->block_align= avio_rb16(pb); /* frame size */
        ast->sub_packet_size = sub_packet_size = avio_rb16(pb); /* sub packet size */
        avio_rb16(pb); /* ??? */
        if (version == 5) {
            avio_rb16(pb); avio_rb16(pb); avio_rb16(pb);
        }
        st->codec->sample_rate = avio_rb16(pb);
        avio_rb32(pb);
        st->codec->channels = avio_rb16(pb);
        if (version == 5) {
            ast->deint_id = avio_rl32(pb);
            avio_read(pb, buf, 4);
            buf[4] = 0;
        } else {
            get_str8(pb, buf, sizeof(buf)); /* desc */
            ast->deint_id = AV_RL32(buf);
            get_str8(pb, buf, sizeof(buf)); /* desc */
        }
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_tag  = AV_RL32(buf);
        st->codec->codec_id   = ff_codec_get_id(ff_rm_codec_tags,
                                                st->codec->codec_tag);

        switch (st->codec->codec_id) {
        case AV_CODEC_ID_AC3:
            st->need_parsing = AVSTREAM_PARSE_FULL;
            break;
        case AV_CODEC_ID_RA_288:
            st->codec->extradata_size= 0;
            ast->audio_framesize = st->codec->block_align;
            st->codec->block_align = coded_framesize;
            break;
        case AV_CODEC_ID_COOK:
            st->need_parsing = AVSTREAM_PARSE_HEADERS;
        case AV_CODEC_ID_ATRAC3:
        case AV_CODEC_ID_SIPR:
            if (read_all) {
                codecdata_length = 0;
            } else {
                avio_rb16(pb); avio_r8(pb);
                if (version == 5)
                    avio_r8(pb);
                codecdata_length = avio_rb32(pb);
                if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
                    av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
                    return -1;
                }
            }

            ast->audio_framesize = st->codec->block_align;
            if (st->codec->codec_id == AV_CODEC_ID_SIPR) {
                if (flavor > 3) {
                    av_log(s, AV_LOG_ERROR, "bad SIPR file flavor %d\n",
                           flavor);
                    return -1;
                }
                st->codec->block_align = ff_sipr_subpk_size[flavor];
            } else {
                if(sub_packet_size <= 0){
                    av_log(s, AV_LOG_ERROR, "sub_packet_size is invalid\n");
                    return -1;
                }
                st->codec->block_align = ast->sub_packet_size;
            }
            if ((ret = rm_read_extradata(pb, st->codec, codecdata_length)) < 0)
                return ret;

            break;
        case AV_CODEC_ID_AAC:
            avio_rb16(pb); avio_r8(pb);
            if (version == 5)
                avio_r8(pb);
            codecdata_length = avio_rb32(pb);
            if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
                av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
                return -1;
            }
            if (codecdata_length >= 1) {
                avio_r8(pb);
                if ((ret = rm_read_extradata(pb, st->codec, codecdata_length - 1)) < 0)
                    return ret;
            }
            break;
        default:
            av_strlcpy(st->codec->codec_name, buf, sizeof(st->codec->codec_name));
        }
        if (ast->deint_id == DEINT_ID_INT4 ||
            ast->deint_id == DEINT_ID_GENR ||
            ast->deint_id == DEINT_ID_SIPR) {
            if (st->codec->block_align <= 0 ||
                ast->audio_framesize * sub_packet_h > (unsigned)INT_MAX ||
                ast->audio_framesize * sub_packet_h < st->codec->block_align)
                return AVERROR_INVALIDDATA;
            if (av_new_packet(&ast->pkt, ast->audio_framesize * sub_packet_h) < 0)
                return AVERROR(ENOMEM);
        }
        switch (ast->deint_id) {
        case DEINT_ID_INT4:
            if (ast->coded_framesize > ast->audio_framesize ||
                sub_packet_h <= 1 ||
                ast->coded_framesize * sub_packet_h > (2 + (sub_packet_h & 1)) * ast->audio_framesize)
                return AVERROR_INVALIDDATA;
            break;
        case DEINT_ID_GENR:
            if (ast->sub_packet_size <= 0 ||
                ast->sub_packet_size > ast->audio_framesize)
                return AVERROR_INVALIDDATA;
            break;
        case DEINT_ID_SIPR:
        case DEINT_ID_INT0:
        case DEINT_ID_VBRS:
        case DEINT_ID_VBRF:
            break;
        default:
            av_log(s, AV_LOG_ERROR, "Unknown interleaver %X\n", ast->deint_id);
            return AVERROR_INVALIDDATA;
        }

        if (read_all) {
            avio_r8(pb);
            avio_r8(pb);
            avio_r8(pb);
            rm_read_metadata(s, pb, 0);
        }
    }
    return 0;
}
Example #3
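/* Reassemble a RealVideo frame from one or more slices: returns 0 once a
 * complete frame packet has been produced, 1 while more slices are needed,
 * negative on error. */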
static int rm_assemble_video_frame(AVFormatContext *s, AVIOContext *pb,
                                   RMDemuxContext *rm, RMStream *vst,
                                   AVPacket *pkt, int len, int *pseq,
                                   int64_t *timestamp)
{
    int hdr;
    int seq = 0, pic_num = 0, len2 = 0, pos = 0; // initialized to silence compiler warnings
    int type;
    int ret;

    hdr = avio_r8(pb); len--;
    type = hdr >> 6;

    if(type != 3){  // not frame as a part of packet
        seq = avio_r8(pb); len--;
    }
    if(type != 1){  // not whole frame
        len2 = get_num(pb, &len);
        pos  = get_num(pb, &len);
        pic_num = avio_r8(pb); len--;
    }
    if(len<0)
        return -1;
    rm->remaining_len = len;
    if(type&1){     // frame, not slice
        if(type == 3){  // frame as a part of packet
            len= len2;
            *timestamp = pos;
        }
        if(rm->remaining_len < len)
            return -1;
        rm->remaining_len -= len;
        if(av_new_packet(pkt, len + 9) < 0)
            return AVERROR(EIO);
        pkt->data[0] = 0;
        AV_WL32(pkt->data + 1, 1);
        AV_WL32(pkt->data + 5, 0);
        if ((ret = avio_read(pb, pkt->data + 9, len)) != len) {
            av_free_packet(pkt);
            return ret < 0 ? ret : AVERROR(EIO);
        }
        return 0;
    }
    //now we have to deal with single slice

    *pseq = seq;
    if((seq & 0x7F) == 1 || vst->curpic_num != pic_num){
        if (len2 > ffio_limit(pb, len2)) {
            av_log(s, AV_LOG_ERROR, "Impossibly sized packet\n");
            return AVERROR_INVALIDDATA;
        }
        vst->slices = ((hdr & 0x3F) << 1) + 1;
        vst->videobufsize = len2 + 8*vst->slices + 1;
        av_free_packet(&vst->pkt); //FIXME this should be output.
        if(av_new_packet(&vst->pkt, vst->videobufsize) < 0)
            return AVERROR(ENOMEM);
        memset(vst->pkt.data, 0, vst->pkt.size);
        vst->videobufpos = 8*vst->slices + 1;
        vst->cur_slice = 0;
        vst->curpic_num = pic_num;
        vst->pktpos = avio_tell(pb);
    }
    if(type == 2)
        len = FFMIN(len, pos);

    if(++vst->cur_slice > vst->slices)
        return 1;
    AV_WL32(vst->pkt.data - 7 + 8*vst->cur_slice, 1);
    AV_WL32(vst->pkt.data - 3 + 8*vst->cur_slice, vst->videobufpos - 8*vst->slices - 1);
    if(vst->videobufpos + len > vst->videobufsize)
        return 1;
    if (avio_read(pb, vst->pkt.data + vst->videobufpos, len) != len)
        return AVERROR(EIO);
    vst->videobufpos += len;
    rm->remaining_len-= len;

    if (type == 2 || vst->videobufpos == vst->videobufsize) {
        vst->pkt.data[0] = vst->cur_slice-1;
        *pkt= vst->pkt;
        vst->pkt.data= NULL;
        vst->pkt.size= 0;
        vst->pkt.buf = NULL;
#if FF_API_DESTRUCT_PACKET
        vst->pkt.destruct = NULL;
#endif
        if(vst->slices != vst->cur_slice) // FIXME: find out how to set slices correctly from the beginning
            memmove(pkt->data + 1 + 8*vst->cur_slice, pkt->data + 1 + 8*vst->slices,
                vst->videobufpos - 1 - 8*vst->slices);
        pkt->size = vst->videobufpos + 8*(vst->cur_slice - vst->slices);
        pkt->pts = AV_NOPTS_VALUE;
        pkt->pos = vst->pktpos;
        vst->slices = 0;
        return 0;
    }

    return 1;
}
Example #4
/** Returns 0 on a packet with none left queued, 1 on a packet with more queued,
 *  or a negative error code (AVERROR(EAGAIN) on a partial packet). */
static int qdm2_parse_packet(AVFormatContext *s, PayloadContext *qdm,
                             AVStream *st, AVPacket *pkt,
                             uint32_t *timestamp,
                             const uint8_t *buf, int len, uint16_t seq,
                             int flags)
{
    int res = AVERROR_INVALIDDATA, n;
    const uint8_t *end = buf + len, *p = buf;

    if (len > 0) {
        if (len < 2)
            return AVERROR_INVALIDDATA;

        /* configuration block */
        if (*p == 0xff) {
            if (qdm->n_pkts > 0) {
                av_log(s, AV_LOG_WARNING,
                       "Out of sequence config - dropping queue\n");
                qdm->n_pkts = 0;
                memset(qdm->len, 0, sizeof(qdm->len));
            }

            if ((res = qdm2_parse_config(qdm, st, ++p, end)) < 0)
                return res;
            p += res;

            /* We set codec_id to AV_CODEC_ID_NONE initially to
             * delay decoder initialization since extradata is
             * carried within the RTP stream, not SDP. Here,
             * by setting codec_id to AV_CODEC_ID_QDM2, we are signalling
             * to the decoder that it is OK to initialize. */
            st->codec->codec_id = AV_CODEC_ID_QDM2;
        }
        if (st->codec->codec_id == AV_CODEC_ID_NONE)
            return AVERROR(EAGAIN);

        /* subpackets */
        while (end - p >= 4) {
            if ((res = qdm2_parse_subpacket(qdm, st, p, end)) < 0)
                return res;
            p += res;
        }

        qdm->timestamp = *timestamp;
        if (++qdm->n_pkts < qdm->subpkts_per_block)
            return AVERROR(EAGAIN);
        qdm->cache = 0;
        for (n = 0; n < 0x80; n++)
            if (qdm->len[n] > 0)
                qdm->cache++;
    }

    /* output the subpackets into freshly created superblock structures */
    if (!qdm->cache || (res = qdm2_restore_block(qdm, st, pkt)) < 0)
        return res;
    if (--qdm->cache == 0)
        qdm->n_pkts = 0;

    *timestamp = qdm->timestamp;
    qdm->timestamp = RTP_NOTS_VALUE;

    return (qdm->cache > 0) ? 1 : 0;
}
Example #5
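/* Read the next block of a Bethesda Softworks VID file and dispatch on its
 * type: palette, audio (creating the audio stream on first use), video frame
 * or end-of-file marker. */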
static int vid_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    BVID_DemuxContext *vid = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned char block_type;
    int audio_length;
    int ret_value;

    if(vid->is_finished || url_feof(pb))
        return AVERROR(EIO);

    block_type = avio_r8(pb);
    switch(block_type){
        case PALETTE_BLOCK:
            if (vid->palette) {
                av_log(s, AV_LOG_WARNING, "discarding unused palette\n");
                av_freep(&vid->palette);
            }
            vid->palette = av_malloc(BVID_PALETTE_SIZE);
            if (!vid->palette)
                return AVERROR(ENOMEM);
            if (avio_read(pb, vid->palette, BVID_PALETTE_SIZE) != BVID_PALETTE_SIZE) {
                av_freep(&vid->palette);
                return AVERROR(EIO);
            }
            return vid_read_packet(s, pkt);

        case FIRST_AUDIO_BLOCK:
            avio_rl16(pb);
            // soundblaster DAC used for sample rate, as on specification page (link above)
            vid->sample_rate = 1000000 / (256 - avio_r8(pb));
        case AUDIO_BLOCK:
            if (vid->audio_index < 0) {
                AVStream *st = avformat_new_stream(s, NULL);
                if (!st)
                    return AVERROR(ENOMEM);
                vid->audio_index                 = st->index;
                st->codec->codec_type            = AVMEDIA_TYPE_AUDIO;
                st->codec->codec_id              = CODEC_ID_PCM_U8;
                st->codec->channels              = 1;
                st->codec->bits_per_coded_sample = 8;
                st->codec->sample_rate           = vid->sample_rate;
                st->codec->bit_rate              = 8 * st->codec->sample_rate;
                st->start_time                   = 0;
                avpriv_set_pts_info(st, 64, 1, vid->sample_rate);
            }
            audio_length = avio_rl16(pb);
            if ((ret_value = av_get_packet(pb, pkt, audio_length)) != audio_length) {
                if (ret_value < 0)
                    return ret_value;
                av_log(s, AV_LOG_ERROR, "incomplete audio block\n");
                return AVERROR(EIO);
            }
            pkt->stream_index = vid->audio_index;
            pkt->duration     = audio_length;
            pkt->flags |= AV_PKT_FLAG_KEY;
            return 0;

        case VIDEO_P_FRAME:
        case VIDEO_YOFF_P_FRAME:
        case VIDEO_I_FRAME:
            return read_frame(vid, pb, pkt, block_type, s);

        case EOF_BLOCK:
            if(vid->nframes != 0)
                av_log(s, AV_LOG_VERBOSE, "reached terminating character but not all frames read.\n");
            vid->is_finished = 1;
            return AVERROR(EIO);
        default:
            av_log(s, AV_LOG_ERROR, "unknown block (character = %c, decimal = %d, hex = %x)!!!\n",
                   block_type, block_type, block_type);
            return AVERROR_INVALIDDATA;
    }
}
Example #6
File: bink.c Project: simock85/libav
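/* Parse the Bink file header: create the video stream, any audio streams,
 * and the frame index table. */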
static int read_header(AVFormatContext *s)
{
    BinkDemuxContext *bink = s->priv_data;
    AVIOContext *pb = s->pb;
    uint32_t fps_num, fps_den;
    AVStream *vst, *ast;
    unsigned int i;
    uint32_t pos, next_pos;
    uint16_t flags;
    int keyframe;

    vst = avformat_new_stream(s, NULL);
    if (!vst)
        return AVERROR(ENOMEM);

    vst->codec->codec_tag = avio_rl32(pb);

    bink->file_size = avio_rl32(pb) + 8;
    vst->duration   = avio_rl32(pb);

    if (vst->duration > 1000000) {
        av_log(s, AV_LOG_ERROR, "invalid header: more than 1000000 frames\n");
        return AVERROR(EIO);
    }

    if (avio_rl32(pb) > bink->file_size) {
        av_log(s, AV_LOG_ERROR,
               "invalid header: largest frame size greater than file size\n");
        return AVERROR(EIO);
    }

    avio_skip(pb, 4);

    vst->codec->width  = avio_rl32(pb);
    vst->codec->height = avio_rl32(pb);

    fps_num = avio_rl32(pb);
    fps_den = avio_rl32(pb);
    if (fps_num == 0 || fps_den == 0) {
        av_log(s, AV_LOG_ERROR, "invalid header: invalid fps (%d/%d)\n", fps_num, fps_den);
        return AVERROR(EIO);
    }
    avpriv_set_pts_info(vst, 64, fps_den, fps_num);

    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_id   = CODEC_ID_BINKVIDEO;
    vst->codec->extradata  = av_mallocz(4 + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!vst->codec->extradata)
        return AVERROR(ENOMEM);
    vst->codec->extradata_size = 4;
    avio_read(pb, vst->codec->extradata, 4);

    bink->num_audio_tracks = avio_rl32(pb);

    if (bink->num_audio_tracks > BINK_MAX_AUDIO_TRACKS) {
        av_log(s, AV_LOG_ERROR,
               "invalid header: more than "AV_STRINGIFY(BINK_MAX_AUDIO_TRACKS)" audio tracks (%d)\n",
               bink->num_audio_tracks);
        return AVERROR(EIO);
    }

    if (bink->num_audio_tracks) {
        avio_skip(pb, 4 * bink->num_audio_tracks);

        for (i = 0; i < bink->num_audio_tracks; i++) {
            ast = avformat_new_stream(s, NULL);
            if (!ast)
                return AVERROR(ENOMEM);
            ast->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
            ast->codec->codec_tag   = 0;
            ast->codec->sample_rate = avio_rl16(pb);
            avpriv_set_pts_info(ast, 64, 1, ast->codec->sample_rate);
            flags = avio_rl16(pb);
            ast->codec->codec_id = flags & BINK_AUD_USEDCT ?
                                   CODEC_ID_BINKAUDIO_DCT : CODEC_ID_BINKAUDIO_RDFT;
            ast->codec->channels = flags & BINK_AUD_STEREO ? 2 : 1;
            ast->codec->extradata = av_mallocz(4 + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!ast->codec->extradata)
                return AVERROR(ENOMEM);
            ast->codec->extradata_size = 4;
            AV_WL32(ast->codec->extradata, vst->codec->codec_tag);
        }

        for (i = 0; i < bink->num_audio_tracks; i++)
            s->streams[i + 1]->id = avio_rl32(pb);
    }

    /* frame index table */
    next_pos = avio_rl32(pb);
    for (i = 0; i < vst->duration; i++) {
        pos = next_pos;
        if (i == vst->duration - 1) {
            next_pos = bink->file_size;
            keyframe = 0;
        } else {
            next_pos = avio_rl32(pb);
            keyframe = pos & 1;
        }
        pos &= ~1;
        next_pos &= ~1;

        if (next_pos <= pos) {
            av_log(s, AV_LOG_ERROR, "invalid frame index table\n");
            return AVERROR(EIO);
        }
        av_add_index_entry(vst, pos, i, next_pos - pos, 0,
                           keyframe ? AVINDEX_KEYFRAME : 0);
    }

    avio_skip(pb, 4);

    bink->current_track = -1;
    return 0;
}
Example #7
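/* Decode one CDXL video frame: validate the header, choose the output pixel
 * format (PAL8 for palettized data, BGR24 for HAM6/HAM8) and decode into the
 * context's AVFrame. */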
static int cdxl_decode_frame(AVCodecContext *avctx, void *data,
                             int *data_size, AVPacket *pkt)
{
    CDXLVideoContext *c = avctx->priv_data;
    AVFrame * const p = &c->frame;
    int ret, w, h, encoding, aligned_width, buf_size = pkt->size;
    const uint8_t *buf = pkt->data;

    if (buf_size < 32)
        return AVERROR_INVALIDDATA;
    encoding        = buf[1] & 7;
    c->format       = buf[1] & 0xE0;
    w               = AV_RB16(&buf[14]);
    h               = AV_RB16(&buf[16]);
    c->bpp          = buf[19];
    c->palette_size = AV_RB16(&buf[20]);
    c->palette      = buf + 32;
    c->video        = c->palette + c->palette_size;
    c->video_size   = buf_size - c->palette_size - 32;

    if (c->palette_size > 512)
        return AVERROR_INVALIDDATA;
    if (buf_size < c->palette_size + 32)
        return AVERROR_INVALIDDATA;
    if (c->bpp < 1)
        return AVERROR_INVALIDDATA;
    if (c->format != BIT_PLANAR && c->format != BIT_LINE) {
        av_log_ask_for_sample(avctx, "unsupported pixel format: 0x%0x\n", c->format);
        return AVERROR_PATCHWELCOME;
    }

    if ((ret = av_image_check_size(w, h, 0, avctx)) < 0)
        return ret;
    if (w != avctx->width || h != avctx->height)
        avcodec_set_dimensions(avctx, w, h);

    aligned_width = FFALIGN(c->avctx->width, 16);
    c->padded_bits  = aligned_width - c->avctx->width;
    if (c->video_size < aligned_width * avctx->height * c->bpp / 8)
        return AVERROR_INVALIDDATA;
    if (!encoding && c->palette_size && c->bpp <= 8) {
        avctx->pix_fmt = AV_PIX_FMT_PAL8;
    } else if (encoding == 1 && (c->bpp == 6 || c->bpp == 8)) {
        if (c->palette_size != (1 << (c->bpp - 1)))
            return AVERROR_INVALIDDATA;
        avctx->pix_fmt = AV_PIX_FMT_BGR24;
    } else {
        av_log_ask_for_sample(avctx, "unsupported encoding %d and bpp %d\n",
                              encoding, c->bpp);
        return AVERROR_PATCHWELCOME;
    }

    if (p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference = 0;
    if ((ret = avctx->get_buffer(avctx, p)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    p->pict_type = AV_PICTURE_TYPE_I;

    if (encoding) {
        av_fast_padded_malloc(&c->new_video, &c->new_video_size,
                              h * w + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!c->new_video)
            return AVERROR(ENOMEM);
        if (c->bpp == 8)
            cdxl_decode_ham8(c);
        else
            cdxl_decode_ham6(c);
    } else {
        cdxl_decode_rgb(c);
    }
    *data_size      = sizeof(AVFrame);
    *(AVFrame*)data = c->frame;

    return buf_size;
}
Example #8
File: mxg.c Project: eugenehp/ffmbc
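/* Demux one MxPEG packet: scan the cached byte stream for JPEG markers and
 * return either a video packet (SOI..EOI) or an audio packet (APP13 segment). */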
static int mxg_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret;
    unsigned int size;
    uint8_t *startmarker_ptr, *end, *search_end, marker;
    MXGContext *mxg = s->priv_data;

    while (!url_feof(s->pb) && !s->pb->error) {
        if (mxg->cache_size <= OVERREAD_SIZE) {
            /* update internal buffer */
            ret = mxg_update_cache(s, DEFAULT_PACKET_SIZE + OVERREAD_SIZE);
            if (ret < 0)
                return ret;
        }
        end = mxg->buffer_ptr + mxg->cache_size;

        /* find start marker - 0xff */
        if (mxg->cache_size > OVERREAD_SIZE) {
            search_end = end - OVERREAD_SIZE;
            startmarker_ptr = mxg_find_startmarker(mxg->buffer_ptr, search_end);
        } else {
            search_end = end;
            startmarker_ptr = mxg_find_startmarker(mxg->buffer_ptr, search_end);
            if (startmarker_ptr >= search_end - 1 ||
                    *(startmarker_ptr + 1) != EOI) break;
        }

        if (startmarker_ptr != search_end) { /* start marker found */
            marker = *(startmarker_ptr + 1);
            mxg->buffer_ptr = startmarker_ptr + 2;
            mxg->cache_size = end - mxg->buffer_ptr;

            if (marker == SOI) {
                mxg->soi_ptr = startmarker_ptr;
            } else if (marker == EOI) {
                if (!mxg->soi_ptr) {
                    av_log(s, AV_LOG_WARNING, "Found EOI before SOI, skipping\n");
                    continue;
                }

                pkt->pts = pkt->dts = mxg->dts;
                pkt->stream_index = VIDEO_STREAM_INDEX;
                pkt->destruct = NULL;
                pkt->size = mxg->buffer_ptr - mxg->soi_ptr;
                pkt->data = mxg->soi_ptr;

                if (mxg->soi_ptr - mxg->buffer > mxg->cache_size) {
                    if (mxg->cache_size > 0) {
                        memcpy(mxg->buffer, mxg->buffer_ptr, mxg->cache_size);
                    }

                    mxg->buffer_ptr = mxg->buffer;
                }
                mxg->soi_ptr = 0;

                return pkt->size;
            } else if ( (SOF0 <= marker && marker <= SOF15) ||
                        (SOS  <= marker && marker <= COM) ) {
                /* all other markers that start marker segment also contain
                   length value (see specification for JPEG Annex B.1) */
                size = AV_RB16(mxg->buffer_ptr);
                if (size < 2)
                    return AVERROR(EINVAL);

                if (mxg->cache_size < size) {
                    ret = mxg_update_cache(s, size);
                    if (ret < 0)
                        return ret;
                    startmarker_ptr = mxg->buffer_ptr - 2;
                    mxg->cache_size = 0;
                } else {
                    mxg->cache_size -= size;
                }

                mxg->buffer_ptr += size;

                if (marker == APP13 && size >= 16) { /* audio data */
                    /* time (GMT) of first sample in usec since 1970, little-endian */
                    pkt->pts = pkt->dts = AV_RL64(startmarker_ptr + 8);
                    pkt->stream_index = AUDIO_STREAM_INDEX;
                    pkt->destruct = NULL;
                    pkt->size = size - 14;
                    pkt->data = startmarker_ptr + 16;

                    if (startmarker_ptr - mxg->buffer > mxg->cache_size) {
                        if (mxg->cache_size > 0) {
                            memcpy(mxg->buffer, mxg->buffer_ptr, mxg->cache_size);
                        }
                        mxg->buffer_ptr = mxg->buffer;
                    }

                    return pkt->size;
                } else if (marker == COM && size >= 18 &&
                           !strncmp(startmarker_ptr + 4, "MXF", 3)) {
                    /* time (GMT) of video frame in usec since 1970, little-endian */
                    mxg->dts = AV_RL64(startmarker_ptr + 12);
                }
            }
        } else {
            /* start marker not found */
            mxg->buffer_ptr = search_end;
            mxg->cache_size = OVERREAD_SIZE;
        }
    }

    return AVERROR_EOF;
}
Example #9
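/* Demuxing/decoding example: open the input file, decode its video and audio
 * streams and write the raw frames to the given output files; this variant
 * additionally sets up a swscale context and frame/packet state for
 * re-encoding the decoded video to H.264. */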
int main (int argc, char **argv)
{
    int ret = 0, got_frame;

    if (argc != 4 && argc != 5) {
        fprintf(stderr, "usage: %s [-refcount] input_file video_output_file audio_output_file\n"
                "API example program to show how to read frames from an input file.\n"
                "This program reads frames from a file, decodes them, and writes decoded\n"
                "video frames to a rawvideo file named video_output_file, and decoded\n"
                "audio frames to a rawaudio file named audio_output_file.\n\n"
                "If the -refcount option is specified, the program use the\n"
                "reference counting frame system which allows keeping a copy of\n"
                "the data for longer than one decode call.\n"
                "\n", argv[0]);
        exit(1);
    }
    if (argc == 5 && !strcmp(argv[1], "-refcount")) {
        refcount = 1;
        argv++;
    }
    src_filename = argv[1];
    video_dst_filename = argv[2];
    audio_dst_filename = argv[3];

    /* register all formats and codecs */
    av_register_all();

    /* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }

    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }


    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;

        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

        /* allocate image where the decoded image will be put */
        width = video_dec_ctx->width;
        height = video_dec_ctx->height;
        pix_fmt = video_dec_ctx->pix_fmt;
	printf("width:%d height:%d pix_fmt:%d\n", width, height, pix_fmt);
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             width, height, pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        video_dst_bufsize = ret;

        /* create scaling context */
        sws_ctx = sws_getContext(width, height, pix_fmt,
                                 width*SCALE_MULTIPLE, height*SCALE_MULTIPLE, pix_fmt,
                                 SWS_BILINEAR, NULL, NULL, NULL);
        if (!sws_ctx) {
            fprintf(stderr, "Impossible to create scale context for the conversion "
              "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
                av_get_pix_fmt_name(pix_fmt), width,height,
                av_get_pix_fmt_name(pix_fmt), width,height);
            ret = AVERROR(EINVAL);
            goto end;
        }
        ret = av_image_alloc(scale_video_dst_data, scale_video_dst_linesize,
                             width*SCALE_MULTIPLE, height*SCALE_MULTIPLE, pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        scale_video_dst_bufsize = ret;
        AllocPic();
    }

    if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
        audio_stream = fmt_ctx->streams[audio_stream_idx];
        audio_dec_ctx = audio_stream->codec;
        audio_dst_file = fopen(audio_dst_filename, "wb");
        if (!audio_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
            ret = 1;
            goto end;
        }
    }

    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, src_filename, 0);

    if (!audio_stream && !video_stream) {
        fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /*encode yuv to h264*/
    h264_frame = av_frame_alloc();
    if (!h264_frame) {
        fprintf(stderr, "Could not allocate h264_frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }
    av_init_packet(&h264_pkt);
    h264_pkt.data = NULL;
    h264_pkt.size = 0;

    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (video_stream)
        printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
    if (audio_stream)
        printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);

    /* read frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        AVPacket orig_pkt = pkt;
        do {
            ret = decode_packet(&got_frame, 0);
            if (ret < 0)
                break;
            pkt.data += ret;
            pkt.size -= ret;
        } while (pkt.size > 0);
        av_packet_unref(&orig_pkt);
    }

    /* flush cached frames */
    pkt.data = NULL;
    pkt.size = 0;
    do {
        decode_packet(&got_frame, 1);
    } while (got_frame);

    printf("Demuxing succeeded.\n");

    if (video_stream) {
        printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
               av_get_pix_fmt_name(pix_fmt), width, height,
               video_dst_filename);
    }

    if (audio_stream) {
        enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
        int n_channels = audio_dec_ctx->channels;
        const char *fmt;

        if (av_sample_fmt_is_planar(sfmt)) {
            const char *packed = av_get_sample_fmt_name(sfmt);
            printf("Warning: the sample format the decoder produced is planar "
                   "(%s). This example will output the first channel only.\n",
                   packed ? packed : "?");
            sfmt = av_get_packed_sample_fmt(sfmt);
            n_channels = 1;
        }

        if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
            goto end;

        printf("Play the output audio file with the command:\n"
               "ffplay -f %s -ac %d -ar %d %s\n",
               fmt, n_channels, audio_dec_ctx->sample_rate,
               audio_dst_filename);
    }
    av_write_trailer(mp4FmtCtx);

end:
    avcodec_close(video_dec_ctx);
    avcodec_close(audio_dec_ctx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    if (audio_dst_file)
        fclose(audio_dst_file);
    av_frame_free(&frame);
    av_free(video_dst_data[0]);
    av_free(scale_video_dst_data[0]);
    if(sws_ctx)
        sws_freeContext(sws_ctx);

    return ret < 0;
}
Example #10
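/* Apply the flanger effect to one audio frame: each output sample mixes the
 * input with a delayed copy of itself, the delay being modulated per channel
 * by an LFO, with optional feedback. */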
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    FlangerContext *s = ctx->priv;
    AVFrame *out_frame;
    int chan, i;

    if (av_frame_is_writable(frame)) {
        out_frame = frame;
    } else {
        out_frame = ff_get_audio_buffer(inlink, frame->nb_samples);
        if (!out_frame)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out_frame, frame);
    }

    for (i = 0; i < frame->nb_samples; i++) {

        s->delay_buf_pos = (s->delay_buf_pos + s->max_samples - 1) % s->max_samples;

        for (chan = 0; chan < inlink->channels; chan++) {
            double *src = (double *)frame->extended_data[chan];
            double *dst = (double *)out_frame->extended_data[chan];
            double delayed_0, delayed_1;
            double delayed;
            double in, out;
            int channel_phase = chan * s->lfo_length * s->channel_phase + .5;
            double delay = s->lfo[(s->lfo_pos + channel_phase) % s->lfo_length];
            int int_delay = (int)delay;
            double frac_delay = modf(delay, &delay);
            double *delay_buffer = (double *)s->delay_buffer[chan];

            in = src[i];
            delay_buffer[s->delay_buf_pos] = in + s->delay_last[chan] *
                                                           s->feedback_gain;
            delayed_0 = delay_buffer[(s->delay_buf_pos + int_delay++) % s->max_samples];
            delayed_1 = delay_buffer[(s->delay_buf_pos + int_delay++) % s->max_samples];

            if (s->interpolation == INTERPOLATION_LINEAR) {
                delayed = delayed_0 + (delayed_1 - delayed_0) * frac_delay;
            } else {
                double a, b;
                double delayed_2 = delay_buffer[(s->delay_buf_pos + int_delay++) % s->max_samples];
                delayed_2 -= delayed_0;
                delayed_1 -= delayed_0;
                a = delayed_2 * .5 - delayed_1;
                b = delayed_1 *  2 - delayed_2 *.5;
                delayed = delayed_0 + (a * frac_delay + b) * frac_delay;
            }

            s->delay_last[chan] = delayed;
            out = in * s->in_gain + delayed * s->delay_gain;
            dst[i] = out;
        }
        s->lfo_pos = (s->lfo_pos + 1) % s->lfo_length;
    }

    if (frame != out_frame)
        av_frame_free(&frame);

    return ff_filter_frame(ctx->outputs[0], out_frame);
}
Example #11
File: asvdec.c Project: ares89/vlc
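/* Decode one ASV1/ASV2 intra frame: byte-swap (ASV1) or bit-reverse (ASV2)
 * the bitstream, then decode and IDCT every macroblock, including the
 * partial right/bottom edge blocks. */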
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    ASV1Context * const a = avctx->priv_data;
    const uint8_t *buf    = avpkt->data;
    int buf_size          = avpkt->size;
    AVFrame *picture      = data;
    AVFrame * const p     = &a->picture;
    int mb_x, mb_y, ret;

    if (p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference = 0;
    if ((ret = ff_get_buffer(avctx, p)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    av_fast_padded_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size,
                          buf_size);
    if (!a->bitstream_buffer)
        return AVERROR(ENOMEM);

    if (avctx->codec_id == AV_CODEC_ID_ASV1)
        a->dsp.bswap_buf((uint32_t*)a->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
    else {
        int i;
        for (i = 0; i < buf_size; i++)
            a->bitstream_buffer[i] = ff_reverse[buf[i]];
    }

    init_get_bits(&a->gb, a->bitstream_buffer, buf_size*8);

    for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
        for (mb_x = 0; mb_x < a->mb_width2; mb_x++) {
            if ((ret = decode_mb(a, a->block)) < 0)
                return ret;

            idct_put(a, mb_x, mb_y);
        }
    }

    if (a->mb_width2 != a->mb_width) {
        mb_x = a->mb_width2;
        for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
            if ((ret = decode_mb(a, a->block)) < 0)
                return ret;

            idct_put(a, mb_x, mb_y);
        }
    }

    if (a->mb_height2 != a->mb_height) {
        mb_y = a->mb_height2;
        for (mb_x = 0; mb_x < a->mb_width; mb_x++) {
            if ((ret = decode_mb(a, a->block)) < 0)
                return ret;

            idct_put(a, mb_x, mb_y);
        }
    }

    *picture   = a->picture;
    *got_frame = 1;

    emms_c();

    return (get_bits_count(&a->gb) + 31) / 32 * 4;
}
Example #12
File: ffdemuxer.c Project: arkanis/smeb
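/* Minimal demuxer test: open a WebM file with non-blocking IO, print the
 * stream parameters, then loop over av_read_frame(), logging keyframes of
 * stream 0. */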
int main(int argc, char** argv) {
	if (argc != 2)
		fprintf(stderr, "usage: %s webm-file\n", argv[0]), exit(1);
	
	char errmsg[512];
	// Switch stdin to non-blocking IO to test out a non-blocking av_read_frame()
	if ( fcntl(0, F_SETFL, fcntl(0, F_GETFL, NULL) | O_NONBLOCK) == -1 )
		perror("fcntl"), exit(1);
	
	av_register_all();
	
	AVInputFormat* webm_fmt = av_find_input_format("webm");
	AVFormatContext* demuxer = avformat_alloc_context();
	demuxer->flags |= AVFMT_FLAG_NONBLOCK;
	int error = avformat_open_input(&demuxer, argv[1], webm_fmt, NULL);
	//int error = avformat_open_input(&demuxer, "pipe:0", webm_fmt, NULL);
	if (error < 0)
		fprintf(stderr, "avformat_open_input(): %s\n", av_make_error_string(errmsg, sizeof(errmsg), error)), exit(1);
	
	printf("found %d streams:\n", demuxer->nb_streams);
	for(size_t i = 0; i < demuxer->nb_streams; i++) {
		AVStream* stream = demuxer->streams[i];
		printf("%d: time base %d/%d, codec: %s, extradata: %p, %d bytes\n",
			stream->index, stream->time_base.num, stream->time_base.den,
			stream->codec->codec_name, stream->codec->extradata, stream->codec->extradata_size);
		switch (stream->codec->codec_type) {
			case AVMEDIA_TYPE_VIDEO:
				printf("   video, w: %d, h: %d, sar: %d/%d, %dx%d\n",
					stream->codec->width, stream->codec->height, stream->sample_aspect_ratio.num, stream->sample_aspect_ratio.den,
					stream->codec->width * stream->sample_aspect_ratio.num / stream->sample_aspect_ratio.den, stream->codec->height);
				break;
			case AVMEDIA_TYPE_AUDIO:
				printf("   audio, %d channels, sampel rate: %d, bits per sample: %d\n",
					stream->codec->channels, stream->codec->sample_rate, stream->codec->bits_per_coded_sample);
				break;
			default:
				break;
		}
	}
	
	AVPacket packet;
	int ret = 0;
	while (true) {
		ret = av_read_frame(demuxer, &packet);
		if (ret == AVERROR(EAGAIN)) {
			printf("sleep\n");
			struct timespec duration = {0, 250 * 1000000};
			nanosleep(&duration, NULL);
			continue;
		} else if (ret != 0) {
			break;
		}
		
		if (packet.flags & AV_PKT_FLAG_KEY && packet.stream_index == 0)
			printf("keyframe: stream %d, pts: %lu, dts: %lu, duration: %d, buf: %p\n", packet.stream_index, packet.pts, packet.dts, packet.duration, packet.buf);
		
		av_free_packet(&packet);
	}
	
	avformat_close_input(&demuxer);
	
	return 0;
}
Example #13
/** get sizes and offsets of image, tiles; number of components */
static int get_siz(J2kDecoderContext *s)
{
    int i, ret;

    if (bytestream2_get_bytes_left(&s->g) < 36)
        return AVERROR(EINVAL);

                        bytestream2_get_be16u(&s->g); // Rsiz (skipped)
             s->width = bytestream2_get_be32u(&s->g); // width
            s->height = bytestream2_get_be32u(&s->g); // height
    s->image_offset_x = bytestream2_get_be32u(&s->g); // X0Siz
    s->image_offset_y = bytestream2_get_be32u(&s->g); // Y0Siz

        s->tile_width = bytestream2_get_be32u(&s->g); // XTSiz
       s->tile_height = bytestream2_get_be32u(&s->g); // YTSiz
     s->tile_offset_x = bytestream2_get_be32u(&s->g); // XT0Siz
     s->tile_offset_y = bytestream2_get_be32u(&s->g); // YT0Siz
       s->ncomponents = bytestream2_get_be16u(&s->g); // CSiz

    if(s->tile_width<=0 || s->tile_height<=0)
        return AVERROR(EINVAL);

    if (bytestream2_get_bytes_left(&s->g) < 3 * s->ncomponents)
        return AVERROR(EINVAL);

    for (i = 0; i < s->ncomponents; i++){ // Ssiz_i XRsiz_i, YRsiz_i
        uint8_t x = bytestream2_get_byteu(&s->g);
        s->cbps[i] = (x & 0x7f) + 1;
        s->precision = FFMAX(s->cbps[i], s->precision);
        s->sgnd[i] = !!(x & 0x80);
        s->cdx[i] = bytestream2_get_byteu(&s->g);
        s->cdy[i] = bytestream2_get_byteu(&s->g);
    }

    s->numXtiles = ff_j2k_ceildiv(s->width - s->tile_offset_x, s->tile_width);
    s->numYtiles = ff_j2k_ceildiv(s->height - s->tile_offset_y, s->tile_height);

    if(s->numXtiles * (uint64_t)s->numYtiles > INT_MAX/sizeof(J2kTile))
        return AVERROR(EINVAL);

    s->tile = av_mallocz(s->numXtiles * s->numYtiles * sizeof(J2kTile));
    if (!s->tile)
        return AVERROR(ENOMEM);

    for (i = 0; i < s->numXtiles * s->numYtiles; i++){
        J2kTile *tile = s->tile + i;

        tile->comp = av_mallocz(s->ncomponents * sizeof(J2kComponent));
        if (!tile->comp)
            return AVERROR(ENOMEM);
    }

    s->avctx->width  = s->width  - s->image_offset_x;
    s->avctx->height = s->height - s->image_offset_y;

    switch(s->ncomponents){
    case 1:
        if (s->precision > 8) {
            s->avctx->pix_fmt = PIX_FMT_GRAY16;
        } else {
            s->avctx->pix_fmt = PIX_FMT_GRAY8;
        }
        break;
    case 3:
        if (s->precision > 8) {
            s->avctx->pix_fmt = PIX_FMT_RGB48;
        } else {
            s->avctx->pix_fmt = PIX_FMT_RGB24;
        }
        break;
    case 4:
        s->avctx->pix_fmt = PIX_FMT_RGBA;
        break;
    }

    if (s->picture.data[0])
        s->avctx->release_buffer(s->avctx, &s->picture);

    if ((ret = s->avctx->get_buffer(s->avctx, &s->picture)) < 0)
        return ret;

    s->picture.pict_type = AV_PICTURE_TYPE_I;
    s->picture.key_frame = 1;

    return 0;
}
Example #14
File: bethsoftvid.c Project: AndyA/ffmbc
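/* Read one VID video frame: grow a temporary buffer while copying the
 * RLE-coded frame data, then wrap it in an AVPacket and advance the video
 * pts by the frame's delay. */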
static int read_frame(BVID_DemuxContext *vid, ByteIOContext *pb, AVPacket *pkt,
                      uint8_t block_type, AVFormatContext *s, int npixels)
{
    uint8_t * vidbuf_start = NULL;
    int vidbuf_nbytes = 0;
    int code;
    int bytes_copied = 0;
    int position, delay;
    unsigned int vidbuf_capacity;

    vidbuf_start = av_malloc(vidbuf_capacity = BUFFER_PADDING_SIZE);
    if(!vidbuf_start)
        return AVERROR(ENOMEM);

    // save the file position for the packet, include block type
    position = url_ftell(pb) - 1;

    vidbuf_start[vidbuf_nbytes++] = block_type;

    // get the video delay (next int16), and set the presentation time
    delay = get_le16(pb);

    // set the y offset if it exists (decoder header data should be in data section)
    if(block_type == VIDEO_YOFF_P_FRAME){
        if(get_buffer(pb, &vidbuf_start[vidbuf_nbytes], 2) != 2)
            goto fail;
        vidbuf_nbytes += 2;
    }

    do{
        vidbuf_start = av_fast_realloc(vidbuf_start, &vidbuf_capacity, vidbuf_nbytes + BUFFER_PADDING_SIZE);
        if(!vidbuf_start)
            return AVERROR(ENOMEM);

        code = get_byte(pb);
        vidbuf_start[vidbuf_nbytes++] = code;

        if(code >= 0x80){ // rle sequence
            if(block_type == VIDEO_I_FRAME)
                vidbuf_start[vidbuf_nbytes++] = get_byte(pb);
        } else if(code){ // plain sequence
            if(get_buffer(pb, &vidbuf_start[vidbuf_nbytes], code) != code)
                goto fail;
            vidbuf_nbytes += code;
        }
        bytes_copied += code & 0x7F;
        if(bytes_copied == npixels){ // sometimes no stop character is given, need to keep track of bytes copied
            // may contain a 0 byte even if read all pixels
            if(get_byte(pb))
                url_fseek(pb, -1, SEEK_CUR);
            break;
        }
        if(bytes_copied > npixels)
            goto fail;
    } while(code);

    // copy data into packet
    if(av_new_packet(pkt, vidbuf_nbytes) < 0)
        goto fail;
    memcpy(pkt->data, vidbuf_start, vidbuf_nbytes);
    av_free(vidbuf_start);

    pkt->pos = position;
    pkt->stream_index = 0;  // use the video decoder, which was initialized as the first stream
    pkt->pts = vid->video_pts;

    vid->video_pts += vid->bethsoft_global_delay + delay;

    vid->nframes--;  // used to check if all the frames were read
    return vidbuf_nbytes;
fail:
    av_free(vidbuf_start);
    return -1;
}
Example #15
File: ffjni.c Project: aopi1125/FFmpeg
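/* Resolve the Java classes, fields and methods described by 'jfields_mapping'
 * into jclass/jfieldID/jmethodID handles stored in 'jfields', resetting them
 * all on failure so no references leak. */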
int ff_jni_init_jfields(JNIEnv *env, void *jfields, const struct FFJniField *jfields_mapping, int global, void *log_ctx)
{
    int i, ret = 0;
    jclass last_clazz = NULL;

    for (i = 0; jfields_mapping[i].name; i++) {
        int mandatory = jfields_mapping[i].mandatory;
        enum FFJniFieldType type = jfields_mapping[i].type;

        if (type == FF_JNI_CLASS) {
            jclass clazz;

            last_clazz = NULL;

            clazz = (*env)->FindClass(env, jfields_mapping[i].name);
            if ((ret = ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) {
                goto done;
            }

            last_clazz = *(jclass*)((uint8_t*)jfields + jfields_mapping[i].offset) =
                    global ? (*env)->NewGlobalRef(env, clazz) : clazz;
        } else {

            if (!last_clazz) {
                ret = AVERROR_EXTERNAL;
                break;
            }

            switch(type) {
            case FF_JNI_FIELD: {
                jfieldID field_id = (*env)->GetFieldID(env, last_clazz, jfields_mapping[i].method, jfields_mapping[i].signature);
                if ((ret = ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) {
                    goto done;
                }

                *(jfieldID*)((uint8_t*)jfields + jfields_mapping[i].offset) = field_id;
                break;
            }
            case FF_JNI_STATIC_FIELD: {
                jfieldID field_id = (*env)->GetStaticFieldID(env, last_clazz, jfields_mapping[i].method, jfields_mapping[i].signature);
                if ((ret = ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) {
                    goto done;
                }

                *(jfieldID*)((uint8_t*)jfields + jfields_mapping[i].offset) = field_id;
                break;
            }
            case FF_JNI_METHOD: {
                jmethodID method_id = (*env)->GetMethodID(env, last_clazz, jfields_mapping[i].method, jfields_mapping[i].signature);
                if ((ret = ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) {
                    goto done;
                }

                *(jmethodID*)((uint8_t*)jfields + jfields_mapping[i].offset) = method_id;
                break;
            }
            case FF_JNI_STATIC_METHOD: {
                jmethodID method_id = (*env)->GetStaticMethodID(env, last_clazz, jfields_mapping[i].method, jfields_mapping[i].signature);
                if ((ret = ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) {
                    goto done;
                }

                *(jmethodID*)((uint8_t*)jfields + jfields_mapping[i].offset) = method_id;
                break;
            }
            default:
                av_log(log_ctx, AV_LOG_ERROR, "Unknown JNI field type\n");
                ret = AVERROR(EINVAL);
                goto done;
            }
        }
    }

done:
    if (ret < 0) {
        /* reset jfields in case of failure so it does not leak references */
        ff_jni_reset_jfields(env, jfields, jfields_mapping, global, log_ctx);
    }

    return ret;
}
Example #16
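/* Open and configure an ALSA PCM device: set access type, sample format,
 * rate, channel count and buffer/period sizes, and store the handle in the
 * device context. */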
av_cold int ff_alsa_open(AVFormatContext *ctx, snd_pcm_stream_t mode,
                         unsigned int *sample_rate,
                         int channels, enum CodecID *codec_id)
{
    AlsaData *s = ctx->priv_data;
    const char *audio_device;
    int res, flags = 0;
    snd_pcm_format_t format;
    snd_pcm_t *h;
    snd_pcm_hw_params_t *hw_params;
    snd_pcm_uframes_t buffer_size, period_size;

    if (ctx->filename[0] == 0) audio_device = "default";
    else                       audio_device = ctx->filename;

    if (*codec_id == CODEC_ID_NONE)
        *codec_id = DEFAULT_CODEC_ID;
    format = codec_id_to_pcm_format(*codec_id);
    if (format == SND_PCM_FORMAT_UNKNOWN) {
        av_log(ctx, AV_LOG_ERROR, "sample format 0x%04x is not supported\n", *codec_id);
        return AVERROR(ENOSYS);
    }
    s->frame_size = av_get_bits_per_sample(*codec_id) / 8 * channels;

    if (ctx->flags & AVFMT_FLAG_NONBLOCK) {
        flags = SND_PCM_NONBLOCK;
    }
    res = snd_pcm_open(&h, audio_device, mode, flags);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot open audio device %s (%s)\n",
               audio_device, snd_strerror(res));
        return AVERROR(EIO);
    }

    res = snd_pcm_hw_params_malloc(&hw_params);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot allocate hardware parameter structure (%s)\n",
               snd_strerror(res));
        goto fail1;
    }

    res = snd_pcm_hw_params_any(h, hw_params);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot initialize hardware parameter structure (%s)\n",
               snd_strerror(res));
        goto fail;
    }

    res = snd_pcm_hw_params_set_access(h, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot set access type (%s)\n",
               snd_strerror(res));
        goto fail;
    }

    res = snd_pcm_hw_params_set_format(h, hw_params, format);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot set sample format 0x%04x %d (%s)\n",
               *codec_id, format, snd_strerror(res));
        goto fail;
    }

    res = snd_pcm_hw_params_set_rate_near(h, hw_params, sample_rate, 0);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot set sample rate (%s)\n",
               snd_strerror(res));
        goto fail;
    }

    res = snd_pcm_hw_params_set_channels(h, hw_params, channels);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot set channel count to %d (%s)\n",
               channels, snd_strerror(res));
        goto fail;
    }

    snd_pcm_hw_params_get_buffer_size_max(hw_params, &buffer_size);
    /* TODO: maybe use ctx->max_picture_buffer somehow */
    res = snd_pcm_hw_params_set_buffer_size_near(h, hw_params, &buffer_size);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot set ALSA buffer size (%s)\n",
               snd_strerror(res));
        goto fail;
    }

    snd_pcm_hw_params_get_period_size_min(hw_params, &period_size, NULL);
    res = snd_pcm_hw_params_set_period_size_near(h, hw_params, &period_size, NULL);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot set ALSA period size (%s)\n",
               snd_strerror(res));
        goto fail;
    }
    s->period_size = period_size;

    res = snd_pcm_hw_params(h, hw_params);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot set parameters (%s)\n",
               snd_strerror(res));
        goto fail;
    }

    snd_pcm_hw_params_free(hw_params);

    s->h = h;
    return 0;

fail:
    snd_pcm_hw_params_free(hw_params);
fail1:
    snd_pcm_close(h);
    return AVERROR(EIO);
}
Example #17
File: bink.c Project: simock85/libav
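/* Demux one Bink frame: emit one packet per audio track first, then return
 * the remaining data of the frame as the video packet. */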
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    BinkDemuxContext *bink = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret;

    if (bink->current_track < 0) {
        int index_entry;
        AVStream *st = s->streams[0]; // stream 0 is video stream with index

        if (bink->video_pts >= st->duration)
            return AVERROR(EIO);

        index_entry = av_index_search_timestamp(st, bink->video_pts,
                                                AVSEEK_FLAG_ANY);
        if (index_entry < 0) {
            av_log(s, AV_LOG_ERROR,
                   "could not find index entry for frame %"PRId64"\n",
                   bink->video_pts);
            return AVERROR(EIO);
        }

        bink->remain_packet_size = st->index_entries[index_entry].size;
        bink->current_track = 0;
    }

    while (bink->current_track < bink->num_audio_tracks) {
        uint32_t audio_size = avio_rl32(pb);
        if (audio_size > bink->remain_packet_size - 4) {
            av_log(s, AV_LOG_ERROR,
                   "frame %"PRId64": audio size in header (%u) > size of packet left (%u)\n",
                   bink->video_pts, audio_size, bink->remain_packet_size);
            return AVERROR(EIO);
        }
        bink->remain_packet_size -= 4 + audio_size;
        bink->current_track++;
        if (audio_size >= 4) {
            /* get one audio packet per track */
            if ((ret = av_get_packet(pb, pkt, audio_size)) < 0)
                return ret;
            pkt->stream_index = bink->current_track;
            pkt->pts = bink->audio_pts[bink->current_track - 1];

            /* Each audio packet reports the number of decompressed samples
               (in bytes). We use this value to calculate the audio PTS */
            if (pkt->size >= 4)
                bink->audio_pts[bink->current_track -1] +=
                    AV_RL32(pkt->data) / (2 * s->streams[bink->current_track]->codec->channels);
            return 0;
        } else {
            avio_skip(pb, audio_size);
        }
    }

    /* get video packet */
    if ((ret = av_get_packet(pb, pkt, bink->remain_packet_size)) < 0)
        return ret;
    pkt->stream_index = 0;
    pkt->pts = bink->video_pts++;
    pkt->flags |= AV_PKT_FLAG_KEY;

    /* -1 instructs the next call to read_packet() to read the next frame */
    bink->current_track = -1;

    return 0;
}
Example #18
File: v4l.c Project: apeliom/tikitv
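/* Probe and configure a Video4Linux capture device: pick a palette/depth
 * matching the requested pixel format, set the TV standard, unmute audio and
 * choose between mmap and read-based capture. */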
static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
    VideoData *s = s1->priv_data;
    AVStream *st;
    int width, height;
    int video_fd, frame_size;
    int ret, frame_rate, frame_rate_base;
    int desired_palette, desired_depth;
    struct video_tuner tuner;
    struct video_audio audio;
    struct video_picture pict;
    int j;
    int vformat_num = sizeof(video_formats) / sizeof(video_formats[0]);

    if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0) {
        av_log(s1, AV_LOG_ERROR, "Bad capture size (%dx%d) or wrong time base (%d)\n",
            ap->width, ap->height, ap->time_base.den);

        return -1;
    }

    width = ap->width;
    height = ap->height;
    frame_rate      = ap->time_base.den;
    frame_rate_base = ap->time_base.num;

    if((unsigned)width > 32767 || (unsigned)height > 32767) {
        av_log(s1, AV_LOG_ERROR, "Capture size is out of range: %dx%d\n",
            width, height);

        return -1;
    }

    st = av_new_stream(s1, 0);
    if (!st)
        return AVERROR(ENOMEM);
    av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    s->width = width;
    s->height = height;
    s->frame_rate      = frame_rate;
    s->frame_rate_base = frame_rate_base;

    video_fd = open(s1->filename, O_RDWR);
    if (video_fd < 0) {
        av_log(s1, AV_LOG_ERROR, "%s: %s\n", s1->filename, strerror(errno));
        goto fail;
    }

    if (ioctl(video_fd,VIDIOCGCAP, &s->video_cap) < 0) {
        av_log(s1, AV_LOG_ERROR, "VIDIOCGCAP: %s\n", strerror(errno));
        goto fail;
    }

    if (!(s->video_cap.type & VID_TYPE_CAPTURE)) {
        av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not handle capture\n");
        goto fail;
    }

    desired_palette = -1;
    desired_depth = -1;
    for (j = 0; j < vformat_num; j++) {
        if (ap->pix_fmt == video_formats[j].pix_fmt) {
            desired_palette = video_formats[j].palette;
            desired_depth = video_formats[j].depth;
            break;
        }
    }

    /* set tv standard */
    if (ap->standard && !ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
        if (!strcasecmp(ap->standard, "pal"))
            tuner.mode = VIDEO_MODE_PAL;
        else if (!strcasecmp(ap->standard, "secam"))
            tuner.mode = VIDEO_MODE_SECAM;
        else
            tuner.mode = VIDEO_MODE_NTSC;
        ioctl(video_fd, VIDIOCSTUNER, &tuner);
    }

    /* unmute audio */
    audio.audio = 0;
    ioctl(video_fd, VIDIOCGAUDIO, &audio);
    memcpy(&s->audio_saved, &audio, sizeof(audio));
    audio.flags &= ~VIDEO_AUDIO_MUTE;
    ioctl(video_fd, VIDIOCSAUDIO, &audio);

    ioctl(video_fd, VIDIOCGPICT, &pict);
#if 0
    printf("v4l: colour=%d hue=%d brightness=%d constrast=%d whiteness=%d\n",
           pict.colour,
           pict.hue,
           pict.brightness,
           pict.contrast,
           pict.whiteness);
#endif
    /* try to choose a suitable video format */
    pict.palette = desired_palette;
    pict.depth = desired_depth;
    if (desired_palette == -1 || (ret = ioctl(video_fd, VIDIOCSPICT, &pict)) < 0) {
        for (j = 0; j < vformat_num; j++) {
            pict.palette = video_formats[j].palette;
            pict.depth = video_formats[j].depth;
            if (-1 != ioctl(video_fd, VIDIOCSPICT, &pict))
                break;
        }
        if (j >= vformat_num)
            goto fail1;
    }

    ret = ioctl(video_fd,VIDIOCGMBUF,&s->gb_buffers);
    if (ret < 0) {
        /* try to use read based access */
        struct video_window win;
        int val;

        win.x = 0;
        win.y = 0;
        win.width = width;
        win.height = height;
        win.chromakey = -1;
        win.flags = 0;

        ioctl(video_fd, VIDIOCSWIN, &win);

        s->frame_format = pict.palette;

        val = 1;
        ioctl(video_fd, VIDIOCCAPTURE, &val);

        s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;
        s->use_mmap = 0;
    } else {
        s->video_buf = mmap(0, s->gb_buffers.size, PROT_READ | PROT_WRITE, MAP_SHARED, video_fd, 0);
        if ((unsigned char*)-1 == s->video_buf) {
            s->video_buf = mmap(0, s->gb_buffers.size, PROT_READ | PROT_WRITE, MAP_PRIVATE, video_fd, 0);
            if ((unsigned char*)-1 == s->video_buf) {
                av_log(s1, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
                goto fail;
            }
        }
        s->gb_frame = 0;
        s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;

        /* start to grab the first frame */
        s->gb_buf.frame = s->gb_frame % s->gb_buffers.frames;
        s->gb_buf.height = height;
        s->gb_buf.width = width;
        s->gb_buf.format = pict.palette;

        ret = ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
        if (ret < 0) {
            if (errno != EAGAIN) {
            fail1:
                av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not support suitable format\n");
            } else {
                av_log(s1, AV_LOG_ERROR,"Fatal: grab device does not receive any video signal\n");
            }
            goto fail;
        }
        for (j = 1; j < s->gb_buffers.frames; j++) {
            s->gb_buf.frame = j;
            ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
        }
        s->frame_format = s->gb_buf.format;
        s->use_mmap = 1;
    }

    for (j = 0; j < vformat_num; j++) {
        if (s->frame_format == video_formats[j].palette) {
            frame_size = width * height * video_formats[j].depth / 8;
            st->codec->pix_fmt = video_formats[j].pix_fmt;
            break;
        }
    }

    if (j >= vformat_num)
        goto fail;

    s->fd = video_fd;
    s->frame_size = frame_size;

    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_RAWVIDEO;
    st->codec->width = width;
    st->codec->height = height;
    st->codec->time_base.den      = frame_rate;
    st->codec->time_base.num = frame_rate_base;
    st->codec->bit_rate = frame_size * 1/av_q2d(st->codec->time_base) * 8;

    return 0;
 fail:
    if (video_fd >= 0)
        close(video_fd);
    av_free(st);
    return AVERROR(EIO);
}
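grab_read_header() relies on a video_formats[] table that is not reproduced here; one plausible shape of that table, mapping V4L1 palettes to FFmpeg pixel formats, is sketched below (the exact entries and pixel-format names in the original file may differ).

static const struct {
    int palette;                 /* V4L1 VIDEO_PALETTE_* constant           */
    int depth;                   /* bits per pixel, used to size the frame  */
    enum PixelFormat pix_fmt;    /* matching FFmpeg pixel format            */
} video_formats[] = {
    { VIDEO_PALETTE_YUV420P, 12, PIX_FMT_YUV420P },
    { VIDEO_PALETTE_YUV422,  16, PIX_FMT_YUYV422 },
    { VIDEO_PALETTE_UYVY,    16, PIX_FMT_UYVY422 },
    { VIDEO_PALETTE_RGB24,   24, PIX_FMT_BGR24   },
};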
Example #19
0
bool CFFmpegImage::LoadImageFromMemory(unsigned char* buffer, unsigned int bufSize,
                                       unsigned int width, unsigned int height)
{

    uint8_t* fbuffer = (uint8_t*)av_malloc(FFMPEG_FILE_BUFFER_SIZE);
    if (!fbuffer)
    {
        CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate FFMPEG_FILE_BUFFER_SIZE");
        return false;
    }
    MemBuffer buf;
    buf.data = buffer;
    buf.size = bufSize;
    buf.pos = 0;

    AVIOContext* ioctx = avio_alloc_context(fbuffer, FFMPEG_FILE_BUFFER_SIZE, 0, &buf,
                                            mem_file_read, NULL, mem_file_seek);

    if (!ioctx)
    {
        av_free(fbuffer);
        CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVIOContext");
        return false;
    }

    AVFormatContext* fctx = avformat_alloc_context();
    if (!fctx)
    {
        av_free(ioctx->buffer);
        av_free(ioctx);
        CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVFormatContext");
        return false;
    }

    fctx->pb = ioctx;
    ioctx->max_packet_size = FFMPEG_FILE_BUFFER_SIZE;

    // Some clients have PNGs saved as JPEG, or request PNG for data that is
    // actually JPEG. MythTV discards all mime types and hands us
    // application/octet-stream, so this is a poor man's fallback to at least
    // identify PNG / JPEG / TIFF from the magic bytes.
    bool is_jpeg = (bufSize > 2 && buffer[0] == 0xFF && buffer[1] == 0xD8 && buffer[2] == 0xFF);
    bool is_png = (bufSize > 3 && buffer[1] == 'P' && buffer[2] == 'N' && buffer[3] == 'G');
    bool is_tiff = (bufSize > 2 && buffer[0] == 'I' && buffer[1] == 'I' && buffer[2] == '*');

    AVInputFormat* inp = nullptr;
    if (is_jpeg)
        inp = av_find_input_format("jpeg_pipe");
    else if (is_png)
        inp = av_find_input_format("png_pipe");
    else if (is_tiff)
        inp = av_find_input_format("tiff_pipe");
    else if (m_strMimeType == "image/jp2")
        inp = av_find_input_format("j2k_pipe");
    else if (m_strMimeType == "image/webp")
        inp = av_find_input_format("webp_pipe");
    // fall back to the declared mime type if the magic-byte checks above failed
    else if (m_strMimeType == "image/jpeg" || m_strMimeType == "image/jpg")
        inp = av_find_input_format("jpeg_pipe");
    else if (m_strMimeType == "image/png")
        inp = av_find_input_format("png_pipe");
    else if (m_strMimeType == "image/tiff")
        inp = av_find_input_format("tiff_pipe");

    if (avformat_open_input(&fctx, "", inp, NULL) < 0)
    {
        CLog::Log(LOGERROR, "Could not find suitable input format: %s", m_strMimeType.c_str());
        avformat_close_input(&fctx);
        FreeIOCtx(ioctx);
        return false;
    }

    AVCodecContext* codec_ctx = fctx->streams[0]->codec;
    AVCodec* codec = avcodec_find_decoder(codec_ctx->codec_id);
    if (avcodec_open2(codec_ctx, codec, NULL) < 0)
    {
        avformat_close_input(&fctx);
        FreeIOCtx(ioctx);
        return false;
    }

    AVPacket pkt;
    AVFrame* frame = av_frame_alloc();
    av_read_frame(fctx, &pkt);
    int frame_decoded;
    int ret = avcodec_decode_video2(codec_ctx, frame, &frame_decoded, &pkt);
    if (ret < 0)
        CLog::Log(LOGDEBUG, "Error [%d] while decoding frame: %s\n", ret, strerror(AVERROR(ret)));

    if (frame_decoded != 0)
    {
        av_frame_free(&m_pFrame);
        m_pFrame = av_frame_clone(frame);

        if (m_pFrame)
        {
            m_height = m_pFrame->height;
            m_width = m_pFrame->width;
            m_originalWidth = m_width;
            m_originalHeight = m_height;

            const AVPixFmtDescriptor* pixDescriptor = av_pix_fmt_desc_get(static_cast<AVPixelFormat>(m_pFrame->format));
            if (pixDescriptor && ((pixDescriptor->flags & (AV_PIX_FMT_FLAG_ALPHA | AV_PIX_FMT_FLAG_PAL)) != 0))
                m_hasAlpha = true;
        }
        else
        {
            CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate a picture data buffer");
            frame_decoded = 0;
        }
    }
    else
        CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not decode a frame");

    av_frame_free(&frame);
    av_free_packet(&pkt);
    avcodec_close(codec_ctx);
    avformat_close_input(&fctx);
    FreeIOCtx(ioctx);

    return (frame_decoded != 0);
}
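LoadImageFromMemory() passes avio_alloc_context() two callbacks, mem_file_read and mem_file_seek, whose bodies are not shown; a minimal sketch of what such callbacks could look like follows, written in plain C against the MemBuffer fields used above (the project's actual implementations may differ).

static int mem_file_read(void *opaque, uint8_t *buf, int buf_size)
{
    MemBuffer *mbuf = (MemBuffer*)opaque;
    int64_t remaining = (int64_t)mbuf->size - (int64_t)mbuf->pos;

    if (remaining <= 0)
        return AVERROR_EOF;
    if (buf_size > remaining)
        buf_size = remaining;
    memcpy(buf, mbuf->data + mbuf->pos, buf_size);
    mbuf->pos += buf_size;
    return buf_size;
}

static int64_t mem_file_seek(void *opaque, int64_t pos, int whence)
{
    MemBuffer *mbuf = (MemBuffer*)opaque;

    if (whence == AVSEEK_SIZE)          /* report total size if asked */
        return mbuf->size;
    if (whence == SEEK_CUR)
        pos += mbuf->pos;
    else if (whence == SEEK_END)
        pos += mbuf->size;
    if (pos < 0 || pos > (int64_t)mbuf->size)
        return AVERROR(EINVAL);
    mbuf->pos = pos;
    return pos;
}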
Example #20
0
static int vp8_handle_packet(AVFormatContext *ctx,
                             PayloadContext *vp8,
                             AVStream *st,
                             AVPacket *pkt,
                             uint32_t *timestamp,
                             const uint8_t *buf,
                             int len, int flags)
{
    int start_packet, end_packet, has_au, ret = AVERROR(EAGAIN);

    if (!buf) {
        // only called when vp8_handle_packet returns 1
        if (!vp8->data) {
            av_log(ctx, AV_LOG_ERROR, "Invalid VP8 data passed\n");
            return AVERROR_INVALIDDATA;
        }
        prepare_packet(pkt, vp8, st->index);
        *timestamp = vp8->timestamp;
        return 0;
    }

    start_packet = *buf & 1;
    end_packet   = flags & RTP_FLAG_MARKER;
    has_au       = *buf & 2;
    buf++;
    len--;

    if (start_packet) {
        int res;
        uint32_t ts = *timestamp;
        if (vp8->data) {
            // missing end marker; return old frame anyway. untested
            prepare_packet(pkt, vp8, st->index);
            *timestamp = vp8->timestamp; // reset timestamp from old frame

            // if current frame fits into one rtp packet, need to hold
            // that for the next av_get_packet call
            ret = end_packet ? 1 : 0;
        }
        if ((res = avio_open_dyn_buf(&vp8->data)) < 0)
            return res;
        vp8->is_keyframe = *buf & 1;
        vp8->timestamp   = ts;
    }

    if (!vp8->data || (vp8->timestamp != *timestamp && ret == AVERROR(EAGAIN))) {
        av_log(ctx, AV_LOG_WARNING,
               "Received no start marker; dropping frame\n");
        return AVERROR(EAGAIN);
    }

    // cycle through VP8AU headers if needed
    // not tested with actual VP8AUs
    while (len) {
        int au_len = len;
        if (has_au && len > 2) {
            au_len = AV_RB16(buf);
            buf += 2;
            len -= 2;
            if (au_len > len) {
                av_log(ctx, AV_LOG_ERROR, "Invalid VP8AU length\n");
                return AVERROR_INVALIDDATA;
            }
        }

        avio_write(vp8->data, buf, au_len);
        buf += au_len;
        len -= au_len;
    }

    if (ret != AVERROR(EAGAIN)) // did we miss an end marker?
        return ret;

    if (end_packet) {
        prepare_packet(pkt, vp8, st->index);
        return 0;
    }

    return AVERROR(EAGAIN);
}
Example #21
0
File: vf_psnr.c Project: BossKing/FFmpeg
static int config_input_ref(AVFilterLink *inlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    AVFilterContext *ctx  = inlink->dst;
    PSNRContext *s = ctx->priv;
    int j;

    s->nb_components = desc->nb_components;
    if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
        ctx->inputs[0]->h != ctx->inputs[1]->h) {
        av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
        return AVERROR(EINVAL);
    }
    if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
        av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n");
        return AVERROR(EINVAL);
    }

    switch (inlink->format) {
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_GRAY16:
    case AV_PIX_FMT_GBRP:
    case AV_PIX_FMT_GBRP9:
    case AV_PIX_FMT_GBRP10:
    case AV_PIX_FMT_GBRP12:
    case AV_PIX_FMT_GBRP14:
    case AV_PIX_FMT_GBRP16:
    case AV_PIX_FMT_GBRAP:
    case AV_PIX_FMT_GBRAP16:
    case AV_PIX_FMT_YUVJ411P:
    case AV_PIX_FMT_YUVJ420P:
    case AV_PIX_FMT_YUVJ422P:
    case AV_PIX_FMT_YUVJ440P:
    case AV_PIX_FMT_YUVJ444P:
        s->max[0] = (1 << (desc->comp[0].depth_minus1 + 1)) - 1;
        s->max[1] = (1 << (desc->comp[1].depth_minus1 + 1)) - 1;
        s->max[2] = (1 << (desc->comp[2].depth_minus1 + 1)) - 1;
        s->max[3] = (1 << (desc->comp[3].depth_minus1 + 1)) - 1;
        break;
    default:
        s->max[0] = 235 * (1 << (desc->comp[0].depth_minus1 - 7));
        s->max[1] = 240 * (1 << (desc->comp[1].depth_minus1 - 7));
        s->max[2] = 240 * (1 << (desc->comp[2].depth_minus1 - 7));
        s->max[3] = (1 << (desc->comp[3].depth_minus1 + 1)) - 1;
    }

    s->is_rgb = ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0;
    s->comps[0] = s->is_rgb ? 'r' : 'y' ;
    s->comps[1] = s->is_rgb ? 'g' : 'u' ;
    s->comps[2] = s->is_rgb ? 'b' : 'v' ;
    s->comps[3] = 'a';

    for (j = 0; j < s->nb_components; j++)
        s->average_max += s->max[j];
    s->average_max /= s->nb_components;

    s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planewidth[1]  = s->planewidth[2]  = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;

    s->compute_mse = desc->comp[0].depth_minus1 > 7 ? compute_images_mse_16bit : compute_images_mse;

    return 0;
}
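The per-component maxima filled in above feed the usual PSNR formula, PSNR = 10 * log10(max^2 / MSE); an illustrative helper (not part of vf_psnr.c) is sketched below.

/* Illustrative only; needs <math.h>. Converts a mean squared error into a
 * PSNR value in dB, given the maximum representable sample value. */
static inline double mse_to_psnr(double mse, double max_value)
{
    if (mse <= 0.0)
        return INFINITY;               /* identical planes: PSNR is unbounded */
    return 10.0 * log10(max_value * max_value / mse);
}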
Example #22
0
static int read_part_of_packet(AVFormatContext *s, int64_t *pts,
                               int *len, int *strid, int read_packet) {
    ByteIOContext *pb = s->pb;
    PVAContext *pvactx = s->priv_data;
    int syncword, streamid, reserved, flags, length, pts_flag;
    int64_t pva_pts = AV_NOPTS_VALUE, startpos;

recover:
    startpos = url_ftell(pb);

    syncword = get_be16(pb);
    streamid = get_byte(pb);
    get_byte(pb);               /* counter not used */
    reserved = get_byte(pb);
    flags    = get_byte(pb);
    length   = get_be16(pb);

    pts_flag = flags & 0x10;

    if (syncword != PVA_MAGIC) {
        pva_log(s, AV_LOG_ERROR, "invalid syncword\n");
        return AVERROR(EIO);
    }
    if (streamid != PVA_VIDEO_PAYLOAD && streamid != PVA_AUDIO_PAYLOAD) {
        pva_log(s, AV_LOG_ERROR, "invalid streamid\n");
        return AVERROR(EIO);
    }
    if (reserved != 0x55) {
        pva_log(s, AV_LOG_WARNING, "expected reserved byte to be 0x55\n");
    }
    if (length > PVA_MAX_PAYLOAD_LENGTH) {
        pva_log(s, AV_LOG_ERROR, "invalid payload length %u\n", length);
        return AVERROR(EIO);
    }

    if (streamid == PVA_VIDEO_PAYLOAD && pts_flag) {
        pva_pts = get_be32(pb);
        length -= 4;
    } else if (streamid == PVA_AUDIO_PAYLOAD) {
        /* PVA Audio Packets either start with a signaled PES packet or
         * are a continuation of the previous PES packet. New PES packets
         * always start at the beginning of a PVA Packet, never somewhere in
         * the middle. */
        if (!pvactx->continue_pes) {
            int pes_signal, pes_header_data_length, pes_packet_length,
                pes_flags;
            unsigned char pes_header_data[256];

            pes_signal             = get_be24(pb);
            get_byte(pb);
            pes_packet_length      = get_be16(pb);
            pes_flags              = get_be16(pb);
            pes_header_data_length = get_byte(pb);

            if (pes_signal != 1) {
                pva_log(s, AV_LOG_WARNING, "expected signaled PES packet, "
                                          "trying to recover\n");
                url_fskip(pb, length - 9);
                if (!read_packet)
                    return AVERROR(EIO);
                goto recover;
            }

            get_buffer(pb, pes_header_data, pes_header_data_length);
            length -= 9 + pes_header_data_length;

            pes_packet_length -= 3 + pes_header_data_length;

            pvactx->continue_pes = pes_packet_length;

            if (pes_flags & 0x80 && (pes_header_data[0] & 0xf0) == 0x20)
                pva_pts = ff_parse_pes_pts(pes_header_data);
        }

        pvactx->continue_pes -= length;

        if (pvactx->continue_pes < 0) {
            pva_log(s, AV_LOG_WARNING, "audio data corruption\n");
            pvactx->continue_pes = 0;
        }
    }

    if (pva_pts != AV_NOPTS_VALUE)
        av_add_index_entry(s->streams[streamid-1], startpos, pva_pts, 0, 0, AVINDEX_KEYFRAME);

    *pts   = pva_pts;
    *len   = length;
    *strid = streamid;
    return 0;
}
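ff_parse_pes_pts(), used above to extract the timestamp from the PES header, unpacks a 33-bit PTS spread over 5 bytes with interleaved marker bits; a sketch along those lines is shown below (illustrative, not necessarily the exact upstream definition).

/* Bits 32..30 of the PTS live in byte 0, bits 29..15 in bytes 1-2 and
 * bits 14..0 in bytes 3-4; each group is followed by a marker bit that
 * must be skipped. */
static int64_t parse_pes_pts(const uint8_t *buf)
{
    return (int64_t)(*buf & 0x0e) << 29 |
           (int64_t)(AV_RB16(buf + 1) >> 1) << 15 |
                     AV_RB16(buf + 3) >> 1;
}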
Example #23
0
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ATADenoiseContext *s = ctx->priv;
    AVFrame *out, *in;
    int i;

    if (s->q.available != s->size) {
        if (s->q.available < s->mid) {
            out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
            if (!out)
                return AVERROR(ENOMEM);

            for (i = 0; i < s->mid; i++)
                ff_bufqueue_add(ctx, &s->q, av_frame_clone(out));
            av_frame_free(&out);
        }
        if (s->q.available < s->size) {
            ff_bufqueue_add(ctx, &s->q, buf);
            s->available++;
        }
        return 0;
    }

    in = ff_bufqueue_peek(&s->q, s->mid);

    if (!ctx->is_disabled) {
        ThreadData td;

        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&buf);
            return AVERROR(ENOMEM);
        }

        for (i = 0; i < s->size; i++) {
            AVFrame *frame = ff_bufqueue_peek(&s->q, i);

            s->data[0][i] = frame->data[0];
            s->data[1][i] = frame->data[1];
            s->data[2][i] = frame->data[2];
            s->linesize[0][i] = frame->linesize[0];
            s->linesize[1][i] = frame->linesize[1];
            s->linesize[2][i] = frame->linesize[2];
        }

        td.in = in; td.out = out;
        ctx->internal->execute(ctx, s->filter_slice, &td, NULL,
                               FFMIN3(s->planeheight[1],
                                      s->planeheight[2],
                                      ctx->graph->nb_threads));
        av_frame_copy_props(out, in);
    } else {
        out = av_frame_clone(in);
        if (!out) {
            av_frame_free(&buf);
            return AVERROR(ENOMEM);
        }
    }

    in = ff_bufqueue_get(&s->q);
    av_frame_free(&in);
    ff_bufqueue_add(ctx, &s->q, buf);

    return ff_filter_frame(outlink, out);
}
Example #24
0
static int roq_encode_video(RoqContext *enc)
{
    RoqTempdata *tempData = enc->tmpData;
    int i;

    memset(tempData, 0, sizeof(*tempData));

    create_cel_evals(enc, tempData);

    generate_new_codebooks(enc, tempData);

    if (enc->framesSinceKeyframe >= 1) {
        motion_search(enc, 8);
        motion_search(enc, 4);
    }

 retry_encode:
    for (i=0; i<enc->width*enc->height/64; i++)
        gather_data_for_cel(tempData->cel_evals + i, enc, tempData);

    /* Quake 3 can't handle chunks bigger than 65535 bytes */
    if (tempData->mainChunkSize/8 > 65535 && enc->quake3_compat) {
        if (enc->lambda > 100000) {
            av_log(enc->avctx, AV_LOG_ERROR, "Cannot encode video in Quake compatible form\n");
            return AVERROR(EINVAL);
        }
        av_log(enc->avctx, AV_LOG_ERROR,
               "Warning, generated a frame too big for Quake (%d > 65535), "
               "now switching to a bigger qscale value.\n",
               tempData->mainChunkSize/8);
        enc->lambda *= 1.5;
        tempData->mainChunkSize = 0;
        memset(tempData->used_option, 0, sizeof(tempData->used_option));
        memset(tempData->codebooks.usedCB4, 0,
               sizeof(tempData->codebooks.usedCB4));
        memset(tempData->codebooks.usedCB2, 0,
               sizeof(tempData->codebooks.usedCB2));

        goto retry_encode;
    }

    remap_codebooks(enc, tempData);

    write_codebooks(enc, tempData);

    reconstruct_and_encode_image(enc, tempData, enc->width, enc->height,
                                 enc->width*enc->height/64);

    enc->avctx->coded_frame = enc->current_frame;

    /* Rotate frame history */
    FFSWAP(AVFrame *, enc->current_frame, enc->last_frame);
    FFSWAP(motion_vect *, enc->last_motion4, enc->this_motion4);
    FFSWAP(motion_vect *, enc->last_motion8, enc->this_motion8);

    av_free(tempData->cel_evals);
    av_free(tempData->closest_cb2);

    enc->framesSinceKeyframe++;

    return 0;
}
Example #25
0
static int read_frame(BVID_DemuxContext *vid, AVIOContext *pb, AVPacket *pkt,
                      uint8_t block_type, AVFormatContext *s)
{
    uint8_t * vidbuf_start = NULL;
    int vidbuf_nbytes = 0;
    int code;
    int bytes_copied = 0;
    int position, duration, npixels;
    unsigned int vidbuf_capacity;
    int ret = 0;
    AVStream *st;

    if (vid->video_index < 0) {
        st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        vid->video_index = st->index;
        if (vid->audio_index < 0) {
            av_log_ask_for_sample(s, "No audio packet before first video "
                                  "packet. Using default video time base.\n");
        }
        avpriv_set_pts_info(st, 64, 185, vid->sample_rate);
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id   = CODEC_ID_BETHSOFTVID;
        st->codec->width      = vid->width;
        st->codec->height     = vid->height;
    }
    st      = s->streams[vid->video_index];
    npixels = st->codec->width * st->codec->height;

    vidbuf_start = av_malloc(vidbuf_capacity = BUFFER_PADDING_SIZE);
    if(!vidbuf_start)
        return AVERROR(ENOMEM);

    // save the file position for the packet, include block type
    position = avio_tell(pb) - 1;

    vidbuf_start[vidbuf_nbytes++] = block_type;

    // get the current packet duration
    duration = vid->bethsoft_global_delay + avio_rl16(pb);

    // set the y offset if it exists (decoder header data should be in data section)
    if(block_type == VIDEO_YOFF_P_FRAME){
        if (avio_read(pb, &vidbuf_start[vidbuf_nbytes], 2) != 2) {
            ret = AVERROR(EIO);
            goto fail;
        }
        vidbuf_nbytes += 2;
    }

    do{
        vidbuf_start = av_fast_realloc(vidbuf_start, &vidbuf_capacity, vidbuf_nbytes + BUFFER_PADDING_SIZE);
        if(!vidbuf_start)
            return AVERROR(ENOMEM);

        code = avio_r8(pb);
        vidbuf_start[vidbuf_nbytes++] = code;

        if(code >= 0x80){ // rle sequence
            if(block_type == VIDEO_I_FRAME)
                vidbuf_start[vidbuf_nbytes++] = avio_r8(pb);
        } else if(code){ // plain sequence
            if (avio_read(pb, &vidbuf_start[vidbuf_nbytes], code) != code) {
                ret = AVERROR(EIO);
                goto fail;
            }
            vidbuf_nbytes += code;
        }
        bytes_copied += code & 0x7F;
        if(bytes_copied == npixels){ // sometimes no stop character is given, need to keep track of bytes copied
            // may contain a 0 byte even if all pixels have been read
            if(avio_r8(pb))
                avio_seek(pb, -1, SEEK_CUR);
            break;
        }
        if (bytes_copied > npixels) {
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }
    } while(code);

    // copy data into packet
    if ((ret = av_new_packet(pkt, vidbuf_nbytes)) < 0)
        goto fail;
    memcpy(pkt->data, vidbuf_start, vidbuf_nbytes);
    av_free(vidbuf_start);

    pkt->pos = position;
    pkt->stream_index = vid->video_index;
    pkt->duration = duration;
    if (block_type == VIDEO_I_FRAME)
        pkt->flags |= AV_PKT_FLAG_KEY;

    /* if there is a new palette available, add it to packet side data */
    if (vid->palette) {
        uint8_t *pdata = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
                                                 BVID_PALETTE_SIZE);
        if (pdata)
            memcpy(pdata, vid->palette, BVID_PALETTE_SIZE);
        av_freep(&vid->palette);
    }

    vid->nframes--;  // used to check if all the frames were read
    return 0;
fail:
    av_free(vidbuf_start);
    return ret;
}
Example #26
0
File: mmst.c Project: csd/ffmpeg
/** Read incoming MMST media, header or command packet. */
static MMSSCPacketType get_tcp_server_response(MMSContext *mms)
{
    int read_result;
    MMSSCPacketType packet_type= -1;

    for(;;) {
        if((read_result= url_read_complete(mms->mms_hd, mms->in_buffer, 8))==8) {
            // handle command packet.
            if(AV_RL32(mms->in_buffer + 4)==0xb00bface) {
                mms->incoming_flags= mms->in_buffer[3];
                read_result= url_read_complete(mms->mms_hd, mms->in_buffer+8, 4);
                if(read_result == 4) {
                    int length_remaining= AV_RL32(mms->in_buffer+8) + 4;
                    int hr;

                    dprintf(NULL, "Length remaining is %d\n", length_remaining);
                    // read the rest of the packet.
                    if (length_remaining < 0
                        || length_remaining > sizeof(mms->in_buffer) - 12) {
                        dprintf(NULL, "Incoming message len %d exceeds buffer len %d\n",
                            length_remaining, sizeof(mms->in_buffer) - 12);
                        return -1;
                    }
                    read_result = url_read_complete(mms->mms_hd, mms->in_buffer + 12,
                                                  length_remaining) ;
                    if (read_result == length_remaining) {
                        packet_type= AV_RL16(mms->in_buffer+36);
                    } else {
                        dprintf(NULL, "read for packet type failed%d!\n", read_result);
                        return -1;
                    }
                    hr = AV_RL32(mms->in_buffer + 40);
                    if (hr) {
                        dprintf(NULL, "The server side send back error code:0x%x\n", hr);
                        return -1;
                    }
                } else {
                    dprintf(NULL, "read for length remaining failed%d!\n", read_result);
                    return -1;
                }
            } else {
                int length_remaining;
                int packet_id_type;
                int tmp;

                // note we cache the first 8 bytes,
                // then fill up the buffer with the others
                tmp                       = AV_RL16(mms->in_buffer + 6);
                length_remaining          = (tmp - 8) & 0xffff;
                mms->incoming_packet_seq  = AV_RL32(mms->in_buffer);
                packet_id_type            = mms->in_buffer[4];
                mms->incoming_flags       = mms->in_buffer[5];

                if (length_remaining < 0
                        || length_remaining > sizeof(mms->in_buffer) - 8) {
                    dprintf(NULL, "Incoming data len %d exceeds buffer len %d\n",
                            length_remaining, sizeof(mms->in_buffer));
                    return -1;
                }
                mms->remaining_in_len    = length_remaining;
                mms->read_in_ptr         = mms->in_buffer;
                read_result= url_read_complete(mms->mms_hd, mms->in_buffer, length_remaining);
                if(read_result != length_remaining) {
                    dprintf(NULL, "read_bytes result: %d asking for %d\n",
                            read_result, length_remaining);
                    return -1;
                } else {
                    // if we successfully read everything.
                    if(packet_id_type == mms->header_packet_id) {
                        packet_type = SC_PKT_ASF_HEADER;
                        // Store the asf header
                        if(!mms->header_parsed) {
                            void *p = av_realloc(mms->asf_header,
                                              mms->asf_header_size
                                              + mms->remaining_in_len);
                            if (!p) {
                                av_freep(&mms->asf_header);
                                return AVERROR(ENOMEM);
                            }
                            mms->asf_header = p;
                            memcpy(mms->asf_header + mms->asf_header_size,
                                                 mms->read_in_ptr,
                                                 mms->remaining_in_len);
                            mms->asf_header_size += mms->remaining_in_len;
                        }
                        // 0x04 means asf header is sent in multiple packets.
                        if (mms->incoming_flags == 0x04)
                            continue;
                    } else if(packet_id_type == mms->packet_id) {
                        packet_type = SC_PKT_ASF_MEDIA;
                    } else {
                        dprintf(NULL, "packet id type %d is old.", packet_id_type);
                        continue;
                    }
                }
            }

            // preprocess some packet type
            if(packet_type == SC_PKT_KEEPALIVE) {
                send_keepalive_packet(mms);
                continue;
            } else if(packet_type == SC_PKT_STREAM_CHANGING) {
                handle_packet_stream_changing_type(mms);
            } else if(packet_type == SC_PKT_ASF_MEDIA) {
                pad_media_packet(mms);
            }
            return packet_type;
        } else {
            if(read_result<0) {
                dprintf(NULL, "Read error (or cancelled) returned %d!\n", read_result);
                packet_type = SC_PKT_CANCEL;
            } else {
                dprintf(NULL, "Read result of zero?!\n");
                packet_type = SC_PKT_NO_DATA;
            }
            return packet_type;
        }
    }
}
Example #27
0
static int rm_read_header(AVFormatContext *s)
{
    RMDemuxContext *rm = s->priv_data;
    AVStream *st;
    AVIOContext *pb = s->pb;
    unsigned int tag;
    int tag_size;
    unsigned int start_time, duration;
    unsigned int data_off = 0, indx_off = 0;
    char buf[128], mime[128];
    int flags = 0;

    tag = avio_rl32(pb);
    if (tag == MKTAG('.', 'r', 'a', 0xfd)) {
        /* very old .ra format */
        return rm_read_header_old(s);
    } else if (tag != MKTAG('.', 'R', 'M', 'F')) {
        return AVERROR(EIO);
    }

    tag_size = avio_rb32(pb);
    avio_skip(pb, tag_size - 8);

    for(;;) {
        if (url_feof(pb))
            return -1;
        tag = avio_rl32(pb);
        tag_size = avio_rb32(pb);
        avio_rb16(pb);
        av_dlog(s, "tag=%c%c%c%c (%08x) size=%d\n",
                (tag      ) & 0xff,
                (tag >>  8) & 0xff,
                (tag >> 16) & 0xff,
                (tag >> 24) & 0xff,
                tag,
                tag_size);
        if (tag_size < 10 && tag != MKTAG('D', 'A', 'T', 'A'))
            return -1;
        switch(tag) {
        case MKTAG('P', 'R', 'O', 'P'):
            /* file header */
            avio_rb32(pb); /* max bit rate */
            avio_rb32(pb); /* avg bit rate */
            avio_rb32(pb); /* max packet size */
            avio_rb32(pb); /* avg packet size */
            avio_rb32(pb); /* nb packets */
            duration = avio_rb32(pb); /* duration */
            s->duration = av_rescale(duration, AV_TIME_BASE, 1000);
            avio_rb32(pb); /* preroll */
            indx_off = avio_rb32(pb); /* index offset */
            data_off = avio_rb32(pb); /* data offset */
            avio_rb16(pb); /* nb streams */
            flags = avio_rb16(pb); /* flags */
            break;
        case MKTAG('C', 'O', 'N', 'T'):
            rm_read_metadata(s, pb, 1);
            break;
        case MKTAG('M', 'D', 'P', 'R'):
            st = avformat_new_stream(s, NULL);
            if (!st)
                return AVERROR(ENOMEM);
            st->id = avio_rb16(pb);
            avio_rb32(pb); /* max bit rate */
            st->codec->bit_rate = avio_rb32(pb); /* bit rate */
            avio_rb32(pb); /* max packet size */
            avio_rb32(pb); /* avg packet size */
            start_time = avio_rb32(pb); /* start time */
            avio_rb32(pb); /* preroll */
            duration = avio_rb32(pb); /* duration */
            st->start_time = start_time;
            st->duration = duration;
            if(duration>0)
                s->duration = AV_NOPTS_VALUE;
            get_str8(pb, buf, sizeof(buf)); /* desc */
            get_str8(pb, mime, sizeof(mime)); /* mimetype */
            st->codec->codec_type = AVMEDIA_TYPE_DATA;
            st->priv_data = ff_rm_alloc_rmstream();
            if (ff_rm_read_mdpr_codecdata(s, s->pb, st, st->priv_data,
                                          avio_rb32(pb), mime) < 0)
                return -1;
            break;
        case MKTAG('D', 'A', 'T', 'A'):
            goto header_end;
        default:
            /* unknown tag: skip it */
            avio_skip(pb, tag_size - 10);
            break;
        }
    }
 header_end:
    rm->nb_packets = avio_rb32(pb); /* number of packets */
    if (!rm->nb_packets && (flags & 4))
        rm->nb_packets = 3600 * 25;
    avio_rb32(pb); /* next data header */

    if (!data_off)
        data_off = avio_tell(pb) - 18;
    if (indx_off && pb->seekable && !(s->flags & AVFMT_FLAG_IGNIDX) &&
        avio_seek(pb, indx_off, SEEK_SET) >= 0) {
        rm_read_index(s);
        avio_seek(pb, data_off + 18, SEEK_SET);
    }

    return 0;
}
Example #28
0
File: mmst.c Project: csd/ffmpeg
static int mms_open(URLContext *h, const char *uri, int flags)
{
    MMSContext *mms;
    int port, err;
    char tcpname[256];

    h->is_streamed = 1;
    mms = h->priv_data = av_mallocz(sizeof(MMSContext));
    if (!h->priv_data)
        return AVERROR(ENOMEM);

    // only for MMS over TCP, so set proto = NULL
    av_url_split(NULL, 0, NULL, 0,
            mms->host, sizeof(mms->host), &port, mms->path,
            sizeof(mms->path), uri);

    if(port<0)
        port = 1755; // default MMS protocol port

    // establish tcp connection.
    ff_url_join(tcpname, sizeof(tcpname), "tcp", NULL, mms->host, port, NULL);
    err = url_open(&mms->mms_hd, tcpname, URL_RDWR);
    if (err)
        goto fail;

    mms->packet_id        = 3;          // default, initial value.
    mms->header_packet_id = 2;          // default, initial value.
    err = mms_safe_send_recv(mms, send_startup_packet, SC_PKT_CLIENT_ACCEPTED);
    if (err)
        goto fail;
    err = mms_safe_send_recv(mms, send_time_test_data, SC_PKT_TIMING_TEST_REPLY);
    if (err)
        goto fail;
    err = mms_safe_send_recv(mms, send_protocol_select, SC_PKT_PROTOCOL_ACCEPTED);
    if (err)
        goto fail;
    err = mms_safe_send_recv(mms, send_media_file_request, SC_PKT_MEDIA_FILE_DETAILS);
    if (err)
        goto fail;
    err = mms_safe_send_recv(mms, send_media_header_request, SC_PKT_HEADER_REQUEST_ACCEPTED);
    if (err)
        goto fail;
    err = mms_safe_send_recv(mms, NULL, SC_PKT_ASF_HEADER);
    if (err)
        goto fail;
    if((mms->incoming_flags != 0x08) && (mms->incoming_flags != 0x0C))
        goto fail;
    err = asf_header_parser(mms);
    if (err) {
        dprintf(NULL, "asf header parsed failed!\n");
        goto fail;
    }
    mms->header_parsed = 1;

    if (!mms->asf_packet_len || !mms->stream_num)
        goto fail;

    dprintf(NULL, "Leaving open (success)\n");
    return 0;
fail:
    mms_close(h);
    dprintf(NULL, "Leaving open (failure: %d)\n", err);
    return err;
}
Example #29
0
static av_cold int encode_init(AVCodecContext *avctx)
{
    WMACodecContext *s = avctx->priv_data;
    int i, flags1, flags2, block_align;
    uint8_t *extradata;

    s->avctx = avctx;

    if (avctx->channels > MAX_CHANNELS) {
        av_log(avctx, AV_LOG_ERROR,
               "too many channels: got %i, need %i or fewer\n",
               avctx->channels, MAX_CHANNELS);
        return AVERROR(EINVAL);
    }

    if (avctx->sample_rate > 48000) {
        av_log(avctx, AV_LOG_ERROR, "sample rate is too high: %d > 48kHz\n",
               avctx->sample_rate);
        return AVERROR(EINVAL);
    }

    if (avctx->bit_rate < 24 * 1000) {
        av_log(avctx, AV_LOG_ERROR,
               "bitrate too low: got %"PRId64", need 24000 or higher\n",
               (int64_t)avctx->bit_rate);
        return AVERROR(EINVAL);
    }

    /* extract flag infos */
    flags1 = 0;
    flags2 = 1;
    if (avctx->codec->id == AV_CODEC_ID_WMAV1) {
        extradata             = av_malloc(4);
        if (!extradata)
            return AVERROR(ENOMEM);
        avctx->extradata_size = 4;
        AV_WL16(extradata, flags1);
        AV_WL16(extradata + 2, flags2);
    } else if (avctx->codec->id == AV_CODEC_ID_WMAV2) {
        extradata             = av_mallocz(10);
        if (!extradata)
            return AVERROR(ENOMEM);
        avctx->extradata_size = 10;
        AV_WL32(extradata, flags1);
        AV_WL16(extradata + 4, flags2);
    } else {
        av_assert0(0);
    }
    avctx->extradata          = extradata;
    s->use_exp_vlc            = flags2 & 0x0001;
    s->use_bit_reservoir      = flags2 & 0x0002;
    s->use_variable_block_len = flags2 & 0x0004;
    if (avctx->channels == 2)
        s->ms_stereo = 1;

    ff_wma_init(avctx, flags2);

    /* init MDCT */
    for (i = 0; i < s->nb_block_sizes; i++)
        ff_mdct_init(&s->mdct_ctx[i], s->frame_len_bits - i + 1, 0, 1.0);

    block_align        = avctx->bit_rate * (int64_t) s->frame_len /
                         (avctx->sample_rate * 8);
    block_align        = FFMIN(block_align, MAX_CODED_SUPERFRAME_SIZE);
    avctx->block_align = block_align;
    avctx->frame_size = avctx->initial_padding = s->frame_len;

    return 0;
}
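A worked instance of the block_align derivation above, with illustrative figures only: at 64 kb/s, a 44100 Hz sample rate and a frame length of 2048 samples, block_align = 64000 * 2048 / (44100 * 8) = 371 bytes per coded frame, well below MAX_CODED_SUPERFRAME_SIZE.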
Example #30
0
static int http_connect(URLContext *h, const char *path, const char *hoststr,
                        const char *auth, int *new_location)
{
    HTTPContext *s = h->priv_data;
    int post, err;
    char line[1024];
    char headers[1024] = "";
    char *authstr = NULL;
    int64_t off = s->off;
    int len = 0;


    /* send http header */
    post = h->flags & URL_WRONLY;
    authstr = ff_http_auth_create_response(&s->auth_state, auth, path,
                                        post ? "POST" : "GET");

    /* set default headers if needed */
    if (!has_header(s->headers, "\r\nUser-Agent: "))
       len += av_strlcatf(headers + len, sizeof(headers) - len,
                          "User-Agent: %s\r\n", LIBAVFORMAT_IDENT);
    if (!has_header(s->headers, "\r\nAccept: "))
        len += av_strlcpy(headers + len, "Accept: */*\r\n",
                          sizeof(headers) - len);
    if (!has_header(s->headers, "\r\nRange: "))
        len += av_strlcatf(headers + len, sizeof(headers) - len,
                           "Range: bytes=%"PRId64"-\r\n", s->off);
    if (!has_header(s->headers, "\r\nConnection: "))
        len += av_strlcpy(headers + len, "Connection: close\r\n",
                          sizeof(headers)-len);
    if (!has_header(s->headers, "\r\nHost: "))
        len += av_strlcatf(headers + len, sizeof(headers) - len,
                           "Host: %s\r\n", hoststr);

    /* now add in custom headers */
    av_strlcpy(headers+len, s->headers, sizeof(headers)-len);

    snprintf(s->buffer, sizeof(s->buffer),
             "%s %s HTTP/1.1\r\n"
             "%s"
             "%s"
             "%s"
             "\r\n",
             post ? "POST" : "GET",
             path,
             post && s->is_chunked ? "Transfer-Encoding: chunked\r\n" : "",
             headers,
             authstr ? authstr : "");

    av_freep(&authstr);
    if (url_write(s->hd, s->buffer, strlen(s->buffer)) < 0)
        return AVERROR(EIO);

    /* init input buffer */
    s->buf_ptr = s->buffer;
    s->buf_end = s->buffer;
    s->line_count = 0;
    s->off = 0;
    s->filesize = -1;
    s->chunksize = -1;
    if (post) {
        /* always use chunked encoding for upload data */
        s->chunksize = 0;
        /* Pretend that it did work. We didn't read any header yet, since
         * we've still to send the POST data, but the code calling this
         * function will check http_code after we return. */
        s->http_code = 200;
        return 0;
    }

    /* wait for header */
    for(;;) {
        if (http_get_line(s, line, sizeof(line)) < 0)
            return AVERROR(EIO);

        dprintf(NULL, "header='%s'\n", line);

        err = process_line(h, line, s->line_count, new_location);
        if (err < 0)
            return err;
        if (err == 0)
            break;
        s->line_count++;
    }

    return (off == s->off) ? 0 : -1;
}
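has_header(), called above to decide whether each default header must be added, only needs to check case-insensitively whether the caller already supplied that header; a sketch is given below (the "\r\n" prefix convention follows the call sites above, and the body is illustrative).

/* "header" arrives with a leading "\r\n" (see the call sites above), so a
 * match either at the very start of the user-supplied headers or anywhere
 * after a line break counts as "already present". */
static int has_header(const char *str, const char *header)
{
    if (!str)
        return 0;
    return av_stristart(str, header + 2, NULL) || av_stristr(str, header);
}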