Example #1
File: rawdec.c  Project: superdump/libav
/* raw input */
int ff_raw_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    AVStream *st;
    enum CodecID id;

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    id = s->iformat->value;
    if (id == CODEC_ID_RAWVIDEO) {
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    } else {
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    }
    st->codec->codec_id = id;

    switch(st->codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO: {
        RawAudioDemuxerContext *s1 = s->priv_data;

#if FF_API_FORMAT_PARAMETERS
        if (ap->sample_rate)
            st->codec->sample_rate = ap->sample_rate;
        if (ap->channels)
            st->codec->channels    = ap->channels;
        else st->codec->channels   = 1;
#endif

        if (s1->sample_rate)
            st->codec->sample_rate = s1->sample_rate;
        if (s1->channels)
            st->codec->channels    = s1->channels;

        st->codec->bits_per_coded_sample = av_get_bits_per_sample(st->codec->codec_id);
        assert(st->codec->bits_per_coded_sample > 0);
        st->codec->block_align = st->codec->bits_per_coded_sample*st->codec->channels/8;
        av_set_pts_info(st, 64, 1, st->codec->sample_rate);
        break;
        }
    case AVMEDIA_TYPE_VIDEO: {
        FFRawVideoDemuxerContext *s1 = s->priv_data;
        int width = 0, height = 0, ret = 0;
        enum PixelFormat pix_fmt;
        AVRational framerate;

        if (s1->video_size && (ret = av_parse_video_size(&width, &height, s1->video_size)) < 0) {
            av_log(s, AV_LOG_ERROR, "Couldn't parse video size.\n");
            goto fail;
        }
        if ((pix_fmt = av_get_pix_fmt(s1->pixel_format)) == PIX_FMT_NONE) {
            av_log(s, AV_LOG_ERROR, "No such pixel format: %s.\n", s1->pixel_format);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        if ((ret = av_parse_video_rate(&framerate, s1->framerate)) < 0) {
            av_log(s, AV_LOG_ERROR, "Could not parse framerate: %s.\n", s1->framerate);
            goto fail;
        }
#if FF_API_FORMAT_PARAMETERS
        if (ap->width > 0)
            width = ap->width;
        if (ap->height > 0)
            height = ap->height;
        if (ap->pix_fmt)
            pix_fmt = ap->pix_fmt;
        if (ap->time_base.num)
            framerate = (AVRational){ap->time_base.den, ap->time_base.num};
#endif
        av_set_pts_info(st, 64, framerate.den, framerate.num);
        st->codec->width  = width;
        st->codec->height = height;
        st->codec->pix_fmt = pix_fmt;
fail:
        av_freep(&s1->video_size);
        av_freep(&s1->pixel_format);
        av_freep(&s1->framerate);
        return ret;
        }
    default:
        return -1;
    }
    return 0;
}
Example #2
static int wsvqa_read_header(AVFormatContext *s,
                             AVFormatParameters *ap)
{
    WsVqaDemuxContext *wsvqa = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st;
    unsigned char *header;
    unsigned char scratch[VQA_PREAMBLE_SIZE];
    unsigned int chunk_tag;
    unsigned int chunk_size;

    /* initialize the video decoder stream */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    av_set_pts_info(st, 33, 1, VQA_FRAMERATE);
    wsvqa->video_stream_index = st->index;
    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_WS_VQA;
    st->codec->codec_tag = 0;  /* no fourcc */

    /* skip to the start of the VQA header */
    url_fseek(pb, 20, SEEK_SET);

    /* the VQA header needs to go to the decoder */
    st->codec->extradata_size = VQA_HEADER_SIZE;
    st->codec->extradata = av_mallocz(VQA_HEADER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!st->codec->extradata)
        return AVERROR(ENOMEM);
    header = (unsigned char *)st->codec->extradata;
    if (get_buffer(pb, st->codec->extradata, VQA_HEADER_SIZE) !=
        VQA_HEADER_SIZE) {
        av_free(st->codec->extradata);
        return AVERROR(EIO);
    }
    st->codec->width = AV_RL16(&header[6]);
    st->codec->height = AV_RL16(&header[8]);

    /* initialize the audio decoder stream for VQA v1 or nonzero samplerate */
    if (AV_RL16(&header[24]) || (AV_RL16(&header[0]) == 1 && AV_RL16(&header[2]) == 1)) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        av_set_pts_info(st, 33, 1, VQA_FRAMERATE);
        st->codec->codec_type = CODEC_TYPE_AUDIO;
        if (AV_RL16(&header[0]) == 1)
            st->codec->codec_id = CODEC_ID_WESTWOOD_SND1;
        else
            st->codec->codec_id = CODEC_ID_ADPCM_IMA_WS;
        st->codec->codec_tag = 0;  /* no tag */
        st->codec->sample_rate = AV_RL16(&header[24]);
        if (!st->codec->sample_rate)
            st->codec->sample_rate = 22050;
        st->codec->channels = header[26];
        if (!st->codec->channels)
            st->codec->channels = 1;
        st->codec->bits_per_coded_sample = 16;
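        /* rough estimate below: the Westwood ADPCM payload carries about 4 bits per 16-bit sample */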
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_coded_sample / 4;
        st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;

        wsvqa->audio_stream_index = st->index;
        wsvqa->audio_samplerate = st->codec->sample_rate;
        wsvqa->audio_channels = st->codec->channels;
        wsvqa->audio_frame_counter = 0;
    }

    /* there are 0 or more chunks before the FINF chunk; iterate until
     * FINF has been skipped and the file will be ready to be demuxed */
    do {
        if (get_buffer(pb, scratch, VQA_PREAMBLE_SIZE) != VQA_PREAMBLE_SIZE) {
            av_free(st->codec->extradata);
            return AVERROR(EIO);
        }
        chunk_tag = AV_RB32(&scratch[0]);
        chunk_size = AV_RB32(&scratch[4]);

        /* catch any unknown header tags, for curiosity */
        switch (chunk_tag) {
        case CINF_TAG:
        case CINH_TAG:
        case CIND_TAG:
        case PINF_TAG:
        case PINH_TAG:
        case PIND_TAG:
        case FINF_TAG:
        case CMDS_TAG:
            break;

        default:
            av_log (s, AV_LOG_ERROR, " note: unknown chunk seen (%c%c%c%c)\n",
                scratch[0], scratch[1],
                scratch[2], scratch[3]);
            break;
        }

        url_fseek(pb, chunk_size, SEEK_CUR);
    } while (chunk_tag != FINF_TAG);

    wsvqa->video_pts = wsvqa->audio_frame_counter = 0;

    return 0;
}
Example #3
File: ffmpeg.c  Project: Calc86/motion
/**
 * ffmpeg_open
 *      Opens an mpeg file using the new libavformat method. Both mpeg1
 *      and mpeg4 are supported. However, if the current ffmpeg version doesn't allow
 *      mpeg1 with non-standard framerate, the open will fail. Timelapse is a special
 *      case and is tested separately.
 *
 *  Returns
 *      A new allocated ffmpeg struct or NULL if any error happens.
 */
struct ffmpeg *ffmpeg_open(char *ffmpeg_video_codec, char *filename,
                           unsigned char *y, unsigned char *u, unsigned char *v,
                           int width, int height, int rate, int bps, int vbr)
{
    AVCodecContext *c;
    AVCodec *codec;
    struct ffmpeg *ffmpeg;
    int is_mpeg1;
    int ret;
    /*
     * Allocate space for our ffmpeg structure. This structure contains all the
     * codec and image information we need to generate movies.
     * FIXME when motion exits we should close the movie to ensure that
     * ffmpeg is freed.
     */
    ffmpeg = mymalloc(sizeof(struct ffmpeg));
    memset(ffmpeg, 0, sizeof(struct ffmpeg));

    ffmpeg->vbr = vbr;

    /* Store codec name in ffmpeg->codec, with buffer overflow check. */
    snprintf(ffmpeg->codec, sizeof(ffmpeg->codec), "%s", ffmpeg_video_codec);

    /* Allocate the output media context. */
#ifdef have_avformat_alloc_context
    ffmpeg->oc = avformat_alloc_context();
#elif defined have_av_avformat_alloc_context
    ffmpeg->oc = av_alloc_format_context();
#else
    ffmpeg->oc = av_mallocz(sizeof(AVFormatContext));
#endif

    if (!ffmpeg->oc) {
        MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Memory error while allocating"
                   " output media context");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* Setup output format */
    ffmpeg->oc->oformat = get_oformat(ffmpeg_video_codec, filename);
    if (!ffmpeg->oc->oformat) {
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    snprintf(ffmpeg->oc->filename, sizeof(ffmpeg->oc->filename), "%s", filename);

    /* Create a new video stream and initialize the codecs. */
    ffmpeg->video_st = NULL;
    if (ffmpeg->oc->oformat->video_codec != CODEC_ID_NONE) {
#if defined FF_API_NEW_AVIO 
        ffmpeg->video_st = avformat_new_stream(ffmpeg->oc, NULL /* Codec */);
#else
        ffmpeg->video_st = av_new_stream(ffmpeg->oc, 0);
#endif
        if (!ffmpeg->video_st) {
            MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: av_new_stream - could"
                       " not alloc stream");
            ffmpeg_cleanups(ffmpeg);
            return NULL;
        }
    } else {
        /* We did not get a proper video codec. */
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Failed to obtain a proper"
                   " video codec");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    ffmpeg->c     = c = AVSTREAM_CODEC_PTR(ffmpeg->video_st);
    c->codec_id   = ffmpeg->oc->oformat->video_codec;
#if LIBAVCODEC_VERSION_MAJOR < 53    
    c->codec_type = CODEC_TYPE_VIDEO;
#else
    c->codec_type = AVMEDIA_TYPE_VIDEO;
#endif    
    is_mpeg1      = c->codec_id == CODEC_ID_MPEG1VIDEO;

    if (strcmp(ffmpeg_video_codec, "ffv1") == 0)
        c->strict_std_compliance = -2;

    /* Uncomment to allow non-standard framerates. */
    //c->strict_std_compliance = -1;

    /* Set default parameters */
    c->bit_rate = bps;
    c->width    = width;
    c->height   = height;
#if LIBAVCODEC_BUILD >= 4754
    /* Frame rate = 1/time_base, so we set 1/rate, not rate/1 */
    c->time_base.num = 1;
    c->time_base.den = rate;
#else
    c->frame_rate      = rate;
    c->frame_rate_base = 1;
#endif /* LIBAVCODEC_BUILD >= 4754 */

    MOTION_LOG(INF, TYPE_ENCODER, NO_ERRNO, "%s FPS %d",
               rate);

    if (vbr)
        c->flags |= CODEC_FLAG_QSCALE;

    /*
     * Set codec specific parameters.
     * Set intra frame distance in frames depending on codec.
     */
    c->gop_size = is_mpeg1 ? 10 : 12;

    /* Some formats want stream headers to be separate. */
    if (!strcmp(ffmpeg->oc->oformat->name, "mp4") ||
        !strcmp(ffmpeg->oc->oformat->name, "mov") ||
        !strcmp(ffmpeg->oc->oformat->name, "3gp")) {
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

#if defined FF_API_NEW_AVIO
// pass the options to avformat_write_header directly
#else
    /* Set the output parameters (must be done even if no parameters). */
    if (av_set_parameters(ffmpeg->oc, NULL) < 0) {
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: av_set_parameters error:"
                   " Invalid output format parameters");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }
#endif

    /* Dump the format settings.  This shows how the various streams relate to each other. */
    //dump_format(ffmpeg->oc, 0, filename, 1);

    /*
     * Now that all the parameters are set, we can open the video
     * codec and allocate the necessary encode buffers.
     */
    codec = avcodec_find_encoder(c->codec_id);

    if (!codec) {
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Codec %s not found",
                   ffmpeg_video_codec);
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* Set the picture format - needed in ffmpeg starting around April-May 2005 */
    c->pix_fmt = PIX_FMT_YUV420P;

    /* Get a mutex lock. */
    pthread_mutex_lock(&global_lock);

    /* Open the codec */
#if defined FF_API_NEW_AVIO
    ret = avcodec_open2(c, codec, NULL /* options */ );
#else
    ret = avcodec_open(c, codec);
#endif

    if (ret < 0) {
        /* Release the lock. */
        pthread_mutex_unlock(&global_lock);
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: avcodec_open - could not open codec %s",
                   ffmpeg_video_codec);
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* Release the lock. */
    pthread_mutex_unlock(&global_lock);

    ffmpeg->video_outbuf = NULL;

    if (!(ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /*
         * Allocate output buffer
         * XXX: API change will be done
         * ffmpeg->video_outbuf_size = 200000
         */
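        /* heuristic buffer size: width * 512 bytes is assumed to be enough for one encoded frame */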
        ffmpeg->video_outbuf_size = ffmpeg->c->width * 512;
        ffmpeg->video_outbuf = mymalloc(ffmpeg->video_outbuf_size);
    }

    /* Allocate the encoded raw picture. */
    ffmpeg->picture = avcodec_alloc_frame();

    if (!ffmpeg->picture) {
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: avcodec_alloc_frame -"
                   " could not alloc frame");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* Set variable bitrate if requested. */
    if (ffmpeg->vbr)
        ffmpeg->picture->quality = ffmpeg->vbr;


    /* Set the frame data. */
    ffmpeg->picture->data[0] = y;
    ffmpeg->picture->data[1] = u;
    ffmpeg->picture->data[2] = v;
    ffmpeg->picture->linesize[0] = ffmpeg->c->width;
    ffmpeg->picture->linesize[1] = ffmpeg->c->width / 2;
    ffmpeg->picture->linesize[2] = ffmpeg->c->width / 2;

    /* Open the output file, if needed. */
    if (!(ffmpeg->oc->oformat->flags & AVFMT_NOFILE)) {
        char file_proto[256];

        /*
         * Use append file protocol for mpeg1, to get the append behavior from
         * url_fopen, but no protocol (=> default) for other codecs.
         */
        if (is_mpeg1)
#if defined FF_API_NEW_AVIO
            snprintf(file_proto, sizeof(file_proto), "%s", filename);
#else
            snprintf(file_proto, sizeof(file_proto), APPEND_PROTO ":%s", filename);
#endif
        else
Example #4
File: jvdec.c  Project: eugenehp/ffmbc
static int read_header(AVFormatContext *s,
                       AVFormatParameters *ap)
{
    JVDemuxContext *jv = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *vst, *ast;
    int64_t audio_pts = 0;
    int64_t offset;
    int i;

    avio_skip(pb, 80);

    ast = av_new_stream(s, 0);
    vst = av_new_stream(s, 1);
    if (!ast || !vst)
        return AVERROR(ENOMEM);

    vst->codec->codec_type  = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_id    = CODEC_ID_JV;
    vst->codec->codec_tag   = 0; /* no fourcc */
    vst->codec->width       = avio_rl16(pb);
    vst->codec->height      = avio_rl16(pb);
    vst->nb_frames          =
    ast->nb_index_entries   = avio_rl16(pb);
    av_set_pts_info(vst, 64, avio_rl16(pb), 1000);

    avio_skip(pb, 4);

    ast->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    ast->codec->codec_id    = CODEC_ID_PCM_U8;
    ast->codec->codec_tag   = 0; /* no fourcc */
    ast->codec->sample_rate = avio_rl16(pb);
    ast->codec->channels    = 1;
    av_set_pts_info(ast, 64, 1, ast->codec->sample_rate);

    avio_skip(pb, 10);

    ast->index_entries = av_malloc(ast->nb_index_entries * sizeof(*ast->index_entries));
    if (!ast->index_entries)
        return AVERROR(ENOMEM);

    jv->frames = av_malloc(ast->nb_index_entries * sizeof(JVFrame));
    if (!jv->frames)
        return AVERROR(ENOMEM);

    offset = 0x68 + ast->nb_index_entries * 16;
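    /* the frame table starts at 0x68 and uses 16 bytes per frame; frame data follows the table */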
    for(i = 0; i < ast->nb_index_entries; i++) {
        AVIndexEntry *e   = ast->index_entries + i;
        JVFrame      *jvf = jv->frames + i;

        /* total frame size including audio, video, palette data and padding */
        e->size         = avio_rl32(pb);
        e->timestamp    = i;
        e->pos          = offset;
        offset         += e->size;

        jvf->audio_size = avio_rl32(pb);
        jvf->video_size = avio_rl32(pb);
        jvf->palette_size = avio_r8(pb) ? 768 : 0;
        jvf->video_size = FFMIN(FFMAX(jvf->video_size, 0),
                                INT_MAX - JV_PREAMBLE_SIZE - jvf->palette_size);
        if (avio_r8(pb))
             av_log(s, AV_LOG_WARNING, "unsupported audio codec\n");
        jvf->video_type = avio_r8(pb);
        avio_skip(pb, 1);

        e->timestamp = jvf->audio_size ? audio_pts : AV_NOPTS_VALUE;
        audio_pts += jvf->audio_size;

        e->flags = jvf->video_type != 1 ? AVINDEX_KEYFRAME : 0;
    }

    jv->state = JV_AUDIO;
    return 0;
}
Example #5
File: c93.c  Project: AWilco/xbmc
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    ByteIOContext *pb = s->pb;
    C93DemuxContext *c93 = s->priv_data;
    C93BlockRecord *br = &c93->block_records[c93->current_block];
    int datasize;
    int ret, i;

    if (c93->next_pkt_is_audio) {
        c93->current_frame++;
        c93->next_pkt_is_audio = 0;
        datasize = get_le16(pb);
        if (datasize > 42) {
            if (!c93->audio) {
                c93->audio = av_new_stream(s, 1);
                if (!c93->audio)
                    return AVERROR(ENOMEM);
                c93->audio->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            }
            url_fskip(pb, 26); /* VOC header */
            ret = voc_get_packet(s, pkt, c93->audio, datasize - 26);
            if (ret > 0) {
                pkt->stream_index = 1;
                pkt->flags |= AV_PKT_FLAG_KEY;
                return ret;
            }
        }
    }
    if (c93->current_frame >= br->frames) {
        if (c93->current_block >= 511 || !br[1].length)
            return AVERROR(EIO);
        br++;
        c93->current_block++;
        c93->current_frame = 0;
    }

    if (c93->current_frame == 0) {
        url_fseek(pb, br->index * 2048, SEEK_SET);
        for (i = 0; i < 32; i++) {
            c93->frame_offsets[i] = get_le32(pb);
        }
    }

    url_fseek(pb,br->index * 2048 +
            c93->frame_offsets[c93->current_frame], SEEK_SET);
    datasize = get_le16(pb); /* video frame size */

    ret = av_new_packet(pkt, datasize + 768 + 1);
    if (ret < 0)
        return ret;
    pkt->data[0] = 0;
    pkt->size = datasize + 1;
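    /* pkt->data[0] is a flags byte (C93_HAS_PALETTE / C93_FIRST_FRAME, set below); the frame data follows it */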

    ret = get_buffer(pb, pkt->data + 1, datasize);
    if (ret < datasize) {
        ret = AVERROR(EIO);
        goto fail;
    }

    datasize = get_le16(pb); /* palette size */
    if (datasize) {
        if (datasize != 768) {
            av_log(s, AV_LOG_ERROR, "invalid palette size %u\n", datasize);
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }
        pkt->data[0] |= C93_HAS_PALETTE;
        ret = get_buffer(pb, pkt->data + pkt->size, datasize);
        if (ret < datasize) {
            ret = AVERROR(EIO);
            goto fail;
        }
        pkt->size += 768;
    }
    pkt->stream_index = 0;
    c93->next_pkt_is_audio = 1;

    /* only the first frame is guaranteed to not reference previous frames */
    if (c93->current_block == 0 && c93->current_frame == 0) {
        pkt->flags |= AV_PKT_FLAG_KEY;
        pkt->data[0] |= C93_FIRST_FRAME;
    }
    return 0;

    fail:
    av_free_packet(pkt);
    return ret;
}
Example #6
static void sdp_parse_line(AVFormatContext *s, SDPParseState *s1,
                           int letter, const char *buf)
{
    RTSPState *rt = s->priv_data;
    char buf1[64], st_type[64];
    const char *p;
    int codec_type, payload_type, i;
    AVStream *st;
    RTSPStream *rtsp_st;
    struct in_addr sdp_ip;
    int ttl;

#ifdef DEBUG
    printf("sdp: %c='%s'\n", letter, buf);
#endif

    p = buf;
    switch(letter) {
    case 'c':
        get_word(buf1, sizeof(buf1), &p);
        if (strcmp(buf1, "IN") != 0)
            return;
        get_word(buf1, sizeof(buf1), &p);
        if (strcmp(buf1, "IP4") != 0)
            return;
        get_word_sep(buf1, sizeof(buf1), "/", &p);
        if (inet_aton(buf1, &sdp_ip) == 0)
            return;
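        /* default multicast TTL when the connection line carries no /ttl suffix */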
        ttl = 16;
        if (*p == '/') {
            p++;
            get_word_sep(buf1, sizeof(buf1), "/", &p);
            ttl = atoi(buf1);
        }
        if (s->nb_streams == 0) {
            s1->default_ip = sdp_ip;
            s1->default_ttl = ttl;
        } else {
            st = s->streams[s->nb_streams - 1];
            rtsp_st = st->priv_data;
            rtsp_st->sdp_ip = sdp_ip;
            rtsp_st->sdp_ttl = ttl;
        }
        break;
    case 's':
        av_strlcpy(s->title, p, sizeof(s->title));
        break;
    case 'i':
        if (s->nb_streams == 0) {
            av_strlcpy(s->comment, p, sizeof(s->comment));
            break;
        }
        break;
    case 'm':
        /* new stream */
        get_word(st_type, sizeof(st_type), &p);
        if (!strcmp(st_type, "audio")) {
            codec_type = CODEC_TYPE_AUDIO;
        } else if (!strcmp(st_type, "video")) {
            codec_type = CODEC_TYPE_VIDEO;
        } else {
            return;
        }
        rtsp_st = av_mallocz(sizeof(RTSPStream));
        if (!rtsp_st)
            return;
        rtsp_st->stream_index = -1;
        dynarray_add(&rt->rtsp_streams, &rt->nb_rtsp_streams, rtsp_st);

        rtsp_st->sdp_ip = s1->default_ip;
        rtsp_st->sdp_ttl = s1->default_ttl;

        get_word(buf1, sizeof(buf1), &p); /* port */
        rtsp_st->sdp_port = atoi(buf1);

        get_word(buf1, sizeof(buf1), &p); /* protocol (ignored) */

        /* XXX: handle list of formats */
        get_word(buf1, sizeof(buf1), &p); /* format list */
        rtsp_st->sdp_payload_type = atoi(buf1);

        if (!strcmp(ff_rtp_enc_name(rtsp_st->sdp_payload_type), "MP2T")) {
            /* no corresponding stream */
        } else {
            st = av_new_stream(s, 0);
            if (!st)
                return;
            st->priv_data = rtsp_st;
            rtsp_st->stream_index = st->index;
            st->codec->codec_type = codec_type;
            if (rtsp_st->sdp_payload_type < RTP_PT_PRIVATE) {
                /* if standard payload type, we can find the codec right now */
                rtp_get_codec_info(st->codec, rtsp_st->sdp_payload_type);
            }
        }
        /* put a default control url */
        av_strlcpy(rtsp_st->control_url, s->filename, sizeof(rtsp_st->control_url));
        break;
    case 'a':
        if (av_strstart(p, "control:", &p) && s->nb_streams > 0) {
            char proto[32];
            /* get the control url */
            st = s->streams[s->nb_streams - 1];
            rtsp_st = st->priv_data;

            /* XXX: may need to add full url resolution */
            url_split(proto, sizeof(proto), NULL, 0, NULL, 0, NULL, NULL, 0, p);
            if (proto[0] == '\0') {
                /* relative control URL */
                av_strlcat(rtsp_st->control_url, "/", sizeof(rtsp_st->control_url));
                av_strlcat(rtsp_st->control_url, p,   sizeof(rtsp_st->control_url));
            } else {
                av_strlcpy(rtsp_st->control_url, p,   sizeof(rtsp_st->control_url));
            }
        } else if (av_strstart(p, "rtpmap:", &p)) {
            /* NOTE: rtpmap is only supported AFTER the 'm=' tag */
            get_word(buf1, sizeof(buf1), &p);
            payload_type = atoi(buf1);
            for(i = 0; i < s->nb_streams;i++) {
                st = s->streams[i];
                rtsp_st = st->priv_data;
                if (rtsp_st->sdp_payload_type == payload_type) {
                    sdp_parse_rtpmap(st->codec, rtsp_st, payload_type, p);
                }
            }
        } else if (av_strstart(p, "fmtp:", &p)) {
            /* NOTE: fmtp is only supported AFTER the 'a=rtpmap:xxx' tag */
            get_word(buf1, sizeof(buf1), &p);
            payload_type = atoi(buf1);
            for(i = 0; i < s->nb_streams;i++) {
                st = s->streams[i];
                rtsp_st = st->priv_data;
                if (rtsp_st->sdp_payload_type == payload_type) {
                    if(rtsp_st->dynamic_handler && rtsp_st->dynamic_handler->parse_sdp_a_line) {
                        if(!rtsp_st->dynamic_handler->parse_sdp_a_line(st, rtsp_st->dynamic_protocol_context, buf)) {
                            sdp_parse_fmtp(st, p);
                        }
                    } else {
                        sdp_parse_fmtp(st, p);
                    }
                }
            }
        } else if(av_strstart(p, "framesize:", &p)) {
            // let dynamic protocol handlers have a stab at the line.
            get_word(buf1, sizeof(buf1), &p);
            payload_type = atoi(buf1);
            for(i = 0; i < s->nb_streams;i++) {
                st = s->streams[i];
                rtsp_st = st->priv_data;
                if (rtsp_st->sdp_payload_type == payload_type) {
                    if(rtsp_st->dynamic_handler && rtsp_st->dynamic_handler->parse_sdp_a_line) {
                        rtsp_st->dynamic_handler->parse_sdp_a_line(st, rtsp_st->dynamic_protocol_context, buf);
                    }
                }
            }
        } else if(av_strstart(p, "range:", &p)) {
            int64_t start, end;

            // this is so that seeking on a streamed file can work.
            rtsp_parse_range_npt(p, &start, &end);
            s->start_time= start;
            s->duration= (end==AV_NOPTS_VALUE)?AV_NOPTS_VALUE:end-start; // AV_NOPTS_VALUE means live broadcast (and can't seek)
        }
        break;
    }
}
Example #7
/* This function opens an mpeg file using the new libavformat method. Both mpeg1
 * and mpeg4 are supported. However, if the current ffmpeg version doesn't allow
 * mpeg1 with non-standard framerate, the open will fail. Timelapse is a special
 * case and is tested separately.
 */
struct ffmpeg *ffmpeg_open(char *ffmpeg_video_codec, char *filename,
                           unsigned char *y, unsigned char *u, unsigned char *v,
                           int width, int height, int rate, int bps, int vbr)
{
    AVCodecContext *c;
    AVCodec *codec;
    struct ffmpeg *ffmpeg;
    int is_mpeg1;

    /* Allocate space for our ffmpeg structure. This structure contains all the 
     * codec and image information we need to generate movies.
     * FIXME when motion exits we should close the movie to ensure that
     * ffmpeg is freed.
     */
    ffmpeg = mymalloc(sizeof(struct ffmpeg));
    memset(ffmpeg, 0, sizeof(struct ffmpeg));

    ffmpeg->vbr = vbr;
    
    /* store codec name in ffmpeg->codec, with buffer overflow check */
    snprintf(ffmpeg->codec, sizeof(ffmpeg->codec), "%s", ffmpeg_video_codec);

    /* allocate the output media context */
    ffmpeg->oc = av_mallocz(sizeof(AVFormatContext));

    if (!ffmpeg->oc) {
        motion_log(LOG_ERR, 1, "Memory error while allocating output media context");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* Setup output format */
    ffmpeg->oc->oformat = get_oformat(ffmpeg_video_codec, filename);

    if (!ffmpeg->oc->oformat) {
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    snprintf(ffmpeg->oc->filename, sizeof(ffmpeg->oc->filename), "%s", filename);

    /* Create a new video stream and initialize the codecs */
    ffmpeg->video_st = NULL;

    if (ffmpeg->oc->oformat->video_codec != CODEC_ID_NONE) {
        ffmpeg->video_st = av_new_stream(ffmpeg->oc, 0);
        if (!ffmpeg->video_st) {
            motion_log(LOG_ERR, 1, "av_new_stream - could not alloc stream");
            ffmpeg_cleanups(ffmpeg);
            return NULL;
        }
    } else {
        /* We did not get a proper video codec. */
        motion_log(LOG_ERR, 0, "Failed to obtain a proper video codec");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    ffmpeg->c     = c = AVSTREAM_CODEC_PTR(ffmpeg->video_st);
    c->codec_id   = ffmpeg->oc->oformat->video_codec;
    c->codec_type = CODEC_TYPE_VIDEO;
    is_mpeg1      = c->codec_id == CODEC_ID_MPEG1VIDEO;

    if (strcmp(ffmpeg_video_codec, "ffv1") == 0)
        c->strict_std_compliance = -2; 

    /* Uncomment to allow non-standard framerates. */
    //c->strict_std_compliance = -1;

    /* Set default parameters */
    c->bit_rate = bps;
    c->width    = width;
    c->height   = height;
#if LIBAVCODEC_BUILD >= 4754
    /* frame rate = 1/time_base, so we set 1/rate, not rate/1 */
    c->time_base.num = 1;
    c->time_base.den = rate;
#else
    c->frame_rate      = rate;
    c->frame_rate_base = 1;
#endif /* LIBAVCODEC_BUILD >= 4754 */

    if (debug_level >= CAMERA_DEBUG)
        motion_log(LOG_DEBUG, 0, "%s FPS %d",__FUNCTION__,rate);    

    if (vbr)
        c->flags |= CODEC_FLAG_QSCALE;

    /* Set codec specific parameters. */
    /* set intra frame distance in frames depending on codec */
    c->gop_size = is_mpeg1 ? 10 : 12;
    
    /* some formats want stream headers to be separate */
    if (!strcmp(ffmpeg->oc->oformat->name, "mp4") || 
       !strcmp(ffmpeg->oc->oformat->name, "mov") ||
       !strcmp(ffmpeg->oc->oformat->name, "3gp")) {
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    /* set the output parameters (must be done even if no parameters). */
    if (av_set_parameters(ffmpeg->oc, NULL) < 0) {
        motion_log(LOG_ERR, 0, "ffmpeg av_set_parameters error: Invalid output format parameters");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* Dump the format settings.  This shows how the various streams relate to each other */
    //dump_format(ffmpeg->oc, 0, filename, 1);

    /* Now that all the parameters are set, we can open the video
        codec and allocate the necessary encode buffers */
    codec = avcodec_find_encoder(c->codec_id);

    if (!codec) {
        motion_log(LOG_ERR, 1, "Codec not found");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }
    
    /* Set the picture format - needed in ffmpeg starting around April-May 2005 */
    c->pix_fmt = PIX_FMT_YUV420P;

    /* Get a mutex lock. */
    pthread_mutex_lock(&global_lock);

    /* open the codec */
    if (avcodec_open(c, codec) < 0) {
        /* Release the lock. */
        pthread_mutex_unlock(&global_lock);
        motion_log(LOG_ERR, 1, "avcodec_open - could not open codec");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* Release the lock. */
    pthread_mutex_unlock(&global_lock);


    ffmpeg->video_outbuf = NULL;
    if (!(ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* allocate output buffer */
        /* XXX: API change will be done */
        /* ffmpeg->video_outbuf_size = 20000; */
        ffmpeg->video_outbuf_size = ffmpeg->c->width * 256; 
        ffmpeg->video_outbuf = mymalloc(ffmpeg->video_outbuf_size);
    }

    /* allocate the encoded raw picture */
    ffmpeg->picture = avcodec_alloc_frame();
    if (!ffmpeg->picture) {
        motion_log(LOG_ERR, 1, "avcodec_alloc_frame - could not alloc frame");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* set variable bitrate if requested */
    if (ffmpeg->vbr) 
        ffmpeg->picture->quality = ffmpeg->vbr;
    

    /* set the frame data */
    ffmpeg->picture->data[0] = y;
    ffmpeg->picture->data[1] = u;
    ffmpeg->picture->data[2] = v;
    ffmpeg->picture->linesize[0] = ffmpeg->c->width;
    ffmpeg->picture->linesize[1] = ffmpeg->c->width / 2;
    ffmpeg->picture->linesize[2] = ffmpeg->c->width / 2;

    /* open the output file, if needed */
    if (!(ffmpeg->oc->oformat->flags & AVFMT_NOFILE)) {
        char file_proto[256];

        /* Use append file protocol for mpeg1, to get the append behavior from 
         * url_fopen, but no protocol (=> default) for other codecs.
         */
        if (is_mpeg1) 
            snprintf(file_proto, sizeof(file_proto), APPEND_PROTO ":%s", filename);
        else 
            snprintf(file_proto, sizeof(file_proto), "%s", filename);
        

        if (url_fopen(&ffmpeg->oc->pb, file_proto, URL_WRONLY) < 0) {
            /* path did not exist? */
            if (errno == ENOENT) {
                /* create path for file (don't use file_proto)... */
                if (create_path(filename) == -1) {
                    ffmpeg_cleanups(ffmpeg);
                    return NULL;
                }

                /* and retry opening the file (use file_proto) */
                if (url_fopen(&ffmpeg->oc->pb, file_proto, URL_WRONLY) < 0) {
                    motion_log(LOG_ERR, 1, "url_fopen - error opening file %s",filename);
                    ffmpeg_cleanups(ffmpeg);
                    return NULL;
                }
                /* Permission denied */
            } else if (errno ==  EACCES) {
                motion_log(LOG_ERR, 1,
                           "url_fopen - error opening file %s"
                           " ... check access rights to target directory", filename);
                ffmpeg_cleanups(ffmpeg);
                return NULL;
            } else {
                motion_log(LOG_ERR, 1, "Error opening file %s", filename);
                ffmpeg_cleanups(ffmpeg);
                return NULL;
            }
        }
    }

    /* write the stream header, if any */
    av_write_header(ffmpeg->oc);
    
    return ffmpeg;
}
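Since ffmpeg_open() returns NULL on every failure path after cleaning up its own allocations, a caller only needs a single check. A minimal, hypothetical calling sketch (the plane names and the 25 fps / 400 kbit/s values are made up for illustration):

    /* hypothetical YUV420P planes: y_plane is width*height bytes, u_plane and v_plane are width*height/4 each */
    struct ffmpeg *enc = ffmpeg_open("mpeg4", "/tmp/motion.avi",
                                     y_plane, u_plane, v_plane,
                                     640, 480, 25 /* fps */, 400000 /* bps */, 0 /* fixed bitrate */);
    if (!enc)
        return;  /* ffmpeg_open() already logged the error and released its resources */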
Example #8
static int img_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
    VideoData *s = s1->priv_data;
    int first_index, last_index;
    AVStream *st;

    s1->ctx_flags |= AVFMTCTX_NOHEADER;
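    /* AVFMTCTX_NOHEADER: stream parameters are only fully known after frames have been read */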

    st = av_new_stream(s1, 0);
    if (!st) {
        return AVERROR(ENOMEM);
    }

    av_strlcpy(s->path, s1->filename, sizeof(s->path));
    s->img_number = 0;
    s->img_count = 0;

    /* find format */
    if (s1->iformat->flags & AVFMT_NOFILE)
        s->is_pipe = 0;
    else{
        s->is_pipe = 1;
        st->need_parsing = AVSTREAM_PARSE_FULL;
    }

    if (!ap->time_base.num) {
        av_set_pts_info(st, 60, 1, 25);
    } else {
        av_set_pts_info(st, 60, ap->time_base.num, ap->time_base.den);
    }

    if(ap->width && ap->height){
        st->codec->width = ap->width;
        st->codec->height= ap->height;
    }

    if (!s->is_pipe) {
        if (find_image_range(&first_index, &last_index, s->path) < 0)
            return AVERROR(ENOENT);
        s->img_first = first_index;
        s->img_last = last_index;
        s->img_number = first_index;
        /* compute duration */
        st->start_time = 0;
        st->duration = last_index - first_index + 1;
    }

    if(s1->video_codec_id){
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id = s1->video_codec_id;
    }else if(s1->audio_codec_id){
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = s1->audio_codec_id;
    }else{
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id = av_str2id(img_tags, s->path);
    }
    if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ap->pix_fmt != PIX_FMT_NONE)
        st->codec->pix_fmt = ap->pix_fmt;

    return 0;
}
Example #9
File: avisynth.c  Project: eugenehp/ffmbc
static int avisynth_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
  AVISynthContext *avs = s->priv_data;
  HRESULT res;
  AVIFILEINFO info;
  DWORD id;
  AVStream *st;
  AVISynthStream *stream;

  AVIFileInit();

  res = AVIFileOpen(&avs->file, s->filename, OF_READ|OF_SHARE_DENY_WRITE, NULL);
  if (res != S_OK)
    {
      av_log(s, AV_LOG_ERROR, "AVIFileOpen failed with error %ld\n", res);
      AVIFileExit();
      return -1;
    }

  res = AVIFileInfo(avs->file, &info, sizeof(info));
  if (res != S_OK)
    {
      av_log(s, AV_LOG_ERROR, "AVIFileInfo failed with error %ld\n", res);
      AVIFileExit();
      return -1;
    }

  avs->streams = av_mallocz(info.dwStreams * sizeof(AVISynthStream));

  for (id=0; id<info.dwStreams; id++)
    {
      stream = &avs->streams[id];
      stream->read = 0;
      if (AVIFileGetStream(avs->file, &stream->handle, 0, id) == S_OK)
        {
          if (AVIStreamInfo(stream->handle, &stream->info, sizeof(stream->info)) == S_OK)
            {
              if (stream->info.fccType == streamtypeAUDIO)
                {
                  WAVEFORMATEX wvfmt;
                  LONG struct_size = sizeof(WAVEFORMATEX);
                  if (AVIStreamReadFormat(stream->handle, 0, &wvfmt, &struct_size) != S_OK)
                    continue;

                  st = av_new_stream(s, id);
                  st->codec->codec_type = AVMEDIA_TYPE_AUDIO;

                  st->codec->block_align = wvfmt.nBlockAlign;
                  st->codec->channels = wvfmt.nChannels;
                  st->codec->sample_rate = wvfmt.nSamplesPerSec;
                  st->codec->bit_rate = wvfmt.nAvgBytesPerSec * 8;
                  st->codec->bits_per_coded_sample = wvfmt.wBitsPerSample;

                  stream->chunck_samples = wvfmt.nSamplesPerSec * (uint64_t)info.dwScale / (uint64_t)info.dwRate;
                  stream->chunck_size = stream->chunck_samples * wvfmt.nChannels * wvfmt.wBitsPerSample / 8;

                  st->codec->codec_tag = wvfmt.wFormatTag;
                  st->codec->codec_id = ff_wav_codec_get_id(wvfmt.wFormatTag, st->codec->bits_per_coded_sample);
                  av_set_pts_info(st, 64, 1, st->codec->sample_rate);
                }
              else if (stream->info.fccType == streamtypeVIDEO)
                {
                  BITMAPINFO imgfmt;
                  LONG struct_size = sizeof(BITMAPINFO);

                  stream->chunck_size = stream->info.dwSampleSize;
                  stream->chunck_samples = 1;

                  if (AVIStreamReadFormat(stream->handle, 0, &imgfmt, &struct_size) != S_OK)
                    continue;

                  st = av_new_stream(s, id);
                  st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
                  st->r_frame_rate.num = stream->info.dwRate;
                  st->r_frame_rate.den = stream->info.dwScale;

                  st->codec->width = imgfmt.bmiHeader.biWidth;
                  st->codec->height = imgfmt.bmiHeader.biHeight;

                  st->codec->bits_per_coded_sample = imgfmt.bmiHeader.biBitCount;
                  st->codec->bit_rate = (uint64_t)stream->info.dwSampleSize * (uint64_t)stream->info.dwRate * 8 / (uint64_t)stream->info.dwScale;
                  st->codec->codec_tag = imgfmt.bmiHeader.biCompression;
                  st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, imgfmt.bmiHeader.biCompression);
                  if (st->codec->codec_id == CODEC_ID_RAWVIDEO && imgfmt.bmiHeader.biCompression== BI_RGB) {
                    st->codec->extradata = av_malloc(9 + FF_INPUT_BUFFER_PADDING_SIZE);
                    if (st->codec->extradata) {
                      st->codec->extradata_size = 9;
                      memcpy(st->codec->extradata, "BottomUp", 9);
                    }
                  }


                  st->duration = stream->info.dwLength;
                  av_set_pts_info(st, 64, info.dwScale, info.dwRate);
                }
              else
                {
                  AVIStreamRelease(stream->handle);
                  continue;
                }

              avs->nb_streams++;

              st->codec->stream_codec_tag = stream->info.fccHandler;

              st->start_time = stream->info.dwStart;
            }
        }
    }

  return 0;
}
Example #10
static int ipmovie_read_header(AVFormatContext *s,
                               AVFormatParameters *ap)
{
    IPMVEContext *ipmovie = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVPacket pkt;
    AVStream *st;
    unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE];
    int chunk_type;

    /* initialize private context members */
    ipmovie->video_pts = ipmovie->audio_frame_count = 0;
    ipmovie->audio_chunk_offset = ipmovie->video_chunk_offset =
    ipmovie->decode_map_chunk_offset = 0;

    /* on the first read, this will position the stream at the first chunk */
    ipmovie->next_chunk_offset = IPMOVIE_SIGNATURE_SIZE + 6;

    /* process the first chunk which should be CHUNK_INIT_VIDEO */
    if (process_ipmovie_chunk(ipmovie, pb, &pkt) != CHUNK_INIT_VIDEO)
        return AVERROR_INVALIDDATA;

    /* peek ahead to the next chunk-- if it is an init audio chunk, process
     * it; if it is the first video chunk, this is a silent file */
    if (get_buffer(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) !=
        CHUNK_PREAMBLE_SIZE)
        return AVERROR(EIO);
    chunk_type = AV_RL16(&chunk_preamble[2]);
    url_fseek(pb, -CHUNK_PREAMBLE_SIZE, SEEK_CUR);

    if (chunk_type == CHUNK_VIDEO)
        ipmovie->audio_type = CODEC_ID_NONE;  /* no audio */
    else if (process_ipmovie_chunk(ipmovie, pb, &pkt) != CHUNK_INIT_AUDIO)
        return AVERROR_INVALIDDATA;

    /* initialize the stream decoders */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
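    /* 33-bit timestamps on a 90 kHz clock, the usual MPEG PTS convention */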
    av_set_pts_info(st, 33, 1, 90000);
    ipmovie->video_stream_index = st->index;
    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_INTERPLAY_VIDEO;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = ipmovie->video_width;
    st->codec->height = ipmovie->video_height;

    /* palette considerations */
    st->codec->palctrl = &ipmovie->palette_control;

    if (ipmovie->audio_type) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        av_set_pts_info(st, 33, 1, 90000);
        ipmovie->audio_stream_index = st->index;
        st->codec->codec_type = CODEC_TYPE_AUDIO;
        st->codec->codec_id = ipmovie->audio_type;
        st->codec->codec_tag = 0;  /* no tag */
        st->codec->channels = ipmovie->audio_channels;
        st->codec->sample_rate = ipmovie->audio_sample_rate;
        st->codec->bits_per_coded_sample = ipmovie->audio_bits;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_coded_sample;
        if (st->codec->codec_id == CODEC_ID_INTERPLAY_DPCM)
            st->codec->bit_rate /= 2;
        st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
    }

    return 0;
}
Example #11
static AVStream* alloc_video_stream(RenderData *rd, int codec_id, AVFormatContext* of,
					int rectx, int recty) 
{
	AVStream* st;
	AVCodecContext* c;
	AVCodec* codec;
	st = av_new_stream(of, 0);
	if (!st) return NULL;

	/* Set up the codec context */
	
	c = st->codec;
	c->codec_id = codec_id;
	c->codec_type = AVMEDIA_TYPE_VIDEO;


	/* Get some values from the current render settings */
	
	c->width = rectx;
	c->height = recty;

	/* FIXME: Really bad hack (tm) for NTSC support */
	if (ffmpeg_type == FFMPEG_DV && rd->frs_sec != 25) {
		c->time_base.den = 2997;
		c->time_base.num = 100;
	} else if ((double) ((int) rd->frs_sec_base) == 
		   rd->frs_sec_base) {
		c->time_base.den = rd->frs_sec;
		c->time_base.num = (int) rd->frs_sec_base;
	} else {
		c->time_base.den = rd->frs_sec * 100000;
		c->time_base.num = ((double) rd->frs_sec_base) * 100000;
	}
	
	c->gop_size = ffmpeg_gop_size;
	c->bit_rate = ffmpeg_video_bitrate*1000;
	c->rc_max_rate = rd->ffcodecdata.rc_max_rate*1000;
	c->rc_min_rate = rd->ffcodecdata.rc_min_rate*1000;
	c->rc_buffer_size = rd->ffcodecdata.rc_buffer_size * 1024;
	c->rc_initial_buffer_occupancy 
		= rd->ffcodecdata.rc_buffer_size*3/4;
	c->rc_buffer_aggressivity = 1.0;
	c->me_method = ME_EPZS;
	
	codec = avcodec_find_encoder(c->codec_id);
	if (!codec) return NULL;
	
	/* Be sure to use the correct pixel format(e.g. RGB, YUV) */
	
	if (codec->pix_fmts) {
		c->pix_fmt = codec->pix_fmts[0];
	} else {
		/* makes HuffYUV happy ... */
		c->pix_fmt = PIX_FMT_YUV422P;
	}

	if (ffmpeg_type == FFMPEG_XVID) {
		/* arghhhh ... */
		c->pix_fmt = PIX_FMT_YUV420P;
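		/* fourcc "XVID": the bytes are stored in little-endian order, hence the reversed shifts */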
		c->codec_tag = (('D'<<24) + ('I'<<16) + ('V'<<8) + 'X');
	}

	if (codec_id == CODEC_ID_H264) {
		/* correct wrong default ffmpeg param which crash x264 */
		c->qmin=10;
		c->qmax=51;
	}
	
	if ((of->oformat->flags & AVFMT_GLOBALHEADER)
//		|| !strcmp(of->oformat->name, "mp4")
//	    || !strcmp(of->oformat->name, "mov")
//	    || !strcmp(of->oformat->name, "3gp")
		) {
		fprintf(stderr, "Using global header\n");
		c->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}
	
	/* Determine whether we are encoding interlaced material or not */
	if (rd->mode & R_FIELDS) {
		fprintf(stderr, "Encoding interlaced video\n");
		c->flags |= CODEC_FLAG_INTERLACED_DCT;
		c->flags |= CODEC_FLAG_INTERLACED_ME;
	}

	/* xasp & yasp got float lately... */

	st->sample_aspect_ratio = c->sample_aspect_ratio = av_d2q(
		((double) rd->xasp / (double) rd->yasp), 255);

	set_ffmpeg_properties(rd, c, "video");
	
	if (avcodec_open(c, codec) < 0) {
		//
		//XXX error("Couldn't initialize codec");
		return NULL;
	}

	video_buffersize = 2000000;
	video_buffer = (uint8_t*)MEM_mallocN(video_buffersize, 
						 "FFMPEG video buffer");
	
	current_frame = alloc_picture(c->pix_fmt, c->width, c->height);

	img_convert_ctx = sws_getContext(c->width, c->height,
					 PIX_FMT_BGR32,
					 c->width, c->height,
					 c->pix_fmt,
					 SWS_BICUBIC,
					 NULL, NULL, NULL);
	return st;
}
Example #12
File: yop.c  Project: Unhelpful/ffmpeg
static int yop_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    YopDecContext *yop = s->priv_data;
    AVIOContext *pb  = s->pb;

    AVCodecContext *audio_dec, *video_dec;
    AVStream *audio_stream, *video_stream;

    int frame_rate, ret;

    audio_stream = av_new_stream(s, 0);
    video_stream = av_new_stream(s, 1);
    if (!audio_stream || !video_stream)
        return AVERROR(ENOMEM);

    // Extra data that will be passed to the decoder
    video_stream->codec->extradata_size = 8;

    video_stream->codec->extradata = av_mallocz(video_stream->codec->extradata_size +
                                                FF_INPUT_BUFFER_PADDING_SIZE);

    if (!video_stream->codec->extradata)
        return AVERROR(ENOMEM);

    // Audio
    audio_dec               = audio_stream->codec;
    audio_dec->codec_type   = AVMEDIA_TYPE_AUDIO;
    audio_dec->codec_id     = CODEC_ID_ADPCM_IMA_WS;
    audio_dec->channels     = 1;
    audio_dec->sample_rate  = 22050;

    // Video
    video_dec               = video_stream->codec;
    video_dec->codec_type   = AVMEDIA_TYPE_VIDEO;
    video_dec->codec_id     = CODEC_ID_YOP;

    avio_skip(pb, 6);

    frame_rate              = avio_r8(pb);
    yop->frame_size         = avio_r8(pb) * 2048;
    video_dec->width        = avio_rl16(pb);
    video_dec->height       = avio_rl16(pb);

    video_stream->sample_aspect_ratio = (AVRational){1, 2};

    ret = avio_read(pb, video_dec->extradata, 8);
    if (ret < 8)
        return ret < 0 ? ret : AVERROR_EOF;

    yop->palette_size       = video_dec->extradata[0] * 3 + 4;
    yop->audio_block_length = AV_RL16(video_dec->extradata + 6);

    // 1840 samples per frame, 1 nibble per sample; hence 1840/2 = 920
    if (yop->audio_block_length < 920 ||
        yop->audio_block_length + yop->palette_size >= yop->frame_size) {
        av_log(s, AV_LOG_ERROR, "YOP has invalid header\n");
        return AVERROR_INVALIDDATA;
    }

    avio_seek(pb, 2048, SEEK_SET);

    av_set_pts_info(video_stream, 32, 1, frame_rate);

    return 0;
}
Example #13
AVStream* CEncode::AddAudioStream(AVFormatContext *oc, int codec_id)
{
    AVCodecContext *c = NULL;
    AVStream *st = NULL;

    st = av_new_stream(oc, 1);
    if (st == NULL) {
        Error("Could not allocate audio stream\n");
        return NULL;
    }

    c = st->codec;
    c->codec_id = (CodecID)codec_id;
    c->codec_type = CODEC_TYPE_AUDIO;
    c->bit_rate = m_EncodeParams.GetParams().nOutputAudioBitrate;
    c->sample_rate = m_EncodeParams.GetParams().nOutputAudioSampleRate;
    c->channels = m_EncodeParams.GetParams().nOutputAudioChannels;

	// c->level = 13; 
	// OutputDebugString("Changed audio level!!!");
//	c->time_base= (AVRational){1, m_EncodeParams.GetParams().nOutputAudioSampleRate};


	c->strict_std_compliance = 0;

    if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

	if (m_EncodeParams.GetParams().vbrAudio)
	{
		if (m_EncodeParams.GetParams().nOutputAudioBitrate < m_EncodeParams.GetParams().nOutputAudioBitrateMin)
			c->bit_rate = m_EncodeParams.GetParams().nOutputAudioBitrateMin;
		else
			c->bit_rate = m_EncodeParams.GetParams().nOutputAudioBitrate;
		c->rc_max_rate = m_EncodeParams.GetParams().nOutputAudioBitrateMax;
		c->rc_min_rate = m_EncodeParams.GetParams().nOutputAudioBitrateMin;
		c->bit_rate = (c->rc_min_rate + c->rc_max_rate) / 2;
		c->bit_rate_tolerance = 4000 * 1000;
		c->rc_buffer_size = 1024 * 8 * 20;
		c->rc_initial_buffer_occupancy = c->rc_buffer_size*3/4;
		c->rc_buffer_aggressivity = 1.0;
	}

	if (c->channels == 0)
		Error("Audio Channel Count not specified");

	if (c->sample_rate == 0)
		Error("Audio Sample Rate Count not specified");

	if (c->bit_rate == 0)
		Error("Audio Bit Rate Count not specified");

	if (m_EncodeParams.GetParams().wfexAudioSrc.nChannels == 0 ||
		m_EncodeParams.GetParams().wfexAudioSrc.nSamplesPerSec == 0 ||
		m_EncodeParams.GetParams().wfexAudioSrc.wBitsPerSample == 0)
	{
		Error("Input audio information not specified. See EncodeParams.wfexAudioSrc");
	}

    return st;
}
Example #14
static int read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    int i, len, header_remaining;
    ASSContext *ass = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    int allocated[2]={0};
    uint8_t *p, **dst[2]={0};
    int pos[2]={0};

    st = av_new_stream(s, 0);
    if (!st)
        return -1;
    av_set_pts_info(st, 64, 1, 100);
    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codec->codec_id= CODEC_ID_SSA;

    header_remaining= INT_MAX;
    dst[0] = &st->codec->extradata;
    dst[1] = &ass->event_buffer;
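    /* dst[0] accumulates the script header into extradata, dst[1] collects the dialogue events */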
    while(!pb->eof_reached){
        uint8_t line[MAX_LINESIZE];

        len = ff_get_line(pb, line, sizeof(line));

        if(!memcmp(line, "[Events]", 8))
            header_remaining= 2;
        else if(line[0]=='[')
            header_remaining= INT_MAX;

        i= header_remaining==0;

        if(i && get_pts(line) == AV_NOPTS_VALUE)
            continue;

        p = av_fast_realloc(*(dst[i]), &allocated[i], pos[i]+MAX_LINESIZE);
        if(!p)
            goto fail;
        *(dst[i])= p;
        memcpy(p + pos[i], line, len+1);
        pos[i] += len;
        if(i) ass->event_count++;
        else  header_remaining--;
    }
    st->codec->extradata_size= pos[0];

    if(ass->event_count >= UINT_MAX / sizeof(*ass->event))
        goto fail;

    ass->event= av_malloc(ass->event_count * sizeof(*ass->event));
    p= ass->event_buffer;
    for(i=0; i<ass->event_count; i++){
        ass->event[i]= p;
        while(*p && *p != '\n')
            p++;
        p++;
    }

    qsort(ass->event, ass->event_count, sizeof(*ass->event), (void*)event_cmp);

    return 0;

fail:
    read_close(s);

    return -1;
}
static int vfw_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    struct vfw_ctx *ctx = s->priv_data;
    AVCodecContext *codec;
    AVStream *st;
    int devnum;
    int bisize;
    BITMAPINFO *bi;
    CAPTUREPARMS cparms;
    DWORD biCompression;
    WORD biBitCount;
    int width;
    int height;
    int ret;

    if(!ap->time_base.den) {
        av_log(s, AV_LOG_ERROR, "A time base must be specified.\n");
        return AVERROR_IO;
    }

    ctx->s = s;

    ctx->hwnd = capCreateCaptureWindow(NULL, 0, 0, 0, 0, 0, HWND_MESSAGE, 0);
    if(!ctx->hwnd) {
        av_log(s, AV_LOG_ERROR, "Could not create capture window.\n");
        return AVERROR_IO;
    }

    /* If atoi fails, devnum==0 and the default device is used */
    devnum = atoi(s->filename);

    ret = SendMessage(ctx->hwnd, WM_CAP_DRIVER_CONNECT, devnum, 0);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not connect to device.\n");
        DestroyWindow(ctx->hwnd);
        return AVERROR(ENODEV);
    }

    SendMessage(ctx->hwnd, WM_CAP_SET_OVERLAY, 0, 0);
    SendMessage(ctx->hwnd, WM_CAP_SET_PREVIEW, 0, 0);

    ret = SendMessage(ctx->hwnd, WM_CAP_SET_CALLBACK_VIDEOSTREAM, 0,
                      (LPARAM) videostream_cb);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not set video stream callback.\n");
        goto fail_io;
    }

    SetWindowLongPtr(ctx->hwnd, GWLP_USERDATA, (LONG_PTR) ctx);

    st = av_new_stream(s, 0);
    if(!st) {
        vfw_read_close(s);
        return AVERROR_NOMEM;
    }

    /* Set video format */
    bisize = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, 0, 0);
    if(!bisize)
        goto fail_io;
    bi = av_malloc(bisize);
    if(!bi) {
        vfw_read_close(s);
        return AVERROR_NOMEM;
    }
    ret = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, bisize, (LPARAM) bi);
    if(!ret)
        goto fail_bi;

    dump_bih(s, &bi->bmiHeader);

    width  = ap->width  ? ap->width  : bi->bmiHeader.biWidth ;
    height = ap->height ? ap->height : bi->bmiHeader.biHeight;
    bi->bmiHeader.biWidth  = width ;
    bi->bmiHeader.biHeight = height;

#if 0
    /* For testing yet unsupported compressions
     * Copy these values from user-supplied verbose information */
    bi->bmiHeader.biWidth       = 320;
    bi->bmiHeader.biHeight      = 240;
    bi->bmiHeader.biPlanes      = 1;
    bi->bmiHeader.biBitCount    = 12;
    bi->bmiHeader.biCompression = MKTAG('I','4','2','0');
    bi->bmiHeader.biSizeImage   = 115200;
    dump_bih(s, &bi->bmiHeader);
#endif

    ret = SendMessage(ctx->hwnd, WM_CAP_SET_VIDEOFORMAT, bisize, (LPARAM) bi);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not set Video Format.\n");
        goto fail_bi;
    }

    biCompression = bi->bmiHeader.biCompression;
    biBitCount = bi->bmiHeader.biBitCount;

    av_free(bi);

    /* Set sequence setup */
    ret = SendMessage(ctx->hwnd, WM_CAP_GET_SEQUENCE_SETUP, sizeof(cparms),
                      (LPARAM) &cparms);
    if(!ret)
        goto fail_io;

    dump_captureparms(s, &cparms);

    cparms.fYield = 1; // Spawn a background thread
    cparms.dwRequestMicroSecPerFrame =
                               (ap->time_base.num*1000000) / ap->time_base.den;
    cparms.fAbortLeftMouse = 0;
    cparms.fAbortRightMouse = 0;
    cparms.fCaptureAudio = 0;
    cparms.vKeyAbort = 0;

    ret = SendMessage(ctx->hwnd, WM_CAP_SET_SEQUENCE_SETUP, sizeof(cparms),
                      (LPARAM) &cparms);
    if(!ret)
        goto fail_io;

    codec = st->codec;
    codec->time_base = ap->time_base;
    codec->codec_type = CODEC_TYPE_VIDEO;
    codec->width = width;
    codec->height = height;
    codec->codec_id = CODEC_ID_RAWVIDEO;
    codec->pix_fmt = vfw_pixfmt(biCompression, biBitCount);
    if(biCompression == BI_RGB)
        codec->bits_per_coded_sample = biBitCount;

    av_set_pts_info(st, 32, 1, 1000);

    if(codec->pix_fmt == -1) {
        av_log(s, AV_LOG_ERROR, "Unknown compression type."
                         "Please report verbose (-v 99) debug information.\n");
        vfw_read_close(s);
        return AVERROR_PATCHWELCOME;
    }

    ctx->mutex = CreateMutex(NULL, 0, NULL);
    if(!ctx->mutex) {
        av_log(s, AV_LOG_ERROR, "Could not create Mutex.\n" );
        goto fail_io;
    }
    ctx->event = CreateEvent(NULL, 1, 0, NULL);
    if(!ctx->event) {
        av_log(s, AV_LOG_ERROR, "Could not create Event.\n" );
        goto fail_io;
    }

    ret = SendMessage(ctx->hwnd, WM_CAP_SEQUENCE_NOFILE, 0, 0);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not start capture sequence.\n" );
        goto fail_io;
    }

    return 0;

fail_bi:
    av_free(bi);

fail_io:
    vfw_read_close(s);
    return AVERROR_IO;
}
uint8_t lavMuxer::open(const char *filename,uint32_t inbitrate, ADM_MUXER_TYPE type, aviInfo *info,uint32_t videoExtraDataSize,
                        uint8_t *videoExtraData, WAVHeader *audioheader,uint32_t audioextraSize,uint8_t *audioextraData)
{
 AVCodecContext *c;
 	_type=type;
	_fps1000=info->fps1000;
	switch(_type)
	{
                case MUXER_TS:
                        fmt=guess_format("mpegts", NULL, NULL);
                        break;
		case MUXER_DVD:
			fmt = guess_format("dvd", NULL, NULL);
			break;
		case MUXER_VCD:
			fmt = guess_format("vcd", NULL, NULL);
			break;
		case MUXER_SVCD:
			fmt = guess_format("svcd", NULL, NULL);
			break;
                case MUXER_MP4:
                        fmt = guess_format("mp4", NULL, NULL);
                        break;
                case MUXER_PSP:
                        fmt = guess_format("psp", NULL, NULL);
                        break;                        
		default:
			fmt=NULL;
	}
	if (!fmt) 
	{
        	printf("Lav:Cannot guess format\n");
		return 0;
	}
	oc = av_alloc_format_context();
	if (!oc) 
	{
       		printf("Lav:Cannot allocate context\n");
		return 0;
	}
	oc->oformat = fmt;
	snprintf(oc->filename,1000,"file://%s",filename);
	// Video
	//________
	
	video_st = av_new_stream(oc, 0);
	if (!video_st) 
	{
		printf("Lav: new stream failed\n");
		return 0;
	}	
	
	c = video_st->codec;
	switch(_type)
	{
                case MUXER_MP4:
                        if(isMpeg4Compatible(info->fcc))
                        {
                                c->codec_id = CODEC_ID_MPEG4;
                                c->has_b_frames=1; // in doubt...
                        }else
                        {
                                if(isH264Compatible(info->fcc))
                                {
                                        c->has_b_frames=1; // in doubt...
                                        c->codec_id = CODEC_ID_H264;
                                        c->codec=new AVCodec;
                                        memset(c->codec,0,sizeof(AVCodec));
                                        c->codec->name=ADM_strdup("H264");
                                }
                                else
                                {
                                         c->codec_id = CODEC_ID_MPEG4; // Default value
                                        printf("Ooops, cant mux that...\n");
                                        printf("Ooops, cant mux that...\n");
                                        printf("Ooops, cant mux that...\n");
                                        //return 0;
                                }
                        }
                        if(videoExtraDataSize)
                        {
                                c->extradata=videoExtraData;
                                c->extradata_size= videoExtraDataSize;
                        }
                        c->rc_buffer_size=8*1024*224;
                        c->rc_max_rate=9500*1000;
                        c->rc_min_rate=0;
                        if(!inbitrate)
                                c->bit_rate=9000*1000;
                        else
                                c->bit_rate=inbitrate;
        
                        break;
                case MUXER_TS:
                        c->codec_id = CODEC_ID_MPEG2VIDEO;
                        c->rc_buffer_size=8*1024*224;
                        c->rc_max_rate=9500*1000;
                        c->rc_min_rate=0;
                        if(!inbitrate)
                                c->bit_rate=9000*1000;
                        else
                                c->bit_rate=inbitrate;
        
                        break;
		case MUXER_DVD:
			c->codec_id = CODEC_ID_MPEG2VIDEO;
			c->rc_buffer_size=8*1024*224;
			c->rc_max_rate=9500*1000;
			c->rc_min_rate=0;
			if(!inbitrate)
				c->bit_rate=9000*1000;
			else
				c->bit_rate=inbitrate;
	
			break;
		case MUXER_VCD:
			c->codec_id = CODEC_ID_MPEG1VIDEO;

			c->rc_buffer_size=8*1024*40;
			c->rc_max_rate=1152*1000;
			c->rc_min_rate=1152*1000;
			
			c->bit_rate=1152*1000;
			

			break;
		case MUXER_SVCD:
			c->codec_id = CODEC_ID_MPEG2VIDEO;

			c->rc_buffer_size=8*1024*112;
			c->rc_max_rate=2500*1000;
			c->rc_min_rate=0*1000;
			if(!inbitrate)
				c->bit_rate=2040*1000;
			else
				c->bit_rate=inbitrate;

			break;
		default:
			ADM_assert(0);
	}
	
	c->codec_type = CODEC_TYPE_VIDEO;
	c->flags=CODEC_FLAG_QSCALE;   
	c->width = info->width;  
	c->height = info->height; 
	
    	switch(_fps1000)
	{
		case 25000:
			 c->time_base= (AVRational){1001,25025};
			//c->frame_rate = 25025;  
			//c->frame_rate_base = 1001;	
			break;
		case 23976:
/*
			c->frame_rate = 24000;  
			c->frame_rate_base = 1001;	
			break;
*/
                        if(_type==MUXER_MP4)
                        {
                                 c->time_base= (AVRational){1001,24000};
                                break;
                        }
		case  29970:
			 c->time_base= (AVRational){1001,30000};
			//c->frame_rate = 30000;  
			//c->frame_rate_base = 1001;	
			break;
		default:
                        if(_type==MUXER_MP4)
                        {
                                c->time_base= (AVRational){1000,_fps1000};
                                break;
                        }
                        else
                        {
                          GUI_Error_HIG(_("Incompatible frame rate"), NULL);
                            return 0;
                        }
	}

			
	c->gop_size=15;
	c->max_b_frames=2;
	c->has_b_frames=1;

	
	// Audio
	//________
        if(audioheader)
        {
	audio_st = av_new_stream(oc, 1);
	if (!audio_st) 
	{
		printf("Lav: new stream failed\n");
		return 0;
	}

		
	c = audio_st->codec;
        c->frame_size=1024; // For AAC mainly, samples per frame
        switch(audioheader->encoding)
        {
                case WAV_AC3: c->codec_id = CODEC_ID_AC3;break;
                case WAV_MP2: c->codec_id = CODEC_ID_MP2;break;
                case WAV_MP3:
#warning FIXME : Probe deeper
                            c->frame_size=1152;
                            c->codec_id = CODEC_ID_MP3;
                            break;
                case WAV_PCM: 
                                // One chunk is 10 ms (1/100 of fq)
                                c->frame_size=4;
                                c->codec_id = CODEC_ID_PCM_S16LE;break;
                case WAV_AAC: 
                                c->extradata=audioextraData;
                                c->extradata_size= audioextraSize;
                                c->codec_id = CODEC_ID_AAC;
                                break;
                default:
                        printf("Cant mux that ! audio\n"); 
                        printf("Cant mux that ! audio\n");
                        c->codec_id = CODEC_ID_MP2;
                        return 0;
                        break;
        }
	c->codec_type = CODEC_TYPE_AUDIO;
	
	c->bit_rate = audioheader->byterate*8;
        c->rc_buffer_size=(c->bit_rate/(2*8)); // 500 ms worth
	_audioFq=c->sample_rate = audioheader->frequency;
	c->channels = audioheader->channels;
        _audioByterate=audioheader->byterate;
        }
        // /audio
	
	
//----------------------
	switch(_type)
	{
                case MUXER_MP4:
                        oc->mux_rate=10080*1000; // Needed ?
                        break;

                case MUXER_TS:
                        oc->mux_rate=10080*1000;
                        break;
		case MUXER_DVD:
			oc->packet_size=2048;
			oc->mux_rate=10080*1000;
			break;
		case MUXER_VCD:
			oc->packet_size=2324;
			oc->mux_rate=2352 * 75 * 8;
			
			break;
		case MUXER_SVCD:
			
			oc->packet_size=2324;
			oc->mux_rate=2*2352 * 75 * 8; // ?
			
			break;
		default:
			ADM_assert(0);
	}
	oc->preload=AV_TIME_BASE/10; // 100 ms preloading
	oc->max_delay=200*1000; // 200 ms
	
	if (av_set_parameters(oc, NULL) < 0) 
	{
		printf("Lav: set param failed \n");
		return 0;
	}
	 if (url_fopen(&(oc->pb), filename, URL_WRONLY) < 0) 
	 {
	 	printf("Lav: Failed to open file :%s\n",filename);
		return 0;
        }

	av_write_header(oc);
	dump_format(oc, 0, filename, 1);


	printf("lavformat mpeg muxer initialized\n");
	
	_running=1;

	one=(1000*1000*1000)/_fps1000; 
	_curDTS=one;

	return 1;
}
Example #17
0
av_cold static int fbdev_read_header(AVFormatContext *avctx,
                                     AVFormatParameters *ap)
{
    FBDevContext *fbdev = avctx->priv_data;
    AVStream *st = NULL;
    enum PixelFormat pix_fmt;
    int ret, flags = O_RDONLY;

    if (!(st = av_new_stream(avctx, 0)))
        return AVERROR(ENOMEM);
    av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in microseconds */

    if (ap->time_base.den <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid time base %d/%d\n",
               ap->time_base.num, ap->time_base.den);
        return AVERROR(EINVAL);
    }

    /* NONBLOCK is ignored by the fbdev driver, only set for consistency */
    if (avctx->flags & AVFMT_FLAG_NONBLOCK)
        flags |= O_NONBLOCK;

    if ((fbdev->fd = open(avctx->filename, flags)) == -1) {
        ret = AVERROR(errno);
        av_log(avctx, AV_LOG_ERROR,
               "Could not open framebuffer device '%s': %s\n",
               avctx->filename, strerror(ret));
        return ret;
    }

    if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0) {
        ret = AVERROR(errno);
        av_log(avctx, AV_LOG_ERROR,
               "FBIOGET_VSCREENINFO: %s\n", strerror(errno));
        goto fail;
    }

    if (ioctl(fbdev->fd, FBIOGET_FSCREENINFO, &fbdev->fixinfo) < 0) {
        ret = AVERROR(errno);
        av_log(avctx, AV_LOG_ERROR,
               "FBIOGET_FSCREENINFO: %s\n", strerror(errno));
        goto fail;
    }

    pix_fmt = get_pixfmt_from_fb_varinfo(&fbdev->varinfo);
    if (pix_fmt == PIX_FMT_NONE) {
        ret = AVERROR(EINVAL);
        av_log(avctx, AV_LOG_ERROR,
               "Framebuffer pixel format not supported.\n");
        goto fail;
    }

    fbdev->width           = fbdev->varinfo.xres;
    fbdev->heigth          = fbdev->varinfo.yres;
    fbdev->bytes_per_pixel = (fbdev->varinfo.bits_per_pixel + 7) >> 3;
    fbdev->frame_linesize  = fbdev->width * fbdev->bytes_per_pixel;
    fbdev->frame_size      = fbdev->frame_linesize * fbdev->heigth;
    fbdev->time_base       = ap->time_base;
    fbdev->time_frame      = AV_NOPTS_VALUE;
    fbdev->data = mmap(NULL, fbdev->fixinfo.smem_len, PROT_READ, MAP_SHARED, fbdev->fd, 0);
    if (fbdev->data == MAP_FAILED) {
        ret = AVERROR(errno);
        av_log(avctx, AV_LOG_ERROR, "Error in mmap(): %s\n", strerror(errno));
        goto fail;
    }

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = CODEC_ID_RAWVIDEO;
    st->codec->width      = fbdev->width;
    st->codec->height     = fbdev->heigth;
    st->codec->pix_fmt    = pix_fmt;
    st->codec->time_base  = ap->time_base;
    st->codec->bit_rate   =
        fbdev->width * fbdev->heigth * fbdev->bytes_per_pixel / av_q2d(ap->time_base) * 8;

    av_log(avctx, AV_LOG_INFO,
           "w:%d h:%d bpp:%d pixfmt:%s tb:%d/%d bit_rate:%d\n",
           fbdev->width, fbdev->heigth, fbdev->varinfo.bits_per_pixel,
           av_pix_fmt_descriptors[pix_fmt].name,
           ap->time_base.num, ap->time_base.den,
           st->codec->bit_rate);
    return 0;

fail:
    close(fbdev->fd);
    return ret;
}
Example #18
0
static AVStream *alloc_video_stream(RenderData *rd, int codec_id, AVFormatContext *of,
                                    int rectx, int recty, char *error, int error_size)
{
	AVStream *st;
	AVCodecContext *c;
	AVCodec *codec;

	error[0] = '\0';

	st = av_new_stream(of, 0);
	if (!st) return NULL;

	/* Set up the codec context */
	
	c = st->codec;
	c->codec_id = codec_id;
	c->codec_type = AVMEDIA_TYPE_VIDEO;

	/* Get some values from the current render settings */
	
	c->width = rectx;
	c->height = recty;

	/* FIXME: Really bad hack (tm) for NTSC support */
	if (ffmpeg_type == FFMPEG_DV && rd->frs_sec != 25) {
		c->time_base.den = 2997;
		c->time_base.num = 100;
	}
	else if ((float) ((int) rd->frs_sec_base) == rd->frs_sec_base) {
		c->time_base.den = rd->frs_sec;
		c->time_base.num = (int) rd->frs_sec_base;
	}
	else {
		c->time_base.den = rd->frs_sec * 100000;
		c->time_base.num = ((double) rd->frs_sec_base) * 100000;
	}
	
	c->gop_size = ffmpeg_gop_size;
	c->bit_rate = ffmpeg_video_bitrate * 1000;
	c->rc_max_rate = rd->ffcodecdata.rc_max_rate * 1000;
	c->rc_min_rate = rd->ffcodecdata.rc_min_rate * 1000;
	c->rc_buffer_size = rd->ffcodecdata.rc_buffer_size * 1024;

#if 0
	/* these options are not set in ffmpeg.c and lead to artifacts with MPEG-4
	 * see #33586: Encoding to mpeg4 makes first frame(s) blocky
	 */
	c->rc_initial_buffer_occupancy = rd->ffcodecdata.rc_buffer_size * 3 / 4;
	c->rc_buffer_aggressivity = 1.0;
#endif

	c->me_method = ME_EPZS;
	
	codec = avcodec_find_encoder(c->codec_id);
	if (!codec)
		return NULL;
	
	/* Be sure to use the correct pixel format(e.g. RGB, YUV) */

	if (codec->pix_fmts) {
		c->pix_fmt = codec->pix_fmts[0];
	}
	else {
		/* makes HuffYUV happy ... */
		c->pix_fmt = PIX_FMT_YUV422P;
	}

	if (ffmpeg_type == FFMPEG_XVID) {
		/* arghhhh ... */
		c->pix_fmt = PIX_FMT_YUV420P;
		c->codec_tag = (('D' << 24) + ('I' << 16) + ('V' << 8) + 'X');
	}

	if (codec_id == CODEC_ID_H264) {
		/* correct wrong default ffmpeg param which crash x264 */
		c->qmin = 10;
		c->qmax = 51;
	}
	
	/* Keep lossless encodes in the RGB domain. */
	if (codec_id == CODEC_ID_HUFFYUV) {
		/* HUFFYUV was PIX_FMT_YUV422P before */
		c->pix_fmt = PIX_FMT_RGB32;
	}

	if (codec_id == CODEC_ID_FFV1) {
#ifdef FFMPEG_FFV1_ALPHA_SUPPORTED
		if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
			c->pix_fmt = PIX_FMT_RGB32;
		}
		else {
			c->pix_fmt = PIX_FMT_BGR0;
		}
#else
		c->pix_fmt = PIX_FMT_RGB32;
#endif
	}

	if (codec_id == CODEC_ID_QTRLE) {
		if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
			c->pix_fmt = PIX_FMT_ARGB;
		}
	}

	if ((of->oformat->flags & AVFMT_GLOBALHEADER)
//		|| !strcmp(of->oformat->name, "mp4")
//	    || !strcmp(of->oformat->name, "mov")
//	    || !strcmp(of->oformat->name, "3gp")
	    )
	{
		PRINT("Using global header\n");
		c->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}
	
	/* Determine whether we are encoding interlaced material or not */
	if (rd->mode & R_FIELDS) {
		PRINT("Encoding interlaced video\n");
		c->flags |= CODEC_FLAG_INTERLACED_DCT;
		c->flags |= CODEC_FLAG_INTERLACED_ME;
	}

	/* xasp & yasp got float lately... */

	st->sample_aspect_ratio = c->sample_aspect_ratio = av_d2q(((double) rd->xasp / (double) rd->yasp), 255);

	set_ffmpeg_properties(rd, c, "video");
	
	if (avcodec_open(c, codec) < 0) {
		BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
		return NULL;
	}

	if (codec_id == CODEC_ID_QTRLE) {
		/* normally it should be enough to have a buffer of the actual image size,
		 * but some codecs like QTRLE might store extra information in this buffer,
		 * so it should be way larger */

		/* maximum video buffer size is 6 bytes per pixel, plus DPX header size (1664)
		 * (from FFmpeg sources) */
		int size = c->width * c->height;
		video_buffersize = 7 * size + 10000;
	}
	else
		video_buffersize = avpicture_get_size(c->pix_fmt, c->width, c->height);

	video_buffer = (uint8_t *)MEM_mallocN(video_buffersize * sizeof(uint8_t), "FFMPEG video buffer");
	
	current_frame = alloc_picture(c->pix_fmt, c->width, c->height);

	img_convert_ctx = sws_getContext(c->width, c->height, PIX_FMT_BGR32, c->width, c->height, c->pix_fmt, SWS_BICUBIC,
	                                 NULL, NULL, NULL);
	return st;
}
Example #19
0
File: aiff.c  Project: apakian/rtmp-cpp
/* aiff input */
static int aiff_read_header(AVFormatContext *s,
                            AVFormatParameters *ap)
{
    int size, filesize;
    offset_t offset = 0;
    uint32_t tag;
    unsigned version = AIFF_C_VERSION1;
    ByteIOContext *pb = s->pb;
    AVStream * st = s->streams[0];

    /* check FORM header */
    filesize = get_tag(pb, &tag);
    if (filesize < 0 || tag != MKTAG('F', 'O', 'R', 'M'))
        return AVERROR_INVALIDDATA;

    /* AIFF data type */
    tag = get_le32(pb);
    if (tag == MKTAG('A', 'I', 'F', 'F'))       /* Got an AIFF file */
        version = AIFF;
    else if (tag != MKTAG('A', 'I', 'F', 'C'))  /* An AIFF-C file then */
        return AVERROR_INVALIDDATA;

    filesize -= 4;

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    while (filesize > 0) {
        /* parse different chunks */
        size = get_tag(pb, &tag);
        if (size < 0)
            return size;

        filesize -= size + 8;

        switch (tag) {
        case MKTAG('C', 'O', 'M', 'M'):     /* Common chunk */
            /* Then for the complete header info */
            st->nb_frames = get_aiff_header(pb, st->codec, size, version);
            if (st->nb_frames < 0)
                return st->nb_frames;
            if (offset > 0) // COMM is after SSND
                goto got_sound;
            break;
        case MKTAG('F', 'V', 'E', 'R'):     /* Version chunk */
            version = get_be32(pb);
            break;
        case MKTAG('N', 'A', 'M', 'E'):     /* Sample name chunk */
            get_meta(pb, s->title, sizeof(s->title), size);
            break;
        case MKTAG('A', 'U', 'T', 'H'):     /* Author chunk */
            get_meta(pb, s->author, sizeof(s->author), size);
            break;
        case MKTAG('(', 'c', ')', ' '):     /* Copyright chunk */
            get_meta(pb, s->copyright, sizeof(s->copyright), size);
            break;
        case MKTAG('A', 'N', 'N', 'O'):     /* Annotation chunk */
            get_meta(pb, s->comment, sizeof(s->comment), size);
            break;
        case MKTAG('S', 'S', 'N', 'D'):     /* Sampled sound chunk */
            offset = get_be32(pb);      /* Offset of sound data */
            get_be32(pb);               /* BlockSize... don't care */
            offset += url_ftell(pb);    /* Compute absolute data offset */
            if (st->codec->block_align)    /* Assume COMM already parsed */
                goto got_sound;
            if (url_is_streamed(pb)) {
                av_log(s, AV_LOG_ERROR, "file is not seekable\n");
                return -1;
            }
            url_fskip(pb, size - 8);
            break;
        case MKTAG('w', 'a', 'v', 'e'):
            if ((uint64_t)size > (1<<30))
                return -1;
            st->codec->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codec->extradata)
                return AVERROR(ENOMEM);
            st->codec->extradata_size = size;
            get_buffer(pb, st->codec->extradata, size);
            break;
        default: /* Jump */
            if (size & 1)   /* Always even aligned */
                size++;
            url_fskip (pb, size);
        }
    }

    if (!st->codec->block_align) {
        av_log(s, AV_LOG_ERROR, "could not find COMM tag\n");
        return -1;
    }

got_sound:
    /* Now positioned, get the sound data start and end */
    if (st->nb_frames)
        s->file_size = st->nb_frames * st->codec->block_align;

    av_set_pts_info(st, 64, 1, st->codec->sample_rate);
    st->start_time = 0;
    st->duration = st->codec->frame_size ?
        st->nb_frames * st->codec->frame_size : st->nb_frames;

    /* Position the stream at the first block */
    url_fseek(pb, offset, SEEK_SET);

    return 0;
}
Example #20
0
static AVStream *alloc_audio_stream(RenderData *rd, int codec_id, AVFormatContext *of, char *error, int error_size)
{
	AVStream *st;
	AVCodecContext *c;
	AVCodec *codec;

	error[0] = '\0';

	st = av_new_stream(of, 1);
	if (!st) return NULL;

	c = st->codec;
	c->codec_id = codec_id;
	c->codec_type = AVMEDIA_TYPE_AUDIO;

	c->sample_rate = rd->ffcodecdata.audio_mixrate;
	c->bit_rate = ffmpeg_audio_bitrate * 1000;
	c->sample_fmt = AV_SAMPLE_FMT_S16;
	c->channels = rd->ffcodecdata.audio_channels;
	if (use_float_audio_buffer(codec_id)) {
		c->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
		c->sample_fmt = AV_SAMPLE_FMT_FLT;
	}
	codec = avcodec_find_encoder(c->codec_id);
	if (!codec) {
		//XXX error("Couldn't find a valid audio codec");
		return NULL;
	}

	set_ffmpeg_properties(rd, c, "audio");

	if (avcodec_open(c, codec) < 0) {
		//XXX error("Couldn't initialize audio codec");
		BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
		return NULL;
	}

	/* need to prevent floating point exception when using vorbis audio codec,
	 * initialize this value in the same way as it's done in FFmpeg itself (sergey) */
	st->codec->time_base.num = 1;
	st->codec->time_base.den = st->codec->sample_rate;

	audio_outbuf_size = FF_MIN_BUFFER_SIZE;

	if ((c->codec_id >= CODEC_ID_PCM_S16LE) && (c->codec_id <= CODEC_ID_PCM_DVD))
		audio_input_samples = audio_outbuf_size * 8 / c->bits_per_coded_sample / c->channels;
	else {
		audio_input_samples = c->frame_size;
		if (c->frame_size * c->channels * sizeof(int16_t) * 4 > audio_outbuf_size)
			audio_outbuf_size = c->frame_size * c->channels * sizeof(int16_t) * 4;
	}

	audio_output_buffer = (uint8_t *) av_malloc(audio_outbuf_size);

	if (use_float_audio_buffer(codec_id)) {
		audio_input_buffer = (uint8_t *) av_malloc(audio_input_samples * c->channels * sizeof(float));
	}
	else {
		audio_input_buffer = (uint8_t *) av_malloc(audio_input_samples * c->channels * sizeof(int16_t));
	}

	audio_time = 0.0f;

	return st;
}
Example #21
0
File: sapdec.c  Project: Jarlene/ffplayer
static int sap_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    struct SAPState *sap = s->priv_data;
    char host[1024], path[1024], url[1024];
    uint8_t recvbuf[1500];
    int port;
    int ret, i;
    AVInputFormat* infmt;

    if (!ff_network_init())
        return AVERROR(EIO);

    av_url_split(NULL, 0, NULL, 0, host, sizeof(host), &port,
                 path, sizeof(path), s->filename);
    if (port < 0)
        port = 9875;

    if (!host[0]) {
        /* Listen for announcements on sap.mcast.net if no host was specified */
        av_strlcpy(host, "224.2.127.254", sizeof(host));
    }

    ff_url_join(url, sizeof(url), "udp", NULL, host, port, "?localport=%d",
                port);
    ret = ffurl_open(&sap->ann_fd, url, AVIO_RDONLY);
    if (ret)
        goto fail;

    while (1) {
        int addr_type, auth_len;
        int pos;

        ret = ffurl_read(sap->ann_fd, recvbuf, sizeof(recvbuf) - 1);
        if (ret == AVERROR(EAGAIN))
            continue;
        if (ret < 0)
            goto fail;
        recvbuf[ret] = '\0'; /* Null terminate for easier parsing */
        if (ret < 8) {
            av_log(s, AV_LOG_WARNING, "Received too short packet\n");
            continue;
        }

        if ((recvbuf[0] & 0xe0) != 0x20) {
            av_log(s, AV_LOG_WARNING, "Unsupported SAP version packet "
                                      "received\n");
            continue;
        }

        if (recvbuf[0] & 0x04) {
            av_log(s, AV_LOG_WARNING, "Received stream deletion "
                                      "announcement\n");
            continue;
        }
        addr_type = recvbuf[0] & 0x10;
        auth_len  = recvbuf[1];
        sap->hash = AV_RB16(&recvbuf[2]);
        pos = 4;
        if (addr_type)
            pos += 16; /* IPv6 */
        else
            pos += 4; /* IPv4 */
        pos += auth_len * 4;
        if (pos + 4 >= ret) {
            av_log(s, AV_LOG_WARNING, "Received too short packet\n");
            continue;
        }
#define MIME "application/sdp"
        if (strcmp(&recvbuf[pos], MIME) == 0) {
            pos += strlen(MIME) + 1;
        } else if (strncmp(&recvbuf[pos], "v=0\r\n", 5) == 0) {
            // Direct SDP without a mime type
        } else {
            av_log(s, AV_LOG_WARNING, "Unsupported mime type %s\n",
                                      &recvbuf[pos]);
            continue;
        }

        sap->sdp = av_strdup(&recvbuf[pos]);
        break;
    }

    av_log(s, AV_LOG_VERBOSE, "SDP:\n%s\n", sap->sdp);
    ffio_init_context(&sap->sdp_pb, sap->sdp, strlen(sap->sdp), 0, NULL, NULL,
                  NULL, NULL);

    infmt = av_find_input_format("sdp");
    if (!infmt)
        goto fail;
    sap->sdp_ctx = avformat_alloc_context();
    if (!sap->sdp_ctx) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    sap->sdp_ctx->max_delay = s->max_delay;
    sap->sdp_ctx->pb        = &sap->sdp_pb;
    ret = avformat_open_input(&sap->sdp_ctx, "temp.sdp", infmt, NULL);
    if (ret < 0)
        goto fail;
    if (sap->sdp_ctx->ctx_flags & AVFMTCTX_NOHEADER)
        s->ctx_flags |= AVFMTCTX_NOHEADER;
    for (i = 0; i < sap->sdp_ctx->nb_streams; i++) {
        AVStream *st = av_new_stream(s, i);
        if (!st) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        avcodec_copy_context(st->codec, sap->sdp_ctx->streams[i]->codec);
        st->time_base = sap->sdp_ctx->streams[i]->time_base;
    }

    return 0;

fail:
    sap_read_close(s);
    return ret;
}
Example #22
0
File: smacker.c  Project: AndyA/ffmbc
static int smacker_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    ByteIOContext *pb = s->pb;
    SmackerContext *smk = s->priv_data;
    AVStream *st, *ast[7];
    int i, ret;
    int tbase;

    /* read and check header */
    smk->magic = get_le32(pb);
    if (smk->magic != MKTAG('S', 'M', 'K', '2') && smk->magic != MKTAG('S', 'M', 'K', '4'))
        return -1;
    smk->width = get_le32(pb);
    smk->height = get_le32(pb);
    smk->frames = get_le32(pb);
    smk->pts_inc = (int32_t)get_le32(pb);
    smk->flags = get_le32(pb);
    if(smk->flags & SMACKER_FLAG_RING_FRAME)
        smk->frames++;
    for(i = 0; i < 7; i++)
        smk->audio[i] = get_le32(pb);
    smk->treesize = get_le32(pb);

    if(smk->treesize >= UINT_MAX/4){ // smk->treesize + 16 must not overflow (this check is probably redundant)
        av_log(s, AV_LOG_ERROR, "treesize too large\n");
        return -1;
    }

//FIXME remove extradata "rebuilding"
    smk->mmap_size = get_le32(pb);
    smk->mclr_size = get_le32(pb);
    smk->full_size = get_le32(pb);
    smk->type_size = get_le32(pb);
    for(i = 0; i < 7; i++)
        smk->rates[i] = get_le32(pb);
    smk->pad = get_le32(pb);
    /* setup data */
    if(smk->frames > 0xFFFFFF) {
        av_log(s, AV_LOG_ERROR, "Too many frames: %i\n", smk->frames);
        return -1;
    }
    smk->frm_size = av_malloc(smk->frames * 4);
    smk->frm_flags = av_malloc(smk->frames);

    smk->is_ver4 = (smk->magic != MKTAG('S', 'M', 'K', '2'));

    /* read frame info */
    for(i = 0; i < smk->frames; i++) {
        smk->frm_size[i] = get_le32(pb);
    }
    for(i = 0; i < smk->frames; i++) {
        smk->frm_flags[i] = get_byte(pb);
    }

    /* init video codec */
    st = av_new_stream(s, 0);
    if (!st)
        return -1;
    smk->videoindex = st->index;
    st->codec->width = smk->width;
    st->codec->height = smk->height;
    st->codec->pix_fmt = PIX_FMT_PAL8;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_SMACKVIDEO;
    st->codec->codec_tag = smk->magic;
    /* Smacker uses 100000 as internal timebase */
    if(smk->pts_inc < 0)
        smk->pts_inc = -smk->pts_inc;
    else
        smk->pts_inc *= 100;
    tbase = 100000;
    av_reduce(&tbase, &smk->pts_inc, tbase, smk->pts_inc, (1UL<<31)-1);
    av_set_pts_info(st, 33, smk->pts_inc, tbase);
    st->duration = smk->frames;
    /* handle possible audio streams */
    for(i = 0; i < 7; i++) {
        smk->indexes[i] = -1;
        if(smk->rates[i] & 0xFFFFFF){
            ast[i] = av_new_stream(s, 0);
            smk->indexes[i] = ast[i]->index;
            ast[i]->codec->codec_type = AVMEDIA_TYPE_AUDIO;
            if (smk->rates[i] & SMK_AUD_BINKAUD) {
                ast[i]->codec->codec_id = CODEC_ID_BINKAUDIO_RDFT;
            } else if (smk->rates[i] & SMK_AUD_USEDCT) {
                ast[i]->codec->codec_id = CODEC_ID_BINKAUDIO_DCT;
            } else if (smk->rates[i] & SMK_AUD_PACKED){
                ast[i]->codec->codec_id = CODEC_ID_SMACKAUDIO;
                ast[i]->codec->codec_tag = MKTAG('S', 'M', 'K', 'A');
            } else {
                ast[i]->codec->codec_id = CODEC_ID_PCM_U8;
            }
            ast[i]->codec->channels = (smk->rates[i] & SMK_AUD_STEREO) ? 2 : 1;
            ast[i]->codec->sample_rate = smk->rates[i] & 0xFFFFFF;
            ast[i]->codec->bits_per_coded_sample = (smk->rates[i] & SMK_AUD_16BITS) ? 16 : 8;
            if(ast[i]->codec->bits_per_coded_sample == 16 && ast[i]->codec->codec_id == CODEC_ID_PCM_U8)
                ast[i]->codec->codec_id = CODEC_ID_PCM_S16LE;
            av_set_pts_info(ast[i], 64, 1, ast[i]->codec->sample_rate
                    * ast[i]->codec->channels * ast[i]->codec->bits_per_coded_sample / 8);
        }
    }


    /* load trees to extradata, they will be unpacked by decoder */
    st->codec->extradata = av_malloc(smk->treesize + 16);
    st->codec->extradata_size = smk->treesize + 16;
    if(!st->codec->extradata){
        av_log(s, AV_LOG_ERROR, "Cannot allocate %i bytes of extradata\n", smk->treesize + 16);
        av_free(smk->frm_size);
        av_free(smk->frm_flags);
        return -1;
    }
    ret = get_buffer(pb, st->codec->extradata + 16, st->codec->extradata_size - 16);
    if(ret != st->codec->extradata_size - 16){
        av_free(smk->frm_size);
        av_free(smk->frm_flags);
        return AVERROR(EIO);
    }
    ((int32_t*)st->codec->extradata)[0] = av_le2ne32(smk->mmap_size);
    ((int32_t*)st->codec->extradata)[1] = av_le2ne32(smk->mclr_size);
    ((int32_t*)st->codec->extradata)[2] = av_le2ne32(smk->full_size);
    ((int32_t*)st->codec->extradata)[3] = av_le2ne32(smk->type_size);

    smk->curstream = -1;
    smk->nextpos = url_ftell(pb);

    return 0;
}
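
The timebase arithmetic in the Smacker example above is easier to follow with concrete numbers. The snippet below is an illustrative sketch, not part of any of the listed projects; it assumes a hypothetical file with a 100 ms frame delay and reproduces only the av_reduce() step from smacker_read_header().

/* Illustrative only: a positive pts_inc of 100 (milliseconds) is scaled by 100
 * into units of 1/100000 s, then reduced against Smacker's 100000 Hz clock. */
#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    int tbase   = 100000;      /* Smacker internal timebase */
    int pts_inc = 100 * 100;   /* 100 ms frame delay in 1/100000 s units */

    av_reduce(&tbase, &pts_inc, tbase, pts_inc, (1UL << 31) - 1);
    /* av_set_pts_info(st, 33, pts_inc, tbase) would now give a 1/10 s tick */
    printf("time_base = %d/%d\n", pts_inc, tbase);   /* prints 1/10, i.e. 10 fps */
    return 0;
}
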
AVStream *FFMpegManager::addVideoStream(AVFormatContext *oc, int codec_id, int width, int height, int fps)
{
	AVCodecContext *c;
	AVStream *st;
	
	int w = width;
	int h = height;

	st = av_new_stream(oc, 0);
	if (!st) 
	{
		dError() << "Could not alloc stream";
		return 0;
	}

#if LIBAVCODEC_BUILD <= 4743
	c = &st->codec;
#else
	c = st->codec;
#endif

	c->codec_id = CodecID(codec_id);
	c->codec_type = CODEC_TYPE_VIDEO;

	/* put sample parameters */
	c->bit_rate = 400000;
	/* resolution must be a multiple of two */
	c->width = w;  // 520
	c->height = h; // 340

#if LIBAVCODEC_BUILD <= 4743
	/* frames per second */
	c->frame_rate = fps;
	c->frame_rate_base = 1;
#else
	c->time_base.den = fps;
	c->time_base.num = 1;
	c->gop_size = 12; /* emit one intra frame every twelve frames at most */
	c->pix_fmt = PIX_FMT_YUV420P;
#endif

	c->gop_size = 12; /* emit one intra frame every twelve frames at most */
	if (c->codec_id == CODEC_ID_MPEG2VIDEO) 
	{
		/* just for testing, we also add B frames */
		c->max_b_frames = 2;
	}
	if (c->codec_id == CODEC_ID_MPEG1VIDEO)
	{
        /* needed to avoid using macroblocks in which some coeffs overflow;
		this doesn't happen with normal video, it just happens here as the
		motion of the chroma plane doesn't match the luma plane */
		c->mb_decision=2;
	}
    // some formats want stream headers to be separate
	if(!strcmp(oc->oformat->name, "mp4") || !strcmp(oc->oformat->name, "mov") || !strcmp(oc->oformat->name, "3gp"))
	{
		c->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}
	
	return st;
}
Example #24
0
static int flic_read_header(AVFormatContext *s,
                            AVFormatParameters *ap)
{
    FlicDemuxContext *flic = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned char header[FLIC_HEADER_SIZE];
    AVStream *st, *ast;
    int speed;
    int magic_number;
    unsigned char preamble[FLIC_PREAMBLE_SIZE];

    flic->frame_number = 0;

    /* load the whole header and pull out the width and height */
    if (avio_read(pb, header, FLIC_HEADER_SIZE) != FLIC_HEADER_SIZE)
        return AVERROR(EIO);

    magic_number = AV_RL16(&header[4]);
    speed = AV_RL32(&header[0x10]);
    if (speed == 0)
        speed = FLIC_DEFAULT_SPEED;

    /* initialize the decoder streams */
    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    flic->video_stream_index = st->index;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_FLIC;
    st->codec->codec_tag = 0;  /* no fourcc */
    st->codec->width = AV_RL16(&header[0x08]);
    st->codec->height = AV_RL16(&header[0x0A]);

    if (!st->codec->width || !st->codec->height) {
        /* Ugly hack needed for the following sample: */
        /* http://samples.mplayerhq.hu/fli-flc/fli-bugs/specular.flc */
        av_log(s, AV_LOG_WARNING,
               "File with no specified width/height. Trying 640x480.\n");
        st->codec->width  = 640;
        st->codec->height = 480;
    }

    /* send over the whole 128-byte FLIC header */
    st->codec->extradata_size = FLIC_HEADER_SIZE;
    st->codec->extradata = av_malloc(FLIC_HEADER_SIZE);
    memcpy(st->codec->extradata, header, FLIC_HEADER_SIZE);

    /* peek at the preamble to detect TFTD videos - they seem to always start with an audio chunk */
    if (avio_read(pb, preamble, FLIC_PREAMBLE_SIZE) != FLIC_PREAMBLE_SIZE) {
        av_log(s, AV_LOG_ERROR, "Failed to peek at preamble\n");
        return AVERROR(EIO);
    }

    avio_seek(pb, -FLIC_PREAMBLE_SIZE, SEEK_CUR);

    /* Time to figure out the framerate:
     * If the first preamble's magic number is 0xAAAA then this file is from
     * X-COM: Terror from the Deep. If on the other hand there is a FLIC chunk
     * magic number at offset 0x10 assume this file is from Magic Carpet instead.
     * If neither of the above is true then this is a normal FLIC file.
     */
    if (AV_RL16(&preamble[4]) == FLIC_TFTD_CHUNK_AUDIO) {
        /* TFTD videos have an extra 22050 Hz 8-bit mono audio stream */
        ast = av_new_stream(s, 1);
        if (!ast)
            return AVERROR(ENOMEM);

        flic->audio_stream_index = ast->index;

        /* all audio frames are the same size, so use the size of the first chunk for block_align */
        ast->codec->block_align = AV_RL32(&preamble[0]);
        ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        ast->codec->codec_id = CODEC_ID_PCM_U8;
        ast->codec->codec_tag = 0;
        ast->codec->sample_rate = FLIC_TFTD_SAMPLE_RATE;
        ast->codec->channels = 1;
        ast->codec->sample_fmt = AV_SAMPLE_FMT_U8;
        ast->codec->bit_rate = ast->codec->sample_rate * 8;
        ast->codec->bits_per_coded_sample = 8;
        ast->codec->channel_layout = AV_CH_LAYOUT_MONO;
        ast->codec->extradata_size = 0;

        /* Since the header information is incorrect we have to figure out the
         * framerate using block_align and the fact that the audio is 22050 Hz.
         * We usually have two cases: 2205 -> 10 fps and 1470 -> 15 fps */
        av_set_pts_info(st, 64, ast->codec->block_align, FLIC_TFTD_SAMPLE_RATE);
        av_set_pts_info(ast, 64, 1, FLIC_TFTD_SAMPLE_RATE);
    } else if (AV_RL16(&header[0x10]) == FLIC_CHUNK_MAGIC_1) {
        av_set_pts_info(st, 64, FLIC_MC_SPEED, 70);

        /* rewind the stream since the first chunk is at offset 12 */
        avio_seek(pb, 12, SEEK_SET);

        /* send over abbreviated FLIC header chunk */
        av_free(st->codec->extradata);
        st->codec->extradata_size = 12;
        st->codec->extradata = av_malloc(12);
        memcpy(st->codec->extradata, header, 12);

    } else if (magic_number == FLIC_FILE_MAGIC_1) {
        av_set_pts_info(st, 64, speed, 70);
    } else if ((magic_number == FLIC_FILE_MAGIC_2) ||
               (magic_number == FLIC_FILE_MAGIC_3)) {
        av_set_pts_info(st, 64, speed, 1000);
    } else {
        av_log(s, AV_LOG_INFO, "Invalid or unsupported magic chunk in file\n");
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
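
The framerate guess in the TFTD branch above follows directly from the chunk size and the fixed 22050 Hz sample rate; the sketch below is illustrative only and simply works through the arithmetic for the 2205-byte case mentioned in the comment.

/* Illustrative only: one 2205-byte chunk of 8-bit mono audio at 22050 Hz
 * lasts 2205/22050 = 0.1 s, so av_set_pts_info(st, 64, block_align, 22050)
 * gives the video stream a 1/10 s tick, i.e. 10 frames per second. */
#include <stdio.h>

int main(void)
{
    int block_align = 2205;    /* bytes per TFTD audio chunk */
    int sample_rate = 22050;   /* fixed TFTD sample rate, 8-bit mono */
    double spf = (double)block_align / sample_rate;
    printf("%.2f s per frame -> %.0f fps\n", spf, 1.0 / spf);
    return 0;
}
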
Example #25
0
void VideoStream::SetupCodec( int colours, int subpixelorder, int width, int height, int bitrate, double frame_rate )
{
	/* ffmpeg format matching */
	switch(colours) {
	  case ZM_COLOUR_RGB24:
	  {
	    if(subpixelorder == ZM_SUBPIX_ORDER_BGR) {
	      /* BGR subpixel order */
	      pf = PIX_FMT_BGR24;
	    } else {
	      /* Assume RGB subpixel order */
	      pf = PIX_FMT_RGB24;
	    }
	    break;
	  }
	  case ZM_COLOUR_RGB32:
	  {
	    if(subpixelorder == ZM_SUBPIX_ORDER_ARGB) {
	      /* ARGB subpixel order */
	      pf = PIX_FMT_ARGB;
	    } else if(subpixelorder == ZM_SUBPIX_ORDER_ABGR) {
	      /* ABGR subpixel order */
	      pf = PIX_FMT_ABGR;
	    } else if(subpixelorder == ZM_SUBPIX_ORDER_BGRA) {
	      /* BGRA subpixel order */
	      pf = PIX_FMT_BGRA;
	    } else {
	      /* Assume RGBA subpixel order */
	      pf = PIX_FMT_RGBA;
	    }
	    break;
	  }
	  case ZM_COLOUR_GRAY8:
	    pf = PIX_FMT_GRAY8;
	    break;
	  default:
	    Panic("Unexpected colours: %d",colours);
	    break;
	}

	/* add the video streams using the default format codecs
	   and initialize the codecs */
	ost = NULL;
	if (of->video_codec != AV_CODEC_ID_NONE)
	{
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
		ost = av_new_stream(ofc, 0);
#else
      ost = avformat_new_stream(ofc, 0);
#endif
		if (!ost)
		{
			Panic( "Could not alloc stream" );
		}
		
#if ZM_FFMPEG_SVN
		AVCodecContext *c = ost->codec;
#else
		AVCodecContext *c = &ost->codec;
#endif

		c->codec_id = of->video_codec;
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
		c->codec_type = AVMEDIA_TYPE_VIDEO;
#else
		c->codec_type = CODEC_TYPE_VIDEO;
#endif

		/* put sample parameters */
		c->bit_rate = bitrate;
		c->pix_fmt = PIX_FMT_YUV420P;
		/* resolution must be a multiple of two */
		c->width = width;
		c->height = height;
#if ZM_FFMPEG_SVN
		/* time base: this is the fundamental unit of time (in seconds) in terms
		   of which frame timestamps are represented. for fixed-fps content,
		   timebase should be 1/framerate and timestamp increments should be
		   identically 1. */
		//c->time_base.den = (int)(frame_rate*100);
		//c->time_base.num = 100;
		c->time_base.den = frame_rate;
		c->time_base.num = 1;
#else
		/* frames per second */
		c->frame_rate = frame_rate;
		c->frame_rate_base = 1;
#endif
		//c->gop_size = frame_rate/2; /* emit one intra frame every half second or so */
		c->gop_size = 12;
		if ( c->gop_size < 3 )
			c->gop_size = 3;
		// some formats want stream headers to be separate
		if(!strcmp(ofc->oformat->name, "mp4") || !strcmp(ofc->oformat->name, "mov") || !strcmp(ofc->oformat->name, "3gp"))
			c->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}
}
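
The version guard in the example above already switches to avformat_new_stream() for libavformat 53.4 and newer. For comparison, here is a minimal sketch of the same stream-creation step written against the current API, where codec parameters live in st->codecpar (added in a much later libavformat than the 53.4 cut-off); it is not taken from any of the listed projects and the values are illustrative.

#include <libavformat/avformat.h>

/* Sketch only: create a raw-video output stream the modern way. */
static int add_raw_video_stream(AVFormatContext *oc, int width, int height, int fps)
{
    AVStream *st = avformat_new_stream(oc, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->time_base            = (AVRational){ 1, fps };
    st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codecpar->codec_id   = AV_CODEC_ID_RAWVIDEO;
    st->codecpar->format     = AV_PIX_FMT_YUV420P;
    st->codecpar->width      = width;
    st->codecpar->height     = height;
    return 0;
}
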
Example #26
0
static int ape_read_header(AVFormatContext * s, AVFormatParameters * ap)
{
    ByteIOContext *pb = &s->pb;
    APEContext *ape = s->priv_data;
    AVStream *st;
    uint32_t tag;
    int i;
    int total_blocks;
    int64_t pts;

    /* TODO: Skip any leading junk such as id3v2 tags */
    ape->junklength = 0;

    tag = get_le32(pb);
    if (tag != MKTAG('M', 'A', 'C', ' '))
        return -1;

    ape->fileversion = get_le16(pb);

    if (ape->fileversion < APE_MIN_VERSION || ape->fileversion > APE_MAX_VERSION) {
        av_log(s, AV_LOG_ERROR, "Unsupported file version - %d.%02d\n", ape->fileversion / 1000, (ape->fileversion % 1000) / 10);
        return -1;
    }

    if (ape->fileversion >= 3980) {
        ape->padding1             = get_le16(pb);
        ape->descriptorlength     = get_le32(pb);
        ape->headerlength         = get_le32(pb);
        ape->seektablelength      = get_le32(pb);
        ape->wavheaderlength      = get_le32(pb);
        ape->audiodatalength      = get_le32(pb);
        ape->audiodatalength_high = get_le32(pb);
        ape->wavtaillength        = get_le32(pb);
        get_buffer(pb, ape->md5, 16);

        /* Skip any unknown bytes at the end of the descriptor.
           This is for future compatibility */
        if (ape->descriptorlength > 52)
            url_fseek(pb, ape->descriptorlength - 52, SEEK_CUR);

        /* Read header data */
        ape->compressiontype      = get_le16(pb);
        ape->formatflags          = get_le16(pb);
        ape->blocksperframe       = get_le32(pb);
        ape->finalframeblocks     = get_le32(pb);
        ape->totalframes          = get_le32(pb);
        ape->bps                  = get_le16(pb);
        ape->channels             = get_le16(pb);
        ape->samplerate           = get_le32(pb);
    } else {
        ape->descriptorlength = 0;
        ape->headerlength = 32;

        ape->compressiontype      = get_le16(pb);
        ape->formatflags          = get_le16(pb);
        ape->channels             = get_le16(pb);
        ape->samplerate           = get_le32(pb);
        ape->wavheaderlength      = get_le32(pb);
        ape->wavtaillength        = get_le32(pb);
        ape->totalframes          = get_le32(pb);
        ape->finalframeblocks     = get_le32(pb);

        if (ape->formatflags & MAC_FORMAT_FLAG_HAS_PEAK_LEVEL) {
            url_fseek(pb, 4, SEEK_CUR); /* Skip the peak level */
            ape->headerlength += 4;
        }

        if (ape->formatflags & MAC_FORMAT_FLAG_HAS_SEEK_ELEMENTS) {
            ape->seektablelength = get_le32(pb);
            ape->headerlength += 4;
            ape->seektablelength *= sizeof(int32_t);
        } else
            ape->seektablelength = ape->totalframes * sizeof(int32_t);

        if (ape->formatflags & MAC_FORMAT_FLAG_8_BIT)
            ape->bps = 8;
        else if (ape->formatflags & MAC_FORMAT_FLAG_24_BIT)
            ape->bps = 24;
        else
            ape->bps = 16;

        if (ape->fileversion >= 3950)
            ape->blocksperframe = 73728 * 4;
        else if (ape->fileversion >= 3900 || (ape->fileversion >= 3800  && ape->compressiontype >= 4000))
            ape->blocksperframe = 73728;
        else
            ape->blocksperframe = 9216;

        /* Skip any stored wav header */
        if (!(ape->formatflags & MAC_FORMAT_FLAG_CREATE_WAV_HEADER))
            url_fskip(pb, ape->wavheaderlength);
    }

    if(ape->totalframes > UINT_MAX / sizeof(APEFrame)){
        av_log(s, AV_LOG_ERROR, "Too many frames: %d\n", ape->totalframes);
        return -1;
    }
    ape->frames       = av_malloc(ape->totalframes * sizeof(APEFrame));
    if(!ape->frames)
        return AVERROR_NOMEM;
    ape->firstframe   = ape->junklength + ape->descriptorlength + ape->headerlength + ape->seektablelength + ape->wavheaderlength;
    ape->currentframe = 0;


    ape->totalsamples = ape->finalframeblocks;
    if (ape->totalframes > 1)
        ape->totalsamples += ape->blocksperframe * (ape->totalframes - 1);

    if (ape->seektablelength > 0) {
        ape->seektable = av_malloc(ape->seektablelength);
        for (i = 0; i < ape->seektablelength / sizeof(uint32_t); i++)
            ape->seektable[i] = get_le32(pb);
    }

    ape->frames[0].pos     = ape->firstframe;
    ape->frames[0].nblocks = ape->blocksperframe;
    ape->frames[0].skip    = 0;
    for (i = 1; i < ape->totalframes; i++) {
        ape->frames[i].pos      = ape->seektable[i]; //ape->frames[i-1].pos + ape->blocksperframe;
        ape->frames[i].nblocks  = ape->blocksperframe;
        ape->frames[i - 1].size = ape->frames[i].pos - ape->frames[i - 1].pos;
        ape->frames[i].skip     = (ape->frames[i].pos - ape->frames[0].pos) & 3;
    }
    ape->frames[ape->totalframes - 1].size    = ape->finalframeblocks * 4;
    ape->frames[ape->totalframes - 1].nblocks = ape->finalframeblocks;

    for (i = 0; i < ape->totalframes; i++) {
        if(ape->frames[i].skip){
            ape->frames[i].pos  -= ape->frames[i].skip;
            ape->frames[i].size += ape->frames[i].skip;
        }
        ape->frames[i].size = (ape->frames[i].size + 3) & ~3;
    }


    ape_dumpinfo(ape);

    /* try to read APE tags */
    if (!url_is_streamed(pb)) {
        ape_parse_tag(s);
        url_fseek(pb, 0, SEEK_SET);
    }

    av_log(s, AV_LOG_DEBUG, "Decoding file - v%d.%02d, compression level %d\n", ape->fileversion / 1000, (ape->fileversion % 1000) / 10, ape->compressiontype);

    /* now we are ready: build format streams */
    st = av_new_stream(s, 0);
    if (!st)
        return -1;

    total_blocks = (ape->totalframes == 0) ? 0 : ((ape->totalframes - 1) * ape->blocksperframe) + ape->finalframeblocks;

    st->codec->codec_type      = CODEC_TYPE_AUDIO;
    st->codec->codec_id        = CODEC_ID_APE;
    st->codec->codec_tag       = MKTAG('A', 'P', 'E', ' ');
    st->codec->channels        = ape->channels;
    st->codec->sample_rate     = ape->samplerate;
    st->codec->bits_per_sample = ape->bps;
    st->codec->frame_size      = MAC_SUBFRAME_SIZE;

    st->nb_frames = ape->totalframes;
    s->start_time = 0;
    s->duration   = (int64_t) total_blocks * AV_TIME_BASE / ape->samplerate;
    av_set_pts_info(st, 64, MAC_SUBFRAME_SIZE, ape->samplerate);

    st->codec->extradata = av_malloc(APE_EXTRADATA_SIZE);
    st->codec->extradata_size = APE_EXTRADATA_SIZE;
    AV_WL16(st->codec->extradata + 0, ape->fileversion);
    AV_WL16(st->codec->extradata + 2, ape->compressiontype);
    AV_WL16(st->codec->extradata + 4, ape->formatflags);

    pts = 0;
    for (i = 0; i < ape->totalframes; i++) {
        ape->frames[i].pts = pts;
        av_add_index_entry(st, ape->frames[i].pos, ape->frames[i].pts, 0, 0, AVINDEX_KEYFRAME);
        pts += ape->blocksperframe / MAC_SUBFRAME_SIZE;
    }

    return 0;
}
Example #27
0
MediaRet MediaRecorder::setup_video_stream(const char *fname, int w, int h, int d)
{
    AVCodecContext *ctx;
    vid_st = av_new_stream(oc, 0);
    if(!vid_st) {
	avformat_free_context(oc);
	oc = NULL;
	return MRET_ERR_NOMEM;
    }
    ctx = vid_st->codec;
    ctx->codec_id = oc->oformat->video_codec;
    ctx->codec_type = AVMEDIA_TYPE_VIDEO;
    ctx->width = w;
    ctx->height = h;
    ctx->time_base.den = 60;
    ctx->time_base.num = 1;
    // dunno if any of these help; some output just looks plain crappy
    // will have to investigate further
    ctx->bit_rate = 400000;
    ctx->gop_size = 12;
    ctx->max_b_frames = 2;
    switch(d) {
    case 16:
	// FIXME: test & make endian-neutral
	pixfmt = PIX_FMT_RGB565LE;
	break;
    case 24:
	pixfmt = PIX_FMT_RGB24;
	break;
    case 32:
    default: // should never be anything else
	pixfmt = PIX_FMT_RGBA;
	break;
    }
    ctx->pix_fmt = pixfmt;
    pixsize = d >> 3;
    linesize = pixsize * w;
    ctx->max_b_frames = 2;
    if(oc->oformat->flags & AVFMT_GLOBALHEADER)
	ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;

    AVCodec *codec = avcodec_find_encoder(oc->oformat->video_codec);
    // make sure RGB is supported (mostly not); codec may be NULL here and is
    // reported as an error below, so guard the dereference
    if(codec && codec->pix_fmts) {
	const enum PixelFormat *p;
	int64_t mask = 0;
	for(p = codec->pix_fmts; *p != -1; p++) {
	    // may get complaints about 1LL; thus the cast
	    mask |= ((int64_t)1) << *p;
	    if(*p == pixfmt)
		break;
	}
	if(*p == -1) {
	    // if not supported, use a converter to the next best format
	    // this is swscale, the converter used by the output demo
	    enum PixelFormat dp = (PixelFormat)avcodec_find_best_pix_fmt(mask, pixfmt, 0, NULL);
	    if(dp == -1)
		dp = codec->pix_fmts[0];
	    if(!(convpic = avcodec_alloc_frame()) ||
	       avpicture_alloc((AVPicture *)convpic, dp, w, h) < 0) {
		avformat_free_context(oc);
		oc = NULL;
		return MRET_ERR_NOMEM;
	    }
#if LIBSWSCALE_VERSION_INT < AV_VERSION_INT(0, 12, 0)
	    converter = sws_getContext(w, h, pixfmt, w, h, dp, SWS_BICUBIC,
				       NULL, NULL, NULL);
#else
	    converter = sws_alloc_context();
	    // what a convoluted, inefficient way to set options
	    av_set_int(converter, "sws_flags", SWS_BICUBIC);
	    av_set_int(converter, "srcw", w);
	    av_set_int(converter, "srch", h);
	    av_set_int(converter, "dstw", w);
	    av_set_int(converter, "dsth", h);
	    av_set_int(converter, "src_format", pixfmt);
	    av_set_int(converter, "dst_format", dp);
	    sws_init_context(converter, NULL, NULL);
#endif
	    ctx->pix_fmt = dp;
	}
    }
    if(!codec || avcodec_open(ctx, codec)) {
	avformat_free_context(oc);
	oc = NULL;
	return MRET_ERR_NOCODEC;
    }

    return MRET_OK;
}
Example #28
0
File: segafilm.c  Project: suborb/reelvdr
static int film_read_header(AVFormatContext *s,
                            AVFormatParameters *ap)
{
    FilmDemuxContext *film = (FilmDemuxContext *)s->priv_data;
    ByteIOContext *pb = &s->pb;
    AVStream *st;
    unsigned char scratch[256];
    int i;
    unsigned int data_offset;
    unsigned int audio_frame_counter;

    film->sample_table = NULL;
    film->stereo_buffer = NULL;
    film->stereo_buffer_size = 0;

    /* load the main FILM header */
    if (get_buffer(pb, scratch, 16) != 16)
        return AVERROR_IO;
    data_offset = BE_32(&scratch[4]);
    film->version = BE_32(&scratch[8]);

    /* load the FDSC chunk */
    if (film->version == 0) {
        /* special case for Lemmings .film files; 20-byte header */
        if (get_buffer(pb, scratch, 20) != 20)
            return AVERROR_IO;
        /* make some assumptions about the audio parameters */
        film->audio_type = CODEC_ID_PCM_S8;
        film->audio_samplerate = 22050;
        film->audio_channels = 1;
        film->audio_bits = 8;
    } else {
        /* normal Saturn .cpk files; 32-byte header */
        if (get_buffer(pb, scratch, 32) != 32)
            return AVERROR_IO;
        film->audio_samplerate = BE_16(&scratch[24]);
        film->audio_channels = scratch[21];
        film->audio_bits = scratch[22];
        if (film->audio_bits == 8)
            film->audio_type = CODEC_ID_PCM_S8;
        else if (film->audio_bits == 16)
            film->audio_type = CODEC_ID_PCM_S16BE;
        else
            film->audio_type = 0;
    }

    if (BE_32(&scratch[0]) != FDSC_TAG)
        return AVERROR_INVALIDDATA;

    film->cvid_extra_bytes = 0;
    if (BE_32(&scratch[8]) == CVID_TAG) {
        film->video_type = CODEC_ID_CINEPAK;
        if (film->version)
            film->cvid_extra_bytes = 2;
        else
            film->cvid_extra_bytes = 6;  /* Lemmings 3DO case */
    } else
        film->video_type = 0;

    /* initialize the decoder streams */
    if (film->video_type) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR_NOMEM;
        film->video_stream_index = st->index;
        st->codec->codec_type = CODEC_TYPE_VIDEO;
        st->codec->codec_id = film->video_type;
        st->codec->codec_tag = 0;  /* no fourcc */
        st->codec->width = BE_32(&scratch[16]);
        st->codec->height = BE_32(&scratch[12]);
    }

    if (film->audio_type) {
        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR_NOMEM;
        film->audio_stream_index = st->index;
        st->codec->codec_type = CODEC_TYPE_AUDIO;
        st->codec->codec_id = film->audio_type;
        st->codec->codec_tag = 1;
        st->codec->channels = film->audio_channels;
        st->codec->bits_per_sample = film->audio_bits;
        st->codec->sample_rate = film->audio_samplerate;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_sample;
        st->codec->block_align = st->codec->channels * 
            st->codec->bits_per_sample / 8;
    }

    /* load the sample table */
    if (get_buffer(pb, scratch, 16) != 16)
        return AVERROR_IO;
    if (BE_32(&scratch[0]) != STAB_TAG)
        return AVERROR_INVALIDDATA;
    film->base_clock = BE_32(&scratch[8]);
    film->sample_count = BE_32(&scratch[12]);
    if(film->sample_count >= UINT_MAX / sizeof(film_sample_t))
        return -1;
    film->sample_table = av_malloc(film->sample_count * sizeof(film_sample_t));
    
    for(i=0; i<s->nb_streams; i++)
        av_set_pts_info(s->streams[i], 33, 1, film->base_clock);
    
    audio_frame_counter = 0;
    for (i = 0; i < film->sample_count; i++) {
        /* load the next sample record and transfer it to an internal struct */
        if (get_buffer(pb, scratch, 16) != 16) {
            av_free(film->sample_table);
            return AVERROR_IO;
        }
        film->sample_table[i].sample_offset = 
            data_offset + BE_32(&scratch[0]);
        film->sample_table[i].sample_size = BE_32(&scratch[4]);
        if (BE_32(&scratch[8]) == 0xFFFFFFFF) {
            film->sample_table[i].stream = film->audio_stream_index;
            film->sample_table[i].pts = audio_frame_counter;
            film->sample_table[i].pts *= film->base_clock;
            film->sample_table[i].pts /= film->audio_samplerate;

            audio_frame_counter += (film->sample_table[i].sample_size /
                (film->audio_channels * film->audio_bits / 8));
        } else {
            film->sample_table[i].stream = film->video_stream_index;
            film->sample_table[i].pts = BE_32(&scratch[8]) & 0x7FFFFFFF;
            film->sample_table[i].keyframe = (scratch[8] & 0x80) ? 0 : 1;
        }
    }

    film->current_sample = 0;

    return 0;
}
Example #29
0
File: v4l.c  Project: Haaaaaank/avbin
static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
    VideoData *s = s1->priv_data;
    AVStream *st;
    int width, height;
    int video_fd, frame_size;
    int ret, frame_rate, frame_rate_base;
    int desired_palette, desired_depth;
    struct video_tuner tuner;
    struct video_audio audio;
    struct video_picture pict;
    int j;
    int vformat_num = FF_ARRAY_ELEMS(video_formats);

    if (ap->width <= 0 || ap->height <= 0) {
        av_log(s1, AV_LOG_ERROR, "Wrong size (%dx%d)\n", ap->width, ap->height);
        return -1;
    }
    if (ap->time_base.den <= 0) {
        av_log(s1, AV_LOG_ERROR, "Wrong time base (%d)\n", ap->time_base.den);
        return -1;
    }

    width = ap->width;
    height = ap->height;
    frame_rate      = ap->time_base.den;
    frame_rate_base = ap->time_base.num;

    if((unsigned)width > 32767 || (unsigned)height > 32767) {
        av_log(s1, AV_LOG_ERROR, "Capture size is out of range: %dx%d\n",
            width, height);

        return -1;
    }

    st = av_new_stream(s1, 0);
    if (!st)
        return AVERROR(ENOMEM);
    av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    s->width = width;
    s->height = height;
    s->frame_rate      = frame_rate;
    s->frame_rate_base = frame_rate_base;

    video_fd = open(s1->filename, O_RDWR);
    if (video_fd < 0) {
        av_log(s1, AV_LOG_ERROR, "%s: %s\n", s1->filename, strerror(errno));
        goto fail;
    }

    if (ioctl(video_fd,VIDIOCGCAP, &s->video_cap) < 0) {
        av_log(s1, AV_LOG_ERROR, "VIDIOCGCAP: %s\n", strerror(errno));
        goto fail;
    }

    if (!(s->video_cap.type & VID_TYPE_CAPTURE)) {
        av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not handle capture\n");
        goto fail;
    }

    desired_palette = -1;
    desired_depth = -1;
    for (j = 0; j < vformat_num; j++) {
        if (ap->pix_fmt == video_formats[j].pix_fmt) {
            desired_palette = video_formats[j].palette;
            desired_depth = video_formats[j].depth;
            break;
        }
    }

    /* set tv standard */
    if (ap->standard && !ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
        if (!strcasecmp(ap->standard, "pal"))
            tuner.mode = VIDEO_MODE_PAL;
        else if (!strcasecmp(ap->standard, "secam"))
            tuner.mode = VIDEO_MODE_SECAM;
        else
            tuner.mode = VIDEO_MODE_NTSC;
        ioctl(video_fd, VIDIOCSTUNER, &tuner);
    }

    /* unmute audio */
    audio.audio = 0;
    ioctl(video_fd, VIDIOCGAUDIO, &audio);
    memcpy(&s->audio_saved, &audio, sizeof(audio));
    audio.flags &= ~VIDEO_AUDIO_MUTE;
    ioctl(video_fd, VIDIOCSAUDIO, &audio);

    ioctl(video_fd, VIDIOCGPICT, &pict);
#if 0
    printf("v4l: colour=%d hue=%d brightness=%d constrast=%d whiteness=%d\n",
           pict.colour,
           pict.hue,
           pict.brightness,
           pict.contrast,
           pict.whiteness);
#endif
    /* try to choose a suitable video format */
    pict.palette = desired_palette;
    pict.depth= desired_depth;
    if (desired_palette == -1 || (ret = ioctl(video_fd, VIDIOCSPICT, &pict)) < 0) {
        for (j = 0; j < vformat_num; j++) {
            pict.palette = video_formats[j].palette;
            pict.depth = video_formats[j].depth;
            if (-1 != ioctl(video_fd, VIDIOCSPICT, &pict))
                break;
        }
        if (j >= vformat_num)
            goto fail1;
    }

    ret = ioctl(video_fd,VIDIOCGMBUF,&s->gb_buffers);
    if (ret < 0) {
        /* try to use read based access */
        struct video_window win;
        int val;

        win.x = 0;
        win.y = 0;
        win.width = width;
        win.height = height;
        win.chromakey = -1;
        win.flags = 0;

        ioctl(video_fd, VIDIOCSWIN, &win);

        s->frame_format = pict.palette;

        val = 1;
        ioctl(video_fd, VIDIOCCAPTURE, &val);

        s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;
        s->use_mmap = 0;
    } else {
        s->video_buf = mmap(0,s->gb_buffers.size,PROT_READ|PROT_WRITE,MAP_SHARED,video_fd,0);
        if ((unsigned char*)-1 == s->video_buf) {
            s->video_buf = mmap(0,s->gb_buffers.size,PROT_READ|PROT_WRITE,MAP_PRIVATE,video_fd,0);
            if ((unsigned char*)-1 == s->video_buf) {
                av_log(s1, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
                goto fail;
            }
        }
        s->gb_frame = 0;
        s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;

        /* start to grab the first frame */
        s->gb_buf.frame = s->gb_frame % s->gb_buffers.frames;
        s->gb_buf.height = height;
        s->gb_buf.width = width;
        s->gb_buf.format = pict.palette;

        ret = ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
        if (ret < 0) {
            if (errno != EAGAIN) {
            fail1:
                av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not support suitable format\n");
            } else {
                av_log(s1, AV_LOG_ERROR,"Fatal: grab device does not receive any video signal\n");
            }
            goto fail;
        }
        for (j = 1; j < s->gb_buffers.frames; j++) {
          s->gb_buf.frame = j;
          ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
        }
        s->frame_format = s->gb_buf.format;
        s->use_mmap = 1;
    }

    for (j = 0; j < vformat_num; j++) {
        if (s->frame_format == video_formats[j].palette) {
            frame_size = width * height * video_formats[j].depth / 8;
            st->codec->pix_fmt = video_formats[j].pix_fmt;
            break;
        }
    }

    if (j >= vformat_num)
        goto fail;

    s->fd = video_fd;
    s->frame_size = frame_size;

    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_RAWVIDEO;
    st->codec->width = width;
    st->codec->height = height;
    st->codec->time_base.den = frame_rate;
    st->codec->time_base.num = frame_rate_base;
    st->codec->bit_rate = frame_size / av_q2d(st->codec->time_base) * 8;

    return 0;
 fail:
    if (video_fd >= 0)
        close(video_fd);
    return AVERROR(EIO);
}
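
The header above only queues VIDIOCMCAPTURE requests for every available mmap buffer; the code that actually retrieves frames is not part of this excerpt. The following is a minimal, hypothetical sketch of how such an mmap capture loop is typically drained with the V4L1 VIDIOCSYNC ioctl. The function name and parameter list are illustrative only and are not taken from the example above; only the V4L1 structures and ioctls are real.

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev.h>   /* V4L1: struct video_mbuf, struct video_mmap, VIDIOCSYNC */

/* Hypothetical companion to the mmap setup above: wait for the oldest queued
 * buffer, copy its contents to dst, then hand the buffer straight back to the
 * driver for the next capture. Returns the number of bytes copied, or a
 * negative errno value on failure. */
static int v4l1_read_mmap_frame(int fd, uint8_t *video_buf,
                                struct video_mbuf *mbuf,
                                struct video_mmap *gb_buf,
                                int *gb_frame, int frame_size, uint8_t *dst)
{
    int frame = *gb_frame;

    /* block until the driver has finished filling this buffer */
    while (ioctl(fd, VIDIOCSYNC, &frame) < 0 &&
           (errno == EAGAIN || errno == EINTR))
        ;

    memcpy(dst, video_buf + mbuf->offsets[frame], frame_size);

    /* immediately re-queue the buffer so the driver keeps capturing */
    gb_buf->frame = frame;
    if (ioctl(fd, VIDIOCMCAPTURE, gb_buf) < 0)
        return -errno;

    *gb_frame = (frame + 1) % mbuf->frames;
    return frame_size;
}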
Example #30
0
static int fourxm_read_header(AVFormatContext *s,
                              AVFormatParameters *ap)
{
    ByteIOContext *pb = &s->pb;
    unsigned int fourcc_tag;
    unsigned int size;
    int header_size;
    FourxmDemuxContext *fourxm = s->priv_data;
    unsigned char *header;
    int i;
    int current_track = -1;
    AVStream *st;

    fourxm->track_count = 0;
    fourxm->tracks = NULL;
    fourxm->selected_track = 0;
    fourxm->fps = 1.0;

    /* skip the first 3 32-bit numbers */
    url_fseek(pb, 12, SEEK_CUR);

    /* check for LIST-HEAD */
    GET_LIST_HEADER();
    header_size = size - 4;
    if (fourcc_tag != HEAD_TAG)
        return AVERROR_INVALIDDATA;

    /* allocate space for the header and load the whole thing */
    header = av_malloc(header_size);
    if (!header)
        return AVERROR(ENOMEM);
    if (get_buffer(pb, header, header_size) != header_size) {
        av_free(header);
        return AVERROR(EIO);
    }

    /* take the lazy approach and search for any and all vtrk and strk chunks */
    for (i = 0; i < header_size - 8; i++) {
        fourcc_tag = AV_RL32(&header[i]);
        size = AV_RL32(&header[i + 4]);

        if (fourcc_tag == std__TAG) {
            fourxm->fps = av_int2flt(AV_RL32(&header[i + 12]));
        } else if (fourcc_tag == vtrk_TAG) {
            /* check that there is enough data */
            if (size != vtrk_SIZE) {
                av_free(header);
                return AVERROR_INVALIDDATA;
            }
            fourxm->width = AV_RL32(&header[i + 36]);
            fourxm->height = AV_RL32(&header[i + 40]);

            /* allocate a new AVStream */
            st = av_new_stream(s, 0);
            if (!st) {
                av_free(header);
                return AVERROR(ENOMEM);
            }
            av_set_pts_info(st, 60, 1, fourxm->fps);

            fourxm->video_stream_index = st->index;

            st->codec->codec_type = CODEC_TYPE_VIDEO;
            st->codec->codec_id = CODEC_ID_4XM;
            st->codec->extradata_size = 4;
            st->codec->extradata = av_malloc(4);
            AV_WL32(st->codec->extradata, AV_RL32(&header[i + 16]));
            st->codec->width = fourxm->width;
            st->codec->height = fourxm->height;

            i += 8 + size;
        } else if (fourcc_tag == strk_TAG) {
            /* check that there is enough data */
            if (size != strk_SIZE) {
                av_free(header);
                return AVERROR_INVALIDDATA;
            }
            current_track = AV_RL32(&header[i + 8]);
            if (current_track + 1 > fourxm->track_count) {
                fourxm->track_count = current_track + 1;
                if ((unsigned)fourxm->track_count >= UINT_MAX / sizeof(AudioTrack)) {
                    av_free(header);
                    return -1;
                }
                fourxm->tracks = av_realloc(fourxm->tracks,
                    fourxm->track_count * sizeof(AudioTrack));
                if (!fourxm->tracks) {
                    av_free(header);
                    return AVERROR(ENOMEM);
                }
            }
            fourxm->tracks[current_track].adpcm = AV_RL32(&header[i + 12]);
            fourxm->tracks[current_track].channels = AV_RL32(&header[i + 36]);
            fourxm->tracks[current_track].sample_rate = AV_RL32(&header[i + 40]);
            fourxm->tracks[current_track].bits = AV_RL32(&header[i + 44]);
            i += 8 + size;

            /* allocate a new AVStream */
            st = av_new_stream(s, current_track);
            if (!st) {
                av_free(header);
                return AVERROR(ENOMEM);
            }

            av_set_pts_info(st, 60, 1, fourxm->tracks[current_track].sample_rate);

            fourxm->tracks[current_track].stream_index = st->index;

            st->codec->codec_type = CODEC_TYPE_AUDIO;
            st->codec->codec_tag = 0;
            st->codec->channels = fourxm->tracks[current_track].channels;
            st->codec->sample_rate = fourxm->tracks[current_track].sample_rate;
            st->codec->bits_per_sample = fourxm->tracks[current_track].bits;
            st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
                st->codec->bits_per_sample;
            st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
            if (fourxm->tracks[current_track].adpcm)
                st->codec->codec_id = CODEC_ID_ADPCM_4XM;
            else if (st->codec->bits_per_sample == 8)
                st->codec->codec_id = CODEC_ID_PCM_U8;
            else
                st->codec->codec_id = CODEC_ID_PCM_S16LE;
        }
    }

    av_free(header);

    /* skip over the LIST-MOVI chunk (which is where the stream data should be) */
    GET_LIST_HEADER();
    if (fourcc_tag != MOVI_TAG)
        return AVERROR_INVALIDDATA;

    /* initialize context members */
    fourxm->video_pts = -1;  /* first frame will push to 0 */
    fourxm->audio_pts = 0;

    return 0;
}
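
The GET_LIST_HEADER() macro used twice above is defined elsewhere in the 4xm demuxer and is not shown in this excerpt. A rough sketch of what it expands to, using the same old ByteIOContext get_le32() API as the rest of the snippet, is given below; LIST_TAG and the do/while wrapper are assumptions here, not copied from the source.

/* Sketch of GET_LIST_HEADER(): read a RIFF-style chunk header, require that
 * it is a LIST chunk, then load the list type into fourcc_tag while keeping
 * the chunk size (which still counts those 4 list-type bytes) in size. */
#define LIST_TAG MKTAG('L', 'I', 'S', 'T')

#define GET_LIST_HEADER() \
    do { \
        fourcc_tag = get_le32(pb); \
        size       = get_le32(pb); \
        if (fourcc_tag != LIST_TAG) \
            return AVERROR_INVALIDDATA; \
        fourcc_tag = get_le32(pb); \
    } while (0)

This also explains why the code computes header_size as size - 4: the chunk size read here includes the 4-byte list type that the macro has already consumed.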