Example #1
static int gxf_write_umf_material_description(AVFormatContext *s)
{
    GXFContext *gxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    int timecode_base = gxf->time_base.den == 60000 ? 60 : 50;

    // XXX drop frame
    uint32_t timecode =
        gxf->nb_fields / (timecode_base * 3600) % 24 << 24 | // hours
        gxf->nb_fields / (timecode_base * 60) % 60   << 16 | // minutes
        gxf->nb_fields /  timecode_base % 60         <<  8 | // seconds
        gxf->nb_fields %  timecode_base;                     // fields

    put_le32(pb, gxf->flags);
    put_le32(pb, gxf->nb_fields); /* length of the longest track */
    put_le32(pb, gxf->nb_fields); /* length of the shortest track */
    put_le32(pb, 0); /* mark in */
    put_le32(pb, gxf->nb_fields); /* mark out */
    put_le32(pb, 0); /* timecode mark in */
    put_le32(pb, timecode); /* timecode mark out */
    put_le64(pb, s->timestamp); /* modification time */
    put_le64(pb, s->timestamp); /* creation time */
    put_le16(pb, 0); /* reserved */
    put_le16(pb, 0); /* reserved */
    put_le16(pb, gxf->audio_tracks);
    put_le16(pb, 1); /* timecode track count */
    put_le16(pb, 0); /* reserved */
    put_le16(pb, gxf->mpeg_tracks);
    return 48;
}
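
All of the examples on this page funnel their output through little-endian writers such as put_le64() and put_le32(). For reference, here is a minimal standalone sketch of what such helpers do, using plain stdio instead of the real ByteIOContext API (the sketch_* names are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for put_le32()/put_le64(): emit the value with the
 * least significant byte first, which is all "little-endian" means here. */
static void sketch_put_le32(FILE *f, uint32_t v)
{
    uint8_t b[4] = { (uint8_t)v, (uint8_t)(v >> 8),
                     (uint8_t)(v >> 16), (uint8_t)(v >> 24) };
    fwrite(b, 1, sizeof(b), f);
}

static void sketch_put_le64(FILE *f, uint64_t v)
{
    sketch_put_le32(f, (uint32_t)v);          /* low 32 bits first */
    sketch_put_le32(f, (uint32_t)(v >> 32));  /* then the high 32 bits */
}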
Example #2
static int gxf_write_umf_material_description(ByteIOContext *pb, GXFContext *ctx)
{
    // XXX drop frame
    uint32_t timecode =
        ctx->nb_fields / (ctx->sample_rate * 3600) % 24 << 24 | // hours
        ctx->nb_fields / (ctx->sample_rate * 60) % 60   << 16 | // minutes
        ctx->nb_fields / ctx->sample_rate % 60          <<  8 | // seconds
        ctx->nb_fields % ctx->sample_rate;                    // fields

    put_le32(pb, ctx->flags);
    put_le32(pb, ctx->nb_fields); /* length of the longest track */
    put_le32(pb, ctx->nb_fields); /* length of the shortest track */
    put_le32(pb, 0); /* mark in */
    put_le32(pb, ctx->nb_fields); /* mark out */
    put_le32(pb, 0); /* timecode mark in */
    put_le32(pb, timecode); /* timecode mark out */
    put_le64(pb, ctx->fc->timestamp); /* modification time */
    put_le64(pb, ctx->fc->timestamp); /* creation time */
    put_le16(pb, 0); /* reserved */
    put_le16(pb, 0); /* reserved */
    put_le16(pb, ctx->audio_tracks);
    put_le16(pb, 0); /* timecode track count */
    put_le16(pb, 0); /* reserved */
    put_le16(pb, ctx->mpeg_tracks);
    return 48;
}
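
Examples #1 and #2 pack hours, minutes, seconds and fields into one byte each of the 32-bit timecode before writing it with put_le32(). A small sketch (hypothetical helper name) that inverts that packing makes the layout explicit:

#include <stdint.h>

/* Hypothetical inverse of the packing above: one byte per component,
 * hours in the top byte, fields in the bottom byte. */
static void sketch_unpack_timecode(uint32_t tc, int *hh, int *mm, int *ss, int *ff)
{
    *hh = tc >> 24 & 0xff;
    *mm = tc >> 16 & 0xff;
    *ss = tc >>  8 & 0xff;
    *ff = tc       & 0xff;
}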
Example #3
static int avi_write_ix(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    AVIContext *avi = s->priv_data;
    char tag[5];
    char ix_tag[] = "ix00";
    int i, j;

    assert(!url_is_streamed(pb));

    if (avi->riff_id > AVI_MASTER_INDEX_SIZE)
        return -1;

    for (i=0;i<s->nb_streams;i++) {
        AVIStream *avist= s->streams[i]->priv_data;
         int64_t ix, pos;

         avi_stream2fourcc(&tag[0], i, s->streams[i]->codec->codec_type);
         ix_tag[3] = '0' + i;

         /* Writing AVI OpenDML leaf index chunk */
         ix = url_ftell(pb);
         put_tag(pb, &ix_tag[0]);     /* ix?? */
         put_le32(pb, avist->indexes.entry * 8 + 24);
                                      /* chunk size */
         put_le16(pb, 2);             /* wLongsPerEntry */
         put_byte(pb, 0);             /* bIndexSubType (0 == frame index) */
         put_byte(pb, 1);             /* bIndexType (1 == AVI_INDEX_OF_CHUNKS) */
         put_le32(pb, avist->indexes.entry);
                                      /* nEntriesInUse */
         put_tag(pb, &tag[0]);        /* dwChunkId */
         put_le64(pb, avi->movi_list);/* qwBaseOffset */
         put_le32(pb, 0);             /* dwReserved_3 (must be 0) */

         for (j=0; j<avist->indexes.entry; j++) {
             AVIIentry* ie = avi_get_ientry(&avist->indexes, j);
             put_le32(pb, ie->pos + 8);
             put_le32(pb, ((uint32_t)ie->len & ~0x80000000) |
                          (ie->flags & 0x10 ? 0 : 0x80000000));
         }
         put_flush_packet(pb);
         pos = url_ftell(pb);

         /* Updating one entry in the AVI OpenDML master index */
         url_fseek(pb, avist->indexes.indx_start - 8, SEEK_SET);
         put_tag(pb, "indx");                 /* enabling this entry */
         url_fskip(pb, 8);
         put_le32(pb, avi->riff_id);          /* nEntriesInUse */
         url_fskip(pb, 16*avi->riff_id);
         put_le64(pb, ix);                    /* qwOffset */
         put_le32(pb, pos - ix);              /* dwSize */
         put_le32(pb, avist->indexes.entry); /* dwDuration */

         url_fseek(pb, pos, SEEK_SET);
    }
    return 0;
}
Example #4
static int gxf_write_umf_media_audio(ByteIOContext *pb, GXFStreamContext *sc)
{
    put_le64(pb, av_dbl2int(1)); /* sound level to begin to */
    put_le64(pb, av_dbl2int(1)); /* sound level to begin to */
    put_le32(pb, 0); /* number of fields over which to ramp up sound level */
    put_le32(pb, 0); /* number of fields over which to ramp down sound level */
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */
    return 32;
}
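
Example #4 stores the value of av_dbl2int(1) with put_le64(); av_dbl2int() returns the bit pattern of a double so it can be written as a 64-bit integer. A portable stand-in for IEEE-754 platforms (hypothetical name, not FFmpeg's implementation) would look like this:

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for av_dbl2int() on IEEE-754 platforms:
 * reinterpret the double's bytes as a 64-bit integer via memcpy. */
static uint64_t sketch_dbl2int(double d)
{
    uint64_t bits;
    memcpy(&bits, &d, sizeof(bits));
    return bits;
}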
Example #5
static int ogg_write_page(AVFormatContext *s, const uint8_t *data, int size,
                          int64_t granule, int stream_index, int flags)
{
    OGGStreamContext *oggstream = s->streams[stream_index]->priv_data;
    int64_t crc_offset;
    int page_segments, i;

    if (size >= 255*255) {
        granule = -1;
        size = 255*255;
    } else if (oggstream->eos)
        flags |= 4;

    page_segments = FFMIN((size/255)+!!size, 255);

    init_checksum(s->pb, ff_crc04C11DB7_update, 0);
    put_tag(s->pb, "OggS");
    put_byte(s->pb, 0);
    put_byte(s->pb, flags);
    put_le64(s->pb, granule);
    put_le32(s->pb, stream_index);
    put_le32(s->pb, oggstream->page_counter++);
    crc_offset = url_ftell(s->pb);
    put_le32(s->pb, 0); // crc
    put_byte(s->pb, page_segments);
    for (i = 0; i < page_segments-1; i++)
        put_byte(s->pb, 255);
    if (size) {
        put_byte(s->pb, size - (page_segments-1)*255);
        put_buffer(s->pb, data, size);
    }
    ogg_update_checksum(s, crc_offset);
    put_flush_packet(s->pb);
    return size;
}
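
The segment loop in Example #5 implements the Ogg lacing rule: every segment value is 255 except the last, which holds the remainder (and may legally be 0). The same arithmetic, pulled out into a self-contained sketch (hypothetical helper name):

#include <stdint.h>

/* Fill the lacing table for a payload of `size` bytes and return the
 * number of segments, mirroring the loop in ogg_write_page() above. */
static int sketch_fill_lacing(uint8_t *lacing, int size)
{
    int segments = size / 255 + !!size;
    int i;

    if (segments > 255)
        segments = 255;              /* the caller caps size at 255*255 */
    for (i = 0; i < segments - 1; i++)
        lacing[i] = 255;
    if (size)
        lacing[segments - 1] = (uint8_t)(size - (segments - 1) * 255);
    return segments;
}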
Example #6
static int ivf_write_header(AVFormatContext *s)
{
    AVCodecContext *ctx;
    ByteIOContext *pb = s->pb;

    if (s->nb_streams != 1) {
        av_log(s, AV_LOG_ERROR, "Format supports only exactly one video stream\n");
        return AVERROR(EINVAL);
    }
    ctx = s->streams[0]->codec;
    if (ctx->codec_type != CODEC_TYPE_VIDEO || ctx->codec_id != CODEC_ID_VP8) {
        av_log(s, AV_LOG_ERROR, "Currently only VP8 is supported!\n");
        return AVERROR(EINVAL);
    }
    put_buffer(pb, "DKIF", 4);
    put_le16(pb, 0); // version
    put_le16(pb, 32); // header length
    put_le32(pb, ctx->codec_tag ? ctx->codec_tag : AV_RL32("VP80"));
    put_le16(pb, ctx->width);
    put_le16(pb, ctx->height);
    put_le32(pb, s->streams[0]->time_base.den);
    put_le32(pb, s->streams[0]->time_base.num);
    put_le64(pb, s->streams[0]->duration); // TODO: duration or number of frames?!?

    return 0;
}
Example #7
static int asf_write_index(AVFormatContext *s, ASFIndex *index, uint16_t max, uint32_t count)
{
    ByteIOContext *pb = s->pb;
    int i;

    put_guid(pb, &ff_asf_simple_index_header);
    put_le64(pb, 24 + 16 + 8 + 4 + 4 + (4 + 2)*count);
    put_guid(pb, &ff_asf_my_guid);
    put_le64(pb, ASF_INDEXED_INTERVAL);
    put_le32(pb, max);
    put_le32(pb, count);
    for(i=0; i<count; i++) {
        put_le32(pb, index[i].packet_number);
        put_le16(pb, index[i].packet_count);
    }

    return 0;
}
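
The object size written in Example #7, 24 + 16 + 8 + 4 + 4 + (4 + 2)*count, simply adds up the fields that follow it; spelled out as a sketch (hypothetical helper name):

#include <stdint.h>

/* Size of the ASF simple index object as written above: the object's own
 * 24-byte header (16-byte GUID + 8-byte size), a 16-byte file GUID, an
 * 8-byte interval, two 32-bit fields, and 6 bytes per index entry. */
static uint64_t sketch_simple_index_size(uint32_t count)
{
    return 24 + 16 + 8 + 4 + 4 + (uint64_t)(4 + 2) * count;
}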
Example #8
/* update header size */
static void end_header(ByteIOContext *pb, int64_t pos)
{
    int64_t pos1;

    pos1 = url_ftell(pb);
    url_fseek(pb, pos + 16, SEEK_SET);
    put_le64(pb, pos1 - pos);
    url_fseek(pb, pos1, SEEK_SET);
}
Example #9
static int64_t put_header(ByteIOContext *pb, const GUID *g)
{
    int64_t pos;

    pos = url_ftell(pb);
    put_guid(pb, g);
    put_le64(pb, 24);
    return pos;
}
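
Examples #8 and #9 are two halves of the same reserve-then-patch pattern used throughout the ASF muxer: put_header() writes the object GUID plus a placeholder size, and end_header() later seeks back to pos + 16 (just past the 16-byte GUID) to store the real size. A usage sketch with the object body elided (sketch_write_asf_object is a hypothetical wrapper; the pattern itself appears in Example #19):

/* Usage sketch: reserve an ASF object header, write its body, then patch
 * the 64-bit size field that put_header() left as a placeholder. */
static void sketch_write_asf_object(ByteIOContext *pb)
{
    int64_t hpos = put_header(pb, &ff_asf_file_header); /* GUID + size placeholder */
    /* ... write the object body with put_le16()/put_le32()/put_le64() ... */
    end_header(pb, hpos);                               /* seek back, store real size */
}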
Example #10
static int sox_write_header(AVFormatContext *s)
{
    SoXContext *sox = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[0]->codec;
    AVMetadataTag *comment;
    size_t comment_len = 0, comment_size;

    comment = av_metadata_get(s->metadata, "comment", NULL, 0);
    if (comment)
        comment_len = strlen(comment->value);
    comment_size = (comment_len + 7) & ~7;

    sox->header_size = SOX_FIXED_HDR + comment_size;

    if (enc->codec_id == CODEC_ID_PCM_S32LE) {
        put_tag(pb, ".SoX");
        put_le32(pb, sox->header_size);
        put_le64(pb, 0); /* number of samples */
        put_le64(pb, av_dbl2int(enc->sample_rate));
        put_le32(pb, enc->channels);
        put_le32(pb, comment_size);
    } else if (enc->codec_id == CODEC_ID_PCM_S32BE) {
        put_tag(pb, "XoS.");
        put_be32(pb, sox->header_size);
        put_be64(pb, 0); /* number of samples */
        put_be64(pb, av_dbl2int(enc->sample_rate));
        put_be32(pb, enc->channels);
        put_be32(pb, comment_size);
    } else {
        av_log(s, AV_LOG_ERROR, "invalid codec; use pcm_s32le or pcm_s32be\n");
        return -1;
    }

    if (comment_len)
        put_buffer(pb, comment->value, comment_len);

    for ( ; comment_size > comment_len; comment_len++)
        put_byte(pb, 0);

    put_flush_packet(pb);

    return 0;
}
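
The comment handling in Example #10 rounds the comment length up to the next multiple of 8 with (comment_len + 7) & ~7 and then zero-fills the difference. The rounding in isolation (hypothetical function name):

#include <stddef.h>

/* Round n up to the next multiple of 8, as done for comment_size above;
 * e.g. 0 -> 0, 1 -> 8, 15 -> 16. */
static size_t sketch_align8(size_t n)
{
    return (n + 7) & ~(size_t)7;
}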
Example #11
static int ivf_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    ByteIOContext *pb = s->pb;
    put_le32(pb, pkt->size);
    put_le64(pb, pkt->pts);
    put_buffer(pb, pkt->data, pkt->size);
    put_flush_packet(pb);

    return 0;
}
Example #12
static int gxf_write_umf_material_description(ByteIOContext *pb, GXFContext *ctx)
{
    put_le32(pb, ctx->flags);
    put_le32(pb, ctx->nb_frames); /* length of the longest track */
    put_le32(pb, ctx->nb_frames); /* length of the shortest track */
    put_le32(pb, 0); /* mark in */
    put_le32(pb, ctx->nb_frames); /* mark out */
    put_le32(pb, 0); /* timecode mark in */
    put_le32(pb, ctx->nb_frames); /* timecode mark out */
    put_le64(pb, ctx->fc->timestamp); /* modification time */
    put_le64(pb, ctx->fc->timestamp); /* creation time */
    put_le16(pb, 0); /* reserved */
    put_le16(pb, 0); /* reserved */
    put_le16(pb, ctx->audio_tracks);
    put_le16(pb, 0); /* timecode track count */
    put_le16(pb, 0); /* reserved */
    put_le16(pb, ctx->mpeg_tracks);
    return 48;
}
Example #13
static int gxf_write_track_description(AVFormatContext *s, GXFStreamContext *sc, int index)
{
    ByteIOContext *pb = s->pb;
    int64_t pos;
    int mpeg = sc->track_type == 4 || sc->track_type == 9;

    /* track description section */
    put_byte(pb, sc->media_type + 0x80);
    put_byte(pb, index + 0xC0);

    pos = url_ftell(pb);
    put_be16(pb, 0); /* size */

    /* media file name */
    put_byte(pb, TRACK_NAME);
    put_byte(pb, strlen(ES_NAME_PATTERN) + 3);
    put_tag(pb, ES_NAME_PATTERN);
    put_be16(pb, sc->media_info);
    put_byte(pb, 0);

    if (!mpeg) {
        /* auxiliary information */
        put_byte(pb, TRACK_AUX);
        put_byte(pb, 8);
        if (sc->track_type == 3)
            gxf_write_timecode_auxiliary(pb, sc);
        else
            put_le64(pb, 0);
    }

    /* file system version */
    put_byte(pb, TRACK_VER);
    put_byte(pb, 4);
    put_be32(pb, 0);

    if (mpeg)
        gxf_write_mpeg_auxiliary(pb, s->streams[index]);

    /* frame rate */
    put_byte(pb, TRACK_FPS);
    put_byte(pb, 4);
    put_be32(pb, sc->frame_rate_index);

    /* lines per frame */
    put_byte(pb, TRACK_LINES);
    put_byte(pb, 4);
    put_be32(pb, sc->lines_index);

    /* fields per frame */
    put_byte(pb, TRACK_FPF);
    put_byte(pb, 4);
    put_be32(pb, sc->fields);

    return updateSize(pb, pos);
}
Example #14
/**
 * HCI read local supported states command. Returns the states
 * supported by the controller.
 *
 * @param rspbuf Pointer to response buffer
 * @param rsplen Length of response buffer
 *
 * @return int BLE error code
 */
static int
ble_ll_hci_le_read_supp_states(uint8_t *rspbuf, uint8_t *rsplen)
{
    uint64_t supp_states;

    /* Add list of supported states. */
    supp_states = ble_ll_read_supp_states();
    put_le64(rspbuf, supp_states);
    *rsplen = BLE_HCI_RD_SUPP_STATES_RSPLEN;
    return BLE_ERR_SUCCESS;
}
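
Unlike the other examples, Example #14 writes into a plain byte buffer (rspbuf) rather than a ByteIOContext. A hedged sketch of what such a buffer-based little-endian store typically looks like (hypothetical name, not necessarily NimBLE's exact implementation):

#include <stdint.h>

/* Hypothetical buffer variant: store v into buf, least significant byte
 * first, exactly 8 bytes. */
static void sketch_put_le64_buf(uint8_t *buf, uint64_t v)
{
    int i;

    for (i = 0; i < 8; i++)
        buf[i] = (uint8_t)(v >> (8 * i));
}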
Example #15
static int gxf_write_track_description(ByteIOContext *pb, GXFStreamContext *stream)
{
    int64_t pos;

    /* track description section */
    put_byte(pb, stream->media_type + 0x80);
    put_byte(pb, stream->index + 0xC0);

    pos = url_ftell(pb);
    put_be16(pb, 0); /* size */

    /* media file name */
    put_byte(pb, TRACK_NAME);
    put_byte(pb, strlen(ES_NAME_PATTERN) + 3);
    put_tag(pb, ES_NAME_PATTERN);
    put_be16(pb, stream->media_info);
    put_byte(pb, 0);

    if (stream->codec->codec_id != CODEC_ID_MPEG2VIDEO) {
        /* auxiliary information */
        put_byte(pb, TRACK_AUX);
        put_byte(pb, 8);
        if (stream->codec->codec_id == CODEC_ID_NONE)
            gxf_write_timecode_auxiliary(pb, stream);
        else
            put_le64(pb, 0);
    }

    /* file system version */
    put_byte(pb, TRACK_VER);
    put_byte(pb, 4);
    put_be32(pb, 0);

    if (stream->codec->codec_id == CODEC_ID_MPEG2VIDEO)
        gxf_write_mpeg_auxiliary(pb, stream);

    /* frame rate */
    put_byte(pb, TRACK_FPS);
    put_byte(pb, 4);
    put_be32(pb, stream->frame_rate_index);

    /* lines per frame */
    put_byte(pb, TRACK_LINES);
    put_byte(pb, 4);
    put_be32(pb, stream->lines_index);

    /* fields per frame */
    put_byte(pb, TRACK_FPF);
    put_byte(pb, 4);
    put_be32(pb, stream->fields);

    return updateSize(pb, pos);
}
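
Examples #13 and #15 build the track description as a series of (tag, length, value) entries: one tag byte, one length byte, then that many bytes of payload. A sketch for a 4-byte big-endian value entry (hypothetical helper name, reusing the ByteIOContext helpers from the examples):

/* Write one (tag, length, value) entry with a 4-byte big-endian payload,
 * as done for TRACK_FPS, TRACK_LINES and TRACK_FPF above. */
static void sketch_write_tlv_be32(ByteIOContext *pb, uint8_t tag, uint32_t value)
{
    put_byte(pb, tag);   /* tag */
    put_byte(pb, 4);     /* payload length */
    put_be32(pb, value); /* payload */
}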
Example #16
static int sox_write_trailer(AVFormatContext *s)
{
    SoXContext *sox = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[0]->codec;

    if (!url_is_streamed(s->pb)) {
        /* update number of samples */
        int64_t file_size = url_ftell(pb);
        int64_t num_samples = (file_size - sox->header_size - 4LL) >> 2LL;
        url_fseek(pb, 8, SEEK_SET);
        if (enc->codec_id == CODEC_ID_PCM_S32LE) {
            put_le64(pb, num_samples);
        } else
            put_be64(pb, num_samples);
        url_fseek(pb, file_size, SEEK_SET);

        put_flush_packet(pb);
    }

    return 0;
}
Example #17
static int ogg_write_page(AVFormatContext *s, OGGPage *page, int extra_flags)
{
    OGGStreamContext *oggstream = s->streams[page->stream_index]->priv_data;
    ByteIOContext *pb;
    int64_t crc_offset;
    int ret, size;
    uint8_t *buf;

    ret = url_open_dyn_buf(&pb);
    if (ret < 0)
        return ret;
    init_checksum(pb, ff_crc04C11DB7_update, 0);
    put_tag(pb, "OggS");
    put_byte(pb, 0);
    put_byte(pb, page->flags | extra_flags);
    put_le64(pb, page->granule);
    put_le32(pb, oggstream->serial_num);
    put_le32(pb, oggstream->page_counter++);
    crc_offset = url_ftell(pb);
    put_le32(pb, 0); // crc
    put_byte(pb, page->segments_count);
    put_buffer(pb, page->segments, page->segments_count);
    put_buffer(pb, page->data, page->size);

    ogg_update_checksum(s, pb, crc_offset);
    put_flush_packet(pb);

    size = url_close_dyn_buf(pb, &buf);
    if (size < 0)
        return size;

    put_buffer(s->pb, buf, size);
    put_flush_packet(s->pb);
    av_free(buf);
    oggstream->page_count--;
    return 0;
}
Example #18
static int avi_write_header(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = &s->pb;
    int bitrate, n, i, nb_frames, au_byterate, au_ssize, au_scale;
    AVCodecContext *stream, *video_enc;
    offset_t list1, list2, strh, strf;

    /* header list */
    avi->riff_id = 0;
    list1 = avi_start_new_riff(avi, pb, "AVI ", "hdrl");

    /* avi header */
    put_tag(pb, "avih");
    put_le32(pb, 14 * 4);
    bitrate = 0;

    video_enc = NULL;
    for(n=0;n<s->nb_streams;n++) {
        stream = s->streams[n]->codec;
        bitrate += stream->bit_rate;
        if (stream->codec_type == CODEC_TYPE_VIDEO)
            video_enc = stream;
    }

    nb_frames = 0;

    if(video_enc){
        put_le32(pb, (uint32_t)(INT64_C(1000000) * video_enc->time_base.num / video_enc->time_base.den));
    } else {
        put_le32(pb, 0);
    }
    put_le32(pb, bitrate / 8); /* XXX: not quite exact */
    put_le32(pb, 0); /* padding */
    if (url_is_streamed(pb))
        put_le32(pb, AVIF_TRUSTCKTYPE | AVIF_ISINTERLEAVED); /* flags */
    else
        put_le32(pb, AVIF_TRUSTCKTYPE | AVIF_HASINDEX | AVIF_ISINTERLEAVED); /* flags */
    avi->frames_hdr_all = url_ftell(pb); /* remember this offset to fill later */
    put_le32(pb, nb_frames); /* nb frames, filled later */
    put_le32(pb, 0); /* initial frame */
    put_le32(pb, s->nb_streams); /* nb streams */
    put_le32(pb, 1024 * 1024); /* suggested buffer size */
    if(video_enc){
        put_le32(pb, video_enc->width);
        put_le32(pb, video_enc->height);
    } else {
        put_le32(pb, 0);
        put_le32(pb, 0);
    }
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */

    /* stream list */
    for(i=0;i<n;i++) {
        list2 = start_tag(pb, "LIST");
        put_tag(pb, "strl");

        stream = s->streams[i]->codec;

        /* FourCC should really be set by the codec itself */
        if (! stream->codec_tag) {
            stream->codec_tag = codec_get_bmp_tag(stream->codec_id);
        }

        /* stream generic header */
        strh = start_tag(pb, "strh");
        switch(stream->codec_type) {
        case CODEC_TYPE_VIDEO: put_tag(pb, "vids"); break;
        case CODEC_TYPE_AUDIO: put_tag(pb, "auds"); break;
//        case CODEC_TYPE_TEXT : put_tag(pb, "txts"); break;
        case CODEC_TYPE_DATA : put_tag(pb, "dats"); break;
        }
        if(stream->codec_type == CODEC_TYPE_VIDEO)
            put_le32(pb, stream->codec_tag);
        else
            put_le32(pb, 1);
        put_le32(pb, 0); /* flags */
        put_le16(pb, 0); /* priority */
        put_le16(pb, 0); /* language */
        put_le32(pb, 0); /* initial frame */

        ff_parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale);

        put_le32(pb, au_scale); /* scale */
        put_le32(pb, au_byterate); /* rate */
        av_set_pts_info(s->streams[i], 64, au_scale, au_byterate);

        put_le32(pb, 0); /* start */
        avi->frames_hdr_strm[i] = url_ftell(pb); /* remember this offset to fill later */
        if (url_is_streamed(pb))
            put_le32(pb, AVI_MAX_RIFF_SIZE); /* FIXME: this may be broken, but who cares */
        else
            put_le32(pb, 0); /* length, XXX: filled later */

        /* suggested buffer size */ //FIXME set at the end to largest chunk
        if(stream->codec_type == CODEC_TYPE_VIDEO)
            put_le32(pb, 1024 * 1024);
        else if(stream->codec_type == CODEC_TYPE_AUDIO)
            put_le32(pb, 12 * 1024);
        else
            put_le32(pb, 0);
        put_le32(pb, -1); /* quality */
        put_le32(pb, au_ssize); /* sample size */
        put_le32(pb, 0);
        put_le16(pb, stream->width);
        put_le16(pb, stream->height);
        end_tag(pb, strh);

      if(stream->codec_type != CODEC_TYPE_DATA){
        strf = start_tag(pb, "strf");
        switch(stream->codec_type) {
        case CODEC_TYPE_VIDEO:
            put_bmp_header(pb, stream, codec_bmp_tags, 0);
            break;
        case CODEC_TYPE_AUDIO:
            if (put_wav_header(pb, stream) < 0) {
                av_free(avi);
                return -1;
            }
            break;
        default:
            return -1;
        }
        end_tag(pb, strf);
      }

        if (!url_is_streamed(pb)) {
            unsigned char tag[5];
            int j;

            /* Starting to lay out AVI OpenDML master index.
             * We want to make it JUNK entry for now, since we'd
             * like to get away without making AVI an OpenDML one
             * for compatibility reasons.
             */
            avi->indexes[i].entry = avi->indexes[i].ents_allocated = 0;
            avi->indexes[i].indx_start = start_tag(pb, "JUNK");
            put_le16(pb, 4);        /* wLongsPerEntry */
            put_byte(pb, 0);        /* bIndexSubType (0 == frame index) */
            put_byte(pb, 0);        /* bIndexType (0 == AVI_INDEX_OF_INDEXES) */
            put_le32(pb, 0);        /* nEntriesInUse (will fill out later on) */
            put_tag(pb, avi_stream2fourcc(&tag[0], i, stream->codec_type));
                                    /* dwChunkId */
            put_le64(pb, 0);        /* dwReserved[3]
            put_le32(pb, 0);           Must be 0.    */
            for (j=0; j < AVI_MASTER_INDEX_SIZE * 2; j++)
                 put_le64(pb, 0);
            end_tag(pb, avi->indexes[i].indx_start);
        }

        end_tag(pb, list2);
    }

    if (!url_is_streamed(pb)) {
        /* AVI could become an OpenDML one, if it grows beyond 2Gb range */
        avi->odml_list = start_tag(pb, "JUNK");
        put_tag(pb, "odml");
        put_tag(pb, "dmlh");
        put_le32(pb, 248);
        for (i = 0; i < 248; i+= 4)
             put_le32(pb, 0);
        end_tag(pb, avi->odml_list);
    }

    end_tag(pb, list1);

    list2 = start_tag(pb, "LIST");
    put_tag(pb, "INFO");
    avi_write_info_tag(pb, "INAM", s->title);
    avi_write_info_tag(pb, "IART", s->author);
    avi_write_info_tag(pb, "ICOP", s->copyright);
    avi_write_info_tag(pb, "ICMT", s->comment);
    avi_write_info_tag(pb, "IPRD", s->album);
    avi_write_info_tag(pb, "IGNR", s->genre);
    if (s->track) {
        char str_track[4];
        snprintf(str_track, 4, "%d", s->track);
        avi_write_info_tag(pb, "IPRT", str_track);
    }
    if(!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT))
        avi_write_info_tag(pb, "ISFT", LIBAVFORMAT_IDENT);
    end_tag(pb, list2);

    /* some padding for easier tag editing */
    list2 = start_tag(pb, "JUNK");
    for (i = 0; i < 1016; i += 4)
        put_le32(pb, 0);
    end_tag(pb, list2);

    avi->movi_list = start_tag(pb, "LIST");
    put_tag(pb, "movi");

    put_flush_packet(pb);

    return 0;
}
Example #19
/* write the header (used two times if non streamed) */
static int asf_write_header1(AVFormatContext *s, int64_t file_size, int64_t data_chunk_size)
{
    ASFContext *asf = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVMetadataTag *title, *author, *copyright, *comment;
    int header_size, n, extra_size, extra_size2, wav_extra_size, file_time;
    int has_title;
    int metadata_count;
    AVCodecContext *enc;
    int64_t header_offset, cur_pos, hpos;
    int bit_rate;
    int64_t duration;

    title     = av_metadata_get(s->metadata, "title"    , NULL, 0);
    author    = av_metadata_get(s->metadata, "author"   , NULL, 0);
    copyright = av_metadata_get(s->metadata, "copyright", NULL, 0);
    comment   = av_metadata_get(s->metadata, "comment"  , NULL, 0);

    duration = asf->duration + PREROLL_TIME * 10000;
    has_title = title || author || copyright || comment;
    metadata_count = s->metadata ? s->metadata->count : 0;

    bit_rate = 0;
    for(n=0;n<s->nb_streams;n++) {
        enc = s->streams[n]->codec;

        av_set_pts_info(s->streams[n], 32, 1, 1000); /* 32 bit pts in ms */

        bit_rate += enc->bit_rate;
    }

    if (asf->is_streamed) {
        put_chunk(s, 0x4824, 0, 0xc00); /* start of stream (length will be patched later) */
    }

    put_guid(pb, &ff_asf_header);
    put_le64(pb, -1); /* header length, will be patched after */
    put_le32(pb, 3 + has_title + !!metadata_count + s->nb_streams); /* number of chunks in header */
    put_byte(pb, 1); /* ??? */
    put_byte(pb, 2); /* ??? */

    /* file header */
    header_offset = url_ftell(pb);
    hpos = put_header(pb, &ff_asf_file_header);
    put_guid(pb, &ff_asf_my_guid);
    put_le64(pb, file_size);
    file_time = 0;
    put_le64(pb, unix_to_file_time(file_time));
    put_le64(pb, asf->nb_packets); /* number of packets */
    put_le64(pb, duration); /* end time stamp (in 100ns units) */
    put_le64(pb, asf->duration); /* duration (in 100ns units) */
    put_le64(pb, PREROLL_TIME); /* start time stamp */
    put_le32(pb, (asf->is_streamed || url_is_streamed(pb)) ? 3 : 2); /* ??? */
    put_le32(pb, asf->packet_size); /* packet size */
    put_le32(pb, asf->packet_size); /* packet size */
    put_le32(pb, bit_rate); /* Nominal data rate in bps */
    end_header(pb, hpos);

    /* unknown headers */
    hpos = put_header(pb, &ff_asf_head1_guid);
    put_guid(pb, &ff_asf_head2_guid);
    put_le32(pb, 6);
    put_le16(pb, 0);
    end_header(pb, hpos);

    /* title and other infos */
    if (has_title) {
        hpos = put_header(pb, &ff_asf_comment_header);
        put_le16(pb, title     ? 2 * (strlen(title->value    ) + 1) : 0);
        put_le16(pb, author    ? 2 * (strlen(author->value   ) + 1) : 0);
        put_le16(pb, copyright ? 2 * (strlen(copyright->value) + 1) : 0);
        put_le16(pb, comment   ? 2 * (strlen(comment->value  ) + 1) : 0);
        put_le16(pb, 0);
        if (title    ) put_str16_nolen(pb, title->value    );
        if (author   ) put_str16_nolen(pb, author->value   );
        if (copyright) put_str16_nolen(pb, copyright->value);
        if (comment  ) put_str16_nolen(pb, comment->value  );
        end_header(pb, hpos);
    }
    if (metadata_count) {
        AVMetadataTag *tag = NULL;
        hpos = put_header(pb, &ff_asf_extended_content_header);
        put_le16(pb, metadata_count);
        while ((tag = av_metadata_get(s->metadata, "", tag, AV_METADATA_IGNORE_SUFFIX))) {
            put_le16(pb, 2*(strlen(tag->key) + 3) + 1);
            put_le16(pb, 'W');
            put_le16(pb, 'M');
            put_le16(pb, '/');
            put_str16_nolen(pb, tag->key);
            put_le16(pb, 0);
            put_le16(pb, 2*strlen(tag->value) + 1);
            put_str16_nolen(pb, tag->value);
        }
        end_header(pb, hpos);
    }

    /* stream headers */
    for(n=0;n<s->nb_streams;n++) {
        int64_t es_pos;
        //        ASFStream *stream = &asf->streams[n];

        enc = s->streams[n]->codec;
        asf->streams[n].num = n + 1;
        asf->streams[n].seq = 0;


        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            wav_extra_size = 0;
            extra_size = 18 + wav_extra_size;
            extra_size2 = 8;
            break;
        default:
        case CODEC_TYPE_VIDEO:
            wav_extra_size = enc->extradata_size;
            extra_size = 0x33 + wav_extra_size;
            extra_size2 = 0;
            break;
        }

        hpos = put_header(pb, &ff_asf_stream_header);
        if (enc->codec_type == CODEC_TYPE_AUDIO) {
            put_guid(pb, &ff_asf_audio_stream);
            put_guid(pb, &ff_asf_audio_conceal_spread);
        } else {
            put_guid(pb, &ff_asf_video_stream);
            put_guid(pb, &ff_asf_video_conceal_none);
        }
        put_le64(pb, 0); /* ??? */
        es_pos = url_ftell(pb);
        put_le32(pb, extra_size); /* wav header len */
        put_le32(pb, extra_size2); /* additional data len */
        put_le16(pb, n + 1); /* stream number */
        put_le32(pb, 0); /* ??? */

        if (enc->codec_type == CODEC_TYPE_AUDIO) {
            /* WAVEFORMATEX header */
            int wavsize = put_wav_header(pb, enc);
            if ((enc->codec_id != CODEC_ID_MP3) && (enc->codec_id != CODEC_ID_MP2) && (enc->codec_id != CODEC_ID_ADPCM_IMA_WAV) && (enc->extradata_size==0)) {
                wavsize += 2;
                put_le16(pb, 0);
            }

            if (wavsize < 0)
                return -1;
            if (wavsize != extra_size) {
                cur_pos = url_ftell(pb);
                url_fseek(pb, es_pos, SEEK_SET);
                put_le32(pb, wavsize); /* wav header len */
                url_fseek(pb, cur_pos, SEEK_SET);
            }
            /* ERROR Correction */
            put_byte(pb, 0x01);
            if(enc->codec_id == CODEC_ID_ADPCM_G726 || !enc->block_align){
                put_le16(pb, 0x0190);
                put_le16(pb, 0x0190);
            }else{
                put_le16(pb, enc->block_align);
                put_le16(pb, enc->block_align);
            }
            put_le16(pb, 0x01);
            put_byte(pb, 0x00);
        } else {
            put_le32(pb, enc->width);
            put_le32(pb, enc->height);
            put_byte(pb, 2); /* ??? */
            put_le16(pb, 40 + enc->extradata_size); /* size */

            /* BITMAPINFOHEADER header */
            put_bmp_header(pb, enc, codec_bmp_tags, 1);
        }
        end_header(pb, hpos);
    }

    /* media comments */

    hpos = put_header(pb, &ff_asf_codec_comment_header);
    put_guid(pb, &ff_asf_codec_comment1_header);
    put_le32(pb, s->nb_streams);
    for(n=0;n<s->nb_streams;n++) {
        AVCodec *p;

        enc = s->streams[n]->codec;
        p = avcodec_find_encoder(enc->codec_id);

        if(enc->codec_type == CODEC_TYPE_AUDIO)
            put_le16(pb, 2);
        else if(enc->codec_type == CODEC_TYPE_VIDEO)
            put_le16(pb, 1);
        else
            put_le16(pb, -1);

        if(enc->codec_id == CODEC_ID_WMAV2)
            put_str16(pb, "Windows Media Audio V8");
        else
            put_str16(pb, p ? p->name : enc->codec_name);
        put_le16(pb, 0); /* no parameters */


        /* id */
        if (enc->codec_type == CODEC_TYPE_AUDIO) {
            put_le16(pb, 2);
            put_le16(pb, enc->codec_tag);
        } else {
            put_le16(pb, 4);
            put_le32(pb, enc->codec_tag);
        }
        if(!enc->codec_tag)
            return -1;
    }
    end_header(pb, hpos);

    /* patch the header size fields */

    cur_pos = url_ftell(pb);
    header_size = cur_pos - header_offset;
    if (asf->is_streamed) {
        header_size += 8 + 30 + 50;

        url_fseek(pb, header_offset - 10 - 30, SEEK_SET);
        put_le16(pb, header_size);
        url_fseek(pb, header_offset - 2 - 30, SEEK_SET);
        put_le16(pb, header_size);

        header_size -= 8 + 30 + 50;
    }
    header_size += 24 + 6;
    url_fseek(pb, header_offset - 14, SEEK_SET);
    put_le64(pb, header_size);
    url_fseek(pb, cur_pos, SEEK_SET);

    /* movie chunk, followed by packets of packet_size */
    asf->data_offset = cur_pos;
    put_guid(pb, &ff_asf_data_header);
    put_le64(pb, data_chunk_size);
    put_guid(pb, &ff_asf_my_guid);
    put_le64(pb, asf->nb_packets); /* nb packets */
    put_byte(pb, 1); /* ??? */
    put_byte(pb, 1); /* ??? */
    return 0;
}
Example #20
/* write the header (used two times if non streamed) */
static int asf_write_header1(AVFormatContext *s, int64_t file_size, int64_t data_chunk_size)
{
    ASFContext *asf = s->priv_data;
    ByteIOContext *pb = &s->pb;
    int header_size, n, extra_size, extra_size2, wav_extra_size, file_time;
    int has_title;
    AVCodecContext *enc;
    int64_t header_offset, cur_pos, hpos;
    int bit_rate;
    int64_t duration;

    duration = asf->duration + preroll_time * 10000;
    has_title = (s->title[0] || s->author[0] || s->copyright[0] || s->comment[0]);

    bit_rate = 0;
    for(n=0;n<s->nb_streams;n++) {
        enc = s->streams[n]->codec;

        av_set_pts_info(s->streams[n], 32, 1, 1000); /* 32 bit pts in ms */

        bit_rate += enc->bit_rate;
    }

    if (asf->is_streamed) {
        put_chunk(s, 0x4824, 0, 0xc00); /* start of stream (length will be patched later) */
    }

    put_guid(pb, &asf_header);
    put_le64(pb, -1); /* header length, will be patched after */
    put_le32(pb, 3 + has_title + s->nb_streams); /* number of chunks in header */
    put_byte(pb, 1); /* ??? */
    put_byte(pb, 2); /* ??? */

    /* file header */
    header_offset = url_ftell(pb);
    hpos = put_header(pb, &file_header);
    put_guid(pb, &my_guid);
    put_le64(pb, file_size);
    file_time = 0;
    put_le64(pb, unix_to_file_time(file_time));
    put_le64(pb, asf->nb_packets); /* number of packets */
    put_le64(pb, duration); /* end time stamp (in 100ns units) */
    put_le64(pb, duration); /* duration (in 100ns units) */
    put_le32(pb, preroll_time); /* start time stamp */
    put_le32(pb, 0); /* ??? */
    put_le32(pb, asf->is_streamed ? 1 : 0); /* ??? */
    put_le32(pb, asf->packet_size); /* packet size */
    put_le32(pb, asf->packet_size); /* packet size */
    put_le32(pb, bit_rate); /* Nominal data rate in bps */
    end_header(pb, hpos);

    /* unknown headers */
    hpos = put_header(pb, &head1_guid);
    put_guid(pb, &head2_guid);
    put_le32(pb, 6);
    put_le16(pb, 0);
    end_header(pb, hpos);

    /* title and other infos */
    if (has_title) {
        hpos = put_header(pb, &comment_header);
        if ( s->title[0]     ) { put_le16(pb, 2 * (strlen(s->title    ) + 1)); } else { put_le16(pb, 0); }
        if ( s->author[0]    ) { put_le16(pb, 2 * (strlen(s->author   ) + 1)); } else { put_le16(pb, 0); }
        if ( s->copyright[0] ) { put_le16(pb, 2 * (strlen(s->copyright) + 1)); } else { put_le16(pb, 0); }
        if ( s->comment[0]   ) { put_le16(pb, 2 * (strlen(s->comment  ) + 1)); } else { put_le16(pb, 0); }
        put_le16(pb, 0);
        if ( s->title[0]     ) put_str16_nolen(pb, s->title);
        if ( s->author[0]    ) put_str16_nolen(pb, s->author);
        if ( s->copyright[0] ) put_str16_nolen(pb, s->copyright);
        if ( s->comment[0]   ) put_str16_nolen(pb, s->comment);
        end_header(pb, hpos);
    }

    /* stream headers */
    for(n=0;n<s->nb_streams;n++) {
        int64_t es_pos;
        const uint8_t *er_spr = NULL;
        int er_spr_len = 0;
        //        ASFStream *stream = &asf->streams[n];

        enc = s->streams[n]->codec;
        asf->streams[n].num = n + 1;
        asf->streams[n].seq = 0;


        if (enc->codec_type == CODEC_TYPE_AUDIO) {
            if (enc->codec_id == CODEC_ID_ADPCM_G726) {
                er_spr     = error_spread_ADPCM_G726;
                er_spr_len = sizeof(error_spread_ADPCM_G726);
            }
        }

        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            wav_extra_size = 0;
            extra_size = 18 + wav_extra_size;
            extra_size2 = er_spr_len;
            break;
        default:
        case CODEC_TYPE_VIDEO:
            wav_extra_size = enc->extradata_size;
            extra_size = 0x33 + wav_extra_size;
            extra_size2 = 0;
            break;
        }

        hpos = put_header(pb, &stream_header);
        if (enc->codec_type == CODEC_TYPE_AUDIO) {
            put_guid(pb, &audio_stream);
            if ((er_spr != NULL) && (er_spr_len != 0)) {
                put_guid(pb, &audio_conceal_spread);
            } else {
                put_guid(pb, &video_conceal_none);
            }
        } else {
            put_guid(pb, &video_stream);
            put_guid(pb, &video_conceal_none);
        }
        put_le64(pb, 0); /* ??? */
        es_pos = url_ftell(pb);
        put_le32(pb, extra_size); /* wav header len */
        put_le32(pb, extra_size2); /* additional data len */
        put_le16(pb, n + 1); /* stream number */
        put_le32(pb, 0); /* ??? */

        if (enc->codec_type == CODEC_TYPE_AUDIO) {
            /* WAVEFORMATEX header */
            int wavsize = put_wav_header(pb, enc);
            if ((enc->codec_id != CODEC_ID_MP3) && (enc->codec_id != CODEC_ID_MP2) && (enc->codec_id != CODEC_ID_ADPCM_IMA_WAV) && (enc->extradata_size==0)) {
                wavsize += 2;
                put_le16(pb, 0);
            }

            if (wavsize < 0)
                return -1;
            if (wavsize != extra_size) {
                cur_pos = url_ftell(pb);
                url_fseek(pb, es_pos, SEEK_SET);
                put_le32(pb, wavsize); /* wav header len */
                url_fseek(pb, cur_pos, SEEK_SET);
            }
            /* ERROR Correction */
            if ((er_spr != NULL) && (er_spr_len != 0))
                put_buffer(pb, er_spr, er_spr_len);
        } else {
            put_le32(pb, enc->width);
            put_le32(pb, enc->height);
            put_byte(pb, 2); /* ??? */
            put_le16(pb, 40 + enc->extradata_size); /* size */

            /* BITMAPINFOHEADER header */
            put_bmp_header(pb, enc, codec_bmp_tags, 1);
        }
        end_header(pb, hpos);
    }

    /* media comments */

    hpos = put_header(pb, &codec_comment_header);
    put_guid(pb, &codec_comment1_header);
    put_le32(pb, s->nb_streams);
    for(n=0;n<s->nb_streams;n++) {
        AVCodec *p;

        enc = s->streams[n]->codec;
        p = avcodec_find_encoder(enc->codec_id);

        put_le16(pb, asf->streams[n].num);
        put_str16(pb, p ? p->name : enc->codec_name);
        put_le16(pb, 0); /* no parameters */


        /* id */
        if (enc->codec_type == CODEC_TYPE_AUDIO) {
            put_le16(pb, 2);
            if(!enc->codec_tag)
                enc->codec_tag = codec_get_tag(codec_wav_tags, enc->codec_id);
            if(!enc->codec_tag)
                return -1;
            put_le16(pb, enc->codec_tag);
        } else {
            put_le16(pb, 4);
            if(!enc->codec_tag)
                enc->codec_tag = codec_get_tag(codec_bmp_tags, enc->codec_id);
            if(!enc->codec_tag)
                return -1;
            put_le32(pb, enc->codec_tag);
        }
    }
    end_header(pb, hpos);

    /* patch the header size fields */

    cur_pos = url_ftell(pb);
    header_size = cur_pos - header_offset;
    if (asf->is_streamed) {
        header_size += 8 + 30 + 50;

        url_fseek(pb, header_offset - 10 - 30, SEEK_SET);
        put_le16(pb, header_size);
        url_fseek(pb, header_offset - 2 - 30, SEEK_SET);
        put_le16(pb, header_size);

        header_size -= 8 + 30 + 50;
    }
    header_size += 24 + 6;
    url_fseek(pb, header_offset - 14, SEEK_SET);
    put_le64(pb, header_size);
    url_fseek(pb, cur_pos, SEEK_SET);

    /* movie chunk, followed by packets of packet_size */
    asf->data_offset = cur_pos;
    put_guid(pb, &data_header);
    put_le64(pb, data_chunk_size);
    put_guid(pb, &my_guid);
    put_le64(pb, asf->nb_packets); /* nb packets */
    put_byte(pb, 1); /* ??? */
    put_byte(pb, 1); /* ??? */
    return 0;
}
Example #21
static int avi_write_header(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = s->pb;
    int bitrate, n, i, nb_frames, au_byterate, au_ssize, au_scale;
    AVCodecContext *stream, *video_enc;
    int64_t list1, list2, strh, strf;
    AVMetadataTag *t = NULL;

    for(n=0;n<s->nb_streams;n++) {
        s->streams[n]->priv_data= av_mallocz(sizeof(AVIStream));
        if(!s->streams[n]->priv_data)
            return AVERROR(ENOMEM);
    }

    /* header list */
    avi->riff_id = 0;
    list1 = avi_start_new_riff(s, pb, "AVI ", "hdrl");

    /* avi header */
    put_tag(pb, "avih");
    put_le32(pb, 14 * 4);
    bitrate = 0;

    video_enc = NULL;
    for(n=0;n<s->nb_streams;n++) {
        stream = s->streams[n]->codec;
        bitrate += stream->bit_rate;
        if (stream->codec_type == AVMEDIA_TYPE_VIDEO)
            video_enc = stream;
    }

    nb_frames = 0;

    if(video_enc){
        put_le32(pb, (uint32_t)(INT64_C(1000000) * video_enc->time_base.num / video_enc->time_base.den));
    } else {
        put_le32(pb, 0);
    }
    put_le32(pb, bitrate / 8); /* XXX: not quite exact */
    put_le32(pb, 0); /* padding */
    if (url_is_streamed(pb))
        put_le32(pb, AVIF_TRUSTCKTYPE | AVIF_ISINTERLEAVED); /* flags */
    else
        put_le32(pb, AVIF_TRUSTCKTYPE | AVIF_HASINDEX | AVIF_ISINTERLEAVED); /* flags */
    avi->frames_hdr_all = url_ftell(pb); /* remember this offset to fill later */
    put_le32(pb, nb_frames); /* nb frames, filled later */
    put_le32(pb, 0); /* initial frame */
    put_le32(pb, s->nb_streams); /* nb streams */
    put_le32(pb, 1024 * 1024); /* suggested buffer size */
    if(video_enc){
        put_le32(pb, video_enc->width);
        put_le32(pb, video_enc->height);
    } else {
        put_le32(pb, 0);
        put_le32(pb, 0);
    }
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */

    /* stream list */
    for(i=0;i<n;i++) {
        AVIStream *avist= s->streams[i]->priv_data;
        list2 = ff_start_tag(pb, "LIST");
        put_tag(pb, "strl");

        stream = s->streams[i]->codec;

        /* stream generic header */
        strh = ff_start_tag(pb, "strh");
        switch(stream->codec_type) {
        case AVMEDIA_TYPE_SUBTITLE:
            // XSUB subtitles behave like video tracks, other subtitles
            // are not (yet) supported.
            if (stream->codec_id != CODEC_ID_XSUB) {
                av_log(s, AV_LOG_ERROR, "Subtitle streams other than DivX XSUB are not supported by the AVI muxer.\n");
                return AVERROR_PATCHWELCOME;
            }
        case AVMEDIA_TYPE_VIDEO: put_tag(pb, "vids"); break;
        case AVMEDIA_TYPE_AUDIO: put_tag(pb, "auds"); break;
//        case AVMEDIA_TYPE_TEXT : put_tag(pb, "txts"); break;
        case AVMEDIA_TYPE_DATA : put_tag(pb, "dats"); break;
        }
        if(stream->codec_type == AVMEDIA_TYPE_VIDEO ||
           stream->codec_id == CODEC_ID_XSUB)
            put_le32(pb, stream->codec_tag);
        else
            put_le32(pb, 1);
        put_le32(pb, 0); /* flags */
        put_le16(pb, 0); /* priority */
        put_le16(pb, 0); /* language */
        put_le32(pb, 0); /* initial frame */

        ff_parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale);

        put_le32(pb, au_scale); /* scale */
        put_le32(pb, au_byterate); /* rate */
        av_set_pts_info(s->streams[i], 64, au_scale, au_byterate);

        put_le32(pb, 0); /* start */
        avist->frames_hdr_strm = url_ftell(pb); /* remember this offset to fill later */
        if (url_is_streamed(pb))
            put_le32(pb, AVI_MAX_RIFF_SIZE); /* FIXME: this may be broken, but who cares */
        else
            put_le32(pb, 0); /* length, XXX: filled later */

        /* suggested buffer size */ //FIXME set at the end to largest chunk
        if(stream->codec_type == AVMEDIA_TYPE_VIDEO)
            put_le32(pb, 1024 * 1024);
        else if(stream->codec_type == AVMEDIA_TYPE_AUDIO)
            put_le32(pb, 12 * 1024);
        else
            put_le32(pb, 0);
        put_le32(pb, -1); /* quality */
        put_le32(pb, au_ssize); /* sample size */
        put_le32(pb, 0);
        put_le16(pb, stream->width);
        put_le16(pb, stream->height);
        ff_end_tag(pb, strh);

      if(stream->codec_type != AVMEDIA_TYPE_DATA){
        strf = ff_start_tag(pb, "strf");
        switch(stream->codec_type) {
        case AVMEDIA_TYPE_SUBTITLE:
            // XSUB subtitles behave like video tracks, other subtitles
            // are not (yet) supported.
            if (stream->codec_id != CODEC_ID_XSUB) break;
        case AVMEDIA_TYPE_VIDEO:
            ff_put_bmp_header(pb, stream, ff_codec_bmp_tags, 0);
            break;
        case AVMEDIA_TYPE_AUDIO:
            if (ff_put_wav_header(pb, stream) < 0) {
                return -1;
            }
            break;
        default:
            return -1;
        }
        ff_end_tag(pb, strf);
        if ((t = av_metadata_get(s->streams[i]->metadata, "title", NULL, 0))) {
            avi_write_info_tag(s->pb, "strn", t->value);
            t = NULL;
        }
      }

        if (!url_is_streamed(pb)) {
            unsigned char tag[5];
            int j;

            /* Starting to lay out AVI OpenDML master index.
             * We want to make it JUNK entry for now, since we'd
             * like to get away without making AVI an OpenDML one
             * for compatibility reasons.
             */
            avist->indexes.entry = avist->indexes.ents_allocated = 0;
            avist->indexes.indx_start = ff_start_tag(pb, "JUNK");
            put_le16(pb, 4);        /* wLongsPerEntry */
            put_byte(pb, 0);        /* bIndexSubType (0 == frame index) */
            put_byte(pb, 0);        /* bIndexType (0 == AVI_INDEX_OF_INDEXES) */
            put_le32(pb, 0);        /* nEntriesInUse (will fill out later on) */
            put_tag(pb, avi_stream2fourcc(&tag[0], i, stream->codec_type));
                                    /* dwChunkId */
            put_le64(pb, 0);        /* dwReserved[3]
            put_le32(pb, 0);           Must be 0.    */
            for (j=0; j < AVI_MASTER_INDEX_SIZE * 2; j++)
                 put_le64(pb, 0);
            ff_end_tag(pb, avist->indexes.indx_start);
        }

        if(   stream->codec_type == AVMEDIA_TYPE_VIDEO
           && s->streams[i]->sample_aspect_ratio.num>0
           && s->streams[i]->sample_aspect_ratio.den>0){
            int vprp= ff_start_tag(pb, "vprp");
            AVRational dar = av_mul_q(s->streams[i]->sample_aspect_ratio,
                                      (AVRational){stream->width, stream->height});
            int num, den;
            av_reduce(&num, &den, dar.num, dar.den, 0xFFFF);

            put_le32(pb, 0); //video format  = unknown
            put_le32(pb, 0); //video standard= unknown
            put_le32(pb, lrintf(1.0/av_q2d(stream->time_base)));
            put_le32(pb, stream->width );
            put_le32(pb, stream->height);
            put_le16(pb, den);
            put_le16(pb, num);
            put_le32(pb, stream->width );
            put_le32(pb, stream->height);
            put_le32(pb, 1); //progressive FIXME

            put_le32(pb, stream->height);
            put_le32(pb, stream->width );
            put_le32(pb, stream->height);
            put_le32(pb, stream->width );
            put_le32(pb, 0);
            put_le32(pb, 0);

            put_le32(pb, 0);
            put_le32(pb, 0);
            ff_end_tag(pb, vprp);
        }

        ff_end_tag(pb, list2);
    }

    if (!url_is_streamed(pb)) {
        /* AVI could become an OpenDML one, if it grows beyond 2Gb range */
        avi->odml_list = ff_start_tag(pb, "JUNK");
        put_tag(pb, "odml");
        put_tag(pb, "dmlh");
        put_le32(pb, 248);
        for (i = 0; i < 248; i+= 4)
             put_le32(pb, 0);
        ff_end_tag(pb, avi->odml_list);
    }

    ff_end_tag(pb, list1);

    list2 = ff_start_tag(pb, "LIST");
    put_tag(pb, "INFO");
    ff_metadata_conv(&s->metadata, ff_avi_metadata_conv, NULL);
    for (i = 0; *ff_avi_tags[i]; i++) {
        if ((t = av_metadata_get(s->metadata, ff_avi_tags[i], NULL, AV_METADATA_MATCH_CASE)))
            avi_write_info_tag(s->pb, t->key, t->value);
    }
    ff_end_tag(pb, list2);

    /* some padding for easier tag editing */
    list2 = ff_start_tag(pb, "JUNK");
    for (i = 0; i < 1016; i += 4)
        put_le32(pb, 0);
    ff_end_tag(pb, list2);

    avi->movi_list = ff_start_tag(pb, "LIST");
    put_tag(pb, "movi");

    put_flush_packet(pb);

    return 0;
}