Example #1
0
static int gxf_write_mpeg_auxiliary(ByteIOContext *pb, GXFStreamContext *ctx)
{
    char buffer[1024];
    int size, starting_line;

    if (ctx->iframes) {
        ctx->p_per_gop = ctx->pframes / ctx->iframes;
        if (ctx->pframes % ctx->iframes)
            ctx->p_per_gop++;
        if (ctx->pframes) {
            ctx->b_per_i_or_p = ctx->bframes / ctx->pframes;
            if (ctx->bframes % ctx->pframes)
                ctx->b_per_i_or_p++;
        }
        if (ctx->p_per_gop > 9)
            ctx->p_per_gop = 9; /* ensure value won't take more than one char */
        if (ctx->b_per_i_or_p > 9)
            ctx->b_per_i_or_p = 9; /* ensure value won't take more than one char */
    }
    if (ctx->codec->height == 512 || ctx->codec->height == 608)
        starting_line = 7; // VBI
    else if (ctx->codec->height == 480)
        starting_line = 20;
    else
        starting_line = 23; // default PAL

    size = snprintf(buffer, 1024, "Ver 1\nBr %.6f\nIpg 1\nPpi %d\nBpiop %d\n"
                    "Pix 0\nCf %d\nCg %d\nSl %d\nnl16 %d\nVi 1\nf1 1\n",
                    (float)ctx->codec->bit_rate, ctx->p_per_gop, ctx->b_per_i_or_p,
                    ctx->codec->pix_fmt == PIX_FMT_YUV422P ? 2 : 1, ctx->first_gop_closed == 1,
                    starting_line, ctx->codec->height / 16);
    put_byte(pb, TRACK_MPG_AUX);
    put_byte(pb, size + 1);
    put_buffer(pb, (uint8_t *)buffer, size + 1);
    return size + 3;
}
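The GOP statistics above are computed with integer round-up division (quotient plus one when there is a remainder) and then clamped to 9 so that each value fits in the single character reserved for it in the text template. A tiny self-contained check of that idiom, independent of the muxer; the helper name div_round_up is hypothetical:

#include <assert.h>

/* Integer division rounded up, the same arithmetic used for
 * p_per_gop and b_per_i_or_p in gxf_write_mpeg_auxiliary(). */
static int div_round_up(int num, int den)
{
    int q = num / den;
    if (num % den)
        q++;
    return q;
}

int main(void)
{
    assert(div_round_up(25, 10) == 3); /* e.g. 25 P-frames spread over 10 I-frames */
    assert(div_round_up(20, 10) == 2); /* exact division: no rounding */
    return 0;
}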
Example #2
0
static int rm_write_audio(AVFormatContext *s, const uint8_t *buf, int size, int flags)
{
    uint8_t *buf1;
    RMContext *rm = s->priv_data;
    ByteIOContext *pb = s->pb;
    StreamInfo *stream = rm->audio_stream;
    int i;

    /* XXX: suppress this malloc */
    buf1 = av_malloc(size * sizeof(uint8_t));
    if (!buf1)
        return AVERROR(ENOMEM);

    write_packet_header(s, stream, size, !!(flags & PKT_FLAG_KEY));

    /* for AC-3, the words seem to be reversed */
    for(i=0;i<size;i+=2) {
        buf1[i] = buf[i+1];
        buf1[i+1] = buf[i];
    }
    put_buffer(pb, buf1, size);
    put_flush_packet(pb);
    stream->nb_frames++;
    av_free(buf1);
    return 0;
}
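The loop above swaps each pair of bytes before writing, since the comment notes that AC-3 words appear byte-reversed in RealMedia. A minimal standalone sketch of that swap using only standard C, with no FFmpeg dependencies; the helper name swap16_buffer is hypothetical:

#include <stddef.h>
#include <stdint.h>

/* Copy 'size' bytes from src to dst, exchanging the two bytes of every
 * 16-bit word. 'size' is assumed to be even, as it is for whole AC-3 frames. */
static void swap16_buffer(uint8_t *dst, const uint8_t *src, size_t size)
{
    size_t i;
    for (i = 0; i + 1 < size; i += 2) {
        dst[i]     = src[i + 1];
        dst[i + 1] = src[i];
    }
}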
Example #3
0
static int flv_write_header(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    FLVContext *flv = s->priv_data;
    AVCodecContext *audio_enc = NULL, *video_enc = NULL;
    int i;
    double framerate = 0.0;
    int64_t metadata_size_pos, data_size;

    for(i=0; i<s->nb_streams; i++) {
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (s->streams[i]->r_frame_rate.den && s->streams[i]->r_frame_rate.num) {
                framerate = av_q2d(s->streams[i]->r_frame_rate);
            } else {
                framerate = 1/av_q2d(s->streams[i]->codec->time_base);
            }
            video_enc = enc;
            if(enc->codec_tag == 0) {
                av_log(enc, AV_LOG_ERROR, "video codec not compatible with flv\n");
                return -1;
            }
        } else {
            audio_enc = enc;
            if(get_audio_flags(enc)<0)
                return -1;
        }
        av_set_pts_info(s->streams[i], 32, 1, 1000); /* 32 bit pts in ms */
    }
    put_tag(pb,"FLV");
    put_byte(pb,1);
    put_byte(pb,   FLV_HEADER_FLAG_HASAUDIO * !!audio_enc
             + FLV_HEADER_FLAG_HASVIDEO * !!video_enc);
    put_be32(pb,9);
    put_be32(pb,0);

    for(i=0; i<s->nb_streams; i++) {
        if(s->streams[i]->codec->codec_tag == 5) {
            put_byte(pb,8); // message type
            put_be24(pb,0); // include flags
            put_be24(pb,0); // time stamp
            put_be32(pb,0); // reserved
            put_be32(pb,11); // size
            flv->reserved=5;
        }
    }

    /* write meta_tag */
    put_byte(pb, 18);         // tag type META
    metadata_size_pos= url_ftell(pb);
    put_be24(pb, 0);          // size of data part (sum of all parts below)
    put_be24(pb, 0);          // time stamp
    put_be32(pb, 0);          // reserved

    /* now data of data_size size */

    /* first event name as a string */
    put_byte(pb, AMF_DATA_TYPE_STRING);
    put_amf_string(pb, "onMetaData"); // 12 bytes

    /* mixed array (hash) with size and string/type/data tuples */
    put_byte(pb, AMF_DATA_TYPE_MIXEDARRAY);
    put_be32(pb, 5*!!video_enc + 5*!!audio_enc + 2); // +2 for duration and file size

    put_amf_string(pb, "duration");
    flv->duration_offset= url_ftell(pb);
    put_amf_double(pb, s->duration / AV_TIME_BASE); // fill in the guessed duration, it'll be corrected later if incorrect

    if(video_enc) {
        put_amf_string(pb, "width");
        put_amf_double(pb, video_enc->width);

        put_amf_string(pb, "height");
        put_amf_double(pb, video_enc->height);

        put_amf_string(pb, "videodatarate");
        put_amf_double(pb, video_enc->bit_rate / 1024.0);

        put_amf_string(pb, "framerate");
        put_amf_double(pb, framerate);

        put_amf_string(pb, "videocodecid");
        put_amf_double(pb, video_enc->codec_tag);
    }

    if(audio_enc) {
        put_amf_string(pb, "audiodatarate");
        put_amf_double(pb, audio_enc->bit_rate / 1024.0);

        put_amf_string(pb, "audiosamplerate");
        put_amf_double(pb, audio_enc->sample_rate);

        put_amf_string(pb, "audiosamplesize");
        put_amf_double(pb, audio_enc->codec_id == CODEC_ID_PCM_U8 ? 8 : 16);

        put_amf_string(pb, "stereo");
        put_amf_bool(pb, audio_enc->channels == 2);

        put_amf_string(pb, "audiocodecid");
        put_amf_double(pb, audio_enc->codec_tag);
    }

    put_amf_string(pb, "filesize");
    flv->filesize_offset= url_ftell(pb);
    put_amf_double(pb, 0); // delayed write

    put_amf_string(pb, "");
    put_byte(pb, AMF_END_OF_OBJECT);

    /* write total size of tag */
    data_size= url_ftell(pb) - metadata_size_pos - 10;
    url_fseek(pb, metadata_size_pos, SEEK_SET);
    put_be24(pb, data_size);
    url_fseek(pb, data_size + 10 - 3, SEEK_CUR);
    put_be32(pb, data_size + 11);

    for (i = 0; i < s->nb_streams; i++) {
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_id == CODEC_ID_AAC || enc->codec_id == CODEC_ID_H264) {
            int64_t pos;
            put_byte(pb, enc->codec_type == AVMEDIA_TYPE_VIDEO ?
                     FLV_TAG_TYPE_VIDEO : FLV_TAG_TYPE_AUDIO);
            put_be24(pb, 0); // size patched later
            put_be24(pb, 0); // ts
            put_byte(pb, 0); // ts ext
            put_be24(pb, 0); // streamid
            pos = url_ftell(pb);
            if (enc->codec_id == CODEC_ID_AAC) {
                put_byte(pb, get_audio_flags(enc));
                put_byte(pb, 0); // AAC sequence header
                put_buffer(pb, enc->extradata, enc->extradata_size);
            } else {
                put_byte(pb, enc->codec_tag | FLV_FRAME_KEY); // flags
                put_byte(pb, 0); // AVC sequence header
                put_be24(pb, 0); // composition time
                ff_isom_write_avcc(pb, enc->extradata, enc->extradata_size);
            }
            data_size = url_ftell(pb) - pos;
            url_fseek(pb, -data_size - 10, SEEK_CUR);
            put_be24(pb, data_size);
            url_fseek(pb, data_size + 10 - 3, SEEK_CUR);
            put_be32(pb, data_size + 11); // previous tag size
        }
    }

    return 0;
}
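flv_write_header() above relies twice on the same write-then-patch pattern: it writes a zero placeholder for a size field, remembers the offset with url_ftell, and once the variable-length body has been written it seeks back with url_fseek and overwrites the placeholder. A minimal sketch of that pattern in isolation, assuming the old ByteIOContext API used throughout these examples (url_ftell, url_fseek, put_be24, put_buffer); the helper name write_sized_chunk is hypothetical and only behaves as intended on seekable outputs:

static void write_sized_chunk(ByteIOContext *pb, const uint8_t *body, int body_size)
{
    int64_t size_pos, end_pos;

    size_pos = url_ftell(pb);             /* remember where the size field lives */
    put_be24(pb, 0);                      /* placeholder, patched below */
    put_buffer(pb, body, body_size);

    end_pos = url_ftell(pb);
    url_fseek(pb, size_pos, SEEK_SET);
    put_be24(pb, end_pos - size_pos - 3); /* actual body size, excluding the 3-byte field */
    url_fseek(pb, end_pos, SEEK_SET);     /* restore the write position */
}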
Example #4
0
static int au_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    ByteIOContext *pb = s->pb;
    put_buffer(pb, pkt->data, pkt->size);
    return 0;
}
Example #5
0
static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    VideoData *img = s->priv_data;
    ByteIOContext *pb[3];
    char filename[1024];
    AVCodecContext *codec= s->streams[ pkt->stream_index ]->codec;
    int i;

    if (!img->is_pipe) {
        if (av_get_frame_filename(filename, sizeof(filename),
                                  img->path, img->img_number) < 0 && img->img_number>1) {
            av_log(s, AV_LOG_ERROR, "Could not get frame filename from pattern\n");
            return AVERROR(EIO);
        }
        for(i=0; i<3; i++){
            if (url_fopen(&pb[i], filename, URL_WRONLY) < 0) {
                av_log(s, AV_LOG_ERROR, "Could not open file : %s\n",filename);
                return AVERROR(EIO);
            }

            if(codec->codec_id != CODEC_ID_RAWVIDEO)
                break;
            filename[ strlen(filename) - 1 ]= 'U' + i;
        }
    } else {
        pb[0] = s->pb;
    }

    if(codec->codec_id == CODEC_ID_RAWVIDEO){
        int ysize = codec->width * codec->height;
        put_buffer(pb[0], pkt->data        , ysize);
        put_buffer(pb[1], pkt->data + ysize, (pkt->size - ysize)/2);
        put_buffer(pb[2], pkt->data + ysize +(pkt->size - ysize)/2, (pkt->size - ysize)/2);
        put_flush_packet(pb[1]);
        put_flush_packet(pb[2]);
        url_fclose(pb[1]);
        url_fclose(pb[2]);
    }else{
        if(av_str2id(img_tags, s->filename) == CODEC_ID_JPEG2000){
            AVStream *st = s->streams[0];
            if(st->codec->extradata_size > 8 &&
               AV_RL32(st->codec->extradata+4) == MKTAG('j','p','2','h')){
                if(pkt->size < 8 || AV_RL32(pkt->data+4) != MKTAG('j','p','2','c'))
                    goto error;
                put_be32(pb[0], 12);
                put_tag (pb[0], "jP  ");
                put_be32(pb[0], 0x0D0A870A); // signature
                put_be32(pb[0], 20);
                put_tag (pb[0], "ftyp");
                put_tag (pb[0], "jp2 ");
                put_be32(pb[0], 0);
                put_tag (pb[0], "jp2 ");
                put_buffer(pb[0], st->codec->extradata, st->codec->extradata_size);
            }else if(pkt->size < 8 ||
                     (!st->codec->extradata_size &&
                      AV_RL32(pkt->data+4) != MKTAG('j','P',' ',' '))){ // signature
            error:
                av_log(s, AV_LOG_ERROR, "malformated jpeg2000 codestream\n");
                return -1;
            }
        }
        put_buffer(pb[0], pkt->data, pkt->size);
    }
    put_flush_packet(pb[0]);
    if (!img->is_pipe) {
        url_fclose(pb[0]);
    }

    img->img_number++;
    return 0;
}
Example #6
0
static int swf_write_header(AVFormatContext *s)
{
    SWFContext *swf = s->priv_data;
    ByteIOContext *pb = s->pb;
    PutBitContext p;
    uint8_t buf1[256];
    int i, width, height, rate, rate_base;
    int version;

    swf->sound_samples = 0;
    swf->swf_frame_number = 0;
    swf->video_frame_number = 0;

    for(i=0;i<s->nb_streams;i++) {
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_type == CODEC_TYPE_AUDIO) {
            if (enc->codec_id == CODEC_ID_MP3) {
                if (!enc->frame_size) {
                    av_log(s, AV_LOG_ERROR, "audio frame size not set\n");
                    return -1;
                }
                swf->audio_enc = enc;
                av_fifo_init(&swf->audio_fifo, AUDIO_FIFO_SIZE);
            } else {
                av_log(s, AV_LOG_ERROR, "SWF muxer only supports MP3\n");
                return -1;
            }
        } else {
            if (enc->codec_id == CODEC_ID_VP6F ||
                enc->codec_id == CODEC_ID_FLV1 ||
                enc->codec_id == CODEC_ID_MJPEG) {
                swf->video_enc = enc;
            } else {
                av_log(s, AV_LOG_ERROR, "SWF muxer only supports VP6, FLV1 and MJPEG\n");
                return -1;
            }
        }
    }

    if (!swf->video_enc) {
        /* currently, cannot work correctly if audio only */
        width = 320;
        height = 200;
        rate = 10;
        rate_base= 1;
    } else {
        width = swf->video_enc->width;
        height = swf->video_enc->height;
        rate = swf->video_enc->time_base.den;
        rate_base = swf->video_enc->time_base.num;
    }

    if (!swf->audio_enc)
        swf->samples_per_frame = (44100. * rate_base) / rate;
    else
        swf->samples_per_frame = (swf->audio_enc->sample_rate * rate_base) / rate;

    put_tag(pb, "FWS");

    if (!strcmp("avm2", s->oformat->name))
        version = 9;
    else if (swf->video_enc && swf->video_enc->codec_id == CODEC_ID_VP6F)
        version = 8; /* version 8 and above support VP6 codec */
    else if (swf->video_enc && swf->video_enc->codec_id == CODEC_ID_FLV1)
        version = 6; /* version 6 and above support FLV1 codec */
    else
        version = 4; /* version 4 for mpeg audio support */
    put_byte(pb, version);

    put_le32(pb, DUMMY_FILE_SIZE); /* dummy size
                                      (will be patched if not streamed) */

    put_swf_rect(pb, 0, width * 20, 0, height * 20);
    put_le16(pb, (rate * 256) / rate_base); /* frame rate */
    swf->duration_pos = url_ftell(pb);
    put_le16(pb, (uint16_t)(DUMMY_DURATION * (int64_t)rate / rate_base)); /* frame count */

    /* avm2/swf v9 (also v8?) files require a file attribute tag */
    if (version == 9) {
        put_swf_tag(s, TAG_FILEATTRIBUTES);
        put_le32(pb, 1<<3); /* set ActionScript v3/AVM2 flag */
        put_swf_end_tag(s);
    }

    /* define a shape with the jpeg inside */
    if (swf->video_enc && swf->video_enc->codec_id == CODEC_ID_MJPEG) {
        put_swf_tag(s, TAG_DEFINESHAPE);

        put_le16(pb, SHAPE_ID); /* ID of shape */
        /* bounding rectangle */
        put_swf_rect(pb, 0, width, 0, height);
        /* style info */
        put_byte(pb, 1); /* one fill style */
        put_byte(pb, 0x41); /* clipped bitmap fill */
        put_le16(pb, BITMAP_ID); /* bitmap ID */
        /* position of the bitmap */
        put_swf_matrix(pb, (int)(1.0 * (1 << FRAC_BITS)), 0,
                       0, (int)(1.0 * (1 << FRAC_BITS)), 0, 0);
        put_byte(pb, 0); /* no line style */

        /* shape drawing */
        init_put_bits(&p, buf1, sizeof(buf1));
        put_bits(&p, 4, 1); /* one fill bit */
        put_bits(&p, 4, 0); /* zero line bit */

        put_bits(&p, 1, 0); /* not an edge */
        put_bits(&p, 5, FLAG_MOVETO | FLAG_SETFILL0);
        put_bits(&p, 5, 1); /* nbits */
        put_bits(&p, 1, 0); /* X */
        put_bits(&p, 1, 0); /* Y */
        put_bits(&p, 1, 1); /* set fill style 1 */

        /* draw the rectangle ! */
        put_swf_line_edge(&p, width, 0);
        put_swf_line_edge(&p, 0, height);
        put_swf_line_edge(&p, -width, 0);
        put_swf_line_edge(&p, 0, -height);

        /* end of shape */
        put_bits(&p, 1, 0); /* not an edge */
        put_bits(&p, 5, 0);

        flush_put_bits(&p);
        put_buffer(pb, buf1, pbBufPtr(&p) - p.buf);

        put_swf_end_tag(s);
    }

    if (swf->audio_enc && swf->audio_enc->codec_id == CODEC_ID_MP3) {
        int v = 0;

        /* start sound */
        put_swf_tag(s, TAG_STREAMHEAD2);
        switch(swf->audio_enc->sample_rate) {
        case 11025: v |= 1 << 2; break;
        case 22050: v |= 2 << 2; break;
        case 44100: v |= 3 << 2; break;
        default:
            /* not supported */
            av_log(s, AV_LOG_ERROR, "swf does not support that sample rate, choose from (44100, 22050, 11025).\n");
            return -1;
        }
        v |= 0x02; /* 16 bit playback */
        if (swf->audio_enc->channels == 2)
            v |= 0x01; /* stereo playback */
        put_byte(s->pb, v);
        v |= 0x20; /* mp3 compressed */
        put_byte(s->pb, v);
        put_le16(s->pb, swf->samples_per_frame);  /* avg samples per frame */
        put_le16(s->pb, 0);

        put_swf_end_tag(s);
    }

    put_flush_packet(s->pb);
    return 0;
}
Example #7
0
static int flac_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    put_buffer(s->pb, pkt->data, pkt->size);
    put_flush_packet(s->pb);
    return 0;
}
Example #8
0
int rtp_check_and_send_back_rr(RTPDemuxContext *s, int count)
{
    ByteIOContext *pb;
    uint8_t *buf;
    int len;
    int rtcp_bytes;
    RTPStatistics *stats= &s->statistics;
    uint32_t lost;
    uint32_t extended_max;
    uint32_t expected_interval;
    uint32_t received_interval;
    uint32_t lost_interval;
    uint32_t expected;
    uint32_t fraction;
    uint64_t ntp_time= s->last_rtcp_ntp_time; // TODO: Get local ntp time?

    if (!s->rtp_ctx || (count < 1))
        return -1;

    /* TODO: I think this is way too often; RFC 1889 has an algorithm for this */
    /* XXX: mpeg pts hardcoded. RTCP sent every 0.5 seconds */
    s->octet_count += count;
    rtcp_bytes = ((s->octet_count - s->last_octet_count) * RTCP_TX_RATIO_NUM) /
        RTCP_TX_RATIO_DEN;
    rtcp_bytes /= 50; // mmu_man: that's enough for me... VLC sends much less btw !?
    if (rtcp_bytes < 28)
        return -1;
    s->last_octet_count = s->octet_count;

    if (url_open_dyn_buf(&pb) < 0)
        return -1;

    // Receiver Report
    put_byte(pb, (RTP_VERSION << 6) + 1); /* 1 report block */
    put_byte(pb, 201);
    put_be16(pb, 7); /* length in words - 1 */
    put_be32(pb, s->ssrc); // our own SSRC
    put_be32(pb, s->ssrc); // XXX: should be the server's here!
    // some placeholders we should really fill...
    // RFC 1889/p64
    extended_max= stats->cycles + stats->max_seq;
    expected= extended_max - stats->base_seq + 1;
    lost= expected - stats->received;
    lost= FFMIN(lost, 0xffffff); // clamp it since it's only 24 bits...
    expected_interval= expected - stats->expected_prior;
    stats->expected_prior= expected;
    received_interval= stats->received - stats->received_prior;
    stats->received_prior= stats->received;
    lost_interval= expected_interval - received_interval;
    if (expected_interval==0 || lost_interval<=0) fraction= 0;
    else fraction = (lost_interval<<8)/expected_interval;

    fraction= (fraction<<24) | lost;

    put_be32(pb, fraction); /* 8 bits of fraction, 24 bits of total packets lost */
    put_be32(pb, extended_max); /* max sequence received */
    put_be32(pb, stats->jitter>>4); /* jitter */

    if(s->last_rtcp_ntp_time==AV_NOPTS_VALUE)
    {
        put_be32(pb, 0); /* last SR timestamp */
        put_be32(pb, 0); /* delay since last SR */
    } else {
        uint32_t middle_32_bits= s->last_rtcp_ntp_time>>16; // this is valid, right? do we need to handle 64 bit values special?
        uint32_t delay_since_last= ntp_time - s->last_rtcp_ntp_time;

        put_be32(pb, middle_32_bits); /* last SR timestamp */
        put_be32(pb, delay_since_last); /* delay since last SR */
    }

    // CNAME
    put_byte(pb, (RTP_VERSION << 6) + 1); /* 1 report block */
    put_byte(pb, 202);
    len = strlen(s->hostname);
    put_be16(pb, (6 + len + 3) / 4); /* length in words - 1 */
    put_be32(pb, s->ssrc);
    put_byte(pb, 0x01);
    put_byte(pb, len);
    put_buffer(pb, s->hostname, len);
    // padding
    for (len = (6 + len) % 4; len % 4; len++) {
        put_byte(pb, 0);
    }

    put_flush_packet(pb);
    len = url_close_dyn_buf(pb, &buf);
    if ((len > 0) && buf) {
        int result;
        dprintf(s->ic, "sending %d bytes of RR\n", len);
        result= url_write(s->rtp_ctx, buf, len);
        dprintf(s->ic, "result from url_write: %d\n", result);
        av_free(buf);
    }
    return 0;
}
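The receiver report above packs two values into a single 32-bit word: the top 8 bits carry the loss fraction for the last interval as an 8-bit fixed-point value ((lost_interval << 8) / expected_interval) and the low 24 bits carry the clamped cumulative loss. A small self-contained sketch of that packing with worked numbers; the helper name pack_loss_word is hypothetical:

#include <assert.h>
#include <stdint.h>

static uint32_t pack_loss_word(uint32_t lost_interval, uint32_t expected_interval,
                               uint32_t cumulative_lost)
{
    uint32_t fraction;

    if (expected_interval == 0)
        fraction = 0;
    else
        fraction = (lost_interval << 8) / expected_interval;
    if (cumulative_lost > 0xffffff)
        cumulative_lost = 0xffffff;   /* the cumulative-loss field is only 24 bits wide */
    return (fraction << 24) | cumulative_lost;
}

int main(void)
{
    /* 25 packets lost out of 1000 expected: fraction = 25*256/1000 = 6,
     * i.e. roughly 6/256 = 2.3% loss encoded in the top byte. */
    assert(pack_loss_word(25, 1000, 25) == (((uint32_t)6 << 24) | 25));
    return 0;
}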
Example #9
0
static void put_frame(
                    AVFormatContext *s,
                    ASFStream       *stream,
                    AVStream        *avst,
                    int             timestamp,
                    const uint8_t   *buf,
                    int             m_obj_size,
                    int             flags
                )
{
    ASFContext *asf = s->priv_data;
    int m_obj_offset, payload_len, frag_len1;

    m_obj_offset = 0;
    while (m_obj_offset < m_obj_size) {
        payload_len = m_obj_size - m_obj_offset;
        if (asf->packet_timestamp_start == -1) {
            asf->multi_payloads_present = (payload_len < MULTI_PAYLOAD_CONSTANT);

            asf->packet_size_left = PACKET_SIZE;
            if (asf->multi_payloads_present){
                frag_len1 = MULTI_PAYLOAD_CONSTANT - 1;
            }
            else {
                frag_len1 = SINGLE_PAYLOAD_DATA_LENGTH;
            }
            asf->packet_timestamp_start = timestamp;
        }
        else {
            // multi payloads
            frag_len1 = asf->packet_size_left - PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS - PACKET_HEADER_MIN_SIZE - 1;

            if(frag_len1 < payload_len && avst->codec->codec_type == CODEC_TYPE_AUDIO){
                flush_packet(s);
                continue;
            }
        }
        if (frag_len1 > 0) {
            if (payload_len > frag_len1)
                payload_len = frag_len1;
            else if (payload_len == (frag_len1 - 1))
                payload_len = frag_len1 - 2;  // an additional byte is needed to store the padding length

            put_payload_header(s, stream, timestamp+PREROLL_TIME, m_obj_size, m_obj_offset, payload_len, flags);
            put_buffer(&asf->pb, buf, payload_len);

            if (asf->multi_payloads_present)
                asf->packet_size_left -= (payload_len + PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS);
            else
                asf->packet_size_left -= (payload_len + PAYLOAD_HEADER_SIZE_SINGLE_PAYLOAD);
            asf->packet_timestamp_end = timestamp;

            asf->packet_nb_payloads++;
        } else {
            payload_len = 0;
        }
        m_obj_offset += payload_len;
        buf += payload_len;

        if (!asf->multi_payloads_present)
            flush_packet(s);
        else if (asf->packet_size_left <= (PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS + PACKET_HEADER_MIN_SIZE + 1))
            flush_packet(s);
    }
    stream->seq++;
}
Example #10
0
static void put_guid(ByteIOContext *s, const GUID *g)
{
    assert(sizeof(*g) == 16);
    put_buffer(s, *g, sizeof(*g));
}
Example #11
0
void
transmit_data(void)
{
    unsigned int count;
    u_short perm;
    char *buffer;
    int bufsize;
    int fd;
    struct stat st;
    int len, len2;
    int error;
    char exists;

    if (debug)
        diag("sending data");
    if ((tempmode&WANT_BACK) == 0) {
        (void) unlink(tempfile);
        if (debug)
            diag("not wanted back");
        return;
    }
    if (lstat(tempfile, &st) < 0) {
        if (errno != ENOENT) {
            error = errno;
            (void) unlink(tempfile);
            (void) rmdir(tempdir);
            errno = error;
            efatal("lstat", error);
        }
        exists = 0;
    } else {
        if ((st.st_mode&S_IFMT) != S_IFREG) {
            (void) unlink(tempfile);
            (void) rmdir(tempdir);
            fatal("tempfile no longer a regular file");
        }
        exists = 1;
    }
    if (debug)
        diag("exists %d", exists);
    if ( ! put_exists ( &exists ) ) {
        (void) unlink(tempfile);
        (void) rmdir(tempdir);
        efatal("write failed");
    }
    if (!exists) {
        (void) unlink(tempfile);
        return;
    }
    perm = st.st_mode&0777;
    if (debug)
        diag("perm %#o", perm);
    if ( ! put_perm ( &perm ) ) {
        (void) unlink(tempfile);
        (void) rmdir(tempdir);
        efatal("write failed");
    }
    count = st.st_size;
    if (debug)
        diag("count %d", count);
    if ( ! put_count ( &count ) ) {
        (void) unlink(tempfile);
        (void) rmdir(tempdir);
        efatal("write failed");
    }
    count = st.st_size;
    if (count == 0) {
        (void) unlink(tempfile);
#ifdef notdef
        (void) rmdir(tempdir);
#endif
        return;
    }
    if ((fd = open(tempfile, O_RDONLY, 0)) < 0) {
        (void) unlink(tempfile);
        (void) rmdir(tempdir);
        efatal("open failed");
    }
    if ((bufsize = count) > 10*1024)
        buffer = (char *) malloc(bufsize = 10*1024);
    else
        buffer = (char *) malloc(bufsize);
    if (buffer == NULL) {
        (void) unlink(tempfile);
        (void) rmdir(tempdir);
        fatal("malloc failed");
    }
    if (debug)
        diag("bufsize %d", bufsize);
    while (count != 0) {
        if (debug)
            diag("loop count %d", count);
        if ((len = read(fd, buffer, bufsize)) <= 0) {
            error = errno;
            (void) close(fd);
            (void) unlink(tempfile);
            (void) rmdir(tempdir);
            if (len == 0)
                fatal("1 premature EOF");
            errno = error;
            efatal("server read", error);
        }
        if (debug)
            diag("read %d bytes", len);
        put_buffer ( buffer, len, &len2, fd );
        if (debug)
            diag("wrote %d bytes", len2);
        count -= len;
        if (count < bufsize)
            bufsize = count;
    }
    if (close(fd) < 0) {
        error = errno;
        (void) unlink(tempfile);
        (void) rmdir(tempdir);
        errno = error;
        efatal("close");
    }
    (void) free(buffer);
    (void) unlink(tempfile);
    if (debug)
        diag("transfer complete");
}
Example #12
0
/* write the header (used two times if non streamed) */
static int asf_write_header1(AVFormatContext *s, int64_t file_size, int64_t data_chunk_size)
{
    ASFContext *asf = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVMetadataTag *tags[5];
    int header_size, n, extra_size, extra_size2, wav_extra_size, file_time;
    int has_title;
    int metadata_count;
    AVCodecContext *enc;
    int64_t header_offset, cur_pos, hpos;
    int bit_rate;
    int64_t duration;

    ff_metadata_conv(&s->metadata, ff_asf_metadata_conv, NULL);

    tags[0] = av_metadata_get(s->metadata, "title"    , NULL, 0);
    tags[1] = av_metadata_get(s->metadata, "author"   , NULL, 0);
    tags[2] = av_metadata_get(s->metadata, "copyright", NULL, 0);
    tags[3] = av_metadata_get(s->metadata, "comment"  , NULL, 0);
    tags[4] = av_metadata_get(s->metadata, "rating"   , NULL, 0);

    duration = asf->duration + PREROLL_TIME * 10000;
    has_title = tags[0] || tags[1] || tags[2] || tags[3] || tags[4];
    metadata_count = s->metadata ? s->metadata->count : 0;

    bit_rate = 0;
    for(n=0;n<s->nb_streams;n++) {
        enc = s->streams[n]->codec;

        av_set_pts_info(s->streams[n], 32, 1, 1000); /* 32 bit pts in ms */

        bit_rate += enc->bit_rate;
    }

    if (asf->is_streamed) {
        put_chunk(s, 0x4824, 0, 0xc00); /* start of stream (length will be patched later) */
    }

    put_guid(pb, &ff_asf_header);
    put_le64(pb, -1); /* header length, will be patched after */
    put_le32(pb, 3 + has_title + !!metadata_count + s->nb_streams); /* number of chunks in header */
    put_byte(pb, 1); /* ??? */
    put_byte(pb, 2); /* ??? */

    /* file header */
    header_offset = url_ftell(pb);
    hpos = put_header(pb, &ff_asf_file_header);
    put_guid(pb, &ff_asf_my_guid);
    put_le64(pb, file_size);
    file_time = 0;
    put_le64(pb, unix_to_file_time(file_time));
    put_le64(pb, asf->nb_packets); /* number of packets */
    put_le64(pb, duration); /* end time stamp (in 100ns units) */
    put_le64(pb, asf->duration); /* duration (in 100ns units) */
    put_le64(pb, PREROLL_TIME); /* start time stamp */
    put_le32(pb, (asf->is_streamed || url_is_streamed(pb)) ? 3 : 2); /* ??? */
    put_le32(pb, s->packet_size); /* packet size */
    put_le32(pb, s->packet_size); /* packet size */
    put_le32(pb, bit_rate); /* Nominal data rate in bps */
    end_header(pb, hpos);

    /* unknown headers */
    hpos = put_header(pb, &ff_asf_head1_guid);
    put_guid(pb, &ff_asf_head2_guid);
    put_le32(pb, 6);
    put_le16(pb, 0);
    end_header(pb, hpos);

    /* title and other infos */
    if (has_title) {
        int len;
        uint8_t *buf;
        ByteIOContext *dyn_buf;

        if (url_open_dyn_buf(&dyn_buf) < 0)
            return AVERROR(ENOMEM);

        hpos = put_header(pb, &ff_asf_comment_header);

        for (n = 0; n < FF_ARRAY_ELEMS(tags); n++) {
            len = tags[n] ? avio_put_str16le(dyn_buf, tags[n]->value) : 0;
            put_le16(pb, len);
        }
        len = url_close_dyn_buf(dyn_buf, &buf);
        put_buffer(pb, buf, len);
        av_freep(&buf);
        end_header(pb, hpos);
    }
    if (metadata_count) {
        AVMetadataTag *tag = NULL;
        hpos = put_header(pb, &ff_asf_extended_content_header);
        put_le16(pb, metadata_count);
        while ((tag = av_metadata_get(s->metadata, "", tag, AV_METADATA_IGNORE_SUFFIX))) {
            put_str16(pb, tag->key);
            put_le16(pb, 0);
            put_str16(pb, tag->value);
        }
        end_header(pb, hpos);
    }

    /* stream headers */
    for(n=0;n<s->nb_streams;n++) {
        int64_t es_pos;
        //        ASFStream *stream = &asf->streams[n];

        enc = s->streams[n]->codec;
        asf->streams[n].num = n + 1;
        asf->streams[n].seq = 0;


        switch(enc->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            wav_extra_size = 0;
            extra_size = 18 + wav_extra_size;
            extra_size2 = 8;
            break;
        default:
        case AVMEDIA_TYPE_VIDEO:
            wav_extra_size = enc->extradata_size;
            extra_size = 0x33 + wav_extra_size;
            extra_size2 = 0;
            break;
        }

        hpos = put_header(pb, &ff_asf_stream_header);
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
            put_guid(pb, &ff_asf_audio_stream);
            put_guid(pb, &ff_asf_audio_conceal_spread);
        } else {
            put_guid(pb, &ff_asf_video_stream);
            put_guid(pb, &ff_asf_video_conceal_none);
        }
        put_le64(pb, 0); /* ??? */
        es_pos = url_ftell(pb);
        put_le32(pb, extra_size); /* wav header len */
        put_le32(pb, extra_size2); /* additional data len */
        put_le16(pb, n + 1); /* stream number */
        put_le32(pb, 0); /* ??? */

        if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* WAVEFORMATEX header */
            int wavsize = ff_put_wav_header(pb, enc);
            if ((enc->codec_id != CODEC_ID_MP3) && (enc->codec_id != CODEC_ID_MP2) && (enc->codec_id != CODEC_ID_ADPCM_IMA_WAV) && (enc->extradata_size==0)) {
                wavsize += 2;
                put_le16(pb, 0);
            }

            if (wavsize < 0)
                return -1;
            if (wavsize != extra_size) {
                cur_pos = url_ftell(pb);
                url_fseek(pb, es_pos, SEEK_SET);
                put_le32(pb, wavsize); /* wav header len */
                url_fseek(pb, cur_pos, SEEK_SET);
            }
            /* ERROR Correction */
            put_byte(pb, 0x01);
            if(enc->codec_id == CODEC_ID_ADPCM_G726 || !enc->block_align){
                put_le16(pb, 0x0190);
                put_le16(pb, 0x0190);
            }else{
                put_le16(pb, enc->block_align);
                put_le16(pb, enc->block_align);
            }
            put_le16(pb, 0x01);
            put_byte(pb, 0x00);
        } else {
            put_le32(pb, enc->width);
            put_le32(pb, enc->height);
            put_byte(pb, 2); /* ??? */
            put_le16(pb, 40 + enc->extradata_size); /* size */

            /* BITMAPINFOHEADER header */
            ff_put_bmp_header(pb, enc, ff_codec_bmp_tags, 1);
        }
        end_header(pb, hpos);
    }

    /* media comments */

    hpos = put_header(pb, &ff_asf_codec_comment_header);
    put_guid(pb, &ff_asf_codec_comment1_header);
    put_le32(pb, s->nb_streams);
    for(n=0;n<s->nb_streams;n++) {
        AVCodec *p;
        const char *desc;
        int len;
        uint8_t *buf;
        ByteIOContext *dyn_buf;

        enc = s->streams[n]->codec;
        p = avcodec_find_encoder(enc->codec_id);

        if(enc->codec_type == AVMEDIA_TYPE_AUDIO)
            put_le16(pb, 2);
        else if(enc->codec_type == AVMEDIA_TYPE_VIDEO)
            put_le16(pb, 1);
        else
            put_le16(pb, -1);

        if(enc->codec_id == CODEC_ID_WMAV2)
            desc = "Windows Media Audio V8";
        else
            desc = p ? p->name : enc->codec_name;

        if ( url_open_dyn_buf(&dyn_buf) < 0)
            return AVERROR(ENOMEM);

        avio_put_str16le(dyn_buf, desc);
        len = url_close_dyn_buf(dyn_buf, &buf);
        put_le16(pb, len / 2); // "number of characters" = length in bytes / 2

        put_buffer(pb, buf, len);
        av_freep(&buf);

        put_le16(pb, 0); /* no parameters */


        /* id */
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
            put_le16(pb, 2);
            put_le16(pb, enc->codec_tag);
        } else {
            put_le16(pb, 4);
            put_le32(pb, enc->codec_tag);
        }
        if(!enc->codec_tag)
            return -1;
    }
    end_header(pb, hpos);

    /* patch the header size fields */

    cur_pos = url_ftell(pb);
    header_size = cur_pos - header_offset;
    if (asf->is_streamed) {
        header_size += 8 + 30 + 50;

        url_fseek(pb, header_offset - 10 - 30, SEEK_SET);
        put_le16(pb, header_size);
        url_fseek(pb, header_offset - 2 - 30, SEEK_SET);
        put_le16(pb, header_size);

        header_size -= 8 + 30 + 50;
    }
    header_size += 24 + 6;
    url_fseek(pb, header_offset - 14, SEEK_SET);
    put_le64(pb, header_size);
    url_fseek(pb, cur_pos, SEEK_SET);

    /* movie chunk, followed by packets of packet_size */
    asf->data_offset = cur_pos;
    put_guid(pb, &ff_asf_data_header);
    put_le64(pb, data_chunk_size);
    put_guid(pb, &ff_asf_my_guid);
    put_le64(pb, asf->nb_packets); /* nb packets */
    put_byte(pb, 1); /* ??? */
    put_byte(pb, 1); /* ??? */
    return 0;
}
Example #13
0
void put_string(StubState *state, const char * str)
{
    put_buffer(state, (const uint8_t *) str, strlen(str));
}
Example #14
0
void top_twenty(integer this_many)
{
  /*{ Enters a player's name on the top twenty list		-JWT-	}*/

  string    list[MAX_HIGH_SCORES+2];
  integer   players_line = 0;
  integer   i1,i2,i3,i4;
  int       n1;
  vtype     o1,s1;
  FILE     *f1;
  boolean   flag;
  char      ch;

  if (py.misc.cheated) {
    exit_game();
  }
  clear_screen();

  if (!read_top_scores(&f1, MORIA_TOP, list, MAX_HIGH_SCORES, &n1, s1)) {
    prt(s1,2,1);
    prt("",3,1);
  } else {

    i3   = total_points();
    flag = false;
    
    if (i3 == 0) {
      i1 = n1;
    } else {
      for (i1=1; (i1 <= n1) && !flag ; ) {   /* XXXX check for corruption */
	sscanf(&(list[i1][13]),"%ld",&i4);
	if (i4 < i3) {
	  flag = true;
	} else {
	  i1++;
	}
      }
    }

    if ((i3 > 0) && ((flag) || (n1 == 0) || (n1 < MAX_HIGH_SCORES))) {

      for (i2 = MAX_HIGH_SCORES-1; i2 >= i1 ; i2--) {
	strcpy(list[i2+1], list[i2]);
      }
      
      user_name(o1);
      
      format_top_score(list[i1], o1, i3, PM.diffic, PM.name,
		       PM.lev, PM.race, PM.tclass);
      
      if (n1 < MAX_HIGH_SCORES) {
	n1++;
      }

      max_score    = n1;
      players_line = i1;
      flag         = false;

      write_top_scores(&f1, list, n1);

    } else { 
      /* did not get a high score */
      max_score = 20;
    }      
    
    if (!close_top_scores(&f1)) {
      prt("Error unlocking score file.",2,1);
      prt("",3,1);
    }
    
    put_buffer("Username     Points  Diff    Character name    Level  Race         Class",1,1);
    put_buffer("____________ ________ _ ________________________ __ __________ ________________",2,1);
    
    i2 = 3;
    if (max_score > n1) {
      max_score = n1;
    }
    
    if (this_many > 0) {
      if (this_many > MAX_HIGH_SCORES) {
	max_score = MAX_HIGH_SCORES;
      } else {
	max_score = this_many;
      }
    }
    for (i1 = 1; i1 <= max_score; i1++) {
      /*insert_str(list[i1],chr(7),''); XXXX  why? */
      if (i1 == players_line) {
	put_buffer_attr(list[i1],i2,1, A_REVERSE);
      } else {
	put_buffer(list[i1],i2,1);
      }
      if ((i1 != 1) && ((i1 % 20) == 0) && (i1 != max_score)) {
	prt("[Press any key to continue, or <Control>-Z to exit]",
	    24,1);
	ch = inkey();
	switch (ch) {
	case 3: case 25: case 26:
	  erase_line(24,1);
	  put_buffer(" ",23,13);
	  exit_game();
	  break;
	}
	clear_rc(3,1);
	i2 = 2;
      }
      i2++;
    } /* end for */
    
    erase_line(23,1);
    put_qio();
    
  } /* end if read_top_scores */
}
Example #15
0
/**
 * Write an RTP hint (that may contain one or more RTP packets)
 * for the packets in data. data contains one or more packets with a
 * BE32 size header.
 *
 * @param out buffer where the hints are written
 * @param data buffer containing RTP packets
 * @param size the size of the data buffer
 * @param trk the MOVTrack for the hint track
 * @param pts pointer where the timestamp for the written RTP hint is stored
 * @return the number of RTP packets in the written hint
 */
static int write_hint_packets(ByteIOContext *out, const uint8_t *data,
                              int size, MOVTrack *trk, int64_t *pts)
{
    int64_t curpos;
    int64_t count_pos, entries_pos;
    int count = 0, entries;

    count_pos = url_ftell(out);
    /* RTPsample header */
    put_be16(out, 0); /* packet count */
    put_be16(out, 0); /* reserved */

    while (size > 4) {
        uint32_t packet_len = AV_RB32(data);
        uint16_t seq;
        uint32_t ts;

        data += 4;
        size -= 4;
        if (packet_len > size || packet_len <= 12)
            break;
        if (data[1] >= 200 && data[1] <= 204) {
            /* RTCP packet, just skip */
            data += packet_len;
            size -= packet_len;
            continue;
        }

        if (packet_len > trk->max_packet_size)
            trk->max_packet_size = packet_len;

        seq = AV_RB16(&data[2]);
        ts = AV_RB32(&data[4]);

        if (trk->prev_rtp_ts == 0)
            trk->prev_rtp_ts = ts;
        /* Unwrap the 32-bit RTP timestamp that wraps around often
         * into a not (as often) wrapping 64-bit timestamp. */
        trk->cur_rtp_ts_unwrapped += (int32_t) (ts - trk->prev_rtp_ts);
        trk->prev_rtp_ts = ts;
        if (*pts == AV_NOPTS_VALUE)
            *pts = trk->cur_rtp_ts_unwrapped;

        count++;
        /* RTPpacket header */
        put_be32(out, 0); /* relative_time */
        put_buffer(out, data, 2); /* RTP header */
        put_be16(out, seq); /* RTPsequenceseed */
        put_be16(out, 0); /* reserved + flags */
        entries_pos = url_ftell(out);
        put_be16(out, 0); /* entry count */

        data += 12;
        size -= 12;
        packet_len -= 12;

        entries = 0;
        /* Write one or more constructors describing the payload data */
        describe_payload(data, packet_len, out, &entries, &trk->sample_queue);
        data += packet_len;
        size -= packet_len;

        curpos = url_ftell(out);
        url_fseek(out, entries_pos, SEEK_SET);
        put_be16(out, entries);
        url_fseek(out, curpos, SEEK_SET);
    }

    curpos = url_ftell(out);
    url_fseek(out, count_pos, SEEK_SET);
    put_be16(out, count);
    url_fseek(out, curpos, SEEK_SET);
    return count;
}
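The data buffer consumed by write_hint_packets() is, per its documentation, a plain concatenation of RTP packets, each prefixed by a big-endian 32-bit length. A minimal sketch of how such a buffer could be assembled with the dynamic-buffer API seen in Example #8 (url_open_dyn_buf, url_close_dyn_buf); the helper name append_length_prefixed is hypothetical:

/* Append one RTP packet to a dynamic buffer as <BE32 length><payload>,
 * matching the layout that write_hint_packets() walks through. */
static void append_length_prefixed(ByteIOContext *dyn_buf,
                                   const uint8_t *packet, int packet_len)
{
    put_be32(dyn_buf, packet_len);
    put_buffer(dyn_buf, packet, packet_len);
}

As in Example #8, url_open_dyn_buf(&dyn_buf) would create the buffer and url_close_dyn_buf(dyn_buf, &buf) would hand back the concatenated bytes once all packets have been appended.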
Example #16
0
static int ffm_write_header(AVFormatContext *s)
{
    FFMContext *ffm = s->priv_data;
    AVStream *st;
    ByteIOContext *pb = s->pb;
    AVCodecContext *codec;
    int bit_rate, i;

    ffm->packet_size = FFM_PACKET_SIZE;

    /* header */
    put_le32(pb, MKTAG('F', 'F', 'M', '1'));
    put_be32(pb, ffm->packet_size);
    /* XXX: store write position in other file ? */
    put_be64(pb, ffm->packet_size); /* current write position */

    put_be32(pb, s->nb_streams);
    bit_rate = 0;
    for(i=0;i<s->nb_streams;i++) {
        st = s->streams[i];
        bit_rate += st->codec->bit_rate;
    }
    put_be32(pb, bit_rate);

    /* list of streams */
    for(i=0;i<s->nb_streams;i++) {
        st = s->streams[i];
        av_set_pts_info(st, 64, 1, 1000000);

        codec = st->codec;
        /* generic info */
        put_be32(pb, codec->codec_id);
        put_byte(pb, codec->codec_type);
        put_be32(pb, codec->bit_rate);
        put_be32(pb, st->quality);
        put_be32(pb, codec->flags);
        put_be32(pb, codec->flags2);
        put_be32(pb, codec->debug);
        /* specific info */
        switch(codec->codec_type) {
        case CODEC_TYPE_VIDEO:
            put_be32(pb, codec->time_base.num);
            put_be32(pb, codec->time_base.den);
            put_be16(pb, codec->width);
            put_be16(pb, codec->height);
            put_be16(pb, codec->gop_size);
            put_be32(pb, codec->pix_fmt);
            put_byte(pb, codec->qmin);
            put_byte(pb, codec->qmax);
            put_byte(pb, codec->max_qdiff);
            put_be16(pb, (int) (codec->qcompress * 10000.0));
            put_be16(pb, (int) (codec->qblur * 10000.0));
            put_be32(pb, codec->bit_rate_tolerance);
            put_strz(pb, codec->rc_eq ? codec->rc_eq : "tex^qComp");
            put_be32(pb, codec->rc_max_rate);
            put_be32(pb, codec->rc_min_rate);
            put_be32(pb, codec->rc_buffer_size);
            put_be64(pb, av_dbl2int(codec->i_quant_factor));
            put_be64(pb, av_dbl2int(codec->b_quant_factor));
            put_be64(pb, av_dbl2int(codec->i_quant_offset));
            put_be64(pb, av_dbl2int(codec->b_quant_offset));
            put_be32(pb, codec->dct_algo);
            put_be32(pb, codec->strict_std_compliance);
            put_be32(pb, codec->max_b_frames);
            put_be32(pb, codec->luma_elim_threshold);
            put_be32(pb, codec->chroma_elim_threshold);
            put_be32(pb, codec->mpeg_quant);
            put_be32(pb, codec->intra_dc_precision);
            put_be32(pb, codec->me_method);
            put_be32(pb, codec->mb_decision);
            put_be32(pb, codec->nsse_weight);
            put_be32(pb, codec->frame_skip_cmp);
            put_be64(pb, av_dbl2int(codec->rc_buffer_aggressivity));
            put_be32(pb, codec->codec_tag);
            put_byte(pb, codec->thread_count);
            break;
        case CODEC_TYPE_AUDIO:
            put_be32(pb, codec->sample_rate);
            put_le16(pb, codec->channels);
            put_le16(pb, codec->frame_size);
            break;
        default:
            return -1;
        }
        if (codec->flags & CODEC_FLAG_GLOBAL_HEADER) {
            put_be32(pb, codec->extradata_size);
            put_buffer(pb, codec->extradata, codec->extradata_size);
        }
    }

    /* flush until end of block reached */
    while ((url_ftell(pb) % ffm->packet_size) != 0)
        put_byte(pb, 0);

    put_flush_packet(pb);

    /* init packet mux */
    ffm->packet_ptr = ffm->packet;
    ffm->packet_end = ffm->packet + ffm->packet_size - FFM_HEADER_SIZE;
    assert(ffm->packet_end >= ffm->packet);
    ffm->frame_offset = 0;
    ffm->dts = 0;
    ffm->first_packet = 1;

    return 0;
}
Example #17
0
static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = &s->pb;
    unsigned char tag[5];
    unsigned int flags=0;
    const int stream_index= pkt->stream_index;
    AVCodecContext *enc= s->streams[stream_index]->codec;
    int size= pkt->size;

//    av_log(s, AV_LOG_DEBUG, "%"PRId64" %d %d\n", pkt->dts, avi->packet_count[stream_index], stream_index);
    while(enc->block_align==0 && pkt->dts != AV_NOPTS_VALUE && pkt->dts > avi->packet_count[stream_index]){
        AVPacket empty_packet;

        av_init_packet(&empty_packet);
        empty_packet.size= 0;
        empty_packet.data= NULL;
        empty_packet.stream_index= stream_index;
        avi_write_packet(s, &empty_packet);
//        av_log(s, AV_LOG_DEBUG, "dup %"PRId64" %d\n", pkt->dts, avi->packet_count[stream_index]);
    }
    avi->packet_count[stream_index]++;

    // Make sure to put an OpenDML chunk when the file size exceeds the limits
    if (!url_is_streamed(pb) &&
        (url_ftell(pb) - avi->riff_start > AVI_MAX_RIFF_SIZE)) {

        avi_write_ix(s);
        end_tag(pb, avi->movi_list);

        if (avi->riff_id == 1)
            avi_write_idx1(s);

        end_tag(pb, avi->riff_start);
        avi->movi_list = avi_start_new_riff(avi, pb, "AVIX", "movi");
    }

    avi_stream2fourcc(&tag[0], stream_index, enc->codec_type);
    if(pkt->flags&PKT_FLAG_KEY)
        flags = 0x10;
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
       avi->audio_strm_length[stream_index] += size;
    }

    if (!url_is_streamed(&s->pb)) {
        AVIIndex* idx = &avi->indexes[stream_index];
        int cl = idx->entry / AVI_INDEX_CLUSTER_SIZE;
        int id = idx->entry % AVI_INDEX_CLUSTER_SIZE;
        if (idx->ents_allocated <= idx->entry) {
            idx->cluster = av_realloc(idx->cluster, (cl+1)*sizeof(void*));
            if (!idx->cluster)
                return -1;
            idx->cluster[cl] = av_malloc(AVI_INDEX_CLUSTER_SIZE*sizeof(AVIIentry));
            if (!idx->cluster[cl])
                return -1;
            idx->ents_allocated += AVI_INDEX_CLUSTER_SIZE;
        }

        idx->cluster[cl][id].flags = flags;
        idx->cluster[cl][id].pos = url_ftell(pb) - avi->movi_list;
        idx->cluster[cl][id].len = size;
        idx->entry++;
    }

    put_buffer(pb, tag, 4);
    put_le32(pb, size);
    put_buffer(pb, pkt->data, size);
    if (size & 1)
        put_byte(pb, 0);

    put_flush_packet(pb);
    return 0;
}
Example #18
0
static int rso_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    put_buffer(s->pb, pkt->data, pkt->size);
    return 0;
}
Example #19
0
 int logBuffer::sync()
 {
    put_buffer();
    return 0;
 }
Example #20
0
/* GIF header */
static int gif_image_write_header(ByteIOContext *pb,
                                  int width, int height, int loop_count,
                                  uint32_t *palette)
{
    int i;
    unsigned int v;

    put_tag(pb, "GIF");
    put_tag(pb, "89a");
    put_le16(pb, width);
    put_le16(pb, height);

    put_byte(pb, 0xf7); /* flags: global clut, 256 entries */
    put_byte(pb, 0x1f); /* background color index */
    put_byte(pb, 0); /* aspect ratio */

    /* the global palette */
    if (!palette) {
        put_buffer(pb, (const unsigned char *)gif_clut, 216*3);
        for(i=0;i<((256-216)*3);i++)
            put_byte(pb, 0);
    } else {
        for(i=0;i<256;i++) {
            v = palette[i];
            put_byte(pb, (v >> 16) & 0xff);
            put_byte(pb, (v >> 8) & 0xff);
            put_byte(pb, (v) & 0xff);
        }
    }

    /* update: this is the 'NETSCAPE EXTENSION' that allows for looped animated GIFs;
       see http://members.aol.com/royalef/gifabout.htm#net-extension

       byte   1       : 33 (hex 0x21) GIF Extension code
       byte   2       : 255 (hex 0xFF) Application Extension Label
       byte   3       : 11 (hex 0x0B) Length of Application Block
                                (eleven bytes of data to follow)
       bytes  4 to 11 : "NETSCAPE"
       bytes 12 to 14 : "2.0"
       byte  15       : 3 (hex 0x03) Length of Data Sub-Block
                                (three bytes of data to follow)
       byte  16       : 1 (hex 0x01)
       bytes 17 to 18 : 0 to 65535, an unsigned integer in
                                lo-hi byte format. This indicates the
                                number of times the loop should
                                be executed.
       byte  19       : 0 (hex 0x00) a Data Sub-block Terminator
    */

    /* application extension header */
#ifdef GIF_ADD_APP_HEADER
    if (loop_count >= 0 && loop_count <= 65535) {
        put_byte(pb, 0x21);
        put_byte(pb, 0xff);
        put_byte(pb, 0x0b);
        put_tag(pb, "NETSCAPE2.0");  // bytes 4 to 14
        put_byte(pb, 0x03); // byte 15
        put_byte(pb, 0x01); // byte 16
        put_le16(pb, (uint16_t)loop_count);
        put_byte(pb, 0x00); // byte 19
    }
#endif
    return 0;
}
Example #21
0
static int a64_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    AVCodecContext *avctx = s->streams[0]->codec;
    A64MuxerContext *c = s->priv_data;
    int i, j;
    int ch_chunksize;
    int lifetime;
    int frame_count;
    int charset_size;
    int frame_size;
    int num_frames;

    /* fetch values from extradata */
    switch (avctx->codec->id) {
    case CODEC_ID_A64_MULTI:
    case CODEC_ID_A64_MULTI5:
        if(c->interleaved) {
            /* Write interleaved, means we insert chunks of the future charset before each current frame.
             * Reason: if we load 1 charset + corresponding frames in one block on c64, we need to store
             * them first and then display frame by frame to keep in sync. Thus we would read and write
             * the data for colram from/to ram first and waste too much time. If we interleave and send the
             * charset beforehand, we assemble a new charset chunk by chunk, write current screen data to
             * screen-ram to be displayed and decode the colram directly to colram-location $d800 during
             * the overscan, while reading directly from source.
             * This is the only way so far, to achieve 25fps on c64 */
            if(avctx->extradata) {
                /* fetch values from extradata */
                lifetime     = AV_RB32(avctx->extradata + 0);
                frame_count  = AV_RB32(avctx->extradata + 4);
                charset_size = AV_RB32(avctx->extradata + 8);
                frame_size   = AV_RB32(avctx->extradata + 12);

                /* TODO: sanity checks? */
            } else {
                av_log(avctx, AV_LOG_ERROR, "extradata not set\n");
                return AVERROR(EINVAL);
            }

            ch_chunksize=charset_size/lifetime;
            /* TODO: check if charset/size is % lifetime, but maybe check in codec */

            if(pkt->data) num_frames = lifetime;
            else num_frames = c->prev_frame_count;

            for(i = 0; i < num_frames; i++) {
                if(pkt->data) {
                    /* if available, put newest charset chunk into buffer */
                    put_buffer(s->pb, pkt->data + ch_chunksize * i, ch_chunksize);
                } else {
                    /* a bit ugly, but is there an alternative to put many zeros? */
                    for(j = 0; j < ch_chunksize; j++) put_byte(s->pb, 0);
                }

                if(c->prev_pkt.data) {
                    /* put frame (screen + colram) from last packet into buffer */
                    put_buffer(s->pb, c->prev_pkt.data + charset_size + frame_size * i, frame_size);
                } else {
                    /* a bit ugly, but is there an alternative to put many zeros? */
                    for(j = 0; j < frame_size; j++) put_byte(s->pb, 0);
                }
            }

            /* backup current packet for next turn */
            if(pkt->data) {
                /* no backup packet yet? create one! */
                if(!c->prev_pkt.data) av_new_packet(&c->prev_pkt, pkt->size);
                /* we have a packet and data is big enough, reuse it */
                if(c->prev_pkt.data && c->prev_pkt.size >= pkt->size) {
                    memcpy(c->prev_pkt.data, pkt->data, pkt->size);
                    c->prev_pkt.size = pkt->size;
                } else {
                    av_log(avctx, AV_LOG_ERROR, "Too less memory for prev_pkt.\n");
                    return AVERROR(ENOMEM);
                }
            }

            c->prev_frame_count = frame_count;
            break;
        }
    default:
        /* Write things as is. Nice for self-contained frames from non-multicolor modes or if played
         * directly from ram and not from a streaming device (rrnet/mmc) */
        if(pkt) put_buffer(s->pb, pkt->data, pkt->size);
        break;
    }

    put_flush_packet(s->pb);
    return 0;
}
Example #22
0
static int gif_image_write_image(ByteIOContext *pb,
                                 int x1, int y1, int width, int height,
                                 const uint8_t *buf, int linesize, int pix_fmt)
{
    PutBitContext p;
    uint8_t buffer[200]; /* 100 * 9 / 8 = 113 */
    int i, left, w, v;
    const uint8_t *ptr;
    /* image block */

    put_byte(pb, 0x2c);
    put_le16(pb, x1);
    put_le16(pb, y1);
    put_le16(pb, width);
    put_le16(pb, height);
    put_byte(pb, 0x00); /* flags */
    /* no local clut */

    put_byte(pb, 0x08);

    left= width * height;

    init_put_bits(&p, buffer, 130);

/*
 * the thing here is the bitstream is written as little packets, with a size byte before
 * but it's still the same bitstream between packets (no flush !)
 */
    ptr = buf;
    w = width;
    while(left>0) {

        put_bits(&p, 9, 0x0100); /* clear code */

        for(i=(left<GIF_CHUNKS)?left:GIF_CHUNKS;i;i--) {
            if (pix_fmt == PIX_FMT_RGB24) {
                v = gif_clut_index(ptr[0], ptr[1], ptr[2]);
                ptr+=3;
            } else {
                v = *ptr++;
            }
            put_bits(&p, 9, v);
            if (--w == 0) {
                w = width;
                buf += linesize;
                ptr = buf;
            }
        }

        if(left<=GIF_CHUNKS) {
            put_bits(&p, 9, 0x101); /* end of stream */
            flush_put_bits(&p);
        }
        if(pbBufPtr(&p) - p.buf > 0) {
            put_byte(pb, pbBufPtr(&p) - p.buf); /* byte count of the packet */
            put_buffer(pb, p.buf, pbBufPtr(&p) - p.buf); /* the actual buffer */
            p.buf_ptr = p.buf; /* dequeue the bytes off the bitstream */
        }
        left-=GIF_CHUNKS;
    }
    put_byte(pb, 0x00); /* end of image block */

    return 0;
}
Example #23
0
static int swf_write_video(AVFormatContext *s,
                           AVCodecContext *enc, const uint8_t *buf, int size)
{
    SWFContext *swf = s->priv_data;
    ByteIOContext *pb = s->pb;

    /* Flash Player limit */
    if (swf->swf_frame_number == 16000)
        av_log(enc, AV_LOG_INFO, "warning: Flash Player limit of 16000 frames reached\n");

    if (enc->codec_id == CODEC_ID_VP6F ||
        enc->codec_id == CODEC_ID_FLV1) {
        if (swf->video_frame_number == 0) {
            /* create a new video object */
            put_swf_tag(s, TAG_VIDEOSTREAM);
            put_le16(pb, VIDEO_ID);
            swf->vframes_pos = url_ftell(pb);
            put_le16(pb, 15000); /* hard flash player limit */
            put_le16(pb, enc->width);
            put_le16(pb, enc->height);
            put_byte(pb, 0);
            put_byte(pb,codec_get_tag(swf_codec_tags,enc->codec_id));
            put_swf_end_tag(s);

            /* place the video object for the first time */
            put_swf_tag(s, TAG_PLACEOBJECT2);
            put_byte(pb, 0x36);
            put_le16(pb, 1);
            put_le16(pb, VIDEO_ID);
            put_swf_matrix(pb, 1 << FRAC_BITS, 0, 0, 1 << FRAC_BITS, 0, 0);
            put_le16(pb, swf->video_frame_number);
            put_tag(pb, "video");
            put_byte(pb, 0x00);
            put_swf_end_tag(s);
        } else {
            /* mark the character for update */
            put_swf_tag(s, TAG_PLACEOBJECT2);
            put_byte(pb, 0x11);
            put_le16(pb, 1);
            put_le16(pb, swf->video_frame_number);
            put_swf_end_tag(s);
        }

        /* set video frame data */
        put_swf_tag(s, TAG_VIDEOFRAME | TAG_LONG);
        put_le16(pb, VIDEO_ID);
        put_le16(pb, swf->video_frame_number++);
        put_buffer(pb, buf, size);
        put_swf_end_tag(s);
    } else if (enc->codec_id == CODEC_ID_MJPEG) {
        if (swf->swf_frame_number > 0) {
            /* remove the shape */
            put_swf_tag(s, TAG_REMOVEOBJECT);
            put_le16(pb, SHAPE_ID); /* shape ID */
            put_le16(pb, 1); /* depth */
            put_swf_end_tag(s);

            /* free the bitmap */
            put_swf_tag(s, TAG_FREECHARACTER);
            put_le16(pb, BITMAP_ID);
            put_swf_end_tag(s);
        }

        put_swf_tag(s, TAG_JPEG2 | TAG_LONG);

        put_le16(pb, BITMAP_ID); /* ID of the image */

        /* a dummy jpeg header seems to be required */
        put_be32(pb, 0xffd8ffd9);
        /* write the jpeg image */
        put_buffer(pb, buf, size);

        put_swf_end_tag(s);

        /* draw the shape */

        put_swf_tag(s, TAG_PLACEOBJECT);
        put_le16(pb, SHAPE_ID); /* shape ID */
        put_le16(pb, 1); /* depth */
        put_swf_matrix(pb, 20 << FRAC_BITS, 0, 0, 20 << FRAC_BITS, 0, 0);
        put_swf_end_tag(s);
    }

    swf->swf_frame_number++;

    /* streaming sound should always be placed just before showframe tags */
    if (swf->audio_enc && av_fifo_size(&swf->audio_fifo)) {
        int frame_size = av_fifo_size(&swf->audio_fifo);
        put_swf_tag(s, TAG_STREAMBLOCK | TAG_LONG);
        put_le16(pb, swf->sound_samples);
        put_le16(pb, 0); // seek samples
        av_fifo_generic_read(&swf->audio_fifo, frame_size, &put_buffer, pb);
        put_swf_end_tag(s);

        /* update FIFO */
        swf->sound_samples = 0;
    }

    /* output the frame */
    put_swf_tag(s, TAG_SHOWFRAME);
    put_swf_end_tag(s);

    put_flush_packet(s->pb);

    return 0;
}
Exemple #24
0
static int aiff_write_header(AVFormatContext *s)
{
    AIFFOutputContext *aiff = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[0]->codec;
    AVExtFloat sample_rate;
    int aifc = 0;

    /* First verify if format is ok */
    if (!enc->codec_tag)
        return -1;
    if (enc->codec_tag != MKTAG('N','O','N','E'))
        aifc = 1;

    /* FORM AIFF header */
    put_tag(pb, "FORM");
    aiff->form = url_ftell(pb);
    put_be32(pb, 0);                    /* file length */
    put_tag(pb, aifc ? "AIFC" : "AIFF");

    if (aifc) { // compressed audio
        enc->bits_per_coded_sample = 16;
        if (!enc->block_align) {
            av_log(s, AV_LOG_ERROR, "block align not set\n");
            return -1;
        }
        /* Version chunk */
        put_tag(pb, "FVER");
        put_be32(pb, 4);
        put_be32(pb, 0xA2805140); /* AIFC Version 1 timestamp */
    }

    /* Common chunk */
    put_tag(pb, "COMM");
    put_be32(pb, aifc ? 24 : 18); /* size */
    put_be16(pb, enc->channels);  /* Number of channels */

    aiff->frames = url_ftell(pb);
    put_be32(pb, 0);              /* Number of frames */

    if (!enc->bits_per_coded_sample)
        enc->bits_per_coded_sample = av_get_bits_per_sample(enc->codec_id);
    if (!enc->bits_per_coded_sample) {
        av_log(s, AV_LOG_ERROR, "could not compute bits per sample\n");
        return -1;
    }
    if (!enc->block_align)
        enc->block_align = (enc->bits_per_coded_sample * enc->channels) >> 3;

    put_be16(pb, enc->bits_per_coded_sample); /* Sample size */

    sample_rate = av_dbl2ext((double)enc->sample_rate);
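    /* AIFF stores the sample rate as an 80-bit IEEE extended-precision float, written raw */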
    put_buffer(pb, (uint8_t*)&sample_rate, sizeof(sample_rate));

    if (aifc) {
        put_le32(pb, enc->codec_tag);
        put_be16(pb, 0);
    }

    /* Sound data chunk */
    put_tag(pb, "SSND");
    aiff->ssnd = url_ftell(pb);         /* Sound chunk size */
    put_be32(pb, 0);                    /* Sound samples data size */
    put_be32(pb, 0);                    /* Data offset */
    put_be32(pb, 0);                    /* Block-size (block align) */

    av_set_pts_info(s->streams[0], 64, 1, s->streams[0]->codec->sample_rate);

    /* Data is starting here */
    put_flush_packet(pb);

    return 0;
}
Exemple #25
0
static void
serve_thread(uint32_t a) {
	struct st_args *args = (struct st_args *)a;
	union Nsipc *req = args->req;
	int r;

	switch (args->reqno) {
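	// Each case unmarshals one IPC request and calls the matching lwIP function,
	// leaving the result in r (and, for accept, the return struct in *req).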
	case NSREQ_ACCEPT:
	{
		struct Nsret_accept ret;
		r = lwip_accept(req->accept.req_s, &ret.ret_addr,
				&ret.ret_addrlen);
		memmove(req, &ret, sizeof ret);
		break;
	}
	case NSREQ_BIND:
		r = lwip_bind(req->bind.req_s, &req->bind.req_name,
			      req->bind.req_namelen);
		break;
	case NSREQ_SHUTDOWN:
		r = lwip_shutdown(req->shutdown.req_s, req->shutdown.req_how);
		break;
	case NSREQ_CLOSE:
		r = lwip_close(req->close.req_s);
		break;
	case NSREQ_CONNECT:
		r = lwip_connect(req->connect.req_s, &req->connect.req_name,
				 req->connect.req_namelen);
		break;
	case NSREQ_LISTEN:
		r = lwip_listen(req->listen.req_s, req->listen.req_backlog);
		break;
	case NSREQ_RECV:
		// Note that we read the request fields before we
		// overwrite them with the response data.
		r = lwip_recv(req->recv.req_s, req->recvRet.ret_buf,
			      req->recv.req_len, req->recv.req_flags);
		break;
	case NSREQ_SEND:
		r = lwip_send(req->send.req_s, &req->send.req_buf,
			      req->send.req_size, req->send.req_flags);
		break;
	case NSREQ_SOCKET:
		r = lwip_socket(req->socket.req_domain, req->socket.req_type,
				req->socket.req_protocol);
		break;
	case NSREQ_INPUT:
		jif_input(&nif, (void *)&req->pkt);
		r = 0;
		break;
	default:
		cprintf("Invalid request code %d from %08x\n", args->whom, args->req);
		r = -E_INVAL;
		break;
	}

	if (r == -1) {
		char buf[100];
		snprintf(buf, sizeof buf, "ns req type %d", args->reqno);
		perror(buf);
	}

	if (args->reqno != NSREQ_INPUT)
		ipc_send(args->whom, r, 0, 0);

	put_buffer(args->req);
	sys_page_unmap(0, (void*) args->req);
	free(args);
}
Exemple #26
0
int get_string(char *in_str,int row,int column,int slen)
{
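  /* Read up to slen printable characters at (row, column) into in_str;
     returns TRUE when terminated with CR/LF, FALSE when aborted with ESC. */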
  register int start_col, end_col, i;
  char *p;
  int flag, aborted;

  aborted = FALSE;
  flag  = FALSE;
  gotoxy(column+1,row+1);
  for (i = slen; i > 0; i--)
    putch(' ');
  gotoxy(column+1,row+1);
  start_col = column;
  end_col = column + slen - 1;
  if (end_col > 79)
    {
      slen = 80 - column;
      end_col = 79;
  }
  p = in_str;
  do
    {
      i = inkey();
      switch(i)
        {
        case ESCAPE:
          aborted = TRUE;
          break;
        case CTRL('J'): case CTRL('M'):
          flag  = TRUE;
          break;
        case DELETE: case CTRL('H'):
          if (column > start_col)
            {
              column--;
              put_buffer(" ", row, column);
              move_cursor(row, column);
              *--p = '\0';
          }
          break;
        default:
          if (!isprint(i) || column > end_col)
            bell();
          else
            {
              gotoxy(column+1,row+1);
              putch((char) i);
              *p++ = i;
              column++;
          }
          break;
      }
  }
  while ((!flag) && (!aborted));
  if (aborted)
    return(FALSE);
  /* Remove trailing blanks     */
  while (p > in_str && p[-1] == ' ')
    p--;
  *p = '\0';
  return(TRUE);
}
Exemple #27
0
static void put_amf_string(ByteIOContext *pb, const char *str)
{
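    /* An AMF0 string is a 16-bit big-endian length followed by the raw bytes, with no terminator. */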
    size_t len = strlen(str);
    put_be16(pb, len);
    put_buffer(pb, (const uint8_t *)str, len);
}
Exemple #28
0
static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    LogStr("Init");

    //Fernando:
    //printf("-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\n");
    //printf("************* proc: %s - line: %d\n", __func__, __LINE__);
    //dumpPacket(&pkt);
    //getchar();

    ByteIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[pkt->stream_index]->codec;
    FLVContext *flv = s->priv_data;
    unsigned ts;
    int size = pkt->size;
    int flags, flags_size;

    //    av_log(s, AV_LOG_DEBUG, "type:%d pts: %"PRId64" size:%d\n", enc->codec_type, timestamp, size);

    if (enc->codec_id == CODEC_ID_VP6 || enc->codec_id == CODEC_ID_VP6F || enc->codec_id == CODEC_ID_AAC)
    {
        flags_size = 2;
    }
    else if (enc->codec_id == CODEC_ID_H264)
    {
        flags_size = 5;
    }
    else
    {
        flags_size = 1;
    }

    if (enc->codec_type == CODEC_TYPE_VIDEO)
    {
        put_byte(pb, FLV_TAG_TYPE_VIDEO);

        flags = enc->codec_tag;
        if (flags == 0)
        {
            av_log(enc, AV_LOG_ERROR, "video codec %X not compatible with flv\n", enc->codec_id);
            LogStr("Exit");
            return -1;
        }

        flags |= pkt->flags & PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER;
    }
    else
    {
        assert(enc->codec_type == CODEC_TYPE_AUDIO);
        flags = get_audio_flags(enc);

        assert(size);

        put_byte(pb, FLV_TAG_TYPE_AUDIO);
    }

    if (enc->codec_id == CODEC_ID_H264 &&
        /* convert when the extradata is not MP4-formatted (avcC starts with a 1 byte) */
        enc->extradata_size > 0 && *(uint8_t*) enc->extradata != 1)
    {
        if (ff_avc_parse_nal_units(pkt->data, &pkt->data, &pkt->size) < 0)
        {
            LogStr("Exit");
            return -1;
        }
        assert(pkt->size);
        size = pkt->size;
        /* cast needed to get negative value */
        if (!flv->delay && pkt->dts < 0)
        {
            flv->delay = -pkt->dts;
        }
    }

    ts = pkt->dts + flv->delay; // add delay to force positive dts
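    /* FLV tag header: the tag type byte was written above; next come the 24-bit data
       size, the low 24 bits of the timestamp, the extended timestamp byte (bits 24-31)
       and the 24-bit stream id (always 0). */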
    put_be24(pb, size + flags_size);
    put_be24(pb, ts);
    put_byte(pb, (ts >> 24) & 0x7F); // timestamps are 32bits _signed_
    put_be24(pb, flv->reserved);
    put_byte(pb, flags);

    if (enc->codec_id == CODEC_ID_VP6)
    {
        put_byte(pb, 0);
    }

    if (enc->codec_id == CODEC_ID_VP6F)
    {
        put_byte(pb, enc->extradata_size ? enc->extradata[0] : 0);
    }
    else if (enc->codec_id == CODEC_ID_AAC)
    {
        put_byte(pb, 1); // AAC raw
    }
    else if (enc->codec_id == CODEC_ID_H264)
    {
        put_byte(pb, 1); // AVC NALU
        put_be24(pb, pkt->pts - pkt->dts); // composition time offset (pts - dts)
    }
    put_buffer(pb, pkt->data, size);
    put_be32(pb, size + flags_size + 11); // previous tag size
    flv->duration = FFMAX(flv->duration, pkt->pts + flv->delay + pkt->duration);

    put_flush_packet(pb);
    LogStr("Exit");
    return 0;
}
Exemple #29
0
static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    ByteIOContext *pb = s->pb;
    AVCodecContext *enc = s->streams[pkt->stream_index]->codec;
    FLVContext *flv = s->priv_data;
    unsigned ts;
    int size= pkt->size;
    uint8_t *data= NULL;
    int flags, flags_size;

//    av_log(s, AV_LOG_DEBUG, "type:%d pts: %"PRId64" size:%d\n", enc->codec_type, timestamp, size);

    if(enc->codec_id == CODEC_ID_VP6 || enc->codec_id == CODEC_ID_VP6F ||
            enc->codec_id == CODEC_ID_AAC)
        flags_size= 2;
    else if(enc->codec_id == CODEC_ID_H264)
        flags_size= 5;
    else
        flags_size= 1;

    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        put_byte(pb, FLV_TAG_TYPE_VIDEO);

        flags = enc->codec_tag;
        if(flags == 0) {
            av_log(enc, AV_LOG_ERROR, "video codec %X not compatible with flv\n",enc->codec_id);
            return -1;
        }

        flags |= pkt->flags & AV_PKT_FLAG_KEY ? FLV_FRAME_KEY : FLV_FRAME_INTER;
    } else {
        assert(enc->codec_type == AVMEDIA_TYPE_AUDIO);
        flags = get_audio_flags(enc);

        assert(size);

        put_byte(pb, FLV_TAG_TYPE_AUDIO);
    }

    if (enc->codec_id == CODEC_ID_H264) {
        /* check whether the extradata is already MP4-formatted (avcC starts with a 1 byte) */
        if (enc->extradata_size > 0 && *(uint8_t*)enc->extradata != 1) {
            if (ff_avc_parse_nal_units_buf(pkt->data, &data, &size) < 0)
                return -1;
        }
        if (!flv->delay && pkt->dts < 0)
            flv->delay = -pkt->dts;
    }

    ts = pkt->dts + flv->delay; // add delay to force positive dts
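    /* FLV tag header: 24-bit data size, 24-bit timestamp plus extended byte, 24-bit stream id (0) */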
    put_be24(pb,size + flags_size);
    put_be24(pb,ts);
    put_byte(pb,(ts >> 24) & 0x7F); // timestamps are 32bits _signed_
    put_be24(pb,flv->reserved);
    put_byte(pb,flags);
    if (enc->codec_id == CODEC_ID_VP6)
        put_byte(pb,0);
    if (enc->codec_id == CODEC_ID_VP6F)
        put_byte(pb, enc->extradata_size ? enc->extradata[0] : 0);
    else if (enc->codec_id == CODEC_ID_AAC)
        put_byte(pb,1); // AAC raw
    else if (enc->codec_id == CODEC_ID_H264) {
        put_byte(pb,1); // AVC NALU
        put_be24(pb,pkt->pts - pkt->dts); // composition time offset (pts - dts)
    }

    put_buffer(pb, data ? data : pkt->data, size);

    put_be32(pb,size+flags_size+11); // previous tag size
    flv->duration = FFMAX(flv->duration, pkt->pts + flv->delay + pkt->duration);

    put_flush_packet(pb);

    av_free(data);

    return 0;
}
Exemple #30
0
static int jpeg_write(ByteIOContext *pb, AVImageInfo *info)
{
    AVCodecContext *c;
    uint8_t *outbuf = NULL;
    int outbuf_size, ret, size, i;
    AVFrame *picture;

    ret = -1;
    c = avcodec_alloc_context();
    if (!c)
        return -1;
    picture = avcodec_alloc_frame();
    if (!picture)
        goto fail2;
    c->width = info->width;
    c->height = info->height;
    /* XXX: currently move that to the codec ? */
    switch(info->pix_fmt) {
    case PIX_FMT_YUVJ420P:
        c->pix_fmt = PIX_FMT_YUV420P;
        break;
    case PIX_FMT_YUVJ422P:
        c->pix_fmt = PIX_FMT_YUV422P;
        break;
    case PIX_FMT_YUVJ444P:
        c->pix_fmt = PIX_FMT_YUV444P;
        break;
    default:
        goto fail1;
    }
    for(i=0;i<3;i++) {
        picture->data[i] = info->pict.data[i];
        picture->linesize[i] = info->pict.linesize[i];
    }
    /* set the quality */
    picture->quality = 3; /* XXX: a parameter should be used */
    c->flags |= CODEC_FLAG_QSCALE;
    
    if (avcodec_open(c, &mjpeg_encoder) < 0)
        goto fail1;
    
    /* XXX: needs to sort out that size problem */
    outbuf_size = 1000000;
    outbuf = av_malloc(outbuf_size);
    if (!outbuf)
        goto fail;

    size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
    if (size < 0)
        goto fail;
    put_buffer(pb, outbuf, size);
    put_flush_packet(pb);
    ret = 0;

 fail:
    avcodec_close(c);
    av_free(outbuf);
 fail1:
    av_free(picture);
 fail2:
    av_free(c);
    return ret;
}