/* Send an integer number of samples: fill the RTP payload buffer,
 * stamp each packet with the running timestamp, and transmit it. */
static void rtp_send_samples(AVFormatContext *s1,
                             const uint8_t *buf1, int size, int sample_size)
{
    RTPMuxContext *s = s1->priv_data;
    int chunk, payload_max, sent;

    /* largest payload that still holds a whole number of samples */
    payload_max = (s->max_payload_size / sample_size) * sample_size;

    /* not needed, but who knows */
    if ((size % sample_size) != 0)
        av_abort();

    sent = 0;
    while (size > 0) {
        chunk = FFMIN(payload_max, size);

        /* stage one packet's worth of data */
        s->buf_ptr = s->buf;
        memcpy(s->buf_ptr, buf1, chunk);
        s->buf_ptr += chunk;
        buf1       += chunk;
        size       -= chunk;

        /* timestamp advances by the number of samples already sent */
        s->timestamp = s->cur_timestamp + sent / sample_size;
        ff_rtp_send_data(s1, s->buf, s->buf_ptr - s->buf, 0);
        sent += s->buf_ptr - s->buf;
    }
}
/**
 * Read one packet from an IFF file. The whole BODY chunk is delivered
 * as a single packet; subsequent calls return AVERROR_EOF.
 *
 * For video, a 2-byte compression-type prefix is prepended for the
 * decoder before the raw chunk data.
 *
 * @return number of bytes read on success, a negative AVERROR on failure.
 */
static int iff_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    IffDemuxContext *iff = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st = s->streams[0];
    int ret;

    /* the single BODY chunk has already been sent */
    if (iff->sent_bytes >= iff->body_size)
        return AVERROR_EOF;

    if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
        ret = av_get_packet(pb, pkt, iff->body_size);
    } else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        uint8_t *buf;

        if (av_new_packet(pkt, iff->body_size + 2) < 0) {
            return AVERROR(ENOMEM);
        }
        buf = pkt->data;
        bytestream_put_be16(&buf, 2);   /* 2-byte prefix consumed by the decoder */
        ret = avio_read(pb, buf, iff->body_size);
        if (ret < 0) {
            /* was: packet leaked on a failed read */
            av_free_packet(pkt);
            return ret;
        }
    } else {
        /* a demuxer must report bad data, not abort the whole process
           (was: av_abort()) */
        return AVERROR_INVALIDDATA;
    }

    if (iff->sent_bytes == 0)
        pkt->flags |= AV_PKT_FLAG_KEY;

    iff->sent_bytes = iff->body_size;   /* mark the chunk as consumed */
    pkt->stream_index = 0;
    return ret;
}
/* Write out the current (possibly partially filled) fixed-size FFM packet
 * and reset the muxer state for the next one. */
static void flush_packet(AVFormatContext *s)
{
    FFMContext *ffm = s->priv_data;
    int fill_size, h;
    ByteIOContext *pb = s->pb;

    /* zero the unused tail so every packet on disk is exactly packet_size */
    fill_size = ffm->packet_end - ffm->packet_ptr;
    memset(ffm->packet_ptr, 0, fill_size);

    /* the file is a sequence of fixed-size packets; being misaligned here
       means the muxer state is corrupt */
    if (url_ftell(pb) % ffm->packet_size)
        av_abort();

    /* put header */
    put_be16(pb, PACKET_ID);
    put_be16(pb, fill_size);      /* number of padding bytes at the tail */
    put_be64(pb, ffm->dts);
    h = ffm->frame_offset;
    if (ffm->first_packet)
        h |= 0x8000;              /* high bit flags the very first packet */
    put_be16(pb, h);
    put_buffer(pb, ffm->packet, ffm->packet_end - ffm->packet);
    put_flush_packet(pb);

    /* prepare next packet */
    ffm->frame_offset = 0; /* no key frame */
    ffm->packet_ptr = ffm->packet;
    ffm->first_packet = 0;
}
/**
 * Flush the in-memory FFM packet: pad it to the fixed packet size,
 * emit the packet header followed by the payload, and reset state
 * for the next packet.
 */
static void flush_packet(AVFormatContext *s)
{
    FFMContext *ffm = s->priv_data;
    AVIOContext *pb = s->pb;
    int padding, flags;

    /* zero-fill the unused tail of the packet buffer */
    padding = ffm->packet_end - ffm->packet_ptr;
    memset(ffm->packet_ptr, 0, padding);

    /* output must stay aligned on packet_size boundaries */
    if (avio_tell(pb) % ffm->packet_size)
        av_abort();

    /* header: id, padding count, dts, frame offset (+ first-packet bit) */
    avio_wb16(pb, PACKET_ID);
    avio_wb16(pb, padding);
    avio_wb64(pb, ffm->dts);
    flags = ffm->frame_offset;
    if (ffm->first_packet)
        flags |= 0x8000;
    avio_wb16(pb, flags);

    /* payload */
    avio_write(pb, ffm->packet, ffm->packet_end - ffm->packet);
    avio_flush(pb);

    /* reset for the next packet */
    ffm->frame_offset = 0;      /* no key frame yet */
    ffm->packet_ptr   = ffm->packet;
    ffm->first_packet = 0;
}
/* Send an integer number of samples over RTP in fixed 20 ms packets
 * (160 samples at the RTP default audio rate). Samples that do not
 * fill a whole packet are stashed in stock_buf and prepended on the
 * next call. */
static void rtp_send_samples(AVFormatContext *s1,
                             const uint8_t *buf1, int size, int sample_size)
{
    RTPMuxContext *s = s1->priv_data;
    int len, max_packet_size, n;

    /* fixed packet size: 20 ms of audio */
    max_packet_size = 160 * sample_size;

    /* not needed, but who knows */
    if ((size % sample_size) != 0)
        av_abort();

    n = 0;
    s->buf_ptr = s->buf;

    /* prepend the samples left over from the previous call */
    if (s->stock_len > 0) {
        memcpy(s->buf, s->stock_buf, s->stock_len);
        s->buf_ptr += s->stock_len;
        s->stock_len = 0;
    }

    /* emit full packets while leftover + new data fill one */
    while (size + (s->buf_ptr - s->buf) >= max_packet_size) {
        /* bytes needed to complete the current packet; the loop condition
         * guarantees 0 <= len <= size.  (The previous form,
         * FFMIN(max_packet_size, size) - buffered, went NEGATIVE whenever
         * the buffered leftover exceeded the remaining input, causing
         * memcpy with a huge size_t.) */
        len = max_packet_size - (s->buf_ptr - s->buf);

        memcpy(s->buf_ptr, buf1, len);
        s->buf_ptr += len;
        buf1 += len;
        size -= len;

        /* timestamp counts samples already handed to ff_rtp_send_data */
        s->timestamp = s->cur_timestamp + n / sample_size;
        ff_rtp_send_data(s1, s->buf, s->buf_ptr - s->buf, 0);
        n += (s->buf_ptr - s->buf);
        s->buf_ptr = s->buf;
    }

    /* keep the incomplete tail for the next call */
    if (size > 0) {
        memcpy(s->stock_buf, buf1, size);
        s->stock_len = size;
    }
    s->cur_timestamp += n / sample_size;
}
/* Write the AVI file header: the RIFF 'hdrl' list with the main 'avih'
 * chunk, one 'strl' list per stream ('strh' + 'strf' and, when seekable,
 * a JUNK placeholder for an OpenDML master index), an 'odml' placeholder,
 * and finally open the 'movi' list that will hold the data packets.
 * Several offsets are bookmarked so counts can be patched at trailer time.
 * Returns 0 on success, -1 on failure. */
static int avi_write_header(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = &s->pb;
    int bitrate, n, i, nb_frames, au_byterate, au_ssize, au_scale;
    AVCodecContext *stream, *video_enc;
    offset_t list1, list2, strh, strf;

    /* header list */
    avi->riff_id = 0;
    list1 = avi_start_new_riff(avi, pb, "AVI ", "hdrl");

    /* avi header */
    put_tag(pb, "avih");
    put_le32(pb, 14 * 4);           /* avih payload size: 14 dwords */
    bitrate = 0;

    /* total up the bit rate and find the video stream (last one wins) */
    video_enc = NULL;
    for(n=0;n<s->nb_streams;n++) {
        stream = &s->streams[n]->codec;
        bitrate += stream->bit_rate;
        if (stream->codec_type == CODEC_TYPE_VIDEO)
            video_enc = stream;
    }

    nb_frames = 0;

    /* dwMicroSecPerFrame: microseconds per frame, 0 without video */
    if(video_enc){
        put_le32(pb, (uint32_t)(int64_t_C(1000000) * video_enc->frame_rate_base / video_enc->frame_rate));
    } else {
        put_le32(pb, 0);
    }
    put_le32(pb, bitrate / 8); /* XXX: not quite exact */
    put_le32(pb, 0); /* padding */
    put_le32(pb, AVIF_TRUSTCKTYPE | AVIF_HASINDEX | AVIF_ISINTERLEAVED); /* flags */
    avi->frames_hdr_all = url_ftell(pb); /* remember this offset to fill later */
    put_le32(pb, nb_frames); /* nb frames, filled later */
    put_le32(pb, 0); /* initial frame */
    put_le32(pb, s->nb_streams); /* nb streams */
    put_le32(pb, 1024 * 1024); /* suggested buffer size */
    if(video_enc){
        put_le32(pb, video_enc->width);
        put_le32(pb, video_enc->height);
    } else {
        put_le32(pb, 0);
        put_le32(pb, 0);
    }
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */

    /* stream list: one 'strl' per stream
       (n still holds s->nb_streams from the loop above) */
    for(i=0;i<n;i++) {
        list2 = start_tag(pb, "LIST");
        put_tag(pb, "strl");

        stream = &s->streams[i]->codec;

        /* FourCC should really be set by the codec itself */
        if (! stream->codec_tag) {
            stream->codec_tag = codec_get_bmp_tag(stream->codec_id);
        }

        /* stream generic header */
        strh = start_tag(pb, "strh");
        switch(stream->codec_type) {
        case CODEC_TYPE_VIDEO:
            put_tag(pb, "vids");
            put_le32(pb, stream->codec_tag);
            put_le32(pb, 0); /* flags */
            put_le16(pb, 0); /* priority */
            put_le16(pb, 0); /* language */
            put_le32(pb, 0); /* initial frame */
            put_le32(pb, stream->frame_rate_base); /* scale */
            put_le32(pb, stream->frame_rate); /* rate */
            av_set_pts_info(s->streams[i], 64, stream->frame_rate_base, stream->frame_rate);
            put_le32(pb, 0); /* start */
            avi->frames_hdr_strm[i] = url_ftell(pb); /* remember this offset to fill later */
            put_le32(pb, nb_frames); /* length, XXX: fill later */
            put_le32(pb, 1024 * 1024); /* suggested buffer size */
            put_le32(pb, -1); /* quality */
            put_le32(pb, stream->width * stream->height * 3); /* sample size */
            /* rcFrame: left, top, right, bottom */
            put_le16(pb, 0);
            put_le16(pb, 0);
            put_le16(pb, stream->width);
            put_le16(pb, stream->height);
            break;
        case CODEC_TYPE_AUDIO:
            put_tag(pb, "auds");
            put_le32(pb, 1); /* tag */
            put_le32(pb, 0); /* flags */
            put_le16(pb, 0); /* priority */
            put_le16(pb, 0); /* language */
            put_le32(pb, 0); /* initial frame */
            parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale);
            put_le32(pb, au_scale); /* scale */
            put_le32(pb, au_byterate); /* rate */
//            av_set_pts_info(&s->streams[i], 64, au_scale, au_byterate);
            put_le32(pb, 0); /* start */
            avi->frames_hdr_strm[i] = url_ftell(pb); /* remember this offset to fill later */
            put_le32(pb, 0); /* length, XXX: filled later */
            put_le32(pb, 12 * 1024); /* suggested buffer size */
            put_le32(pb, -1); /* quality */
            put_le32(pb, au_ssize); /* sample size */
            put_le32(pb, 0);
            put_le32(pb, 0);
            break;
        default:
            av_abort();
        }
        end_tag(pb, strh);

        /* stream format chunk (BITMAPINFO / WAVEFORMATEX) */
        strf = start_tag(pb, "strf");
        switch(stream->codec_type) {
        case CODEC_TYPE_VIDEO:
            put_bmp_header(pb, stream, codec_bmp_tags, 0);
            break;
        case CODEC_TYPE_AUDIO:
            if (put_wav_header(pb, stream) < 0) {
                av_free(avi);
                return -1;
            }
            break;
        default:
            av_abort();
        }
        end_tag(pb, strf);

        if (!url_is_streamed(pb)) {
            unsigned char tag[5];
            int j;

            /* Starting to lay out AVI OpenDML master index.
             * We want to make it JUNK entry for now, since we'd
             * like to get away without making AVI an OpenDML one
             * for compatibility reasons. */
            avi->indexes[i].entry = avi->indexes[i].ents_allocated = 0;
            avi->indexes[i].indx_start = start_tag(pb, "JUNK");
            put_le16(pb, 4);        /* wLongsPerEntry */
            put_byte(pb, 0);        /* bIndexSubType (0 == frame index) */
            put_byte(pb, 0);        /* bIndexType (0 == AVI_INDEX_OF_INDEXES) */
            put_le32(pb, 0);        /* nEntriesInUse (will fill out later on) */
            put_tag(pb, avi_stream2fourcc(&tag[0], i, stream->codec_type)); /* dwChunkId */
            put_le64(pb, 0);        /* dwReserved[3] — must be 0 */
            /* reserve space for the master index entries, zeroed for now */
            for (j=0; j < AVI_MASTER_INDEX_SIZE * 2; j++)
                 put_le64(pb, 0);
            end_tag(pb, avi->indexes[i].indx_start);
        }

        end_tag(pb, list2);
    }

    if (!url_is_streamed(pb)) {
        /* AVI could become an OpenDML one, if it grows beyond 2Gb range */
        avi->odml_list = start_tag(pb, "JUNK");
        put_tag(pb, "odml");
        put_tag(pb, "dmlh");
        put_le32(pb, 248);
        for (i = 0; i < 248; i+= 4)
             put_le32(pb, 0);
        end_tag(pb, avi->odml_list);
    }

    end_tag(pb, list1);

    /* open the 'movi' list; data packets are appended after this */
    avi->movi_list = start_tag(pb, "LIST");
    put_tag(pb, "movi");

    put_flush_packet(pb);

    return 0;
}
/* Video hook entry point: draw text (static, or re-read from a file each
 * frame) onto the picture using Imlib2, with strftime escapes expanded
 * against the current time. The frame is converted to RGBA32 for drawing
 * and converted back to its original pixel format afterwards. */
void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, int height, int64_t pts)
{
    ContextInfo *ci = (ContextInfo *) ctx;
    AVPicture picture1;
    Imlib_Image image;
    DATA32 *data;

    /* reuse a cached Imlib image of matching size when available */
    image = get_cached_image(ci, width, height);

    if (!image) {
        image = imlib_create_image(width, height);
        put_cached_image(ci, image, width, height);
    }

    imlib_context_set_image(image);
    data = imlib_image_get_data();

    if (pix_fmt != PIX_FMT_RGBA32) {
        /* convert the incoming frame into the Imlib image buffer */
        avpicture_fill(&picture1, (uint8_t *) data, PIX_FMT_RGBA32, width, height);
        if (img_convert(&picture1, PIX_FMT_RGBA32,
                        picture, pix_fmt, width, height) < 0) {
            goto done;
        }
    } else {
        /* direct RGBA32 input path is not implemented */
        av_abort();
    }

    imlib_image_set_has_alpha(0);

    {
        int wid, hig, h_a, v_a;
        char buff[1000];
        char tbuff[1000];
        char *tbp = ci->text;
        time_t now = time(0);
        char *p, *q;
        int x, y;

        /* if a text file is configured, (re)read its contents every frame
           so the overlay can be updated externally */
        if (ci->file) {
            int fd = open(ci->file, O_RDONLY);

            if (fd < 0) {
                tbp = "[File not found]";
            } else {
                int l = read(fd, tbuff, sizeof(tbuff) - 1);

                if (l >= 0) {
                    tbuff[l] = 0;    /* NUL-terminate what was read */
                    tbp = tbuff;
                } else {
                    tbp = "[I/O Error]";
                }
                close(fd);
            }
        }

        /* expand strftime escapes in the text against the current time */
        strftime(buff, sizeof(buff), tbp ? tbp : "[No data]", localtime(&now));

        x = ci->x;
        y = ci->y;

        /* draw line by line, advancing y by each line's vertical advance */
        for (p = buff; p; p = q) {
            q = strchr(p, '\n');
            if (q)
                *q++ = 0;

            imlib_text_draw_with_return_metrics(x, y, p, &wid, &hig, &h_a, &v_a);
            y += v_a;
        }
    }

    if (pix_fmt != PIX_FMT_RGBA32) {
        /* convert the annotated image back to the caller's pixel format;
           NOTE(review): a failure here is silently ignored — the frame
           passes through un-annotated */
        if (img_convert(picture, pix_fmt,
                        &picture1, PIX_FMT_RGBA32, width, height) < 0) {
        }
    }

done:
    ;
}