/* Finalize an ASF file: flush any partially filled packet, append the
 * index object (seekable output only), and rewrite the header with the
 * now-known file and data sizes. */
static int asf_write_trailer(AVFormatContext *s)
{
    ASFContext *asf = s->priv_data;
    int64_t file_size, data_size;

    /* flush the current packet */
    if (asf->pb.buf_ptr > asf->pb.buffer)
        flush_packet(s);

    /* write index; data_size is sampled first so the index bytes are
     * excluded from the data object size passed to the header below */
    data_size = url_ftell(s->pb);
    if ((!asf->is_streamed) && (asf->nb_index_count != 0)) {
        asf_write_index(s, asf->index_ptr, asf->maximum_packet, asf->nb_index_count);
    }
    put_flush_packet(s->pb);

    if (asf->is_streamed || url_is_streamed(s->pb)) {
        put_chunk(s, 0x4524, 0, 0); /* end of stream */
    } else {
        /* rewrite an updated header with the final sizes */
        file_size = url_ftell(s->pb);
        url_fseek(s->pb, 0, SEEK_SET);
        asf_write_header1(s, file_size, data_size - asf->data_offset);
    }
    put_flush_packet(s->pb);

    av_free(asf->index_ptr);
    return 0;
}
/* Finish a GXF file: write the EOS packet, then seek back and refresh the
 * leading map/flt/umf packets and every duplicated map packet with the
 * final values. */
static int gxf_write_trailer(AVFormatContext *s)
{
    GXFContext *gxf = s->priv_data;
    ByteIOContext *pb = s->pb;
    int64_t eos_pos;
    int idx;

    ff_audio_interleave_close(s);

    gxf_write_eos_packet(pb);
    eos_pos = url_ftell(pb);

    /* rewrite the map, flt and umf packets at the head of the file */
    url_fseek(pb, 0, SEEK_SET);
    gxf_write_map_packet(s, 1);
    gxf_write_flt_packet(s);
    gxf_write_umf_packet(s);
    put_flush_packet(pb);

    /* refresh the duration stored in every other map packet as well */
    for (idx = 1; idx < gxf->map_offsets_nb; idx++) {
        url_fseek(pb, gxf->map_offsets[idx], SEEK_SET);
        gxf_write_map_packet(s, 1);
        put_flush_packet(pb);
    }

    url_fseek(pb, eos_pos, SEEK_SET);

    av_freep(&gxf->flt_entries);
    av_freep(&gxf->map_offsets);
    return 0;
}
/* Finalize a WAVE file: close the data chunk, patch the RIFF size field,
 * and for non-PCM streams (codec_tag != 0x01) update the sample count in
 * the fact chunk. */
static int wav_write_trailer(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    WAVContext *wav = s->priv_data;
    int64_t file_size;

    put_flush_packet(pb);

    if (!url_is_streamed(s->pb)) {
        ff_end_tag(pb, wav->data);

        /* update file size: the RIFF chunk size at offset 4 is the total
         * file length minus the 8-byte RIFF header */
        file_size = url_ftell(pb);
        url_fseek(pb, 4, SEEK_SET);
        put_le32(pb, (uint32_t)(file_size - 8));
        url_fseek(pb, file_size, SEEK_SET);

        put_flush_packet(pb);

        if(s->streams[0]->codec->codec_tag != 0x01) {
            /* Update num_samps in fact chunk: rescale the observed pts
             * span (plus the last packet's duration) from the stream
             * timebase into samples */
            int number_of_samples;
            number_of_samples = av_rescale(wav->maxpts - wav->minpts + wav->last_duration,
                                           s->streams[0]->codec->sample_rate * (int64_t)s->streams[0]->time_base.num,
                                           s->streams[0]->time_base.den);
            /* assumes the fact chunk's sample-count field sits 12 bytes
             * before wav->data, as laid out by the header writer (not
             * visible here) — TODO confirm */
            url_fseek(pb, wav->data-12, SEEK_SET);
            put_le32(pb, number_of_samples);
            url_fseek(pb, file_size, SEEK_SET);
            put_flush_packet(pb);
        }
    }
    return 0;
}
/* We set AVOutputFormat->write_trailer to this function for mpeg1. That way,
 * the mpeg1 video gets a proper trailer when it is closed. */
static int mpeg1_write_trailer(AVFormatContext *s)
{
    /* Pick the byte I/O context once: libavformat build 52 changed s->pb
     * from an embedded struct to a pointer. */
#if LIBAVFORMAT_BUILD >= (52<<16)
    ByteIOContext *pb = s->pb;
#else
    ByteIOContext *pb = &s->pb;
#endif /* LIBAVFORMAT_BUILD >= (52<<16) */

    put_buffer(pb, mpeg1_trailer, 4);
    put_flush_packet(pb);
    return 0; /* success */
}
/**
 * mpeg1_write_trailer
 * We set AVOutputFormat->write_trailer to this function for mpeg1. That way,
 * the mpeg1 video gets a proper trailer when it is closed.
 *
 * The three preprocessor branches cover, newest first: the avio API
 * (FF_API_NEW_AVIO), the pointer-based ByteIOContext (build >= 52), and
 * the embedded ByteIOContext of older libavformat versions.
 *
 * Returns 0
 *
 */
static int mpeg1_write_trailer(AVFormatContext *s)
{
#if defined FF_API_NEW_AVIO
    avio_write(s->pb, mpeg1_trailer, 4);
    avio_flush(s->pb);
#elif LIBAVFORMAT_BUILD >= (52<<16)
    put_buffer(s->pb, mpeg1_trailer, 4);
    put_flush_packet(s->pb);
#else
    put_buffer(&s->pb, mpeg1_trailer, 4);
    put_flush_packet(&s->pb);
#endif /* FF_API_NEW_AVIO -- LIBAVFORMAT_BUILD >= (52<<16) */
    return 0; /* success */
}
/* Write one image: open a numbered per-frame file (unless piping) and dump
 * the packet payload into it. Uses the old embedded-ByteIOContext API
 * (note the &s->pb). */
static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    VideoData *img = s->priv_data;
    ByteIOContext pb1, *pb; /* pb1: on-stack context for the per-frame file */
    char filename[1024];

    if (!img->is_pipe) {
        /* a failing filename pattern is tolerated for the first image only */
        if (get_frame_filename(filename, sizeof(filename),
                               img->path, img->img_number) < 0 && img->img_number>1)
            return AVERROR_IO;
        pb = &pb1;
        if (url_fopen(pb, filename, URL_WRONLY) < 0)
            return AVERROR_IO;
    } else {
        pb = &s->pb;
    }

    put_buffer(pb, pkt->data, pkt->size);
    put_flush_packet(pb);
    if (!img->is_pipe) {
        url_fclose(pb);
    }

    img->img_number++;
    return 0;
}
/* Initialize the ASF muxer: allocate the index array, write the header
 * with a placeholder data size, and set up the in-memory packet buffer.
 *
 * Fixes: the av_malloc result was cast (unneeded in C) and never checked,
 * so an allocation failure would be dereferenced later. */
static int asf_write_header(AVFormatContext *s)
{
    ASFContext *asf = s->priv_data;

    asf->packet_size = PACKET_SIZE;
    asf->nb_packets = 0;

    asf->last_indexed_pts = 0;
    asf->index_ptr = av_malloc(sizeof(ASFIndex) * ASF_INDEX_BLOCK);
    if (!asf->index_ptr)
        return -1; /* out of memory */
    asf->nb_index_memory_alloc = ASF_INDEX_BLOCK;
    asf->nb_index_count = 0;
    asf->maximum_packet = 0;

    if (asf_write_header1(s, 0, 50) < 0) {
        //av_free(asf);
        return -1;
    }
    put_flush_packet(&s->pb);

    asf->packet_nb_payloads = 0;
    asf->prev_packet_sent_time = 0;
    asf->packet_timestamp_start = -1;
    asf->packet_timestamp_end = -1;
    /* packets are first assembled in this memory-backed context, then
     * flushed to the real output by flush_packet() */
    init_put_byte(&asf->pb, asf->packet_buf, asf->packet_size, 1,
                  NULL, NULL, NULL, NULL);

    return 0;
}
/* Initialize the ASF muxer: allocate the index array, write the header
 * with a placeholder data size, and set up the in-memory packet buffer.
 *
 * Fixes: the av_malloc result was never checked, so an allocation failure
 * would be dereferenced later. */
static int asf_write_header(AVFormatContext *s)
{
    ASFContext *asf = s->priv_data;

    asf->packet_size = PACKET_SIZE;
    asf->nb_packets = 0;

    asf->last_indexed_pts = 0;
    asf->index_ptr = av_malloc( sizeof(ASFIndex) * ASF_INDEX_BLOCK );
    if (!asf->index_ptr)
        return -1; /* out of memory */
    asf->nb_index_memory_alloc = ASF_INDEX_BLOCK;
    asf->nb_index_count = 0;
    asf->maximum_packet = 0;

    /* the data-chunk-size has to be 50, which is data_size - asf->data_offset
     * at the moment this function is done. It is needed to use asf as
     * streamable format. */
    if (asf_write_header1(s, 0, 50) < 0) {
        //av_free(asf);
        return -1;
    }
    put_flush_packet(s->pb);

    asf->packet_nb_payloads = 0;
    asf->packet_timestamp_start = -1;
    asf->packet_timestamp_end = -1;
    /* packets are first assembled in this memory-backed context, then
     * flushed to the real output by flush_packet() */
    init_put_byte(&asf->pb, asf->packet_buf, asf->packet_size, 1,
                  NULL, NULL, NULL, NULL);

    return 0;
}
/* Finish the SWF: free the audio fifo, write the END tag, and (when the
 * output is seekable and a video stream exists) patch the file size and
 * frame counts back into the header.
 *
 * Fixes: av_fifo_free(&swf->audio_fifo) was called once per non-video
 * stream, which is a double free when more than one such stream exists;
 * it is now freed at most once. */
static int swf_write_trailer(AVFormatContext *s)
{
    SWFContext *swf = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVCodecContext *enc, *video_enc;
    int file_size, i;
    int audio_fifo_freed = 0;

    video_enc = NULL;
    for(i=0;i<s->nb_streams;i++) {
        enc = s->streams[i]->codec;
        if (enc->codec_type == CODEC_TYPE_VIDEO) {
            video_enc = enc;
        } else if (!audio_fifo_freed) {
            av_fifo_free(&swf->audio_fifo);
            audio_fifo_freed = 1;
        }
    }

    put_swf_tag(s, TAG_END);
    put_swf_end_tag(s);

    put_flush_packet(s->pb);

    /* patch file size and number of frames if not streamed */
    if (!url_is_streamed(s->pb) && video_enc) {
        file_size = url_ftell(pb);
        url_fseek(pb, 4, SEEK_SET);
        put_le32(pb, file_size);
        url_fseek(pb, swf->duration_pos, SEEK_SET);
        put_le16(pb, swf->video_frame_number);
        url_fseek(pb, swf->vframes_pos, SEEK_SET);
        put_le16(pb, swf->video_frame_number);
        url_fseek(pb, file_size, SEEK_SET);
    }
    return 0;
}
/* Write one audio frame. AC-3 payloads are byte-swapped 16 bits at a time
 * ("the words seem to be reversed" in this RM variant); everything else is
 * written verbatim.
 *
 * Fixes: the av_malloc result was cast (unneeded in C) and never checked
 * before use. */
static int rm_write_audio(AVFormatContext *s, const uint8_t *buf, int size, int flags)
{
    uint8_t *buf1;
    RMMuxContext *rm = s->priv_data;
    ByteIOContext *pb = s->pb;
    StreamInfo *stream = rm->audio_stream;
    int i;

    /* XXX: suppress this malloc */
    buf1 = av_malloc( size * sizeof(uint8_t) );
    if (!buf1)
        return -1; /* out of memory */

    write_packet_header(s, stream, size, !!(flags & AV_PKT_FLAG_KEY));

    if (stream->enc->codec_id == CODEC_ID_AC3) {
        /* for AC-3, the words seem to be reversed */
        for(i=0;i<size;i+=2) {
            buf1[i] = buf[i+1];
            buf1[i+1] = buf[i];
        }
        put_buffer(pb, buf1, size);
    } else {
        put_buffer(pb, buf, size);
    }
    put_flush_packet(pb);
    stream->nb_frames++;
    av_free(buf1);
    return 0;
}
/* Write one FLV tag for an audio or video packet: tag type byte, 24-bit
 * body size, 24-bit timestamp, flags byte, payload, then the trailing
 * previous-tag-size field. */
static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    ByteIOContext *pb = &s->pb;
    AVCodecContext *enc = s->streams[pkt->stream_index]->codec;
    FLVContext *flv = s->priv_data;
    int size= pkt->size;
    int flags;

//    av_log(s, AV_LOG_DEBUG, "type:%d pts: %"PRId64" size:%d\n", enc->codec_type, timestamp, size);

    if (enc->codec_type == CODEC_TYPE_VIDEO) {
        put_byte(pb, 9); /* video tag */
        flags = 2; // choose h263
        flags |= pkt->flags & PKT_FLAG_KEY ? 0x10 : 0x20; // add keyframe indicator
    } else {
        assert(enc->codec_type == CODEC_TYPE_AUDIO);
        flags = get_audio_flags(enc);
        assert(size);
        put_byte(pb, 8); /* audio tag */
    }

    put_be24(pb,size+1); // include flags
    /* NOTE(review): the timestamp is written as 24 bits only, so pts
     * values beyond 2^24-1 would wrap here — FLV's extended-timestamp
     * byte is not emitted by this version; confirm against the spec. */
    put_be24(pb,pkt->pts);
    put_be32(pb,flv->reserved);
    put_byte(pb,flags);
    put_buffer(pb, pkt->data, size);
    put_be32(pb,size+1+11); // previous tag size: body + flags byte + 11-byte tag header
    flv->duration = pkt->pts + pkt->duration;

    put_flush_packet(pb);
    return 0;
}
/* Pad the in-flight FFM packet out to a full packet_size, emit it with its
 * header, and reset the per-packet write state. */
static void flush_packet(AVFormatContext *s)
{
    FFMContext *ffm = s->priv_data;
    ByteIOContext *pb = s->pb;
    int pad_len, hdr_flags;

    /* zero the unused tail so every packet is exactly packet_size bytes */
    pad_len = ffm->packet_end - ffm->packet_ptr;
    memset(ffm->packet_ptr, 0, pad_len);

    if (url_ftell(pb) % ffm->packet_size)
        av_abort();

    /* packet header */
    put_be16(pb, PACKET_ID);
    put_be16(pb, pad_len);
    put_be64(pb, ffm->dts);

    hdr_flags = ffm->frame_offset;
    if (ffm->first_packet)
        hdr_flags |= 0x8000;
    put_be16(pb, hdr_flags);

    put_buffer(pb, ffm->packet, ffm->packet_end - ffm->packet);
    put_flush_packet(pb);

    /* prepare next packet */
    ffm->frame_offset = 0; /* no key frame */
    ffm->packet_ptr   = ffm->packet;
    ffm->first_packet = 0;
}
/* Copy the ASS/SSA script header out of the codec extradata, line by line,
 * stopping after the line that follows "[Events]". Requires exactly one
 * SSA stream. */
static int write_header(AVFormatContext *s)
{
    ASSContext *ass = s->priv_data;
    AVCodecContext *avctx= s->streams[0]->codec;
    uint8_t *last= NULL;   /* start of the previously written line */

    if(s->nb_streams != 1 || avctx->codec_id != CODEC_ID_SSA){
        av_log(s, AV_LOG_ERROR, "Exactly one ASS/SSA stream is needed.\n");
        return -1;
    }

    while(ass->extra_index < avctx->extradata_size){
        uint8_t *p = avctx->extradata + ass->extra_index;
        /* advance to just past the next newline (or to the end of the
         * extradata if the last line is unterminated) */
        uint8_t *end= strchr(p, '\n');
        if(!end) end= avctx->extradata + avctx->extradata_size;
        else end++;

        put_buffer(s->pb, p, end-p);
        ass->extra_index += end-p;

        /* the line just written followed the "[Events]" header: done */
        if(last && !memcmp(last, "[Events]", 8))
            break;

        last=p;
    }

    put_flush_packet(s->pb);
    return 0;
}
/* Finalize an AIFF file: pad to an even length, then (for seekable output)
 * patch the FORM size, the COMM numSampleFrames count, and the SSND chunk
 * size recorded during header writing. */
static int aiff_write_trailer(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    AIFFOutputContext *aiff = s->priv_data;
    AVCodecContext *enc = s->streams[0]->codec;

    /* Chunks sizes must be even */
    int64_t file_size, end_size;
    end_size = file_size = url_ftell(pb);
    if (file_size & 1) {
        put_byte(pb, 0); /* pad byte; not counted in file_size */
        end_size++;
    }

    if (!url_is_streamed(s->pb)) {
        /* File length */
        url_fseek(pb, aiff->form, SEEK_SET);
        put_be32(pb, file_size - aiff->form - 4);

        /* Number of sample frames: sound-data bytes / block_align.
         * The 12 bytes subtracted are assumed to be the SSND chunk
         * preamble written by the header code — TODO confirm. */
        url_fseek(pb, aiff->frames, SEEK_SET);
        put_be32(pb, (file_size-aiff->ssnd-12)/enc->block_align);

        /* Sound Data chunk size */
        url_fseek(pb, aiff->ssnd, SEEK_SET);
        put_be32(pb, file_size - aiff->ssnd - 4);

        /* return to the end */
        url_fseek(pb, end_size, SEEK_SET);

        put_flush_packet(pb);
    }

    return 0;
}
/* Emit one GIF frame: a graphic control extension carrying the frame
 * delay, followed by the image data itself. */
static int gif_write_video(AVFormatContext *s, AVCodecContext *enc,
                           const uint8_t *buf, int size)
{
    ByteIOContext *pb = s->pb;
    GIFContext *gif = s->priv_data;
    int jiffies;
    int64_t delay; /* computed but currently unused — see XXX below */

    /* graphic control extension block */
    put_byte(pb, 0x21);
    put_byte(pb, 0xf9);
    put_byte(pb, 0x04); /* block size */
    put_byte(pb, 0x04); /* flags */

    /* 1 jiffy is 1/70 s */
    /* the delay_time field indicates the number of jiffies - 1 */
    delay = gif->file_time - gif->time;
    /* XXX: should use delay, in order to be more accurate */
    /* instead of using the same rounded value each time */
    /* XXX: don't even remember if I really use it for now */
    jiffies = (70*enc->time_base.num/enc->time_base.den) - 1;
    put_le16(pb, jiffies);

    put_byte(pb, 0x1f); /* transparent color index */
    put_byte(pb, 0x00); /* block terminator */

    gif_image_write_image(pb, 0, 0, enc->width, enc->height,
                          buf, enc->width * 3, PIX_FMT_RGB24);

    put_flush_packet(s->pb);
    return 0;
}
/* Patch the 16-bit size field in the RSO header once the stream length is
 * known; the field saturates at 0xffff for oversized files. */
static int rso_write_trailer(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    uint16_t size_field;
    int64_t pos;

    pos = url_ftell(pb);
    if (pos < 0)
        return pos;

    if (pos > 0xffff + RSO_HEADER_SIZE) {
        av_log(s, AV_LOG_WARNING,
               "Output file is too big (%"PRId64" bytes >= 64kB)\n", pos);
        size_field = 0xffff;
    } else {
        size_field = pos - RSO_HEADER_SIZE;
    }

    /* rewrite the size field, then restore the write position */
    url_fseek(pb, 2, SEEK_SET);
    put_be16(pb, size_field);
    url_fseek(pb, pos, SEEK_SET);

    put_flush_packet(pb);
    return 0;
}
/* send an rtcp sender report packet */
static void rtcp_send_sr(AVFormatContext *s1, int64_t ntp_time)
{
    RTPMuxContext *s = s1->priv_data;
    uint32_t rtp_ts;
#ifdef _MSC_VER
    /* the MSVC branch avoids the C99 compound literal used below */
    AVRational rational = {1, 1000000};
#endif

    dprintf(s1, "RTCP: %02x %"PRIx64" %x\n", s->payload_type, ntp_time, s->timestamp);

    s->last_rtcp_ntp_time = ntp_time;
    /* convert the elapsed NTP time (microseconds) into the stream timebase */
#ifdef _MSC_VER
    rtp_ts = av_rescale_q(ntp_time - s->first_rtcp_ntp_time, rational,
                          s1->streams[0]->time_base) + s->base_timestamp;
#else
    rtp_ts = av_rescale_q(ntp_time - s->first_rtcp_ntp_time, (AVRational){1, 1000000},
                          s1->streams[0]->time_base) + s->base_timestamp;
#endif
    put_byte(s1->pb, (RTP_VERSION << 6)); /* version, no padding, RC = 0 */
    put_byte(s1->pb, RTCP_SR);
    put_be16(s1->pb, 6); /* length in words - 1 */
    put_be32(s1->pb, s->ssrc);
    put_be32(s1->pb, ntp_time / 1000000);                     /* NTP seconds */
    put_be32(s1->pb, ((ntp_time % 1000000) << 32) / 1000000); /* NTP fraction, in 2^-32 units */
    put_be32(s1->pb, rtp_ts);
    put_be32(s1->pb, s->packet_count);
    put_be32(s1->pb, s->octet_count);
    put_flush_packet(s1->pb);
}
/* Emit the buffered ASF data packet: write the payload parsing info,
 * zero-pad the unused tail, dump the payload bytes, and reset the
 * per-packet state for the next packet. */
static void flush_packet(AVFormatContext *s)
{
    ASFContext *asf = s->priv_data;
    int packet_hdr_size, packet_filled_size;

    assert(asf->packet_timestamp_end >= asf->packet_timestamp_start);

    if (asf->is_streamed) {
        put_chunk(s, 0x4424, asf->packet_size, 0); /* streaming chunk wrapper */
    }

    packet_hdr_size = put_payload_parsing_info(
                            s,
                            asf->packet_timestamp_start,
                            asf->packet_timestamp_end - asf->packet_timestamp_start,
                            asf->packet_nb_payloads,
                            asf->packet_size_left
                        );

    packet_filled_size = PACKET_SIZE - asf->packet_size_left;
    assert(packet_hdr_size <= asf->packet_size_left);
    /* zero-pad the unused tail of the in-memory packet buffer */
    memset(asf->packet_buf + packet_filled_size, 0, asf->packet_size_left);

    /* the parsing info was presumably written straight to s->pb by
     * put_payload_parsing_info, so only packet_size - packet_hdr_size
     * bytes of payload follow it — confirm against that helper */
    put_buffer(s->pb, asf->packet_buf, asf->packet_size - packet_hdr_size);

    put_flush_packet(s->pb);
    asf->nb_packets++;
    asf->packet_nb_payloads = 0;
    asf->packet_timestamp_start = -1;
    asf->packet_timestamp_end = -1;
    /* re-arm the memory-backed context over the packet buffer */
    init_put_byte(&asf->pb, asf->packet_buf, asf->packet_size, 1,
                  NULL, NULL, NULL, NULL);
}
/* Write one Ogg page. The CRC is accumulated over the whole page by the
 * checksum machinery and patched in at crc_offset at the end. Returns the
 * number of payload bytes consumed (capped at one full page). */
static int ogg_write_page(AVFormatContext *s, const uint8_t *data, int size,
                          int64_t granule, int stream_index, int flags)
{
    OGGStreamContext *oggstream = s->streams[stream_index]->priv_data;
    int64_t crc_offset;
    int page_segments, i;

    if (size >= 255*255) {
        /* page is full: cap the payload and write granule -1, meaning no
         * packet ends on this page */
        granule = -1;
        size = 255*255;
    } else if (oggstream->eos)
        flags |= 4; /* end-of-stream page flag */

    /* one lacing value per 255 bytes, plus one for the remainder */
    page_segments = FFMIN((size/255)+!!size, 255);

    init_checksum(s->pb, ff_crc04C11DB7_update, 0);
    put_tag(s->pb, "OggS");
    put_byte(s->pb, 0); /* stream structure version */
    put_byte(s->pb, flags);
    put_le64(s->pb, granule);
    put_le32(s->pb, stream_index); /* used as the bitstream serial number */
    put_le32(s->pb, oggstream->page_counter++);
    crc_offset = url_ftell(s->pb);
    put_le32(s->pb, 0); // crc placeholder, patched below
    put_byte(s->pb, page_segments);
    for (i = 0; i < page_segments-1; i++)
        put_byte(s->pb, 255);
    if (size) {
        put_byte(s->pb, size - (page_segments-1)*255);
        put_buffer(s->pb, data, size);
    }
    ogg_update_checksum(s, crc_offset);
    put_flush_packet(s->pb);
    return size;
}
/* Trailer: emit the global tags, then a tagged section per stream and per
 * chapter (with its timebase and start/end times). */
static int write_trailer(AVFormatContext *s)
{
    int n;

    write_tags(s->pb, s->metadata);

    for (n = 0; n < s->nb_streams; n++) {
        put_tag(s->pb, ID_STREAM);
        put_byte(s->pb, '\n');
        write_tags(s->pb, s->streams[n]->metadata);
    }

    for (n = 0; n < s->nb_chapters; n++) {
        AVChapter *chapter = s->chapters[n];

        put_tag(s->pb, ID_CHAPTER);
        put_byte(s->pb, '\n');
        url_fprintf(s->pb, "TIMEBASE=%d/%d\n",
                    chapter->time_base.num, chapter->time_base.den);
        url_fprintf(s->pb, "START=%"PRId64"\n", chapter->start);
        url_fprintf(s->pb, "END=%"PRId64"\n", chapter->end);
        write_tags(s->pb, chapter->metadata);
    }

    put_flush_packet(s->pb);
    return 0;
}
/* Finalize a RealMedia file: for seekable output, append the end marker,
 * then rewrite the header with the real data size and per-stream frame
 * counts; for streamed output just append the end marker. */
static int rm_write_trailer(AVFormatContext *s)
{
    RMMuxContext *rm = s->priv_data;
    int data_size, index_pos, i;
    ByteIOContext *pb = s->pb;

    if (!url_is_streamed(s->pb)) {
        /* end of file: finish to write header */
        index_pos = url_fseek(pb, 0, SEEK_CUR); /* current position = end of data */
        data_size = index_pos - rm->data_pos;

        /* FIXME: write index */

        /* undocumented end header */
        put_be32(pb, 0);
        put_be32(pb, 0);

        url_fseek(pb, 0, SEEK_SET);
        /* publish the frame counts gathered while muxing */
        for(i=0;i<s->nb_streams;i++)
            rm->streams[i].total_frames = rm->streams[i].nb_frames;
        rv10_write_header(s, data_size, 0);
    } else {
        /* undocumented end header */
        put_be32(pb, 0);
        put_be32(pb, 0);
    }
    put_flush_packet(pb);
    return 0;
}
/* Wrap one packet in GXF media framing and push it to the output
 * immediately. */
static int gxf_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    GXFContext *gxf = s->priv_data;
    ByteIOContext *pb = s->pb;

    gxf_write_media_packet(pb, gxf, pkt);
    put_flush_packet(pb);
    return 0;
}
/* Terminate the GIF stream with the trailer byte (0x3b) and flush. */
static int gif_write_trailer(AVFormatContext *s)
{
    put_byte(s->pb, 0x3b);
    put_flush_packet(s->pb);

    return 0;
}
/* Old libogg-based packet writer. `buf` holds one or more serialized
 * ogg_packet structs, each immediately followed by its payload bytes.
 * Packet and granule numbers are rebased to the first packet seen, which
 * the comment below notes is essential for streaming. */
static int ogg_write_packet(AVFormatContext *avfcontext,
                            int stream_index,
                            unsigned char *buf, int size, int force_pts)
{
    OggContext *context = avfcontext->priv_data ;
    ogg_packet *op ;
    ogg_page og ;
    int l = 0 ; /* read offset into buf */

    /* flush header packets so audio starts on a new page */
    if(!context->header_handled) {
        while(ogg_stream_flush(&context->os, &og)) {
            put_buffer(&avfcontext->pb, og.header, og.header_len) ;
            put_buffer(&avfcontext->pb, og.body, og.body_len) ;
            put_flush_packet(&avfcontext->pb);
        }
        context->header_handled = 1 ;
    }

    while(l < size) {
        op = (ogg_packet*)(buf + l) ;
        op->packet = buf + l + sizeof(ogg_packet) ; /* fix data pointer */

        if(!context->base_packet_no) { /* this is the first packet */
            context->base_packet_no = op->packetno ;
            context->base_granule_pos = op->granulepos ;
        }

        /* correct the fields in the packet -- essential for streaming */
        op->packetno -= context->base_packet_no ;
        op->granulepos -= context->base_granule_pos ;

        ogg_stream_packetin(&context->os, op) ;
        l += sizeof(ogg_packet) + op->bytes ;

        /* emit any pages libogg has completed */
        while(ogg_stream_pageout(&context->os, &og)) {
            put_buffer(&avfcontext->pb, og.header, og.header_len) ;
            put_buffer(&avfcontext->pb, og.body, og.body_len) ;
            put_flush_packet(&avfcontext->pb);
        }
    }

    return 0;
}
/* Emit the metadata file magic followed by the format version line. */
static int write_header(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;

    put_tag(pb, ID_STRING);
    put_byte(pb, '1'); // version
    put_byte(pb, '\n');
    put_flush_packet(pb);
    return 0;
}
/* Raw muxer: the packet payload goes straight to the output, unframed. */
static int write_packet(AVFormatContext *s, AVPacket *pkt)
{
    ByteIOContext *pb = s->pb;

    put_buffer(pb, pkt->data, pkt->size);
    put_flush_packet(pb);
    return 0;
}
/* D-Cinema audio: each packet is prefixed with a 16-bit payload size and
 * a fixed marker word.
 *
 * Fixes: the size field is only 16 bits wide; packets larger than 65535
 * bytes were silently truncated in the header, producing a corrupt file.
 * They are now rejected. */
static int daud_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
    if (pkt->size > 65535)
        return -1; /* cannot be represented in the 16-bit size field */
    put_be16(s->pb, pkt->size);
    put_be16(s->pb, 0x8010); // unknown
    put_buffer(s->pb, pkt->data, pkt->size);
    put_flush_packet(s->pb);
    return 0;
}
/* Write one OpenDML ("ix##") leaf index chunk per stream and update the
 * corresponding entry in each stream's master ("indx") index. Requires a
 * seekable output. */
static int avi_write_ix(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    AVIContext *avi = s->priv_data;
    char tag[5];
    char ix_tag[] = "ix00";
    int i, j;

    assert(!url_is_streamed(pb));

    /* the master index has a fixed number of slots, one per RIFF segment */
    if (avi->riff_id > AVI_MASTER_INDEX_SIZE)
        return -1;

    for (i=0;i<s->nb_streams;i++) {
        AVIStream *avist= s->streams[i]->priv_data;
        int64_t ix, pos;

        avi_stream2fourcc(&tag[0], i, s->streams[i]->codec->codec_type);
        ix_tag[3] = '0' + i;

        /* Writing AVI OpenDML leaf index chunk */
        ix = url_ftell(pb);
        put_tag(pb, &ix_tag[0]);     /* ix?? */
        put_le32(pb, avist->indexes.entry * 8 + 24); /* chunk size */
        put_le16(pb, 2);             /* wLongsPerEntry */
        put_byte(pb, 0);             /* bIndexSubType (0 == frame index) */
        put_byte(pb, 1);             /* bIndexType (1 == AVI_INDEX_OF_CHUNKS) */
        put_le32(pb, avist->indexes.entry); /* nEntriesInUse */
        put_tag(pb, &tag[0]);        /* dwChunkId */
        put_le64(pb, avi->movi_list);/* qwBaseOffset */
        put_le32(pb, 0);             /* dwReserved_3 (must be 0) */

        for (j=0; j<avist->indexes.entry; j++) {
            AVIIentry* ie = avi_get_ientry(&avist->indexes, j);
            put_le32(pb, ie->pos + 8);
            /* entries without flag 0x10 (presumably the keyframe flag —
             * confirm) get the top bit of the size set */
            put_le32(pb, ((uint32_t)ie->len & ~0x80000000) |
                         (ie->flags & 0x10 ? 0 : 0x80000000));
        }
        put_flush_packet(pb);
        pos = url_ftell(pb);

        /* Updating one entry in the AVI OpenDML master index */
        url_fseek(pb, avist->indexes.indx_start - 8, SEEK_SET);
        put_tag(pb, "indx");            /* enabling this entry */
        url_fskip(pb, 8);
        put_le32(pb, avi->riff_id);     /* nEntriesInUse */
        url_fskip(pb, 16*avi->riff_id);
        put_le64(pb, ix);               /* qwOffset */
        put_le32(pb, pos - ix);         /* dwSize */
        put_le32(pb, avist->indexes.entry); /* dwDuration */

        url_fseek(pb, pos, SEEK_SET);
    }
    return 0;
}
/* Emit the initial multipart/x-mixed-replace boundary line.
 *
 * Fixes: the buffer was declared uint8_t[] but passed to snprintf and
 * strlen, which take char * — an incompatible pointer type in C. Use a
 * char buffer and cast only at the byte-oriented put_buffer call. */
static int mpjpeg_write_header(AVFormatContext *s)
{
    char buf1[256];

    snprintf(buf1, sizeof(buf1), "--%s\n", BOUNDARY_TAG);
    put_buffer(s->pb, (const uint8_t *)buf1, strlen(buf1));
    put_flush_packet(s->pb);
    return 0;
}
/* Write one image. For raw video the three planes (Y, U, V) go to three
 * separate files whose names differ only in the last character; all other
 * codecs write a single file (or the pipe). */
static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    VideoData *img = s->priv_data;
    ByteIOContext pb1[3], *pb[3]= {&pb1[0], &pb1[1], &pb1[2]};
    char filename[1024];
    AVCodecContext *codec= s->streams[ pkt->stream_index ]->codec;
    int i;

    if (!img->is_pipe) {
        /* a failing filename pattern is tolerated for the first image only */
        if (av_get_frame_filename(filename, sizeof(filename),
                                  img->path, img->img_number) < 0 && img->img_number>1)
            return AVERROR_IO;
        for(i=0; i<3; i++){
            if (url_fopen(pb[i], filename, URL_WRONLY) < 0)
                return AVERROR_IO;

            if(codec->codec_id != CODEC_ID_RAWVIDEO)
                break;
            /* prepare the next plane's filename: the last character
             * becomes 'U', then 'V' */
            filename[ strlen(filename) - 1 ]= 'U' + i;
        }
    } else {
        pb[0] = &s->pb;
    }

    if(codec->codec_id == CODEC_ID_RAWVIDEO){
        int ysize = codec->width * codec->height;
        /* NOTE(review): in the pipe case only pb[0] is initialized, so
         * this branch would use uninitialized contexts for the chroma
         * planes — presumably rawvideo is never combined with piping;
         * confirm at the call sites. */
        put_buffer(pb[0], pkt->data        , ysize);
        put_buffer(pb[1], pkt->data + ysize, (pkt->size - ysize)/2);
        put_buffer(pb[2], pkt->data + ysize +(pkt->size - ysize)/2, (pkt->size - ysize)/2);
        put_flush_packet(pb[1]);
        put_flush_packet(pb[2]);
        url_fclose(pb[1]);
        url_fclose(pb[2]);
    }else{
        put_buffer(pb[0], pkt->data, pkt->size);
    }
    put_flush_packet(pb[0]);
    if (!img->is_pipe) {
        url_fclose(pb[0]);
    }

    img->img_number++;
    return 0;
}