/* Read a planar YUV420P image whose planes live in three sibling files
 * (.Y/.U/.V).  `f` is already open on the .Y file; the U and V planes are
 * opened here by rewriting the extension character of f's filename.
 * Returns 0 on success, AVERROR_IO on failure, or the callback's error. */
static int yuv_read(ByteIOContext *f, int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
{
    ByteIOContext pb1, *pb = &pb1;
    int img_size, ret;
    char fname[1024], *p;
    int size;
    URLContext *h;
    AVImageInfo info1, *info = &info1;

    img_size = url_fsize(f);

    /* XXX: hack hack */
    h = url_fileno(f);
    url_get_filename(h, fname, sizeof(fname));

    /* derive width/height from the luma file's size */
    if (infer_size(&info->width, &info->height, img_size) < 0) {
        return AVERROR_IO;
    }
    info->pix_fmt = PIX_FMT_YUV420P;

    /* let the caller allocate the destination picture */
    ret = alloc_cb(opaque, info);
    if (ret)
        return ret;

    size = info->width * info->height;   /* luma plane size; chroma is size/4 */

    p = strrchr(fname, '.');
    if (!p || p[1] != 'Y')
        return AVERROR_IO;

    get_buffer(f, info->pict.data[0], size);

    p[1] = 'U';   /* reuse the path with a .U extension for the U plane */
    if (url_fopen(pb, fname, URL_RDONLY) < 0)
        return AVERROR_IO;

    get_buffer(pb, info->pict.data[1], size / 4);
    url_fclose(pb);

    p[1] = 'V';   /* and .V for the V plane */
    if (url_fopen(pb, fname, URL_RDONLY) < 0)
        return AVERROR_IO;

    get_buffer(pb, info->pict.data[2], size / 4);
    url_fclose(pb);
    return 0;
}
bool AVFormatWriter::OpenFile(void) { if (!(m_fmt.flags & AVFMT_NOFILE)) { if (url_fopen(&m_ctx->pb, m_filename.toAscii().constData(), URL_WRONLY) < 0) { LOG(VB_RECORD, LOG_ERR, LOC + "OpenFile(): url_fopen() failed"); return false; } } m_ringBuffer = RingBuffer::Create(m_filename, true); if (!m_ringBuffer) { LOG(VB_RECORD, LOG_ERR, LOC + "OpenFile(): RingBuffer::Create() failed"); return false; } m_avfRingBuffer = new AVFRingBuffer(m_ringBuffer); URLContext *uc = (URLContext *)m_ctx->pb->opaque; uc->prot = &AVF_RingBuffer_Protocol; uc->priv_data = (void *)m_avfRingBuffer; av_write_header(m_ctx); return true; }
/* Simple crawler driver: fetches the page at BASE (set from argv[1]),
   lower-cases and echoes each line, and prints every URL found in it in
   normalised (absolute) form. */
int main(int argc, char **argv)
{
    URL_FILE *handle;
    char buffer[BUFSIZE];

    if(argc > 1)
        /* NOTE(review): unbounded strcpy into BASE -- overflows if argv[1]
           is longer than BASE's storage; confirm BASE's declared size and
           bound this copy */
        strcpy(BASE,argv[1]);
    else {
        fprintf(stderr, "Usage: %s BaseURL\n",argv[0]);
        exit(1);
    }

    handle = url_fopen(BASE, "r");
    if (!handle) {
        fprintf(stderr,"couldn't url_fopen() %s\n", BASE);
        return 2;
    }

    while(!url_feof(handle)) {
        url_fgets(buffer,sizeof(buffer),handle);
        strlower(buffer);
        fputs(buffer,stdout);

        /* scan the (lower-cased) line for URLs and print each one in
           absolute form */
        char *cur, link[BUFSIZE], full_link[BUFSIZE];
        cur = buffer;
        while ((cur = nextURL(cur)) != NULL) {
            getURL(cur, link, BUFSIZE-1);
            normalise(link, full_link, BUFSIZE-1);
            printf("%s\n",full_link);
            cur += strlen(link);
        }
    }

    url_fclose(handle);
    return 0;
}
/// Move on to next fragment IF NEEDED (changes file) /// This method won't change file if it's not ready to void advance_fragment(EncoderJob &jobSpec) { // Check to see if this frame should be split. if (should_advance(jobSpec)) { jobSpec.SplitNextKey = (jobSpec.a_pts > jobSpec.v_pts) ? (jobSpec.a_pts) : (jobSpec.v_pts); jobSpec.SegmentNumber++; #ifdef NEW_M2TS jobSpec.p->CloseFile(); sprintf(jobSpec.oc->filename, "%s-%05u.ts", jobSpec.BaseDirectory, jobSpec.SegmentNumber); int track_ids[2] = {120, 121}; uint8_t track_types[2] = {Pests::TT_H264, Pests::TT_MpegAudio}; jobSpec.p->StartFile(jobSpec.oc->filename, track_ids, track_types, 2); #else url_fclose(jobSpec.oc->pb); sprintf(jobSpec.oc->filename, "%s-%05u.ts", jobSpec.BaseDirectory, jobSpec.SegmentNumber); if (url_fopen(&jobSpec.oc->pb, jobSpec.oc->filename, URL_WRONLY) < 0) { fprintf(stderr, "Could not open '%s'\n", jobSpec.oc->filename); jobSpec.IsValid = false; return; } av_write_header(jobSpec.oc); #endif } }
/*=======================================================================================*/
/* Decide whether `filename` refers to a recognised file-list/playlist format.
 * Returns the probe score 100 when a list demuxer recognises it, 0 otherwise,
 * or AVERROR(EIO) when the file cannot be opened.
 * If `s` is NULL a private I/O context is opened (and closed) here;
 * otherwise the caller's context is probed and its position restored. */
int url_is_file_list(ByteIOContext *s, const char *filename)
{
    int ret;
    list_demux_t *demux;
    ByteIOContext *lio = s;
    int64_t oldpos = 0;   /* fix: was declared `int64_t *` -- url_ftell()
                             returns an offset, not a pointer, and url_fseek()
                             takes an offset */
    if (am_getconfig_bool("media.amplayer.usedm3udemux")) {
        return 0; /*if used m3u demux,always failed;*/
    }
    if (!lio) {
        ret = url_fopen(&lio, filename, AVIO_FLAG_READ | URL_MINI_BUFFER|URL_NO_LP_BUFFER);
        if (ret != 0) {
            return AVERROR(EIO);
        }
    } else {
        oldpos = url_ftell(lio);   /* remember where the caller was */
    }
    demux = probe_demux(lio, filename);
    if (lio != s) {
        url_fclose(lio);
    } else {
        url_fseek(lio, oldpos, SEEK_SET);   /* restore caller's position */
    }
    return demux != NULL ? 100 : 0;
}
/* Read the next frame.  For rawvideo the three YUV planes may live in three
 * separate files whose names differ only in the last character (Y/U/V);
 * other codecs read a single image file, or a 4096-byte chunk from the pipe. */
static int img_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoData *s = s1->priv_data;
    char filename[1024];
    int i;
    int size[3]={0}, ret[3]={0};
    ByteIOContext f1[3], *f[3]= {&f1[0], &f1[1], &f1[2]};
    AVCodecContext *codec= s1->streams[0]->codec;

    if (!s->is_pipe) {
        /* loop over input */
        if (s1->loop_input && s->img_number > s->img_last) {
            s->img_number = s->img_first;
        }
        if (av_get_frame_filename(filename, sizeof(filename), s->path, s->img_number)<0 && s->img_number > 1)
            return AVERROR_IO;
        for(i=0; i<3; i++){
            if (url_fopen(f[i], filename, URL_RDONLY) < 0)
                return AVERROR_IO;
            size[i]= url_fsize(f[i]);

            /* non-raw codecs use a single file: stop after the first open */
            if(codec->codec_id != CODEC_ID_RAWVIDEO)
                break;
            /* rawvideo: rewrite the trailing character to open the .U then
               .V plane files on the next iterations */
            filename[ strlen(filename) - 1 ]= 'U' + i;
        }

        /* derive dimensions from the luma file size if not yet known */
        if(codec->codec_id == CODEC_ID_RAWVIDEO && !codec->width)
            infer_size(&codec->width, &codec->height, size[0]);
    } else {
        f[0] = &s1->pb;
        if (url_feof(f[0]))
            return AVERROR_IO;
        size[0]= 4096;   /* pipe: read in fixed-size chunks */
    }

    av_new_packet(pkt, size[0] + size[1] + size[2]);
    pkt->stream_index = 0;
    pkt->flags |= PKT_FLAG_KEY;
    pkt->size= 0;

    for(i=0; i<3; i++){
        if(size[i]){
            ret[i]= get_buffer(f[i], pkt->data + pkt->size, size[i]);
            if (!s->is_pipe)
                url_fclose(f[i]);
            if(ret[i]>0)
                pkt->size += ret[i];
        }
    }

    /* the first plane must be read; U/V reads may legitimately be absent */
    if (ret[0] <= 0 || ret[1]<0 || ret[2]<0) {
        av_free_packet(pkt);
        return AVERROR_IO; /* signal EOF */
    } else {
        s->img_count++;
        s->img_number++;
        return 0;
    }
}
/*=======================================================================================*/
/* Probe whether `filename` is a recognised file-list/playlist.  Returns the
 * probe score 100 on success, 0 otherwise, AVERROR(EIO) on open failure.
 * When `s` is NULL a temporary I/O context is opened and closed here;
 * otherwise the caller's context is used and its read position restored. */
int url_is_file_list(ByteIOContext *s,const char *filename)
{
    int ret;
    list_demux_t *demux;
    ByteIOContext *lio=s;
    int64_t oldpos=0;   /* fix: was declared `int64_t *` and had the result
                           of url_ftell() stored into it as a pointer */
    if(!lio)
    {
        ret=url_fopen(&lio,filename,AVIO_FLAG_READ);
        if(ret!=0)
        {
            return AVERROR(EIO);
        }
    }
    else{
        oldpos=url_ftell(lio);   /* remember where the caller was */
    }
    demux=probe_demux(lio,filename);
    if(lio!=s)
    {
        url_fclose(lio);
    }
    else
    {
        url_fseek(lio, oldpos, SEEK_SET);   /* restore caller's position */
    }
    return demux!=NULL?100:0;
}
// Open the FLV output file (unless the format is file-less) and write the
// container header.  Returns S_OK on success, E_FAIL otherwise.
UINT CFlvUtils::OpenFlvFile()
{
   HRESULT hr = S_OK;

   // Fail if no output filename has been configured.
   if (!m_szFlvFile)
      hr = E_FAIL;

   if (SUCCEEDED(hr))
   {
      if (!(m_pAVOutputFormat->flags & AVFMT_NOFILE))
      {
         if (url_fopen(&m_pAVFormatContext->pb, m_szFlvFile, URL_WRONLY) < 0)
         {
            hr = E_FAIL;
            _ftprintf(stderr, _T("Error in CFlvUtils::OpenFlvFile():\n Could not open '%s'!\n"), m_szFlvFile);
            // TODO: error handling?
         }
      }
   }

   // Write the stream header, if any.
   // Fix: re-check hr -- previously the header was written even after
   // url_fopen() had failed, because the SUCCEEDED(hr) test had already
   // been passed before hr was set to E_FAIL.
   if (SUCCEEDED(hr))
      av_write_header(m_pAVFormatContext);

   return hr;
}
/* Open `filename`, probe it for a known list demuxer and parse the playlist
 * into `mgt`.  On success *pbio receives the opened I/O context (ownership
 * passes to the caller) and 0 is returned; on failure the context is closed
 * and AVERROR(EIO) (open failure) or -1 (probe/parse failure) is returned. */
static int list_open_internet(ByteIOContext **pbio, struct list_mgt *mgt, const char *filename, int flags)
{
    ByteIOContext *io = NULL;
    list_demux_t *demux;
    int err;

    err = url_fopen(&io, filename, flags);
    if (err != 0)
        return AVERROR(EIO);

    demux = probe_demux(io, filename);
    if (demux && demux->parser(mgt, io) > 0) {
        /* Parsed at least one entry: hand the open context to the caller. */
        *pbio = io;
        return 0;
    }

    /* Unknown format or empty playlist: clean up and report failure. */
    if (io)
        url_fclose(io);
    return -1;
}
/* Fetch the page at `url` into a freshly malloc()ed, NUL-terminated string.
 * The result is capped at 1 MiB (minus the terminator); longer pages are
 * truncated.  Returns NULL on open/allocation failure; caller frees. */
char * read_page(char * url)
{
    URL_FILE * page = url_fopen(url, "rb");
    if (page == NULL)
        return NULL;

    enum { RESULT_CAP = 1024 * 1024 };
    char * result = (char *)malloc(RESULT_CAP);
    if (result == NULL)
    {
        url_fclose(page);
        return NULL;
    }

    /* Fix: the original appended with strncat() and no bound at all, so any
     * page larger than the 1 MiB buffer overflowed it (and each append was
     * O(total) via strncat, making the loop quadratic).  Track the length
     * explicitly and memcpy within the cap instead. */
    size_t used = 0;
    char buffer[8];
    int n = url_fread(buffer, 1, sizeof buffer, page);
    while (n > 0)
    {
        if ((size_t)n > RESULT_CAP - 1 - used)
            n = (int)(RESULT_CAP - 1 - used);
        if (n == 0)
            break;   /* buffer full: stop reading */
        memcpy(result + used, buffer, (size_t)n);
        used += (size_t)n;
        n = url_fread(buffer, 1, sizeof buffer, page);
    }
    result[used] = '\0';

    url_fclose(page);
    return result;
}
/* Write one frame: each packet goes to its own numbered image file unless
 * the output is a pipe, in which case the muxer's stream is used directly. */
static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    VideoData *img = s->priv_data;
    ByteIOContext file_ctx, *out;
    char path[1024];

    if (img->is_pipe) {
        out = &s->pb;
    } else {
        if (get_frame_filename(path, sizeof(path), img->path, img->img_number) < 0 &&
            img->img_number > 1)
            return AVERROR_IO;
        out = &file_ctx;
        if (url_fopen(out, path, URL_WRONLY) < 0)
            return AVERROR_IO;
    }

    put_buffer(out, pkt->data, pkt->size);
    put_flush_packet(out);
    if (!img->is_pipe)
        url_fclose(out);

    img->img_number++;
    return 0;
}
bool CFFMPEGLoader::CreateMovie(const char *filename, const AVOutputFormat *format, const AVCodecContext *VideoCon, const AVCodecContext *AudioCon) { if(!filename) return false; AVOutputFormat *fmt; //*fmt=*format; fmt = guess_format(NULL, filename, NULL); pFormatCon = av_alloc_format_context(); if(!pFormatCon) { cout<<"Error while allocating format context\n"; return false; } bOutput=true; strcpy(pFormatCon->filename,filename); pFormatCon->oformat=fmt; pAudioStream=pVideoStream=NULL; if (fmt->video_codec != CODEC_ID_NONE) { pVideoStream = add_video_stream(pFormatCon, fmt->video_codec,VideoCon); } if (fmt->audio_codec != CODEC_ID_NONE) { pAudioStream = add_audio_stream(pFormatCon, fmt->audio_codec,AudioCon); } if (av_set_parameters(pFormatCon, NULL) < 0) { cout<<"Invalid output format parameters\n"; return false; } if (pVideoStream) open_stream(pFormatCon, pVideoStream); if (pAudioStream) open_stream(pFormatCon, pAudioStream); dump_format(pFormatCon, 0, filename, 1); if (!(fmt->flags & AVFMT_NOFILE)) { if (url_fopen(&pFormatCon->pb, filename, URL_WRONLY) < 0) { cout<<"Could not open '%s'"<<filename<<endl; return false; } } /* write the stream header, if any */ av_write_header(pFormatCon); return true; }
// Fetch the current ICY (shoutcast) stream title for type-2 sources.
// Returns an empty string for other source types or on any failure.
QString getTitle()
{
	if ( Type != 2 )
		return "";
	URL_FILE *uFile = url_fopen(curF.toUtf8().data(),"");
	if (!uFile)
		return "";
	// Fix: zero-initialise the buffer so a short read leaves NUL-padded,
	// defined data for getICYTitle() instead of heap garbage, and check
	// how much was actually read before parsing.
	char *data = new char[_DATA_BUFF]();
	int bread = url_fread(data,1,_DATA_BUFF,uFile);
	url_fclose(uFile);
	QString t2;
	if (bread > 0) {
		char *t = getICYTitle(data,_DATA_BUFF);
		t2 = t;
		delete[] t;
	}
	delete[] data;
	return t2;
}
// Probe whether the stream at `address` is Ogg Vorbis by attempting an
// ov_open_callbacks() on it.  Returns the plugin name on success, "" else.
QString InternetFormatSupport( const char* address )
{
	loadCURL();
	if ( !CURLloaded )
		return "";
	OggVorbis_File mus;
	URL_FILE *uF = url_fopen( address );
	// Fix: bail out when the URL cannot be opened instead of handing a NULL
	// stream to ov_open_callbacks() (the mpg123 variant of this helper
	// already checks).
	if ( !uF )
	{
		unloadCURL();
		return "";
	}
	bool loaded = !ov_open_callbacks(uF, &mus, NULL, 0, OV_CALLBACKS_URL);
	ov_clear(&mus);   // closes the stream via the callbacks
	unloadCURL();
	if ( loaded )
		return plugName;
	else
		return "";
}
/* Read the next image file whole into a single packet (or a 4096-byte
 * chunk when reading from a pipe). */
static int img_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoData *s = s1->priv_data;
    char filename[1024];
    int ret;
    ByteIOContext f1, *f;

    if (!s->is_pipe) {
        /* loop over input */
/*
        if (loop_input && s->img_number > s->img_last) {
            s->img_number = s->img_first;
        }*/
        if (get_frame_filename(filename, sizeof(filename), s->path, s->img_number)<0 && s->img_number > 1)
            return AVERROR_IO;
        f = &f1;
        if (url_fopen(f, filename, URL_RDONLY) < 0)
            return AVERROR_IO;
    } else {
        f = &s1->pb;
        if (url_feof(f))
            return AVERROR_IO;
    }

    if (s->is_pipe) {
        av_new_packet(pkt, 4096);
    }else{
        /* size the packet to hold the entire image file */
        av_new_packet(pkt, url_filesize(url_fileno(f)));
    }
    pkt->stream_index = 0;
    pkt->flags |= PKT_FLAG_KEY;

    ret = get_buffer(f, pkt->data, pkt->size);
    if (!s->is_pipe) {
        url_fclose(f);
    }

    if (ret <= 0) {
        av_free_packet(pkt);
        return AVERROR_IO; /* signal EOF */
    } else {
        s->img_count++;
        s->img_number++;
        return 0;
    }
}
/* Write a YUV420P image as three files: the Y plane goes to the stream the
 * caller already opened (pb2); the U and V planes are written to sibling
 * files obtained by rewriting the first letter of the extension. */
static int yuv_write(ByteIOContext *pb2, AVImageInfo *info)
{
    ByteIOContext pb1, *pb;
    char fname[1024], *p;
    int i, j, width, height;
    uint8_t *ptr;
    URLContext *h;
    static const char *ext = "YUV";

    /* XXX: hack hack */
    h = url_fileno(pb2);
    url_get_filename(h, fname, sizeof(fname));

    p = strrchr(fname, '.');
    if (!p || p[1] != 'Y')
        return AVERROR_IO;

    width = info->width;
    height = info->height;

    for(i=0;i<3;i++) {
        /* chroma planes are subsampled 2x in both directions */
        if (i == 1) {
            width >>= 1;
            height >>= 1;
        }

        if (i >= 1) {
            pb = &pb1;
            p[1] = ext[i];   /* '.U' then '.V' */
            if (url_fopen(pb, fname, URL_WRONLY) < 0)
                return AVERROR_IO;
        } else {
            pb = pb2;   /* luma goes to the caller's stream */
        }
        ptr = info->pict.data[i];
        for(j=0;j<height;j++) {
            put_buffer(pb, ptr, width);
            ptr += info->pict.linesize[i];   /* linesize may exceed width */
        }
        put_flush_packet(pb);
        if (i >= 1) {
            url_fclose(pb);
        }
    }
/* NOTE(review): this chunk appears truncated here -- the function's return
   statement and closing brace are not visible in this view. */
/* Read and decode the next image into a packet via av_read_image(),
 * looping over the input sequence when configured, or reading from a pipe. */
static int img_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoData *s = s1->priv_data;
    char filename[1024];
    int ret;
    ByteIOContext f1, *f;

    if (!s->is_pipe) {
        /* loop over input */
        if (loop_input && s->img_number > s->img_last) {
            s->img_number = s->img_first;
        }
        if (get_frame_filename(filename, sizeof(filename), s->path, s->img_number) < 0)
            return AVERROR_IO;
        f = &f1;
        if (url_fopen(f, filename, URL_RDONLY) < 0)
            return AVERROR_IO;
    } else {
        f = &s1->pb;
        if (url_feof(f))
            return AVERROR_IO;
    }

    av_new_packet(pkt, s->img_size);
    pkt->stream_index = 0;
    s->ptr = pkt->data;   /* read_packet_alloc_cb fills the packet from here */
    ret = av_read_image(f, filename, s->img_fmt, read_packet_alloc_cb, s);
    if (!s->is_pipe) {
        url_fclose(f);
    }
    if (ret < 0) {
        av_free_packet(pkt);
        return AVERROR_IO; /* signal EOF */
    } else {
        /* XXX: computing this pts is not necessary as it is done in
           the generic code too */
        pkt->pts = av_rescale((int64_t)s->img_count * s1->streams[0]->codec->time_base.num, s1->streams[0]->time_base.den, s1->streams[0]->codec->time_base.den) / s1->streams[0]->time_base.num;
        s->img_count++;
        s->img_number++;
        return 0;
    }
}
/* Write one frame.  For rawvideo the Y/U/V planes go to three separate
 * files whose names differ only in the last character; other codecs write
 * a single image file (or the pipe stream). */
static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    VideoData *img = s->priv_data;
    ByteIOContext pb1[3], *pb[3]= {&pb1[0], &pb1[1], &pb1[2]};
    char filename[1024];
    AVCodecContext *codec= s->streams[ pkt->stream_index ]->codec;
    int i;

    if (!img->is_pipe) {
        if (av_get_frame_filename(filename, sizeof(filename), img->path, img->img_number) < 0 && img->img_number>1)
            return AVERROR_IO;
        for(i=0; i<3; i++){
            if (url_fopen(pb[i], filename, URL_WRONLY) < 0)
                return AVERROR_IO;

            /* only rawvideo needs the extra .U/.V plane files */
            if(codec->codec_id != CODEC_ID_RAWVIDEO)
                break;
            filename[ strlen(filename) - 1 ]= 'U' + i;
        }
    } else {
        pb[0] = &s->pb;
    }

    if(codec->codec_id == CODEC_ID_RAWVIDEO){
        /* split the packet into a luma plane and two half-size chroma planes */
        int ysize = codec->width * codec->height;
        put_buffer(pb[0], pkt->data , ysize);
        put_buffer(pb[1], pkt->data + ysize, (pkt->size - ysize)/2);
        put_buffer(pb[2], pkt->data + ysize +(pkt->size - ysize)/2, (pkt->size - ysize)/2);
        put_flush_packet(pb[1]);
        put_flush_packet(pb[2]);
        url_fclose(pb[1]);
        url_fclose(pb[2]);
    }else{
        put_buffer(pb[0], pkt->data, pkt->size);
    }
    put_flush_packet(pb[0]);
    if (!img->is_pipe) {
        url_fclose(pb[0]);
    }

    img->img_number++;
    return 0;
}
// Configure the output side of the encoder: pick a muxer from the profile's
// format name, record the output filename and open the output stream.
// Returns ERR_GUESS_FORMAT when the format is unknown, otherwise the
// url_fopen() result (0 on success, negative on failure).
int FFMpegEncoder::configOutput()
{
	AVOutputFormat *fmt = guess_format(profile.formatStr,NULL,NULL);
	if (fmt == NULL)
		return ERR_GUESS_FORMAT;
	pFormatCtx->oformat = fmt;

	// Fix: bound the write into the fixed-size AVFormatContext::filename
	// array (was an unbounded sprintf).
	snprintf(pFormatCtx->filename, sizeof(pFormatCtx->filename),
	         "%s", profile.outputFilename);

	int ret = url_fopen(&pFormatCtx->pb, (char*)profile.outputFilename, URL_WRONLY);
	/*
	fifo_open(&pFormatCtx->pb);
	pFormatCtx->pb->write_packet = fifo_write;
	pFormatCtx->pb->seek = fifo_seek;
	AVFifoBuffer *fifo = getFifo();
	*/
	return ret;
}
/* Load an SJ index file: validate the magic number, read the header fields,
 * then read all index entries into sj_ic->indexes.
 * Returns 0 on success, -1 open failure, -2 bad magic, -3 allocation
 * failure, -4 empty index. */
int sj_index_load(char *filename, SJ_IndexContext *sj_ic)
{
    ByteIOContext pb;

    register_protocol(&file_protocol);
    if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
        // file could not be open
        return -1;
    }

    sj_ic->size = url_fsize(&pb) - HEADER_SIZE;
    sj_ic->index_num = (sj_ic->size / INDEX_SIZE);

    int64_t magic = get_le64(&pb);
    if (magic != 0x534A2D494E444558LL) {
        // not an index file
        url_fclose(&pb);
        return -2;
    }

    sj_ic->version = get_byte(&pb);
    sj_ic->start_pts = get_le64(&pb);
    sj_ic->start_dts = get_le64(&pb);
    sj_ic->start_timecode.frames = get_byte(&pb);
    sj_ic->start_timecode.seconds = get_byte(&pb);
    sj_ic->start_timecode.minutes = get_byte(&pb);
    sj_ic->start_timecode.hours = get_byte(&pb);

    if (!sj_ic->index_num) {
        // empty index
        url_fclose(&pb);
        return -4;
    }

    // Fix: allocate only after the header validates -- the old code
    // allocated before the magic check and leaked the array on the -2/-4
    // error paths -- and check the allocation result.
    sj_ic->indexes = av_malloc(sj_ic->index_num * sizeof(Index));
    if (!sj_ic->indexes) {
        url_fclose(&pb);
        return -3;
    }

    for (int i = 0; i < sj_ic->index_num; i++) {
        read_index(&sj_ic->indexes[i], &pb);
    }

    url_fclose(&pb);
    return 0;
}
/* Write one frame: pkt->data carries an AVPicture which is serialised as an
 * image via av_write_image(), either to a per-frame file or to the pipe. */
static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    VideoData *img = s->priv_data;
    AVStream *st = s->streams[pkt->stream_index];
    ByteIOContext pb1, *pb;
    AVPicture *picture;
    int width, height, ret;
    char filename[1024];
    AVImageInfo info;

    width = st->codec->width;
    height = st->codec->height;
    picture = (AVPicture *)pkt->data;

    if (!img->is_pipe) {
        if (get_frame_filename(filename, sizeof(filename), img->path, img->img_number) < 0)
            return AVERROR_IO;
        pb = &pb1;
        if (url_fopen(pb, filename, URL_WRONLY) < 0)
            return AVERROR_IO;
    } else {
        pb = &s->pb;
    }

    info.width = width;
    info.height = height;
    info.pix_fmt = st->codec->pix_fmt;
    info.interleaved = 0;    /* FIXME: there should be a way to set it right */
    info.pict = *picture;

    /* NOTE(review): the return value of av_write_image() is stored but
       never checked -- write failures are silently ignored here */
    ret = av_write_image(pb, img->img_fmt, &info);
    if (!img->is_pipe) {
        url_fclose(pb);
    }

    img->img_number++;
    return 0;
}
/* Open the current segment of an HLS variant stream.
 * Returns 0 when the requested sequence number is unavailable (logged and
 * skipped) or the stream is not yet finished, AVERROR_EOF past the end of a
 * finished stream, or a negative error on I/O failure.
 * When `skip` is set, frames are read and discarded until this variant
 * catches up with the dts of the last packet already returned, keeping
 * parallel variants in sync. */
static int open_variant(AppleHTTPContext *c, struct variant *var, int skip)
{
    int ret;

    if (c->cur_seq_no < var->start_seq_no) {
        av_log(NULL, AV_LOG_WARNING,
               "seq %d not available in variant %s, skipping\n",
               var->start_seq_no, var->url);
        return 0;
    }
    if (c->cur_seq_no - var->start_seq_no >= var->n_segments)
        return c->finished ? AVERROR_EOF : 0;
    ret = url_fopen(&var->pb,
                    var->segments[c->cur_seq_no - var->start_seq_no]->url,
                    URL_RDONLY);
    if (ret < 0)
        return ret;
    var->ctx->pb = var->pb;
    /* If this is a new segment in parallel with another one already opened,
     * skip ahead so they're all at the same dts. */
    if (skip && c->last_packet_dts != AV_NOPTS_VALUE) {
        while (1) {
            ret = av_read_frame(var->ctx, &var->pkt);
            if (ret < 0) {
                if (ret == AVERROR_EOF) {
                    /* segment ended before catching up: not an error */
                    reset_packet(&var->pkt);
                    return 0;
                }
                return ret;
            }
            if (var->pkt.dts >= c->last_packet_dts)
                break;   /* caught up: keep this packet pending */
            av_free_packet(&var->pkt);
        }
    }
    return 0;
}
/* Probe whether the stream at `address` is an mpg123-decodable format by
 * feeding an initial buffer of data to a feed decoder.  Returns the plugin
 * name on success, "" otherwise. */
QString InternetFormatSupport( const char* address )
{
	loadCURL();
	if ( !CURLloaded )
		return "";
	mpg123_handle *mus = mpg123_new(NULL, NULL);
	mpg123_open_feed( mus );
	URL_FILE *f = url_fopen( address );
	if ( !f )
	{
		unloadCURL();
		return "";
	}
	char *data;
	int _DATA_BUFF;
	/* getDataBuff allocates `data` and sets _DATA_BUFF; presumably it also
	   fills the first 10 bytes, since the read below starts at data+10 --
	   TODO(review): confirm that contract. */
	if ( !getDataBuff( f, url_fread, _DATA_BUFF, &data ) )
	{
		url_fclose(f);
		unloadCURL();
		return "";
	}
	int bread = url_fread(data+10, 1, _DATA_BUFF-10, f);
	/* NOTE(review): the decode length `bread` excludes the 10 bytes already
	   in `data` -- verify this offset handling is intended. */
	mpg123_decode( mus, (const unsigned char*)data, bread, 0,0,0 );
	bool loaded = getMusInfo( mus, 0,0,0,0,0, -1, "" );
	mpg123_close(mus);
	mpg123_delete(mus);
	delete[] data;
	url_fclose(f);
	unloadCURL();
	if ( loaded )
		return plugName;
	else
		return "";
}
// ######################################################################
// FfmpegEncoder constructor: resolves the requested output format by name,
// rewrites the output filename to carry the format's preferred extension,
// and configures the codec context (plus, optionally, a format context for
// muxing) for encoding.  The #ifdef blocks span several generations of the
// ffmpeg API.
FfmpegEncoder::FfmpegEncoder(const std::string& fname,
                             const std::string& codecname,
                             const int bitrate,
                             const int framerate,
                             const int frameratebase,
                             const Dims& dims,
                             const int bufsz,
                             const bool useFormatContext)
  :
  itsFile(0),
  itsContext(),
  itsFormatContext(0),
  itsFrameNumber(0),
  itsOutbufSize(bufsz),
  itsFrameSizeRange(),
  itsUseFormatContext(useFormatContext)
{
  GVX_TRACE(__PRETTY_FUNCTION__);

  // no need to guard these functions for being called multiple times;
  // they all have internal guards
  av_register_all();
  avcodec_init();
  avcodec_register_all();

  AVOutputFormat* oformat = NULL;

#if LIBAVCODEC_VERSION_MAJOR >= 53 && LIBAVCODEC_VERSION_MINOR >= 21
  if (codecname.compare("List") == 0) { // list available codecs
      LINFO("##### Available output codecs (not all may work for video):");
      AVOutputFormat* f = av_oformat_next(NULL);
      while(f) {
        LINFO("%s: %s %d", f->name, f->long_name, f->flags);
        f = av_oformat_next(f);
      }
      LFATAL("Please select a codec from this list");
  } else { // format is given
      // no av_find_output_format()?? let's do it by hand...
      AVOutputFormat* f = av_oformat_next(NULL);
      while(f) {
        if (codecname.compare(f->name) == 0) { oformat = f; break; }
        f = av_oformat_next(f);
      }
  }
#else
  if (codecname.compare("List") == 0) { // list available codecs
      LINFO("##### Available output codecs (not all may work for video):");
      for(AVOutputFormat* f = first_oformat; f != NULL; f = f->next)
        LINFO("%s: %s %d", f->name, f->long_name, f->flags);
      LFATAL("Please select a codec from this list");
  } else { // format is given
      // no av_find_output_format()?? let's do it by hand...
      for(AVOutputFormat* f = first_oformat; f != NULL; f = f->next)
        if (codecname.compare(f->name) == 0) { oformat = f; break; }
  }
#endif

  if (oformat == 0)
    LFATAL("No such video codec '%s';\n"
           "try re-running with --output-codec=List to see a list\n"
           "of available codecs", codecname.c_str());

  // Build the output extension from the format's comma-separated extension
  // list: take everything up to the first comma.
  char ext[100];
  ext[0] = '.';
  uint i;
  for (i = 0; i < strlen(oformat->extensions); i ++)
    if (oformat->extensions[i] == ',') break;
    else ext[i+1] = oformat->extensions[i];
  ext[i+1] = '\0';

  LINFO("Using output format '%s' (%s), extension %s", oformat->name,
        oformat->long_name, ext);

  // Replace any existing extension on fname with the format's own one.
  std::string oname(fname);
  std::string::size_type idx1 = oname.rfind('/', oname.npos);
  std::string::size_type idx2 = oname.rfind('.', oname.npos);
  // must check that idx2 is valid; otherwise if we do
  // oname.erase(idx2) with e.g. idx2==npos then we will get a
  // std::out_of_range exception
  if (idx2 < oname.size() && idx2 > idx1)
    oname.erase(idx2, oname.npos);
  oname.append(ext);
  LINFO("Output file: %s", oname.c_str());

  if (itsUseFormatContext)
    {
#ifdef INVT_FFMPEG_HAS_FORMATCONTEXT_FUNCTIONS
      LINFO("Using FormatContext to output data");
#ifdef AVMEDIA_TYPE_VIDEO
      itsFormatContext = avformat_alloc_context();
#else
      itsFormatContext = av_alloc_format_context();
#endif
      if (!itsFormatContext)
        LFATAL("Cannot allocate format context");
      itsFormatContext->oformat = oformat;

      itsAVStream = av_new_stream(itsFormatContext, 0);
      if (!itsAVStream)
        LFATAL("Can not allocate AVStream");
#else
      LFATAL("Need a new version of ffmpeg libs for this option");
      itsFormatContext = NULL;
#endif
    }

  AVCodec* const codec = avcodec_find_encoder(oformat->video_codec);
  if (codec == NULL) LFATAL("codec not found");

#if defined(INVT_FFMPEG_HAS_DEFAULTS_FUNCTIONS)
  avcodec_get_context_defaults(&itsContext);
#else
  {
    // no defaults helper in this ffmpeg: copy defaults out of a freshly
    // allocated context instead
    AVCodecContext* const tmp = avcodec_alloc_context();
    memcpy(&itsContext, tmp, sizeof(AVCodecContext));
    free(tmp);
  }
#endif

  itsContext.bit_rate = bitrate;

  // Be sure to set itsContext.pix_fmt -- it may occasionally
  // appear to work to leave pix_fmt unset, because the value we want,
  // PIX_FMT_YUV420P, has the enum value of 0, so if the uninitialized
  // memory for pix_fmt happens to have the value 0, then we'll slip
  // through without setting it explicitly.
  itsContext.pix_fmt = PIX_FMT_YUV420P;

  /* resolution must be a multiple of two */
  itsContext.width = dims.w();
  itsContext.height = dims.h();

#if defined(INVT_FFMPEG_AVCODECCONTEXT_HAS_TIME_BASE)
  AVRational time_base = { frameratebase, framerate };
  itsContext.time_base = time_base;
  const int frb = frameratebase;
#elif LIBAVCODEC_VERSION_INT >= 0x000406 && LIBAVCODEC_BUILD > 4665
  itsContext.frame_rate = framerate;
  const int frb = frameratebase;
  itsContext.frame_rate_base = frb;
#else
  itsContext.frame_rate = framerate;
  const int frb = FRAME_RATE_BASE;
#endif

  itsContext.gop_size = 10; /* emit one intra frame every ten frames */

  // B-frames only for the MPEG codec family; all others get none.
  if(codec->id != CODEC_ID_MPEG4 &&
     codec->id != CODEC_ID_MPEG1VIDEO &&
     codec->id != CODEC_ID_MPEG2VIDEO)
    itsContext.max_b_frames = 0;
  else
    itsContext.max_b_frames = 1;

  itsFrameNumber = 0;

  LINFO("using max_b_frames=%i bitrate=%u width=%u height=%u framerate=%u frameratebase=%u",
        itsContext.max_b_frames, itsContext.bit_rate,
        itsContext.width, itsContext.height, framerate, frb);

  if (avcodec_open(&itsContext, codec) < 0)
    LFATAL("could not open codec\n");

  if (itsUseFormatContext)
    {
#ifdef INVT_FFMPEG_HAS_FORMATCONTEXT_FUNCTIONS
      // Mirror the encoder's parameters into the muxer's stream context.
      AVCodecContext *c = itsAVStream->codec;
      c->codec_id = itsContext.codec_id;
#ifdef CODEC_TYPE_VIDEO
      c->codec_type = CODEC_TYPE_VIDEO;
#else
#ifdef AVMEDIA_TYPE_VIDEO
      c->codec_type = AVMEDIA_TYPE_VIDEO;
#endif
#endif

      /* put sample parameters */
      c->bit_rate = itsContext.bit_rate;
      /* resolution must be a multiple of two */
      c->width = itsContext.width;
      c->height = itsContext.height;
      /* time base: this is the fundamental unit of time (in seconds) in terms
         of which frame timestamps are represented. for fixed-fps content,
         timebase should be 1/framerate and timestamp increments should be
         identically 1. */
#if defined(INVT_FFMPEG_AVCODECCONTEXT_HAS_TIME_BASE)
      c->time_base.den = itsContext.time_base.den;
      c->time_base.num = itsContext.time_base.num;
#endif
      c->gop_size = 12; /* emit one intra frame every twelve frames at most */
      c->pix_fmt = itsContext.pix_fmt;

      /* set the output parameters (must be done even if no parameters). */
      if (av_set_parameters(itsFormatContext, NULL) < 0)
        LFATAL("Invalid output format parameters");

#if defined(INVT_FFMPEG_URL_OPEN_FUNC_TAKES_SINGLE_POINTER)
#if defined(INVT_FFMPEG_AVFORMATCONTEXT_BYTEIO_ISPOINTER)
      if (url_fopen(itsFormatContext->pb, oname.c_str(), URL_WRONLY) < 0)
        LFATAL("Could not open '%s'", oname.c_str());
#else
      if (url_fopen(&itsFormatContext->pb, oname.c_str(), URL_WRONLY) < 0)
        LFATAL("Could not open '%s'", oname.c_str());
#endif
#else
#if defined(INVT_FFMPEG_AVFORMATCONTEXT_BYTEIO_ISPOINTER)
      if (url_fopen(&itsFormatContext->pb, oname.c_str(), URL_WRONLY) < 0)
        LFATAL("Could not open '%s'", oname.c_str());
#else
      LFATAL("Could not open '%s' ffmpeg version mismatch", oname.c_str());
#endif
#endif //INVT_FFMPEG_URL_OPEN_FUNC_TAKES_SINGLE_POINTER)

      /* write the stream header, if any */
      av_write_header(itsFormatContext);
#else
      LFATAL("Need a new version of FFMPEG for this option");
#endif
    }
  else
    {
      // No muxer: raw encoded frames go straight to a plain file.
      itsFile = fopen(oname.c_str(), "w");
      if (itsFile==NULL)
        LFATAL("could not open file! %s", oname.c_str());
    }

  LINFO("EnCoder Inited");
}
/* Open the encoder codec, allocate the raw output picture (plus an
   intermediate picture when pixel-format conversion is needed), open the
   output file unless the container is file-less, and write the stream
   header. */
void VideoStream::OpenStream()
{
	/* now that all the parameters are set, we can open the
	   video codecs and allocate the necessary encode buffers */
	if ( ost )
	{
#if ZM_FFMPEG_SVN
		AVCodecContext *c = ost->codec;
#else
		AVCodecContext *c = &ost->codec;
#endif

		/* find the video encoder */
		AVCodec *codec = avcodec_find_encoder(c->codec_id);
		if ( !codec )
		{
			Panic( "codec not found" );
		}

		/* open the codec */
		if ( avcodec_open(c, codec) < 0 )
		{
			Panic( "Could not open codec" );
		}

		/* allocate the encoded raw picture */
		opicture = avcodec_alloc_frame();
		if ( !opicture )
		{
			Panic( "Could not allocate opicture" );
		}
		int size = avpicture_get_size( c->pix_fmt, c->width, c->height);
		uint8_t *opicture_buf = (uint8_t *)malloc(size);
		if ( !opicture_buf )
		{
			av_free(opicture);
			Panic( "Could not allocate opicture" );
		}
		avpicture_fill( (AVPicture *)opicture, opicture_buf, c->pix_fmt, c->width, c->height );

		/* if the output format is not RGB24, then a temporary RGB24
		   picture is needed too. It is then converted to the required
		   output format */
		tmp_opicture = NULL;
		if ( c->pix_fmt != pf )
		{
			tmp_opicture = avcodec_alloc_frame();
			if ( !tmp_opicture )
			{
				Panic( "Could not allocate temporary opicture" );
			}
			int size = avpicture_get_size( pf, c->width, c->height);
			uint8_t *tmp_opicture_buf = (uint8_t *)malloc(size);
			if (!tmp_opicture_buf)
			{
				av_free( tmp_opicture );
				Panic( "Could not allocate temporary opicture" );
			}
			avpicture_fill( (AVPicture *)tmp_opicture, tmp_opicture_buf, pf, c->width, c->height );
		}
	}

	/* open the output file, if needed */
	if ( !(of->flags & AVFMT_NOFILE) )
	{
		/* avio_open vs url_fopen depending on the libavutil generation */
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
		if ( avio_open(&ofc->pb, filename, URL_WRONLY) < 0 )
#else
		if ( url_fopen(&ofc->pb, filename, URL_WRONLY) < 0 )
#endif
		{
			Fatal( "Could not open '%s'", filename );
		}
	}

	video_outbuf = NULL;
	if ( !(ofc->oformat->flags & AVFMT_RAWPICTURE) )
	{
		/* allocate output buffer */
		/* XXX: API change will be done */
		video_outbuf_size = 200000;
		video_outbuf = (uint8_t *)malloc(video_outbuf_size);
	}

	/* write the stream header, if any */
	av_write_header(ofc);
}
/* Initialise the libavformat muxer for the requested container type,
   create a video stream (and optionally an audio stream) from the supplied
   A/V parameters, open the output file and write the container header.
   Returns 1 on success, 0 on any failure. */
uint8_t lavMuxer::open(const char *filename,uint32_t inbitrate, ADM_MUXER_TYPE type, aviInfo *info,uint32_t videoExtraDataSize,
                       uint8_t *videoExtraData, WAVHeader *audioheader,uint32_t audioextraSize,uint8_t *audioextraData)
{
	AVCodecContext *c;
	_type=type;
	_fps1000=info->fps1000;

	/* map the muxer type onto one of libavformat's named output formats */
	switch(_type)
	{
		case MUXER_TS:
			fmt=guess_format("mpegts", NULL, NULL);
			break;
		case MUXER_DVD:
			fmt = guess_format("dvd", NULL, NULL);
			break;
		case MUXER_VCD:
			fmt = guess_format("vcd", NULL, NULL);
			break;
		case MUXER_SVCD:
			fmt = guess_format("svcd", NULL, NULL);
			break;
		case MUXER_MP4:
			fmt = guess_format("mp4", NULL, NULL);
			break;
		case MUXER_PSP:
			fmt = guess_format("psp", NULL, NULL);
			break;
		default:
			fmt=NULL;
	}
	if (!fmt)
	{
		printf("Lav:Cannot guess format\n");
		return 0;
	}
	oc = av_alloc_format_context();
	if (!oc)
	{
		printf("Lav:Cannot allocate context\n");
		return 0;
	}
	oc->oformat = fmt;
	snprintf(oc->filename,1000,"file://%s",filename);

	// Video
	//________
	video_st = av_new_stream(oc, 0);
	if (!video_st)
	{
		printf("Lav: new stream failed\n");
		return 0;
	}
	c = video_st->codec;
	/* per-container codec id and rate-control settings */
	switch(_type)
	{
	case MUXER_MP4:
		if(isMpeg4Compatible(info->fcc))
		{
			c->codec_id = CODEC_ID_MPEG4;
			c->has_b_frames=1; // in doubt...
		}else
		{
			if(isH264Compatible(info->fcc))
			{
				c->has_b_frames=1; // in doubt...
				c->codec_id = CODEC_ID_H264;
				/* NOTE(review): this hand-built AVCodec (only `name` filled
				   in) is assigned to c->codec and never freed -- confirm
				   this is intentional. */
				c->codec=new AVCodec;
				memset(c->codec,0,sizeof(AVCodec));
				c->codec->name=ADM_strdup("H264");
			}
			else
			{
				c->codec_id = CODEC_ID_MPEG4; // Default value
				printf("Ooops, cant mux that...\n");
				printf("Ooops, cant mux that...\n");
				printf("Ooops, cant mux that...\n");
				//return 0;
			}
		}
		if(videoExtraDataSize)
		{
			c->extradata=videoExtraData;
			c->extradata_size= videoExtraDataSize;
		}
		c->rc_buffer_size=8*1024*224;
		c->rc_max_rate=9500*1000;
		c->rc_min_rate=0;
		if(!inbitrate)
			c->bit_rate=9000*1000;
		else
			c->bit_rate=inbitrate;
		break;
	case MUXER_TS:
		c->codec_id = CODEC_ID_MPEG2VIDEO;
		c->rc_buffer_size=8*1024*224;
		c->rc_max_rate=9500*1000;
		c->rc_min_rate=0;
		if(!inbitrate)
			c->bit_rate=9000*1000;
		else
			c->bit_rate=inbitrate;
		break;
	case MUXER_DVD:
		c->codec_id = CODEC_ID_MPEG2VIDEO;
		c->rc_buffer_size=8*1024*224;
		c->rc_max_rate=9500*1000;
		c->rc_min_rate=0;
		if(!inbitrate)
			c->bit_rate=9000*1000;
		else
			c->bit_rate=inbitrate;
		break;
	case MUXER_VCD:
		c->codec_id = CODEC_ID_MPEG1VIDEO;
		c->rc_buffer_size=8*1024*40;
		c->rc_max_rate=1152*1000;
		c->rc_min_rate=1152*1000;
		c->bit_rate=1152*1000;
		break;
	case MUXER_SVCD:
		c->codec_id = CODEC_ID_MPEG2VIDEO;
		c->rc_buffer_size=8*1024*112;
		c->rc_max_rate=2500*1000;
		c->rc_min_rate=0*1000;
		if(!inbitrate)
			c->bit_rate=2040*1000;
		else
			c->bit_rate=inbitrate;
		break;
	default:
		ADM_assert(0);
	}
	c->codec_type = CODEC_TYPE_VIDEO;
	c->flags=CODEC_FLAG_QSCALE;
	c->width = info->width;
	c->height = info->height;

	/* express the fps1000 frame rate as an AVRational time base */
	switch(_fps1000)
	{
		case 25000:
			c->time_base= (AVRational){1001,25025};
			//c->frame_rate = 25025;
			//c->frame_rate_base = 1001;
			break;
		case 23976:
			/*
			c->frame_rate = 24000;
			c->frame_rate_base = 1001;
			break;
			*/
			if(_type==MUXER_MP4)
			{
				c->time_base= (AVRational){1001,24000};
				break;
			}
			/* NOTE(review): for non-MP4 muxers, 23976 falls through to the
			   29970 case -- confirm that is intended. */
		case 29970:
			c->time_base= (AVRational){1001,30000};
			//c->frame_rate = 30000;
			//c->frame_rate_base = 1001;
			break;
		default:
			if(_type==MUXER_MP4)
			{
				c->time_base= (AVRational){1000,_fps1000};
				break;
			}
			else
			{
				GUI_Error_HIG(_("Incompatible frame rate"), NULL);
				return 0;
			}
	}
	c->gop_size=15;
	c->max_b_frames=2;
	c->has_b_frames=1;

	// Audio
	//________
	if(audioheader)
	{
		audio_st = av_new_stream(oc, 1);
		if (!audio_st)
		{
			printf("Lav: new stream failed\n");
			return 0;
		}
		c = audio_st->codec;
		c->frame_size=1024; //For AAC mainly, sample per frame
		switch(audioheader->encoding)
		{
			case WAV_AC3:
				c->codec_id = CODEC_ID_AC3;break;
			case WAV_MP2:
				c->codec_id = CODEC_ID_MP2;break;
			case WAV_MP3:
#warning FIXME : Probe deeper
				c->frame_size=1152;
				c->codec_id = CODEC_ID_MP3;
				break;
			case WAV_PCM:
				// One chunk is 10 ms (1/100 of fq)
				c->frame_size=4;
				c->codec_id = CODEC_ID_PCM_S16LE;break;
			case WAV_AAC:
				c->extradata=audioextraData;
				c->extradata_size= audioextraSize;
				c->codec_id = CODEC_ID_AAC;
				break;
			default:
				printf("Cant mux that ! audio\n");
				printf("Cant mux that ! audio\n");
				c->codec_id = CODEC_ID_MP2;
				return 0;
				break;
		}
		c->codec_type = CODEC_TYPE_AUDIO;
		c->bit_rate = audioheader->byterate*8;
		c->rc_buffer_size=(c->bit_rate/(2*8)); // 500 ms worth
		_audioFq=c->sample_rate = audioheader->frequency;
		c->channels = audioheader->channels;
		_audioByterate=audioheader->byterate;
	}
	// /audio

	//----------------------
	/* container-level packet size and mux-rate settings per target */
	switch(_type)
	{
		case MUXER_MP4:
			oc->mux_rate=10080*1000; // Needed ?
			break;
		case MUXER_TS:
			oc->mux_rate=10080*1000;
			break;
		case MUXER_DVD:
			oc->packet_size=2048;
			oc->mux_rate=10080*1000;
			break;
		case MUXER_VCD:
			oc->packet_size=2324;
			oc->mux_rate=2352 * 75 * 8;
			break;
		case MUXER_SVCD:
			oc->packet_size=2324;
			oc->mux_rate=2*2352 * 75 * 8; // ?
			break;
		default:
			ADM_assert(0);
	}
	oc->preload=AV_TIME_BASE/10; // 100 ms preloading
	oc->max_delay=200*1000; // 500 ms

	if (av_set_parameters(oc, NULL) < 0)
	{
		printf("Lav: set param failed \n");
		return 0;
	}
	if (url_fopen(&(oc->pb), filename, URL_WRONLY) < 0)
	{
		printf("Lav: Failed to open file :%s\n",filename);
		return 0;
	}
	av_write_header(oc);
	dump_format(oc, 0, filename, 1);
	printf("lavformat mpeg muxer initialized\n");
	_running=1;
	one=(1000*1000*1000)/_fps1000;
	_curDTS=one;
	return 1;
}
/* Write one frame.  Rawvideo packets are split across three plane files;
 * JPEG2000 codestreams may need a jp2 wrapper built from the stream's
 * extradata; everything else is written verbatim to a per-frame file (or
 * the pipe stream). */
static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    VideoData *img = s->priv_data;
    ByteIOContext *pb[3];
    char filename[1024];
    AVCodecContext *codec= s->streams[ pkt->stream_index ]->codec;
    int i;

    if (!img->is_pipe) {
        if (av_get_frame_filename(filename, sizeof(filename), img->path, img->img_number) < 0 && img->img_number>1) {
            av_log(s, AV_LOG_ERROR, "Could not get frame filename from pattern\n");
            return AVERROR(EIO);
        }
        for(i=0; i<3; i++){
            if (url_fopen(&pb[i], filename, URL_WRONLY) < 0) {
                av_log(s, AV_LOG_ERROR, "Could not open file : %s\n",filename);
                return AVERROR(EIO);
            }

            /* only rawvideo needs the extra .U/.V plane files */
            if(codec->codec_id != CODEC_ID_RAWVIDEO)
                break;
            filename[ strlen(filename) - 1 ]= 'U' + i;
        }
    } else {
        pb[0] = s->pb;
    }

    if(codec->codec_id == CODEC_ID_RAWVIDEO){
        /* split the packet into a luma plane and two half-size chroma planes */
        int ysize = codec->width * codec->height;
        put_buffer(pb[0], pkt->data , ysize);
        put_buffer(pb[1], pkt->data + ysize, (pkt->size - ysize)/2);
        put_buffer(pb[2], pkt->data + ysize +(pkt->size - ysize)/2, (pkt->size - ysize)/2);
        put_flush_packet(pb[1]);
        put_flush_packet(pb[2]);
        url_fclose(pb[1]);
        url_fclose(pb[2]);
    }else{
        if(av_str2id(img_tags, s->filename) == CODEC_ID_JPEG2000){
            AVStream *st = s->streams[0];
            /* raw codestream plus jp2h extradata: emit the jp2 signature,
               ftyp and header boxes ahead of the codestream */
            if(st->codec->extradata_size > 8 &&
               AV_RL32(st->codec->extradata+4) == MKTAG('j','p','2','h')){
                if(pkt->size < 8 || AV_RL32(pkt->data+4) != MKTAG('j','p','2','c'))
                    goto error;
                put_be32(pb[0], 12);
                put_tag (pb[0], "jP ");
                put_be32(pb[0], 0x0D0A870A); // signature
                put_be32(pb[0], 20);
                put_tag (pb[0], "ftyp");
                put_tag (pb[0], "jp2 ");
                put_be32(pb[0], 0);
                put_tag (pb[0], "jp2 ");
                put_buffer(pb[0], st->codec->extradata, st->codec->extradata_size);
            }else if(pkt->size < 8 ||
                     (!st->codec->extradata_size &&
                      AV_RL32(pkt->data+4) != MKTAG('j','P',' ',' '))){ // signature
            error:
                av_log(s, AV_LOG_ERROR, "malformated jpeg2000 codestream\n");
                return -1;
            }
        }
        put_buffer(pb[0], pkt->data, pkt->size);
    }
    put_flush_packet(pb[0]);
    if (!img->is_pipe) {
        url_fclose(pb[0]);
    }

    img->img_number++;
    return 0;
}
/*
 * TS muxer thread entry point: drains the audio and video packet queues of
 * a GF_AbstractTSMuxer and writes the packets, interleaved by PTS, into the
 * libavformat output context until mux->encode is cleared.
 *
 * param is the GF_AbstractTSMuxer*; always returns 0 (thread exit code).
 * On exit the stream trailer is written and the output file closed.
 */
static u32 ts_interleave_thread_run(void *param) {
    GF_AbstractTSMuxer * mux = (GF_AbstractTSMuxer *) param;
    AVStream * video_st = mux->video_st;
    AVStream * audio_st = mux->audio_st;
    u64 audio_pts, video_pts;
    u64 audioSize, videoSize, videoKbps, audioKbps;
    u32 pass;
    u32 now, start;
    /* open the output file, if needed */
    if (!(mux->oc->oformat->flags & AVFMT_NOFILE)) {
        if (url_fopen(&mux->oc->pb, mux->destination, URL_WRONLY) < 0) {
            fprintf(stderr, "Could not open '%s'\n", mux->destination);
            return 0;
        }
    }
    /* write the stream header, if any */
    av_write_header(mux->oc);
    audio_pts = video_pts = 0;
    // Buffering... let producers fill the queues before we start draining
    gf_sleep(1000);
    now = start = gf_sys_clock();
    audioSize = videoSize = 0;
    audioKbps = videoKbps = 0;
    pass = 0;
    while ( mux->encode) {
        pass++;
        /* Every 16th iteration, recompute and log the per-second bitrates. */
        if (0== (pass%16)) {
            now = gf_sys_clock();
            if (now - start > 1000) {
                /* bytes -> kbit/s over the elapsed window (ms) */
                videoKbps = videoSize * 8000 / (now-start) / 1024;
                audioKbps = audioSize * 8000 / (now-start) / 1024;
                audioSize = videoSize = 0;
                start = now;
                GF_LOG(GF_LOG_DEBUG, GF_LOG_MODULE, ("\rPTS audio="LLU" ("LLU"kbps), video="LLU" ("LLU"kbps)", audio_pts, audioKbps, video_pts, videoKbps));
            }
        }
        /* write interleaved audio and video frames */
        /* Prefer audio when: there is no video stream, or audio has no PTS
         * yet but a packet is ready, or audio lags behind video. */
        if (!video_st ||
            (audio_pts == AV_NOPTS_VALUE && has_packet_ready(mux, mux->audioMx, &mux->audioPackets)) ||
            ((audio_st && audio_pts < video_pts && audio_pts!= AV_NOPTS_VALUE))) {
            AVPacketList * pl = wait_for_packet(mux, mux->audioMx, &mux->audioPackets);
            if (!pl)
                goto exit;   /* queue closed / muxer stopping */
            audio_pts = pl->pkt.pts ;
            audioSize+=pl->pkt.size;
            if (pl->pkt.pts == AV_NOPTS_VALUE) {
                pl->pkt.pts = 0;
            }
            if (av_interleaved_write_frame(mux->oc, &(pl->pkt)) < 0) {
                GF_LOG(GF_LOG_ERROR, GF_LOG_MODULE, ("[AVRedirect] : failed to write audio interleaved frame audio_pts="LLU", video_pts="LLU"\n", audio_pts, video_pts));
            }
            gf_free(pl);
        } else {
            AVPacketList * pl = wait_for_packet(mux, mux->videoMx, &mux->videoPackets);
            if (!pl)
                goto exit;   /* queue closed / muxer stopping */
            video_pts = pl->pkt.pts;
            /* write the compressed frame in the media file */
            /* NOTE(review): deliberately disabled (if (0 && ...)) frame-drop
             * path that would skip queued video packets when video falls
             * behind audio; kept for reference. */
            if (0 && audio_pts != AV_NOPTS_VALUE && audio_pts > video_pts && pl->next) {
                u32 skipped = 0;
                u64 first = video_pts;
                /* We may be too slow... */
                gf_mx_p(mux->videoMx);
                while (video_pts < audio_pts && pl->next) {
                    AVPacketList * old = pl;
                    // We skip frames...
                    pl = pl->next;
                    video_pts = pl->pkt.pts;
                    skipped++;
                    gf_free(old);
                }
                mux->videoPackets = pl->next;
                gf_mx_v(mux->videoMx);
                if (skipped > 0)
                    GF_LOG(GF_LOG_INFO, GF_LOG_MODULE, ("Skipped %u video frames, frame was "LLU", but is now "LLU"\n", skipped, first, video_pts));
            }
            videoSize+=pl->pkt.size;
            video_pts = pl->pkt.pts; // * video_st->time_base.num / video_st->time_base.den;
            assert( video_pts);
            if (av_interleaved_write_frame(mux->oc, &(pl->pkt)) < 0) {
                GF_LOG(GF_LOG_ERROR, GF_LOG_MODULE, ("[AVRedirect] : failed to write video interleaved frame audio_pts="LLU", video_pts="LLU"\n", audio_pts, video_pts));
            }
            gf_free(pl);
        }
        /* yield briefly so producers can refill the queues */
        gf_sleep(1);
    }
exit:
    GF_LOG(GF_LOG_INFO, GF_LOG_MODULE, ("[AVRedirect] Ending TS thread...\n"));
    av_write_trailer(mux->oc);
    if (!(mux->oc->oformat->flags & AVFMT_NOFILE)) {
        /* close the output file */
        url_fclose(mux->oc->pb);
    }
    return 0;
}
void create_video_file(const char*filename,int width,int height) { /* auto detect the output format from the name. default is mpeg. */ //fmt = av_guess_format(NULL, filename, NULL); #if (LIBAVFORMAT_VERSION_INT>=AV_VERSION_INT(52,81,0)) #define libavformat_guess_format av_guess_format #else #define libavformat_guess_format guess_format #endif fmt = libavformat_guess_format(NULL, filename, NULL); if (!fmt) { printf("Could not deduce output format from file extension: using MPEG.\n"); //fmt = av_guess_format("mpeg", NULL, NULL); fmt = libavformat_guess_format("mpeg", NULL, NULL); } if (!fmt) { fprintf(stderr, "Could not find suitable output format\n"); exit(1); } /* allocate the output media context */ oc = avformat_alloc_context(); if (!oc) { fprintf(stderr, "Memory error\n"); exit(1); } oc->oformat = fmt; snprintf(oc->filename, sizeof(oc->filename), "%s", filename); /* add the audio and video streams using the default format codecs and initialize the codecs */ video_st = NULL; if (fmt->video_codec != CODEC_ID_NONE) { video_st = add_video_stream(oc, fmt->video_codec,width,height); } /* set the output parameters (must be done even if no parameters). */ if (av_set_parameters(oc, NULL) < 0) { fprintf(stderr, "Invalid output format parameters\n"); exit(1); } dump_format(oc, 0, filename, 1); /* now that all the parameters are set, we can open the audio and video codecs and allocate the necessary encode buffers */ if (video_st) open_video(oc, video_st); /* open the output file, if needed */ if (!(fmt->flags & AVFMT_NOFILE)) { if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) { fprintf(stderr, "Could not open '%s'\n", filename); exit(1); } } /* write the stream header, if any */ av_write_header(oc); }
/*
 * Create and open the output muxer for a recording session: allocate the
 * AVFormatContext, guess the container from the filename, open the output
 * I/O, create the video and audio streams, and write the container header.
 *
 * handle: recording session; handle->params.filename names the output,
 *         handle->video/.audio supply the pre-opened codec contexts.
 * Returns true on success. On failure, false is returned and all resources
 * acquired here (context, output I/O) are released.
 */
static bool ffemu_init_muxer(ffemu_t *handle)
{
   AVFormatContext *ctx = avformat_alloc_context();
   av_strlcpy(ctx->filename, handle->params.filename, sizeof(ctx->filename));
   ctx->oformat = av_guess_format(NULL, ctx->filename, NULL);

   if (!ctx->oformat)
   {
      av_free(ctx); // fix: don't leak the context when no format matches
      return false;
   }

   // FFmpeg sure likes to make things difficult.
#if defined(AVIO_FLAG_WRITE)
#define FFMPEG_FLAG_RW AVIO_FLAG_WRITE
#elif defined(AVIO_WRONLY)
#define FFMPEG_FLAG_RW AVIO_WRONLY
#elif defined(URL_WRONLY)
#define FFMPEG_FLAG_RW URL_WRONLY
#else
#define FFMPEG_FLAG_RW 2 // Seems to be consistent, but you never know.
#endif

#ifdef HAVE_FFMPEG_AVIO_OPEN
   if (avio_open(&ctx->pb, ctx->filename, FFMPEG_FLAG_RW) < 0)
#else
   if (url_fopen(&ctx->pb, ctx->filename, FFMPEG_FLAG_RW) < 0)
#endif
   {
      av_free(ctx);
      return false;
   }

#ifdef HAVE_FFMPEG_AVFORMAT_NEW_STREAM
   AVStream *stream = avformat_new_stream(ctx, handle->video.encoder);
#else
   unsigned stream_cnt = 0;
   AVStream *stream = av_new_stream(ctx, stream_cnt++);
#endif
   stream->codec = handle->video.codec;

   if (ctx->oformat->flags & AVFMT_GLOBALHEADER)
      handle->video.codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

   handle->muxer.vstream = stream;
   handle->muxer.vstream->sample_aspect_ratio = handle->video.codec->sample_aspect_ratio;

#ifdef HAVE_FFMPEG_AVFORMAT_NEW_STREAM
   stream = avformat_new_stream(ctx, handle->audio.encoder);
#else
   stream = av_new_stream(ctx, stream_cnt++);
#endif
   stream->codec = handle->audio.codec;

   if (ctx->oformat->flags & AVFMT_GLOBALHEADER)
      handle->audio.codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

   handle->muxer.astream = stream;

#ifdef HAVE_X264RGB // Avoids a warning at end about non-monotonically increasing DTS values. It seems to be harmless to disable this.
   if (g_settings.video.h264_record)
      ctx->oformat->flags |= AVFMT_TS_NONSTRICT;
#endif

   av_dict_set(&ctx->metadata, "title", "RetroArch video dump", 0);

#ifdef HAVE_FFMPEG_AVFORMAT_WRITE_HEADER
   if (avformat_write_header(ctx, NULL) < 0)
#else
   if (av_write_header(ctx) != 0)
#endif
   {
      // fix: previously returned false here while leaking ctx and leaving
      // the output I/O context open.
#ifdef HAVE_FFMPEG_AVIO_OPEN
      avio_close(ctx->pb);
#else
      url_fclose(ctx->pb);
#endif
      av_free(ctx);
      return false;
   }

   handle->muxer.ctx = ctx;
   return true;
}