/*
 * Finish the current WebM chunk: flush the muxer's open cluster into the
 * dynamic buffer, then write that buffer out to the chunk file.
 *
 * Returns 0 on success (or when there is nothing to flush), a negative
 * AVERROR code on failure.
 */
static int chunk_end(AVFormatContext *s)
{
    WebMChunkContext *wc = s->priv_data;
    AVFormatContext *oc = wc->avf;
    int ret;
    int buffer_size;
    uint8_t *buffer;
    AVIOContext *pb;
    char filename[MAX_FILENAME_SIZE];

    /* Nothing has been muxed into this chunk yet. */
    if (wc->chunk_start_index == wc->chunk_index)
        return 0;

    // Flush the cluster in WebM muxer.
    oc->oformat->write_packet(oc, NULL);
    buffer_size = avio_close_dyn_buf(oc->pb, &buffer);
    /* The dynamic buffer is closed regardless of what happens below, so
     * clear the now-dangling pointer immediately instead of only on the
     * success path (the original left oc->pb dangling on every error). */
    oc->pb = NULL;

    ret = get_chunk_filename(s, 0, filename);
    if (ret < 0)
        goto fail;
    ret = avio_open2(&pb, filename, AVIO_FLAG_WRITE,
                     &s->interrupt_callback, NULL);
    if (ret < 0)
        goto fail;
    avio_write(pb, buffer, buffer_size);
    /* avio_close() can report delayed write errors. */
    ret = avio_close(pb);
fail:
    av_free(buffer);
    return (ret < 0) ? ret : 0;
}
/*
 * Write the WebM DASH initialization segment (header) to its own file.
 * Returns 0 on success, a negative AVERROR code on failure.
 */
static int webm_chunk_write_header(AVFormatContext *s)
{
    WebMChunkContext *wc = s->priv_data;
    AVFormatContext *oc = NULL;
    int ret;

    // DASH streams can only have one track per file.
    if (s->nb_streams != 1)
        return AVERROR_INVALIDDATA;

    wc->chunk_index = wc->chunk_start_index;
    wc->oformat = av_guess_format("webm", s->filename, "video/webm");
    if (!wc->oformat)
        return AVERROR_MUXER_NOT_FOUND;

    ret = chunk_mux_init(s);
    if (ret < 0)
        return ret;
    oc = wc->avf;
    ret = get_chunk_filename(s, 1, oc->filename);
    if (ret < 0)
        return ret;
    ret = avio_open2(&oc->pb, oc->filename, AVIO_FLAG_WRITE,
                     &s->interrupt_callback, NULL);
    if (ret < 0)
        return ret;

    oc->pb->seekable = 0;
    ret = oc->oformat->write_header(oc);
    if (ret < 0) {
        /* Don't leak the AVIOContext when the muxer fails
         * (the original returned without closing it). */
        avio_closep(&oc->pb);
        return ret;
    }
    /* Propagate delayed I/O errors instead of discarding them. */
    ret = avio_close(oc->pb);
    oc->pb = NULL;
    return ret;
}
/*
 * Open the avio-backed input plugin and prefetch up to MAX_PREVIEW_SIZE
 * bytes into the preview buffer.
 * Returns 1 on success, 0 if the avio protocol could not be opened.
 */
static int input_avio_open (input_plugin_t *this_gen)
{
  avio_input_plugin_t *this = (avio_input_plugin_t *) this_gen;
  int toread = MAX_PREVIEW_SIZE;
  int trycount = 0;

  if (!this->pb) {
    /* try to open libavio protocol */
    if (avio_open2(&this->pb, this->mrl_private, AVIO_FLAG_READ, NULL, NULL) < 0) {
      xprintf (this->stream->xine, XINE_VERBOSITY_LOG,
               LOG_MODULE": failed to open avio protocol for '%s'\n", this->mrl);
      _x_freep (&this->mrl_private);
      return 0;
    }
    xprintf (this->stream->xine, XINE_VERBOSITY_LOG,
             LOG_MODULE": opened avio protocol for '%s'\n", this->mrl);
  }
  _x_freep (&this->mrl_private);

  /* Fill the preview buffer. Stop on a negative read (hard error or
   * AVERROR_EOF) instead of uselessly retrying up to the full try count,
   * which is what the original loop did. */
  while ((toread > 0) && (trycount < 10)) {
    off_t n = avio_read (this->pb, this->preview + this->preview_size, toread);
    if (n > 0)
      this->preview_size += n;
    else if (n < 0)
      break;
    trycount++;
    toread = MAX_PREVIEW_SIZE - this->preview_size;
  }
  return 1;
}
bool OutputFile::setup() { av_register_all(); if( ! _outputFormat ) _outputFormat = av_guess_format( NULL, _filename.c_str(), NULL); if( ! _outputFormat ) { throw std::runtime_error( "unable to find format" ); } _formatContext->oformat = _outputFormat; if( !( _outputFormat->flags & AVFMT_NOFILE ) ) { if( avio_open2( &_formatContext->pb, _filename.c_str(), AVIO_FLAG_WRITE, NULL, NULL ) < 0 ) { avformat_close_input( &_formatContext ); _formatContext = NULL; throw std::runtime_error( "error when opening output format" ); } } return _formatContext != NULL; }
static int open_output(struct camera *cam) { AVFormatContext *s; AVStream *ost; int ret; s = avformat_alloc_context(); if((s->oformat = av_guess_format("h264", NULL, NULL)) == NULL) { av_err_msg("av_guess_format", 0); avformat_free_context(s); return -1; } db_create_videofile(cam, s->filename); if((ret = avio_open2(&s->pb, s->filename, AVIO_FLAG_WRITE, NULL, NULL)) < 0) { av_err_msg("avio_open2", ret); avformat_free_context(s); return -1; } if((ost = copy_ctx_from_input(s, cam)) == NULL) { fprintf(stderr, "copy_ctx_from_input failed\n"); avformat_free_context(s); return -1; } if((ret = avformat_write_header(s, NULL)) < 0) { av_err_msg("avformat_write_header", ret); avformat_free_context(s); return -1; } cam->output_context = s; cam->output_stream = ost; return 0; }
/*
 * Open the avio sout access module.
 * Returns VLC_SUCCESS, VLC_ENOMEM on allocation failure, or VLC_EGENERIC
 * when the path is missing or the avio context cannot be opened.
 */
int OutOpenAvio(vlc_object_t *object)
{
    sout_access_out_t *access = (sout_access_out_t*)object;

    config_ChainParse( access, "sout-avio-", ppsz_sout_options, access->p_cfg );

    sout_access_out_sys_t *sys = malloc(sizeof(*sys));
    if (!sys)
        return VLC_ENOMEM;
    sys->context = NULL;

    /* */
    vlc_init_avformat(object);

    if (!access->psz_path)
        goto error;

    int ret;
#if LIBAVFORMAT_VERSION_MAJOR < 54
    ret = avio_open(&sys->context, access->psz_path, AVIO_FLAG_WRITE);
#else
    AVDictionary *options = NULL;
    char *psz_opts = var_InheritString(access, "sout-avio-options");
    if (psz_opts) {
        if (*psz_opts)
            options = vlc_av_get_options(psz_opts);
        /* The original only freed psz_opts when it was non-empty,
         * leaking an empty inherited string. */
        free(psz_opts);
    }
    ret = avio_open2(&sys->context, access->psz_path, AVIO_FLAG_WRITE,
                     NULL, &options);
    AVDictionaryEntry *t = NULL;
    while ((t = av_dict_get(options, "", t, AV_DICT_IGNORE_SUFFIX)))
        msg_Err( access, "unknown option \"%s\"", t->key );
    av_dict_free(&options);
#endif
    if (ret < 0) {
        errno = AVUNERROR(ret);
        msg_Err(access, "Failed to open %s", access->psz_path);
        goto error;
    }

#if LIBAVFORMAT_VERSION_MAJOR < 54
    /* We can accept only one active user at any time */
    if (SetupAvioCb(VLC_OBJECT(access))) {
        msg_Err(access, "Module aready in use");
        goto error;
    }
#endif

    access->pf_write = Write;
    access->pf_control = OutControl;
    access->pf_seek = OutSeek;
    access->p_sys = sys;

    return VLC_SUCCESS;

error:
    /* Close the context if it was opened before a later step failed
     * (e.g. the SetupAvioCb() path above leaked it). */
    if (sys->context)
        avio_close(sys->context);
    free(sys);
    return VLC_EGENERIC;
}
int main(int argc, char **argv) { AVDictionary *options = NULL; AVIOContext *client = NULL, *server = NULL; const char *in_uri, *out_uri; int ret, pid; av_log_set_level(AV_LOG_TRACE); if (argc < 3) { printf("usage: %s input http://hostname[:port]\n" "API example program to serve http to multiple clients.\n" "\n", argv[0]); return 1; } in_uri = argv[1]; out_uri = argv[2]; avformat_network_init(); if ((ret = av_dict_set(&options, "listen", "2", 0)) < 0) { fprintf(stderr, "Failed to set listen mode for server: %s\n", av_err2str(ret)); return ret; } if ((ret = avio_open2(&server, out_uri, AVIO_FLAG_WRITE, NULL, &options)) < 0) { fprintf(stderr, "Failed to open server: %s\n", av_err2str(ret)); return ret; } fprintf(stderr, "Entering main loop.\n"); for (;;) { if ((ret = avio_accept(server, &client)) < 0) goto end; fprintf(stderr, "Accepted client, forking process.\n"); // XXX: Since we don't reap our children and don't ignore signals // this produces zombie processes. pid = fork(); if (pid < 0) { perror("Fork failed"); ret = AVERROR(errno); goto end; } if (pid == 0) { fprintf(stderr, "In child.\n"); process_client(client, in_uri); avio_close(server); exit(0); } if (pid > 0) avio_close(client); } end: avio_close(server); if (ret < 0 && ret != AVERROR_EOF) { fprintf(stderr, "Some errors occurred: %s\n", av_err2str(ret)); return 1; } return 0; }
/* slightly difference scanning function here so can't re-use lookup_default */ struct codec_ent encode_getcontainer(const char* const requested, int dst, const char* remote) { AVFormatContext* ctx; struct codec_ent res = {0}; if (requested && strcmp(requested, "stream") == 0){ res.storage.container.format = av_guess_format("flv", NULL, NULL); if (!res.storage.container.format) LOG("(encode) couldn't setup streaming output.\n"); else { ctx = avformat_alloc_context(); ctx->oformat = res.storage.container.format; res.storage.container.context = ctx; res.setup.muxer = default_format_setup; int rv = avio_open2(&ctx->pb, remote, AVIO_FLAG_WRITE, NULL, NULL); LOG("(encode) attempting to open: %s, result: %d\n", remote, rv); } return res; } if (requested) res.storage.container.format = av_guess_format(requested, NULL, NULL); if (!res.storage.container.format){ LOG("(encode) couldn't find a suitable container matching (%s)," " reverting to matroska (MKV)\n", requested); res.storage.container.format = av_guess_format("matroska", NULL, NULL); } else LOG("(encode) requested container (%s) found.\n", requested); /* no stream, nothing requested that matched and default didn't work. * Give up and cascade. */ if (!res.storage.container.format){ LOG("(encode) couldn't find a suitable container.\n"); return res; } avformat_alloc_output_context2(&ctx, res.storage.container.format, NULL, NULL); /* * Since there's no sane way for us to just pass a file descriptor and * not be limited to pipe behaviors, we have to provide an entire * custom avio class.. */ int* fdbuf = malloc(sizeof(int)); *fdbuf = dst; ctx->pb = avio_alloc_context(av_malloc(4096), 4096, 1, fdbuf, fdr, fdw, fds); res.storage.container.context = ctx; res.setup.muxer = default_format_setup; return res; }
/*
 * Debug helper: build a single-video-stream FLV output context into the
 * file-global `ofc`, configure an H.264 stream by hand, open the global
 * `output_filename` for writing and emit the container header.
 *
 * NOTE(review): heavy diagnostic printf scaffolding; errors from
 * avio_open2()/avformat_write_header() are printed but not acted upon.
 */
static void output_init()
{
    int i, err;            /* i is unused; err collects API return codes */
    AVCodec *c[2];         /* unused scratch — presumably for later use */
    /* The container is guessed from this dummy name, not from the real
     * output path. */
    char *guess_fp = "a.flv";
    // AVStream *st = ;
    ofc = avformat_alloc_context();
    AVOutputFormat *ofmt = av_guess_format(NULL, guess_fp, NULL);
    ofc->oformat = ofmt;
    strcpy(ofc->filename, guess_fp);
    printf("ofc ok\n");
    AVStream *ost[2];
    ost[0] = avformat_new_stream(ofc, NULL);
    //ost[0]->codec->codec_id = ist[0]->codec->codec_id;
    AVCodecContext *codec[2];
    codec[0] = ost[0]->codec;
    avcodec_get_context_defaults3(codec[0], NULL);
    /* Hand-configured H.264 video: 720x360 @ 25 fps. */
    codec[0]->codec_id = AV_CODEC_ID_H264;
    codec[0]->codec_type = AVMEDIA_TYPE_VIDEO;
    codec[0]->time_base.num = 1;
    codec[0]->time_base.den = 25;
    codec[0]->width = 720;
    codec[0]->height = 360;
    /* Ask the container for its tag for H.264 (FLV uses its own tags). */
    codec[0]->codec_tag = av_codec_get_tag(ofmt->codec_tag, AV_CODEC_ID_H264);
    printf("ofc->codec codec[0]=%p ofc->st[0]->codec=%p\n", codec[0], ofc->streams[0]->codec);
    printf("codec[0].tag=%d\n", codec[0]->codec_tag);
    /* The real destination path (file-global), not guess_fp. */
    err = avio_open2(&ofc->pb, output_filename, AVIO_FLAG_WRITE, NULL, NULL);
    printf("open2=%d\n", err);
    printf("write header\n");
    printf("ofc.nb_streams=%d\n", ofc->nb_streams);
    printf("ofc.st[0]=%p\n", ofc->streams[0]);
    printf("ofc.st[0].codec=%p\n", ofc->streams[0]->codec);
    printf("ofc.oformat=%p\n", ofc->oformat);
    printf("ofc.oformat.write_header=%p\n", ofc->oformat->write_header);
    printf("ofc.oformat.name=%s\n", ofc->oformat->name);
    printf("ofc.pb=%p\n", ofc->pb);
    printf("ofc.st[0].avg_frame_rate={%d,%d}\n", ofc->streams[0]->avg_frame_rate.num, ofc->streams[0]->avg_frame_rate.den );
    printf("ofc.st[0].codec.timebase={%d,%d}\n", ofc->streams[0]->codec->time_base.num, ofc->streams[0]->codec->time_base.den );
    printf("ofc.priv=%p\n", ofc->priv_data);
    //err = ofc->oformat->write_header(ofc);
    err = avformat_write_header(ofc, NULL);
    printf("write_header=%d\n", err);
    avio_flush(ofc->pb);
}
/* Open @url for reading, applying the demuxer's stored AVIO options. */
static int open_in(HLSContext *c, AVIOContext **in, const char *url)
{
    AVDictionary *opts = NULL;
    int ret;

    /* avio_open2() consumes entries from the dictionary, so hand it a
     * private copy and always release whatever is left over. */
    av_dict_copy(&opts, c->avio_opts, 0);
    ret = avio_open2(in, url, AVIO_FLAG_READ, c->interrupt_callback, &opts);
    av_dict_free(&opts);

    return ret;
}
char* PMS_IssueHttpRequest(const char* url, const char* verb) { char* reply = NULL; AVIOContext *ioctx = NULL; AVDictionary *settings = NULL; int size = 0; int ret; char headers[1024]; const char *token = getenv("X_PLEX_TOKEN"); if (token && *token) snprintf(headers, sizeof(headers), "X-Plex-Token: %s\r\n", token); av_dict_set(&settings, "method", verb, 0); if (token && *token) av_dict_set(&settings, "headers", headers, 0); ret = avio_open2(&ioctx, url, AVIO_FLAG_READ, NULL, &settings); if (ret < 0) goto fail; size = avio_size(ioctx); if (size < 0) size = 4095; else if (!size) goto fail; reply = av_malloc(size + 1); if (!reply) goto fail; ret = avio_read(ioctx, reply, size); if (ret < 0) goto fail; reply[ret] = 0; avio_close(ioctx); av_dict_free(&settings); return reply; fail: avio_close(ioctx); av_dict_free(&settings); if (reply) av_free(reply); return NULL; }
// fopen()-style wrapper over avio_open2(): 'r' and/or 'w' in mode map to
// AVIO_FLAG_READ / AVIO_FLAG_WRITE. Returns nullptr on failure.
static AVIOContext *ffms_fopen(const char *filename, const char *mode)
{
    int flags = 0;
    if (strchr(mode, 'r'))
        flags |= AVIO_FLAG_READ;
    if (strchr(mode, 'w'))
        flags |= AVIO_FLAG_WRITE;

    AVIOContext *ctx = nullptr;
    return avio_open2(&ctx, filename, flags, nullptr, nullptr) < 0
               ? nullptr
               : ctx;
}
/*
 * Open the avio sout access module.
 * Returns VLC_SUCCESS, VLC_ENOMEM on allocation failure, or VLC_EGENERIC
 * when the path is missing or the avio context cannot be opened.
 */
int OutOpenAvio(vlc_object_t *object)
{
    sout_access_out_t *access = (sout_access_out_t *)object;

    config_ChainParse(access, "sout-avio-", ppsz_sout_options, access->p_cfg);

    sout_access_out_sys_t *sys = vlc_obj_malloc(object, sizeof(*sys));
    if (sys == NULL)
        return VLC_ENOMEM;
    sys->context = NULL;

    /* */
    vlc_init_avformat(object);

    if (access->psz_path == NULL)
        return VLC_EGENERIC;

    /* Convert the inherited option string into an AVDictionary. */
    AVDictionary *options = NULL;
    char *opt_str = var_InheritString(access, "sout-avio-options");
    if (opt_str != NULL) {
        vlc_av_get_options(opt_str, &options);
        free(opt_str);
    }

    int ret = avio_open2(&sys->context, access->psz_path, AVIO_FLAG_WRITE,
                         NULL, &options);

    /* Whatever avio_open2() left in the dictionary was not recognized. */
    for (AVDictionaryEntry *t = NULL;
         (t = av_dict_get(options, "", t, AV_DICT_IGNORE_SUFFIX)) != NULL; )
        msg_Err( access, "unknown option \"%s\"", t->key );
    av_dict_free(&options);

    if (ret < 0) {
        errno = AVUNERROR(ret);
        msg_Err(access, "Failed to open %s", access->psz_path);
        return VLC_EGENERIC;
    }

    access->pf_write   = Write;
    access->pf_control = OutControl;
    access->pf_seek    = OutSeek;
    access->p_sys      = sys;

    return VLC_SUCCESS;
}
/*
 * Re-target the context's filename from its 4-char extension (e.g. "h264")
 * to "mp4", open the file and write the mp4 header.
 * Returns the new stream on success, NULL on failure (the AVIOContext is
 * closed on every failure path so the caller never inherits an open file).
 */
static AVStream* init_mp4_output(AVFormatContext *s, struct camera *cam)
{
  int ret;
  AVStream *ost;
  size_t len = strlen(s->filename);

  /* The original indexed len-4 unconditionally — undefined behavior for a
   * pathologically short name. */
  if(len < 4) {
    av_err_msg("init_mp4_output: filename too short", 0);
    return NULL;
  }
  strcpy(s->filename + len - 4, "mp4");
  if((s->oformat = av_guess_format("mp4", NULL, NULL)) == NULL) {
    av_err_msg("av_guess_format", 0);
    return NULL;
  }
  if((ret = avio_open2(&s->pb, s->filename, AVIO_FLAG_WRITE, NULL, NULL)) < 0) {
    av_err_msg("avio_open2", ret);
    return NULL;
  }
  if((ost = copy_ctx_from_input(s, cam)) == NULL) {
    fprintf(stderr, "copy_ctx_from_input failed\n");
    avio_closep(&s->pb);  /* was leaked on this path */
    return NULL;
  }
  if((ret = avformat_write_header(s, NULL)) < 0) {
    av_err_msg("avformat_write_header", ret);
    avio_closep(&s->pb);
    avformat_free_context(s);
    return NULL;
  }
  return ost;
}
/*
 * Initialize the AVFormatContext
 * Called on encoder initialize and when beginning
 * each new video chunk
 *
 * Deduces the container from output_filename (falling back to "mpeg"),
 * adds and opens the video/audio streams, opens the output file and
 * writes the container header. All results are handed back through the
 * out-parameters. Returns audio_input_frame_size (file global set by
 * open_audio — TODO confirm).
 */
int initializeAVFormatContext(AVFormatContext **out_oc, jbyte *output_filename,
        AVStream **out_video_st, AVFrame **out_picture,
        int video_width, int video_height, float video_crf,
        int *out_last_pts, int *out_video_frame_count,
        AVStream **out_audio_st, int16_t **out_samples, int audio_bitrate)
{
    /* BUG FIX: oc was declared uninitialized and then read (if(!oc))
     * before any assignment — undefined behavior. Initialize to NULL. */
    AVFormatContext *oc = NULL;
    AVStream *video_st = NULL;
    AVStream *audio_st = NULL;
    AVFrame *picture = NULL;
    int16_t *samples = NULL;

    // TODO: Can we do this only once?
    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    //LOGI("initializeAVFC with filename: %s", output_filename);
    if(!oc)
        LOGI("initializeAVFC, oc is properly null");

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, ((const char*) output_filename));
    if (!oc) {
        LOGI("Could not deduce output format, using mpeg");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", ((const char*) output_filename));
    }
    if (!oc) {
        LOGE("Could not allocate output context");
        exit(1);
    }

    /* fmt is a file global. NOTE(review): assigning codec ids below mutates
     * the shared AVOutputFormat, affecting every context that uses this
     * muxer — confirm this is intentional. */
    fmt = oc->oformat;
    fmt->video_codec = VIDEO_CODEC_ID;
    fmt->audio_codec = AUDIO_CODEC_ID;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec, video_width, video_height, video_crf);
    }
    if (fmt->audio_codec != CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, fmt->audio_codec, audio_bitrate);
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st) {
        open_video(oc, video_st, &picture);
    }
    if (audio_st) {
        open_audio(oc, audio_st, &samples);
    }

    av_dump_format(oc, 0, output_filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        char error_buffer[90];
        int error = avio_open2(&oc->pb, output_filename, AVIO_FLAG_WRITE, NULL, NULL);
        if (error < 0) {
            av_strerror(error, error_buffer, sizeof(error_buffer));
            LOGE("Could not open %s. Error: %s", output_filename, error_buffer);
            exit(-420);
        }
    }

    /* Write the stream header, if any. */
    avformat_write_header(oc, NULL);

    // Set results to output arguments
    *out_oc = oc;
    *out_video_st = video_st;
    *out_audio_st = audio_st;
    *out_picture = picture;
    *out_samples = samples;
    *out_last_pts = -1;
    *out_video_frame_count = 0;
    return audio_input_frame_size;
}
/*
 * Download (if @in is NULL) and parse an HLS m3u8 playlist at @url.
 *
 * The playlist may be a master playlist (listing variants via
 * #EXT-X-STREAM-INF) or a media playlist (listing segments via #EXTINF);
 * both tag sets are handled in the same loop. Parsed variants/segments are
 * appended to @c; @var (may be NULL) is the variant this playlist belongs
 * to and is (re)created lazily on the first media-playlist tag.
 *
 * Returns 0 on success, a negative AVERROR code on failure. When this
 * function opened @in itself, it also closes it.
 */
static int parse_playlist(HLSContext *c, const char *url,
                          struct variant *var, AVIOContext *in)
{
    int ret = 0, duration = 0, is_segment = 0, is_variant = 0, bandwidth = 0;
    enum KeyType key_type = KEY_NONE;
    uint8_t iv[16] = "";
    int has_iv = 0;
    char key[MAX_URL_SIZE] = "";
    char line[1024];
    const char *ptr;
    int close_in = 0;   /* remember whether we own @in and must close it */

    if (!in) {
        int is_redirected = 0;
        URLContext *h = NULL;
        AVDictionary *opts = NULL;
        close_in = 1;
        /* Some HLS servers dont like being sent the range header */
        av_dict_set(&opts, "seekable", "0", 0);
        ret = avio_open2(&in, url, AVIO_FLAG_READ,
                         c->interrupt_callback, &opts);
        av_dict_free(&opts);
        if (ret < 0)
            return ret;
        /* Follow HTTP redirects: rewrite the variant url so subsequent
         * reloads fetch the final location directly.
         * NOTE(review): var may be NULL here — confirm callers always pass
         * a variant when a redirect is possible. */
        h = (URLContext *) in->opaque;
        ff_http_get_location_changed(h, &is_redirected);
        if ( is_redirected ) {
            ff_http_get_new_location(h, var->url);
            av_log(NULL, AV_LOG_WARNING, "the var url changed to %s \n", var->url);
        }
    }

    /* The playlist must start with the #EXTM3U magic line. */
    read_chomp_line(in, line, sizeof(line));
    if (strcmp(line, "#EXTM3U")) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    /* Reloading an existing variant: drop its old segment list first. */
    if (var) {
        free_segment_list(var);
        var->finished = 0;
    }
    while (!url_feof(in)) {
        read_chomp_line(in, line, sizeof(line));
        if (av_strstart(line, "#EXT-X-STREAM-INF:", &ptr)) {
            /* Master playlist entry: the NEXT non-tag line is the
             * variant's URL (handled in the line[0] branch below). */
            struct variant_info info = {{0}};
            is_variant = 1;
            ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_variant_args,
                               &info);
            bandwidth = atoi(info.bandwidth);
        } else if (av_strstart(line, "#EXT-X-KEY:", &ptr)) {
            /* Encryption key declaration; applies to following segments. */
            struct key_info info = {{0}};
            ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_key_args,
                               &info);
            key_type = KEY_NONE;
            has_iv = 0;
            if (!strcmp(info.method, "AES-128"))
                key_type = KEY_AES_128;
            if (!strncmp(info.iv, "0x", 2) || !strncmp(info.iv, "0X", 2)) {
                ff_hex_to_data(iv, info.iv + 2);
                has_iv = 1;
            }
            av_strlcpy(key, info.uri, sizeof(key));
        } else if (av_strstart(line, "#EXT-X-TARGETDURATION:", &ptr)) {
            if (!var) {
                var = new_variant(c, 0, url, NULL);
                if (!var) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }
            var->target_duration = atoi(ptr);
        } else if (av_strstart(line, "#EXT-X-MEDIA-SEQUENCE:", &ptr)) {
            if (!var) {
                var = new_variant(c, 0, url, NULL);
                if (!var) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }
            var->start_seq_no = atoi(ptr);
        } else if (av_strstart(line, "#EXT-X-ENDLIST", &ptr)) {
            /* VOD playlist: no further reloads needed. */
            if (var)
                var->finished = 1;
        } else if (av_strstart(line, "#EXTINF:", &ptr)) {
            /* Segment duration; the NEXT non-tag line is the segment URL.
             * NOTE(review): atoi() truncates fractional durations. */
            is_segment = 1;
            duration = atoi(ptr);
        } else if (av_strstart(line, "#", NULL)) {
            /* Unknown/unsupported tag — skip. */
            continue;
        } else if (line[0]) {
            /* A bare URL line: belongs to the preceding STREAM-INF or
             * EXTINF tag. */
            if (is_variant) {
                if (!new_variant(c, bandwidth, line, url)) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                is_variant = 0;
                bandwidth = 0;
            }
            if (is_segment) {
                struct segment *seg;
                if (!var) {
                    var = new_variant(c, 0, url, NULL);
                    if (!var) {
                        ret = AVERROR(ENOMEM);
                        goto fail;
                    }
                }
                seg = av_malloc(sizeof(struct segment));
                if (!seg) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                seg->duration = duration;
                seg->key_type = key_type;
                if (has_iv) {
                    memcpy(seg->iv, iv, sizeof(iv));
                } else {
                    /* Per the HLS spec, the default IV is the segment's
                     * sequence number in big-endian. */
                    int seq = var->start_seq_no + var->n_segments;
                    memset(seg->iv, 0, sizeof(seg->iv));
                    AV_WB32(seg->iv + 12, seq);
                }
                /* Key/segment URLs may be relative to the playlist URL. */
                ff_make_absolute_url(seg->key, sizeof(seg->key), url, key);
                ff_make_absolute_url(seg->url, sizeof(seg->url), url, line);
                dynarray_add(&var->segments, &var->n_segments, seg);
                is_segment = 0;
            }
        }
    }
    /* Timestamp used to pace live-playlist reloads. */
    if (var)
        var->last_load_time = av_gettime();
fail:
    if (close_in)
        avio_close(in);
    return ret;
}
/*
 * MediaRecorder: records video frames (fed via AppendFrame) plus desktop
 * audio captured from PulseAudio into a container chosen from the output
 * file name. Video is MPEG-4, audio MP2. Two worker threads are spawned:
 * one encoding thread (EncodingThread) and one audio capture thread
 * (RecordingThread, defined elsewhere).
 *
 * NOTE(review): several return values (avio_open2, avformat_write_header,
 * av_new_stream) are unchecked, and ready/run/m_data are shared between
 * threads without atomics — kept as-is, documented only.
 */
MediaRecorder::MediaRecorder(const char * outfile,int width, int height)
{
    audiofailed = false;
    /* INIT SOUND RECORDING */
    debug_samples_out = fopen("audiosamples.s16","wb");
    audio_samples_written = 0;
    /* Run a temporary PulseAudio mainloop just to enumerate sinks; the
     * context_state_callback fills monitorsources/defaultsink and is
     * expected to quit the loop. */
    pa_context* pactx;
    pa_mainloop * m = pa_mainloop_new();
    m_api = pa_mainloop_get_api(m);
    pactx = pa_context_new(m_api,"Rec1");
    if ( pa_context_connect(pactx,NULL,(pa_context_flags_t)0,NULL) < 0 )
        printf("Cannot connect to pulseaudio\n");
    int ret;
    pa_context_set_state_callback(pactx, context_state_callback, this);
    pa_mainloop_run(m,&ret);
    std::cout << "Use source: " << monitorsources[defaultsink] << std::endl;
    /* 44.1 kHz stereo signed 16-bit LE capture format. */
    static const pa_sample_spec ss = {
        .format = PA_SAMPLE_S16LE,
        .rate = 44100,
        .channels = 2
    };
    pa_context_disconnect(pactx);
    int error;
    /* Record from the monitor source of the default sink (desktop audio). */
    s = pa_simple_new(NULL,"GLCAP Record",PA_STREAM_RECORD,monitorsources[defaultsink].c_str(), "record", &ss, NULL,NULL , &error);
    if ( !s ) {
        printf("Cannot create pa_simple\n");
    }
    run = true;
    ready = false;
    firstframe = true;
    this->width = width;
    this->height = height;
    pthread_mutex_init(&encode_mutex,NULL);
    pthread_mutex_init(&sound_buffer_lock,NULL);
    pthread_cond_init(&encode_cond,NULL);
    pthread_create(&encode_thread,NULL,(void*(*)(void*))&MediaRecorder::EncodingThread,this);
    /* INIT FFMPEG OUTPUT: container guessed from the file extension. */
    av_log_set_level(AV_LOG_DEBUG);
    outCtx = avformat_alloc_context();
    outCtx->oformat = av_guess_format(NULL, outfile, NULL);
    snprintf(outCtx->filename, sizeof(outCtx->filename), "%s", outfile);
    codec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
    acodec = avcodec_find_encoder(AV_CODEC_ID_MP2);
    ctx = avcodec_alloc_context3(codec);
    actx = avcodec_alloc_context3(acodec);
    avcodec_get_context_defaults3(actx,acodec);
    avcodec_get_context_defaults3(ctx,codec);
    /* Video encoder configuration. */
    ctx->width = width;
    ctx->height = height;
    ctx->bit_rate = 6000*1000;
    std::cout << ctx->time_base.den << " " << ctx->time_base.num << std::endl;
    ctx->time_base.den = TIMEBASE;
    ctx->time_base.num = 1;
    ctx->thread_count = 4;
    ctx->qmin = 2;
    ctx->qmax = 31;
    ctx->b_sensitivity = 100;
    ctx->gop_size = 1;   /* every frame is a keyframe */
    ctx->me_method = 1;
    ctx->global_quality = 100;
    ctx->lowres = 0;
    ctx->bit_rate_tolerance = 200000;
    /* Audio encoder configuration: S16 stereo 44.1 kHz, 128 kbit/s. */
    actx->sample_fmt = AV_SAMPLE_FMT_S16;
    actx->sample_rate = 44100;
    actx->channels = 2;
    actx->time_base.den = 44100;
    actx->time_base.num = 1;
    actx->bit_rate = 128000;
    actx->frame_size = 8192;
    actx->channel_layout = 3;   /* stereo (FL|FR) */
    /* ctx->compression_level = 0; ctx->trellis = 0; ctx->gop_size = 1; /* emit one intra frame every ten frames */
    /*ctx->me_pre_cmp = 0; ctx->me_cmp = 0; ctx->me_sub_cmp = 0; ctx->mb_cmp = 2; ctx->pre_dia_size = 0; ctx->dia_size = 1; ctx->quantizer_noise_shaping = 0; // qns=0 ctx->noise_reduction = 0; // nr=0 ctx->mb_decision = 0; // mbd=0 ("realtime" encoding) ctx->flags &= ~CODEC_FLAG_QPEL; ctx->flags &= ~CODEC_FLAG_4MV; ctx->trellis = 0; ctx->flags &= ~CODEC_FLAG_CBP_RD; ctx->flags &= ~CODEC_FLAG_QP_RD; ctx->flags &= ~CODEC_FLAG_MV0;*/
    //ctx->s
    ctx->pix_fmt = PIX_FMT_YUV420P;
    if (avcodec_open2(ctx, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
    }
    /* Audio failure is non-fatal: recording continues video-only. */
    if (avcodec_open2(actx, acodec, NULL) < 0) {
        fprintf(stderr, "Could not open audio codec\n");
        audiofailed = true;
    }
    printf("frame_size: %d\n",actx->frame_size);
    pthread_create(&record_sound_thread,NULL,(void*(*)(void*))&MediaRecorder::RecordingThread,this);
    /* Stream 0 = video, stream 1 = audio (when available). */
    AVStream* s = av_new_stream(outCtx,0);
    s->codec = ctx;
    s->r_frame_rate.den = TIMEBASE;
    s->r_frame_rate.num = 1;
    if (!audiofailed ) {
        AVStream* as = av_new_stream(outCtx,1);
        as->codec = actx;
        as->r_frame_rate.den = 44100;
        as->r_frame_rate.num = 1;
    }
    /* picture: YUV420P frame handed to the encoder;
     * tmp_picture: RGBA staging frame filled from the captured data. */
    picture = alloc_picture(PIX_FMT_YUV420P, ctx->width, ctx->height);
    if (!picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }
    tmp_picture = NULL;
    tmp_picture = alloc_picture(PIX_FMT_RGBA, ctx->width, ctx->height);
    if (!tmp_picture) {
        fprintf(stderr, "Could not allocate temporary picture\n");
        exit(1);
    }
    img_convert_ctx = sws_getContext(ctx->width, ctx->height, PIX_FMT_RGBA, ctx->width, ctx->height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR , NULL, NULL, NULL);
    if (img_convert_ctx == NULL) {
        fprintf(stderr, "Cannot initialize the conversion context\n");
        exit(1);
    }
    av_dump_format(outCtx, 0, outfile, 1);
    avio_open2(&outCtx->pb, outfile, AVIO_FLAG_WRITE,NULL,NULL);
    avformat_write_header(outCtx,NULL);
}

/*
 * Stop both worker threads, finalize the container and release
 * encoder/capture resources.
 */
MediaRecorder::~MediaRecorder()
{
    run = false;
    ready = false;
    /* Wake the encoding thread so it can observe run == false and exit. */
    pthread_cond_broadcast(&encode_cond);
    printf("Joining thread..\n");
    pthread_join(encode_thread,NULL);
    printf("Joining recording thread..\n");
    pthread_join(record_sound_thread,NULL);
    printf("Done\n");
    av_write_trailer(outCtx);
    av_free(picture);
    avformat_free_context(outCtx);
    pa_simple_free(s);
    fclose(debug_samples_out);
}

int fcount = 0;

/*
 * Hand one captured RGBA frame to the encoder thread. Non-blocking: the
 * frame is dropped when the encoder is still busy (ready == false).
 * NOTE(review): only the pointer is stored (m_data = data); the caller's
 * buffer must stay valid until the encoding thread has consumed it.
 */
void MediaRecorder::AppendFrame(float time, int width, int height, char* data)
{
    if ( !ready )
        return ;
    printf("AppendFrame\n");
    /* Wall-clock timestamps; the 'time' parameter is ignored. */
    this->time = getcurrenttime2();
    if ( firstframe ) {
        starttime = getcurrenttime2();
        firstframe = false;
    }
    this->height = height;
    this->width = width;
    m_data = data;
    ready = false;
    pthread_cond_broadcast(&encode_cond);
    /*int i = 0; unsigned int numpixels = width * height; unsigned int ui = numpixels; unsigned int vi = numpixels + numpixels / 4; for ( int j = 0; j < height; j++ ) { for ( int k = 0; k < width; k++ ) { int sR = data[i*4+0]; int sG = data[i*4+1]; int sB = data[i*4+2]; picture->data[0][i] = ( (66*sR + 129*sG + 25*sB + 128) >> 8) + 16; if (0 == j%2 && 0 == k%2) { picture->data[0][ui++] = ( (-38*sR - 74*sG + 112*sB + 128) >> 8) + 128; picture->data[0][vi++] = ( (112*sR - 94*sG - 18*sB + 128) >> 8) + 128; } i++; } }*/
    // printf("End flip %f\n",(float)getcurrenttime2());
    //memcpy(tmp_picture->data[0],data,width*height*4);
}

/*
 * Worker loop: waits for a frame from AppendFrame, vertically flips it into
 * tmp_picture, converts RGBA->YUV420P, encodes and muxes the video packet,
 * then drains any queued audio buffers through the audio encoder.
 */
void MediaRecorder::EncodingThread()
{
    while ( run ) {
        printf("Encode thread ready\n");
        ready = true;
        /* NOTE(review): encode_mutex is never locked before this wait -
         * relies on undefined pthread behavior; kept as-is. */
        pthread_cond_wait(&encode_cond,&encode_mutex);
        if (!run) {
            printf("Encoding finished\n");
            break;
        }
        /* Flip the image vertically (GL origin is bottom-left), one full
         * RGBA row per memcpy. */
        for ( int y = 0; y < height; y++ ) {
            /*for ( int x = 0; x < width; x++ ) {*/
            char r,g,b;
            int oldindex = (y*width);
            int newindex = ((height-1-y)*width);
            memcpy(&tmp_picture->data[0][(newindex)*4],&m_data[oldindex*4],width*4);
            /* r = data[oldindex*4+0]; g = data[oldindex*4+1]; b = data[oldindex*4+2]; tmp_picture->data[0][(newindex)*4+0] = r; tmp_picture->data[0][(newindex)*4+1] = g; tmp_picture->data[0][(newindex)*4+2] = b; */
            // }
        }
        sws_scale(img_convert_ctx,tmp_picture->data,tmp_picture->linesize,0,height,picture->data,picture->linesize);
        AVPacket p;
        av_init_packet(&p);
        p.data = NULL;
        p.size = 0;
        /* PTS in TIMEBASE units relative to the first frame. */
        picture->pts = int64_t((time-starttime)*TIMEBASE);
        uint64_t vpts = picture->pts;
        // picture->pts = time*30.0;
        int got_frame;
        printf("%p %p\n",ctx, picture);
        if(avcodec_encode_video2(ctx, &p, picture, &got_frame) < 0)
            return;
        if(got_frame) {
            // outContainer is "mp4"
            p.pts = vpts;
            p.dts = AV_NOPTS_VALUE;
            av_write_frame(outCtx, &p);
            av_free_packet(&p);
        }
        //sleep(1);
        printf("End enc frame %f, pts %lld\n",(float)getcurrenttime2(),picture->pts);
        /* Drain queued audio capture buffers (filled by RecordingThread). */
        AVFrame * aframe = avcodec_alloc_frame();
        bool unlocked = false;
        while ( sound_buffers.size() > 0 ) {
            uint64_t apts = audio_samples_written;
            /* if ( apts > vpts ) break;*/
            pthread_mutex_lock(&sound_buffer_lock);
            short * buf = sound_buffers.front();
            sound_buffers.pop_front();
            pthread_mutex_unlock(&sound_buffer_lock);
            if (!audiofailed ) {
                unlocked = true;
                aframe->nb_samples = actx->frame_size;
                aframe->channel_layout = actx->channel_layout;
                aframe->format = AV_SAMPLE_FMT_S16;
                aframe->channels = actx->channels;
                /* frame_size samples * 2 channels * 2 bytes/sample. */
                avcodec_fill_audio_frame(aframe,actx->channels,AV_SAMPLE_FMT_S16,(char*)buf,actx->frame_size*2*2,0);
                // avcodec_fill_audio_frame(aframe,actx->channels,actx->sample_fmt,(char*)buf,actx->frame_size*2,0);
                printf("sound_buffers.size() = %d\n",sound_buffers.size());
                av_init_packet(&p);
                p.data = NULL;
                p.size = 0;
                avcodec_encode_audio2(actx,&p,aframe,&got_frame);
                if ( got_frame ) {
                    p.stream_index = 1;
                    p.flags |= AV_PKT_FLAG_KEY;
                    av_write_frame(outCtx,&p);
                    av_free_packet(&p);
                }
                audio_samples_written += actx->frame_size;//samples/2 each channel
            }
            //printf("Consumed 1024 samples\n");
            delete[] buf;
        }
        /* if ( !unlocked ) pthread_mutex_unlock(&sound_buffer_lock);*/
        avcodec_free_frame(&aframe);
    }
}

/* True when the encoder thread is idle and can accept a new frame. */
bool MediaRecorder::isReady()
{
    return ready;
}
/********************************************************************** * avformatInit ********************************************************************** * Allocates hb_mux_data_t structures, create file and write headers *********************************************************************/ static int avformatInit( hb_mux_object_t * m ) { hb_job_t * job = m->job; hb_audio_t * audio; hb_mux_data_t * track; int meta_mux; int max_tracks; int ii, jj, ret; int clock_min, clock_max, clock; hb_video_framerate_get_limits(&clock_min, &clock_max, &clock); const char *muxer_name = NULL; uint8_t default_track_flag = 1; uint8_t need_fonts = 0; char *lang; max_tracks = 1 + hb_list_count( job->list_audio ) + hb_list_count( job->list_subtitle ); m->tracks = calloc(max_tracks, sizeof(hb_mux_data_t*)); m->oc = avformat_alloc_context(); if (m->oc == NULL) { hb_error( "Could not initialize avformat context." ); goto error; } AVDictionary * av_opts = NULL; switch (job->mux) { case HB_MUX_AV_MP4: m->time_base.num = 1; m->time_base.den = 90000; if( job->ipod_atom ) muxer_name = "ipod"; else muxer_name = "mp4"; meta_mux = META_MUX_MP4; av_dict_set(&av_opts, "brand", "mp42", 0); if (job->mp4_optimize) av_dict_set(&av_opts, "movflags", "faststart+disable_chpl", 0); else av_dict_set(&av_opts, "movflags", "+disable_chpl", 0); break; case HB_MUX_AV_MKV: // libavformat is essentially hard coded such that it only // works with a timebase of 1/1000 m->time_base.num = 1; m->time_base.den = 1000; muxer_name = "matroska"; meta_mux = META_MUX_MKV; break; default: { hb_error("Invalid Mux %x", job->mux); goto error; } } m->oc->oformat = av_guess_format(muxer_name, NULL, NULL); if(m->oc->oformat == NULL) { hb_error("Could not guess output format %s", muxer_name); goto error; } av_strlcpy(m->oc->filename, job->file, sizeof(m->oc->filename)); ret = avio_open2(&m->oc->pb, job->file, AVIO_FLAG_WRITE, &m->oc->interrupt_callback, NULL); if( ret < 0 ) { hb_error( "avio_open2 failed, errno %d", ret); goto 
error; } /* Video track */ track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) ); job->mux_data = track; track->type = MUX_TYPE_VIDEO; track->prev_chapter_tc = AV_NOPTS_VALUE; track->st = avformat_new_stream(m->oc, NULL); if (track->st == NULL) { hb_error("Could not initialize video stream"); goto error; } track->st->time_base = m->time_base; avcodec_get_context_defaults3(track->st->codec, NULL); track->st->codec->codec_type = AVMEDIA_TYPE_VIDEO; track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; uint8_t *priv_data = NULL; int priv_size = 0; switch (job->vcodec) { case HB_VCODEC_X264_8BIT: case HB_VCODEC_X264_10BIT: case HB_VCODEC_QSV_H264: track->st->codec->codec_id = AV_CODEC_ID_H264; /* Taken from x264 muxers.c */ priv_size = 5 + 1 + 2 + job->config.h264.sps_length + 1 + 2 + job->config.h264.pps_length; priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("H.264 extradata: malloc failure"); goto error; } priv_data[0] = 1; priv_data[1] = job->config.h264.sps[1]; /* AVCProfileIndication */ priv_data[2] = job->config.h264.sps[2]; /* profile_compat */ priv_data[3] = job->config.h264.sps[3]; /* AVCLevelIndication */ priv_data[4] = 0xff; // nalu size length is four bytes priv_data[5] = 0xe1; // one sps priv_data[6] = job->config.h264.sps_length >> 8; priv_data[7] = job->config.h264.sps_length; memcpy(priv_data+8, job->config.h264.sps, job->config.h264.sps_length); priv_data[8+job->config.h264.sps_length] = 1; // one pps priv_data[9+job->config.h264.sps_length] = job->config.h264.pps_length >> 8; priv_data[10+job->config.h264.sps_length] = job->config.h264.pps_length; memcpy(priv_data+11+job->config.h264.sps_length, job->config.h264.pps, job->config.h264.pps_length ); break; case HB_VCODEC_FFMPEG_MPEG4: track->st->codec->codec_id = AV_CODEC_ID_MPEG4; if (job->config.mpeg4.length != 0) { priv_size = job->config.mpeg4.length; priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == 
NULL) { hb_error("MPEG4 extradata: malloc failure"); goto error; } memcpy(priv_data, job->config.mpeg4.bytes, priv_size); } break; case HB_VCODEC_FFMPEG_MPEG2: track->st->codec->codec_id = AV_CODEC_ID_MPEG2VIDEO; if (job->config.mpeg4.length != 0) { priv_size = job->config.mpeg4.length; priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("MPEG2 extradata: malloc failure"); goto error; } memcpy(priv_data, job->config.mpeg4.bytes, priv_size); } break; case HB_VCODEC_FFMPEG_VP8: track->st->codec->codec_id = AV_CODEC_ID_VP8; priv_data = NULL; priv_size = 0; break; case HB_VCODEC_FFMPEG_VP9: track->st->codec->codec_id = AV_CODEC_ID_VP9; priv_data = NULL; priv_size = 0; break; case HB_VCODEC_THEORA: { track->st->codec->codec_id = AV_CODEC_ID_THEORA; int size = 0; ogg_packet *ogg_headers[3]; for (ii = 0; ii < 3; ii++) { ogg_headers[ii] = (ogg_packet *)job->config.theora.headers[ii]; size += ogg_headers[ii]->bytes + 2; } priv_size = size; priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("Theora extradata: malloc failure"); goto error; } size = 0; for(ii = 0; ii < 3; ii++) { AV_WB16(priv_data + size, ogg_headers[ii]->bytes); size += 2; memcpy(priv_data+size, ogg_headers[ii]->packet, ogg_headers[ii]->bytes); size += ogg_headers[ii]->bytes; } } break; case HB_VCODEC_X265_8BIT: case HB_VCODEC_X265_10BIT: case HB_VCODEC_X265_12BIT: case HB_VCODEC_X265_16BIT: case HB_VCODEC_QSV_H265: track->st->codec->codec_id = AV_CODEC_ID_HEVC; if (job->config.h265.headers_length > 0) { priv_size = job->config.h265.headers_length; priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("H.265 extradata: malloc failure"); goto error; } memcpy(priv_data, job->config.h265.headers, priv_size); } break; default: hb_error("muxavformat: Unknown video codec: %x", job->vcodec); goto error; } track->st->codec->extradata = priv_data; track->st->codec->extradata_size 
= priv_size; track->st->sample_aspect_ratio.num = job->par.num; track->st->sample_aspect_ratio.den = job->par.den; track->st->codec->sample_aspect_ratio.num = job->par.num; track->st->codec->sample_aspect_ratio.den = job->par.den; track->st->codec->width = job->width; track->st->codec->height = job->height; track->st->disposition |= AV_DISPOSITION_DEFAULT; hb_rational_t vrate = job->vrate; // If the vrate is the internal clock rate, there's a good chance // this is a standard rate that we have in our hb_video_rates table. // Because of rounding errors and approximations made while // measuring framerate, the actual value may not be exact. So // we look for rates that are "close" and make an adjustment // to fps.den. if (vrate.num == clock) { const hb_rate_t *video_framerate = NULL; while ((video_framerate = hb_video_framerate_get_next(video_framerate)) != NULL) { if (abs(vrate.den - video_framerate->rate) < 10) { vrate.den = video_framerate->rate; break; } } } hb_reduce(&vrate.num, &vrate.den, vrate.num, vrate.den); if (job->mux == HB_MUX_AV_MP4) { // libavformat mp4 muxer requires that the codec time_base have the // same denominator as the stream time_base, it uses it for the // mdhd timescale. 
double scale = (double)track->st->time_base.den / vrate.num; track->st->codec->time_base.den = track->st->time_base.den; track->st->codec->time_base.num = vrate.den * scale; } else { track->st->codec->time_base.num = vrate.den; track->st->codec->time_base.den = vrate.num; } track->st->avg_frame_rate.num = vrate.num; track->st->avg_frame_rate.den = vrate.den; /* add the audio tracks */ for(ii = 0; ii < hb_list_count( job->list_audio ); ii++ ) { audio = hb_list_item( job->list_audio, ii ); track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) ); audio->priv.mux_data = track; track->type = MUX_TYPE_AUDIO; track->st = avformat_new_stream(m->oc, NULL); if (track->st == NULL) { hb_error("Could not initialize audio stream"); goto error; } avcodec_get_context_defaults3(track->st->codec, NULL); track->st->codec->codec_type = AVMEDIA_TYPE_AUDIO; track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; if (job->mux == HB_MUX_AV_MP4) { track->st->codec->time_base.num = audio->config.out.samples_per_frame; track->st->codec->time_base.den = audio->config.out.samplerate; track->st->time_base.num = 1; track->st->time_base.den = audio->config.out.samplerate; } else { track->st->codec->time_base = m->time_base; track->st->time_base = m->time_base; } priv_data = NULL; priv_size = 0; switch (audio->config.out.codec & HB_ACODEC_MASK) { case HB_ACODEC_DCA: case HB_ACODEC_DCA_HD: track->st->codec->codec_id = AV_CODEC_ID_DTS; break; case HB_ACODEC_AC3: track->st->codec->codec_id = AV_CODEC_ID_AC3; break; case HB_ACODEC_FFEAC3: track->st->codec->codec_id = AV_CODEC_ID_EAC3; break; case HB_ACODEC_FFTRUEHD: track->st->codec->codec_id = AV_CODEC_ID_TRUEHD; break; case HB_ACODEC_LAME: case HB_ACODEC_MP3: track->st->codec->codec_id = AV_CODEC_ID_MP3; break; case HB_ACODEC_VORBIS: { track->st->codec->codec_id = AV_CODEC_ID_VORBIS; int jj, size = 0; ogg_packet *ogg_headers[3]; for (jj = 0; jj < 3; jj++) { ogg_headers[jj] = (ogg_packet *)audio->priv.config.vorbis.headers[jj]; size += 
ogg_headers[jj]->bytes + 2; } priv_size = size; priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("Vorbis extradata: malloc failure"); goto error; } size = 0; for(jj = 0; jj < 3; jj++) { AV_WB16(priv_data + size, ogg_headers[jj]->bytes); size += 2; memcpy(priv_data+size, ogg_headers[jj]->packet, ogg_headers[jj]->bytes); size += ogg_headers[jj]->bytes; } } break; case HB_ACODEC_FFFLAC: case HB_ACODEC_FFFLAC24: track->st->codec->codec_id = AV_CODEC_ID_FLAC; if (audio->priv.config.extradata.length) { priv_size = audio->priv.config.extradata.length; priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("FLAC extradata: malloc failure"); goto error; } memcpy(priv_data, audio->priv.config.extradata.bytes, audio->priv.config.extradata.length); } break; case HB_ACODEC_FFAAC: case HB_ACODEC_CA_AAC: case HB_ACODEC_CA_HAAC: case HB_ACODEC_FDK_AAC: case HB_ACODEC_FDK_HAAC: track->st->codec->codec_id = AV_CODEC_ID_AAC; // libav mkv muxer expects there to be extradata for // AAC and will crash if it is NULL. // // Also, libav can over-read the buffer by up to 8 bytes // when it fills it's get_bits cache. // // So allocate extra bytes priv_size = audio->priv.config.extradata.length; priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("AAC extradata: malloc failure"); goto error; } memcpy(priv_data, audio->priv.config.extradata.bytes, audio->priv.config.extradata.length); // AAC from pass-through source may be ADTS. // Therefore inserting "aac_adtstoasc" bitstream filter is // preferred. // The filter does nothing for non-ADTS bitstream. 
if (audio->config.out.codec == HB_ACODEC_AAC_PASS) { track->bitstream_filter = av_bitstream_filter_init("aac_adtstoasc"); } break; default: hb_error("muxavformat: Unknown audio codec: %x", audio->config.out.codec); goto error; } track->st->codec->extradata = priv_data; track->st->codec->extradata_size = priv_size; if( default_track_flag ) { track->st->disposition |= AV_DISPOSITION_DEFAULT; default_track_flag = 0; } lang = lookup_lang_code(job->mux, audio->config.lang.iso639_2 ); if (lang != NULL) { av_dict_set(&track->st->metadata, "language", lang, 0); } track->st->codec->sample_rate = audio->config.out.samplerate; if (audio->config.out.codec & HB_ACODEC_PASS_FLAG) { track->st->codec->channels = av_get_channel_layout_nb_channels(audio->config.in.channel_layout); track->st->codec->channel_layout = audio->config.in.channel_layout; } else { track->st->codec->channels = hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown); track->st->codec->channel_layout = hb_ff_mixdown_xlat(audio->config.out.mixdown, NULL); } char *name; if (audio->config.out.name == NULL) { switch (track->st->codec->channels) { case 1: name = "Mono"; break; case 2: name = "Stereo"; break; default: name = "Surround"; break; } } else { name = audio->config.out.name; } // Set audio track title av_dict_set(&track->st->metadata, "title", name, 0); if (job->mux == HB_MUX_AV_MP4) { // Some software (MPC, mediainfo) use hdlr description // for track title av_dict_set(&track->st->metadata, "handler", name, 0); } } // Check for audio track associations for (ii = 0; ii < hb_list_count(job->list_audio); ii++) { audio = hb_list_item(job->list_audio, ii); switch (audio->config.out.codec & HB_ACODEC_MASK) { case HB_ACODEC_FFAAC: case HB_ACODEC_CA_AAC: case HB_ACODEC_CA_HAAC: case HB_ACODEC_FDK_AAC: case HB_ACODEC_FDK_HAAC: break; default: { // Mark associated fallback audio tracks for any non-aac track for(jj = 0; jj < hb_list_count( job->list_audio ); jj++ ) { hb_audio_t * fallback; int codec; if (ii 
== jj) continue; fallback = hb_list_item( job->list_audio, jj ); codec = fallback->config.out.codec & HB_ACODEC_MASK; if (fallback->config.in.track == audio->config.in.track && (codec == HB_ACODEC_FFAAC || codec == HB_ACODEC_CA_AAC || codec == HB_ACODEC_CA_HAAC || codec == HB_ACODEC_FDK_AAC || codec == HB_ACODEC_FDK_HAAC)) { hb_mux_data_t * fallback_track; int * sd; track = audio->priv.mux_data; fallback_track = fallback->priv.mux_data; sd = (int*)av_stream_new_side_data(track->st, AV_PKT_DATA_FALLBACK_TRACK, sizeof(int)); if (sd != NULL) { *sd = fallback_track->st->index; } } } } break; } } char * subidx_fmt = "size: %dx%d\n" "org: %d, %d\n" "scale: 100%%, 100%%\n" "alpha: 100%%\n" "smooth: OFF\n" "fadein/out: 50, 50\n" "align: OFF at LEFT TOP\n" "time offset: 0\n" "forced subs: %s\n" "palette: %06x, %06x, %06x, %06x, %06x, %06x, " "%06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x\n" "custom colors: OFF, tridx: 0000, " "colors: 000000, 000000, 000000, 000000\n"; int subtitle_default = -1; for( ii = 0; ii < hb_list_count( job->list_subtitle ); ii++ ) { hb_subtitle_t *subtitle = hb_list_item( job->list_subtitle, ii ); if( subtitle->config.dest == PASSTHRUSUB ) { if ( subtitle->config.default_track ) subtitle_default = ii; } } // Quicktime requires that at least one subtitle is enabled, // else it doesn't show any of the subtitles. // So check to see if any of the subtitles are flagged to be // the defualt. The default will the the enabled track, else // enable the first track. 
if (job->mux == HB_MUX_AV_MP4 && subtitle_default == -1) { subtitle_default = 0; } for( ii = 0; ii < hb_list_count( job->list_subtitle ); ii++ ) { hb_subtitle_t * subtitle; uint32_t rgb[16]; char subidx[2048]; int len; subtitle = hb_list_item( job->list_subtitle, ii ); if (subtitle->config.dest != PASSTHRUSUB) continue; track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) ); subtitle->mux_data = track; track->type = MUX_TYPE_SUBTITLE; track->st = avformat_new_stream(m->oc, NULL); if (track->st == NULL) { hb_error("Could not initialize subtitle stream"); goto error; } avcodec_get_context_defaults3(track->st->codec, NULL); track->st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE; track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; track->st->time_base = m->time_base; track->st->codec->time_base = m->time_base; track->st->codec->width = subtitle->width; track->st->codec->height = subtitle->height; priv_data = NULL; priv_size = 0; switch (subtitle->source) { case VOBSUB: { int jj; track->st->codec->codec_id = AV_CODEC_ID_DVD_SUBTITLE; for (jj = 0; jj < 16; jj++) rgb[jj] = hb_yuv2rgb(subtitle->palette[jj]); len = snprintf(subidx, 2048, subidx_fmt, subtitle->width, subtitle->height, 0, 0, "OFF", rgb[0], rgb[1], rgb[2], rgb[3], rgb[4], rgb[5], rgb[6], rgb[7], rgb[8], rgb[9], rgb[10], rgb[11], rgb[12], rgb[13], rgb[14], rgb[15]); priv_size = len + 1; priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("VOBSUB extradata: malloc failure"); goto error; } memcpy(priv_data, subidx, priv_size); } break; case PGSSUB: { track->st->codec->codec_id = AV_CODEC_ID_HDMV_PGS_SUBTITLE; } break; case CC608SUB: case CC708SUB: case TX3GSUB: case SRTSUB: case UTF8SUB: case SSASUB: { if (job->mux == HB_MUX_AV_MP4) { track->st->codec->codec_id = AV_CODEC_ID_MOV_TEXT; } else { track->st->codec->codec_id = AV_CODEC_ID_SSA; need_fonts = 1; if (subtitle->extradata_size) { priv_size = subtitle->extradata_size; priv_data = 
av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("SSA extradata: malloc failure"); goto error; } memcpy(priv_data, subtitle->extradata, priv_size); } } } break; default: continue; } if (track->st->codec->codec_id == AV_CODEC_ID_MOV_TEXT) { // Build codec extradata for tx3g. // If we were using a libav codec to generate this data // this would (or should) be done for us. uint8_t properties[] = { 0x00, 0x00, 0x00, 0x00, // Display Flags 0x01, // Horiz. Justification 0xff, // Vert. Justification 0x00, 0x00, 0x00, 0xff, // Bg color 0x00, 0x00, 0x00, 0x00, // Default text box 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Reserved 0x00, 0x01, // Font ID 0x00, // Font face 0x18, // Font size 0xff, 0xff, 0xff, 0xff, // Fg color // Font table: 0x00, 0x00, 0x00, 0x12, // Font table size 'f','t','a','b', // Tag 0x00, 0x01, // Count 0x00, 0x01, // Font ID 0x05, // Font name length 'A','r','i','a','l' // Font name }; int width, height = 60; width = job->width * job->par.num / job->par.den; track->st->codec->width = width; track->st->codec->height = height; properties[14] = height >> 8; properties[15] = height & 0xff; properties[16] = width >> 8; properties[17] = width & 0xff; priv_size = sizeof(properties); priv_data = av_malloc(priv_size + FF_INPUT_BUFFER_PADDING_SIZE); if (priv_data == NULL) { hb_error("TX3G extradata: malloc failure"); goto error; } memcpy(priv_data, properties, priv_size); } track->st->codec->extradata = priv_data; track->st->codec->extradata_size = priv_size; if (ii == subtitle_default) { track->st->disposition |= AV_DISPOSITION_DEFAULT; } if (subtitle->config.default_track) { track->st->disposition |= AV_DISPOSITION_FORCED; } lang = lookup_lang_code(job->mux, subtitle->iso639_2 ); if (lang != NULL) { av_dict_set(&track->st->metadata, "language", lang, 0); } }
/*
 * Read the next image as one AVPacket (image2 demuxer).
 *
 * In file mode (!s->is_pipe) the frame filename is generated from the
 * numbered pattern in s->path.  For rawvideo the data is split across up
 * to three planar files whose names differ only in the last character
 * (the 'U' + i rewrite below); for all other codecs only the first file
 * is opened.  In pipe mode a fixed 4096-byte chunk is read from s1->pb.
 *
 * Returns 0 on success, AVERROR_EOF when past the last image, or
 * AVERROR(EIO) on open/read failure.
 */
static int img_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoDemuxData *s = s1->priv_data;
    char filename[1024];
    int i, res;
    int size[3] = { 0 }, ret[3] = { 0 };
    AVIOContext *f[3] = { NULL };
    AVCodecContext *codec = s1->streams[0]->codec;

    if (!s->is_pipe) {
        /* loop over input */
        if (s->loop && s->img_number > s->img_last) {
            s->img_number = s->img_first;
        }
        if (s->img_number > s->img_last)
            return AVERROR_EOF;
        if (av_get_frame_filename(filename, sizeof(filename),
                                  s->path,
                                  s->img_number) < 0 && s->img_number > 1)
            return AVERROR(EIO);
        for (i = 0; i < 3; i++) {
            if (avio_open2(&f[i], filename, AVIO_FLAG_READ,
                           &s1->interrupt_callback, NULL) < 0) {
                /* Missing 2nd/3rd plane files are tolerated (size stays 0);
                 * only failure to open the first file is an error. */
                if (i >= 1)
                    break;
                av_log(s1, AV_LOG_ERROR, "Could not open file : %s\n",
                       filename);
                return AVERROR(EIO);
            }
            size[i] = avio_size(f[i]);
            if (codec->codec_id != AV_CODEC_ID_RAWVIDEO)
                break;
            /* Rawvideo planes live in sibling files: rewrite the last
             * filename character to 'U', then 'V' on the next pass. */
            filename[strlen(filename) - 1] = 'U' + i;
        }
        if (codec->codec_id == AV_CODEC_ID_RAWVIDEO && !codec->width)
            infer_size(&codec->width, &codec->height, size[0]);
    } else {
        f[0] = s1->pb;
        if (f[0]->eof_reached)
            return AVERROR(EIO);
        /* Pipe input has no known size; read in fixed chunks. */
        size[0] = 4096;
    }

    /* One packet holds all planes back to back. */
    res = av_new_packet(pkt, size[0] + size[1] + size[2]);
    if (res < 0)
        return res;
    pkt->stream_index = 0;
    pkt->flags       |= AV_PKT_FLAG_KEY;

    pkt->size = 0;
    for (i = 0; i < 3; i++) {
        if (f[i]) {
            ret[i] = avio_read(f[i], pkt->data + pkt->size, size[i]);
            if (!s->is_pipe)
                avio_close(f[i]);
            if (ret[i] > 0)
                pkt->size += ret[i];
        }
    }

    /* A short/failed read of the primary plane, or a read error on either
     * chroma plane, invalidates the whole packet. */
    if (ret[0] <= 0 || ret[1] < 0 || ret[2] < 0) {
        av_packet_unref(pkt);
        return AVERROR(EIO); /* signal EOF */
    } else {
        s->img_count++;
        s->img_number++;
        return 0;
    }
}
/*
 * Write one video packet as an image file (image2 muxer).
 *
 * In file mode the output filename comes from the numbered pattern in
 * img->path.  Rawvideo is split into three planar files (last filename
 * character rewritten to 'U'/'V'); JPEG 2000 codestreams may be wrapped
 * into a full JP2 container when the stream extradata carries a jp2h box.
 *
 * Returns 0 on success, AVERROR(EIO) on filename/open failure, -1 on a
 * malformed JPEG 2000 codestream.
 */
static int write_packet(AVFormatContext *s, AVPacket *pkt)
{
    VideoMuxData *img = s->priv_data;
    AVIOContext *pb[3];
    char filename[1024];
    AVCodecContext *codec = s->streams[pkt->stream_index]->codec;
    int i;

    if (!img->is_pipe) {
        if (av_get_frame_filename(filename, sizeof(filename),
                                  img->path, img->img_number) < 0 &&
            img->img_number > 1) {
            av_log(s, AV_LOG_ERROR,
                   "Could not get frame filename number %d from pattern '%s'\n",
                   img->img_number, img->path);
            return AVERROR(EIO);
        }
        for (i = 0; i < 3; i++) {
            if (avio_open2(&pb[i], filename, AVIO_FLAG_WRITE,
                           &s->interrupt_callback, NULL) < 0) {
                int j;
                av_log(s, AV_LOG_ERROR, "Could not open file : %s\n",
                       filename);
                /* Fix: close any planes already opened so they do not leak
                 * when a later plane file cannot be created. */
                for (j = 0; j < i; j++)
                    avio_close(pb[j]);
                return AVERROR(EIO);
            }
            if (codec->codec_id != CODEC_ID_RAWVIDEO)
                break;
            /* Rawvideo planes go to sibling files: rewrite the last
             * filename character to 'U', then 'V'. */
            filename[strlen(filename) - 1] = 'U' + i;
        }
    } else {
        /* NOTE(review): pipe output combined with rawvideo would use
         * uninitialized pb[1]/pb[2] below; presumably that combination is
         * rejected elsewhere — confirm against the muxer's init path. */
        pb[0] = s->pb;
    }

    if (codec->codec_id == CODEC_ID_RAWVIDEO) {
        int ysize = codec->width * codec->height;
        avio_write(pb[0], pkt->data, ysize);
        avio_write(pb[1], pkt->data + ysize, (pkt->size - ysize) / 2);
        avio_write(pb[2], pkt->data + ysize + (pkt->size - ysize) / 2,
                   (pkt->size - ysize) / 2);
        avio_flush(pb[1]);
        avio_flush(pb[2]);
        avio_close(pb[1]);
        avio_close(pb[2]);
    } else {
        if (ff_guess_image2_codec(s->filename) == CODEC_ID_JPEG2000) {
            AVStream *st = s->streams[0];
            if (st->codec->extradata_size > 8 &&
                AV_RL32(st->codec->extradata + 4) == MKTAG('j','p','2','h')) {
                /* Raw codestream plus jp2h extradata: emit the JP2
                 * signature, ftyp and header boxes before the payload. */
                if (pkt->size < 8 ||
                    AV_RL32(pkt->data + 4) != MKTAG('j','p','2','c'))
                    goto error;
                avio_wb32(pb[0], 12);
                ffio_wfourcc(pb[0], "jP  ");
                avio_wb32(pb[0], 0x0D0A870A); // signature
                avio_wb32(pb[0], 20);
                ffio_wfourcc(pb[0], "ftyp");
                ffio_wfourcc(pb[0], "jp2 ");
                avio_wb32(pb[0], 0);
                ffio_wfourcc(pb[0], "jp2 ");
                avio_write(pb[0], st->codec->extradata,
                           st->codec->extradata_size);
            } else if (pkt->size < 8 ||
                       (!st->codec->extradata_size &&
                        AV_RL32(pkt->data + 4) != MKTAG('j','P',' ',' '))) { // signature
                goto error;
            }
        }
        avio_write(pb[0], pkt->data, pkt->size);
    }
    avio_flush(pb[0]);
    if (!img->is_pipe) {
        avio_close(pb[0]);
    }

    img->img_number++;
    return 0;

error:
    av_log(s, AV_LOG_ERROR, "malformed JPEG 2000 codestream\n");
    /* Fix: the output file opened above must not leak on this path. */
    if (!img->is_pipe)
        avio_close(pb[0]);
    return -1;
}
int main(int argc, char **argv) { char *in_graph_desc, **out_dev_name; int nb_out_dev = 0, nb_streams = 0; AVFilterGraph *in_graph = NULL; Stream *streams = NULL, *st; AVFrame *frame = NULL; int i, j, run = 1, ret; //av_log_set_level(AV_LOG_DEBUG); if (argc < 3) { av_log(NULL, AV_LOG_ERROR, "Usage: %s filter_graph dev:out [dev2:out2...]\n\n" "Examples:\n" "%s movie=file.nut:s=v+a xv:- alsa:default\n" "%s movie=file.nut:s=v+a uncodedframecrc:pipe:0\n", argv[0], argv[0], argv[0]); exit(1); } in_graph_desc = argv[1]; out_dev_name = argv + 2; nb_out_dev = argc - 2; av_register_all(); avdevice_register_all(); avfilter_register_all(); /* Create input graph */ if (!(in_graph = avfilter_graph_alloc())) { ret = AVERROR(ENOMEM); av_log(NULL, AV_LOG_ERROR, "Unable to alloc graph graph: %s\n", av_err2str(ret)); goto fail; } ret = avfilter_graph_parse_ptr(in_graph, in_graph_desc, NULL, NULL, NULL); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Unable to parse graph: %s\n", av_err2str(ret)); goto fail; } nb_streams = 0; for (i = 0; i < in_graph->nb_filters; i++) { AVFilterContext *f = in_graph->filters[i]; for (j = 0; j < f->nb_inputs; j++) { if (!f->inputs[j]) { av_log(NULL, AV_LOG_ERROR, "Graph has unconnected inputs\n"); ret = AVERROR(EINVAL); goto fail; } } for (j = 0; j < f->nb_outputs; j++) if (!f->outputs[j]) nb_streams++; } if (!nb_streams) { av_log(NULL, AV_LOG_ERROR, "Graph has no output stream\n"); ret = AVERROR(EINVAL); goto fail; } if (nb_out_dev != 1 && nb_out_dev != nb_streams) { av_log(NULL, AV_LOG_ERROR, "Graph has %d output streams, %d devices given\n", nb_streams, nb_out_dev); ret = AVERROR(EINVAL); goto fail; } if (!(streams = av_calloc(nb_streams, sizeof(*streams)))) { ret = AVERROR(ENOMEM); av_log(NULL, AV_LOG_ERROR, "Could not allocate streams\n"); } st = streams; for (i = 0; i < in_graph->nb_filters; i++) { AVFilterContext *f = in_graph->filters[i]; for (j = 0; j < f->nb_outputs; j++) { if (!f->outputs[j]) { if ((ret = create_sink(st++, in_graph, f, j)) < 0) 
goto fail; } } } av_assert0(st - streams == nb_streams); if ((ret = avfilter_graph_config(in_graph, NULL)) < 0) { av_log(NULL, AV_LOG_ERROR, "Failed to configure graph\n"); goto fail; } /* Create output devices */ for (i = 0; i < nb_out_dev; i++) { char *fmt = NULL, *dev = out_dev_name[i]; st = &streams[i]; if ((dev = strchr(dev, ':'))) { *(dev++) = 0; fmt = out_dev_name[i]; } ret = avformat_alloc_output_context2(&st->mux, NULL, fmt, dev); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Failed to allocate output: %s\n", av_err2str(ret)); goto fail; } if (!(st->mux->oformat->flags & AVFMT_NOFILE)) { ret = avio_open2(&st->mux->pb, st->mux->filename, AVIO_FLAG_WRITE, NULL, NULL); if (ret < 0) { av_log(st->mux, AV_LOG_ERROR, "Failed to init output: %s\n", av_err2str(ret)); goto fail; } } } for (; i < nb_streams; i++) streams[i].mux = streams[0].mux; /* Create output device streams */ for (i = 0; i < nb_streams; i++) { st = &streams[i]; if (!(st->stream = avformat_new_stream(st->mux, NULL))) { ret = AVERROR(ENOMEM); av_log(NULL, AV_LOG_ERROR, "Failed to create output stream\n"); goto fail; } st->stream->codec->codec_type = st->link->type; st->stream->time_base = st->stream->codec->time_base = st->link->time_base; switch (st->link->type) { case AVMEDIA_TYPE_VIDEO: st->stream->codec->codec_id = AV_CODEC_ID_RAWVIDEO; st->stream->avg_frame_rate = st->stream-> r_frame_rate = av_buffersink_get_frame_rate(st->sink); st->stream->codec->width = st->link->w; st->stream->codec->height = st->link->h; st->stream->codec->sample_aspect_ratio = st->link->sample_aspect_ratio; st->stream->codec->pix_fmt = st->link->format; break; case AVMEDIA_TYPE_AUDIO: st->stream->codec->channel_layout = st->link->channel_layout; st->stream->codec->channels = avfilter_link_get_channels(st->link); st->stream->codec->sample_rate = st->link->sample_rate; st->stream->codec->sample_fmt = st->link->format; st->stream->codec->codec_id = av_get_pcm_codec(st->stream->codec->sample_fmt, -1); break; default: 
av_assert0(!"reached"); } } /* Init output devices */ for (i = 0; i < nb_out_dev; i++) { st = &streams[i]; if ((ret = avformat_write_header(st->mux, NULL)) < 0) { av_log(st->mux, AV_LOG_ERROR, "Failed to init output: %s\n", av_err2str(ret)); goto fail; } } /* Check output devices */ for (i = 0; i < nb_streams; i++) { st = &streams[i]; ret = av_write_uncoded_frame_query(st->mux, st->stream->index); if (ret < 0) { av_log(st->mux, AV_LOG_ERROR, "Uncoded frames not supported on stream #%d: %s\n", i, av_err2str(ret)); goto fail; } } while (run) { ret = avfilter_graph_request_oldest(in_graph); if (ret < 0) { if (ret == AVERROR_EOF) { run = 0; } else { av_log(NULL, AV_LOG_ERROR, "Error filtering: %s\n", av_err2str(ret)); break; } } for (i = 0; i < nb_streams; i++) { st = &streams[i]; while (1) { if (!frame && !(frame = av_frame_alloc())) { ret = AVERROR(ENOMEM); av_log(NULL, AV_LOG_ERROR, "Could not allocate frame\n"); goto fail; } ret = av_buffersink_get_frame_flags(st->sink, frame, AV_BUFFERSINK_FLAG_NO_REQUEST); if (ret < 0) { if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) av_log(NULL, AV_LOG_WARNING, "Error in sink: %s\n", av_err2str(ret)); break; } if (frame->pts != AV_NOPTS_VALUE) frame->pts = av_rescale_q(frame->pts, st->link ->time_base, st->stream->time_base); ret = av_interleaved_write_uncoded_frame(st->mux, st->stream->index, frame); frame = NULL; if (ret < 0) { av_log(st->stream->codec, AV_LOG_ERROR, "Error writing frame: %s\n", av_err2str(ret)); goto fail; } } } } ret = 0; for (i = 0; i < nb_out_dev; i++) { st = &streams[i]; av_write_trailer(st->mux); } fail: av_frame_free(&frame); avfilter_graph_free(&in_graph); if (streams) { for (i = 0; i < nb_out_dev; i++) { st = &streams[i]; if (st->mux) { if (st->mux->pb) avio_closep(&st->mux->pb); avformat_free_context(st->mux); } } } av_freep(&streams); return ret < 0; }
/********************************************************************** * avformatInit ********************************************************************** * Allocates hb_mux_data_t structures, create file and write headers *********************************************************************/ static int avformatInit( hb_mux_object_t * m ) { hb_job_t * job = m->job; hb_audio_t * audio; hb_mux_data_t * track; int meta_mux; int max_tracks; int ii, ret; const char *muxer_name = NULL; uint8_t default_track_flag = 1; uint8_t need_fonts = 0; char *lang; m->delay = -1; max_tracks = 1 + hb_list_count( job->list_audio ) + hb_list_count( job->list_subtitle ); m->tracks = calloc(max_tracks, sizeof(hb_mux_data_t*)); m->oc = avformat_alloc_context(); if (m->oc == NULL) { hb_error( "Could not initialize avformat context." ); goto error; } switch (job->mux) { case HB_MUX_AV_MP4: m->time_base.num = 1; m->time_base.den = 90000; if( job->ipod_atom ) muxer_name = "ipod"; else muxer_name = "mp4"; meta_mux = META_MUX_MP4; break; case HB_MUX_AV_MKV: // libavformat is essentially hard coded such that it only // works with a timebase of 1/1000 m->time_base.num = 1; m->time_base.den = 1000; muxer_name = "matroska"; meta_mux = META_MUX_MKV; break; default: { hb_error("Invalid Mux %x", job->mux); goto error; } } m->oc->oformat = av_guess_format(muxer_name, NULL, NULL); if(m->oc->oformat == NULL) { hb_error("Could not guess output format %s", muxer_name); goto error; } av_strlcpy(m->oc->filename, job->file, sizeof(m->oc->filename)); ret = avio_open2(&m->oc->pb, job->file, AVIO_FLAG_WRITE, &m->oc->interrupt_callback, NULL); if( ret < 0 ) { hb_error( "avio_open2 failed, errno %d", ret); goto error; } /* Video track */ track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) ); job->mux_data = track; track->type = MUX_TYPE_VIDEO; track->st = avformat_new_stream(m->oc, NULL); if (track->st == NULL) { hb_error("Could not initialize video stream"); goto error; } track->st->time_base = 
m->time_base; avcodec_get_context_defaults3(track->st->codec, NULL); track->st->codec->codec_type = AVMEDIA_TYPE_VIDEO; track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; uint8_t *priv_data = NULL; int priv_size = 0; switch (job->vcodec) { case HB_VCODEC_X264: case HB_VCODEC_QSV_H264: track->st->codec->codec_id = AV_CODEC_ID_H264; /* Taken from x264 muxers.c */ priv_size = 5 + 1 + 2 + job->config.h264.sps_length + 1 + 2 + job->config.h264.pps_length; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("malloc failure"); goto error; } priv_data[0] = 1; priv_data[1] = job->config.h264.sps[1]; /* AVCProfileIndication */ priv_data[2] = job->config.h264.sps[2]; /* profile_compat */ priv_data[3] = job->config.h264.sps[3]; /* AVCLevelIndication */ priv_data[4] = 0xff; // nalu size length is four bytes priv_data[5] = 0xe1; // one sps priv_data[6] = job->config.h264.sps_length >> 8; priv_data[7] = job->config.h264.sps_length; memcpy(priv_data+8, job->config.h264.sps, job->config.h264.sps_length); priv_data[8+job->config.h264.sps_length] = 1; // one pps priv_data[9+job->config.h264.sps_length] = job->config.h264.pps_length >> 8; priv_data[10+job->config.h264.sps_length] = job->config.h264.pps_length; memcpy(priv_data+11+job->config.h264.sps_length, job->config.h264.pps, job->config.h264.pps_length ); break; case HB_VCODEC_FFMPEG_MPEG4: track->st->codec->codec_id = AV_CODEC_ID_MPEG4; if (job->config.mpeg4.length != 0) { priv_size = job->config.mpeg4.length; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("malloc failure"); goto error; } memcpy(priv_data, job->config.mpeg4.bytes, priv_size); } break; case HB_VCODEC_FFMPEG_MPEG2: track->st->codec->codec_id = AV_CODEC_ID_MPEG2VIDEO; if (job->config.mpeg4.length != 0) { priv_size = job->config.mpeg4.length; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("malloc failure"); goto error; } memcpy(priv_data, job->config.mpeg4.bytes, priv_size); } break; case HB_VCODEC_THEORA: 
{ track->st->codec->codec_id = AV_CODEC_ID_THEORA; int size = 0; ogg_packet *ogg_headers[3]; for (ii = 0; ii < 3; ii++) { ogg_headers[ii] = (ogg_packet *)job->config.theora.headers[ii]; size += ogg_headers[ii]->bytes + 2; } priv_size = size; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("malloc failure"); goto error; } size = 0; for(ii = 0; ii < 3; ii++) { AV_WB16(priv_data + size, ogg_headers[ii]->bytes); size += 2; memcpy(priv_data+size, ogg_headers[ii]->packet, ogg_headers[ii]->bytes); size += ogg_headers[ii]->bytes; } } break; default: hb_error("muxavformat: Unknown video codec: %x", job->vcodec); goto error; } track->st->codec->extradata = priv_data; track->st->codec->extradata_size = priv_size; if (job->anamorphic.mode > 0) { track->st->sample_aspect_ratio.num = job->anamorphic.par_width; track->st->sample_aspect_ratio.den = job->anamorphic.par_height; track->st->codec->sample_aspect_ratio.num = job->anamorphic.par_width; track->st->codec->sample_aspect_ratio.den = job->anamorphic.par_height; } else { track->st->sample_aspect_ratio.num = 1; track->st->sample_aspect_ratio.den = 1; track->st->codec->sample_aspect_ratio.num = 1; track->st->codec->sample_aspect_ratio.den = 1; } track->st->codec->width = job->width; track->st->codec->height = job->height; track->st->disposition |= AV_DISPOSITION_DEFAULT; int vrate_base, vrate; if( job->pass == 2 ) { hb_interjob_t * interjob = hb_interjob_get( job->h ); vrate_base = interjob->vrate_base; vrate = interjob->vrate; } else { vrate_base = job->vrate_base; vrate = job->vrate; } // If the vrate is 27000000, there's a good chance this is // a standard rate that we have in our hb_video_rates table. // Because of rounding errors and approximations made while // measuring framerate, the actual value may not be exact. So // we look for rates that are "close" and make an adjustment // to fps.den. 
if (vrate == 27000000) { const hb_rate_t *video_framerate = NULL; while ((video_framerate = hb_video_framerate_get_next(video_framerate)) != NULL) { if (abs(vrate_base - video_framerate->rate) < 10) { vrate_base = video_framerate->rate; break; } } } hb_reduce(&vrate_base, &vrate, vrate_base, vrate); if (job->mux == HB_MUX_AV_MP4) { // libavformat mp4 muxer requires that the codec time_base have the // same denominator as the stream time_base, it uses it for the // mdhd timescale. double scale = (double)track->st->time_base.den / vrate; track->st->codec->time_base.den = track->st->time_base.den; track->st->codec->time_base.num = vrate_base * scale; } else { track->st->codec->time_base.num = vrate_base; track->st->codec->time_base.den = vrate; } /* add the audio tracks */ for(ii = 0; ii < hb_list_count( job->list_audio ); ii++ ) { audio = hb_list_item( job->list_audio, ii ); track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) ); audio->priv.mux_data = track; track->type = MUX_TYPE_AUDIO; track->st = avformat_new_stream(m->oc, NULL); if (track->st == NULL) { hb_error("Could not initialize audio stream"); goto error; } avcodec_get_context_defaults3(track->st->codec, NULL); track->st->codec->codec_type = AVMEDIA_TYPE_AUDIO; track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; if (job->mux == HB_MUX_AV_MP4) { track->st->codec->time_base.num = audio->config.out.samples_per_frame; track->st->codec->time_base.den = audio->config.out.samplerate; track->st->time_base.num = 1; track->st->time_base.den = audio->config.out.samplerate; } else { track->st->codec->time_base = m->time_base; } priv_data = NULL; priv_size = 0; switch (audio->config.out.codec & HB_ACODEC_MASK) { case HB_ACODEC_DCA: case HB_ACODEC_DCA_HD: track->st->codec->codec_id = AV_CODEC_ID_DTS; break; case HB_ACODEC_AC3: track->st->codec->codec_id = AV_CODEC_ID_AC3; break; case HB_ACODEC_LAME: case HB_ACODEC_MP3: track->st->codec->codec_id = AV_CODEC_ID_MP3; break; case HB_ACODEC_VORBIS: { 
track->st->codec->codec_id = AV_CODEC_ID_VORBIS; int jj, size = 0; ogg_packet *ogg_headers[3]; for (jj = 0; jj < 3; jj++) { ogg_headers[jj] = (ogg_packet *)audio->priv.config.vorbis.headers[jj]; size += ogg_headers[jj]->bytes + 2; } priv_size = size; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("malloc failure"); goto error; } size = 0; for(jj = 0; jj < 3; jj++) { AV_WB16(priv_data + size, ogg_headers[jj]->bytes); size += 2; memcpy(priv_data+size, ogg_headers[jj]->packet, ogg_headers[jj]->bytes); size += ogg_headers[jj]->bytes; } } break; case HB_ACODEC_FFFLAC: case HB_ACODEC_FFFLAC24: track->st->codec->codec_id = AV_CODEC_ID_FLAC; if (audio->priv.config.extradata.bytes) { priv_size = audio->priv.config.extradata.length; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("malloc failure"); goto error; } memcpy(priv_data, audio->priv.config.extradata.bytes, audio->priv.config.extradata.length); } break; case HB_ACODEC_FAAC: case HB_ACODEC_FFAAC: case HB_ACODEC_CA_AAC: case HB_ACODEC_CA_HAAC: case HB_ACODEC_FDK_AAC: case HB_ACODEC_FDK_HAAC: track->st->codec->codec_id = AV_CODEC_ID_AAC; if (audio->priv.config.extradata.bytes) { priv_size = audio->priv.config.extradata.length; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("malloc failure"); goto error; } memcpy(priv_data, audio->priv.config.extradata.bytes, audio->priv.config.extradata.length); } break; default: hb_error("muxavformat: Unknown audio codec: %x", audio->config.out.codec); goto error; } track->st->codec->extradata = priv_data; track->st->codec->extradata_size = priv_size; if( default_track_flag ) { track->st->disposition |= AV_DISPOSITION_DEFAULT; default_track_flag = 0; } lang = lookup_lang_code(job->mux, audio->config.lang.iso639_2 ); if (lang != NULL) { av_dict_set(&track->st->metadata, "language", lang, 0); } track->st->codec->sample_rate = audio->config.out.samplerate; if (audio->config.out.codec & HB_ACODEC_PASS_FLAG) { 
track->st->codec->channels = av_get_channel_layout_nb_channels(audio->config.in.channel_layout); track->st->codec->channel_layout = audio->config.in.channel_layout; } else { track->st->codec->channels = hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown); track->st->codec->channel_layout = hb_ff_mixdown_xlat(audio->config.out.mixdown, NULL); } char *name; if (audio->config.out.name == NULL) { switch (track->st->codec->channels) { case 1: name = "Mono"; break; case 2: name = "Stereo"; break; default: name = "Surround"; break; } } else { name = audio->config.out.name; } av_dict_set(&track->st->metadata, "title", name, 0); } char * subidx_fmt = "size: %dx%d\n" "org: %d, %d\n" "scale: 100%%, 100%%\n" "alpha: 100%%\n" "smooth: OFF\n" "fadein/out: 50, 50\n" "align: OFF at LEFT TOP\n" "time offset: 0\n" "forced subs: %s\n" "palette: %06x, %06x, %06x, %06x, %06x, %06x, " "%06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x\n" "custom colors: OFF, tridx: 0000, " "colors: 000000, 000000, 000000, 000000\n"; int subtitle_default = -1; for( ii = 0; ii < hb_list_count( job->list_subtitle ); ii++ ) { hb_subtitle_t *subtitle = hb_list_item( job->list_subtitle, ii ); if( subtitle->config.dest == PASSTHRUSUB ) { if ( subtitle->config.default_track ) subtitle_default = ii; } } // Quicktime requires that at least one subtitle is enabled, // else it doesn't show any of the subtitles. // So check to see if any of the subtitles are flagged to be // the defualt. The default will the the enabled track, else // enable the first track. 
if (job->mux == HB_MUX_AV_MP4 && subtitle_default == -1) { subtitle_default = 0; } for( ii = 0; ii < hb_list_count( job->list_subtitle ); ii++ ) { hb_subtitle_t * subtitle; uint32_t rgb[16]; char subidx[2048]; int len; subtitle = hb_list_item( job->list_subtitle, ii ); if (subtitle->config.dest != PASSTHRUSUB) continue; track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) ); subtitle->mux_data = track; track->type = MUX_TYPE_SUBTITLE; track->st = avformat_new_stream(m->oc, NULL); if (track->st == NULL) { hb_error("Could not initialize subtitle stream"); goto error; } avcodec_get_context_defaults3(track->st->codec, NULL); track->st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE; track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; track->st->time_base = m->time_base; track->st->codec->time_base = m->time_base; track->st->codec->width = subtitle->width; track->st->codec->height = subtitle->height; priv_data = NULL; priv_size = 0; switch (subtitle->source) { case VOBSUB: { int jj; track->st->codec->codec_id = AV_CODEC_ID_DVD_SUBTITLE; for (jj = 0; jj < 16; jj++) rgb[jj] = hb_yuv2rgb(subtitle->palette[jj]); len = snprintf(subidx, 2048, subidx_fmt, subtitle->width, subtitle->height, 0, 0, "OFF", rgb[0], rgb[1], rgb[2], rgb[3], rgb[4], rgb[5], rgb[6], rgb[7], rgb[8], rgb[9], rgb[10], rgb[11], rgb[12], rgb[13], rgb[14], rgb[15]); priv_size = len + 1; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("malloc failure"); goto error; } memcpy(priv_data, subidx, priv_size); } break; case PGSSUB: { track->st->codec->codec_id = AV_CODEC_ID_HDMV_PGS_SUBTITLE; } break; case SSASUB: { if (job->mux == HB_MUX_AV_MP4) { track->st->codec->codec_id = AV_CODEC_ID_MOV_TEXT; } else { track->st->codec->codec_id = AV_CODEC_ID_SSA; need_fonts = 1; if (subtitle->extradata_size) { priv_size = subtitle->extradata_size; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("malloc failure"); goto error; } memcpy(priv_data, subtitle->extradata, 
priv_size); } } } break; case CC608SUB: case CC708SUB: case UTF8SUB: case TX3GSUB: case SRTSUB: { if (job->mux == HB_MUX_AV_MP4) track->st->codec->codec_id = AV_CODEC_ID_MOV_TEXT; else track->st->codec->codec_id = AV_CODEC_ID_TEXT; } break; default: continue; } if (track->st->codec->codec_id == AV_CODEC_ID_MOV_TEXT) { // Build codec extradata for tx3g. // If we were using a libav codec to generate this data // this would (or should) be done for us. uint8_t properties[] = { 0x00, 0x00, 0x00, 0x00, // Display Flags 0x01, // Horiz. Justification 0xff, // Vert. Justification 0x00, 0x00, 0x00, 0xff, // Bg color 0x00, 0x00, 0x00, 0x00, // Default text box 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Reserved 0x00, 0x01, // Font ID 0x00, // Font face 0x18, // Font size 0xff, 0xff, 0xff, 0xff, // Fg color // Font table: 0x00, 0x00, 0x00, 0x12, // Font table size 'f','t','a','b', // Tag 0x00, 0x01, // Count 0x00, 0x01, // Font ID 0x05, // Font name length 'A','r','i','a','l' // Font name }; int width, height = 60; if (job->anamorphic.mode) width = job->width * ((float)job->anamorphic.par_width / job->anamorphic.par_height); else width = job->width; track->st->codec->width = width; track->st->codec->height = height; properties[14] = height >> 8; properties[15] = height & 0xff; properties[16] = width >> 8; properties[17] = width & 0xff; priv_size = sizeof(properties); priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("malloc failure"); goto error; } memcpy(priv_data, properties, priv_size); } track->st->codec->extradata = priv_data; track->st->codec->extradata_size = priv_size; if ( ii == subtitle_default ) { track->st->disposition |= AV_DISPOSITION_DEFAULT; } lang = lookup_lang_code(job->mux, subtitle->iso639_2 ); if (lang != NULL) { av_dict_set(&track->st->metadata, "language", lang, 0); } }
/*
 * Parse an HLS (Apple HTTP Live Streaming) M3U8 playlist.
 *
 * Handles both master playlists (variant lists) and media playlists
 * (segment lists). If @in is NULL the playlist is fetched from @url and the
 * AVIOContext is closed again before returning; otherwise the caller's
 * context is read and left open.
 *
 * @param c   demuxer context owning the variant list
 * @param url playlist URL, used as the base for resolving relative segment
 *            and key URIs
 * @param var variant to (re)populate, or NULL; a variant is created lazily
 *            the first time a media-playlist tag is seen
 * @param in  already-open playlist stream, or NULL to open @url here
 * @return 0 on success, a negative AVERROR code on failure
 */
static int parse_playlist(AppleHTTPContext *c, const char *url,
                          struct variant *var, AVIOContext *in)
{
    int ret = 0, duration = 0, is_segment = 0, is_variant = 0, bandwidth = 0;
    enum KeyType key_type = KEY_NONE;
    uint8_t iv[16] = "";
    int has_iv = 0;
    char key[MAX_URL_SIZE] = "";
    char line[1024];
    const char *ptr;
    int close_in = 0;  /* nonzero if we opened `in` ourselves and must close it */

    if (!in) {
        close_in = 1;
        if ((ret = avio_open2(&in, url, AVIO_FLAG_READ,
                              c->interrupt_callback, NULL)) < 0)
            return ret;
    }

    /* An M3U8 playlist must start with the #EXTM3U magic line. */
    read_chomp_line(in, line, sizeof(line));
    if (strcmp(line, "#EXTM3U")) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    /* On a reload, drop the old segment list before re-reading. */
    if (var) {
        free_segment_list(var);
        var->finished = 0;
    }
    while (!in->eof_reached) {
        read_chomp_line(in, line, sizeof(line));
        if (av_strstart(line, "#EXT-X-STREAM-INF:", &ptr)) {
            /* Master playlist: the NEXT non-tag line is a variant URL. */
            struct variant_info info = {{0}};
            is_variant = 1;
            ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_variant_args,
                               &info);
            bandwidth = atoi(info.bandwidth);
        } else if (av_strstart(line, "#EXT-X-KEY:", &ptr)) {
            /* Encryption key; applies to all following segments. */
            struct key_info info = {{0}};
            ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_key_args,
                               &info);
            key_type = KEY_NONE;
            has_iv = 0;
            if (!strcmp(info.method, "AES-128"))
                key_type = KEY_AES_128;
            if (!strncmp(info.iv, "0x", 2) || !strncmp(info.iv, "0X", 2)) {
                ff_hex_to_data(iv, info.iv + 2);
                has_iv = 1;
            }
            av_strlcpy(key, info.uri, sizeof(key));
        } else if (av_strstart(line, "#EXT-X-TARGETDURATION:", &ptr)) {
            /* Media-playlist tag: create the variant lazily if needed. */
            if (!var) {
                var = new_variant(c, 0, url, NULL);
                if (!var) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }
            var->target_duration = atoi(ptr);
        } else if (av_strstart(line, "#EXT-X-MEDIA-SEQUENCE:", &ptr)) {
            if (!var) {
                var = new_variant(c, 0, url, NULL);
                if (!var) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }
            var->start_seq_no = atoi(ptr);
        } else if (av_strstart(line, "#EXT-X-ENDLIST", &ptr)) {
            /* No more segments will be appended (VOD / ended live stream). */
            if (var)
                var->finished = 1;
        } else if (av_strstart(line, "#EXTINF:", &ptr)) {
            /* Duration of the NEXT non-tag line's segment. */
            is_segment = 1;
            duration = atoi(ptr);
        } else if (av_strstart(line, "#", NULL)) {
            continue;  /* unknown tag or comment: ignore */
        } else if (line[0]) {
            /* Plain URL line: either a variant URL or a segment URL,
             * depending on which tag preceded it. */
            if (is_variant) {
                if (!new_variant(c, bandwidth, line, url)) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                is_variant = 0;
                bandwidth = 0;
            }
            if (is_segment) {
                struct segment *seg;
                if (!var) {
                    var = new_variant(c, 0, url, NULL);
                    if (!var) {
                        ret = AVERROR(ENOMEM);
                        goto fail;
                    }
                }
                seg = av_malloc(sizeof(struct segment));
                if (!seg) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                seg->duration = duration;
                seg->key_type = key_type;
                if (has_iv) {
                    memcpy(seg->iv, iv, sizeof(iv));
                } else {
                    /* Per the HLS spec, a missing IV defaults to the media
                     * sequence number in big-endian. */
                    int seq = var->start_seq_no + var->n_segments;
                    memset(seg->iv, 0, sizeof(seg->iv));
                    AV_WB32(seg->iv + 12, seq);
                }
                /* Key and segment URIs may be relative to the playlist URL. */
                ff_make_absolute_url(seg->key, sizeof(seg->key), url, key);
                ff_make_absolute_url(seg->url, sizeof(seg->url), url, line);
                dynarray_add(&var->segments, &var->n_segments, seg);
                is_segment = 0;
            }
        }
    }
    /* Remember when we last refreshed, for live-playlist reload timing. */
    if (var)
        var->last_load_time = av_gettime();

fail:
    if (close_in)
        avio_close(in);
    return ret;
}
/*
 * Allocate and initialize a frame_pusher that encodes AAC audio and H.264
 * video into the container guessed from the output path, and write the
 * container header.
 *
 * @param o_fp            receives the new frame_pusher on success (NULL on entry)
 * @param path            output file path; also used to guess the container
 * @param aud_samplerate  audio sample rate in Hz (also the audio time base)
 * @param vid_framerate   video frame rate; its inverse becomes the video time base
 * @param width           video width in pixels
 * @param height          video height in pixels
 * @param vid_bitrate     video bit rate in bit/s; <= 0 selects 1200000
 * @return 0 on success, a negative AVERROR code on failure
 *
 * NOTE(review): on failure the partially-built context is not torn down
 * (matches the original behavior); callers are expected to abort.
 */
int frame_pusher_open(frame_pusher **o_fp, const char *path,
                      int aud_samplerate, AVRational vid_framerate,
                      int width, int height, int vid_bitrate)
{
    *o_fp = NULL;
    int ret;
    frame_pusher *fp = (frame_pusher *)av_malloc(sizeof(frame_pusher));
    if (!fp) return AVERROR(ENOMEM);            /* fix: was dereferenced unchecked */

    /* Guess the container format from the file name. */
    AVOutputFormat *ofmt = av_guess_format(NULL, path, NULL);
    if (!ofmt) {
        ofmt = av_oformat_next(NULL);           /* fall back to the first registered format */
        av_log(NULL, AV_LOG_WARNING, "Unsupported container format. Using %s instead.\n", ofmt->name);
        /* TODO: Add the extension to the path. */
    }
    av_log(NULL, AV_LOG_INFO, "Using format %s\n", ofmt->name);

    /* Open the output file. */
    AVIOContext *io_ctx;
    if ((ret = avio_open2(&io_ctx, path, AVIO_FLAG_WRITE, NULL, NULL)) < 0) return ret;

    /* Create the format context. */
    fp->fmt_ctx = avformat_alloc_context();
    if (!fp->fmt_ctx) return AVERROR(ENOMEM);   /* fix: was unchecked */
    fp->fmt_ctx->oformat = ofmt;
    fp->fmt_ctx->pb = io_ctx;

    /* The audio stream (AAC). */
    AVCodec *aud_codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!aud_codec) return AVERROR_ENCODER_NOT_FOUND;   /* fix: was unchecked */
    fp->aud_stream = avformat_new_stream(fp->fmt_ctx, aud_codec);
    if (!fp->aud_stream) return AVERROR(ENOMEM);        /* fix: was unchecked */
    fp->aud_stream->id = 0;
    fp->aud_stream->codec->codec_id = AV_CODEC_ID_AAC;
    fp->aud_stream->codec->bit_rate = 64000;
    fp->aud_stream->codec->sample_rate = fp->aud_samplerate = aud_samplerate;
    /* See http://stackoverflow.com/questions/22989838
     * TODO: Add an option to set the codec and the sample format. */
    fp->aud_stream->codec->sample_fmt = aud_codec->sample_fmts[0];
    fp->aud_stream->codec->channel_layout = AV_CH_LAYOUT_STEREO;
    fp->aud_stream->codec->channels = 2;
    fp->aud_stream->codec->time_base = fp->aud_stream->time_base =
        (AVRational){1, aud_samplerate};

    /* The video stream (H.264). */
    AVCodec *vid_codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!vid_codec) return AVERROR_ENCODER_NOT_FOUND;   /* fix: was unchecked */
    fp->vid_stream = avformat_new_stream(fp->fmt_ctx, vid_codec);
    if (!fp->vid_stream) return AVERROR(ENOMEM);        /* fix: was unchecked */
    fp->vid_width = fp->vid_stream->codec->width = width;
    fp->vid_height = fp->vid_stream->codec->height = height;
    fp->vid_stream->id = 1;
    /* ATTENTION: fp->vid_stream->codec is an (AVCodecContext *) rather than (AVCodec *)! */
    fp->vid_stream->codec->codec_id = AV_CODEC_ID_H264;
    fp->vid_stream->codec->bit_rate = vid_bitrate > 0 ? vid_bitrate : 1200000;
    fp->vid_stream->codec->pix_fmt = AV_PIX_FMT_YUV420P;
    fp->vid_stream->codec->gop_size = 24;
    fp->vid_stream->codec->time_base = fp->vid_stream->time_base =
        (AVRational){vid_framerate.den, vid_framerate.num};

    /* Enable experimental codecs such as AAC. */
    fp->aud_stream->codec->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
    fp->vid_stream->codec->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

    /* Some formats want stream headers to be separate.
     * XXX: MPEG-4 doesn't have AVFMT_GLOBALHEADER in its format flags?? */
    /* if (fp->fmt_ctx->flags & AVFMT_GLOBALHEADER) */
    fp->aud_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    fp->vid_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

    if ((ret = avcodec_open2(fp->aud_stream->codec, aud_codec, NULL)) < 0) return ret;
    if ((ret = avcodec_open2(fp->vid_stream->codec, vid_codec, NULL)) < 0) return ret;

    /* Trigger a full initialization on the format context and write the header. */
    if ((ret = avformat_write_header(fp->fmt_ctx, NULL)) < 0) return ret;  /* fix: return was ignored */

    /* Miscellaneous initializations. */
    fp->first_packet = 1;
    fp->last_aud_pts = fp->last_vid_pts = 0;
    fp->nb_aud_buffered_samples = 0;

    /* Video: one reusable frame backed by a YUV420P picture buffer. */
    fp->vid_frame = av_frame_alloc();
    fp->pict_bufsize = avpicture_get_size(AV_PIX_FMT_YUV420P, width, height);
    fp->pict_buf = (uint8_t *)av_malloc(fp->pict_bufsize);
    if (!fp->vid_frame || !fp->pict_buf) return AVERROR(ENOMEM);  /* fix: were unchecked */
    /* Assign the video frame with the allocated buffer. */
    avpicture_fill((AVPicture *)fp->vid_frame, fp->pict_buf,
                   AV_PIX_FMT_YUV420P, width, height);
    /* RGB24 input -> YUV420P encoder input (fix: consistently use AV_PIX_FMT_*). */
    fp->sws_ctx = sws_getContext(
        width, height, AV_PIX_FMT_RGB24,
        width, height, AV_PIX_FMT_YUV420P,
        SWS_BILINEAR, NULL, NULL, NULL);

    /* Audio: one frame handed to the encoder, one staging buffer. */
    fp->aud_frame = av_frame_alloc();
    fp->aud_buf = av_frame_alloc();
    if (!fp->aud_frame || !fp->aud_buf) return AVERROR(ENOMEM);   /* fix: were unchecked */
    fp->aud_buf->format = fp->aud_frame->format = fp->aud_stream->codec->sample_fmt;
    fp->aud_buf->channel_layout = fp->aud_frame->channel_layout = fp->aud_stream->codec->channel_layout;
    fp->aud_buf->sample_rate = fp->aud_frame->sample_rate = fp->aud_stream->codec->sample_rate;
    if (aud_codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE) {
        fp->nb_aud_samples_per_frame = 4096;
        av_log(NULL, AV_LOG_INFO, "frame_pusher: codec has variable frame size capability\n");
    } else fp->nb_aud_samples_per_frame = fp->aud_stream->codec->frame_size;
    fp->aud_buf->nb_samples = fp->aud_frame->nb_samples = fp->nb_aud_samples_per_frame;
    av_log(NULL, AV_LOG_INFO, "frame_pusher: number of samples per frame = %d\n",
           fp->nb_aud_samples_per_frame);
    if ((ret = av_frame_get_buffer(fp->aud_frame, 0)) < 0) return ret;
    if ((ret = av_frame_get_buffer(fp->aud_buf, 0)) < 0) return ret;

    /* The audio resampling context: S16 interleaved input -> encoder format. */
    fp->swr_ctx = swr_alloc();
    if (!fp->swr_ctx) {
        av_log(NULL, AV_LOG_ERROR,
               "frame_pusher: Cannot initialize audio resampling library"
               "(possibly caused by insufficient memory)\n");
        return AVERROR_UNKNOWN;
    }
    av_opt_set_channel_layout(fp->swr_ctx, "in_channel_layout",
                              fp->aud_stream->codec->channel_layout, 0);
    av_opt_set_channel_layout(fp->swr_ctx, "out_channel_layout",
                              fp->aud_stream->codec->channel_layout, 0);
    av_opt_set_int(fp->swr_ctx, "in_sample_rate", fp->aud_stream->codec->sample_rate, 0);
    av_opt_set_int(fp->swr_ctx, "out_sample_rate", fp->aud_stream->codec->sample_rate, 0);
    av_opt_set_sample_fmt(fp->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
    av_opt_set_sample_fmt(fp->swr_ctx, "out_sample_fmt", fp->aud_stream->codec->sample_fmt, 0);
    if ((ret = swr_init(fp->swr_ctx)) < 0) return ret;

    *o_fp = fp;
    return 0;
}
/*
 * Demuxer thread entry point (runs on its own SDL thread).
 *
 * Opens the media file named in is->filename, locates the first video and
 * audio streams, opens their decoders via stream_component_open(), then
 * loops reading packets and routing them to the audio/video packet queues
 * until EOF, an unrecoverable read error, or is->quit is set.  On exit it
 * posts FF_QUIT_EVENT so the main loop can shut down.
 *
 * @param arg the VideoState for this player instance (as void*)
 * @return 0 normally, -1 on early setup failure
 *
 * NOTE(review): the early `return -1` paths below skip the fail: block, so
 * no FF_QUIT_EVENT is posted on setup failure — confirm the main loop copes.
 */
int decode_thread(void *arg) {

  VideoState *is = (VideoState *)arg;
  AVFormatContext *pFormatCtx = NULL;
  AVPacket pkt1, *packet = &pkt1;

  AVDictionary *io_dict = NULL;
  AVIOInterruptCB callback;

  int video_index = -1;
  int audio_index = -1;
  int i;

  is->videoStream=-1;
  is->audioStream=-1;

  global_video_state = is;
  // will interrupt blocking functions if we quit!
  callback.callback = decode_interrupt_cb;
  callback.opaque = is;
  // Flags == 0 means read-only access.
  if (avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict))
  {
    fprintf(stderr, "Unable to open I/O for %s\n", is->filename);
    return -1;
  }

  // Open video file
  if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL)!=0)
    return -1; // Couldn't open file

  is->pFormatCtx = pFormatCtx;

  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information

  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, is->filename, 0);

  // Find the first video stream and the first audio stream
  for(i=0; i<pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
       video_index < 0) {
      video_index=i;
    }
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
       audio_index < 0) {
      audio_index=i;
    }
  }
  if(audio_index >= 0) {
    stream_component_open(is, audio_index);
  }
  if(video_index >= 0) {
    stream_component_open(is, video_index);
  }

  // This variant requires BOTH streams to open successfully.
  if(is->videoStream < 0 || is->audioStream < 0) {
    fprintf(stderr, "%s: could not open codecs\n", is->filename);
    goto fail;
  }

  // main decode loop
  for(;;) {
    if(is->quit) {
      break;
    }
    // seek stuff goes here
    // Throttle when the consumers are already holding plenty of data.
    if(is->audioq.size > MAX_AUDIOQ_SIZE ||
       is->videoq.size > MAX_VIDEOQ_SIZE) {
      SDL_Delay(10);
      continue;
    }
    if(av_read_frame(is->pFormatCtx, packet) < 0) {
      if(is->pFormatCtx->pb->error == 0) {
        SDL_Delay(100); /* no error; wait for user input */
        continue;
      } else {
        break;
      }
    }
    // Is this a packet from the video stream?
    if(packet->stream_index == is->videoStream) {
      packet_queue_put(&is->videoq, packet);
    } else if(packet->stream_index == is->audioStream) {
      packet_queue_put(&is->audioq, packet);
    } else {
      // Packet from a stream we don't play: drop it.
      av_free_packet(packet);
    }
  }
  /* all done - wait for it */
  while(!is->quit) {
    SDL_Delay(100);
  }

 fail:
  {
    // Tell the main event loop to tear everything down.
    SDL_Event event;
    event.type = FF_QUIT_EVENT;
    event.user.data1 = is;
    SDL_PushEvent(&event);
  }
  return 0;
}
/*
 * img2 muxer: write one packet (one image) to its own output file.
 *
 * Output file naming: either a fixed path (update mode), a strftime()
 * pattern, or an av_get_frame_filename() sequence pattern.  In
 * split_planes mode the Y/U/V (and alpha) planes go to separate files
 * whose last filename character is replaced by 'U'/'V'/'A'.  With
 * use_rename, data is written to "<name>.tmp" files that are renamed into
 * place at the end (atomic-ish update for watchers).  With img->muxer set,
 * the packet is wrapped in a per-image container (e.g. one-image muxer)
 * instead of being written raw.
 *
 * @return 0 on success, negative AVERROR on failure
 */
static int write_packet(AVFormatContext *s, AVPacket *pkt)
{
    VideoMuxData *img = s->priv_data;
    AVIOContext *pb[4];                  /* one context per plane file */
    char filename[1024];
    AVCodecContext *codec = s->streams[pkt->stream_index]->codec;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(codec->pix_fmt);
    int i;
    int nb_renames = 0;

    if (!img->is_pipe) {
        /* Build the output filename for this image. */
        if (img->update) {
            av_strlcpy(filename, img->path, sizeof(filename));
        } else if (img->use_strftime) {
            time_t now0;
            struct tm *tm, tmpbuf;
            time(&now0);
            tm = localtime_r(&now0, &tmpbuf);
            if (!strftime(filename, sizeof(filename), img->path, tm)) {
                av_log(s, AV_LOG_ERROR, "Could not get frame filename with strftime\n");
                return AVERROR(EINVAL);
            }
        } else if (av_get_frame_filename(filename, sizeof(filename), img->path,
                                         img->img_number) < 0 &&
                   img->img_number > 1) {
            av_log(s, AV_LOG_ERROR,
                   "Could not get frame filename number %d from pattern '%s' (either set updatefirst or use a pattern like %%03d within the filename pattern)\n",
                   img->img_number, img->path);
            return AVERROR(EINVAL);
        }
        /* Open one file per plane (or just one, when not splitting). */
        for (i = 0; i < 4; i++) {
            snprintf(img->tmp[i], sizeof(img->tmp[i]), "%s.tmp", filename);
            av_strlcpy(img->target[i], filename, sizeof(img->target[i]));
            if (avio_open2(&pb[i], img->use_rename ? img->tmp[i] : filename,
                           AVIO_FLAG_WRITE, &s->interrupt_callback, NULL) < 0) {
                av_log(s, AV_LOG_ERROR, "Could not open file : %s\n",
                       img->use_rename ? img->tmp[i] : filename);
                return AVERROR(EIO);
            }
            if (!img->split_planes || i+1 >= desc->nb_components)
                break;
            /* Next plane file: swap the last character for U/V/A. */
            filename[strlen(filename) - 1] = "UVAx"[i];
        }
        if (img->use_rename)
            nb_renames = i + 1;
    } else {
        /* Pipe mode: everything goes to the muxer's own pb. */
        pb[0] = s->pb;
    }

    if (img->split_planes) {
        /* Raw planar YUV: packet data is Y plane, then U, V (and alpha). */
        int ysize = codec->width * codec->height;
        int usize = AV_CEIL_RSHIFT(codec->width,  desc->log2_chroma_w) *
                    AV_CEIL_RSHIFT(codec->height, desc->log2_chroma_h);
        if (desc->comp[0].depth >= 9) {
            /* >8-bit samples occupy two bytes each. */
            ysize *= 2;
            usize *= 2;
        }
        avio_write(pb[0], pkt->data                    , ysize);
        avio_write(pb[1], pkt->data + ysize            , usize);
        avio_write(pb[2], pkt->data + ysize + usize    , usize);
        avio_closep(&pb[1]);
        avio_closep(&pb[2]);
        if (desc->nb_components > 3) {
            avio_write(pb[3], pkt->data + ysize + 2*usize, ysize);
            avio_closep(&pb[3]);
        }
    } else if (img->muxer) {
        /* Wrap the single image in its own container via a sub-muxer. */
        int ret;
        AVStream *st;
        AVPacket pkt2 = {0};
        AVFormatContext *fmt = NULL;

        av_assert0(!img->split_planes);

        ret = avformat_alloc_output_context2(&fmt, NULL, img->muxer, s->filename);
        if (ret < 0)
            return ret;
        st = avformat_new_stream(fmt, NULL);
        if (!st) {
            avformat_free_context(fmt);
            return AVERROR(ENOMEM);
        }
        st->id = pkt->stream_index;

        fmt->pb = pb[0];
        if ((ret = av_copy_packet(&pkt2, pkt))                            < 0 ||
            (ret = av_dup_packet(&pkt2))                                  < 0 ||
            (ret = avcodec_copy_context(st->codec, s->streams[0]->codec)) < 0 ||
            (ret = avformat_write_header(fmt, NULL))                      < 0 ||
            (ret = av_interleaved_write_frame(fmt, &pkt2))                < 0 ||
            (ret = av_write_trailer(fmt))                                 < 0) {
            av_packet_unref(&pkt2);
            avformat_free_context(fmt);
            return ret;
        }
        av_packet_unref(&pkt2);
        avformat_free_context(fmt);
    } else {
        /* Plain case: dump the coded image bytes verbatim. */
        avio_write(pb[0], pkt->data, pkt->size);
    }
    avio_flush(pb[0]);
    if (!img->is_pipe) {
        avio_closep(&pb[0]);
        /* Move the .tmp files into their final names. */
        for (i = 0; i < nb_renames; i++) {
            ff_rename(img->tmp[i], img->target[i], s);
        }
    }

    img->img_number++;
    return 0;
}
/*
 * Demuxer thread entry point (runs on its own SDL thread).
 *
 * Variant with seeking and optional audio resampling support: opens the
 * media file, finds the first audio/video streams, optionally configures a
 * resampler (libavresample or libswresample, selected at compile time) when
 * the audio is not S16, then loops reading packets into the audio/video
 * queues, servicing seek requests.  Posts FF_QUIT_EVENT on exit.
 *
 * NOTE(review): the early `return -1` setup-failure paths skip the fail:
 * block, so no FF_QUIT_EVENT is posted in those cases — confirm the main
 * loop copes.
 */
int decode_thread(void *arg) {

  VideoState *is = (VideoState *)arg;
  AVFormatContext *pFormatCtx = NULL;
  AVPacket pkt1, *packet = &pkt1;

  AVDictionary *io_dict = NULL;
  AVIOInterruptCB callback;

  int video_index = -1;
  int audio_index = -1;
  int i;

  is->videoStream = -1;
  is->audioStream = -1;
  is->audio_need_resample = 0;

  global_video_state = is;
  // will interrupt blocking functions if we quit!
  callback.callback = decode_interrupt_cb;
  callback.opaque = is;

  // Flags == 0 means read-only access.
  if(avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict)) {
    fprintf(stderr, "Unable to open I/O for %s\n", is->filename);
    return -1;
  }

  // Open video file
  if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL) != 0) {
    return -1; // Couldn't open file
  }

  is->pFormatCtx = pFormatCtx;

  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL) < 0) {
    return -1; // Couldn't find stream information
  }

  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, is->filename, 0);

  // Find the first video stream and the first audio stream
  for(i = 0; i < pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
        video_index < 0) {
      video_index = i;
    }

    if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
        audio_index < 0) {
      audio_index = i;
    }
  }

  if(audio_index >= 0) {
    stream_component_open(is, audio_index);
  }

  if(video_index >= 0) {
    stream_component_open(is, video_index);
  }

  // Unlike the strict variant, this one only needs ONE stream to open.
  if(is->videoStream < 0 && is->audioStream < 0) {
    fprintf(stderr, "%s: could not open codecs\n", is->filename);
    goto fail;
  }

#ifdef __RESAMPLER__
  // Non-S16 audio must be resampled to 44100 Hz stereo S16 for SDL output.
  if( audio_index >= 0
      && pFormatCtx->streams[audio_index]->codec->sample_fmt != AV_SAMPLE_FMT_S16) {
    is->audio_need_resample = 1;
    is->pResampledOut = NULL;
    is->pSwrCtx = NULL;

    printf("Configure resampler: ");

#ifdef __LIBAVRESAMPLE__
    printf("libAvResample\n");
    is->pSwrCtx = avresample_alloc_context();
#endif

#ifdef __LIBSWRESAMPLE__
    printf("libSwResample\n");
    is->pSwrCtx = swr_alloc();
#endif

    // Some MP3/WAV don't tell this so make the assumption that
    // they are stereo, not 5.1.
    if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0
        && pFormatCtx->streams[audio_index]->codec->channels == 2) {
      pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_STEREO;
    } else if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0
               && pFormatCtx->streams[audio_index]->codec->channels == 1) {
      pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_MONO;
    } else if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0
               && pFormatCtx->streams[audio_index]->codec->channels == 0) {
      pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_STEREO;
      pFormatCtx->streams[audio_index]->codec->channels = 2;
    }

    av_opt_set_int(is->pSwrCtx, "in_channel_layout",
                   pFormatCtx->streams[audio_index]->codec->channel_layout, 0);
    av_opt_set_int(is->pSwrCtx, "in_sample_fmt",
                   pFormatCtx->streams[audio_index]->codec->sample_fmt, 0);
    av_opt_set_int(is->pSwrCtx, "in_sample_rate",
                   pFormatCtx->streams[audio_index]->codec->sample_rate, 0);
    av_opt_set_int(is->pSwrCtx, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
    av_opt_set_int(is->pSwrCtx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
    av_opt_set_int(is->pSwrCtx, "out_sample_rate", 44100, 0);

#ifdef __LIBAVRESAMPLE__
    if (avresample_open(is->pSwrCtx) < 0) {
#else
    if (swr_init(is->pSwrCtx) < 0) {
#endif
      // Resampler setup failed: fall back to playing unresampled audio.
      fprintf(stderr, " ERROR!! From Samplert: %d Hz Sample format: %s\n",
              pFormatCtx->streams[audio_index]->codec->sample_rate,
              av_get_sample_fmt_name(pFormatCtx->streams[audio_index]->codec->sample_fmt));
      fprintf(stderr, " To 44100 Sample format: s16\n");
      is->audio_need_resample = 0;
      is->pSwrCtx = NULL;;
    }

  }
#endif

  // main decode loop
  for(;;) {
    if(is->quit) {
      break;
    }

    // seek stuff goes here
    if(is->seek_req) {
      int stream_index = -1;
      int64_t seek_target = is->seek_pos;

      // Seek on the video stream if present, else on the audio stream.
      if(is->videoStream >= 0) {
        stream_index = is->videoStream;
      } else if(is->audioStream >= 0) {
        stream_index = is->audioStream;
      }

      if(stream_index >= 0) {
        // Convert from AV_TIME_BASE units to the stream's own time base.
        seek_target = av_rescale_q(seek_target, AV_TIME_BASE_Q,
                                   pFormatCtx->streams[stream_index]->time_base);
      }

      if(av_seek_frame(is->pFormatCtx, stream_index, seek_target,
                       is->seek_flags) < 0) {
        fprintf(stderr, "%s: error while seeking\n", is->pFormatCtx->filename);
      } else {
        // Flush stale packets and inject the flush marker so the decoders
        // reset their state.
        if(is->audioStream >= 0) {
          packet_queue_flush(&is->audioq);
          packet_queue_put(&is->audioq, &flush_pkt);
        }

        if(is->videoStream >= 0) {
          packet_queue_flush(&is->videoq);
          packet_queue_put(&is->videoq, &flush_pkt);
        }
      }

      is->seek_req = 0;
    }

    // Throttle when the consumers are already holding plenty of data.
    if(is->audioq.size > MAX_AUDIOQ_SIZE ||
        is->videoq.size > MAX_VIDEOQ_SIZE) {
      SDL_Delay(10);
      continue;
    }

    if(av_read_frame(is->pFormatCtx, packet) < 0) {
      if(is->pFormatCtx->pb->error == 0) {
        SDL_Delay(100); /* no error; wait for user input */
        continue;
      } else {
        break;
      }
    }

    // Is this a packet from the video stream?
    if(packet->stream_index == is->videoStream) {
      packet_queue_put(&is->videoq, packet);
    } else if(packet->stream_index == is->audioStream) {
      packet_queue_put(&is->audioq, packet);
    } else {
      // Packet from a stream we don't play: drop it.
      av_free_packet(packet);
    }
  }

  /* all done - wait for it */
  while(!is->quit) {
    SDL_Delay(100);
  }

fail: {
    // Tell the main event loop to tear everything down.
    SDL_Event event;
    event.type = FF_QUIT_EVENT;
    event.user.data1 = is;
    SDL_PushEvent(&event);
  }
  return 0;
}

/*
 * Request an asynchronous seek to absolute position `pos` (AV_TIME_BASE
 * units).  `rel` < 0 means seeking backwards (enables
 * AVSEEK_FLAG_BACKWARD).  Ignored if a previous seek is still pending;
 * the decode thread services the request.
 */
void stream_seek(VideoState *is, int64_t pos, int rel) {
  if(!is->seek_req) {
    is->seek_pos = pos;
    is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
    is->seek_req = 1;
  }
}

/*
 * Program entry point: initializes SDL and libavformat, creates the video
 * window, spawns the decode thread, and runs the SDL event loop handling
 * seek keys (arrows), refresh/alloc events, and quit.
 */
int main(int argc, char *argv[]) {

  SDL_Event event;
  //double pts;
  VideoState *is;

  is = av_mallocz(sizeof(VideoState));

  if(argc < 2) {
    fprintf(stderr, "Usage: test <file>\n");
    exit(1);
  }

  // Register all formats and codecs
  av_register_all();

  if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
    fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
    exit(1);
  }

  // Make a screen to put our video
#ifndef __DARWIN__
  screen = SDL_SetVideoMode(640, 480, 0, 0);
#else
  screen = SDL_SetVideoMode(640, 480, 24, 0);
#endif

  if(!screen) {
    fprintf(stderr, "SDL: could not set video mode - exiting\n");
    exit(1);
  }

  av_strlcpy(is->filename, argv[1], 1024);

  is->pictq_mutex = SDL_CreateMutex();
  is->pictq_cond = SDL_CreateCond();

  // First refresh 40 ms from now.
  schedule_refresh(is, 40);

  is->av_sync_type = DEFAULT_AV_SYNC_TYPE;
  is->parse_tid = SDL_CreateThread(decode_thread, is);

  if(!is->parse_tid) {
    av_free(is);
    return -1;
  }

  // Sentinel packet pushed through the queues after a seek.
  av_init_packet(&flush_pkt);
  flush_pkt.data = (unsigned char *)"FLUSH";

  for(;;) {
    double incr, pos;
    SDL_WaitEvent(&event);

    switch(event.type) {
      case SDL_KEYDOWN:
        switch(event.key.keysym.sym) {
          case SDLK_LEFT:
            incr = -10.0;
            goto do_seek;

          case SDLK_RIGHT:
            incr = 10.0;
            goto do_seek;

          case SDLK_UP:
            incr = 60.0;
            goto do_seek;

          case SDLK_DOWN:
            incr = -60.0;
            goto do_seek;

          do_seek:
            if(global_video_state) {
              // Seek relative to the current master clock (seconds).
              pos = get_master_clock(global_video_state);
              pos += incr;
              stream_seek(global_video_state,
                          (int64_t)(pos * AV_TIME_BASE), incr);
            }
            break;

          default:
            break;
        }
        break;

      case FF_QUIT_EVENT:
      case SDL_QUIT:
        is->quit = 1;
        /*
         * If the video has finished playing, then both the picture and
         * audio queues are waiting for more data.  Make them stop
         * waiting and terminate normally.
         */
        SDL_CondSignal(is->audioq.cond);
        SDL_CondSignal(is->videoq.cond);
        SDL_Quit();
        exit(0);
        break;

      case FF_ALLOC_EVENT:
        alloc_picture(event.user.data1);
        break;

      case FF_REFRESH_EVENT:
        video_refresh_timer(event.user.data1);
        break;

      default:
        break;
    }
  }

  return 0;
}
/*
 * img2 demuxer: read one image (or one set of split planes) as a packet.
 *
 * In file mode, opens the next file(s) in the numbered/glob sequence (up to
 * three files when split_planes is set, with the last filename character
 * replaced by 'U'/'V'), probes the codec on the first image if unknown,
 * and reads all files into a single packet.  In pipe mode, reads from the
 * demuxer's own pb.  Optionally stamps pts from the file mtime.
 *
 * Fixes vs. the original: the stat() failure path leaked the freshly
 * allocated packet and the open AVIOContexts; the probe-read and
 * av_new_packet failure paths leaked the open AVIOContexts; the inner
 * probe-scope `int ret` shadowed the outer `ret[3]` array (renamed).
 *
 * @return 0 on success, AVERROR_EOF at end of sequence, negative AVERROR
 *         on failure
 */
int ff_img_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoDemuxData *s = s1->priv_data;
    char filename_bytes[1024];
    char *filename = filename_bytes;
    int i;
    int res;
    int size[3]           = { 0 };
    int ret[3]            = { 0 };
    AVIOContext *f[3]     = { NULL };
    AVCodecContext *codec = s1->streams[0]->codec;

    if (!s->is_pipe) {
        /* loop over input */
        if (s->loop && s->img_number > s->img_last) {
            s->img_number = s->img_first;
        }
        if (s->img_number > s->img_last)
            return AVERROR_EOF;
        if (s->use_glob) {
#if HAVE_GLOB
            filename = s->globstate.gl_pathv[s->img_number];
#endif
        } else {
            if (av_get_frame_filename(filename_bytes, sizeof(filename_bytes),
                                      s->path,
                                      s->img_number) < 0 && s->img_number > 1)
                return AVERROR(EIO);
        }
        /* Open one file per plane; the second/third are optional. */
        for (i = 0; i < 3; i++) {
            if (avio_open2(&f[i], filename, AVIO_FLAG_READ,
                           &s1->interrupt_callback, NULL) < 0) {
                if (i >= 1)
                    break;
                av_log(s1, AV_LOG_ERROR,
                       "Could not open file : %s\n", filename);
                return AVERROR(EIO);
            }
            size[i] = avio_size(f[i]);
            if (!s->split_planes)
                break;
            /* Next plane file: swap the last character for 'U'/'V'. */
            filename[strlen(filename) - 1] = 'U' + i;
        }

        if (codec->codec_id == AV_CODEC_ID_NONE) {
            /* Probe the first image to discover the image codec. */
            AVProbeData pd = { 0 };
            AVInputFormat *ifmt;
            uint8_t header[PROBE_BUF_MIN + AVPROBE_PADDING_SIZE];
            int probe_ret;  /* renamed from `ret`: no longer shadows ret[3] */
            int score = 0;

            probe_ret = avio_read(f[0], header, PROBE_BUF_MIN);
            if (probe_ret < 0) {
                res = probe_ret;
                goto fail;      /* fix: previously leaked the open f[] */
            }
            memset(header + probe_ret, 0, sizeof(header) - probe_ret);
            avio_skip(f[0], -probe_ret);
            pd.buf      = header;
            pd.buf_size = probe_ret;
            pd.filename = filename;

            ifmt = av_probe_input_format3(&pd, 1, &score);
            if (ifmt && ifmt->read_packet == ff_img_read_packet && ifmt->raw_codec_id)
                codec->codec_id = ifmt->raw_codec_id;
        }

        if (codec->codec_id == AV_CODEC_ID_RAWVIDEO && !codec->width)
            infer_size(&codec->width, &codec->height, size[0]);
    } else {
        /* Pipe mode: read straight from the demuxer's pb. */
        f[0] = s1->pb;
        if (avio_feof(f[0]))
            return AVERROR(EIO);
        if (s->frame_size > 0) {
            size[0] = s->frame_size;
        } else if (!s1->streams[0]->parser) {
            size[0] = avio_size(s1->pb);
        } else {
            size[0] = 4096;
        }
    }

    if (av_new_packet(pkt, size[0] + size[1] + size[2]) < 0) {
        res = AVERROR(ENOMEM);
        goto fail;              /* fix: previously leaked the open f[] */
    }
    pkt->stream_index = 0;
    pkt->flags       |= AV_PKT_FLAG_KEY;
    if (s->ts_from_file) {
        struct stat img_stat;
        if (stat(filename, &img_stat)) {
            /* fix: previously leaked pkt and the open f[] */
            av_free_packet(pkt);
            res = AVERROR(EIO);
            goto fail;
        }
        pkt->pts = (int64_t)img_stat.st_mtime;
#if HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC
        if (s->ts_from_file == 2)
            pkt->pts = 1000000000*pkt->pts + img_stat.st_mtim.tv_nsec;
#endif
        av_add_index_entry(s1->streams[0], s->img_number, pkt->pts, 0, 0,
                           AVINDEX_KEYFRAME);
    } else if (!s->is_pipe) {
        pkt->pts = s->pts;
    }

    /* Concatenate all planes into the packet. */
    pkt->size = 0;
    for (i = 0; i < 3; i++) {
        if (f[i]) {
            ret[i] = avio_read(f[i], pkt->data + pkt->size, size[i]);
            if (!s->is_pipe)
                avio_close(f[i]);
            if (ret[i] > 0)
                pkt->size += ret[i];
        }
    }

    if (ret[0] <= 0 || ret[1] < 0 || ret[2] < 0) {
        av_free_packet(pkt);
        return AVERROR(EIO); /* signal EOF */
    } else {
        s->img_count++;
        s->img_number++;
        s->pts++;
        return 0;
    }

fail:
    /* Close whatever plane files we opened; never close the shared pb. */
    for (i = 0; i < 3; i++)
        if (f[i] && !s->is_pipe)
            avio_close(f[i]);
    return res;
}
/*
 * VLC access module open callback for libavformat's AVIO protocols.
 *
 * Builds the target URL (either the payload of an avio:// MRL or
 * "<scheme>://<location>"), opens it read-only through avio_open2() with
 * VLC's interrupt callback and any user-supplied "avio-options", then
 * wires up the stream callbacks.  Returns VLC_SUCCESS, VLC_ENOMEM, or
 * VLC_EGENERIC.
 */
int OpenAvio(vlc_object_t *object)
{
    stream_t *access = (stream_t *)object;

    access_sys_t *sys = vlc_obj_malloc(object, sizeof(*sys));
    if (sys == NULL)
        return VLC_ENOMEM;
    sys->context = NULL;

    /* We accept:
     * - avio://full_url
     * - url (only a subset of available protocols). */
    char *url;
    if (strcmp(access->psz_name, "avio") == 0) {
        url = strdup(access->psz_location);
    } else {
        if (asprintf(&url, "%s://%s", access->psz_name,
                     access->psz_location) < 0)
            url = NULL;
    }
    if (url == NULL)
        return VLC_ENOMEM;

    /* Make sure libavformat is registered and locked properly. */
    vlc_init_avformat(object);

    AVIOInterruptCB interrupt_cb = {
        .callback = UrlInterruptCallback,
        .opaque   = access,
    };

    /* Collect user-supplied avio options into an AVDictionary. */
    AVDictionary *opts = NULL;
    char *opts_str = var_InheritString(access, "avio-options");
    if (opts_str != NULL) {
        vlc_av_get_options(opts_str, &opts);
        free(opts_str);
    }

    int err = avio_open2(&sys->context, url, AVIO_FLAG_READ,
                         &interrupt_cb, &opts);

    /* Anything left in the dictionary was not consumed by libavformat. */
    AVDictionaryEntry *entry = NULL;
    while ((entry = av_dict_get(opts, "", entry, AV_DICT_IGNORE_SUFFIX)) != NULL)
        msg_Err( access, "unknown option \"%s\"", entry->key );
    av_dict_free(&opts);

    if (err < 0) {
        msg_Err(access, "Failed to open %s: %s", url,
                vlc_strerror_c(AVUNERROR(err)));
        free(url);
        return VLC_EGENERIC;
    }
    free(url);

    sys->size = avio_size(sys->context);

    bool seekable = sys->context->seekable;
    msg_Dbg(access, "%sseekable, size=%"PRIi64,
            seekable ? "" : "not ", sys->size);

    /* Hook up the access callbacks. */
    access->pf_read    = Read;
    access->pf_block   = NULL;
    access->pf_control = Control;
    access->pf_seek    = Seek;
    access->p_sys      = sys;

    return VLC_SUCCESS;
}
/*
 * Open an output container for passthrough recording of the camera's video
 * (and optional audio) streams, copying codec parameters from the input
 * streams, and write the container header.
 *
 * See http://stackoverflow.com/questions/17592120/ffmpeg-how-to-copy-codec-video-and-audio-from-mp4-container-to-ts-cont
 * and https://www.ffmpeg.org/doxygen/trunk/remuxing_8c-example.html#a41
 *
 * Fixes vs. the original: the "using MPEG" fallback re-passed `format`
 * (identical to the previous attempt) instead of "mpeg"; the final Info()
 * printed an int64_t with %d (undefined behavior) — now cast to long long
 * with %lld.
 *
 * @param filename_in stored event file path (retained, not copied)
 * @param format_in   container short name used as a fallback hint
 * @param input_st    input video stream whose codec context is copied
 * @param inpaud_st   input audio stream to copy, or NULL for video-only
 * @param nStartTime  wall-clock offset subtracted from av_gettime()
 */
VideoStore::VideoStore(const char *filename_in, const char *format_in,
                       AVStream *input_st, AVStream *inpaud_st,
                       int64_t nStartTime) {
    // Store inputs in variables local to class.
    filename = filename_in; // FIXME: retained pointer, not copied
    format = format_in;     // FIXME: retained pointer, not copied

    keyframeMessage = false;
    keyframeSkipNumber = 0;

    char szErr[1024];

    Info("Opening video storage stream %s\n", filename);

    int ret;
    av_register_all();

    // Allocate the output media context: first let libavformat deduce the
    // muxer from the filename, then from the caller-supplied format name,
    // and finally fall back to MPEG.
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        avformat_alloc_output_context2(&oc, NULL, format, filename);
    }
    if (!oc) {
        Error("Couldn't deduce format from filename, using MPEG");
        // fix: previously passed `format` again, which had already failed
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc) {
        Fatal("No output context was assigned...");
    }

    fmt = oc->oformat;

    // Create the output video stream and copy the input codec context.
    video_st = avformat_new_stream(oc, input_st->codec->codec);
    if (video_st) {
        ret = avcodec_copy_context(video_st->codec, input_st->codec);
        if (ret != 0)
            Fatal("Unable to copy video context %s\n",
                  av_make_error_string(szErr, 1024, ret));
        // Let the muxer pick its own tag for the copied codec.
        video_st->codec->codec_tag = 0;
        if (oc->oformat->flags & AVFMT_GLOBALHEADER)
            video_st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    } else {
        Fatal("Unable to create video out stream\n");
    }

    // Optional audio stream, mirrored from the input audio stream.
    if (inpaud_st) {
        audio_st = avformat_new_stream(oc, inpaud_st->codec->codec);
        if (audio_st) {
            ret = avcodec_copy_context(audio_st->codec, inpaud_st->codec);
            if (ret != 0)
                Fatal("Unable to copy audio context %s\n",
                      av_make_error_string(szErr, 1024, ret));
            audio_st->codec->codec_tag = 0;
            if (oc->oformat->flags & AVFMT_GLOBALHEADER)
                audio_st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        } else {
            Fatal("Unable to create audio out stream\n");
        }
    } else {
        audio_st = NULL;
    }

    // av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE, NULL, NULL);
        if (ret < 0) {
            Fatal("Could not open '%s': %s\n", filename,
                  av_make_error_string(szErr, 1024, ret));
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, NULL);
    if (ret < 0) {
        Fatal("Error occurred when opening output file: %s\n",
              av_make_error_string(szErr, 1024, ret));
    }

    startPts = 0;
    startDts = 0;
    filter_in_rescale_delta_last = AV_NOPTS_VALUE;

    startTime = av_gettime() - nStartTime; // oc->start_time;
    // fix: startTime is int64_t; %d was a format-specifier mismatch (UB)
    Info("VideoStore startTime=%lld\n", (long long)startTime);
}