Example #1
/**
 * Open an output file and the required encoder.
 * Also set some basic encoder parameters.
 * Some of these parameters are based on the input file's parameters.
 */
static int open_output_file(const char *filename,
                            AVCodecContext *input_codec_context,
                            AVFormatContext **output_format_context,
                            AVCodecContext **output_codec_context)
{
    AVIOContext *output_io_context = NULL;
    AVStream *stream               = NULL;
    AVCodec *output_codec          = NULL;
    int error;

    /** Open the output file to write to it. */
    if ((error = avio_open(&output_io_context, filename, AVIO_FLAG_WRITE)) < 0) {
        fprintf(stderr, "Could not open output file '%s' (error '%s')\n", filename, get_error_text(error));
        return error;
    }

    /** Create a new format context for the output container format. */
    if (!(*output_format_context = avformat_alloc_context())) {
        fprintf(stderr, "Could not allocate output format context\n");
        return AVERROR(ENOMEM);
    }

    /** Associate the output file (pointer) with the container format context. */
    (*output_format_context)->pb = output_io_context;

    /** Guess the desired container format based on the file extension. */
    if (!((*output_format_context)->oformat = av_guess_format(NULL, filename, NULL))) {
        fprintf(stderr, "Could not find output file format\n");
        goto cleanup;
    }

    av_strlcpy((*output_format_context)->filename, filename, sizeof((*output_format_context)->filename));

    /** Find the encoder to be used by its name. */
    if (!(output_codec = avcodec_find_encoder(AV_CODEC_ID_AAC))) {
        fprintf(stderr, "Could not find an AAC encoder.\n");
        goto cleanup;
    }

    /** Create a new audio stream in the output file container. */
    if (!(stream = avformat_new_stream(*output_format_context, output_codec))) {
        fprintf(stderr, "Could not create new stream\n");
        error = AVERROR(ENOMEM);
        goto cleanup;
    }

    /** Save the encoder context for easier access later. */
    *output_codec_context = stream->codec;

    /**
     * Set the basic encoder parameters.
     * The input file's sample rate is used to avoid a sample rate conversion.
     */
    (*output_codec_context)->channels       = 1;
    (*output_codec_context)->channel_layout = av_get_default_channel_layout(1);
    (*output_codec_context)->sample_rate    = input_codec_context->sample_rate;
    (*output_codec_context)->sample_fmt     = output_codec->sample_fmts[0];
    (*output_codec_context)->bit_rate       = output_bit_rate;

    /** Set the sample rate for the container. */
    stream->time_base.den = input_codec_context->sample_rate;
    stream->time_base.num = 1;

    /**
 * Some container formats (like MP4) require global headers to be present.
     * Mark the encoder so that it behaves accordingly.
     */
    if ((*output_format_context)->oformat->flags & AVFMT_GLOBALHEADER)
        (*output_codec_context)->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    /** Open the encoder for the audio stream to use it later. */
    if ((error = avcodec_open2(*output_codec_context, output_codec, NULL)) < 0) {
        fprintf(stderr, "Could not open output codec (error '%s')\n", get_error_text(error));
        goto cleanup;
    }

    return 0;

cleanup:
    avio_closep(&(*output_format_context)->pb);
    avformat_free_context(*output_format_context);
    *output_format_context = NULL;
    return error < 0 ? error : AVERROR_EXIT;
}
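Example #1 (like Examples #4 and #6 below) prints messages from a get_error_text() helper that is not shown here. A minimal sketch, assuming it simply wraps av_strerror() the way FFmpeg's stock transcoding examples do:

#include <libavutil/error.h>

static const char *get_error_text(const int error)
{
    /* Static buffer: adequate for these single-threaded examples, not thread-safe. */
    static char error_buffer[255];
    av_strerror(error, error_buffer, sizeof(error_buffer));
    return error_buffer;
}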
Example #2
NET_API_STATUS
RetrieveList(LPTSTR  wcMachineName,
             LPTSTR  wcTestedTransport,
             LPVOID  *ppvList,
             LPDWORD pdwEntriesInList,
             LPDWORD pdwTotalEntries,
             DWORD   Flag,
             LPTSTR  wcDomain,
             LPDWORD pdwHandle,
             BOOL    ErrMsg
             )
{
DWORD dwStartTime;
DWORD dwEndTime;
TCHAR wcTmpName[CNSLASHLEN+1];
NET_API_STATUS Status;

    if(_wcsnicmp(wcMachineName, L"\\\\", 2) != 0) {
       wcscpy(wcTmpName, L"\\\\");
       wcscat(wcTmpName, wcMachineName);

    } else {
       wcscpy(wcTmpName, wcMachineName);
    }

    dwStartTime = GetTickCount();
    Status = RxNetServerEnum(wcTmpName,
                             wcTestedTransport,
                             101,
                             (LPBYTE *)ppvList,
                             0xffffffff,
                             pdwEntriesInList,
                             pdwTotalEntries,
                             Flag,
                             wcDomain,
                             NULL
                             );

    dwEndTime = GetTickCount();

    if(ErrMsg){
       if (Status != NERR_Success) {
           sprintf(PrintBuf,"\nERROR[ER%ld]:Unable to retrieve List from %s ", ++ERRCOUNT, UnicodeToPrintfString(wcTmpName));
           PrintString(TOALL, PrintBuf);
           sprintf(PrintBuf,"on transport %s with Flag %lx. \nError: %s (%ld milliseconds)\n", UnicodeToPrintfString(wcTestedTransport), Flag, get_error_text(Status), dwEndTime - dwStartTime);
           PrintString(TOALL, PrintBuf);

//        if (Status != ERROR_MORE_DATA) {
//            exit(1);
//        }
       } else {
           sprintf(PrintBuf,"\nINFO:Retrieved List from %s ", UnicodeToPrintfString(wcTmpName));
           PrintString(TOSCREENANDLOG, PrintBuf);
           sprintf(PrintBuf,"on transport %s with Flag %lx: (%ld milliseconds).", UnicodeToPrintfString(wcTestedTransport), Flag, dwEndTime - dwStartTime);
           PrintString(TOSCREENANDLOG, PrintBuf);
       }
    }

    return Status;
}
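Example #2 passes a NET_API_STATUS to get_error_text() and prints the result with %s, so the helper must return a narrow string. A hypothetical sketch following the documented FormatMessage pattern; the name, buffer handling, and netmsg.dll lookup are assumptions, not the original code:

#include <windows.h>
#include <lm.h>
#include <stdio.h>

LPSTR get_error_text(NET_API_STATUS Status)
{
    /* System error codes come from the system message table; NERR_* codes
     * (NERR_BASE..MAX_NERR) live in netmsg.dll. */
    static CHAR szMsg[512];
    HMODULE hModule = NULL;
    DWORD dwFlags = FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS;

    if (Status >= NERR_BASE && Status <= MAX_NERR) {
        hModule = LoadLibraryEx(TEXT("netmsg.dll"), NULL, LOAD_LIBRARY_AS_DATAFILE);
        if (hModule != NULL)
            dwFlags = FORMAT_MESSAGE_FROM_HMODULE | FORMAT_MESSAGE_IGNORE_INSERTS;
    }

    if (!FormatMessageA(dwFlags, hModule, Status,
                        MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                        szMsg, sizeof(szMsg), NULL))
        sprintf(szMsg, "unknown error %lu", Status);

    if (hModule != NULL)
        FreeLibrary(hModule);

    return szMsg;
}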
Example #3
void spawner_new_c::json_report(runner *runner_instance,
                                rapidjson::PrettyWriter<rapidjson::StringBuffer, rapidjson::UTF16<> > &writer) {
    writer.StartObject();

    //for {
    report_class runner_report = runner_instance->get_report();
    options_class runner_options = runner_instance->get_options();
    //temporary
#define rapidjson_write(x) (writer.String(a2w(x)))
    rapidjson_write("Application");
    rapidjson_write(runner_report.application_name.c_str());
    rapidjson_write("Arguments");
    writer.StartArray();
    for (size_t i = 0; i < runner_options.get_arguments_count(); ++i) {
        rapidjson_write(runner_options.get_argument(i).c_str());
    }
    writer.EndArray();

    rapidjson_write("Limit");
    writer.StartObject();

    restrictions_class runner_restrictions = ((secure_runner*)runner_instance)->get_restrictions();
    struct {
        const char *field;
        unit_t unit;
        degrees_enum degree;
        restriction_kind_t restriction;
    } restriction_items[] = {
        { "Time", unit_time_second, degree_micro, restriction_processor_time_limit },
        { "WallClockTime", unit_time_second, degree_micro, restriction_user_time_limit },
        { "Memory", unit_memory_byte, degree_default, restriction_memory_limit },
        { "SecurityLevel", unit_no_unit, degree_default, restriction_security_limit },
        { "IOBytes", unit_memory_byte, degree_default, restriction_write_limit },
        { "IdlenessTime", unit_time_second, degree_micro, restriction_idle_time_limit },
        { "IdlenessProcessorLoad", unit_no_unit, degree_centi, restriction_load_ratio },
        { NULL, unit_no_unit, degree_default, restriction_max },
    };
    for (int i = 0; restriction_items[i].field; ++i) {
        if (runner_restrictions[restriction_items[i].restriction] == restriction_no_limit) {
            continue;
        }
        rapidjson_write(restriction_items[i].field);
        if (restriction_items[i].degree == degree_default) {
            writer.Uint64(runner_restrictions[restriction_items[i].restriction]);
        }
        else {
            writer.Double((double)convert(
                              value_t(restriction_items[i].unit, restriction_items[i].degree),
                              value_t(restriction_items[i].unit),
                              (long double)runner_restrictions[restriction_items[i].restriction]
                          ));
        }
    }
    writer.EndObject();

    rapidjson_write("Options");
    writer.StartObject();
    rapidjson_write("SearchInPath");
    writer.Bool(runner_options.use_cmd);
    writer.EndObject();

    rapidjson_write("Result");
    writer.StartObject();
    struct {
        const char *field;
        uint64_t value;
        unit_t unit;
        degrees_enum degree;
    } result_items[] = {
        { "Time", runner_report.processor_time, unit_time_second, degree_micro },
        { "WallClockTime", runner_report.user_time, unit_time_second, degree_micro },
        { "Memory", runner_report.peak_memory_used, unit_memory_byte, degree_default },
        { "BytesWritten", runner_report.write_transfer_count, unit_memory_byte, degree_default },
        { "KernelTime", runner_report.kernel_time, unit_time_second, degree_micro },
        { "ProcessorLoad", (uint64_t)(runner_report.load_ratio * 100), unit_no_unit, degree_centi },
        { NULL, 0, unit_no_unit, degree_default },
    };
    for (int i = 0; result_items[i].field; ++i) {
        rapidjson_write(result_items[i].field);
        if (result_items[i].degree == degree_default) {
            writer.Uint64(result_items[i].value);
        }
        else {
            writer.Double((double)convert(
                              value_t(result_items[i].unit, result_items[i].degree),
                              value_t(result_items[i].unit),
                              (long double)result_items[i].value
                          ));
        }
    }
    rapidjson_write("WorkingDirectory");
    rapidjson_write(runner_report.working_directory.c_str());
    writer.EndObject();

    rapidjson_write("StdOut");
    writer.StartArray();
    for (uint i = 0; i < runner_options.stdoutput.size(); ++i) {
        rapidjson_write(runner_options.stdoutput[i].c_str());
    }
    writer.EndArray();
    rapidjson_write("StdErr");
    writer.StartArray();
    for (uint i = 0; i < runner_options.stderror.size(); ++i) {
        rapidjson_write(runner_options.stderror[i].c_str());
    }
    writer.EndArray();
    rapidjson_write("StdIn");
    writer.StartArray();
    for (uint i = 0; i < runner_options.stdinput.size(); ++i) {
        rapidjson_write(runner_options.stdinput[i].c_str());
    }
    writer.EndArray();

    rapidjson_write("CreateProcessMethod");
    rapidjson_write(options.login == "" ? "CreateProcess" : "WithLogon");
    rapidjson_write("UserName");
    writer.String(runner_report.login.c_str());
    rapidjson_write("TerminateReason");
    rapidjson_write(get_terminate_reason(runner_report.terminate_reason).c_str());
    rapidjson_write("ExitCode");
    writer.Uint(runner_report.exit_code);
    rapidjson_write("ExitStatus");
    rapidjson_write(ExtractExitStatus(runner_report).c_str());
    rapidjson_write("SpawnerError");
    writer.StartArray();
    std::vector<std::string> errors;
    errors.push_back(get_error_text());
    for (auto& error : errors) {
        rapidjson_write(error.c_str());
    }
    writer.EndArray();
    writer.EndObject();
}
Example #4
int load_file(char* path, file_t* file)
{
	int error;

	file->path = (char*)malloc(strlen(path)+1);
	if(!file->path)
	{
		return -1;
	}
	strcpy(file->path,path);

	file->format_context = NULL;

	if(avformat_open_input(&(file->format_context), file->path, NULL, NULL) < 0)
	{
		free(file->path);
		return -1;
	}

	if(avformat_find_stream_info(file->format_context, NULL) < 0)
	{
		free(file->path);
		avformat_close_input(&file->format_context);
		return -1;
	}

	int stream_no = av_find_best_stream(file->format_context,AVMEDIA_TYPE_AUDIO,-1,-1,NULL,0);

	if(stream_no < 0)
	{
		free(file->path);
		avformat_close_input(&file->format_context);
		return -1;
	}

	file->stream = file->format_context->streams[stream_no];

	if(!(file->codec = avcodec_find_decoder(file->stream->codec->codec_id)))
	{
		fprintf(stderr, "Could not find input codec\n");
		free(file->path);
		avformat_close_input(&file->format_context);
		return -1;
	}

	if((error = avcodec_open2(file->stream->codec, file->codec, NULL)) < 0)
	{
		fprintf(stderr, "Could not open input codec (error '%s')\n",
		        get_error_text(error));
		free(file->path);
		avformat_close_input(&file->format_context);
		return -1;
	}

	file->codec_context = file->stream->codec;

	file->resample_context = avresample_alloc_context();
	if(!file->resample_context)
	{
		free(file->path);
		avformat_close_input(&file->format_context);
		return -1;
	}
	av_opt_set_int(file->resample_context, "in_channel_layout",  file->codec_context->channel_layout,0);
	av_opt_set_int(file->resample_context, "out_channel_layout", file->codec_context->channel_layout,0);
	av_opt_set_int(file->resample_context, "in_sample_rate",     file->codec_context->sample_rate,   0);
	av_opt_set_int(file->resample_context, "out_sample_rate",    file->codec_context->sample_rate,   0);
	av_opt_set_int(file->resample_context, "in_sample_fmt",      file->codec_context->sample_fmt,    0);
	av_opt_set_int(file->resample_context, "out_sample_fmt",     AV_SAMPLE_FMT_FLTP,   0);

	if(avresample_open(file->resample_context) < 0)
	{
		fprintf(stderr, "Could not open resample context\n");
		avresample_free(&file->resample_context);
		free(file->path);
		avformat_close_input(&file->format_context);
		return -1;
	}

	return 0;
}
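load_file() fills in a caller-supplied file_t; a plausible layout inferred purely from the fields touched above (the real definition may differ or carry extra members):

#include <libavformat/avformat.h>
#include <libavresample/avresample.h>

typedef struct file_t {
	char                   *path;              /* heap copy of the input path */
	AVFormatContext        *format_context;    /* demuxer context */
	AVStream               *stream;            /* best audio stream */
	AVCodec                *codec;             /* decoder for that stream */
	AVCodecContext         *codec_context;     /* == stream->codec here */
	AVAudioResampleContext *resample_context;  /* libavresample converter */
} file_t;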
Example #5
static ngx_int_t
ngx_http_tnt_send_reply(ngx_http_request_t *r,
                        ngx_http_upstream_t *u,
                        ngx_http_tnt_ctx_t *ctx)
{
    tp_transcode_t          tc;
    ngx_int_t               rc;
    ngx_http_tnt_loc_conf_t *tlcf;
    ngx_buf_t               *output;
    size_t                  output_size;


    tlcf = ngx_http_get_module_loc_conf(r, ngx_http_tnt_module);

    output_size =
        (ctx->tp_cache->end - ctx->tp_cache->start + ngx_http_tnt_overhead())
        * tlcf->out_multiplier;
    output = ngx_http_tnt_create_mem_buf(r, u, output_size);
    if (output == NULL) {
        return NGX_ERROR;
    }

    if (ctx->batch_size > 0
        && ctx->rest_batch_size == ctx->batch_size)
    {
        *output->pos = '[';
        ++output->pos;
    }

    tp_transcode_init_args_t args = {
        .output = (char *)output->pos,
        .output_size = output->end - output->pos,
        .method = NULL, .method_len = 0,
        .codec = TP_REPLY_TO_JSON,
        .mf = NULL
    };
    rc = tp_transcode_init(&tc, &args);
    if (rc == TP_TRANSCODE_ERROR) {
        crit("[BUG] failed to call tp_transcode_init(output)");
        return NGX_ERROR;
    }

    rc = tp_transcode(&tc, (char *)ctx->tp_cache->start,
                      ctx->tp_cache->end - ctx->tp_cache->start);
    if (rc == TP_TRANSCODE_OK) {

        size_t complete_msg_size = 0;
        rc = tp_transcode_complete(&tc, &complete_msg_size);
        if (rc == TP_TRANSCODE_ERROR) {

            crit("[BUG] failed to complete output transcoding");

            ngx_pfree(r->pool, output);

            const ngx_http_tnt_error_t *e = get_error_text(UNKNOWN_PARSE_ERROR);
            output = ngx_http_tnt_set_err(r, e->code, e->msg.data, e->msg.len);
            if (output == NULL) {
                goto error_exit;
            }

            goto done;
        }

        output->last = output->pos + complete_msg_size;

    } else if (rc == TP_TRANSCODE_ERROR) {

        crit("[BUG] failed to transcode output, err: '%s'", tc.errmsg);

        ngx_pfree(r->pool, output);

        output = ngx_http_tnt_set_err(r,
                                      tc.errcode,
                                      (u_char *)tc.errmsg,
                                      ngx_strlen(tc.errmsg));
        if (output == NULL) {
            goto error_exit;
        }
    }

done:
    tp_transcode_free(&tc);

    if (ctx->batch_size > 0) {

        if (ctx->rest_batch_size == 1)
        {
            *output->last = ']';
            ++output->last;
        }
        else if (ctx->rest_batch_size <= ctx->batch_size)
        {
            *output->last = ',';
            ++output->last;
        }
    }

    return ngx_http_tnt_output(r, u, output);

error_exit:
    tp_transcode_free(&tc);
    return NGX_ERROR;
}


static ngx_int_t
ngx_http_tnt_filter_reply(ngx_http_request_t *r,
                          ngx_http_upstream_t *u,
                          ngx_buf_t *b)
{
    ngx_http_tnt_ctx_t *ctx = ngx_http_get_module_ctx(r, ngx_http_tnt_module);
    ssize_t            bytes = b->last - b->pos;

    dd("filter_reply -> recv bytes: %i, rest: %i", (int)bytes, (int)ctx->rest);

    if (ctx->state == READ_PAYLOAD) {

        ssize_t payload_rest = ngx_min(ctx->payload.e - ctx->payload.p, bytes);
        if (payload_rest > 0) {
            ctx->payload.p = ngx_copy(ctx->payload.p, b->pos, payload_rest);
            bytes -= payload_rest;
            b->pos += payload_rest;
            payload_rest = ctx->payload.e - ctx->payload.p;

            dd("filter_reply -> payload rest:%i", (int)payload_rest);
        }

        if (payload_rest == 0) {
            ctx->payload_size = tp_read_payload((char *)&ctx->payload.mem[0],
                                                (char *)ctx->payload.e);
            if (ctx->payload_size <= 0) {
                crit("[BUG] tp_read_payload failed, ret:%i",
                        (int)ctx->payload_size);
                return NGX_ERROR;
            }

            ctx->rest = ctx->payload_size - 5 /* - header size */;

            dd("filter_reply -> got header payload:%i, rest:%i",
                    (int)ctx->payload_size,
                    (int)ctx->rest);

            ctx->tp_cache = ngx_create_temp_buf(r->pool, ctx->payload_size);
            if (ctx->tp_cache == NULL) {
                return NGX_ERROR;
            }

            ctx->tp_cache->pos = ctx->tp_cache->start;
            ctx->tp_cache->memory = 1;


            ctx->tp_cache->pos = ngx_copy(ctx->tp_cache->pos,
                                          &ctx->payload.mem[0],
                                          sizeof(ctx->payload.mem) - 1);

            ctx->payload.p = &ctx->payload.mem[0];

            ctx->state = READ_BODY;
        } else {
            return NGX_OK;
        }
    }

    ngx_int_t rc = NGX_OK;
    if (ctx->state == READ_BODY) {

        ssize_t rest = ctx->rest - bytes, read_on = bytes;
        if (rest < 0) {
            rest *= -1;
            read_on = bytes - rest;
            ctx->rest = 0;
            ctx->state = SEND_REPLY;
            rc = NGX_AGAIN;
        } else if (rest == 0) {
            ctx->state = SEND_REPLY;
            ctx->rest = 0;
        } else {
            ctx->rest -= bytes;
        }

        ctx->tp_cache->pos = ngx_copy(ctx->tp_cache->pos, b->pos, read_on);
        b->pos += read_on;

        dd("filter_reply -> read_on:%i, rest:%i, cache rest:%i, buf size:%i",
                (int)read_on,
                (int)ctx->rest,
                (int)(ctx->tp_cache->end - ctx->tp_cache->pos),
                (int)(b->last - b->pos));
    }

    if (ctx->state == SEND_REPLY) {

        rc = ngx_http_tnt_send_reply(r, u, ctx);

        ctx->state = READ_PAYLOAD;
        ctx->rest = ctx->payload_size = 0;

        --ctx->rest_batch_size;

        if (ctx->rest_batch_size <= 0) {
            u->length = 0;
            ctx->rest_batch_size = 0;
            ctx->batch_size = 0;
        }

        ngx_pfree(r->pool, ctx->tp_cache);
        ctx->tp_cache = NULL;

        if (b->last - b->pos > 0) {
            rc = NGX_AGAIN;
        }
    }

    return rc;
}
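In this nginx module, get_error_text() returns a pointer to an error descriptor rather than a string. A sketch consistent with the call site in ngx_http_tnt_send_reply(); the table contents and numeric code are assumptions:

typedef struct {
    ngx_int_t  code;
    ngx_str_t  msg;
} ngx_http_tnt_error_t;

/* Indexed by error id; only the entry referenced above is sketched. */
static const ngx_http_tnt_error_t errors[] = {
    [UNKNOWN_PARSE_ERROR] = {
        -32700,                               /* assumed JSON-RPC parse-error code */
        ngx_string("Unknown parse error")
    },
};

static inline const ngx_http_tnt_error_t *
get_error_text(int type)
{
    return &errors[type];
}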
Example #6
    int AudioDecoder::initDecoder(AVCodecContext* context, AVCodec* dec_codec)
    {
        ELOG_DEBUG("initDecoder started");

        codec_ = dec_codec;
        input_codec_context = context;

        if (avcodec_open2(input_codec_context, codec_, NULL) < 0) {
            ELOG_DEBUG("AudioDecoder initDecoder Error open2 audio decoder");
            return 0;
        }

        ELOG_DEBUG("input sample_fmts[0] is %s", av_get_sample_fmt_name(codec_->sample_fmts[0]));
        ELOG_DEBUG("input sample_fmt is %s", av_get_sample_fmt_name(input_codec_context->sample_fmt));
        ELOG_DEBUG("input frame size is %d, bitrate=%d", input_codec_context->frame_size, input_codec_context->bit_rate);

        // Init output encoder as well.
        AVCodec *output_codec          = NULL;
        int error;

        if (!(output_codec = avcodec_find_encoder(OUTPUT_CODEC_ID))) {
            ELOG_DEBUG( "Could not find the encoder.");
            return 0;
        }
        output_codec_context = avcodec_alloc_context3(output_codec);
        if (!output_codec_context) 
        {
            ELOG_DEBUG( "Could not allocate an encoding context");
            return 0;
        }

        /**
         * Set the basic encoder parameters.
         * The input file's sample rate is used to avoid a sample rate conversion.
         */
        output_codec_context->channels       = OUTPUT_CHANNELS;
        output_codec_context->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
        output_codec_context->sample_rate    = input_codec_context->sample_rate;
        output_codec_context->sample_fmt     = output_codec->sample_fmts[0]; //u8
        output_codec_context->bit_rate       = 510000;
        
        
        /** Allow the use of the experimental feature */
        output_codec_context->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

        /** Open the encoder for the audio stream to use it later. */
        if ((error = avcodec_open2(output_codec_context, output_codec, NULL)) < 0) {
            ELOG_DEBUG("Could not open output codec %s", get_error_text(error));
            return 0;
        }
        
        ELOG_DEBUG("output sample_fmts[0] is %s", av_get_sample_fmt_name(output_codec->sample_fmts[0]));
        ELOG_DEBUG("output sample_fmt is %s", av_get_sample_fmt_name(output_codec_context->sample_fmt));
        ELOG_DEBUG("output frame size is %d", output_codec_context->frame_size);
        
        output_codec_context->frame_size = 960; //20ms. actually no need, as it already is
        /** Initialize the resampler to be able to convert audio sample formats. */
        if (init_resampler(input_codec_context, output_codec_context))
        {
            ELOG_DEBUG(" init resampleer failed !!");
            return 0;
        }

        init_fifo(&fifo);

        ELOG_DEBUG("initDecoder end");

        return 1;
    }
Example #7
static SCM decode_video(struct ffmpeg_t *self, AVPacket *pkt, AVFrame *frame)
{
  int got_frame;
  int len = avcodec_decode_video2(self->video_codec_ctx, frame, &got_frame, pkt);
  if (len < 0)
    scm_misc_error("ffmpeg-decode-audio/video", "Error decoding frame: ~a", scm_list_1(get_error_text(len)));
  consume_packet_data(pkt, pkt->size);
  return got_frame ? list_timestamped_video(self, frame) : SCM_BOOL_F;
}
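Note that in the Guile bindings (Examples #7 to #9) get_error_text() must return an SCM, since its result is passed straight to scm_list_1/scm_list_2. A minimal sketch, assuming it combines av_strerror() with scm_from_locale_string():

#include <libguile.h>
#include <libavutil/error.h>

static SCM get_error_text(int err)
{
  static char buf[255];
  av_strerror(err, buf, sizeof(buf));
  return scm_from_locale_string(buf);
}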
Example #8
SCM make_ffmpeg_output(SCM scm_file_name,
                       SCM scm_format_name,
                       SCM scm_video_parameters,
                       SCM scm_have_video,
                       SCM scm_audio_parameters,
                       SCM scm_have_audio,
                       SCM scm_debug)
{
  SCM retval;
  struct ffmpeg_t *self;
  scm_dynwind_begin(0);
  const char *file_name = scm_to_locale_string(scm_file_name);
  scm_dynwind_free(file_name);
  self = (struct ffmpeg_t *)scm_gc_calloc(sizeof(struct ffmpeg_t), "ffmpeg");
  self->video_stream_idx = -1;
  self->audio_stream_idx = -1;
  SCM_NEWSMOB(retval, ffmpeg_tag, self);

  int err;
  const char *format_name = NULL;
  if (!scm_is_false(scm_format_name)) {
    format_name = scm_to_locale_string(scm_symbol_to_string(scm_format_name));
    scm_dynwind_free(format_name);
  };
#ifdef HAVE_AVFORMAT_ALLOC_OUTPUT_CONTEXT2
  err = avformat_alloc_output_context2(&self->fmt_ctx, NULL, format_name, file_name);
  if (!self->fmt_ctx) {
    ffmpeg_destroy(retval);
    scm_misc_error("make-ffmpeg-output", "Error initializing output format for file '~a': ~a",
                   scm_list_2(scm_file_name, get_error_text(err)));
  };
#else
  AVOutputFormat *format;
  if (format_name)
    format = av_guess_format(format_name, NULL, NULL);
  else
    format = av_guess_format(NULL, file_name, NULL);
  if (!format) {
    ffmpeg_destroy(retval);
    scm_misc_error("make-ffmpeg-output", "Unable to determine file format for file '~a'",
                   scm_list_1(scm_file_name));
  };
  self->fmt_ctx = avformat_alloc_context();
  if (!self->fmt_ctx) {
    ffmpeg_destroy(retval);
    scm_misc_error("make-ffmpeg-output", "Error initializing output format for file '~a'",
                   scm_list_1(scm_file_name));
  };
  self->fmt_ctx->oformat = format;
  strncpy(self->fmt_ctx->filename, file_name, sizeof(self->fmt_ctx->filename));
#endif

  char have_video = scm_is_true(scm_have_video);
  if (have_video) {
    // Open codec and video stream
    enum AVCodecID video_codec_id = self->fmt_ctx->oformat->video_codec;
    AVCodec *video_encoder = find_encoder(retval, video_codec_id, "video");
    AVStream *video_stream = open_output_stream(retval, video_encoder, &self->video_stream_idx, "video", scm_file_name);

    // Get video parameters
    SCM scm_shape          = scm_car(scm_video_parameters);
    SCM scm_frame_rate     = scm_cadr(scm_video_parameters);
    SCM scm_video_bit_rate = scm_caddr(scm_video_parameters);
    SCM scm_aspect_ratio   = scm_cadddr(scm_video_parameters);

    // Configure the output video codec
    self->video_codec_ctx =
      configure_output_video_codec(video_stream, video_codec_id, scm_video_bit_rate, scm_shape, scm_frame_rate, scm_aspect_ratio);

    // Some formats want stream headers to be separate.
    if (self->fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        self->video_codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    // Open output video codec
    open_codec(retval, self->video_codec_ctx, video_encoder, "video", scm_file_name);

    // Allocate frame
    self->video_target_frame = allocate_output_video_frame(retval, self->video_codec_ctx);
  };

  char have_audio = scm_is_true(scm_have_audio);
  if (have_audio) {
    // Open audio codec and stream
    enum AVCodecID audio_codec_id = self->fmt_ctx->oformat->audio_codec;
    AVCodec *audio_encoder = find_encoder(retval, audio_codec_id, "audio");
    AVStream *audio_stream = open_output_stream(retval, audio_encoder, &self->audio_stream_idx, "audio", scm_file_name);

    // Get audio parameters
    SCM scm_select_rate    = scm_car(scm_audio_parameters);
    SCM scm_channels       = scm_cadr(scm_audio_parameters);
    SCM scm_audio_bit_rate = scm_caddr(scm_audio_parameters);
    SCM scm_select_format  = scm_cadddr(scm_audio_parameters);

    // Configure the output audio codec
    self->audio_codec_ctx =
      configure_output_audio_codec(retval, audio_stream, audio_codec_id,
                                   scm_select_rate, scm_channels, scm_audio_bit_rate, scm_select_format);

    // Some formats want stream headers to be separate.
    if (self->fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        self->audio_codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    // Open output audio codec
    open_codec(retval, self->audio_codec_ctx, audio_encoder, "audio", scm_file_name);

    // Allocate audio frame
    self->audio_target_frame =
      allocate_output_audio_frame(retval, self->audio_codec_ctx, self->audio_codec_ctx->sample_fmt);
    self->audio_packed_frame =
      allocate_output_audio_frame(retval, self->audio_codec_ctx, av_get_packed_sample_fmt(self->audio_codec_ctx->sample_fmt));

    // Initialise audio buffer
    ringbuffer_init(&self->audio_buffer, 1024);
  };

  if (scm_is_true(scm_debug)) av_dump_format(self->fmt_ctx, 0, file_name, 1);

  // Open the output file if needed
  if (!(self->fmt_ctx->oformat->flags & AVFMT_NOFILE)) {
    int err = avio_open(&self->fmt_ctx->pb, file_name, AVIO_FLAG_WRITE);
    if (err < 0) {
      ffmpeg_destroy(retval);
      scm_misc_error("make-ffmpeg-output", "Could not open '~a': ~a",
                     scm_list_2(scm_file_name, get_error_text(err)));
    }
    self->output_file = 1;
  }

  // Write video file header
  err = avformat_write_header(self->fmt_ctx, NULL);
  if (err < 0) {
    ffmpeg_destroy(retval);
    scm_misc_error("make-ffmpeg-output", "Error writing header of video '~a': ~a",
                   scm_list_2(scm_file_name, get_error_text(err)));
  };
  self->header_written = 1;

  scm_dynwind_end();
  return retval;
}
Example #9
SCM make_ffmpeg_input(SCM scm_file_name, SCM scm_debug)
{
  SCM retval;
  struct ffmpeg_t *self;
  scm_dynwind_begin(0);
  const char *file_name = scm_to_locale_string(scm_file_name);
  scm_dynwind_free(file_name);
  self = (struct ffmpeg_t *)scm_gc_calloc(sizeof(struct ffmpeg_t), "ffmpeg");
  self->video_stream_idx = -1;
  self->audio_stream_idx = -1;
  SCM_NEWSMOB(retval, ffmpeg_tag, self);

  int err;
  err = avformat_open_input(&self->fmt_ctx, file_name, NULL, NULL);
  if (err < 0) {
    ffmpeg_destroy(retval);
    scm_misc_error("make-ffmpeg-input", "Error opening file '~a': ~a", scm_list_2(scm_file_name, get_error_text(err)));
  };

  err = avformat_find_stream_info(self->fmt_ctx, NULL);
  if (err < 0) {
    ffmpeg_destroy(retval);
    scm_misc_error("make-ffmpeg-input", "No stream information in file '~a': ~a", scm_list_2(scm_file_name, get_error_text(err)));
  };

  // TODO: only open desired streams
  // Open video stream
  self->video_stream_idx = av_find_best_stream(self->fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
  if (self->video_stream_idx >= 0)
    self->video_codec_ctx = open_decoder(retval, scm_file_name, video_stream(self), "video");

  // Open audio stream
  self->audio_stream_idx = av_find_best_stream(self->fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
  if (self->audio_stream_idx >= 0)
    self->audio_codec_ctx = open_decoder(retval, scm_file_name, audio_stream(self), "audio");

  // Print debug information
  if (scm_is_true(scm_debug)) av_dump_format(self->fmt_ctx, 0, file_name, 0);

  // Allocate input frames
  self->video_target_frame = allocate_frame(retval);
  self->audio_target_frame = allocate_frame(retval);

  // Initialise data packet
  av_init_packet(&self->pkt);
  self->pkt.data = NULL;
  self->pkt.size = 0;

  scm_dynwind_end();
  return retval;
}