Example #1
FFMS_CodecID MatroskaToFFCodecID(char *Codec, void *CodecPrivate, unsigned int FourCC, unsigned int BitsPerSample) {
	/* Look up native codecs */
	for (int i = 0; mkv_codec_tags[i].id != FFMS_ID(NONE); i++){
		if (!strncmp(mkv_codec_tags[i].str, Codec, strlen(mkv_codec_tags[i].str))) {
			// Uncompressed and exotic format fixup
			// This list is incomplete
			FFMS_CodecID CID = mkv_codec_tags[i].id;
			switch (CID) {
				case FFMS_ID(PCM_S16LE):
					switch (BitsPerSample) {
						case 8: CID = FFMS_ID(PCM_S8); break;
						case 16: CID = FFMS_ID(PCM_S16LE); break;
						case 24: CID = FFMS_ID(PCM_S24LE); break;
						case 32: CID = FFMS_ID(PCM_S32LE); break;
					}
					break;
				case FFMS_ID(PCM_S16BE):
					switch (BitsPerSample) {
						case 8: CID = FFMS_ID(PCM_S8); break;
						case 16: CID = FFMS_ID(PCM_S16BE); break;
						case 24: CID = FFMS_ID(PCM_S24BE); break;
						case 32: CID = FFMS_ID(PCM_S32BE); break;
					}
					break;
				default:
					break;
			}

			return CID;
		}
	}

	/* Video codecs for "avi in mkv" mode */
	const AVCodecTag *const tags[] = { avformat_get_riff_video_tags(), 0 };

	if (!strcmp(Codec, "V_MS/VFW/FOURCC")) {
		FFMS_BITMAPINFOHEADER *b = reinterpret_cast<FFMS_BITMAPINFOHEADER *>(CodecPrivate);
		return av_codec_get_id(tags, b->biCompression);
	}

	if (!strcmp(Codec, "V_FOURCC")) {
		return av_codec_get_id(tags, FourCC);
	}

	// FIXME
	/* Audio codecs for "acm in mkv" mode */
	//#include "Mmreg.h"
	//((WAVEFORMATEX *)TI->CodecPrivate)->wFormatTag

	/* Fixup for uncompressed video formats */

	/* Fixup for uncompressed audio formats */

	return FFMS_ID(NONE);
}
Example #2
static const char *lookup_tag(int type, uint32_t tag)
{
    for (int n = 0; mp_codec_tags[n].codec; n++) {
        if (mp_codec_tags[n].tag == tag)
            return mp_codec_tags[n].codec;
    }

    const struct AVCodecTag *av_tags[3] = {0};
    switch (type) {
    case STREAM_VIDEO: {
        av_tags[0] = avformat_get_riff_video_tags();
#if HAVE_QT_TAGS
        av_tags[1] = avformat_get_mov_video_tags();
#endif
        break;
    }
    case STREAM_AUDIO: {
        av_tags[0] = avformat_get_riff_audio_tags();
#if HAVE_QT_TAGS
        av_tags[1] = avformat_get_mov_audio_tags();
#endif
        break;
    }
    }

    int id = av_codec_get_id(av_tags, tag);
    return id == AV_CODEC_ID_NONE ? NULL : mp_codec_from_av_codec_id(id);
}
Example #3
static AVStream* copy_stream(AVFormatContext *context, AVStream *source_stream) {
    AVStream *output_stream = avformat_new_stream(context, source_stream->codec->codec);
    
    AVCodecContext *output_context = output_stream->codec, *source_context = source_stream->codec;
    
    output_context->codec_id   = source_context->codec_id;
    output_context->codec_type = source_context->codec_type;
    
    if (!output_context->codec_tag) {
        if (!context->oformat->codec_tag || 
            av_codec_get_id(context->oformat->codec_tag, source_context->codec_tag) == output_context->codec_id ||
            av_codec_get_tag(context->oformat->codec_tag, source_context->codec_id) <= 0) {
            output_context->codec_tag = source_context->codec_tag;
        }
    }
    
    
    output_context->bit_rate = source_context->bit_rate;
    output_context->rc_max_rate = source_context->rc_max_rate;
    output_context->rc_buffer_size = source_context->rc_buffer_size;
    output_context->field_order = source_context->field_order;
    output_context->extradata = av_mallocz(source_context->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
    
    if (!output_context->extradata) {
        exit(-1);
    }
    
    memcpy(output_context->extradata, source_context->extradata, source_context->extradata_size);
    
    output_context->extradata_size = source_context->extradata_size;
    
    output_context->time_base = source_context->time_base;
    
    switch (output_context->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            output_context->channel_layout = source_context->channel_layout;
            output_context->sample_rate = source_context->sample_rate;
            output_context->channels = source_context->channels;
            output_context->frame_size = source_context->frame_size;
            output_context->audio_service_type = source_context->audio_service_type;
            output_context->block_align = source_context->block_align;
            break;
            
        case AVMEDIA_TYPE_VIDEO:    
            output_context->pix_fmt = source_context->pix_fmt;
            output_context->width = source_context->width;
            output_context->height = source_context->height;
            output_context->has_b_frames = source_context->has_b_frames;
            output_context->sample_aspect_ratio = source_context->sample_aspect_ratio;
            output_stream->sample_aspect_ratio  = output_context->sample_aspect_ratio;
        default:
            break;
    }
    
    return output_stream;
}
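The codec_tag guard above is a common remuxing idiom: the input stream's codec_tag is carried over only when the output container has no tag table of its own, maps that same tag back to the same codec ID, or has no tag registered for that codec ID at all. Below is a minimal sketch of the same check factored into a helper; the helper name is ours, not part of the project above, and it assumes the usual libavformat API.

#include <libavformat/avformat.h>

/* Hypothetical helper: decide whether an input stream's codec_tag can be
 * copied to the output container unchanged. Mirrors the guard in the
 * example above. */
static int tag_is_compatible(const AVOutputFormat *ofmt,
                             enum AVCodecID codec_id, unsigned int codec_tag)
{
    /* No tag tables at all: the muxer does not care about tags. */
    if (!ofmt->codec_tag)
        return 1;
    /* The tag maps back to the same codec ID in the output format. */
    if (av_codec_get_id(ofmt->codec_tag, codec_tag) == codec_id)
        return 1;
    /* The output format has no tag of its own for this codec ID. */
    if (av_codec_get_tag(ofmt->codec_tag, codec_id) == 0)
        return 1;
    return 0;
}

With such a helper, the guard reduces to a single call: copy source_context->codec_tag into output_context->codec_tag only when output_context->codec_tag is still zero and tag_is_compatible(context->oformat, source_context->codec_id, source_context->codec_tag) returns non-zero.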
Example #4
static const char *lookup_tag(const struct mp_codec_tag *mp_table,
                              const struct AVCodecTag *av_table,
                              uint32_t tag)
{
    for (int n = 0; mp_table[n].codec; n++) {
        if (mp_table[n].tag == tag)
            return mp_table[n].codec;
    }
    const struct AVCodecTag *av_tags[] = {av_table, NULL};
    int id = av_codec_get_id(av_tags, tag);
    return id == AV_CODEC_ID_NONE ? NULL : mp_codec_from_av_codec_id(id);
}
Example #5
bool FFmpegBaseWriter::open(const char* filename, int fourcc, double fps, int width, int height,
          bool iscolor, bool need_encode) {
  static const struct AVCodecTag *table[] = { avformat_get_riff_video_tags(), 0 };
  enum AVCodecID id = av_codec_get_id(table, fourcc);

  media_stream_params_t params;
  params.height_video = height;
  params.width_video = width;
  params.video_fps = fps;
  params.codec_id = id;

  stream_ = alloc_video_stream(filename, &params, need_encode);
  channels_ = iscolor ? 3 : 1;

  return true;
}
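For reference, the lookup pattern shared by the examples on this page is small enough to show in isolation. The sketch below is not taken from any of the projects above; it simply resolves an AVI/RIFF FourCC to an AVCodecID and assumes a reasonably recent libavformat/libavcodec.

#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

int main(void)
{
    /* av_codec_get_id() walks a NULL-terminated list of AVCodecTag tables. */
    const struct AVCodecTag *const tables[] = { avformat_get_riff_video_tags(), NULL };

    unsigned int fourcc = MKTAG('H', '2', '6', '4');   /* the "H264" FourCC */
    enum AVCodecID id = av_codec_get_id(tables, fourcc);

    if (id == AV_CODEC_ID_NONE) {
        fprintf(stderr, "FourCC not found in the RIFF video tag table\n");
        return 1;
    }
    printf("FourCC maps to codec: %s\n", avcodec_get_name(id));
    return 0;
}

Multiple tables can be chained in the same array (as Example #2 does with the RIFF and MOV tags); av_codec_get_id() returns AV_CODEC_ID_NONE when the tag is found in none of them.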
Example #6
File: dshow.c Project: 3688/FFmpeg
static int
dshow_add_device(AVFormatContext *avctx,
                 enum dshowDeviceType devtype)
{
    struct dshow_ctx *ctx = avctx->priv_data;
    AM_MEDIA_TYPE type;
    AVCodecParameters *par;
    AVStream *st;
    int ret = AVERROR(EIO);

    st = avformat_new_stream(avctx, NULL);
    if (!st) {
        ret = AVERROR(ENOMEM);
        goto error;
    }
    st->id = devtype;

    ctx->capture_filter[devtype]->stream_index = st->index;

    libAVPin_ConnectionMediaType(ctx->capture_pin[devtype], &type);

    par = st->codecpar;
    if (devtype == VideoDevice) {
        BITMAPINFOHEADER *bih = NULL;
        AVRational time_base;

        if (IsEqualGUID(&type.formattype, &FORMAT_VideoInfo)) {
            VIDEOINFOHEADER *v = (void *) type.pbFormat;
            time_base = (AVRational) { v->AvgTimePerFrame, 10000000 };
            bih = &v->bmiHeader;
        } else if (IsEqualGUID(&type.formattype, &FORMAT_VideoInfo2)) {
            VIDEOINFOHEADER2 *v = (void *) type.pbFormat;
            time_base = (AVRational) { v->AvgTimePerFrame, 10000000 };
            bih = &v->bmiHeader;
        }
        if (!bih) {
            av_log(avctx, AV_LOG_ERROR, "Could not get media type.\n");
            goto error;
        }

        st->avg_frame_rate = av_inv_q(time_base);
        st->r_frame_rate = av_inv_q(time_base);

        par->codec_type = AVMEDIA_TYPE_VIDEO;
        par->width      = bih->biWidth;
        par->height     = bih->biHeight;
        par->codec_tag  = bih->biCompression;
        par->format     = dshow_pixfmt(bih->biCompression, bih->biBitCount);
        if (bih->biCompression == MKTAG('H', 'D', 'Y', 'C')) {
            av_log(avctx, AV_LOG_DEBUG, "attempt to use full range for HDYC...\n");
            par->color_range = AVCOL_RANGE_MPEG; // just in case it needs this...
        }
        if (par->format == AV_PIX_FMT_NONE) {
            const AVCodecTag *const tags[] = { avformat_get_riff_video_tags(), NULL };
            par->codec_id = av_codec_get_id(tags, bih->biCompression);
            if (par->codec_id == AV_CODEC_ID_NONE) {
                av_log(avctx, AV_LOG_ERROR, "Unknown compression type. "
                                 "Please report type 0x%X.\n", (int) bih->biCompression);
                return AVERROR_PATCHWELCOME;
            }
            par->bits_per_coded_sample = bih->biBitCount;
        } else {
            par->codec_id = AV_CODEC_ID_RAWVIDEO;
            if (bih->biCompression == BI_RGB || bih->biCompression == BI_BITFIELDS) {
                par->bits_per_coded_sample = bih->biBitCount;
                par->extradata = av_malloc(9 + AV_INPUT_BUFFER_PADDING_SIZE);
                if (par->extradata) {
                    par->extradata_size = 9;
                    memcpy(par->extradata, "BottomUp", 9);
                }
            }
        }
    } else {
        WAVEFORMATEX *fx = NULL;

        if (IsEqualGUID(&type.formattype, &FORMAT_WaveFormatEx)) {
            fx = (void *) type.pbFormat;
        }
        if (!fx) {
            av_log(avctx, AV_LOG_ERROR, "Could not get media type.\n");
            goto error;
        }

        par->codec_type  = AVMEDIA_TYPE_AUDIO;
        par->format      = sample_fmt_bits_per_sample(fx->wBitsPerSample);
        par->codec_id    = waveform_codec_id(par->format);
        par->sample_rate = fx->nSamplesPerSec;
        par->channels    = fx->nChannels;
    }

    avpriv_set_pts_info(st, 64, 1, 10000000);

    ret = 0;

error:
    return ret;
}
Example #7
File: dshow.c Project: 3688/FFmpeg
/**
 * Cycle through available formats using the specified pin,
 * try to set parameters specified through AVOptions and if successful
 * return 1 in *pformat_set.
 * If pformat_set is NULL, list all pin capabilities.
 */
static void
dshow_cycle_formats(AVFormatContext *avctx, enum dshowDeviceType devtype,
                    IPin *pin, int *pformat_set)
{
    struct dshow_ctx *ctx = avctx->priv_data;
    IAMStreamConfig *config = NULL;
    AM_MEDIA_TYPE *type = NULL;
    int format_set = 0;
    void *caps = NULL;
    int i, n, size, r;

    if (IPin_QueryInterface(pin, &IID_IAMStreamConfig, (void **) &config) != S_OK)
        return;
    if (IAMStreamConfig_GetNumberOfCapabilities(config, &n, &size) != S_OK)
        goto end;

    caps = av_malloc(size);
    if (!caps)
        goto end;

    for (i = 0; i < n && !format_set; i++) {
        r = IAMStreamConfig_GetStreamCaps(config, i, &type, (void *) caps);
        if (r != S_OK)
            goto next;
#if DSHOWDEBUG
        ff_print_AM_MEDIA_TYPE(type);
#endif

        if (devtype == VideoDevice) {
            VIDEO_STREAM_CONFIG_CAPS *vcaps = caps;
            BITMAPINFOHEADER *bih;
            int64_t *fr;
            const AVCodecTag *const tags[] = { avformat_get_riff_video_tags(), NULL };
#if DSHOWDEBUG
            ff_print_VIDEO_STREAM_CONFIG_CAPS(vcaps);
#endif
            if (IsEqualGUID(&type->formattype, &FORMAT_VideoInfo)) {
                VIDEOINFOHEADER *v = (void *) type->pbFormat;
                fr = &v->AvgTimePerFrame;
                bih = &v->bmiHeader;
            } else if (IsEqualGUID(&type->formattype, &FORMAT_VideoInfo2)) {
                VIDEOINFOHEADER2 *v = (void *) type->pbFormat;
                fr = &v->AvgTimePerFrame;
                bih = &v->bmiHeader;
            } else {
                goto next;
            }
            if (!pformat_set) {
                enum AVPixelFormat pix_fmt = dshow_pixfmt(bih->biCompression, bih->biBitCount);
                if (pix_fmt == AV_PIX_FMT_NONE) {
                    enum AVCodecID codec_id = av_codec_get_id(tags, bih->biCompression);
                    AVCodec *codec = avcodec_find_decoder(codec_id);
                    if (codec_id == AV_CODEC_ID_NONE || !codec) {
                        av_log(avctx, AV_LOG_INFO, "  unknown compression type 0x%X", (int) bih->biCompression);
                    } else {
                        av_log(avctx, AV_LOG_INFO, "  vcodec=%s", codec->name);
                    }
                } else {
                    av_log(avctx, AV_LOG_INFO, "  pixel_format=%s", av_get_pix_fmt_name(pix_fmt));
                }
                av_log(avctx, AV_LOG_INFO, "  min s=%ldx%ld fps=%g max s=%ldx%ld fps=%g\n",
                       vcaps->MinOutputSize.cx, vcaps->MinOutputSize.cy,
                       1e7 / vcaps->MaxFrameInterval,
                       vcaps->MaxOutputSize.cx, vcaps->MaxOutputSize.cy,
                       1e7 / vcaps->MinFrameInterval);
                continue;
            }
            if (ctx->video_codec_id != AV_CODEC_ID_RAWVIDEO) {
                if (ctx->video_codec_id != av_codec_get_id(tags, bih->biCompression))
                    goto next;
            }
            if (ctx->pixel_format != AV_PIX_FMT_NONE &&
                ctx->pixel_format != dshow_pixfmt(bih->biCompression, bih->biBitCount)) {
                goto next;
            }
            if (ctx->framerate) {
                int64_t framerate = ((int64_t) ctx->requested_framerate.den*10000000)
                                            /  ctx->requested_framerate.num;
                if (framerate > vcaps->MaxFrameInterval ||
                    framerate < vcaps->MinFrameInterval)
                    goto next;
                *fr = framerate;
            }
            if (ctx->requested_width && ctx->requested_height) {
                if (ctx->requested_width  > vcaps->MaxOutputSize.cx ||
                    ctx->requested_width  < vcaps->MinOutputSize.cx ||
                    ctx->requested_height > vcaps->MaxOutputSize.cy ||
                    ctx->requested_height < vcaps->MinOutputSize.cy)
                    goto next;
                bih->biWidth  = ctx->requested_width;
                bih->biHeight = ctx->requested_height;
            }
        } else {
            AUDIO_STREAM_CONFIG_CAPS *acaps = caps;
            WAVEFORMATEX *fx;
#if DSHOWDEBUG
            ff_print_AUDIO_STREAM_CONFIG_CAPS(acaps);
#endif
            if (IsEqualGUID(&type->formattype, &FORMAT_WaveFormatEx)) {
                fx = (void *) type->pbFormat;
            } else {
                goto next;
            }
            if (!pformat_set) {
                av_log(avctx, AV_LOG_INFO, "  min ch=%lu bits=%lu rate=%6lu max ch=%lu bits=%lu rate=%6lu\n",
                       acaps->MinimumChannels, acaps->MinimumBitsPerSample, acaps->MinimumSampleFrequency,
                       acaps->MaximumChannels, acaps->MaximumBitsPerSample, acaps->MaximumSampleFrequency);
                continue;
            }
            if (ctx->sample_rate) {
                if (ctx->sample_rate > acaps->MaximumSampleFrequency ||
                    ctx->sample_rate < acaps->MinimumSampleFrequency)
                    goto next;
                fx->nSamplesPerSec = ctx->sample_rate;
            }
            if (ctx->sample_size) {
                if (ctx->sample_size > acaps->MaximumBitsPerSample ||
                    ctx->sample_size < acaps->MinimumBitsPerSample)
                    goto next;
                fx->wBitsPerSample = ctx->sample_size;
            }
            if (ctx->channels) {
                if (ctx->channels > acaps->MaximumChannels ||
                    ctx->channels < acaps->MinimumChannels)
                    goto next;
                fx->nChannels = ctx->channels;
            }
        }
        if (IAMStreamConfig_SetFormat(config, type) != S_OK)
            goto next;
        format_set = 1;
next:
        if (type->pbFormat)
            CoTaskMemFree(type->pbFormat);
        CoTaskMemFree(type);
    }
end:
    IAMStreamConfig_Release(config);
    av_free(caps);
    if (pformat_set)
        *pformat_set = format_set;
}
Example #8
enum CodecID mp_tag2codec_id(uint32_t tag, int audio)
{
    const struct AVCodecTag *taglists[3];
    get_taglists(taglists, audio);
    return av_codec_get_id(taglists, tag);
}
Example #9
static void fix_parameters(muxer_stream_t *stream)
{
    muxer_stream_priv_t *spriv = (muxer_stream_priv_t *) stream->priv;
    AVCodecContext *ctx;

    ctx = spriv->avstream->codec;

    ctx->bit_rate= stream->avg_rate;
    if(stream->wf && stream->wf->nAvgBytesPerSec && !ctx->bit_rate)
        ctx->bit_rate = stream->wf->nAvgBytesPerSec * 8;
    ctx->rc_buffer_size= stream->vbv_size;
    ctx->rc_max_rate= stream->max_rate;

    if(stream->type == MUXER_TYPE_AUDIO)
    {
        ctx->codec_id = av_codec_get_id(mp_wav_taglists, stream->wf->wFormatTag);
#if 0 //breaks aac in mov at least
        ctx->codec_tag = codec_get_wav_tag(ctx->codec_id);
#endif
        mp_msg(MSGT_MUXER, MSGL_INFO, "AUDIO CODEC ID: %x, TAG: %x\n", ctx->codec_id, (uint32_t) ctx->codec_tag);
        ctx->sample_rate = stream->wf->nSamplesPerSec;
//                mp_msg(MSGT_MUXER, MSGL_INFO, "stream->h.dwSampleSize: %d\n", stream->h.dwSampleSize);
        ctx->channels = stream->wf->nChannels;
        if(stream->h.dwRate && (stream->h.dwScale * (int64_t)ctx->sample_rate) % stream->h.dwRate == 0)
            ctx->frame_size= (stream->h.dwScale * (int64_t)ctx->sample_rate) / stream->h.dwRate;
        mp_msg(MSGT_MUXER, MSGL_V, "MUXER_LAVF(audio stream) frame_size: %d, scale: %u, sps: %u, rate: %u, ctx->block_align = stream->wf->nBlockAlign; %d=%d stream->wf->nAvgBytesPerSec:%d\n",
               ctx->frame_size, stream->h.dwScale, ctx->sample_rate, stream->h.dwRate,
               ctx->block_align, stream->wf->nBlockAlign, stream->wf->nAvgBytesPerSec);
        ctx->block_align = stream->h.dwSampleSize;
        if(stream->wf+1 && stream->wf->cbSize)
        {
            ctx->extradata = av_malloc(stream->wf->cbSize);
            if(ctx->extradata != NULL)
            {
                ctx->extradata_size = stream->wf->cbSize;
                memcpy(ctx->extradata, stream->wf+1, ctx->extradata_size);
            }
            else
                mp_msg(MSGT_MUXER, MSGL_ERR, "MUXER_LAVF(audio stream) error! couldn't allocate %d bytes for extradata\n",
                       stream->wf->cbSize);
        }
    }
    else if(stream->type == MUXER_TYPE_VIDEO)
    {
        ctx->codec_id = av_codec_get_id(mp_bmp_taglists, stream->bih->biCompression);
        if(ctx->codec_id <= 0 || force_fourcc)
            ctx->codec_tag= stream->bih->biCompression;
        mp_msg(MSGT_MUXER, MSGL_INFO, "VIDEO CODEC ID: %d\n", ctx->codec_id);
        ctx->width = stream->bih->biWidth;
        ctx->height = stream->bih->biHeight;
        ctx->bit_rate = 800000;
        ctx->time_base.den = stream->h.dwRate;
        ctx->time_base.num = stream->h.dwScale;
        if(stream->bih+1 && (stream->bih->biSize > sizeof(BITMAPINFOHEADER)))
        {
            ctx->extradata_size = stream->bih->biSize - sizeof(BITMAPINFOHEADER);
            ctx->extradata = av_malloc(ctx->extradata_size);
            if(ctx->extradata != NULL)
                memcpy(ctx->extradata, stream->bih+1, ctx->extradata_size);
            else
            {
                mp_msg(MSGT_MUXER, MSGL_ERR, "MUXER_LAVF(video stream) error! couldn't allocate %d bytes for extradata\n",
                       ctx->extradata_size);
                ctx->extradata_size = 0;
            }
        }
    }
}
Example #10
/// close video output stream and free associated memory
void CvVideoWriter_FFMPEG::close()
{
	unsigned i;

	// nothing to do if already released
	if ( !picture )
		return;

	/* no more frame to compress. The codec has a latency of a few
	   frames if using B frames, so we get the last frames by
	   passing the same picture again */
	// TODO -- do we need to account for latency here?

	/* write the trailer, if any */
	av_write_trailer(oc);

	// free pictures
#if LIBAVFORMAT_BUILD > 4628
	if( video_st->codec->pix_fmt != input_pix_fmt){
#else
	if( video_st->codec.pix_fmt != input_pix_fmt){
#endif
		cvFree(&(picture->data[0]));
	}
	av_free(picture);

    if (input_picture) {
        av_free(input_picture);
    }

	/* close codec */
#if LIBAVFORMAT_BUILD > 4628
	avcodec_close(video_st->codec);
#else
	avcodec_close(&(video_st->codec));
#endif

	av_free(outbuf);

	/* free the streams */
	for(i = 0; i < oc->nb_streams; i++) {
		av_freep(&oc->streams[i]->codec);
		av_freep(&oc->streams[i]);
	}

	if (!(fmt->flags & AVFMT_NOFILE)) {
		/* close the output file */


#if LIBAVCODEC_VERSION_INT >= ((51<<16)+(49<<8)+0)
		url_fclose(oc->pb);
#else
		url_fclose(&oc->pb);
#endif

	}

	/* free the stream */
	av_free(oc);

    cvReleaseImage( &temp_image );

	init();
}

/// Create a video writer object that uses FFMPEG
bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
		double fps, CvSize frameSize, bool is_color )
{
    CV_FUNCNAME("CvVideoWriter_FFMPEG::open");

	CodecID codec_id = CODEC_ID_NONE;
	int err, codec_pix_fmt, bitrate_scale=64;

	__BEGIN__;

    close();

	// check arguments
	assert (filename);
	assert (fps > 0);
	assert (frameSize.width > 0  &&  frameSize.height > 0);

	// tell FFMPEG to register codecs
	av_register_all ();

	/* auto detect the output format from the name and fourcc code. */
	fmt = guess_format(NULL, filename, NULL);
	if (!fmt) {
		CV_ERROR( CV_StsUnsupportedFormat, "FFMPEG does not recognize the given file extension");
	}

	/* determine optimal pixel format */
    if (is_color) {
        input_pix_fmt = PIX_FMT_BGR24;
    }
	else {
        input_pix_fmt = PIX_FMT_GRAY8;
    }

	// alloc memory for context
	oc = av_alloc_format_context();
	assert (oc);

	/* set file name */
	oc->oformat = fmt;
	snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

	/* set some options */
	oc->max_delay = (int)(0.7*AV_TIME_BASE);  /* This reduces buffer underrun warnings with MPEG */

	/* Lookup codec_id for given fourcc */
#if LIBAVCODEC_VERSION_INT<((51<<16)+(49<<8)+0)
        if( (codec_id = codec_get_bmp_id( fourcc )) == CODEC_ID_NONE ){
			CV_ERROR( CV_StsUnsupportedFormat,
				"FFMPEG could not find a codec matching the given FOURCC code. Use fourcc=CV_FOURCC_DEFAULT for auto selection." );
		}
#else
	{
	const struct AVCodecTag * tags[] = { codec_bmp_tags, NULL};
        if( (codec_id = av_codec_get_id(tags, fourcc)) == CODEC_ID_NONE ){
			CV_ERROR( CV_StsUnsupportedFormat,
				"FFMPEG could not find a codec matching the given FOURCC code. Use fourcc=CV_FOURCC_DEFAULT for auto selection." );
		}
	}
#endif

    // set a few optimal pixel formats for lossless codecs of interest..
    switch (codec_id) {
#if LIBAVCODEC_VERSION_INT>((50<<16)+(1<<8)+0)
    case CODEC_ID_JPEGLS:
        // BGR24 or GRAY8 depending on is_color...
        codec_pix_fmt = input_pix_fmt;
        break;
#endif
    case CODEC_ID_HUFFYUV:
        codec_pix_fmt = PIX_FMT_YUV422P;
        break;
    case CODEC_ID_MJPEG:
    case CODEC_ID_LJPEG:
      codec_pix_fmt = PIX_FMT_YUVJ420P;
      bitrate_scale = 128;
      break;
    case CODEC_ID_RAWVIDEO:
    default:
        // good for lossy formats, MPEG, etc.
        codec_pix_fmt = PIX_FMT_YUV420P;
        break;
    }

	// TODO -- safe to ignore output audio stream?
	video_st = icv_add_video_stream_FFMPEG(oc, codec_id,
			frameSize.width, frameSize.height, frameSize.width*frameSize.height*bitrate_scale,
            fps, codec_pix_fmt);


	/* set the output parameters (must be done even if no
       parameters). */
    if (av_set_parameters(oc, NULL) < 0) {
		CV_ERROR(CV_StsBadArg, "Invalid output format parameters");
    }

    dump_format(oc, 0, filename, 1);

    /* now that all the parameters are set, we can open the audio and
       video codecs and allocate the necessary encode buffers */
    if (!video_st){
		CV_ERROR(CV_StsBadArg, "Couldn't open video stream");
	}

    AVCodec *codec;
    AVCodecContext *c;

#if LIBAVFORMAT_BUILD > 4628
    c = (video_st->codec);
#else
    c = &(video_st->codec);
#endif

    c->codec_tag = fourcc;
    /* find the video encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
		CV_ERROR(CV_StsBadArg, "codec not found");
    }

    /* open the codec */
    if ( (err=avcodec_open(c, codec)) < 0) {
		char errtext[256];
		sprintf(errtext, "Could not open codec '%s': %s", codec->name, icvFFMPEGErrStr(err));
		CV_ERROR(CV_StsBadArg, errtext);
    }

    outbuf = NULL;

    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* allocate output buffer */
		/* assume we will never get codec output with more than 4 bytes per pixel... */
		outbuf_size = frameSize.width*frameSize.height*4;
        outbuf = (uint8_t *) av_malloc(outbuf_size);
    }

	bool need_color_convert;
	need_color_convert = (c->pix_fmt != input_pix_fmt);

    /* allocate the encoded raw picture */
    picture = icv_alloc_picture_FFMPEG(c->pix_fmt, c->width, c->height, need_color_convert);
    if (!picture) {
		CV_ERROR(CV_StsNoMem, "Could not allocate picture");
    }

    /* if the output format is not our input format, then a temporary
       picture of the input format is needed too. It is then converted
	   to the required output format */
	input_picture = NULL;
    if ( need_color_convert ) {
        input_picture = icv_alloc_picture_FFMPEG(input_pix_fmt, c->width, c->height, false);
        if (!input_picture) {
			CV_ERROR(CV_StsNoMem, "Could not allocate picture");
        }
    }

	/* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
			CV_ERROR(CV_StsBadArg, "Couldn't open output file for writing");
        }
    }

    /* write the stream header, if any */
    av_write_header( oc );


	__END__;

	return true;
}
Example #11
/// Create a video writer object that uses FFMPEG
inline bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
                                 double fps, int width, int height, bool is_color )
{
    icvInitFFMPEG_internal();

    CodecID codec_id = CODEC_ID_NONE;
    int err, codec_pix_fmt;
    double bitrate_scale = 1;

    close();

    // check arguments
    if( !filename )
        return false;
    if(fps <= 0)
        return false;

    // we allow frames of odd width or height, but in this case we truncate
    // the rightmost column/the bottom row. Probably, this should be handled more elegantly,
    // but some internal functions inside FFMPEG swscale require even width/height.
    width &= -2;
    height &= -2;
    if( width <= 0 || height <= 0 )
        return false;

    /* auto detect the output format from the name and fourcc code. */

#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
    fmt = av_guess_format(NULL, filename, NULL);
#else
    fmt = guess_format(NULL, filename, NULL);
#endif

    if (!fmt)
        return false;

    /* determine optimal pixel format */
    if (is_color) {
        input_pix_fmt = PIX_FMT_BGR24;
    }
    else {
        input_pix_fmt = PIX_FMT_GRAY8;
    }

    /* Lookup codec_id for given fourcc */
#if LIBAVCODEC_VERSION_INT<((51<<16)+(49<<8)+0)
    if( (codec_id = codec_get_bmp_id( fourcc )) == CODEC_ID_NONE )
        return false;
#else
    const struct AVCodecTag * tags[] = { codec_bmp_tags, NULL};
    if( (codec_id = av_codec_get_id(tags, fourcc)) == CODEC_ID_NONE )
        return false;
#endif

    // alloc memory for context
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
    oc = avformat_alloc_context();
#else
    oc = av_alloc_format_context();
#endif
    assert (oc);

    /* set file name */
    oc->oformat = fmt;
    _snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* set some options */
    oc->max_delay = (int)(0.7*AV_TIME_BASE);  /* This reduces buffer underrun warnings with MPEG */

    // set a few optimal pixel formats for lossless codecs of interest..
    switch (codec_id) {
#if LIBAVCODEC_VERSION_INT>((50<<16)+(1<<8)+0)
    case CODEC_ID_JPEGLS:
        // BGR24 or GRAY8 depending on is_color...
        codec_pix_fmt = input_pix_fmt;
        break;
#endif
    case CODEC_ID_HUFFYUV:
        codec_pix_fmt = PIX_FMT_YUV422P;
        break;
    case CODEC_ID_MJPEG:
    case CODEC_ID_LJPEG:
        codec_pix_fmt = PIX_FMT_YUVJ420P;
        bitrate_scale = 3;
        break;
    case CODEC_ID_RAWVIDEO:
        codec_pix_fmt = input_pix_fmt == PIX_FMT_GRAY8 ||
                        input_pix_fmt == PIX_FMT_GRAY16LE ||
                        input_pix_fmt == PIX_FMT_GRAY16BE ? input_pix_fmt : PIX_FMT_YUV420P;
        break;
    default:
        // good for lossy formats, MPEG, etc.
        codec_pix_fmt = PIX_FMT_YUV420P;
        break;
    }

    double bitrate = MIN(bitrate_scale*fps*width*height, (double)INT_MAX/2);

    // TODO -- safe to ignore output audio stream?
    video_st = icv_add_video_stream_FFMPEG(oc, codec_id,
                                           width, height, (int)(bitrate + 0.5),
                                           fps, codec_pix_fmt);

    /* set the output parameters (must be done even if no
   parameters). */
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 2, 0)
    if (av_set_parameters(oc, NULL) < 0) {
        return false;
    }
#endif

#if 0
#if FF_API_DUMP_FORMAT
    dump_format(oc, 0, filename, 1);
#else
    av_dump_format(oc, 0, filename, 1);
#endif
#endif

    /* now that all the parameters are set, we can open the audio and
     video codecs and allocate the necessary encode buffers */
    if (!video_st){
        return false;
    }

    AVCodec *codec;
    AVCodecContext *c;

#if LIBAVFORMAT_BUILD > 4628
    c = (video_st->codec);
#else
    c = &(video_st->codec);
#endif

    c->codec_tag = fourcc;
    /* find the video encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "Could not find encoder for codec id %d: %s", c->codec_id, icvFFMPEGErrStr(
        #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
                AVERROR_ENCODER_NOT_FOUND
        #else
                -1
        #endif
                ));
        return false;
    }

    s64 lbit_rate = (s64)c->bit_rate;
    lbit_rate += (bitrate / 2);
    lbit_rate = std::min(lbit_rate, (s64)INT_MAX);
    c->bit_rate_tolerance = (int)lbit_rate;
    c->bit_rate = (int)lbit_rate;

    /* open the codec */
    if ((err=
#if LIBAVCODEC_VERSION_INT >= ((53<<16)+(8<<8)+0)
         avcodec_open2(c, codec, NULL)
#else
         avcodec_open(c, codec)
#endif
         ) < 0) {
        fprintf(stderr, "Could not open codec '%s': %s", codec->name, icvFFMPEGErrStr(err));
        return false;
    }

    outbuf = NULL;

    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* allocate output buffer */
        /* assume we will never get codec output with more than 4 bytes per pixel... */
        outbuf_size = width*height*4;
        outbuf = (uint8_t *) av_malloc(outbuf_size);
    }

    bool need_color_convert;
    need_color_convert = (c->pix_fmt != input_pix_fmt);

    /* allocate the encoded raw picture */
    picture = icv_alloc_picture_FFMPEG(c->pix_fmt, c->width, c->height, need_color_convert);
    if (!picture) {
        return false;
    }

    /* if the output format is not our input format, then a temporary
   picture of the input format is needed too. It is then converted
   to the required output format */
    input_picture = NULL;
    if ( need_color_convert ) {
        input_picture = icv_alloc_picture_FFMPEG(input_pix_fmt, c->width, c->height, false);
        if (!input_picture) {
            return false;
        }
    }

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 2, 0)
        if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0)
#else
            if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0)
#endif
            {
            return false;
        }
    }

#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(52, 111, 0)
    /* write the stream header, if any */
    err=avformat_write_header(oc, NULL);
#else
    err=av_write_header( oc );
#endif

    if(err < 0)
    {
        close();
        remove(filename);
        return false;
    }
    frame_width = width;
    frame_height = height;
    ok = true;
    return true;
}
Example #12
/// close video output stream and free associated memory
void CvVideoWriter_FFMPEG::close()
{
	unsigned i;

	// nothing to do if already released
	if ( !picture )
		return;

	/* no more frame to compress. The codec has a latency of a few
	   frames if using B frames, so we get the last frames by
	   passing the same picture again */
	// TODO -- do we need to account for latency here?

	/* write the trailer, if any */
	av_write_trailer(oc);

	// free pictures
#if LIBAVFORMAT_BUILD > 4628
	if( video_st->codec->pix_fmt != input_pix_fmt){
#else
	if( video_st->codec.pix_fmt != input_pix_fmt){
#endif
		cvFree(&(picture->data[0]));
	}
	av_free(picture);

    if (input_picture) {
        av_free(input_picture);
    }

	/* close codec */
#if LIBAVFORMAT_BUILD > 4628
	avcodec_close(video_st->codec);
#else
	avcodec_close(&(video_st->codec));
#endif

	av_free(outbuf);

	/* free the streams */
	for(i = 0; i < oc->nb_streams; i++) {
		av_freep(&oc->streams[i]->codec);
		av_freep(&oc->streams[i]);
	}

	if (!(fmt->flags & AVFMT_NOFILE)) {
		/* close the output file */


#if LIBAVCODEC_VERSION_INT==((51<<16)+(49<<8)+0)
		url_fclose(oc->pb);
#else
		url_fclose(&oc->pb);
#endif

	}

	/* free the stream */
	av_free(oc);

    cvReleaseImage( &temp_image );

	init();
}

/// Create a video writer object that uses FFMPEG
bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
		double fps, CvSize frameSize, bool is_color )
{
    CV_FUNCNAME("CvVideoWriter_FFMPEG::open");

	CodecID codec_id = CODEC_ID_NONE;
	int err;

	__BEGIN__;

    close();

	// check arguments
	assert (filename);
	assert (fps > 0);
	assert (frameSize.width > 0  &&  frameSize.height > 0);

	// tell FFMPEG to register codecs
	av_register_all ();

	/* auto detect the output format from the name and fourcc code. */
	fmt = guess_format(NULL, filename, NULL);
	if (!fmt) {
		CV_ERROR( CV_StsUnsupportedFormat, "FFMPEG does not recognize the given file extension");
	}

	/* determine optimal pixel format */
    if (is_color) {
        input_pix_fmt = PIX_FMT_BGR24;
    }
	else {
        input_pix_fmt = PIX_FMT_GRAY8;
    }

	// alloc memory for context
	oc = av_alloc_format_context();
	assert (oc);

	/* set file name */
	oc->oformat = fmt;
	snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

	/* set some options */
	oc->max_delay = (int)(0.7*AV_TIME_BASE);  /* This reduces buffer underrun warnings with MPEG */

	/* Lookup codec_id for given fourcc */
	if(fourcc!=CV_FOURCC_DEFAULT){
#if LIBAVCODEC_VERSION_INT<((51<<16)+(49<<8)+0)
        if( (codec_id = codec_get_bmp_id( fourcc )) == CODEC_ID_NONE ){
			CV_ERROR( CV_StsUnsupportedFormat,
				"FFMPEG could not find a codec matching the given FOURCC code. Use fourcc=CV_FOURCC_DEFAULT for auto selection." );
		}
	}
#else
        if( (codec_id = av_codec_get_id((const AVCodecTag**)(&codec_bmp_tags), fourcc)) == CODEC_ID_NONE ){
			CV_ERROR( CV_StsUnsupportedFormat,
				"FFMPEG could not find a codec matching the given FOURCC code. Use fourcc=CV_FOURCC_DEFAULT for auto selection." );
		}
	}
Example #13
int groove_file_save_as(struct GrooveFile *file, const char *filename) {
    struct GrooveFilePrivate *f = (struct GrooveFilePrivate *) file;

    // detect output format
    AVOutputFormat *ofmt = av_guess_format(f->ic->iformat->name, f->ic->filename, NULL);
    if (!ofmt) {
        return GrooveErrorUnknownFormat;
    }

    // allocate output media context
    f->oc = avformat_alloc_context();
    if (!f->oc) {
        cleanup_save(file);
        return GrooveErrorNoMem;
    }

    f->oc->oformat = ofmt;
    snprintf(f->oc->filename, sizeof(f->oc->filename), "%s", filename);

    // open output file if needed
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&f->oc->pb, f->oc->filename, AVIO_FLAG_WRITE) < 0) {
            cleanup_save(file);
            return GrooveErrorFileSystem;
        }
        f->tempfile_exists = 1;
    }

    if (f->ic->nb_streams > INT_MAX) {
        cleanup_save(file);
        return GrooveErrorTooManyStreams;
    }
    int stream_count = (int)f->ic->nb_streams;

    // add all the streams
    for (int i = 0; i < stream_count; i++) {
        AVStream *in_stream = f->ic->streams[i];
        AVStream *out_stream = avformat_new_stream(f->oc, NULL);
        if (!out_stream) {
            cleanup_save(file);
            return GrooveErrorNoMem;
        }
        out_stream->id = in_stream->id;
        out_stream->disposition = in_stream->disposition;
        out_stream->time_base = in_stream->time_base;

        AVCodecContext *icodec = in_stream->codec;
        AVCodecContext *ocodec = out_stream->codec;
        ocodec->bits_per_raw_sample    = icodec->bits_per_raw_sample;
        ocodec->chroma_sample_location = icodec->chroma_sample_location;
        ocodec->codec_id   = icodec->codec_id;
        ocodec->codec_type = icodec->codec_type;
        if (!ocodec->codec_tag) {
            if (!f->oc->oformat->codec_tag ||
                 av_codec_get_id (f->oc->oformat->codec_tag, icodec->codec_tag) == ocodec->codec_id ||
                 av_codec_get_tag(f->oc->oformat->codec_tag, icodec->codec_id) <= 0)
                ocodec->codec_tag = icodec->codec_tag;
        }
        ocodec->bit_rate       = icodec->bit_rate;
        ocodec->rc_max_rate    = icodec->rc_max_rate;
        ocodec->rc_buffer_size = icodec->rc_buffer_size;
        ocodec->field_order    = icodec->field_order;

        uint64_t extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
        if (extra_size > INT_MAX) {
            cleanup_save(file);
            return GrooveErrorEncoding;
        }
        ocodec->extradata      = allocate<uint8_t>(extra_size);
        if (!ocodec->extradata) {
            cleanup_save(file);
            return GrooveErrorNoMem;
        }
        memcpy(ocodec->extradata, icodec->extradata, icodec->extradata_size);
        ocodec->extradata_size = icodec->extradata_size;
        switch (ocodec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ocodec->channel_layout     = icodec->channel_layout;
            ocodec->sample_rate        = icodec->sample_rate;
            ocodec->channels           = icodec->channels;
            ocodec->frame_size         = icodec->frame_size;
            ocodec->audio_service_type = icodec->audio_service_type;
            ocodec->block_align        = icodec->block_align;
            break;
        case AVMEDIA_TYPE_VIDEO:
            ocodec->pix_fmt            = icodec->pix_fmt;
            ocodec->width              = icodec->width;
            ocodec->height             = icodec->height;
            ocodec->has_b_frames       = icodec->has_b_frames;
            if (!ocodec->sample_aspect_ratio.num) {
                ocodec->sample_aspect_ratio   =
                out_stream->sample_aspect_ratio =
                    in_stream->sample_aspect_ratio.num ? in_stream->sample_aspect_ratio :
                    icodec->sample_aspect_ratio.num ?
                    icodec->sample_aspect_ratio : AVRational{0, 1};
            }
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            ocodec->width  = icodec->width;
            ocodec->height = icodec->height;
            break;
        case AVMEDIA_TYPE_DATA:
        case AVMEDIA_TYPE_ATTACHMENT:
            break;
        default:
            cleanup_save(file);
            return GrooveErrorEncoding;
        }
    }

    // set metadata
    av_dict_copy(&f->oc->metadata, f->ic->metadata, 0);

    if (avformat_write_header(f->oc, NULL) < 0) {
        cleanup_save(file);
        return GrooveErrorEncoding;
    }

    AVPacket *pkt = &f->audio_pkt;
    for (;;) {
        int err = av_read_frame(f->ic, pkt);
        if (err == AVERROR_EOF) {
            break;
        } else if (err < 0) {
            cleanup_save(file);
            return GrooveErrorDecoding;
        }
        if (av_write_frame(f->oc, pkt) < 0) {
            cleanup_save(file);
            return GrooveErrorEncoding;
        }
        av_free_packet(pkt);
    }

    if (av_write_trailer(f->oc) < 0) {
        cleanup_save(file);
        return GrooveErrorEncoding;
    }

    f->tempfile_exists = 0;
    cleanup_save(file);

    return 0;
}