Example #1
/* Prepare a dummy image. */
static void fill_rgb_image(AVFrame *pict, int frame_index,
                           int width, int height)
{
    int x, y, ret;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally;
     * make sure we do not overwrite it here
     */
    ret = av_frame_make_writable(pict);
    if (ret < 0)
        exit(1);

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            /* Packed RGB24: three bytes per pixel, rows padded to linesize. */
            pict->data[0][y * pict->linesize[0] + 3 * x + 0] = frame_index & 255;
            pict->data[0][y * pict->linesize[0] + 3 * x + 1] = y & 255;
            pict->data[0][y * pict->linesize[0] + 3 * x + 2] = x & 255;
        }
    }
}
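The comment in this example alludes to FFmpeg's buffer reference counting: if the encoder still holds a reference to the frame's buffer, av_frame_make_writable() replaces it with a private copy. A minimal standalone sketch (error checks omitted for brevity; the function name is illustrative, not from the example) that makes this copy-on-write behaviour observable:

#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>
#include <stdio.h>

static void demo_copy_on_write(void)
{
    AVFrame *a = av_frame_alloc();
    AVFrame *b = av_frame_alloc();

    a->format = AV_PIX_FMT_RGB24;
    a->width  = 64;
    a->height = 64;
    av_frame_get_buffer(a, 0);   /* refcounted buffer; align 0 = automatic (older releases suggested 32) */

    av_frame_ref(b, a);          /* b now shares a's buffer */
    printf("writable before: %d\n", av_frame_is_writable(a));  /* 0: buffer is shared */

    av_frame_make_writable(a);   /* allocates a private buffer and copies the pixels */
    printf("writable after:  %d\n", av_frame_is_writable(a));  /* 1 */
    printf("buffers differ:  %d\n", a->data[0] != b->data[0]); /* 1 */

    av_frame_free(&a);
    av_frame_free(&b);
}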
Example #2
value
ffmpeg_frame_new(value stream, value pts_)
{
  CAMLparam2(stream, pts_);
  CAMLlocal1(frame);
  if (Stream_context_direct_val(stream) != Val_int(0)) {
    double pts = Double_val(pts_);
    frame = wrap_ptr(&avframe_ops, av_frame_alloc());
    AVFrame_val(frame)->format = USER_PIXFORMAT; // 0xrrggbbaa
    AVFrame_val(frame)->width = Stream_aux_val(stream)->avstream->codec->width;
    AVFrame_val(frame)->height = Stream_aux_val(stream)->avstream->codec->height;

    int ret;
    ret = av_frame_get_buffer(AVFrame_val(frame), 32);
    raise_if_not(ret >= 0, ExnMemory, ret);

    ret = av_frame_make_writable(AVFrame_val(frame));
    raise_if_not(ret >= 0, ExnLogic, ret);

    AVFrame_val(frame)->pts = (int64_t) (Stream_aux_val(stream)->avstream->time_base.den * pts);
  } else {
    raise(ExnClosed, 0);
  }

  CAMLreturn((value) frame);
}
Example #3
void _ffmpegPostVideoFrame(struct GBAAVStream* stream, struct GBAVideoRenderer* renderer) {
	struct FFmpegEncoder* encoder = (struct FFmpegEncoder*) stream;
	if (!encoder->context) {
		return;
	}
	uint8_t* pixels;
	unsigned stride;
	renderer->getPixels(renderer, &stride, (void**) &pixels);
	stride *= 4;

	AVPacket packet;

	av_init_packet(&packet);
	packet.data = 0;
	packet.size = 0;
#if LIBAVCODEC_VERSION_MAJOR >= 55
	av_frame_make_writable(encoder->videoFrame);
#endif
	encoder->videoFrame->pts = av_rescale_q(encoder->currentVideoFrame, encoder->video->time_base, encoder->videoStream->time_base);
	++encoder->currentVideoFrame;

	sws_scale(encoder->scaleContext, (const uint8_t* const*) &pixels, (const int*) &stride, 0, VIDEO_VERTICAL_PIXELS, encoder->videoFrame->data, encoder->videoFrame->linesize);

	int gotData;
	avcodec_encode_video2(encoder->video, &packet, encoder->videoFrame, &gotData);
	if (gotData) {
		if (encoder->videoStream->codec->coded_frame->key_frame) {
			packet.flags |= AV_PKT_FLAG_KEY;
		}
		packet.stream_index = encoder->videoStream->index;
		av_interleaved_write_frame(encoder->context, &packet);
	}
	av_free_packet(&packet);
}
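avcodec_encode_video2() used here was deprecated in FFmpeg 3.x and removed in later releases. A hedged sketch of the equivalent step with the send/receive API (parameter names are illustrative; note this example pre-rescales the frame's pts to the stream time base, so adapt the timestamp handling to your setup):

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

static void encode_and_mux_frame(AVCodecContext *venc, AVFrame *frame,
                                 AVFormatContext *mux, AVStream *st)
{
    AVPacket *packet = av_packet_alloc();
    if (!packet)
        return;

    if (avcodec_send_frame(venc, frame) >= 0) {
        /* one frame in may yield zero or more packets out */
        while (avcodec_receive_packet(venc, packet) >= 0) {
            av_packet_rescale_ts(packet, venc->time_base, st->time_base);
            packet->stream_index = st->index;
            av_interleaved_write_frame(mux, packet); /* takes ownership of the packet data */
        }
    }
    av_packet_free(&packet);
}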
Example #4
void Frame::copy(void *src)
{
    if (src == NULL) return;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally;
     * make sure we do not overwrite it here
     */
    if (av_frame_make_writable(frm) < 0) return;

    /* The original commented-out memcpy assumed a packed 4-bytes-per-pixel
     * source; copy row by row instead so any padding in frm->linesize[0]
     * is respected. */
    const uint8_t *s = static_cast<const uint8_t *>(src);
    for (int y = 0; y < frm->height; y++)
        memcpy(frm->data[0] + y * frm->linesize[0],
               s + y * 4 * frm->width, 4 * frm->width);

    ofLogNotice() << ".";
}
Example #5
static AVFrame *GetVideoFrame(OutputStream *ost, BYTE* src_img)
{
	AVCodecContext *c = ost->st->codec;

	if (!ost->sws_ctx) {
		ost->sws_ctx = sws_getContext(c->width, c->height,
									  AV_PIX_FMT_BGR0,
									  c->width, c->height,
									  c->pix_fmt,
									  SCALE_FLAGS, NULL, NULL, NULL);
		if (!ost->sws_ctx) {
			fprintf(stderr,
					"Could not initialize the conversion context\n");
			return NULL;
		}
	}
	// Copy the image here
	av_frame_make_writable(ost->tmp_frame);
	avpicture_fill((AVPicture*)ost->tmp_frame, src_img, AV_PIX_FMT_BGR0, c->width, c->height);

	sws_scale(ost->sws_ctx,
			  (const uint8_t * const *)ost->tmp_frame->data, ost->tmp_frame->linesize,
			  0, c->height, ost->frame->data, ost->frame->linesize);


	ost->frame->pts = ost->next_pts++;

	return ost->frame;
}
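One caveat in this example: avpicture_fill() repoints tmp_frame->data at src_img, so the buffers that av_frame_make_writable() just secured are not what sws_scale() reads. A sketch of an alternative (same assumed OutputStream layout and BYTE typedef) that instead describes the raw bytes in local arrays via av_image_fill_arrays(), the documented replacement for the deprecated avpicture_fill():

#include <libavutil/imgutils.h>

static AVFrame *GetVideoFrameNoClobber(OutputStream *ost, BYTE *src_img)
{
	AVCodecContext *c = ost->st->codec;
	uint8_t *src_data[4];
	int      src_linesize[4];

	/* describe src_img in place; nothing is copied here */
	av_image_fill_arrays(src_data, src_linesize, src_img,
	                     AV_PIX_FMT_BGR0, c->width, c->height, 1);

	if (av_frame_make_writable(ost->frame) < 0)
		return NULL;

	/* ost->sws_ctx is assumed initialized as in the example above */
	sws_scale(ost->sws_ctx, (const uint8_t * const *) src_data, src_linesize,
	          0, c->height, ost->frame->data, ost->frame->linesize);

	ost->frame->pts = ost->next_pts++;
	return ost->frame;
}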
Example #6
/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    AVFrame *frame;
    int ret;
    int got_packet;
    int dst_nb_samples;
    
    av_init_packet(&pkt);
    c = ost->st->codec;
    
    frame = get_audio_frame(ost);
    
    if (frame) {
        /* convert samples from native format to destination codec format, using the resampler */
        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
                                        c->sample_rate, c->sample_rate, AV_ROUND_UP);
        av_assert0(dst_nb_samples == frame->nb_samples);
        
        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);
        
        /* convert to destination format */
        ret = swr_convert(ost->swr_ctx,
                          ost->frame->data, dst_nb_samples,
                          (const uint8_t **)frame->data, frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            exit(1);
        }
        frame = ost->frame;
        
        frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
        ost->samples_count += dst_nb_samples;
    }
    
    ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }
    
    if (got_packet) {
        ret = write_frame(oc, &c->time_base, ost->st, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error while writing audio frame: %s\n",
                    av_err2str(ret));
            exit(1);
        }
    }
    
    return (frame || got_packet) ? 0 : 1;
}
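write_frame() is called but not shown; in FFmpeg's muxing.c example, which this function follows closely, it rescales the packet's timestamps from the codec to the stream time base and hands the packet to the muxer (logging omitted here):

static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base,
                       AVStream *st, AVPacket *pkt)
{
    /* rescale output packet timestamp values from codec to stream timebase */
    av_packet_rescale_ts(pkt, *time_base, st->time_base);
    pkt->stream_index = st->index;

    /* Write the compressed frame to the media file. */
    return av_interleaved_write_frame(fmt_ctx, pkt);
}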
Example #7
void _ffmpegPostVideoFrame(struct mAVStream* stream, const color_t* pixels, size_t stride) {
	struct FFmpegEncoder* encoder = (struct FFmpegEncoder*) stream;
	if (!encoder->context) {
		return;
	}
	stride *= BYTES_PER_PIXEL;

	AVPacket packet;

	av_init_packet(&packet);
	packet.data = 0;
	packet.size = 0;
#if LIBAVCODEC_VERSION_MAJOR >= 55
	av_frame_make_writable(encoder->videoFrame);
#endif
	encoder->videoFrame->pts = av_rescale_q(encoder->currentVideoFrame, encoder->video->time_base, encoder->videoStream->time_base);
	++encoder->currentVideoFrame;

	sws_scale(encoder->scaleContext, (const uint8_t* const*) &pixels, (const int*) &stride, 0, encoder->iheight, encoder->videoFrame->data, encoder->videoFrame->linesize);

	int gotData;
	avcodec_encode_video2(encoder->video, &packet, encoder->videoFrame, &gotData);
	if (gotData) {
		if (encoder->videoStream->codec->coded_frame->key_frame) {
			packet.flags |= AV_PKT_FLAG_KEY;
		}
		packet.stream_index = encoder->videoStream->index;
		av_interleaved_write_frame(encoder->context, &packet);
	}
	av_free_packet(&packet);
}
Example #8
/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index,
                           int width, int height)
{
    int x, y, i, ret;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally;
     * make sure we do not overwrite it here
     */
    ret = av_frame_make_writable(pict);
    if (ret < 0)
        exit(1);

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}
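fill_yuv_image() assumes pict already owns refcounted buffers; in the muxing.c example this pattern comes from, the allocation looks like the following (the alignment of 32 matches these older examples; newer releases pass 0 for automatic alignment):

static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    int ret;

    picture = av_frame_alloc();
    if (!picture)
        return NULL;

    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;

    /* allocate the buffers for the frame data */
    ret = av_frame_get_buffer(picture, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }

    return picture;
}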
Example #9
    void LibAVVideoWriter::Data::fill_rgb_image(const ImgBase *src, AVFrame **pict)
    {
      if(!*pict) {
        *pict = alloc_picture(INPUT_FORMAT,src->getSize().width,src->getSize().height);
      } else {
        if((*pict)->width != src->getSize().width || (*pict)->height != src->getSize().height) {
#if LIBAVCODEC_VERSION_MAJOR > 54
          av_frame_free(pict);
#else
          av_free((*pict)->data[0]);
          av_free(*pict);
#endif
          *pict = alloc_picture(INPUT_FORMAT,src->getSize().width,src->getSize().height);
        }
      }
#if LIBAVCODEC_VERSION_MAJOR > 54
      av_frame_make_writable(*pict);
#endif
      depth d = src->getDepth();
      switch(d) {
        case depth16s:
          core::planarToInterleaved(src->as16s(),(*pict)->data[0],(*pict)->linesize[0]);
          break;
        case depth32f:
          core::planarToInterleaved(src->as32f(),(*pict)->data[0],(*pict)->linesize[0]);
          break;
        case depth32s:
          core::planarToInterleaved(src->as32s(),(*pict)->data[0],(*pict)->linesize[0]);
          break;
        case depth64f:
          core::planarToInterleaved(src->as64f(),(*pict)->data[0],(*pict)->linesize[0]);
          break;
        default:
          core::planarToInterleaved(src->as8u(),(*pict)->data[0],(*pict)->linesize[0]);
          break;
      }
    }
Example #10
void _ffmpegPostAudioFrame(struct GBAAVStream* stream, int32_t left, int32_t right) {
	struct FFmpegEncoder* encoder = (struct FFmpegEncoder*) stream;
	if (!encoder->context || !encoder->audioCodec) {
		return;
	}

	encoder->audioBuffer[encoder->currentAudioSample * 2] = left;
	encoder->audioBuffer[encoder->currentAudioSample * 2 + 1] = right;

	++encoder->currentAudioFrame;
	++encoder->currentAudioSample;

	if ((encoder->currentAudioSample * 4) < encoder->audioBufferSize) {
		return;
	}
	encoder->currentAudioSample = 0;

	int channelSize = 2 * av_get_bytes_per_sample(encoder->audio->sample_fmt);
	avresample_convert(encoder->resampleContext,
		0, 0, 0,
		(uint8_t**) &encoder->audioBuffer, 0, encoder->audioBufferSize / 4);
	if (avresample_available(encoder->resampleContext) < encoder->audioFrame->nb_samples) {
		return;
	}
#if LIBAVCODEC_VERSION_MAJOR >= 55
	av_frame_make_writable(encoder->audioFrame);
#endif
	avresample_read(encoder->resampleContext, encoder->audioFrame->data, encoder->postaudioBufferSize / channelSize);

	AVRational timeBase = { 1, PREFERRED_SAMPLE_RATE };
	encoder->audioFrame->pts = encoder->nextAudioPts;
	encoder->nextAudioPts = av_rescale_q(encoder->currentAudioFrame, timeBase, encoder->audioStream->time_base);

	AVPacket packet;
	av_init_packet(&packet);
	packet.data = 0;
	packet.size = 0;
	int gotData;
	avcodec_encode_audio2(encoder->audio, &packet, encoder->audioFrame, &gotData);
	if (gotData) {
		if (encoder->absf) {
			AVPacket tempPacket = packet;
			int success = av_bitstream_filter_filter(encoder->absf, encoder->audio, 0,
				&tempPacket.data, &tempPacket.size,
				packet.data, packet.size, 0);
			if (success > 0) {
#if LIBAVUTIL_VERSION_MAJOR >= 53
				tempPacket.buf = av_buffer_create(tempPacket.data, tempPacket.size, av_buffer_default_free, 0, 0);
#endif
				av_free_packet(&packet);
			}
			packet = tempPacket;
		}
		packet.stream_index = encoder->audioStream->index;
		av_interleaved_write_frame(encoder->context, &packet);
	}
	av_free_packet(&packet);
}
Example #11
static int WriteAudioFrame(AVFormatContext *oc, OutputStream *ost, AVI6 *avi)
{
	AVCodecContext *c = NULL;
	AVPacket pkt = { 0 };
	AVFrame *frame = NULL;
	int ret = 0;
	int got_packet = 0;
	int dst_nb_samples = 0;

	av_init_packet(&pkt);
	c = ost->st->codec;

	frame = GetAudioFrame(ost, avi);

	if (frame) {
		// Determine the number of samples after format conversion
		dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, frame->sample_rate) + frame->nb_samples,
										c->sample_rate, c->sample_rate, AV_ROUND_UP);
		//av_assert0(dst_nb_samples == frame->nb_samples);

		// Make the frame writable
		ret = av_frame_make_writable(ost->frame);
		if (ret < 0)
			exit(1);

		// Convert the audio format
		ret = swr_convert(ost->swr_ctx,
						  ost->frame->data, dst_nb_samples,
						  (const uint8_t **)frame->data, frame->nb_samples);
		if (ret < 0) {
			fprintf(stderr, "Error while converting\n");
			return 0;
		}
		frame = ost->frame;

		frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
		ost->samples_count += dst_nb_samples;

		ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
		if (ret < 0) {
			fprintf(stderr, "Error encoding audio frame: %s\n", MakeErrorString(ret));
			return 0;
		}

		if (got_packet) {
			ret = WriteFrame(oc, &c->time_base, ost->st, &pkt);
			if (ret < 0) {
				fprintf(stderr, "Error while writing audio frame: %s\n",
						MakeErrorString(ret));
				return 0;
			}
		}
	}

	return (frame || got_packet) ? 0 : 1;
}
Example #12
static void make_frame_writable(AVFrame *frame)
{
#ifdef HAVE_AV_FRAME_MAKE_WRITABLE
  // Make frame writable
  int err = av_frame_make_writable(frame);
  if (err < 0)
    scm_misc_error("make-frame-writable", "Error making frame writable: ~a",
                   scm_list_1(get_error_text(err)));
#endif
}
Example #13
BOOL CVideoLivRecord::fill_rgba_picture(AVFrame* frame, int width, int height, void* pBuffer, int len)
{
	int ret = av_frame_make_writable(frame);
	if (ret < 0){
		log("[CVideoLivRecord::fill_picture] -- av_frame_make_writable() error");
		return FALSE;
	}

	memcpy(frame->data[0], pBuffer, len);

	return TRUE;
}
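The memcpy above assumes frame->data[0] is tightly packed, i.e. that len bytes of pBuffer map onto rows of exactly frame->linesize[0] bytes. When the encoder pads rows, a stride-aware copy is safer; a sketch (parameter names are illustrative) using av_image_copy_plane():

#include <libavutil/imgutils.h>

/* Copy a tightly packed RGBA buffer into a frame whose rows may be padded
 * (frame->linesize[0] >= 4 * width). */
static int fill_rgba_padded(AVFrame *frame, const uint8_t *pBuffer,
                            int width, int height)
{
	if (av_frame_make_writable(frame) < 0)
		return -1;

	av_image_copy_plane(frame->data[0], frame->linesize[0],
	                    pBuffer, 4 * width, /* packed source stride */
	                    4 * width, height); /* bytes per row, row count */
	return 0;
}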
Example #14
static inline int copy_field
(
    lw_log_handler_t *lhp,
    AVFrame          *dst,
    AVFrame          *src,
    int               line_offset
)
{
    /* Check if the destination is writable. */
    if( av_frame_is_writable( dst ) == 0 )
    {
        /* The destination is NOT writable, so allocate new buffers and copy the data. */
        av_frame_unref( dst );
        if( av_frame_ref( dst, src ) < 0 )
        {
            if( lhp->show_log )
                lhp->show_log( lhp, LW_LOG_ERROR, "Failed to reference a video frame.\n" );
            return -1;
        }
        if( av_frame_make_writable( dst ) < 0 )
        {
            if( lhp->show_log )
                lhp->show_log( lhp, LW_LOG_ERROR, "Failed to make a video frame writable.\n" );
            return -1;
        }
        /* For direct rendering, the destination can not know
         * whether the value at the address held by the opaque pointer is valid or not.
         * Anyway, the opaque pointer for direct rendering shall be set to NULL. */
        dst->opaque = NULL;
    }
    else
    {
        /* The destination is writable. Copy field data from the source. */
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get( (enum AVPixelFormat)dst->format );
        int number_of_planes = av_pix_fmt_count_planes( (enum AVPixelFormat)dst->format );
        int height           = MIN( dst->height, src->height );
        for( int i = 0; i < number_of_planes; i++ )
        {
            int r_shift = 1 + ((i == 1 || i == 2) ? desc->log2_chroma_h : 0);
            int field_height = (height >> r_shift) + (line_offset == 0 && (height & 1) ? 1 : 0);
            av_image_copy_plane( dst->data[i] + dst->linesize[i] * line_offset, 2 * dst->linesize[i],
                                 src->data[i] + src->linesize[i] * line_offset, 2 * src->linesize[i],
                                 MIN( dst->linesize[i], src->linesize[i] ),
                                 field_height );
        }
    }
    /* Treat this frame as interlaced. */
    dst->interlaced_frame = 1;
    return 0;
}
Example #15
BOOL CVideoLivRecord::write_audio_frame(AVStream *st, void* pBuffer, LONG len)
{
	AVCodecContext* avcc = st->codec;
	AVPacket pkt = {0};
	av_init_packet(&pkt);
	AVFrame* frame = get_audio_frame(st, pBuffer, len);
	int ret = 0;
	int dst_nb_samples = 0;
	if (frame){
		dst_nb_samples = (int)av_rescale_rnd(swr_get_delay(m_pAudioSwrctx, avcc->sample_rate) + frame->nb_samples,
			avcc->sample_rate, avcc->sample_rate, AV_ROUND_UP);
		av_assert0(dst_nb_samples == frame->nb_samples);
		ret = av_frame_make_writable(m_pAudioFrame);
		if (ret < 0){
			log("[CVideoLivRecord::write_audio_frame] -- av_frame_make_writable() error");
			return FALSE;
		}
		ret = swr_convert(m_pAudioSwrctx, m_pAudioFrame->data, dst_nb_samples, 
			             (const uint8_t**)frame->data, frame->nb_samples);
		if (ret < 0){
			log("[CVideoLivRecord::write_audio_frame] -- av_frame_make_writable() error");
			return FALSE;
		}
		frame = m_pAudioFrame;
		AVRational tmp = {1, avcc->sample_rate};

		frame->pts = av_rescale_q(m_AudioSamplesCount, tmp, avcc->time_base);
		m_AudioSamplesCount += dst_nb_samples;
	}
	int got_packet = 0;
	ret = avcodec_encode_audio2(avcc, &pkt, frame, &got_packet);
	if (ret < 0){
		log("[CVideoLivRecord::write_audio_frame] -- avcodec_encode_audio2() error");
		return FALSE;
	}
	if(got_packet){
		av_packet_rescale_ts(&pkt, avcc->time_base, st->time_base);
		pkt.stream_index = st->index;
		ret = av_interleaved_write_frame(m_pAVFormatContext, &pkt);
		//ret = write_audio_frame(m_pAudioStream, pBuffer, len);
		if (ret < 0){
			log("[CVideoLivRecord::write_audio_frame] -- write_audio_frame() error");
			return FALSE;
		}
	}
	// as in muxing.c: FALSE while there is more data to process, TRUE when finished
	return (frame || got_packet)? FALSE : TRUE;
}
Example #16
value
ffmpeg_write(value stream, value rgbaFrame)
{
  CAMLparam2(stream, rgbaFrame);
  int ret;
  AVFrame* yuvFrame = av_frame_alloc();
  raise_if_not(!!yuvFrame, ExnMemory, 0);

  struct StreamAux streamAux = *Stream_aux_val(stream);
  AVFormatContext* fmtCtx = Stream_context_val(stream)->fmtCtx;

  yuvFrame->format = AV_PIX_FMT_YUV420P;
  yuvFrame->width = AVFrame_val(rgbaFrame)->width;
  yuvFrame->height = AVFrame_val(rgbaFrame)->height;

  ret = av_frame_get_buffer(yuvFrame, 32);
  raise_if_not(ret >= 0, ExnMemory, ret);

  ret = av_frame_make_writable(yuvFrame);
  raise_if_not(ret >= 0, ExnMemory, ret);

  yuvFrame->pts = AVFrame_val(rgbaFrame)->pts;

  caml_enter_blocking_section();

  sws_scale(streamAux.swsCtx,
            (const uint8_t * const *) AVFrame_val(rgbaFrame)->data,
            AVFrame_val(rgbaFrame)->linesize,
            0, streamAux.avstream->codec->height, yuvFrame->data, yuvFrame->linesize);

  AVPacket packet = { 0 };
  av_init_packet(&packet);
  int gotIt = 0;
  ret = avcodec_encode_video2(streamAux.avstream->codec, &packet, yuvFrame, &gotIt);
  raise_and_leave_blocking_section_if_not(ret >= 0, ExnEncode, ret);
  if (gotIt) {
    packet.stream_index = 0;
    ret = av_interleaved_write_frame(fmtCtx, &packet);
    raise_and_leave_blocking_section_if_not(ret >= 0, ExnFileIO, ret);
  }

  av_frame_free(&yuvFrame);

  caml_leave_blocking_section();

  CAMLreturn(Val_unit);
}
Example #17
static block_t *vlc_av_frame_Wrap(AVFrame *frame)
{
    for (unsigned i = 1; i < AV_NUM_DATA_POINTERS; i++)
        assert(frame->linesize[i] == 0); /* only packed frame supported */

    if (av_frame_make_writable(frame)) /* TODO: read-only block_t */
        return NULL;

    vlc_av_frame_t *b = malloc(sizeof (*b));
    if (unlikely(b == NULL))
        return NULL;

    block_t *block = &b->self;

    block_Init(block, frame->extended_data[0], frame->linesize[0]);
    block->i_nb_samples = frame->nb_samples;
    block->pf_release = vlc_av_frame_Release;
    b->frame = frame;
    return block;
}
Example #18
File: muxing.c Project: BtbN/FFmpeg
static AVFrame *get_video_frame(OutputStream *ost)
{
    AVCodecContext *c = ost->enc;

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, c->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally; make sure we do not overwrite it here */
    if (av_frame_make_writable(ost->frame) < 0)
        exit(1);

    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        /* as we only generate a YUV420P picture, we must convert it
         * to the codec pixel format if needed */
        if (!ost->sws_ctx) {
            ost->sws_ctx = sws_getContext(c->width, c->height,
                                          AV_PIX_FMT_YUV420P,
                                          c->width, c->height,
                                          c->pix_fmt,
                                          SCALE_FLAGS, NULL, NULL, NULL);
            if (!ost->sws_ctx) {
                fprintf(stderr,
                        "Could not initialize the conversion context\n");
                exit(1);
            }
        }
        fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
        sws_scale(ost->sws_ctx, (const uint8_t * const *) ost->tmp_frame->data,
                  ost->tmp_frame->linesize, 0, c->height, ost->frame->data,
                  ost->frame->linesize);
    } else {
        fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
    }

    ost->frame->pts = ost->next_pts++;

    return ost->frame;
}
Example #19
int main(int argc, char **argv)
{
    const char *filename;
    const AVCodec *codec;
    AVCodecContext *c = NULL;
    AVFrame *frame;
    AVPacket *pkt;
    int i, j, k, ret;
    FILE *f;
    uint16_t *samples;
    float t, tincr;

    if (argc <= 1) {
        fprintf(stderr, "Usage: %s <output file>\n", argv[0]);
        return 0;
    }
    filename = argv[1];

    /* find the MP2 encoder */
    codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate audio codec context\n");
        exit(1);
    }

    /* put sample parameters */
    c->bit_rate = 64000;

    /* check that the encoder supports s16 pcm input */
    c->sample_fmt = AV_SAMPLE_FMT_S16;
    if (!check_sample_fmt(codec, c->sample_fmt)) {
        fprintf(stderr, "Encoder does not support sample format %s",
                av_get_sample_fmt_name(c->sample_fmt));
        exit(1);
    }

    /* select other audio parameters supported by the encoder */
    c->sample_rate    = select_sample_rate(codec);
    c->channel_layout = select_channel_layout(codec);
    c->channels       = av_get_channel_layout_nb_channels(c->channel_layout);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    /* packet for holding encoded output */
    pkt = av_packet_alloc();
    if (!pkt) {
        fprintf(stderr, "could not allocate the packet\n");
        exit(1);
    }

    /* frame containing input raw audio */
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate audio frame\n");
        exit(1);
    }

    frame->nb_samples     = c->frame_size;
    frame->format         = c->sample_fmt;
    frame->channel_layout = c->channel_layout;

    /* allocate the data buffers */
    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate audio data buffers\n");
        exit(1);
    }

    /* encode a single tone sound */
    t = 0;
    tincr = 2 * M_PI * 440.0 / c->sample_rate;
    for (i = 0; i < 200; i++) {
        /* make sure the frame is writable -- makes a copy if the encoder
         * kept a reference internally */
        ret = av_frame_make_writable(frame);
        if (ret < 0)
            exit(1);
        samples = (uint16_t*)frame->data[0];

        for (j = 0; j < c->frame_size; j++) {
            samples[2 * j] = (int)(sin(t) * 10000);

            for (k = 1; k < c->channels; k++)
                samples[2 * j + k] = samples[2 * j];
            t += tincr;
        }
        encode(c, frame, pkt, f);
    }

    /* flush the encoder */
    encode(c, NULL, pkt, f);

    fclose(f);

    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_free_context(&c);

    return 0;
}
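The encode() helper called in the loop is not shown; in FFmpeg's encode_audio.c example, which this main() matches, it drains the encoder with the send/receive API:

static void encode(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt,
                   FILE *output)
{
    int ret;

    /* send the frame for encoding; a NULL frame flushes the encoder */
    ret = avcodec_send_frame(ctx, frame);
    if (ret < 0) {
        fprintf(stderr, "Error sending the frame to the encoder\n");
        exit(1);
    }

    /* read all available output packets (zero or more per input frame) */
    while (ret >= 0) {
        ret = avcodec_receive_packet(ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            fprintf(stderr, "Error encoding audio frame\n");
            exit(1);
        }

        fwrite(pkt->data, 1, pkt->size, output);
        av_packet_unref(pkt);
    }
}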
Example #20
	status_t FFMPEGer::encodeAudio(MediaBuffer* src, MediaBuffer* dst){
		AVCodecContext *c;
		
		AVFrame *frame = NULL;
		int ret;
		int got_packet;
		int dst_nb_samples;
		OutputStream* ost = &audio_st;
		
		unsigned char* srcData = (unsigned char*)src->data() + src->range_offset();
		int copySize = getAudioEncodeBufferSize();

		while(srcData < ((unsigned char*)src->data() + src->range_offset() + src->range_length())){
			AVPacket pkt = { 0 }; // data and size must be 0;
			av_init_packet(&pkt);
			c = ost->st->codec;
		
			frame = audio_st.tmp_frame;
			memcpy(frame->data[0], srcData, copySize);
			srcData += copySize;
			frame->pts = audio_st.next_pts;
			
			audio_st.next_pts += frame->nb_samples;

			if (frame) {
				/* convert samples from native format to destination codec format, using the resampler */
				/* compute destination number of samples */
				dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
												c->sample_rate, c->sample_rate, AV_ROUND_UP);
				av_assert0(dst_nb_samples == frame->nb_samples);

				/* when we pass a frame to the encoder, it may keep a reference to it
				 * internally;
				 * make sure we do not overwrite it here
				 */
				ret = av_frame_make_writable(ost->frame);
				if (ret < 0)
					return UNKNOWN_ERROR;

				/* convert to destination format */
				ret = swr_convert(ost->swr_ctx,
								  ost->frame->data, dst_nb_samples,
								  (const uint8_t **)frame->data, frame->nb_samples);
				if (ret < 0) {
					ALOGE("Error while converting");
					return UNKNOWN_ERROR;
				}
				
				frame = ost->frame;
				frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
				ost->samples_count += dst_nb_samples;
			}

			ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
			if (ret < 0) {
				ALOGE("Error encoding audio frame: %s", av_err2str(ret));
				return UNKNOWN_ERROR;
			}

			pkt.pts = frame->pts;
			
#if 0
			static int count = 0;
			char a[50] = {0};
			sprintf(a, "/sdcard/pcm%d", count++);
			FILE* f1 = fopen(a, "ab");
			if(f1 != NULL){
				size_t res = fwrite(pkt.data, 1, pkt.size, f1);
				fclose(f1);
				ALOGV("fwrite %d of %d to /sdcard/pcm!", res, pkt.size);
			}else
				ALOGE("can not fopen /sdcard/pcm!!");
#endif
			
			
			if (got_packet) {
				ret = write_frame(fmt_ctx, &c->time_base, ost->st, &pkt);
				if (ret < 0) {
					ALOGE("Error while writing audio frame: %s", av_err2str(ret));
					return UNKNOWN_ERROR;
				}
			}
		}

		return OK; // all input consumed; OK comes from the same status_t set as UNKNOWN_ERROR
	}
Example #21
void
write_audio_frame (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
  Priv *p = (Priv*)o->user_data;
  AVCodecContext *c = st->codec;
  int sample_count = 100000;
  static AVPacket  pkt = { 0 };

  if (pkt.size == 0)
  {
    av_init_packet (&pkt);
  }

  /* first we add incoming frames audio samples */
  {
    int i;
    int sample_count = gegl_audio_fragment_get_sample_count (o->audio);
    GeglAudioFragment *af = gegl_audio_fragment_new (gegl_audio_fragment_get_sample_rate (o->audio),
                                                     gegl_audio_fragment_get_channels (o->audio),
                                                     gegl_audio_fragment_get_channel_layout (o->audio),
                                                     sample_count);
    gegl_audio_fragment_set_sample_count (af, sample_count);
    for (i = 0; i < sample_count; i++)
      {
        af->data[0][i] = o->audio->data[0][i];
        af->data[1][i] = o->audio->data[1][i];
      }
    gegl_audio_fragment_set_pos (af, p->audio_pos);
    p->audio_pos += sample_count;
    p->audio_track = g_list_append (p->audio_track, af);
  }

  if (!(c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
    sample_count = c->frame_size;

  /* then we encode as much as we can in a loop using the codec frame size */

  
  while (p->audio_pos - p->audio_read_pos > sample_count)
  {
    long i;
    int ret;
    int got_packet = 0;
    AVFrame *frame = alloc_audio_frame (c->sample_fmt, c->channel_layout,
                                        c->sample_rate, sample_count);

    switch (c->sample_fmt) {
      case AV_SAMPLE_FMT_FLT:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((float*)frame->data[0])[c->channels*i+0] = left;
          ((float*)frame->data[0])[c->channels*i+1] = right;
        }
        break;
      case AV_SAMPLE_FMT_FLTP:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((float*)frame->data[0])[i] = left;
          ((float*)frame->data[1])[i] = right;
        }
        break;
      case AV_SAMPLE_FMT_S16:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int16_t*)frame->data[0])[c->channels*i+0] = left * (1<<15);
          ((int16_t*)frame->data[0])[c->channels*i+1] = right * (1<<15);
        }
        break;
      case AV_SAMPLE_FMT_S32:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int32_t*)frame->data[0])[c->channels*i+0] = left * (1<<31);
          ((int32_t*)frame->data[0])[c->channels*i+1] = right * (1<<31);
        }
        break;
      case AV_SAMPLE_FMT_S32P:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int32_t*)frame->data[0])[i] = left * (1<<31);
          ((int32_t*)frame->data[1])[i] = right * (1<<31);
        }
        break;
      case AV_SAMPLE_FMT_S16P:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int16_t*)frame->data[0])[i] = left * (1<<15);
          ((int16_t*)frame->data[1])[i] = right * (1<<15);
        }
        break;
      default:
        fprintf (stderr, "eeeek unhandled audio format\n");
        break;
    }
    frame->pts = p->next_apts;
    p->next_apts += sample_count;

    av_frame_make_writable (frame);
    ret = avcodec_encode_audio2 (c, &pkt, frame, &got_packet);

    av_packet_rescale_ts (&pkt, st->codec->time_base, st->time_base);
    if (ret < 0) {
      fprintf (stderr, "Error encoding audio frame: %s\n", av_err2str (ret));
    }

    if (got_packet)
    {
      pkt.stream_index = st->index;
      av_interleaved_write_frame (oc, &pkt);
      av_free_packet (&pkt);
    }

    av_frame_free (&frame);
    p->audio_read_pos += sample_count;
  }
}
Example #22
void _ffmpegPostAudioFrame(struct mAVStream* stream, int16_t left, int16_t right) {
	struct FFmpegEncoder* encoder = (struct FFmpegEncoder*) stream;
	if (!encoder->context || !encoder->audioCodec) {
		return;
	}

	if (encoder->absf && !left) {
		// XXX: AVBSF doesn't like silence. Figure out why.
		left = 1;
	}

	encoder->audioBuffer[encoder->currentAudioSample * 2] = left;
	encoder->audioBuffer[encoder->currentAudioSample * 2 + 1] = right;

	++encoder->currentAudioSample;

	if (encoder->currentAudioSample * 4 < encoder->audioBufferSize) {
		return;
	}

	int channelSize = 2 * av_get_bytes_per_sample(encoder->audio->sample_fmt);
	encoder->currentAudioSample = 0;
#ifdef USE_LIBAVRESAMPLE
	avresample_convert(encoder->resampleContext, 0, 0, 0,
	                   (uint8_t**) &encoder->audioBuffer, 0, encoder->audioBufferSize / 4);

	if (avresample_available(encoder->resampleContext) < encoder->audioFrame->nb_samples) {
		return;
	}
#if LIBAVCODEC_VERSION_MAJOR >= 55
	av_frame_make_writable(encoder->audioFrame);
#endif
	int samples = avresample_read(encoder->resampleContext, encoder->audioFrame->data, encoder->postaudioBufferSize / channelSize);
#else
#if LIBAVCODEC_VERSION_MAJOR >= 55
	av_frame_make_writable(encoder->audioFrame);
#endif
	if (swr_get_out_samples(encoder->resampleContext, encoder->audioBufferSize / 4) < encoder->audioFrame->nb_samples) {
		swr_convert(encoder->resampleContext, NULL, 0, (const uint8_t**) &encoder->audioBuffer, encoder->audioBufferSize / 4);
		return;
	}
	int samples = swr_convert(encoder->resampleContext, encoder->audioFrame->data, encoder->postaudioBufferSize / channelSize,
	                          (const uint8_t**) &encoder->audioBuffer, encoder->audioBufferSize / 4);
#endif

	encoder->audioFrame->pts = av_rescale_q(encoder->currentAudioFrame, encoder->audio->time_base, encoder->audioStream->time_base);
	encoder->currentAudioFrame += samples;

	AVPacket packet;
	av_init_packet(&packet);
	packet.data = 0;
	packet.size = 0;
	packet.pts = encoder->audioFrame->pts;

	int gotData;
#ifdef FFMPEG_USE_PACKETS
	avcodec_send_frame(encoder->audio, encoder->audioFrame);
	gotData = avcodec_receive_packet(encoder->audio, &packet);
	gotData = (gotData == 0) && packet.size;
#else
	avcodec_encode_audio2(encoder->audio, &packet, encoder->audioFrame, &gotData);
#endif
	if (gotData) {
		if (encoder->absf) {
			AVPacket tempPacket;

#ifdef FFMPEG_USE_NEW_BSF
			int success = av_bsf_send_packet(encoder->absf, &packet);
			if (success >= 0) {
				success = av_bsf_receive_packet(encoder->absf, &tempPacket);
			}
#else
			int success = av_bitstream_filter_filter(encoder->absf, encoder->audio, 0,
			    &tempPacket.data, &tempPacket.size,
			    packet.data, packet.size, 0);
#endif

			if (success >= 0) {
#if LIBAVUTIL_VERSION_MAJOR >= 53
				tempPacket.buf = av_buffer_create(tempPacket.data, tempPacket.size, av_buffer_default_free, 0, 0);
#endif

#ifdef FFMPEG_USE_PACKET_UNREF
				av_packet_move_ref(&packet, &tempPacket);
#else
				av_free_packet(&packet);
				packet = tempPacket;
#endif

				packet.stream_index = encoder->audioStream->index;
				av_interleaved_write_frame(encoder->context, &packet);
			}
		} else {
			packet.stream_index = encoder->audioStream->index;
			av_interleaved_write_frame(encoder->context, &packet);
		}
	}
#ifdef FFMPEG_USE_PACKET_UNREF
	av_packet_unref(&packet);
#else
	av_free_packet(&packet);
#endif
}
Example #23
void Frame::fill(int frame_index)
{
    int x, y, i;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally;
     * make sure we do not overwrite it here
     */
    if (av_frame_make_writable(frm) < 0) return;

    i = frame_index;

    /* Generate a moving RGBA gradient, then convert it to YUV420P below. */
    uint8_t *rgba32Data = new uint8_t[4 * frm->width * frm->height];

    SwsContext *ctx = sws_getContext(frm->width, frm->height,
                                     AV_PIX_FMT_RGBA, frm->width, frm->height,
                                     AV_PIX_FMT_YUV420P, 0, 0, 0, 0);

    uint8_t *pos = rgba32Data;
    for (y = 0; y < frm->height; y++)
    {
        for (x = 0; x < frm->width; x++)
        {
            pos[0] = i / (float)250 * 255;
            pos[1] = y / (float)(frm->height) * 255;
            pos[2] = x / (float)(frm->width) * 255;
            pos[3] = 255;
            pos += 4;
        }
    }

    uint8_t *inData[1] = { rgba32Data }; // RGBA32 has one plane
    //
    // NOTE: In a more general setting, the rows of your input image may
    //       be padded; that is, the bytes per row may not be 4 * width.
    //       In such cases, inLinesize should be set to that padded width.
    //
    int inLinesize[1] = { 4 * frm->width }; // RGBA stride
    sws_scale(ctx, inData, inLinesize, 0, frm->height, frm->data, frm->linesize);

    /* Release the per-call scaler context and scratch buffer to avoid leaks. */
    sws_freeContext(ctx);
    delete[] rgba32Data;
}
Example #24
/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int process_audio_stream(AVFormatContext *oc, OutputStream *ost)
{
    AVFrame *frame;
    int got_output = 0;
    int ret;

    frame = get_audio_frame(ost);
    got_output |= !!frame;

    /* feed the data to lavr */
    if (frame) {
        ret = avresample_convert(ost->avr, NULL, 0, 0,
                                 frame->extended_data, frame->linesize[0],
                                 frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error feeding audio data to the resampler\n");
            exit(1);
        }
    }

    while ((frame && avresample_available(ost->avr) >= ost->frame->nb_samples) ||
           (!frame && avresample_get_out_samples(ost->avr, 0))) {
        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);

        /* the difference between the two avresample calls here is that the
         * first one just reads the already converted data that is buffered in
         * the lavr output buffer, while the second one also flushes the
         * resampler */
        if (frame) {
            ret = avresample_read(ost->avr, ost->frame->extended_data,
                                  ost->frame->nb_samples);
        } else {
            ret = avresample_convert(ost->avr, ost->frame->extended_data,
                                     ost->frame->linesize[0], ost->frame->nb_samples,
                                     NULL, 0, 0);
        }

        if (ret < 0) {
            fprintf(stderr, "Error while resampling\n");
            exit(1);
        } else if (frame && ret != ost->frame->nb_samples) {
            fprintf(stderr, "Too few samples returned from lavr\n");
            exit(1);
        }

        ost->frame->nb_samples = ret;

        ost->frame->pts        = ost->next_pts;
        ost->next_pts         += ost->frame->nb_samples;

        got_output |= encode_audio_frame(oc, ost, ret ? ost->frame : NULL);
    }

    return !got_output;
}
Example #25
JNIEXPORT jint JNICALL Java_bits_jav_codec_JavFrame_nMakeWritable
( JNIEnv *env, jclass clazz, jlong pointer )
{
    AVFrame *frame = *(AVFrame**)&pointer;
    return av_frame_make_writable( frame );
}