Exemple #1
0
// First pass of a two-pass encode: reads raw frames from |infile|, encodes
// them with |encoder|/|cfg|, and writes the compressed stream to
// |outfile_name| as an IVF container.  Dies on any unrecoverable error.
static void pass1(vpx_image_t *raw, FILE *infile, const char *outfile_name,
                  const VpxInterface *encoder, const vpx_codec_enc_cfg_t *cfg) {
  vpx_codec_ctx_t ctx;
  int frames_encoded = 0;
  VpxVideoInfo info = { encoder->fourcc,
                        cfg->g_w,
                        cfg->g_h,
                        { cfg->g_timebase.num, cfg->g_timebase.den } };
  VpxVideoWriter *ivf_writer =
      vpx_video_writer_open(outfile_name, kContainerIVF, &info);

  if (!ivf_writer) die("Failed to open %s for writing", outfile_name);

  if (vpx_codec_enc_init(&ctx, encoder->codec_interface(), cfg, 0))
    die_codec(&ctx, "Failed to initialize encoder");

  // Encode every frame available in the input file.
  for (;;) {
    if (!vpx_img_read(raw, infile)) break;
    ++frames_encoded;
    encode_frame(&ctx, raw, frames_encoded, 1, 0, VPX_DL_GOOD_QUALITY,
                 ivf_writer);
  }

  // Drain any frames still buffered inside the encoder.
  while (encode_frame(&ctx, NULL, -1, 1, 0, VPX_DL_GOOD_QUALITY, ivf_writer)) {
  }

  printf("\n");

  if (vpx_codec_destroy(&ctx)) die_codec(&ctx, "Failed to destroy codec.");

  vpx_video_writer_close(ivf_writer);

  printf("Pass 1 complete. Processed %d frames.\n", frames_encoded);
}
Exemple #2
0
/**
 * Encode one WMA superframe into an AVPacket.
 *
 * Applies the MDCT, optionally folds stereo into mid/side, then searches for
 * the largest total_gain whose encoded frame fits the packet, pads the frame
 * up to avctx->block_align bytes and fills in the packet metadata.
 *
 * Returns 0 on success, a negative AVERROR on failure.
 */
static int encode_superframe(AVCodecContext *avctx, AVPacket *avpkt,
                             const AVFrame *frame, int *got_packet_ptr)
{
    WMACodecContext *s = avctx->priv_data;
    int i, total_gain, ret, error;

    s->block_len_bits = s->frame_len_bits; // required by non variable block len
    s->block_len      = 1 << s->block_len_bits;

    ret = apply_window_and_mdct(avctx, frame);

    if (ret < 0)
        return ret;

    // Mid/side stereo: rotate L/R into (L+R)/2 and (L-R)/2 channels.
    if (s->ms_stereo) {
        float a, b;
        int i;

        for (i = 0; i < s->block_len; i++) {
            a              = s->coefs[0][i] * 0.5;
            b              = s->coefs[1][i] * 0.5;
            s->coefs[0][i] = a + b;
            s->coefs[1][i] = a - b;
        }
    }

    if ((ret = ff_alloc_packet2(avctx, avpkt, 2 * MAX_CODED_SUPERFRAME_SIZE, 0)) < 0)
        return ret;

    // Binary search (steps 64,32,...,1) for the largest gain that still
    // fits.  error <= 0 appears to mean "frame fits at this gain" --
    // NOTE(review): confirm against encode_frame's return contract.
    total_gain = 128;
    for (i = 64; i; i >>= 1) {
        error = encode_frame(s, s->coefs, avpkt->data, avpkt->size,
                                 total_gain - i);
        if (error <= 0)
            total_gain -= i;
    }

    // If the frame still doesn't fit, walk the gain back up until it does.
    while(total_gain <= 128 && error > 0)
        error = encode_frame(s, s->coefs, avpkt->data, avpkt->size, total_gain++);
    if (error > 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid input data or requested bitrate too low, cannot encode\n");
        avpkt->size = 0;
        return AVERROR(EINVAL);
    }
    // Pad the bitstream up to exactly block_align bytes with 'N' filler.
    av_assert0((put_bits_count(&s->pb) & 7) == 0);
    i= avctx->block_align - (put_bits_count(&s->pb)+7)/8;
    av_assert0(i>=0);
    while(i--)
        put_bits(&s->pb, 8, 'N');

    flush_put_bits(&s->pb);
    av_assert0(put_bits_ptr(&s->pb) - s->pb.buf == avctx->block_align);

    // Shift pts back by the encoder's priming (initial padding) samples.
    if (frame->pts != AV_NOPTS_VALUE)
        avpkt->pts = frame->pts - ff_samples_to_time_base(avctx, avctx->initial_padding);

    avpkt->size     = avctx->block_align;
    *got_packet_ptr = 1;
    return 0;
}
Exemple #3
0
/**
 * Encode one WMA superframe (old buffer-based encoder API).
 *
 * Applies the MDCT, optionally folds stereo into mid/side, binary-searches
 * a fitting total_gain, pads the frame to block_align bytes and returns the
 * number of bytes written to buf.
 */
static int encode_superframe(AVCodecContext *avctx,
                            unsigned char *buf, int buf_size, void *data){
    WMACodecContext *s = avctx->priv_data;
    short *samples = data;
    int i, total_gain;

    s->block_len_bits= s->frame_len_bits; //required by non variable block len
    s->block_len = 1 << s->block_len_bits;

    apply_window_and_mdct(avctx, samples, avctx->frame_size);

    // Mid/side stereo: rotate L/R into (L+R)/2 and (L-R)/2 channels.
    if (s->ms_stereo) {
        float a, b;
        int i;

        for(i = 0; i < s->block_len; i++) {
            a = s->coefs[0][i]*0.5;
            b = s->coefs[1][i]*0.5;
            s->coefs[0][i] = a + b;
            s->coefs[1][i] = a - b;
        }
    }

#if 1
    // Binary search (steps 64..1) for the largest gain that fits.
    // error < 0 appears to mean "frame fits at this gain" --
    // NOTE(review): confirm against encode_frame's return contract.
    total_gain= 128;
    for(i=64; i; i>>=1){
        int error= encode_frame(s, s->coefs, buf, buf_size, total_gain-i);
        if(error<0)
            total_gain-= i;
    }
#else
    // Disabled alternative: score-minimizing ternary search around gain 90.
    total_gain= 90;
    best= encode_frame(s, s->coefs, buf, buf_size, total_gain);
    for(i=32; i; i>>=1){
        int scoreL= encode_frame(s, s->coefs, buf, buf_size, total_gain-i);
        int scoreR= encode_frame(s, s->coefs, buf, buf_size, total_gain+i);
        av_log(NULL, AV_LOG_ERROR, "%d %d %d (%d)\n", scoreL, best, scoreR, total_gain);
        if(scoreL < FFMIN(best, scoreR)){
            best = scoreL;
            total_gain -= i;
        }else if(scoreR < best){
            best = scoreR;
            total_gain += i;
        }
    }
#endif

    // Final encode at the chosen gain; result is not error-checked here.
    encode_frame(s, s->coefs, buf, buf_size, total_gain);
    // Pad the bitstream up to exactly block_align bytes with 'N' filler.
    assert((put_bits_count(&s->pb) & 7) == 0);
    i= s->block_align - (put_bits_count(&s->pb)+7)/8;
    assert(i>=0);
    while(i--)
        put_bits(&s->pb, 8, 'N');

    flush_put_bits(&s->pb);
    // Return the number of bytes written to buf.
    return put_bits_ptr(&s->pb) - s->pb.buf;
}
Exemple #4
0
/**
 * Encode one MPEG audio frame.
 *
 * Runs the analysis filter bank, derives scale factors and signal-to-mask
 * ratios per channel, computes the bit allocation and writes the encoded
 * frame into |frame|.  Returns the number of bytes written.
 */
int MPA_encode_frame(MpegAudioContext *s, unsigned char *frame, int buf_size,
                     unsigned char *sampbuf, int step)
{
    short smr[MPA_MAX_CHANNELS][SBLIMIT];
    unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT];
    int padding;
    int ch;

    /* Analysis filter bank, one interleaved channel at a time. */
    for (ch = 0; ch < s->nb_channels; ch++)
        filter(s, ch, ((short *)(sampbuf)) + ch, step >> 1);

    /* Scale factors for every channel's subband samples. */
    for (ch = 0; ch < s->nb_channels; ch++)
        compute_scale_factors(s->scale_code[ch], s->scale_factors[ch],
                              s->sb_samples[ch], s->sblimit);

    /* Signal-to-mask ratios from the psychoacoustic model. */
    for (ch = 0; ch < s->nb_channels; ch++)
        psycho_acoustic_model(s, smr[ch]);

    compute_bit_allocation(s, smr, bit_alloc, &padding);

    init_put_bits(&s->pb, frame, MPA_MAX_CODED_FRAME_SIZE);

    encode_frame(s, bit_alloc, padding);

    /* Bytes written into |frame|. */
    return pbBufPtr(&s->pb) - s->pb.buf;
}
Exemple #5
0
/**
 * QuickTime RLE encode entry point (old buffer-based API).
 *
 * Encodes |data| (an AVFrame) into |buf| and returns the chunk size, or -1
 * if |buf| is too small for the worst-case compressed frame.
 * NOTE(review): *p = *pict is a shallow AVFrame copy -- relies on pict's
 * buffers staying valid for this call.
 */
static int qtrle_encode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size, void *data)
{
    QtrleEncContext * const s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p = &s->frame;
    int chunksize;

    *p = *pict;

    if (buf_size < s->max_buf_size) {
        /* Upper bound check for compressed data */
        av_log(avctx, AV_LOG_ERROR, "buf_size %d <  %d\n", buf_size, s->max_buf_size);
        return -1;
    }

    /* I-frame at every GOP boundary (always when gop_size is 0). */
    if (avctx->gop_size == 0 || (s->avctx->frame_number % avctx->gop_size) == 0) {
        /* I-Frame */
        p->pict_type = FF_I_TYPE;
        p->key_frame = 1;
    } else {
        /* P-Frame */
        p->pict_type = FF_P_TYPE;
        p->key_frame = 0;
    }

    chunksize = encode_frame(s, pict, buf);

    /* save the current frame so the next call can delta against it */
    av_picture_copy(&s->previous_frame, (AVPicture *)p, avctx->pix_fmt, avctx->width, avctx->height);
    return chunksize;
}
Exemple #6
0
/*
 * Dequeue one captured buffer from the camera, hand it to the encoder and
 * re-queue it.  Returns 1 on success, 0 when no buffer is ready (EAGAIN).
 * Exits the process on any other V4L2 error.
 */
int read_and_encode_frame(struct camera *cam) {
	struct v4l2_buffer buf;

	printf("in read_frame\n");

	CLEAR(buf);
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;

	/* DQBUF fills in buf.index (0 <= buf.index <= 3). */
	if (xioctl(cam->fd, VIDIOC_DQBUF, &buf) == -1) {
		if (errno == EAGAIN)
			return 0;
		/* EIO could be ignored per spec, but is treated as fatal here,
		 * exactly like every other error. */
		errno_exit("VIDIOC_DQBUF");
	}

	encode_frame(cam->buffers[buf.index].start, buf.length);

	if (xioctl(cam->fd, VIDIOC_QBUF, &buf) == -1)
		errno_exit("VIDIOC_QBUF");

	return 1;
}
Exemple #7
0
/*
 * Fill in the session-level fields of |flow| and send it as an AMQP FLOW
 * performative on |link_endpoint|.
 *
 * Returns 0 on success; on failure returns a non-zero value (the source
 * line number of the failing check, per this module's convention).
 */
int session_send_flow(LINK_ENDPOINT_HANDLE link_endpoint, FLOW_HANDLE flow)
{
	int result;

	if ((link_endpoint == NULL) ||
		(flow == NULL))
	{
		result = __LINE__;
	}
	else
	{
		LINK_ENDPOINT_INSTANCE* link_endpoint_instance = (LINK_ENDPOINT_INSTANCE*)link_endpoint;
		SESSION_INSTANCE* session_instance = (SESSION_INSTANCE*)link_endpoint_instance->session;

		result = 0;

		/* next-incoming-id is only valid once the BEGIN handshake has
		 * progressed far enough to know the peer's transfer id. */
		if ((session_instance->session_state == SESSION_STATE_BEGIN_RCVD) ||
			((session_instance->session_state == SESSION_STATE_MAPPED)))
		{
			if (flow_set_next_incoming_id(flow, session_instance->next_incoming_id) != 0)
			{
				result = __LINE__;
			}
		}

		if (result == 0)
		{
			/* Populate the remaining mandatory flow fields from session state. */
			if ((flow_set_incoming_window(flow, session_instance->incoming_window) != 0) ||
				(flow_set_next_outgoing_id(flow, session_instance->next_outgoing_id) != 0) ||
				(flow_set_outgoing_window(flow, session_instance->outgoing_window) != 0) ||
				(flow_set_handle(flow, link_endpoint_instance->output_handle) != 0))
			{
				result = __LINE__;
			}
			else
			{
				AMQP_VALUE flow_performative_value = amqpvalue_create_flow(flow);
				if (flow_performative_value == NULL)
				{
					result = __LINE__;
				}
				else
				{
					if (encode_frame(link_endpoint, flow_performative_value, NULL, 0) != 0)
					{
						result = __LINE__;
					}
					else
					{
						result = 0;
					}

					/* The encoded frame owns its own copy; release ours. */
					amqpvalue_destroy(flow_performative_value);
				}
			}
		}
	}

	return result;
}
Exemple #8
0
/*
 * Send |disposition| as an AMQP DISPOSITION performative on |link_endpoint|.
 *
 * Returns 0 on success; on failure returns a non-zero value (the source
 * line number of the failing check, per this module's convention).
 */
int session_send_disposition(LINK_ENDPOINT_HANDLE link_endpoint, DISPOSITION_HANDLE disposition)
{
	int result;

	if ((link_endpoint == NULL) ||
		(disposition == NULL))
	{
		result = __LINE__;
	}
	else
	{
		LINK_ENDPOINT_INSTANCE* link_endpoint_instance = (LINK_ENDPOINT_INSTANCE*)link_endpoint;
		AMQP_VALUE disposition_performative_value = amqpvalue_create_disposition(disposition);
		if (disposition_performative_value == NULL)
		{
			result = __LINE__;
		}
		else
		{
			if (encode_frame(link_endpoint, disposition_performative_value, NULL, 0) != 0)
			{
				result = __LINE__;
			}
			else
			{
				result = 0;
			}

			/* The encoded frame owns its own copy; release ours. */
			amqpvalue_destroy(disposition_performative_value);
		}
	}

	return result;
}
Exemple #9
0
/**
 * QuickTime RLE encode entry point (AVPacket API).
 *
 * Allocates the output packet, picks I/P frame type by GOP position,
 * encodes |pict| and saves it as the reference for the next delta frame.
 * Returns 0 on success, negative AVERROR on failure.
 * NOTE(review): *p = *pict is a shallow AVFrame copy -- relies on pict's
 * buffers staying valid for this call.
 */
static int qtrle_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *pict, int *got_packet)
{
    QtrleEncContext * const s = avctx->priv_data;
    AVFrame * const p = &s->frame;
    int ret;

    *p = *pict;

    if ((ret = ff_alloc_packet2(avctx, pkt, s->max_buf_size)) < 0)
        return ret;

    /* I-frame at every GOP boundary (always when gop_size is 0). */
    if (avctx->gop_size == 0 || (s->avctx->frame_number % avctx->gop_size) == 0) {
        /* I-Frame */
        p->pict_type = AV_PICTURE_TYPE_I;
        p->key_frame = 1;
    } else {
        /* P-Frame */
        p->pict_type = AV_PICTURE_TYPE_P;
        p->key_frame = 0;
    }

    pkt->size = encode_frame(s, pict, pkt->data);

    /* save the current frame so the next call can delta against it */
    av_picture_copy(&s->previous_frame, (AVPicture *)p, avctx->pix_fmt, avctx->width, avctx->height);

    if (p->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
Exemple #10
0
// Handle control requests for the lavc encoding filter.  Only
// VFCTRL_FLUSH_FRAMES is understood: it drains any frames the codec has
// buffered (when the codec declares CODEC_CAP_DELAY).
static int control(struct vf_instance_s* vf, int request, void* data){
    if (request != VFCTRL_FLUSH_FRAMES)
        return CONTROL_UNKNOWN;

    if (vf->priv->codec->capabilities & CODEC_CAP_DELAY) {
        // Keep flushing until the encoder has nothing left to emit.
        while (encode_frame(vf, NULL, MP_NOPTS_VALUE) > 0)
            ;
    }
    return CONTROL_TRUE;
}
Exemple #11
0
// Handle control requests for the x264 encoding filter.  Only
// VFCTRL_FLUSH_FRAMES is understood: it drains the encoder's delayed frames.
static int control(struct vf_instance *vf, int request, void *data)
{
    h264_module_t *mod = (h264_module_t *)vf->priv;

    if (request != VFCTRL_FLUSH_FRAMES)
        return CONTROL_UNKNOWN;

    // Keep encoding NULL input while x264 still holds delayed frames.
    while (x264_encoder_delayed_frames(mod->x264) > 0)
        encode_frame(vf, NULL);
    return CONTROL_TRUE;
}
Exemple #12
0
/*
 * Split the PCM in |src| into st->fsize sized frames, encode each frame into
 * a temporary buffer, then write optional per-frame length headers followed
 * by all frame payloads into |dst|.
 *
 * Returns 0 on success or an errno-style error code (EINVAL on short input,
 * or the first error from encode_frame/mbuf writes).
 *
 * Fix: size_t/unsigned values were passed to "%u" without a cast, which is
 * undefined behavior on LP64 platforms; they are now cast to unsigned.
 */
static int encode(struct aucodec_st *st, struct mbuf *dst, struct mbuf *src)
{
	struct {
		uint8_t buf[1024];
		uint16_t len;
	} framev[MAX_FRAMES];
	uint32_t i;
	size_t n;
	int err = 0;

	n = src->end / st->fsize;
	if (n > MAX_FRAMES) {
		n = MAX_FRAMES;
		DEBUG_WARNING("number of frames truncated to %u\n",
			      (unsigned)n);
	}

	DEBUG_INFO("enc: %u bytes into %u frames\n",
		   (unsigned)src->end, (unsigned)n);

	if (n == 0) {
		DEBUG_WARNING("enc: short frame (%u < %u)\n",
			      (unsigned)src->end, (unsigned)st->fsize);
		return EINVAL;
	}

	/* Encode all frames into temp buffer */
	for (i=0; i<n && !err; i++) {
		framev[i].len = sizeof(framev[i].buf);
		err = encode_frame(st, &framev[i].len, framev[i].buf, src);
	}

	if (!st->low_overhead) {
		/* Encode all length headers: each length is written as a run
		 * of 0xff bytes plus a final remainder byte. */
		for (i=0; i<n && !err; i++) {
			uint16_t len = framev[i].len;

			while (len >= 0xff) {
				err = mbuf_write_u8(dst, 0xff);
				len -= 0xff;
			}
			err = mbuf_write_u8(dst, len);
		}
	}

	/* Encode all frame buffers */
	for (i=0; i<n && !err; i++) {
		err = mbuf_write_mem(dst, framev[i].buf, framev[i].len);
	}

	return err;
}
Exemple #13
0
void CameraStream::run_once()
{
	if (switch_) {
		switch_ = false;
		init();
	}

	if (rtp_) {
		AVPicture *pic = next_pic(cap_);
		if (pic) {
			int bytes = encode_frame(pic);
			if (bytes > 0) {
				sender_params_.write(sender_params_.ctx, frame_buf_, bytes, GetTickCount()/1000.0);
			}
		}
	}
}
/*
 * Encode an s16l stereo PCM block to MP3 and return a chain of encoded
 * blocks (one per MP3 frame), with pts/dts rebuilt from the input pts.
 * Returns NULL when no complete frame could be produced.
 */
static block_t *EncodeFrame( encoder_t *p_enc, aout_buffer_t *p_block )
{
    block_t *p_pcm_block;
    block_t *p_chain = NULL;
    unsigned int i_samples = p_block->i_buffer >> 2 /* s16l stereo */;
    /* Rewind the start date by the input duration so per-frame dates can be
     * accumulated forward from it. */
    mtime_t start_date = p_block->i_pts;
    start_date -= (mtime_t)i_samples * (mtime_t)1000000 / (mtime_t)p_enc->fmt_out.audio.i_rate;

    /* NOTE(review): p_enc IS used throughout this function; this macro
     * looks stale -- confirm before removing. */
    VLC_UNUSED(p_enc);

    do {
        p_pcm_block = GetPCM( p_enc, p_block );
        if( !p_pcm_block )
            break;

        p_block = NULL; /* we don't need it anymore */

        uint32_t enc_buffer[16384]; /* storage for 65536 Bytes XXX: too much */
        struct enc_chunk_hdr *chunk = (void*) enc_buffer;
        /* NOTE(review): chunk->enc_data is read here before any visible
         * initialization of enc_buffer -- presumably ENC_CHUNK_SKIP_HDR only
         * uses its address; verify the macro's definition. */
        chunk->enc_data = ENC_CHUNK_SKIP_HDR(chunk->enc_data, chunk);

        encode_frame( (char*)p_pcm_block->p_buffer, chunk );
        block_Release( p_pcm_block );

        block_t *p_mp3_block = block_New( p_enc, chunk->enc_size );
        if( !p_mp3_block )
            break;

        vlc_memcpy( p_mp3_block->p_buffer, chunk->enc_data, chunk->enc_size );

        /* date management */
        p_mp3_block->i_length = SAMP_PER_FRAME1 * 1000000 /
            p_enc->fmt_out.audio.i_rate;

        start_date += p_mp3_block->i_length;
        p_mp3_block->i_dts = p_mp3_block->i_pts = start_date;

        p_mp3_block->i_nb_samples = SAMP_PER_FRAME1;

        block_ChainAppend( &p_chain, p_mp3_block );

    } while( p_pcm_block );

    return p_chain;
}
Exemple #15
0
/*
 * Test helper: encode one YUV frame to H.264 and, when the global h264_fp
 * file is open, append the encoded bytes to it.
 *
 * Returns the encoded length in bytes (<= 0 when nothing was produced).
 *
 * Fixes: the function was missing its closing brace (syntax error), and a
 * dead, fully commented-out `if (yuv_frame[0] == 0)` block was removed.
 * |yuv_length| is currently unused -- kept for interface compatibility.
 */
int encode_and_save(Encoder *enc, unsigned char *yuv_frame, size_t yuv_length,
                    unsigned char *h264_buf)
{
	int length = encode_frame(enc, -1, yuv_frame, h264_buf);

	if (length > 0 && h264_fp) {
		fwrite(h264_buf, length, 1, h264_fp);
	}

	return length;
}
Exemple #16
0
// Feed one raw frame to the x264 encoder: wire the mp_image planes into the
// x264 picture, choose the frame type, and encode.  Returns nonzero on
// success (encode_frame >= 0).
static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
{
    h264_module_t *mod = (h264_module_t *)vf->priv;
    int plane;

    x264_picture_init(&mod->pic);
    mod->pic.img.i_csp = param.i_csp;
    mod->pic.img.i_plane = 3;
    for (plane = 0; plane < 4; plane++) {
        mod->pic.img.plane[plane] = mpi->planes[plane];
        mod->pic.img.i_stride[plane] = mpi->stride[plane];
    }

    // Let x264 pick the type unless a key frame is forced at this pts.
    mod->pic.i_type = is_forced_key_frame(pts) ? X264_TYPE_KEYFRAME
                                               : X264_TYPE_AUTO;

    return encode_frame(vf, &mod->pic) >= 0;
}
Exemple #17
0
/*
 * Send |detach| as an AMQP DETACH performative on |link_endpoint|, after
 * stamping it with the endpoint's output handle.
 *
 * Returns 0 on success; on failure returns a non-zero value (the source
 * line number of the failing check, per this module's convention).
 */
int session_send_detach(LINK_ENDPOINT_HANDLE link_endpoint, DETACH_HANDLE detach)
{
	int result;

	if ((link_endpoint == NULL) ||
		(detach == NULL))
	{
		result = __LINE__;
	}
	else
	{
		LINK_ENDPOINT_INSTANCE* link_endpoint_instance = (LINK_ENDPOINT_INSTANCE*)link_endpoint;

		if (detach_set_handle(detach, link_endpoint_instance->output_handle) != 0)
		{
			result = __LINE__;
		}
		else
		{
			AMQP_VALUE detach_performative_value = amqpvalue_create_detach(detach);
			if (detach_performative_value == NULL)
			{
				result = __LINE__;
			}
			else
			{
				if (encode_frame(link_endpoint, detach_performative_value, NULL, 0) != 0)
				{
					result = __LINE__;
				}
				else
				{
					result = 0;
				}

				/* The encoded frame owns its own copy; release ours. */
				amqpvalue_destroy(detach_performative_value);
			}
		}
	}

	return result;
}
Exemple #18
0
/*****************************************************************************
 Prototype    : msg_process
 Description  : process msg
 Input        : EncoderHandle hEnc  
                CommonMsg *msgBuf   
 Output       : None
 Return Value : static
 Calls        : 
 Called By    : 
 
  History        :
  1.Date         : 2012/3/8
    Author       : Sun
    Modification : Created function

*****************************************************************************/
/* Receive one message from the encoder's queue and dispatch it by command.
 * Returns the handler's result, a negative error from msg_recv, or E_UNSUPT
 * for an unknown command.
 * Fix: corrected the "unkown" typo in the default-case log message. */
static Int32 msg_process(EncoderHandle hEnc, CommonMsg *msgBuf)
{
	Int32 ret;

	/* recv msg */
	ret = msg_recv(hEnc->hMsg, (MsgHeader *)msgBuf, sizeof(CommonMsg), 0);
	if(ret < 0) {
		ERR("%s recv msg err: %s", hEnc->name, str_err(ret));
		return ret;
	}

	/* process msg */
	MsgHeader *msgHdr = &msgBuf->header;
	switch(msgHdr->cmd) {
	case APPCMD_NEW_DATA:
		/* a new captured image is ready -- encode it */
		ret = encode_frame(hEnc, (ImgMsg *)msgBuf);
		break;
	case APPCMD_SET_ENC_PARAMS:
		ret = enc_params_update(hEnc, msgBuf->buf, msgHdr->dataLen);
		break;
	case APPCMD_SET_UPLOAD_PARAMS:
		/* validate the payload size before applying upload params */
		if( msgHdr->dataLen == sizeof(UploadParams))
			ret = upload_update(hEnc, (UploadParams *)msgBuf->buf);
		else
			ERR("invalid len of upload params");
		break;
	case APPCMD_UPLOAD_CTRL:
		ret = upload_control(hEnc->hUpload, msgHdr->param[0], msgBuf->buf);
		break;
	case APPCMD_EXIT:
		/* flag the main loop to exit; ret keeps msg_recv's success value */
		hEnc->exit = TRUE;
		break;
	default:
		ERR("unknown cmd: 0x%X", (unsigned int)msgHdr->cmd);
		ret = E_UNSUPT;
		break;
	}

	return ret;
}
Exemple #19
0
/**
 * QuickTime RLE encode entry point (modern AVPacket + AVFrame-ref API).
 *
 * Allocates the output packet, picks I/P by GOP position, encodes |pict|
 * and keeps a reference to it as the previous frame for delta coding.
 * Returns 0 on success, negative AVERROR on failure.
 */
static int qtrle_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *pict, int *got_packet)
{
    QtrleEncContext * const s = avctx->priv_data;
    int ret;

    if ((ret = ff_alloc_packet2(avctx, pkt, s->max_buf_size, 0)) < 0)
        return ret;

    /* I-frame at every GOP boundary (always when gop_size is 0). */
    if (avctx->gop_size == 0 || (s->avctx->frame_number % avctx->gop_size) == 0) {
        /* I-Frame */
        s->key_frame = 1;
    } else {
        /* P-Frame */
        s->key_frame = 0;
    }

    pkt->size = encode_frame(s, pict, pkt->data);

    /* save the current frame (by reference) for the next delta frame */
    av_frame_unref(s->previous_frame);
    ret = av_frame_ref(s->previous_frame, pict);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "cannot add reference\n");
        return ret;
    }

/* Mirror frame type into the deprecated coded_frame for old API users. */
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->key_frame = s->key_frame;
    avctx->coded_frame->pict_type = s->key_frame ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    if (s->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
Exemple #20
0
// Hand one raw frame to lavc: wire the mp_image planes into the AVFrame,
// set field ordering when interlaced DCT is requested, then encode.
// Returns nonzero on success (encode_frame >= 0).
static int put_image(struct vf_instance_s* vf, mp_image_t *mpi, double pts){
    AVFrame *pic = vf->priv->pic;
    int plane;

    for (plane = 0; plane < 3; plane++) {
        pic->data[plane] = mpi->planes[plane];
        pic->linesize[plane] = mpi->stride[plane];
    }

    if (lavc_param_interlaced_dct) {
        // Trust the source's field order only when it is both ordered and
        // interlaced; otherwise default to top-field-first.
        int ordered = (mpi->fields & MP_IMGFIELD_ORDERED) &&
                      (mpi->fields & MP_IMGFIELD_INTERLACED);
        pic->top_field_first = ordered
            ? !!(mpi->fields & MP_IMGFIELD_TOP_FIRST)
            : 1;

        // Explicit user override wins.
        if (lavc_param_top != -1)
            pic->top_field_first = lavc_param_top;
    }

    return (encode_frame(vf, pic, pts) >= 0);
}
Exemple #21
0
int main(int argc, char **argv) {
  FILE *infile = NULL;
  vpx_codec_ctx_t codec;
  vpx_codec_enc_cfg_t cfg;
  int frame_count = 0;
  vpx_image_t raw;
  vpx_codec_err_t res;
  VpxVideoInfo info;
  VpxVideoWriter *writer = NULL;
  const VpxInterface *encoder = NULL;
  const int fps = 2;        // TODO(dkovalev) add command line argument
  const double bits_per_pixel_per_frame = 0.067;

  exec_name = argv[0];
  if (argc != 6)
    die("Invalid number of arguments");

  memset(&info, 0, sizeof(info));

  encoder = get_vpx_encoder_by_name(argv[1]);
  if (encoder == NULL) {
    die("Unsupported codec.");
  }
  assert(encoder != NULL);
  info.codec_fourcc = encoder->fourcc;
  info.frame_width = strtol(argv[2], NULL, 0);
  info.frame_height = strtol(argv[3], NULL, 0);
  info.time_base.numerator = 1;
  info.time_base.denominator = fps;

  if (info.frame_width <= 0 ||
      info.frame_height <= 0 ||
      (info.frame_width % 2) != 0 ||
      (info.frame_height % 2) != 0) {
    die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
  }

  if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width,
                                             info.frame_height, 1)) {
    die("Failed to allocate image.");
  }

  printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));

  res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
  if (res)
    die_codec(&codec, "Failed to get default codec config.");

  cfg.g_w = info.frame_width;
  cfg.g_h = info.frame_height;
  cfg.g_timebase.num = info.time_base.numerator;
  cfg.g_timebase.den = info.time_base.denominator;
  cfg.rc_target_bitrate = (unsigned int)(bits_per_pixel_per_frame * cfg.g_w *
                                         cfg.g_h * fps / 1000);
  cfg.g_lag_in_frames = 0;

  writer = vpx_video_writer_open(argv[5], kContainerIVF, &info);
  if (!writer)
    die("Failed to open %s for writing.", argv[5]);

  if (!(infile = fopen(argv[4], "rb")))
    die("Failed to open %s for reading.", argv[4]);

  if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
    die_codec(&codec, "Failed to initialize encoder");

  // Encode frames.
  while (vpx_img_read(&raw, infile)) {
    ++frame_count;

    if (frame_count == 22 && encoder->fourcc == VP8_FOURCC) {
      set_roi_map(&cfg, &codec);
    } else if (frame_count == 33) {
      set_active_map(&cfg, &codec);
    } else if (frame_count == 44) {
      unset_active_map(&cfg, &codec);
    }

    encode_frame(&codec, &raw, frame_count, writer);
  }

  // Flush encoder.
  while (encode_frame(&codec, NULL, -1, writer)) {}

  printf("\n");
  fclose(infile);
  printf("Processed %d frames.\n", frame_count);

  vpx_img_free(&raw);
  if (vpx_codec_destroy(&codec))
    die_codec(&codec, "Failed to destroy codec.");

  vpx_video_writer_close(writer);

  return EXIT_SUCCESS;
}
Exemple #22
0
// Usage: <width> <height> <infile> <outfile> <update_frame_num> [limit].
// Encodes raw I420 frames with VP9 and, at frame |update_frame_num|, sets
// the current raw image as the LAST reference frame in both the encoder and
// (when test_decode is on) a parallel decoder, then checks that encoder and
// decoder outputs stay in sync.
int main(int argc, char **argv) {
  FILE *infile = NULL;
  // Encoder
  vpx_codec_ctx_t ecodec = {0};
  vpx_codec_enc_cfg_t cfg = {0};
  unsigned int frame_in = 0;
  vpx_image_t raw;
  vpx_codec_err_t res;
  VpxVideoInfo info = {0};
  VpxVideoWriter *writer = NULL;
  const VpxInterface *encoder = NULL;

  // Test encoder/decoder mismatch.
  int test_decode = 1;
  // Decoder
  vpx_codec_ctx_t dcodec;
  unsigned int frame_out = 0;

  // The frame number to set reference frame on
  unsigned int update_frame_num = 0;
  int mismatch_seen = 0;

  const int fps = 30;
  const int bitrate = 500;

  const char *width_arg = NULL;
  const char *height_arg = NULL;
  const char *infile_arg = NULL;
  const char *outfile_arg = NULL;
  unsigned int limit = 0;
  exec_name = argv[0];

  if (argc < 6)
    die("Invalid number of arguments");

  width_arg = argv[1];
  height_arg = argv[2];
  infile_arg = argv[3];
  outfile_arg = argv[4];

  encoder = get_vpx_encoder_by_name("vp9");
  if (!encoder)
    die("Unsupported codec.");

  update_frame_num = atoi(argv[5]);
  // In VP9, the reference buffers (cm->buffer_pool->frame_bufs[i].buf) are
  // allocated while calling vpx_codec_encode(), thus, setting reference for
  // 1st frame isn't supported.
  if (update_frame_num <= 1)
    die("Couldn't parse frame number '%s'\n", argv[5]);

  if (argc > 6) {
    limit = atoi(argv[6]);
    if (update_frame_num > limit)
      die("Update frame number couldn't larger than limit\n");
  }

  info.codec_fourcc = encoder->fourcc;
  info.frame_width = strtol(width_arg, NULL, 0);
  info.frame_height = strtol(height_arg, NULL, 0);
  info.time_base.numerator = 1;
  info.time_base.denominator = fps;

  // Codecs require even frame dimensions.
  if (info.frame_width <= 0 ||
      info.frame_height <= 0 ||
      (info.frame_width % 2) != 0 ||
      (info.frame_height % 2) != 0) {
    die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
  }

  if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width,
                                             info.frame_height, 1)) {
    die("Failed to allocate image.");
  }

  printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));

  // ecodec is zero-initialized above, so die_codec on it is well-defined
  // even though vpx_codec_enc_init has not run yet.
  res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
  if (res)
    die_codec(&ecodec, "Failed to get default codec config.");

  cfg.g_w = info.frame_width;
  cfg.g_h = info.frame_height;
  cfg.g_timebase.num = info.time_base.numerator;
  cfg.g_timebase.den = info.time_base.denominator;
  cfg.rc_target_bitrate = bitrate;
  cfg.g_lag_in_frames = 3;

  writer = vpx_video_writer_open(outfile_arg, kContainerIVF, &info);
  if (!writer)
    die("Failed to open %s for writing.", outfile_arg);

  if (!(infile = fopen(infile_arg, "rb")))
    die("Failed to open %s for reading.", infile_arg);

  if (vpx_codec_enc_init(&ecodec, encoder->codec_interface(), &cfg, 0))
    die_codec(&ecodec, "Failed to initialize encoder");

  // Disable alt_ref.
  if (vpx_codec_control(&ecodec, VP8E_SET_ENABLEAUTOALTREF, 0))
    die_codec(&ecodec, "Failed to set enable auto alt ref");

  if (test_decode) {
      const VpxInterface *decoder = get_vpx_decoder_by_name("vp9");
      if (vpx_codec_dec_init(&dcodec, decoder->codec_interface(), NULL, 0))
        die_codec(&dcodec, "Failed to initialize decoder.");
  }

  // Encode frames.
  while (vpx_img_read(&raw, infile)) {
    if (limit && frame_in >= limit)
      break;
    // frame_out lags frame_in by the encoder's lag; trigger on output count.
    if (update_frame_num > 1 && frame_out + 1 == update_frame_num) {
      vpx_ref_frame_t ref;
      ref.frame_type = VP8_LAST_FRAME;
      ref.img = raw;
      // Set reference frame in encoder.
      if (vpx_codec_control(&ecodec, VP8_SET_REFERENCE, &ref))
        die_codec(&ecodec, "Failed to set reference frame");
      printf(" <SET_REF>");

      // If set_reference in decoder is commented out, the enc/dec mismatch
      // would be seen.
      if (test_decode) {
        if (vpx_codec_control(&dcodec, VP8_SET_REFERENCE, &ref))
          die_codec(&dcodec, "Failed to set reference frame");
      }
    }

    encode_frame(&ecodec, &cfg, &raw, frame_in, writer, test_decode,
                 &dcodec, &frame_out, &mismatch_seen);
    frame_in++;
    if (mismatch_seen)
      break;
  }

  // Flush encoder.
  if (!mismatch_seen)
    while (encode_frame(&ecodec, &cfg, NULL, frame_in, writer, test_decode,
                        &dcodec, &frame_out, &mismatch_seen)) {}

  printf("\n");
  fclose(infile);
  printf("Processed %d frames.\n", frame_out);

  if (test_decode) {
    if (!mismatch_seen)
      printf("Encoder/decoder results are matching.\n");
    else
      printf("Encoder/decoder results are NOT matching.\n");
  }

  if (test_decode)
    if (vpx_codec_destroy(&dcodec))
      die_codec(&dcodec, "Failed to destroy decoder");

  vpx_img_free(&raw);
  if (vpx_codec_destroy(&ecodec))
    die_codec(&ecodec, "Failed to destroy encoder.");

  vpx_video_writer_close(writer);

  return EXIT_SUCCESS;
}
/*
 * Incremental encode: copy the incoming w*h BGRA block at (x,y) into the
 * persistent full-frame RGB buffer, convert the whole frame to YCbCr 4:2:0,
 * encode it and return the elementary stream (zero-padded up to 2048 bytes)
 * in output_video_sample.
 *
 * Returns 0 on success, -1 on bad arguments, -2 on missing internal buffers,
 * -3 when the caller's output buffer is too small.
 *
 * Fix: removed the unused locals `enc_size` and `offset`.
 * NOTE(review): x/y/w/h are not range-checked against width/height, and
 * input_video_sample_size is unused -- presumably the caller guarantees a
 * w*h*4-byte input inside the frame; verify at the call sites.
 */
int encode_video_sample_inc(video_encoder_t *videoencoder,const char *input_video_sample,int input_video_sample_size,
	char *output_video_sample,int *output_video_sample_size,int x,int y,int w,int h)
{
	if(videoencoder == NULL || input_video_sample == NULL || output_video_sample == NULL || output_video_sample_size == NULL)
	{
		fprintf(stderr ,"libvideoencoder: Error paraments..\n");
		return -1;
	}
	videoencoder_instanse *p_instanse = (videoencoder_instanse *)videoencoder;

	int width = p_instanse->width;
	int height = p_instanse->height;

	unsigned char* rgb_buffer = p_instanse->rgb_buffer;
	if(rgb_buffer == NULL)
	{
		fprintf(stderr ,"libvideoencoder: Error malloc ..\n");
		return -2;
	}

	/* Copy the incoming block (w x h pixels, 4 bytes each) into the
	 * persistent full-frame buffer, row by row. */
	for(int i = 0;i < h;i++)
		memcpy(rgb_buffer + width * (i+ y) * 4 + x * 4,input_video_sample + w *i * 4,w * 4);

	unsigned char* outbf = p_instanse->outbf;
	if(outbf == NULL)
	{
		fprintf(stderr ,"libvideoencoder: Error malloc ..\n");
		return -2;
	}

	/* Destination planes and strides for planar 4:2:0 output. */
	int pDstStep[3];
	unsigned char *pDst[3];
	pDstStep[0]=width;
	pDstStep[1]=width/2;
	pDstStep[2]=width/2;

	IppiSize roiSize;
	roiSize.width=width;
	roiSize.height=height;

	pDst[0]=outbf;
	pDst[1]=outbf+width*height;
	pDst[2]=outbf+width*height+width*height/4;
	ippiBGRToYCbCr420_709CSC_8u_AC4P3R((unsigned char*)rgb_buffer,width*4,pDst,pDstStep,roiSize);

	unsigned char *enc_buf=p_instanse->enc_buf;
	if(enc_buf == NULL)
	{
		fprintf(stderr ,"libvideoencoder: Error malloc ..\n");
		return -2;
	}

	struct coded_buff cdbf;
	cdbf.buff=enc_buf;

	encode_frame(outbf,&cdbf);

	/* Zero-pad the output up to 2048 bytes. */
	int padding = 2048 - cdbf.length;
	if(padding < 0)
		padding = 0;

	p_instanse->real_count_fps++;

	if(*output_video_sample_size < cdbf.length + padding)
	{
		fprintf(stderr ,"libvideoencoder: Error output_video_sample_size is too small..\n");
		return -3;
	}

	memcpy(output_video_sample,cdbf.buff,cdbf.length);
	memset(output_video_sample + cdbf.length,0,padding);
	*output_video_sample_size = cdbf.length + padding;

	return 0;
}
/*
 * Encode one full BGRA frame to MPEG-2.  The first encoded frame is scanned
 * for the MPEG-2 sequence header, which is cached in the global
 * Mpeg2SequenceHead and prepended to later frames that start a new GOP.
 * Output is zero-padded up to 512 bytes (header included).
 *
 * Returns 0 on success, -1 on bad arguments, -2 on missing internal buffers,
 * -3 when the caller's output buffer is too small.
 *
 * Fixes: the output-size check compared against cdbf.length + padding while
 * the writes below also prepend iNeedAddSequenceHeadLen header bytes, so a
 * tight caller buffer could be overflowed -- the header length is now
 * included in the check.  Removed the unused locals `enc_size`/`offset` and
 * the commented-out debug-dump code.
 */
int encode_video_sample(video_encoder_t *videoencoder,const char *input_video_sample,int input_video_sample_size,
	char *output_video_sample,int *output_video_sample_size)
{
	if(videoencoder == NULL || input_video_sample == NULL || output_video_sample == NULL || output_video_sample_size == NULL)
	{
		fprintf(stderr ,"libvideoencoder: Error paraments..\n");
		return -1;
	}
	videoencoder_instanse *p_instanse = (videoencoder_instanse *)videoencoder;

	int width = p_instanse->width;
	int height = p_instanse->height;

	unsigned char* outbf = p_instanse->outbf;
	if(outbf == NULL)
	{
		fprintf(stderr ,"libvideoencoder: Error malloc ..\n");
		return -2;
	}

	/* Destination planes and strides for planar 4:2:0 output. */
	int pDstStep[3];
	unsigned char *pDst[3];
	pDstStep[0]=width;
	pDstStep[1]=width/2;
	pDstStep[2]=width/2;

	IppiSize roiSize;
	roiSize.width=width;
	roiSize.height=height;

	pDst[0]=outbf;
	pDst[1]=outbf+width*height;
	pDst[2]=outbf+width*height+width*height/4;
	ippiBGRToYCbCr420_709CSC_8u_AC4P3R((unsigned char*)input_video_sample,width*4,pDst,pDstStep,roiSize);

	unsigned char *enc_buf=p_instanse->enc_buf;
	if(enc_buf == NULL)
	{
		fprintf(stderr ,"libvideoencoder: Error malloc ..\n");
		return -2;
	}

	struct coded_buff cdbf;
	cdbf.buff=enc_buf;

	encode_frame(outbf,&cdbf);

	int iNeedAddSequenceHeadLen = 0;

	if(!bISFirstFrame)
	{
		/* Later frames: prepend the cached sequence header whenever a new
		 * GOP starts in this frame. */
		int iFindPos = Find_MPEG2_GropHead(cdbf.buff,cdbf.length);
		if(iFindPos >= 0)
		{
			iNeedAddSequenceHeadLen = iMpeg2SequenceHeadLen;
		}
	}
	else
	{
		/* First frame: everything before the GOP head is the sequence
		 * header; cache it for later frames. */
		int iFindPos = Find_MPEG2_GropHead(cdbf.buff,cdbf.length);
		if(iFindPos >= 0)
		{
			printf("----find mpeg2 grophead %d \n",iFindPos);
			memset(Mpeg2SequenceHead,0,sizeof(Mpeg2SequenceHead));
			memcpy(Mpeg2SequenceHead,cdbf.buff,iFindPos);
			iMpeg2SequenceHeadLen = iFindPos;
			bISFirstFrame = false;
		}
	}

	/* Zero-pad so header + payload + padding reaches at least 512 bytes. */
	int padding = 512 - cdbf.length - iNeedAddSequenceHeadLen;
	if(padding < 0)
		padding = 0;

	p_instanse->real_count_fps++;

	if(*output_video_sample_size < cdbf.length + padding + iNeedAddSequenceHeadLen)
	{
		fprintf(stderr ,"libvideoencoder: Error output_video_sample_size is too small..\n");
		return -3;
	}

	memcpy(output_video_sample,Mpeg2SequenceHead,iNeedAddSequenceHeadLen);
	memcpy(output_video_sample+iNeedAddSequenceHeadLen,cdbf.buff,cdbf.length);
	memset(output_video_sample+iNeedAddSequenceHeadLen + cdbf.length,0,padding);
	*output_video_sample_size = cdbf.length + padding +iNeedAddSequenceHeadLen;

	return 0;
}
Exemple #25
0
/* Encode one WMA superframe from the samples already windowed into s->coefs.
 *
 * On success returns 0, stores exactly s->block_align bytes in *avpkt and
 * sets *got_packet_ptr to 1.  Returns a negative AVERROR on failure.
 *
 * NOTE(review): the return value of apply_window_and_mdct() is ignored here;
 * newer FFmpeg versions propagate it -- confirm failures are impossible in
 * this build.
 */
static int encode_superframe(AVCodecContext *avctx, AVPacket *avpkt,
                             const AVFrame *frame, int *got_packet_ptr)
{
    WMACodecContext *s = avctx->priv_data;
    int i, total_gain, ret;

    s->block_len_bits= s->frame_len_bits; //required by non variable block len
    s->block_len = 1 << s->block_len_bits;

    apply_window_and_mdct(avctx, frame);

    if (s->ms_stereo) {
        float a, b;
        int i;

        /* Mid/side stereo: replace L/R with (L+R)/2 and (L-R)/2. */
        for(i = 0; i < s->block_len; i++) {
            a = s->coefs[0][i]*0.5;
            b = s->coefs[1][i]*0.5;
            s->coefs[0][i] = a + b;
            s->coefs[1][i] = a - b;
        }
    }

    /* Over-allocate the packet; the final size is trimmed to block_align
     * at the end of this function.  A nonzero return here is an error. */
    if ((ret = ff_alloc_packet(avpkt, 2 * MAX_CODED_SUPERFRAME_SIZE))) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
        return ret;
    }

#if 1
    /* Binary search for the smallest workable total_gain: starting from 128,
     * each trial reduction by i is kept only when the trial encode still
     * returns a negative value (i.e. the frame fits -- see the sign
     * convention used below). */
    total_gain= 128;
    for(i=64; i; i>>=1){
        int error = encode_frame(s, s->coefs, avpkt->data, avpkt->size,
                                 total_gain - i);
        if(error<0)
            total_gain-= i;
    }
#else
    /* Alternative (disabled) search that probes both directions around a
     * starting gain of 90 and keeps whichever side scores lower. */
    total_gain= 90;
    best = encode_frame(s, s->coefs, avpkt->data, avpkt->size, total_gain);
    for(i=32; i; i>>=1){
        int scoreL = encode_frame(s, s->coefs, avpkt->data, avpkt->size, total_gain - i);
        int scoreR = encode_frame(s, s->coefs, avpkt->data, avpkt->size, total_gain + i);
        av_log(NULL, AV_LOG_ERROR, "%d %d %d (%d)\n", scoreL, best, scoreR, total_gain);
        if(scoreL < FFMIN(best, scoreR)){
            best = scoreL;
            total_gain -= i;
        }else if(scoreR < best){
            best = scoreR;
            total_gain += i;
        }
    }
#endif

    /* Final encode with the chosen gain.  A non-negative return means the
     * frame did not fit in the fixed block budget; a negative return is
     * -(number of spare bytes), consumed by the padding loop below. */
    if ((i = encode_frame(s, s->coefs, avpkt->data, avpkt->size, total_gain)) >= 0) {
        av_log(avctx, AV_LOG_ERROR, "required frame size too large. please "
               "use a higher bit rate.\n");
        return AVERROR(EINVAL);
    }
    /* The bitstream writer must be byte-aligned before byte-wise padding. */
    assert((put_bits_count(&s->pb) & 7) == 0);
    while (i++)
        put_bits(&s->pb, 8, 'N');

    flush_put_bits(&s->pb);

    /* Shift pts backwards by the encoder delay so output timestamps line up
     * with the input samples. */
    if (frame->pts != AV_NOPTS_VALUE)
        avpkt->pts = frame->pts - ff_samples_to_time_base(avctx, avctx->delay);

    avpkt->size = s->block_align;
    *got_packet_ptr = 1;
    return 0;
}
Exemple #26
0
/* JNI entry point: decodes frames from the (global) input context until one
 * full frame is produced, re-encodes it into the global enc_packet, writes
 * the result to the dump file, and returns it to Java as a sender.AVPacket
 * object (byte[] data, int size, long sequence, int mode).
 *
 * Relies heavily on file-level globals: ifmt_ctx, codec_ctx, dec_frame,
 * packet, enc_packet, save_packet, counter, current_mode, sequence, f,
 * endcode, ret.  Not reentrant.
 */
JNIEXPORT jobject JNICALL Java_sender_FFmpeg_encodeFrame (JNIEnv *env, jclass thisclass){
	AVPacket dec_packet;
	av_init_packet(&packet);
	av_init_packet(&enc_packet);
	packet.size = 0;
	enc_packet.size = 0;
	packet.data =  NULL;
	enc_packet.data = NULL;
	int got_frame;
	do {
		fflush(stdout);
		/* Pull demuxed packets until the decoder emits a complete frame. */
		while(av_read_frame(ifmt_ctx, &packet)>= 0) {
			ret = avcodec_decode_video2(codec_ctx, dec_frame,
					&got_frame, &packet);
			if(ret <= 0) {
				printf("Ret: %d\n", ret);
				fflush(stdout);
			}
			if(got_frame) {
				counter++;
				encode_frame(&enc_packet);
				/* NOTE(review): dec_packet is filled here but never used or
				 * freed afterwards -- looks like a leak or dead code; confirm. */
				av_packet_from_data(&dec_packet, enc_packet.data, enc_packet.size);
				break;
			}
		}
		//discard some frames according to framerate
	} while(((counter % 3 == 0) && (current_mode == 2)) || (0 && (current_mode == 3)) || ((counter % 3 != 0) && (current_mode == 1)));
	fflush(stdout);
	printf("Size: %d\n", enc_packet.size);
	if(enc_packet.size == 0) {
		/* End of stream: write the trailer and close the dump file. */
		fwrite(endcode, 1, sizeof(endcode), f);
		printf("Closing file.");
		fclose(f);
	} else {
		/* NOTE(review): writes save_packet while the size check above used
		 * enc_packet -- presumably save_packet is a global alias kept from a
		 * previous call; verify this is intentional. */
		fwrite(save_packet.data, 1, save_packet.size, f);
	}

	//--------------------------------------------------------------------------------------------------
	//Create return object

	jclass cls = (*env)->FindClass(env, "sender/AVPacket");
	if(!cls) {
		printf("Class 'AVPacket' not found!\n");
		fflush(stdout);
	}

	/* Constructor signature: AVPacket(byte[] data, int size, long seq, int mode). */
	jmethodID constructor = (*env)->GetMethodID(env, cls, "<init>", "([BIJI)V");
	if(!constructor) {
		printf("Constructor could not be found!\n");
		fflush(stdout);
	}

	jbyteArray data;
	jint size;
	jint mode;

	size = enc_packet.size;
	mode = current_mode;

	data = (*env)->NewByteArray(env, size);
	if (data == NULL) {
		return NULL; /* out of memory error thrown */
	}

	// move from the temp structure to the java structure
	(*env)->SetByteArrayRegion(env, data, 0, size, enc_packet.data);

	jobject return_packet = (*env)->NewObject(env, cls, constructor, data, size, (jlong) sequence, mode);
	sequence++;

	if(!return_packet) {
		printf("Couldn't create jobject!");
		fflush(stdout);
	}
	//	} else {
	//		printf("jobject created.");
	//		fflush(stdout);
	//	}

	//av_free_packet(&enc_packet);
	//av_free_packet(&dec_packet);
	return return_packet;

}
// Second encoding pass of the lightfield encoder.
//
// Stage 1: for every lf_blocksize x lf_blocksize block of camera views, the
// center view is encoded as a reference image (no tiles) and its
// reconstruction is copied out as an external reference.
// Stage 2: every camera view is encoded as a large-scale-tile, fixed-q frame
// predicted only from its block's reference image, with all in-encoder
// reference updates disabled.
//
// raw/raw_shift: scratch images (raw_shift used for high-bitdepth shifting
// by get_raw_image).  infile: raw frame data, indexed by camera position.
// The IVF output is written to outfile_name.
static void pass1(aom_image_t *raw, FILE *infile, const char *outfile_name,
                  const AvxInterface *encoder, aom_codec_enc_cfg_t *cfg,
                  int lf_width, int lf_height, int lf_blocksize, int flags,
                  aom_image_t *raw_shift) {
  AvxVideoInfo info = { encoder->fourcc,
                        cfg->g_w,
                        cfg->g_h,
                        { cfg->g_timebase.num, cfg->g_timebase.den },
                        0 };
  AvxVideoWriter *writer = NULL;
  aom_codec_ctx_t codec;
  int frame_count = 0;
  int image_size_bytes = aom_img_size_bytes(raw);
  int bu, bv;
  int u_blocks, v_blocks;
  aom_image_t *frame_to_encode;
  aom_image_t reference_images[MAX_EXTERNAL_REFERENCES];
  int reference_image_num = 0;
  int i;

  writer = aom_video_writer_open(outfile_name, kContainerIVF, &info);
  if (!writer) die("Failed to open %s for writing", outfile_name);

  if (aom_codec_enc_init(&codec, encoder->codec_interface(), cfg, flags))
    die_codec(&codec, "Failed to initialize encoder");
  if (aom_codec_control(&codec, AOME_SET_ENABLEAUTOALTREF, 0))
    die_codec(&codec, "Failed to turn off auto altref");
  if (aom_codec_control(&codec, AV1E_SET_FRAME_PARALLEL_DECODING, 0))
    die_codec(&codec, "Failed to set frame parallel decoding");
  // Note: The superblock is a sequence parameter and has to be the same for 1
  // sequence. In lightfield application, must choose the superblock size(either
  // 64x64 or 128x128) before the encoding starts. Otherwise, the default is
  // AOM_SUPERBLOCK_SIZE_DYNAMIC, and the superblock size will be set to 64x64
  // internally.
  if (aom_codec_control(&codec, AV1E_SET_SUPERBLOCK_SIZE,
                        AOM_SUPERBLOCK_SIZE_64X64))
    die_codec(&codec, "Failed to set SB size");

  // Number of view blocks in each direction, rounding up partial blocks.
  u_blocks = (lf_width + lf_blocksize - 1) / lf_blocksize;
  v_blocks = (lf_height + lf_blocksize - 1) / lf_blocksize;

  reference_image_num = u_blocks * v_blocks;
  aom_img_fmt_t ref_fmt = AOM_IMG_FMT_I420;
  if (!CONFIG_LOWBITDEPTH) ref_fmt |= AOM_IMG_FMT_HIGHBITDEPTH;
  // Allocate memory with the border so that it can be used as a reference.
  for (i = 0; i < reference_image_num; i++) {
    if (!aom_img_alloc_with_border(&reference_images[i], ref_fmt, cfg->g_w,
                                   cfg->g_h, 32, 8, AOM_BORDER_IN_PIXELS)) {
      die("Failed to allocate image.");
    }
  }

  // Fixed: this line was corrupted into two string literals joined by
  // "******" (invalid C); restored as a single message.
  printf("\n Second pass: Encoding Reference Images\n");
  for (bv = 0; bv < v_blocks; ++bv) {
    for (bu = 0; bu < u_blocks; ++bu) {
      const int block_u_min = bu * lf_blocksize;
      const int block_v_min = bv * lf_blocksize;
      int block_u_end = (bu + 1) * lf_blocksize;
      int block_v_end = (bv + 1) * lf_blocksize;
      int u_block_size, v_block_size;
      int block_ref_u, block_ref_v;

      block_u_end = block_u_end < lf_width ? block_u_end : lf_width;
      block_v_end = block_v_end < lf_height ? block_v_end : lf_height;
      u_block_size = block_u_end - block_u_min;
      v_block_size = block_v_end - block_v_min;
      // The block's reference view is the view at its center.
      block_ref_u = block_u_min + u_block_size / 2;
      block_ref_v = block_v_min + v_block_size / 2;

      printf("A%d, ", (block_ref_u + block_ref_v * lf_width));
      fseek(infile, (block_ref_u + block_ref_v * lf_width) * image_size_bytes,
            SEEK_SET);
      aom_img_read(raw, infile);

      get_raw_image(&frame_to_encode, raw, raw_shift);

      // Reference frames may be encoded without tiles.
      ++frame_count;
      printf("Encoding reference image %d of %d\n", bv * u_blocks + bu,
             u_blocks * v_blocks);
      encode_frame(&codec, frame_to_encode, frame_count, 1,
                   AOM_EFLAG_NO_REF_LAST2 | AOM_EFLAG_NO_REF_LAST3 |
                       AOM_EFLAG_NO_REF_GF | AOM_EFLAG_NO_REF_ARF |
                       AOM_EFLAG_NO_REF_BWD | AOM_EFLAG_NO_REF_ARF2 |
                       AOM_EFLAG_NO_UPD_LAST | AOM_EFLAG_NO_UPD_GF |
                       AOM_EFLAG_NO_UPD_ARF | AOM_EFLAG_NO_UPD_ENTROPY,
                   writer);

      // Keep the reconstructed frame to use as an external reference later.
      if (aom_codec_control(&codec, AV1_COPY_NEW_FRAME_IMAGE,
                            &reference_images[frame_count - 1]))
        die_codec(&codec, "Failed to copy decoder reference frame");
    }
  }

  cfg->large_scale_tile = 1;
  // Fixed q encoding for camera frames.
  cfg->rc_end_usage = AOM_Q;
  if (aom_codec_enc_config_set(&codec, cfg))
    die_codec(&codec, "Failed to configure encoder");

  // The fixed q value used in encoding.
  if (aom_codec_control(&codec, AOME_SET_CQ_LEVEL, 36))
    die_codec(&codec, "Failed to set cq level");
  if (aom_codec_control(&codec, AV1E_SET_FRAME_PARALLEL_DECODING, 1))
    die_codec(&codec, "Failed to set frame parallel decoding");
  if (aom_codec_control(&codec, AV1E_SET_SINGLE_TILE_DECODING, 1))
    die_codec(&codec, "Failed to turn on single tile decoding");
  // Set tile_columns and tile_rows to MAX values, which guarantees the tile
  // size of 64 x 64 pixels(i.e. 1 SB) for <= 4k resolution.
  if (aom_codec_control(&codec, AV1E_SET_TILE_COLUMNS, 6))
    die_codec(&codec, "Failed to set tile width");
  if (aom_codec_control(&codec, AV1E_SET_TILE_ROWS, 6))
    die_codec(&codec, "Failed to set tile height");

  for (bv = 0; bv < v_blocks; ++bv) {
    for (bu = 0; bu < u_blocks; ++bu) {
      const int block_u_min = bu * lf_blocksize;
      const int block_v_min = bv * lf_blocksize;
      int block_u_end = (bu + 1) * lf_blocksize;
      int block_v_end = (bv + 1) * lf_blocksize;
      int u, v;
      block_u_end = block_u_end < lf_width ? block_u_end : lf_width;
      block_v_end = block_v_end < lf_height ? block_v_end : lf_height;
      for (v = block_v_min; v < block_v_end; ++v) {
        for (u = block_u_min; u < block_u_end; ++u) {
          // Predict this view only from its block's reference image.
          av1_ref_frame_t ref;
          ref.idx = 0;
          ref.use_external_ref = 1;
          ref.img = reference_images[bv * u_blocks + bu];
          if (aom_codec_control(&codec, AV1_SET_REFERENCE, &ref))
            die_codec(&codec, "Failed to set reference frame");

          printf("C%d, ", (u + v * lf_width));
          fseek(infile, (u + v * lf_width) * image_size_bytes, SEEK_SET);
          aom_img_read(raw, infile);
          get_raw_image(&frame_to_encode, raw, raw_shift);

          ++frame_count;
          printf("Encoding image %d of %d\n",
                 frame_count - (u_blocks * v_blocks), lf_width * lf_height);
          encode_frame(&codec, frame_to_encode, frame_count, 1,
                       AOM_EFLAG_NO_REF_LAST2 | AOM_EFLAG_NO_REF_LAST3 |
                           AOM_EFLAG_NO_REF_GF | AOM_EFLAG_NO_REF_ARF |
                           AOM_EFLAG_NO_REF_BWD | AOM_EFLAG_NO_REF_ARF2 |
                           AOM_EFLAG_NO_UPD_LAST | AOM_EFLAG_NO_UPD_GF |
                           AOM_EFLAG_NO_UPD_ARF | AOM_EFLAG_NO_UPD_ENTROPY,
                       writer);
        }
      }
    }
  }

  // Flush encoder.
  // No ARF, this should not be needed.
  while (encode_frame(&codec, NULL, -1, 1, 0, writer)) {
  }

  for (i = 0; i < reference_image_num; i++) aom_img_free(&reference_images[i]);

  if (aom_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec.");
  aom_video_writer_close(writer);

  printf("\nSecond pass complete. Processed %d frames.\n", frame_count);
}
// TODO(tomfinegan): Improve command line parsing and add args for bitrate/fps.
// Simple AV1 encoder driver.
// Usage: <codec> <width> <height> <infile> <outfile> <kf-interval>
//        <error-resilient> <max-frames>
// Reads raw I420 frames from <infile> and writes an IVF file to <outfile>.
int main(int argc, char **argv) {
  FILE *in_fp = NULL;
  aom_codec_ctx_t enc_ctx;
  aom_codec_enc_cfg_t enc_cfg;
  int total_frames = 0;
  aom_image_t frame_img;
  aom_codec_err_t status;
  AvxVideoInfo info;
  AvxVideoWriter *ivf = NULL;
  const AvxInterface *iface = NULL;
  const int fps = 30;
  const int bitrate = 200;
  int kf_interval = 0;
  int frame_limit = 0;
  int encoded_so_far = 0;

  exec_name = argv[0];

  // Clear explicitly, as simply assigning "{ 0 }" generates
  // "missing-field-initializers" warning in some compilers.
  memset(&info, 0, sizeof(info));

  if (argc != 9) die("Invalid number of arguments");

  // argv[1]=codec, [2]=width, [3]=height, [4]=input, [5]=output,
  // [6]=keyframe interval, [7]=error-resilient flags, [8]=frame limit.
  frame_limit = (int)strtol(argv[8], NULL, 0);

  iface = get_aom_encoder_by_name(argv[1]);
  if (!iface) die("Unsupported codec.");

  info.codec_fourcc = iface->fourcc;
  info.frame_width = (int)strtol(argv[2], NULL, 0);
  info.frame_height = (int)strtol(argv[3], NULL, 0);
  info.time_base.numerator = 1;
  info.time_base.denominator = fps;

  // Dimensions must be positive and even (4:2:0 chroma subsampling).
  if (info.frame_width <= 0 || info.frame_height <= 0 ||
      info.frame_width % 2 != 0 || info.frame_height % 2 != 0) {
    die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
  }

  if (!aom_img_alloc(&frame_img, AOM_IMG_FMT_I420, info.frame_width,
                     info.frame_height, 1)) {
    die("Failed to allocate image.");
  }

  kf_interval = (int)strtol(argv[6], NULL, 0);
  if (kf_interval < 0) die("Invalid keyframe interval value.");

  printf("Using %s\n", aom_codec_iface_name(iface->codec_interface()));

  status = aom_codec_enc_config_default(iface->codec_interface(), &enc_cfg, 0);
  if (status) die_codec(&enc_ctx, "Failed to get default codec config.");

  enc_cfg.g_w = info.frame_width;
  enc_cfg.g_h = info.frame_height;
  enc_cfg.g_timebase.num = info.time_base.numerator;
  enc_cfg.g_timebase.den = info.time_base.denominator;
  enc_cfg.rc_target_bitrate = bitrate;
  enc_cfg.g_error_resilient = (aom_codec_er_flags_t)strtoul(argv[7], NULL, 0);

  ivf = aom_video_writer_open(argv[5], kContainerIVF, &info);
  if (!ivf) die("Failed to open %s for writing.", argv[5]);

  in_fp = fopen(argv[4], "rb");
  if (!in_fp) die("Failed to open %s for reading.", argv[4]);

  if (aom_codec_enc_init(&enc_ctx, iface->codec_interface(), &enc_cfg, 0))
    die_codec(&enc_ctx, "Failed to initialize encoder");

  // Encode frames, forcing a keyframe every kf_interval frames when enabled.
  while (aom_img_read(&frame_img, in_fp)) {
    const int force_kf =
        (kf_interval > 0 && total_frames % kf_interval == 0);
    encode_frame(&enc_ctx, &frame_img, total_frames++,
                 force_kf ? AOM_EFLAG_FORCE_KF : 0, ivf);
    encoded_so_far++;
    if (frame_limit > 0 && encoded_so_far >= frame_limit) break;
  }

  // Flush any frames still buffered in the encoder.
  while (encode_frame(&enc_ctx, NULL, -1, 0, ivf)) {
  }

  printf("\n");
  fclose(in_fp);
  printf("Processed %d frames.\n", total_frames);

  aom_img_free(&frame_img);
  if (aom_codec_destroy(&enc_ctx)) die_codec(&enc_ctx, "Failed to destroy codec.");

  aom_video_writer_close(ivf);

  return EXIT_SUCCESS;
}
Exemple #29
0
// One iteration of the encoder thread: grab the screen surface, convert it
// to YUV420 planar, encode it, then drain the encoded bytes from the ring
// buffer to disk and/or an Icecast stream while tracking the stream rate.
// Uses many member fields (fps, enc_yuyv/enc_y/enc_u/enc_v, ringbuffer,
// encbuf, ice, filedump_fd, m_* stats); not safe to call concurrently.
void VideoEncoder::thread_loop() {
    int encnum;
    int res;
    auto screen = this->screen.lock();

    /* Convert picture from rgb to yuv420 planar

       two steps here:

       1) rgb24a or bgr24a to yuv422 interlaced (yuyv)
       2) yuv422 to yuv420 planar (yuv420p)

       to fix endiannes issues try adding #define ARCH_PPC
       and using
       mlt_convert_bgr24a_to_yuv422
       or
       mlt_convert_argb_to_yuv422
       (see mlt_frame.h in mltframework.org sourcecode)
       i can't tell as i don't have PPC, waiting for u mr.goil :)
     */

    uint8_t *surface = (uint8_t *)screen->get_surface();
    // NOTE(review): tm is malloc'd, written by time() and never freed or
    // read afterwards -- looks like a leak / leftover debug code; confirm.
    time_t *tm = (time_t *)malloc(sizeof(time_t));
    time(tm);
//   std::cerr << "-- ENC:" << asctime(localtime(tm));
    if(!surface) {
        // No frame available: keep the frame pacing and try again next tick.
        fps->delay();
        /* std::cout << "fps->start_tv.tv_sec :" << fps->start_tv.tv_sec << \
           " tv_usec :" << fps->start_tv.tv_usec << "   \r" << std::endl; */
        return;
    }
    fps->delay();
    //uncomment this to see how long it takes between two frames in us.
    /*    timeval start_t;
        gettimeofday(&start_t,NULL);
        timeval did;
        timersub(&start_t, &m_lastTime, &did);
        m_lastTime.tv_sec = start_t.tv_sec;
        m_lastTime.tv_usec = start_t.tv_usec;
        std::cerr << "diff time :" << did.tv_usec << std::endl;*/
    screen->lock();
    auto & geo = screen->getGeometry();
    // Step 1: RGB(A) surface -> packed YUV422 (yuyv), picking the converter
    // that matches the screen's pixel layout.  Row stride is width*4 bytes.
    switch(screen->get_pixel_format()) {
    case ViewPort::RGBA32:
        mlt_convert_rgb24a_to_yuv422(surface,
                                     geo.getSize().x(), geo.getSize().y(),
                                     geo.getSize().x() << 2, (uint8_t*)enc_yuyv, NULL);
        break;

    case ViewPort::BGRA32:
        mlt_convert_bgr24a_to_yuv422(surface,
                                     geo.getSize().x(), geo.getSize().y(),
                                     geo.getSize().x() << 2, (uint8_t*)enc_yuyv, NULL);
        break;

    case ViewPort::ARGB32:
        mlt_convert_argb_to_yuv422(surface,
                                   geo.getSize().x(), geo.getSize().y(),
                                   geo.getSize().x() << 2, (uint8_t*)enc_yuyv, NULL);
        break;

    default:
        error("Video Encoder %s doesn't supports Screen %s pixel format",
              name.c_str(), screen->getName().c_str());
    }

    screen->unlock();

    // Step 2: packed YUV422 -> planar YUV420 into the per-plane buffers.
    ccvt_yuyv_420p(geo.getSize().x(), geo.getSize().y(), enc_yuyv, enc_y, enc_u, enc_v);

    ////// got the YUV, do the encoding
    res = encode_frame();
    if(res != 0) error("Can't encode frame");

    /// proceed writing and streaming encoded data in encpipe

    encnum = 0;
    if(write_to_disk || write_to_stream) {
        // Drain whatever the encoder pushed into the ring buffer.
        if((encnum = ringbuffer_read_space(ringbuffer)) > 0) {
            encbuf = (char *)realloc(encbuf, encnum);
//      encbuf = (char *)realloc(encbuf, (((audio_kbps + video_kbps)*1024)/24)); //doesn't change anything for shifting problem
            encnum = ringbuffer_read(ringbuffer, encbuf, encnum);
//      encnum = ringbuffer_read(ringbuffer, encbuf,
//                             ((audio_kbps + video_kbps)*1024)/24);
        }
    }

    if(encnum > 0) {
        //      func("%s has encoded %i bytes", name, encnum);
        if(write_to_disk && filedump_fd)
            fwrite(encbuf, 1, encnum, filedump_fd);

        if(write_to_stream && ice) {
            /*	int	wait_ms;
                wait_ms = shout_delay(ice);
                std::cerr << "---- shout delay :" << wait_ms << std::endl;*/
            // shout_sync blocks until the server is ready for more data.
            shout_sync(ice);
            if(shout_send(ice, (const unsigned char*)encbuf, encnum)
               != SHOUTERR_SUCCESS) {
                error("shout_send: %s", shout_get_error(ice));
            } // else
              //printf("%d %d\n", encnum, (int)shout_queuelen(ice));
        }
        // Accumulate elapsed wall time and bytes sent to derive the rate.
        gettimeofday(&m_ActualTime, NULL);
        if(m_ActualTime.tv_sec == m_OldTime.tv_sec)
            m_ElapsedTime += ((double)(m_ActualTime.tv_usec - m_OldTime.tv_usec)) / 1000000.0;
        else
            m_ElapsedTime += ((double)(m_ActualTime.tv_sec - m_OldTime.tv_sec)) + \
                             (((double)(m_ActualTime.tv_usec - m_OldTime.tv_usec)) / 1000000.0);
        m_OldTime.tv_sec = m_ActualTime.tv_sec;
        m_OldTime.tv_usec = m_ActualTime.tv_usec;
        m_Streamed += encnum;
        if(m_ElapsedTime >= 3.0) {      //calculate stream rate every minimum 3 seconds
            // Stream rate in kB/s over the accumulated window.
            m_StreamRate = ((double)m_Streamed / m_ElapsedTime) / 1000.0;
            m_ElapsedTime = 0;
            m_Streamed = 0;
        }
    }
}
Exemple #30
-2
// VP8 set-reference example driver.
// Usage: <width> <height> <infile> <outfile> <update-frame-num>
// Encodes raw I420 input to IVF; just before frame <update-frame-num> the
// raw image itself is installed as the VP8 last-frame reference.
int main(int argc, char **argv) {
  FILE *input = NULL;
  vpx_codec_ctx_t enc;
  vpx_codec_enc_cfg_t enc_cfg;
  int frames_done = 0;
  vpx_image_t img;
  vpx_codec_err_t status;
  VpxVideoInfo info;
  VpxVideoWriter *ivf = NULL;
  const VpxInterface *iface = NULL;
  int set_ref_frame_num = 0;
  const int fps = 30;       // TODO(dkovalev) add command line argument
  const int bitrate = 200;  // kbit/s TODO(dkovalev) add command line argument

  vp8_zero(enc);
  vp8_zero(enc_cfg);
  vp8_zero(info);

  exec_name = argv[0];

  if (argc != 6) die("Invalid number of arguments");

  // TODO(dkovalev): add vp9 support and rename the file accordingly
  iface = get_vpx_encoder_by_name("vp8");
  if (!iface) die("Unsupported codec.");

  set_ref_frame_num = atoi(argv[5]);
  if (!set_ref_frame_num) die("Couldn't parse frame number '%s'\n", argv[5]);

  info.codec_fourcc = iface->fourcc;
  info.frame_width = (int)strtol(argv[1], NULL, 0);
  info.frame_height = (int)strtol(argv[2], NULL, 0);
  info.time_base.numerator = 1;
  info.time_base.denominator = fps;

  // Dimensions must be positive and even (4:2:0 chroma subsampling).
  if (info.frame_width <= 0 || info.frame_height <= 0 ||
      info.frame_width % 2 != 0 || info.frame_height % 2 != 0) {
    die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
  }

  if (!vpx_img_alloc(&img, VPX_IMG_FMT_I420, info.frame_width,
                     info.frame_height, 1)) {
    die("Failed to allocate image.");
  }

  printf("Using %s\n", vpx_codec_iface_name(iface->codec_interface()));

  status = vpx_codec_enc_config_default(iface->codec_interface(), &enc_cfg, 0);
  if (status) die_codec(&enc, "Failed to get default codec config.");

  enc_cfg.g_w = info.frame_width;
  enc_cfg.g_h = info.frame_height;
  enc_cfg.g_timebase.num = info.time_base.numerator;
  enc_cfg.g_timebase.den = info.time_base.denominator;
  enc_cfg.rc_target_bitrate = bitrate;

  ivf = vpx_video_writer_open(argv[4], kContainerIVF, &info);
  if (!ivf) die("Failed to open %s for writing.", argv[4]);

  input = fopen(argv[3], "rb");
  if (!input) die("Failed to open %s for reading.", argv[3]);

  if (vpx_codec_enc_init(&enc, iface->codec_interface(), &enc_cfg, 0))
    die_codec(&enc, "Failed to initialize encoder");

  // Encode frames; install the raw image as the last-frame reference right
  // before the requested frame number.
  while (vpx_img_read(&img, input)) {
    if (frames_done + 1 == set_ref_frame_num) {
      vpx_ref_frame_t ref;
      ref.frame_type = VP8_LAST_FRAME;
      ref.img = img;
      if (vpx_codec_control(&enc, VP8_SET_REFERENCE, &ref))
        die_codec(&enc, "Failed to set reference frame");
    }

    encode_frame(&enc, &img, frames_done++, ivf);
  }

  // Drain any frames still buffered in the encoder.
  while (encode_frame(&enc, NULL, -1, ivf)) {
  }

  printf("\n");
  fclose(input);
  printf("Processed %d frames.\n", frames_done);

  vpx_img_free(&img);
  if (vpx_codec_destroy(&enc)) die_codec(&enc, "Failed to destroy codec.");

  vpx_video_writer_close(ivf);

  return EXIT_SUCCESS;
}