Beispiel #1
0
/* Convert (unless duped), encode and mux a single captured video frame.
   Returns false if the encoder or the muxer reports an error. */
static bool ffemu_push_video_thread(ffemu_t *handle, const struct ffemu_video_data *data)
{
   AVPacket pkt;
   int src_linesize;

   /* A duped frame reuses the previously converted picture as-is,
      so the (re)scale step is skipped entirely. */
   if (!data->is_dupe)
   {
      handle->video.sws_ctx = sws_getCachedContext(handle->video.sws_ctx,
            data->width, data->height, handle->video.fmt,
            handle->params.out_width, handle->params.out_height,
            handle->video.pix_fmt, SWS_POINT, NULL, NULL, NULL);

      src_linesize = data->pitch;
      sws_scale(handle->video.sws_ctx, (const uint8_t* const*)&data->data,
            &src_linesize, 0, data->height,
            handle->video.conv_frame->data, handle->video.conv_frame->linesize);
   }

   /* PTS is the running frame counter; the timebase is configured elsewhere. */
   handle->video.conv_frame->pts = handle->video.frame_cnt;

   if (!encode_video(handle, &pkt, handle->video.conv_frame))
      return false;

   /* The encoder may buffer frames internally; mux only when a packet
      was actually produced. */
   if (pkt.size && av_interleaved_write_frame(handle->muxer.ctx, &pkt) < 0)
      return false;

   handle->video.frame_cnt++;
   return true;
}
Beispiel #2
0
/* Drain any frames still buffered inside the video encoder, muxing each
   resulting packet. Stops when the encoder fails, produces an empty packet,
   or the muxer reports an error. */
static void ffemu_flush_video(ffemu_t *handle)
{
   bool draining = true;

   while (draining)
   {
      AVPacket pkt;
      /* Passing NULL as the frame asks the encoder to flush. Short-circuit
         evaluation preserves the original ordering of the three checks. */
      draining = encode_video(handle, &pkt, NULL) && pkt.size &&
            av_interleaved_write_frame(handle->muxer.ctx, &pkt) >= 0;
   }
}
Beispiel #3
0
/* Scheme-visible entry point: encode the current target video frame.
   Raises a Scheme error when called on an input (demuxing) context. */
SCM ffmpeg_encode_video(SCM scm_self)
{
  // TODO: AVFMT_RAWPICTURE
  struct ffmpeg_t *self = get_self(scm_self);

  /* Encoding is only valid for output contexts. */
  if (is_input_context(self))
    scm_misc_error("ffmpeg-encode-video", "Attempt to write to FFmpeg input video", SCM_EOL);

  /* Stamp the frame with a monotonically increasing presentation timestamp. */
  self->video_target_frame->pts = self->output_video_pts;
  self->output_video_pts += 1;

  encode_video(self, self->video_target_frame);

  return SCM_UNSPECIFIED;
}
Beispiel #4
0
/* Scale (unless duped), encode and mux one captured video frame.
   Returns false if encoding or muxing fails. */
static bool ffmpeg_push_video_thread(ffmpeg_t *handle, const struct ffemu_video_data *data)
{
   AVPacket pkt;

   /* Duped frames reuse the previous conversion result; only fresh
      frames go through the scaler. */
   if (!data->is_dupe)
      ffmpeg_scale_input(handle, data);

   /* PTS is the running frame counter; the timebase is set up elsewhere. */
   handle->video.conv_frame->pts = handle->video.frame_cnt;

   if (!encode_video(handle, &pkt, handle->video.conv_frame))
      return false;

   /* The encoder may delay output; mux only when a packet came out. */
   if (pkt.size && av_interleaved_write_frame(handle->muxer.ctx, &pkt) < 0)
      return false;

   handle->video.frame_cnt++;
   return true;
}
Beispiel #5
0
/* Scale (unless duped), encode and mux one captured video frame using the
   project's software scaler. Returns false on encoder or muxer failure. */
static bool ffemu_push_video_thread(ffemu_t *handle, const struct ffemu_video_data *data)
{
   if (!data->is_dupe)
   {
      /* Rebuild the scaler filter whenever any part of the input geometry
         changes. Fix: the pitch is part of the scaler configuration too, so
         a pitch change at constant width/height must also trigger a rebuild;
         previously it would keep scaling with a stale in_stride. */
      if (data->width != handle->video.scaler.in_width ||
            data->height != handle->video.scaler.in_height ||
            data->pitch != handle->video.scaler.in_stride)
      {
         handle->video.scaler.in_width  = data->width;
         handle->video.scaler.in_height = data->height;
         handle->video.scaler.in_stride = data->pitch;

         // Attempt to preserve more information if we scale down.
         bool shrunk = handle->params.out_width < data->width || handle->params.out_height < data->height;
         handle->video.scaler.scaler_type = shrunk ? SCALER_TYPE_BILINEAR : SCALER_TYPE_POINT;

         handle->video.scaler.out_width  = handle->params.out_width;
         handle->video.scaler.out_height = handle->params.out_height;
         handle->video.scaler.out_stride = handle->video.conv_frame->linesize[0];

         scaler_ctx_gen_filter(&handle->video.scaler);
      }

      scaler_ctx_scale(&handle->video.scaler, handle->video.conv_frame->data[0], data->data);
   }

   /* PTS is simply the running frame counter (timebase configured elsewhere). */
   handle->video.conv_frame->pts = handle->video.frame_cnt;

   AVPacket pkt;
   if (!encode_video(handle, &pkt, handle->video.conv_frame))
      return false;

   /* Encoder may buffer; only mux when a packet was actually produced. */
   if (pkt.size)
   {
      if (av_interleaved_write_frame(handle->muxer.ctx, &pkt) < 0)
         return false;
   }

   handle->video.frame_cnt++;
   return true;
}
Beispiel #6
0
/*
 * Compress one frame of interleaved metadata + audio + video into dest.
 *
 * If source is non-NULL it must carry the 12-byte 'chav' header followed by
 * metadata, per-channel audio samples and raw video; otherwise the inputs are
 * taken from state->compress. On success *complength receives the total
 * number of bytes written to dest.
 *
 * Fixes vs. the original:
 *  - channels/width/height are validated BEFORE being used: an oversized
 *    channel count from an untrusted source header previously overflowed
 *    the audiostart[MAX_CHANNELS] stack array.
 *  - the 16-bit audio-size field (dest[8..9]) is zero-initialized so it is
 *    well-defined even when no audio channels are encoded.
 */
avcomp_error avcomp_encode_data(avcomp_state *state, const UINT8 *source, UINT8 *dest, UINT32 *complength)
{
	const UINT8 *metastart, *videostart, *audiostart[MAX_CHANNELS];
	UINT32 metasize, channels, samples, width, height;
	UINT32 audioxor, videoxor, videostride;
	avcomp_error err;
	UINT32 dstoffs;
	UINT32 chnum;

	/* extract data from source if present */
	if (source != NULL)
	{
		/* validate the header */
		if (source[0] != 'c' || source[1] != 'h' || source[2] != 'a' || source[3] != 'v')
			return AVCERR_INVALID_DATA;

		/* extract info from the header */
		metasize = source[4];
		channels = source[5];
		samples = (source[6] << 8) + source[7];
		width = (source[8] << 8) + source[9];
		height = (source[10] << 8) + source[11];

		/* validate header-supplied sizes before they are used to index or
		   advance pointers; channels in particular indexes audiostart[] */
		if (width > state->maxwidth || height > state->maxheight)
			return AVCERR_VIDEO_TOO_LARGE;
		if (channels > state->maxchannels || channels > MAX_CHANNELS)
			return AVCERR_AUDIO_TOO_LARGE;

		/* determine the start of each piece of data */
		source += 12;
		metastart = source;
		source += metasize;
		for (chnum = 0; chnum < channels; chnum++)
		{
			audiostart[chnum] = source;
			source += 2 * samples;
		}
		videostart = source;

		/* data is assumed to be big-endian already */
		audioxor = videoxor = 0;
		videostride = 2 * width;
	}

	/* otherwise, extract from the state */
	else
	{
		UINT16 betest = 0;

		/* extract metadata information */
		metastart = state->compress.metadata;
		metasize = state->compress.metalength;
		if ((metastart == NULL && metasize != 0) || (metastart != NULL && metasize == 0))
			return AVCERR_INVALID_CONFIGURATION;

		/* extract audio information; bound-check before filling audiostart[] */
		channels = state->compress.channels;
		samples = state->compress.samples;
		if (channels > state->maxchannels || channels > MAX_CHANNELS)
			return AVCERR_AUDIO_TOO_LARGE;
		for (chnum = 0; chnum < channels; chnum++)
			audiostart[chnum] = (const UINT8 *)state->compress.audio[chnum];

		/* extract video information */
		videostart = NULL;
		videostride = width = height = 0;
		if (state->compress.video != NULL)
		{
			videostart = (const UINT8 *)state->compress.video->base;
			videostride = state->compress.video->rowpixels * 2;
			width = state->compress.video->width;
			height = state->compress.video->height;
		}

		/* data is assumed to be native-endian */
		*(UINT8 *)&betest = 1;
		audioxor = videoxor = (betest == 1) ? 1 : 0;

		/* validate the video dimensions from the state */
		if (width > state->maxwidth || height > state->maxheight)
			return AVCERR_VIDEO_TOO_LARGE;
	}

	/* write the basics to the new header */
	dest[0] = metasize;
	dest[1] = channels;
	dest[2] = samples >> 8;
	dest[3] = samples;
	dest[4] = width >> 8;
	dest[5] = width;
	dest[6] = height >> 8;
	dest[7] = height;

	/* the audio-size field is only written by encode_audio(); make sure it
	   is defined even when channels == 0 */
	dest[8] = 0;
	dest[9] = 0;

	/* starting offsets: fixed header plus one 16-bit size per channel */
	dstoffs = 10 + 2 * channels;

	/* copy the metadata first */
	if (metasize > 0)
	{
		memcpy(dest + dstoffs, metastart, metasize);
		dstoffs += metasize;
	}

	/* encode the audio channels */
	if (channels > 0)
	{
		/* encode the audio */
		err = encode_audio(state, channels, samples, audiostart, audioxor, dest + dstoffs, &dest[8]);
		if (err != AVCERR_NONE)
			return err;

		/* advance the pointers past the data */
		dstoffs += (dest[8] << 8) + dest[9];
		for (chnum = 0; chnum < channels; chnum++)
			dstoffs += (dest[10 + 2 * chnum] << 8) + dest[11 + 2 * chnum];
	}

	/* encode the video data */
	if (width > 0 && height > 0)
	{
		UINT32 vidlength = 0;

		/* encode the video */
		err = encode_video(state, width, height, videostart, videostride, videoxor, dest + dstoffs, &vidlength);
		if (err != AVCERR_NONE)
			return err;

		/* advance the pointers past the data */
		dstoffs += vidlength;
	}

	/* set the total compression */
	*complength = dstoffs;
	return AVCERR_NONE;
}
Beispiel #7
0
/* Tear down an ffmpeg_t wrapper: flush any pending encoder output, write the
 * container trailer, then release frames, buffers, codec contexts and the
 * format context. Safe to call on partially-initialized objects — every
 * resource is guarded by a NULL/flag check and cleared after release so a
 * repeated call is a no-op. Always returns SCM_UNSPECIFIED. */
SCM ffmpeg_destroy(SCM scm_self)
{
  struct ffmpeg_t *self = get_self_no_check(scm_self);

  /* Drain the encoders only if the header was written — flushing into a
   * stream without a header would be invalid. Must happen before the
   * trailer is written below. */
  if (self->header_written) {
    // Clear audio encoder pipeline
    if (self->audio_codec_ctx)
      while (encode_audio(self, NULL));

    // Clear video encoder pipeline
    if (self->video_codec_ctx)
      while (encode_video(self, NULL));
  };

  if (self->video_target_frame) {
    av_frame_unref(self->video_target_frame);
    av_frame_free(&self->video_target_frame);
    self->video_target_frame = NULL;
  };

  if (self->audio_packed_frame) {
    av_frame_unref(self->audio_packed_frame);
    av_frame_free(&self->audio_packed_frame);
    self->audio_packed_frame = NULL;
  };

  if (self->audio_target_frame) {
    av_frame_unref(self->audio_target_frame);
    av_frame_free(&self->audio_target_frame);
    self->audio_target_frame = NULL;
  };

  if (self->audio_buffer.buffer) {
    ringbuffer_destroy(&self->audio_buffer);
    self->audio_buffer.buffer = NULL;
  };

  /* Finalize the container after the encoders were drained above. */
  if (self->header_written) {
    av_write_trailer(self->fmt_ctx);
    self->header_written = 0;
  };

  if (self->orig_pkt.data) {
    av_packet_unref(&self->orig_pkt);
    self->orig_pkt.data = NULL;
  };

  /* NOTE(review): avcodec_close() does not free the context itself
   * (avcodec_free_context() would); if these contexts are heap-allocated
   * this leaks them — confirm against the allocation site. */
  if (self->audio_codec_ctx) {
    avcodec_close(self->audio_codec_ctx);
    self->audio_codec_ctx = NULL;
  };

  if (self->video_codec_ctx) {
    avcodec_close(self->video_codec_ctx);
    self->video_codec_ctx = NULL;
  };

  /* Close the output AVIO stream before the format context is released. */
  if (self->output_file) {
    avio_close(self->fmt_ctx->pb);
    self->output_file = 0;
  };

  /* Input contexts are closed via avformat_close_input (which also frees);
   * output contexts are freed directly. Must come last — earlier steps
   * still dereference fmt_ctx. */
  if (self->fmt_ctx) {
    if (is_input_context(self))
      avformat_close_input(&self->fmt_ctx);
    else
      avformat_free_context(self->fmt_ctx);
    self->fmt_ctx = NULL;
  };

  return SCM_UNSPECIFIED;
}