#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>

int main()
{
  printf("starting test\n");
  FILE* input = fopen("screenshot.bin", "rb");
  if(input == NULL)
  {
    printf("Couldn't find file\n");
    exit(1);
  }
  
  encoder_context* context = create_context();
  init_encoder(context, 1366, 768);
  init_codec(context);
  init_image(context);
  
  struct stat stat_info;
  int result = stat("screenshot.bin", &stat_info);
  if(result)
  {
    fatal("Could not fstat");
  }
  
  char* buffer = malloc(stat_info.st_size);
  if(buffer == NULL || fread(buffer, stat_info.st_size, 1, input) != 1)
  {
    fatal("Could not read screenshot.bin");
  }
  
  /*
  memset(buffer, 0xF, stat_info.st_size);
  
  int a;
  for(a = 0; a < stat_info.st_size; a++)
  {
    if(a % 4 == 0)
    {
      buffer[a] = 0x0;
    }
  }*/
  
  convert_frame(context, buffer);
  FILE* output = fopen("mem.bin", "wb");
  if(output == NULL)
  {
    fatal("Couldn't open mem.bin for writing");
  }
  fwrite(context->raw->planes[2], 100000, 1, output);
  fflush(output);
  printf("Size is %d\n", context->raw->stride[3]);

  int i;
  for(i = 0; i < 100; i++)
  {
    convert_frame(context, buffer);
    encode_next_frame(context);
  }
  encode_finish(context);

  fclose(output);
  fclose(input);
  free(buffer);

  printf("Finished test\n");
  return 0;
}
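The harness leans on a fatal() helper that is not shown anywhere in the listing. A minimal sketch, assuming it simply reports the message and aborts, which is all the call sites need (the real project may log more context):

/* Hypothetical helper for the test harness above: report and abort.
 * Slots into the same file, which already includes stdio.h/stdlib.h. */
static void fatal(const char* msg)
{
  fprintf(stderr, "fatal: %s\n", msg);
  exit(EXIT_FAILURE);
}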
Example #2
static inline void output_video_data(struct obs_core_video *video,
		struct video_data *input_frame, int count)
{
	const struct video_output_info *info;
	struct video_frame output_frame;
	bool locked;

	info = video_output_get_info(video->video);

	locked = video_output_lock_frame(video->video, &output_frame, count,
			input_frame->timestamp);
	if (locked) {
		if (video->gpu_conversion) {
			set_gpu_converted_data(video, &output_frame,
					input_frame, info);

		} else if (format_is_yuv(info->format)) {
			convert_frame(&output_frame, input_frame, info);
		} else {
			copy_rgbx_frame(&output_frame, input_frame, info);
		}

		video_output_unlock_frame(video->video);
	}
}
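format_is_yuv() decides which conversion path the frame takes. A minimal sketch of the idea, assuming OBS's video_format enum with the usual planar/packed YUV members (the exact value set is OBS-internal and may differ by version):

/* Sketch only: true for pixel formats that need YUV conversion.
 * The VIDEO_FORMAT_* names are assumed from the OBS headers. */
static inline bool format_is_yuv(enum video_format format)
{
	switch (format) {
	case VIDEO_FORMAT_I420:
	case VIDEO_FORMAT_NV12:
	case VIDEO_FORMAT_YUY2:
	case VIDEO_FORMAT_UYVY:
		return true;
	default:
		return false;
	}
}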
Example #3
static void get_delayed(struct obj *self)
{
	AVPacket pkt;
	AVFrame *frame;
	int got_pic;

	av_init_packet(&pkt);
	frame = avcodec_alloc_frame();

	pkt.data = NULL;
	pkt.size = 0;

	do {
		GstFlowReturn ret;
		if (avcodec_decode_video2(self->av_ctx, frame, &got_pic, &pkt) < 0)
			break;
		if (got_pic) {
			GstBuffer *out_buf;
			out_buf = convert_frame(self, frame);
			ret = gst_pad_push(self->srcpad, out_buf);
			if (ret != GST_FLOW_OK)
				break;
		}
	} while (got_pic);

	av_free(frame);
}
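get_delayed() drains the pictures the decoder is still holding by feeding it an empty packet, which is the flush convention of the old avcodec_decode_video2() API. For reference, the same drain loop under FFmpeg's current send/receive API would look roughly like this (a sketch only, with error handling trimmed):

/* Sketch: drain a decoder with the modern FFmpeg API.
 * Sending NULL puts the codec into draining mode. */
static void drain_decoder(AVCodecContext *ctx, AVFrame *frame)
{
	avcodec_send_packet(ctx, NULL);
	while (avcodec_receive_frame(ctx, frame) == 0) {
		/* hand the picture downstream, e.g. convert_frame(self, frame) */
	}
}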
Example #4
/* Capture a frame using the frame callback.
 * Parameters and return value for this routine match _camera_device_read_frame.
 */
static int
_camera_device_read_frame_callback(WndCameraDevice* wcd,
                                   ClientFrameBuffer* framebuffers,
                                   int fbs_num,
                                   float r_scale,
                                   float g_scale,
                                   float b_scale,
                                   float exp_comp)
{
    /* Grab the frame. Note that this call will cause frame callback to be
     * invoked before capGrabFrameNoStop returns. */
    if (!capGrabFrameNoStop(wcd->cap_window) || wcd->last_frame == NULL) {
        E("%s: Device '%s' is unable to grab a frame: %d",
          __FUNCTION__, wcd->window_name, GetLastError());
        return -1;
    }

    /* Convert framebuffer. */
    return convert_frame(wcd->last_frame,
                         wcd->pixel_format,
                         wcd->frame_bitmap->bmiHeader.biSizeImage,
                         wcd->frame_bitmap->bmiHeader.biWidth,
                         wcd->frame_bitmap->bmiHeader.biHeight,
                         framebuffers, fbs_num,
                         r_scale, g_scale, b_scale, exp_comp);
}
Example #5
struct ast_frame* convert_frame_to_slinear( struct ast_trans_pvt* trans, struct ast_frame* fr )
{
    // check for null frame
    if ( fr == NULL )
    {
        ast_log( LOG_ERROR, "unable to translate null frame to slinear\n" ) ;
        return NULL ;
    }

    // we don't need to duplicate this frame since
    // the normal translation would free it anyway, so
    // we'll just pretend we free'd and malloc'd a new one.
#ifndef	AC_USE_G722
    if ( fr->subclass == AST_FORMAT_SLINEAR )
#else
    if ( fr->subclass == AST_FORMAT_SLINEAR16 )
#endif
        return fr ;

    // check for null translator ( after we've checked that we need to translate )
    if ( trans == NULL )
    {
        ast_log( LOG_ERROR, "unable to translate frame with null translation path\n" ) ;
        return fr ;
    }

    // return the converted frame
    return convert_frame( trans, fr ) ;
}
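Callers are expected to own a translation path before using this wrapper. A hedged usage sketch: ast_translator_build_path() is the stock Asterisk way to obtain one, and the destination format should follow the same AC_USE_G722 choice as above.

/* Sketch: lazily build a path and translate one frame to slinear.
 * Error handling trimmed; 'path' is owned by the caller. */
static struct ast_frame* frame_to_slinear( struct ast_trans_pvt** path, struct ast_frame* fr )
{
    if ( *path == NULL )
        *path = ast_translator_build_path( AST_FORMAT_SLINEAR, fr->subclass ) ;
    return convert_frame_to_slinear( *path, fr ) ;
}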
Example #6
struct ast_frame* convert_frame_from_slinear( struct ast_trans_pvt* trans, struct ast_frame* fr )
{
    // check for null translator
    if ( trans == NULL )
    {
        //ast_log( LOG_ERROR, "unable to translate frame with null translation path\n" ) ;
        return fr ;
    }

    // check for null frame
    if ( fr == NULL )
    {
        ast_log( LOG_ERROR, "unable to translate null slinear frame\n" ) ;
        return NULL ;
    }

    // if the frame is not slinear, return an error
#ifndef	AC_USE_G722
    if ( fr->subclass != AST_FORMAT_SLINEAR )
#else
    if ( fr->subclass != AST_FORMAT_SLINEAR16 )
#endif
    {
        ast_log( LOG_ERROR, "unable to translate non-slinear frame\n" ) ;
        return NULL ;
    }

    // return the converted frame
    return convert_frame( trans, fr ) ;
}
Example #7
static void
process_input_buf (GstOmxBaseFilter * omx_base_filter, GstBuffer **buf)
{
  GstOmxH264Dec *h264_self;

  h264_self = GST_OMX_H264DEC (omx_base_filter);

  if (h264_self->h264Format == GSTOMX_H264_FORMAT_UNKNOWN) {
    check_frame(h264_self, *buf);
  }

  if (h264_self->h264Format == GSTOMX_H264_FORMAT_3GPP) {

    if (omx_base_filter->last_pad_push_return != GST_FLOW_OK ||
        !(omx_base_filter->gomx->omx_state == OMX_StateExecuting ||
            omx_base_filter->gomx->omx_state == OMX_StatePause)) {
        GST_LOG_OBJECT(h264_self, "this frame will not be converted and go to out_flushing");
        return;
    }

    GST_LOG_OBJECT(h264_self, "H264 format is 3GPP. convert to NALU");
    convert_frame(h264_self, buf);
  }

  GST_OMX_BASE_FILTER_CLASS (parent_class)->process_input_buf (omx_base_filter, buf);
}
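For the 3GPP case, convert_frame() has to rewrite MP4-style NAL units, which sit behind fixed-size big-endian length fields, into the Annex-B start codes the decoder expects. The core of that rewrite, as a sketch (assuming 4-byte length prefixes, which makes an in-place swap possible because prefix and start code are the same size):

/* Sketch: overwrite 4-byte NAL length prefixes with 00 00 00 01
 * start codes, in place. Assumes well-formed length-prefixed data. */
static void lengths_to_startcodes (guint8 *data, gsize size)
{
  gsize pos = 0;
  while (pos + 4 <= size) {
    guint32 nal_len = (data[pos] << 24) | (data[pos + 1] << 16) |
                      (data[pos + 2] << 8) | data[pos + 3];
    data[pos] = 0x00;
    data[pos + 1] = 0x00;
    data[pos + 2] = 0x00;
    data[pos + 3] = 0x01;
    pos += 4 + nal_len;
  }
}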
Example #8
Rect absolute_frame(View* view) {
	Rect ret = view->frame;
	//find root view
	View* v = view;
	while (v->superview) {
		v = v->superview;
		ret = convert_frame(v, ret);
	}

	//find containing window
	Window* win = containing_window(v);
	ASSERT(win, "couldn't find container window!");

	return convert_rect(win->frame, ret);
}
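convert_frame() here is the toy GUI's coordinate transform, folded over every ancestor view. A plausible minimal version, assuming Rect carries an origin and that converting means offsetting by the parent's frame (names and fields are hypothetical; the real definition lives with the View code):

//Sketch: translate a rectangle into the parent view's coordinate space
Rect convert_frame(View* parent, Rect child) {
	Rect ret = child;
	ret.origin.x += parent->frame.origin.x;
	ret.origin.y += parent->frame.origin.y;
	return ret;
}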
Example #9
static inline void output_video_data(struct obs_core_video *video,
		struct video_data *frame, int cur_texture)
{
	const struct video_output_info *info;
	info = video_output_getinfo(video->video);

	if (video->gpu_conversion) {
		if (!set_gpu_converted_data(video, frame, cur_texture))
			return;

	} else if (format_is_yuv(info->format)) {
		if (!convert_frame(video, frame, info, cur_texture))
			return;
	}

	video_output_swap_frame(video->video, frame);
}
Example #10
static GstFlowReturn
pad_chain(GstPad *pad, GstBuffer *buf)
{
	struct obj *self;
	GstFlowReturn ret = GST_FLOW_OK;
	AVCodecContext *ctx;
	AVFrame *frame = NULL;
	int got_pic;
	AVPacket pkt;
	int read;

	self = (struct obj *)((GstObject *)pad)->parent;
	ctx = self->av_ctx;

	if (G_UNLIKELY(!self->initialized)) {
		GstCaps *new_caps;
		GstStructure *struc;

		self->initialized = true;
		if (gst_av_codec_open(ctx, self->codec) < 0) {
			ret = GST_FLOW_ERROR;
			goto leave;
		}

		if (self->parse_func)
			self->parse_func(self, buf);

		new_caps = gst_caps_new_empty();

		struc = gst_structure_new("video/x-raw-yuv",
				"width", G_TYPE_INT, ctx->width,
				"height", G_TYPE_INT, ctx->height,
				"format", GST_TYPE_FOURCC, GST_MAKE_FOURCC('I','4','2','0'),
				NULL);

		if (ctx->time_base.num)
			gst_structure_set(struc,
					"framerate", GST_TYPE_FRACTION,
					ctx->time_base.den, ctx->time_base.num,
					NULL);

		if (ctx->sample_aspect_ratio.num)
			gst_structure_set(struc,
					"pixel-aspect-ratio", GST_TYPE_FRACTION,
					ctx->sample_aspect_ratio.num, ctx->sample_aspect_ratio.den,
					NULL);

		gst_caps_append_structure(new_caps, struc);

		GST_INFO_OBJECT(self, "caps are: %" GST_PTR_FORMAT, new_caps);
		gst_pad_set_caps(self->srcpad, new_caps);
		gst_caps_unref(new_caps);
	}

	av_init_packet(&pkt);
	pkt.data = buf->data;
	pkt.size = buf->size;

	frame = avcodec_alloc_frame();

	read = avcodec_decode_video2(ctx, frame, &got_pic, &pkt);
	if (read < 0) {
		GST_WARNING_OBJECT(self, "error: %i", read);
		goto leave;
	}

	if (got_pic) {
		GstBuffer *out_buf;
		out_buf = convert_frame(self, frame);
		out_buf->timestamp = buf->timestamp;
		out_buf->duration = buf->duration;
		ret = gst_pad_push(self->srcpad, out_buf);
	}

leave:
	av_free(frame);
	gst_buffer_unref(buf);

	return ret;
}
Example #11
int
camera_device_read_frame(CameraDevice* cd,
                         ClientFrameBuffer* framebuffers,
                         int fbs_num,
                         float r_scale,
                         float g_scale,
                         float b_scale,
                         float exp_comp)
{
    WndCameraDevice* wcd;
    HBITMAP bm_handle;

    /* Sanity checks. */
    if (cd == NULL || cd->opaque == NULL) {
        E("%s: Invalid camera device descriptor", __FUNCTION__);
        return -1;
    }
    wcd = (WndCameraDevice*)cd->opaque;
    if (wcd->dc == NULL) {
        W("%s: Device '%s' is not captuing video",
          __FUNCTION__, wcd->window_name);
        return -1;
    }

    /* Grab a frame and post it to the clipboard. Not very efficient, but this
     * is how the capXxx API operates. */
    if (!capGrabFrameNoStop(wcd->cap_window) ||
        !capEditCopy(wcd->cap_window) ||
        !OpenClipboard(wcd->cap_window)) {
        E("%s: Device '%s' is unable to save frame to the clipboard: %d",
          __FUNCTION__, wcd->window_name, GetLastError());
        return -1;
    }

    /* Get bitmap handle saved into clipboard. Note that bitmap is still
     * owned by the clipboard here! */
    bm_handle = (HBITMAP)GetClipboardData(CF_BITMAP);
    if (bm_handle == NULL) {
        E("%s: Device '%s' is unable to obtain frame from the clipboard: %d",
          __FUNCTION__, wcd->window_name, GetLastError());
        CloseClipboard();
        return -1;
    }

    /* Get bitmap buffer. */
    if (wcd->gdi_bitmap->bmiHeader.biHeight > 0) {
        wcd->gdi_bitmap->bmiHeader.biHeight = -wcd->gdi_bitmap->bmiHeader.biHeight;
    }

    if (!GetDIBits(wcd->dc, bm_handle, 0, wcd->frame_bitmap->bmiHeader.biHeight,
                   wcd->framebuffer, wcd->gdi_bitmap, DIB_RGB_COLORS)) {
        E("%s: Device '%s' is unable to transfer frame to the framebuffer: %d",
          __FUNCTION__, wcd->window_name, GetLastError());
        CloseClipboard();
        return -1;
    }

    if (wcd->gdi_bitmap->bmiHeader.biHeight < 0) {
        wcd->gdi_bitmap->bmiHeader.biHeight = -wcd->gdi_bitmap->bmiHeader.biHeight;
    }

    CloseClipboard();

    /* Convert framebuffer. */
    return convert_frame(wcd->framebuffer,
                         wcd->pixel_format,
                         wcd->gdi_bitmap->bmiHeader.biSizeImage,
                         wcd->frame_bitmap->bmiHeader.biWidth,
                         wcd->frame_bitmap->bmiHeader.biHeight,
                         framebuffers, fbs_num,
                         r_scale, g_scale, b_scale, exp_comp);
}
Example #12
static GstFlowReturn
pad_chain(GstPad *pad, GstBuffer *buf)
{
	struct obj *self;
	GstFlowReturn ret = GST_FLOW_OK;
	AVCodecContext *ctx;
	AVFrame *frame = NULL;
	int got_pic;
	AVPacket pkt;
	int read;

	self = (struct obj *)((GstObject *)pad)->parent;
	GST_DEBUG_OBJECT (self, "pad chain");
	ctx = self->av_ctx;

	if (G_UNLIKELY(!self->initialized)) {
		GstCaps *new_caps;
		GstStructure *struc;

		self->initialized = true;
		if (gst_av_codec_open(ctx, self->codec) < 0) {
			ret = GST_FLOW_ERROR;
			goto leave;
		}

		if (self->parse_func)
			self->parse_func(self, buf);

		new_caps = gst_caps_new_empty();

		struc = gst_structure_new("video/x-raw-yuv",
				"width", G_TYPE_INT, ctx->width,
				"height", G_TYPE_INT, ctx->height,
				"format", GST_TYPE_FOURCC, GST_MAKE_FOURCC('I','4','2','0'),
				NULL);

		if (ctx->time_base.num)
			gst_structure_set(struc,
					"framerate", GST_TYPE_FRACTION,
					ctx->time_base.den,
					ctx->time_base.num * ctx->ticks_per_frame,
					NULL);

		if (ctx->sample_aspect_ratio.num)
			gst_structure_set(struc,
					"pixel-aspect-ratio", GST_TYPE_FRACTION,
					ctx->sample_aspect_ratio.num, ctx->sample_aspect_ratio.den,
					NULL);

		gst_caps_append_structure(new_caps, struc);

		GST_INFO_OBJECT(self, "caps are: %" GST_PTR_FORMAT, new_caps);
		gst_pad_set_caps(self->srcpad, new_caps);
		gst_caps_unref(new_caps);
	}

	av_new_packet(&pkt, buf->size);

	memcpy(pkt.data, buf->data, buf->size);
	memset(pkt.data + pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE);

	frame = avcodec_alloc_frame();

	pkt.dts = pkt.pts = gstav_timestamp_to_pts(ctx, buf->timestamp);
#if LIBAVCODEC_VERSION_MAJOR < 53
	ctx->reordered_opaque = pkt.dts;
#endif

	g_mutex_lock(&self->mutex);
	read = avcodec_decode_video2(ctx, frame, &got_pic, &pkt);
	av_free_packet(&pkt);
	if (read < 0) {
		GST_WARNING_OBJECT(self, "error: %i", read);
		g_mutex_unlock(&self->mutex);
		goto leave;
	}

	if (got_pic) {
		GstBuffer *out_buf;
		out_buf = convert_frame(self, frame);
		g_mutex_unlock(&self->mutex);
		ret = gst_pad_push(self->srcpad, out_buf);
	}
	else
		g_mutex_unlock(&self->mutex);
leave:
	av_free(frame);
	gst_buffer_unref(buf);
	GST_DEBUG_OBJECT (self, "pad chain returning %s", gst_flow_get_name (ret));
	return ret;
}
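gstav_timestamp_to_pts() bridges GStreamer's nanosecond timestamps to the codec's time base. A hedged sketch of what such a helper typically does; av_rescale_q() is standard libavutil, but the surrounding project's real helper may differ:

/* Sketch: map a GstClockTime (nanoseconds) onto ctx->time_base. */
static int64_t gstav_timestamp_to_pts(AVCodecContext *ctx, GstClockTime ts)
{
	if (ts == GST_CLOCK_TIME_NONE)
		return AV_NOPTS_VALUE;
	return av_rescale_q(ts, (AVRational){ 1, 1000000000 }, ctx->time_base);
}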
Example #13
int
camera_device_read_frame(CameraDevice* ccd,
                         ClientFrameBuffer* framebuffers,
                         int fbs_num,
                         float r_scale,
                         float g_scale,
                         float b_scale,
                         float exp_comp)
{
    LinuxCameraDevice* cd;

    /* Sanity checks. */
    if (ccd == NULL || ccd->opaque == NULL) {
        E("%s: Invalid camera device descriptor", __FUNCTION__);
        return -1;
    }
    cd = (LinuxCameraDevice*)ccd->opaque;
    if (cd->handle < 0) {
        E("%s: Camera device is not opened", __FUNCTION__);
        return -1;
    }

    if (cd->io_type == CAMERA_IO_DIRECT) {
        /* Read directly from the device. */
        size_t total_read_bytes = 0;
        /* There is one framebuffer allocated for direct read. */
        void* buff = cd->framebuffers[0].data;
        do {
            ssize_t read_bytes =
                read(cd->handle, (char*)buff + total_read_bytes,
                     cd->actual_pixel_format.sizeimage - total_read_bytes);
            if (read_bytes < 0) {
                switch (errno) {
                    case EIO:
                    case EAGAIN:
                        continue;
                    default:
                        E("%s: Unable to read from the camera device '%s': %s",
                          __FUNCTION__, cd->device_name, strerror(errno));
                        return -1;
                }
            }
            total_read_bytes += read_bytes;
        } while (total_read_bytes < cd->actual_pixel_format.sizeimage);
        /* Convert the read frame into the caller's framebuffers. */
        return convert_frame(buff, cd->actual_pixel_format.pixelformat,
                             cd->actual_pixel_format.sizeimage,
                             cd->actual_pixel_format.width,
                             cd->actual_pixel_format.height,
                             framebuffers, fbs_num,
                             r_scale, g_scale, b_scale, exp_comp);
    } else {
        /* Dequeue next buffer from the device. */
        struct v4l2_buffer buf;
        int res;
        CLEAR(buf);
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = cd->io_type == CAMERA_IO_MEMMAP ? V4L2_MEMORY_MMAP :
                                                       V4L2_MEMORY_USERPTR;
        for (;;) {
            const int res = _xioctl(cd->handle, VIDIOC_DQBUF, &buf);
            if (res >= 0) {
                break;
            } else if (errno == EAGAIN) {
                return 1;   // Tells the caller to repeat.
            } else if (errno != EIO) {
                E("%s: VIDIOC_DQBUF on camera '%s' has failed: %s",
                  __FUNCTION__, cd->device_name, strerror(errno));
                return -1;
            }
        }

        /* Convert frame to the receiving buffers. */
        res = convert_frame(cd->framebuffers[buf.index].data,
                            cd->actual_pixel_format.pixelformat,
                            cd->actual_pixel_format.sizeimage,
                            cd->actual_pixel_format.width,
                            cd->actual_pixel_format.height,
                            framebuffers, fbs_num,
                            r_scale, g_scale, b_scale, exp_comp);

        /* Requeue the buffer back to the device. */
        if (_xioctl(cd->handle, VIDIOC_QBUF, &buf) < 0) {
            W("%s: VIDIOC_QBUF on camera '%s' has failed: %s",
              __FUNCTION__, cd->device_name, strerror(errno));
        }

        return res;
    }
}
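The _xioctl() used for VIDIOC_DQBUF/VIDIOC_QBUF is, in the V4L2 capture examples this code follows, the usual restart-on-EINTR wrapper around ioctl(). A minimal sketch:

/* Sketch: retry ioctl() while it is interrupted by a signal. */
static int _xioctl(int fd, unsigned long request, void* arg)
{
    int r;
    do {
        r = ioctl(fd, request, arg);
    } while (r == -1 && errno == EINTR);
    return r;
}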