示例#1
0
/* Allocate and initialise the per-surface YUV data block.
 *
 * The VE buffer size depends on the surface's chroma type; the 4:2:0
 * case pads the chroma plane dimensions to 32-pixel alignment.
 *
 * Returns VDP_STATUS_OK on success, VDP_STATUS_RESOURCES when an
 * allocation fails, or VDP_STATUS_INVALID_CHROMA_TYPE for an
 * unsupported chroma type.  Nothing is leaked on failure.
 */
static VdpStatus yuv_new(video_surface_ctx_t *video_surface)
{
	size_t buf_size;

	video_surface->yuv = calloc(1, sizeof(yuv_data_t));
	if (!video_surface->yuv)
		return VDP_STATUS_RESOURCES;

	video_surface->yuv->ref_count = 1;

	/* pick the buffer size first, then allocate at a single site */
	switch (video_surface->chroma_type)
	{
	case VDP_CHROMA_TYPE_444:
		buf_size = video_surface->luma_size * 3;
		break;
	case VDP_CHROMA_TYPE_422:
		buf_size = video_surface->luma_size * 2;
		break;
	case VDP_CHROMA_TYPE_420:
		buf_size = video_surface->luma_size +
			ALIGN(video_surface->width, 32) * ALIGN(video_surface->height / 2, 32);
		break;
	default:
		free(video_surface->yuv);
		return VDP_STATUS_INVALID_CHROMA_TYPE;
	}

	video_surface->yuv->data = ve_malloc(buf_size);
	if (!video_surface->yuv->data)
	{
		free(video_surface->yuv);
		return VDP_STATUS_RESOURCES;
	}

	return VDP_STATUS_OK;
}
示例#2
0
/* Create a VDPAU video surface.
 *
 * Validates the arguments, allocates the surface context plus the VE
 * memory backing it (sized according to the chroma type), and registers
 * the surface with the handle table.
 *
 * @param device      owning device handle
 * @param chroma_type VDP_CHROMA_TYPE_444/422/420
 * @param width       surface width in pixels (must be non-zero)
 * @param height      surface height in pixels (must be non-zero)
 * @param surface     out: new surface handle
 *
 * Returns VDP_STATUS_OK on success; on any failure every intermediate
 * allocation is released before returning an error status.
 */
VdpStatus vdp_video_surface_create(VdpDevice device, VdpChromaType chroma_type, uint32_t width, uint32_t height, VdpVideoSurface *surface)
{
	if (!surface)
		return VDP_STATUS_INVALID_POINTER;

	if (!width || !height)
		return VDP_STATUS_INVALID_SIZE;

	device_ctx_t *dev = handle_get(device);
	if (!dev)
		return VDP_STATUS_INVALID_HANDLE;

	video_surface_ctx_t *vs = calloc(1, sizeof(video_surface_ctx_t));
	if (!vs)
		return VDP_STATUS_RESOURCES;

	vs->device = dev;
	vs->width = width;
	vs->height = height;
	vs->chroma_type = chroma_type;

	/* plane dimensions padded to a multiple of 64 pixels */
	vs->plane_size = ((width + 63) & ~63) * ((height + 63) & ~63);

	switch (chroma_type)
	{
	case VDP_CHROMA_TYPE_444:
		vs->data = ve_malloc(vs->plane_size * 3);
		break;
	case VDP_CHROMA_TYPE_422:
		vs->data = ve_malloc(vs->plane_size * 2);
		break;
	case VDP_CHROMA_TYPE_420:
		vs->data = ve_malloc(vs->plane_size + (vs->plane_size / 2));
		break;
	default:
		free(vs);
		return VDP_STATUS_INVALID_CHROMA_TYPE;
	}

	if (!(vs->data))
	{
		free(vs);
		return VDP_STATUS_RESOURCES;
	}

	int handle = handle_create(vs);
	if (handle == -1)
	{
		/* BUGFIX: the VE buffer was previously leaked on this path */
		ve_free(vs->data);
		free(vs);
		return VDP_STATUS_RESOURCES;
	}

	*surface = handle;

	return VDP_STATUS_OK;
}
示例#3
0
文件: main.c 项目: coog009/cedrus
/* Allocate a reference-counted video frame with VE-backed luma and
 * chroma buffers.
 *
 * Buffer dimensions are padded up to 32-pixel alignment (VE requirement
 * per the other allocation sites in this project — TODO confirm).
 *
 * @param width   frame width in pixels
 * @param height  frame height in pixels
 * @param color   color format tag stored verbatim in the frame
 *
 * Returns the new frame with ref_counter == 1, or NULL when any
 * allocation fails (BUGFIX: previously allocation results were not
 * checked and an OOM dereferenced NULL).
 */
struct frame_t *frame_new(uint16_t width, uint16_t height, int color)
{
	int size = ((width + 31) & ~31) * ((height + 31) & ~31);

	struct frame_t *frame = malloc(sizeof(struct frame_t));
	if (!frame)
		return NULL;

	frame->luma_buffer = ve_malloc(size);
	frame->chroma_buffer = ve_malloc(size);
	if (!frame->luma_buffer || !frame->chroma_buffer)
	{
		/* guard ve_free: its NULL behaviour is not documented here */
		if (frame->luma_buffer)
			ve_free(frame->luma_buffer);
		if (frame->chroma_buffer)
			ve_free(frame->chroma_buffer);
		free(frame);
		return NULL;
	}

	frame->width = width;
	frame->height = height;
	frame->color = color;

	frame->ref_counter = 1;

	return frame;
}
示例#4
0
/* Create a VDPAU decoder context for the requested profile.
 *
 * Allocates the context and the VBV bitstream buffer, then binds the
 * profile-specific decode entry point.
 *
 * Returns VDP_STATUS_OK on success; on every failure path both the VBV
 * buffer and the context are released (BUGFIX: dec->data was previously
 * leaked when the profile was unsupported or handle_create() failed).
 */
VdpStatus vdp_decoder_create(VdpDevice device, VdpDecoderProfile profile, uint32_t width, uint32_t height, uint32_t max_references, VdpDecoder *decoder)
{
	device_ctx_t *dev = handle_get(device);
	if (!dev)
		return VDP_STATUS_INVALID_HANDLE;

	/* hardware limit on reference frames */
	if (max_references > 16)
		return VDP_STATUS_ERROR;

	decoder_ctx_t *dec = calloc(1, sizeof(decoder_ctx_t));
	if (!dec)
		return VDP_STATUS_RESOURCES;

	dec->device = dev;
	dec->profile = profile;
	dec->width = width;
	dec->height = height;

	dec->data = ve_malloc(VBV_SIZE);
	if (!(dec->data))
	{
		free(dec);
		return VDP_STATUS_RESOURCES;
	}

	switch (profile)
	{
	case VDP_DECODER_PROFILE_MPEG1:
	case VDP_DECODER_PROFILE_MPEG2_SIMPLE:
	case VDP_DECODER_PROFILE_MPEG2_MAIN:
		dec->decode = mpeg12_decode;
		break;

	case VDP_DECODER_PROFILE_H264_BASELINE:
	case VDP_DECODER_PROFILE_H264_MAIN:
	case VDP_DECODER_PROFILE_H264_HIGH:
		/* NOTE(review): if h264_init() allocates internally and then
		 * fails, that memory may be lost here — verify h264_init. */
		if (h264_init(dec))
			dec->decode = h264_decode;
		break;

	default:
		/* dec->decode stays NULL; rejected below */
		break;
	}

	if (!dec->decode)
	{
		ve_free(dec->data);
		free(dec);
		return VDP_STATUS_INVALID_DECODER_PROFILE;
	}

	int handle = handle_create(dec);
	if (handle == -1)
	{
		ve_free(dec->data);
		free(dec);
		return VDP_STATUS_RESOURCES;
	}

	*decoder = handle;

	return VDP_STATUS_OK;
}
示例#5
0
/* Publish a string to PubNub, JSON-encoding it first.
 *
 * Lazily creates the yajl generator (kept on nubat->g so it can be
 * reused after a transient failure), allocates a request object, and
 * hands the encoded JSON to pubnub_publish().
 *
 * @param nubat    PubNub AT-command context
 * @param buf      string payload to publish
 * @param buf_len  length of buf in bytes
 *
 * Returns veTrue when the publish was started, veFalse otherwise; the
 * request and generator are released on every error path.
 */
veBool pubnub_atPublishN(struct PubnubAt* nubat, char const *buf, size_t buf_len)
{
	struct PubnubRequest *nubreq;
	u8 const *json;
	size_t json_len;

	/* allocate request */
	if (!nubat->g) {
		nubat->g = yajl_gen_alloc(NULL);
		if (!nubat->g)
			return veFalse;
	}

	nubreq = (struct PubnubRequest *) ve_malloc(sizeof(struct PubnubRequest));
	if (!nubreq) {
		/* reuse the nubat->g next time */
		return veFalse;
	}

	pubnub_req_init(&nubat->nub, nubreq, 512, 512);

	/* build json data.. */
	if (yajl_gen_string(nubat->g, (u8*) buf, buf_len) != yajl_gen_status_ok) {
		ve_error("json: not a valid string");
		goto error;
	}

	if (yajl_gen_get_buf(nubat->g, &json, &json_len) != yajl_gen_status_ok) {
		ve_error("json: could not get buf");
		/* BUGFIX: returning here leaked nubreq; clean up like the
		 * other error paths */
		goto error;
	}

	/* sent it */
	if (pubnub_publish(nubreq, (char*) json, publish_callback) != RET_OK) {
		ve_error("could not pubnub_publish");
		goto error;
	}

	yajl_gen_clear(nubat->g);	/* empty buffers */
	yajl_gen_free(nubat->g);
	nubat->g = NULL;
	return veTrue;

error:
	pubnub_req_deinit(nubreq);
	ve_free(nubreq);
	yajl_gen_free(nubat->g);
	nubat->g = NULL;

	return veFalse;
}
示例#6
0
文件: str.c 项目: jhofstee/siwi2way
/** Initialises a string with a freshly allocated, zeroed buffer.
 *
 * A buffer of @a size bytes is allocated and zero-terminated; on
 * allocation failure the error flag is set instead.
 *
 * @param str 	the string to initialise (call is a no-op when NULL)
 * @param size 	initial size of the buffer in bytes
 * @param step 	number of bytes to expand the buffer with when needed
 *
 * @note should only be called once per string.
 * @note error flag is set when out of memory
 */
void str_new(Str *str, size_t size, size_t step)
{
	if (!str)
		return;

	/* allocate the initial buffer */
	str->data = (char*) ve_malloc(size);
	if (!str->data)
	{
		str->error = veTrue;
		return;
	}

	dbg_memset(str->data, 0, size);
	str->data[0] = 0;

	str->error = veFalse;
	str->str_len = 0;
	str->buf_size = size;
	str->step = step;
}
示例#7
0
文件: str.c 项目: jhofstee/siwi2way
/** Allocates a larger buffer and copies the existing string.
 *
 * @param str		the string
 * @param newSize	the new size of the buffer, must be larger
 *
 * @note when the string is non-growable (step == 0) or allocation
 *       fails, the string is freed via str_free() — presumably this
 *       also sets the error flag; verify str_free's contract.
 */
static void str_resize(Str *str, u16 newSize)
{
	char *tmp;

	/* a string already in error state is left untouched */
	if (str->error)
		return;

	/* step == 0 marks a fixed-size string: growing it is an error */
	if (str->step == 0)
	{
		str_free(str);
		return;
	}

#ifdef VE_REALLOC_MISSING
	/* no realloc available: emulate it with malloc + copy + free */
	// create new buffer
	tmp = (char*) ve_malloc(newSize);
	if (tmp == NULL)
	{
		str_free(str);
		return;
	}

	// copy contents
	dbg_memset(tmp, 0, newSize);
	if (str->data != NULL)
	{
		/* strcpy is safe here: newSize must exceed the current
		 * NUL-terminated contents (see function contract) */
		strcpy(tmp, str->data);
		ve_free(str->data);
	}

	str->data = tmp;
#else
	tmp = ve_realloc(str->data, newSize);
	if (tmp == NULL)
	{
		/* original buffer is still valid but we give up on the string */
		str_free(str);
		return;
	}
	str->data = tmp;
#endif

	str->buf_size = newSize;
}
示例#8
0
文件: main.c 项目: dasteria/cedrus
/* Decode a JPEG with the VE MPEG engine and display it via /dev/disp.
 *
 * Copies the bitstream into VE memory, programs the MPEG engine
 * registers, waits for the decode interrupt, then shows the result
 * until a key is pressed.  Exits the process on VE open/allocation
 * failure (consistent with the existing err() usage).
 *
 * BUGFIX: the three ve_malloc() results were previously unchecked
 * (memcpy into NULL on OOM), and the disp_open() failure path returned
 * without freeing the VE buffers or closing the VE.
 */
void decode_jpeg(struct jpeg_t *jpeg)
{
	if (!ve_open())
		err(EXIT_FAILURE, "Can't open VE");

	/* input padded to 64 KiB, output planes padded to 32-pixel alignment */
	int input_size =(jpeg->data_len + 65535) & ~65535;
	uint8_t *input_buffer = ve_malloc(input_size);
	int output_size = ((jpeg->width + 31) & ~31) * ((jpeg->height + 31) & ~31);
	uint8_t *luma_output = ve_malloc(output_size);
	uint8_t *chroma_output = ve_malloc(output_size);
	if (!input_buffer || !luma_output || !chroma_output)
		err(EXIT_FAILURE, "Can't allocate VE memory");
	memcpy(input_buffer, jpeg->data, jpeg->data_len);
	ve_flush_cache(input_buffer, jpeg->data_len);

	// activate MPEG engine
	void *ve_regs = ve_get(VE_ENGINE_MPEG, 0);

	// set restart interval
	writel(jpeg->restart_interval, ve_regs + VE_MPEG_JPEG_RES_INT);

	// set JPEG format
	set_format(jpeg, ve_regs);

	// set output buffers (Luma / Croma)
	writel(ve_virt2phys(luma_output), ve_regs + VE_MPEG_ROT_LUMA);
	writel(ve_virt2phys(chroma_output), ve_regs + VE_MPEG_ROT_CHROMA);

	// set size
	set_size(jpeg, ve_regs);

	// ??
	writel(0x00000000, ve_regs + VE_MPEG_SDROT_CTRL);

	// input end
	writel(ve_virt2phys(input_buffer) + input_size - 1, ve_regs + VE_MPEG_VLD_END);

	// ??
	writel(0x0000007c, ve_regs + VE_MPEG_CTRL);

	// set input offset in bits
	writel(0 * 8, ve_regs + VE_MPEG_VLD_OFFSET);

	// set input length in bits
	writel(jpeg->data_len * 8, ve_regs + VE_MPEG_VLD_LEN);

	// set input buffer
	writel(ve_virt2phys(input_buffer) | 0x70000000, ve_regs + VE_MPEG_VLD_ADDR);

	// set Quantisation Table
	set_quantization_tables(jpeg, ve_regs);

	// set Huffman Table
	writel(0x00000000, ve_regs + VE_MPEG_RAM_WRITE_PTR);
	set_huffman_tables(jpeg, ve_regs);

	// start
	writeb(0x0e, ve_regs + VE_MPEG_TRIGGER);

	// wait for interrupt
	ve_wait(1);

	// clean interrupt flag (??)
	writel(0x0000c00f, ve_regs + VE_MPEG_STATUS);

	// stop MPEG engine
	ve_put();

	//output_ppm(stdout, jpeg, output, output + (output_buf_size / 2));

	if (!disp_open())
	{
		fprintf(stderr, "Can't open /dev/disp\n");
		goto out;
	}

	// derive display color format from the sampling factors of component 0
	int color;
	switch ((jpeg->comp[0].samp_h << 4) | jpeg->comp[0].samp_v)
	{
	case 0x11:
	case 0x21:
		color = COLOR_YUV422;
		break;
	case 0x12:
	case 0x22:
	default:
		color = COLOR_YUV420;
		break;
	}

	disp_set_para(ve_virt2phys(luma_output), ve_virt2phys(chroma_output),
			color, jpeg->width, jpeg->height,
			0, 0, 800, 600);

	getchar();

	disp_close();

out:
	ve_free(input_buffer);
	ve_free(luma_output);
	ve_free(chroma_output);
	ve_close();
}
示例#9
0
/* Create a VDPAU decoder context for the requested profile.
 *
 * Allocates the context and the VBV bitstream buffer, dispatches to the
 * profile-specific decoder constructor, and registers the handle.  All
 * resources are released in reverse order on failure via the goto
 * cleanup chain.
 *
 * BUGFIX: the error epilogue used to return VDP_STATUS_RESOURCES
 * unconditionally, masking VDP_STATUS_INVALID_DECODER_PROFILE (and any
 * constructor-specific status) — it now propagates `ret`.
 */
VdpStatus vdp_decoder_create(VdpDevice device,
                             VdpDecoderProfile profile,
                             uint32_t width,
                             uint32_t height,
                             uint32_t max_references,
                             VdpDecoder *decoder)
{
	device_ctx_t *dev = handle_get(device);
	if (!dev)
		return VDP_STATUS_INVALID_HANDLE;

	/* hardware limit on reference frames */
	if (max_references > 16)
		return VDP_STATUS_ERROR;

	/* default status for the allocation-failure paths below */
	VdpStatus ret = VDP_STATUS_RESOURCES;

	decoder_ctx_t *dec = calloc(1, sizeof(decoder_ctx_t));
	if (!dec)
		goto err_ctx;

	dec->device = dev;
	dec->profile = profile;
	dec->width = width;
	dec->height = height;

	dec->data = ve_malloc(VBV_SIZE);
	if (!(dec->data))
		goto err_data;

	switch (profile)
	{
	case VDP_DECODER_PROFILE_MPEG1:
	case VDP_DECODER_PROFILE_MPEG2_SIMPLE:
	case VDP_DECODER_PROFILE_MPEG2_MAIN:
		ret = new_decoder_mpeg12(dec);
		break;

	case VDP_DECODER_PROFILE_H264_BASELINE:
	case VDP_DECODER_PROFILE_H264_MAIN:
	case VDP_DECODER_PROFILE_H264_HIGH:
		ret = new_decoder_h264(dec);
		break;

	case VDP_DECODER_PROFILE_MPEG4_PART2_SP:
	case VDP_DECODER_PROFILE_MPEG4_PART2_ASP:
		ret = new_decoder_mp4(dec);
		break;

	default:
		ret = VDP_STATUS_INVALID_DECODER_PROFILE;
		break;
	}

	if (ret != VDP_STATUS_OK)
		goto err_decoder;

	int handle = handle_create(dec);
	if (handle == -1)
	{
		ret = VDP_STATUS_RESOURCES;
		goto err_handle;
	}

	*decoder = handle;
	return VDP_STATUS_OK;

err_handle:
	/* undo whatever the profile constructor set up */
	if (dec->private_free)
		dec->private_free(dec);
err_decoder:
	ve_free(dec->data);
err_data:
	free(dec);
err_ctx:
	return ret;
}
示例#10
0
文件: main.c 项目: coog009/cedrus
/* Decode one MPEG picture with the VE hardware.
 *
 * Copies the bitstream into VE memory, programs the MPEG engine
 * registers (picture header, quant tables, reference and output
 * buffers), triggers decoding and waits for the interrupt.
 *
 * For I/P pictures the forward/backward reference frames are rotated
 * before the registers are programmed.
 *
 * BUGFIX: the ve_malloc() result was previously unchecked and fed
 * straight into memcpy() — a VE OOM crashed the process; we now bail
 * out early instead.  The register sequence itself is unchanged.
 */
void decode_mpeg(struct frame_buffers_t *frame_buffers, const struct mpeg_t * const mpeg)
{
	/* input padded to 64 KiB */
	int input_size = (mpeg->len + 65535) & ~65535;
	uint8_t *input_buffer = ve_malloc(input_size);
	if (!input_buffer)
		return;
	memcpy(input_buffer, mpeg->data, mpeg->len);
	ve_flush_cache(input_buffer, mpeg->len);

	void *ve_regs = ve_get_regs();

	// set quantisation tables
	set_quantization_tables(ve_regs, mpeg_default_intra_quant, mpeg_default_non_intra_quant);

	// set size (in macroblocks, rounded up)
	uint16_t width = (mpeg->width + 15) / 16;
	uint16_t height = (mpeg->height + 15) / 16;
	writel(ve_regs + 0x100 + 0x08, (width << 8) | height);
	writel(ve_regs + 0x100 + 0x0c, ((width * 16) << 16) | (height * 16));

	// set picture header
	uint32_t pic_header = 0x00000000;
	pic_header |= ((mpeg->picture_coding_type & 0xf) << 28);
	pic_header |= ((mpeg->f_code[0][0] & 0xf) << 24);
	pic_header |= ((mpeg->f_code[0][1] & 0xf) << 20);
	pic_header |= ((mpeg->f_code[1][0] & 0xf) << 16);
	pic_header |= ((mpeg->f_code[1][1] & 0xf) << 12);
	pic_header |= ((mpeg->intra_dc_precision & 0x3) << 10);
	pic_header |= ((mpeg->picture_structure & 0x3) << 8);
	pic_header |= ((mpeg->top_field_first & 0x1) << 7);
	pic_header |= ((mpeg->frame_pred_frame_dct & 0x1) << 6);
	pic_header |= ((mpeg->concealment_motion_vectors & 0x1) << 5);
	pic_header |= ((mpeg->q_scale_type & 0x1) << 4);
	pic_header |= ((mpeg->intra_vlc_format & 0x1) << 3);
	pic_header |= ((mpeg->alternate_scan & 0x1) << 2);
	pic_header |= ((mpeg->full_pel_forward_vector & 0x1) << 1);
	pic_header |= ((mpeg->full_pel_backward_vector & 0x1) << 0);
	writel(ve_regs + 0x100 + 0x00, pic_header);

	// ??
	writel(ve_regs + 0x100 + 0x10, 0x00000000);

	// ??
	writel(ve_regs + 0x100 + 0x14, 0x800001b8);

	// ??
	writel(ve_regs + 0x100 + 0xc4, 0x00000000);

	// ??
	writel(ve_regs + 0x100 + 0xc8, 0x00000000);

	// set forward/backward predicion buffers
	if (mpeg->picture_coding_type == PCT_I || mpeg->picture_coding_type == PCT_P)
	{
		frame_unref(frame_buffers->forward);
		frame_buffers->forward = frame_ref(frame_buffers->backward);
		frame_unref(frame_buffers->backward);
		frame_buffers->backward = frame_ref(frame_buffers->output);
	}
	writel(ve_regs + 0x100 + 0x50, ve_virt2phys(frame_buffers->forward->luma_buffer));
	writel(ve_regs + 0x100 + 0x54, ve_virt2phys(frame_buffers->forward->chroma_buffer));
	writel(ve_regs + 0x100 + 0x58, ve_virt2phys(frame_buffers->backward->luma_buffer));
	writel(ve_regs + 0x100 + 0x5c, ve_virt2phys(frame_buffers->backward->chroma_buffer));

	// set output buffers (Luma / Croma)
	writel(ve_regs + 0x100 + 0x48, ve_virt2phys(frame_buffers->output->luma_buffer));
	writel(ve_regs + 0x100 + 0x4c, ve_virt2phys(frame_buffers->output->chroma_buffer));
	writel(ve_regs + 0x100 + 0xcc, ve_virt2phys(frame_buffers->output->luma_buffer));
	writel(ve_regs + 0x100 + 0xd0, ve_virt2phys(frame_buffers->output->chroma_buffer));

	// set input offset in bits
	writel(ve_regs + 0x100 + 0x2c, (mpeg->pos - 4) * 8);

	// set input length in bits (+ little bit more, else it fails sometimes ??)
	writel(ve_regs + 0x100 + 0x30, (mpeg->len - (mpeg->pos - 4) + 16) * 8);

	// input end
	writel(ve_regs + 0x100 + 0x34, ve_virt2phys(input_buffer) + input_size - 1);

	// set input buffer
	writel(ve_regs + 0x100 + 0x28, ve_virt2phys(input_buffer) | 0x50000000);

	// trigger
	writel(ve_regs + 0x100 + 0x18, (mpeg->type ? 0x02000000 : 0x01000000) | 0x8000000f);

	// wait for interrupt
	ve_wait(1);

	// clean interrupt flag (??)
	writel(ve_regs + 0x100 + 0x1c, 0x0000c00f);

	ve_free(input_buffer);
}
示例#11
0
/* Create an H.264 encoder context.
 *
 * Validates the encoding parameters, allocates the context plus all VE
 * buffers (input picture, bytestream output, two reference pictures and
 * two auxiliary buffers), and initialises the stream state.
 *
 * Returns the new context, or NULL on invalid parameters or when any VE
 * allocation fails (all partial allocations are released via
 * h264enc_free() on the nomem path).
 */
h264enc *h264enc_new(const struct h264enc_params *p)
{
	h264enc *c;
	int i;

	/* check parameter validity */
	if (!IS_ALIGNED(p->src_width, 16) || !IS_ALIGNED(p->src_height, 16) ||
		!IS_ALIGNED(p->width, 2) || !IS_ALIGNED(p->height, 2) ||
		p->width > p->src_width || p->height > p->src_height)
	{
		MSG("invalid picture size");
		return NULL;
	}

	if (p->qp == 0 || p->qp > 47)
	{
		MSG("invalid QP");
		return NULL;
	}

	if (p->src_format != H264_FMT_NV12 && p->src_format != H264_FMT_NV16)
	{
		MSG("invalid color format");
		return NULL;
	}

	/* allocate memory for h264enc structure */
	c = calloc(1, sizeof(*c));
	if (c == NULL)
	{
		MSG("can't allocate h264enc data");
		return NULL;
	}

	/* copy parameters; dimensions are tracked in 16x16 macroblocks */
	c->mb_width = DIV_ROUND_UP(p->width, 16);
	c->mb_height = DIV_ROUND_UP(p->height, 16);
	c->mb_stride = p->src_width / 16;

	/* cropping removes the padding added by rounding up to macroblocks */
	c->crop_right = (c->mb_width * 16 - p->width) / 2;
	c->crop_bottom = (c->mb_height * 16 - p->height) / 2;

	c->profile_idc = p->profile_idc;
	c->level_idc = p->level_idc;

	c->entropy_coding_mode_flag = p->entropy_coding_mode ? 1 : 0;
	c->pic_init_qp = p->qp;
	c->keyframe_interval = p->keyframe_interval;

	/* SPS/PPS are emitted before the first frame */
	c->write_sps_pps = 1;
	c->current_frame_num = 0;

	/* allocate input buffer; src_format was validated above, so the
	 * switch always sets input_buffer_size */
	c->input_color_format = p->src_format;
	switch (c->input_color_format)
	{
	case H264_FMT_NV12:
		c->input_buffer_size = p->src_width * (p->src_height + p->src_height / 2);
		break;
	case H264_FMT_NV16:
		c->input_buffer_size = p->src_width * p->src_height * 2;
		break;
	}

	c->luma_buffer = ve_malloc(c->input_buffer_size);
	if (c->luma_buffer == NULL)
		goto nomem;

	/* chroma plane lives in the same allocation, after the luma plane */
	c->chroma_buffer = c->luma_buffer + p->src_width * p->src_height;

	/* allocate bytestream output buffer */
	c->bytestream_buffer_size = 1 * 1024 * 1024;
	c->bytestream_buffer = ve_malloc(c->bytestream_buffer_size);
	if (c->bytestream_buffer == NULL)
		goto nomem;

	/* allocate reference picture memory (32-pixel aligned planes) */
	unsigned int luma_size = ALIGN(c->mb_width * 16, 32) * ALIGN(c->mb_height * 16, 32);
	unsigned int chroma_size = ALIGN(c->mb_width * 16, 32) * ALIGN(c->mb_height * 8, 32);
	for (i = 0; i < 2; i++)
	{
		c->ref_picture[i].luma_buffer = ve_malloc(luma_size + chroma_size);
		c->ref_picture[i].chroma_buffer = c->ref_picture[i].luma_buffer + luma_size;
		c->ref_picture[i].extra_buffer = ve_malloc(luma_size / 4);
		if (c->ref_picture[i].luma_buffer == NULL || c->ref_picture[i].extra_buffer == NULL)
			goto nomem;
	}

	/* allocate unknown purpose buffers */
	c->extra_buffer_frame = ve_malloc(ALIGN(c->mb_width, 4) * c->mb_height * 8);
	c->extra_buffer_line = ve_malloc(c->mb_width * 32);
	if (c->extra_buffer_frame == NULL || c->extra_buffer_line == NULL)
		goto nomem;

	return c;

nomem:
	MSG("can't allocate VE memory");
	/* h264enc_free() releases whatever was allocated so far */
	h264enc_free(c);
	return NULL;
}
示例#12
0
文件: main.c 项目: avafinger/jepocx
/* JPEG proof-of-concept encoder: generates a test picture of the given
 * size and encodes it to a JPEG file with the VE AVC/JPEG engine.
 *
 * usage: jepoc width height quality [out.jpeg]
 *
 * BUGFIX: width/height/quality from argv were previously used
 * unvalidated — zero or negative sizes produced zero-byte VE
 * allocations; they are now range-checked.  Also uses %u for the
 * uint32_t dimensions (the old %d specifier mismatched the type).
 */
int main(int argc, char *argv[])
{
    int rc;
    char *outjpeg = "poc.jpeg";
    int quality = 100;
    uint32_t w = 0;
    uint32_t h = 0;
    uint32_t bufsize = 0;
    struct ve_mem *Y_mem = NULL;
    struct ve_mem *C_mem = NULL;
    struct ve_mem *J_mem = NULL;
    uint8_t *Y = NULL;
    uint8_t *C = NULL;
    uint8_t *J = NULL;
    uint32_t Jsize = 0;
    uint32_t Jwritten = 0;

    if (argc != 4 && argc != 5) {
        fprintf(stderr, "usage: %s width height quality [out.jpeg]\n", argv[0]);
        return 1;
    }

    /* validate numeric arguments before use */
    int arg_w = atoi(argv[1]);
    int arg_h = atoi(argv[2]);
    quality = atoi(argv[3]);
    if (arg_w <= 0 || arg_h <= 0) {
        fprintf(stderr, "invalid picture size %sx%s\n", argv[1], argv[2]);
        return 1;
    }
    if (quality < 1 || quality > 100) {
        fprintf(stderr, "quality must be between 1 and 100\n");
        return 1;
    }
    w = (uint32_t) arg_w;
    h = (uint32_t) arg_h;
    if (argc > 4)
        outjpeg = argv[4];

    rc = ve_open();
    if (rc == 0) {
        printf("[JEPOC] error: could not open ve engine!\n");
        return 1;
    }

    /* round dimensions up to the 16-pixel macroblock grid */
    w = (w + 15) & ~15;
    h = (h + 15) & ~15;
    printf("[JEPOC] picture %ux%-u at %d quality\n", w, h, quality);
    /* 3 times to leave enough room to try different color formats */
    bufsize = w * h;
    Y_mem = ve_malloc(bufsize);
    if (!Y_mem) {
        printf("[JEPOC] ve memory error! [%d]\n", __LINE__);
        return 1;
    }
    Y = (uint8_t *) Y_mem->virt;
    C_mem = ve_malloc(bufsize);
    if (!C_mem) {
        printf("[JEPOC] ve memory error! [%d]\n", __LINE__);
        return 1;
    }
    C = (uint8_t *) C_mem->virt;
    memset(Y, 0x80, bufsize);
    memset(C, 0x80, bufsize);
    picture_generate(w, h, Y, C);
    printf("[JEPOC] picture generated.\n");

    /* flush for H3 */
    ve_flush_cache(Y_mem);
    ve_flush_cache(C_mem);

    Jsize = 0x800000;
    J_mem = ve_malloc(Jsize);
    if (!J_mem) {
        printf("[JEPOC] ve memory error! [%d]\n", __LINE__);
        return 1;
    }
    J = (uint8_t *) J_mem->virt;

    /* program the AVC engine for JPEG encoding from the NV12 input */
    veavc_select_subengine();
    veisp_set_buffers(Y_mem, C_mem);
    veisp_init_picture(w, h, VEISP_COLOR_FORMAT_NV12);

    veavc_init_vle(J_mem, Jsize);
    veavc_init_ctrl(VEAVC_ENCODER_MODE_JPEG);
    veavc_jpeg_parameters(1, 0, 0, 0);

    /* emit the JPEG headers (SOF0, SOS, quant tables) */
    vejpeg_header_create(w, h, quality);
    vejpeg_write_SOF0();
    vejpeg_write_SOS();
    vejpeg_write_quantization();

    printf("[JEPOC] launch encoding.\n");
    veavc_launch_encoding();
    ve_wait(2);
    veavc_check_status();

    Jwritten = veavc_get_written();
    /* flush for H3 */
    ve_flush_cache(J_mem);
    vejpeg_write_file(outjpeg, J, Jwritten);
    printf("[JEPOC] written %d bytes to %s\n", Jwritten, outjpeg);

    ve_free(J_mem);
    ve_free(C_mem);
    ve_free(Y_mem);
    ve_close();
    return 0;
}