/*
 * Tear down a VDPAU device handle: release the G2D file descriptor
 * (held only while the OSD layer is enabled), shut down the VE engine,
 * close the X connection and remove the handle from the handle table.
 */
VdpStatus vdp_device_destroy(VdpDevice device)
{
	device_ctx_t *ctx = handle_get(device);

	if (ctx == NULL)
		return VDP_STATUS_INVALID_HANDLE;

	if (ctx->osd_enabled)
		close(ctx->g2d_fd);

	ve_close();
	XCloseDisplay(ctx->display);
	handle_destroy(device);

	return VDP_STATUS_OK;
}
int main(const int argc, const char **argv) { int rc; if (argc != 5) { printf("Usage: %s <infile> <width> <height> <outfile>\n", argv[0]); return EXIT_FAILURE; } int width = atoi(argv[2]); int height = atoi(argv[3]); if (!ve_open()) return EXIT_FAILURE; int in = 0, out; if (strcmp(argv[1], "-") != 0) { if ((in = open(argv[1], O_RDONLY)) == -1) { printf("could not open input file\n"); return EXIT_FAILURE; } } if ((out = open(argv[4], O_CREAT | O_RDWR | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH)) == -1) { printf("could not open output file\n"); return EXIT_FAILURE; } struct h264enc_params params; params.src_width = (width + 15) & ~15; params.width = width; params.src_height = (height + 15) & ~15; params.height = height; params.src_format = H264_FMT_NV12; params.profile_idc = 77; params.level_idc = 41; params.entropy_coding_mode = H264_EC_CABAC; params.qp = 24; params.keyframe_interval = 25; h264enc *encoder = h264enc_new(¶ms); if (encoder == NULL) { printf("could not create encoder\n"); goto err; } void* output_buf = h264enc_get_bytestream_buffer(encoder); int input_size = params.src_width * (params.src_height + params.src_height / 2); void* input_buf = h264enc_get_input_buffer(encoder); int len; while (input_buf && read_frame(in, input_buf, input_size)) { if (h264enc_encode_picture(encoder)) { len = h264enc_get_bytestream_length(encoder); if (len <= 0) { printf("Error encoding return len: %d\n", len); break; } rc = write(out, output_buf, len); if (rc != len) { printf("Error writing len: %d\n", len); } } else printf("encoding error\n"); } h264enc_free(encoder); err: ve_close(); close(out); close(in); return EXIT_SUCCESS; }
void decode_jpeg(struct jpeg_t *jpeg) { if (!ve_open()) err(EXIT_FAILURE, "Can't open VE"); int input_size =(jpeg->data_len + 65535) & ~65535; uint8_t *input_buffer = ve_malloc(input_size); int output_size = ((jpeg->width + 31) & ~31) * ((jpeg->height + 31) & ~31); uint8_t *luma_output = ve_malloc(output_size); uint8_t *chroma_output = ve_malloc(output_size); memcpy(input_buffer, jpeg->data, jpeg->data_len); ve_flush_cache(input_buffer, jpeg->data_len); // activate MPEG engine void *ve_regs = ve_get(VE_ENGINE_MPEG, 0); // set restart interval writel(jpeg->restart_interval, ve_regs + VE_MPEG_JPEG_RES_INT); // set JPEG format set_format(jpeg, ve_regs); // set output buffers (Luma / Croma) writel(ve_virt2phys(luma_output), ve_regs + VE_MPEG_ROT_LUMA); writel(ve_virt2phys(chroma_output), ve_regs + VE_MPEG_ROT_CHROMA); // set size set_size(jpeg, ve_regs); // ?? writel(0x00000000, ve_regs + VE_MPEG_SDROT_CTRL); // input end writel(ve_virt2phys(input_buffer) + input_size - 1, ve_regs + VE_MPEG_VLD_END); // ?? writel(0x0000007c, ve_regs + VE_MPEG_CTRL); // set input offset in bits writel(0 * 8, ve_regs + VE_MPEG_VLD_OFFSET); // set input length in bits writel(jpeg->data_len * 8, ve_regs + VE_MPEG_VLD_LEN); // set input buffer writel(ve_virt2phys(input_buffer) | 0x70000000, ve_regs + VE_MPEG_VLD_ADDR); // set Quantisation Table set_quantization_tables(jpeg, ve_regs); // set Huffman Table writel(0x00000000, ve_regs + VE_MPEG_RAM_WRITE_PTR); set_huffman_tables(jpeg, ve_regs); // start writeb(0x0e, ve_regs + VE_MPEG_TRIGGER); // wait for interrupt ve_wait(1); // clean interrupt flag (??) 
writel(0x0000c00f, ve_regs + VE_MPEG_STATUS); // stop MPEG engine ve_put(); //output_ppm(stdout, jpeg, output, output + (output_buf_size / 2)); if (!disp_open()) { fprintf(stderr, "Can't open /dev/disp\n"); return; } int color; switch ((jpeg->comp[0].samp_h << 4) | jpeg->comp[0].samp_v) { case 0x11: case 0x21: color = COLOR_YUV422; break; case 0x12: case 0x22: default: color = COLOR_YUV420; break; } disp_set_para(ve_virt2phys(luma_output), ve_virt2phys(chroma_output), color, jpeg->width, jpeg->height, 0, 0, 800, 600); getchar(); disp_close(); ve_free(input_buffer); ve_free(luma_output); ve_free(chroma_output); ve_close(); }
/*
 * Minimal MPEG-1/MPEG-2 player: demuxes <filename> with libavformat,
 * decodes each video packet on the Cedar VE MPEG engine, and shows the
 * frames one keypress at a time.
 *
 * Display-order reordering is done with a small ring buffer indexed by
 * (gop_offset + temporal_reference); the code itself notes this scheme
 * is "not safe, only for testing".
 */
int main(int argc, char** argv)
{
	AVFormatContext* avfmt_ctx = NULL;
	int video_stream;
	enum AVCodecID video_codec;

	if (argc < 2) {
		fprintf(stderr, "Usage: %s filename\n", argv[0]);
		exit(EXIT_FAILURE);
	}

	char *filename = argv[1];

	av_register_all();

	if (avformat_open_input(&avfmt_ctx, filename, NULL, NULL) < 0) {
		fprintf(stderr, "Could not open source file %s\n", filename);
		exit(1);
	}

	if (avformat_find_stream_info(avfmt_ctx, NULL) < 0) {
		fprintf(stderr, "Could not find stream information\n");
		avformat_close_input(&avfmt_ctx);
		exit(1);
	}

	video_stream = av_find_best_stream(avfmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
	if (video_stream < 0) {
		fprintf(stderr, "Could not find video stream in input file\n");
		avformat_close_input(&avfmt_ctx);
		exit(1);
	}

	/* only MPEG-1/2 are handled by the VE decode path below */
	video_codec = avfmt_ctx->streams[video_stream]->codec->codec_id;
	if (video_codec != AV_CODEC_ID_MPEG1VIDEO && video_codec != AV_CODEC_ID_MPEG2VIDEO) {
		fprintf(stderr, "Can't handle codec %s\n", avcodec_get_name(video_codec));
		avformat_close_input(&avfmt_ctx);
		exit(1);
	}

	AVPacket pkt;
	av_init_packet(&pkt);
	pkt.data = NULL;
	pkt.size = 0;

	struct mpeg_t mpeg;
	memset(&mpeg, 0, sizeof(mpeg));
	if (video_codec == AV_CODEC_ID_MPEG1VIDEO)
		mpeg.type = MPEG1;
	else
		mpeg.type = MPEG2;

	if (!ve_open())
		err(EXIT_FAILURE, "Can't open VE");

	/* forward/backward reference frames plus the frame being decoded */
	struct frame_buffers_t frame_buffers = { NULL, NULL, NULL };

	unsigned int disp_frame = 0, gop_offset = 0, gop_frames = 0, last_gop = 0;
	/* ring buffer of decoded frames, indexed in display order */
	struct frame_t *frames[RING_BUFFER_SIZE];
	memset(frames, 0, sizeof(frames));

	// activate MPEG engine
	writel(ve_get_regs() + 0x00, 0x00130000);

	printf("Playing now... press Enter for next frame!\n");

	while (av_read_frame(avfmt_ctx, &pkt) >= 0) {
		mpeg.data = pkt.data;
		mpeg.len = pkt.size;
		mpeg.pos = 0;
		if (pkt.stream_index == video_stream && parse_mpeg(&mpeg)) {
			// create output buffer
			frame_buffers.output = frame_new(mpeg.width, mpeg.height, COLOR_YUV420);
			/* first frame also seeds both reference slots */
			if (!frame_buffers.backward)
				frame_buffers.backward = frame_ref(frame_buffers.output);
			if (!frame_buffers.forward)
				frame_buffers.forward = frame_ref(frame_buffers.output);

			// decode frame
			decode_mpeg(&frame_buffers, &mpeg);

			// simple frame reordering (not safe, only for testing)
			// count frames
			if (mpeg.gop > last_gop) {
				last_gop = mpeg.gop;
				gop_offset += gop_frames;
				gop_frames = 0;
			}
			gop_frames++;

			// save frame in ringbuffer
			if (frames[(gop_offset + mpeg.temporal_reference) % RING_BUFFER_SIZE] != NULL) {
				printf("Buffer overrun!\n");
				frame_unref(frames[(gop_offset + mpeg.temporal_reference) % RING_BUFFER_SIZE]);
			}
			frames[(gop_offset + mpeg.temporal_reference) % RING_BUFFER_SIZE] = frame_buffers.output;

			// if we decoded a displayable frame, show it
			if (frames[disp_frame % RING_BUFFER_SIZE] != NULL) {
				frame_show(frames[disp_frame % RING_BUFFER_SIZE]);
				/* drop the frame two slots back — it is no longer
				 * referenced.  NOTE(review): relies on unsigned
				 * wraparound when disp_frame < 2; the slot is
				 * still NULL then, so frame_unref presumably
				 * accepts NULL — confirm. */
				frame_unref(frames[(disp_frame - 2) % RING_BUFFER_SIZE]);
				frames[(disp_frame - 2) % RING_BUFFER_SIZE] = NULL;
				disp_frame++;
				getchar();
			}
		}
		av_free_packet(&pkt);
	}

	// stop MPEG engine
	writel(ve_get_regs() + 0x0, 0x00130007);

	// show left over frames
	while (disp_frame < gop_offset + gop_frames && frames[disp_frame % RING_BUFFER_SIZE] != NULL) {
		frame_show(frames[disp_frame % RING_BUFFER_SIZE]);
		frame_unref(frames[(disp_frame - 2) % RING_BUFFER_SIZE]);
		frames[(disp_frame - 2) % RING_BUFFER_SIZE] = NULL;
		disp_frame++;
		getchar();
	}

	disp_close();

	/* release the last two displayed frames and both reference frames */
	frame_unref(frames[(disp_frame - 2) % RING_BUFFER_SIZE]);
	frame_unref(frames[(disp_frame - 1) % RING_BUFFER_SIZE]);
	frame_unref(frame_buffers.forward);
	frame_unref(frame_buffers.backward);

	ve_close();
	avformat_close_input(&avfmt_ctx);

	return 0;
}
int main(int argc, char *argv[]) { int rc; char *outjpeg = "poc.jpeg"; int quality = 100; uint32_t w = 0; uint32_t h = 0; uint32_t bufsize = 0; struct ve_mem *Y_mem = NULL; struct ve_mem *C_mem = NULL; struct ve_mem *J_mem = NULL; uint8_t *Y = NULL; uint8_t *C = NULL; uint8_t *J = NULL; uint32_t Jsize = 0; uint32_t Jwritten = 0; if (argc != 4 && argc != 5) { fprintf(stderr, "usage: %s width height quality [out.jpeg]\n", argv[0]); return 1; } w = atoi(argv[1]); h = atoi(argv[2]); quality = atoi(argv[3]); if (argc > 4) outjpeg = argv[4]; rc = ve_open(); if (rc == 0) { printf("[JEPOC] error: could not open ve engine!\n"); return 1; } w = (w + 15) & ~15; h = (h + 15) & ~15; printf("[JEPOC] picture %dx%-d at %d quality\n", w, h, quality); /* 3 times to leave enough room to try different color formats */ bufsize = w * h; Y_mem = ve_malloc(bufsize); if (!Y_mem) { printf("[JEPOC] ve memory error! [%d]\n", __LINE__); return 1; } Y = (uint8_t *) Y_mem->virt; C_mem = ve_malloc(bufsize); if (!C_mem) { printf("[JEPOC] ve memory error! [%d]\n", __LINE__); return 1; } C = (uint8_t *) C_mem->virt; memset(Y, 0x80, bufsize); memset(C, 0x80, bufsize); picture_generate(w, h, Y, C); printf("[JEPOC] picture generated.\n"); /* flush for H3 */ ve_flush_cache(Y_mem); ve_flush_cache(C_mem); Jsize = 0x800000; J_mem = ve_malloc(Jsize); if (!J_mem) { printf("[JEPOC] ve memory error! 
[%d]\n", __LINE__); return 1; } J = (uint8_t *) J_mem->virt; veavc_select_subengine(); veisp_set_buffers(Y_mem, C_mem); veisp_init_picture(w, h, VEISP_COLOR_FORMAT_NV12); veavc_init_vle(J_mem, Jsize); veavc_init_ctrl(VEAVC_ENCODER_MODE_JPEG); veavc_jpeg_parameters(1, 0, 0, 0); vejpeg_header_create(w, h, quality); vejpeg_write_SOF0(); vejpeg_write_SOS(); vejpeg_write_quantization(); printf("[JEPOC] launch encoding.\n"); veavc_launch_encoding(); ve_wait(2); veavc_check_status(); Jwritten = veavc_get_written(); /* flush for H3 */ ve_flush_cache(J_mem); vejpeg_write_file(outjpeg, J, Jwritten); printf("[JEPOC] written %d bytes to %s\n", Jwritten, outjpeg); ve_free(J_mem); ve_free(C_mem); ve_free(Y_mem); ve_close(); return 0; }