/**
 * Put a video frame into the video mixer
 *
 * @param src Video source
 * @param frame Video frame
 */
void vidmix_source_put(struct vidmix_source *src, const struct vidframe *frame)
{
	/* Only YUV420P input is accepted; anything else is dropped */
	if (!src || !frame || frame->fmt != VID_FMT_YUV420P)
		return;

	/* (Re-)allocate the receive frame when missing or when the
	 * incoming frame size has changed */
	if (!src->frame_rx || !vidsz_cmp(&src->frame_rx->size, &frame->size)) {

		struct vidframe *frm;
		int err;

		err = vidframe_alloc(&frm, VID_FMT_YUV420P, &frame->size);
		if (err)
			return;

		/* Swap the frame under the mixer write-lock so readers
		 * never observe a half-installed frame; clear_all()
		 * invalidates any cached mixer state for the old size */
		pthread_rwlock_wrlock(&src->mix->rwlock);
		mem_deref(src->frame_rx);
		src->frame_rx = frm;
		clear_all(src->mix);
		pthread_rwlock_unlock(&src->mix->rwlock);
	}

	/* NOTE(review): the pixel copy happens outside the lock --
	 * presumably there is a single writer per source; verify callers */
	vidframe_copy(src->frame_rx, frame);
}
/**
 * Encode a video frame with the gstreamer pipeline
 *
 * @param st     Video encoder state
 * @param update True if a picture update (keyframe) was requested
 * @param frame  Video frame to encode (must be YUV420P)
 *
 * @return 0 if success, otherwise errorcode
 */
int gst_video1_encode(struct videnc_state *st, bool update, const struct vidframe *frame)
{
	bool need_reinit;
	int ret;

	if (!st || !frame || frame->fmt != VID_FMT_YUV420P)
		return EINVAL;

	/* The pipeline must be rebuilt when it is not valid yet or
	 * when the frame size differs from the configured one */
	need_reinit = !st->streamer.valid ||
		      !vidsz_cmp(&st->encoder.size, &frame->size);

	if (need_reinit) {
		pipeline_close(st);

		ret = pipeline_init(st, &frame->size);
		if (ret) {
			warning("gst_video: pipeline initialization failed\n");
			return ret;
		}

		st->encoder.size = frame->size;
	}

	if (update) {
		debug("gst_video: gstreamer picture update"
		      ", it's not implemented...\n");
	}

	/*
	 * Push frame into pipeline.
	 * Function call will return once frame has been processed
	 * completely.
	 */
	return pipeline_push(st, frame);
}
/**
 * Encode a video frame with the gstreamer codec
 *
 * @param st     Video encoder state
 * @param update True if a picture update was requested (not implemented)
 * @param frame  Video frame to encode (must be YUV420P)
 * @param pkth   Packet handler called with the encoded data
 * @param arg    Handler argument
 *
 * @return 0 if success, otherwise errorcode
 */
int gst_video_encode(struct videnc_state *st, bool update, const struct vidframe *frame, videnc_packet_h *pkth, void *arg)
{
	uint8_t *data;
	size_t size, ysize, usize, vsize;
	int height;
	int err;

	if (!st || !frame || !pkth || frame->fmt != VID_FMT_YUV420P)
		return EINVAL;

	/* (Re-)initialize the encoder when not inited or size changed */
	if (!st->gst_inited || !vidsz_cmp(&st->size, &frame->size)) {

		err = gst_encoder_init(st, frame->size.w, frame->size.h,
				       st->fps, st->bitrate);
		if (err) {
			warning("gst_video codec: gst_video_alloc failed\n");
			return err;
		}

		st->pkth = pkth;
		st->pkth_arg = arg;

		/* To detect if requested size was changed. */
		st->size = frame->size;
	}

	if (update) {
		debug("gst_video: gstreamer picture update"
		      ", it's not implemented...\n");
	}

	height = frame->size.h;

	/* NOTE: I420 (YUV420P): chroma planes are half the luma height.
	 * Use exact integer arithmetic (height/2) instead of the former
	 * floating-point "* 0.5", which went through double and back. */
	ysize = (size_t)frame->linesize[0] * (size_t)height;
	usize = (size_t)frame->linesize[1] * (size_t)(height / 2);
	vsize = (size_t)frame->linesize[2] * (size_t)(height / 2);

	data = malloc(ysize + usize + vsize); /* XXX: memory-leak ? */
	if (!data)
		return ENOMEM;

	size = 0;

	/* XXX: avoid memcpy here ? */
	memcpy(&data[size], frame->data[0], ysize);
	size += ysize;
	memcpy(&data[size], frame->data[1], usize);
	size += usize;
	memcpy(&data[size], frame->data[2], vsize);
	size += vsize;

	/* ownership of 'data' is handed to gst_video_push */
	return gst_video_push(st, data, size);
}
/** * Copy content between to equally sized video frames of same pixel format * * @param dst Destination frame * @param src Source frame */ void vidframe_copy(struct vidframe *dst, const struct vidframe *src) { const uint8_t *ds0, *ds1, *ds2; unsigned lsd, lss, w, h, y; uint8_t *dd0, *dd1, *dd2; if (!dst || !src) return; if (!vidsz_cmp(&dst->size, &src->size)) return; if (dst->fmt != src->fmt) return; switch (dst->fmt) { case VID_FMT_YUV420P: lsd = dst->linesize[0]; lss = src->linesize[0]; dd0 = dst->data[0]; dd1 = dst->data[1]; dd2 = dst->data[2]; ds0 = src->data[0]; ds1 = src->data[1]; ds2 = src->data[2]; w = dst->size.w & ~1; h = dst->size.h & ~1; for (y=0; y<h; y+=2) { memcpy(dd0, ds0, w); dd0 += lsd; ds0 += lss; memcpy(dd0, ds0, w); dd0 += lsd; ds0 += lss; memcpy(dd1, ds1, w/2); dd1 += lsd/2; ds1 += lss/2; memcpy(dd2, ds2, w/2); dd2 += lsd/2; ds2 += lss/2; } break; default: (void)re_printf("vidframe_copy(): unsupported format\n"); break; } }
static int display(struct vidisp_st *st, const char *title, const struct vidframe *frame) { struct vidframe frame_rgb; int err = 0; if (!vidsz_cmp(&st->size, &frame->size)) { char capt[256]; if (st->size.w && st->size.h) { info("x11: reset: %u x %u ---> %u x %u\n", st->size.w, st->size.h, frame->size.w, frame->size.h); } if (st->internal && !st->win) err = create_window(st, &frame->size); err |= x11_reset(st, &frame->size); if (err) return err; if (title) { re_snprintf(capt, sizeof(capt), "%s - %u x %u", title, frame->size.w, frame->size.h); } else { re_snprintf(capt, sizeof(capt), "%u x %u", frame->size.w, frame->size.h); } XStoreName(st->disp, st->win, capt); } /* Convert from YUV420P to RGB */ vidframe_init_buf(&frame_rgb, st->pixfmt, &frame->size, (uint8_t *)st->shm.shmaddr); vidconv(&frame_rgb, frame, 0); /* draw */ if (st->xshmat) XShmPutImage(st->disp, st->win, st->gc, st->image, 0, 0, 0, 0, st->size.w, st->size.h, false); else XPutImage(st->disp, st->win, st->gc, st->image, 0, 0, 0, 0, st->size.w, st->size.h); XSync(st->disp, false); return err; }
static inline void source_mix_full(struct vidframe *mframe, const struct vidframe *frame_src) { if (!frame_src) return; if (vidsz_cmp(&mframe->size, &frame_src->size)) { vidframe_copy(mframe, frame_src); } else { struct vidrect rect; rect.w = mframe->size.w; rect.h = mframe->size.h; rect.x = 0; rect.y = 0; vidconv_aspect(mframe, frame_src, &rect); } }
/**
 * Set video mixer output frame size
 *
 * @param src Video mixer source
 * @param sz  Size of output video frame
 *
 * @return 0 for success, otherwise error code
 */
int vidmix_source_set_size(struct vidmix_source *src, const struct vidsz *sz)
{
	struct vidframe *txf;
	int err;

	if (!src || !sz)
		return EINVAL;

	/* already at the requested size -- nothing to do */
	if (src->frame_tx && vidsz_cmp(&src->frame_tx->size, sz))
		return 0;

	err = vidframe_alloc(&txf, VID_FMT_YUV420P, sz);
	if (err)
		return err;

	clear_frame(txf);

	/* install the new TX frame under the source mutex */
	pthread_mutex_lock(&src->mutex);
	mem_deref(src->frame_tx);
	src->frame_tx = txf;
	pthread_mutex_unlock(&src->mutex);

	return 0;
}
/*
 * Decode (and optionally scale) one demuxed packet and deliver the
 * resulting YUV420P frame to the application handler.
 *
 * Fix: the decode/scale error paths previously used plain "return",
 * leaking the AVFrame allocated with av_frame_alloc(). All error
 * paths now route through the cleanup label (same pattern as the
 * newer avformat variant of this function).
 */
static void handle_packet(struct vidsrc_st *st, AVPacket *pkt)
{
	AVPicture pict;
	AVFrame *frame = NULL;
	struct vidframe vf;
	struct vidsz sz;
	unsigned i;
	bool pict_allocated = false;  /* track whether pict needs freeing */

	if (st->codec) {
		int got_pict, ret;

#if LIBAVUTIL_VERSION_INT >= ((52<<16)+(20<<8)+100)
		frame = av_frame_alloc();
#else
		frame = avcodec_alloc_frame();
#endif

#if LIBAVCODEC_VERSION_INT <= ((52<<16)+(23<<8)+0)
		ret = avcodec_decode_video(st->ctx, frame, &got_pict,
					   pkt->data, pkt->size);
#else
		ret = avcodec_decode_video2(st->ctx, frame, &got_pict, pkt);
#endif
		if (ret < 0 || !got_pict)
			goto end;

		sz.w = st->ctx->width;
		sz.h = st->ctx->height;

		/* check if size changed */
		if (!vidsz_cmp(&sz, &st->sz)) {
			info("size changed: %d x %d ---> %d x %d\n",
			     st->sz.w, st->sz.h, sz.w, sz.h);
			st->sz = sz;

			if (st->sws) {
				sws_freeContext(st->sws);
				st->sws = NULL;
			}
		}

		if (!st->sws) {
			info("scaling: %d x %d ---> %d x %d\n",
			     st->sz.w, st->sz.h,
			     st->app_sz.w, st->app_sz.h);

			st->sws = sws_getContext(st->sz.w, st->sz.h,
						 st->ctx->pix_fmt,
						 st->app_sz.w, st->app_sz.h,
						 PIX_FMT_YUV420P,
						 SWS_BICUBIC,
						 NULL, NULL, NULL);
			if (!st->sws)
				goto end;
		}

		ret = avpicture_alloc(&pict, PIX_FMT_YUV420P,
				      st->app_sz.w, st->app_sz.h);
		if (ret < 0)
			goto end;

		pict_allocated = true;

		ret = sws_scale(st->sws, SRCSLICE_CAST frame->data,
				frame->linesize, 0, st->sz.h,
				pict.data, pict.linesize);
		if (ret <= 0)
			goto end;
	}
	else {
		/* raw stream: just wrap the packet data */
		avpicture_fill(&pict, pkt->data, PIX_FMT_YUV420P,
			       st->sz.w, st->sz.h);
	}

	vf.size = st->app_sz;
	vf.fmt  = VID_FMT_YUV420P;
	for (i=0; i<4; i++) {
		vf.data[i]     = pict.data[i];
		vf.linesize[i] = pict.linesize[i];
	}

	st->frameh(&vf, st->arg);

 end:
	/* free the scaled picture only if it was actually allocated */
	if (pict_allocated)
		avpicture_free(&pict);

	if (frame) {
#if LIBAVUTIL_VERSION_INT >= ((52<<16)+(20<<8)+100)
		av_frame_free(&frame);
#else
		av_free(frame);
#endif
	}
}
/*
 * Encode one video frame with libavcodec and hand the encoded
 * bitstream to the codec-specific packetizer.
 * Returns 0 on success, EINVAL/ENOTSUP/EBADMSG/EPROTO on error.
 */
int encode(struct videnc_state *st, bool update, const struct vidframe *frame)
{
	int i, err, ret;
	int pix_fmt;

	if (!st || !frame)
		return EINVAL;

	/* map the wire pixel format to the avcodec one */
	switch (frame->fmt) {

	case VID_FMT_YUV420P:
		pix_fmt = AV_PIX_FMT_YUV420P;
		break;

	case VID_FMT_NV12:
		pix_fmt = AV_PIX_FMT_NV12;
		break;

	default:
		warning("avcodec: pixel format not supported (%s)\n",
			vidfmt_name(frame->fmt));
		return ENOTSUP;
	}

	/* (re-)open the encoder when missing or when size changed */
	if (!st->ctx || !vidsz_cmp(&st->encsize, &frame->size)) {

		err = open_encoder(st, &st->encprm, &frame->size, pix_fmt);
		if (err) {
			warning("avcodec: open_encoder: %m\n", err);
			return err;
		}
	}

	/* the AVFrame borrows the caller's plane pointers -- no copy */
	for (i=0; i<4; i++) {
		st->pict->data[i]     = frame->data[i];
		st->pict->linesize[i] = frame->linesize[i];
	}
	st->pict->pts = st->pts++;

	if (update) {
		/* force an intra (key) frame on picture update request */
		debug("avcodec: encoder picture update\n");
		st->pict->key_frame = 1;
#ifdef FF_I_TYPE
		st->pict->pict_type = FF_I_TYPE;  /* Infra Frame */
#else
		st->pict->pict_type = AV_PICTURE_TYPE_I;
#endif
	}
	else {
		st->pict->key_frame = 0;
		st->pict->pict_type = 0;
	}

	mbuf_rewind(st->mb);

#if LIBAVCODEC_VERSION_INT >= ((54<<16)+(1<<8)+0)
	/* modern API: encode into a caller-provided AVPacket that
	 * borrows the mbuf as its backing buffer */
	do {
		AVPacket avpkt;
		int got_packet;

		av_init_packet(&avpkt);

		avpkt.data = st->mb->buf;
		avpkt.size = (int)st->mb->size;

		ret = avcodec_encode_video2(st->ctx, &avpkt,
					    st->pict, &got_packet);
		if (ret < 0)
			return EBADMSG;
		if (!got_packet)
			return 0;

		mbuf_set_end(st->mb, avpkt.size);
	} while (0);
#else
	/* legacy API: encode straight into the mbuf */
	ret = avcodec_encode_video(st->ctx, st->mb->buf,
				   (int)st->mb->size, st->pict);
	if (ret < 0 )
		return EBADMSG;

	/* todo: figure out proper buffer size */
	if (ret > (int)st->sz_max) {
		debug("avcodec: grow encode buffer %u --> %d\n",
		      st->sz_max, ret);
		st->sz_max = ret;
	}

	mbuf_set_end(st->mb, ret);
#endif

	/* packetize for RTP according to the negotiated codec */
	switch (st->codec_id) {

	case AV_CODEC_ID_H263:
		err = h263_packetize(st, st->mb, st->pkth, st->arg);
		break;

	case AV_CODEC_ID_H264:
		err = h264_packetize(st->mb->buf, st->mb->end,
				     st->encprm.pktsize,
				     st->pkth, st->arg);
		break;

	case AV_CODEC_ID_MPEG4:
		err = general_packetize(st->mb, st->encprm.pktsize,
					st->pkth, st->arg);
		break;

	default:
		err = EPROTO;
		break;
	}

	return err;
}
/*
 * Display a video frame in the X11 window, handling pending
 * window-delete events first. Returns 0 on success, ENODEV when the
 * display is gone or cannot be (re-)created.
 */
static int display(struct vidisp_st *st, const char *title,
		   const struct vidframe *frame)
{
	struct vidframe frame_rgb;
	int err = 0;

	if (!st->disp)
		return ENODEV;

	/*
	 * check for window delete - without blocking
	 */
	while (XPending(st->disp)) {

		XEvent e;

		XNextEvent(st->disp, &e);

		if (e.type == ClientMessage) {
			if ((Atom) e.xclient.data.l[0] == st->XwinDeleted) {

				info("x11: window deleted\n");

				/*
				 * we have to bail as all of the display
				 * pointers are bad.
				 */
				close_window(st);
				return ENODEV;
			}
		}
	}

	/* window/backing store must be re-created on size change */
	if (!vidsz_cmp(&st->size, &frame->size)) {
		char capt[256];

		if (st->size.w && st->size.h) {
			info("x11: reset: %u x %u ---> %u x %u\n",
			     st->size.w, st->size.h,
			     frame->size.w, frame->size.h);
		}

		if (st->internal && !st->win)
			err = create_window(st, &frame->size);

		err |= x11_reset(st, &frame->size);
		if (err)
			return err;

		if (title) {
			re_snprintf(capt, sizeof(capt), "%s - %u x %u",
				    title, frame->size.w, frame->size.h);
		}
		else {
			re_snprintf(capt, sizeof(capt), "%u x %u",
				    frame->size.w, frame->size.h);
		}

		XStoreName(st->disp, st->win, capt);
	}

	/* Convert from YUV420P to RGB */
	vidframe_init_buf(&frame_rgb, st->pixfmt, &frame->size,
			  (uint8_t *)st->shm.shmaddr);

	vidconv(&frame_rgb, frame, 0);

	/* draw */
	if (st->xshmat)
		XShmPutImage(st->disp, st->win, st->gc, st->image,
			     0, 0, 0, 0, st->size.w, st->size.h, false);
	else
		XPutImage(st->disp, st->win, st->gc, st->image,
			  0, 0, 0, 0, st->size.w, st->size.h);

	XSync(st->disp, false);

	return err;
}
static int enc(struct vidcodec_st *st, bool update, const struct vidframe *frame) { int i, err, ret; if (!st->enc.ctx || !vidsz_cmp(&st->encsize, &frame->size)) { err = open_encoder(st, &st->encprm, &frame->size); if (err) { DEBUG_WARNING("open_encoder: %m\n", err); return err; } } for (i=0; i<4; i++) { st->enc.pict->data[i] = frame->data[i]; st->enc.pict->linesize[i] = frame->linesize[i]; } st->enc.pict->pts = st->pts++; if (update) { re_printf("avcodec encoder picture update\n"); st->enc.pict->key_frame = 1; #ifdef FF_I_TYPE st->enc.pict->pict_type = FF_I_TYPE; /* Infra Frame */ #else st->enc.pict->pict_type = AV_PICTURE_TYPE_I; #endif } else { st->enc.pict->key_frame = 0; st->enc.pict->pict_type = 0; } mbuf_rewind(st->enc.mb); #if LIBAVCODEC_VERSION_INT >= ((54<<16)+(1<<8)+0) do { AVPacket avpkt; int got_packet; avpkt.data = st->enc.mb->buf; avpkt.size = (int)st->enc.mb->size; ret = avcodec_encode_video2(st->enc.ctx, &avpkt, st->enc.pict, &got_packet); if (ret < 0) return EBADMSG; if (!got_packet) return 0; mbuf_set_end(st->enc.mb, avpkt.size); } while (0); #else ret = avcodec_encode_video(st->enc.ctx, st->enc.mb->buf, (int)st->enc.mb->size, st->enc.pict); if (ret < 0 ) return EBADMSG; /* todo: figure out proper buffer size */ if (ret > (int)st->enc.sz_max) { re_printf("note: grow encode buffer %u --> %d\n", st->enc.sz_max, ret); st->enc.sz_max = ret; } mbuf_set_end(st->enc.mb, ret); #endif switch (st->codec_id) { case CODEC_ID_H263: err = h263_packetize(st, st->enc.mb); break; case CODEC_ID_H264: err = h264_packetize(st, st->enc.mb); break; case CODEC_ID_MPEG4: err = general_packetize(st, st->enc.mb); break; default: err = EPROTO; break; } return err; }
int encode_x264(struct videnc_state *st, bool update, const struct vidframe *frame) { x264_picture_t pic_in, pic_out; x264_nal_t *nal; int i_nal; int i, err, ret; int csp, pln; if (!st || !frame) return EINVAL; switch (frame->fmt) { case VID_FMT_YUV420P: csp = X264_CSP_I420; pln = 3; break; case VID_FMT_NV12: csp = X264_CSP_NV12; pln = 2; break; default: warning("avcodec: pixel format not supported (%s)\n", vidfmt_name(frame->fmt)); return ENOTSUP; } if (!st->x264 || !vidsz_cmp(&st->encsize, &frame->size)) { err = open_encoder_x264(st, &st->encprm, &frame->size, csp); if (err) return err; } if (update) { #if X264_BUILD >= 95 x264_encoder_intra_refresh(st->x264); #endif debug("avcodec: x264 picture update\n"); } x264_picture_init(&pic_in); pic_in.i_type = update ? X264_TYPE_IDR : X264_TYPE_AUTO; pic_in.i_qpplus1 = 0; pic_in.i_pts = ++st->pts; pic_in.img.i_csp = csp; pic_in.img.i_plane = pln; for (i=0; i<pln; i++) { pic_in.img.i_stride[i] = frame->linesize[i]; pic_in.img.plane[i] = frame->data[i]; } ret = x264_encoder_encode(st->x264, &nal, &i_nal, &pic_in, &pic_out); if (ret < 0) { fprintf(stderr, "x264 [error]: x264_encoder_encode failed\n"); } if (i_nal == 0) return 0; err = 0; for (i=0; i<i_nal && !err; i++) { const uint8_t hdr = nal[i].i_ref_idc<<5 | nal[i].i_type<<0; int offset = 0; #if X264_BUILD >= 76 const uint8_t *p = nal[i].p_payload; /* Find the NAL Escape code [00 00 01] */ if (nal[i].i_payload > 4 && p[0] == 0x00 && p[1] == 0x00) { if (p[2] == 0x00 && p[3] == 0x01) offset = 4 + 1; else if (p[2] == 0x01) offset = 3 + 1; } #endif /* skip Supplemental Enhancement Information (SEI) */ if (nal[i].i_type == H264_NAL_SEI) continue; err = h264_nal_send(true, true, (i+1)==i_nal, hdr, nal[i].p_payload + offset, nal[i].i_payload - offset, st->encprm.pktsize, st->pkth, st->arg); } return err; }
/*
 * Display a video frame in the X11 window. Pending X events are
 * drained first: window delete closes the display, and button/motion
 * events implement click-dragging of the (override-redirect) window.
 */
static int display(struct vidisp_st *st, const char *title,
		   const struct vidframe *frame)
{
	struct vidframe frame_rgb;
	int err = 0;

	if (!st->disp)
		return ENODEV;

	/*
	 * check for window delete - without blocking
	 * the switch handles both the override redirect window
	 * and the "standard" window manager managed window.
	 */
	while (XPending(st->disp)) {

		XEvent e;

		XNextEvent(st->disp, &e);

		switch (e.type) {

		case ClientMessage:
			if ((Atom) e.xclient.data.l[0] == st->XwinDeleted) {

				info("x11: window deleted\n");

				/*
				 * we have to bail as all of the display
				 * pointers are bad.
				 */
				close_window(st);
				return ENODEV;
			}
			break;

		case ButtonPress:
			st->button_is_down = 1;
			break;

		case ButtonRelease:
			st->button_is_down = 0;
			break;

		case MotionNotify:
			if (st->button_is_down == 0)
				break;

			/* throttle drag updates to ~30 Hz */
			if ((e.xmotion.time - st->last_time) < 32)
				break;

			/* move the window with the pointer, keeping the
			 * grab point roughly centered under the cursor */
			XMoveWindow(st->disp, st->win,
				    e.xmotion.x_root - 16,
				    e.xmotion.y_root - 16);

			st->last_time = e.xmotion.time;
			break;

		default:
			break;
		}
	}

	/* window/backing store must be re-created on size change */
	if (!vidsz_cmp(&st->size, &frame->size)) {
		char capt[256];

		if (st->size.w && st->size.h) {
			info("x11: reset: %u x %u ---> %u x %u\n",
			     st->size.w, st->size.h,
			     frame->size.w, frame->size.h);
		}

		if (st->internal && !st->win)
			err = create_window(st, &frame->size);

		err |= x11_reset(st, &frame->size);
		if (err)
			return err;

		if (title) {
			re_snprintf(capt, sizeof(capt), "%s - %u x %u",
				    title, frame->size.w, frame->size.h);
		}
		else {
			re_snprintf(capt, sizeof(capt), "%u x %u",
				    frame->size.w, frame->size.h);
		}

		XStoreName(st->disp, st->win, capt);
	}

	/* Convert from YUV420P to RGB */
	vidframe_init_buf(&frame_rgb, st->pixfmt, &frame->size,
			  (uint8_t *)st->shm.shmaddr);

	vidconv(&frame_rgb, frame, 0);

	/* draw */
	if (st->xshmat)
		XShmPutImage(st->disp, st->win, st->gc, st->image,
			     0, 0, 0, 0, st->size.w, st->size.h, false);
	else
		XPutImage(st->disp, st->win, st->gc, st->image,
			  0, 0, 0, 0, st->size.w, st->size.h);

	XSync(st->disp, false);

	return err;
}
/*
 * Display a YUV420P frame with SDL2: lazily create window, renderer
 * and streaming IYUV texture, copy the planes into the locked
 * texture, then render and present.
 */
static int display(struct vidisp_st *st, const char *title,
		   const struct vidframe *frame)
{
	void *pixels;
	uint8_t *p;
	int pitch, ret;
	unsigned i, h;

	/* tear everything down when the frame size changed; the
	 * blocks below re-create window/renderer/texture lazily */
	if (!vidsz_cmp(&st->size, &frame->size)) {

		if (st->size.w && st->size.h) {
			info("sdl: reset size: %u x %u ---> %u x %u\n",
			     st->size.w, st->size.h,
			     frame->size.w, frame->size.h);
		}
		sdl_reset(st);
	}

	if (!st->window) {
		Uint32 flags = SDL_WINDOW_SHOWN | SDL_WINDOW_INPUT_FOCUS;
		char capt[256];

		if (st->fullscreen)
			flags |= SDL_WINDOW_FULLSCREEN;

		if (title) {
			re_snprintf(capt, sizeof(capt), "%s - %u x %u",
				    title, frame->size.w, frame->size.h);
		}
		else {
			re_snprintf(capt, sizeof(capt), "%u x %u",
				    frame->size.w, frame->size.h);
		}

		st->window = SDL_CreateWindow(capt,
					      SDL_WINDOWPOS_CENTERED,
					      SDL_WINDOWPOS_CENTERED,
					      frame->size.w, frame->size.h,
					      flags);
		if (!st->window) {
			warning("sdl: unable to create sdl window: %s\n",
				SDL_GetError());
			return ENODEV;
		}

		st->size = frame->size;

		SDL_RaiseWindow(st->window);
		SDL_SetWindowBordered(st->window, true);
		SDL_ShowWindow(st->window);
	}

	if (!st->renderer) {

		Uint32 flags = 0;

		flags |= SDL_RENDERER_ACCELERATED;
		flags |= SDL_RENDERER_PRESENTVSYNC;

		st->renderer = SDL_CreateRenderer(st->window, -1, flags);
		if (!st->renderer) {
			warning("sdl: unable to create renderer: %s\n",
				SDL_GetError());
			return ENOMEM;
		}
	}

	if (!st->texture) {

		st->texture = SDL_CreateTexture(st->renderer,
						SDL_PIXELFORMAT_IYUV,
						SDL_TEXTUREACCESS_STREAMING,
						frame->size.w, frame->size.h);
		if (!st->texture) {
			warning("sdl: unable to create texture: %s\n",
				SDL_GetError());
			return ENODEV;
		}
	}

	ret = SDL_LockTexture(st->texture, NULL, &pixels, &pitch);
	if (ret != 0) {
		warning("sdl: unable to lock texture (ret=%d)\n", ret);
		return ENODEV;
	}

	/* Copy the 3 planes into the texture. 'stp' is the subsampling
	 * step derived from the linesize ratio (1 for luma, 2 for
	 * chroma in YUV420P).
	 * NOTE(review): assumes frame->linesize[i] is non-zero for all
	 * three planes -- verify against callers. */
	p = pixels;
	for (i=0; i<3; i++) {

		const uint8_t *s = frame->data[i];
		const unsigned stp = frame->linesize[0] / frame->linesize[i];
		const unsigned sz  = frame->size.w / stp;

		for (h = 0; h < frame->size.h; h += stp) {

			memcpy(p, s, sz);

			s += frame->linesize[i];
			p += (pitch / stp);
		}
	}

	SDL_UnlockTexture(st->texture);

	/* Blit the sprite onto the screen */
	SDL_RenderCopy(st->renderer, st->texture, NULL, NULL);

	/* Update the screen! */
	SDL_RenderPresent(st->renderer);

	return 0;
}
/*
 * Decode one demuxed packet with libavcodec (version-gated between
 * the send/receive, decode_video2 and decode_video APIs) and deliver
 * the decoded frame to the application handler. The no-codec
 * passthrough mode is not supported here.
 */
static void handle_packet(struct vidsrc_st *st, AVPacket *pkt)
{
	AVFrame *frame = NULL;
	struct vidframe vf;
	struct vidsz sz;
	unsigned i;

	if (st->codec) {
		int got_pict, ret;

#if LIBAVUTIL_VERSION_INT >= ((52<<16)+(20<<8)+100)
		frame = av_frame_alloc();
#else
		frame = avcodec_alloc_frame();
#endif

#if LIBAVCODEC_VERSION_INT >= ((57<<16)+(37<<8)+100)
		/* modern send/receive decode API */
		ret = avcodec_send_packet(st->ctx, pkt);
		if (ret < 0)
			goto out;

		ret = avcodec_receive_frame(st->ctx, frame);
		if (ret < 0)
			goto out;

		got_pict = true;
#elif LIBAVCODEC_VERSION_INT <= ((52<<16)+(23<<8)+0)
		ret = avcodec_decode_video(st->ctx, frame, &got_pict,
					   pkt->data, pkt->size);
#else
		ret = avcodec_decode_video2(st->ctx, frame, &got_pict, pkt);
#endif
		if (ret < 0 || !got_pict)
			goto out;

		sz.w = st->ctx->width;
		sz.h = st->ctx->height;

		/* check if size changed */
		if (!vidsz_cmp(&sz, &st->sz)) {
			info("avformat: size changed: %d x %d ---> %d x %d\n",
			     st->sz.w, st->sz.h, sz.w, sz.h);
			st->sz = sz;
		}
	}
	else {
		/* No-codec option is not supported */
		return;
	}

#if LIBAVCODEC_VERSION_INT >= ((53<<16)+(5<<8)+0)
	/* only planar 4:2:0 output is passed on */
	switch (frame->format) {

	case AV_PIX_FMT_YUV420P:
	case AV_PIX_FMT_YUVJ420P:
		vf.fmt = VID_FMT_YUV420P;
		break;

	default:
		warning("avformat: decode: bad pixel format"
			" (%i) (%s)\n",
			frame->format,
			av_get_pix_fmt_name(frame->format));
		goto out;
	}
#else
	vf.fmt = VID_FMT_YUV420P;
#endif

	/* the vidframe borrows the AVFrame's plane pointers; it is
	 * only valid until the frame is freed below */
	vf.size = sz;
	for (i=0; i<4; i++) {
		vf.data[i]     = frame->data[i];
		vf.linesize[i] = frame->linesize[i];
	}

	st->frameh(&vf, st->arg);

 out:
	if (frame) {
#if LIBAVUTIL_VERSION_INT >= ((52<<16)+(20<<8)+100)
		av_frame_free(&frame);
#else
		av_free(frame);
#endif
	}
}
/**
 * Display a video frame
 *
 * @param st    Video display state
 * @param title Window title
 * @param frame Video frame
 *
 * @return 0 if success, otherwise errorcode
 *
 * @note: On Darwin, this must be called from the main thread
 */
static int display(struct vidisp_st *st, const char *title,
		   const struct vidframe *frame)
{
	SDL_Rect rect;

	if (!st || !sdl.open)
		return EINVAL;

	/* on size change, tear down screen/overlay; both are lazily
	 * re-created below */
	if (!vidsz_cmp(&sdl.size, &frame->size)) {

		if (sdl.size.w && sdl.size.h) {
			info("sdl: reset size %u x %u ---> %u x %u\n",
			     sdl.size.w, sdl.size.h,
			     frame->size.w, frame->size.h);
		}
		sdl_reset();
	}

	if (!sdl.screen) {
		int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
		char capt[256];

		if (sdl.fullscreen)
			flags |= SDL_FULLSCREEN;
		else if (sdl.resizeh)
			flags |= SDL_RESIZABLE;

		if (title) {
			re_snprintf(capt, sizeof(capt), "%s - %u x %u",
				    title, frame->size.w, frame->size.h);
		}
		else {
			re_snprintf(capt, sizeof(capt), "%u x %u",
				    frame->size.w, frame->size.h);
		}

		SDL_WM_SetCaption(capt, capt);

		sdl.screen = SDL_SetVideoMode(frame->size.w, frame->size.h,
					      0, flags);
		if (!sdl.screen) {
			warning("sdl: unable to get video screen: %s\n",
				SDL_GetError());
			return ENODEV;
		}

		sdl.size = frame->size;
	}

	if (!sdl.bmp) {
		/* YV12 overlay; picture_copy() maps the plane order */
		sdl.bmp = SDL_CreateYUVOverlay(frame->size.w, frame->size.h,
					       SDL_YV12_OVERLAY, sdl.screen);
		if (!sdl.bmp) {
			warning("sdl: unable to create overlay: %s\n",
				SDL_GetError());
			return ENODEV;
		}
	}

	SDL_LockYUVOverlay(sdl.bmp);

	picture_copy(sdl.bmp->pixels, sdl.bmp->pitches, frame);

	SDL_UnlockYUVOverlay(sdl.bmp);

	rect.x = 0;
	rect.y = 0;
	rect.w = sdl.size.w;
	rect.h = sdl.size.h;

	SDL_DisplayYUVOverlay(sdl.bmp, &rect);

	return 0;
}
/*
 * Decode (and scale to the application size) one demuxed packet, or
 * pass raw YUV420P data straight through when no codec is configured,
 * then deliver the frame to the application handler.
 */
static void handle_packet(struct vidsrc_st *st, AVPacket *pkt)
{
	AVPicture pict;
	struct vidframe vf;
	struct vidsz sz;

	if (st->codec) {
		/* note: stack-allocated AVFrame (legacy API) */
		AVFrame frame;
		int got_pict, ret;

#if LIBAVCODEC_VERSION_INT <= ((52<<16)+(23<<8)+0)
		ret = avcodec_decode_video(st->ctx, &frame, &got_pict,
					   pkt->data, pkt->size);
#else
		ret = avcodec_decode_video2(st->ctx, &frame,
					    &got_pict, pkt);
#endif
		if (ret < 0 || !got_pict)
			return;

		sz.w = st->ctx->width;
		sz.h = st->ctx->height;

		/* check if size changed */
		if (!vidsz_cmp(&sz, &st->sz)) {
			re_printf("size changed: %d x %d ---> %d x %d\n",
				  st->sz.w, st->sz.h, sz.w, sz.h);
			st->sz = sz;

			/* scaler must be rebuilt for the new size */
			if (st->sws) {
				sws_freeContext(st->sws);
				st->sws = NULL;
			}
		}

		if (!st->sws) {
			re_printf("scaling: %d x %d ---> %d x %d\n",
				  st->sz.w, st->sz.h,
				  st->app_sz.w, st->app_sz.h);

			st->sws = sws_getContext(st->sz.w, st->sz.h,
						 st->ctx->pix_fmt,
						 st->app_sz.w, st->app_sz.h,
						 PIX_FMT_YUV420P,
						 SWS_BICUBIC,
						 NULL, NULL, NULL);
			if (!st->sws)
				return;
		}

		ret = avpicture_alloc(&pict, PIX_FMT_YUV420P,
				      st->app_sz.w, st->app_sz.h);
		if (ret < 0)
			return;

		ret = sws_scale(st->sws, SRCSLICE_CAST frame.data,
				frame.linesize, 0, st->sz.h,
				pict.data, pict.linesize);
		if (ret <= 0)
			goto end;

		vidframe_init(&vf, VID_FMT_YUV420P, &st->app_sz,
			      (void *)pict.data, pict.linesize);

		st->frameh(&vf, st->arg);

	end:
		avpicture_free(&pict);
	}
	else {
		/* raw stream: wrap the packet data directly.
		 * NOTE(review): pict is filled at st->sz but the
		 * vidframe claims st->app_sz -- presumably both sizes
		 * are equal in this mode; verify against setup code */
		avpicture_fill(&pict, pkt->data, PIX_FMT_YUV420P,
			       st->sz.w, st->sz.h);

		vidframe_init(&vf, VID_FMT_YUV420P, &st->app_sz,
			      (void *)pict.data, pict.linesize);

		st->frameh(&vf, st->arg);
	}
}
/*
 * Encode one YUV420P frame with libx265 and packetize the resulting
 * NAL units. Returns 0 on success or an errorcode.
 */
int h265_encode(struct videnc_state *st, bool update, const struct vidframe *frame, videnc_packet_h *pkth, void *arg)
{
	x265_picture *pic_in = NULL, pic_out;
	x265_nal *nalv;
	uint32_t i, nalc = 0;
	int n, err = 0;

	if (!st || !frame || !pkth || frame->fmt != VID_FMT_YUV420P)
		return EINVAL;

	/* (re-)open the encoder when missing or when size changed */
	if (!st->x265 || !vidsz_cmp(&st->size, &frame->size)) {

		err = open_encoder(st, &frame->size);
		if (err)
			return err;

		st->size = frame->size;
	}

	if (update) {
		debug("h265: encode: picture update was requested\n");
	}

	pic_in = x265_picture_alloc();
	if (!pic_in) {
		warning("h265: x265_picture_alloc failed\n");
		return ENOMEM;
	}

	x265_picture_init(st->param, pic_in);

	/* force an IDR frame on picture update */
	pic_in->sliceType = update ? X265_TYPE_IDR : X265_TYPE_AUTO;
	pic_in->pts = ++st->pts;  /* XXX: add PTS to API */
	pic_in->colorSpace = X265_CSP_I420;

	/* the picture borrows the caller's plane pointers */
	for (i=0; i<3; i++) {
		pic_in->planes[i] = frame->data[i];
		pic_in->stride[i] = frame->linesize[i];
	}

	/* NOTE: important to get the PTS of the "out" picture */
	n = x265_encoder_encode(st->x265, &nalv, &nalc, pic_in, &pic_out);
	if (n <= 0)
		goto out;
	/* NOTE(review): n < 0 is an encoder error but is returned as
	 * success (err == 0) -- confirm this is intended best-effort */

	for (i=0; i<nalc; i++) {

		x265_nal *nal = &nalv[i];
		uint8_t *p = nal->payload;
		size_t len = nal->sizeBytes;
		bool marker;

#if 1
		debug("h265: encode: %s type=%2d  %s\n",
		      h265_is_keyframe(nal->type) ? "<KEY>" : "     ",
		      nal->type, h265_nalunit_name(nal->type));
#endif

		h265_skip_startcode(&p, &len);

		/* XXX: use pic_out.pts */

		marker = (i+1)==nalc;  /* last NAL */

		err = packetize(marker, p, len, st->pktsize, pkth, arg);
		if (err)
			goto out;
	}

 out:
	if (pic_in)
		x265_picture_free(pic_in);

	return err;
}
/*
 * Encode one I420 frame with libx264 and send the resulting NAL
 * units to the supplied packet handler.
 */
int encode_x264(struct videnc_state *st, bool update, const struct vidframe *frame, videnc_packet_h *pkth, void *arg)
{
	x264_picture_t pic_in, pic_out;
	x264_nal_t *nal;
	int i_nal;
	int i, err, ret;

	/* (re-)open the encoder when missing or when size changed */
	if (!st->x264 || !vidsz_cmp(&st->encsize, &frame->size)) {

		err = open_encoder_x264(st, &st->encprm, &frame->size);
		if (err)
			return err;
	}

	if (update) {
#if X264_BUILD >= 95
		x264_encoder_intra_refresh(st->x264);
#endif
		re_printf("x264 picture update\n");
	}

	memset(&pic_in, 0, sizeof(pic_in));

	/* force an IDR frame on picture update */
	pic_in.i_type    = update ? X264_TYPE_IDR : X264_TYPE_AUTO;
	pic_in.i_qpplus1 = 0;
	pic_in.i_pts     = ++st->pts;

	pic_in.img.i_csp   = X264_CSP_I420;
	pic_in.img.i_plane = 3;

	/* the picture borrows the caller's plane pointers */
	for (i=0; i<3; i++) {
		pic_in.img.i_stride[i] = frame->linesize[i];
		pic_in.img.plane[i]    = frame->data[i];
	}

	ret = x264_encoder_encode(st->x264, &nal, &i_nal,
				  &pic_in, &pic_out);
	if (ret < 0) {
		fprintf(stderr, "x264 [error]: x264_encoder_encode failed\n");
	}
	/* NOTE(review): execution continues after a failed encode and
	 * reads i_nal/nal -- confirm x264 zeroes i_nal on failure */

	if (i_nal == 0)
		return 0;

	err = 0;
	for (i=0; i<i_nal && !err; i++) {
		const uint8_t hdr = nal[i].i_ref_idc<<5 | nal[i].i_type<<0;
		int offset = 0;

#if X264_BUILD >= 76
		const uint8_t *p = nal[i].p_payload;

		/* Find the NAL Escape code [00 00 01] */
		if (nal[i].i_payload > 4 && p[0] == 0x00 && p[1] == 0x00) {
			if (p[2] == 0x00 && p[3] == 0x01)
				offset = 4 + 1;
			else if (p[2] == 0x01)
				offset = 3 + 1;
		}
#endif

		/* skip Supplemental Enhancement Information (SEI) */
		if (nal[i].i_type == H264_NAL_SEI)
			continue;

		err = h264_nal_send(true, true, (i+1)==i_nal, hdr,
				    nal[i].p_payload + offset,
				    nal[i].i_payload - offset,
				    st->encprm.pktsize, pkth, arg);
	}

	return err;
}
int h265_encode(struct videnc_state *st, bool update, const struct vidframe *frame, uint64_t timestamp) { AVFrame *pict = NULL; AVPacket *pkt = NULL; uint64_t rtp_ts; int i, ret, got_packet = 0, err = 0; if (!st || !frame) return EINVAL; if (!st->ctx || !vidsz_cmp(&st->size, &frame->size) || st->fmt != frame->fmt) { enum AVPixelFormat pix_fmt; pix_fmt = vidfmt_to_avpixfmt(frame->fmt); if (pix_fmt == AV_PIX_FMT_NONE) { warning("h265: encode: pixel format not supported" " (%s)\n", vidfmt_name(frame->fmt)); return ENOTSUP; } debug("h265: encoder: reset %u x %u (%s)\n", frame->size.w, frame->size.h, vidfmt_name(frame->fmt)); err = open_encoder(st, &frame->size, pix_fmt); if (err) return err; st->size = frame->size; st->fmt = frame->fmt; } pict = av_frame_alloc(); if (!pict) { err = ENOMEM; goto out; } pict->format = st->ctx->pix_fmt; pict->width = frame->size.w; pict->height = frame->size.h; pict->pts = timestamp; for (i=0; i<4; i++) { pict->data[i] = frame->data[i]; pict->linesize[i] = frame->linesize[i]; } if (update) { debug("h265: encoder picture update\n"); pict->key_frame = 1; pict->pict_type = AV_PICTURE_TYPE_I; } #if LIBAVUTIL_VERSION_MAJOR >= 55 pict->color_range = AVCOL_RANGE_MPEG; #endif #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 37, 100) pkt = av_packet_alloc(); if (!pkt) { err = ENOMEM; goto out; } ret = avcodec_send_frame(st->ctx, pict); if (ret < 0) { err = EBADMSG; goto out; } /* NOTE: packet contains 4-byte startcode */ ret = avcodec_receive_packet(st->ctx, pkt); if (ret < 0) { info("h265: no packet yet ..\n"); err = 0; goto out; } got_packet = 1; #else pkt = av_malloc(sizeof(*pkt)); if (!pkt) { err = ENOMEM; goto out; } av_init_packet(pkt); av_new_packet(pkt, 65536); ret = avcodec_encode_video2(st->ctx, pkt, pict, &got_packet); if (ret < 0) { err = EBADMSG; goto out; } #endif if (!got_packet) goto out; rtp_ts = video_calc_rtp_timestamp_fix(pkt->dts); err = packetize_annexb(rtp_ts, pkt->data, pkt->size, st->pktsize, st->pkth, st->arg); if (err) 
goto out; out: if (pict) av_free(pict); if (pkt) av_packet_free(&pkt); return err; }
/*
 * Display a video frame with SDL2. Window, renderer and streaming
 * texture are created lazily and torn down when the frame size or
 * pixel format changes. The planes are copied row-by-row into the
 * locked texture, clamped to the texture pitch.
 */
static int display(struct vidisp_st *st, const char *title,
		   const struct vidframe *frame)
{
	void *pixels;
	uint8_t *d;
	int dpitch, ret;
	unsigned i, h;
	uint32_t format;

	if (!st || !frame)
		return EINVAL;

	format = match_fmt(frame->fmt);
	if (format == SDL_PIXELFORMAT_UNKNOWN) {
		warning("sdl2: pixel format not supported (%s)\n",
			vidfmt_name(frame->fmt));
		return ENOTSUP;
	}

	/* reset on size or pixel-format change */
	if (!vidsz_cmp(&st->size, &frame->size) || frame->fmt != st->fmt) {

		if (st->size.w && st->size.h) {
			info("sdl: reset size:"
			     " %s %u x %u ---> %s %u x %u\n",
			     vidfmt_name(st->fmt), st->size.w, st->size.h,
			     vidfmt_name(frame->fmt),
			     frame->size.w, frame->size.h);
		}
		sdl_reset(st);
	}

	if (!st->window) {
		char capt[256];

		st->flags = SDL_WINDOW_SHOWN | SDL_WINDOW_INPUT_FOCUS;

		if (st->fullscreen)
			st->flags |= SDL_WINDOW_FULLSCREEN_DESKTOP;

		if (title) {
			re_snprintf(capt, sizeof(capt), "%s - %u x %u",
				    title, frame->size.w, frame->size.h);
		}
		else {
			re_snprintf(capt, sizeof(capt), "%u x %u",
				    frame->size.w, frame->size.h);
		}

		st->window = SDL_CreateWindow(capt,
					      SDL_WINDOWPOS_CENTERED,
					      SDL_WINDOWPOS_CENTERED,
					      frame->size.w, frame->size.h,
					      st->flags);
		if (!st->window) {
			warning("sdl: unable to create sdl window: %s\n",
				SDL_GetError());
			return ENODEV;
		}

		st->size = frame->size;
		st->fmt = frame->fmt;

		SDL_RaiseWindow(st->window);
		SDL_SetWindowBordered(st->window, true);
		SDL_ShowWindow(st->window);
	}

	if (!st->renderer) {

		Uint32 flags = 0;

		flags |= SDL_RENDERER_ACCELERATED;
		flags |= SDL_RENDERER_PRESENTVSYNC;

		st->renderer = SDL_CreateRenderer(st->window, -1, flags);
		if (!st->renderer) {
			warning("sdl: unable to create renderer: %s\n",
				SDL_GetError());
			return ENOMEM;
		}
	}

	if (!st->texture) {

		st->texture = SDL_CreateTexture(st->renderer,
						format,
						SDL_TEXTUREACCESS_STREAMING,
						frame->size.w, frame->size.h);
		if (!st->texture) {
			warning("sdl: unable to create texture: %s\n",
				SDL_GetError());
			return ENODEV;
		}
	}

	ret = SDL_LockTexture(st->texture, NULL, &pixels, &dpitch);
	if (ret != 0) {
		warning("sdl: unable to lock texture (ret=%d)\n",
			ret);
		return ENODEV;
	}

	/* copy all planes present in the source frame; hstep/wstep
	 * account for chroma subsampling (wstep depends on format) */
	d = pixels;
	for (i=0; i<3; i++) {

		const uint8_t *s = frame->data[i];
		unsigned sz, dsz, hstep, wstep;

		/* stop at the first absent plane (e.g. packed formats) */
		if (!frame->data[i] || !frame->linesize[i])
			break;

		hstep = i==0 ? 1 : 2;
		wstep = i==0 ? 1 : chroma_step(frame->fmt);

		dsz = dpitch / wstep;
		sz  = min(frame->linesize[i], dsz);

		for (h = 0; h < frame->size.h; h += hstep) {

			memcpy(d, s, sz);

			s += frame->linesize[i];
			d += dsz;
		}
	}

	SDL_UnlockTexture(st->texture);

	/* Blit the sprite onto the screen */
	SDL_RenderCopy(st->renderer, st->texture, NULL, NULL);

	/* Update the screen! */
	SDL_RenderPresent(st->renderer);

	return 0;
}
static int display(struct vidisp_st *st, const char *title, const struct vidframe *frame) { void *pixels; int pitch, i; unsigned h; uint8_t *p; (void) title; if (!vidsz_cmp(&st->size, &frame->size)) { if (st->size.w && st->size.h) { info("directfb: reset: %u x %u ---> %u x %u\n", st->size.w, st->size.h, frame->size.w, frame->size.h); } if (st->surface) { st->surface->Release(st->surface); st->surface = NULL; } if (st->window) { st->window->Release(st->window); st->window = NULL; } } if (!st->window) { DFBWindowDescription desc; desc.flags = DWDESC_WIDTH|DWDESC_HEIGHT|DWDESC_PIXELFORMAT; desc.width = frame->size.w; desc.height = frame->size.h; desc.pixelformat = DSPF_I420; st->layer->CreateWindow(st->layer, &desc, &st->window); st->size = frame->size; st->window->SetOpacity(st->window, 0xff); st->window->GetSurface(st->window, &st->surface); } st->surface->Lock(st->surface, DSLF_WRITE, &pixels, &pitch); p = pixels; for (i=0; i<3; i++) { const uint8_t *s = frame->data[i]; const unsigned stp = frame->linesize[0] / frame->linesize[i]; const unsigned sz = frame->size.w / stp; for (h = 0; h < frame->size.h; h += stp) { memcpy(p, s, sz); s += frame->linesize[i]; p += (pitch / stp); } } st->surface->Unlock(st->surface); /* Update the screen! */ st->surface->Flip(st->surface, 0, 0); return 0; }
int daala_encode(struct videnc_state *ves, bool update, const struct vidframe *frame) { int r, err = 0; daala_image img; unsigned i; (void)update; /* XXX: how to force a KEY-frame? */ if (!ves || !frame || frame->fmt != VID_FMT_YUV420P) return EINVAL; ++ves->stats.n_frame; if (!ves->enc || !vidsz_cmp(&ves->size, &frame->size)) { err = open_encoder(ves, &frame->size); if (err) return err; ves->size = frame->size; } img.planes[0].data = frame->data[0]; img.planes[0].xdec = 0; img.planes[0].ydec = 0; img.planes[0].xstride = 1; img.planes[0].ystride = frame->linesize[0]; img.planes[1].data = frame->data[1]; img.planes[1].xdec = 1; img.planes[1].ydec = 1; img.planes[1].xstride = 1; img.planes[1].ystride = frame->linesize[1]; img.planes[2].data = frame->data[2]; img.planes[2].xdec = 1; img.planes[2].ydec = 1; img.planes[2].xstride = 1; img.planes[2].ystride = frame->linesize[2]; for (i=0; i<3; i++) img.planes[i].bitdepth = 8; img.nplanes = 3; img.width = frame->size.w; img.height = frame->size.h; r = daala_encode_img_in(ves->enc, &img, 0); if (r != 0) { warning("daala: encoder: encode_img_in failed (ret = %d)\n", r); return EPROTO; } for (;;) { daala_packet dp; r = daala_encode_packet_out(ves->enc, 0, &dp); if (r < 0) { warning("daala: encoder: packet_out ret=%d\n", r); break; } else if (r == 0) { break; } err = send_packet(ves, dp.b_o_s, dp.packet, dp.bytes); if (err) break; } return 0; }