static int conf_get_vidfmt(const struct conf *conf, const char *name, int *fmtp) { struct pl pl; int fmt; int err; err = conf_get(conf, name, &pl); if (err) return err; for (fmt=0; fmt<VID_FMT_N; fmt++) { const char *str = vidfmt_name(fmt); if (0 == pl_strcasecmp(&pl, str)) { *fmtp = fmt; return 0; } } warning("config: %s: pixel format not supported (%r)\n", name, &pl); return ENOENT; }
/** * Fill a video frame with a nice color * * @param vf Video frame * @param r Red color component * @param g Green color component * @param b Blue color component */ void vidframe_fill(struct vidframe *vf, uint32_t r, uint32_t g, uint32_t b) { uint8_t *p; unsigned h, i; if (!vf) return; switch (vf->fmt) { case VID_FMT_YUV420P: h = vf->size.h; memset(vf->data[0], rgb2y(r, g, b), h * vf->linesize[0]); memset(vf->data[1], rgb2u(r, g, b), h/2 * vf->linesize[1]); memset(vf->data[2], rgb2v(r, g, b), h/2 * vf->linesize[2]); break; case VID_FMT_RGB32: p = vf->data[0]; for (i=0; i<vf->linesize[0] * vf->size.h; i+=4) { *p++ = b; *p++ = g; *p++ = r; *p++ = 0; } break; default: (void)re_printf("vidfill: no fmt %s\n", vidfmt_name(vf->fmt)); break; } }
/*
 * Video-filter encode callback: convert/scale the incoming frame to the
 * configured destination pixel format and size using libswscale.
 *
 * NOTE(review): on success the frame's data/linesize pointers are
 * re-pointed at the filter's internal converted frame -- the original
 * buffers are no longer referenced by @frame after this call.
 */
static int encode_process(struct vidfilt_enc_st *st, struct vidframe *frame)
{
	struct swscale_enc *enc = (struct swscale_enc *)st;
	enum AVPixelFormat avpixfmt, avpixfmt_dst;
	const uint8_t *srcSlice[4];
	uint8_t *dst[4];
	int srcStride[4], dstStride[4];
	int width, height, i, h;
	int err = 0;

	if (!st)
		return EINVAL;

	/* NULL frame is not an error -- nothing to convert */
	if (!frame)
		return 0;

	width = frame->size.w;
	height = frame->size.h;

	/* Map source and destination formats to libav pixel formats */
	avpixfmt = vidfmt_to_avpixfmt(frame->fmt);
	if (avpixfmt == AV_PIX_FMT_NONE) {
		warning("swscale: unknown pixel-format (%s)\n",
			vidfmt_name(frame->fmt));
		return EINVAL;
	}

	avpixfmt_dst = vidfmt_to_avpixfmt(swscale_format);
	if (avpixfmt_dst == AV_PIX_FMT_NONE) {
		warning("swscale: unknown pixel-format (%s)\n",
			vidfmt_name(swscale_format));
		return EINVAL;
	}

	/* Lazily create the scaler context on the first frame */
	if (!enc->sws) {

		struct SwsContext *sws;
		int flags = 0;

		sws = sws_getContext(width, height, avpixfmt,
				     enc->dst_size.w, enc->dst_size.h,
				     avpixfmt_dst,
				     flags, NULL, NULL, NULL);
		if (!sws) {
			warning("swscale: sws_getContext error\n");
			return ENOMEM;
		}

		enc->sws = sws;

		info("swscale: created SwsContext:"
		     " `%s' %d x %d --> `%s' %u x %u\n",
		     vidfmt_name(frame->fmt), width, height,
		     vidfmt_name(swscale_format),
		     enc->dst_size.w, enc->dst_size.h);
	}

	/* Lazily allocate the destination frame (reused between calls) */
	if (!enc->frame) {

		err = vidframe_alloc(&enc->frame, swscale_format,
				     &enc->dst_size);
		if (err) {
			warning("swscale: vidframe_alloc error (%m)\n", err);
			return err;
		}
	}

	/* Gather the plane pointers/strides for sws_scale() */
	for (i=0; i<4; i++) {
		srcSlice[i]  = frame->data[i];
		srcStride[i] = frame->linesize[i];
		dst[i]       = enc->frame->data[i];
		dstStride[i] = enc->frame->linesize[i];
	}

	h = sws_scale(enc->sws, srcSlice, srcStride,
		      0, height, dst, dstStride);
	if (h <= 0) {
		warning("swscale: sws_scale error (%d)\n", h);
		return EPROTO;
	}

	/* Copy the converted frame back to the input frame */
	for (i=0; i<4; i++) {
		frame->data[i]     = enc->frame->data[i];
		frame->linesize[i] = enc->frame->linesize[i];
	}
	frame->size = enc->frame->size;
	frame->fmt = enc->frame->fmt;

	return 0;
}
/*
 * Encode one video frame with libavcodec and packetize the result.
 *
 * @param st     Video encoder state
 * @param update True to force an intra (key) frame
 * @param frame  Video frame to encode (YUV420P or NV12 only)
 *
 * @return 0 if success, otherwise errorcode
 */
int encode(struct videnc_state *st, bool update, const struct vidframe *frame)
{
	int i, err, ret;
	int pix_fmt;

	if (!st || !frame)
		return EINVAL;

	/* Only planar YUV 4:2:0 input is handled here */
	switch (frame->fmt) {

	case VID_FMT_YUV420P:
		pix_fmt = AV_PIX_FMT_YUV420P;
		break;

	case VID_FMT_NV12:
		pix_fmt = AV_PIX_FMT_NV12;
		break;

	default:
		warning("avcodec: pixel format not supported (%s)\n",
			vidfmt_name(frame->fmt));
		return ENOTSUP;
	}

	/* (Re-)open the encoder if not yet open or if the size changed */
	if (!st->ctx || !vidsz_cmp(&st->encsize, &frame->size)) {

		err = open_encoder(st, &st->encprm, &frame->size, pix_fmt);
		if (err) {
			warning("avcodec: open_encoder: %m\n", err);
			return err;
		}
	}

	/* Borrow the plane pointers -- no copy is made */
	for (i=0; i<4; i++) {
		st->pict->data[i]     = frame->data[i];
		st->pict->linesize[i] = frame->linesize[i];
	}
	st->pict->pts = st->pts++;
	if (update) {
		debug("avcodec: encoder picture update\n");
		st->pict->key_frame = 1;
#ifdef FF_I_TYPE
		st->pict->pict_type = FF_I_TYPE;  /* Infra Frame */
#else
		st->pict->pict_type = AV_PICTURE_TYPE_I;
#endif
	}
	else {
		st->pict->key_frame = 0;
		st->pict->pict_type = 0;
	}

	mbuf_rewind(st->mb);

#if LIBAVCODEC_VERSION_INT >= ((54<<16)+(1<<8)+0)
	/* libavcodec >= 54.1.0: packet-based encode API */
	do {
		AVPacket avpkt;
		int got_packet;

		av_init_packet(&avpkt);

		/* encode directly into our mbuf's buffer */
		avpkt.data = st->mb->buf;
		avpkt.size = (int)st->mb->size;

		ret = avcodec_encode_video2(st->ctx, &avpkt,
					    st->pict, &got_packet);
		if (ret < 0)
			return EBADMSG;
		if (!got_packet)
			return 0;

		mbuf_set_end(st->mb, avpkt.size);

	} while (0);
#else
	/* legacy buffer-based encode API */
	ret = avcodec_encode_video(st->ctx, st->mb->buf,
				   (int)st->mb->size, st->pict);
	if (ret < 0 )
		return EBADMSG;

	/* todo: figure out proper buffer size */
	if (ret > (int)st->sz_max) {
		debug("avcodec: grow encode buffer %u --> %d\n",
		      st->sz_max, ret);
		st->sz_max = ret;
	}

	mbuf_set_end(st->mb, ret);
#endif

	/* Split the bitstream into RTP payloads, per codec */
	switch (st->codec_id) {

	case AV_CODEC_ID_H263:
		err = h263_packetize(st, st->mb, st->pkth, st->arg);
		break;

	case AV_CODEC_ID_H264:
		err = h264_packetize(st->mb->buf, st->mb->end,
				     st->encprm.pktsize,
				     st->pkth, st->arg);
		break;

	case AV_CODEC_ID_MPEG4:
		err = general_packetize(st->mb, st->encprm.pktsize,
					st->pkth, st->arg);
		break;

	default:
		err = EPROTO;
		break;
	}

	return err;
}
int encode_x264(struct videnc_state *st, bool update, const struct vidframe *frame) { x264_picture_t pic_in, pic_out; x264_nal_t *nal; int i_nal; int i, err, ret; int csp, pln; if (!st || !frame) return EINVAL; switch (frame->fmt) { case VID_FMT_YUV420P: csp = X264_CSP_I420; pln = 3; break; case VID_FMT_NV12: csp = X264_CSP_NV12; pln = 2; break; default: warning("avcodec: pixel format not supported (%s)\n", vidfmt_name(frame->fmt)); return ENOTSUP; } if (!st->x264 || !vidsz_cmp(&st->encsize, &frame->size)) { err = open_encoder_x264(st, &st->encprm, &frame->size, csp); if (err) return err; } if (update) { #if X264_BUILD >= 95 x264_encoder_intra_refresh(st->x264); #endif debug("avcodec: x264 picture update\n"); } x264_picture_init(&pic_in); pic_in.i_type = update ? X264_TYPE_IDR : X264_TYPE_AUTO; pic_in.i_qpplus1 = 0; pic_in.i_pts = ++st->pts; pic_in.img.i_csp = csp; pic_in.img.i_plane = pln; for (i=0; i<pln; i++) { pic_in.img.i_stride[i] = frame->linesize[i]; pic_in.img.plane[i] = frame->data[i]; } ret = x264_encoder_encode(st->x264, &nal, &i_nal, &pic_in, &pic_out); if (ret < 0) { fprintf(stderr, "x264 [error]: x264_encoder_encode failed\n"); } if (i_nal == 0) return 0; err = 0; for (i=0; i<i_nal && !err; i++) { const uint8_t hdr = nal[i].i_ref_idc<<5 | nal[i].i_type<<0; int offset = 0; #if X264_BUILD >= 76 const uint8_t *p = nal[i].p_payload; /* Find the NAL Escape code [00 00 01] */ if (nal[i].i_payload > 4 && p[0] == 0x00 && p[1] == 0x00) { if (p[2] == 0x00 && p[3] == 0x01) offset = 4 + 1; else if (p[2] == 0x01) offset = 3 + 1; } #endif /* skip Supplemental Enhancement Information (SEI) */ if (nal[i].i_type == H264_NAL_SEI) continue; err = h264_nal_send(true, true, (i+1)==i_nal, hdr, nal[i].p_payload + offset, nal[i].i_payload - offset, st->encprm.pktsize, st->pkth, st->arg); } return err; }
/*
 * Print a commented template of the core configuration file.
 *
 * @param pf  Print function for output
 * @param cfg Core configuration used to fill in default values
 *
 * @return 0 if success, otherwise errorcode
 */
static int core_config_template(struct re_printf *pf,
				const struct config *cfg)
{
	int err = 0;

	if (!cfg)
		return 0;

	/* Core / SIP / Call / Audio sections */
	err |= re_hprintf(pf,
			  "\n# Core\n"
			  "poll_method\t\t%s\t\t# poll, select"
#ifdef HAVE_EPOLL
			  ", epoll .."
#endif
#ifdef HAVE_KQUEUE
			  ", kqueue .."
#endif
			  "\n"
			  "\n# SIP\n"
			  "#sip_listen\t\t0.0.0.0:5060\n"
			  "#sip_certificate\tcert.pem\n"
			  "#sip_cafile\t\t%s\n"
			  "\n"
			  "# Call\n"
			  "call_local_timeout\t%u\n"
			  "call_max_calls\t\t%u\n"
			  "\n"
			  "# Audio\n"
#if defined (SHARE_PATH)
			  "#audio_path\t\t" SHARE_PATH "\n"
#elif defined (PREFIX)
			  "#audio_path\t\t" PREFIX "/share/baresip\n"
#else
			  "#audio_path\t\t/usr/share/baresip\n"
#endif
			  "audio_player\t\t%s\n"
			  "audio_source\t\t%s\n"
			  "audio_alert\t\t%s\n"
			  "#ausrc_srate\t\t48000\n"
			  "#auplay_srate\t\t48000\n"
			  "#ausrc_channels\t\t0\n"
			  "#auplay_channels\t0\n"
			  "#audio_txmode\t\tpoll\t\t# poll, thread\n"
			  "audio_level\t\tno\n"
			  "ausrc_format\t\ts16\t\t# s16, float, ..\n"
			  "auplay_format\t\ts16\t\t# s16, float, ..\n"
			  "auenc_format\t\ts16\t\t# s16, float, ..\n"
			  "audec_format\t\ts16\t\t# s16, float, ..\n"
			  ,
			  poll_method_name(poll_method_best()),
			  default_cafile(),
			  cfg->call.local_timeout,
			  cfg->call.max_calls,
			  default_audio_device(),
			  default_audio_device(),
			  default_audio_device());

	/* Video section */
	err |= re_hprintf(pf,
			  "\n# Video\n"
			  "#video_source\t\t%s\n"
			  "#video_display\t\t%s\n"
			  "video_size\t\t%dx%d\n"
			  "video_bitrate\t\t%u\n"
			  "video_fps\t\t%.2f\n"
			  "video_fullscreen\tyes\n"
			  "videnc_format\t\t%s\n"
			  ,
			  default_video_device(),
			  default_video_display(),
			  cfg->video.width, cfg->video.height,
			  cfg->video.bitrate, cfg->video.fps,
			  vidfmt_name(cfg->video.enc_fmt));

	/* AVT / Network sections */
	err |= re_hprintf(pf,
			  "\n# AVT - Audio/Video Transport\n"
			  "rtp_tos\t\t\t184\n"
			  "#rtp_ports\t\t10000-20000\n"
			  "#rtp_bandwidth\t\t512-1024 # [kbit/s]\n"
			  "rtcp_mux\t\tno\n"
			  "jitter_buffer_delay\t%u-%u\t\t# frames\n"
			  "rtp_stats\t\tno\n"
			  "#rtp_timeout\t\t60\n"
			  "\n# Network\n"
			  "prefer_ipv6\t\tno\n"
			  "#dns_server\t\t10.0.0.1:53\n"
			  "#net_interface\t\t%H\n",
			  cfg->avt.jbuf_del.min,
			  cfg->avt.jbuf_del.max,
			  default_interface_print, NULL);

	/* BFCP section */
	err |= re_hprintf(pf,
			  "\n# BFCP\n"
			  "#bfcp_proto\t\tudp\n");

	return err;
}
/**
 * Print the baresip core config
 *
 * @param pf  Print function
 * @param cfg Baresip core config
 *
 * @return 0 if success, otherwise errorcode
 */
int config_print(struct re_printf *pf, const struct config *cfg)
{
	int err;

	if (!cfg)
		return 0;

	/* Dump every config value in config-file syntax */
	err = re_hprintf(pf,
			 "\n"
			 "# SIP\n"
			 "sip_listen\t\t%s\n"
			 "sip_certificate\t%s\n"
			 "sip_cafile\t\t%s\n"
			 "\n"
			 "# Call\n"
			 "call_local_timeout\t%u\n"
			 "call_max_calls\t\t%u\n"
			 "\n"
			 "# Audio\n"
			 "audio_path\t\t%s\n"
			 "audio_player\t\t%s,%s\n"
			 "audio_source\t\t%s,%s\n"
			 "audio_alert\t\t%s,%s\n"
			 "auplay_srate\t\t%u\n"
			 "ausrc_srate\t\t%u\n"
			 "auplay_channels\t\t%u\n"
			 "ausrc_channels\t\t%u\n"
			 "audio_level\t\t%s\n"
			 "\n"
			 "# Video\n"
			 "video_source\t\t%s,%s\n"
			 "video_display\t\t%s,%s\n"
			 "video_size\t\t\"%ux%u\"\n"
			 "video_bitrate\t\t%u\n"
			 "video_fps\t\t%.2f\n"
			 "video_fullscreen\t%s\n"
			 "videnc_format\t\t%s\n"
			 "\n"
			 "# AVT\n"
			 "rtp_tos\t\t\t%u\n"
			 "rtp_ports\t\t%H\n"
			 "rtp_bandwidth\t\t%H\n"
			 "rtcp_mux\t\t%s\n"
			 "jitter_buffer_delay\t%H\n"
			 "rtp_stats\t\t%s\n"
			 "rtp_timeout\t\t%u # in seconds\n"
			 "\n"
			 "# Network\n"
			 "prefer_ipv6\t\t%s\n"
			 "net_interface\t\t%s\n"
			 "\n"
			 "# BFCP\n"
			 "bfcp_proto\t\t%s\n"
			 "\n"
			 ,
			 cfg->sip.local, cfg->sip.cert, cfg->sip.cafile,

			 cfg->call.local_timeout,
			 cfg->call.max_calls,

			 cfg->audio.audio_path,
			 cfg->audio.play_mod, cfg->audio.play_dev,
			 cfg->audio.src_mod, cfg->audio.src_dev,
			 cfg->audio.alert_mod, cfg->audio.alert_dev,
			 cfg->audio.srate_play, cfg->audio.srate_src,
			 cfg->audio.channels_play, cfg->audio.channels_src,
			 cfg->audio.level ? "yes" : "no",

			 cfg->video.src_mod, cfg->video.src_dev,
			 cfg->video.disp_mod, cfg->video.disp_dev,
			 cfg->video.width, cfg->video.height,
			 cfg->video.bitrate, cfg->video.fps,
			 cfg->video.fullscreen ? "yes" : "no",
			 vidfmt_name(cfg->video.enc_fmt),

			 cfg->avt.rtp_tos,
			 range_print, &cfg->avt.rtp_ports,
			 range_print, &cfg->avt.rtp_bw,
			 cfg->avt.rtcp_mux ? "yes" : "no",
			 range_print, &cfg->avt.jbuf_del,
			 cfg->avt.rtp_stats ? "yes" : "no",
			 cfg->avt.rtp_timeout,

			 cfg->net.prefer_ipv6 ? "yes" : "no",
			 cfg->net.ifname

			 ,cfg->bfcp.proto
			 );

	return err;
}
int h265_encode(struct videnc_state *st, bool update, const struct vidframe *frame, uint64_t timestamp) { AVFrame *pict = NULL; AVPacket *pkt = NULL; uint64_t rtp_ts; int i, ret, got_packet = 0, err = 0; if (!st || !frame) return EINVAL; if (!st->ctx || !vidsz_cmp(&st->size, &frame->size) || st->fmt != frame->fmt) { enum AVPixelFormat pix_fmt; pix_fmt = vidfmt_to_avpixfmt(frame->fmt); if (pix_fmt == AV_PIX_FMT_NONE) { warning("h265: encode: pixel format not supported" " (%s)\n", vidfmt_name(frame->fmt)); return ENOTSUP; } debug("h265: encoder: reset %u x %u (%s)\n", frame->size.w, frame->size.h, vidfmt_name(frame->fmt)); err = open_encoder(st, &frame->size, pix_fmt); if (err) return err; st->size = frame->size; st->fmt = frame->fmt; } pict = av_frame_alloc(); if (!pict) { err = ENOMEM; goto out; } pict->format = st->ctx->pix_fmt; pict->width = frame->size.w; pict->height = frame->size.h; pict->pts = timestamp; for (i=0; i<4; i++) { pict->data[i] = frame->data[i]; pict->linesize[i] = frame->linesize[i]; } if (update) { debug("h265: encoder picture update\n"); pict->key_frame = 1; pict->pict_type = AV_PICTURE_TYPE_I; } #if LIBAVUTIL_VERSION_MAJOR >= 55 pict->color_range = AVCOL_RANGE_MPEG; #endif #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 37, 100) pkt = av_packet_alloc(); if (!pkt) { err = ENOMEM; goto out; } ret = avcodec_send_frame(st->ctx, pict); if (ret < 0) { err = EBADMSG; goto out; } /* NOTE: packet contains 4-byte startcode */ ret = avcodec_receive_packet(st->ctx, pkt); if (ret < 0) { info("h265: no packet yet ..\n"); err = 0; goto out; } got_packet = 1; #else pkt = av_malloc(sizeof(*pkt)); if (!pkt) { err = ENOMEM; goto out; } av_init_packet(pkt); av_new_packet(pkt, 65536); ret = avcodec_encode_video2(st->ctx, pkt, pict, &got_packet); if (ret < 0) { err = EBADMSG; goto out; } #endif if (!got_packet) goto out; rtp_ts = video_calc_rtp_timestamp_fix(pkt->dts); err = packetize_annexb(rtp_ts, pkt->data, pkt->size, st->pktsize, st->pkth, st->arg); if (err) 
goto out; out: if (pict) av_free(pict); if (pkt) av_packet_free(&pkt); return err; }
/*
 * Display one video frame with SDL2, lazily creating the window,
 * renderer and streaming texture on first use (and re-creating them
 * when the frame size or pixel format changes).
 *
 * @param st    Video display state
 * @param title Optional window title
 * @param frame Video frame to display
 *
 * @return 0 if success, otherwise errorcode
 */
static int display(struct vidisp_st *st, const char *title,
		   const struct vidframe *frame)
{
	void *pixels;
	uint8_t *d;
	int dpitch, ret;
	unsigned i, h;
	uint32_t format;

	if (!st || !frame)
		return EINVAL;

	format = match_fmt(frame->fmt);
	if (format == SDL_PIXELFORMAT_UNKNOWN) {
		warning("sdl2: pixel format not supported (%s)\n",
			vidfmt_name(frame->fmt));
		return ENOTSUP;
	}

	/* Size or format change: tear down and rebuild the SDL objects */
	if (!vidsz_cmp(&st->size, &frame->size) || frame->fmt != st->fmt) {
		if (st->size.w && st->size.h) {
			info("sdl: reset size:"
			     " %s %u x %u ---> %s %u x %u\n",
			     vidfmt_name(st->fmt), st->size.w, st->size.h,
			     vidfmt_name(frame->fmt),
			     frame->size.w, frame->size.h);
		}
		sdl_reset(st);
	}

	if (!st->window) {
		char capt[256];

		st->flags = SDL_WINDOW_SHOWN | SDL_WINDOW_INPUT_FOCUS;

		if (st->fullscreen)
			st->flags |= SDL_WINDOW_FULLSCREEN_DESKTOP;

		if (title) {
			re_snprintf(capt, sizeof(capt), "%s - %u x %u",
				    title, frame->size.w, frame->size.h);
		}
		else {
			re_snprintf(capt, sizeof(capt), "%u x %u",
				    frame->size.w, frame->size.h);
		}

		st->window = SDL_CreateWindow(capt,
					      SDL_WINDOWPOS_CENTERED,
					      SDL_WINDOWPOS_CENTERED,
					      frame->size.w, frame->size.h,
					      st->flags);
		if (!st->window) {
			warning("sdl: unable to create sdl window: %s\n",
				SDL_GetError());
			return ENODEV;
		}

		st->size = frame->size;
		st->fmt = frame->fmt;

		SDL_RaiseWindow(st->window);
		SDL_SetWindowBordered(st->window, true);
		SDL_ShowWindow(st->window);
	}

	if (!st->renderer) {

		Uint32 flags = 0;

		flags |= SDL_RENDERER_ACCELERATED;
		flags |= SDL_RENDERER_PRESENTVSYNC;

		st->renderer = SDL_CreateRenderer(st->window, -1, flags);
		if (!st->renderer) {
			warning("sdl: unable to create renderer: %s\n",
				SDL_GetError());
			return ENOMEM;
		}
	}

	if (!st->texture) {

		st->texture = SDL_CreateTexture(st->renderer,
						format,
						SDL_TEXTUREACCESS_STREAMING,
						frame->size.w,
						frame->size.h);
		if (!st->texture) {
			warning("sdl: unable to create texture: %s\n",
				SDL_GetError());
			return ENODEV;
		}
	}

	ret = SDL_LockTexture(st->texture, NULL, &pixels, &dpitch);
	if (ret != 0) {
		warning("sdl: unable to lock texture (ret=%d)\n",
			ret);
		return ENODEV;
	}

	/* Copy each plane row-by-row into the locked texture.
	 * For packed formats only plane 0 is set, so the loop
	 * breaks after the first iteration. */
	d = pixels;
	for (i=0; i<3; i++) {

		const uint8_t *s = frame->data[i];
		unsigned sz, dsz, hstep, wstep;

		if (!frame->data[i] || !frame->linesize[i])
			break;

		/* chroma planes are vertically/horizontally sub-sampled */
		hstep = i==0 ? 1 : 2;
		wstep = i==0 ? 1 : chroma_step(frame->fmt);

		dsz = dpitch / wstep;
		sz  = min(frame->linesize[i], dsz);

		for (h = 0; h < frame->size.h; h += hstep) {

			memcpy(d, s, sz);

			s += frame->linesize[i];
			d += dsz;
		}
	}

	SDL_UnlockTexture(st->texture);

	/* Blit the sprite onto the screen */
	SDL_RenderCopy(st->renderer, st->texture, NULL, NULL);

	/* Update the screen! */
	SDL_RenderPresent(st->renderer);

	return 0;
}
/** * Initialize a video frame from a buffer * * @param vf Video frame * @param fmt Video pixel format * @param sz Size of video frame * @param buf Frame buffer */ void vidframe_init_buf(struct vidframe *vf, enum vidfmt fmt, const struct vidsz *sz, uint8_t *buf) { unsigned w, h; if (!vf || !sz || !buf) return; w = (sz->w + 1) >> 1; h = (sz->h + 1) >> 1; memset(vf->linesize, 0, sizeof(vf->linesize)); memset(vf->data, 0, sizeof(vf->data)); switch (fmt) { case VID_FMT_YUV420P: vf->linesize[0] = sz->w; vf->linesize[1] = w; vf->linesize[2] = w; vf->data[0] = buf; vf->data[1] = vf->data[0] + vf->linesize[0] * sz->h; vf->data[2] = vf->data[1] + vf->linesize[1] * h; break; case VID_FMT_YUYV422: case VID_FMT_UYVY422: vf->linesize[0] = sz->w * 2; vf->data[0] = buf; break; case VID_FMT_RGB32: case VID_FMT_ARGB: vf->linesize[0] = sz->w * 4; vf->data[0] = buf; break; case VID_FMT_RGB565: case VID_FMT_RGB555: vf->linesize[0] = sz->w * 2; vf->data[0] = buf; break; case VID_FMT_NV12: case VID_FMT_NV21: vf->linesize[0] = sz->w; vf->linesize[1] = w*2; vf->data[0] = buf; vf->data[1] = vf->data[0] + vf->linesize[0] * sz->h; break; default: (void)re_printf("vidframe: no fmt %s\n", vidfmt_name(fmt)); return; } vf->size = *sz; vf->fmt = fmt; }