Example #1
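/**
 * Drop all overlays still waiting in the decoder's overlay queue.
 * If 'send' is set, a VO_FLUSH overlay is enqueued afterwards.
 */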
void
video_overlay_flush(video_decoder_t *vd, int send)
{
  video_overlay_t *vo;

  while((vo = TAILQ_FIRST(&vd->vd_overlay_queue)) != NULL)
    video_overlay_destroy(vd, vo);

  if(!send)
    return;

  vo = calloc(1, sizeof(video_overlay_t));
  vo->vo_type = VO_FLUSH;
  video_overlay_enqueue(vd, vo);
}
Example #2
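/**
 * Convert a subtitle media buffer into overlays. Buffers without an
 * attached codec are treated as cleartext; otherwise the packet is
 * handed to the codec (or to the libav subtitle decoder).
 */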
void 
video_overlay_decode(media_pipe_t *mp, media_buf_t *mb)
{
  media_codec_t *mc = mb->mb_cw;
  
  // No decoder attached: the payload is plain subtitle text
  if(mc == NULL) {

    int offset = 0;
    char *str;

    // MOV_TEXT packets start with a 16-bit length field; skip it
    if(mb->mb_codecid == AV_CODEC_ID_MOV_TEXT) {
      if(mb->mb_size < 2)
	return;
      offset = 2;
    }

    str = malloc(mb->mb_size + 1 - offset);
    memcpy(str, mb->mb_data + offset, mb->mb_size - offset);
    str[mb->mb_size - offset] = 0;

    video_overlay_t *vo;
    vo = video_overlay_render_cleartext(str, mb->mb_pts,
					mb->mb_duration ?
					mb->mb_pts + mb->mb_duration :
					PTS_UNSET,
                                        TEXT_PARSE_HTML_TAGS |
                                        TEXT_PARSE_HTML_ENTITIES |
                                        TEXT_PARSE_SLOPPY_TAGS,
					mb->mb_font_context);

    if(vo != NULL)
      video_overlay_enqueue(mp, vo);

    free(str);

  } else {
      
    // A codec is attached: use its decode callback, or fall back to libav
    if(mc->decode)
      mc->decode(mc, NULL, NULL, mb, 0);
#if ENABLE_LIBAV
    else
      video_subtitles_lavc(mp, mb, mc->ctx);
#endif
  }
}
Example #3
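/**
 * Parse a cleartext subtitle string and enqueue it as a VO_TEXT overlay
 * covering [start, stop]. An empty string yields an empty overlay.
 */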
void
video_overlay_render_cleartext(video_decoder_t *vd, const char *txt,
			       int64_t start, int64_t stop, int tags)
{
  uint32_t *uc;
  int len;
  video_overlay_t *vo;

  if(strlen(txt) == 0) {
    vo = calloc(1, sizeof(video_overlay_t));
  } else {

    // Style prefix codes built from the user's subtitle settings
    uint32_t pfx[5];

    pfx[0] = TR_CODE_COLOR | subtitle_settings.color;
    pfx[1] = TR_CODE_SHADOW | subtitle_settings.shadow_displacement;
    pfx[2] = TR_CODE_SHADOW_COLOR | subtitle_settings.shadow_color;
    pfx[3] = TR_CODE_OUTLINE | subtitle_settings.outline_size;
    pfx[4] = TR_CODE_OUTLINE_COLOR | subtitle_settings.outline_color;

    uc = text_parse(txt, &len, 
		    tags ? (TEXT_PARSE_TAGS | TEXT_PARSE_HTML_ENTETIES) : 0,
		    pfx, 5);
    if(uc == NULL)
      return;

    vo = calloc(1, sizeof(video_overlay_t));
    vo->vo_type = VO_TEXT;
    vo->vo_text = uc;
    vo->vo_text_length = len;
    vo->vo_padding_left = -1;  // auto padding
  }
  
  vo->vo_start = start;
  vo->vo_stop = stop;

  video_overlay_enqueue(vd, vo);
}
Example #4
/**
 * Decode subtitles from LAVC
 */
static void
video_subtitles_lavc(video_decoder_t *vd, media_buf_t *mb,
		     AVCodecContext *ctx)
{
  AVSubtitle sub;
  int size = 0, i, x, y;
  video_overlay_t *vo;

  AVPacket avpkt;
  av_init_packet(&avpkt);
  avpkt.data = mb->mb_data;
  avpkt.size = mb->mb_size;

  if(avcodec_decode_subtitle2(ctx, &sub, &size, &avpkt) < 1 || size < 1) 
    return;

  if(sub.num_rects == 0) {
    // Flush screen
    vo = calloc(1, sizeof(video_overlay_t));
    vo->vo_type = VO_TIMED_FLUSH;
    vo->vo_start = mb->mb_pts + sub.start_display_time * 1000;
    video_overlay_enqueue(vd, vo);
    return;
  }

  for(i = 0; i < sub.num_rects; i++) {
    AVSubtitleRect *r = sub.rects[i];

    switch(r->type) {

    case SUBTITLE_BITMAP:
      vo = calloc(1, sizeof(video_overlay_t));

      vo->vo_start = mb->mb_pts + sub.start_display_time * 1000;
      vo->vo_stop  = mb->mb_pts + sub.end_display_time * 1000;
		  
      vo->vo_x = r->x;
      vo->vo_y = r->y;

      vo->vo_pixmap = pixmap_create(r->w, r->h, PIX_FMT_BGR32, 1);

      // Expand the palettized bitmap through its color lookup table (CLUT)
      const uint8_t *src = r->pict.data[0];
      const uint32_t *clut = (uint32_t *)r->pict.data[1];
      uint32_t *dst = (uint32_t *)vo->vo_pixmap->pm_pixels;
      
      for(y = 0; y < r->h; y++) {
	for(x = 0; x < r->w; x++) {
	  *dst++ = clut[src[x]];
	}
	src += r->pict.linesize[0];
      }
      video_overlay_enqueue(vd, vo);
      break;

    case SUBTITLE_ASS:
      sub_ass_render(vd, r->ass,
		     ctx->subtitle_header, ctx->subtitle_header_size);
      break;

    default:
      break;
    }
  }
}
Example #5
/**
 * Decode subtitles from LAVC
 */
static void
video_subtitles_lavc(media_pipe_t *mp, media_buf_t *mb,
		     AVCodecContext *ctx)
{
  AVSubtitle sub;
  int got_sub = 0, i, x, y;
  video_overlay_t *vo;

  AVPacket avpkt;
  av_init_packet(&avpkt);
  avpkt.data = mb->mb_data;
  avpkt.size = mb->mb_size;

  if(avcodec_decode_subtitle2(ctx, &sub, &got_sub, &avpkt) < 1 || !got_sub) 
    return;

  if(sub.num_rects == 0) {
    // Flush screen
    vo = calloc(1, sizeof(video_overlay_t));
    vo->vo_type = VO_TIMED_FLUSH;
    vo->vo_start = mb->mb_pts + sub.start_display_time * 1000;
    video_overlay_enqueue(mp, vo);
  } else {

    for(i = 0; i < sub.num_rects; i++) {
      AVSubtitleRect *r = sub.rects[i];

      switch(r->type) {

      case SUBTITLE_BITMAP:
	vo = calloc(1, sizeof(video_overlay_t));

	vo->vo_start = mb->mb_pts + sub.start_display_time * 1000;
	vo->vo_stop  = mb->mb_pts + sub.end_display_time * 1000;
        vo->vo_canvas_width  = ctx->width;
        vo->vo_canvas_height = ctx->height;

	vo->vo_x = r->x;
	vo->vo_y = r->y;

	vo->vo_pixmap = pixmap_create(r->w, r->h, PIXMAP_BGR32, 0);

	if(vo->vo_pixmap == NULL) {
	  free(vo);
	  break;
	}

	const uint8_t *src = r->pict.data[0];
	const uint32_t *clut = (uint32_t *)r->pict.data[1];
      
	for(y = 0; y < r->h; y++) {
	  uint32_t *dst = (uint32_t *)(vo->vo_pixmap->pm_pixels + 
				       y * vo->vo_pixmap->pm_linesize);
	  for(x = 0; x < r->w; x++)
	    *dst++ = clut[src[x]];

	  src += r->pict.linesize[0];
	}
	video_overlay_enqueue(mp, vo);
	break;

      case SUBTITLE_ASS:
	sub_ass_render(mp, r->ass,
		       ctx->subtitle_header, ctx->subtitle_header_size,
		       mb->mb_font_context);
	break;

      default:
	break;
      }
    }
  }
  avsubtitle_free(&sub);
}