Example #1
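/**
 * Read back the current OpenGL framebuffer into a BGR32 pixmap.
 * glReadPixels() returns rows bottom-up, hence the PIXMAP_VFLIP flag.
 */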
static pixmap_t *
opengl_read_pixels(glw_root_t *gr)
{
    pixmap_t *pm = pixmap_create(gr->gr_width, gr->gr_height, PIXMAP_BGR32, 0);

    glReadPixels(0, 0, gr->gr_width, gr->gr_height,
                 GL_BGRA, GL_UNSIGNED_BYTE, pm->pm_data);
    pm->pm_flags |= PIXMAP_VFLIP;
    return pm;
}
Example #2
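/**
 * Copy the RSX (PS3) framebuffer straight into an RGBA pixmap.
 */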
static pixmap_t *
rsx_read_pixels(glw_root_t *gr)
{
  glw_ps3_t *gp = (glw_ps3_t *)gr;

  pixmap_t *pm = pixmap_create(gr->gr_width, gr->gr_height, PIXMAP_RGBA, 0);

  memcpy(pm->pm_data, rsx_to_ppu(gp->framebuffer[0]),
         pm->pm_linesize * pm->pm_height);
  return pm;
}
Example #3
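/**
 * Grab a thumbnail frame from a video file: reopen the demuxer and
 * decoder only if the URL changed, seek to the requested second, decode
 * until a frame at or past the target, scale it into a BGR32 pixmap and
 * write a cached thumbnail. The decoder is kept open and auto-closed
 * later via a callout.
 */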
static pixmap_t *
fa_image_from_video2(const char *url, const image_meta_t *im, 
		     const char *cacheid, char *errbuf, size_t errlen,
		     int sec, time_t mtime, cancellable_t *c)
{
  pixmap_t *pm = NULL;

  if(ifv_url == NULL || strcmp(url, ifv_url)) {
    // Need to open
    int i;
    AVFormatContext *fctx;
    fa_handle_t *fh = fa_open_ex(url, errbuf, errlen, FA_BUFFERED_BIG, NULL);

    if(fh == NULL)
      return NULL;

    AVIOContext *avio = fa_libav_reopen(fh, 0);

    if((fctx = fa_libav_open_format(avio, url, NULL, 0, NULL, 0, 0,
				    0)) == NULL) {
      fa_libav_close(avio);
      snprintf(errbuf, errlen, "Unable to open format");
      return NULL;
    }

    if(!strcmp(fctx->iformat->name, "avi"))
      fctx->flags |= AVFMT_FLAG_GENPTS;

    AVCodecContext *ctx = NULL;
    for(i = 0; i < fctx->nb_streams; i++) {
      if(fctx->streams[i]->codec != NULL && 
	 fctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
	ctx = fctx->streams[i]->codec;
	break;
      }
    }
    if(ctx == NULL) {
      fa_libav_close_format(fctx);
      return NULL;
    }

    AVCodec *codec = avcodec_find_decoder(ctx->codec_id);
    if(codec == NULL) {
      fa_libav_close_format(fctx);
      snprintf(errbuf, errlen, "Unable to find codec");
      return NULL;
    }

    if(avcodec_open2(ctx, codec, NULL) < 0) {
      fa_libav_close_format(fctx);
      snprintf(errbuf, errlen, "Unable to open codec");
      return NULL;
    }

    ifv_close();

    ifv_stream = i;
    ifv_url = strdup(url);
    ifv_fctx = fctx;
    ifv_ctx = ctx;
  }

  AVPacket pkt;
  AVFrame *frame = av_frame_alloc();
  int got_pic;


  AVStream *st = ifv_fctx->streams[ifv_stream];
  int64_t ts = av_rescale(sec, st->time_base.den, st->time_base.num);

  if(av_seek_frame(ifv_fctx, ifv_stream, ts, AVSEEK_FLAG_BACKWARD) < 0) {
    ifv_close();
    snprintf(errbuf, errlen, "Unable to seek to %"PRId64, ts);
    return NULL;
  }
  
  avcodec_flush_buffers(ifv_ctx);

#define MAX_FRAME_SCAN 500
  
  int cnt = MAX_FRAME_SCAN;
  while(1) {
    int r;

    r = av_read_frame(ifv_fctx, &pkt);

    if(r == AVERROR(EAGAIN))
      continue;

    if(r == AVERROR_EOF)
      break;

    if(cancellable_is_cancelled(c)) {
      snprintf(errbuf, errlen, "Cancelled");
      av_free_packet(&pkt);
      break;
    }

    if(r != 0) {
      ifv_close();
      break;
    }

    if(pkt.stream_index != ifv_stream) {
      av_free_packet(&pkt);
      continue;
    }
    cnt--;
    int want_pic = pkt.pts >= ts || cnt <= 0;

    ifv_ctx->skip_frame = want_pic ? AVDISCARD_DEFAULT : AVDISCARD_NONREF;
    
    avcodec_decode_video2(ifv_ctx, frame, &got_pic, &pkt);
    av_free_packet(&pkt);
    if(got_pic == 0 || !want_pic) {
      continue;
    }
    int w,h;

    if(im->im_req_width != -1 && im->im_req_height != -1) {
      w = im->im_req_width;
      h = im->im_req_height;
    } else if(im->im_req_width != -1) {
      w = im->im_req_width;
      h = im->im_req_width * ifv_ctx->height / ifv_ctx->width;

    } else if(im->im_req_height != -1) {
      w = im->im_req_height * ifv_ctx->width / ifv_ctx->height;
      h = im->im_req_height;
    } else {
      w = im->im_req_width;
      h = im->im_req_height;
    }

    pm = pixmap_create(w, h, PIXMAP_BGR32, 0);

    if(pm == NULL) {
      ifv_close();
      snprintf(errbuf, errlen, "Out of memory");
      av_free(frame);
      return NULL;
    }

    struct SwsContext *sws;
    sws = sws_getContext(ifv_ctx->width, ifv_ctx->height, ifv_ctx->pix_fmt,
			 w, h, AV_PIX_FMT_BGR32, SWS_BILINEAR,
                         NULL, NULL, NULL);
    if(sws == NULL) {
      ifv_close();
      snprintf(errbuf, errlen, "Scaling failed");
      pixmap_release(pm);
      av_free(frame);
      return NULL;
    }
    
    uint8_t *ptr[4] = {0,0,0,0};
    int strides[4] = {0,0,0,0};

    ptr[0] = pm->pm_pixels;
    strides[0] = pm->pm_linesize;

    sws_scale(sws, (const uint8_t **)frame->data, frame->linesize,
	      0, ifv_ctx->height, ptr, strides);

    sws_freeContext(sws);

    write_thumb(ifv_ctx, frame, w, h, cacheid, mtime);

    break;
  }

  av_frame_free(&frame);
  if(pm == NULL)
    snprintf(errbuf, errlen, "Frame not found (scanned %d)", 
	     MAX_FRAME_SCAN - cnt);

  avcodec_flush_buffers(ifv_ctx);
  callout_arm(&thumb_flush_callout, ifv_autoclose, NULL, 5);
  return pm;
}
Example #4
File: show-polygon.c Project: AZed/cairo
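/*
 * Repaint from the cached pixmap (recreated if the widget was resized),
 * then draw a magnifier around the pointer: a zoomed view of the
 * polygons with an optional highlight line and a two-level grid overlay.
 */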
static void
polygon_view_draw (PolygonView *self, cairo_t *cr)
{
    polygon_t *polygon;
    gdouble sf_x, sf_y, sf;
    gdouble mid, dim;
    gdouble x0,  y0;
    box_t extents;

    extents = self->extents;

    mid = (extents.p2.x + extents.p1.x) / 2.;
    dim = (extents.p2.x - extents.p1.x) / 2. * 1.25;
    sf_x = self->widget.allocation.width / dim / 2;

    mid = (extents.p2.y + extents.p1.y) / 2.;
    dim = (extents.p2.y - extents.p1.y) / 2. * 1.25;
    sf_y = self->widget.allocation.height / dim / 2;

    sf = MIN (sf_x, sf_y);

    mid = (extents.p2.x + extents.p1.x) / 2.;
    dim = sf_x / sf * (extents.p2.x - extents.p1.x) / 2. * 1.25;
    x0 = mid - dim;
    mid = (extents.p2.y + extents.p1.y) / 2.;
    dim = sf_y / sf * (extents.p2.y - extents.p1.y) / 2. * 1.25;
    y0 = mid - dim;

    if (self->pixmap_width != self->widget.allocation.width ||
	self->pixmap_height != self->widget.allocation.height)
    {
	cairo_surface_destroy (self->pixmap);
	self->pixmap = pixmap_create (self, cairo_get_target (cr));
	self->pixmap_width = self->widget.allocation.width;
	self->pixmap_height = self->widget.allocation.height;
    }

    cairo_set_source_surface (cr, self->pixmap, 0, 0);
    cairo_paint (cr);

    if (self->polygons == NULL)
	return;

    /* draw a zoom view of the area around the mouse */
    if (1) {
	double zoom = self->mag_zoom;
	int size = self->mag_size;
	int mag_x = self->mag_x;
	int mag_y = self->mag_y;

	if (1) {
	    if (self->px + size < self->widget.allocation.width/2)
		mag_x = self->px + size/4;
	    else
		mag_x = self->px - size/4 - size;
	    mag_y = self->py - size/2;
	    if (mag_y < 0)
		mag_y = 0;
	    if (mag_y + size > self->widget.allocation.height)
		mag_y = self->widget.allocation.height - size;
	}

	cairo_save (cr); {
	    /* bottom right */
	    cairo_rectangle (cr, mag_x, mag_y, size, size);
	    cairo_stroke_preserve (cr);
	    cairo_set_source_rgb (cr, 1, 1, 1);
	    cairo_fill_preserve (cr);
	    cairo_clip (cr);

	    /* compute roi in extents */
	    cairo_translate (cr, mag_x + size/2, mag_y + size/2);

	    cairo_save (cr); {
		cairo_scale (cr, zoom, zoom);
		cairo_translate (cr, -(self->px / sf + x0), -(self->py /sf + y0));
		for (polygon = self->polygons; polygon; polygon = polygon->next) {
		    if (polygon->num_edges == 0)
			continue;

		    draw_polygon (cr, polygon, zoom);
		}

		if (highlight != -1) {
		    cairo_move_to (cr, extents.p1.x, highlight);
		    cairo_line_to (cr, extents.p2.x, highlight);
		    cairo_set_source_rgb (cr, 0, .7, 0);
		    cairo_save (cr);
		    cairo_identity_matrix (cr);
		    cairo_set_line_width (cr, 1.);
		    cairo_stroke (cr);
		    cairo_restore (cr);
		}
	    } cairo_restore (cr);

	    /* grid */
	    cairo_save (cr); {
		int i;

		cairo_translate (cr,
				 -zoom*fmod (self->px/sf + x0, 1.),
				 -zoom*fmod (self->py/sf + y0, 1.));
		zoom /= 2;
		for (i = -size/2/zoom; i <= size/2/zoom + 1; i+=2) {
		    cairo_move_to (cr, zoom*i, -size/2);
		    cairo_line_to (cr, zoom*i, size/2 + zoom);
		    cairo_move_to (cr, -size/2, zoom*i);
		    cairo_line_to (cr, size/2 + zoom, zoom*i);
		}
		zoom *= 2;
		cairo_set_source_rgba (cr, .7, .7, .7, .5);
		cairo_set_line_width (cr, 1.);
		cairo_stroke (cr);

		for (i = -size/2/zoom - 1; i <= size/2/zoom + 1; i++) {
		    cairo_move_to (cr, zoom*i, -size/2);
		    cairo_line_to (cr, zoom*i, size/2 + zoom);
		    cairo_move_to (cr, -size/2, zoom*i);
		    cairo_line_to (cr, size/2 + zoom, zoom*i);
		}
		cairo_set_source_rgba (cr, .1, .1, .1, .5);
		cairo_set_line_width (cr, 2.);
		cairo_stroke (cr);
	    } cairo_restore (cr);

	} cairo_restore (cr);
    }
}
Example #5
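// Bring up every runtime subsystem in dependency order. On the first
// failure, report it via sue() and tear down the already created
// subsystems in reverse order before returning false.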
bool bbruntime_create(){
	if( blitz_create() ){
				if( hook_create() ){
						if( event_create() ){
								if( math_create() ){
										if( string_create() ){
												if( stdio_create() ){
														if( stream_create() ){
																if( sockets_create() ){
																		if( enet_create() ){
																				if( runtime_create() ){
																						if( system_create() ){
																								if( bank_create() ){
																										if( system_windows_create() ){
																												if( filesystem_create() ){
																														if( filesystem_windows_create() ){
																																if( timer_windows_create() ){
																																		if( input_create() ){
																																				if( input_directinput8_create() ){
																																						if( audio_create() ){
																																								if( audio_fmod_create() ){
																																										if( userlibs_create() ){
																																												if( pixmap_create() ){
																																														if( blitz2d_create() ){
																																																if( graphics_create() ){
																																																		if( runtime_glfw3_create() ){
																																																				return true;
																									}else sue( "runtime_glfw3_create failed" );
																									graphics_destroy();
																								}else sue( "graphics_create failed" );
																								blitz2d_destroy();
																							}else sue( "blitz2d_create failed" );
																							pixmap_destroy();
																						}else sue( "pixmap_create failed" );
																						userlibs_destroy();
																					}else sue( "userlibs_create failed" );
																					audio_fmod_destroy();
																				}else sue( "audio_fmod_create failed" );
																				audio_destroy();
																			}else sue( "audio_create failed" );
																			input_directinput8_destroy();
																		}else sue( "input_directinput8_create failed" );
																		input_destroy();
																	}else sue( "input_create failed" );
																	timer_windows_destroy();
																}else sue( "timer_windows_create failed" );
																filesystem_windows_destroy();
															}else sue( "filesystem_windows_create failed" );
															filesystem_destroy();
														}else sue( "filesystem_create failed" );
														system_windows_destroy();
													}else sue( "system_windows_create failed" );
													bank_destroy();
												}else sue( "bank_create failed" );
												system_destroy();
											}else sue( "system_create failed" );
											runtime_destroy();
										}else sue( "runtime_create failed" );
										enet_destroy();
									}else sue( "enet_create failed" );
									sockets_destroy();
								}else sue( "sockets_create failed" );
								stream_destroy();
							}else sue( "stream_create failed" );
							stdio_destroy();
						}else sue( "stdio_create failed" );
						string_destroy();
					}else sue( "string_create failed" );
					math_destroy();
				}else sue( "math_create failed" );
				event_destroy();
			}else sue( "event_create failed" );
			hook_destroy();
		}else sue( "hook_create failed" );
		blitz_destroy();
	}else sue( "blitz_create failed" );
	return false;
}
Example #6
/**
 * Decode subtitles from LAVC
 */
static void
video_subtitles_lavc(video_decoder_t *vd, media_buf_t *mb,
		     AVCodecContext *ctx)
{
  AVSubtitle sub;
  int size = 0, i, x, y;
  video_overlay_t *vo;

  AVPacket avpkt;
  av_init_packet(&avpkt);
  avpkt.data = mb->mb_data;
  avpkt.size = mb->mb_size;

  if(avcodec_decode_subtitle2(ctx, &sub, &size, &avpkt) < 1 || size < 1) 
    return;

  if(sub.num_rects == 0) {
    // Flush screen
    vo = calloc(1, sizeof(video_overlay_t));
    vo->vo_type = VO_TIMED_FLUSH;
    vo->vo_start = mb->mb_pts + sub.start_display_time * 1000;
    video_overlay_enqueue(vd, vo);
    return;
  }

  for(i = 0; i < sub.num_rects; i++) {
    AVSubtitleRect *r = sub.rects[i];

    switch(r->type) {

    case SUBTITLE_BITMAP:
      vo = calloc(1, sizeof(video_overlay_t));

      vo->vo_start = mb->mb_pts + sub.start_display_time * 1000;
      vo->vo_stop  = mb->mb_pts + sub.end_display_time * 1000;
		  
      vo->vo_x = r->x;
      vo->vo_y = r->y;

      vo->vo_pixmap = pixmap_create(r->w, r->h, PIX_FMT_BGR32, 1);

      const uint8_t *src = r->pict.data[0];
      const uint32_t *clut = (uint32_t *)r->pict.data[1];
      uint32_t *dst = (uint32_t *)vo->vo_pixmap->pm_pixels;
      
      for(y = 0; y < r->h; y++) {
	for(x = 0; x < r->w; x++) {
	  *dst++ = clut[src[x]];
	}
	src += r->pict.linesize[0];
      }
      video_overlay_enqueue(vd, vo);
      break;

    case SUBTITLE_ASS:
      sub_ass_render(vd, r->ass,
		     ctx->subtitle_header, ctx->subtitle_header_size);
      break;

    default:
      break;
    }
  }
}
Example #7
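/**
 * Grab a thumbnail from a video file and return it as an image_t:
 * reuses the already open demuxer/decoder when the URL matches; when
 * sec == -1 it returns embedded jpeg/png cover art if present and
 * otherwise picks a capture time automatically; seeking is deferred
 * until after the first decoded frame for RV30/RV40; the decoded frame
 * is scaled into a BGR32 pixmap and wrapped in an image_t.
 */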
static image_t *
fa_image_from_video2(const char *url, const image_meta_t *im,
		     const char *cacheid, char *errbuf, size_t errlen,
		     int sec, time_t mtime, cancellable_t *c)
{
  image_t *img = NULL;

  if(ifv_url == NULL || strcmp(url, ifv_url)) {
    // Need to open
    int i;
    AVFormatContext *fctx;
    fa_handle_t *fh = fa_open_ex(url, errbuf, errlen, FA_BUFFERED_BIG, NULL);

    if(fh == NULL)
      return NULL;

    AVIOContext *avio = fa_libav_reopen(fh, 0);

    if((fctx = fa_libav_open_format(avio, url, NULL, 0, NULL, 0, 0,
				    0)) == NULL) {
      fa_libav_close(avio);
      snprintf(errbuf, errlen, "Unable to open format");
      return NULL;
    }

    if(!strcmp(fctx->iformat->name, "avi"))
      fctx->flags |= AVFMT_FLAG_GENPTS;

    AVCodecContext *ctx = NULL;
    int vstream = 0;
    for(i = 0; i < fctx->nb_streams; i++) {
      AVStream *st = fctx->streams[i];
      AVCodecContext *c = st->codec;
      AVDictionaryEntry *mt;

      if(c == NULL)
        continue;

      switch(c->codec_type) {
      case AVMEDIA_TYPE_VIDEO:
        if(ctx == NULL) {
          vstream = i;
          ctx = fctx->streams[i]->codec;
        }
        break;

      case AVMEDIA_TYPE_ATTACHMENT:
        mt = av_dict_get(st->metadata, "mimetype", NULL, AV_DICT_IGNORE_SUFFIX);
        if(sec == -1 && mt != NULL &&
           (!strcmp(mt->value, "image/jpeg") ||
            !strcmp(mt->value, "image/png"))) {
          int64_t offset = st->attached_offset;
          int size = st->attached_size;
          fa_libav_close_format(fctx);
          return thumb_from_attachment(url, offset, size, errbuf, errlen,
                                       cacheid, mtime);
        }
        break;

      default:
        break;
      }
    }
    if(ctx == NULL) {
      fa_libav_close_format(fctx);
      return NULL;
    }

    AVCodec *codec = avcodec_find_decoder(ctx->codec_id);
    if(codec == NULL) {
      fa_libav_close_format(fctx);
      snprintf(errbuf, errlen, "Unable to find codec");
      return NULL;
    }

    if(avcodec_open2(ctx, codec, NULL) < 0) {
      fa_libav_close_format(fctx);
      snprintf(errbuf, errlen, "Unable to open codec");
      return NULL;
    }

    ifv_close();

    ifv_stream = vstream;
    ifv_url = strdup(url);
    ifv_fctx = fctx;
    ifv_ctx = ctx;
  }

  AVPacket pkt;
  AVFrame *frame = av_frame_alloc();
  int got_pic;

#define MAX_FRAME_SCAN 500

  int cnt = MAX_FRAME_SCAN;

  AVStream *st = ifv_fctx->streams[ifv_stream];

  if(sec == -1) {
    // Automatically try to find a good frame

    int duration_in_seconds = ifv_fctx->duration / 1000000;


    sec = MAX(1, duration_in_seconds * 0.05); // 5% of duration
    sec = MIN(sec, 150); // but no longer than 2:30 in

    sec = MAX(0, MIN(sec, duration_in_seconds - 1));
    cnt = 1;
  }


  int64_t ts = av_rescale(sec, st->time_base.den, st->time_base.num);
  int delayed_seek = 0;

  if(ifv_ctx->codec_id == AV_CODEC_ID_RV40 ||
     ifv_ctx->codec_id == AV_CODEC_ID_RV30) {
    // Must decode one frame
    delayed_seek = 1;
  } else {
    if(av_seek_frame(ifv_fctx, ifv_stream, ts, AVSEEK_FLAG_BACKWARD) < 0) {
      ifv_close();
      snprintf(errbuf, errlen, "Unable to seek to %"PRId64, ts);
      return NULL;
    }
  }

  avcodec_flush_buffers(ifv_ctx);

  while(1) {
    int r;

    r = av_read_frame(ifv_fctx, &pkt);

    if(r == AVERROR(EAGAIN))
      continue;

    if(r == AVERROR_EOF)
      break;

    if(cancellable_is_cancelled(c)) {
      snprintf(errbuf, errlen, "Cancelled");
      av_free_packet(&pkt);
      break;
    }

    if(r != 0) {
      ifv_close();
      break;
    }

    if(pkt.stream_index != ifv_stream) {
      av_free_packet(&pkt);
      continue;
    }
    cnt--;
    int want_pic = pkt.pts >= ts || cnt <= 0;

    ifv_ctx->skip_frame = want_pic ? AVDISCARD_DEFAULT : AVDISCARD_NONREF;

    avcodec_decode_video2(ifv_ctx, frame, &got_pic, &pkt);
    av_free_packet(&pkt);

    if(delayed_seek) {
      delayed_seek = 0;
      if(av_seek_frame(ifv_fctx, ifv_stream, ts, AVSEEK_FLAG_BACKWARD) < 0) {
        ifv_close();
        break;
      }
      continue;
    }


    if(got_pic == 0 || !want_pic) {
      continue;
    }
    int w,h;

    if(im->im_req_width != -1 && im->im_req_height != -1) {
      w = im->im_req_width;
      h = im->im_req_height;
    } else if(im->im_req_width != -1) {
      w = im->im_req_width;
      h = im->im_req_width * ifv_ctx->height / ifv_ctx->width;

    } else if(im->im_req_height != -1) {
      w = im->im_req_height * ifv_ctx->width / ifv_ctx->height;
      h = im->im_req_height;
    } else {
      w = im->im_req_width;
      h = im->im_req_height;
    }

    pixmap_t *pm = pixmap_create(w, h, PIXMAP_BGR32, 0);

    if(pm == NULL) {
      ifv_close();
      snprintf(errbuf, errlen, "Out of memory");
      av_free(frame);
      return NULL;
    }

    struct SwsContext *sws;
    sws = sws_getContext(ifv_ctx->width, ifv_ctx->height, ifv_ctx->pix_fmt,
			 w, h, AV_PIX_FMT_BGR32, SWS_BILINEAR,
                         NULL, NULL, NULL);
    if(sws == NULL) {
      ifv_close();
      snprintf(errbuf, errlen, "Scaling failed");
      pixmap_release(pm);
      av_free(frame);
      return NULL;
    }
    
    uint8_t *ptr[4] = {0,0,0,0};
    int strides[4] = {0,0,0,0};

    ptr[0] = pm->pm_data;
    strides[0] = pm->pm_linesize;

    sws_scale(sws, (const uint8_t **)frame->data, frame->linesize,
	      0, ifv_ctx->height, ptr, strides);

    sws_freeContext(sws);

    write_thumb(ifv_ctx, frame, w, h, cacheid, mtime);

    img = image_create_from_pixmap(pm);
    pixmap_release(pm);

    break;
  }

  av_frame_free(&frame);
  if(img == NULL)
    snprintf(errbuf, errlen, "Frame not found (scanned %d)", 
	     MAX_FRAME_SCAN - cnt);

  if(ifv_ctx != NULL) {
    avcodec_flush_buffers(ifv_ctx);
    callout_arm(&thumb_flush_callout, ifv_autoclose, NULL, 5);
  }
  return img;
}
Example #8
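/**
 * Variant built on the legacy avcodec_open()/avcodec_alloc_frame() API:
 * a load callback can abort the scan, and the scaled RGB24 frame is
 * PNG-encoded and stored in the blob cache.
 */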
static pixmap_t *
fa_image_from_video2(const char *url, const image_meta_t *im, 
		     const char *cacheid, char *errbuf, size_t errlen,
		     int sec, time_t mtime, fa_load_cb_t *cb, void *opaque)
{
  pixmap_t *pm = NULL;

  if(ifv_url == NULL || strcmp(url, ifv_url)) {
    // Need to open
    int i;
    AVFormatContext *fctx;
    fa_handle_t *fh = fa_open_ex(url, errbuf, errlen, FA_BUFFERED_BIG, NULL);

    if(fh == NULL)
      return NULL;

    AVIOContext *avio = fa_libav_reopen(fh);

    if((fctx = fa_libav_open_format(avio, url, NULL, 0, NULL)) == NULL) {
      fa_libav_close(avio);
      snprintf(errbuf, errlen, "Unable to open format");
      return NULL;
    }

    if(!strcmp(fctx->iformat->name, "avi"))
      fctx->flags |= AVFMT_FLAG_GENPTS;

    AVCodecContext *ctx = NULL;
    for(i = 0; i < fctx->nb_streams; i++) {
      if(fctx->streams[i]->codec != NULL && 
	 fctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
	ctx = fctx->streams[i]->codec;
	break;
      }
    }
    if(ctx == NULL) {
      fa_libav_close_format(fctx);
      return NULL;
    }

    AVCodec *codec = avcodec_find_decoder(ctx->codec_id);
    if(codec == NULL) {
      fa_libav_close_format(fctx);
      snprintf(errbuf, errlen, "Unable to find codec");
      return NULL;
    }

    if(avcodec_open(ctx, codec) < 0) {
      fa_libav_close_format(fctx);
      snprintf(errbuf, errlen, "Unable to open codec");
      return NULL;
    }

    ifv_close();

    ifv_stream = i;
    ifv_url = strdup(url);
    ifv_fctx = fctx;
    ifv_ctx = ctx;
  }

  AVPacket pkt;
  AVFrame *frame = avcodec_alloc_frame();
  int got_pic;


  AVStream *st = ifv_fctx->streams[ifv_stream];
  int64_t ts = av_rescale(sec, st->time_base.den, st->time_base.num);

  if(av_seek_frame(ifv_fctx, ifv_stream, ts, AVSEEK_FLAG_BACKWARD) < 0) {
    ifv_close();
    snprintf(errbuf, errlen, "Unable to seek to %"PRId64, ts);
    return NULL;
  }
  
  avcodec_flush_buffers(ifv_ctx);

#define MAX_FRAME_SCAN 500
  
  int cnt = MAX_FRAME_SCAN;
  while(1) {
    int r;

    r = av_read_frame(ifv_fctx, &pkt);
    
    if(r == AVERROR(EAGAIN))
      continue;
    
    if(r == AVERROR_EOF) {
      break;
    }
    
    if(cb != NULL && cb(opaque, 0, 1)) {
      snprintf(errbuf, errlen, "Aborted");
      break;
    }

    if(r != 0) {
      ifv_close();
      break;
    }

    if(pkt.stream_index != ifv_stream) {
      av_free_packet(&pkt);
      continue;
    }
    cnt--;
    int want_pic = pkt.pts >= ts || cnt <= 0;

    ifv_ctx->skip_frame = want_pic ? AVDISCARD_DEFAULT : AVDISCARD_NONREF;
    
    avcodec_decode_video2(ifv_ctx, frame, &got_pic, &pkt);
    av_free_packet(&pkt);
    if(got_pic == 0 || !want_pic) {
      continue;
    }
    int w,h;

    if(im->im_req_width != -1 && im->im_req_height != -1) {
      w = im->im_req_width;
      h = im->im_req_height;
    } else if(im->im_req_width != -1) {
      w = im->im_req_width;
      h = im->im_req_width * ifv_ctx->height / ifv_ctx->width;

    } else if(im->im_req_height != -1) {
      w = im->im_req_height * ifv_ctx->width / ifv_ctx->height;
      h = im->im_req_height;
    } else {
      w = im->im_req_width;
      h = im->im_req_height;
    }

    pm = pixmap_create(w, h, PIXMAP_RGB24,
#ifdef __PPC__
		       16
#else
		       1
#endif
		       );



    if(pm == NULL) {
      ifv_close();
      snprintf(errbuf, errlen, "Out of memory");
      return NULL;
    }

    struct SwsContext *sws;
    sws = sws_getContext(ifv_ctx->width, ifv_ctx->height, ifv_ctx->pix_fmt,
			 w, h, PIX_FMT_RGB24, SWS_BILINEAR, NULL, NULL, NULL);
    if(sws == NULL) {
      ifv_close();
      snprintf(errbuf, errlen, "Scaling failed");
      pixmap_release(pm);
      return NULL;
    }
    
    uint8_t *ptr[4] = {0,0,0,0};
    int strides[4] = {0,0,0,0};

    ptr[0] = pm->pm_pixels;
    strides[0] = pm->pm_linesize;

    sws_scale(sws, (const uint8_t **)frame->data, frame->linesize,
	      0, ifv_ctx->height, ptr, strides);

    sws_freeContext(sws);

    if(pngencoder != NULL) {
      AVFrame *oframe = avcodec_alloc_frame();

      memset(oframe, 0, sizeof(*oframe));  // clear the output frame before wiring up the pixmap
      oframe->data[0] = pm->pm_pixels;
      oframe->linesize[0] = pm->pm_linesize;
      
      size_t outputsize = MAX(pm->pm_linesize * h, FF_MIN_BUFFER_SIZE);
      void *output = malloc(outputsize);
      pngencoder->width = w;
      pngencoder->height = h;
      pngencoder->pix_fmt = PIX_FMT_RGB24;

      r = avcodec_encode_video(pngencoder, output, outputsize, oframe);
      
      if(r > 0) 
	blobcache_put(cacheid, "videothumb", output, r, INT32_MAX,
		      NULL, mtime);
      free(output);
      av_free(oframe);
    }
    break;
  }

  av_free(frame);
  if(pm == NULL)
    snprintf(errbuf, errlen, "Frame not found (scanned %d)", 
	     MAX_FRAME_SCAN - cnt);
  return pm;
}
Example #9
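/**
 * Variant that takes the capture time from the URL fragment (after '#')
 * and stores the PNG-encoded RGB24 thumbnail in the blob cache with a
 * fixed expiry.
 */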
static pixmap_t *
fa_image_from_video2(const char *url0, const image_meta_t *im, 
		     const char *cacheid)
{
  pixmap_t *pm = NULL;
  char *url = mystrdupa(url0);
  char *tim = strchr(url, '#');

  *tim++ = 0;

  if(ifv_url == NULL || strcmp(url, ifv_url)) {
    // Need to open
    int i;
    AVFormatContext *fctx;
    AVIOContext *avio;
    
    if((avio = fa_libav_open(url, 65536, NULL, 0, 0)) == NULL)
      return NULL;

    if((fctx = fa_libav_open_format(avio, url, NULL, 0, NULL)) == NULL) {
      fa_libav_close(avio);
      return NULL;
    }

    if(!strcmp(fctx->iformat->name, "avi"))
      fctx->flags |= AVFMT_FLAG_GENPTS;

    AVCodecContext *ctx = NULL;
    for(i = 0; i < fctx->nb_streams; i++) {
      if(fctx->streams[i]->codec != NULL && 
	 fctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
	ctx = fctx->streams[i]->codec;
	break;
      }
    }
    if(ctx == NULL) {
      fa_libav_close_format(fctx);
      return NULL;
    }

    AVCodec *codec = avcodec_find_decoder(ctx->codec_id);
    if(codec == NULL) {
      fa_libav_close_format(fctx);
      return NULL;
    }

    if(avcodec_open(ctx, codec) < 0) {
      fa_libav_close_format(fctx);
      return NULL;
    }

    ifv_close();

    ifv_stream = i;
    ifv_url = strdup(url);
    ifv_fctx = fctx;
    ifv_ctx = ctx;
  }

  AVPacket pkt;
  AVFrame *frame = avcodec_alloc_frame();
  int got_pic;


  int secs = atoi(tim);

  AVStream *st = ifv_fctx->streams[ifv_stream];
  int64_t ts = av_rescale(secs, st->time_base.den, st->time_base.num);

  if(av_seek_frame(ifv_fctx, ifv_stream, ts, AVSEEK_FLAG_BACKWARD) < 0) {
    ifv_close();
    return NULL;
  }
  
  avcodec_flush_buffers(ifv_ctx);


  
  int cnt = 500;
  while(1) {
    int r;

    r = av_read_frame(ifv_fctx, &pkt);
    
    if(r == AVERROR(EAGAIN))
      continue;
    
    if(r == AVERROR_EOF)
      break;

    if(r != 0) {
      ifv_close();
      break;
    }

    if(pkt.stream_index != ifv_stream) {
      av_free_packet(&pkt);
      continue;
    }
    cnt--;
    int want_pic = pkt.pts >= ts || cnt <= 0;

    ifv_ctx->skip_frame = want_pic ? AVDISCARD_DEFAULT : AVDISCARD_NONREF;
    
    avcodec_decode_video2(ifv_ctx, frame, &got_pic, &pkt);
    if(got_pic == 0 || !want_pic)
      continue;

    int w,h;

    if(im->req_width != -1 && im->req_height != -1) {
      w = im->req_width;
      h = im->req_height;
    } else if(im->req_width != -1) {
      w = im->req_width;
      h = im->req_width * ifv_ctx->height / ifv_ctx->width;

    } else if(im->req_height != -1) {
      w = im->req_height * ifv_ctx->width / ifv_ctx->height;
      h = im->req_height;
    } else {
      w = im->req_width;
      h = im->req_height;
    }

    pm = pixmap_create(w, h, PIX_FMT_RGB24);

    struct SwsContext *sws;
    sws = sws_getContext(ifv_ctx->width, ifv_ctx->height, ifv_ctx->pix_fmt,
			 w, h, PIX_FMT_RGB24, SWS_LANCZOS, NULL, NULL, NULL);
    if(sws == NULL) {
      ifv_close();
      return NULL;
    }
    
    uint8_t *ptr[4] = {0,0,0,0};
    int strides[4] = {0,0,0,0};

    ptr[0] = pm->pm_pixels;
    strides[0] = pm->pm_linesize;

    sws_scale(sws, (const uint8_t **)frame->data, frame->linesize,
	      0, ifv_ctx->height, ptr, strides);

    sws_freeContext(sws);

    if(pngencoder != NULL) {
      AVFrame *oframe = avcodec_alloc_frame();

      memset(oframe, 0, sizeof(*oframe));  // clear the output frame before wiring up the pixmap
      oframe->data[0] = pm->pm_pixels;
      oframe->linesize[0] = pm->pm_linesize;
      
      size_t outputsize = pm->pm_linesize * h;
      void *output = malloc(outputsize);
      pngencoder->width = w;
      pngencoder->height = h;
      pngencoder->pix_fmt = PIX_FMT_RGB24;

      r = avcodec_encode_video(pngencoder, output, outputsize, oframe);
    
      if(r > 0) 
	blobcache_put(cacheid, "videothumb", output, outputsize, 86400 * 5);
      free(output);
      av_free(oframe);
    }
    break;
  }

  av_free(frame);
  return pm;
}
Example #10
/**
 * Decode subtitles from LAVC
 */
static void
video_subtitles_lavc(media_pipe_t *mp, media_buf_t *mb,
		     AVCodecContext *ctx)
{
  AVSubtitle sub;
  int got_sub = 0, i, x, y;
  video_overlay_t *vo;

  AVPacket avpkt;
  av_init_packet(&avpkt);
  avpkt.data = mb->mb_data;
  avpkt.size = mb->mb_size;

  if(avcodec_decode_subtitle2(ctx, &sub, &got_sub, &avpkt) < 1 || !got_sub) 
    return;

  if(sub.num_rects == 0) {
    // Flush screen
    vo = calloc(1, sizeof(video_overlay_t));
    vo->vo_type = VO_TIMED_FLUSH;
    vo->vo_start = mb->mb_pts + sub.start_display_time * 1000;
    video_overlay_enqueue(mp, vo);
  } else {

    for(i = 0; i < sub.num_rects; i++) {
      AVSubtitleRect *r = sub.rects[i];

      switch(r->type) {

      case SUBTITLE_BITMAP:
	vo = calloc(1, sizeof(video_overlay_t));

	vo->vo_start = mb->mb_pts + sub.start_display_time * 1000;
	vo->vo_stop  = mb->mb_pts + sub.end_display_time * 1000;
        vo->vo_canvas_width  = ctx->width;
        vo->vo_canvas_height = ctx->height;

	vo->vo_x = r->x;
	vo->vo_y = r->y;

	vo->vo_pixmap = pixmap_create(r->w, r->h, PIXMAP_BGR32, 0);

	if(vo->vo_pixmap == NULL) {
	  free(vo);
	  break;
	}

	const uint8_t *src = r->pict.data[0];
	const uint32_t *clut = (uint32_t *)r->pict.data[1];
      
	for(y = 0; y < r->h; y++) {
	  uint32_t *dst = (uint32_t *)(vo->vo_pixmap->pm_pixels + 
				       y * vo->vo_pixmap->pm_linesize);
	  for(x = 0; x < r->w; x++)
	    *dst++ = clut[src[x]];

	  src += r->pict.linesize[0];
	}
	video_overlay_enqueue(mp, vo);
	break;

      case SUBTITLE_ASS:
	sub_ass_render(mp, r->ass,
		       ctx->subtitle_header, ctx->subtitle_header_size,
		       mb->mb_font_context);
	break;

      default:
	break;
      }
    }
  }
  avsubtitle_free(&sub);
}