static GdkPixbuf *
impl_load_pixbuf_file (const char     *uri,
                       int             available_width,
                       int             available_height,
                       GError        **error)
{
  GdkPixbuf *pixbuf = NULL;
  GFile *file;
  char *contents = NULL;
  gsize size;

  if (g_str_has_prefix (uri, "data:"))
    return decode_image (uri);

  file = g_file_new_for_uri (uri);
  if (g_file_load_contents (file, NULL, &contents, &size, NULL, error))
    {
      pixbuf = impl_load_pixbuf_data ((const guchar *) contents, size,
                                      available_width, available_height,
                                      error);
    }

  g_object_unref (file);
  g_free (contents);

  return pixbuf;
}
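
A minimal usage sketch for the loader above, assuming a GLib/GTK program; the caller, the URI, and the 256x256 target size are illustrative, not part of the original snippet:

/* Hypothetical caller: load an image scaled to fit within 256x256. */
static void
example_usage (void)
{
  GError *error = NULL;
  GdkPixbuf *pixbuf = impl_load_pixbuf_file ("file:///tmp/icon.png",
                                             256, 256, &error);

  if (!pixbuf)
    {
      g_warning ("Failed to load image: %s",
                 error ? error->message : "unknown error");
      g_clear_error (&error);
      return;
    }

  /* ... use the pixbuf ... */
  g_object_unref (pixbuf);
}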
Example #2
// Make sure at least 1 filtered image is available.
// returns VD_* code
// A return value of VD_PROGRESS doesn't necessarily output a frame, but makes
// the promise that calling this function again will eventually do something.
static int video_decode_and_filter(struct MPContext *mpctx)
{
    struct dec_video *d_video = mpctx->d_video;
    struct vf_chain *vf = d_video->vfilter;

    if (vf->initialized < 0)
        return VD_ERROR;

    // There is already a filtered frame available.
    if (vf_output_frame(vf, false) > 0)
        return VD_PROGRESS;

    // Decoder output is different from filter input?
    bool need_vf_reconfig = !vf->input_params.imgfmt || vf->initialized < 1 ||
        !mp_image_params_equal(&d_video->decoder_output, &vf->input_params);

    // (If imgfmt==0, nothing was decoded yet, and the format is unknown.)
    if (need_vf_reconfig && d_video->decoder_output.imgfmt) {
        // Drain the filter chain.
        if (vf_output_frame(vf, true) > 0)
            return VD_PROGRESS;

        // The filter chain is drained; execute the filter format change.
        filter_reconfig(mpctx, false);
        if (vf->initialized == 0)
            return VD_PROGRESS; // hw decoding fallback; try again
        if (vf->initialized < 1)
            return VD_ERROR;
        init_filter_params(mpctx);
        return VD_PROGRESS;
    }

    // If something was decoded, and the filter chain is ready, filter it.
    if (!need_vf_reconfig && d_video->waiting_decoded_mpi) {
        vf_filter_frame(vf, d_video->waiting_decoded_mpi);
        d_video->waiting_decoded_mpi = NULL;
        return VD_PROGRESS;
    }

    if (!d_video->waiting_decoded_mpi) {
        // Decode a new image, or at least feed the decoder a packet.
        int r = decode_image(mpctx);
        if (r == VD_WAIT)
            return r;
        if (d_video->waiting_decoded_mpi)
            d_video->decoder_output = d_video->waiting_decoded_mpi->params;
        if (!d_video->waiting_decoded_mpi && (r == VD_EOF || r < 0)) {
            if (vf_output_frame(vf, true) > 0)
                return VD_PROGRESS;
            return VD_EOF; // true EOF
        }
    }

    // Image will be filtered on the next iteration.
    return VD_PROGRESS;
}
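
A sketch of how a caller might drive this function, given the VD_* contract described in the comment above (VD_PROGRESS means "call again"); the loop is illustrative, not mpv's actual playloop:

/* Illustrative driver: pump the decode/filter state machine until it
 * reports EOF, an error, or asks the caller to wait for more input. */
static int pump_video(struct MPContext *mpctx)
{
    for (;;) {
        int r = video_decode_and_filter(mpctx);
        if (r == VD_PROGRESS)
            continue;   /* made progress; calling again will do more */
        return r;       /* VD_EOF, VD_WAIT, or an error (< 0) */
    }
}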
Example #3
	TargaLoader::TargaLoader(const IODevicePtr &iodevice, bool srgb)
		: file(iodevice), srgb(srgb)
	{
		read_header();
		read_image_id();
		read_color_map();
		read_image_data();
		decode_palette();
		decode_image();
	}
Example #4
	PNGLoader::PNGLoader(IODevice iodevice, bool force_srgb)
		: file(iodevice), force_srgb(force_srgb), scanline(nullptr), prev_scanline(nullptr), scanline_4ub(nullptr), scanline_4us(nullptr), palette(nullptr)
	{
		read_magic();
		read_chunks();
		decode_header();
		decode_palette();
		decode_colorkey();
		decode_image();
	}
Example #5
bool Images::load_texture( ClutterTexture * texture, const char * filename )
{
    TPImage * image = decode_image( filename );

    if ( ! image )
    {
        return false;
    }

    load_texture( texture, image, 0, 0, image->width, image->height );

    destroy_image( image );

    return true;
}
Example #6
bool Images::load_texture( ClutterTexture * texture, gpointer data, gsize size, const char * content_type )
{
    TPImage * image = decode_image( data, size, content_type );

    if ( ! image )
    {
        return false;
    }

    load_texture( texture, image, 0, 0, image->width, image->height );

    destroy_image( image );

    return true;
}
Example #7
// Make sure at least 1 filtered image is available, decode new video if needed.
// returns VD_* code
// A return value of VD_PROGRESS doesn't necessarily output a frame, but makes
// the promise that calling this function again will eventually do something.
static int video_decode_and_filter(struct MPContext *mpctx)
{
    struct dec_video *d_video = mpctx->d_video;

    int r = video_filter(mpctx, false);
    if (r < 0)
        return r;

    if (!d_video->waiting_decoded_mpi) {
        // Decode a new image, or at least feed the decoder a packet.
        r = decode_image(mpctx);
        if (r == VD_WAIT)
            return r;
        if (d_video->waiting_decoded_mpi)
            d_video->decoder_output = d_video->waiting_decoded_mpi->params;
    }

    bool eof = !d_video->waiting_decoded_mpi && (r == VD_EOF || r < 0);
    return video_filter(mpctx, eof);
}
Example #8
int reinit_video_chain(struct MPContext *mpctx)
{
    struct MPOpts *opts = mpctx->opts;
    assert(!mpctx->d_video);
    struct track *track = mpctx->current_track[0][STREAM_VIDEO];
    struct sh_stream *sh = track ? track->stream : NULL;
    if (!sh)
        goto no_video;

    MP_VERBOSE(mpctx, "[V] fourcc:0x%X  size:%dx%d  fps:%5.3f\n",
               sh->format,
               sh->video->disp_w, sh->video->disp_h,
               sh->video->fps);

    //================== Init VIDEO (codec & libvo) ==========================
    if (!opts->fixed_vo || !mpctx->video_out) {
        struct vo_extra ex = {
            .input_ctx = mpctx->input,
            .osd = mpctx->osd,
            .encode_lavc_ctx = mpctx->encode_lavc_ctx,
            .opengl_cb_context = mpctx->gl_cb_ctx,
        };
        mpctx->video_out = init_best_video_out(mpctx->global, &ex);
        if (!mpctx->video_out) {
            MP_FATAL(mpctx, "Error opening/initializing "
                    "the selected video_out (-vo) device.\n");
            mpctx->error_playing = MPV_ERROR_VO_INIT_FAILED;
            goto err_out;
        }
        mpctx->mouse_cursor_visible = true;
    }

    update_window_title(mpctx, true);

    struct dec_video *d_video = talloc_zero(NULL, struct dec_video);
    mpctx->d_video = d_video;
    d_video->global = mpctx->global;
    d_video->log = mp_log_new(d_video, mpctx->log, "!vd");
    d_video->opts = mpctx->opts;
    d_video->header = sh;
    d_video->fps = sh->video->fps;
    d_video->vo = mpctx->video_out;

    if (opts->force_fps) {
        d_video->fps = opts->force_fps;
        MP_INFO(mpctx, "FPS forced to %5.3f.\n", d_video->fps);
        MP_INFO(mpctx, "Use --no-correct-pts to force FPS based timing.\n");
    }
    update_fps(mpctx);

    vo_control(mpctx->video_out, VOCTRL_GET_HWDEC_INFO, &d_video->hwdec_info);

    recreate_video_filters(mpctx);

    if (!video_init_best_codec(d_video, opts->video_decoders))
        goto err_out;

    bool saver_state = opts->pause || !opts->stop_screensaver;
    vo_control(mpctx->video_out, saver_state ? VOCTRL_RESTORE_SCREENSAVER
                                             : VOCTRL_KILL_SCREENSAVER, NULL);

    vo_set_paused(mpctx->video_out, mpctx->paused);

    mpctx->sync_audio_to_video = !sh->attached_picture;
    mpctx->vo_pts_history_seek_ts++;

    // If we switch on video again, ensure audio position matches up.
    if (mpctx->d_audio)
        mpctx->audio_status = STATUS_SYNCING;

    reset_video_state(mpctx);
    reset_subtitle_state(mpctx);

    return 1;

err_out:
no_video:
    uninit_video_chain(mpctx);
    if (track)
        error_on_track(mpctx, track);
    handle_force_window(mpctx, true);
    return 0;
}

// Try to refresh the video by doing a precise seek to the currently displayed
// frame. This can go wrong in all sorts of ways, so use sparingly.
void mp_force_video_refresh(struct MPContext *mpctx)
{
    struct MPOpts *opts = mpctx->opts;
    struct dec_video *d_video = mpctx->d_video;

    if (!d_video || !d_video->decoder_output.imgfmt)
        return;

    // If not paused, the next frame should come soon enough.
    if (opts->pause && mpctx->last_vo_pts != MP_NOPTS_VALUE)
        queue_seek(mpctx, MPSEEK_ABSOLUTE, mpctx->last_vo_pts, 2, true);
}

static int check_framedrop(struct MPContext *mpctx)
{
    struct MPOpts *opts = mpctx->opts;
    // check for frame-drop:
    if (mpctx->video_status == STATUS_PLAYING && !mpctx->paused &&
        mpctx->audio_status == STATUS_PLAYING && !ao_untimed(mpctx->ao))
    {
        float fps = mpctx->d_video->fps;
        double frame_time = fps > 0 ? 1.0 / fps : 0;
        // We should avoid dropping too many frames in sequence unless we
        // are too late, and we allow 100 ms of A-V delay here:
        if (mpctx->last_av_difference - 0.100 > mpctx->dropped_frames * frame_time)
            return !!(opts->frame_dropping & 2);
    }
    return 0;
}
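
To make the threshold above concrete, a standalone sketch with assumed numbers (25 fps, three frames already dropped, video 250 ms late):

#include <stdio.h>

int main(void)
{
    double frame_time         = 1.0 / 25.0; /* 0.04 s per frame */
    int    dropped_frames     = 3;
    double last_av_difference = 0.250;      /* video is 250 ms late */

    /* Same test as check_framedrop(): allow 100 ms of A-V delay plus
     * one frame_time per frame already dropped in sequence. */
    if (last_av_difference - 0.100 > dropped_frames * frame_time)
        printf("drop another frame\n");     /* 0.150 > 0.120 -> drop */
    return 0;
}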

// Read a packet, store decoded image into d_video->waiting_decoded_mpi
// returns VD_* code
static int decode_image(struct MPContext *mpctx)
{
    struct dec_video *d_video = mpctx->d_video;

    if (d_video->header->attached_picture) {
        d_video->waiting_decoded_mpi =
                    video_decode(d_video, d_video->header->attached_picture, 0);
        return VD_EOF;
    }

    struct demux_packet *pkt;
    if (demux_read_packet_async(d_video->header, &pkt) == 0)
        return VD_WAIT;
    if (pkt && pkt->pts != MP_NOPTS_VALUE)
        pkt->pts += mpctx->video_offset;
    if ((pkt && pkt->pts >= mpctx->hrseek_pts - .005) ||
        d_video->has_broken_packet_pts ||
        !mpctx->opts->hr_seek_framedrop)
    {
        mpctx->hrseek_framedrop = false;
    }
    bool hrseek = mpctx->hrseek_active && mpctx->video_status == STATUS_SYNCING;
    int framedrop_type = hrseek && mpctx->hrseek_framedrop ?
                         2 : check_framedrop(mpctx);
    d_video->waiting_decoded_mpi =
        video_decode(d_video, pkt, framedrop_type);
    bool had_packet = !!pkt;
    talloc_free(pkt);

    if (had_packet && !d_video->waiting_decoded_mpi &&
        mpctx->video_status == STATUS_PLAYING)
    {
        mpctx->dropped_frames_total++;
        mpctx->dropped_frames++;
    }

    return had_packet ? VD_PROGRESS : VD_EOF;
}


// Called after video reinit. This can be generally used to try to insert more
// filters using the filter chain edit functionality in command.c.
static void init_filter_params(struct MPContext *mpctx)
{
    struct MPOpts *opts = mpctx->opts;

    // Note that the filter chain is already initialized. This code might
    // recreate the chain a second time, which is not very elegant, but allows
    // us to test whether enabling deinterlacing works with the current video
    // format and other filters.
    if (opts->deinterlace >= 0)
        mp_property_do("deinterlace", M_PROPERTY_SET, &opts->deinterlace, mpctx);
}

// Feed newly decoded frames to the filter, take care of format changes.
// If eof=true, drain the filter chain, and return VD_EOF if empty.
static int video_filter(struct MPContext *mpctx, bool eof)
{
    struct dec_video *d_video = mpctx->d_video;
    struct vf_chain *vf = d_video->vfilter;

    if (vf->initialized < 0)
        return VD_ERROR;

    // There is already a filtered frame available.
    // If vf_needs_input() returns > 0, the filter wants input anyway.
    if (vf_output_frame(vf, eof) > 0 && vf_needs_input(vf) < 1)
        return VD_PROGRESS;

    // Decoder output is different from filter input?
    bool need_vf_reconfig = !vf->input_params.imgfmt || vf->initialized < 1 ||
        !mp_image_params_equal(&d_video->decoder_output, &vf->input_params);

    // (If imgfmt==0, nothing was decoded yet, and the format is unknown.)
    if (need_vf_reconfig && d_video->decoder_output.imgfmt) {
        // Drain the filter chain.
        if (vf_output_frame(vf, true) > 0)
            return VD_PROGRESS;

        // The filter chain is drained; execute the filter format change.
        filter_reconfig(mpctx, false);
        if (vf->initialized == 0)
            return VD_PROGRESS; // hw decoding fallback; try again
        if (vf->initialized < 1)
            return VD_ERROR;
        init_filter_params(mpctx);
        return VD_RECONFIG;
    }

    // If something was decoded, and the filter chain is ready, filter it.
    if (!need_vf_reconfig && d_video->waiting_decoded_mpi) {
        vf_filter_frame(vf, d_video->waiting_decoded_mpi);
        d_video->waiting_decoded_mpi = NULL;
        return VD_PROGRESS;
    }

    return eof ? VD_EOF : VD_PROGRESS;
}
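
A sketch of the eof=true drain path described in the comment above; the helper is illustrative (and treats VD_RECONFIG as "done" for brevity):

/* Illustrative EOF drain: once decoding has finished, keep flushing
 * the filter chain until it reports that it is empty. */
static int drain_filters(struct MPContext *mpctx)
{
    int r;
    do {
        r = video_filter(mpctx, true); /* VD_PROGRESS while frames remain */
    } while (r == VD_PROGRESS);
    return r;                          /* VD_EOF, VD_RECONFIG, or error */
}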

// Make sure at least 1 filtered image is available, decode new video if needed.
// returns VD_* code
// A return value of VD_PROGRESS doesn't necessarily output a frame, but makes
// the promise that calling this function again will eventually do something.
static int video_decode_and_filter(struct MPContext *mpctx)
{
    struct dec_video *d_video = mpctx->d_video;

    int r = video_filter(mpctx, false);
    if (r < 0)
        return r;

    if (!d_video->waiting_decoded_mpi) {
        // Decode a new image, or at least feed the decoder a packet.
        r = decode_image(mpctx);
        if (r == VD_WAIT)
            return r;
        if (d_video->waiting_decoded_mpi)
            d_video->decoder_output = d_video->waiting_decoded_mpi->params;
    }

    bool eof = !d_video->waiting_decoded_mpi && (r == VD_EOF || r < 0);
    r = video_filter(mpctx, eof);
    if (r == VD_RECONFIG) // retry feeding decoded image
        r = video_filter(mpctx, eof);
    return r;
}

static int video_feed_async_filter(struct MPContext *mpctx)
{
    struct dec_video *d_video = mpctx->d_video;
    struct vf_chain *vf = d_video->vfilter;

    if (vf->initialized < 0)
        return VD_ERROR;

    if (vf_needs_input(vf) < 1)
        return 0;
    mpctx->sleeptime = 0; // retry until done
    return video_decode_and_filter(mpctx);
}

/* Modify video timing to match the audio timeline. There are two main
 * reasons this is needed. First, video and audio can start from different
 * positions at beginning of file or after a seek (MPlayer starts both
 * immediately even if they have different pts). Second, the file can have
 * audio timestamps that are inconsistent with the duration of the audio
 * packets, for example two consecutive timestamp values differing by
 * one second but only a packet with enough samples for half a second
 * of playback between them.
 */
static void adjust_sync(struct MPContext *mpctx, double v_pts, double frame_time)
{
    struct MPOpts *opts = mpctx->opts;

    if (mpctx->audio_status != STATUS_PLAYING)
        return;

    double a_pts = written_audio_pts(mpctx) + mpctx->audio_delay - mpctx->delay;
    double av_delay = a_pts - v_pts;

    double change = av_delay * 0.1;
    double max_change = opts->default_max_pts_correction >= 0 ?
                        opts->default_max_pts_correction : frame_time * 0.1;
    if (change < -max_change)
        change = -max_change;
    else if (change > max_change)
        change = max_change;
    mpctx->delay += change;
    mpctx->total_avsync_change += change;
}
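
A standalone sketch of the correction math above with assumed numbers: a 200 ms A-V delay at 25 fps requests a 20 ms correction, but is clamped to 10% of frame_time when no explicit maximum is configured:

#include <stdio.h>

int main(void)
{
    double av_delay   = 0.200;            /* audio 200 ms ahead of video */
    double frame_time = 1.0 / 25.0;       /* 0.04 s per frame */

    double change     = av_delay * 0.1;   /* 0.020 s requested */
    double max_change = frame_time * 0.1; /* 0.004 s allowed per call */

    if (change < -max_change)
        change = -max_change;
    else if (change > max_change)
        change = max_change;

    printf("apply %.3f s of correction\n", change); /* prints 0.004 */
    return 0;
}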

// Enough video filtered already to push one frame to the VO?
static bool have_new_frame(struct MPContext *mpctx)
{
    bool need_2nd = !!(mpctx->opts->frame_dropping & 1) // we need the duration
        && mpctx->video_pts != MP_NOPTS_VALUE; // ...except for the 1st frame

    return mpctx->next_frame[0] && (!need_2nd || mpctx->next_frame[1]);
}
Example #9
image_t *
get_next_frame (bool_t store_wfa, int enlarge_factor,
		int smoothing, const char *reference_frame,
		format_e format, video_t *video, dectimer_t *timer,
		wfa_t *orig_wfa, bitfile_t *input)
/*
 *  Get next frame of the WFA 'video' from stream 'input'.
 *  'orig_wfa' is the constant part of the WFA used by all frames.
 *  Depending on the values of 'enlarge_factor' and 'smoothing', enlarge
 *  and smooth the image, respectively.
 *  If 'store_wfa' is TRUE, then store WFA structure of reference frames
 *  (used by analysis tool xwfa).
 *  If 'reference_frame' is not NULL, then load image 'reference_frame'
 *  from disk.
 *  'format' gives the color format to be used (either 4:2:0 or 4:4:4).
 *  If 'timer' is not NULL, then accumulate running time statistics. 
 *
 *  Return value:
 *	pointer to decoded frame
 *
 *  Side effects:
 *	'video' and 'timer' struct are modified.
 */
{
   image_t *frame 			  = NULL; /* current frame */
   image_t *sframe 			  = NULL; /* current smoothed frame */
   bool_t   current_frame_is_future_frame = NO;

   if (video->future_display == video->display)	 
   {
      /*
       *  Future frame is already computed since it has been used
       *  as reference frame. So just return the stored frame.
       */
      if (video->frame) /* discard current frame */
	 free_image (video->frame);
      video->frame  = video->future;
      video->future = NULL;

      if (video->sframe) /* discard current (smoothed) frame */
	 free_image (video->sframe);
      video->sframe  = video->sfuture;
      video->sfuture = NULL;

      if (store_wfa)
	 copy_wfa (video->wfa, video->wfa_future);

      video->display++;

      if (!store_wfa)
	 video->wfa = NULL;
   }
   else
   {
      do				/* compute next frame(s) */
      {
	 unsigned      frame_number;	/* current frame number */
	 clock_t       ptimer;
	 unsigned int  stop_timer [3];
	 wfa_t	      *tmp_wfa = NULL;
	 
	 if (!store_wfa)
	    video->wfa = orig_wfa;
	 else
	 {
	    tmp_wfa = alloc_wfa (NO);
	    copy_wfa (tmp_wfa, video->wfa);
	    copy_wfa (video->wfa, orig_wfa);
	 }
   
	 /*
	  *  First step: read WFA from disk
	  */
	 prg_timer (&ptimer, START);
	 frame_number = read_next_wfa (video->wfa, input);
	 stop_timer [0] = prg_timer (&ptimer, STOP);
	 if (timer)
	 {
	    timer->input [video->wfa->frame_type] += stop_timer [0];
	    timer->frames [video->wfa->frame_type]++;
	 }
      
	 /*
	  *  Read reference frame from disk if required
	  *  (i.e., 1st frame is of type B or P)
	  */
	 if (video->display == 0 && video->wfa->frame_type != I_FRAME)
	 {
	    if (!reference_frame)
	       error ("First frame is %c-frame but no "
		      "reference frame is given.",
		      video->wfa->frame_type == B_FRAME ? 'B' : 'P');

	    video->frame  = read_image_file (reference_frame);
	    video->sframe = NULL;
	 }
   
	 /*
	  *  Depending on current frame type update past and future frames
	  */
	 if (video->wfa->frame_type == I_FRAME)
	 {
	    if (video->past)		/* discard past frame */
	       free_image (video->past);
	    video->past = NULL;
	    if (video->future)		/* discard future frame */
	       free_image (video->future);
	    video->future = NULL;
	    if (video->sfuture)		/* discard (smoothed) future frame */
	       free_image (video->sfuture);
	    video->sfuture = NULL;
	    if (video->frame)		/* discard current frame */
	       free_image (video->frame);
	    video->frame = NULL;
	    if (video->sframe)		/* discard current (smoothed) frame */
	       free_image (video->sframe);
	    video->sframe = NULL;
	 }
	 else if (video->wfa->frame_type == P_FRAME)
	 {
	    if (video->past)		/* discard past frame */
	       free_image (video->past);
	    video->past = video->frame;	/* past <- current frame */
	    video->frame = NULL;
	    if (video->sframe)		/* discard current (smoothed) frame */
	       free_image (video->sframe);
	    video->sframe = NULL;
	    if (store_wfa)
	       copy_wfa (video->wfa_past, tmp_wfa);
	    if (video->future)		/* discard future frame */
	       free_image (video->future);
	    video->future = NULL;
	    if (video->sfuture)		/* discard (smoothed) future frame */
	       free_image (video->sfuture);
	    video->sfuture = NULL;
	 }
	 else				/* B_FRAME */
	 {
	    if (current_frame_is_future_frame)
	    {
	       if (video->future)	/* discard future frame */
		  free_image (video->future);
	       video->future = frame;	/* future <- current frame */
	       if (video->sfuture)	/* discard (smoothed) future frame */
		  free_image (video->sfuture);
	       video->sfuture = sframe;	/* future <- current (smoothed) */
	       if (store_wfa)
		  copy_wfa (video->wfa_future, tmp_wfa);
	       if (video->frame)	/* discard current frame */
		  free_image (video->frame);
	       video->frame = NULL;
	       if (video->sframe)	/* discard current (smoothed) frame */
		  free_image (video->sframe);
	       video->sframe = NULL;
	       frame  = NULL;
	       sframe = NULL;
	    }
	    else
	    {
	       if (video->wfa->wfainfo->B_as_past_ref == YES)
	       {
		  if (video->past)	/* discard past frame */
		     free_image (video->past);
		  video->past  = video->frame; /* past <- current frame */
		  video->frame = NULL;
		  if (video->sframe)	/* discard current (smoothed) frame */
		     free_image (video->sframe);
		  video->sframe = NULL;
		  if (store_wfa)
		     copy_wfa (video->wfa_past, tmp_wfa);
	       }
	       else
	       {
		  if (video->frame)	/* discard current */
		     free_image (video->frame);
		  video->frame = NULL;
		  if (video->sframe)	/* discard current (smoothed) frame */
		     free_image (video->sframe);
		  video->sframe = NULL;
	       }
	    }
	 }
	 if (tmp_wfa)
	    free_wfa (tmp_wfa);
	 
	 current_frame_is_future_frame = NO;
	 /*
	  *  Second step: decode image
	  *  Optionally enlarge image if specified by option 'enlarge_factor'.
	  */
	 {
	    unsigned orig_width, orig_height;

	    stop_timer [0] = stop_timer [1] = stop_timer [2] = 0;
	 
	    enlarge_image (enlarge_factor, format,
			   (video->wfa->wfainfo->color
			    && format == FORMAT_4_2_0)
			   ? video->wfa->tree [video->wfa->tree [video->wfa->root_state][0]][0]
			   : -1,
			   video->wfa);

	    if (enlarge_factor > 0)
	    {
	       orig_width  = video->wfa->wfainfo->width  << enlarge_factor;
	       orig_height = video->wfa->wfainfo->height << enlarge_factor; 
	    }
	    else
	    { 
	       orig_width  = video->wfa->wfainfo->width  >> - enlarge_factor;
	       orig_height = video->wfa->wfainfo->height >> - enlarge_factor;
	       if (orig_width & 1)
		  orig_width++;
	       if (orig_height & 1)
		  orig_height++;
	    }
	 
	    frame = decode_image (orig_width, orig_height, format,
				  timer != NULL ? stop_timer : NULL,
				  video->wfa);
	    if (timer)
	    {
	       timer->preprocessing [video->wfa->frame_type] += stop_timer [0];
	       timer->decoder [video->wfa->frame_type]       += stop_timer [1];
	       timer->cleanup [video->wfa->frame_type]       += stop_timer [2];
	    }
	 }

	 /*
	  *  Third step: restore motion compensation
	  */
	 if (video->wfa->frame_type != I_FRAME)
	 {
	    prg_timer (&ptimer, START);
	    restore_mc (enlarge_factor, frame, video->past, video->future,
			video->wfa);
	    stop_timer [0] = prg_timer (&ptimer, STOP);
	    if (timer)
	       timer->motion [video->wfa->frame_type] += stop_timer [0];
	 }

	 /*
	  *  Fourth step: smooth image along partitioning borders
	  */
	 prg_timer (&ptimer, START);
	 if (smoothing < 0)	/* smoothing not changed by user */
	    smoothing = video->wfa->wfainfo->smoothing;
	 if (smoothing > 0 && smoothing <= 100)
	 {
	    sframe = clone_image (frame);
	    smooth_image (smoothing, video->wfa, sframe);
	 }
	 else
	    sframe = NULL;
	 
	 stop_timer [0] = prg_timer (&ptimer, STOP);
	 if (timer)
	    timer->smooth [video->wfa->frame_type] += stop_timer [0];

	 if (frame_number == video->display)
	 {
	    video->display++;
	    video->frame  = frame;
	    video->sframe = sframe;
	    frame         = NULL;
	    sframe        = NULL;
	 }
	 else if (frame_number > video->display)
	 {
	    video->future_display 	  = frame_number;
	    current_frame_is_future_frame = YES;
	 }
      
	 if (!store_wfa)
	    remove_states (video->wfa->basis_states, video->wfa);
      } while (!video->frame);

      if (!store_wfa)
	 video->wfa = NULL;
   }
   
   return video->sframe ? video->sframe : video->frame;
}
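
A hypothetical decode loop built on the function above; 'total_frames' and 'show_frame' are illustrative stand-ins, and the setup of 'video', 'orig_wfa' and 'input' is elided:

/* Decode and display every frame of a WFA video, using the stream's
 * default smoothing (-1) and no enlargement or timing statistics. */
void
play_wfa_video (unsigned total_frames, video_t *video,
		wfa_t *orig_wfa, bitfile_t *input)
{
   unsigned n;

   for (n = 0; n < total_frames; n++)
   {
      image_t *frame = get_next_frame (NO, 0, -1, NULL, FORMAT_4_2_0,
				       video, NULL, orig_wfa, input);
      show_frame (frame);	/* hypothetical; frame is owned by 'video' */
   }
}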
Example #10
static void Curl_EndDownload(downloadinfo *di, CurlStatus status, CURLcode error, const char *content_type_)
{
	char content_type[64];
	qboolean ok = false;
	if(!curl_dll)
		return;
	switch(status)
	{
		case CURL_DOWNLOAD_SUCCESS:
			ok = true;
			di->callback(CURLCBSTATUS_OK, di->bytes_received, di->buffer, di->callback_data);
			break;
		case CURL_DOWNLOAD_FAILED:
			di->callback(CURLCBSTATUS_FAILED, di->bytes_received, di->buffer, di->callback_data);
			break;
		case CURL_DOWNLOAD_ABORTED:
			di->callback(CURLCBSTATUS_ABORTED, di->bytes_received, di->buffer, di->callback_data);
			break;
		case CURL_DOWNLOAD_SERVERERROR:
			// reopen to enforce it to have zero bytes again
			if(di->stream)
			{
				FS_Close(di->stream);
				di->stream = FS_OpenRealFile(di->filename, "wb", false);
			}

			if(di->callback)
				di->callback(error ? (int) error : CURLCBSTATUS_SERVERERROR, di->bytes_received, di->buffer, di->callback_data);
			break;
		default:
			if(di->callback)
				di->callback(CURLCBSTATUS_UNKNOWN, di->bytes_received, di->buffer, di->callback_data);
			break;
	}
	if(content_type_)
		strlcpy(content_type, content_type_, sizeof(content_type));
	else
		*content_type = 0;

	if(di->curle)
	{
		qcurl_multi_remove_handle(curlm, di->curle);
		qcurl_easy_cleanup(di->curle);
		if(di->slist)
			qcurl_slist_free_all(di->slist);
	}

	if(!di->callback && ok && !di->bytes_received)
	{
		Con_Printf("ERROR: empty file\n");
		ok = false;
	}

	if(di->stream)
		FS_Close(di->stream);

#define CLEAR_AND_RETRY() \
	do \
	{ \
		di->stream = FS_OpenRealFile(di->filename, "wb", false); \
		FS_Close(di->stream); \
		if(di->startpos && !di->callback) \
		{ \
			Curl_Begin(di->url, di->extraheaders, di->maxspeed, di->filename, di->loadtype, di->forthismap, di->post_content_type, di->postbuf, di->postbufsize, NULL, 0, NULL, NULL); \
			di->forthismap = false; \
		} \
	} \
	while(0)

	if(ok && di->loadtype == LOADTYPE_PAK)
	{
		ok = FS_AddPack(di->filename, NULL, true);
		if(!ok)
			CLEAR_AND_RETRY();
	}
	else if(ok && di->loadtype == LOADTYPE_CACHEPIC)
	{
		const char *p;
		unsigned char *pixels = NULL;

		p = di->filename;
#ifdef WE_ARE_EVIL
		if(!strncmp(p, "dlcache/", 8))
			p += 8;
#endif

		pixels = decode_image(di, content_type);
		if(pixels)
			Draw_NewPic(p, image_width, image_height, true, pixels);
		else
			CLEAR_AND_RETRY();
	}
	else if(ok && di->loadtype == LOADTYPE_SKINFRAME)
	{
		const char *p;
		unsigned char *pixels = NULL;

		p = di->filename;
#ifdef WE_ARE_EVIL
		if(!strncmp(p, "dlcache/", 8))
			p += 8;
#endif

		pixels = decode_image(di, content_type);
		if(pixels)
			R_SkinFrame_LoadInternalBGRA(p, TEXF_FORCE_RELOAD | TEXF_MIPMAP | TEXF_ALPHA, pixels, image_width, image_height, false); // TODO what sRGB argument to put here?
		else
			CLEAR_AND_RETRY();
	}

	if(di->prev)
		di->prev->next = di->next;
	else
		downloads = di->next;
	if(di->next)
		di->next->prev = di->prev;

	--numdownloads;
	if(di->forthismap)
	{
		if(ok)
			++numdownloads_success;
		else
			++numdownloads_fail;
	}
	Z_Free(di);
}
Example #11
esp_err_t pretty_effect_init() 
{
    return decode_image(&pixels);
}
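
A hedged usage sketch: ESP-IDF examples typically call an init function like this once from app_main() and abort on failure; the wiring below is an assumption, not part of the original snippet:

#include "esp_err.h"

void app_main(void)
{
    /* Decode the embedded image into 'pixels' before first use. */
    ESP_ERROR_CHECK(pretty_effect_init());

    /* ... start rendering using the decoded pixel data ... */
}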