Example #1
static int32_t dt_control_run_job(dt_control_t *control)
{
  _dt_job_t *job = dt_control_schedule_job(control);

  if(!job) return -1;

  /* change state to running */
  dt_pthread_mutex_lock(&job->wait_mutex);
  if(dt_control_job_get_state(job) == DT_JOB_STATE_QUEUED)
  {
    dt_print(DT_DEBUG_CONTROL, "[run_job+] %02d %f ", DT_CTL_WORKER_RESERVED + dt_control_get_threadid(),
             dt_get_wtime());
    dt_control_job_print(job);
    dt_print(DT_DEBUG_CONTROL, "\n");

    dt_control_job_set_state(job, DT_JOB_STATE_RUNNING);

    /* execute job */
    job->result = job->execute(job);

    dt_control_job_set_state(job, DT_JOB_STATE_FINISHED);

    dt_print(DT_DEBUG_CONTROL, "[run_job-] %02d %f ", DT_CTL_WORKER_RESERVED + dt_control_get_threadid(),
             dt_get_wtime());
    dt_control_job_print(job);
    dt_print(DT_DEBUG_CONTROL, "\n");
  }

  /* free job */
  dt_pthread_mutex_unlock(&job->wait_mutex);
  dt_control_job_dispose(job);

  return 0;
}
Example #2
static void dt_control_job_execute(_dt_job_t *job)
{
  dt_print(DT_DEBUG_CONTROL, "[run_job+] %02d %f ", DT_CTL_WORKER_RESERVED + dt_control_get_threadid(),
           dt_get_wtime());
  dt_control_job_print(job);
  dt_print(DT_DEBUG_CONTROL, "\n");

  dt_control_job_set_state(job, DT_JOB_STATE_RUNNING);

  /* execute job */
  job->result = job->execute(job);

  dt_control_job_set_state(job, DT_JOB_STATE_FINISHED);

  dt_print(DT_DEBUG_CONTROL, "[run_job-] %02d %f ", DT_CTL_WORKER_RESERVED + dt_control_get_threadid(),
           dt_get_wtime());
  dt_control_job_print(job);
  dt_print(DT_DEBUG_CONTROL, "\n");
}
Example #3
static int32_t dt_control_run_job(dt_control_t *control)
{
  _dt_job_t *job = dt_control_schedule_job(control);

  if(!job) return -1;

  /* change state to running */
  dt_pthread_mutex_lock(&job->wait_mutex);
  if(dt_control_job_get_state(job) == DT_JOB_STATE_QUEUED)
  {
    dt_print(DT_DEBUG_CONTROL, "[run_job+] %02d %f ", DT_CTL_WORKER_RESERVED + dt_control_get_threadid(),
             dt_get_wtime());
    dt_control_job_print(job);
    dt_print(DT_DEBUG_CONTROL, "\n");

    dt_control_job_set_state(job, DT_JOB_STATE_RUNNING);

    /* execute job */
    job->result = job->execute(job);

    dt_control_job_set_state(job, DT_JOB_STATE_FINISHED);

    dt_print(DT_DEBUG_CONTROL, "[run_job-] %02d %f ", DT_CTL_WORKER_RESERVED + dt_control_get_threadid(),
             dt_get_wtime());
    dt_control_job_print(job);
    dt_print(DT_DEBUG_CONTROL, "\n");
  }

  dt_pthread_mutex_unlock(&job->wait_mutex);

  // remove the job from the scheduled job array (for job deduping)
  dt_pthread_mutex_lock(&control->queue_mutex);
  control->job[dt_control_get_threadid()] = NULL;
  if(job->queue == DT_JOB_QUEUE_USER_EXPORT) control->export_scheduled = FALSE;
  dt_pthread_mutex_unlock(&control->queue_mutex);

  // and free it
  dt_control_job_dispose(job);

  return 0;
}
Example #4
static int32_t dt_control_run_job(dt_control_t *control)
{
  _dt_job_t *job = dt_control_schedule_job(control);

  if(!job) return -1;

  /* change state to running */
  dt_pthread_mutex_lock(&job->wait_mutex);
  if(dt_control_job_get_state(job) == DT_JOB_STATE_QUEUED)
    dt_control_job_execute(job);

  dt_pthread_mutex_unlock(&job->wait_mutex);

  // remove the job from the scheduled job array (for job deduping)
  dt_pthread_mutex_lock(&control->queue_mutex);
  control->job[dt_control_get_threadid()] = NULL;
  if(job->queue == DT_JOB_QUEUE_USER_EXPORT) control->export_scheduled = FALSE;
  dt_pthread_mutex_unlock(&control->queue_mutex);

  // and free it
  dt_control_job_dispose(job);

  return 0;
}
Example #5
int32_t dt_control_run_job(dt_control_t *s)
{
  dt_job_t *j=NULL,*bj=NULL;
  dt_pthread_mutex_lock(&s->queue_mutex);

  /* check if queue is empty */
  if(g_list_length(s->queue) == 0)
  {
    dt_pthread_mutex_unlock(&s->queue_mutex);
    return -1;
  }

  /* go through the queue and find one normal job and one background job
      that is up for execution. */
  time_t ts_now = time(NULL);
  GList *jobitem=g_list_first(s->queue);
  if(jobitem)
    do
    {
      dt_job_t *tj = jobitem->data;

      /* check if it's a scheduled job and is waiting to be executed */
      if(!bj && (tj->ts_execute > tj->ts_added) && tj->ts_execute <= ts_now)
        bj = tj;
      else if ((tj->ts_execute < tj->ts_added) && !j)
        j = tj;

    /* if we got both a normal job and a background job, we are finished */
      if(bj && j) break;

    }
    while ((jobitem = g_list_next(jobitem)));

  /* remove the found jobs from the queue */
  if (bj)
    s->queue = g_list_remove(s->queue, bj);

  if (j)
    s->queue = g_list_remove(s->queue, j);

  /* unlock the queue */
  dt_pthread_mutex_unlock(&s->queue_mutex);

  /* push background job on reserved background worker */
  if(bj)
  {
    dt_control_add_job_res(s,bj,DT_CTL_WORKER_7);
    g_free (bj);
  }
  /* don't continue if we don't have a job to execute */
  if(!j)
    return -1;

  /* change state to running */
  dt_pthread_mutex_lock (&j->wait_mutex);
  if (dt_control_job_get_state (j) == DT_JOB_STATE_QUEUED)
  {
    dt_print(DT_DEBUG_CONTROL, "[run_job+] %02d %f ",
             DT_CTL_WORKER_RESERVED+dt_control_get_threadid(), dt_get_wtime());
    dt_control_job_print(j);
    dt_print(DT_DEBUG_CONTROL, "\n");

    _control_job_set_state (j,DT_JOB_STATE_RUNNING);

    /* execute job */
    j->result = j->execute (j);

    _control_job_set_state (j,DT_JOB_STATE_FINISHED);

    dt_print(DT_DEBUG_CONTROL, "[run_job-] %02d %f ",
             DT_CTL_WORKER_RESERVED+dt_control_get_threadid(), dt_get_wtime());
    dt_control_job_print(j);
    dt_print(DT_DEBUG_CONTROL, "\n");

    /* free job */
    dt_pthread_mutex_unlock (&j->wait_mutex);
    g_free(j);
    j = NULL;
  }
  if(j) dt_pthread_mutex_unlock (&j->wait_mutex);

  return 0;
}
Example #6
void
dt_mipmap_cache_read_get(
  dt_mipmap_cache_t *cache,
  dt_mipmap_buffer_t *buf,
  const uint32_t imgid,
  const dt_mipmap_size_t mip,
  const dt_mipmap_get_flags_t flags)
{
  const uint32_t key = get_key(imgid, mip);
  if(flags == DT_MIPMAP_TESTLOCK)
  {
    // simple case: only get and lock if it's there.
    struct dt_mipmap_buffer_dsc* dsc = (struct dt_mipmap_buffer_dsc*)dt_cache_read_testget(&cache->mip[mip].cache, key);
    if(dsc)
    {
      buf->width  = dsc->width;
      buf->height = dsc->height;
      buf->imgid  = imgid;
      buf->size   = mip;
      // skip to next 8-byte alignment, for sse buffers.
      buf->buf    = (uint8_t *)(dsc+1);
    }
    else
    {
      // set to NULL if failed.
      buf->width = buf->height = 0;
      buf->imgid = 0;
      buf->size  = DT_MIPMAP_NONE;
      buf->buf   = NULL;
    }
  }
  else if(flags == DT_MIPMAP_PREFETCH)
  {
    // and the opposite: prefetch without locking
    if(mip > DT_MIPMAP_FULL || mip < DT_MIPMAP_0) return;
    dt_job_t j;
    dt_image_load_job_init(&j, imgid, mip);
    // if the job already exists, make it high-priority, if not, add it:
    if(dt_control_revive_job(darktable.control, &j) < 0)
      dt_control_add_job(darktable.control, &j);
  }
  else if(flags == DT_MIPMAP_BLOCKING)
  {
    // simple case: blocking get
    struct dt_mipmap_buffer_dsc* dsc = (struct dt_mipmap_buffer_dsc*)dt_cache_read_get(&cache->mip[mip].cache, key);
    if(!dsc)
    {
      // should never happen for anything but full images which have been moved.
      assert(mip == DT_MIPMAP_FULL || mip == DT_MIPMAP_F);
      // fprintf(stderr, "[mipmap cache get] no data in cache for imgid %u size %d!\n", imgid, mip);
      // sorry guys, no image for you :(
      buf->width = buf->height = 0;
      buf->imgid = 0;
      buf->size  = DT_MIPMAP_NONE;
      buf->buf   = NULL;
    }
    else
    {
      // fprintf(stderr, "[mipmap cache get] found data in cache for imgid %u size %d\n", imgid, mip);
      // uninitialized?
      //assert(dsc->flags & DT_MIPMAP_BUFFER_DSC_FLAG_GENERATE || dsc->size == 0);
      if(dsc->flags & DT_MIPMAP_BUFFER_DSC_FLAG_GENERATE)
      {
        __sync_fetch_and_add (&(cache->mip[mip].stats_fetches), 1);
        // fprintf(stderr, "[mipmap cache get] now initializing buffer for img %u mip %d!\n", imgid, mip);
        // we're write locked here, as requested by the alloc callback.
        // now fill it with data:
        if(mip == DT_MIPMAP_FULL)
        {
          // load the image:
          // make sure we hold the r/w lock as briefly as possible!
          dt_image_t buffered_image;
          const dt_image_t *cimg = dt_image_cache_read_get(darktable.image_cache, imgid);
          buffered_image = *cimg;
          // dt_image_t *img = dt_image_cache_write_get(darktable.image_cache, cimg);
          // dt_image_cache_write_release(darktable.image_cache, img, DT_IMAGE_CACHE_RELAXED);
          dt_image_cache_read_release(darktable.image_cache, cimg);

          char filename[DT_MAX_PATH_LEN];
          gboolean from_cache = TRUE;
          dt_image_full_path(buffered_image.id, filename, DT_MAX_PATH_LEN, &from_cache);

          dt_mipmap_cache_allocator_t a = (dt_mipmap_cache_allocator_t)&dsc;
          struct dt_mipmap_buffer_dsc* prvdsc = dsc;
          dt_imageio_retval_t ret = dt_imageio_open(&buffered_image, filename, a);
          if(dsc != prvdsc)
          {
            // fprintf(stderr, "[mipmap cache] realloc %p\n", data);
            // write back to cache, too.
            // in case something went wrong, still keep the buffer and return it to the hashtable
            // so we don't produce mem leaks or unnecessary mem fragmentation.
            dt_cache_realloc(&cache->mip[mip].cache, key, 1, (void*)dsc);
          }
          if(ret != DT_IMAGEIO_OK)
          {
            // fprintf(stderr, "[mipmap read get] error loading image: %d\n", ret);
            //
            // we can only return a zero dimension buffer if the buffer has been allocated.
            // in case dsc couldn't be allocated and points to the static buffer, it contains
            // a dead image already.
            if((void *)dsc != (void *)dt_mipmap_cache_static_dead_image) dsc->width = dsc->height = 0;
          }
          else
          {
            // swap back new image data:
            cimg = dt_image_cache_read_get(darktable.image_cache, imgid);
            dt_image_t *img = dt_image_cache_write_get(darktable.image_cache, cimg);
            *img = buffered_image;
            // fprintf(stderr, "[mipmap read get] initializing full buffer img %u with %u %u -> %d %d (%p)\n", imgid, data[0], data[1], img->width, img->height, data);
            // don't write xmp for this (we only changed db stuff):
            dt_image_cache_write_release(darktable.image_cache, img, DT_IMAGE_CACHE_RELAXED);
            dt_image_cache_read_release(darktable.image_cache, img);
          }
        }
        else if(mip == DT_MIPMAP_F)
        {
          _init_f((float *)(dsc+1), &dsc->width, &dsc->height, imgid);
        }
        else
        {
          // 8-bit thumbs, possibly need to be compressed:
          if(cache->compression_type)
          {
            // get per-thread temporary storage without malloc from a separate cache:
            const int key = dt_control_get_threadid();
            // const void *cbuf =
            dt_cache_read_get(&cache->scratchmem.cache, key);
            uint8_t *scratchmem = (uint8_t *)dt_cache_write_get(&cache->scratchmem.cache, key);
            _init_8(scratchmem, &dsc->width, &dsc->height, imgid, mip);
            buf->width  = dsc->width;
            buf->height = dsc->height;
            buf->imgid  = imgid;
            buf->size   = mip;
            buf->buf = (uint8_t *)(dsc+1);
            dt_mipmap_cache_compress(buf, scratchmem);
            dt_cache_write_release(&cache->scratchmem.cache, key);
            dt_cache_read_release(&cache->scratchmem.cache, key);
          }
          else
          {
            _init_8((uint8_t *)(dsc+1), &dsc->width, &dsc->height, imgid, mip);
          }
        }
        dsc->flags &= ~DT_MIPMAP_BUFFER_DSC_FLAG_GENERATE;
        // drop the write lock
        dt_cache_write_release(&cache->mip[mip].cache, key);
        /* raise signal that the mipmap has been flushed to the cache */
        dt_control_signal_raise(darktable.signals, DT_SIGNAL_DEVELOP_MIPMAP_UPDATED);
      }
      buf->width  = dsc->width;
      buf->height = dsc->height;
      buf->imgid  = imgid;
      buf->size   = mip;
      buf->buf = (uint8_t *)(dsc+1);
      if(dsc->width == 0 || dsc->height == 0)
      {
        // fprintf(stderr, "[mipmap cache get] got a zero-sized image for img %u mip %d!\n", imgid, mip);
        if(mip < DT_MIPMAP_F)       dead_image_8(buf);
        else if(mip == DT_MIPMAP_F) dead_image_f(buf);
        else buf->buf = NULL; // full images with NULL buffer have to be handled, indicates `missing image'
      }
    }
  }
  else if(flags == DT_MIPMAP_BEST_EFFORT)
  {
    __sync_fetch_and_add (&(cache->mip[mip].stats_requests), 1);
    // best-effort, might also return NULL.
    // never decrease mip level for float buffer or full image:
    dt_mipmap_size_t min_mip = (mip >= DT_MIPMAP_F) ? mip : DT_MIPMAP_0;
    for(int k=mip; k>=min_mip && k>=0; k--)
    {
      // already loaded?
      dt_mipmap_cache_read_get(cache, buf, imgid, k, DT_MIPMAP_TESTLOCK);
      if(buf->buf && buf->width > 0 && buf->height > 0)
      {
        if(mip != k) __sync_fetch_and_add (&(cache->mip[k].stats_standin), 1);
        return;
      }
      // didn't succeed the first time? prefetch for later!
      if(mip == k)
      {
        __sync_fetch_and_add (&(cache->mip[mip].stats_near_match), 1);
        dt_mipmap_cache_read_get(cache, buf, imgid, mip, DT_MIPMAP_PREFETCH);
      }
    }
    __sync_fetch_and_add (&(cache->mip[mip].stats_misses), 1);
    // fprintf(stderr, "[mipmap cache get] image not found in cache: imgid %u mip %d!\n", imgid, mip);
    // nothing found :(
    buf->buf   = NULL;
    buf->imgid = 0;
    buf->size  = DT_MIPMAP_NONE;
    buf->width = buf->height = 0;
  }
}
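
A typical caller of dt_mipmap_cache_read_get() pairs the get with a matching release once it is done with the pixels. Below is a minimal usage sketch of that pattern, assuming the dt_mipmap_cache_read_release() counterpart, the darktable.mipmap_cache global and the "common/mipmap_cache.h" header from the same era of the API; the function name and the image id are hypothetical.

#include "common/mipmap_cache.h" // assumed header exposing the mipmap cache API

/* hypothetical caller: fetch a thumbnail best-effort and release it again */
static void example_fetch_thumbnail(const uint32_t imgid)
{
  dt_mipmap_buffer_t buf;
  /* best effort: returns the requested mip or a smaller stand-in,
     and schedules a prefetch if nothing usable is cached yet */
  dt_mipmap_cache_read_get(darktable.mipmap_cache, &buf, imgid, DT_MIPMAP_2, DT_MIPMAP_BEST_EFFORT);
  if(buf.buf)
  {
    /* use buf.buf here (buf.width x buf.height pixels at mip level buf.size) ... */
    dt_mipmap_cache_read_release(darktable.mipmap_cache, &buf);
  }
}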
Example #7
static _dt_job_t *dt_control_schedule_job(dt_control_t *control)
{
  /*
   * job scheduling works like this:
   * - when there is a single job in the queue head with a maximal priority -> pick it
   * - otherwise pick among the ones with the maximal priority in the following order:
   *   * user foreground
   *   * system foreground
   *   * user background
   *   * system background
   * - the jobs that didn't get picked this round get their priority incremented
   */

  dt_pthread_mutex_lock(&control->queue_mutex);

  // find the job
  _dt_job_t *job = NULL;
  int winner_queue = DT_JOB_QUEUE_MAX;
  int max_priority = -1;
  for(int i = 0; i < DT_JOB_QUEUE_MAX; i++)
  {
    if(control->queues[i] == NULL) continue;
    if(control->export_scheduled && i == DT_JOB_QUEUE_USER_EXPORT) continue;
    _dt_job_t *_job = (_dt_job_t *)control->queues[i]->data;
    if(_job->priority > max_priority)
    {
      max_priority = _job->priority;
      job = _job;
      winner_queue = i;
    }
  }

  if(!job)
  {
    dt_pthread_mutex_unlock(&control->queue_mutex);
    return NULL;
  }

  // the order of the queues in control->queues matches our preference order, and we only update job
  // when the priority is strictly greater
  // invariant -> job is the one we are looking for

  // remove the to be scheduled job from its queue
  GList **queue = &control->queues[winner_queue];
  *queue = g_list_delete_link(*queue, *queue);
  control->queue_length[winner_queue]--;
  if(winner_queue == DT_JOB_QUEUE_USER_EXPORT) control->export_scheduled = TRUE;

  // and place it in the scheduled job array (for job deduping)
  control->job[dt_control_get_threadid()] = job;

  // increment the priorities of the others
  for(int i = 0; i < DT_JOB_QUEUE_MAX; i++)
  {
    if(i == winner_queue || control->queues[i] == NULL) continue;
    ((_dt_job_t *)control->queues[i]->data)->priority++;
  }

  dt_pthread_mutex_unlock(&control->queue_mutex);

  return job;
}
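
To make the aging policy described in the comment at the top of dt_control_schedule_job() concrete, here is a small self-contained sketch. It is not darktable code: the types, names and queue count are toy stand-ins that only illustrate the selection rule (the head with the strictly highest priority wins, ties go to the earlier, i.e. preferred, queue) and the priority bump for the heads that were not picked.

#include <stdio.h>

enum { N_QUEUES = 4 };

/* toy job: just a name and an aging priority */
typedef struct toy_job_t
{
  const char *name;
  int priority;
} toy_job_t;

/* pick the queue head with the strictly highest priority (earlier queue wins
   ties), then age the heads that lost so they cannot starve; returns the
   winning queue index or -1 if all queues are empty */
static int pick_and_age(toy_job_t *heads[N_QUEUES])
{
  int winner = -1, max_priority = -1;
  for(int i = 0; i < N_QUEUES; i++)
  {
    if(!heads[i]) continue;
    if(heads[i]->priority > max_priority)
    {
      max_priority = heads[i]->priority;
      winner = i;
    }
  }
  if(winner < 0) return -1;
  for(int i = 0; i < N_QUEUES; i++)
    if(i != winner && heads[i]) heads[i]->priority++;
  return winner;
}

int main(void)
{
  toy_job_t user_fg = { "user foreground", 0 };
  toy_job_t sys_bg = { "system background", 2 };
  toy_job_t *heads[N_QUEUES] = { &user_fg, NULL, NULL, &sys_bg };

  for(int round = 0; round < 3; round++)
  {
    const int w = pick_and_age(heads);
    if(w < 0) break;
    printf("round %d: picked %s\n", round, heads[w]->name);
    heads[w] = NULL; /* the scheduled job leaves its queue */
  }
  return 0;
}

Run as-is, the first round picks the background job because its priority is higher, and the aged foreground job wins the next round; that increment is what keeps the lower-preference queues from starving the higher ones and vice versa.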