/**
 * @brief Read the target of a symbolic link through the cache
 *
 * Returns the cached link content for @a entry, refreshing it from the
 * FSAL first when the cached copy is no longer trusted
 * (CACHE_INODE_TRUST_CONTENT cleared).
 *
 * @param[in]  entry        Cache entry for the symlink to read
 * @param[out] link_content Buffer receiving a copy of the link target
 * @param[in]  context      FSAL operation context (credentials)
 * @param[out] status       Returned status (same value is returned)
 *
 * @return CACHE_INODE_SUCCESS on success, CACHE_INODE_BAD_TYPE if the
 *         entry is not a symlink, or a converted FSAL error.
 */
cache_inode_status_t
cache_inode_readlink(cache_entry_t *entry,
                     fsal_path_t *link_content,
                     fsal_op_context_t *context,
                     cache_inode_status_t *status)
{
    fsal_status_t fsal_status = {0, 0};

    /* Set the return default to CACHE_INODE_SUCCESS */
    *status = CACHE_INODE_SUCCESS;

    /* Only symlink entries carry link content. */
    if (entry->type != SYMBOLIC_LINK) {
        *status = CACHE_INODE_BAD_TYPE;
        return *status;
    }

    assert(entry->object.symlink);
    PTHREAD_RWLOCK_RDLOCK(&entry->content_lock);
    if (!(entry->flags & CACHE_INODE_TRUST_CONTENT)) {
        /* Our data are stale.  Drop the lock, get a
           write-lock, load in new data, and copy it out to
           the caller. */
        PTHREAD_RWLOCK_UNLOCK(&entry->content_lock);
        PTHREAD_RWLOCK_WRLOCK(&entry->content_lock);
        /* Make sure nobody updated the content while we were
           waiting. */
        if (!(entry->flags & CACHE_INODE_TRUST_CONTENT)) {
            fsal_status
                = FSAL_readlink(&entry->handle,
                                context,
                                &entry->object.symlink->content,
                                NULL);
            if (!(FSAL_IS_ERROR(fsal_status))) {
                /* Refreshed content can be trusted again. */
                atomic_set_uint32_t_bits(&entry->flags,
                                         CACHE_INODE_TRUST_CONTENT);
            }
        }
    }
    /* Copy the (possibly refreshed) content out while still holding
       the content lock. */
    if (!(FSAL_IS_ERROR(fsal_status))) {
        FSAL_pathcpy(link_content,
                     &(entry->object.symlink->content));
    }
    PTHREAD_RWLOCK_UNLOCK(&entry->content_lock);

    if (FSAL_IS_ERROR(fsal_status)) {
        *status = cache_inode_error_convert(fsal_status);
        if (fsal_status.major == ERR_FSAL_STALE) {
            /* A STALE handle means the object is gone; evict the
               entry from the cache. */
            LogEvent(COMPONENT_CACHE_INODE,
                     "FSAL returned STALE from readlink");
            cache_inode_kill_entry(entry);
        }
        return *status;
    }

    return *status;
} /* cache_inode_readlink */
Exemple #2
0
/* Return the mean measured execution time recorded for job j on
 * architecture arch, or -1.0 when no history is available for the
 * job's footprint. */
double _starpu_history_based_job_expected_length(struct starpu_perfmodel_t *model, enum starpu_perf_archtype arch, struct starpu_job_s *j)
{
    struct starpu_per_arch_perfmodel_t *arch_model;
    struct starpu_history_entry_t *found;
    struct starpu_htbl32_node_s *table;
    uint32_t footprint_key;

    /* Make sure the model file has been loaded. */
    load_history_based_model(model, 1);

    /* The footprint is the key into the per-arch history table. */
    if (STARPU_UNLIKELY(!j->footprint_is_computed))
        _starpu_compute_buffers_footprint(j);

    footprint_key = j->footprint;
    arch_model = &model->per_arch[arch];

    table = arch_model->history;
    if (!table)
        return -1.0;

    /* Look the footprint up under the model lock. */
    PTHREAD_RWLOCK_RDLOCK(&model->model_rwlock);
    found = _starpu_htbl_search_32(table, footprint_key);
    PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);

    if (!found)
        return -1.0;

    return found->mean;
}
Exemple #3
0
/* Persist every registered performance model to disk, holding the
 * registry lock for the whole traversal. */
static void _starpu_dump_registered_models(void)
{
    struct starpu_model_list_t *cur;

    PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);

    _STARPU_DEBUG("DUMP MODELS !\n");

    /* Walk the linked list of registered models and save each one. */
    for (cur = registered_models; cur; cur = cur->next)
    {
        save_history_based_model(cur->model);

        /* XXX free node */
    }

    PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
}
Exemple #4
0
void starpu_wake_all_blocked_workers(void)
{
	/* workers may be blocked on the various queues' conditions */
	starpu_mem_node_descr * const descr = _starpu_get_memory_node_description();
	unsigned total;
	unsigned i;

	PTHREAD_RWLOCK_RDLOCK(&descr->conditions_rwlock);

	total = descr->total_condition_count;
	for (i = 0; i < total; i++)
	{
		struct _cond_and_mutex *cm = &descr->conditions_all[i];

		/* wake anybody waiting on that condition */
		PTHREAD_MUTEX_LOCK(cm->mutex);
		PTHREAD_COND_BROADCAST(cm->cond);
		PTHREAD_MUTEX_UNLOCK(cm->mutex);
	}

	PTHREAD_RWLOCK_UNLOCK(&descr->conditions_rwlock);
}
Exemple #5
0
void _starpu_wake_all_blocked_workers_on_node(unsigned nodeid)
{
	/* wake up all workers on that memory node */
	starpu_mem_node_descr * const descr = _starpu_get_memory_node_description();
	unsigned total;
	unsigned i;

	PTHREAD_RWLOCK_RDLOCK(&descr->conditions_rwlock);

	total = descr->condition_count[nodeid];
	for (i = 0; i < total; i++)
	{
		struct _cond_and_mutex *cm = &descr->conditions_attached_to_node[nodeid][i];

		/* wake anybody waiting on that condition */
		PTHREAD_MUTEX_LOCK(cm->mutex);
		PTHREAD_COND_BROADCAST(cm->cond);
		PTHREAD_MUTEX_UNLOCK(cm->mutex);
	}

	PTHREAD_RWLOCK_UNLOCK(&descr->conditions_rwlock);
}
Exemple #6
0
/**
 *
 * nlm4_send_grant_msg: Send NLMPROC4_GRANTED_MSG
 *
 * Sends the NLM GRANTED callback to the client.  If the RPC fails,
 * the blocked lock being granted is looked up by cookie and released
 * so the client can retry it later.
 *
 * This runs in the nlm_asyn_thread context.
 *
 * @param[in] arg Async queue entry carrying the grant arguments
 */
static void nlm4_send_grant_msg(state_async_queue_t *arg)
{
  int                      retval;
  /* Zero-initialized: the cookie is rendered into this buffer only
   * when NLM debug is enabled, but the buffer is also passed to the
   * log calls on the error paths below, which would otherwise read
   * uninitialized stack memory (possibly an unterminated string). */
  char                     buffer[1024] = "";
  state_status_t           state_status = STATE_SUCCESS;
  state_cookie_entry_t   * cookie_entry;
  fsal_op_context_t        context, * pcontext = &context;
  state_nlm_async_data_t * nlm_arg = &arg->state_async_data.state_nlm_async_data;

  if(isDebug(COMPONENT_NLM))
    {
      netobj_to_string(&nlm_arg->nlm_async_args.nlm_async_grant.cookie,
                       buffer, sizeof(buffer));

      LogDebug(COMPONENT_NLM,
               "Sending GRANTED for arg=%p svid=%d start=%llx len=%llx cookie=%s",
               arg, nlm_arg->nlm_async_args.nlm_async_grant.alock.svid,
               (unsigned long long) nlm_arg->nlm_async_args.nlm_async_grant.alock.l_offset,
               (unsigned long long) nlm_arg->nlm_async_args.nlm_async_grant.alock.l_len,
               buffer);
    }

  retval = nlm_send_async(NLMPROC4_GRANTED_MSG,
                          nlm_arg->nlm_async_host,
                          &(nlm_arg->nlm_async_args.nlm_async_grant),
                          nlm_arg->nlm_async_key);

  /* The host reference and the argument block are ours to release
   * whether or not the RPC succeeded. */
  dec_nlm_client_ref(nlm_arg->nlm_async_host);
  free_grant_arg(arg);

  /* If success, we are done. */
  if(retval == RPC_SUCCESS)
    return;

  /*
   * We are not able call granted callback. Some client may retry
   * the lock again. So remove the existing blocked nlm entry
   */
  LogMajor(COMPONENT_NLM,
           "GRANTED_MSG RPC call failed with return code %d. Removing the blocking lock",
           retval);

  if(state_find_grant(nlm_arg->nlm_async_args.nlm_async_grant.cookie.n_bytes,
                      nlm_arg->nlm_async_args.nlm_async_grant.cookie.n_len,
                      &cookie_entry,
                      &state_status) != STATE_SUCCESS)
    {
      /* This must be an old NLM_GRANTED_RES */
      LogFullDebug(COMPONENT_NLM,
                   "Could not find cookie=%s status=%s",
                   buffer, state_err_str(state_status));
      return;
    }

  PTHREAD_RWLOCK_WRLOCK(&cookie_entry->sce_pentry->state_lock);

  if(cookie_entry->sce_lock_entry->sle_block_data == NULL ||
     !nlm_block_data_to_fsal_context(cookie_entry->sce_lock_entry->sle_block_data,
                                     pcontext))
    {
      /* Wow, we're not doing well... */
      PTHREAD_RWLOCK_UNLOCK(&cookie_entry->sce_pentry->state_lock);
      LogFullDebug(COMPONENT_NLM,
                   "Could not find block data for cookie=%s (must be an old NLM_GRANTED_RES)",
                   buffer);
      return;
    }

  PTHREAD_RWLOCK_UNLOCK(&cookie_entry->sce_pentry->state_lock);

  if(state_release_grant(pcontext,
                         cookie_entry,
                         &state_status) != STATE_SUCCESS)
    {
      /* Huh? */
      LogFullDebug(COMPONENT_NLM,
                   "Could not release cookie=%s status=%s",
                   buffer, state_err_str(state_status));
    }
}
/**
 *
 * @brief invalidates an entry in the cache
 *
 * This function invalidates the related cache entry corresponding to a
 * FSAL handle. It is designed to be called when an FSAL upcall is
 * triggered.
 *
 * @param[in]  fsal_data FSAL data identifying the entry to invalidate
 * @param[out] status    Returned status
 * @param[in]  flags     Control flags (CACHE_INODE_INVALIDATE_CLEARBITS,
 *                       CACHE_INODE_INVALIDATE_CLOSE)
 *
 * @retval CACHE_INODE_SUCCESS if operation is a success
 * @retval CACHE_INODE_INVALID_ARGUMENT bad parameter(s) as input
 * @retval CACHE_INODE_NOT_FOUND if entry is not cached
 * @retval CACHE_INODE_STATE_CONFLICT if invalidating this entry would
 *                                    result in a state conflict
 * @retval CACHE_INODE_INCONSISTENT_ENTRY if entry is not consistent
 * @retval Other errors shows a FSAL error.
 *
 */
cache_inode_status_t
cache_inode_invalidate(cache_inode_fsal_data_t *fsal_data,
                       cache_inode_status_t *status,
                       uint32_t flags)
{
     hash_buffer_t key, value;
     int rc = 0;
     cache_entry_t *entry;
     struct hash_latch latch;

     /* We cannot report through a NULL status pointer; dereferencing
        it (as the previous code did) would crash. */
     if (status == NULL)
          return CACHE_INODE_INVALID_ARGUMENT;

     if (fsal_data == NULL) {
          *status = CACHE_INODE_INVALID_ARGUMENT;
          goto out;
     }

     /* Default to success: previously *status was left uninitialized
        on the success path unless cache_inode_close happened to run. */
     *status = CACHE_INODE_SUCCESS;

     /* Locate the entry in the cache */
     FSAL_ExpandHandle(NULL,  /* pcontext but not used... */
                       FSAL_DIGEST_SIZEOF,
                       &fsal_data->fh_desc);

     /* Turn the input to a hash key */
     key.pdata = fsal_data->fh_desc.start;
     key.len = fsal_data->fh_desc.len;

     if ((rc = HashTable_GetLatch(fh_to_cache_entry_ht,
                                  &key,
                                  &value,
                                  FALSE,
                                  &latch)) == HASHTABLE_ERROR_NO_SUCH_KEY) {
          /* Entry is not cached */
          HashTable_ReleaseLatched(fh_to_cache_entry_ht, &latch);
          *status = CACHE_INODE_NOT_FOUND;
          return *status;
     } else if (rc != HASHTABLE_SUCCESS) {
          LogCrit(COMPONENT_CACHE_INODE,
                  "Unexpected error %u while calling HashTable_GetLatch", rc);
          *status = CACHE_INODE_INVALID_ARGUMENT;
          goto out;
     }

     /* Take a reference so the entry cannot be recycled while we
        work on it, then drop the hash latch. */
     entry = value.pdata;
     if (cache_inode_lru_ref(entry, 0) != CACHE_INODE_SUCCESS) {
          HashTable_ReleaseLatched(fh_to_cache_entry_ht, &latch);
          *status = CACHE_INODE_NOT_FOUND;
          return *status;
     }
     HashTable_ReleaseLatched(fh_to_cache_entry_ht, &latch);

     PTHREAD_RWLOCK_WRLOCK(&entry->attr_lock);
     PTHREAD_RWLOCK_WRLOCK(&entry->content_lock);

     /* We can invalidate entries with state just fine.  We force
        Cache_inode to contact the FSAL for any use of content or
        attributes, and if the FSAL indicates the entry is stale, it
        can be disposed of then. */

     /* We should have a way to invalidate content and attributes
        separately.  Or at least a way to invalidate attributes
        without invalidating content (since any change in content
        really ought to modify mtime, at least.) */

     if ((flags & CACHE_INODE_INVALIDATE_CLEARBITS) != 0)
       atomic_clear_uint32_t_bits(&entry->flags,
                                  CACHE_INODE_TRUST_ATTRS |
                                  CACHE_INODE_DIR_POPULATED |
                                  CACHE_INODE_TRUST_CONTENT);

     /* The main reason for holding the lock at this point is so we
        don't clear the trust bits while someone is populating the
        directory or refreshing attributes. */

     if (((flags & CACHE_INODE_INVALIDATE_CLOSE) != 0) &&
         (entry->type == REGULAR_FILE)) {
          cache_inode_close(entry,
                            NULL,
                            (CACHE_INODE_FLAG_REALLYCLOSE |
                             CACHE_INODE_FLAG_CONTENT_HAVE |
                             CACHE_INODE_FLAG_CONTENT_HOLD),
                            status);
     }

     PTHREAD_RWLOCK_UNLOCK(&entry->attr_lock);
     PTHREAD_RWLOCK_UNLOCK(&entry->content_lock);

     cache_inode_lru_unref(entry, 0);

out:

     /* Memory copying attributes with every call is expensive.
        Let's not do it.  */

     return (*status);
} /* cache_inode_invalidate */
/**
 * @brief Create a new object in the FSAL and cache it
 *
 * Creates a file, directory, symlink, socket, FIFO, block or character
 * device named @a name under @a parent, inserts the new object into
 * the cache, adds a dirent to the parent, and updates the parent's
 * cached times and link count.
 *
 * @param[in]  parent     Cache entry for the parent directory
 * @param[in]  name       Name of the object to create
 * @param[in]  type       Type of object to create
 * @param[in]  mode       Mode for the new object
 * @param[in]  create_arg Type-specific data (symlink target, device
 *                        spec); may be NULL, in which case a zeroed
 *                        argument block is substituted
 * @param[out] attr       Attributes of the created (or existing) object
 * @param[in]  context    FSAL operation context
 * @param[out] status     Returned status
 *
 * @return the new (referenced) cache entry on success, or NULL on
 *         failure.  If an entry of the same name and type already
 *         exists it is returned with *status == CACHE_INODE_ENTRY_EXISTS.
 */
cache_entry_t *
cache_inode_create(cache_entry_t *parent,
                   fsal_name_t *name,
                   cache_inode_file_type_t type,
                   fsal_accessmode_t mode,
                   cache_inode_create_arg_t *create_arg,
                   fsal_attrib_list_t *attr,
                   fsal_op_context_t *context,
                   cache_inode_status_t *status)
{
     cache_entry_t *entry = NULL;
     fsal_status_t fsal_status = {0, 0};
     fsal_handle_t object_handle;
     fsal_attrib_list_t object_attributes;
     cache_inode_fsal_data_t fsal_data;
     cache_inode_create_arg_t zero_create_arg;

     memset(&zero_create_arg, 0, sizeof(zero_create_arg));
     memset(&fsal_data, 0, sizeof(fsal_data));
     memset(&object_handle, 0, sizeof(object_handle));

     /* Callers may omit type-specific arguments. */
     if (create_arg == NULL) {
          create_arg = &zero_create_arg;
     }

     /* Set the return default to CACHE_INODE_SUCCESS */
     *status = CACHE_INODE_SUCCESS;

     /* Only creatable object types are accepted. */
     if ((type != REGULAR_FILE) && (type != DIRECTORY) &&
         (type != SYMBOLIC_LINK) && (type != SOCKET_FILE) &&
         (type != FIFO_FILE) && (type != CHARACTER_FILE) &&
         (type != BLOCK_FILE)) {
          *status = CACHE_INODE_BAD_TYPE;

          entry = NULL;
          goto out;
        }

     /* Check if an entry of the same name exists */
     entry = cache_inode_lookup(parent,
                                name,
                                attr,
                                context,
                                status);
     if (entry != NULL) {
          *status = CACHE_INODE_ENTRY_EXISTS;
          if (entry->type != type) {
               /* Incompatible types, returns NULL */
               cache_inode_lru_unref(entry, LRU_FLAG_NONE);
               entry = NULL;
               goto out;
          } else {
               /* Same type: hand back the existing (referenced)
                  entry with ENTRY_EXISTS status. */
               goto out;
          }
     }

     /* The entry doesn't exist, so we can create it. */

     object_attributes.asked_attributes = cache_inode_params.attrmask;
     /* Dispatch to the type-specific FSAL creation call. */
     switch (type) {
     case REGULAR_FILE:
          fsal_status = FSAL_create(&parent->handle,
                                    name, context, mode,
                                    &object_handle, &object_attributes);
          break;

     case DIRECTORY:
          fsal_status = FSAL_mkdir(&parent->handle,
                                   name, context, mode,
                                   &object_handle, &object_attributes);
          break;

     case SYMBOLIC_LINK:
          fsal_status = FSAL_symlink(&parent->handle,
                                     name, &create_arg->link_content,
                                     context, mode, &object_handle,
                                     &object_attributes);
          break;

     case SOCKET_FILE:
          fsal_status = FSAL_mknode(&parent->handle, name, context,
                                    mode, FSAL_TYPE_SOCK, NULL,
                                    &object_handle, &object_attributes);
          break;

     case FIFO_FILE:
          fsal_status = FSAL_mknode(&parent->handle, name, context,
                                    mode, FSAL_TYPE_FIFO, NULL,
                                    &object_handle, &object_attributes);
          break;

     case BLOCK_FILE:
          fsal_status = FSAL_mknode(&parent->handle,
                                    name, context,
                                    mode, FSAL_TYPE_BLK,
                                    &create_arg->dev_spec,
                                    &object_handle, &object_attributes);
             break;

     case CHARACTER_FILE:
          fsal_status = FSAL_mknode(&parent->handle,
                                    name, context,
                                    mode, FSAL_TYPE_CHR,
                                    &create_arg->dev_spec,
                                    &object_handle,
                                    &object_attributes);
          break;

     default:
          /* we should never go there */
          *status = CACHE_INODE_INCONSISTENT_ENTRY;
          entry = NULL;
          goto out;
          break;
        }

     /* Check for the result */
     if (FSAL_IS_ERROR(fsal_status)) {
          if (fsal_status.major == ERR_FSAL_STALE) {
               /* The parent directory handle went stale; evict it. */
               LogEvent(COMPONENT_CACHE_INODE,
                        "FSAL returned STALE on create type %d",
                        type);
               cache_inode_kill_entry(parent);
          }
          *status = cache_inode_error_convert(fsal_status);
          entry = NULL;
          goto out;
     }
     /* Build the hash-table key from the newly created handle. */
     fsal_data.fh_desc.start = (caddr_t) &object_handle;
     fsal_data.fh_desc.len = 0;
     FSAL_ExpandHandle(context->export_context,
                       FSAL_DIGEST_SIZEOF,
                       &fsal_data.fh_desc);

     entry = cache_inode_new_entry(&fsal_data,
                                   &object_attributes,
                                   type,
                                   create_arg,
                                   status);
     if (entry == NULL) {
          *status = CACHE_INODE_INSERT_ERROR;

          return NULL;
     }

     PTHREAD_RWLOCK_WRLOCK(&parent->content_lock);
     /* Add this entry to the directory (also takes an internal ref) */
     cache_inode_add_cached_dirent(parent,
                                   name, entry,
                                   NULL,
                                   status);
     PTHREAD_RWLOCK_UNLOCK(&parent->content_lock);
     if (*status != CACHE_INODE_SUCCESS) {
          cache_inode_lru_unref(entry, LRU_FLAG_NONE);
          entry = NULL;
          goto out;
     }

     PTHREAD_RWLOCK_WRLOCK(&parent->attr_lock);
     /* Update the parent cached attributes */
     cache_inode_set_time_current(&parent->attributes.mtime);
     parent->attributes.ctime = parent->attributes.mtime;
     /* if the created object is a directory, it contains a link
        to its parent : '..'. Thus the numlink attr must be increased. */
     if (type == DIRECTORY) {
          ++(parent->attributes.numlinks);
     }
     PTHREAD_RWLOCK_UNLOCK(&parent->attr_lock);

     /* Copy up the child attributes */
     /* NOTE(review): assumes attr is non-NULL — confirm all callers
        pass a valid attribute buffer. */
     *attr = object_attributes;

     *status = CACHE_INODE_SUCCESS;

out:

     return entry;
}
/**
 * @brief Set the attributes of a cached entry
 *
 * Validates the entry type and the requested attributes, checks
 * permissions, pushes the new attributes to the FSAL, and refreshes
 * the cached attribute copy.  A size change (truncate) additionally
 * takes the content write lock.
 *
 * @param[in]     entry         Entry whose attributes are to be set
 * @param[in,out] attr          Attributes to set; on success this is
 *                              overwritten with the complete refreshed
 *                              attribute set
 * @param[in]     context       FSAL operation context
 * @param[in]     is_open_write TRUE if the file is open for write
 *                              (affects permission checking)
 * @param[out]    status        Returned status (same value is returned)
 *
 * @return CACHE_INODE_SUCCESS on success, CACHE_INODE_BAD_TYPE for
 *         invalid entry types or truncation of a non-regular file, or
 *         a converted FSAL/permission error.
 */
cache_inode_status_t
cache_inode_setattr(cache_entry_t *entry,
                    fsal_attrib_list_t *attr,
                    fsal_op_context_t *context,
                    int is_open_write,
                    cache_inode_status_t *status)
{
     fsal_status_t fsal_status = {0, 0};
     int           got_content_lock = FALSE;
#ifdef _USE_NFS4_ACL
     fsal_acl_t *saved_acl = NULL;
     fsal_acl_status_t acl_status = 0;
#endif /* _USE_NFS4_ACL */

     /* Reject entries whose type is not (or no longer) valid. */
     if ((entry->type == UNASSIGNED) ||
         (entry->type == RECYCLED)) {
          LogWarn(COMPONENT_CACHE_INODE,
                  "WARNING: unknown source entry type: type=%d, "
                  "line %d in file %s", entry->type, __LINE__, __FILE__);
          *status = CACHE_INODE_BAD_TYPE;
          goto out;
     }

     /* Truncation only makes sense on regular files. */
     if ((attr->asked_attributes & FSAL_ATTR_SIZE) &&
         (entry->type != REGULAR_FILE)) {
          LogWarn(COMPONENT_CACHE_INODE,
                   "Attempt to truncate non-regular file: type=%d",
                   entry->type);
          *status = CACHE_INODE_BAD_TYPE;
          goto out;
     }

     /* Get wrlock on attr_lock and verify attrs */
     *status = cache_inode_lock_trust_attrs(entry, context, TRUE);
     if(*status != CACHE_INODE_SUCCESS)
       return *status;

     /* Do permission checks */
     if(cache_inode_check_setattr_perms(entry,
                                        attr,
                                        context,
                                        is_open_write,
                                        status) != CACHE_INODE_SUCCESS)
       {
         goto unlock;
       }

     /* A size change also requires exclusive access to the content. */
     if (attr->asked_attributes & FSAL_ATTR_SIZE) {
          PTHREAD_RWLOCK_WRLOCK(&entry->content_lock);
          got_content_lock = TRUE;
     }

#ifdef _USE_NFS4_ACL
     /* Remember the current ACL so its reference can be released once
        the FSAL has installed the replacement. */
     saved_acl = entry->attributes.acl;
#endif /* _USE_NFS4_ACL */
     fsal_status = FSAL_setattrs(&entry->handle, context, attr,
                                 &entry->attributes);
     if (FSAL_IS_ERROR(fsal_status)) {
          *status = cache_inode_error_convert(fsal_status);
          if (fsal_status.major == ERR_FSAL_STALE) {
               /* The object disappeared under us; evict the entry. */
               LogEvent(COMPONENT_CACHE_INODE,
                       "FSAL returned STALE from setattrs");
               cache_inode_kill_entry(entry);
          }
          goto unlock;
     } else {
#ifdef _USE_NFS4_ACL
          /* Decrement refcount on saved ACL */
         nfs4_acl_release_entry(saved_acl, &acl_status);
         if (acl_status != NFS_V4_ACL_SUCCESS) {
              LogCrit(COMPONENT_CACHE_INODE,
                      "Failed to release old acl, status=%d",
                      acl_status);
         }
#endif /* _USE_NFS4_ACL */
     }

     cache_inode_fixup_md(entry);
     /* Copy the complete set of new attributes out. */

     *attr = entry->attributes;
     set_mounted_on_fileid(entry, attr, context->export_context->fe_export);

     *status = CACHE_INODE_SUCCESS;

unlock:
     if(got_content_lock) {
          PTHREAD_RWLOCK_UNLOCK(&entry->content_lock);
     }
     PTHREAD_RWLOCK_UNLOCK(&entry->attr_lock);

out:

     return *status;
} /* cache_inode_setattr */
Exemple #10
0
/* Record a measured execution time for job j on architecture arch in
 * the job's codelet performance model: updates the history table
 * (per-footprint mean and deviation) and the regression model
 * (least-squares fit of log(time) against log(data size)), both under
 * the model's write lock. */
void _starpu_update_perfmodel_history(starpu_job_t j, enum starpu_perf_archtype arch, unsigned cpuid __attribute__((unused)), double measured)
{
    struct starpu_perfmodel_t *model = j->task->cl->model;

    if (model)
    {
        struct starpu_per_arch_perfmodel_t *per_arch_model = &model->per_arch[arch];

        if (model->type == STARPU_HISTORY_BASED || model->type == STARPU_REGRESSION_BASED)
        {
            uint32_t key = j->footprint;
            struct starpu_history_entry_t *entry;

            struct starpu_htbl32_node_s *history;
            struct starpu_htbl32_node_s **history_ptr;
            struct starpu_regression_model_t *reg_model;

            struct starpu_history_list_t **list;


            history = per_arch_model->history;
            history_ptr = &per_arch_model->history;
            reg_model = &per_arch_model->regression;
            list = &per_arch_model->list;

            /* All model updates happen under the write lock. */
            PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);

            entry = _starpu_htbl_search_32(history, key);

            if (!entry)
            {
                /* this is the first entry with such a footprint */
                entry = malloc(sizeof(struct starpu_history_entry_t));
                STARPU_ASSERT(entry);
                entry->mean = measured;
                entry->sum = measured;

                entry->deviation = 0.0;
                entry->sum2 = measured*measured;

                entry->size = _starpu_job_get_data_size(j);

                entry->footprint = key;
                entry->nsample = 1;

                insert_history_entry(entry, list, history_ptr);

            }
            else {
                /* there is already some entry with the same footprint */
                entry->sum += measured;
                entry->sum2 += measured*measured;
                entry->nsample++;

                unsigned n = entry->nsample;
                entry->mean = entry->sum / n;
                /* Population standard deviation over the n samples. */
                entry->deviation = sqrt((entry->sum2 - (entry->sum*entry->sum)/n)/n);
            }

            STARPU_ASSERT(entry);

            /* update the regression model as well */
            double logy, logx;
            logx = log(entry->size);
            logy = log(measured);

            reg_model->sumlnx += logx;
            reg_model->sumlnx2 += logx*logx;
            reg_model->sumlny += logy;
            reg_model->sumlnxlny += logx*logy;
            reg_model->nsample++;

            unsigned n = reg_model->nsample;

            /* Least-squares fit of ln(y) = ln(alpha) + beta*ln(x).
             * NOTE(review): with a single sample (or all-equal sizes)
             * denom is 0 and beta becomes inf/NaN — confirm readers of
             * the regression model tolerate this until more samples
             * arrive. */
            double num = (n*reg_model->sumlnxlny - reg_model->sumlnx*reg_model->sumlny);
            double denom = (n*reg_model->sumlnx2 - reg_model->sumlnx*reg_model->sumlnx);

            reg_model->beta = num/denom;
            reg_model->alpha = exp((reg_model->sumlny - reg_model->beta*reg_model->sumlnx)/n);

            PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
        }

#ifdef STARPU_MODEL_DEBUG
        /* Trace the raw measurement and the buffers it covered into
           the model's per-arch debug file. */
        struct starpu_task *task = j->task;
        FILE * debug_file = per_arch_model->debug_file;

        PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);

        STARPU_ASSERT(j->footprint_is_computed);

        fprintf(debug_file, "0x%x\t%lu\t%lf\t%lf\t%d\t\t", j->footprint, (unsigned long) _starpu_job_get_data_size(j), measured, task->predicted, cpuid);
        unsigned i;

        for (i = 0; i < task->cl->nbuffers; i++)
        {
            struct starpu_data_handle_t *handle = task->buffers[i].handle;

            STARPU_ASSERT(handle->ops);
            STARPU_ASSERT(handle->ops->display);
            handle->ops->display(handle, debug_file);
        }
        fprintf(debug_file, "\n");

        PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);
#endif
    }
}
Exemple #11
0
/* We first try to grab the global lock in read mode to check whether the model
 * was loaded or not (this is very likely to have been already loaded). If the
 * model was not loaded yet, we take the lock in write mode, and if the model
 * is still not loaded once we have the lock, we do load it.  */
static void load_history_based_model(struct starpu_perfmodel_t *model, unsigned scan_history)
{

    STARPU_ASSERT(model);
    STARPU_ASSERT(model->symbol);

    int already_loaded;

    /* Fast path: a read lock suffices to test the is_loaded flag. */
    PTHREAD_RWLOCK_RDLOCK(&registered_models_rwlock);
    already_loaded = model->is_loaded;
    PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);

    if (already_loaded)
        return;

    /* The model is still not loaded so we grab the lock in write mode, and
     * if it's not loaded once we have the lock, we do load it. */

    PTHREAD_RWLOCK_WRLOCK(&registered_models_rwlock);

    /* Was the model initialized since the previous test ? */
    if (model->is_loaded)
    {
        PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
        return;
    }

    /* First initialization of this model: create its own lock and
       hold it while the model is being populated. */
    PTHREAD_RWLOCK_INIT(&model->model_rwlock, NULL);

    PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock);

    /* make sure the performance model directory exists (or create it) */
    _starpu_create_sampling_directory_if_needed();

    /*
     * We need to keep track of all the model that were opened so that we can
     * possibly update them at runtime termination ...
     */
    _starpu_register_model(model);

    char path[256];
    get_model_path(model, path, 256);

    _STARPU_DEBUG("Opening performance model file %s for model %s ... ", path, model->symbol);

    unsigned calibrate_flag = _starpu_get_calibrate_flag();
    model->benchmarking = calibrate_flag;

    /* try to open an existing file and load it */
    int res;
    res = access(path, F_OK);
    if (res == 0) {
        if (calibrate_flag == 2)
        {
            /* The user specified that the performance model should
             * be overwritten, so we don't load the existing file !
             * */
            _STARPU_DEBUG("Overwrite existing file\n");
            initialize_model(model);
        }
        else {
            /* We load the available file */
            _STARPU_DEBUG("File exists\n");
            FILE *f;
            f = fopen(path, "r");
            STARPU_ASSERT(f);

            parse_model_file(f, model, scan_history);

            fclose(f);
        }
    }
    else {
        /* No saved model: start from an empty one, and force
           calibration if it was not already requested. */
        _STARPU_DEBUG("File does not exists\n");
        if (!calibrate_flag) {
            _STARPU_DISP("Warning: model %s is not calibrated, forcing calibration for this run. Use the STARPU_CALIBRATE environment variable to control this.\n", model->symbol);
            _starpu_set_calibrate_flag(1);
            model->benchmarking = 1;
        }
        initialize_model(model);
    }

    /* Publish the flag only after the model is fully set up. */
    model->is_loaded = 1;

    PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock);

    PTHREAD_RWLOCK_UNLOCK(&registered_models_rwlock);
}