double _starpu_history_based_job_expected_length(struct starpu_perfmodel_t *model, enum starpu_perf_archtype arch, struct starpu_job_s *j) { double exp; struct starpu_per_arch_perfmodel_t *per_arch_model; struct starpu_history_entry_t *entry; struct starpu_htbl32_node_s *history; load_history_based_model(model, 1); if (STARPU_UNLIKELY(!j->footprint_is_computed)) _starpu_compute_buffers_footprint(j); uint32_t key = j->footprint; per_arch_model = &model->per_arch[arch]; history = per_arch_model->history; if (!history) return -1.0; PTHREAD_RWLOCK_RDLOCK(&model->model_rwlock); entry = _starpu_htbl_search_32(history, key); PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock); exp = entry?entry->mean:-1.0; return exp; }
/**
 * Read the content of a cached symbolic link.
 *
 * Copies the symlink target stored in @entry into @link_content.  If the
 * cached content is not trusted (CACHE_INODE_TRUST_CONTENT clear), the
 * content is first refreshed from the FSAL under the entry's write lock.
 *
 * @param entry         Cache entry to read; must be of type SYMBOLIC_LINK.
 * @param link_content  Output: receives a copy of the link target.
 * @param context       FSAL operation context used for the refresh call.
 * @param status        Output: set to the resulting cache_inode status.
 * @return The value stored in *status (CACHE_INODE_SUCCESS on success).
 *
 * NOTE(review): on the ERR_FSAL_STALE path the entry is killed via
 * cache_inode_kill_entry() after the content lock is released.
 */
cache_inode_status_t cache_inode_readlink(cache_entry_t *entry, fsal_path_t *link_content, fsal_op_context_t *context, cache_inode_status_t *status)
{
	/* {0, 0} means "no FSAL error": the copy below runs unless a refresh
	 * was attempted and failed. */
	fsal_status_t fsal_status = {0, 0};

	/* Set the return default to CACHE_INODE_SUCCESS */
	*status = CACHE_INODE_SUCCESS;

	/* Only symlink entries carry link content. */
	if (entry->type != SYMBOLIC_LINK) {
		*status = CACHE_INODE_BAD_TYPE;
		return *status;
	}

	assert(entry->object.symlink);
	PTHREAD_RWLOCK_RDLOCK(&entry->content_lock);
	if (!(entry->flags & CACHE_INODE_TRUST_CONTENT)) {
		/* Our data are stale.  Drop the lock, get a write-lock, load in
		   new data, and copy it out to the caller. */
		PTHREAD_RWLOCK_UNLOCK(&entry->content_lock);
		PTHREAD_RWLOCK_WRLOCK(&entry->content_lock);
		/* Make sure nobody updated the content while we were waiting
		   (classic recheck after re-acquiring the lock in write mode). */
		if (!(entry->flags & CACHE_INODE_TRUST_CONTENT)) {
			fsal_status = FSAL_readlink(&entry->handle, context, &entry->object.symlink->content, NULL);
			if (!(FSAL_IS_ERROR(fsal_status))) {
				/* Refresh succeeded: mark the content trusted. */
				atomic_set_uint32_t_bits(&entry->flags, CACHE_INODE_TRUST_CONTENT);
			}
		}
	}
	/* Copy the (possibly freshly loaded) target out while still holding
	 * the content lock. */
	if (!(FSAL_IS_ERROR(fsal_status))) {
		FSAL_pathcpy(link_content, &(entry->object.symlink->content));
	}
	PTHREAD_RWLOCK_UNLOCK(&entry->content_lock);

	if (FSAL_IS_ERROR(fsal_status)) {
		*status = cache_inode_error_convert(fsal_status);
		if (fsal_status.major == ERR_FSAL_STALE) {
			LogEvent(COMPONENT_CACHE_INODE, "FSAL returned STALE from readlink");
			cache_inode_kill_entry(entry);
		}
		return *status;
	}

	return *status;
} /* cache_inode_readlink */
/* Wake every worker that may be blocked on one of the registered queue
 * conditions by broadcasting on each of them in turn. */
void starpu_wake_all_blocked_workers(void)
{
	starpu_mem_node_descr * const node_descr = _starpu_get_memory_node_description();

	/* The condition list is protected by a rwlock; reading it only needs
	 * the lock in read mode. */
	PTHREAD_RWLOCK_RDLOCK(&node_descr->conditions_rwlock);

	unsigned total = node_descr->total_condition_count;
	unsigned i;
	for (i = 0; i < total; i++)
	{
		struct _cond_and_mutex *cm = &node_descr->conditions_all[i];

		/* Wake everybody waiting on this condition. */
		PTHREAD_MUTEX_LOCK(cm->mutex);
		PTHREAD_COND_BROADCAST(cm->cond);
		PTHREAD_MUTEX_UNLOCK(cm->mutex);
	}

	PTHREAD_RWLOCK_UNLOCK(&node_descr->conditions_rwlock);
}
/* Wake every worker blocked on a condition attached to memory node NODEID by
 * broadcasting on each of that node's registered conditions. */
void _starpu_wake_all_blocked_workers_on_node(unsigned nodeid)
{
	starpu_mem_node_descr * const node_descr = _starpu_get_memory_node_description();

	/* Reading the per-node condition table only requires the rwlock in
	 * read mode. */
	PTHREAD_RWLOCK_RDLOCK(&node_descr->conditions_rwlock);

	unsigned total = node_descr->condition_count[nodeid];
	unsigned i;
	for (i = 0; i < total; i++)
	{
		struct _cond_and_mutex *cm = &node_descr->conditions_attached_to_node[nodeid][i];

		/* Wake everybody waiting on this condition. */
		PTHREAD_MUTEX_LOCK(cm->mutex);
		PTHREAD_COND_BROADCAST(cm->cond);
		PTHREAD_MUTEX_UNLOCK(cm->mutex);
	}

	PTHREAD_RWLOCK_UNLOCK(&node_descr->conditions_rwlock);
}
/* We first try to grab the global lock in read mode to check whether the model * was loaded or not (this is very likely to have been already loaded). If the * model was not loaded yet, we take the lock in write mode, and if the model * is still not loaded once we have the lock, we do load it. */ static void load_history_based_model(struct starpu_perfmodel_t *model, unsigned scan_history) { STARPU_ASSERT(model); STARPU_ASSERT(model->symbol); int already_loaded; PTHREAD_RWLOCK_RDLOCK(®istered_models_rwlock); already_loaded = model->is_loaded; PTHREAD_RWLOCK_UNLOCK(®istered_models_rwlock); if (already_loaded) return; /* The model is still not loaded so we grab the lock in write mode, and * if it's not loaded once we have the lock, we do load it. */ PTHREAD_RWLOCK_WRLOCK(®istered_models_rwlock); /* Was the model initialized since the previous test ? */ if (model->is_loaded) { PTHREAD_RWLOCK_UNLOCK(®istered_models_rwlock); return; } PTHREAD_RWLOCK_INIT(&model->model_rwlock, NULL); PTHREAD_RWLOCK_WRLOCK(&model->model_rwlock); /* make sure the performance model directory exists (or create it) */ _starpu_create_sampling_directory_if_needed(); /* * We need to keep track of all the model that were opened so that we can * possibly update them at runtime termination ... */ _starpu_register_model(model); char path[256]; get_model_path(model, path, 256); _STARPU_DEBUG("Opening performance model file %s for model %s ... ", path, model->symbol); unsigned calibrate_flag = _starpu_get_calibrate_flag(); model->benchmarking = calibrate_flag; /* try to open an existing file and load it */ int res; res = access(path, F_OK); if (res == 0) { if (calibrate_flag == 2) { /* The user specified that the performance model should * be overwritten, so we don't load the existing file ! 
* */ _STARPU_DEBUG("Overwrite existing file\n"); initialize_model(model); } else { /* We load the available file */ _STARPU_DEBUG("File exists\n"); FILE *f; f = fopen(path, "r"); STARPU_ASSERT(f); parse_model_file(f, model, scan_history); fclose(f); } } else { _STARPU_DEBUG("File does not exists\n"); if (!calibrate_flag) { _STARPU_DISP("Warning: model %s is not calibrated, forcing calibration for this run. Use the STARPU_CALIBRATE environment variable to control this.\n", model->symbol); _starpu_set_calibrate_flag(1); model->benchmarking = 1; } initialize_model(model); } model->is_loaded = 1; PTHREAD_RWLOCK_UNLOCK(&model->model_rwlock); PTHREAD_RWLOCK_UNLOCK(®istered_models_rwlock); }