int ompi_osc_sm_lock(int lock_type, int target, int assert, struct ompi_win_t *win)
{
    ompi_osc_sm_module_t *module = (ompi_osc_sm_module_t*) win->w_osc_module;
    int ret;

    /* Refuse to lock a target that already has an outstanding lock. */
    if (lock_none != module->outstanding_locks[target]) {
        return OMPI_ERR_RMA_SYNC;
    }

    if (0 == (assert & MPI_MODE_NOCHECK)) {
        if (MPI_LOCK_EXCLUSIVE == lock_type) {
            module->outstanding_locks[target] = lock_exclusive;
            ret = start_exclusive(module, target);
        } else {
            module->outstanding_locks[target] = lock_shared;
            ret = start_shared(module, target);
        }
    } else {
        /* MPI_MODE_NOCHECK: the caller asserts no conflicting lock exists,
         * so no actual lock acquisition is performed. */
        module->outstanding_locks[target] = lock_nocheck;
        ret = OMPI_SUCCESS;
    }

    return ret;
}
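/* Illustrative counterpart (a minimal sketch, not the verbatim Open MPI
 * implementation): unlocking reverses whatever ompi_osc_sm_lock() above
 * recorded in outstanding_locks[target].  end_exclusive()/end_shared() are
 * assumed to be the release counterparts of start_exclusive()/start_shared(). */
static int ompi_osc_sm_unlock_sketch(int target, struct ompi_win_t *win)
{
    ompi_osc_sm_module_t *module = (ompi_osc_sm_module_t*) win->w_osc_module;
    int ret;

    switch (module->outstanding_locks[target]) {
    case lock_none:
        /* Unlock without a matching lock is a synchronization error. */
        return OMPI_ERR_RMA_SYNC;
    case lock_nocheck:
        ret = OMPI_SUCCESS;
        break;
    case lock_exclusive:
        ret = end_exclusive(module, target);
        break;
    case lock_shared:
        ret = end_shared(module, target);
        break;
    default:
        ret = OMPI_ERROR;
        break;
    }

    module->outstanding_locks[target] = lock_none;
    return ret;
}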
int ompi_osc_ucx_lock(int lock_type, int target, int assert, struct ompi_win_t *win)
{
    ompi_osc_ucx_module_t *module = (ompi_osc_ucx_module_t *)win->w_osc_module;
    ompi_osc_ucx_lock_t *lock = NULL;
    ompi_osc_ucx_epoch_t original_epoch = module->epoch_type.access;
    int ret = OMPI_SUCCESS;

    if (module->lock_count == 0) {
        if (module->epoch_type.access != NONE_EPOCH &&
            module->epoch_type.access != FENCE_EPOCH) {
            return OMPI_ERR_RMA_SYNC;
        }
    } else {
        /* Already inside a passive-target epoch: only reject if this
         * target is being locked a second time. */
        ompi_osc_ucx_lock_t *item = NULL;
        assert(module->epoch_type.access == PASSIVE_EPOCH);
        opal_hash_table_get_value_uint32(&module->outstanding_locks,
                                         (uint32_t) target, (void **) &item);
        if (item != NULL) {
            return OMPI_ERR_RMA_SYNC;
        }
    }

    module->epoch_type.access = PASSIVE_EPOCH;
    module->lock_count++;
    assert(module->lock_count <= ompi_comm_size(module->comm));

    lock = OBJ_NEW(ompi_osc_ucx_lock_t);
    lock->target_rank = target;

    if ((assert & MPI_MODE_NOCHECK) == 0) {
        lock->is_nocheck = false;
        if (lock_type == MPI_LOCK_EXCLUSIVE) {
            ret = start_exclusive(module, target);
            lock->type = LOCK_EXCLUSIVE;
        } else {
            ret = start_shared(module, target);
            lock->type = LOCK_SHARED;
        }
    } else {
        lock->is_nocheck = true;
    }

    if (ret == OMPI_SUCCESS) {
        opal_hash_table_set_value_uint32(&module->outstanding_locks,
                                         (uint32_t)target, (void *)lock);
    } else {
        /* Lock acquisition failed: drop the lock object and roll back
         * the epoch change. */
        OBJ_RELEASE(lock);
        module->epoch_type.access = original_epoch;
    }

    return ret;
}
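/* Usage sketch: at the application level, both osc lock implementations above
 * are reached through the standard MPI passive-target calls.  MPI_Win_lock()
 * with MPI_MODE_NOCHECK maps to the lock_nocheck/is_nocheck fast path,
 * otherwise to start_exclusive()/start_shared().  The helper name, rank and
 * buffer layout here are illustrative only. */
#include <mpi.h>

static void passive_target_put(MPI_Win win, int target, int value)
{
    /* Exclusive lock on the target, write one int at displacement 0,
     * then unlock (which completes the RMA operation at the target). */
    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, target, 0, win);
    MPI_Put(&value, 1, MPI_INT, target, 0, 1, MPI_INT, win);
    MPI_Win_unlock(target, win);
}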
void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    qemu_mutex_lock(&cpu->work_mutex);
    while (cpu->queued_work_first != NULL) {
        /* Pop the next work item off this CPU's queue. */
        wi = cpu->queued_work_first;
        cpu->queued_work_first = wi->next;
        if (!cpu->queued_work_first) {
            cpu->queued_work_last = NULL;
        }
        qemu_mutex_unlock(&cpu->work_mutex);
        if (wi->exclusive) {
            /* Running work items outside the BQL avoids the following deadlock:
             * 1) start_exclusive() is called with the BQL taken while another
             * CPU is running; 2) cpu_exec in the other CPU tries to take the
             * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
             * neither CPU can proceed.
             */
            qemu_mutex_unlock_iothread();
            start_exclusive();
            wi->func(cpu, wi->data);
            end_exclusive();
            qemu_mutex_lock_iothread();
        } else {
            wi->func(cpu, wi->data);
        }
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            atomic_mb_set(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}
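/* Producer-side sketch (an assumption, not verbatim QEMU code): how a work
 * item consumed by process_queued_cpu_work() above might be enqueued.  It
 * only touches the fields referenced above (queued_work_first/last, next,
 * done, work_mutex); qemu_cpu_kick() is the existing helper that wakes the
 * target vCPU so it notices the pending work. */
static void queue_work_on_cpu_sketch(CPUState *cpu, struct qemu_work_item *wi)
{
    qemu_mutex_lock(&cpu->work_mutex);
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
}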
/* Make sure everything is in a consistent state for calling fork(). */
void fork_start(void)
{
    start_exclusive();
    mmap_fork_start();
    cpu_list_lock();
}
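/* Counterpart sketch (an assumption, not the verbatim QEMU implementation):
 * after fork() returns, the parent is expected to undo fork_start() above in
 * reverse order.  mmap_fork_end(), cpu_list_unlock() and end_exclusive() are
 * the release counterparts of the calls used in fork_start(); the child-side
 * cleanup is omitted here. */
static void fork_end_parent_sketch(void)
{
    mmap_fork_end(0);           /* 0: parent side of the fork */
    cpu_list_unlock();
    end_exclusive();
}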