/* MPI_Win_unlock for the osc/ucx component: flush all outstanding RMA
 * operations to the target, then release the passive-target lock taken
 * in ompi_osc_ucx_lock(). */
int ompi_osc_ucx_unlock(int target, struct ompi_win_t *win) {
    ompi_osc_ucx_module_t *module = (ompi_osc_ucx_module_t *)win->w_osc_module;
    ompi_osc_ucx_lock_t *lock = NULL;
    int ret = OMPI_SUCCESS;
    ucp_ep_h ep;

    /* Unlock is only legal inside a passive-target access epoch. */
    if (module->epoch_type.access != PASSIVE_EPOCH) {
        return OMPI_ERR_RMA_SYNC;
    }

    /* No recorded lock for this target means no matching MPI_Win_lock. */
    opal_hash_table_get_value_uint32(&module->outstanding_locks,
                                     (uint32_t) target, (void **) &lock);
    if (lock == NULL) {
        return OMPI_ERR_RMA_SYNC;
    }
    opal_hash_table_remove_value_uint32(&module->outstanding_locks,
                                        (uint32_t) target);

    /* Complete all pending UCX operations targeting this peer. */
    ep = OSC_UCX_GET_EP(module->comm, target);
    ret = opal_common_ucx_ep_flush(ep, mca_osc_ucx_component.ucp_worker);
    if (ret != OMPI_SUCCESS) {
        return ret;
    }

    module->global_ops_num -= module->per_target_ops_nums[target];
    module->per_target_ops_nums[target] = 0;

    /* With MPI_MODE_NOCHECK no lock was actually acquired, so there is
     * nothing to release. */
    if (lock->is_nocheck == false) {
        if (lock->type == LOCK_EXCLUSIVE) {
            ret = end_exclusive(module, target);
        } else {
            ret = end_shared(module, target);
        }
    }

    OBJ_RELEASE(lock);

    module->lock_count--;
    assert(module->lock_count >= 0);

    /* Last outstanding lock gone: the passive-target epoch ends. */
    if (module->lock_count == 0) {
        module->epoch_type.access = NONE_EPOCH;
        assert(module->global_ops_num == 0);
    }

    return ret;
}
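For context, a minimal caller-side sketch using the standard MPI-3 passive-target API: MPI_Win_unlock is what dispatches into the selected osc component's unlock hook, e.g. ompi_osc_ucx_unlock() above when the ucx component owns the window. The function put_to_neighbor and its parameters are illustrative assumptions, not part of the source.

#include <mpi.h>

void put_to_neighbor(MPI_Win win, int target, double *value, MPI_Aint disp)
{
    /* Begin a passive-target access epoch on the target rank. */
    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, target, 0, win);

    /* MPI_Put may return before the data is delivered... */
    MPI_Put(value, 1, MPI_DOUBLE, target, disp, 1, MPI_DOUBLE, win);

    /* ...the unlock completes it: in the ucx path that is the
     * opal_common_ucx_ep_flush() call, after which the lock is released. */
    MPI_Win_unlock(target, win);
}

This is also why the flush sits before end_exclusive()/end_shared() in the unlock: the lock must not be released until every RMA operation issued inside the epoch has completed at the target.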
int ompi_osc_sm_unlock(int target, struct ompi_win_t *win) {
    ompi_osc_sm_module_t *module = (ompi_osc_sm_module_t*) win->w_osc_module;
    int ret;

    /* ensure all memory operations have completed */
    opal_atomic_mb();

    switch (module->outstanding_locks[target]) {
    case lock_none:
        return OMPI_ERR_RMA_SYNC;

    case lock_nocheck:
        ret = OMPI_SUCCESS;
        break;

    case lock_exclusive:
        ret = end_exclusive(module, target);
        break;

    case lock_shared:
        ret = end_shared(module, target);
        break;

    default:
        // This is an OMPI programming error -- cause some pain.
        assert(module->outstanding_locks[target] == lock_none ||
               module->outstanding_locks[target] == lock_nocheck ||
               module->outstanding_locks[target] == lock_exclusive ||
               module->outstanding_locks[target] == lock_shared);

        // In non-developer builds, assert() will be a no-op, so
        // ensure the error gets reported
        opal_output(0, "Unknown lock type in ompi_osc_sm_unlock -- "
                    "this is an OMPI programming error");
        ret = OMPI_ERR_BAD_PARAM;
        break;
    }

    module->outstanding_locks[target] = lock_none;

    return ret;
}
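The lock_nocheck arm exists because MPI_MODE_NOCHECK lets the caller assert that no conflicting lock is held, so the shared-memory lock protocol is skipped on both sides and unlock only needs the memory barrier. A sketch of the matching lock-side bookkeeping that records the state this unlock consumes; the body is an approximation under those assumptions, not the verbatim Open MPI source:

int ompi_osc_sm_lock(int lock_type, int target, int assert, struct ompi_win_t *win)
{
    ompi_osc_sm_module_t *module = (ompi_osc_sm_module_t*) win->w_osc_module;
    int ret = OMPI_SUCCESS;

    if (0 != (assert & MPI_MODE_NOCHECK)) {
        /* Caller promises there is no conflicting lock: record the state
         * and skip the lock protocol entirely. */
        module->outstanding_locks[target] = lock_nocheck;
    } else if (MPI_LOCK_EXCLUSIVE == lock_type) {
        module->outstanding_locks[target] = lock_exclusive;
        ret = start_exclusive(module, target);
    } else {
        module->outstanding_locks[target] = lock_shared;
        ret = start_shared(module, target);
    }

    return ret;
}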
/* Drain and run all work items queued on this CPU; called from the
 * vCPU's own thread. */
void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    qemu_mutex_lock(&cpu->work_mutex);
    while (cpu->queued_work_first != NULL) {
        wi = cpu->queued_work_first;
        cpu->queued_work_first = wi->next;
        if (!cpu->queued_work_first) {
            cpu->queued_work_last = NULL;
        }
        qemu_mutex_unlock(&cpu->work_mutex);
        if (wi->exclusive) {
            /* Running work items outside the BQL avoids the following deadlock:
             * 1) start_exclusive() is called with the BQL taken while another
             * CPU is running; 2) cpu_exec in the other CPU tries to take the
             * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
             * neither CPU can proceed.
             */
            qemu_mutex_unlock_iothread();
            start_exclusive();
            wi->func(cpu, wi->data);
            end_exclusive();
            qemu_mutex_lock_iothread();
        } else {
            wi->func(cpu, wi->data);
        }
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            atomic_mb_set(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}
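For the producer side, here is a sketch modeled on QEMU's queue_work_on_cpu()/async_run_on_cpu() from the same era as the loop above. The field set of struct qemu_work_item beyond what the consumer touches, and the exact helper bodies, are assumptions rather than verbatim source:

static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
{
    qemu_mutex_lock(&cpu->work_mutex);
    /* Append to the singly linked list drained by process_queued_cpu_work(). */
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    /* Wake the target vCPU so it drains its queue promptly. */
    qemu_cpu_kick(cpu);
}

void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    struct qemu_work_item *wi = g_malloc0(sizeof(struct qemu_work_item));

    wi->func = func;
    wi->data = data;
    wi->free = true;   /* consumer g_free()s it; nobody waits on wi->done */

    queue_work_on_cpu(cpu, wi);
}

The wi->free/wi->done split mirrors the two branches at the bottom of the consumer loop: asynchronous items are heap-allocated and freed by the consumer, while synchronous run_on_cpu() callers stack-allocate the item with free == false and block on qemu_work_cond until done flips to true, which is why the loop ends with a broadcast.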