/** Unlock a fibril mutex.
 *
 * The mutex must currently be locked (checked by assertion).
 * The actual unlock and hand-off to any waiting fibril is done by
 * _fibril_mutex_unlock_unsafe(); the surrounding futex_down()/futex_up()
 * pair on the global async_futex serializes access to the mutex's
 * internal state for that operation.
 *
 * @param fm Fibril mutex to unlock.
 */
void fibril_mutex_unlock(fibril_mutex_t *fm)
{
	assert(fibril_mutex_is_locked(fm));
	futex_down(&async_futex);
	_fibril_mutex_unlock_unsafe(fm);
	futex_up(&async_futex);
}
/** Decrement reference count of VFS file structure.
 *
 * When the last reference is dropped, the file is closed in the endpoint
 * file system, the reference to the underlying VFS node is released and
 * the file structure itself is freed.
 *
 * @param vfs_data Client data holding the file; vfs_data->lock must be held.
 * @param file     File structure that will have reference count
 *                 decremented.
 *
 * @return EOK, or an error code from closing the file in the endpoint
 *         file system when the last reference was dropped.
 */
static int vfs_file_delref(vfs_client_data_t *vfs_data, vfs_file_t *file)
{
	assert(fibril_mutex_is_locked(&vfs_data->lock));

	int prev = file->refcnt--;
	if (prev != 1)
		return EOK;

	/*
	 * Lost the last reference to a file, need to close it in the
	 * endpoint FS and drop our reference to the underlying VFS node.
	 */
	int rc = vfs_file_close_remote(file);
	vfs_node_delref(file->node);
	free(file);

	return rc;
}
/** Wait on a fibril condition variable, with an optional timeout.
 *
 * The caller must hold @a fm. The mutex is released together with the
 * enqueue on the condvar's waiter list (both under async_futex, so no
 * wakeup can be lost in between) and is re-acquired before returning.
 *
 * @param fcv     Condition variable to wait on.
 * @param fm      Mutex protecting the condition; must be locked by the
 *                caller.
 * @param timeout Timeout value; zero means wait indefinitely, a negative
 *                value fails immediately with ETIMEOUT.
 *                NOTE(review): presumably microseconds (suseconds_t fed
 *                to tv_add()) — confirm against tv_add()'s contract.
 *
 * @return EOK if the condition variable was signalled.
 * @return ETIMEOUT if the timeout expired (or was negative on entry).
 */
int fibril_condvar_wait_timeout(fibril_condvar_t *fcv, fibril_mutex_t *fm,
    suseconds_t timeout)
{
	awaiter_t wdata;

	assert(fibril_mutex_is_locked(fm));

	/* A negative timeout has, by definition, already expired. */
	if (timeout < 0)
		return ETIMEOUT;

	awaiter_initialize(&wdata);
	wdata.fid = fibril_get_id();
	/* Timeout event only participates when a real timeout was given. */
	wdata.to_event.inlist = timeout > 0;
	wdata.wu_event.inlist = true;

	futex_down(&async_futex);
	if (timeout) {
		/* Arm the timeout event at now + timeout. */
		getuptime(&wdata.to_event.expires);
		tv_add(&wdata.to_event.expires, timeout);
		async_insert_timeout(&wdata);
	}
	/* Enqueue as a waiter and release the mutex atomically w.r.t. wakeups. */
	list_append(&wdata.wu_event.link, &fcv->waiters);
	_fibril_mutex_unlock_unsafe(fm);
	fibril_switch(FIBRIL_TO_MANAGER);
	/* Woken up (signal or timeout): re-acquire the caller's mutex. */
	fibril_mutex_lock(fm);

	/* async_futex not held after fibril_switch() */
	futex_down(&async_futex);
	/* Unlink whichever of the two events is still queued. */
	if (wdata.to_event.inlist)
		list_remove(&wdata.to_event.link);
	if (wdata.wu_event.inlist)
		list_remove(&wdata.wu_event.link);
	futex_up(&async_futex);

	return wdata.to_event.occurred ? ETIMEOUT : EOK;
}
/** Increment reference count of VFS file structure.
 *
 * @param vfs_data Client data holding the file; vfs_data->lock must be held.
 * @param file     File structure that will have reference count
 *                 incremented.
 */
static void vfs_file_addref(vfs_client_data_t *vfs_data, vfs_file_t *file)
{
	assert(fibril_mutex_is_locked(&vfs_data->lock));

	++file->refcnt;
}