/*
 * Prepare a wait queue for use: fresh synchronization primitives
 * (mutex plus a semaphore starting at 0), no pending event and
 * no registered waiter.
 */
static void wq_init(wq_t *wq)
{
    os_thread_mutex_init(&wq->lock);
    os_sem_init(&wq->sem, 0);

    wq->ev = 0;
    wq->wait = 0;
}
void iscsi_target_perf_init(void) { target_eh = nbd_clientd_get_exaperf(); if (target_eh == NULL) return; target_iodepth[0] = exaperf_repart_init(target_eh, "ISCSI_TARGET_IODEPTH_READ", TARGET_REPART_DEPTH, limits_target_iodepth); target_iodepth[1] = exaperf_repart_init(target_eh, "ISCSI_TARGET_IODEPTH_WRITE", TARGET_REPART_DEPTH, limits_target_iodepth); target_req_time[0] = exaperf_duration_init(target_eh, "ISCSI_TARGET_DUR_READ", true); target_req_time[1] = exaperf_duration_init(target_eh, "ISCSI_TARGET_DUR_WRITE", true); target_req_size_repart[0] = exaperf_repart_init(target_eh, "ISCSI_TARGET_REQ_SIZE_READ", TARGET_REPART, limits_target_req_size); target_req_size_repart[1] = exaperf_repart_init(target_eh, "ISCSI_TARGET_REQ_SIZE_WRITE", TARGET_REPART, limits_target_req_size); os_thread_mutex_init(&iodepth_mutex); }
/*
 * Allocate and zero-initialize the rain1 per-rdev layout data.
 *
 * Returns the new structure, or NULL if allocation failed.
 * Ownership of the returned memory passes to the caller.
 */
struct rain1_realdev *rain1_alloc_rdev_layout_data(vrt_realdev_t *rdev)
{
    struct rain1_realdev *lr;

    lr = os_malloc(sizeof(struct rain1_realdev));
    if (lr == NULL)
        return NULL;

    memset(lr, 0, sizeof(struct rain1_realdev));

    /* FIXME storing the rdev here means we can get rid of most of the
     * iterations of the storage, and just iterate over
     * rxg->rain1_rdevs[]->rdev. */
    lr->rdev = rdev;

    /* Keep a copy of the rdev uuid: it is the only link back to the
     * rdev when it must be looked up again after a superblock reread. */
    uuid_copy(&lr->uuid, &rdev->uuid);

    lr->mine = rdev->local;

    os_thread_mutex_init(&lr->rebuild_progress.lock);
    lr->rebuild_progress.complete = FALSE;
    lr->rebuild_progress.nb_slots_rebuilt = 0;

    lr->sync_tag = SYNC_TAG_ZERO;

    return lr;
}
/*
 * Reset every possible peer slot and create the lock that
 * protects the peer table.
 */
static void init_peers(void)
{
    exa_nodeid_t id = 0;

    exalog_debug("initializing peers");

    while (id < EXA_MAX_NODES_NUMBER)
        __reset_peer(id++);

    os_thread_mutex_init(&peers_lock);
}
/*
 * One-time setup for VRT command message handling: create the
 * lock serializing access to the pending group state.
 */
void vrt_cmd_handle_message_init(void)
{
    os_thread_mutex_init(&pending_group_lock);
}