int osp_init_precreate(struct osp_device *d)
{
	struct l_wait_info	 lwi = { 0 };
	cfs_task_t		*task;

	ENTRY;

	/* initially precreation isn't ready */
	d->opd_pre_status = -EAGAIN;
	fid_zero(&d->opd_pre_used_fid);
	d->opd_pre_used_fid.f_oid = 1;
	fid_zero(&d->opd_pre_last_created_fid);
	d->opd_pre_last_created_fid.f_oid = 1;
	d->opd_pre_reserved = 0;
	d->opd_got_disconnected = 1;
	d->opd_pre_grow_slow = 0;
	d->opd_pre_grow_count = OST_MIN_PRECREATE;
	d->opd_pre_min_grow_count = OST_MIN_PRECREATE;
	d->opd_pre_max_grow_count = OST_MAX_PRECREATE;

	spin_lock_init(&d->opd_pre_lock);
	cfs_waitq_init(&d->opd_pre_waitq);
	cfs_waitq_init(&d->opd_pre_user_waitq);
	cfs_waitq_init(&d->opd_pre_thread.t_ctl_waitq);

	/*
	 * Initialize statfs-related things
	 */
	d->opd_statfs_maxage = 5; /* default update interval */
	d->opd_statfs_fresh_till = cfs_time_shift(-1000);
	CDEBUG(D_OTHER, "current %llu, fresh till %llu\n",
	       (unsigned long long)cfs_time_current(),
	       (unsigned long long)d->opd_statfs_fresh_till);
	cfs_timer_init(&d->opd_statfs_timer, osp_statfs_timer_cb, d);

	/*
	 * start thread handling precreation and statfs updates
	 */
	task = kthread_run(osp_precreate_thread, d, "osp-pre-%u",
			   d->opd_index);
	if (IS_ERR(task)) {
		CERROR("can't start precreate thread %ld\n", PTR_ERR(task));
		RETURN(PTR_ERR(task));
	}

	l_wait_event(d->opd_pre_thread.t_ctl_waitq,
		     osp_precreate_running(d) || osp_precreate_stopped(d),
		     &lwi);

	RETURN(0);
}
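/*
 * A minimal sketch (not the actual osp_precreate_thread()) of the far side
 * of the start-up handshake above: l_wait_event() returns once the thread
 * flips its state and signals t_ctl_waitq.  thread_set_flags() and the
 * SVC_RUNNING/SVC_STOPPED flags are assumed from the usual ptlrpc_thread
 * helpers.
 */
static int osp_precreate_thread_sketch(void *arg)
{
	struct osp_device *d = arg;

	/* mark ourselves running and wake up osp_init_precreate() */
	thread_set_flags(&d->opd_pre_thread, SVC_RUNNING);
	cfs_waitq_signal(&d->opd_pre_thread.t_ctl_waitq);

	/* ... precreate/statfs main loop would live here ... */

	/* mirror image on exit so waiters are never stranded */
	thread_set_flags(&d->opd_pre_thread, SVC_STOPPED);
	cfs_waitq_signal(&d->opd_pre_thread.t_ctl_waitq);
	return 0;
}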
int libcfs_debug_init(unsigned long bufsize)
{
	int rc = 0;
	unsigned int max = libcfs_debug_mb;

	cfs_waitq_init(&debug_ctlwq);

	if (libcfs_console_max_delay <= 0 || /* not set by user or */
	    libcfs_console_min_delay <= 0 || /* set to invalid values */
	    libcfs_console_min_delay >= libcfs_console_max_delay) {
		libcfs_console_max_delay = CDEBUG_DEFAULT_MAX_DELAY;
		libcfs_console_min_delay = CDEBUG_DEFAULT_MIN_DELAY;
	}

	if (libcfs_debug_file_path != NULL) {
		memset(libcfs_debug_file_path_arr, 0, PATH_MAX);
		strncpy(libcfs_debug_file_path_arr,
			libcfs_debug_file_path, PATH_MAX-1);
	}

	/* If libcfs_debug_mb is set to an invalid value or uninitialized
	 * then just make the total buffers smp_num_cpus * TCD_MAX_PAGES */
	if (max > cfs_trace_max_debug_mb() || max < cfs_num_possible_cpus()) {
		max = TCD_MAX_PAGES;
	} else {
		max = (max / cfs_num_possible_cpus());
		max = (max << (20 - CFS_PAGE_SHIFT));
	}
	rc = cfs_tracefile_init(max);

	if (rc == 0)
		libcfs_register_panic_notifier();

	return rc;
}
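/*
 * A worked example of the sizing math above, assuming 4KB pages
 * (CFS_PAGE_SHIFT == 12) and 8 possible CPUs: with libcfs_debug_mb = 256,
 * max becomes 256 / 8 = 32 MB per CPU, and the shift by (20 - 12) converts
 * MB to pages, giving 32 << 8 = 8192 pages per CPU handed to
 * cfs_tracefile_init().  Values outside the
 * [cfs_num_possible_cpus(), cfs_trace_max_debug_mb()] range fall back to
 * TCD_MAX_PAGES instead.
 */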
/*****************************************************************************
 *
 * Lov object operations.
 *
 */

int lov_object_init(const struct lu_env *env, struct lu_object *obj,
		    const struct lu_object_conf *conf)
{
	struct lov_device		*dev   = lu2lov_dev(obj->lo_dev);
	struct lov_object		*lov   = lu2lov(obj);
	const struct cl_object_conf	*cconf = lu2cl_conf(conf);
	union  lov_layout_state		*set   = &lov->u;
	const struct lov_layout_operations *ops;
	int result;

	ENTRY;
	init_rwsem(&lov->lo_type_guard);
	cfs_atomic_set(&lov->lo_active_ios, 0);
	cfs_waitq_init(&lov->lo_waitq);

	cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));

	/* no locking is necessary, as object is being created */
	lov->lo_type = lov_type(cconf->u.coc_md->lsm);
	ops = &lov_dispatch[lov->lo_type];
	result = ops->llo_init(env, dev, lov, cconf, set);
	if (result == 0)
		ops->llo_install(env, lov, set);
	RETURN(result);
}
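/*
 * A sketch of the dispatch-table shape assumed by the lov_dispatch[] lookup
 * above: one entry per layout type, indexed by the value lov_type() returns.
 * The entries and operations listed here are illustrative, not the full
 * table from lov_object.c:
 */
static const struct lov_layout_operations lov_dispatch_sketch[] = {
	[LLT_EMPTY] = {
		.llo_init    = lov_init_empty,
		.llo_install = lov_install_empty,
	},
	[LLT_RAID0] = {
		.llo_init    = lov_init_raid0,
		.llo_install = lov_install_raid0,
	},
};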
static void
cfs_wi_sched_init(cfs_wi_sched_t *sched)
{
	sched->ws_shuttingdown = 0;
#ifdef __KERNEL__
	cfs_spin_lock_init(&sched->ws_lock);
	cfs_waitq_init(&sched->ws_waitq);
#endif
	CFS_INIT_LIST_HEAD(&sched->ws_runq);
	CFS_INIT_LIST_HEAD(&sched->ws_rerunq);
}
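/*
 * A rough sketch of the consumer these fields serve, under the assumption
 * that a scheduler thread drains ws_runq and that work-item actions may
 * requeue themselves to ws_rerunq; the real cfs_wi_scheduler() in
 * workitem.c adds locking and batching:
 */
static int
cfs_wi_scheduler_sketch(cfs_wi_sched_t *sched)
{
	struct l_wait_info lwi = { 0 };

	while (!sched->ws_shuttingdown) {
		cfs_workitem_t *wi;

		/* sleep until work is queued or shutdown is requested */
		l_wait_event(sched->ws_waitq,
			     !cfs_list_empty(&sched->ws_runq) ||
			     sched->ws_shuttingdown,
			     &lwi);

		while (!cfs_list_empty(&sched->ws_runq)) {
			wi = cfs_list_entry(sched->ws_runq.next,
					    cfs_workitem_t, wi_list);
			cfs_list_del_init(&wi->wi_list);
			(*wi->wi_action)(wi); /* may requeue to ws_rerunq */
		}
	}
	return 0;
}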
/*
 * Initialize qsd-specific fields of quota entry.
 *
 * \param lqe - is the quota entry to initialize
 * \param arg - is the pointer to the qsd_qtype_info structure
 */
static void qsd_lqe_init(struct lquota_entry *lqe, void *arg)
{
	LASSERT(!lqe_is_master(lqe));

	/* initialize slave parameters */
	rwlock_init(&lqe->lqe_lock);
	memset(&lqe->lqe_lockh, 0, sizeof(lqe->lqe_lockh));
	lqe->lqe_pending_write = 0;
	lqe->lqe_pending_req = 0;
	cfs_waitq_init(&lqe->lqe_waiters);
	lqe->lqe_usage = 0;
	lqe->lqe_nopreacq = false;
}
int mdt_ck_thread_start(struct mdt_device *mdt)
{
	struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
	int rc;

	cfs_waitq_init(&thread->t_ctl_waitq);
	rc = cfs_create_thread(mdt_ck_thread_main, mdt, CFS_DAEMON_FLAGS);
	if (rc < 0) {
		CERROR("cannot start mdt_ck thread, rc = %d\n", rc);
		return rc;
	}

	l_wait_condition(thread->t_ctl_waitq, thread_is_running(thread));
	return 0;
}
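/*
 * For reference, l_wait_condition() above is (approximately) this thin
 * wrapper over l_wait_event() with a default l_wait_info -- i.e. an
 * uninterruptible wait with no timeout:
 *
 * #define l_wait_condition(wq, condition)			\
 * ({								\
 *	struct l_wait_info lwi = { 0 };				\
 *	l_wait_event(wq, condition, &lwi);			\
 * })
 */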
int sptlrpc_enc_pool_init(void)
{
	/*
	 * maximum capacity is 1/8 of total physical memory.
	 * is the 1/8 a good number?
	 */
	page_pools.epp_max_pages = cfs_num_physpages / 8;
	page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);

	cfs_waitq_init(&page_pools.epp_waitq);
	page_pools.epp_waitqlen = 0;
	page_pools.epp_pages_short = 0;

	page_pools.epp_growing = 0;

	page_pools.epp_idle_idx = 0;
	page_pools.epp_last_shrink = cfs_time_current_sec();
	page_pools.epp_last_access = cfs_time_current_sec();

	cfs_spin_lock_init(&page_pools.epp_lock);
	page_pools.epp_total_pages = 0;
	page_pools.epp_free_pages = 0;

	page_pools.epp_st_max_pages = 0;
	page_pools.epp_st_grows = 0;
	page_pools.epp_st_grow_fails = 0;
	page_pools.epp_st_shrinks = 0;
	page_pools.epp_st_access = 0;
	page_pools.epp_st_missings = 0;
	page_pools.epp_st_lowfree = 0;
	page_pools.epp_st_max_wqlen = 0;
	page_pools.epp_st_max_wait = 0;

	enc_pools_alloc();
	if (page_pools.epp_pools == NULL)
		return -ENOMEM;

	pools_shrinker = cfs_set_shrinker(pools_shrinker_seeks,
					  enc_pools_shrink);
	if (pools_shrinker == NULL) {
		enc_pools_free();
		return -ENOMEM;
	}

	return 0;
}
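/*
 * A minimal sketch of the matching teardown, assuming cfs_remove_shrinker()
 * as the counterpart to cfs_set_shrinker(); the real sptlrpc_enc_pool_fini()
 * also drains and sanity-checks the pools before freeing them:
 */
void sptlrpc_enc_pool_fini_sketch(void)
{
	/* unhook the shrinker first so it cannot race with the free */
	cfs_remove_shrinker(pools_shrinker);
	enc_pools_free();
}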
int seq_client_init(struct lu_client_seq *seq,
		    struct obd_export *exp,
		    enum lu_cli_type type,
		    const char *prefix,
		    struct lu_server_seq *srv)
{
	int rc;
	ENTRY;

	LASSERT(seq != NULL);
	LASSERT(prefix != NULL);

	seq->lcs_srv = srv;
	seq->lcs_type = type;

	mutex_init(&seq->lcs_mutex);
	if (type == LUSTRE_SEQ_METADATA)
		seq->lcs_width = LUSTRE_METADATA_SEQ_MAX_WIDTH;
	else
		seq->lcs_width = LUSTRE_DATA_SEQ_MAX_WIDTH;

	cfs_waitq_init(&seq->lcs_waitq);
	/* Make sure that things are clear before work is started. */
	seq_client_flush(seq);

	if (exp != NULL)
		seq->lcs_exp = class_export_get(exp);
	else if (type == LUSTRE_SEQ_METADATA)
		LASSERT(seq->lcs_srv != NULL);

	snprintf(seq->lcs_name, sizeof(seq->lcs_name),
		 "cli-%s", prefix);

	rc = seq_client_proc_init(seq);
	if (rc)
		seq_client_fini(seq);

	RETURN(rc);
}
static void lcw_dispatch_start(void)
{
	int rc;

	ENTRY;
	LASSERT(lcw_refcount == 1);

	cfs_init_completion(&lcw_stop_completion);
	cfs_init_completion(&lcw_start_completion);
	cfs_waitq_init(&lcw_event_waitq);

	CDEBUG(D_INFO, "starting dispatch thread\n");
	rc = cfs_create_thread(lcw_dispatch_main, NULL, 0);
	if (rc < 0) {
		CERROR("error spawning watchdog dispatch thread: %d\n", rc);
		EXIT;
		return;
	}
	cfs_wait_for_completion(&lcw_start_completion);
	CDEBUG(D_INFO, "watchdog dispatcher initialization complete.\n");

	EXIT;
}
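/*
 * A minimal sketch of the thread-side half of the handshake, assuming
 * cfs_complete() as the counterpart to cfs_wait_for_completion(); the real
 * lcw_dispatch_main() does this around its event loop:
 */
static int lcw_dispatch_main_sketch(void *arg)
{
	/* unblock lcw_dispatch_start() once we are ready */
	cfs_complete(&lcw_start_completion);

	/* ... wait on lcw_event_waitq and service watchdog events ... */

	/* unblock the stop path on the way out */
	cfs_complete(&lcw_stop_completion);
	return 0;
}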
static int quota_chk_acq_common(struct obd_device *obd, struct obd_export *exp,
				const unsigned int id[], int pending[],
				int count, quota_acquire acquire,
				struct obd_trans_info *oti, int isblk,
				struct inode *inode, int frags)
{
	struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
	struct timeval work_start;
	struct timeval work_end;
	long timediff;
	struct l_wait_info lwi = { 0 };
	int rc = 0, cycle = 0, count_err = 1;
	ENTRY;

	if (!quota_is_set(obd, id, isblk ? QB_SET : QI_SET))
		RETURN(0);

	if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
		/* If the client has been evicted or if it
		 * timed out and tried to reconnect already,
		 * abort the request immediately */
		RETURN(-ENOTCONN);

	CDEBUG(D_QUOTA, "check quota for %s\n", obd->obd_name);
	pending[USRQUOTA] = pending[GRPQUOTA] = 0;
	/* Unfortunately, if quota master is too busy to handle the
	 * pre-dqacq in time and quota hash on ost is used up, we
	 * have to wait for the completion of in flight dqacq/dqrel,
	 * in order to get enough quota for write b=12588 */
	cfs_gettimeofday(&work_start);

	while ((rc = quota_check_common(obd, id, pending, count, cycle, isblk,
					inode, frags)) & QUOTA_RET_ACQUOTA) {

		cfs_spin_lock(&qctxt->lqc_lock);
		if (!qctxt->lqc_import && oti) {
			cfs_spin_unlock(&qctxt->lqc_lock);

			LASSERT(oti && oti->oti_thread &&
				oti->oti_thread->t_watchdog);

			lc_watchdog_disable(oti->oti_thread->t_watchdog);
			CDEBUG(D_QUOTA, "sleep for quota master\n");
			l_wait_event(qctxt->lqc_wait_for_qmaster,
				     check_qm(qctxt), &lwi);
			CDEBUG(D_QUOTA, "wake up when quota master is back\n");
			lc_watchdog_touch(oti->oti_thread->t_watchdog,
				 CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
		} else {
			cfs_spin_unlock(&qctxt->lqc_lock);
		}

		cycle++;
		if (isblk)
			OBD_FAIL_TIMEOUT(OBD_FAIL_OST_HOLD_WRITE_RPC, 90);
		/* after acquire(), we should run quota_check_common again
		 * so that we confirm there are enough quota to finish write */
		rc = acquire(obd, id, oti, isblk);

		/* please reference to dqacq_completion for the below */
		/* a new request is finished, try again */
		if (rc == QUOTA_REQ_RETURNED) {
			CDEBUG(D_QUOTA, "finish a quota req, try again\n");
			continue;
		}

		/* it is out of quota already */
		if (rc == -EDQUOT) {
			CDEBUG(D_QUOTA, "out of quota, return -EDQUOT\n");
			break;
		}

		/* Related quota has been disabled by master, but enabled by
		 * slave, do not try again. */
		if (unlikely(rc == -ESRCH)) {
			CERROR("mismatched quota configuration, stop try.\n");
			break;
		}

		if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
			/* The client has been evicted or tried to
			 * reconnect already, abort the request */
			RETURN(-ENOTCONN);

		/* -EBUSY and others, wait a second and try again */
		if (rc < 0) {
			cfs_waitq_t waitq;
			struct l_wait_info lwi;

			if (oti && oti->oti_thread &&
			    oti->oti_thread->t_watchdog)
				lc_watchdog_touch(oti->oti_thread->t_watchdog,
					 CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
			CDEBUG(D_QUOTA, "rc: %d, count_err: %d\n", rc,
			       count_err++);

			cfs_waitq_init(&waitq);
			lwi = LWI_TIMEOUT(cfs_time_seconds(min(cycle, 10)),
					  NULL, NULL);
			l_wait_event(waitq, 0, &lwi);
		}

		if (rc < 0 || cycle % 10 == 0) {
			cfs_spin_lock(&last_print_lock);
			if (last_print == 0 ||
			    cfs_time_before((last_print + cfs_time_seconds(30)),
					    cfs_time_current())) {
				last_print = cfs_time_current();
				cfs_spin_unlock(&last_print_lock);
				CWARN("still haven't managed to acquire quota "
				      "space from the quota master after %d "
				      "retries (err=%d, rc=%d)\n",
				      cycle, count_err - 1, rc);
			} else {
				cfs_spin_unlock(&last_print_lock);
			}
		}

		CDEBUG(D_QUOTA, "recheck quota with rc: %d, cycle: %d\n", rc,
		       cycle);
	}
	cfs_gettimeofday(&work_end);
	timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
	lprocfs_counter_add(qctxt->lqc_stats,
			    isblk ? LQUOTA_WAIT_FOR_CHK_BLK :
				    LQUOTA_WAIT_FOR_CHK_INO,
			    timediff);

	if (rc > 0)
		rc = 0;
	RETURN(rc);
}
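/*
 * Note on the "-EBUSY and others" branch above: initializing a throwaway
 * wait queue that nothing ever signals and waiting on a constant-false
 * condition with LWI_TIMEOUT is a common Lustre idiom for a plain sleep --
 * only the timeout can end the wait, so the call amounts to
 * "sleep min(cycle, 10) seconds" as a retry backoff.
 */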
void cfs_init_completion(cfs_completion_t *c)
{
	LASSERT(c != NULL);
	c->done = 0;
	cfs_waitq_init(&c->wait);
}
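/*
 * A minimal sketch of the wait/complete pair this initializer supports,
 * assuming c->done counts posted completions and c->wait queues the
 * waiters; one plausible shape for the cfs_complete() and
 * cfs_wait_for_completion() used in lcw_dispatch_start() above, not the
 * actual libcfs implementation:
 */
void cfs_complete_sketch(cfs_completion_t *c)
{
	c->done++;			/* record one completion event */
	cfs_waitq_signal(&c->wait);	/* and wake a waiter, if any */
}

void cfs_wait_for_completion_sketch(cfs_completion_t *c)
{
	struct l_wait_info lwi = { 0 };

	/* block until a matching cfs_complete_sketch() has run */
	l_wait_event(c->wait, c->done != 0, &lwi);
	c->done--;			/* consume the event */
}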