Example #1
static void null_init_internal(void)
{
        static CFS_HLIST_HEAD(__list);

        null_sec.ps_policy = &null_policy;
        cfs_atomic_set(&null_sec.ps_refcount, 1);     /* always busy */
        null_sec.ps_id = -1;
        null_sec.ps_import = NULL;
        null_sec.ps_flvr.sf_rpc = SPTLRPC_FLVR_NULL;
        null_sec.ps_flvr.sf_flags = 0;
        null_sec.ps_part = LUSTRE_SP_ANY;
        null_sec.ps_dying = 0;
        cfs_spin_lock_init(&null_sec.ps_lock);
        cfs_atomic_set(&null_sec.ps_nctx, 1);         /* for "null_cli_ctx" */
        CFS_INIT_LIST_HEAD(&null_sec.ps_gc_list);
        null_sec.ps_gc_interval = 0;
        null_sec.ps_gc_next = 0;

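        /* hook the permanent client context onto a local dummy list so its
         * cc_cache hlist node is always linked */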
        cfs_hlist_add_head(&null_cli_ctx.cc_cache, &__list);
        cfs_atomic_set(&null_cli_ctx.cc_refcount, 1);    /* for hash */
        null_cli_ctx.cc_sec = &null_sec;
        null_cli_ctx.cc_ops = &null_ctx_ops;
        null_cli_ctx.cc_expire = 0;
        null_cli_ctx.cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_ETERNAL |
                                PTLRPC_CTX_UPTODATE;
        null_cli_ctx.cc_vcred.vc_uid = 0;
        cfs_spin_lock_init(&null_cli_ctx.cc_lock);
        CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
        CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
}
Example #2
static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
        int pgcount = tcd->tcd_cur_pages / 10;
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        if (printk_ratelimit())
                printk(CFS_KERN_WARNING "debug daemon buffer overflowed; "
                       "discarding 10%% of pages (%d of %ld)\n",
                       pgcount + 1, tcd->tcd_cur_pages);

        CFS_INIT_LIST_HEAD(&pc.pc_pages);
        cfs_spin_lock_init(&pc.pc_lock);

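        /* detach the oldest ~10% of the trace pages onto a private
         * collection, then hand them over to the trace daemon's list */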
        cfs_list_for_each_entry_safe_typed(tage, tmp, &tcd->tcd_pages,
                                           struct cfs_trace_page, linkage) {
                if (pgcount-- == 0)
                        break;

                cfs_list_move_tail(&tage->linkage, &pc.pc_pages);
                tcd->tcd_cur_pages--;
        }
        put_pages_on_tcd_daemon_list(&pc, tcd);
}
Example #3
int lov_alloc_memmd(struct lov_stripe_md **lsmp, int stripe_count,
                    int pattern, int magic)
{
    int i, lsm_size;
    ENTRY;

    CDEBUG(D_INFO, "alloc lsm, stripe_count %d\n", stripe_count);

    *lsmp = lsm_alloc_plain(stripe_count, &lsm_size);
    if (!*lsmp) {
        CERROR("can't allocate lsmp stripe_count %d\n", stripe_count);
        RETURN(-ENOMEM);
    }

    cfs_spin_lock_init(&(*lsmp)->lsm_lock);
    (*lsmp)->lsm_magic = magic;
    (*lsmp)->lsm_stripe_count = stripe_count;
    (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES * stripe_count;
    (*lsmp)->lsm_pattern = pattern;
    (*lsmp)->lsm_pool_name[0] = '\0';
    (*lsmp)->lsm_oinfo[0]->loi_ost_idx = ~0;

    for (i = 0; i < stripe_count; i++)
        loi_init((*lsmp)->lsm_oinfo[i]);

    RETURN(lsm_size);
}
Example #4
int
cfs_wi_startup (void)
{
        int i;
        int n;
        int rc;

        cfs_wi_data.wi_nthreads = 0;
        cfs_wi_data.wi_nsched   = CFS_WI_NSCHED;
        LIBCFS_ALLOC(cfs_wi_data.wi_scheds,
                     cfs_wi_data.wi_nsched * sizeof(cfs_wi_sched_t));
        if (cfs_wi_data.wi_scheds == NULL)
                return -ENOMEM;

        cfs_spin_lock_init(&cfs_wi_data.wi_glock);
        for (i = 0; i < cfs_wi_data.wi_nsched; i++)
                cfs_wi_sched_init(&cfs_wi_data.wi_scheds[i]);

#ifdef __KERNEL__
        n = cfs_num_online_cpus();
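        /* one scheduler thread per online CPU, plus one final thread
         * started with id -1 */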
        for (i = 0; i <= n; i++) {
                rc = cfs_wi_start_thread(cfs_wi_scheduler,
                                         (void *)(long_ptr_t)(i == n ? -1 : i));
                if (rc != 0) {
                        CERROR ("Can't spawn workitem scheduler: %d\n", rc);
                        cfs_wi_shutdown();
                        return rc;
                }
        }
#else
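        /* 'n' and 'rc' are unused in the userspace build; keep the
         * compiler quiet */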
        n = rc = 0;
#endif

        return 0;
}
Example #5
int gss_init_lproc(void)
{
        int     rc;

        cfs_spin_lock_init(&gss_stat_oos.oos_lock);

        gss_proc_root = lprocfs_register("gss", sptlrpc_proc_root,
                                         gss_lprocfs_vars, NULL);
        if (IS_ERR(gss_proc_root)) {
                rc = PTR_ERR(gss_proc_root);
                gss_proc_root = NULL;
                GOTO(err_out, rc);
        }

        gss_proc_lk = lprocfs_register("lgss_keyring", gss_proc_root,
                                       gss_lk_lprocfs_vars, NULL);
        if (IS_ERR(gss_proc_lk)) {
                rc = PTR_ERR(gss_proc_lk);
                gss_proc_lk = NULL;
                GOTO(err_out, rc);
        }

        return 0;

err_out:
        CERROR("failed to initialize gss lproc entries: %d\n", rc);
        gss_exit_lproc();
        return rc;
}
Example #6
static struct lustre_qunit_size *
quota_create_lqs(unsigned long long lqs_key, struct lustre_quota_ctxt *qctxt)
{
        struct lustre_qunit_size *lqs = NULL;
        cfs_hash_t *hs = NULL;
        int rc = 0;

        OBD_ALLOC_PTR(lqs);
        if (!lqs)
                GOTO(out, rc = -ENOMEM);

        lqs->lqs_key = lqs_key;

        cfs_spin_lock_init(&lqs->lqs_lock);
        lqs->lqs_bwrite_pending = 0;
        lqs->lqs_iwrite_pending = 0;
        lqs->lqs_ino_rec = 0;
        lqs->lqs_blk_rec = 0;
        lqs->lqs_id = LQS_KEY_ID(lqs->lqs_key);
        lqs->lqs_flags = LQS_KEY_GRP(lqs->lqs_key) ? LQUOTA_FLAGS_GRP : 0;
        lqs->lqs_bunit_sz = qctxt->lqc_bunit_sz;
        lqs->lqs_iunit_sz = qctxt->lqc_iunit_sz;
        lqs->lqs_btune_sz = qctxt->lqc_btune_sz;
        lqs->lqs_itune_sz = qctxt->lqc_itune_sz;
        lqs->lqs_ctxt = qctxt;
        if (qctxt->lqc_handler) {
                lqs->lqs_last_bshrink  = 0;
                lqs->lqs_last_ishrink  = 0;
        }
        lqs_initref(lqs);

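        /* publish the new lqs only while the quota context is valid; pin
         * the hash with a reference across the insertion */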
        cfs_spin_lock(&qctxt->lqc_lock);
        if (qctxt->lqc_valid)
                hs = cfs_hash_getref(qctxt->lqc_lqs_hash);
        cfs_spin_unlock(&qctxt->lqc_lock);

        if (hs) {
                lqs_getref(lqs);
                rc = cfs_hash_add_unique(qctxt->lqc_lqs_hash,
                                         &lqs->lqs_key, &lqs->lqs_hash);
                if (rc)
                        lqs_putref(lqs);
                cfs_hash_putref(hs);
        } else {
                rc = -EBUSY;
        }

 out:
        if (rc && lqs)
                OBD_FREE_PTR(lqs);

        if (rc)
                return ERR_PTR(rc);
        else
                return lqs;
}
Example #7
static void
cfs_wi_sched_init(cfs_wi_sched_t *sched)
{
        sched->ws_shuttingdown = 0;
#ifdef __KERNEL__
        cfs_spin_lock_init(&sched->ws_lock);
        cfs_waitq_init(&sched->ws_waitq);
#endif
        CFS_INIT_LIST_HEAD(&sched->ws_runq);
        CFS_INIT_LIST_HEAD(&sched->ws_rerunq);
}
Example #8
int sptlrpc_enc_pool_init(void)
{
        /*
         * maximum capacity is 1/8 of total physical memory.
         * is the 1/8 a good number?
         */
        page_pools.epp_max_pages = cfs_num_physpages / 8;
        page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);

        cfs_waitq_init(&page_pools.epp_waitq);
        page_pools.epp_waitqlen = 0;
        page_pools.epp_pages_short = 0;

        page_pools.epp_growing = 0;

        page_pools.epp_idle_idx = 0;
        page_pools.epp_last_shrink = cfs_time_current_sec();
        page_pools.epp_last_access = cfs_time_current_sec();

        cfs_spin_lock_init(&page_pools.epp_lock);
        page_pools.epp_total_pages = 0;
        page_pools.epp_free_pages = 0;

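        /* usage statistics */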
        page_pools.epp_st_max_pages = 0;
        page_pools.epp_st_grows = 0;
        page_pools.epp_st_grow_fails = 0;
        page_pools.epp_st_shrinks = 0;
        page_pools.epp_st_access = 0;
        page_pools.epp_st_missings = 0;
        page_pools.epp_st_lowfree = 0;
        page_pools.epp_st_max_wqlen = 0;
        page_pools.epp_st_max_wait = 0;

        enc_pools_alloc();
        if (page_pools.epp_pools == NULL)
                return -ENOMEM;

        pools_shrinker = cfs_set_shrinker(pools_shrinker_seeks,
                                          enc_pools_shrink);
        if (pools_shrinker == NULL) {
                enc_pools_free();
                return -ENOMEM;
        }

        return 0;
}
Example #9
static inline int mgs_init_export(struct obd_export *exp)
{
        struct mgs_export_data *data = &exp->u.eu_mgs_data;

        /* init mgs_export_data for fsc */
        cfs_spin_lock_init(&data->med_lock);
        CFS_INIT_LIST_HEAD(&data->med_clients);

        cfs_spin_lock(&exp->exp_lock);
        exp->exp_connecting = 1;
        cfs_spin_unlock(&exp->exp_lock);

        /* self-export doesn't need client data and ldlm initialization */
        if (unlikely(obd_uuid_equals(&exp->exp_obd->obd_uuid,
                                     &exp->exp_client_uuid)))
                return 0;
        return ldlm_init_export(exp);
}
Example #10
struct lc_watchdog *lc_watchdog_add(int timeout,
                                    void (*callback)(pid_t, void *),
                                    void *data)
{
        struct lc_watchdog *lcw = NULL;
        ENTRY;

        LIBCFS_ALLOC(lcw, sizeof(*lcw));
        if (lcw == NULL) {
                CDEBUG(D_INFO, "Could not allocate new lc_watchdog\n");
                RETURN(ERR_PTR(-ENOMEM));
        }

        cfs_spin_lock_init(&lcw->lcw_lock);
        lcw->lcw_refcount = 1; /* refcount for owner */
        lcw->lcw_task     = cfs_current();
        lcw->lcw_pid      = cfs_curproc_pid();
        lcw->lcw_callback = (callback != NULL) ? callback : lc_watchdog_dumplog;
        lcw->lcw_data     = data;
        lcw->lcw_state    = LC_WATCHDOG_DISABLED;

        CFS_INIT_LIST_HEAD(&lcw->lcw_list);
        cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);

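        /* the first watchdog added starts the shared dispatcher thread */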
        cfs_down(&lcw_refcount_sem);
        if (++lcw_refcount == 1)
                lcw_dispatch_start();
        cfs_up(&lcw_refcount_sem);

        /* Keep this working in case we enable them by default */
        if (lcw->lcw_state == LC_WATCHDOG_ENABLED) {
                lcw->lcw_last_touched = cfs_time_current();
                cfs_timer_arm(&lcw->lcw_timer, cfs_time_seconds(timeout) +
                              cfs_time_current());
        }

        RETURN(lcw);
}
Example #11
struct lustre_qunit_size *quota_search_lqs(unsigned long long lqs_key,
                                           struct lustre_quota_ctxt *qctxt,
                                           int create)
{
        struct lustre_qunit_size *lqs;
        struct lustre_qunit_size *lqs2;
        cfs_hash_t *hs = NULL;
        int rc = 0;

        cfs_spin_lock(&qctxt->lqc_lock);
        if (qctxt->lqc_valid) {
                LASSERT(qctxt->lqc_lqs_hash != NULL);
                hs = cfs_hash_getref(qctxt->lqc_lqs_hash);
        }
        cfs_spin_unlock(&qctxt->lqc_lock);

        if (hs == NULL) {
                rc = -EBUSY;
                goto out;
        }

        /* cfs_hash_lookup will +1 refcount for caller */
        lqs = cfs_hash_lookup(qctxt->lqc_lqs_hash, &lqs_key);
        if (lqs != NULL) /* found */
                goto out_put;

        if (!create)
                goto out_put;

        OBD_ALLOC_PTR(lqs);
        if (!lqs) {
                rc = -ENOMEM;
                goto out_put;
        }

        lqs->lqs_key = lqs_key;

        cfs_spin_lock_init(&lqs->lqs_lock);

        lqs->lqs_bwrite_pending = 0;
        lqs->lqs_iwrite_pending = 0;
        lqs->lqs_ino_rec = 0;
        lqs->lqs_blk_rec = 0;
        lqs->lqs_id = LQS_KEY_ID(lqs->lqs_key);
        lqs->lqs_flags = LQS_KEY_GRP(lqs->lqs_key) ? LQUOTA_FLAGS_GRP : 0;
        lqs->lqs_bunit_sz = qctxt->lqc_bunit_sz;
        lqs->lqs_iunit_sz = qctxt->lqc_iunit_sz;
        lqs->lqs_btune_sz = qctxt->lqc_btune_sz;
        lqs->lqs_itune_sz = qctxt->lqc_itune_sz;
        if (qctxt->lqc_handler) {
                lqs->lqs_last_bshrink  = 0;
                lqs->lqs_last_ishrink  = 0;
        }

        lqs->lqs_ctxt = qctxt; /* must be called before lqs_initref */
        cfs_atomic_set(&lqs->lqs_refcount, 1); /* 1 for caller */
        cfs_atomic_inc(&lqs->lqs_ctxt->lqc_lqs);

        /* lqc_lqs_hash will take +1 refcount on lqs on adding */
        lqs2 = cfs_hash_findadd_unique(qctxt->lqc_lqs_hash,
                                       &lqs->lqs_key, &lqs->lqs_hash);
        if (lqs2 == lqs) /* added to hash */
                goto out_put;

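        /* lost the race: an lqs with the same key was added first, so drop
         * the local copy and return the existing one */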
        create = 0;
        lqs_putref(lqs);
        lqs = lqs2;

 out_put:
        cfs_hash_putref(hs);
 out:
        if (rc != 0) { /* error */
                CERROR("get lqs error(rc: %d)\n", rc);
                return ERR_PTR(rc);
        }

        if (lqs != NULL) {
                LQS_DEBUG(lqs, "%s\n",
                          (create == 1 ? "create lqs" : "search lqs"));
        }
        return lqs;
}
Example #12
void libcfs_init_nidstrings (void)
{
    cfs_spin_lock_init(&libcfs_nidstring_lock);
}
Example #13
__init int ptlrpc_init(void)
{
        int rc, cleanup_phase = 0;
        ENTRY;

        lustre_assert_wire_constants();
#if RS_DEBUG
        cfs_spin_lock_init(&ptlrpc_rs_debug_lock);
#endif
        cfs_spin_lock_init(&ptlrpc_all_services_lock);
        cfs_mutex_init(&pinger_mutex);
        cfs_mutex_init(&ptlrpcd_mutex);
        ptlrpc_init_xid();

        rc = req_layout_init();
        if (rc)
                RETURN(rc);

        rc = ptlrpc_hr_init();
        if (rc) {
                req_layout_fini();
                RETURN(rc);
        }

        cleanup_phase = 1;

        rc = ptlrpc_init_portals();
        if (rc)
                GOTO(cleanup, rc);
        cleanup_phase = 2;

        rc = ptlrpc_connection_init();
        if (rc)
                GOTO(cleanup, rc);
        cleanup_phase = 3;

        ptlrpc_put_connection_superhack = ptlrpc_connection_put;

        rc = ptlrpc_start_pinger();
        if (rc)
                GOTO(cleanup, rc);
        cleanup_phase = 4;

        rc = ldlm_init();
        if (rc)
                GOTO(cleanup, rc);
        cleanup_phase = 5;

        rc = sptlrpc_init();
        if (rc)
                GOTO(cleanup, rc);

        cleanup_phase = 6;
        rc = llog_recov_init();
        if (rc)
                GOTO(cleanup, rc);

        RETURN(0);

cleanup:
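        /* unwind completed phases in reverse order; each case falls through */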
        switch(cleanup_phase) {
        case 6:
                sptlrpc_fini();
        case 5:
                ldlm_exit();
        case 4:
                ptlrpc_stop_pinger();
        case 3:
                ptlrpc_connection_fini();
        case 2:
                ptlrpc_exit_portals();
        case 1:
                ptlrpc_hr_fini();
                req_layout_fini();
        default: ;
        }

        return rc;
}