Exemplo n.º 1
0
/*
 * One-time initialization of the static "null" security policy singletons:
 * the global ptlrpc_sec instance null_sec and its single eternal client
 * context null_cli_ctx.  Both objects are defined elsewhere in this file;
 * this routine only fills in their fields.
 */
static void null_init_internal(void)
{
        /* Dummy hash-bucket head so null_cli_ctx can sit on a cache list
         * like any ordinary context. */
        static CFS_HLIST_HEAD(__list);

        /* Set up the singleton security instance. */
        null_sec.ps_policy = &null_policy;
        cfs_atomic_set(&null_sec.ps_refcount, 1);     /* always busy */
        null_sec.ps_id = -1;
        null_sec.ps_import = NULL;
        null_sec.ps_flvr.sf_rpc = SPTLRPC_FLVR_NULL;
        null_sec.ps_flvr.sf_flags = 0;
        null_sec.ps_part = LUSTRE_SP_ANY;
        null_sec.ps_dying = 0;
        cfs_spin_lock_init(&null_sec.ps_lock);
        cfs_atomic_set(&null_sec.ps_nctx, 1);         /* for "null_cli_ctx" */
        CFS_INIT_LIST_HEAD(&null_sec.ps_gc_list);
        null_sec.ps_gc_interval = 0;                  /* garbage collection disabled */
        null_sec.ps_gc_next = 0;

        /* Set up the singleton client context: cached, never expires,
         * always up to date. */
        cfs_hlist_add_head(&null_cli_ctx.cc_cache, &__list);
        cfs_atomic_set(&null_cli_ctx.cc_refcount, 1);    /* for hash */
        null_cli_ctx.cc_sec = &null_sec;
        null_cli_ctx.cc_ops = &null_ctx_ops;
        null_cli_ctx.cc_expire = 0;
        null_cli_ctx.cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_ETERNAL |
                                PTLRPC_CTX_UPTODATE;
        null_cli_ctx.cc_vcred.vc_uid = 0;
        cfs_spin_lock_init(&null_cli_ctx.cc_lock);
        CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
        CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
}
Exemplo n.º 2
0
/*
 * Allocate a page descriptor and its backing data buffer.
 *
 * \param flags - allocation flags; only __GFP_ZERO is honoured here and
 *                causes the data buffer to be zero-filled.
 *
 * \retval pointer to the new page with refcount 1, or NULL if either the
 *         descriptor or the data buffer could not be allocated.
 */
struct page *alloc_page(int flags)
{
	struct page *pg;

	pg = kmem_cache_alloc(cfs_page_t_slab, 0);
	if (NULL == pg) {
		cfs_enter_debugger();
		return NULL;
	}

	memset(pg, 0, sizeof(struct page));
	pg->addr = kmem_cache_alloc(cfs_page_p_slab, 0);
	cfs_atomic_set(&pg->count, 1);

	if (pg->addr) {
		if (cfs_is_flag_set(flags, __GFP_ZERO))
			memset(pg->addr, 0, PAGE_CACHE_SIZE);
		cfs_atomic_inc(&libcfs_total_pages);
	} else {
		/* data buffer allocation failed: release the descriptor */
		cfs_enter_debugger();
		kmem_cache_free(cfs_page_t_slab, pg);
		pg = NULL;
	}

	return pg;
}
Exemplo n.º 3
0
/*****************************************************************************
 *
 * Lov object operations.
 *
 */
/*
 * Initialize a freshly created lov object: set up its type guard,
 * active-io counter and wait queue, then dispatch the layout-specific
 * initialization through the lov_dispatch table selected by the layout
 * type found in the object configuration.
 *
 * \param env  - execution environment passed by the caller
 * \param obj  - the lu_object being initialized
 * \param conf - object configuration carrying the striping metadata
 *
 * \retval 0 on success, negative error code from llo_init() otherwise.
 */
int lov_object_init(const struct lu_env *env, struct lu_object *obj,
		    const struct lu_object_conf *conf)
{
	struct lov_device		   *dev   = lu2lov_dev(obj->lo_dev);
	struct lov_object		   *lov   = lu2lov(obj);
	const struct cl_object_conf	   *cconf = lu2cl_conf(conf);
	union  lov_layout_state		   *set   = &lov->u;
	const struct lov_layout_operations *ops;
	int result;

	ENTRY;
	init_rwsem(&lov->lo_type_guard);
	cfs_atomic_set(&lov->lo_active_ios, 0);
	init_waitqueue_head(&lov->lo_waitq);

	cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));

	/* no locking is necessary, as object is being created */
	lov->lo_type = lov_type(cconf->u.coc_md->lsm);
	ops = &lov_dispatch[lov->lo_type];
	result = ops->llo_init(env, dev, lov, cconf, set);
	if (result == 0)
		ops->llo_install(env, lov, set);
	RETURN(result);
}
Exemplo n.º 4
0
/*
 * Allocate and initialize an in-memory stripe descriptor (lov_stripe_md)
 * for the given stripe count, pattern and magic.
 *
 * On success *lsmp points at the new descriptor (refcount 1) and the
 * allocated size is returned; -ENOMEM is returned on allocation failure.
 */
int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
                    int pattern, int magic)
{
        int i, lsm_size;
        ENTRY;

        CDEBUG(D_INFO, "alloc lsm, stripe_count %d\n", stripe_count);

        *lsmp = lsm_alloc_plain(stripe_count, &lsm_size);
        if (!*lsmp) {
                CERROR("can't allocate lsmp stripe_count %d\n", stripe_count);
                RETURN(-ENOMEM);
        }

	cfs_atomic_set(&(*lsmp)->lsm_refc, 1);
	spin_lock_init(&(*lsmp)->lsm_lock);
        (*lsmp)->lsm_magic = magic;
        (*lsmp)->lsm_stripe_count = stripe_count;
        (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES * stripe_count;
        (*lsmp)->lsm_pattern = pattern;
        (*lsmp)->lsm_pool_name[0] = '\0';
        (*lsmp)->lsm_layout_gen = 0;
        /* NOTE(review): loi_ost_idx of oinfo[0] is set before the loi_init()
         * loop below runs over all oinfo slots — confirm loi_init() does not
         * clobber this field, otherwise this assignment has no effect. */
        (*lsmp)->lsm_oinfo[0]->loi_ost_idx = ~0;

        for (i = 0; i < stripe_count; i++)
                loi_init((*lsmp)->lsm_oinfo[i]);

        RETURN(lsm_size);
}
Exemplo n.º 5
0
File: llog.c — Project: Lezval/lustre
/*
 * Allocate a new log or catalog handle
 * Used inside llog_open().
 */
/*
 * Allocate a new log or catalog handle and initialize its locks, plain
 * handle list linkage and reference count.  Used inside llog_open().
 * Returns NULL when the allocation fails.
 */
struct llog_handle *llog_alloc_handle(void)
{
	struct llog_handle *lgh;

	OBD_ALLOC_PTR(lgh);
	if (lgh == NULL)
		return NULL;

	cfs_atomic_set(&lgh->lgh_refcount, 1);
	init_rwsem(&lgh->lgh_lock);
	spin_lock_init(&lgh->lgh_hdr_lock);
	CFS_INIT_LIST_HEAD(&lgh->u.phd.phd_entry);

	return lgh;
}
Exemplo n.º 6
0
/*
 * Proc write handler: parse an integer from the user buffer and store it
 * as the client's resend count.  Negative values are rejected with
 * -EINVAL; on success the number of bytes consumed is returned.
 */
static int osc_wr_resend_count(struct file *file, const char *buffer,
                               unsigned long count, void *data)
{
        struct obd_device *obd = data;
        int val;
        int rc;

        rc = lprocfs_write_helper(buffer, count, &val);
        if (rc != 0)
                return rc;
        if (val < 0)
                return -EINVAL;

        cfs_atomic_set(&obd->u.cli.cl_resends, val);
        return count;
}
Exemplo n.º 7
0
/*
 * Build a page descriptor for an arbitrary virtual address: allocate a
 * descriptor, point it at the page-aligned base of @addr, mark it as a
 * virtual page (PG_virt) and give it refcount 1.
 * Returns NULL if the descriptor allocation fails.
 */
struct page *virt_to_page(void *addr)
{
	struct page *pg;
	pg = kmem_cache_alloc(cfs_page_t_slab, 0);

	if (NULL == pg) {
		cfs_enter_debugger();
		return NULL;
	}

	memset(pg, 0, sizeof(struct page));
	/* round @addr down to its page boundary */
	pg->addr = (void *)((__u64)addr & (~((__u64)PAGE_SIZE-1)));
	pg->mapping = addr;
	cfs_atomic_set(&pg->count, 1);
	set_bit(PG_virt, &(pg->flags));
	/* NOTE(review): cfs_enter_debugger() is also called on the success
	 * path here — presumably a debugging aid; confirm it is intentional. */
	cfs_enter_debugger();
	return pg;
}
Exemplo n.º 8
0
/*
 * Find or create the ptlrpc_connection for @peer.
 *
 * First tries a lookup in the global conn_hash; on a miss a new
 * connection is allocated, initialized with @peer/@self/@uuid and added
 * to the hash with findadd_unique so that a concurrent racing creation
 * is resolved safely.  Returns the connection with a reference held, or
 * NULL if the allocation fails.
 */
struct ptlrpc_connection *
ptlrpc_connection_get(lnet_process_id_t peer, lnet_nid_t self,
                      struct obd_uuid *uuid)
{
    struct ptlrpc_connection *conn, *conn2;
    ENTRY;

    conn = cfs_hash_lookup(conn_hash, &peer);
    if (conn)
        GOTO(out, conn);

    OBD_ALLOC_PTR(conn);
    if (!conn)
        RETURN(NULL);

    conn->c_peer = peer;
    conn->c_self = self;
    CFS_INIT_HLIST_NODE(&conn->c_hash);
    cfs_atomic_set(&conn->c_refcount, 1);
    if (uuid)
        obd_str2uuid(&conn->c_remote_uuid, uuid->uuid);

    /*
     * Add the newly created conn to the hash, on key collision we
     * lost a racing addition and must destroy our newly allocated
     * connection.  The object which exists in the hash will be
     * returned and may be compared against our object.
     */
    conn2 = cfs_hash_findadd_unique(conn_hash, &peer, &conn->c_hash);
    if (conn != conn2) {
        /* lost the race: free ours and use the winner's connection */
        OBD_FREE_PTR(conn);
        conn = conn2;
    }
    EXIT;
out:
    CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
           conn, cfs_atomic_read(&conn->c_refcount),
           libcfs_nid2str(conn->c_peer.nid));
    return conn;
}
Exemplo n.º 9
0
/*
 * Core of the OBD fault-injection mechanism: decide whether fail point
 * @id should fire, honouring the RAND/SKIP/SOME/ONCE modifiers encoded
 * in the global obd_fail_loc, and optionally update obd_fail_loc
 * according to @set (NOSET/ORSET/RESET).
 *
 * Returns 1 when the fail point fires, 0 otherwise.
 */
int __obd_fail_check_set(__u32 id, __u32 value, int set)
{
        /* shared counter for the SKIP/SOME modes */
        static cfs_atomic_t obd_fail_count = CFS_ATOMIC_INIT(0);

        LASSERT(!(id & OBD_FAIL_ONCE));

        /* a one-shot fail point that already fired never fires again */
        if ((obd_fail_loc & (OBD_FAILED | OBD_FAIL_ONCE)) ==
            (OBD_FAILED | OBD_FAIL_ONCE)) {
                cfs_atomic_set(&obd_fail_count, 0); /* paranoia */
                return 0;
        }

        /* Fail 1/obd_fail_val times */
        if (obd_fail_loc & OBD_FAIL_RAND) {
                if (obd_fail_val < 2 || cfs_rand() % obd_fail_val > 0)
                        return 0;
        }

        /* Skip the first obd_fail_val, then fail */
        if (obd_fail_loc & OBD_FAIL_SKIP) {
                if (cfs_atomic_inc_return(&obd_fail_count) <= obd_fail_val)
                        return 0;
        }

        /* Fail obd_fail_val times, overridden by FAIL_ONCE */
        if (obd_fail_loc & OBD_FAIL_SOME &&
            (!(obd_fail_loc & OBD_FAIL_ONCE) || obd_fail_val <= 1)) {
                int count = cfs_atomic_inc_return(&obd_fail_count);

                if (count >= obd_fail_val) {
                        cfs_set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
                        cfs_atomic_set(&obd_fail_count, 0);
                        /* we lost a race to increase obd_fail_count */
                        if (count > obd_fail_val)
                                return 0;
                }
        }

        /* caller asked for one-shot semantics via @value */
        if ((set == OBD_FAIL_LOC_ORSET || set == OBD_FAIL_LOC_RESET) &&
            (value & OBD_FAIL_ONCE))
                cfs_set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);

        /* Lost race to set OBD_FAILED_BIT. */
        if (cfs_test_and_set_bit(OBD_FAILED_BIT, &obd_fail_loc)) {
                /* If OBD_FAIL_ONCE is valid, only one process can fail,
                 * otherwise multi-process can fail at the same time. */
                if (obd_fail_loc & OBD_FAIL_ONCE)
                        return 0;
        }

        /* update the fail location as requested by @set */
        switch (set) {
                case OBD_FAIL_LOC_NOSET:
                        break;
                case OBD_FAIL_LOC_ORSET:
                        obd_fail_loc |= value & ~(OBD_FAILED | OBD_FAIL_ONCE);
                        break;
                case OBD_FAIL_LOC_RESET:
                        obd_fail_loc = value;
                        break;
                default:
                        LASSERTF(0, "called with bad set %u\n", set);
                        break;
        }

        return 1;
}
Exemplo n.º 10
0
/*
 * Look up the lustre_qunit_size entry for @lqs_key in @qctxt's hash and,
 * when @create is non-zero and no entry exists, allocate, initialize and
 * insert a new one (resolving a racing insertion via findadd_unique).
 *
 * Returns the entry with a reference held for the caller, NULL when not
 * found and !@create, or ERR_PTR(-EBUSY/-ENOMEM) on failure.
 */
struct lustre_qunit_size *quota_search_lqs(unsigned long long lqs_key,
                                           struct lustre_quota_ctxt *qctxt,
                                           int create)
{
        struct lustre_qunit_size *lqs;
        struct lustre_qunit_size *lqs2;
        cfs_hash_t *hs = NULL;
        int rc = 0;

        /* pin the hash while the context is still valid */
        cfs_spin_lock(&qctxt->lqc_lock);
        if (qctxt->lqc_valid) {
                LASSERT(qctxt->lqc_lqs_hash != NULL);
                hs = cfs_hash_getref(qctxt->lqc_lqs_hash);
        }
        cfs_spin_unlock(&qctxt->lqc_lock);

        if (hs == NULL) {
                /* context is shutting down or not yet usable */
                rc = -EBUSY;
                goto out;
        }

        /* cfs_hash_lookup will +1 refcount for caller */
        lqs = cfs_hash_lookup(qctxt->lqc_lqs_hash, &lqs_key);
        if (lqs != NULL) /* found */
                goto out_put;

        if (!create)
                goto out_put;

        OBD_ALLOC_PTR(lqs);
        if (!lqs) {
                rc = -ENOMEM;
                goto out_put;
        }

        lqs->lqs_key = lqs_key;

        cfs_spin_lock_init(&lqs->lqs_lock);

        /* initialize accounting fields and per-context tuning parameters */
        lqs->lqs_bwrite_pending = 0;
        lqs->lqs_iwrite_pending = 0;
        lqs->lqs_ino_rec = 0;
        lqs->lqs_blk_rec = 0;
        lqs->lqs_id = LQS_KEY_ID(lqs->lqs_key);
        lqs->lqs_flags = LQS_KEY_GRP(lqs->lqs_key) ? LQUOTA_FLAGS_GRP : 0;
        lqs->lqs_bunit_sz = qctxt->lqc_bunit_sz;
        lqs->lqs_iunit_sz = qctxt->lqc_iunit_sz;
        lqs->lqs_btune_sz = qctxt->lqc_btune_sz;
        lqs->lqs_itune_sz = qctxt->lqc_itune_sz;
        if (qctxt->lqc_handler) {
                lqs->lqs_last_bshrink  = 0;
                lqs->lqs_last_ishrink  = 0;
        }

        lqs->lqs_ctxt = qctxt; /* must be called before lqs_initref */
        cfs_atomic_set(&lqs->lqs_refcount, 1); /* 1 for caller */
        cfs_atomic_inc(&lqs->lqs_ctxt->lqc_lqs);

        /* lqc_lqs_hash will take +1 refcount on lqs on adding */
        lqs2 = cfs_hash_findadd_unique(qctxt->lqc_lqs_hash,
                                       &lqs->lqs_key, &lqs->lqs_hash);
        if (lqs2 == lqs) /* added to hash */
                goto out_put;

        /* lost a racing insertion: drop ours, use the existing entry;
         * clear @create so the debug message below says "search" */
        create = 0;
        lqs_putref(lqs);
        lqs = lqs2;

 out_put:
        cfs_hash_putref(hs);
 out:
        if (rc != 0) { /* error */
                CERROR("get lqs error(rc: %d)\n", rc);
                return ERR_PTR(rc);
        }

        if (lqs != NULL) {
                LQS_DEBUG(lqs, "%s\n",
                          (create == 1 ? "create lqs" : "search lqs"));
        }
        return lqs;
}
Exemplo n.º 11
0
/*
 * Core of the libcfs fault-injection mechanism: decide whether fail
 * point @id should fire, honouring the RAND/SKIP/VALUE/SOME/ONCE
 * modifiers encoded in the global cfs_fail_loc, and optionally update
 * cfs_fail_loc according to @set (NOSET/VALUE/ORSET/RESET).
 *
 * Returns 1 when the fail point fires, 0 otherwise.
 */
int __cfs_fail_check_set(__u32 id, __u32 value, int set)
{
        /* shared counter for the SKIP/SOME modes */
        static cfs_atomic_t cfs_fail_count = CFS_ATOMIC_INIT(0);

        LASSERT(!(id & CFS_FAIL_ONCE));

        /* a one-shot fail point that already fired never fires again */
        if ((cfs_fail_loc & (CFS_FAILED | CFS_FAIL_ONCE)) ==
            (CFS_FAILED | CFS_FAIL_ONCE)) {
                cfs_atomic_set(&cfs_fail_count, 0); /* paranoia */
                return 0;
        }

        /* Fail 1/cfs_fail_val times */
        if (cfs_fail_loc & CFS_FAIL_RAND) {
                if (cfs_fail_val < 2 || cfs_rand() % cfs_fail_val > 0)
                        return 0;
        }

        /* Skip the first cfs_fail_val, then fail */
        if (cfs_fail_loc & CFS_FAIL_SKIP) {
                if (cfs_atomic_inc_return(&cfs_fail_count) <= cfs_fail_val)
                        return 0;
        }

        /* check cfs_fail_val... */
        if (set == CFS_FAIL_LOC_VALUE) {
                if (cfs_fail_val != -1 && cfs_fail_val != value)
                        return 0;
        }

        /* Fail cfs_fail_val times, overridden by FAIL_ONCE */
        if (cfs_fail_loc & CFS_FAIL_SOME &&
            (!(cfs_fail_loc & CFS_FAIL_ONCE) || cfs_fail_val <= 1)) {
                int count = cfs_atomic_inc_return(&cfs_fail_count);

                if (count >= cfs_fail_val) {
			set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
                        cfs_atomic_set(&cfs_fail_count, 0);
                        /* we lost a race to increase cfs_fail_count */
                        if (count > cfs_fail_val)
                                return 0;
                }
        }

        /* caller asked for one-shot semantics via @value */
        if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) &&
            (value & CFS_FAIL_ONCE))
		set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
        /* Lost race to set CFS_FAILED_BIT. */
	if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
                /* If CFS_FAIL_ONCE is valid, only one process can fail,
                 * otherwise multi-process can fail at the same time. */
                if (cfs_fail_loc & CFS_FAIL_ONCE)
                        return 0;
        }

        /* update the fail location as requested by @set */
        switch (set) {
                case CFS_FAIL_LOC_NOSET:
                case CFS_FAIL_LOC_VALUE:
                        break;
                case CFS_FAIL_LOC_ORSET:
                        cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE);
                        break;
                case CFS_FAIL_LOC_RESET:
                        cfs_fail_loc = value;
                        break;
                default:
                        LASSERTF(0, "called with bad set %u\n", set);
                        break;
        }

        return 1;
}
Exemplo n.º 12
0
/*
 * Allocate and initialize a qsd_qtype_info structure for quota type \qtype.
 * This opens the accounting object and initializes the proc file.
 * It's called on OSD start when the qsd_prepare() is invoked on the qsd
 * instance.
 *
 * \param env  - the environment passed by the caller
 * \param qsd  - is the qsd instance which will be in charge of the new
 *               qsd_qtype_info instance.
 * \param qtype - is quota type to set up
 *
 * \retval - 0 on success and qsd->qsd_type_array[qtype] is allocated,
 *           appropriate error on failure
 */
static int qsd_qtype_init(const struct lu_env *env, struct qsd_instance *qsd,
			  int qtype)
{
	struct qsd_qtype_info	*qqi;
	int			 rc;
	struct obd_uuid		 uuid;
	ENTRY;

	LASSERT(qsd->qsd_type_array[qtype] == NULL);

	/* allocate structure for this quota type */
	OBD_ALLOC_PTR(qqi);
	if (qqi == NULL)
		RETURN(-ENOMEM);
	/* publish before the refcount so qsd_qtype_fini() can find and
	 * release it on any failure path below */
	qsd->qsd_type_array[qtype] = qqi;
	cfs_atomic_set(&qqi->qqi_ref, 1); /* referenced from qsd */

	/* set backpointer and other parameters */
	qqi->qqi_qsd   = qsd;
	qqi->qqi_qtype = qtype;
	lu_ref_init(&qqi->qqi_reference);
	lquota_generate_fid(&qqi->qqi_fid, qsd->qsd_pool_id, QSD_RES_TYPE(qsd),
			    qtype);
	qqi->qqi_glb_uptodate = false;
	qqi->qqi_slv_uptodate = false;
	qqi->qqi_reint        = false;
	init_waitqueue_head(&qqi->qqi_reint_thread.t_ctl_waitq);
	thread_set_flags(&qqi->qqi_reint_thread, SVC_STOPPED);
	CFS_INIT_LIST_HEAD(&qqi->qqi_deferred_glb);
	CFS_INIT_LIST_HEAD(&qqi->qqi_deferred_slv);

	/* open accounting object; failure here is not fatal, the instance
	 * just runs without space accounting support */
	LASSERT(qqi->qqi_acct_obj == NULL);
	qqi->qqi_acct_obj = acct_obj_lookup(env, qsd->qsd_dev, qtype);
	if (IS_ERR(qqi->qqi_acct_obj)) {
		CDEBUG(D_QUOTA, "%s: no %s space accounting support rc:%ld\n",
		       qsd->qsd_svname, QTYPE_NAME(qtype),
		       PTR_ERR(qqi->qqi_acct_obj));
		qqi->qqi_acct_obj = NULL;
		qsd->qsd_acct_failed = true;
	}

	/* open global index copy */
	LASSERT(qqi->qqi_glb_obj == NULL);
	qqi->qqi_glb_obj = lquota_disk_glb_find_create(env, qsd->qsd_dev,
						       qsd->qsd_root,
						       &qqi->qqi_fid, true);
	if (IS_ERR(qqi->qqi_glb_obj)) {
		CERROR("%s: can't open global index copy "DFID" %ld\n",
		       qsd->qsd_svname, PFID(&qqi->qqi_fid),
		       PTR_ERR(qqi->qqi_glb_obj));
		GOTO(out, rc = PTR_ERR(qqi->qqi_glb_obj));
	}
	qqi->qqi_glb_ver = dt_version_get(env, qqi->qqi_glb_obj);

	/* open slave index copy, keyed by this server's uuid */
	LASSERT(qqi->qqi_slv_obj == NULL);
	obd_str2uuid(&uuid, qsd->qsd_svname);
	qqi->qqi_slv_obj = lquota_disk_slv_find_create(env, qsd->qsd_dev,
						       qsd->qsd_root,
						       &qqi->qqi_fid, &uuid,
						       true);
	if (IS_ERR(qqi->qqi_slv_obj)) {
		CERROR("%s: can't open slave index copy "DFID" %ld\n",
		       qsd->qsd_svname, PFID(&qqi->qqi_fid),
		       PTR_ERR(qqi->qqi_slv_obj));
		GOTO(out, rc = PTR_ERR(qqi->qqi_slv_obj));
	}
	qqi->qqi_slv_ver = dt_version_get(env, qqi->qqi_slv_obj);

	/* allocate site */
	qqi->qqi_site = lquota_site_alloc(env, qqi, false, qtype, &qsd_lqe_ops);
	if (IS_ERR(qqi->qqi_site)) {
		CERROR("%s: can't allocate site "DFID" %ld\n", qsd->qsd_svname,
		       PFID(&qqi->qqi_fid), PTR_ERR(qqi->qqi_site));
		GOTO(out, rc = PTR_ERR(qqi->qqi_site));
	}

	/* register proc entry for accounting & global index copy objects.
	 * NOTE(review): qqi_acct_obj may be NULL here (see accounting failure
	 * handling above) — presumably lprocfs_quota_seq_fops tolerates a
	 * NULL data pointer; confirm. */
	rc = lprocfs_seq_create(qsd->qsd_proc,
				qtype == USRQUOTA ? "acct_user" : "acct_group",
				0444, &lprocfs_quota_seq_fops,
				qqi->qqi_acct_obj);
	if (rc) {
		CERROR("%s: can't add procfs entry for accounting file %d\n",
		       qsd->qsd_svname, rc);
		GOTO(out, rc);
	}

	rc = lprocfs_seq_create(qsd->qsd_proc,
				qtype == USRQUOTA ? "limit_user" : "limit_group",
				0444, &lprocfs_quota_seq_fops,
				qqi->qqi_glb_obj);
	if (rc) {
		CERROR("%s: can't add procfs entry for global index copy %d\n",
		       qsd->qsd_svname, rc);
		GOTO(out, rc);
	}
	EXIT;
out:
	/* partial setup is torn down by qsd_qtype_fini() on any error */
	if (rc)
		qsd_qtype_fini(env, qsd, qtype);
	return rc;
}