Example #1
void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx,
                                       long amount)
{
        struct lprocfs_counter *percpu_cntr;
        int smp_id;

        if (stats == NULL)
                return;

        /* With per-client stats, statistics are allocated only for a
         * single CPU area, so smp_id should always be 0. */
        smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);

        percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
        if (!(stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU))
                cfs_atomic_inc(&percpu_cntr->lc_cntl.la_entry);
        if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
                /*
                 * Currently lprocfs_counter_add() can only be called in
                 * thread context; however, memory is sometimes freed from
                 * RCU callbacks that call lprocfs_counter_sub(), and RCU
                 * callbacks may execute in softirq context. That is the
                 * only case where we run in softirq context here, so use a
                 * separate counter for it.  bz20650.
                 */
                if (cfs_in_interrupt())
                        percpu_cntr->lc_sum_irq -= amount;
                else
                        percpu_cntr->lc_sum -= amount;
        }
        if (!(stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU))
                cfs_atomic_inc(&percpu_cntr->lc_cntl.la_exit);
        lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID);
}
Example #2
void lprocfs_counter_add(struct lprocfs_stats *stats, int idx,
                                       long amount)
{
        struct lprocfs_counter *percpu_cntr;
        int smp_id;

        if (stats == NULL)
                return;

        /* With per-client stats, statistics are allocated only for a
         * single CPU area, so smp_id should always be 0. */
        smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);

        percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
        if (!(stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU))
                cfs_atomic_inc(&percpu_cntr->lc_cntl.la_entry);
        percpu_cntr->lc_count++;

        if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
                /* see comment in lprocfs_counter_sub */
                LASSERT(!cfs_in_interrupt());

                percpu_cntr->lc_sum += amount;
                if (percpu_cntr->lc_config & LPROCFS_CNTR_STDDEV)
                        percpu_cntr->lc_sumsquare += (__s64)amount * amount;
                if (amount < percpu_cntr->lc_min)
                        percpu_cntr->lc_min = amount;
                if (amount > percpu_cntr->lc_max)
                        percpu_cntr->lc_max = amount;
        }
        if (!(stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU))
                cfs_atomic_inc(&percpu_cntr->lc_cntl.la_exit);
        lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID);
}
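The two helpers above are meant to be used as a pair: a statistic is bumped with lprocfs_counter_add() and rolled back with lprocfs_counter_sub() if the operation is undone. Below is a minimal, hypothetical caller; the counter index MY_STATS_BYTES and the wrapper names are illustrative assumptions, not part of this listing.

enum { MY_STATS_BYTES = 0 };    /* illustrative counter index */

static void account_io(struct lprocfs_stats *stats, long nbytes)
{
        /* bumps lc_count and, for AVGMINMAX counters, lc_sum/lc_min/lc_max */
        lprocfs_counter_add(stats, MY_STATS_BYTES, nbytes);
}

static void unaccount_io(struct lprocfs_stats *stats, long nbytes)
{
        /* may be reached from an RCU callback; in softirq context the
         * subtraction goes to lc_sum_irq instead of lc_sum (see above) */
        lprocfs_counter_sub(stats, MY_STATS_BYTES, nbytes);
}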
Example #3
void gss_stat_oos_record_svc(int phase, int replay)
{
        LASSERT(phase >= 0 && phase <= 2);

        if (replay)
                cfs_atomic_inc(&gss_stat_oos.oos_svc_replay[phase]);
        else
                cfs_atomic_inc(&gss_stat_oos.oos_svc_pass[phase]);
}
Example #4
static
int null_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
        struct ptlrpc_reply_state *rs;
        int rs_size = sizeof(*rs) + msgsize;

        LASSERT(msgsize % 8 == 0);

        rs = req->rq_reply_state;

        if (rs) {
                /* pre-allocated */
                LASSERT(rs->rs_size >= rs_size);
        } else {
                OBD_ALLOC(rs, rs_size);
                if (rs == NULL)
                        return -ENOMEM;

                rs->rs_size = rs_size;
        }

        rs->rs_svc_ctx = req->rq_svc_ctx;
        cfs_atomic_inc(&req->rq_svc_ctx->sc_refcount);

        rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
        rs->rs_repbuf_len = rs_size - sizeof(*rs);
        rs->rs_msg = rs->rs_repbuf;

        req->rq_reply_state = rs;
        return 0;
}
Example #5
struct page *alloc_page(int flags)
{
	struct page *pg;
	pg = kmem_cache_alloc(cfs_page_t_slab, 0);

	if (NULL == pg) {
		cfs_enter_debugger();
		return NULL;
	}

	memset(pg, 0, sizeof(struct page));
	pg->addr = kmem_cache_alloc(cfs_page_p_slab, 0);
	cfs_atomic_set(&pg->count, 1);

	if (pg->addr) {
		if (cfs_is_flag_set(flags, __GFP_ZERO))
			memset(pg->addr, 0, PAGE_CACHE_SIZE);
		cfs_atomic_inc(&libcfs_total_pages);
	} else {
		cfs_enter_debugger();
		kmem_cache_free(cfs_page_t_slab, pg);
		pg = NULL;
	}

	return pg;
}
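The allocator above pairs a descriptor slab with a data-area slab and starts the page refcount at 1. The matching release path is not part of this listing; the sketch below is a hypothetical counterpart that undoes the two allocations and keeps libcfs_total_pages balanced.

static void free_page_sketch(struct page *pg)
{
	if (!cfs_atomic_dec_and_test(&pg->count))
		return;					/* still referenced */

	kmem_cache_free(cfs_page_p_slab, pg->addr);	/* data area */
	kmem_cache_free(cfs_page_t_slab, pg);		/* descriptor */
	cfs_atomic_dec(&libcfs_total_pages);
}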
Example #6
static struct cfs_trace_page *cfs_tage_alloc(int gfp)
{
        cfs_page_t            *page;
        struct cfs_trace_page *tage;

        /* My caller is trying to free memory */
        if (!cfs_in_interrupt() && cfs_memory_pressure_get())
                return NULL;

        /*
         * Don't spam console with allocation failures: they will be reported
         * by upper layer anyway.
         */
        gfp |= CFS_ALLOC_NOWARN;
        page = cfs_alloc_page(gfp);
        if (page == NULL)
                return NULL;

        tage = cfs_alloc(sizeof(*tage), gfp);
        if (tage == NULL) {
                cfs_free_page(page);
                return NULL;
        }

        tage->page = page;
        cfs_atomic_inc(&cfs_tage_allocated);
        return tage;
}
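The free side of the trace-page pool is not shown in this listing. As a hedged counterpart sketch, releasing a trace page would undo both allocations and decrement the same cfs_tage_allocated counter that cfs_tage_alloc() incremented:

static void cfs_tage_free_sketch(struct cfs_trace_page *tage)
{
        cfs_free_page(tage->page);              /* release the backing page */
        cfs_free(tage);                         /* release the descriptor */
        cfs_atomic_dec(&cfs_tage_allocated);    /* balance the inc above */
}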
Example #7
static
void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *hash)
{
        set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
        cfs_atomic_inc(&ctx->cc_refcount);
        cfs_hlist_add_head(&ctx->cc_cache, hash);
}
Example #8
static void
conn_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
    struct ptlrpc_connection *conn;

    conn = cfs_hlist_entry(hnode, struct ptlrpc_connection, c_hash);
    cfs_atomic_inc(&conn->c_refcount);
}
Example #9
static
struct ptlrpc_cli_ctx *null_lookup_ctx(struct ptlrpc_sec *sec,
                                       struct vfs_cred *vcred,
                                       int create, int remove_dead)
{
        cfs_atomic_inc(&null_cli_ctx.cc_refcount);
        return &null_cli_ctx;
}
Example #10
void gss_stat_oos_record_cli(int behind)
{
        cfs_atomic_inc(&gss_stat_oos.oos_cli_count);

        cfs_spin_lock(&gss_stat_oos.oos_lock);
        if (behind > gss_stat_oos.oos_cli_behind)
                gss_stat_oos.oos_cli_behind = behind;
        cfs_spin_unlock(&gss_stat_oos.oos_lock);
}
Example #11
struct ptlrpc_connection *
ptlrpc_connection_addref(struct ptlrpc_connection *conn)
{
    ENTRY;

    cfs_atomic_inc(&conn->c_refcount);
    CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
           conn, cfs_atomic_read(&conn->c_refcount),
           libcfs_nid2str(conn->c_peer.nid));

    RETURN(conn);
}
Example #12
static
int null_accept(struct ptlrpc_request *req)
{
        LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
                SPTLRPC_POLICY_NULL);

        if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL) {
                CERROR("Invalid rpc flavor 0x%x\n", req->rq_flvr.sf_rpc);
                return SECSVC_DROP;
        }

        req->rq_sp_from = null_decode_sec_part(req->rq_reqbuf);

        req->rq_reqmsg = req->rq_reqbuf;
        req->rq_reqlen = req->rq_reqdata_len;

        req->rq_svc_ctx = &null_svc_ctx;
        cfs_atomic_inc(&req->rq_svc_ctx->sc_refcount);

        return SECSVC_OK;
}
Example #13
struct lustre_qunit_size *quota_search_lqs(unsigned long long lqs_key,
                                           struct lustre_quota_ctxt *qctxt,
                                           int create)
{
        struct lustre_qunit_size *lqs;
        struct lustre_qunit_size *lqs2;
        cfs_hash_t *hs = NULL;
        int rc = 0;

        cfs_spin_lock(&qctxt->lqc_lock);
        if (qctxt->lqc_valid) {
                LASSERT(qctxt->lqc_lqs_hash != NULL);
                hs = cfs_hash_getref(qctxt->lqc_lqs_hash);
        }
        cfs_spin_unlock(&qctxt->lqc_lock);

        if (hs == NULL) {
                rc = -EBUSY;
                goto out;
        }

        /* cfs_hash_lookup will +1 refcount for caller */
        lqs = cfs_hash_lookup(qctxt->lqc_lqs_hash, &lqs_key);
        if (lqs != NULL) /* found */
                goto out_put;

        if (!create)
                goto out_put;

        OBD_ALLOC_PTR(lqs);
        if (!lqs) {
                rc = -ENOMEM;
                goto out_put;
        }

        lqs->lqs_key = lqs_key;

        cfs_spin_lock_init(&lqs->lqs_lock);

        lqs->lqs_bwrite_pending = 0;
        lqs->lqs_iwrite_pending = 0;
        lqs->lqs_ino_rec = 0;
        lqs->lqs_blk_rec = 0;
        lqs->lqs_id = LQS_KEY_ID(lqs->lqs_key);
        lqs->lqs_flags = LQS_KEY_GRP(lqs->lqs_key) ? LQUOTA_FLAGS_GRP : 0;
        lqs->lqs_bunit_sz = qctxt->lqc_bunit_sz;
        lqs->lqs_iunit_sz = qctxt->lqc_iunit_sz;
        lqs->lqs_btune_sz = qctxt->lqc_btune_sz;
        lqs->lqs_itune_sz = qctxt->lqc_itune_sz;
        if (qctxt->lqc_handler) {
                lqs->lqs_last_bshrink  = 0;
                lqs->lqs_last_ishrink  = 0;
        }

        lqs->lqs_ctxt = qctxt; /* must be called before lqs_initref */
        cfs_atomic_set(&lqs->lqs_refcount, 1); /* 1 for caller */
        cfs_atomic_inc(&lqs->lqs_ctxt->lqc_lqs);

        /* lqc_lqs_hash will take +1 refcount on lqs on adding */
        lqs2 = cfs_hash_findadd_unique(qctxt->lqc_lqs_hash,
                                       &lqs->lqs_key, &lqs->lqs_hash);
        if (lqs2 == lqs) /* added to hash */
                goto out_put;

        create = 0;
        lqs_putref(lqs);
        lqs = lqs2;

 out_put:
        cfs_hash_putref(hs);
 out:
        if (rc != 0) { /* error */
                CERROR("get lqs error(rc: %d)\n", rc);
                return ERR_PTR(rc);
        }

        if (lqs != NULL) {
                LQS_DEBUG(lqs, "%s\n",
                          (create == 1 ? "create lqs" : "search lqs"));
        }
        return lqs;
}
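A caller of quota_search_lqs() has to drop the reference that the lookup (or creation) took. The sketch below is a hypothetical caller; LQS_KEY() as the key constructor is an assumption, while lqs_putref(), lqs_lock and lqs_bunit_sz all appear in the function above.

static int lqs_read_bunit(struct lustre_quota_ctxt *qctxt, int is_grp,
                          unsigned int id, unsigned long *bunit)
{
        struct lustre_qunit_size *lqs;

        lqs = quota_search_lqs(LQS_KEY(is_grp, id), qctxt, 1 /* create */);
        if (IS_ERR(lqs))
                return PTR_ERR(lqs);
        if (lqs == NULL)
                return -ENOENT;

        cfs_spin_lock(&lqs->lqs_lock);
        *bunit = lqs->lqs_bunit_sz;
        cfs_spin_unlock(&lqs->lqs_lock);

        lqs_putref(lqs);        /* drop the reference taken by the search */
        return 0;
}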
Example #14
static int osd_bufs_get_write(const struct lu_env *env, struct osd_object *obj,
				loff_t off, ssize_t len, struct niobuf_local *lnb)
{
	struct osd_device *osd = osd_obj2dev(obj);
	int                plen, off_in_block, sz_in_block;
	int                i = 0, npages = 0;
	arc_buf_t         *abuf;
	uint32_t           bs;
	uint64_t           dummy;
	ENTRY;

	dmu_object_size_from_db(obj->oo_db, &bs, &dummy);

	/*
	 * Currently only full blocks are handled with the zerocopy
	 * approach, so that we can be sure nobody else is trying to
	 * update the same block.
	 */
	while (len > 0) {
		LASSERT(npages < PTLRPC_MAX_BRW_PAGES);

		off_in_block = off & (bs - 1);
		sz_in_block = min_t(int, bs - off_in_block, len);

		if (sz_in_block == bs) {
			/* full block, try to use zerocopy */

			abuf = dmu_request_arcbuf(obj->oo_db, bs);
			if (unlikely(abuf == NULL))
				GOTO(out_err, -ENOMEM);

			cfs_atomic_inc(&osd->od_zerocopy_loan);

			/* go over the pages the arcbuf contains and expose
			 * them as local niobufs for ptlrpc's bulk transfers */
			while (sz_in_block > 0) {
				plen = min_t(int, sz_in_block, CFS_PAGE_SIZE);

				lnb[i].lnb_file_offset = off;
				lnb[i].lnb_page_offset = 0;
				lnb[i].len = plen;
				lnb[i].rc = 0;
				if (sz_in_block == bs)
					lnb[i].dentry = (void *)abuf;
				else
					lnb[i].dentry = NULL;

				/* this one is not supposed to fail */
				lnb[i].page = kmem_to_page(abuf->b_data +
							off_in_block);
				LASSERT(lnb[i].page);

				lprocfs_counter_add(osd->od_stats,
						LPROC_OSD_ZEROCOPY_IO, 1);

				sz_in_block -= plen;
				len -= plen;
				off += plen;
				off_in_block += plen;
				i++;
				npages++;
			}
		} else {
			if (off_in_block == 0 && len < bs &&
Example #15
static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj,
				loff_t off, ssize_t len, struct niobuf_local *lnb)
{
	struct osd_device *osd = osd_obj2dev(obj);
	dmu_buf_t        **dbp;
	int                rc, i, numbufs, npages = 0;
	ENTRY;

	/* grab buffers for read:
	 * the OSD API would let us grab the buffers first and then initiate
	 * the IO(s), so that all required IOs could be done in parallel, but
	 * at the moment the DMU doesn't provide a method to just grab the
	 * buffers. If we find this is vital for good performance we can
	 * implement our own replacement for dmu_buf_hold_array_by_bonus().
	 */
	while (len > 0) {
		rc = -dmu_buf_hold_array_by_bonus(obj->oo_db, off, len, TRUE,
						osd_zerocopy_tag, &numbufs,
						&dbp);
		LASSERT(rc == 0);

		for (i = 0; i < numbufs; i++) {
			int bufoff, tocpy, thispage;
			void *dbf = dbp[i];

			LASSERT(len > 0);

			cfs_atomic_inc(&osd->od_zerocopy_pin);

			bufoff = off - dbp[i]->db_offset;
			tocpy = min_t(int, dbp[i]->db_size - bufoff, len);

			/* kind of trick to differentiate dbuf vs. arcbuf */
			LASSERT(((unsigned long)dbp[i] & 1) == 0);
			dbf = (void *) ((unsigned long)dbp[i] | 1);

			while (tocpy > 0) {
				thispage = CFS_PAGE_SIZE;
				thispage -= bufoff & (CFS_PAGE_SIZE - 1);
				thispage = min(tocpy, thispage);

				lnb->rc = 0;
				lnb->lnb_file_offset = off;
				lnb->lnb_page_offset = bufoff & ~CFS_PAGE_MASK;
				lnb->len = thispage;
				lnb->page = kmem_to_page(dbp[i]->db_data +
								bufoff);
				/* mark just a single slot: we need this
				 * reference to the dbuf to be released once */
				lnb->dentry = dbf;
				dbf = NULL;

				tocpy -= thispage;
				len -= thispage;
				bufoff += thispage;
				off += thispage;

				npages++;
				lnb++;
			}

			/* steal the dbuf so dmu_buf_rele_array() can't release it */
			dbp[i] = NULL;
		}

		dmu_buf_rele_array(dbp, numbufs, osd_zerocopy_tag);
	}

	RETURN(npages);
}
Example #16
void llog_handle_get(struct llog_handle *loghandle)
{
	cfs_atomic_inc(&loghandle->lgh_refcount);
}
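llog_handle_get() only takes the reference; the matching put is not part of this listing. A hypothetical counterpart would look roughly like the sketch below (the cleanup on the final put is intentionally omitted):

static void llog_handle_put_sketch(struct llog_handle *loghandle)
{
	if (cfs_atomic_dec_and_test(&loghandle->lgh_refcount)) {
		/* last reference dropped: the real code would tear the
		 * handle down and free it here */
	}
}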