Example #1
void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx,
                         long amount)
{
        struct lprocfs_counter *percpu_cntr;
        int smp_id;

        if (stats == NULL)
                return;

        /* With per-client stats, statistics are allocated only for
         * single CPU area, so the smp_id should be 0 always. */
        smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);

        percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
        if (!(stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU))
                cfs_atomic_inc(&percpu_cntr->lc_cntl.la_entry);
        if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
                /*
                 * Currently lprocfs_counter_add() can only be called in
                 * thread context, but we sometimes free memory from RCU
                 * callbacks, which call lprocfs_counter_sub(), and RCU
                 * callbacks may run in softirq context.  That is the only
                 * way we can get here in softirq context, so use a separate
                 * counter for it.  bz20650.
                 */
                if (cfs_in_interrupt())
                        percpu_cntr->lc_sum_irq -= amount;
                else
                        percpu_cntr->lc_sum -= amount;
        }
        if (!(stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU))
                cfs_atomic_inc(&percpu_cntr->lc_cntl.la_exit);
        lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID);
}
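For reference, cfs_in_interrupt() is libcfs's portability wrapper around the
kernel's interrupt-context test. A minimal sketch of what such a wrapper
plausibly looks like on a Linux build (the real libcfs definition may
differ):

/* Sketch, assuming Linux: non-zero when running in hard-irq or softirq
 * context, where sleeping is forbidden and plain (non-irq-safe)
 * spinlocks shared with process context must not be taken. */
#include <linux/hardirq.h>

static inline int cfs_in_interrupt(void)
{
        return in_interrupt() != 0;
}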
Example #2
File: tracefile.c Project: DCteam/lustre
static struct cfs_trace_page *cfs_tage_alloc(int gfp)
{
        cfs_page_t            *page;
        struct cfs_trace_page *tage;

        /* My caller is trying to free memory */
        if (!cfs_in_interrupt() && cfs_memory_pressure_get())
                return NULL;

        /*
         * Don't spam console with allocation failures: they will be reported
         * by upper layer anyway.
         */
        gfp |= CFS_ALLOC_NOWARN;
        page = cfs_alloc_page(gfp);
        if (page == NULL)
                return NULL;

        tage = cfs_alloc(sizeof(*tage), gfp);
        if (tage == NULL) {
                cfs_free_page(page);
                return NULL;
        }

        tage->page = page;
        cfs_atomic_inc(&cfs_tage_allocated);
        return tage;
}
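The matching release path is not part of this snippet; a plausible
counterpart, sketched purely from the allocation logic above (the actual
helper in tracefile.c may differ):

/* Sketch of the inverse of cfs_tage_alloc(): free the data page, then
 * the descriptor, keeping the cfs_tage_allocated counter balanced. */
static void cfs_tage_free(struct cfs_trace_page *tage)
{
        LASSERT(tage != NULL);
        LASSERT(tage->page != NULL);

        cfs_free_page(tage->page);
        cfs_free(tage);
        cfs_atomic_dec(&cfs_tage_allocated);
}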
Example #3
void lprocfs_counter_add(struct lprocfs_stats *stats, int idx,
                         long amount)
{
        struct lprocfs_counter *percpu_cntr;
        int smp_id;

        if (stats == NULL)
                return;

        /* With per-client stats, statistics are allocated only for
         * single CPU area, so the smp_id should be 0 always. */
        smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID);

        percpu_cntr = &(stats->ls_percpu[smp_id]->lp_cntr[idx]);
        if (!(stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU))
                cfs_atomic_inc(&percpu_cntr->lc_cntl.la_entry);
        percpu_cntr->lc_count++;

        if (percpu_cntr->lc_config & LPROCFS_CNTR_AVGMINMAX) {
                /* see comment in lprocfs_counter_sub */
                LASSERT(!cfs_in_interrupt());

                percpu_cntr->lc_sum += amount;
                if (percpu_cntr->lc_config & LPROCFS_CNTR_STDDEV)
                        percpu_cntr->lc_sumsquare += (__s64)amount * amount;
                if (amount < percpu_cntr->lc_min)
                        percpu_cntr->lc_min = amount;
                if (amount > percpu_cntr->lc_max)
                        percpu_cntr->lc_max = amount;
        }
        if (!(stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU))
                cfs_atomic_inc(&percpu_cntr->lc_cntl.la_exit);
        lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID);
}
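A hypothetical call site, to make the add/sub pairing concrete; obd,
MY_OPC_IDX and nbytes below are assumed names, not taken from the snippets:

/* Hypothetical usage sketch: account an operation when it starts... */
lprocfs_counter_add(obd->obd_stats, MY_OPC_IDX, (long)nbytes);

/* ...and undo it later, possibly from an RCU callback running in
 * softirq context, which is exactly the lc_sum_irq case above. */
lprocfs_counter_sub(obd->obd_stats, MY_OPC_IDX, (long)nbytes);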
Example #4
File: workitem.c Project: DCteam/lustre
/*
 * A workitem scheduled with (serial == 1) is strictly serialised, not only
 * with itself but also with all others scheduled this way.
 *
 * For now there is only one static serialised queue, but more might be added
 * in the future, and dynamic creation of serialised queues might be
 * supported as well.
 */
void
cfs_wi_schedule(cfs_workitem_t *wi)
{
        cfs_wi_sched_t *sched = cfs_wi_to_sched(wi);

        LASSERT (!cfs_in_interrupt()); /* because we use plain spinlock */
        LASSERT (!sched->ws_shuttingdown);

        cfs_wi_sched_lock(sched);

        if (!wi->wi_scheduled) {
                LASSERT (cfs_list_empty(&wi->wi_list));

                wi->wi_scheduled = 1;
                if (!wi->wi_running) {
                        cfs_list_add_tail(&wi->wi_list, &sched->ws_runq);
#ifdef __KERNEL__
                        cfs_waitq_signal(&sched->ws_waitq);
#endif
                } else {
                        cfs_list_add(&wi->wi_list, &sched->ws_rerunq);
                }
        }

        LASSERT (!cfs_list_empty(&wi->wi_list));
        cfs_wi_sched_unlock(sched);
        return;
}
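The wi_running / ws_rerunq logic above is easier to follow next to the
consumer side. A simplified sketch of what a scheduler thread plausibly does
with ws_runq (the real loop in workitem.c also handles shutdown; a non-zero
return from wi_action means the item may already have been freed):

/* Simplified consumer sketch: run items from ws_runq; an item that was
 * rescheduled while running was parked on ws_rerunq by cfs_wi_schedule()
 * and is picked up on a later pass, which serialises it with itself. */
cfs_wi_sched_lock(sched);
while (!cfs_list_empty(&sched->ws_runq)) {
        cfs_workitem_t *wi = cfs_list_entry(sched->ws_runq.next,
                                            cfs_workitem_t, wi_list);

        cfs_list_del_init(&wi->wi_list);
        wi->wi_scheduled = 0;
        wi->wi_running   = 1;
        cfs_wi_sched_unlock(sched);

        if ((*wi->wi_action)(wi) != 0) {        /* wi dead, maybe freed */
                cfs_wi_sched_lock(sched);
                continue;
        }

        cfs_wi_sched_lock(sched);
        wi->wi_running = 0;
        if (!cfs_list_empty(&wi->wi_list)) {    /* parked on ws_rerunq */
                cfs_list_del(&wi->wi_list);
                cfs_list_add_tail(&wi->wi_list, &sched->ws_runq);
        }
}
cfs_wi_sched_unlock(sched);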
Example #5
File: workitem.c Project: DCteam/lustre
/**
 * Cancel a workitem.
 */
int
cfs_wi_cancel (cfs_workitem_t *wi)
{
        cfs_wi_sched_t *sched = cfs_wi_to_sched(wi);
        int             rc;

        LASSERT (!cfs_in_interrupt()); /* because we use plain spinlock */
        LASSERT (!sched->ws_shuttingdown);

        cfs_wi_sched_lock(sched);
        /*
         * Return 0 if the workitem is already running; otherwise return 1,
         * which means it has been descheduled and cannot race with
         * wi_action.
         */
        rc = !(wi->wi_running);

        if (wi->wi_scheduled) { /* cancel pending schedules */
                LASSERT (!cfs_list_empty(&wi->wi_list));
                cfs_list_del_init(&wi->wi_list);
                wi->wi_scheduled = 0;
        }

        LASSERT (cfs_list_empty(&wi->wi_list));

        cfs_wi_sched_unlock(sched);
        return rc;
}
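A hypothetical caller, to make the return convention concrete (the two
cleanup helpers below are assumed names, not Lustre API):

/* rc == 1: the workitem is off the queues and wi_action cannot race
 * with us.  rc == 0: wi_action is running right now, so the caller
 * must synchronise with it before tearing the workitem down. */
if (cfs_wi_cancel(wi))
        my_destroy_workitem(wi);        /* assumed helper */
else
        my_defer_destroy(wi);           /* assumed helper */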
Example #6
/**
 * Server-side bulk abort.  Idempotent.  Not thread-safe (i.e. it only
 * serialises with the completion callback).
 */
void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
        struct l_wait_info       lwi;
        int                      rc;

        LASSERT(!cfs_in_interrupt());           /* might sleep */

        if (!ptlrpc_server_bulk_active(desc))   /* completed or */
                return;                         /* never started */

        /* We used to poison the pages with 0xab here because we did not want
         * to send any meaningful data over the wire for evicted clients
         * (bug 9297).  However, this is no longer safe now that we use the
         * page cache on the OSS (bug 20560). */

        /* The unlink ensures the callback happens ASAP and is the last
         * one.  If it fails, it must be because completion just happened,
         * but we must still l_wait_event() in this case, to give liblustre
         * a chance to run server_bulk_callback(). */
        mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_count);

        for (;;) {
                /* Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs */
                lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
                                           cfs_time_seconds(1), NULL, NULL);
                rc = l_wait_event(desc->bd_waitq,
                                  !ptlrpc_server_bulk_active(desc), &lwi);
                if (rc == 0)
                        return;

                LASSERT(rc == -ETIMEDOUT);
                CWARN("Unexpectedly long timeout: desc %p\n", desc);
        }
}
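mdunlink_iterate_helper() is not shown in this snippet; judging from its
arguments it plausibly just unlinks every memory descriptor attached to the
bulk, e.g.:

/* Plausible sketch inferred from the call sites; the real helper may
 * differ.  LNetMDUnlink() forces the completion event that eventually
 * wakes desc->bd_waitq in the loop above. */
static void mdunlink_iterate_helper(lnet_handle_md_t *bd_mds, int count)
{
        int i;

        for (i = 0; i < count; i++)
                LNetMDUnlink(bd_mds[i]);
}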
Example #7
/**
 * Disconnect a bulk desc from the network. Idempotent. Not
 * thread-safe (i.e. only interlocks with completion callback).
 * Returns 1 on success or 0 if network unregistration failed for whatever
 * reason.
 */
int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
{
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        cfs_waitq_t             *wq;
        struct l_wait_info       lwi;
        int                      rc;
        ENTRY;

        LASSERT(!cfs_in_interrupt());     /* might sleep */

        /* Let's setup deadline for reply unlink. */
        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
            async && req->rq_bulk_deadline == 0)
                req->rq_bulk_deadline = cfs_time_current_sec() + LONG_UNLINK;

        if (ptlrpc_client_bulk_active(req) == 0)       /* completed or */
                RETURN(1);                              /* never registered */

        LASSERT(desc->bd_req == req);  /* bd_req NULL until registered */

        /* The unlink ensures the callback happens ASAP and is the last
         * one.  If it fails, it must be because completion just happened,
         * but we must still l_wait_event() in this case to give liblustre
         * a chance to run client_bulk_callback(). */
        mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);

        if (ptlrpc_client_bulk_active(req) == 0)       /* completed or */
                RETURN(1);                              /* never registered */

        /* Move to "Unregistering" phase as bulk was not unlinked yet. */
        ptlrpc_rqphase_move(req, RQ_PHASE_UNREGISTERING);

        /* Do not wait for unlink to finish. */
        if (async)
                RETURN(0);

        if (req->rq_set != NULL)
                wq = &req->rq_set->set_waitq;
        else
                wq = &req->rq_reply_waitq;

        for (;;) {
                /* Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs */
                lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
                                           cfs_time_seconds(1), NULL, NULL);
                rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi);
                if (rc == 0) {
                        ptlrpc_rqphase_move(req, req->rq_next_phase);
                        RETURN(1);
                }

                LASSERT(rc == -ETIMEDOUT);
                DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
                          desc);
        }
        RETURN(0);
}
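ptlrpc_client_bulk_active() is the predicate that both the early-exit checks
and the wait loop rely on. A sketch of its likely shape, including the
rq_bulk_deadline fault-injection hook armed above (field names follow this
snippet; details of the real inline may differ):

/* Sketch: the bulk counts as active while any of its MDs is still
 * linked (bd_md_count, assumed to be protected by bd_lock), or while
 * the OBD_FAIL_PTLRPC_LONG_BULK_UNLINK deadline has not yet expired. */
static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
{
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        int                      rc;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
            req->rq_bulk_deadline > cfs_time_current_sec())
                return 1;

        if (desc == NULL)
                return 0;

        cfs_spin_lock(&desc->bd_lock);
        rc = desc->bd_md_count;
        cfs_spin_unlock(&desc->bd_lock);
        return rc;
}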
Example #8
File: workitem.c Project: DCteam/lustre
/* XXX:
 * 0. it only works when called from wi->wi_action.
 * 1. when it returns, no one shall try to schedule the workitem again.
 */
void
cfs_wi_exit(cfs_workitem_t *wi)
{
        cfs_wi_sched_t *sched = cfs_wi_to_sched(wi);

        LASSERT (!cfs_in_interrupt()); /* because we use plain spinlock */
        LASSERT (!sched->ws_shuttingdown);

        cfs_wi_sched_lock(sched);

#ifdef __KERNEL__
        LASSERT (wi->wi_running);
#endif
        if (wi->wi_scheduled) { /* cancel pending schedules */
                LASSERT (!cfs_list_empty(&wi->wi_list));
                cfs_list_del_init(&wi->wi_list);
        }

        LASSERT (cfs_list_empty(&wi->wi_list));
        wi->wi_scheduled = 1; /* LBUG future schedule attempts */

        cfs_wi_sched_unlock(sched);
        return;
}
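The "LBUG future schedule attempts" trick relies on cfs_wi_schedule() in
example #4: with wi_scheduled forced to 1 and wi_list left empty, a later
call skips the queueing branch and then trips its final assertion.

/* After cfs_wi_exit(): wi_scheduled == 1 but wi_list is empty, so any
 * subsequent cfs_wi_schedule(wi) falls through to
 *         LASSERT(!cfs_list_empty(&wi->wi_list));
 * and LBUGs, turning a use-after-exit bug into an immediate assert. */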
Example #9
File: ptllnd_cb.c Project: DCteam/lustre
        unsigned int      payload_nob = lntmsg->msg_len;
        kptl_net_t       *net = ni->ni_data;
        kptl_peer_t      *peer = NULL;
        int               mpflag = 0;
        kptl_tx_t        *tx;
        int               nob;
        int               nfrag;
        int               rc;

        LASSERT (net->net_ni == ni);
        LASSERT (!net->net_shutdown);
        LASSERT (payload_nob == 0 || payload_niov > 0);
        LASSERT (payload_niov <= LNET_MAX_IOV);
        LASSERT (payload_niov <= PTL_MD_MAX_IOV); /* !!! */
        LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
        LASSERT (!cfs_in_interrupt());

        if (lntmsg->msg_vmflush)
                mpflag = cfs_memory_pressure_get_and_set();

        rc = kptllnd_find_target(net, target, &peer);
        if (rc != 0)
                goto out;

        /* NB peer->peer_id does NOT always equal target, be careful with
         * which one to use */
        switch (type) {
        default:
                LBUG();
                return -EINVAL;