Example #1
0
/* return the encrypted pages of a bulk descriptor to the shared page pool */
void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
	int p_idx, g_idx;
	int i;

	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));

	if (!GET_ENC_KIOV(desc))
		return;

	LASSERT(desc->bd_iov_count > 0);

	spin_lock(&page_pools.epp_lock);

	p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
	g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

	LASSERT(page_pools.epp_free_pages + desc->bd_iov_count <=
		page_pools.epp_total_pages);
	LASSERT(page_pools.epp_pools[p_idx]);

	for (i = 0; i < desc->bd_iov_count; i++) {
		LASSERT(BD_GET_ENC_KIOV(desc, i).bv_page);
		LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
		LASSERT(!page_pools.epp_pools[p_idx][g_idx]);

		page_pools.epp_pools[p_idx][g_idx] =
			BD_GET_ENC_KIOV(desc, i).bv_page;

		if (++g_idx == PAGES_PER_POOL) {
			p_idx++;
			g_idx = 0;
		}
	}

	page_pools.epp_free_pages += desc->bd_iov_count;

	enc_pools_wakeup();

	spin_unlock(&page_pools.epp_lock);

	kfree(GET_ENC_KIOV(desc));
	GET_ENC_KIOV(desc) = NULL;
}
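The put path above turns the flat free-page counter into a pool index and an in-pool slot: p_idx = free / PAGES_PER_POOL, g_idx = free % PAGES_PER_POOL, carrying into the next pool when the slot wraps. Below is a minimal user-space sketch of that two-level indexing; the geometry (POOL_COUNT, PAGES_PER_POOL) and the integer stand-in for struct page are illustrative assumptions, not the real page_pools layout.

#include <stdio.h>

/* Illustrative geometry only; the real PAGES_PER_POOL is defined elsewhere. */
#define PAGES_PER_POOL	4
#define POOL_COUNT	3

int main(void)
{
	/* pools[p][g] stands in for page_pools.epp_pools[p_idx][g_idx] */
	int pools[POOL_COUNT][PAGES_PER_POOL] = { { 0 } };
	int free_pages = 5;	/* flat counter, like epp_free_pages */
	int to_put = 6;		/* like desc->bd_iov_count */
	int p_idx = free_pages / PAGES_PER_POOL;
	int g_idx = free_pages % PAGES_PER_POOL;
	int i;

	for (i = 0; i < to_put; i++) {
		pools[p_idx][g_idx] = 1;	/* "return" one page */
		if (++g_idx == PAGES_PER_POOL) {
			p_idx++;
			g_idx = 0;
		}
	}
	free_pages += to_put;

	printf("free_pages=%d ends at pool %d slot %d\n",
	       free_pages, free_pages / PAGES_PER_POOL,
	       free_pages % PAGES_PER_POOL);
	return 0;
}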
Example #2
0
void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
        int     p_idx, g_idx;
        int     i;

        if (desc->bd_enc_iov == NULL)
                return;

        LASSERT(desc->bd_iov_count > 0);

        cfs_spin_lock(&page_pools.epp_lock);

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

        LASSERT(page_pools.epp_free_pages + desc->bd_iov_count <=
                page_pools.epp_total_pages);
        LASSERT(page_pools.epp_pools[p_idx]);

        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(desc->bd_enc_iov[i].kiov_page != NULL);
                LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
                LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);

                page_pools.epp_pools[p_idx][g_idx] =
                                        desc->bd_enc_iov[i].kiov_page;

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        page_pools.epp_free_pages += desc->bd_iov_count;

        enc_pools_wakeup();

        cfs_spin_unlock(&page_pools.epp_lock);

        OBD_FREE(desc->bd_enc_iov,
                 desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
        desc->bd_enc_iov = NULL;
}
Example #3
0
/*
 * We don't do much for add_user/del_user anymore, except adding some
 * initial pages in add_user() if the current pools are empty; the rest
 * is handled by the pools' self-adaptation.
 */
int sptlrpc_enc_pool_add_user(void)
{
        int     need_grow = 0;

        cfs_spin_lock(&page_pools.epp_lock);
        if (page_pools.epp_growing == 0 && page_pools.epp_total_pages == 0) {
                page_pools.epp_growing = 1;
                need_grow = 1;
        }
        cfs_spin_unlock(&page_pools.epp_lock);

        if (need_grow) {
                enc_pools_add_pages(PTLRPC_MAX_BRW_PAGES +
                                    PTLRPC_MAX_BRW_PAGES);

                cfs_spin_lock(&page_pools.epp_lock);
                page_pools.epp_growing = 0;
                enc_pools_wakeup();
                cfs_spin_unlock(&page_pools.epp_lock);
        }
        return 0;
}
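The pattern here is: test-and-set the epp_growing flag under the lock, do the actual growth outside the lock, then retake the lock to clear the flag and wake waiters. A user-space sketch of the same pattern follows, assuming pthreads; pool_grow() and pool_add_user() are placeholder names for illustration, not Lustre APIs.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  pool_cond = PTHREAD_COND_INITIALIZER;
static int growing;
static int total_pages;

/* Stand-in for enc_pools_add_pages(): allocate, then publish under the lock. */
static void pool_grow(int npages)
{
	pthread_mutex_lock(&pool_lock);
	total_pages += npages;
	pthread_mutex_unlock(&pool_lock);
}

/* Mirrors sptlrpc_enc_pool_add_user(): only the first caller on an empty,
 * non-growing pool performs the initial grow; everyone else returns at once. */
int pool_add_user(void)
{
	int need_grow = 0;

	pthread_mutex_lock(&pool_lock);
	if (!growing && total_pages == 0) {
		growing = 1;
		need_grow = 1;
	}
	pthread_mutex_unlock(&pool_lock);

	if (need_grow) {
		pool_grow(64);			/* done outside the lock */

		pthread_mutex_lock(&pool_lock);
		growing = 0;
		pthread_cond_broadcast(&pool_cond);	/* like enc_pools_wakeup() */
		pthread_mutex_unlock(&pool_lock);
	}
	return 0;
}

int main(void)
{
	pool_add_user();
	pool_add_user();	/* second call sees total_pages != 0, no grow */
	printf("total_pages=%d\n", total_pages);
	return 0;
}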
Example #4
0
/*
 * we allocate the requested pages atomically.
 */
int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
        cfs_waitlink_t  waitlink;
        unsigned long   this_idle = -1;
        cfs_time_t      tick = 0;
        long            now;
        int             p_idx, g_idx;
        int             i;

        LASSERT(desc->bd_iov_count > 0);
        LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);

        /* resent bulk, enc iov might have been allocated previously */
        if (desc->bd_enc_iov != NULL)
                return 0;

        OBD_ALLOC(desc->bd_enc_iov,
                  desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
        if (desc->bd_enc_iov == NULL)
                return -ENOMEM;

        cfs_spin_lock(&page_pools.epp_lock);

        page_pools.epp_st_access++;
again:
        if (unlikely(page_pools.epp_free_pages < desc->bd_iov_count)) {
                if (tick == 0)
                        tick = cfs_time_current();

                now = cfs_time_current_sec();

                page_pools.epp_st_missings++;
                page_pools.epp_pages_short += desc->bd_iov_count;

                if (enc_pools_should_grow(desc->bd_iov_count, now)) {
                        page_pools.epp_growing = 1;

                        cfs_spin_unlock(&page_pools.epp_lock);
                        enc_pools_add_pages(page_pools.epp_pages_short / 2);
                        cfs_spin_lock(&page_pools.epp_lock);

                        page_pools.epp_growing = 0;

                        enc_pools_wakeup();
                } else {
                        if (++page_pools.epp_waitqlen >
                            page_pools.epp_st_max_wqlen)
                                page_pools.epp_st_max_wqlen =
                                                page_pools.epp_waitqlen;

                        cfs_set_current_state(CFS_TASK_UNINT);
                        cfs_waitlink_init(&waitlink);
                        cfs_waitq_add(&page_pools.epp_waitq, &waitlink);

                        cfs_spin_unlock(&page_pools.epp_lock);
                        cfs_waitq_wait(&waitlink, CFS_TASK_UNINT);
                        cfs_waitq_del(&page_pools.epp_waitq, &waitlink);
                        LASSERT(page_pools.epp_waitqlen > 0);
                        cfs_spin_lock(&page_pools.epp_lock);
                        page_pools.epp_waitqlen--;
                }

                LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
                page_pools.epp_pages_short -= desc->bd_iov_count;

                this_idle = 0;
                goto again;
        }

        /* record max wait time */
        if (unlikely(tick != 0)) {
                tick = cfs_time_current() - tick;
                if (tick > page_pools.epp_st_max_wait)
                        page_pools.epp_st_max_wait = tick;
        }

        /* proceed with rest of allocation */
        page_pools.epp_free_pages -= desc->bd_iov_count;

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
                desc->bd_enc_iov[i].kiov_page =
                                        page_pools.epp_pools[p_idx][g_idx];
                page_pools.epp_pools[p_idx][g_idx] = NULL;

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        if (page_pools.epp_free_pages < page_pools.epp_st_lowfree)
                page_pools.epp_st_lowfree = page_pools.epp_free_pages;

        /*
         * new idle index = (old * weight + new) / (weight + 1)
         */
        if (this_idle == -1) {
                this_idle = page_pools.epp_free_pages * IDLE_IDX_MAX /
                            page_pools.epp_total_pages;
        }
        page_pools.epp_idle_idx = (page_pools.epp_idle_idx * IDLE_IDX_WEIGHT +
                                   this_idle) /
                                  (IDLE_IDX_WEIGHT + 1);

        page_pools.epp_last_access = cfs_time_current_sec();

        cfs_spin_unlock(&page_pools.epp_lock);
        return 0;
}
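The idle index maintained at the end of the allocation is a weighted moving average: new_idx = (old_idx * IDLE_IDX_WEIGHT + this_idle) / (IDLE_IDX_WEIGHT + 1), where this_idle is the free/total ratio scaled to IDLE_IDX_MAX, or 0 if the caller went through the shortage path. A small sketch of that update, with IDLE_IDX_MAX and IDLE_IDX_WEIGHT picked for illustration rather than copied from the real headers:

#include <stdio.h>

/* Illustrative constants; the real values live in the Lustre sources. */
#define IDLE_IDX_MAX	100
#define IDLE_IDX_WEIGHT	3

/* One averaging step, the same update done at the end of
 * sptlrpc_enc_pool_get_pages(). */
static unsigned long idle_update(unsigned long old_idx, unsigned long sample)
{
	return (old_idx * IDLE_IDX_WEIGHT + sample) / (IDLE_IDX_WEIGHT + 1);
}

int main(void)
{
	unsigned long idx = IDLE_IDX_MAX;	/* start fully idle */
	unsigned long free_pages = 256, total_pages = 1024;
	unsigned long sample = free_pages * IDLE_IDX_MAX / total_pages;	/* 25 */
	int i;

	/* Repeated allocations at 25% free pull the index toward 25:
	 * 100 -> 81 -> 67 -> 56 -> 48 -> 42 */
	for (i = 0; i < 5; i++) {
		idx = idle_update(idx, sample);
		printf("step %d: idle_idx=%lu\n", i, idx);
	}

	/* A caller that had to wait reports this_idle = 0, dragging it down. */
	idx = idle_update(idx, 0);
	printf("after an allocation that waited: idle_idx=%lu\n", idx);
	return 0;
}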