Example #1
TEST_END

TEST_BEGIN(test_bitmap_unset)
{
	size_t i;

	for (i = 1; i <= BITMAP_MAXBITS; i++) {
		bitmap_info_t binfo;
		bitmap_info_init(&binfo, i);
		{
			size_t j;
			bitmap_t *bitmap = (bitmap_t *)malloc(
			    bitmap_size(&binfo));
			bitmap_init(bitmap, &binfo);

			for (j = 0; j < i; j++)
				bitmap_set(bitmap, &binfo, j);
			assert_true(bitmap_full(bitmap, &binfo),
			    "All bits should be set");
			for (j = 0; j < i; j++)
				bitmap_unset(bitmap, &binfo, j);
			for (j = 0; j < i; j++)
				bitmap_set(bitmap, &binfo, j);
			assert_true(bitmap_full(bitmap, &binfo),
			    "All bits should be set");
			free(bitmap);
		}
	}
}
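
The assertions above rest on the contract of bitmap_full(): it reports true exactly when every one of the tracked bits is set. As a point of reference, a minimal self-contained sketch of that semantics follows; it deliberately ignores jemalloc's real representation (which keeps per-group summaries instead of scanning), and bitmap_full_ref is a hypothetical name.

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>

#define BITS_PER_GROUP	(sizeof(unsigned long) * CHAR_BIT)

/* Hypothetical reference semantics for bitmap_full(): true iff every
 * bit in [0, nbits) is set. Linear scan, purely for illustration. */
static bool
bitmap_full_ref(const unsigned long *bits, size_t nbits)
{
	size_t i;

	for (i = 0; i < nbits; i++) {
		if (!((bits[i / BITS_PER_GROUP] >>
		    (i % BITS_PER_GROUP)) & 1UL))
			return false;
	}
	return true;
}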
Example #2
static void
test_bitmap_unset(void)
{
    size_t i;

    for (i = 1; i <= MAXBITS; i++) {
        bitmap_info_t binfo;
        bitmap_info_init(&binfo, i);
        {
            size_t j;
            bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
                                      bitmap_info_ngroups(&binfo));
            bitmap_init(bitmap, &binfo);

            for (j = 0; j < i; j++)
                bitmap_set(bitmap, &binfo, j);
            assert(bitmap_full(bitmap, &binfo));
            for (j = 0; j < i; j++)
                bitmap_unset(bitmap, &binfo, j);
            for (j = 0; j < i; j++)
                bitmap_set(bitmap, &binfo, j);
            assert(bitmap_full(bitmap, &binfo));
            free(bitmap);
        }
    }
}
Example #3
TEST_END

TEST_BEGIN(test_bitmap_sfu)
{
	size_t i;

	for (i = 1; i <= BITMAP_MAXBITS; i++) {
		bitmap_info_t binfo;
		bitmap_info_init(&binfo, i);
		{
			size_t j;
			bitmap_t *bitmap = (bitmap_t *)malloc(
			    bitmap_size(&binfo));
			bitmap_init(bitmap, &binfo);

			/* Iteratively set bits starting at the beginning. */
			for (j = 0; j < i; j++) {
				assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
				    "First unset bit should be just after "
				    "previous first unset bit");
			}
			assert_true(bitmap_full(bitmap, &binfo),
			    "All bits should be set");

			/*
			 * Iteratively unset bits starting at the end, and
			 * verify that bitmap_sfu() reaches the unset bits.
			 */
			for (j = i - 1; j < i; j--) { /* (i..0] */
				bitmap_unset(bitmap, &binfo, j);
				assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
				    "First unset bit should the bit previously "
				    "unset");
				bitmap_unset(bitmap, &binfo, j);
			}
			assert_false(bitmap_get(bitmap, &binfo, 0),
			    "Bit should be unset");

			/*
			 * Iteratively set bits starting at the beginning, and
			 * verify that bitmap_sfu() looks past them.
			 */
			for (j = 1; j < i; j++) {
				bitmap_set(bitmap, &binfo, j - 1);
				assert_zd_eq(bitmap_sfu(bitmap, &binfo), j,
				    "First unset bit should be just after the "
				    "bit previously set");
				bitmap_unset(bitmap, &binfo, j);
			}
			assert_zd_eq(bitmap_sfu(bitmap, &binfo), i - 1,
			    "First unset bit should be the last bit");
			assert_true(bitmap_full(bitmap, &binfo),
			    "All bits should be set");
			free(bitmap);
		}
	}
}
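
bitmap_sfu() ("set first unset") is the one operation here that both queries and mutates: it locates the lowest clear bit, sets it, and returns its index, which is why the test can predict its return value after every set/unset. A hedged reference sketch of that semantics is below; it uses a linear scan, whereas the real jemalloc implementation walks a multi-level summary, and bitmap_sfu_ref is a hypothetical name.

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>

#define BITS_PER_GROUP	(sizeof(unsigned long) * CHAR_BIT)

/* Hypothetical reference semantics for bitmap_sfu(): find the lowest
 * clear bit, set it, and return its index. The caller must guarantee
 * the bitmap is not already full. */
static size_t
bitmap_sfu_ref(unsigned long *bits, size_t nbits)
{
	size_t i;

	for (i = 0; i < nbits; i++) {
		unsigned long *group = &bits[i / BITS_PER_GROUP];
		unsigned long mask = 1UL << (i % BITS_PER_GROUP);

		if (!(*group & mask)) {
			*group |= mask;
			return i;
		}
	}
	return nbits;	/* unreachable if the precondition holds */
}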
Example #4
void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)
{
	int o;
	int i;

	mutex_lock(&hr_dev->pgdir_mutex);

	o = db->order;
	i = db->index;

	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}

	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	if (bitmap_full(db->u.pgdir->order1, HNS_ROCE_DB_PER_PAGE / 2)) {
		dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,
				  db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&hr_dev->pgdir_mutex);
}
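
The interesting step above is the buddy test: an order-0 doorbell slot i and its sibling i ^ 1 together form one order-1 slot at index i >> 1, so if the sibling is already free the two halves are coalesced before being marked free. A self-contained model of just that step, with plain bool arrays standing in for the kernel bitmaps (free_db_slot and both array names are hypothetical):

#include <stdbool.h>

/* Hypothetical model of the coalescing logic in hns_roce_free_db():
 * order0[i] true means order-0 slot i is free; order1[i] true means
 * the order-1 pair i (covering order-0 slots 2i and 2i+1) is free. */
static void
free_db_slot(bool *order0, bool *order1, int index)
{
	if (order0[index ^ 1]) {
		/* Sibling is free too: merge into one order-1 slot. */
		order0[index ^ 1] = false;
		order1[index >> 1] = true;
	} else {
		/* Sibling still in use: free only this half. */
		order0[index] = true;
	}
}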
Example #5
static u32 mdss_mdp_smp_mmb_reserve(struct mdss_mdp_pipe_smp_map *smp_map,
	size_t n)
{
	u32 i, mmb;
	u32 fixed_cnt = bitmap_weight(smp_map->fixed, SMP_MB_CNT);
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (n <= fixed_cnt)
		return fixed_cnt;
	else
		n -= fixed_cnt;

	i = bitmap_weight(smp_map->allocated, SMP_MB_CNT);

	if (i != 0 && n != i) {
		pr_debug("Can't change mmb config, num_blks: %d alloc: %d\n",
			n, i);
		return 0;
	}

	mdss_mdp_smp_mmb_free(smp_map->reserved, false);

	/* Reserve mmb blocks */
	for (; i < n; i++) {
		if (bitmap_full(mdata->mmb_alloc_map, SMP_MB_CNT))
			break;

		mmb = find_first_zero_bit(mdata->mmb_alloc_map, SMP_MB_CNT);
		set_bit(mmb, smp_map->reserved);
		set_bit(mmb, mdata->mmb_alloc_map);
	}

	return i + fixed_cnt;
}
Example #6
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int o;
	int i;

	mutex_lock(&priv->pgdir_mutex);

	o = db->order;
	i = db->index;

	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}
	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&priv->pgdir_mutex);
}
Example #7
TEST_END

static void
test_bitmap_sfu_body(const bitmap_info_t *binfo, size_t nbits)
{
	size_t i;
	bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
	assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
	bitmap_init(bitmap, binfo);

	/* Iteratively set bits starting at the beginning. */
	for (i = 0; i < nbits; i++) {
		assert_zd_eq(bitmap_sfu(bitmap, binfo), i,
		    "First unset bit should be just after previous first unset "
		    "bit");
	}
	assert_true(bitmap_full(bitmap, binfo), "All bits should be set");

	/*
	 * Iteratively unset bits starting at the end, and verify that
	 * bitmap_sfu() reaches the unset bits.
	 */
	for (i = nbits - 1; i < nbits; i--) { /* (nbits..0] */
		bitmap_unset(bitmap, binfo, i);
		assert_zd_eq(bitmap_sfu(bitmap, binfo), i,
		    "First unset bit should the bit previously unset");
		bitmap_unset(bitmap, binfo, i);
	}
	assert_false(bitmap_get(bitmap, binfo, 0), "Bit should be unset");

	/*
	 * Iteratively set bits starting at the beginning, and verify that
	 * bitmap_sfu() looks past them.
	 */
	for (i = 1; i < nbits; i++) {
		bitmap_set(bitmap, binfo, i - 1);
		assert_zd_eq(bitmap_sfu(bitmap, binfo), i,
		    "First unset bit should be just after the bit previously "
		    "set");
		bitmap_unset(bitmap, binfo, i);
	}
	assert_zd_eq(bitmap_sfu(bitmap, binfo), nbits - 1,
	    "First unset bit should be the last bit");
	assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
	free(bitmap);
}
Example #8
static void
test_bitmap_sfu(void)
{
    size_t i;

    for (i = 1; i <= MAXBITS; i++) {
        bitmap_info_t binfo;
        bitmap_info_init(&binfo, i);
        {
            ssize_t j;
            bitmap_t *bitmap = malloc(sizeof(bitmap_t) *
                                      bitmap_info_ngroups(&binfo));
            bitmap_init(bitmap, &binfo);

            /* Iteratively set bits starting at the beginning. */
            for (j = 0; j < i; j++)
                assert(bitmap_sfu(bitmap, &binfo) == j);
            assert(bitmap_full(bitmap, &binfo));

            /*
             * Iteratively unset bits starting at the end, and
             * verify that bitmap_sfu() reaches the unset bits.
             */
            for (j = i - 1; j >= 0; j--) {
                bitmap_unset(bitmap, &binfo, j);
                assert(bitmap_sfu(bitmap, &binfo) == j);
                bitmap_unset(bitmap, &binfo, j);
            }
            assert(bitmap_get(bitmap, &binfo, 0) == false);

            /*
             * Iteratively set bits starting at the beginning, and
             * verify that bitmap_sfu() looks past them.
             */
            for (j = 1; j < i; j++) {
                bitmap_set(bitmap, &binfo, j - 1);
                assert(bitmap_sfu(bitmap, &binfo) == j);
                bitmap_unset(bitmap, &binfo, j);
            }
            assert(bitmap_sfu(bitmap, &binfo) == i - 1);
            assert(bitmap_full(bitmap, &binfo));
            free(bitmap);
        }
    }
}
Example #9
TEST_END

static void
test_bitmap_unset_body(const bitmap_info_t *binfo, size_t nbits)
{
	size_t i;
	bitmap_t *bitmap = (bitmap_t *)malloc(bitmap_size(binfo));
	assert_ptr_not_null(bitmap, "Unexpected malloc() failure");
	bitmap_init(bitmap, binfo);

	for (i = 0; i < nbits; i++)
		bitmap_set(bitmap, binfo, i);
	assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
	for (i = 0; i < nbits; i++)
		bitmap_unset(bitmap, binfo, i);
	for (i = 0; i < nbits; i++)
		bitmap_set(bitmap, binfo, i);
	assert_true(bitmap_full(bitmap, binfo), "All bits should be set");
	free(bitmap);
}
Example #10
static u32 mdss_mdp_smp_mmb_reserve(struct mdss_mdp_pipe_smp_map *smp_map,
                                    size_t n, bool force_alloc)
{
    u32 i, mmb;
    u32 fixed_cnt = bitmap_weight(smp_map->fixed, SMP_MB_CNT);
    struct mdss_data_type *mdata = mdss_mdp_get_mdata();

    if (n <= fixed_cnt)
        return fixed_cnt;
    else
        n -= fixed_cnt;

    i = bitmap_weight(smp_map->allocated, SMP_MB_CNT);

    /*
     * SMP programming is not double buffered. Fail any request that
     * calls for a change in smp configuration (addition/removal of
     * smp blocks), so that the fallback solution happens.
     */
#if defined(CONFIG_ARCH_MSM8226) || defined(CONFIG_ARCH_MSM8974)
    if (i != 0 && n != i && !force_alloc) {
        pr_debug("Can't change mmb config, num_blks: %zu alloc: %d\n",
                 n, i);
        pr_debug("Can't change mmb configuration in set call\n");
        return 0;
    }
#else
    if (i != 0 && n != i) {
        pr_debug("Can't change mmb config, num_blks: %zu alloc: %d\n",
                 n, i);
        pr_debug("Can't change mmb configuration in set call\n");
        return 0;
    }
#endif
    /*
     * Clear previous SMP reservations and reserve according to the
     * latest configuration
     */
    mdss_mdp_smp_mmb_free(smp_map->reserved, false);

    /* Reserve mmb blocks */
    for (; i < n; i++) {
        if (bitmap_full(mdata->mmb_alloc_map, SMP_MB_CNT))
            break;

        mmb = find_first_zero_bit(mdata->mmb_alloc_map, SMP_MB_CNT);
        set_bit(mmb, smp_map->reserved);
        set_bit(mmb, mdata->mmb_alloc_map);
    }

    return i + fixed_cnt;
}
Example #11
static u32 mdss_mdp_smp_mmb_reserve(unsigned long *smp, size_t n)
{
	u32 i, mmb;

	/* reserve more blocks if needed, but can't free mmb at this point */
	for (i = bitmap_weight(smp, SMP_MB_CNT); i < n; i++) {
		if (bitmap_full(mdss_mdp_smp_mmb_pool, SMP_MB_CNT))
			break;

		mmb = find_first_zero_bit(mdss_mdp_smp_mmb_pool, SMP_MB_CNT);
		set_bit(mmb, smp);
		set_bit(mmb, mdss_mdp_smp_mmb_pool);
	}
	return i;
}
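
This is the minimal form of the reservation loop that every mdss_mdp_smp_mmb_reserve() variant on this page shares: start from the number of blocks already held, then claim zero bits out of the global pool until n blocks are held or the pool is exhausted, and return the count actually held. A userspace model with bool arrays standing in for the kernel bitmaps (mmb_reserve and its parameter names are hypothetical):

#include <stdbool.h>
#include <stddef.h>

#define SMP_MB_CNT 22	/* stand-in for the real block count */

/* Hypothetical model of the mmb reservation loop: claim free pool
 * slots until n blocks are held or the pool is full. Returns the
 * number of blocks held afterwards; callers treat < n as failure. */
static size_t
mmb_reserve(bool *mine, bool *pool, size_t held, size_t n)
{
	size_t i, mmb;

	for (i = held; i < n; i++) {
		/* find_first_zero_bit() equivalent */
		for (mmb = 0; mmb < SMP_MB_CNT && pool[mmb]; mmb++)
			;
		if (mmb == SMP_MB_CNT)	/* bitmap_full() equivalent */
			break;
		mine[mmb] = true;	/* set_bit() in the reserved map */
		pool[mmb] = true;	/* set_bit() in the global pool */
	}
	return i;
}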
Example #12
File: alloc.c Project: 020gzh/linux
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	mutex_lock(&dev->priv.pgdir_mutex);

	__set_bit(db->index, db->u.pgdir->bitmap);

	if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&dev->priv.pgdir_mutex);
}
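
mlx5 uses the inverse convention of the test code elsewhere on this page: in pgdir->bitmap a set bit means the doorbell slot is free, so bitmap_full() firing means every slot has been returned and the shared DMA page itself can be released. A self-contained model of that lifecycle (db_free, struct pgdir, and SLOTS_PER_PAGE are hypothetical stand-ins):

#include <stdint.h>
#include <stdlib.h>

#define SLOTS_PER_PAGE 32	/* stand-in for MLX5_DB_PER_PAGE */
#define FULL_MASK ((1ULL << SLOTS_PER_PAGE) - 1)

struct pgdir {
	uint64_t bitmap;	/* set bit => slot free */
	void *page;		/* shared DMA page in the real code */
};

/* Hypothetical model of mlx5_db_free(): mark the slot free, then
 * release the whole page once every slot has been returned. */
static void
db_free(struct pgdir *pd, unsigned int index)
{
	pd->bitmap |= 1ULL << index;
	if ((pd->bitmap & FULL_MASK) == FULL_MASK) {	/* bitmap_full() */
		free(pd->page);
		pd->page = NULL;
	}
}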
Example #13
JEMALLOC_INLINE_C void *
arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab,
    const arena_bin_info_t *bin_info)
{
	void *ret;
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	size_t regind;

	assert(slab_data->nfree > 0);
	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));

	regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)extent_addr_get(slab) +
	    (uintptr_t)(bin_info->reg_size * regind));
	slab_data->nfree--;
	return (ret);
}
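
bitmap_sfu() does double duty here: it finds a free region and claims it in one step, and the returned index is all that is needed to locate the region inside the slab. The address arithmetic in isolation (region_addr is a hypothetical helper):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical model of the address computation in
 * arena_slab_reg_alloc(): region regind of a slab whose regions are
 * reg_size bytes each lives at slab_base + reg_size * regind. */
static void *
region_addr(void *slab_base, size_t reg_size, size_t regind)
{
	return (void *)((uintptr_t)slab_base +
	    (uintptr_t)(reg_size * regind));
}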
Example #14
static u32 mdss_mdp_smp_mmb_reserve(struct mdss_mdp_pipe *pipe,
	struct mdss_mdp_pipe_smp_map *smp_map, size_t n)
{
	u32 i, mmb;
	u32 fixed_cnt = bitmap_weight(smp_map->fixed, SMP_MB_CNT);
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (n <= fixed_cnt)
		return fixed_cnt;
	else
		n -= fixed_cnt;

	i = bitmap_weight(smp_map->allocated, SMP_MB_CNT);

	/*
	 * SMP programming is not double buffered. Fail any request that
	 * calls for a change in smp configuration (addition/removal of
	 * smp blocks), so that the fallback solution happens.
	 */
	if (pipe->src_fmt->is_yuv || (pipe->flags & MDP_BACKEND_COMPOSITION)) {
		if (i != 0 && n != i) {
			pr_debug("Can't change mmb configuration in set call\n");
			return 0;
		}
	}

	/*
	 * Clear previous SMP reservations and reserve according to the
	 * latest configuration
	 */
	mdss_mdp_smp_mmb_free(smp_map->reserved, false);

	/* reserve more blocks if needed, but can't free mmb at this point */
	for (; i < n; i++) {
		if (bitmap_full(mdata->mmb_alloc_map, SMP_MB_CNT))
			break;

		mmb = find_first_zero_bit(mdata->mmb_alloc_map, SMP_MB_CNT);
		set_bit(mmb, smp_map->reserved);
		set_bit(mmb, mdata->mmb_alloc_map);
	}

	return i + fixed_cnt;
}
Example #15
static void
test_bitmap_set(void)
{
	size_t i;

	for (i = 1; i <= MAXBITS; i++) {
		bitmap_info_t binfo;
		bitmap_info_init(&binfo, i);
		{
			size_t j;
			bitmap_t bitmap[bitmap_info_ngroups(&binfo)];
			bitmap_init(bitmap, &binfo);

			for (j = 0; j < i; j++)
				bitmap_set(bitmap, &binfo, j);
			assert(bitmap_full(bitmap, &binfo));
		}
	}
}
Example #16
bool msm_mpm_irqs_detectable(bool from_idle)
{
	unsigned long *apps_irq_bitmap = from_idle ?
			msm_mpm_enabled_apps_irqs : msm_mpm_wake_apps_irqs;

	if (MSM_MPM_DEBUG_NON_DETECTABLE_IRQ & msm_mpm_debug_mask) {
		static char buf[DIV_ROUND_UP(MSM_MPM_NR_APPS_IRQS, 32)*9+1];

		bitmap_scnprintf(buf, sizeof(buf), apps_irq_bitmap,
				MSM_MPM_NR_APPS_IRQS);
		buf[sizeof(buf) - 1] = '\0';

		pr_info("%s: cannot monitor %s\n", __func__, buf);
	}

	if (from_idle &&
	    !bitmap_full(msm_mpm_idle_apps_irqs, MSM_MPM_NR_APPS_IRQS))
		return msm_mpm_irqs_idle_ignore();
	else
		return (bool)__bitmap_empty(apps_irq_bitmap, MSM_MPM_NR_APPS_IRQS);
}
Example #17
static u32 mdss_mdp_smp_mmb_reserve(struct mdss_mdp_pipe_smp_map *smp_map,
	size_t n)
{
	u32 i, mmb;
	u32 fixed_cnt = bitmap_weight(smp_map->fixed, SMP_MB_CNT);
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (n <= fixed_cnt)
		return fixed_cnt;
	else
		n -= fixed_cnt;

	/* reserve more blocks if needed, but can't free mmb at this point */
	for (i = bitmap_weight(smp_map->allocated, SMP_MB_CNT); i < n; i++) {
		if (bitmap_full(mdata->mmb_alloc_map, SMP_MB_CNT))
			break;

		mmb = find_first_zero_bit(mdata->mmb_alloc_map, SMP_MB_CNT);
		set_bit(mmb, smp_map->reserved);
		set_bit(mmb, mdata->mmb_alloc_map);
	}

	return i + fixed_cnt;
}
Example #18
/*
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @block: the block from which to migrate live pages
 *
 * Description:
 *   GC algorithms may call this function to migrate remaining live
 *   pages off the block prior to erasing it. This function blocks
 *   further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct request_queue *q = rrpc->dev->q;
	struct rrpc_rev_addr *rev;
	struct nvm_rq *rqd;
	struct bio *bio;
	struct page *page;
	int slot;
	int nr_sec_per_blk = rrpc->dev->sec_per_blk;
	u64 phys_addr;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
		return 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		pr_err("nvm: could not alloc bio to gc\n");
		return -ENOMEM;
	}

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
	if (!page) {
		bio_put(bio);
		return -ENOMEM;
	}

	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					    nr_sec_per_blk)) < nr_sec_per_blk) {

		/* Lock laddr */
		phys_addr = rblk->parent->id * nr_sec_per_blk + slot;

try:
		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);
			continue;
		}

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			spin_unlock(&rrpc->rev_lock);
			schedule();
			goto try;
		}

		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_rw = READ;
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);
		if (bio->bi_error) {
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}

		bio_reset(bio);
		reinit_completion(&wait);

		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio->bi_rw = WRITE;
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		/* turn the command around and write the data back to a new
		 * address
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);
		if (bio->bi_error)
			goto finished;

		bio_reset(bio);
	}

finished:
	mempool_free(page, rrpc->page_pool);
	bio_put(bio);

	if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");
		return -EIO;
	}

	return 0;
}

static void rrpc_block_gc(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct nvm_dev *dev = rrpc->dev;
	struct nvm_lun *lun = rblk->parent->lun;
	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);

	if (rrpc_move_valid_pages(rrpc, rblk))
		goto put_back;

	if (nvm_erase_blk(dev, rblk->parent))
		goto put_back;

	rrpc_put_blk(rrpc, rblk);

	return;

put_back:
	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);
}

/* the block with the highest number of invalid pages will be at the
 * beginning of the list
 */
static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
							struct rrpc_block *rb)
{
	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
		return ra;

	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
}

/* linearly find the block with the highest number of invalid pages
 * requires lun->lock
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblock, *max;

	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblock, prio_list, prio)
		max = rblock_max_invalid(max, rblock);

	return max;
}

static void rrpc_lun_gc(struct work_struct *work)
{
	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_lun *lun = rlun->parent;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&rlun->lock);
	while (nr_blocks_need > lun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblock = block_prio_find_max(rlun);
		struct nvm_block *block = rblock->parent;

		if (!rblock->nr_invalid_pages)
			break;

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		list_del_init(&rblock->prio);

		BUG_ON(!block_is_full(rrpc, rblock));

		pr_debug("rrpc: selected block '%lu' for GC\n", block->id);

		gcb->rrpc = rrpc;
		gcb->rblk = rblock;
		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

		nr_blocks_need--;
	}
	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
}

static void rrpc_gc_queue(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct nvm_lun *lun = rblk->parent->lun;
	struct nvm_block *blk = rblk->parent;
	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	spin_lock(&lun->lock);
	lun->nr_open_blocks--;
	lun->nr_closed_blocks++;
	blk->state &= ~NVM_BLK_ST_OPEN;
	blk->state |= NVM_BLK_ST_CLOSED;
	list_move_tail(&rblk->list, &rlun->closed_list);
	spin_unlock(&lun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
							rblk->parent->id);
}

static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,
};

static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
	unsigned int i;
	struct rrpc_lun *rlun, *max_free;

	if (!is_gc)
		return get_next_lun(rrpc);

	/* during GC, we don't care about RR, instead we want to make
	 * sure that we maintain evenness between the block luns.
	 */
	max_free = &rrpc->luns[0];
	/* prevent GC-ing lun from devouring pages of a lun with
	 * little free blocks. We don't take the lock as we only need an
	 * estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->parent->nr_free_blocks >
					max_free->parent->nr_free_blocks)
			max_free = rlun;
	}

	return max_free;
}
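
Stepping back from the I/O details, rrpc_move_valid_pages() above is driven entirely by the invalid_pages bitmap: clear bits mark sectors that still hold live data, each pass migrates the first such sector (after which it ends up marked invalid), and bitmap_full() serves as both the early-out and the final success check. A miniature model of that control flow (gc_block and its callback are hypothetical):

#include <stdbool.h>

#define SEC_PER_BLK 8	/* stand-in for dev->sec_per_blk */

/* Hypothetical model of the GC driver loop: repeatedly pick the first
 * still-live (clear) sector and migrate it elsewhere, marking it
 * invalid on success. Returns true once no live sectors remain. */
static bool
gc_block(bool *invalid, bool (*migrate)(int slot))
{
	int slot;

	for (;;) {
		/* find_first_zero_bit() equivalent */
		for (slot = 0; slot < SEC_PER_BLK && invalid[slot]; slot++)
			;
		if (slot == SEC_PER_BLK)
			return true;	/* bitmap_full(): block is clean */
		if (!migrate(slot))
			return false;	/* mirrors the -EIO path above */
		invalid[slot] = true;
	}
}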
Example #19
/**
 * media_entity_pipeline_start - Mark a pipeline as streaming
 * @entity: Starting entity
 * @pipe: Media pipeline to be assigned to all entities in the pipeline.
 *
 * Mark all entities connected to a given entity through enabled links, either
 * directly or indirectly, as streaming. The given pipeline object is assigned to
 * every entity in the pipeline and stored in the media_entity pipe field.
 *
 * Calls to this function can be nested, in which case the same number of
 * media_entity_pipeline_stop() calls will be required to stop streaming. The
 * pipeline pointer must be identical for all nested calls to
 * media_entity_pipeline_start().
 */
__must_check int media_entity_pipeline_start(struct media_entity *entity,
					     struct media_pipeline *pipe)
{
	struct media_device *mdev = entity->parent;
	struct media_entity_graph graph;
	struct media_entity *entity_err = entity;
	int ret;

	mutex_lock(&mdev->graph_mutex);

	media_entity_graph_walk_start(&graph, entity);

	while ((entity = media_entity_graph_walk_next(&graph))) {
		DECLARE_BITMAP(active, entity->num_pads);
		DECLARE_BITMAP(has_no_links, entity->num_pads);
		unsigned int i;

		entity->stream_count++;
		WARN_ON(entity->pipe && entity->pipe != pipe);
		entity->pipe = pipe;

		/* Already streaming --- no need to check. */
		if (entity->stream_count > 1)
			continue;

		if (!entity->ops || !entity->ops->link_validate)
			continue;

		bitmap_zero(active, entity->num_pads);
		bitmap_fill(has_no_links, entity->num_pads);

		for (i = 0; i < entity->num_links; i++) {
			struct media_link *link = &entity->links[i];
			struct media_pad *pad = link->sink->entity == entity
						? link->sink : link->source;

			/* Mark that a pad is connected by a link. */
			bitmap_clear(has_no_links, pad->index, 1);

			/*
			 * Pads that either do not need to connect or
			 * are connected through an enabled link are
			 * fine.
			 */
			if (!(pad->flags & MEDIA_PAD_FL_MUST_CONNECT) ||
			    link->flags & MEDIA_LNK_FL_ENABLED)
				bitmap_set(active, pad->index, 1);

			/*
			 * Link validation will only take place for
			 * sink ends of the link that are enabled.
			 */
			if (link->sink != pad ||
			    !(link->flags & MEDIA_LNK_FL_ENABLED))
				continue;

			ret = entity->ops->link_validate(link);
			if (ret < 0 && ret != -ENOIOCTLCMD)
				goto error;
		}

		/* Either no links or validated links are fine. */
		bitmap_or(active, active, has_no_links, entity->num_pads);

		if (!bitmap_full(active, entity->num_pads)) {
			ret = -EPIPE;
			goto error;
		}
	}

	mutex_unlock(&mdev->graph_mutex);

	return 0;

error:
	/*
	 * Link validation on graph failed. We revert what we did and
	 * return the error.
	 */
	media_entity_graph_walk_start(&graph, entity_err);

	while ((entity_err = media_entity_graph_walk_next(&graph))) {
		entity_err->stream_count--;
		if (entity_err->stream_count == 0)
			entity_err->pipe = NULL;

		/*
		 * We haven't increased stream_count further than this
		 * so we quit here.
		 */
		if (entity_err == entity)
			break;
	}

	mutex_unlock(&mdev->graph_mutex);

	return ret;
}
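
The three bitmaps above reduce to a simple per-pad predicate: a pad passes if it either has no links at all (has_no_links) or is covered by at least one enabled or optional link (active), and the walk fails with -EPIPE unless every pad passes, which is exactly the bitmap_or() followed by bitmap_full(). In miniature, with bool arrays (pads_valid is a hypothetical name):

#include <stdbool.h>

/* Hypothetical miniature of the pad-validation logic: an entity
 * validates only if every pad is either link-less or covered by an
 * acceptable link, i.e. bitmap_full(active | has_no_links). */
static bool
pads_valid(const bool *active, const bool *has_no_links,
	   unsigned int num_pads)
{
	unsigned int i;

	for (i = 0; i < num_pads; i++) {
		if (!(active[i] || has_no_links[i]))
			return false;	/* the -EPIPE case */
	}
	return true;
}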
Example #20
static void balloon_inflate_page(VirtIOBalloon *balloon,
                                 MemoryRegion *mr, hwaddr offset)
{
    void *addr = memory_region_get_ram_ptr(mr) + offset;
    RAMBlock *rb;
    size_t rb_page_size;
    int subpages;
    ram_addr_t ram_offset, host_page_base;

    /* XXX is there a better way to get to the RAMBlock than via a
     * host address? */
    rb = qemu_ram_block_from_host(addr, false, &ram_offset);
    rb_page_size = qemu_ram_pagesize(rb);
    host_page_base = ram_offset & ~(rb_page_size - 1);

    if (rb_page_size == BALLOON_PAGE_SIZE) {
        /* Easy case */

        ram_block_discard_range(rb, ram_offset, rb_page_size);
        /* We ignore errors from ram_block_discard_range(), because it
         * has already reported them, and failing to discard a balloon
         * page is not fatal */
        return;
    }

    /* Hard case
     *
     * We've put a piece of a larger host page into the balloon - we
     * need to keep track until we have a whole host page to
     * discard
     */
    warn_report_once(
"Balloon used with backing page size > 4kiB, this may not be reliable");

    subpages = rb_page_size / BALLOON_PAGE_SIZE;

    if (balloon->pbp
        && (rb != balloon->pbp->rb
            || host_page_base != balloon->pbp->base)) {
        /* We've partially ballooned part of a host page, but now
         * we're trying to balloon part of a different one.  Too hard,
         * give up on the old partial page */
        g_free(balloon->pbp);
        balloon->pbp = NULL;
    }

    if (!balloon->pbp) {
        /* Starting on a new host page */
        size_t bitlen = BITS_TO_LONGS(subpages) * sizeof(unsigned long);
        balloon->pbp = g_malloc0(sizeof(PartiallyBalloonedPage) + bitlen);
        balloon->pbp->rb = rb;
        balloon->pbp->base = host_page_base;
    }

    bitmap_set(balloon->pbp->bitmap,
               (ram_offset - balloon->pbp->base) / BALLOON_PAGE_SIZE,
               subpages);

    if (bitmap_full(balloon->pbp->bitmap, subpages)) {
        /* We've accumulated a full host page, we can actually discard
         * it now */

        ram_block_discard_range(rb, balloon->pbp->base, rb_page_size);
        /* We ignore errors from ram_block_discard_range(), because it
         * has already reported them, and failing to discard a balloon
         * page is not fatal */

        g_free(balloon->pbp);
        balloon->pbp = NULL;
    }
}
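
When the host page is larger than BALLOON_PAGE_SIZE, the function above cannot discard anything immediately; it records each 4 KiB subpage in pbp->bitmap and only discards once bitmap_full() shows the whole host page is covered. A self-contained model of that accumulation (track_subpage, seen, and SUBPAGES are hypothetical stand-ins):

#include <stdbool.h>

#define SUBPAGES 16	/* stand-in for rb_page_size / BALLOON_PAGE_SIZE */

/* Hypothetical model: record one ballooned subpage and report whether
 * the whole host page has now been accumulated, at which point the
 * caller can discard the page and reset the tracker. */
static bool
track_subpage(bool *seen, unsigned int idx)
{
	unsigned int i;

	seen[idx] = true;	/* bitmap_set(pbp->bitmap, idx, 1) */
	for (i = 0; i < SUBPAGES; i++) {
		if (!seen[i])
			return false;
	}
	return true;		/* bitmap_full(pbp->bitmap, subpages) */
}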