Example #1
File: rrpc.c Project: cherubzhao/linux
/* Simple round-robin logical-to-physical address translation.
 *
 * Retrieve the mapping using the active append point. Then update the ap for
 * the next write to the disk.
 *
 * Returns rrpc_addr with the physical address and block. Remember to return it
 * to rrpc->addr_cache when the request is finished.
 */
static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
								int is_gc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	u64 paddr;

	rlun = rrpc_get_lun_rr(rrpc, is_gc);
	lun = rlun->parent;

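	/* Non-GC writes bail out early while free blocks are scarce,
	 * preserving a reserve so garbage collection can still allocate. */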
	if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
		return NULL;

	spin_lock(&rlun->lock);

	rblk = rlun->cur;
retry:
	paddr = rrpc_alloc_addr(rrpc, rblk);

	if (paddr == ADDR_EMPTY) {
		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (rblk) {
			rrpc_set_lun_cur(rlun, rblk);
			goto retry;
		}

		if (is_gc) {
			/* retry from emergency gc block */
			paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
			if (paddr == ADDR_EMPTY) {
				rblk = rrpc_get_blk(rrpc, rlun, 1);
				if (!rblk) {
					pr_err("rrpc: no more blocks");
					goto err;
				}

				rlun->gc_cur = rblk;
				paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
			}
			rblk = rlun->gc_cur;
		}
	}

	spin_unlock(&rlun->lock);
	return rrpc_update_map(rrpc, laddr, rblk, paddr);
err:
	spin_unlock(&rlun->lock);
	return NULL;
}
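
The header comment implies a call pattern on the write path: map the logical sector, perform the physical write, and hand the entry back when the request completes. Below is a minimal caller sketch for illustration; it is not taken from rrpc.c, and the wrapper name, error code, and elided write step are hypothetical, assuming only the rrpc_map_page() signature shown above:

/* Hypothetical caller (not from rrpc.c): map one logical sector on the
 * normal write path and report failure when no block is available. */
static int example_setup_write(struct rrpc *rrpc, sector_t laddr)
{
	struct rrpc_addr *p;

	/* is_gc = 0: a regular host write, subject to the free-block
	 * reserve check at the top of rrpc_map_page(). */
	p = rrpc_map_page(rrpc, laddr, 0);
	if (!p)
		return -ENOSPC;	/* hypothetical choice; caller may trigger GC */

	/* ... issue the physical write to the returned address and block,
	 * then return the entry to rrpc->addr_cache when the request is
	 * finished, as the header comment requires ... */
	return 0;
}

Passing is_gc = 1 skips the reserve check, which is what lets garbage collection keep writing (and thus freeing blocks) even when regular writes are already being refused.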
Example #2
File: rrpc.c Project: AshishNamdev/linux
/* Map a logical address to a physical page. The mapping implements a
 * round-robin approach and allocates a page from the next available lun.
 *
 * Returns a ppa_addr holding the physical address. The returned address is
 * set to ADDR_EMPTY if no blocks in the next rlun are available.
 */
static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
								int is_gc)
{
	struct nvm_tgt_dev *tgt_dev = rrpc->dev;
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk, **cur_rblk;
	struct rrpc_addr *p;
	struct ppa_addr ppa;
	u64 paddr;
	int gc_force = 0;

	ppa.ppa = ADDR_EMPTY;
	rlun = rrpc_get_lun_rr(rrpc, is_gc);

	if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
		return ppa;

	/*
	 * Page allocation steps:
	 * 1. Try to allocate a new page from the current rblk.
	 * 2a. If that succeeds, proceed to map it in and return.
	 * 2b. If it fails, first try to allocate a new block from the media
	 *     manager, and then retry step 1. Retry until the normal block
	 *     pool is exhausted.
	 * 3. If the pool is exhausted and the garbage collector is requesting
	 *    the block, fall back to the reserved block and retry step 1.
	 *    If this fails as well, or the request did not come from GC,
	 *    report that no block could be retrieved and let the caller
	 *    handle further processing.
	 */

	spin_lock(&rlun->lock);
	cur_rblk = &rlun->cur;
	rblk = rlun->cur;
retry:
	paddr = rrpc_alloc_addr(rrpc, rblk);

	if (paddr != ADDR_EMPTY)
		goto done;

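	/* The current block is full; switch to the next queued write block,
	 * if one is available, and retry the page allocation. */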
	if (!list_empty(&rlun->wblk_list)) {
new_blk:
		rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
									prio);
		rrpc_set_lun_cur(rlun, rblk, cur_rblk);
		list_del(&rblk->prio);
		goto retry;
	}