Example #1
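rrpc_write_rq() from the Linux lightnvm rrpc target prepares a write request: non-GC I/O must first take the per-sector lock (or the bio is requeued), and the logical address is then mapped to a physical page. On success the request is filled with the device address and the hybrid-write opcode; a failed mapping releases the lock, kicks garbage collection to free up space, and asks the caller to requeue.
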
static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	struct rrpc_addr *p;
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);

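	/* GC writes bypass the per-sector lock; normal I/O must take it or be requeued */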
	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	p = rrpc_map_page(rrpc, laddr, is_gc);
	if (!p) {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		rrpc_gc_kick(rrpc);
		return NVM_IO_REQUEUE;
	}

	rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
	rqd->opcode = NVM_OP_HBWRITE;
	rrqd->addr = p;

	return NVM_IO_OK;
}
Example #2
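The matching read path: after the same locking step, the logical address is looked up in the in-memory translation map. If the entry has no backing block the sector was never written, so the request completes immediately with NVM_IO_DONE; otherwise the physical address and the hybrid-read opcode are filled in.
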
static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
							unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);
	struct rrpc_addr *gp;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

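	/* laddr is a sector_t (unsigned), so the lower-bound test is always true; the upper bound is the real guard */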
	BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
	gp = &rrpc->trans_map[laddr];

	if (gp->rblk) {
		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
	} else {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		return NVM_IO_DONE;
	}

	rqd->opcode = NVM_OP_HBREAD;
	rrqd->addr = gp;

	return NVM_IO_OK;
}
Example #3
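rrpc_submit_io() is the common entry for both paths: it rejects bios smaller than one device sector or larger than the maximum request size, runs the setup above, takes a reference on the bio, and hands the request to the media manager through nvm_submit_io().
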
static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	int err;
	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
	uint8_t nr_pages = rrpc_get_pages(bio);
	int bio_size = bio_sectors(bio) << 9;

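	/* bio_size is in bytes; it must cover at least one device sector and fit within a single request */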
	if (bio_size < rrpc->dev->sec_size)
		return NVM_IO_ERR;
	else if (bio_size > rrpc->dev->max_rq_size)
		return NVM_IO_ERR;

	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
	if (err)
		return err;

	bio_get(bio);
	rqd->bio = bio;
	rqd->ins = &rrpc->instance;
	rqd->nr_pages = nr_pages;
	rrq->flags = flags;

	err = nvm_submit_io(rrpc->dev, rqd);
	if (err) {
		pr_err("rrpc: I/O submission failed: %d\n", err);
		return NVM_IO_ERR;
	}

	return NVM_IO_OK;
}
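
The NVM_IO_* codes returned above are consumed by the driver's make_request entry point. Below is a condensed, illustrative sketch of that dispatch, modeled on the upstream rrpc_make_rq(); the bio_lock, requeue_bios, kgc_wq, and ws_requeue fields are assumed from that driver, and discard handling and error paths are abbreviated:

static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
{
	struct rrpc *rrpc = q->queuedata;
	struct nvm_rq *rqd;

	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
	memset(rqd, 0, sizeof(struct nvm_rq));

	switch (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE)) {
	case NVM_IO_OK:
		/* the in-flight request now owns rqd; rrpc_end_io frees it */
		return BLK_QC_T_NONE;
	case NVM_IO_DONE:
		/* e.g. a read of a never-written sector */
		bio_endio(bio);
		break;
	case NVM_IO_REQUEUE:
		/* sector locked or no free page: park the bio and let GC run */
		spin_lock(&rrpc->bio_lock);
		bio_list_add(&rrpc->requeue_bios, bio);
		queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
		spin_unlock(&rrpc->bio_lock);
		break;
	default: /* NVM_IO_ERR */
		bio_io_error(bio);
		break;
	}

	mempool_free(rqd, rrpc->rq_pool);
	return BLK_QC_T_NONE;
}
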
Example #4
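The completion callback: write accounting runs first, then requests issued on behalf of garbage collection return early and skip the teardown below. For normal I/O the per-sector lock is released, the bio reference taken at submission is dropped, the per-request DMA buffers are freed, and the nvm_rq goes back to its mempool. laddr subtracts npages because the bio's sector iterator has already advanced past the completed request.
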
static int rrpc_end_io(struct nvm_rq *rqd, int error)
{
	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	uint8_t npages = rqd->nr_pages;
	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

	if (bio_data_dir(rqd->bio) == WRITE)
		rrpc_end_io_write(rrpc, rrqd, laddr, npages);

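	/* GC-issued requests stop here; the unlock and teardown below apply only to normal I/O */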
	if (rrqd->flags & NVM_IOTYPE_GC)
		return 0;

	rrpc_unlock_rq(rrpc, rqd);
	bio_put(rqd->bio);

	if (npages > 1)
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
	if (rqd->metadata)
		nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);

	mempool_free(rqd, rrpc->rq_pool);

	return 0;
}
Example #5
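pblk_submit_rec(), from the Linux pblk target, is a work-queue handler that re-submits the sectors of a write that failed on the device: the failed sectors are counted from the ppa_status bitmap, a fresh bio is filled from the write ring buffer, and a recovery write request is set up and submitted. Any failure along the way releases both the bio and the request.
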
void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	int max_secs = nvm_max_phys_sects(dev);
	struct bio *bio;
	unsigned int nr_rec_secs;
	unsigned int pgs_read;
	int ret;

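	/* each set bit in ppa_status marks a sector that failed and must be re-written */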
	nr_rec_secs = bitmap_weight((unsigned long int *)&rqd->ppa_status,
								max_secs);

	bio = bio_alloc(GFP_KERNEL, nr_rec_secs);
	if (!bio) {
		pr_err("pblk: not able to create recovery bio\n");
		return;
	}

	bio->bi_iter.bi_sector = 0;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;
	rqd->nr_ppas = nr_rec_secs;

	pgs_read = pblk_rb_read_to_bio_list(&pblk->rwb, bio, &recovery->failed,
								nr_rec_secs);
	if (pgs_read != nr_rec_secs) {
		pr_err("pblk: could not read recovery entries\n");
		goto err;
	}

	if (pblk_setup_w_rec_rq(pblk, rqd, c_ctx)) {
		pr_err("pblk: could not setup recovery request\n");
		goto err;
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_rec_secs, &pblk->recov_writes);
#endif

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		goto err;
	}

	mempool_free(recovery, pblk->rec_pool);
	return;

err:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, WRITE);
}
Example #6
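The companion setup routine splits a partially completed write context in two: the completion bitmap is shifted past the comp entries that already completed, and the valid/padded entry counts are divided between the original context (the finished part) and the recovery context (the part that still has to be re-written).
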
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
			struct pblk_rec_ctx *recovery, u64 *comp_bits,
			unsigned int comp)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	int max_secs = nvm_max_phys_sects(dev);
	struct nvm_rq *rec_rqd;
	struct pblk_c_ctx *rec_ctx;
	int nr_entries = c_ctx->nr_valid + c_ctx->nr_padded;

	rec_rqd = pblk_alloc_rqd(pblk, WRITE);
	if (IS_ERR(rec_rqd)) {
		pr_err("pblk: could not create recovery req.\n");
		return -ENOMEM;
	}

	rec_ctx = nvm_rq_to_pdu(rec_rqd);

	/* Copy completion bitmap, but exclude the first X completed entries */
	bitmap_shift_right((unsigned long int *)&rec_rqd->ppa_status,
				(unsigned long int *)comp_bits,
				comp, max_secs);

	/* Save the context for the entries that need to be re-written and
	 * update current context with the completed entries.
	 */
	rec_ctx->sentry = pblk_rb_wrap_pos(&pblk->rwb, c_ctx->sentry + comp);
	if (comp >= c_ctx->nr_valid) {
		rec_ctx->nr_valid = 0;
		rec_ctx->nr_padded = nr_entries - comp;

		c_ctx->nr_padded = comp - c_ctx->nr_valid;
	} else {
		rec_ctx->nr_valid = c_ctx->nr_valid - comp;
		rec_ctx->nr_padded = c_ctx->nr_padded;

		c_ctx->nr_valid = comp;
		c_ctx->nr_padded = 0;
	}

	recovery->rqd = rec_rqd;
	recovery->pblk = pblk;

	return 0;
}