static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
						sector_t laddr, uint8_t npages)
{
	struct rrpc_addr *p;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	int cmnt_size, i;

	for (i = 0; i < npages; i++) {
		p = &rrpc->trans_map[laddr + i];
		rblk = p->rblk;
		lun = rblk->parent->lun;

		/* Account for the completed page write; the write that
		 * fills the block also closes it.
		 */
		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
			struct nvm_block *blk = rblk->parent;
			struct rrpc_lun *rlun = rblk->rlun;

			/* The block is fully written: mark it closed and
			 * move it from the LUN's open list to its closed
			 * list before handing it to the garbage collector.
			 */
			spin_lock(&lun->lock);
			lun->nr_open_blocks--;
			lun->nr_closed_blocks++;
			blk->state &= ~NVM_BLK_ST_OPEN;
			blk->state |= NVM_BLK_ST_CLOSED;
			list_move_tail(&rblk->list, &rlun->closed_list);
			spin_unlock(&lun->lock);

			rrpc_run_gc(rrpc, rblk);
		}
	}
}
static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
						sector_t laddr, uint8_t npages)
{
	struct rrpc_addr *p;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	int cmnt_size, i;

	for (i = 0; i < npages; i++) {
		p = &rrpc->trans_map[laddr + i];
		rblk = p->rblk;
		lun = rblk->parent->lun;

		/* Once every sector in the block has been written,
		 * schedule the block for garbage collection.
		 */
		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
		if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
			rrpc_run_gc(rrpc, rblk);
	}
}