/*
 * Completion handler for the no-queue locking LLM scheduler.
 *
 * Releases the per-parallel-unit semaphore that was taken when the
 * request was issued, folds the request's elapsed device time into the
 * performance counters, and hands the finished request back to the
 * upper (HLM) layer.
 *
 * @param bdi      driver-wide context; holds the LLM private data and
 *                 the HLM interface used to complete the request
 * @param llm_req  the low-level request that has just finished
 */
void llm_noq_lock_end_req (bdbm_drv_info_t* bdi, bdbm_llm_req_t* llm_req)
{
	struct bdbm_llm_noq_lock_private* priv =
		(struct bdbm_llm_noq_lock_private*)BDBM_LLM_PRIV (bdi);

	/* release the lock on the parallel unit that served this request */
	bdbm_sema_unlock (&priv->punit_locks[llm_req->phyaddr.punit_id]);

	/* account for the time spent in the device and bump the op counter */
	pmu_update_tot (bdi, llm_req);
	pmu_inc (bdi, llm_req);

	/* notify the HLM layer that the request is done */
	bdi->ptr_hlm_inf->end_req (bdi, llm_req);
}
/*
 * Completion handler for the read-priority-queue (rmq) LLM scheduler.
 *
 * Dispatches on the request type:
 *  - REQTYPE_RMW_READ: the read half of a read-modify-write finished.
 *    The punit lock held for the read is released, the request is
 *    retargeted at its write address (phyaddr_w) and retyped to
 *    REQTYPE_RMW_WRITE, then re-queued (or just removed under the
 *    QUICK_FIX_FOR_RWM build) and the LLM thread is woken to issue it.
 *  - all other terminal types: the request is removed from the queue,
 *    its punit lock released, PMU counters updated, and the request is
 *    handed back to the HLM layer via end_req.
 *
 * @param bdi  driver-wide context holding the LLM private data
 * @param r    the low-level request that has just finished
 */
void llm_rmq_end_req (h4h_drv_info_t* bdi, h4h_llm_req_t* r)
{
	struct h4h_llm_rmq_private* p = (struct h4h_llm_rmq_private*)H4H_LLM_PRIV(bdi);
	h4h_rd_prior_queue_item_t* qitem = (h4h_rd_prior_queue_item_t*)r->ptr_qitem;

	switch (r->req_type) {
	case REQTYPE_RMW_READ:
		/* release the punit lock for the READ address; this must happen
		 * before r->phyaddr is repointed at the write address below */
		h4h_sema_unlock (&p->punit_locks[r->phyaddr->punit_id]);

		/* change its type to WRITE if req_type is RMW */
		r->phyaddr = &r->phyaddr_w;
		r->req_type = REQTYPE_RMW_WRITE;

#ifdef QUICK_FIX_FOR_RWM
		/* quick-fix build: drop the item from the queue instead of
		 * moving it to the write punit's queue */
		h4h_rd_prior_queue_remove (p->q, qitem);
#else
		/* put it to Q again */
		if (h4h_rd_prior_queue_move (p->q, r->phyaddr->punit_id, qitem)) {
			h4h_msg ("h4h_prior_queue_enqueue failed");
			h4h_bug_on (1);
		}
#endif

		/* NOTE(review): only pmu_inc here — pmu_update_tot is deferred
		 * until the write half completes; presumably intentional */
		pmu_inc (bdi, r);

		/* wake up thread if it sleeps */
		h4h_thread_wakeup (p->llm_thread);
		break;

	case REQTYPE_META_WRITE:
	case REQTYPE_META_READ:
	case REQTYPE_READ:
	case REQTYPE_READ_DUMMY:
	case REQTYPE_WRITE:
	case REQTYPE_RMW_WRITE:
	case REQTYPE_GC_READ:
	case REQTYPE_GC_WRITE:
	case REQTYPE_GC_ERASE:
	case REQTYPE_TRIM:
		/* the request is fully done: take it off the priority queue */
		h4h_rd_prior_queue_remove (p->q, qitem);

		/* if (r->req_type == REQTYPE_GC_WRITE && (int64_t)r->lpa == -2LL) {
			h4h_msg ("done - writing mapping pages: llu phy: %lld %lld %lld %lld, oob: %lld %lld",
				r->phyaddr->channel_no,
				r->phyaddr->chip_no,
				r->phyaddr->block_no,
				r->phyaddr->page_no,
				((uint64_t*)r->ptr_oob)[0],
				((uint64_t*)r->ptr_oob)[1]);
		} */

		/* complete a lock */
		h4h_sema_unlock (&p->punit_locks[r->phyaddr->punit_id]);

		/* update the elapsed time taken by NAND devices */
		pmu_update_tot (bdi, r);
		pmu_inc (bdi, r);

		/* finish a request */
		bdi->ptr_hlm_inf->end_req (bdi, r);

#if defined(ENABLE_SEQ_DBG)
		/* sequential-debug build: allow the next request to proceed */
		h4h_sema_unlock (&p->dbg_seq);
#endif
		break;

	default:
		h4h_error ("invalid req-type (%u)", r->req_type);
		break;
	}
}