/*
 * ghd_reset_notify() - register or cancel a bus-reset notification
 * callback for a target.
 *
 *	cccp		per-controller data
 *	gtgtp		per-target data the notification is tied to
 *	flag		SCSI_RESET_NOTIFY to register (gtgtp, callback, arg),
 *			SCSI_RESET_CANCEL to remove the matching registration
 *	callback/arg	function (and its argument) to invoke when the
 *			bus is reset
 *
 * Returns TRUE on success; FALSE for an unknown flag or when a cancel
 * request finds no matching registration.
 *
 * The reset-notify list is protected by ccc_reset_notify_mutex.
 */
int
ghd_reset_notify(ccc_t	*cccp,
		gtgt_t	*gtgtp,
		int	flag,
		void	(*callback)(caddr_t),
		caddr_t	arg)
{
	ghd_reset_notify_list_t *rnp;
	int rc = FALSE;

	switch (flag) {

	case SCSI_RESET_NOTIFY:
		/* add a new entry to this controller's notify list */
		rnp = (ghd_reset_notify_list_t *)kmem_zalloc(sizeof (*rnp),
		    KM_SLEEP);
		rnp->gtgtp = gtgtp;
		rnp->callback = callback;
		rnp->arg = arg;

		mutex_enter(&cccp->ccc_reset_notify_mutex);
		L2_add(&cccp->ccc_reset_notify_list, &rnp->l2_link,
		    (void *)rnp);
		mutex_exit(&cccp->ccc_reset_notify_mutex);

		rc = TRUE;
		break;

	case SCSI_RESET_CANCEL:
		/* find the matching entry, unlink it, and free it */
		mutex_enter(&cccp->ccc_reset_notify_mutex);
		for (rnp = (ghd_reset_notify_list_t *)
		    L2_next(&cccp->ccc_reset_notify_list);
		    rnp != NULL;
		    rnp = (ghd_reset_notify_list_t *)L2_next(&rnp->l2_link)) {
			if (rnp->gtgtp == gtgtp &&
			    rnp->callback == callback &&
			    rnp->arg == arg) {
				L2_delete(&rnp->l2_link);
				kmem_free(rnp, sizeof (*rnp));
				rc = TRUE;
				/*
				 * Must stop here: continuing the loop would
				 * evaluate L2_next(&rnp->l2_link) on the
				 * entry just freed (use-after-free).  Each
				 * (gtgtp, callback, arg) tuple is registered
				 * at most once, so nothing is missed.
				 */
				break;
			}
		}
		mutex_exit(&cccp->ccc_reset_notify_mutex);
		break;

	default:
		rc = FALSE;
		break;
	}

	return (rc);
}
/*
 * ghd_waitq_shuffle_up() - promote requests from a device's wait queue
 * onto the HBA's wait queue until the device's maxactive throttle (or
 * one of the other stopping conditions below) is hit.
 *
 * Caller must hold ccc_waitq_mutex.
 */
void
ghd_waitq_shuffle_up(ccc_t *cccp, gdev_t *gdevp)
{
	gcmd_t	*cmdp;

	ASSERT(mutex_owned(&cccp->ccc_waitq_mutex));

	GDBG_WAITQ(("ghd_waitq_shuffle_up: cccp 0x%p gdevp 0x%p N %ld "
	    "max %ld\n", cccp, gdevp, GDEV_NACTIVE(gdevp),
	    GDEV_MAXACTIVE(gdevp)));

	for (;;) {
		/*
		 * Device throttle: once maxactive requests are active on
		 * this device, no more may be promoted.
		 */
		if (GDEV_NACTIVE(gdevp) >= GDEV_MAXACTIVE(gdevp)) {
			GDBG_WAITQ(("ghd_waitq_shuffle_up: N>MAX gdevp 0x%p\n",
			    gdevp));
			break;
		}

		/*
		 * Single-thread requests while multiple target-driver
		 * instances share this device, since the different
		 * instances might carry conflicting maxactive throttles.
		 */
		if (gdevp->gd_ninstances > 1 && GDEV_NACTIVE(gdevp) > 0) {
			GDBG_WAITQ(("ghd_waitq_shuffle_up: multi gdevp 0x%p\n",
			    gdevp));
			break;
		}

		/*
		 * Take the topmost request off the device queue; an empty
		 * queue means we are done.
		 */
		cmdp = L2_remove_head(&GDEV_QHEAD(gdevp));
		if (cmdp == NULL) {
			GDBG_WAITQ(("ghd_waitq_shuffle_up: MT gdevp 0x%p\n",
			    gdevp));
			break;
		}

		/* append it to the HBA queue and account for it */
		L2_add(&GHBA_QHEAD(cccp), &cmdp->cmd_q, cmdp);
		GDEV_NACTIVE(gdevp)++;
		cmdp->cmd_waitq_level++;

		GDBG_WAITQ(("ghd_waitq_shuffle_up: gdevp 0x%p gcmdp 0x%p\n",
		    gdevp, cmdp));
	}
}
/*
 * ghd_transport() - queue a request for execution on the HBA.
 *
 *	cccp		per-controller data
 *	gcmdp		the request to run
 *	gtgtp		target the request is addressed to
 *	timeout		abort timeout passed to ghd_timer_start()
 *	polled		nonzero for FLAG_NOINTR-style polled operation:
 *			the function does not return until the request
 *			completes or times out
 *	intr_status	opaque per-HBA interrupt-status buffer, handed
 *			through to ghd_poll() in polled mode
 *
 * Always returns TRAN_ACCEPT.
 *
 * Locking: entered with neither ccc_hba_mutex nor ccc_waitq_mutex held.
 * In polled mode the HBA mutex is held across the whole operation so no
 * competing request can start; in interrupt mode the HBA mutex is only
 * acquired opportunistically via tryenter to avoid deadlock.
 */
int
ghd_transport(ccc_t	*cccp,
		gcmd_t	*gcmdp,
		gtgt_t	*gtgtp,
		ulong_t	timeout,
		int	polled,
		void	*intr_status)
{
	gdev_t	*gdevp = gtgtp->gt_gdevp;

	ASSERT(!mutex_owned(&cccp->ccc_hba_mutex));
	ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));

	if (polled) {
		/*
		 * Grab the HBA mutex so no other requests are started
		 * until after this one completes.
		 */
		mutex_enter(&cccp->ccc_hba_mutex);

		GDBG_START(("ghd_transport: polled"
		    " cccp 0x%p gdevp 0x%p gtgtp 0x%p gcmdp 0x%p\n",
		    (void *)cccp, (void *)gdevp, (void *)gtgtp,
		    (void *)gcmdp));

		/*
		 * Lock the doneq so no other thread flushes the Q.
		 */
		ghd_doneq_pollmode_enter(cccp);
	}
#if defined(GHD_DEBUG) || defined(__lint)
	else {
		GDBG_START(("ghd_transport: non-polled"
		    " cccp 0x%p gdevp 0x%p gtgtp 0x%p gcmdp 0x%p\n",
		    (void *)cccp, (void *)gdevp, (void *)gtgtp,
		    (void *)gcmdp));
	}
#endif

	/*
	 * add this request to the tail of the waitq
	 */
	gcmdp->cmd_waitq_level = 1;
	mutex_enter(&cccp->ccc_waitq_mutex);
	L2_add(&GDEV_QHEAD(gdevp), &gcmdp->cmd_q, gcmdp);

	/*
	 * Add this request to the packet timer active list and start its
	 * abort timer.
	 */
	gcmdp->cmd_state = GCMD_STATE_WAITQ;
	ghd_timer_start(cccp, gcmdp, timeout);

	/*
	 * Check the device wait queue throttle and perhaps move
	 * some requests to the end of the HBA wait queue.
	 */
	ghd_waitq_shuffle_up(cccp, gdevp);

	if (!polled) {
		/*
		 * See if the HBA mutex is available but use the
		 * tryenter so I don't deadlock (another thread may hold
		 * it while waiting on the waitq mutex we already own).
		 */
		if (!mutex_tryenter(&cccp->ccc_hba_mutex)) {
			/*
			 * The HBA mutex isn't available; whoever holds it
			 * will process the waitq when they release it.
			 */
			GDBG_START(("ghd_transport: !mutex cccp 0x%p\n",
			    (void *)cccp));
			mutex_exit(&cccp->ccc_waitq_mutex);
			return (TRAN_ACCEPT);
		}
		GDBG_START(("ghd_transport: got mutex cccp 0x%p\n",
		    (void *)cccp));

		/*
		 * start as many requests as possible from the head
		 * of the HBA wait queue
		 */
		ghd_waitq_process_and_mutex_exit(cccp);

		ASSERT(!mutex_owned(&cccp->ccc_hba_mutex));
		ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));

		return (TRAN_ACCEPT);
	}

	/*
	 * If polled mode (FLAG_NOINTR specified in scsi_pkt flags),
	 * then ghd_poll() waits until the request completes or times out
	 * before returning.
	 */
	mutex_exit(&cccp->ccc_waitq_mutex);
	(void) ghd_poll(cccp, GHD_POLL_REQUEST, 0, gcmdp, gtgtp, intr_status);
	ghd_doneq_pollmode_exit(cccp);

	mutex_enter(&cccp->ccc_waitq_mutex);
	ghd_waitq_process_and_mutex_exit(cccp);

	/* call HBA's completion function but don't do callback to target */
	(*cccp->ccc_hba_complete)(cccp->ccc_hba_handle, gcmdp, FALSE);

	GDBG_START(("ghd_transport: polled done cccp 0x%p\n", (void *)cccp));
	return (TRAN_ACCEPT);
}
/*
 * ghd_poll() - busy-wait, manually driving the HBA's status/interrupt
 * processing, until a condition selected by polltype is satisfied:
 *
 *	GHD_POLL_REQUEST	wait for poll_gcmdp to complete
 *	GHD_POLL_DEVICE		wait until gtgtp's device has no
 *				active requests
 *	GHD_POLL_ALL		wait until the HBA has no active requests
 *
 *	polltime	timeout in microseconds; 0 means poll forever
 *	intr_status	opaque buffer handed to the HBA's get_status /
 *			process_intr vectors
 *
 * Returns TRUE if the condition was met, FALSE on timeout.
 *
 * Caller must hold ccc_hba_mutex; it is held throughout.  Requests that
 * complete while we poll but are not the one we are waiting for are
 * parked on a local FIFO and restored to the doneq before returning, so
 * the order of completion callbacks is preserved.
 */
static int
ghd_poll(ccc_t	*cccp,
	gpoll_t	 polltype,
	ulong_t	 polltime,
	gcmd_t	*poll_gcmdp,
	gtgt_t	*gtgtp,
	void	*intr_status)
{
	gcmd_t	*gcmdp;
	L2el_t	 gcmd_hold_queue;
	int	 got_it = FALSE;
	clock_t	 poll_lbolt;
	clock_t	 start_lbolt;
	clock_t	 current_lbolt;

	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	L2_INIT(&gcmd_hold_queue);

	/* what time is it? (record when polling started) */
	poll_lbolt = drv_usectohz((clock_t)polltime);
	start_lbolt = ddi_get_lbolt();

	/* unqueue and save all CMD/CCBs until I find the right one */
	while (!got_it) {

		/* Give up yet? (poll_lbolt == 0 means no timeout) */
		current_lbolt = ddi_get_lbolt();
		if (poll_lbolt && (current_lbolt - start_lbolt >= poll_lbolt))
			break;

		/*
		 * delay 1 msec each time around the loop (this is an
		 * arbitrary delay value, any value should work) except
		 * zero because some devices don't like being polled too
		 * fast and it saturates the bus on an MP system.
		 */
		drv_usecwait(1000);

		/*
		 * check for any new device status
		 */
		if ((*cccp->ccc_get_status)(cccp->ccc_hba_handle, intr_status))
			(*cccp->ccc_process_intr)(cccp->ccc_hba_handle,
			    intr_status);

		/*
		 * If something completed then try to start the
		 * next request from the wait queue. Don't release
		 * the HBA mutex because I don't know whether my
		 * request(s) is/are on the done queue yet.
		 */
		mutex_enter(&cccp->ccc_waitq_mutex);
		(void) ghd_waitq_process_and_mutex_hold(cccp);
		mutex_exit(&cccp->ccc_waitq_mutex);

		/*
		 * Process the first of any timed-out requests.
		 */
		ghd_timer_poll(cccp, GHD_TIMER_POLL_ONE);

		/*
		 * Unqueue all the completed requests, look for mine
		 */
		while (gcmdp = ghd_doneq_get(cccp)) {
			/*
			 * If we got one and it's my request, then
			 * we're done.
			 */
			if (gcmdp == poll_gcmdp) {
				poll_gcmdp->cmd_state = GCMD_STATE_IDLE;
				got_it = TRUE;
				continue;
			}
			/* fifo queue the other cmds on my local list */
			L2_add(&gcmd_hold_queue, &gcmdp->cmd_q, gcmdp);
		}

		/*
		 * Check whether we're done yet.
		 */
		switch (polltype) {
		case GHD_POLL_DEVICE:
			/*
			 * wait for everything queued on a specific device
			 */
			if (GDEV_NACTIVE(gtgtp->gt_gdevp) == 0)
				got_it = TRUE;
			break;

		case GHD_POLL_ALL:
			/*
			 * if waiting for all outstanding requests and
			 * if active list is now empty then exit
			 */
			if (GHBA_NACTIVE(cccp) == 0)
				got_it = TRUE;
			break;

		case GHD_POLL_REQUEST:
			/* completion detected in the doneq drain above */
			break;
		}
	}

	if (L2_EMPTY(&gcmd_hold_queue)) {
		/* nothing was parked; return directly */
		ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));
		ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
		return (got_it);
	}

	/*
	 * copy the local gcmd_hold_queue back to the doneq so
	 * that the order of completion callbacks is preserved
	 */
	while (gcmdp = L2_next(&gcmd_hold_queue)) {
		L2_delete(&gcmdp->cmd_q);
		GHD_DONEQ_PUT_TAIL(cccp, gcmdp);
	}

	ASSERT(!mutex_owned(&cccp->ccc_waitq_mutex));
	ASSERT(mutex_owned(&cccp->ccc_hba_mutex));
	return (got_it);
}