/*
 * Power-button press handler: latch a PB_BUTTON_PRESS event and either
 * notify a monitoring process via poll(2) or, when no monitor is
 * running, initiate a system shutdown (at most once).
 */
static uint_t
ds1287_issue_shutdown(caddr_t arg)
{
	struct ds1287 *dsp = (struct ds1287 *)arg;

	DPRINTF("ds1287_issue_shutdown\n");

	mutex_enter(&dsp->ds1287_mutex);
	dsp->events |= PB_BUTTON_PRESS;

	if (dsp->monitor_on != 0) {
		/* a monitor process owns shutdown policy; just wake it */
		mutex_exit(&dsp->ds1287_mutex);
		pollwakeup(&dsp->pollhd, POLLRDNORM);
		pollwakeup(&dsp->pollhd, POLLIN);
		return (DDI_INTR_CLAIMED);
	}

	if (!dsp->shutdown_pending) {
		cmn_err(CE_WARN, "Power button is pressed, powering down "
		    "the system!");
		dsp->shutdown_pending = 1;
		do_shutdown();

		/*
		 * Wait a while for "do_shutdown()" to shut down the system
		 * before logging an error message.
		 */
		(void) timeout((void(*)(void *))ds1287_log_message, NULL,
		    100 * hz);
	}
	mutex_exit(&dsp->ds1287_mutex);

	return (DDI_INTR_CLAIMED);
}
/*ARGSUSED*/ void ipmi_complete_request(struct ipmi_softc *sc, struct ipmi_request *req) { struct ipmi_device *dev; IPMI_LOCK_ASSERT(sc); if (req->ir_status == IRS_CANCELED) { ASSERT(req->ir_owner == NULL); ipmi_free_request(req); return; } req->ir_status = IRS_COMPLETED; /* * Anonymous requests (from inside the driver) always have a * waiter that we awaken. */ if (req->ir_owner == NULL) { cv_signal(&req->ir_cv); } else { dev = req->ir_owner; TAILQ_INSERT_TAIL(&dev->ipmi_completed_requests, req, ir_link); pollwakeup(dev->ipmi_pollhead, POLLIN | POLLRDNORM); dev->ipmi_status &= ~IPMI_BUSY; if (dev->ipmi_status & IPMI_CLOSING) cv_signal(&dev->ipmi_cv); } }
/*
 * xpvtap_user_request_push()
 *    Push a frontend block request onto the user-visible ring, keyed by
 *    its map slot id, and wake any user-space poller.
 */
static int
xpvtap_user_request_push(xpvtap_state_t *state, blkif_request_t *req,
    uint_t uid)
{
	blkif_front_ring_t *ring = &state->bt_user_ring.ur_ring;
	xpvtap_user_map_t *map = &state->bt_map;
	blkif_request_t *slot;

	/*
	 * Save request from the frontend.  Used for ID mapping and unmap
	 * on response/cleanup.
	 */
	bcopy(req, &map->um_outstanding_reqs[uid],
	    sizeof (map->um_outstanding_reqs[uid]));

	/* put the request on the user ring, tagged with its slot id */
	slot = RING_GET_REQUEST(ring, ring->req_prod_pvt);
	bcopy(req, slot, sizeof (*req));
	slot->id = (uint64_t)uid;
	ring->req_prod_pvt++;

	pollwakeup(&state->bt_pollhead, POLLIN | POLLRDNORM);

	return (DDI_SUCCESS);
}
/*
 * This event port internal function is used by port_free_event() and
 * other port internal functions to return event structures back to the
 * kmem_cache.  When the port drops back below its event limit, a thread
 * throttled in event allocation is woken; if a poller requested POLLOUT
 * notification, it is delivered after the port queue lock is dropped.
 */
void
port_free_event_local(port_kevent_t *pkevp, int counter)
{
	port_t *pp = pkevp->portkev_port;
	port_queue_t *portq = &pp->port_queue;
	int do_pollout;

	/* scrub the event before returning it to the cache */
	pkevp->portkev_callback = NULL;
	pkevp->portkev_flags = 0;
	pkevp->portkev_port = NULL;
	mutex_destroy(&pkevp->portkev_lock);
	kmem_cache_free(port_control.pc_cache, pkevp);

	mutex_enter(&portq->portq_mutex);
	if (counter == 0) {
		if (--pp->port_curr < pp->port_max_events)
			cv_signal(&pp->port_cv);
	}
	/* snapshot and clear the POLLOUT request under the lock */
	do_pollout = (portq->portq_flags & PORTQ_POLLOUT) != 0;
	portq->portq_flags &= ~PORTQ_POLLOUT;
	mutex_exit(&portq->portq_mutex);

	/* Submit a POLLOUT event if requested */
	if (do_pollout)
		pollwakeup(&pp->port_pollhd, POLLOUT);
}
/*
 * Interrupt-time notifier for mouse poll events: wakes every thread
 * blocked in poll(2)/select(2) on the device's poll head.
 */
void
VBoxGuestNativeISRMousePollEvent(PVBOXGUESTDEVEXT pDevExt)
{
	LogFlow((DEVICE_NAME "::NativeISRMousePollEvent:\n"));

	/* Wake up poll waiters. */
	pollwakeup(&g_PollHead, POLLIN | POLLRDNORM);
}
/**
 * Poll notifier for mouse poll events.
 *
 * @param pDevExt Pointer to the device extension.
 *
 * @remarks This must be called without holding any spinlocks.
 */
void
VGDrvNativeISRMousePollEvent(PVBOXGUESTDEVEXT pDevExt)
{
	LogFlow(("VGDrvNativeISRMousePollEvent:\n"));

	/* Wake every thread blocked in poll(2) on the device. */
	pollwakeup(&g_PollHead, POLLIN | POLLRDNORM);
}
/*
 * Remove a vldc port: invalidate the minor node so new opens fail, hang
 * up any pollers, wait for current users of the minor node to drain,
 * close the channel if still open, and finally tear down the minor node.
 * Caller must hold vldcp->lock.
 */
static int
i_vldc_remove_port(vldc_t *vldcp, uint_t portno)
{
	vldc_port_t *vport;
	vldc_minor_t *vminor;

	ASSERT(vldcp != NULL);
	ASSERT(MUTEX_HELD(&vldcp->lock));

	vport = &(vldcp->port[portno]);
	vminor = vport->minorp;
	if (vminor == NULL) {
		/* port was never bound to a minor node; nothing to remove */
		cmn_err(CE_NOTE, "?i_vldc_remove_port: trying to remove a "
		    "port (%u) which is not bound", portno);
		return (MDEG_FAILURE);
	}

	/*
	 * Make sure that all new attempts to open or use the minor node
	 * associated with the port will fail.
	 */
	mutex_enter(&vminor->lock);
	vminor->portno = VLDC_INVALID_PORTNO;
	mutex_exit(&vminor->lock);

	/* send hangup to anyone polling */
	pollwakeup(&vport->poll, POLLHUP);

	/* Now wait for all current users of the minor node to finish. */
	mutex_enter(&vminor->lock);
	while (vminor->in_use > 0) {
		cv_wait(&vminor->cv, &vminor->lock);
	}

	if (vport->status != VLDC_PORT_CLOSED) {
		/* close the port before it is torn down */
		(void) i_vldc_close_port(vldcp, portno);
	}

	/* remove minor node */
	ddi_remove_minor_node(vldcp->dip, vport->minorp->sname);
	vport->minorp = NULL;

	mutex_exit(&vminor->lock);

	D1("i_vldc_remove_port: removed vldc port %u\n", portno);

	return (MDEG_SUCCESS);
}
/*
 * _heci_cmpl: process completed operation.
 *
 * Completes a write by freeing its callback block and waking pollers on
 * the tx poll head; completes an in-progress read by marking the state
 * and broadcasting to blocked readers.
 *
 * @file_ext: private data of the file object.
 * @priv_cb_pos: callback block.
 */
static void
_heci_cmpl(struct heci_file_private *file_ext,
    struct heci_cb_private *priv_cb_pos)
{
	if (priv_cb_pos->major_file_operations == HECI_WRITE) {
		/* write done: callback block is no longer needed */
		heci_free_cb_private(priv_cb_pos);
		DBG("completing write call back.\n");
		file_ext->writing_state = HECI_WRITE_COMPLETE;
		/*
		 * NOTE(review): POLL_IN is suspicious here -- pollwakeup()
		 * expects poll(2) event bits (POLLIN); POLL_IN is normally
		 * a siginfo si_code.  Verify against this driver's headers
		 * before changing.
		 */
		pollwakeup(&file_ext->tx_pollwait, POLL_IN|POLLRDNORM);
	} else if (priv_cb_pos->major_file_operations == HECI_READ &&
	    HECI_READING == file_ext->reading_state) {
		/* read done: wake threads blocked on rx_wait */
		DBG("completing read call back information= %lu\n",
		    priv_cb_pos->information);
		file_ext->reading_state = HECI_READ_COMPLETE;
		cv_broadcast(&file_ext->rx_wait);
	}
}
/*
 * Handle an event-channel upcall: retrieve the triggering port from the
 * per-CPU mailbox and enqueue it on the bound user consumer's ring,
 * waking a blocked reader/poller when the ring goes non-empty.  Ring
 * overflow is recorded rather than dropping silently.
 */
void
evtchn_device_upcall()
{
	struct evtsoftdata *ep;
	int port;

	/*
	 * This is quite gross, we had to leave the evtchn that led to this
	 * invocation in a per-cpu mailbox, retrieve it now.
	 * We do this because the interface doesn't offer us a way to pass
	 * a dynamic argument up through the generic interrupt service layer.
	 * The mailbox is safe since we either run with interrupts disabled or
	 * non-preemptable till we reach here.
	 */
	port = CPU->cpu_m.mcpu_ec_mbox;
	ASSERT(port != 0);
	CPU->cpu_m.mcpu_ec_mbox = 0;
	ec_clear_evtchn(port);

	mutex_enter(&port_user_lock);
	if ((ep = port_user[port]) != NULL) {
		mutex_enter(&ep->evtchn_lock);
		if ((ep->ring_prod - ep->ring_cons) < EVTCHN_RING_SIZE) {
			ep->ring[EVTCHN_RING_MASK(ep->ring_prod)] = port;
			/*
			 * Wake up reader when ring goes non-empty
			 */
			if (ep->ring_cons == ep->ring_prod++) {
				cv_signal(&ep->evtchn_wait);
				/*
				 * evtchn_lock is dropped before calling
				 * pollwakeup().  NOTE(review): presumably to
				 * avoid a lock-order problem with the poll
				 * framework -- confirm against the driver's
				 * documented lock ordering.
				 */
				mutex_exit(&ep->evtchn_lock);
				pollwakeup(&ep->evtchn_pollhead,
				    POLLIN | POLLRDNORM);
				goto done;
			}
		} else {
			/* consumer is too far behind; flag the overflow */
			ep->ring_overflow = 1;
		}
		mutex_exit(&ep->evtchn_lock);
	}
done:
	mutex_exit(&port_user_lock);
}
/*
 * Put a message on the async read queue; frees the message when the
 * queue rejects it, otherwise delivers a one-shot POLLIN wakeup if a
 * poller is armed.
 */
void
av1394_async_putq_rq(av1394_inst_t *avp, mblk_t *mp)
{
	av1394_async_t *ap = &avp->av_a;
	int wakeup = 0;

	if (!av1394_putq(&ap->a_rq, mp)) {
		freemsg(mp);
		TNF_PROBE_0(av1394_async_putq_rq_error_putq,
		    AV1394_TNF_ASYNC_ERROR, "");
		return;
	}

	/* disarm POLLIN under the lock, notify pollers outside it */
	mutex_enter(&ap->a_mutex);
	if (ap->a_pollevents & POLLIN) {
		ap->a_pollevents &= ~POLLIN;
		wakeup = 1;
	}
	mutex_exit(&ap->a_mutex);

	if (wakeup)
		pollwakeup(&ap->a_pollhead, POLLIN);
}
/* * _heci_cmpl_iamthif: process completed iamthif operation. * * @dev: Device object for our driver. * @priv_cb_pos: callback block. */ static void _heci_cmpl_iamthif(struct iamt_heci_device *dev, struct heci_cb_private *priv_cb_pos) { if (dev->iamthif_canceled != 1) { dev->iamthif_state = HECI_IAMTHIF_READ_COMPLETE; dev->iamthif_stall_timer = 0; (void) memcpy(priv_cb_pos->response_buffer.data, dev->iamthif_msg_buf, dev->iamthif_msg_buf_index); list_add_tail(&priv_cb_pos->cb_list, &dev->pthi_read_complete_list.heci_cb.cb_list); DBG("pthi read completed.\n"); } else { run_next_iamthif_cmd(dev); } if (&dev->iamthif_file_ext.pollwait) { DBG("completing pthi call back.\n"); pollwakeup(&dev->iamthif_file_ext.pollwait, POLL_IN|POLLRDNORM); } }
/*
 * The port_send_event() function is used by all event sources to submit
 * trigerred events to a port. All the data required for the event management
 * is already stored in the port_kevent_t structure.
 * The event port internal data is stored in the port_kevent_t structure
 * during the allocation time (see port_alloc_event()). The data related to
 * the event itself and to the event source management is stored in the
 * port_kevent_t structure between the allocation time and submit time
 * (see port_init_event()).
 *
 * This function is often called from interrupt level.
 */
void
port_send_event(port_kevent_t *pkevp)
{
	port_queue_t	*portq;

	portq = &pkevp->portkev_port->port_queue;
	mutex_enter(&portq->portq_mutex);

	if (pkevp->portkev_flags & PORT_KEV_DONEQ) {
		/* Event already in the port queue */
		/*
		 * Caller holds portkev_lock for PORT_SOURCE_FD events (this
		 * path exits it without a matching enter here); release it
		 * now that we know the event is already queued.
		 */
		if (pkevp->portkev_source == PORT_SOURCE_FD) {
			mutex_exit(&pkevp->portkev_lock);
		}
		mutex_exit(&portq->portq_mutex);
		return;
	}

	/* put event in the port queue */
	list_insert_tail(&portq->portq_list, pkevp);
	portq->portq_nent++;

	/*
	 * Remove the PORTQ_WAIT_EVENTS flag to indicate
	 * that new events are available.
	 */
	portq->portq_flags &= ~PORTQ_WAIT_EVENTS;
	pkevp->portkev_flags |= PORT_KEV_DONEQ;		/* event enqueued */

	/* event is safely on the queue; drop the caller-held per-event lock */
	if (pkevp->portkev_source == PORT_SOURCE_FD) {
		mutex_exit(&pkevp->portkev_lock);
	}

	/* Check if thread is in port_close() waiting for outstanding events */
	if (portq->portq_flags & PORTQ_CLOSE) {
		/* Check if all outstanding events are already in port queue */
		if (pkevp->portkev_port->port_curr <= portq->portq_nent)
			cv_signal(&portq->portq_closecv);
	}

	if (portq->portq_getn == 0) {
		/*
		 * No thread retrieving events -> check if enough events are
		 * available to satify waiting threads.
		 */
		if (portq->portq_thread &&
		    (portq->portq_nent >= portq->portq_nget))
			cv_signal(&portq->portq_thread->portget_cv);
	}

	/*
	 * If some thread is polling the port's fd, then notify it.
	 * For PORT_SOURCE_FD source, we don't need to call pollwakeup()
	 * here as it will result in a recursive call(PORT_SOURCE_FD source
	 * is pollwakeup()). Therefore pollwakeup() itself will notify the
	 * ports if being polled.
	 */
	if (pkevp->portkev_source != PORT_SOURCE_FD &&
	    portq->portq_flags & PORTQ_POLLIN) {
		port_t	*pp;

		portq->portq_flags &= ~PORTQ_POLLIN;
		/*
		 * Need to save port_t for calling pollwakeup since port_getn()
		 * may end up freeing pkevp once portq_mutex is dropped.
		 */
		pp = pkevp->portkev_port;
		mutex_exit(&portq->portq_mutex);
		pollwakeup(&pp->port_pollhd, POLLIN);
	} else {
		mutex_exit(&portq->portq_mutex);
	}
}
/*
 * ldc callback: translate LDC channel events (UP/RESET/DOWN/READ) into
 * port state changes and a single poll(2) notification for the minor
 * node.  Always returns LDC_SUCCESS.
 */
static uint_t
i_vldc_cb(uint64_t event, caddr_t arg)
{
	int rv;
	vldc_port_t *vport = (vldc_port_t *)arg;
	ldc_status_t old_status;
	short pollevents = 0;

	ASSERT(vport != NULL);
	ASSERT(vport->minorp != NULL);

	D1("i_vldc_cb: vldc@%d:%d callback invoked, channel=0x%lx, "
	    "event=0x%lx\n", vport->inst, vport->number, vport->ldc_id,
	    event);

	/* ensure the port can't be destroyed while we are handling the cb */
	mutex_enter(&vport->minorp->lock);

	if (vport->status == VLDC_PORT_CLOSED) {
		/*
		 * FIX: drop the minor lock before bailing out.  The
		 * original returned here with vport->minorp->lock still
		 * held, deadlocking any later open/close/callback on this
		 * port.  Every other early-return path below exits the
		 * lock; this one must too.
		 */
		mutex_exit(&vport->minorp->lock);
		return (LDC_SUCCESS);
	}

	old_status = vport->ldc_status;
	rv = ldc_status(vport->ldc_handle, &vport->ldc_status);
	if (rv != 0) {
		DWARN("i_vldc_cb: vldc@%d:%d could not get ldc status, "
		    "rv=%d\n", vport->inst, vport->number, rv);
		mutex_exit(&vport->minorp->lock);
		return (LDC_SUCCESS);
	}

	if (event & LDC_EVT_UP) {
		/* channel came up: writers may proceed */
		pollevents |= POLLOUT;
		vport->hanged_up = B_FALSE;
	} else if (event & LDC_EVT_RESET) {
		/*
		 * Mark the port in reset, if it is not CLOSED and
		 * the channel was previously in LDC_UP state. This
		 * implies that the port cannot be used until it has
		 * been closed and reopened.
		 */
		if (old_status == LDC_UP) {
			vport->status = VLDC_PORT_RESET;
			vport->hanged_up = B_TRUE;
			pollevents = POLLHUP;
		} else {
			/* channel was never up; try to bring it up now */
			rv = ldc_up(vport->ldc_handle);
			if (rv) {
				DWARN("i_vldc_cb: vldc@%d:%d cannot bring "
				    "channel UP rv=%d\n", vport->inst,
				    vport->number, rv);
				mutex_exit(&vport->minorp->lock);
				return (LDC_SUCCESS);
			}
			rv = ldc_status(vport->ldc_handle,
			    &vport->ldc_status);
			if (rv != 0) {
				DWARN("i_vldc_cb: vldc@%d:%d could not get "
				    "ldc status, rv=%d\n", vport->inst,
				    vport->number, rv);
				mutex_exit(&vport->minorp->lock);
				return (LDC_SUCCESS);
			}
			if (vport->ldc_status == LDC_UP) {
				pollevents |= POLLOUT;
				vport->hanged_up = B_FALSE;
			}
		}
	} else if (event & LDC_EVT_DOWN) {
		/*
		 * The other side went away - mark port in RESET state
		 */
		vport->status = VLDC_PORT_RESET;
		vport->hanged_up = B_TRUE;
		pollevents = POLLHUP;
	}

	if (event & LDC_EVT_READ)
		pollevents |= POLLIN;

	mutex_exit(&vport->minorp->lock);

	/* deliver the accumulated poll events outside the lock */
	if (pollevents != 0) {
		D1("i_vldc_cb: port@%d pollwakeup=0x%x\n",
		    vport->number, pollevents);
		pollwakeup(&vport->poll, pollevents);
	}

	return (LDC_SUCCESS);
}