/** * This function is called when an EP is disabled due to disconnect or * change in configuration. Any pending requests will terminate with a * status of -ESHUTDOWN. * * This function modifies the dwc_otg_ep_t data structure for this EP, * and then calls dwc_otg_ep_deactivate. */ static int dwc_otg_pcd_ep_disable(struct usb_ep *_ep) { dwc_otg_pcd_ep_t *ep; unsigned long flags; DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, _ep); ep = container_of(_ep, dwc_otg_pcd_ep_t, ep); if (!_ep || !ep->desc) { DWC_DEBUGPL(DBG_PCD, "%s, %s not enabled\n", __func__, _ep ? ep->ep.name : NULL); return -EINVAL; } SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); request_nuke( ep ); dwc_otg_ep_deactivate( GET_CORE_IF(ep->pcd), &ep->dwc_ep ); ep->desc = 0; ep->stopped = 1; if(ep->dwc_ep.is_in) { release_perio_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num); release_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num); } SPIN_UNLOCK_IRQRESTORE(&ep->pcd->lock, flags); DWC_DEBUGPL(DBG_PCD, "%s disabled\n", _ep->name); return 0; }
int pcan_fifo_put (register FIFO_MANAGER * anchor, void *pvPutData) { int err = 0; DECLARE_SPIN_LOCK_IRQSAVE_FLAGS; /* DPRINTK("sn_fifo_put() %d %p %p\n", anchor->nStored, anchor->r, anchor->w); */ SPIN_LOCK_IRQSAVE (&anchor->lock); if (anchor->nStored < anchor->nCount) { memcpy (anchor->w, pvPutData, anchor->wCopySize); anchor->nStored++; anchor->dwTotal++; if (anchor->w < anchor->bufferEnd) anchor->w += anchor->wStepSize; // increment to next else anchor->w = anchor->bufferBegin; // start from begin } else err = -ENOSPC; SPIN_UNLOCK_IRQRESTORE (&anchor->lock); return err; }
int pcan_fifo_get (register FIFO_MANAGER * anchor, void *pvGetData) { int err = 0; DECLARE_SPIN_LOCK_IRQSAVE_FLAGS; /* DPRINTK("pcan_fifo_get() %d %p %p\n", anchor->nStored, anchor->r, anchor->w); */ SPIN_LOCK_IRQSAVE (&anchor->lock); if (anchor->nStored > 0) { memcpy (pvGetData, anchor->r, anchor->wCopySize); anchor->nStored--; if (anchor->r < anchor->bufferEnd) anchor->r += anchor->wStepSize; /* increment to next */ else anchor->r = anchor->bufferBegin; /* start from begin */ } else err = -ENODATA; SPIN_UNLOCK_IRQRESTORE (&anchor->lock); return err; }
//---------------------------------------------------------------------------- // returns 0 if the fifo is full int pcan_fifo_not_full(FIFO_MANAGER *anchor) { #ifdef PCAN_FIFO_FIX_NOT_FULL_TEST int r; DECLARE_SPIN_LOCK_IRQSAVE_FLAGS; SPIN_LOCK_IRQSAVE(&anchor->lock); r = (anchor->nStored < anchor->nCount); SPIN_UNLOCK_IRQRESTORE(&anchor->lock); return r; #else return (anchor->nStored < (anchor->nCount - 1)); #endif }
/*
 * Tasklet body that drains an endpoint's completion queue and invokes the
 * gadget driver's completion callbacks outside hard-IRQ context.
 *
 * @param data  The ifxpcd_ep_t pointer, cast to unsigned long by the
 *              tasklet API.
 *
 * At most 10 requests are completed per run; if more remain, the tasklet
 * reschedules itself so other softirq work can make progress.
 */
void request_done_tasklet(unsigned long data)
{
	ifxpcd_ep_t *_ifxep = (ifxpcd_ep_t *)data;
	ifxpcd_request_t *req;
	unsigned long flags=0;
	int k=10;	/* bound: complete at most 10 requests per invocation */

	/* flag used elsewhere to tell whether this tasklet is active */
	_ifxep->cmpt_tasklet_in_process=1;

	while (k) {
		SPIN_LOCK_IRQSAVE(&_ifxep->cmp_lock,flags);
		if(_ifxep->queue_cmpt.next != &_ifxep->queue_cmpt) {
			/* pop the oldest completed request off queue_cmpt */
			req = list_entry(_ifxep->queue_cmpt.next, ifxpcd_request_t,trq);
			list_del_init(&req->trq);
			/* drop the lock before calling into the gadget driver:
			 * the complete() callback may queue new requests */
			SPIN_UNLOCK_IRQRESTORE(&_ifxep->cmp_lock,flags);
			if(req->sysreq.complete)
				req->sysreq.complete(&_ifxep->sysep, &req->sysreq);
			else {
				/* a request without a completion handler is a driver bug */
				#ifdef __req_num_dbg__
				IFX_ERROR("%s() no complete EP%d Req%d\n",__func__,_ifxep->num, req->reqid);
				#else
				IFX_ERROR("%s() no complete EP%d Req %p\n",__func__,_ifxep->num, req);
				#endif
			}
		}
		else {
			/* queue drained */
			SPIN_UNLOCK_IRQRESTORE(&_ifxep->cmp_lock,flags);
			break;
		}
		k--;
	}

	/* NOTE(review): this emptiness test runs without cmp_lock held —
	 * presumably acceptable because a stale result only causes a spurious
	 * reschedule or a wakeup from the next enqueue; confirm with the
	 * enqueue path. */
	if(!list_empty(&_ifxep->queue_cmpt)) {
		/* more work pending: run again */
		#ifdef __GADGET_TASKLET_HIGH__
		tasklet_hi_schedule(&_ifxep->cmpt_tasklet);
		#else
		tasklet_schedule(&_ifxep->cmpt_tasklet);
		#endif
	}
	else
		_ifxep->cmpt_tasklet_in_process=0;
}
//**************************************************************************** // CODE int pcan_fifo_reset(register FIFO_MANAGER *anchor) { DECLARE_SPIN_LOCK_IRQSAVE_FLAGS; SPIN_LOCK_IRQSAVE(&anchor->lock); anchor->dwTotal = 0; anchor->nStored = 0; anchor->r = anchor->w = anchor->bufferBegin; // nothing to read SPIN_UNLOCK_IRQRESTORE(&anchor->lock); // DPRINTK(KERN_DEBUG "%s: pcan_fifo_reset() %d %p %pd\n", DEVICE_NAME, anchor->nStored, anchor->r, anchor->w); return 0; }
// write a register through the SP parallel-port handshake
static void pcan_dongle_writereg(struct pcandev *dev, u8 port, u8 data)
{
	u16 pa = (u16)dev->port.dng.dwPort;	/* data lines */
	u16 pc = pa + 2;			/* control lines */
	u8 irqEnable = inb(pc) & 0x10;		/* don't influence irqEnable */
	DECLARE_SPIN_LOCK_IRQSAVE_FLAGS;

	SPIN_LOCK_IRQSAVE(&dev->port.dng.lock);

	outb((0x0B ^ 0x0D) | irqEnable, pc);
	outb(port & 0x1F, pa);			/* select the register */
	outb((0x0B ^ 0x0C) | irqEnable, pc);
	outb(data, pa);				/* latch the data byte */
	outb((0x0B ^ 0x0D) | irqEnable, pc);

	SPIN_UNLOCK_IRQRESTORE(&dev->port.dng.lock);
}
// functions for EPP port static u8 pcan_dongle_epp_readreg(struct pcandev *dev, u8 port) // read a register { u16 _PA_ = (u16)dev->port.dng.dwPort; u16 _PC_ = _PA_ + 2; u8 wert; u8 irqEnable = inb(_PC_) & 0x10; // don't influence irqEnable DECLARE_SPIN_LOCK_IRQSAVE_FLAGS; SPIN_LOCK_IRQSAVE(&dev->port.dng.lock); outb((0x0B ^ 0x0F) | irqEnable, _PC_); outb((port & 0x1F) | 0x80, _PA_); outb((0x0B ^ 0x2E) | irqEnable, _PC_); wert = inb(_PA_); outb((0x0B ^ 0x0F) | irqEnable, _PC_); SPIN_UNLOCK_IRQRESTORE(&dev->port.dng.lock); return wert; }
/**
 * Removes a QH from either the non-periodic or periodic schedule. Memory is
 * not freed.
 *
 * @param[in] hcd The HCD state structure.
 * @param[in] qh QH to remove from schedule.
 */
void dwc_otg_hcd_qh_remove (dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
{
	unsigned long flags;

	SPIN_LOCK_IRQSAVE(&hcd->lock, flags);

	if (list_empty(&qh->qh_list_entry)) {
		/* QH is not in a schedule. */
		goto done;
	}

	if (dwc_qh_is_non_per(qh)) {
		/* Don't leave the round-robin scan pointer dangling at the
		 * entry we are about to unlink. */
		if (hcd->non_periodic_qh_ptr == &qh->qh_list_entry) {
			hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
		}
		list_del_init(&qh->qh_list_entry);
	} else {
		deschedule_periodic(hcd, qh);
	}

done:
	/* Fix: the original was missing the trailing ';' here, which only
	 * compiled if the unlock macro happened to end in a semicolon. */
	SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
}
// functions for SP port static u8 pcan_dongle_sp_readreg(struct pcandev *dev, u8 port) // read a register { u16 _PA_ = (u16)dev->port.dng.dwPort; u16 _PB_ = _PA_ + 1; u16 _PC_ = _PB_ + 1; u8 b0, b1 ; u8 irqEnable = inb(_PC_) & 0x10; // don't influence irqEnable DECLARE_SPIN_LOCK_IRQSAVE_FLAGS; SPIN_LOCK_IRQSAVE(&dev->port.dng.lock); outb((0x0B ^ 0x0D) | irqEnable, _PC_); outb((port & 0x1F) | 0x80, _PA_); outb((0x0B ^ 0x0C) | irqEnable, _PC_); b1=nibble_decode[inb(_PB_)>>3]; outb(0x40, _PA_); b0=nibble_decode[inb(_PB_)>>3]; outb((0x0B ^ 0x0D) | irqEnable, _PC_); SPIN_UNLOCK_IRQRESTORE(&dev->port.dng.lock); return (b1 << 4) | b0 ; }
/**
 * This function adds a QTD to the QTD-list of a QH. It will find the correct
 * QH to place the QTD into. If it does not find a QH, then it will create a
 * new QH. If the QH to which the QTD is added is not currently scheduled, it
 * is placed into the proper schedule based on its EP type.
 *
 * @param[in] qtd The QTD to add
 * @param[in] dwc_otg_hcd The DWC HCD structure
 *
 * @return 0 if successful, negative error code otherwise.
 */
int dwc_otg_hcd_qtd_add (dwc_otg_qtd_t *qtd, dwc_otg_hcd_t *dwc_otg_hcd)
{
	struct usb_host_endpoint *ep;
	dwc_otg_qh_t *qh;
	unsigned long flags;
	int retval = 0;
	struct urb *urb = qtd->urb;

	SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags);

	/*
	 * Get the QH which holds the QTD-list to insert to. Create QH if it
	 * doesn't exist.
	 */
	ep = dwc_urb_to_endpoint(urb);
	qh = (dwc_otg_qh_t *)ep->hcpriv;
	if (qh == NULL) {
		qh = dwc_otg_hcd_qh_create (dwc_otg_hcd, urb);
		if (qh == NULL) {
			/* Fix: previously fell through with retval == 0,
			 * reporting success although the QTD was never
			 * queued — the URB would silently get lost. */
			retval = -ENOMEM;
			goto done;
		}
		ep->hcpriv = qh;
	}

	retval = dwc_otg_hcd_qh_add(dwc_otg_hcd, qh);
	if (retval == 0) {
		list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);
	}

done:
	SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);

	return retval;
}
/**
 * Deactivates a QH. For non-periodic QHs, removes the QH from the active
 * non-periodic schedule. The QH is added to the inactive non-periodic
 * schedule if any QTDs are still attached to the QH.
 *
 * For periodic QHs, the QH is removed from the periodic queued schedule. If
 * there are any QTDs still attached to the QH, the QH is added to either the
 * periodic inactive schedule or the periodic ready schedule and its next
 * scheduled frame is calculated. The QH is placed in the ready schedule if
 * the scheduled frame has been reached already. Otherwise it's placed in the
 * inactive schedule. If there are no QTDs attached to the QH, the QH is
 * completely removed from the periodic schedule.
 *
 * @param hcd                       The HCD state structure.
 * @param qh                        QH to deactivate.
 * @param sched_next_periodic_split Non-zero when the caller wants the next
 *                                  continuing split of a periodic split
 *                                  transfer scheduled.
 */
void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, int sched_next_periodic_split)
{
	unsigned long flags;
	SPIN_LOCK_IRQSAVE(&hcd->lock, flags);

	if (dwc_qh_is_non_per(qh)) {
		dwc_otg_hcd_qh_remove(hcd, qh);
		if (!list_empty(&qh->qtd_list)) {
			/* Add back to inactive non-periodic schedule. */
			dwc_otg_hcd_qh_add(hcd, qh);
		}
	} else {
		/* current (micro)frame number from the core */
		uint16_t frame_number = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd));

		if (qh->do_split) {
			/* Schedule the next continuing periodic split transfer */
			if (sched_next_periodic_split) {
				qh->sched_frame = frame_number;
				if (dwc_frame_num_le(frame_number,
						dwc_frame_num_inc(qh->start_split_frame, 1))) {
					/*
					 * Allow one frame to elapse after start
					 * split microframe before scheduling
					 * complete split, but DONT if we are
					 * doing the next start split in the
					 * same frame for an ISOC out.
					 */
					if ((qh->ep_type != USB_ENDPOINT_XFER_ISOC) || (qh->ep_is_in != 0)) {
						qh->sched_frame = dwc_frame_num_inc(qh->sched_frame, 1);
					}
				}
			} else {
				/* Next start split: one interval after the last
				 * start split, but never in the past. */
				qh->sched_frame = dwc_frame_num_inc(qh->start_split_frame,
								qh->interval);
				if (dwc_frame_num_le(qh->sched_frame, frame_number)) {
					qh->sched_frame = frame_number;
				}
				qh->sched_frame |= 0x7;
				qh->start_split_frame = qh->sched_frame;
			}
		} else {
			/* Plain periodic: advance by the polling interval,
			 * clamping to "now" if the slot is already past. */
			qh->sched_frame = dwc_frame_num_inc(qh->sched_frame, qh->interval);
			if (dwc_frame_num_le(qh->sched_frame, frame_number)) {
				qh->sched_frame = frame_number;
			}
		}

		if (list_empty(&qh->qtd_list)) {
			/* No work left: drop out of the periodic schedule. */
			dwc_otg_hcd_qh_remove(hcd, qh);
		} else {
			/*
			 * Remove from periodic_sched_queued and move to
			 * appropriate queue.
			 */
			if (qh->sched_frame == frame_number) {
				list_move(&qh->qh_list_entry,
					&hcd->periodic_sched_ready);
			} else {
				list_move(&qh->qh_list_entry,
					&hcd->periodic_sched_inactive);
			}
		}
	}

	SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
}
/** * This function is used to submit an I/O Request to an EP. * * - When the request completes the request's completion callback * is called to return the request to the driver. * - An EP, except control EPs, may have multiple requests * pending. * - Once submitted the request cannot be examined or modified. * - Each request is turned into one or more packets. * - A BULK EP can queue any amount of data; the transfer is * packetized. * - Zero length Packets are specified with the request 'zero' * flag. */ static int dwc_otg_pcd_ep_queue(struct usb_ep *_ep, struct usb_request *_req, int _gfp_flags) { int prevented = 0; dwc_otg_pcd_request_t *req; dwc_otg_pcd_ep_t *ep; dwc_otg_pcd_t *pcd; unsigned long flags = 0; DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p,%d)\n", __func__, _ep, _req, _gfp_flags); req = container_of(_req, dwc_otg_pcd_request_t, req); if (!_req || !_req->complete || !_req->buf || !list_empty(&req->queue)) { if( !_req ) printk("bad _req\n"); if( !_req->complete ) printk("bad _req->complete\n"); if( !_req->buf ) printk("bad _req->buf\n"); if( !list_empty(&req->queue) ) printk("bad list_empty\n"); DWC_WARN("%s, bad params\n", __func__); return -EINVAL; } ep = container_of(_ep, dwc_otg_pcd_ep_t, ep); if (!_ep || (!ep->desc && ep->dwc_ep.num != 0)) { DWC_WARN("%s, bad ep\n", __func__); return -EINVAL; } pcd = ep->pcd; //cathy, if suspended, drop request if ( (GET_CORE_IF(pcd)->dev_if->suspended == 1) && (ep->dwc_ep.num != 0) ) { DWC_DEBUGPL(DBG_PCDV,"%s, epnum = %d, drop request\n", __func__, ep->dwc_ep.num); return -ESHUTDOWN; } if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed); DWC_WARN("%s, bogus device state\n", __func__); return -ESHUTDOWN; } DWC_DEBUGPL(DBG_PCD, "%s queue req %p, len %d buf %p\n", _ep->name, _req, _req->length, _req->buf); if (!GET_CORE_IF(pcd)->core_params->opt) { if (ep->dwc_ep.num != 0) { DWC_ERROR("%s queue req %p, len %d buf %p\n", _ep->name, _req, _req->length, 
_req->buf); } } SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); #if defined(DEBUG) & defined(VERBOSE) dump_msg(_req->buf, _req->length); #endif _req->status = -EINPROGRESS; _req->actual = 0; /* * For EP0 IN without premature status, zlp is required? */ if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) { DWC_DEBUGPL(DBG_PCDV, "%s-OUT ZLP\n", _ep->name); //_req->zero = 1; } /* Start the transfer */ if (list_empty(&ep->queue) && !ep->stopped) { /* EP0 Transfer? */ if (ep->dwc_ep.num == 0) { switch (pcd->ep0state) { case EP0_IN_DATA_PHASE: DWC_DEBUGPL(DBG_PCD, "%s ep0: EP0_IN_DATA_PHASE\n", __func__); break; case EP0_OUT_DATA_PHASE: DWC_DEBUGPL(DBG_PCD, "%s ep0: EP0_OUT_DATA_PHASE\n", __func__); if (pcd->request_config) { /* Complete STATUS PHASE */ ep->dwc_ep.is_in = 1; pcd->ep0state = EP0_STATUS; } break; default: DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n", pcd->ep0state); SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); return -EL2HLT; } ep->dwc_ep.dma_addr = (u32)_req->buf & ~Uncache_Mask; //_req->dma; //cathy ep->dwc_ep.start_xfer_buff = _req->buf; ep->dwc_ep.xfer_buff = _req->buf; ep->dwc_ep.xfer_len = _req->length; ep->dwc_ep.xfer_count = 0; ep->dwc_ep.sent_zlp = 0; ep->dwc_ep.total_len = ep->dwc_ep.xfer_len; dwc_otg_ep0_start_transfer( GET_CORE_IF(pcd), &ep->dwc_ep ); } else { /* Setup and start the Transfer */ ep->dwc_ep.dma_addr = (u32)_req->buf & ~Uncache_Mask; //_req->dma; //cathy //ep->dwc_ep.dma_addr = _req->dma; ep->dwc_ep.start_xfer_buff = _req->buf; ep->dwc_ep.xfer_buff = _req->buf; ep->dwc_ep.xfer_len = _req->length; ep->dwc_ep.xfer_count = 0; ep->dwc_ep.sent_zlp = 0; ep->dwc_ep.total_len = ep->dwc_ep.xfer_len; dwc_otg_ep_start_transfer( GET_CORE_IF(pcd), &ep->dwc_ep ); } } if ((req != 0) || prevented) { ++pcd->request_pending; list_add_tail(&req->queue, &ep->queue); //cathy #if 0 if (ep->dwc_ep.is_in && ep->stopped && !(GET_CORE_IF(pcd)->dma_enable)) { /** @todo NGS Create a function for this. 
*/ diepmsk_data_t diepmsk = { .d32 = 0}; diepmsk.b.intktxfemp = 1; dwc_modify_reg32( &GET_CORE_IF(pcd)->dev_if->dev_global_regs->diepmsk, 0, diepmsk.d32 ); } #endif }
/**
 * This function is called by the Gadget Driver for each EP to be
 * configured for the current configuration (SET_CONFIGURATION).
 *
 * This function initializes the dwc_otg_ep_t data structure, and then
 * calls dwc_otg_ep_activate.
 *
 * @param _ep   Endpoint to enable (must not be EP0).
 * @param _desc Endpoint descriptor supplied by the gadget driver.
 * @return 0 on success; -EINVAL for a bad EP/descriptor, -ERANGE for a
 *         zero wMaxPacketSize, -ESHUTDOWN when no driver is bound or the
 *         connection speed is still unknown.
 */
static int dwc_otg_pcd_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *_desc)
{
	dwc_otg_pcd_ep_t *ep = 0;
	dwc_otg_pcd_t *pcd = 0;
	unsigned long flags;

	DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p)\n", __func__, _ep, _desc );

	ep = container_of(_ep, dwc_otg_pcd_ep_t, ep);
	/* reject: NULL args, an already-enabled EP (desc set), or a
	 * descriptor that is not an endpoint descriptor */
	if (!_ep || !_desc || ep->desc || _desc->bDescriptorType != USB_DT_ENDPOINT) {
		DWC_WARN( "%s, bad ep or descriptor\n", __func__);
		return -EINVAL;
	}
	/* EP0 is managed by the PCD itself and may not be enabled here */
	if (ep == &ep->pcd->ep0) {
		DWC_WARN("%s, bad ep(0)\n", __func__);
		return -EINVAL;
	}
	/* Check FIFO size? */
	if (!_desc->wMaxPacketSize) {
		DWC_WARN("%s, bad %s maxpacket\n", __func__, _ep->name);
		return -ERANGE;
	}

	pcd = ep->pcd;
	if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) {
		if (!pcd->driver) {
			bh_otg_dbg2("[%s:%d] gadget[%s] !pcd->driver\n", __FUNCTION__, __LINE__, pcd->gadget.name);
		}
		if (pcd->gadget.speed == USB_SPEED_UNKNOWN) {
			bh_otg_dbg2("[%s:%d] gadget[%s] USB_SPEED_UNKNOWN\n", __FUNCTION__, __LINE__, pcd->gadget.name);
		}
		DWC_WARN("%s, bogus device state\n", __func__);
		return -ESHUTDOWN;
	}

	SPIN_LOCK_IRQSAVE(&pcd->lock, flags);

	ep->desc = _desc;
	/* NOTE(review): wMaxPacketSize is used unmasked here; high-speed
	 * high-bandwidth EPs encode extra transactions in bits 12:11 —
	 * confirm this core never sees such descriptors. */
	ep->ep.maxpacket = le16_to_cpu (_desc->wMaxPacketSize);

	/*
	 * Activate the EP
	 */
	ep->stopped = 0;

	ep->dwc_ep.is_in = (USB_DIR_IN & _desc->bEndpointAddress) != 0;
	ep->dwc_ep.maxpacket = ep->ep.maxpacket;
	ep->dwc_ep.type = _desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	if(ep->dwc_ep.is_in) {
		if(!pcd->otg_dev->core_if->en_multiple_tx_fifo) {
			/* shared-FIFO mode: default to FIFO 0 */
			ep->dwc_ep.tx_fifo_num = 0;
			/* vendor change ("cathy"): the periodic Tx FIFO is
			 * assigned to INTERRUPT IN EPs instead of ISOC */
			if ((_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
				//cathy USB_ENDPOINT_XFER_ISOC )
				USB_ENDPOINT_XFER_INT )	//assign interrupt in ep (ep3) to periodic tx fifo
			{
				/*
				 * if ISOC EP then assign a Periodic Tx FIFO.
				 */
				ep->dwc_ep.tx_fifo_num = assign_perio_tx_fifo(pcd->otg_dev->core_if);
			}
		} else {
			/*
			 * if Dedicated FIFOs mode is on then assign a Tx FIFO.
			 */
			ep->dwc_ep.tx_fifo_num = assign_tx_fifo(pcd->otg_dev->core_if);
		}
	}

	/* Set initial data PID. */
	if ((_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK ) {
		ep->dwc_ep.data_pid_start = 0;
	}

	DWC_DEBUGPL(DBG_PCD, "Activate %s-%s: type=%d, mps=%d desc=%p\n",
			ep->ep.name, (ep->dwc_ep.is_in ?"IN":"OUT"),
			ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc );

	dwc_otg_ep_activate( GET_CORE_IF(pcd), &ep->dwc_ep );

	SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);

	return 0;
}
/*
 * Bottom half executed by the sis1100 interrupt kernel thread.
 *
 * Decodes the 'command' bitmask, runs the matching handlers to collect newly
 * raised remote interrupts, accumulates them in sc->pending_irqs, signals any
 * file descriptors that registered for notification, and finally wakes
 * sleepers in sis1100_irq_wait()/select().
 *
 * Locking: sem_irqinfo guards pending_irqs and the handler dispatch;
 * lock_doorbell guards the doorbell word; sem_fdata guards the fdata table.
 */
static void _sis1100_irq_thread(struct sis1100_softc* sc, enum handlercomm command)
{
	u_int32_t new_irqs=0;
	int i;

	mutex_lock(&sc->sem_irqinfo);
	if (command&handlercomm_doorbell) {
		DECLARE_SPINLOCKFLAGS(flags)
		u_int32_t doorbell;
		/* atomically fetch and clear the doorbell bits */
		SPIN_LOCK_IRQSAVE(sc->lock_doorbell, flags);
		doorbell=sc->doorbell;
		sc->doorbell=0;
		SPIN_UNLOCK_IRQRESTORE(sc->lock_doorbell, flags);
		/* dispatch to the handler matching the remote hardware type */
		switch (sc->remote_hw) {
		case sis1100_hw_vme:
			new_irqs|=sis3100rem_irq_handler(sc, doorbell);
			break;
		case sis1100_hw_camac:
			new_irqs|=sis5100rem_irq_handler(sc, doorbell);
			break;
		case sis1100_hw_pci:
			/* do nothing */
			break;
		case sis1100_hw_lvd:
			new_irqs|=zellvd_rem_irq_handler(sc, doorbell);
			break;
		case sis1100_hw_pandapixel:
			new_irqs|=pandapixel_rem_irq_handler(sc, doorbell);
			break;
		case sis1100_hw_psf4ad: /* do nothing */
		case sis1100_hw_invalid: /* do nothing */
			break;
		}
	}
	if (command&handlercomm_lemo) {
		new_irqs|=sis1100_lemo_handler(sc);
	}
	if (command&handlercomm_mbx0) {
		new_irqs|=sis1100_mbox0_handler(sc);
	}
	/* this is called from sis1100_link_up_handler
	   for both 'UP' and 'DOWN' of link
	   one second after status change */
	if (command&handlercomm_up) {
		new_irqs|=sis1100_synch_handler(sc);
	}
	if (command&handlercomm_ddma) {
		new_irqs|=sis1100_ddma_handler(sc);
	}
	sc->pending_irqs|=new_irqs;
	mutex_unlock(&sc->sem_irqinfo);

	/* inform processes via signal if requested */
	mutex_lock(&sc->sem_fdata);
	for (i=0; i<sis1100_MINORUTMASK+1; i++) {
		if (sc->fdata[i]) {
			struct sis1100_fdata* fd=sc->fdata[i];
			/* notify if the fd owns one of the new irqs or the
			 * remote hardware type changed under it */
			if (fd->sig>0 &&
				((new_irqs & fd->owned_irqs)||
				 (fd->old_remote_hw!=sc->remote_hw))) {
				int res;
				/* XXXY debug output; must be removed */
				pERROR(sc, "irq_pending=%d pending_irqs=0x%x",
					irq_pending(sc, fd, fd->owned_irqs),
					sc->pending_irqs);
				pERROR(sc, "sig=%d new_irqs=0x%x owned_irqs=0x%x",
					fd->sig, new_irqs, fd->owned_irqs);
				pERROR(sc, "old_remote_hw=%d remote_hw=%d",
					fd->old_remote_hw, sc->remote_hw);
				/* XXXY debug output; must be removed */
				pERROR(sc, "send sig to %d", pid_nr(fd->pid));
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
				res=kill_proc(fd->pid, fd->sig, 1);
#else
				res=kill_pid(fd->pid, fd->sig, 1);
#endif
				if (res)
					pINFO(sc, "send sig %d to %u: res=%d",
						fd->sig, pid_nr(fd->pid), res);
			}
		}
	}
	mutex_unlock(&sc->sem_fdata);

	/* wake up processes waiting in sis1100_irq_wait or doing select */
#ifdef __NetBSD__
	wakeup(&sc->remoteirq_wait);
	selwakeup(&sc->sel);
#elif __linux__
	wake_up_interruptible(&sc->remoteirq_wait);
#endif
}
int #else void #endif sis1100_irq_thread(void* data) { struct sis1100_softc* sc=(struct sis1100_softc*)data; enum handlercomm command; DECLARE_SPINLOCKFLAGS(flags); #ifdef __linux__ #if LINUX_VERSION_CODE < 0x20600 daemonize(); snprintf(current->comm, sizeof(current->comm), "sis1100_%02d", sc->unit); SPIN_LOCK_IRQSAVE(current->sigmask_lock, flags); sigemptyset(¤t->blocked); recalc_sigpending(current); SPIN_UNLOCK_IRQRESTORE(current->sigmask_lock, flags); #endif #endif /*__linux__*/ while (1) { #ifdef __NetBSD__ tsleep(&sc->handler_wait, PCATCH, "thread_vmeirq", 0); #elif __linux__ /* prepare to sleep */ __set_current_state(TASK_INTERRUPTIBLE); /* don't sleep if command!=0 */ if (sc->handlercommand.command) __set_current_state(TASK_RUNNING); else schedule(); #endif if (kthread_should_stop()) return 0; SPIN_LOCK_IRQSAVE(sc->handlercommand.lock, flags); command=sc->handlercommand.command; sc->handlercommand.command=0; SPIN_UNLOCK_IRQRESTORE(sc->handlercommand.lock, flags); #if 0 pERROR(sc, "irq_thread: command=0x%x", command); #endif _sis1100_irq_thread(sc, command); #ifdef __linux__ if (signal_pending (current)) { SPIN_LOCK_IRQSAVE(current->SIGMASK_LOCK, flags); flush_signals(current); SPIN_UNLOCK_IRQRESTORE(current->SIGMASK_LOCK, flags); } #endif /*__linux__*/ } #ifdef __linux__ return 0; #endif }