static void pvscsi_init_rings(void *iobase, struct pvscsi_ring_dsc_s **ring_dsc) { struct PVSCSICmdDescSetupRings cmd = {0,}; struct pvscsi_ring_dsc_s *dsc = memalign_low(sizeof(*dsc), PAGE_SIZE); if (!dsc) { warn_noalloc(); return; } dsc->ring_state = (struct PVSCSIRingsState *)memalign_low(PAGE_SIZE, PAGE_SIZE); dsc->ring_reqs = (struct PVSCSIRingReqDesc *)memalign_low(PAGE_SIZE, PAGE_SIZE); dsc->ring_cmps = (struct PVSCSIRingCmpDesc *)memalign_low(PAGE_SIZE, PAGE_SIZE); if (!dsc->ring_state || !dsc->ring_reqs || !dsc->ring_cmps) { warn_noalloc(); return; } memset(dsc->ring_state, 0, PAGE_SIZE); memset(dsc->ring_reqs, 0, PAGE_SIZE); memset(dsc->ring_cmps, 0, PAGE_SIZE); cmd.reqRingNumPages = 1; cmd.cmpRingNumPages = 1; cmd.ringsStatePPN = virt_to_phys(dsc->ring_state) >> PAGE_SHIFT; cmd.reqRingPPNs[0] = virt_to_phys(dsc->ring_reqs) >> PAGE_SHIFT; cmd.cmpRingPPNs[0] = virt_to_phys(dsc->ring_cmps) >> PAGE_SHIFT; pvscsi_write_cmd_desc(iobase, PVSCSI_CMD_SETUP_RINGS, &cmd, sizeof(cmd)); *ring_dsc = dsc; }
struct usb_pipe * ehci_alloc_bulk_pipe(struct usb_pipe *dummy) { // XXX - this func is same as alloc_control except for malloc_low if (! CONFIG_USB_EHCI) return NULL; struct usb_ehci_s *cntl = container_of( dummy->cntl, struct usb_ehci_s, usb); dprintf(7, "ehci_alloc_bulk_pipe %p\n", &cntl->usb); // Allocate a queue head. struct ehci_pipe *pipe = memalign_low(EHCI_QH_ALIGN, sizeof(*pipe)); if (!pipe) { warn_noalloc(); return NULL; } memset(pipe, 0, sizeof(*pipe)); memcpy(&pipe->pipe, dummy, sizeof(pipe->pipe)); pipe->qh.qtd_next = pipe->qh.alt_next = EHCI_PTR_TERM; // Add queue head to controller list. struct ehci_qh *async_qh = cntl->async_qh; pipe->qh.next = async_qh->next; barrier(); async_qh->next = (u32)&pipe->qh | EHCI_PTR_QH; return &pipe->pipe; }
int vp_find_vq(unsigned int ioaddr, int queue_index, struct vring_virtqueue **p_vq) { u16 num; ASSERT32FLAT(); struct vring_virtqueue *vq = *p_vq = memalign_low(PAGE_SIZE, sizeof(*vq)); if (!vq) { warn_noalloc(); goto fail; } memset(vq, 0, sizeof(*vq)); /* select the queue */ outw(queue_index, ioaddr + VIRTIO_PCI_QUEUE_SEL); /* check if the queue is available */ num = inw(ioaddr + VIRTIO_PCI_QUEUE_NUM); if (!num) { dprintf(1, "ERROR: queue size is 0\n"); goto fail; } if (num > MAX_QUEUE_NUM) { dprintf(1, "ERROR: queue size %d > %d\n", num, MAX_QUEUE_NUM); goto fail; } /* check if the queue is already active */ if (inl(ioaddr + VIRTIO_PCI_QUEUE_PFN)) { dprintf(1, "ERROR: queue already active\n"); goto fail; } vq->queue_index = queue_index; /* initialize the queue */ struct vring * vr = &vq->vring; vring_init(vr, num, (unsigned char*)&vq->queue); /* activate the queue * * NOTE: vr->desc is initialized by vring_init() */ outl((unsigned long)virt_to_phys(vr->desc) >> PAGE_SHIFT, ioaddr + VIRTIO_PCI_QUEUE_PFN); return num; fail: free(vq); *p_vq = NULL; return -1; }
struct usb_pipe * ehci_alloc_pipe(struct usbdevice_s *usbdev , struct usb_endpoint_descriptor *epdesc) { if (! CONFIG_USB_EHCI) return NULL; u8 eptype = epdesc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; if (eptype == USB_ENDPOINT_XFER_INT) return ehci_alloc_intr_pipe(usbdev, epdesc); struct usb_ehci_s *cntl = container_of( usbdev->hub->cntl, struct usb_ehci_s, usb); dprintf(7, "ehci_alloc_async_pipe %p %d\n", &cntl->usb, eptype); struct usb_pipe *usbpipe = usb_getFreePipe(&cntl->usb, eptype); if (usbpipe) { // Use previously allocated pipe. struct ehci_pipe *pipe = container_of(usbpipe, struct ehci_pipe, pipe); ehci_desc2pipe(pipe, usbdev, epdesc); return usbpipe; } // Allocate a new queue head. struct ehci_pipe *pipe; if (eptype == USB_ENDPOINT_XFER_CONTROL) pipe = memalign_tmphigh(EHCI_QH_ALIGN, sizeof(*pipe)); else pipe = memalign_low(EHCI_QH_ALIGN, sizeof(*pipe)); if (!pipe) { warn_noalloc(); return NULL; } memset(pipe, 0, sizeof(*pipe)); ehci_desc2pipe(pipe, usbdev, epdesc); pipe->qh.qtd_next = pipe->qh.alt_next = EHCI_PTR_TERM; // Add queue head to controller list. struct ehci_qh *async_qh = cntl->async_qh; pipe->qh.next = async_qh->next; barrier(); async_qh->next = (u32)&pipe->qh | EHCI_PTR_QH; return &pipe->pipe; }
// Allocate a pipe on the periodic (interrupt) schedule.  Builds a
// circular ring of qtds that continuously receive IN transfers into a
// dedicated data buffer, then links the queue head into the frame list
// at the endpoint's polling interval.
static struct usb_pipe *
ehci_alloc_intr_pipe(struct usbdevice_s *usbdev
                     , struct usb_endpoint_descriptor *epdesc)
{
    struct usb_ehci_s *cntl = container_of(
        usbdev->hub->cntl, struct usb_ehci_s, usb);
    int frameexp = usb_getFrameExp(usbdev, epdesc);
    dprintf(7, "ehci_alloc_intr_pipe %p %d\n", &cntl->usb, frameexp);

    if (frameexp > 10)
        frameexp = 10;  // cap the polling interval at 2^10 frames
    int maxpacket = epdesc->wMaxPacketSize;
    // Determine number of entries needed for 2 timer ticks.
    int ms = 1<<frameexp;
    int count = DIV_ROUND_UP(PIT_TICK_INTERVAL * 1000 * 2, PIT_TICK_RATE * ms);
    struct ehci_pipe *pipe = memalign_low(EHCI_QH_ALIGN, sizeof(*pipe));
    struct ehci_qtd *tds = memalign_low(EHCI_QTD_ALIGN, sizeof(*tds) * count);
    void *data = malloc_low(maxpacket * count);
    if (!pipe || !tds || !data) {
        warn_noalloc();
        goto fail;
    }
    memset(pipe, 0, sizeof(*pipe));
    ehci_desc2pipe(pipe, usbdev, epdesc);
    pipe->next_td = pipe->tds = tds;
    pipe->data = data;
    pipe->qh.qtd_next = (u32)tds;

    // Chain the qtds into a ring (last one points back to the first);
    // each qtd reads one maxpacket-sized IN transfer into its own
    // slice of 'data'.
    int i;
    for (i=0; i<count; i++) {
        struct ehci_qtd *td = &tds[i];
        td->qtd_next = (i==count-1 ? (u32)tds : (u32)&td[1]);
        td->alt_next = EHCI_PTR_TERM;
        td->token = (ehci_explen(maxpacket) | QTD_STS_ACTIVE
                     | QTD_PID_IN | ehci_maxerr(3));
        td->buf[0] = (u32)data + maxpacket * i;
    }

    // Add to interrupt schedule.
    struct ehci_framelist *fl = (void*)readl(&cntl->regs->periodiclistbase);
    if (frameexp == 0) {
        // Add to existing interrupt entry.
        struct ehci_qh *intr_qh = (void*)(fl->links[0] & ~EHCI_PTR_BITS);
        pipe->qh.next = intr_qh->next;
        // Barrier: qh.next must be visible before the qh is linked in.
        barrier();
        intr_qh->next = (u32)&pipe->qh | EHCI_PTR_QH;
    } else {
        // Link the qh into every ms'th frame list slot, starting at an
        // offset to spread load across polling intervals.
        int startpos = 1<<(frameexp-1);
        pipe->qh.next = fl->links[startpos];
        barrier();
        for (i=startpos; i<ARRAY_SIZE(fl->links); i+=ms)
            fl->links[i] = (u32)&pipe->qh | EHCI_PTR_QH;
    }
    return &pipe->pipe;
fail:
    // free() tolerates NULL, so partial allocations are safe to release.
    free(pipe);
    free(tds);
    free(data);
    return NULL;
}
// Allocate a pipe on the periodic (interrupt) schedule from a template
// pipe.  Builds the qh info fields manually, creates a circular ring of
// qtds receiving IN transfers into a dedicated buffer, and links the qh
// into the frame list at the requested 2^frameexp interval.
struct usb_pipe *
ehci_alloc_intr_pipe(struct usb_pipe *dummy, int frameexp)
{
    if (! CONFIG_USB_EHCI)
        return NULL;
    struct usb_ehci_s *cntl = container_of(
        dummy->cntl, struct usb_ehci_s, usb);
    dprintf(7, "ehci_alloc_intr_pipe %p %d\n", &cntl->usb, frameexp);

    if (frameexp > 10)
        frameexp = 10;  // cap the polling interval at 2^10 frames
    int maxpacket = dummy->maxpacket;
    // Determine number of entries needed for 2 timer ticks.
    int ms = 1<<frameexp;
    int count = DIV_ROUND_UP(PIT_TICK_INTERVAL * 1000 * 2, PIT_TICK_RATE * ms);
    struct ehci_pipe *pipe = memalign_low(EHCI_QH_ALIGN, sizeof(*pipe));
    struct ehci_qtd *tds = memalign_low(EHCI_QTD_ALIGN, sizeof(*tds) * count);
    void *data = malloc_low(maxpacket * count);
    if (!pipe || !tds || !data) {
        warn_noalloc();
        goto fail;
    }
    memset(pipe, 0, sizeof(*pipe));
    memcpy(&pipe->pipe, dummy, sizeof(pipe->pipe));
    pipe->next_td = pipe->tds = tds;
    pipe->data = data;

    // Pack endpoint address/speed/packet-size into the qh info words.
    pipe->qh.info1 = (
        (1 << QH_MULT_SHIFT)
        | (maxpacket << QH_MAXPACKET_SHIFT)
        | (pipe->pipe.speed << QH_SPEED_SHIFT)
        | (pipe->pipe.ep << QH_EP_SHIFT)
        | (pipe->pipe.devaddr << QH_DEVADDR_SHIFT));
    // Transaction-translator routing plus start/complete split masks.
    pipe->qh.info2 = ((1 << QH_MULT_SHIFT)
                      | (pipe->pipe.tt_port << QH_HUBPORT_SHIFT)
                      | (pipe->pipe.tt_devaddr << QH_HUBADDR_SHIFT)
                      | (0x01 << QH_SMASK_SHIFT)
                      | (0x1c << QH_CMASK_SHIFT));
    pipe->qh.qtd_next = (u32)tds;

    // Chain the qtds into a ring (last one points back to the first);
    // each qtd reads one maxpacket-sized IN transfer into its own
    // slice of 'data'.
    int i;
    for (i=0; i<count; i++) {
        struct ehci_qtd *td = &tds[i];
        td->qtd_next = (i==count-1 ? (u32)tds : (u32)&td[1]);
        td->alt_next = EHCI_PTR_TERM;
        td->token = (ehci_explen(maxpacket) | QTD_STS_ACTIVE
                     | QTD_PID_IN | ehci_maxerr(3));
        td->buf[0] = (u32)data + maxpacket * i;
    }

    // Add to interrupt schedule.
    struct ehci_framelist *fl = (void*)readl(&cntl->regs->periodiclistbase);
    if (frameexp == 0) {
        // Add to existing interrupt entry.
        struct ehci_qh *intr_qh = (void*)(fl->links[0] & ~EHCI_PTR_BITS);
        pipe->qh.next = intr_qh->next;
        // Barrier: qh.next must be visible before the qh is linked in.
        barrier();
        intr_qh->next = (u32)&pipe->qh | EHCI_PTR_QH;
    } else {
        // Link the qh into every ms'th frame list slot, starting at an
        // offset to spread load across polling intervals.
        int startpos = 1<<(frameexp-1);
        pipe->qh.next = fl->links[startpos];
        barrier();
        for (i=startpos; i<ARRAY_SIZE(fl->links); i+=ms)
            fl->links[i] = (u32)&pipe->qh | EHCI_PTR_QH;
    }
    return &pipe->pipe;
fail:
    // free() tolerates NULL, so partial allocations are safe to release.
    free(pipe);
    free(tds);
    free(data);
    return NULL;
}