/*
 * Synchronously perform one USB interrupt transfer on 'pipe'.
 *
 * Blocks (without timeout) until the host controller completes the
 * URB. Returns a negative URB status on transfer failure, a submit
 * error code if the URB could not be queued, or VMM_OK on success.
 */
int usb_interrupt_msg(struct usb_device *dev, u32 pipe, void *data, int len, int interval)
{
	struct urb u;
	struct vmm_completion uc;
	int err;

	/* Prepare the URB and the completion it will signal */
	usb_init_urb(&u);
	INIT_COMPLETION(&uc);

	/* Build an interrupt URB that signals 'uc' when done */
	usb_fill_int_urb(&u, dev, pipe, data, len,
			 urb_request_complete, &uc, interval);

	/* Hand the URB over to the host controller */
	err = usb_hcd_submit_urb(&u);
	if (err) {
		return err;
	}

	/* Block until the transfer finishes */
	vmm_completion_wait(&uc);

	/* Propagate any URB-level failure */
	return (u.status < 0) ? u.status : VMM_OK;
}
static int spi_imx_transfer(struct spi_device *spi, struct spi_transfer *transfer) { struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); u64 timeout = 100; spi_imx->tx_buf = transfer->tx_buf; spi_imx->rx_buf = transfer->rx_buf; spi_imx->count = transfer->len; spi_imx->txfifo = 0; init_completion(&spi_imx->xfer_done); spi_imx_push(spi_imx); spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE); vmm_completion_wait_timeout(&spi_imx->xfer_done, &timeout); if (!timeout) { spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE); vmm_completion_wait(&spi_imx->xfer_done); } return transfer->len; }
/*
 * Synchronously read or write 'bcnt' blocks at device-relative 'lba'.
 *
 * Builds a request whose callbacks signal 'rw.done', submits it, and
 * sleeps until either the completed or failed callback fires. Returns
 * the submit error, VMM_EFAIL on a failed request, or VMM_OK.
 */
static int blockdev_rw_blocks(struct vmm_blockdev *bdev, enum vmm_request_type type, u8 *buf, u64 lba, u64 bcnt)
{
	struct blockdev_rw rw;
	int err;

	rw.failed = FALSE;
	rw.req.type = type;
	rw.req.lba = bdev->start_lba + lba;	/* device-relative -> absolute LBA */
	rw.req.bcnt = bcnt;
	rw.req.data = buf;
	rw.req.priv = &rw;
	rw.req.completed = blockdev_rw_completed;
	rw.req.failed = blockdev_rw_failed;
	INIT_COMPLETION(&rw.done);

	err = vmm_blockdev_submit_request(bdev, &rw.req);
	if (err) {
		return err;
	}

	/* Block until either callback fires */
	vmm_completion_wait(&rw.done);

	return rw.failed ? VMM_EFAIL : VMM_OK;
}
/*
 * Synchronously perform one USB control transfer on 'pipe'.
 *
 * Builds the SETUP packet (value/index/length converted to
 * little-endian), submits a control URB, and waits for it. A timeout
 * below 1 means wait forever; otherwise 'timeout' is taken as
 * milliseconds. Returns a submit/wait error, a negative URB status,
 * or VMM_OK.
 */
int usb_control_msg(struct usb_device *dev, u32 pipe, u8 request, u8 requesttype, u16 value, u16 index, void *data, u16 size, int timeout)
{
	struct usb_devrequest setup_packet;
	struct vmm_completion uc;
	struct urb u;
	u64 nsecs;
	int err;

	/* Build the SETUP packet; multi-byte fields are little-endian */
	setup_packet.requesttype = requesttype;
	setup_packet.request = request;
	setup_packet.value = vmm_cpu_to_le16(value);
	setup_packet.index = vmm_cpu_to_le16(index);
	setup_packet.length = vmm_cpu_to_le16(size);

	DPRINTF("%s: request: 0x%X, requesttype: 0x%X, " \
		"value 0x%X index 0x%X length 0x%X\n",
		__func__, request, requesttype, value, index, size);

	/* Prepare the URB and the completion it will signal */
	usb_init_urb(&u);
	INIT_COMPLETION(&uc);

	/* Build a control URB that signals 'uc' when done */
	usb_fill_control_urb(&u, dev, pipe,
			     (unsigned char *)&setup_packet, data, size,
			     urb_request_complete, &uc);

	/* Hand the URB over to the host controller */
	err = usb_hcd_submit_urb(&u);
	if (err) {
		return err;
	}

	/* timeout < 1 means wait forever, else milliseconds -> nanoseconds */
	if (timeout < 1) {
		vmm_completion_wait(&uc);
		err = VMM_OK;
	} else {
		nsecs = timeout * 1000000ULL;
		err = vmm_completion_wait_timeout(&uc, &nsecs);
	}
	if (err) {
		return err;
	}

	/* Propagate any URB-level failure */
	return (u.status < 0) ? u.status : VMM_OK;
}
/*
 * Wait until config-space access to 'dev' is unblocked.
 *
 * Called with pci_lock held; the lock is dropped around each wait so
 * the unblocking side can acquire it, then re-taken before re-checking
 * dev->block_cfg_access.
 *
 * NOTE(review): 'wait' is declared as a waitqueue entry but is passed
 * to init_waitqueue_head() and never enqueued anywhere; the actual
 * blocking happens on the global __pci_cfg_completion instead. This
 * mix of waitqueue and completion APIs looks like a porting leftover —
 * confirm it is intentional.
 */
static noinline void pci_wait_cfg(struct pci_dev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	init_waitqueue_head(&wait);
	do {
		/* Drop the lock so whoever clears block_cfg_access can take it */
		raw_spin_unlock_irq(&pci_lock);
		vmm_completion_wait(&__pci_cfg_completion);
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
}
/*
 * Sleep until the i.MX UART TX FIFO has room, then write 'ch' to it.
 *
 * Fixes vs. previous version:
 *  - The comment said "Enable the RX interrupt" while the code sets
 *    UCR1_TRDYEN, which is the TX-ready enable bit.
 *  - The FIFO check was a single 'if': if the FIFO filled up again
 *    between the completion being signalled and this thread running,
 *    the byte could be written into a full FIFO. Re-check in a loop,
 *    matching imx_getc_sleepable().
 */
static void imx_putc_sleepable(struct imx_port *port, u8 ch)
{
	/* Wait until there is space in the FIFO */
	while (!imx_lowlevel_can_putc(port->base)) {
		/* Enable the TX ready interrupt */
		port->mask |= UCR1_TRDYEN;
		vmm_writel(port->mask, (void *)port->base + UCR1);

		/* Wait for completion */
		vmm_completion_wait(&port->write_possible);
	}

	/* Write data to FIFO */
	imx_lowlevel_putc(port->base, ch);
}
/*
 * Sleep until the i.MX UART RX FIFO holds data, then read one byte.
 *
 * While the FIFO is empty, enables the RX ready interrupt (UCR1_RRDYEN)
 * and sleeps on 'read_possible'; re-checks after every wakeup.
 */
static u8 imx_getc_sleepable(struct imx_port *port)
{
	for (;;) {
		/* Stop waiting once the FIFO has data */
		if (imx_lowlevel_can_getc(port->base)) {
			break;
		}

		/* Enable the RX ready interrupt */
		port->mask |= UCR1_RRDYEN;
		vmm_writel(port->mask, (void *)port->base + UCR1);

		/* Wait for completion */
		vmm_completion_wait(&port->read_possible);
	}

	/* Fetch one byte from the FIFO */
	return imx_lowlevel_getc(port->base);
}
/*
 * Synchronously perform one USB bulk transfer on 'pipe'.
 *
 * A timeout below 1 means wait forever; otherwise 'timeout' is taken
 * as milliseconds. On success, stores the number of bytes actually
 * transferred via 'actual_length' (if non-NULL) and returns VMM_OK;
 * otherwise returns a submit/wait error or a negative URB status.
 */
int usb_bulk_msg(struct usb_device *dev, u32 pipe, void *data, int len, int *actual_length, int timeout)
{
	struct vmm_completion uc;
	struct urb u;
	u64 nsecs;
	int err;

	/* Prepare the URB and the completion it will signal */
	usb_init_urb(&u);
	INIT_COMPLETION(&uc);

	/* Build a bulk URB that signals 'uc' when done */
	usb_fill_bulk_urb(&u, dev, pipe, data, len,
			  urb_request_complete, &uc);

	/* Hand the URB over to the host controller */
	err = usb_hcd_submit_urb(&u);
	if (err) {
		return err;
	}

	/* timeout < 1 means wait forever, else milliseconds -> nanoseconds */
	if (timeout < 1) {
		vmm_completion_wait(&uc);
		err = VMM_OK;
	} else {
		nsecs = timeout * 1000000ULL;
		err = vmm_completion_wait_timeout(&uc, &nsecs);
	}
	if (err) {
		return err;
	}

	/* Propagate any URB-level failure */
	if (u.status < 0) {
		return u.status;
	}

	/* Report how many bytes actually moved, if requested */
	if (actual_length) {
		*actual_length = u.actual_length;
	}

	return VMM_OK;
}
/*
 * Sleep until the OMAP UART TX FIFO has room, then write 'ch' to it.
 *
 * Fix: the FIFO check was a single 'if'. If the FIFO filled up again
 * between the completion being signalled and this thread running, the
 * byte could be written into a full FIFO. Re-check in a loop and
 * re-arm the TX interrupt each time, like the i.MX getc path does.
 */
static void omap_uart_putc_sleepable(struct omap_uart_port *port, u8 ch)
{
	/* Wait until there is space in the FIFO */
	while (!omap_uart_lowlevel_can_putc(port->base, port->reg_align)) {
		/* Enable the TX interrupt */
		port->ier |= UART_IER_THRI;
		omap_serial_out(port, UART_IER, port->ier);

		/* Wait for completion */
		vmm_completion_wait(&port->write_possible);
	}

	/* Write data to FIFO */
	omap_serial_out(port, UART_THR, ch);
}
static u8 omap_uart_getc_sleepable(struct omap_uart_port *port) { /* Wait until there is data in the FIFO */ if (!(omap_serial_in(port, UART_LSR) & UART_LSR_DR)) { /* Enable the RX interrupt */ port->ier |= (UART_IER_RDI | UART_IER_RLSI); omap_serial_out(port, UART_IER, port->ier); /* Wait for completion */ vmm_completion_wait(&port->read_possible); } /* Read data to destination */ return (omap_serial_in(port, UART_RBR)); }
static int mutex4_do_test(struct vmm_chardev *cdev) { int done_count = 0; /* Initialize work done completion */ INIT_COMPLETION(&work_done); /* Acquire mutex1 */ vmm_mutex_lock(&mutex1); /* Start workers */ vmm_threads_start(workers[0]); vmm_threads_start(workers[1]); vmm_threads_start(workers[2]); vmm_threads_start(workers[3]); vmm_msleep(SLEEP_MSECS*40); /* Release mutex1 */ vmm_mutex_unlock(&mutex1); /* Wait for workers to complete */ do { if (done_count == NUM_THREADS) { break; } vmm_completion_wait(&work_done); done_count++; } while (1); /* Stop workers */ vmm_threads_stop(workers[3]); vmm_threads_stop(workers[2]); vmm_threads_stop(workers[1]); vmm_threads_stop(workers[0]); return 0; }