static bool delta_mjpeg_check_status(struct delta_ctx *pctx,
				     struct jpeg_decode_return_params_t *status)
{
	struct delta_dev *delta = pctx->dev;
	bool dump = false;

	if (status->error_code == JPEG_DECODER_NO_ERROR)
		goto out;

	if (is_stream_error(status->error_code)) {
		dev_warn_ratelimited(delta->dev,
				     "%s firmware: stream error @ frame %d (%s)\n",
				     pctx->name, pctx->decoded_frames,
				     err_str(status->error_code));
		pctx->stream_errors++;
	} else {
		dev_warn_ratelimited(delta->dev,
				     "%s firmware: decode error @ frame %d (%s)\n",
				     pctx->name, pctx->decoded_frames,
				     err_str(status->error_code));
		pctx->decode_errors++;
		dump = true;
	}

out:
	dev_dbg(delta->dev, "%s firmware: decoding time(us)=%d\n", pctx->name,
		status->decode_time_in_us);

	return dump;
}

int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 val)
{
	void __iomem *addr;
	u32 mask, tmp;

	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 4) {
		writel(val, addr);
		return PCIBIOS_SUCCESSFUL;
	}

	/*
	 * In general, hardware that supports only 32-bit writes on PCI is
	 * not spec-compliant.  For example, software may perform a 16-bit
	 * write.  If the hardware only supports 32-bit accesses, we must
	 * do a 32-bit read, merge in the 16 bits we intend to write,
	 * followed by a 32-bit write.  If the 16 bits we *don't* intend to
	 * write happen to have any RW1C (write-one-to-clear) bits set, we
	 * just inadvertently cleared something we shouldn't have.
	 */
	dev_warn_ratelimited(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
			     size, pci_domain_nr(bus), bus->number,
			     PCI_SLOT(devfn), PCI_FUNC(devfn), where);

	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
	tmp = readl(addr) & mask;
	tmp |= val << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}

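/*
 * Illustration only, not part of the driver above: a minimal, self-contained
 * sketch of the read-modify-write merge that pci_generic_config_write32()
 * performs for sub-word writes.  The helper name merge_subword_write() and
 * the register value are invented for this example; it assumes size < 4,
 * as in the sub-word path of the real function.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t merge_subword_write(uint32_t reg, int where, int size,
				    uint32_t val)
{
	/* Clear only the byte lanes being written, keep the rest. */
	uint32_t mask = ~(((1u << (size * 8)) - 1) << ((where & 0x3) * 8));

	/* Merge the new value into its byte lanes. */
	return (reg & mask) | (val << ((where & 0x3) * 8));
}

int main(void)
{
	/* A 16-bit write of 0xBEEF at offset 2 into a register reading
	 * 0x11223344: prints 0xbeef3344.  The untouched low 16 bits are
	 * written back as read, which is exactly why any set RW1C bits in
	 * those lanes would be cleared as a side effect on real hardware. */
	printf("0x%08x\n",
	       (unsigned int)merge_subword_write(0x11223344, 2, 2, 0xBEEF));
	return 0;
}
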
static int submit_rx_urb(struct atusb *atusb, struct urb *urb)
{
	struct usb_device *usb_dev = atusb->usb_dev;
	struct sk_buff *skb = urb->context;
	int ret;

	if (!skb) {
		skb = alloc_skb(MAX_RX_XFER, GFP_KERNEL);
		if (!skb) {
			dev_warn_ratelimited(&usb_dev->dev,
					     "atusb_in: can't allocate skb\n");
			return -ENOMEM;
		}
		skb_put(skb, MAX_RX_XFER);
		SKB_ATUSB(skb) = atusb;
	}

	usb_fill_bulk_urb(urb, usb_dev, usb_rcvbulkpipe(usb_dev, 1),
			  skb->data, MAX_RX_XFER, atusb_in, skb);
	usb_anchor_urb(urb, &atusb->rx_urbs);

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		usb_unanchor_urb(urb);
		kfree_skb(skb);
		urb->context = NULL;
	}
	return ret;
}

static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev,
				   struct packet_type *pt)
{
	u16 *lan9303_tag;
	u16 lan9303_tag1;
	unsigned int source_port;

	if (unlikely(!pskb_may_pull(skb, LAN9303_TAG_LEN))) {
		dev_warn_ratelimited(&dev->dev,
				     "Dropping packet, cannot pull\n");
		return NULL;
	}

	/* '->data' points into the middle of our special VLAN tag information:
	 *
	 * ~ MAC src   | 0x81 | 0x00 | 0xyy | 0xzz | ether type
	 *                           ^
	 *                        ->data
	 */
	lan9303_tag = (u16 *)(skb->data - 2);

	if (lan9303_tag[0] != htons(ETH_P_8021Q)) {
		dev_warn_ratelimited(&dev->dev,
				     "Dropping packet due to invalid VLAN marker\n");
		return NULL;
	}

	lan9303_tag1 = ntohs(lan9303_tag[1]);
	source_port = lan9303_tag1 & 0x3;

	skb->dev = dsa_master_find_slave(dev, 0, source_port);
	if (!skb->dev) {
		dev_warn_ratelimited(&dev->dev,
				     "Dropping packet due to invalid source port\n");
		return NULL;
	}

	/* remove the special VLAN tag between the MAC addresses
	 * and the current ethertype field.
	 */
	skb_pull_rcsum(skb, 2 + 2);
	memmove(skb->data - ETH_HLEN, skb->data - (ETH_HLEN + LAN9303_TAG_LEN),
		2 * ETH_ALEN);

	skb->offload_fwd_mark = !(lan9303_tag1 & LAN9303_TAG_RX_TRAPPED_TO_CPU);

	return skb;
}

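/*
 * Illustration only, not part of the tagger: a simplified, self-contained
 * sketch of the memmove() in lan9303_rcv() on a plain byte array.  The frame
 * contents are made up and skb bookkeeping is not modelled; only the idea is
 * shown: both MAC addresses are slid 4 bytes towards the ethertype, which
 * removes the tag in place, so the untagged frame starts 4 bytes later.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN	6
#define TAG_LEN		4

int main(void)
{
	uint8_t frame[] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* dst MAC */
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01,	/* src MAC */
		0x81, 0x00, 0x00, 0x01,			/* 4-byte switch tag */
		0x08, 0x00,				/* real ethertype */
	};
	/* Point just past the tag, i.e. at the real ethertype. */
	uint8_t *data = frame + 2 * ETH_ALEN + TAG_LEN;

	/* Move the 12 MAC-address bytes up by TAG_LEN, overwriting the tag. */
	memmove(data - 2 * ETH_ALEN, data - (2 * ETH_ALEN + TAG_LEN),
		2 * ETH_ALEN);

	/* The untagged frame: dst MAC, src MAC, ethertype. */
	for (size_t i = TAG_LEN; i < sizeof(frame); i++)
		printf("%02x ", (unsigned int)frame[i]);
	putchar('\n');
	return 0;
}
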
static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
{
	unsigned int sts, ch, flag;

	if (unlikely(rxlen >= port->fifosize)) {
		dev_warn_ratelimited(port->dev,
				     "Port %i: Possible RX FIFO overrun\n",
				     port->line);
		port->icount.buf_overrun++;
		/* Ensure sanity of RX level */
		rxlen = port->fifosize;
	}

	while (rxlen--) {
		ch = max310x_port_read(port, MAX310X_RHR_REG);
		sts = max310x_port_read(port, MAX310X_LSR_IRQSTS_REG);

		sts &= MAX310X_LSR_RXPAR_BIT | MAX310X_LSR_FRERR_BIT |
		       MAX310X_LSR_RXOVR_BIT | MAX310X_LSR_RXBRK_BIT;

		port->icount.rx++;
		flag = TTY_NORMAL;

		if (unlikely(sts)) {
			if (sts & MAX310X_LSR_RXBRK_BIT) {
				port->icount.brk++;
				if (uart_handle_break(port))
					continue;
			} else if (sts & MAX310X_LSR_RXPAR_BIT)
				port->icount.parity++;
			else if (sts & MAX310X_LSR_FRERR_BIT)
				port->icount.frame++;
			else if (sts & MAX310X_LSR_RXOVR_BIT)
				port->icount.overrun++;

			sts &= port->read_status_mask;
			if (sts & MAX310X_LSR_RXBRK_BIT)
				flag = TTY_BREAK;
			else if (sts & MAX310X_LSR_RXPAR_BIT)
				flag = TTY_PARITY;
			else if (sts & MAX310X_LSR_FRERR_BIT)
				flag = TTY_FRAME;
			else if (sts & MAX310X_LSR_RXOVR_BIT)
				flag = TTY_OVERRUN;
		}

		if (uart_handle_sysrq_char(port, ch))
			continue;

		if (sts & port->ignore_status_mask)
			continue;

		uart_insert_char(port, sts, MAX310X_LSR_RXOVR_BIT, ch, flag);
	}

	tty_flip_buffer_push(&port->state->port);
}

static int ucsi_reset_ppm(struct ucsi *ucsi)
{
	struct ucsi_control ctrl;
	unsigned long tmo;
	int ret;

	ctrl.raw_cmd = 0;
	ctrl.cmd.cmd = UCSI_PPM_RESET;
	trace_ucsi_command(&ctrl);
	ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl);
	if (ret)
		goto err;

	tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);

	do {
		/* Here sync is critical. */
		ret = ucsi_sync(ucsi);
		if (ret)
			goto err;

		if (ucsi->ppm->data->cci.reset_complete)
			break;

		/* If the PPM is still doing something else, reset it again. */
		if (ucsi->ppm->data->raw_cci) {
			dev_warn_ratelimited(ucsi->dev,
					     "Failed to reset PPM! Trying again..\n");

			trace_ucsi_command(&ctrl);
			ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl);
			if (ret)
				goto err;
		}

		/* Letting the PPM settle down. */
		msleep(20);

		ret = -ETIMEDOUT;
	} while (time_is_after_jiffies(tmo));

err:
	trace_ucsi_reset_ppm(&ctrl, ret);

	return ret;
}

/**
 * ti_abb_wait_txdone() - waits for ABB tranxdone event
 * @dev:	device
 * @abb:	pointer to the abb instance
 *
 * Return: 0 on success or -ETIMEDOUT if the event is not cleared on time.
 */
static int ti_abb_wait_txdone(struct device *dev, struct ti_abb *abb)
{
	int timeout = 0;
	bool status;

	while (timeout++ <= abb->settling_time) {
		status = ti_abb_check_txdone(abb);
		if (status)
			break;

		udelay(1);
	}

	if (timeout > abb->settling_time) {
		dev_warn_ratelimited(dev,
				     "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
				     __func__, timeout, readl(abb->int_base));
		return -ETIMEDOUT;
	}

	return 0;
}

static int atusb_xmit(struct ieee802154_dev *wpan_dev, struct sk_buff *skb)
{
	struct atusb *atusb = wpan_dev->priv;
	struct usb_device *usb_dev = atusb->usb_dev;
	unsigned long flags;
	int ret;

	dev_dbg(&usb_dev->dev, "atusb_xmit (%d)\n", skb->len);

	if (down_trylock(&atusb->tx_sem)) {
		dev_dbg(&usb_dev->dev, "atusb_xmit busy\n");
		return -EBUSY;
	}

	INIT_COMPLETION(atusb->tx_complete);

	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      ATUSB_TX, ATUSB_REQ_TO_DEV, 0, atusb->tx_ack_seq,
			      skb->data, skb->len, 1000);
	if (ret < 0) {
		dev_warn_ratelimited(&usb_dev->dev,
				     "ATUSB_TX failed, error %d\n", ret);
		goto done;
	}

	ret = wait_for_completion_interruptible_timeout(
			&atusb->tx_complete,
			msecs_to_jiffies(TX_TIMEOUT_MS));
	if (!ret)
		ret = -ETIMEDOUT;
	if (ret > 0)
		ret = 0;

done:
	spin_lock_irqsave(&atusb->lock, flags);
	atusb->tx_ack_seq++;
	spin_unlock_irqrestore(&atusb->lock, flags);

	up(&atusb->tx_sem);
	dev_dbg(&usb_dev->dev, "atusb_xmit done (%d)\n", ret);
	return ret;
}

static void work_urbs(struct work_struct *work)
{
	struct atusb *atusb =
	    container_of(to_delayed_work(work), struct atusb, work);
	struct usb_device *usb_dev = atusb->usb_dev;
	struct urb *urb;
	int ret;

	if (atusb->shutdown)
		return;

	do {
		urb = usb_get_from_anchor(&atusb->idle_urbs);
		if (!urb)
			return;
		ret = submit_rx_urb(atusb, urb);
	} while (!ret);

	usb_anchor_urb(urb, &atusb->idle_urbs);
	dev_warn_ratelimited(&usb_dev->dev,
			     "atusb_in: can't allocate/submit URB (%d)\n", ret);
	schedule_delayed_work(&atusb->work,
			      msecs_to_jiffies(ALLOC_DELAY_MS) + 1);
}

static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
				unsigned int iir)
{
	struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
	unsigned int lsr = 0, ch, flag, bytes_read, i;
	bool read_lsr = (iir == SC16IS7XX_IIR_RLSE_SRC) ? true : false;

	if (unlikely(rxlen >= sizeof(s->buf))) {
		dev_warn_ratelimited(port->dev,
				     "Port %i: Possible RX FIFO overrun: %d\n",
				     port->line, rxlen);
		port->icount.buf_overrun++;
		/* Ensure sanity of RX level */
		rxlen = sizeof(s->buf);
	}

	while (rxlen) {
		/* Only read lsr if there are possible errors in FIFO */
		if (read_lsr) {
			lsr = sc16is7xx_port_read(port, SC16IS7XX_LSR_REG);
			if (!(lsr & SC16IS7XX_LSR_FIFOE_BIT))
				read_lsr = false; /* No errors left in FIFO */
		} else
			lsr = 0;

		if (read_lsr) {
			s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG);
			bytes_read = 1;
		} else {
			regcache_cache_bypass(s->regmap, true);
			regmap_raw_read(s->regmap, SC16IS7XX_RHR_REG,
					s->buf, rxlen);
			regcache_cache_bypass(s->regmap, false);
			bytes_read = rxlen;
		}

		lsr &= SC16IS7XX_LSR_BRK_ERROR_MASK;

		port->icount.rx++;
		flag = TTY_NORMAL;

		if (unlikely(lsr)) {
			if (lsr & SC16IS7XX_LSR_BI_BIT) {
				port->icount.brk++;
				if (uart_handle_break(port))
					continue;
			} else if (lsr & SC16IS7XX_LSR_PE_BIT)
				port->icount.parity++;
			else if (lsr & SC16IS7XX_LSR_FE_BIT)
				port->icount.frame++;
			else if (lsr & SC16IS7XX_LSR_OE_BIT)
				port->icount.overrun++;

			lsr &= port->read_status_mask;
			if (lsr & SC16IS7XX_LSR_BI_BIT)
				flag = TTY_BREAK;
			else if (lsr & SC16IS7XX_LSR_PE_BIT)
				flag = TTY_PARITY;
			else if (lsr & SC16IS7XX_LSR_FE_BIT)
				flag = TTY_FRAME;
			else if (lsr & SC16IS7XX_LSR_OE_BIT)
				flag = TTY_OVERRUN;
		}

		for (i = 0; i < bytes_read; ++i) {
			ch = s->buf[i];
			if (uart_handle_sysrq_char(port, ch))
				continue;

			if (lsr & port->ignore_status_mask)
				continue;

			uart_insert_char(port, lsr, SC16IS7XX_LSR_OE_BIT,
					 ch, flag);
		}
		rxlen -= bytes_read;
	}

	tty_flip_buffer_push(&port->state->port);
}

static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
{
	struct serial_state *state = tty->driver_data;
	struct tty_port *port = &state->tport;
	bool change_spd;
	int retval = 0;

	tty_lock(tty);
	change_spd = ((ss->flags ^ port->flags) & ASYNC_SPD_MASK) ||
		ss->custom_divisor != state->custom_divisor;
	if (ss->irq || ss->port != state->port ||
	    ss->xmit_fifo_size != state->xmit_fifo_size) {
		tty_unlock(tty);
		return -EINVAL;
	}

	if (!serial_isroot()) {
		if ((ss->baud_base != state->baud_base) ||
		    (ss->close_delay != port->close_delay) ||
		    (ss->xmit_fifo_size != state->xmit_fifo_size) ||
		    ((ss->flags & ~ASYNC_USR_MASK) !=
		     (port->flags & ~ASYNC_USR_MASK))) {
			tty_unlock(tty);
			return -EPERM;
		}
		port->flags = ((port->flags & ~ASYNC_USR_MASK) |
			       (ss->flags & ASYNC_USR_MASK));
		state->custom_divisor = ss->custom_divisor;
		goto check_and_exit;
	}

	if (ss->baud_base < 9600) {
		tty_unlock(tty);
		return -EINVAL;
	}

	/*
	 * OK, past this point, all the error checking has been done.
	 * At this point, we start making changes.....
	 */

	state->baud_base = ss->baud_base;
	port->flags = ((port->flags & ~ASYNC_FLAGS) |
		       (ss->flags & ASYNC_FLAGS));
	state->custom_divisor = ss->custom_divisor;
	port->close_delay = ss->close_delay * HZ/100;
	port->closing_wait = ss->closing_wait * HZ/100;
	port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;

check_and_exit:
	if (tty_port_initialized(port)) {
		if (change_spd) {
			/* warn about deprecation unless clearing */
			if (ss->flags & ASYNC_SPD_MASK)
				dev_warn_ratelimited(tty->dev,
						     "use of SPD flags is deprecated\n");
			change_speed(tty, state, NULL);
		}
	} else
		retval = startup(tty, state);
	tty_unlock(tty);
	return retval;
}

static int omap_8250_startup(struct uart_port *port)
{
	struct uart_8250_port *up = up_to_u8250p(port);
	struct omap8250_priv *priv = port->private_data;
	int ret;

	if (priv->wakeirq) {
		ret = dev_pm_set_dedicated_wake_irq(port->dev, priv->wakeirq);
		if (ret)
			return ret;
	}

	pm_runtime_get_sync(port->dev);

	up->mcr = 0;
	serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);

	serial_out(up, UART_LCR, UART_LCR_WLEN8);

	up->lsr_saved_flags = 0;
	up->msr_saved_flags = 0;

	/* Disable DMA for console UART */
	if (uart_console(port))
		up->dma = NULL;

	if (up->dma) {
		ret = serial8250_request_dma(up);
		if (ret) {
			dev_warn_ratelimited(port->dev,
					     "failed to request DMA\n");
			up->dma = NULL;
		}
	}

	ret = request_irq(port->irq, omap8250_irq, IRQF_SHARED,
			  dev_name(port->dev), port);
	if (ret < 0)
		goto err;

	up->ier = UART_IER_RLSI | UART_IER_RDI;
	serial_out(up, UART_IER, up->ier);

#ifdef CONFIG_PM
	up->capabilities |= UART_CAP_RPM;
#endif

	/* Enable module level wake up */
	priv->wer = OMAP_UART_WER_MOD_WKUP;
	if (priv->habit & OMAP_UART_WER_HAS_TX_WAKEUP)
		priv->wer |= OMAP_UART_TX_WAKEUP_EN;
	serial_out(up, UART_OMAP_WER, priv->wer);

	if (up->dma)
		up->dma->rx_dma(up);

	pm_runtime_mark_last_busy(port->dev);
	pm_runtime_put_autosuspend(port->dev);
	return 0;
err:
	pm_runtime_mark_last_busy(port->dev);
	pm_runtime_put_autosuspend(port->dev);
	dev_pm_clear_wake_irq(port->dev);
	return ret;
}