Example #1
0
static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct msg msg;
	int buffs = RX_DESCS; /* allocated RX buffers */
	int i;

	ports_open--;
	qmgr_disable_irq(port->plat->rxq);
	napi_disable(&port->napi);
	netif_stop_queue(dev);

	while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
		buffs--;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_SETLOOPBACK_MODE;
	msg.eth_id = port->id;
	msg.byte3 = 1;
	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);

	i = 0;
	do {			/* drain RX buffers */
		while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
			buffs--;
		if (!buffs)
			break;
		if (qmgr_stat_empty(TX_QUEUE(port->id))) {
			/* we have to inject some packet */
			struct desc *desc;
			u32 phys;
			int n = queue_get_desc(port->plat->txreadyq, port, 1);
			BUG_ON(n < 0);
			desc = tx_desc_ptr(port, n);
			phys = tx_desc_phys(port, n);
			desc->buf_len = desc->pkt_len = 1;
			wmb();
			queue_put_desc(TX_QUEUE(port->id), phys, desc);
		}
		udelay(1);
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
		       " left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
#endif

	buffs = TX_DESCS;
	while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
		       "left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif

	msg.byte3 = 0;
	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to disable loopback\n",
		       dev->name);

	phy_stop(port->phydev);

	if (!ports_open)
		qmgr_disable_irq(TXDONE_QUEUE);
	destroy_queues(port);
	release_queues(port);
	return 0;
}
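A note on the pattern above: the close path drains the queues by polling with a bounded retry count (MAX_CLOSE_WAIT) instead of waiting forever, and forces progress by injecting a dummy TX frame whenever the TX queue runs empty in loopback mode. Below is a minimal, self-contained sketch of the bounded-drain idiom only; the names and the get() callback are hypothetical stand-ins, not the driver's API.

/* Bounded drain: poll up to max_wait rounds, taking everything the
 * queue offers each round; returns how many buffers were never seen. */
static int drain_bounded(int (*get)(void *ctx), void *ctx,
			 int pending, int max_wait)
{
	int i = 0;

	do {
		while (get(ctx) >= 0)
			pending--;
		if (!pending)
			break;
		/* the driver sleeps briefly here (udelay(1)) between rounds */
	} while (++i < max_wait);

	return pending;	/* non-zero: buffers still held by the hardware */
}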
Example #2
0
static void mdp4_overlay_update_dsi_cmd(struct msm_fb_data_type *mfd)
{
	int ptype;
	struct mdp4_overlay_pipe *pipe;
	int ret;
	int cndx = 0;
	struct vsycn_ctrl *vctrl;


	if (mfd->key != MFD_KEY)
		return;

	vctrl = &vsync_ctrl_db[cndx];

	if (vctrl->base_pipe == NULL) {
		ptype = mdp4_overlay_format2type(mfd->fb_imgType);
		if (ptype < 0)
			printk(KERN_INFO "%s: format2type failed\n", __func__);
		pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0);
		if (pipe == NULL) {
			printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
			return;
		}
		pipe->pipe_used++;
		pipe->mixer_stage  = MDP4_MIXER_STAGE_BASE;
		pipe->mixer_num  = MDP4_MIXER0;
		pipe->src_format = mfd->fb_imgType;
		mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_DSI_CMD);
		ret = mdp4_overlay_format2pipe(pipe);
		if (ret < 0)
			printk(KERN_INFO "%s: format2type failed\n", __func__);

		vctrl->base_pipe = pipe; /* keep it */
		mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
		pipe->ov_blt_addr = 0;
		pipe->dma_blt_addr = 0;
	} else {
		pipe = vctrl->base_pipe;
	}

	MDP_OUTP(MDP_BASE + 0x021c, 10); /* read pointer */

	/*
	 * configure dsi stream id
	 * dma_p = 0, dma_s = 1
	 */
	MDP_OUTP(MDP_BASE + 0x000a0, 0x10);
	/* disable dsi trigger */
	MDP_OUTP(MDP_BASE + 0x000a4, 0x00);

	mdp4_overlay_setup_pipe_addr(mfd, pipe);

	mdp4_overlay_rgb_setup(pipe);

	mdp4_overlay_reg_flush(pipe, 1);

	mdp4_mixer_stage_up(pipe, 0);

	mdp4_overlayproc_cfg(pipe);

	mdp4_overlay_dmap_xy(pipe);

	mdp4_overlay_dmap_cfg(mfd, 0);

	wmb();
}
Example #3
0
int mdss_dsi_cont_splash_on(struct mdss_panel_data *pdata)
{
	int ret = 0;
	struct mipi_panel_info *mipi;
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;

	pr_info("%s:%d DSI on for continuous splash.\n", __func__, __LINE__);

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return -EINVAL;
	}

#if defined(CONFIG_FB_MSM_MDSS_S6E8AA0A_HD_PANEL)
	mdss_dsi_reset(pdata);
#endif

	mipi = &pdata->panel_info.mipi;

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);

	pr_debug("%s+: ctrl=%p ndx=%d\n", __func__,
				ctrl_pdata, ctrl_pdata->ndx);

	WARN((ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT),
		"Incorrect Ctrl state=0x%x\n", ctrl_pdata->ctrl_state);

#if defined(CONFIG_FB_MSM_MDSS_SDC_WXGA_PANEL)
	ret = mdss_dsi_panel_power_on(pdata, 0);
	if (ret) {
		pr_err("%s: Panel power on with 0 failed\n", __func__);
		return ret;
	}
	mdelay(10);
	ret = mdss_dsi_panel_power_on(pdata, 1);
	if (ret) {
		pr_err("%s: Panel power on with 1 failed\n", __func__);
		return ret;
	}
#endif
	mdss_dsi_sw_reset(pdata);
	mdss_dsi_host_init(mipi, pdata);
#if RESET_IN_LP11 // add for LP11
	// LP11
	{
	u32 tmp;
	tmp = MIPI_INP((ctrl_pdata->ctrl_base) + 0xac);
	tmp &= ~(1<<28);
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0xac, tmp);
	wmb();
	}
	// LP11

	(ctrl_pdata->panel_data).panel_reset_fn(pdata, 1);
#endif

	if (mipi->force_clk_lane_hs) {
		u32 tmp;

		tmp = MIPI_INP((ctrl_pdata->ctrl_base) + 0xac);
		tmp |= (1<<28);
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0xac, tmp);
		wmb();
	}
	mdss_dsi_op_mode_config(mipi->mode, pdata);

	if (ctrl_pdata->on_cmds.link_state == DSI_LP_MODE) {
		ret = mdss_dsi_unblank(pdata);
		if (ret) {
			pr_err("%s: unblank failed\n", __func__);
			return ret;
		}
	}

	pr_debug("%s-:End\n", __func__);
	return ret;
}
Example #4
0
void __init prom_init(void)
{
	prom_argc = fw_arg0;
	_prom_argv = (int *) fw_arg1;
	_prom_envp = (int *) fw_arg2;

	mips_display_message("LINUX");

	/*
	 * early setup of _pcictrl_bonito so that we can determine
	 * the system controller on a CORE_EMUL board
	 */
	_pcictrl_bonito = (unsigned long)ioremap(BONITO_REG_BASE, BONITO_REG_SIZE);

	mips_revision_corid = MIPS_REVISION_CORID;

	if (mips_revision_corid == MIPS_REVISION_CORID_CORE_EMUL) {
		if (BONITO_PCIDID == 0x0001df53 ||
		    BONITO_PCIDID == 0x0003df53)
			mips_revision_corid = MIPS_REVISION_CORID_CORE_EMUL_BON;
		else
			mips_revision_corid = MIPS_REVISION_CORID_CORE_EMUL_MSC;
	}

	mips_revision_sconid = MIPS_REVISION_SCONID;
	if (mips_revision_sconid == MIPS_REVISION_SCON_OTHER) {
		switch (mips_revision_corid) {
		case MIPS_REVISION_CORID_QED_RM5261:
		case MIPS_REVISION_CORID_CORE_LV:
		case MIPS_REVISION_CORID_CORE_FPGA:
		case MIPS_REVISION_CORID_CORE_FPGAR2:
			mips_revision_sconid = MIPS_REVISION_SCON_GT64120;
			break;
		case MIPS_REVISION_CORID_CORE_EMUL_BON:
		case MIPS_REVISION_CORID_BONITO64:
		case MIPS_REVISION_CORID_CORE_20K:
			mips_revision_sconid = MIPS_REVISION_SCON_BONITO;
			break;
		case MIPS_REVISION_CORID_CORE_MSC:
		case MIPS_REVISION_CORID_CORE_FPGA2:
		case MIPS_REVISION_CORID_CORE_24K:
			/*
			 * SOCit/ROCit support is essentially identical
			 * but make an attempt to distinguish them
			 */
			mips_revision_sconid = MIPS_REVISION_SCON_SOCIT;
			break;
		case MIPS_REVISION_CORID_CORE_FPGA3:
		case MIPS_REVISION_CORID_CORE_FPGA4:
		case MIPS_REVISION_CORID_CORE_FPGA5:
		case MIPS_REVISION_CORID_CORE_EMUL_MSC:
		default:
			/* See above */
			mips_revision_sconid = MIPS_REVISION_SCON_ROCIT;
			break;
		}
	}

	switch (mips_revision_sconid) {
		u32 start, map, mask, data;

	case MIPS_REVISION_SCON_GT64120:
		/*
		 * Setup the North bridge to do Master byte-lane swapping
		 * when running in bigendian.
		 */
		_pcictrl_gt64120 = (unsigned long)ioremap(MIPS_GT_BASE, 0x2000);

#ifdef CONFIG_CPU_LITTLE_ENDIAN
		GT_WRITE(GT_PCI0_CMD_OFS, GT_PCI0_CMD_MBYTESWAP_BIT |
			 GT_PCI0_CMD_SBYTESWAP_BIT);
#else
		GT_WRITE(GT_PCI0_CMD_OFS, 0);
#endif
		/* Fix up PCI I/O mapping if necessary (for Atlas).  */
		start = GT_READ(GT_PCI0IOLD_OFS);
		map = GT_READ(GT_PCI0IOREMAP_OFS);
		if ((start & map) != 0) {
			map &= ~start;
			GT_WRITE(GT_PCI0IOREMAP_OFS, map);
		}

		set_io_port_base(MALTA_GT_PORT_BASE);
		break;

	case MIPS_REVISION_SCON_BONITO:
		_pcictrl_bonito_pcicfg = (unsigned long)ioremap(BONITO_PCICFG_BASE, BONITO_PCICFG_SIZE);

		/*
		 * Disable Bonito IOBC.
		 */
		BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
			~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
			  BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);

		/*
		 * Setup the North bridge to do Master byte-lane swapping
		 * when running in bigendian.
		 */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		BONITO_BONGENCFG = BONITO_BONGENCFG &
			~(BONITO_BONGENCFG_MSTRBYTESWAP |
			  BONITO_BONGENCFG_BYTESWAP);
#else
		BONITO_BONGENCFG = BONITO_BONGENCFG |
			BONITO_BONGENCFG_MSTRBYTESWAP |
			BONITO_BONGENCFG_BYTESWAP;
#endif

		set_io_port_base(MALTA_BONITO_PORT_BASE);
		break;

	case MIPS_REVISION_SCON_SOCIT:
	case MIPS_REVISION_SCON_ROCIT:
		_pcictrl_msc = (unsigned long)ioremap(MIPS_MSC01_PCI_REG_BASE, 0x2000);
	mips_pci_controller:
		mb();
		MSC_READ(MSC01_PCI_CFG, data);
		MSC_WRITE(MSC01_PCI_CFG, data & ~MSC01_PCI_CFG_EN_BIT);
		wmb();

		/* Fix up lane swapping.  */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		MSC_WRITE(MSC01_PCI_SWAP, MSC01_PCI_SWAP_NOSWAP);
#else
		MSC_WRITE(MSC01_PCI_SWAP,
			  MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_IO_SHF |
			  MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_MEM_SHF |
			  MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_BAR0_SHF);
#endif
		/* Fix up target memory mapping.  */
		MSC_READ(MSC01_PCI_BAR0, mask);
		MSC_WRITE(MSC01_PCI_P2SCMSKL, mask & MSC01_PCI_BAR0_SIZE_MSK);

		/* Don't handle target retries indefinitely.  */
		if ((data & MSC01_PCI_CFG_MAXRTRY_MSK) ==
		    MSC01_PCI_CFG_MAXRTRY_MSK)
			data = (data & ~(MSC01_PCI_CFG_MAXRTRY_MSK <<
					 MSC01_PCI_CFG_MAXRTRY_SHF)) |
			       ((MSC01_PCI_CFG_MAXRTRY_MSK - 1) <<
				MSC01_PCI_CFG_MAXRTRY_SHF);

		wmb();
		MSC_WRITE(MSC01_PCI_CFG, data);
		mb();

		set_io_port_base(MALTA_MSC_PORT_BASE);
		break;

	case MIPS_REVISION_SCON_SOCITSC:
	case MIPS_REVISION_SCON_SOCITSCP:
		_pcictrl_msc = (unsigned long)ioremap(MIPS_SOCITSC_PCI_REG_BASE, 0x2000);
		goto mips_pci_controller;

	default:
		/* Unknown system controller */
		mips_display_message("SC Error");
		while (1);   /* We die here... */
	}
	board_nmi_handler_setup = mips_nmi_setup;
	board_ejtag_handler_setup = mips_ejtag_setup;

	prom_init_cmdline();
	prom_meminit();
#ifdef CONFIG_SERIAL_8250_CONSOLE
	console_config();
#endif
	/* Early detection of CMP support */
	if (gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ))
		if (!register_cmp_smp_ops())
			return;

	if (!register_vsmp_smp_ops())
		return;

#ifdef CONFIG_MIPS_MT_SMTC
	register_smp_ops(&msmtc_smp_ops);
#endif
}
Example #5
0
static int
isl_upload_firmware(islpci_private *priv)
{
	u32 reg, rc;
	void __iomem *device_base = priv->device_base;

	/* clear the RAMBoot and the Reset bit */
	reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
	reg &= ~ISL38XX_CTRL_STAT_RESET;
	reg &= ~ISL38XX_CTRL_STAT_RAMBOOT;
	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
	wmb();
	udelay(ISL38XX_WRITEIO_DELAY);

	/* set the Reset bit without reading the register ! */
	reg |= ISL38XX_CTRL_STAT_RESET;
	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
	wmb();
	udelay(ISL38XX_WRITEIO_DELAY);

	/* clear the Reset bit */
	reg &= ~ISL38XX_CTRL_STAT_RESET;
	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
	wmb();

	/* wait a while for the device to reboot */
	mdelay(50);

	{
		const struct firmware *fw_entry = NULL;
		long fw_len;
		const u32 *fw_ptr;

		rc = request_firmware(&fw_entry, priv->firmware, PRISM_FW_PDEV);
		if (rc) {
			printk(KERN_ERR
			       "%s: request_firmware() failed for '%s'\n",
			       "prism54", priv->firmware);
			return rc;
		}
		/* prepare the Direct Memory Base register */
		reg = ISL38XX_DEV_FIRMWARE_ADDRES;

		fw_ptr = (u32 *) fw_entry->data;
		fw_len = fw_entry->size;

		if (fw_len % 4) {
			printk(KERN_ERR
			       "%s: firmware '%s' size is not multiple of 32bit, aborting!\n",
			       "prism54", priv->firmware);
			release_firmware(fw_entry);
			return -EILSEQ; /* Illegal byte sequence */
		}

		while (fw_len > 0) {
			long _fw_len =
			    (fw_len >
			     ISL38XX_MEMORY_WINDOW_SIZE) ?
			    ISL38XX_MEMORY_WINDOW_SIZE : fw_len;
			u32 __iomem *dev_fw_ptr = device_base + ISL38XX_DIRECT_MEM_WIN;

			/* set the card's base address for writing the data */
			isl38xx_w32_flush(device_base, reg,
					  ISL38XX_DIR_MEM_BASE_REG);
			wmb();	/* be paranoid */

			/* increment the write address for next iteration */
			reg += _fw_len;
			fw_len -= _fw_len;

			/* write the data to the Direct Memory Window 32bit-wise */
			/* memcpy_toio() doesn't guarantee 32bit writes :-| */
			while (_fw_len > 0) {
				/* use non-swapping writel() */
				__raw_writel(*fw_ptr, dev_fw_ptr);
				fw_ptr++, dev_fw_ptr++;
				_fw_len -= 4;
			}

			/* flush PCI posting */
			(void) readl(device_base + ISL38XX_PCI_POSTING_FLUSH);
			wmb();	/* be paranoid again */

			BUG_ON(_fw_len != 0);
		}

		BUG_ON(fw_len != 0);

		/* Firmware version is at offset 40 (also for "newmac") */
		printk(KERN_DEBUG "%s: firmware version: %.8s\n",
		       priv->ndev->name, fw_entry->data + 40);

		release_firmware(fw_entry);
	}

	/* now reset the device
	 * clear the Reset & ClkRun bit, set the RAMBoot bit */
	reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
	reg &= ~ISL38XX_CTRL_STAT_CLKRUN;
	reg &= ~ISL38XX_CTRL_STAT_RESET;
	reg |= ISL38XX_CTRL_STAT_RAMBOOT;
	isl38xx_w32_flush(device_base, reg, ISL38XX_CTRL_STAT_REG);
	wmb();
	udelay(ISL38XX_WRITEIO_DELAY);

	/* set the reset bit latches the host override and RAMBoot bits
	 * into the device for operation when the reset bit is reset */
	reg |= ISL38XX_CTRL_STAT_RESET;
	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
	/* don't do flush PCI posting here! */
	wmb();
	udelay(ISL38XX_WRITEIO_DELAY);

	/* clear the reset bit should start the whole circus */
	reg &= ~ISL38XX_CTRL_STAT_RESET;
	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
	/* don't do flush PCI posting here! */
	wmb();
	udelay(ISL38XX_WRITEIO_DELAY);

	return 0;
}
Example #6
0
static netdev_tx_t
greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	u32 status = 0, dma_addr, ctrl;
	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
	unsigned long flags;

	nr_frags = skb_shinfo(skb)->nr_frags;

	/* Clean TX Ring */
	greth_clean_tx_gbit(dev);

	if (greth->tx_free < nr_frags + 1) {
		spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
		ctrl = GRETH_REGLOAD(greth->regs->control);
		/* Enable TX IRQ only if not already in poll() routine */
		if (ctrl & GRETH_RXI)
			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&greth->devlock, flags);
		err = NETDEV_TX_BUSY;
		goto out;
	}

	if (netif_msg_pktdata(greth))
		greth_print_tx_packet(skb);

	if (unlikely(skb->len > MAX_FRAME_SIZE)) {
		dev->stats.tx_errors++;
		goto out;
	}

	/* Save skb pointer. */
	greth->tx_skbuff[greth->tx_next] = skb;

	/* Linear buf */
	if (nr_frags != 0)
		status = GRETH_TXBD_MORE;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= GRETH_TXBD_CSALL;
	status |= skb_headlen(skb) & GRETH_BD_LEN;
	if (greth->tx_next == GRETH_TXBD_NUM_MASK)
		status |= GRETH_BD_WR;


	bdp = greth->tx_bd_base + greth->tx_next;
	greth_write_bd(&bdp->stat, status);
	dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
		goto map_error;

	greth_write_bd(&bdp->addr, dma_addr);

	curr_tx = NEXT_TX(greth->tx_next);

	/* Frags */
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		greth->tx_skbuff[curr_tx] = NULL;
		bdp = greth->tx_bd_base + curr_tx;

		status = GRETH_BD_EN;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			status |= GRETH_TXBD_CSALL;
		status |= skb_frag_size(frag) & GRETH_BD_LEN;

		/* Wrap around descriptor ring */
		if (curr_tx == GRETH_TXBD_NUM_MASK)
			status |= GRETH_BD_WR;

		/* More fragments left */
		if (i < nr_frags - 1)
			status |= GRETH_TXBD_MORE;
		else
			status |= GRETH_BD_IE; /* enable IRQ on last fragment */

		greth_write_bd(&bdp->stat, status);

		dma_addr = skb_frag_dma_map(greth->dev, frag, 0, skb_frag_size(frag),
					    DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
			goto frag_map_error;

		greth_write_bd(&bdp->addr, dma_addr);

		curr_tx = NEXT_TX(curr_tx);
	}

	wmb();

	/* Enable the descriptor chain by enabling the first descriptor */
	bdp = greth->tx_bd_base + greth->tx_next;
	greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
	greth->tx_next = curr_tx;
	greth->tx_free -= nr_frags + 1;

	wmb();
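	/* the barrier above orders the descriptor-enable and ring bookkeeping
	 * writes before the TX-enable register write below */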

	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
	greth_enable_tx(greth);
	spin_unlock_irqrestore(&greth->devlock, flags);

	return NETDEV_TX_OK;

frag_map_error:
	/* Unmap SKB mappings that succeeded and disable descriptor */
	for (i = 0; greth->tx_next + i != curr_tx; i++) {
		bdp = greth->tx_bd_base + greth->tx_next + i;
		dma_unmap_single(greth->dev,
				 greth_read_bd(&bdp->addr),
				 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
				 DMA_TO_DEVICE);
		greth_write_bd(&bdp->stat, 0);
	}
map_error:
	if (net_ratelimit())
		dev_warn(greth->dev, "Could not create TX DMA mapping\n");
	dev_kfree_skb(skb);
out:
	return err;
}
Example #7
0
static int greth_rx_gbit(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb, *newskb;
	int pkt_len;
	int bad, count = 0;
	u32 status, dma_addr;
	unsigned long flags;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		skb = greth->rx_skbuff[greth->rx_cur];
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
		mb();
		status = greth_read_bd(&bdp->stat);
		bad = 0;

		if (status & GRETH_BD_EN)
			break;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {

			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			} else if (status &
				   (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			} else if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}

		/* Allocate new skb to replace current, not needed if the
		 * current skb can be reused */
		if (!bad && (newskb=netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
			skb_reserve(newskb, NET_IP_ALIGN);

			dma_addr = dma_map_single(greth->dev,
						      newskb->data,
						      MAX_FRAME_SIZE + NET_IP_ALIGN,
						      DMA_FROM_DEVICE);

			if (!dma_mapping_error(greth->dev, dma_addr)) {
				/* Process the incoming frame. */
				pkt_len = status & GRETH_BD_LEN;

				dma_unmap_single(greth->dev,
						 greth_read_bd(&bdp->addr),
						 MAX_FRAME_SIZE + NET_IP_ALIGN,
						 DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);

				skb_put(skb, pkt_len);

				if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb_checksum_none_assert(skb);

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
				netif_receive_skb(skb);

				greth->rx_skbuff[greth->rx_cur] = newskb;
				greth_write_bd(&bdp->addr, dma_addr);
			} else {
				if (net_ratelimit())
					dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
				dev_kfree_skb(newskb);
				/* reusing current skb, so it is a drop */
				dev->stats.rx_dropped++;
			}
		} else if (bad) {
			/* Bad Frame transfer, the skb is reused */
			dev->stats.rx_dropped++;
		} else {
			/* Failed Allocating a new skb. This is rather stupid
			 * but the current "filled" skb is reused, as if
			 * transfer failure. One could argue that RX descriptor
			 * table handling should be divided into cleaning and
			 * filling as the TX part of the driver
			 */
			if (net_ratelimit())
				dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
			/* reusing current skb, so it is a drop */
			dev->stats.rx_dropped++;
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
			status |= GRETH_BD_WR;
		}

		wmb();
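		/* the barrier above: the new buffer address must reach memory
		 * before the status write hands the descriptor back to the NIC */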
		greth_write_bd(&bdp->stat, status);
		spin_lock_irqsave(&greth->devlock, flags);
		greth_enable_rx(greth);
		spin_unlock_irqrestore(&greth->devlock, flags);
		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;

}
Example #8
0
static int mipi_dsi_on(struct platform_device *pdev)
{
	int ret = 0;
	u32 clk_rate;
	struct msm_fb_data_type *mfd;
	struct fb_info *fbi;
	struct fb_var_screeninfo *var;
	struct msm_panel_info *pinfo;
	struct mipi_panel_info *mipi;
	u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
	u32 ystride, bpp, data;
	u32 dummy_xres, dummy_yres;
	int target_type = 0;

	pr_debug("%s+:\n", __func__);

	mfd = platform_get_drvdata(pdev);
	fbi = mfd->fbi;
	var = &fbi->var;
	pinfo = &mfd->panel_info;

#ifdef CONFIG_MSM_ALT_DSI_ESCAPE_CLOCK
	esc_byte_ratio = pinfo->mipi.esc_byte_ratio;
#else
	esc_byte_ratio = 2;
#endif

	if (mipi_dsi_pdata && mipi_dsi_pdata->dsi_power_save)
		mipi_dsi_pdata->dsi_power_save(1);

	cont_splash_clk_ctrl(0);
	mipi_dsi_prepare_clocks();

	mipi_dsi_ahb_ctrl(1);

	clk_rate = mfd->fbi->var.pixclock;
	clk_rate = min(clk_rate, mfd->panel_info.clk_max);

	mipi_dsi_phy_ctrl(1);

	if (mdp_rev == MDP_REV_42 && mipi_dsi_pdata)
		target_type = mipi_dsi_pdata->target_type;

	mipi_dsi_phy_init(0, &(mfd->panel_info), target_type);

	mipi_dsi_clk_enable();

	MIPI_OUTP(MIPI_DSI_BASE + 0x114, 1);
	MIPI_OUTP(MIPI_DSI_BASE + 0x114, 0);

	hbp = var->left_margin;
	hfp = var->right_margin;
	vbp = var->upper_margin;
	vfp = var->lower_margin;
	hspw = var->hsync_len;
	vspw = var->vsync_len;
	width = mfd->panel_info.xres;
	height = mfd->panel_info.yres;

	mipi  = &mfd->panel_info.mipi;
	if (mfd->panel_info.type == MIPI_VIDEO_PANEL) {
		dummy_xres = mfd->panel_info.lcdc.xres_pad;
		dummy_yres = mfd->panel_info.lcdc.yres_pad;

		if (mdp_rev >= MDP_REV_41) {
			MIPI_OUTP(MIPI_DSI_BASE + 0x20,
				((hspw + hbp + width + dummy_xres) << 16 |
				(hspw + hbp)));
			MIPI_OUTP(MIPI_DSI_BASE + 0x24,
				((vspw + vbp + height + dummy_yres) << 16 |
				(vspw + vbp)));
			MIPI_OUTP(MIPI_DSI_BASE + 0x28,
				(vspw + vbp + height + dummy_yres +
					vfp - 1) << 16 | (hspw + hbp +
					width + dummy_xres + hfp - 1));
		} else {
			/* DSI_LAN_SWAP_CTRL */
			MIPI_OUTP(MIPI_DSI_BASE + 0x00ac, mipi->dlane_swap);

			MIPI_OUTP(MIPI_DSI_BASE + 0x20,
				((hbp + width + dummy_xres) << 16 | (hbp)));
			MIPI_OUTP(MIPI_DSI_BASE + 0x24,
				((vbp + height + dummy_yres) << 16 | (vbp)));
			MIPI_OUTP(MIPI_DSI_BASE + 0x28,
				(vbp + height + dummy_yres + vfp) << 16 |
					(hbp + width + dummy_xres + hfp));
		}

		MIPI_OUTP(MIPI_DSI_BASE + 0x2c, (hspw << 16));
		MIPI_OUTP(MIPI_DSI_BASE + 0x30, 0);
		MIPI_OUTP(MIPI_DSI_BASE + 0x34, (vspw << 16));

	} else {		/* command mode */
		if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
			bpp = 3;
		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB666)
			bpp = 3;
		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
			bpp = 2;
		else
			bpp = 3;	/* Default format set to RGB888 */

		ystride = width * bpp + 1;

		/* DSI_COMMAND_MODE_MDP_STREAM_CTRL */
		data = (ystride << 16) | (mipi->vc << 8) | DTYPE_DCS_LWRITE;
		MIPI_OUTP(MIPI_DSI_BASE + 0x5c, data);
		MIPI_OUTP(MIPI_DSI_BASE + 0x54, data);

		/* DSI_COMMAND_MODE_MDP_STREAM_TOTAL */
		data = height << 16 | width;
		MIPI_OUTP(MIPI_DSI_BASE + 0x60, data);
		MIPI_OUTP(MIPI_DSI_BASE + 0x58, data);
	}

	mipi_dsi_host_init(mipi);

	if (mipi->force_clk_lane_hs) {
		u32 tmp;

		tmp = MIPI_INP(MIPI_DSI_BASE + 0xA8);
		tmp |= (1<<28);
		MIPI_OUTP(MIPI_DSI_BASE + 0xA8, tmp);
		wmb();
	}

	if (mdp_rev >= MDP_REV_41)
		mutex_lock(&mfd->dma->ov_mutex);
	else
		down(&mfd->dma->mutex);

	ret = panel_next_on(pdev);

	mipi_dsi_op_mode_config(mipi->mode);

	if (mfd->panel_info.type == MIPI_CMD_PANEL) {
		if (pinfo->lcd.vsync_enable) {
			if (pinfo->lcd.hw_vsync_mode && vsync_gpio >= 0) {
				if (mdp_rev >= MDP_REV_41) {
					if (gpio_request(vsync_gpio,
						"MDP_VSYNC") == 0)
						gpio_direction_input(
							vsync_gpio);
					else
						pr_err("%s: unable to request gpio=%d\n",
						       __func__, vsync_gpio);
				} else if (mdp_rev == MDP_REV_303) {
					if (!tlmm_settings && gpio_request(
						vsync_gpio, "MDP_VSYNC") == 0) {
						ret = gpio_tlmm_config(
							GPIO_CFG(
							vsync_gpio, 1,
							GPIO_CFG_INPUT,
							GPIO_CFG_PULL_DOWN,
							GPIO_CFG_2MA),
							GPIO_CFG_ENABLE);

						if (ret) {
							pr_err("%s: unable to config tlmm = %d\n",
							       __func__, vsync_gpio);
						}
						tlmm_settings = TRUE;

						gpio_direction_input(
							vsync_gpio);
					} else {
						if (!tlmm_settings) {
							pr_err("%s: unable to request gpio=%d\n",
							       __func__, vsync_gpio);
						}
					}
				}
Example #9
0
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	do {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			netif_receive_skb(skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
Example #10
0
static int mdp3_dmap_histo_get(struct mdp3_dma *dma)
{
	int i, state, timeout, ret;
	u32 addr;
	unsigned long flag;

	spin_lock_irqsave(&dma->histo_lock, flag);
	state = dma->histo_state;
	spin_unlock_irqrestore(&dma->histo_lock, flag);

	if (state != MDP3_DMA_HISTO_STATE_START &&
		state != MDP3_DMA_HISTO_STATE_READY) {
		pr_err("mdp3_dmap_histo_get invalid state %d\n", state);
		return -EINVAL;
	}

	timeout = HIST_WAIT_TIMEOUT(dma->histogram_config.frame_count);
	ret = wait_for_completion_killable_timeout(&dma->histo_comp, timeout);

	if (ret == 0) {
		pr_debug("mdp3_dmap_histo_get time out\n");
		ret = -ETIMEDOUT;
	} else if (ret < 0) {
		pr_err("mdp3_dmap_histo_get interrupted\n");
	}

	if (ret < 0)
		return ret;

	if (dma->histo_state != MDP3_DMA_HISTO_STATE_READY) {
		pr_debug("mdp3_dmap_histo_get after dma shut down\n");
		return -EPERM;
	}

	addr = MDP3_REG_DMA_P_HIST_R_DATA;
	for (i = 0; i < MDP_HISTOGRAM_BIN_NUM; i++) {
		dma->histo_data.r_data[i] = MDP3_REG_READ(addr);
		addr += 4;
	}

	addr = MDP3_REG_DMA_P_HIST_G_DATA;
	for (i = 0; i < MDP_HISTOGRAM_BIN_NUM; i++) {
		dma->histo_data.g_data[i] = MDP3_REG_READ(addr);
		addr += 4;
	}

	addr = MDP3_REG_DMA_P_HIST_B_DATA;
	for (i = 0; i < MDP_HISTOGRAM_BIN_NUM; i++) {
		dma->histo_data.b_data[i] = MDP3_REG_READ(addr);
		addr += 4;
	}

	dma->histo_data.extra[0] =
			MDP3_REG_READ(MDP3_REG_DMA_P_HIST_EXTRA_INFO_0);
	dma->histo_data.extra[1] =
			MDP3_REG_READ(MDP3_REG_DMA_P_HIST_EXTRA_INFO_1);

	spin_lock_irqsave(&dma->histo_lock, flag);
	init_completion(&dma->histo_comp);
	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_START, 1);
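	/* post the start command to the hardware before flipping the
	 * software state below */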
	wmb();
	dma->histo_state = MDP3_DMA_HISTO_STATE_START;
	spin_unlock_irqrestore(&dma->histo_lock, flag);

	return 0;
}
Example #11
0
/**
 *	usb_tranzport_write
 */
static ssize_t usb_tranzport_write(struct file *file,
				const char __user *buffer, size_t count,
				loff_t *ppos)
{
	struct usb_tranzport *dev;
	size_t bytes_to_write;
	int retval = 0;

	dev = file->private_data;

	/* verify that we actually have some data to write */
	if (count == 0)
		goto exit;

	/* lock this object */
	if (mutex_lock_interruptible(&dev->mtx)) {
		retval = -ERESTARTSYS;
		goto exit;
	}
	/* verify that the device wasn't unplugged */
	if (dev->intf == NULL) {
		retval = -ENODEV;
		err("No device or device unplugged %d\n", retval);
		goto unlock_exit;
	}

	/* wait until previous transfer is finished */
	if (dev->interrupt_out_busy) {
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto unlock_exit;
		}
		retval = wait_event_interruptible(dev->write_wait,
						!dev->interrupt_out_busy);
		if (retval < 0)
			goto unlock_exit;
	}

	/* write the data into interrupt_out_buffer from userspace */
	bytes_to_write = min(count,
			write_buffer_size *
			dev->interrupt_out_endpoint_size);
	if (bytes_to_write < count)
		dev_warn(&dev->intf->dev,
			"Write buffer overflow, %zd bytes dropped\n",
			count - bytes_to_write);

	dbg_info(&dev->intf->dev,
		"%s: count = %zd, bytes_to_write = %zd\n", __func__,
		count, bytes_to_write);

	if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write)) {
		retval = -EFAULT;
		goto unlock_exit;
	}

	if (dev->interrupt_out_endpoint == NULL) {
		err("Endpoint should not be be null!\n");
		goto unlock_exit;
	}

	/* send off the urb */
	usb_fill_int_urb(dev->interrupt_out_urb,
			interface_to_usbdev(dev->intf),
			usb_sndintpipe(interface_to_usbdev(dev->intf),
				dev->interrupt_out_endpoint->
				bEndpointAddress),
			dev->interrupt_out_buffer, bytes_to_write,
			usb_tranzport_interrupt_out_callback, dev,
			dev->interrupt_out_interval);

	dev->interrupt_out_busy = 1;
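	/* make the busy flag visible before the URB completion handler can run */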
	wmb();

	retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL);
	if (retval) {
		dev->interrupt_out_busy = 0;
		err("Couldn't submit interrupt_out_urb %d\n", retval);
		goto unlock_exit;
	}
	retval = bytes_to_write;

unlock_exit:
	/* unlock the device */
	mutex_unlock(&dev->mtx);

exit:
	return retval;
}
Example #12
0
static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}
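Example #12 is the canonical producer-side use of wmb(): every ring write must be globally visible before the doorbell write that tells the device to look. The sketch below shows the same ordering in self-contained C11, with a release fence standing in for the kernel's wmb(); the ring layout and names are hypothetical.

#include <stdatomic.h>

#define RING_SIZE 8

struct ring {
	unsigned int desc[RING_SIZE];	/* descriptor payload */
	_Atomic unsigned int doorbell;	/* what the consumer polls */
};

static void ring_publish(struct ring *r, unsigned int idx, unsigned int val)
{
	r->desc[idx % RING_SIZE] = val;
	/* the release fence plays the role of wmb(): the payload store above
	 * may not be reordered past the doorbell store below */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&r->doorbell, idx + 1, memory_order_relaxed);
}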
Example #13
0
static inline bool nvhdcp_set_plugged(struct tegra_nvhdcp *nvhdcp, bool plugged)
{
	nvhdcp->plugged = plugged;
	wmb();
	return plugged;
}
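The wmb() in Example #13 only helps if the reader orders its accesses as well: a flag published after its payload must be consumed with a matching read barrier (or acquire). A hypothetical two-sided sketch in self-contained C11, with fences standing in for wmb()/rmb():

#include <stdatomic.h>
#include <stdbool.h>

struct hpd {
	int data;		/* payload, written before the flag */
	_Atomic bool plugged;
};

static void set_plugged(struct hpd *h, int data, bool plugged)
{
	h->data = data;
	atomic_thread_fence(memory_order_release);	/* wmb() analogue */
	atomic_store_explicit(&h->plugged, plugged, memory_order_relaxed);
}

static bool read_plugged(struct hpd *h, int *data)
{
	if (!atomic_load_explicit(&h->plugged, memory_order_relaxed))
		return false;
	atomic_thread_fence(memory_order_acquire);	/* rmb() analogue */
	*data = h->data;
	return true;
}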
Example #14
0
static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
#endif

	if (unlikely(skb->len > MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "eth_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
	dev_kfree_skb(skb);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
#ifdef __ARMEB__
		dev_kfree_skb(skb);
#else
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	/* NPE firmware pads short frames with zeros internally */
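	/* ensure the descriptor contents are written out before queueing it */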
	wmb();
	queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
	dev->trans_start = jiffies;

	if (qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
		printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		if (!qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_xmit ready again\n",
			       dev->name);
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
#endif
	return NETDEV_TX_OK;
}
Example #15
0
/**
 *	ld_usb_write
 */
static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	struct ld_usb *dev;
	size_t bytes_to_write;
	int retval = 0;

	dev = file->private_data;

	/* verify that we actually have some data to write */
	if (count == 0)
		goto exit;

	/* lock this object */
	if (mutex_lock_interruptible(&dev->mutex)) {
		retval = -ERESTARTSYS;
		goto exit;
	}

	/* verify that the device wasn't unplugged */
	if (dev->intf == NULL) {
		retval = -ENODEV;
		printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
		goto unlock_exit;
	}

	/* wait until previous transfer is finished */
	if (dev->interrupt_out_busy) {
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto unlock_exit;
		}
		retval = wait_event_interruptible(dev->write_wait, !dev->interrupt_out_busy);
		if (retval < 0) {
			goto unlock_exit;
		}
	}

	/* write the data into interrupt_out_buffer from userspace */
	bytes_to_write = min(count, write_buffer_size*dev->interrupt_out_endpoint_size);
	if (bytes_to_write < count)
		dev_warn(&dev->intf->dev, "Write buffer overflow, %zd bytes dropped\n",count-bytes_to_write);
	dev_dbg(&dev->intf->dev, "%s: count = %zd, bytes_to_write = %zd\n",
		__func__, count, bytes_to_write);

	if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write)) {
		retval = -EFAULT;
		goto unlock_exit;
	}

	if (dev->interrupt_out_endpoint == NULL) {
		/* try HID_REQ_SET_REPORT=9 on control_endpoint instead of interrupt_out_endpoint */
		retval = usb_control_msg(interface_to_usbdev(dev->intf),
					 usb_sndctrlpipe(interface_to_usbdev(dev->intf), 0),
					 9,
					 USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
					 1 << 8, 0,
					 dev->interrupt_out_buffer,
					 bytes_to_write,
					 USB_CTRL_SET_TIMEOUT * HZ);
		if (retval < 0)
			dev_err(&dev->intf->dev,
				"Couldn't submit HID_REQ_SET_REPORT %d\n",
				retval);
		goto unlock_exit;
	}

	/* send off the urb */
	usb_fill_int_urb(dev->interrupt_out_urb,
			 interface_to_usbdev(dev->intf),
			 usb_sndintpipe(interface_to_usbdev(dev->intf),
					dev->interrupt_out_endpoint->bEndpointAddress),
			 dev->interrupt_out_buffer,
			 bytes_to_write,
			 ld_usb_interrupt_out_callback,
			 dev,
			 dev->interrupt_out_interval);

	dev->interrupt_out_busy = 1;
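	/* make the busy flag visible before the URB completion handler can run */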
	wmb();

	retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL);
	if (retval) {
		dev->interrupt_out_busy = 0;
		dev_err(&dev->intf->dev,
			"Couldn't submit interrupt_out_urb %d\n", retval);
		goto unlock_exit;
	}
	retval = bytes_to_write;

unlock_exit:
	/* unlock the device */
	mutex_unlock(&dev->mutex);

exit:
	return retval;
}
Example #16
0
static void msm_bus_noc_set_qos_bw(struct msm_bus_noc_info *ninfo,
	uint32_t mport, uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw)
{
	uint32_t reg_val, val, mode;

	if (!ninfo->qos_freq) {
		MSM_BUS_DBG("Zero QoS Freq\n");
		return;
	}


	/* If Limiter or Regulator modes are not supported, bw is not available */
	if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER |
		NOC_QOS_PERM_MODE_REGULATOR)) {
		uint32_t bw_val = noc_bw_field(qbw->bw, ninfo->qos_freq);
		uint32_t sat_val = noc_sat_field(qbw->bw, qbw->ws,
			ninfo->qos_freq);

		MSM_BUS_DBG("NOC: BW: perm_mode: %d bw_val: %d, sat_val: %d\n",
			perm_mode, bw_val, sat_val);
		/*
		 * If in Limiter/Regulator mode, first go to fixed mode.
		 * Clear QoS accumulator
		 **/
		mode = readl_relaxed(NOC_QOS_MODEn_ADDR(ninfo->base,
			mport)) & NOC_QOS_MODEn_MODE_BMSK;
		if (mode == NOC_QOS_MODE_REGULATOR || mode ==
			NOC_QOS_MODE_LIMITER) {
			reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(ninfo->
				base, mport));
			val = NOC_QOS_MODE_FIXED;
			writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK)))
				| (val & NOC_QOS_MODEn_MODE_BMSK),
				NOC_QOS_MODEn_ADDR(ninfo->base, mport));
		}

		reg_val = readl_relaxed(NOC_QOS_BWn_ADDR(ninfo->base, mport));
		val = bw_val << NOC_QOS_BWn_BW_SHFT;
		writel_relaxed(((reg_val & (~(NOC_QOS_BWn_BW_BMSK))) |
			(val & NOC_QOS_BWn_BW_BMSK)),
			NOC_QOS_BWn_ADDR(ninfo->base, mport));

		MSM_BUS_DBG("NOC: BW: Wrote value: 0x%x\n", ((reg_val &
			(~NOC_QOS_BWn_BW_BMSK)) | (val &
			NOC_QOS_BWn_BW_BMSK)));

		reg_val = readl_relaxed(NOC_QOS_SATn_ADDR(ninfo->base,
			mport));
		val = sat_val << NOC_QOS_SATn_SAT_SHFT;
		writel_relaxed(((reg_val & (~(NOC_QOS_SATn_SAT_BMSK))) |
			(val & NOC_QOS_SATn_SAT_BMSK)),
			NOC_QOS_SATn_ADDR(ninfo->base, mport));

		MSM_BUS_DBG("NOC: SAT: Wrote value: 0x%x\n", ((reg_val &
			(~NOC_QOS_SATn_SAT_BMSK)) | (val &
			NOC_QOS_SATn_SAT_BMSK)));

		/* Set mode back to what it was initially */
		reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(ninfo->base,
			mport));
		writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK)))
			| (mode & NOC_QOS_MODEn_MODE_BMSK),
			NOC_QOS_MODEn_ADDR(ninfo->base, mport));
		/* Ensure that all writes for bandwidth registers have
		 * completed before returning
		 */
		wmb();
	}
}
Example #17
0
static inline void greth_enable_rx(struct greth_private *greth)
{
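	/* descriptor writes must complete before RX is (re-)enabled */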
	wmb();
	GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
}
Example #18
0
static int mdss_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
					struct dsi_buf *tp)
{
	int len, ret = 0;
	int domain = MDSS_IOMMU_DOMAIN_UNSECURE;
	char *bp;
	unsigned long size, addr;

#ifdef DEBUG_CMD
	int i;
	bp = tp->data;

	pr_info("%s: ", __func__);
	for (i = 0; i < tp->len; i++)
		printk("%x ", *bp++);

	pr_info("\n");
#endif
	bp = tp->data;

	len = ALIGN(tp->len, 4);
	size = ALIGN(tp->len, SZ_4K);

	tp->dmap = dma_map_single(&dsi_dev, tp->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(&dsi_dev, tp->dmap)) {
		pr_err("%s: dmap mapp failed\n", __func__);
		return -ENOMEM;
	}

	if (is_mdss_iommu_attached()) {
		int ret = msm_iommu_map_contig_buffer(tp->dmap,
					mdss_get_iommu_domain(domain), 0,
					size, SZ_4K, 0, &(addr));
		if (IS_ERR_VALUE(ret)) {
			pr_err("unable to map dma memory to iommu(%d)\n", ret);
			return -ENOMEM;
		}
	} else {
		addr = tp->dmap;
	}

	INIT_COMPLETION(ctrl->dma_comp);

	if (ctrl->shared_pdata.broadcast_enable)
		if ((ctrl->ndx == DSI_CTRL_1)
		  && (left_ctrl_pdata != NULL)) {
			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x048, addr);
			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x04c, len);
		}

	MIPI_OUTP((ctrl->ctrl_base) + 0x048, addr);
	MIPI_OUTP((ctrl->ctrl_base) + 0x04c, len);
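	/* commit the DMA base address and length before the trigger write below */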
	wmb();

	if (ctrl->shared_pdata.broadcast_enable)
		if ((ctrl->ndx == DSI_CTRL_1)
		  && (left_ctrl_pdata != NULL)) {
			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x090, 0x01);
		}

	MIPI_OUTP((ctrl->ctrl_base) + 0x090, 0x01);	/* trigger */
	wmb();

	ret = wait_for_completion_timeout(&ctrl->dma_comp,
				msecs_to_jiffies(DMA_TX_TIMEOUT));
	if (ret == 0) {
#if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
		dumpreg();
		mdp5_dump_regs();
		mdss_dsi_dump_power_clk(&ctrl->panel_data, 0);
		mdss_mdp_dump_power_clk();
		mdss_mdp_debug_bus();
		xlog_dump();
#endif
		pr_err("dma tx timeout!!\n");
		ret = -ETIMEDOUT;
	} else
		ret = tp->len;

	if (is_mdss_iommu_attached())
		msm_iommu_unmap_contig_buffer(addr,
			mdss_get_iommu_domain(domain), 0, size);

	return ret;
}
Example #19
0
static int greth_rx(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb;
	int pkt_len;
	int bad, count;
	u32 status, dma_addr;
	unsigned long flags;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
		mb();
		status = greth_read_bd(&bdp->stat);

		if (unlikely(status & GRETH_BD_EN)) {
			break;
		}

		dma_addr = greth_read_bd(&bdp->addr);
		bad = 0;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {
			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			}
			if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			}
			if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}
		if (unlikely(bad)) {
			dev->stats.rx_errors++;

		} else {

			pkt_len = status & GRETH_BD_LEN;

			skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);

			if (unlikely(skb == NULL)) {

				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - " "packet dropped\n");

				dev->stats.rx_dropped++;

			} else {
				skb_reserve(skb, NET_IP_ALIGN);

				dma_sync_single_for_cpu(greth->dev,
							dma_addr,
							pkt_len,
							DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);

				memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_bytes += pkt_len;
				dev->stats.rx_packets++;
				netif_receive_skb(skb);
			}
		}

		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
			status |= GRETH_BD_WR;
		}

		wmb();
		greth_write_bd(&bdp->stat, status);

		dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);

		spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
		greth_enable_rx(greth);
		spin_unlock_irqrestore(&greth->devlock, flags);

		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}
Example #20
0
static int mdss_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
					struct dsi_buf *tp)
{
	int len, ret = 0;
	int domain = MDSS_IOMMU_DOMAIN_UNSECURE;
	char *bp;
	unsigned long size, addr;

	bp = tp->data;

	len = ALIGN(tp->len, 4);
	size = ALIGN(tp->len, SZ_4K);


	if (is_mdss_iommu_attached()) {
		int ret = msm_iommu_map_contig_buffer(tp->dmap,
					mdss_get_iommu_domain(domain), 0,
					size, SZ_4K, 0, &(addr));
		if (IS_ERR_VALUE(ret)) {
			pr_err("unable to map dma memory to iommu(%d)\n", ret);
			return -ENOMEM;
		}
	} else {
		addr = tp->dmap;
	}

	INIT_COMPLETION(ctrl->dma_comp);

	if (ctrl->shared_pdata.broadcast_enable)
		if ((ctrl->ndx == DSI_CTRL_1)
		  && (left_ctrl_pdata != NULL)) {
			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x048, addr);
			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x04c, len);
		}

	MIPI_OUTP((ctrl->ctrl_base) + 0x048, addr);
	MIPI_OUTP((ctrl->ctrl_base) + 0x04c, len);
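	/* commit the DMA base address and length before the trigger write below */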
	wmb();

	if (ctrl->shared_pdata.broadcast_enable)
		if ((ctrl->ndx == DSI_CTRL_1)
		  && (left_ctrl_pdata != NULL)) {
			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x090, 0x01);
		}

	MIPI_OUTP((ctrl->ctrl_base) + 0x090, 0x01);	/* trigger */
	wmb();

	ret = wait_for_completion_timeout(&ctrl->dma_comp,
				msecs_to_jiffies(DMA_TX_TIMEOUT));
	if (ret == 0)
		ret = -ETIMEDOUT;
	else
		ret = tp->len;

	if (is_mdss_iommu_attached())
		msm_iommu_unmap_contig_buffer(addr,
			mdss_get_iommu_domain(domain), 0, size);

	return ret;
}
Example #21
0
static void xennet_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	int i, batch_target, notify;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	grant_ref_t ref;
	unsigned long pfn;
	void *vaddr;
	struct xen_netif_rx_request *req;

	if (unlikely(!netif_carrier_ok(dev)))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation requests
	 * both for ourself and for other kernel subsystems.
	 */
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
					 GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb))
			goto no_skb;

		/* Align the IP header to a 16-byte boundary */
		skb_reserve(skb, NET_IP_ALIGN);

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page) {
			kfree_skb(skb);
no_skb:
			/* Any skbuffs queued for refill? Force them out. */
			if (i != 0)
				goto refill;
			/* Could not allocate any skbuffs. Try again later. */
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));
			break;
		}

		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->nr_frags = 1;
		__skb_queue_tail(&np->rx_batch, skb);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

 refill:
	for (i = 0; ; i++) {
		skb = __skb_dequeue(&np->rx_batch);
		if (skb == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
		vaddr = page_address(skb_shinfo(skb)->frags[0].page);

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		gnttab_grant_foreign_access_ref(ref,
						np->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);

		req->id = id;
		req->gref = ref;
	}

	wmb();		/* barrier so the backend sees our requests */

	np->rx.req_prod_pvt = req_prod + i;
 push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);
}
Example #22
0
void mdss_dsi_host_init(struct mipi_panel_info *pinfo,
				struct mdss_panel_data *pdata)
{
	u32 dsi_ctrl, intr_ctrl;
	u32 data;
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return;
	}

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);

	pinfo->rgb_swap = DSI_RGB_SWAP_RGB;

	ctrl_pdata->panel_mode = pinfo->mode;

	if (pinfo->mode == DSI_VIDEO_MODE) {
		data = 0;
		if (pinfo->pulse_mode_hsa_he)
			data |= BIT(28);
		if (pinfo->hfp_power_stop)
			data |= BIT(24);
		if (pinfo->hbp_power_stop)
			data |= BIT(20);
		if (pinfo->hsa_power_stop)
			data |= BIT(16);
		if (pinfo->eof_bllp_power_stop)
			data |= BIT(15);
		if (pinfo->bllp_power_stop)
			data |= BIT(12);
		data |= ((pinfo->traffic_mode & 0x03) << 8);
		data |= ((pinfo->dst_format & 0x03) << 4); /* 2 bits */
		data |= (pinfo->vc & 0x03);
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0010, data);

		data = 0;
		data |= ((pinfo->rgb_swap & 0x07) << 12);
		if (pinfo->b_sel)
			data |= BIT(8);
		if (pinfo->g_sel)
			data |= BIT(4);
		if (pinfo->r_sel)
			data |= BIT(0);
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0020, data);
	} else if (pinfo->mode == DSI_CMD_MODE) {
		data = 0;
		data |= ((pinfo->interleave_max & 0x0f) << 20);
		data |= ((pinfo->rgb_swap & 0x07) << 16);
		if (pinfo->b_sel)
			data |= BIT(12);
		if (pinfo->g_sel)
			data |= BIT(8);
		if (pinfo->r_sel)
			data |= BIT(4);
		data |= (pinfo->dst_format & 0x0f);	/* 4 bits */
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0040, data);

		/* DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL */
		data = pinfo->wr_mem_continue & 0x0ff;
		data <<= 8;
		data |= (pinfo->wr_mem_start & 0x0ff);
		if (pinfo->insert_dcs_cmd)
			data |= BIT(16);
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0044, data);
	} else
		pr_err("%s: Unknown DSI mode=%d\n", __func__, pinfo->mode);

	dsi_ctrl = BIT(8) | BIT(2);	/* clock enable & cmd mode */
	intr_ctrl = 0;
	intr_ctrl = (DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_CMD_MDP_DONE_MASK);

	if (pinfo->crc_check)
		dsi_ctrl |= BIT(24);
	if (pinfo->ecc_check)
		dsi_ctrl |= BIT(20);
	if (pinfo->data_lane3)
		dsi_ctrl |= BIT(7);
	if (pinfo->data_lane2)
		dsi_ctrl |= BIT(6);
	if (pinfo->data_lane1)
		dsi_ctrl |= BIT(5);
	if (pinfo->data_lane0)
		dsi_ctrl |= BIT(4);

	/* from frame buffer, low power mode */
	/* DSI_COMMAND_MODE_DMA_CTRL */
	if (ctrl_pdata->shared_pdata.broadcast_enable)
		MIPI_OUTP(ctrl_pdata->ctrl_base + 0x3C, 0x94000000);
	else
		MIPI_OUTP(ctrl_pdata->ctrl_base + 0x3C, 0x14000000);

	data = 0;
	if (pinfo->te_sel)
		data |= BIT(31);
	data |= pinfo->mdp_trigger << 4;/* cmd mdp trigger */
	data |= pinfo->dma_trigger;	/* cmd dma trigger */
	data |= (pinfo->stream & 0x01) << 8;
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0084,
				data); /* DSI_TRIG_CTRL */

	/* DSI_LAN_SWAP_CTRL */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x00b0, pinfo->dlane_swap);

	/* clock out ctrl */
	data = pinfo->t_clk_post & 0x3f;	/* 6 bits */
	data <<= 8;
	data |= pinfo->t_clk_pre & 0x3f;	/*  6 bits */
	/* DSI_CLKOUT_TIMING_CTRL */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0xc4, data);

	data = 0;
	if (pinfo->rx_eot_ignore)
		data |= BIT(4);
	if (pinfo->tx_eot_append)
		data |= BIT(0);
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x00cc,
				data); /* DSI_EOT_PACKET_CTRL */


	/* allow only ack-err-status  to generate interrupt */
	/* DSI_ERR_INT_MASK0 */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x010c, 0x13ff3fe0);

	intr_ctrl |= DSI_INTR_ERROR_MASK;
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0110,
				intr_ctrl); /* DSI_INTL_CTRL */

	/* turn esc, byte, dsi, pclk, sclk, hclk on */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x11c,
					0x23f); /* DSI_CLK_CTRL */

	dsi_ctrl |= BIT(0);	/* enable dsi */
	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0004, dsi_ctrl);

	wmb();
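	/* the barrier above makes sure all of the register setup lands
	 * before we return */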
}
Example #23
0
static int p54p_upload_firmware(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	__le32 reg;
	int err;
	__le32 *data;
	u32 remains, left, device_addr;

	P54P_WRITE(int_enable, cpu_to_le32(0));
	P54P_READ(int_enable);
	udelay(10);

	reg = P54P_READ(ctrl_stat);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RAMBOOT);
	P54P_WRITE(ctrl_stat, reg);
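	/* read back to flush the posted PCI write before the delay */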
	P54P_READ(ctrl_stat);
	udelay(10);

	reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();
	udelay(10);

	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();

	/* wait for the firmware to reset properly */
	mdelay(10);

	err = p54_parse_firmware(dev, priv->firmware);
	if (err)
		return err;

	if (priv->common.fw_interface != FW_LM86) {
		dev_err(&priv->pdev->dev, "wrong firmware, "
			"please get a LM86(PCI) firmware a try again.\n");
		return -EINVAL;
	}

	data = (__le32 *) priv->firmware->data;
	remains = priv->firmware->size;
	device_addr = ISL38XX_DEV_FIRMWARE_ADDR;
	while (remains) {
		u32 i = 0;
		left = min((u32)0x1000, remains);
		P54P_WRITE(direct_mem_base, cpu_to_le32(device_addr));
		P54P_READ(int_enable);

		device_addr += 0x1000;
		while (i < left) {
			P54P_WRITE(direct_mem_win[i], *data++);
			i += sizeof(u32);
		}

		remains -= left;
		P54P_READ(int_enable);
	}

	reg = P54P_READ(ctrl_stat);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RAMBOOT);
	P54P_WRITE(ctrl_stat, reg);
	P54P_READ(ctrl_stat);
	udelay(10);

	reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();
	udelay(10);

	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();
	udelay(10);

	/* wait for the firmware to boot properly */
	mdelay(100);

	return 0;
}
Example #24
0
int
islpci_eth_receive(islpci_private *priv)
{
    struct net_device *ndev = priv->ndev;
    isl38xx_control_block *control_block = priv->control_block;
    struct sk_buff *skb;
    u16 size;
    u32 index, offset;
    unsigned char *src;
    int discard = 0;

#if VERBOSE > SHOW_ERROR_MESSAGES
    DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive \n");
#endif

    /* the device has written an Ethernet frame in the data area
     * of the sk_buff without updating the structure, do it now */
    index = priv->free_data_rx % ISL38XX_CB_RX_QSIZE;
    size = le16_to_cpu(control_block->rx_data_low[index].size);
    skb = priv->data_low_rx[index];
    offset = ((unsigned long)
          le32_to_cpu(control_block->rx_data_low[index].address) -
          (unsigned long) skb->data) & 3;

#if VERBOSE > SHOW_ERROR_MESSAGES
    DEBUG(SHOW_TRACING,
          "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n ",
          control_block->rx_data_low[priv->free_data_rx].address, skb->data,
          skb->len, offset, skb->truesize);
#endif

    /* delete the streaming DMA mapping before processing the skb */
    pci_unmap_single(priv->pdev,
             priv->pci_map_rx_address[index],
             MAX_FRAGMENT_SIZE_RX + 2, PCI_DMA_FROMDEVICE);

    /* update the skb structure and align the buffer */
    skb_put(skb, size);
    if (offset) {
        /* shift the buffer allocation offset bytes to get the right frame */
        skb_pull(skb, 2);
        skb_put(skb, 2);
    }
#if VERBOSE > SHOW_ERROR_MESSAGES
    /* display the buffer contents for debugging */
    DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
    display_buffer((char *) skb->data, skb->len);
#endif

    /* check whether WDS is enabled and whether the data frame is a WDS frame */

    if (init_wds) {
        /* WDS enabled, check for the wds address on the first 6 bytes of the buffer */
        src = skb->data + 6;
        memmove(skb->data, src, skb->len - 6);
        skb_trim(skb, skb->len - 6);
    }
#if VERBOSE > SHOW_ERROR_MESSAGES
    DEBUG(SHOW_TRACING, "Fragment size %i in skb at %p\n", size, skb);
    DEBUG(SHOW_TRACING, "Skb data at %p, length %i\n", skb->data, skb->len);

    /* display the buffer contents for debugging */
    DEBUG(SHOW_BUFFER_CONTENTS, "\nrx %p ", skb->data);
    display_buffer((char *) skb->data, skb->len);
#endif
    /* take care of monitor mode and spy monitoring. */
    if (unlikely(priv->iw_mode == IW_MODE_MONITOR)) {
        skb->dev = ndev;
        discard = islpci_monitor_rx(priv, &skb);
    } else {
        if (unlikely(skb->data[2 * ETH_ALEN] == 0)) {
            /* The packet has a rx_annex. Read it for spy monitoring, Then
             * remove it, while keeping the 2 leading MAC addr.
             */
            struct iw_quality wstats;
            struct rx_annex_header *annex =
                (struct rx_annex_header *) skb->data;
            wstats.level = annex->rfmon.rssi;
            /* The noise value can be a bit outdated if nobody's
             * reading wireless stats... */
            wstats.noise = priv->local_iwstatistics.qual.noise;
            wstats.qual = wstats.level - wstats.noise;
            wstats.updated = 0x07;
            /* Update spy records */
            wireless_spy_update(ndev, annex->addr2, &wstats);

            skb_copy_from_linear_data(skb,
                          (skb->data +
                           sizeof(struct rfmon_header)),
                          2 * ETH_ALEN);
            skb_pull(skb, sizeof (struct rfmon_header));
        }
        skb->protocol = eth_type_trans(skb, ndev);
    }
    skb->ip_summed = CHECKSUM_NONE;
    priv->statistics.rx_packets++;
    priv->statistics.rx_bytes += size;

    /* deliver the skb to the network layer */
#ifdef ISLPCI_ETH_DEBUG
    printk
        ("islpci_eth_receive:netif_rx %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
         skb->data[0], skb->data[1], skb->data[2], skb->data[3],
         skb->data[4], skb->data[5]);
#endif
    if (unlikely(discard)) {
        dev_kfree_skb_irq(skb);
        skb = NULL;
    } else
        netif_rx(skb);

    /* increment the read index for the rx data low queue */
    priv->free_data_rx++;

    /* add one or more sk_buff structures */
    while (index =
           le32_to_cpu(control_block->
               driver_curr_frag[ISL38XX_CB_RX_DATA_LQ]),
           index - priv->free_data_rx < ISL38XX_CB_RX_QSIZE) {
        /* allocate an sk_buff for received data frame storage,
         * including any required alignment operations */
        skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2);
        if (unlikely(skb == NULL)) {
            /* failed to allocate an sk_buff structure */
            DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb\n");
            break;
        }
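        /* reserve 0-3 bytes so that skb->data starts on a 4-byte boundary */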
        skb_reserve(skb, (4 - (long) skb->data) & 0x03);
        /* store the new skb structure pointer */
        index = index % ISL38XX_CB_RX_QSIZE;
        priv->data_low_rx[index] = skb;

#if VERBOSE > SHOW_ERROR_MESSAGES
        DEBUG(SHOW_TRACING,
              "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n ",
              skb, skb->data, skb->len, index, skb->truesize);
#endif

        /* set the streaming DMA mapping for proper PCI bus operation */
        priv->pci_map_rx_address[index] =
            pci_map_single(priv->pdev, (void *) skb->data,
                   MAX_FRAGMENT_SIZE_RX + 2,
                   PCI_DMA_FROMDEVICE);
        if (unlikely(priv->pci_map_rx_address[index] == (dma_addr_t) NULL)) {
            /* error mapping the buffer to a device-accessible memory address */
            DEBUG(SHOW_ERROR_MESSAGES,
                  "Error mapping DMA address\n");

            /* free the skbuf structure before aborting */
            dev_kfree_skb_irq(skb);
            skb = NULL;
            break;
        }
        /* update the fragment address */
        control_block->rx_data_low[index].address =
            cpu_to_le32((u32)priv->pci_map_rx_address[index]);
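        /* make the descriptor address visible to the device before the
         * driver index below announces the new buffer */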
        wmb();

        /* increment the driver read pointer */
        add_le32p(&control_block->
              driver_curr_frag[ISL38XX_CB_RX_DATA_LQ], 1);
    }

    /* trigger the device */
    islpci_trigger(priv);

    return 0;
}
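
The refill loop above follows a common DMA ring discipline: fill every free
slot, publish each buffer address with a write barrier, and only then advance
the producer index. A minimal sketch of that pattern, with hypothetical
ring/ring_slot names that are not part of the driver above:

#define RING_SIZE 8

struct ring_slot {
	u32 address;			/* bus address of a receive buffer */
};

struct ring {
	struct ring_slot slots[RING_SIZE];
	u32 producer;			/* advanced by the driver */
	u32 consumer;			/* advanced as frames are consumed */
};

static void ring_refill(struct ring *r, u32 (*map_buffer)(void))
{
	/* unsigned subtraction handles index wraparound correctly */
	while (r->producer - r->consumer < RING_SIZE) {
		u32 addr = map_buffer();

		if (!addr)
			break;	/* out of buffers; retry on the next pass */
		r->slots[r->producer % RING_SIZE].address = addr;
		wmb();		/* publish the address before the index */
		r->producer++;
	}
}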
Beispiel #25
0
/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
static void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC.  We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	if (apic->wait_for_init_deassert)
		apic->wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = read_apic_id();
	cpuid = smp_processor_id();
	if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
					phys_id, cpuid);
	}
	pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second; this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpumask_test_cpu(cpuid, cpu_callout_mask))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("%s: CPU%d started up but did not get a callout!\n",
		      __func__, cpuid);
	}

	/*
	 * The boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	pr_debug("CALLIN, before setup_local_APIC().\n");
	if (apic->smp_callin_clear_local_apic)
		apic->smp_callin_clear_local_apic();
	setup_local_APIC();
	end_local_APIC_setup();

	/*
	 * Need to setup vector mappings before we enable interrupts.
	 */
	setup_vector_irq(smp_processor_id());

	/*
	 * Save our processor parameters. Note: this information
	 * is needed for clock calibration.
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Get our bogomips.
	 * Update loops_per_jiffy in cpu_data. Previous call to
	 * smp_store_cpu_info() stored a value that is close but not as
	 * accurate as the value just calculated.
	 */
	calibrate_delay();
	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
	pr_debug("Stack at about %p\n", &cpuid);

	/*
	 * This must be done before setting cpu_online_mask
	 * or calling notify_cpu_starting.
	 */
	set_cpu_sibling_map(raw_smp_processor_id());
	wmb();

	notify_cpu_starting(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpumask_set_cpu(cpuid, cpu_callin_mask);
}
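
smp_callin() is the AP half of a two-mask handshake: the boot CPU sets the
AP's bit in cpu_callout_mask and spins until the AP answers through
cpu_callin_mask. In outline (a simplified sketch, not the kernel's actual
code):

	/* boot CPU side (simplified) */
	cpumask_set_cpu(cpu, cpu_callout_mask);		/* "you may continue" */
	while (!cpumask_test_cpu(cpu, cpu_callin_mask))
		cpu_relax();				/* wait for the AP */

	/* AP side (simplified), as in smp_callin() above */
	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
		cpu_relax();				/* wait for permission */
	/* ... APIC setup, calibration ... */
	cpumask_set_cpu(cpu, cpu_callin_mask);		/* "I am up" */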
Beispiel #26
0
int
islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
{
    islpci_private *priv = netdev_priv(ndev);
    isl38xx_control_block *cb = priv->control_block;
    u32 index;
    dma_addr_t pci_map_address;
    int frame_size;
    isl38xx_fragment *fragment;
    int offset;
    struct sk_buff *newskb;
    int newskb_offset;
    unsigned long flags;
    unsigned char wds_mac[6];
    u32 curr_frag;
    int err = 0;

#if VERBOSE > SHOW_ERROR_MESSAGES
    DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit\n");
#endif

    /* lock the driver code */
    spin_lock_irqsave(&priv->slock, flags);

    /* check whether the destination queue has enough fragments for the frame */
    curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ]);
    if (unlikely(curr_frag - priv->free_data_tx >= ISL38XX_CB_TX_QSIZE)) {
        printk(KERN_ERR "%s: transmit device queue full when awake\n",
               ndev->name);
        netif_stop_queue(ndev);

        /* trigger the device */
        isl38xx_w32_flush(priv->device_base, ISL38XX_DEV_INT_UPDATE,
                  ISL38XX_DEV_INT_REG);
        udelay(ISL38XX_WRITEIO_DELAY);

        err = -EBUSY;
        goto drop_free;
    }
    /* Check alignment and WDS frame formatting. The start of the packet must
     * be aligned on a 4-byte boundary. If WDS is enabled, reserve another
     * 6 bytes and prepend the WDS address information */
    if (likely(((long) skb->data & 0x03) | init_wds)) {
        /* get the number of bytes needed to re-align the buffer */
        offset = (4 - (long) skb->data) & 0x03;
        offset += init_wds ? 6 : 0;

        /* check whether the current skb can be used */
        if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
            unsigned char *src = skb->data;

#if VERBOSE > SHOW_ERROR_MESSAGES
            DEBUG(SHOW_TRACING, "skb offset %i wds %i\n", offset,
                  init_wds);
#endif

            /* align the buffer on 4-byte boundary */
            skb_reserve(skb, (4 - (long) skb->data) & 0x03);
            if (init_wds) {
                /* wds requires an additional address field of 6 bytes */
                skb_put(skb, 6);
#ifdef ISLPCI_ETH_DEBUG
                printk("islpci_eth_transmit:wds_mac\n");
#endif
                memmove(skb->data + 6, src, skb->len);
                skb_copy_to_linear_data(skb, wds_mac, 6);
            } else {
                memmove(skb->data, src, skb->len);
            }

#if VERBOSE > SHOW_ERROR_MESSAGES
            DEBUG(SHOW_TRACING, "memmove %p %p %i \n", skb->data,
                  src, skb->len);
#endif
        } else {
            newskb =
                dev_alloc_skb(init_wds ? skb->len + 6 : skb->len);
            if (unlikely(newskb == NULL)) {
                printk(KERN_ERR "%s: Cannot allocate skb\n",
                       ndev->name);
                err = -ENOMEM;
                goto drop_free;
            }
            newskb_offset = (4 - (long) newskb->data) & 0x03;

            /* align newskb->data if it is not already on a 4-byte boundary */
            if (newskb_offset)
                skb_reserve(newskb, newskb_offset);

            skb_put(newskb, init_wds ? skb->len + 6 : skb->len);
            if (init_wds) {
                skb_copy_from_linear_data(skb,
                              newskb->data + 6,
                              skb->len);
                skb_copy_to_linear_data(newskb, wds_mac, 6);
#ifdef ISLPCI_ETH_DEBUG
                printk("islpci_eth_transmit:wds_mac\n");
#endif
            } else
                skb_copy_from_linear_data(skb, newskb->data,
                              skb->len);

#if VERBOSE > SHOW_ERROR_MESSAGES
            DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n",
                  newskb->data, skb->data, skb->len, init_wds);
#endif

            newskb->dev = skb->dev;
            dev_kfree_skb_irq(skb);
            skb = newskb;
        }
    }
    /* display the buffer contents for debugging */
#if VERBOSE > SHOW_ERROR_MESSAGES
    DEBUG(SHOW_BUFFER_CONTENTS, "\ntx %p ", skb->data);
    display_buffer((char *) skb->data, skb->len);
#endif

    /* map the skb buffer to pci memory for DMA operation */
    pci_map_address = pci_map_single(priv->pdev,
                     (void *) skb->data, skb->len,
                     PCI_DMA_TODEVICE);
    if (unlikely(pci_map_address == 0)) {
        printk(KERN_WARNING "%s: cannot map buffer to PCI\n",
               ndev->name);

        err = -EIO;
        goto drop_free;
    }
    /* Place the fragment in the control block structure. */
    index = curr_frag % ISL38XX_CB_TX_QSIZE;
    fragment = &cb->tx_data_low[index];

    priv->pci_map_tx_address[index] = pci_map_address;
    /* store the skb address for future freeing */
    priv->data_low_tx[index] = skb;
    /* set the proper fragment start address and size information */
    frame_size = skb->len;
    fragment->size = cpu_to_le16(frame_size);
    fragment->flags = cpu_to_le16(0);    /* set to 1 if more fragments */
    fragment->address = cpu_to_le32(pci_map_address);
    curr_frag++;

    /* The fragment address in the control block must have been
     * written before announcing the frame buffer to device. */
    wmb();
    cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ] = cpu_to_le32(curr_frag);

    if (curr_frag - priv->free_data_tx + ISL38XX_MIN_QTHRESHOLD
        > ISL38XX_CB_TX_QSIZE) {
        /* stop sends from upper layers */
        netif_stop_queue(ndev);

        /* set the full flag for the transmission queue */
        priv->data_low_tx_full = 1;
    }

    /* set the transmission time */
    ndev->trans_start = jiffies;
    priv->statistics.tx_packets++;
    priv->statistics.tx_bytes += skb->len;

    /* trigger the device */
    islpci_trigger(priv);

    /* unlock the driver code */
    spin_unlock_irqrestore(&priv->slock, flags);

    return 0;

drop_free:
    priv->statistics.tx_dropped++;
    spin_unlock_irqrestore(&priv->slock, flags);
    dev_kfree_skb(skb);
    return err;
}
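
Both the full-queue check at the top of islpci_eth_transmit() and the
threshold check near the end rely on free-running u32 indices: the occupancy
is simply curr_frag - priv->free_data_tx, which stays correct across 32-bit
wraparound (the modulo slot mapping additionally assumes the queue size is a
power of two). A compact illustration with hypothetical values:

	u32 curr_frag = 0x00000002;	/* producer has wrapped past 2^32 */
	u32 free_tx   = 0xfffffffe;	/* consumer has not wrapped yet */

	/* unsigned subtraction: 0x2 - 0xfffffffe == 4 entries in use */
	BUG_ON(curr_frag - free_tx != 4);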
Beispiel #27
0
int mdss_dsi_on(struct mdss_panel_data *pdata)
{
	int ret = 0;
	u32 clk_rate;
	struct mdss_panel_info *pinfo;
	struct mipi_panel_info *mipi;
	u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
	u32 ystride, bpp, data, dst_bpp;
	u32 dummy_xres = 0, dummy_yres = 0;	/* padding only applies to video panels */
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
	u32 hsync_period, vsync_period;

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return -EINVAL;
	}

	if (pdata->panel_info.panel_power_on) {
		pr_warn("%s:%d Panel already on.\n", __func__, __LINE__);
		return 0;
	}

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);

	pr_debug("%s+: ctrl=%p ndx=%d\n",
				__func__, ctrl_pdata, ctrl_pdata->ndx);

	pinfo = &pdata->panel_info;

	ret = mdss_dsi_panel_power_on(pdata, 1);
	if (ret) {
		pr_err("%s: Panel power on failed\n", __func__);
		return ret;
	}

	pdata->panel_info.panel_power_on = 1;

	ret = mdss_dsi_enable_bus_clocks(ctrl_pdata);
	if (ret) {
		pr_err("%s: failed to enable bus clocks. rc=%d\n", __func__,
			ret);
		mdss_dsi_panel_power_on(pdata, 0);
		return ret;
	}

	mdss_dsi_phy_sw_reset((ctrl_pdata->ctrl_base));
	mdss_dsi_phy_init(pdata);
	mdss_dsi_disable_bus_clocks(ctrl_pdata);

	mdss_dsi_clk_ctrl(ctrl_pdata, 1);

	clk_rate = pdata->panel_info.clk_rate;
	clk_rate = min(clk_rate, pdata->panel_info.clk_max);

	dst_bpp = pdata->panel_info.fbc.enabled ?
		(pdata->panel_info.fbc.target_bpp) : (pinfo->bpp);

	hbp = mult_frac(pdata->panel_info.lcdc.h_back_porch, dst_bpp,
			pdata->panel_info.bpp);
	hfp = mult_frac(pdata->panel_info.lcdc.h_front_porch, dst_bpp,
			pdata->panel_info.bpp);
	vbp = mult_frac(pdata->panel_info.lcdc.v_back_porch, dst_bpp,
			pdata->panel_info.bpp);
	vfp = mult_frac(pdata->panel_info.lcdc.v_front_porch, dst_bpp,
			pdata->panel_info.bpp);
	hspw = mult_frac(pdata->panel_info.lcdc.h_pulse_width, dst_bpp,
			pdata->panel_info.bpp);
	vspw = pdata->panel_info.lcdc.v_pulse_width;
	width = mult_frac(pdata->panel_info.xres, dst_bpp,
			pdata->panel_info.bpp);
	height = pdata->panel_info.yres;

	if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
		dummy_xres = pdata->panel_info.lcdc.xres_pad;
		dummy_yres = pdata->panel_info.lcdc.yres_pad;
	}

	vsync_period = vspw + vbp + height + dummy_yres + vfp;
	hsync_period = hspw + hbp + width + dummy_xres + hfp;

	mipi  = &pdata->panel_info.mipi;
	if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x24,
			((hspw + hbp + width + dummy_xres) << 16 |
			(hspw + hbp)));
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x28,
			((vspw + vbp + height + dummy_yres) << 16 |
			(vspw + vbp)));
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
				((vsync_period - 1) << 16)
				| (hsync_period - 1));

		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x30, (hspw << 16));
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x34, 0);
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x38, (vspw << 16));

	} else {		/* command mode */
		if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
			bpp = 3;
		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB666)
			bpp = 3;
		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
			bpp = 2;
		else
			bpp = 3;	/* Default format set to RGB888 */

		ystride = width * bpp + 1;

		/* DSI_COMMAND_MODE_MDP_STREAM_CTRL */
		data = (ystride << 16) | (mipi->vc << 8) | DTYPE_DCS_LWRITE;
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x60, data);
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x58, data);

		/* DSI_COMMAND_MODE_MDP_STREAM_TOTAL */
		data = height << 16 | width;
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x64, data);
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x5C, data);
	}

	mdss_dsi_sw_reset(pdata);
	mdss_dsi_host_init(mipi, pdata);
#if RESET_IN_LP11	/* perform the panel reset while the lanes are in LP11 */
	{
		u32 tmp;

		/* clear the force-clock-lane-HS bit so the lanes can rest in LP11 */
		tmp = MIPI_INP((ctrl_pdata->ctrl_base) + 0xac);
		tmp &= ~(1 << 28);
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0xac, tmp);
		wmb();
	}
	(ctrl_pdata->panel_data).panel_reset_fn(pdata, 1);
#endif

	if (mipi->force_clk_lane_hs) {
		u32 tmp;

		tmp = MIPI_INP((ctrl_pdata->ctrl_base) + 0xac);
		tmp |= (1<<28);
		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0xac, tmp);
		wmb();
	}

	if (pdata->panel_info.type == MIPI_CMD_PANEL)
		mdss_dsi_clk_ctrl(ctrl_pdata, 0);

	pr_debug("%s-:\n", __func__);
	return 0;
}
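
For reference, the horizontal and vertical totals programmed above follow the
usual display timing formula: total = pulse width + back porch + active +
front porch (plus any dummy padding for video panels). With hypothetical
1080p timings of hspw = 44, hbp = 148, width = 1920, hfp = 88 and vspw = 5,
vbp = 36, height = 1080, vfp = 4, this yields hsync_period = 2200 and
vsync_period = 1125.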
Beispiel #28
0
static int ltq_pci_startup(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const __be32 *req_mask, *bus_clk;
	u32 temp_buffer;

	/* get our clocks */
	clk_pci = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk_pci)) {
		dev_err(&pdev->dev, "failed to get pci clock\n");
		return PTR_ERR(clk_pci);
	}

	clk_external = clk_get(&pdev->dev, "external");
	if (IS_ERR(clk_external)) {
		clk_put(clk_pci);
		dev_err(&pdev->dev, "failed to get external pci clock\n");
		return PTR_ERR(clk_external);
	}

	/* read the bus speed that we want */
	bus_clk = of_get_property(node, "lantiq,bus-clock", NULL);
	if (bus_clk)
		clk_set_rate(clk_pci, *bus_clk);

	/* and enable the clocks */
	clk_enable(clk_pci);
	if (of_find_property(node, "lantiq,external-clock", NULL))
		clk_enable(clk_external);
	else
		clk_disable(clk_external);

	/* setup reset gpio used by pci */
	reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
	if (gpio_is_valid(reset_gpio))
		devm_gpio_request(&pdev->dev, reset_gpio, "pci-reset");

	/* enable auto-switching between PCI and EBU */
	ltq_pci_w32(0xa, PCI_CR_CLK_CTRL);

	/* busy, i.e. configuration is not done yet; PCI accesses have to be retried */
	ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) & ~(1 << 24), PCI_CR_PCI_MOD);
	wmb();
	/* BUS Master/IO/MEM access */
	ltq_pci_cfg_w32(ltq_pci_cfg_r32(PCI_CS_STS_CMD) | 7, PCI_CS_STS_CMD);

	/* enable the 2 external PCI masters */
	temp_buffer = ltq_pci_r32(PCI_CR_PC_ARB);
	/* setup the request mask */
	req_mask = of_get_property(node, "req-mask", NULL);
	if (req_mask)
		temp_buffer &= ~((*req_mask & 0xf) << 16);
	else
		temp_buffer &= ~0xf0000;
	/* enable internal arbiter */
	temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT);
	/* enable internal PCI master request */
	temp_buffer &= (~(3 << PCI_MASTER0_REQ_MASK_2BITS));

	/* enable EBU request */
	temp_buffer &= (~(3 << PCI_MASTER1_REQ_MASK_2BITS));

	/* enable all external masters request */
	temp_buffer &= (~(3 << PCI_MASTER2_REQ_MASK_2BITS));
	ltq_pci_w32(temp_buffer, PCI_CR_PC_ARB);
	wmb();

	/* setup BAR memory regions */
	ltq_pci_w32(0x18000000, PCI_CR_FCI_ADDR_MAP0);
	ltq_pci_w32(0x18400000, PCI_CR_FCI_ADDR_MAP1);
	ltq_pci_w32(0x18800000, PCI_CR_FCI_ADDR_MAP2);
	ltq_pci_w32(0x18c00000, PCI_CR_FCI_ADDR_MAP3);
	ltq_pci_w32(0x19000000, PCI_CR_FCI_ADDR_MAP4);
	ltq_pci_w32(0x19400000, PCI_CR_FCI_ADDR_MAP5);
	ltq_pci_w32(0x19800000, PCI_CR_FCI_ADDR_MAP6);
	ltq_pci_w32(0x19c00000, PCI_CR_FCI_ADDR_MAP7);
	ltq_pci_w32(0x1ae00000, PCI_CR_FCI_ADDR_MAP11hg);
	ltq_pci_w32(ltq_calc_bar11mask(), PCI_CR_BAR11MASK);
	ltq_pci_w32(0, PCI_CR_PCI_ADDR_MAP11);
	ltq_pci_w32(0, PCI_CS_BASE_ADDR1);
	/* both TX and RX endian swap are enabled */
	ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_EOI) | 3, PCI_CR_PCI_EOI);
	wmb();
	ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR12MASK) | 0x80000000,
		PCI_CR_BAR12MASK);
	ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR13MASK) | 0x80000000,
		PCI_CR_BAR13MASK);
	/* use 8 DW burst length */
	ltq_pci_w32(0x303, PCI_CR_FCI_BURST_LENGTH);
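	/* configuration done: release the busy/retry state entered above */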
	ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) | (1 << 24), PCI_CR_PCI_MOD);
	wmb();

	/* setup irq line */
	ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_CON) | 0xc, LTQ_EBU_PCC_CON);
	ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN);

	/* toggle reset pin */
	if (gpio_is_valid(reset_gpio)) {
		__gpio_set_value(reset_gpio, 0);
		wmb();
		mdelay(1);
		__gpio_set_value(reset_gpio, 1);
	}
	return 0;
}
void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
{
	/* complete all the writes before starting */
	wmb();

	/* kick off PPP engine */
	if (term == MDP_PPP_TERM) {
		if (mdp_debug[MDP_PPP_BLOCK])
			jiffies_to_timeval(jiffies, &mdp_ppp_timeval);

		/* let's turn on PPP block */
		mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

		mdp_enable_irq(term);
		INIT_COMPLETION(mdp_ppp_comp);
		mdp_ppp_waiting = TRUE;
		outpdw(MDP_BASE + 0x30, 0x1000);
		wait_for_completion_killable(&mdp_ppp_comp);
		mdp_disable_irq(term);

		if (mdp_debug[MDP_PPP_BLOCK]) {
			struct timeval now;

			jiffies_to_timeval(jiffies, &now);
			mdp_ppp_timeval.tv_usec =
			    now.tv_usec - mdp_ppp_timeval.tv_usec;
			MSM_FB_INFO("MDP-PPP: %d\n",
				    (int)mdp_ppp_timeval.tv_usec);
		}
	} else if (term == MDP_DMA2_TERM) {
		if (mdp_debug[MDP_DMA2_BLOCK]) {
			MSM_FB_INFO("MDP-DMA2: %d\n",
				    (int)mdp_dma2_timeval.tv_usec);
			jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
		}
		/* DMA update timestamp */
		mdp_dma2_last_update_time = ktime_get_real();
		/* let's turn on DMA2 block */
#if 0
		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
#endif
#ifdef CONFIG_FB_MSM_MDP22
		outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0);/* start DMA */
#else
		mdp_lut_enable();

#ifdef CONFIG_FB_MSM_MDP40
		outpdw(MDP_BASE + 0x000c, 0x0);	/* start DMA */
#else
		outpdw(MDP_BASE + 0x0044, 0x0);	/* start DMA */
#endif
#endif
#ifdef CONFIG_FB_MSM_MDP40
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0010, 0x0);	/* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0014, 0x0);	/* start DMA */
	} else if (term == MDP_OVERLAY0_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0004, 0);
	} else if (term == MDP_OVERLAY1_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0008, 0);
	}
#else
	} else if (term == MDP_DMA_S_TERM) {
Beispiel #30
0
void nlm_common_oprofile_int_handler(int irq, void *dev_id,
	struct pt_regs *regs)
{
	uint32_t counter1, counter2;
	uint32_t control1, control2;
	int i;
	int cpu_id = netlogic_cpu_id() * 4;	/* 0, 4, 8, ..., 28 */
	int h_id = hard_smp_processor_id();	/* 0, 1, 2, ..., 31 */
	int ret, lcpu;
	int sample1_taken = 0;
	int sample2_taken = 0;
	extern struct plat_smp_ops *mp_ops;

	if (g_stop_pmc[h_id])
		return;

	if ((ret = nlm_common_pmc_owner_nolock()) == 0) {
		/* if any counter overflow occurred on this core... */
		if (nlm_common_pc_of_mask1[h_id]) {
			oprofile_add_sample(regs, 0);
		}
		if (nlm_common_pc_of_mask2[h_id]) {
			oprofile_add_sample(regs, 1);
		}
		return;
	}

	control1 = __read_32bit_c0_register($25, 0);
	control2 = __read_32bit_c0_register($25, 2);

	counter1 = __read_32bit_c0_register($25, 1);
	counter2 = __read_32bit_c0_register($25, 3);

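	/* The counters are set up so that an overflow drives the sign bit;
	 * a negative signed value therefore identifies the counter that
	 * raised this interrupt. */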
	if (((int)counter1) < 0) {
		__write_32bit_c0_register($25, 0, 0);
		oprofile_add_sample(regs, 0);
		counter1 = reg.reset_counter[0];
		sample1_taken = 1;

		for (i = 0; i < 4; i++)
			nlm_common_pc_of_mask1[cpu_id + i] = 1;
		wmb();
		for (i = 1; i < 4; i++) {
			lcpu = cpu_number_map(cpu_id + i);
			if (lcpu && cpu_isset(lcpu, cpu_online_map)) {
				mp_ops->send_ipi_single(lcpu, SMP_OPROFILE_IPI);
			}
		}
	}
	if (((int)counter2) < 0) {
		__write_32bit_c0_register($25, 2, 0);
		oprofile_add_sample(regs, 1);
		counter2 = reg.reset_counter[1];
		sample2_taken = 1;

		for (i = 0; i < 4; i++)
			nlm_common_pc_of_mask2[cpu_id + i] = 1;
		wmb();
		for (i = 1; i < 4; i++) {
			lcpu = cpu_number_map(cpu_id + i);
			if (lcpu && cpu_isset(lcpu, cpu_online_map)) {
				mp_ops->send_ipi_single(lcpu, SMP_OPROFILE_IPI);
			}
		}
	}

	if (sample1_taken) {
		__write_32bit_c0_register($25, 1, counter1);
		__write_32bit_c0_register($25, 0, reg.control[0]);
	}
	if (sample2_taken) {
		__write_32bit_c0_register($25, 3, counter2);
		__write_32bit_c0_register($25, 2, reg.control[1]);
	}

}