static void qspi_write32(u32 flags, u32 *addr, u32 val)
{
	flags & QSPI_FLAG_REGMAP_ENDIAN_BIG ?
		out_be32(addr, val) : out_le32(addr, val);
}
Example #2
/*
 * start the DMA
 */
static inline void snd_pmac_dma_run(struct pmac_stream *rec, int status)
{
	out_le32(&rec->dma->control, status | (status << 16));
}
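The `status | (status << 16)` form reflects how the Apple DBDMA channel control register is written: the upper 16 bits select which control bits to change and the lower 16 bits give their new values, so only the masked bits are touched. A minimal sketch of the two common patterns seen in these snippets, reusing the same `rec->dma` pointer and bit names as above:

/* start the channel: mask RUN and set it to 1 */
out_le32(&rec->dma->control, (RUN << 16) | RUN);

/* stop the channel: mask RUN and PAUSE and clear both */
out_le32(&rec->dma->control, (RUN | PAUSE) << 16);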
Example #3
void ppc440spe_setup_pcie(struct pci_controller *hose, int port)
{
    void __iomem *mbase;

    /*
     * Map 16MB, which is enough for 4 bits of bus #
     */
    hose->cfg_data = ioremap64(0xc40000000ull + port * 0x40000000,
                   1 << 24);
    hose->ops = &pcie_pci_ops;

    /*
     * Set bus numbers on our root port
     */
    mbase = ioremap64(0xc50000000ull + port * 0x40000000, 4096);
    out_8(mbase + PCI_PRIMARY_BUS, 0);
    out_8(mbase + PCI_SECONDARY_BUS, 0);

    /*
     * Set up outbound translation to hose->mem_space from PLB
     * addresses at an offset of 0xd_0000_0000.  We set the low
     * bits of the mask to 11 to turn off splitting into 8
     * subregions and to enable the outbound translation.
     */
    out_le32(mbase + PECFG_POM0LAH, 0);
    out_le32(mbase + PECFG_POM0LAL, hose->mem_space.start);

    switch (port) {
    case 0:
        mtdcr(DCRN_PEGPL_OMR1BAH(PCIE0),  0x0000000d);
        mtdcr(DCRN_PEGPL_OMR1BAL(PCIE0),  hose->mem_space.start);
        mtdcr(DCRN_PEGPL_OMR1MSKH(PCIE0), 0x7fffffff);
        mtdcr(DCRN_PEGPL_OMR1MSKL(PCIE0),
              ~(hose->mem_space.end - hose->mem_space.start) | 3);
        break;
    case 1:
        mtdcr(DCRN_PEGPL_OMR1BAH(PCIE1),  0x0000000d);
        mtdcr(DCRN_PEGPL_OMR1BAL(PCIE1),  hose->mem_space.start);
        mtdcr(DCRN_PEGPL_OMR1MSKH(PCIE1), 0x7fffffff);
        mtdcr(DCRN_PEGPL_OMR1MSKL(PCIE1),
              ~(hose->mem_space.end - hose->mem_space.start) | 3);

        break;
    case 2:
        mtdcr(DCRN_PEGPL_OMR1BAH(PCIE2),  0x0000000d);
        mtdcr(DCRN_PEGPL_OMR1BAL(PCIE2),  hose->mem_space.start);
        mtdcr(DCRN_PEGPL_OMR1MSKH(PCIE2), 0x7fffffff);
        mtdcr(DCRN_PEGPL_OMR1MSKL(PCIE2),
              ~(hose->mem_space.end - hose->mem_space.start) | 3);
        break;
    }

    /* Set up 16GB inbound memory window at 0 */
    out_le32(mbase + PCI_BASE_ADDRESS_0, 0);
    out_le32(mbase + PCI_BASE_ADDRESS_1, 0);
    out_le32(mbase + PECFG_BAR0HMPA, 0x7fffffc);
    out_le32(mbase + PECFG_BAR0LMPA, 0);
    out_le32(mbase + PECFG_PIM0LAL, 0);
    out_le32(mbase + PECFG_PIM0LAH, 0);
    out_le32(mbase + PECFG_PIMEN, 0x1);

    /* Enable I/O, Mem, and Busmaster cycles */
    out_le16(mbase + PCI_COMMAND,
         in_le16(mbase + PCI_COMMAND) |
         PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);

    iounmap(mbase);
}
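The OMR1MSKL value computed above is the one's complement of the window size minus one, with the two low bits forced to 0b11 to turn off splitting into 8 subregions and to enable the outbound translation. A minimal worked example, assuming a 256MB memory window (hose->mem_space.end - hose->mem_space.start == 0x0fffffff); everything else is taken from the code above:

/* ~0x0fffffff = 0xf0000000; OR-ing in 3 yields 0xf0000003 */
mtdcr(DCRN_PEGPL_OMR1MSKL(PCIE0), ~0x0fffffffu | 3);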
Example #4
File: ehci-fsl.c Project: vamanea/u-boot
/*
 * Create the appropriate control structures to manage
 * a new EHCI host controller.
 *
 * Excerpts from linux ehci fsl driver.
 */
int ehci_hcd_init(int index, struct ehci_hccr **hccr, struct ehci_hcor **hcor)
{
	struct usb_ehci *ehci;
	const char *phy_type = NULL;
	size_t len;
#ifdef CONFIG_SYS_FSL_USB_INTERNAL_UTMI_PHY
	char usb_phy[5];

	usb_phy[0] = '\0';
#endif

	ehci = (struct usb_ehci *)CONFIG_SYS_FSL_USB_ADDR;
	*hccr = (struct ehci_hccr *)((uint32_t)&ehci->caplength);
	*hcor = (struct ehci_hcor *)((uint32_t) *hccr +
			HC_LENGTH(ehci_readl(&(*hccr)->cr_capbase)));

	/* Set to Host mode */
	setbits_le32(&ehci->usbmode, CM_HOST);

	out_be32(&ehci->snoop1, SNOOP_SIZE_2GB);
	out_be32(&ehci->snoop2, 0x80000000 | SNOOP_SIZE_2GB);

	/* Init phy */
	if (hwconfig_sub("usb1", "phy_type"))
		phy_type = hwconfig_subarg("usb1", "phy_type", &len);
	else
		phy_type = getenv("usb_phy_type");

	if (!phy_type) {
#ifdef CONFIG_SYS_FSL_USB_INTERNAL_UTMI_PHY
		/* if none specified assume internal UTMI */
		strcpy(usb_phy, "utmi");
		phy_type = usb_phy;
#else
		printf("WARNING: USB phy type not defined !!\n");
		return -1;
#endif
	}

	if (!strcmp(phy_type, "utmi")) {
#if defined(CONFIG_SYS_FSL_USB_INTERNAL_UTMI_PHY)
		setbits_be32(&ehci->control, PHY_CLK_SEL_UTMI);
		setbits_be32(&ehci->control, UTMI_PHY_EN);
		udelay(1000); /* delay required for PHY Clk to appear */
#endif
		out_le32(&(*hcor)->or_portsc[0], PORT_PTS_UTMI);
	} else {
#if defined(CONFIG_SYS_FSL_USB_INTERNAL_UTMI_PHY)
		clrbits_be32(&ehci->control, UTMI_PHY_EN);
		setbits_be32(&ehci->control, PHY_CLK_SEL_ULPI);
		udelay(1000); /* delay required for PHY Clk to appear */
#endif
		out_le32(&(*hcor)->or_portsc[0], PORT_PTS_ULPI);
	}

	/* Enable interface. */
	setbits_be32(&ehci->control, USB_EN);

	out_be32(&ehci->prictrl, 0x0000000c);
	out_be32(&ehci->age_cnt_limit, 0x00000040);
	out_be32(&ehci->sictrl, 0x00000001);

	in_le32(&ehci->usbmode);

	return 0;
}
Example #5
/*
 * stop the DMA transfer
 */
static inline void snd_pmac_dma_stop(struct pmac_stream *rec)
{
	out_le32(&rec->dma->control, (RUN|WAKE|FLUSH|PAUSE) << 16);
	snd_pmac_wait_ack(rec);
}
Example #6
File: pasemi.c Project: Cribstone/linino
static int __devinit
pasemi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct pasemi_softc *sc;
	int ret, i;

	DPRINTF(KERN_ERR "%s()\n", __FUNCTION__);

	sc = kzalloc(sizeof(*sc), GFP_KERNEL);
	if (!sc)
		return -ENOMEM;

	softc_device_init(sc, DRV_NAME, 1, pasemi_methods);

	pci_set_drvdata(pdev, sc);

	spin_lock_init(&sc->sc_chnlock);

	sc->sc_sessions = (struct pasemi_session **)
		kzalloc(PASEMI_INITIAL_SESSIONS *
			sizeof(struct pasemi_session *), GFP_ATOMIC);
	if (sc->sc_sessions == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	sc->sc_nsessions = PASEMI_INITIAL_SESSIONS;
	sc->sc_lastchn = 0;
	sc->base_irq = pdev->irq + 6;
	sc->base_chan = 6;
	sc->sc_cid = -1;
	sc->dma_pdev = pdev;

	sc->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!sc->iob_pdev) {
		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
		ret = -ENODEV;
		goto out;
	}

	/* This is hardcoded and ugly, but we have some firmware versions
	 * that don't provide the register space in the device tree. Luckily,
	 * they are at well-known locations, so we can just do the math here.
	 */
	sc->dma_regs =
		ioremap(0xe0000000 + (sc->dma_pdev->devfn << 12), 0x2000);
	sc->iob_regs =
		ioremap(0xe0000000 + (sc->iob_pdev->devfn << 12), 0x2000);
	if (!sc->dma_regs || !sc->iob_regs) {
		dev_err(&pdev->dev, "Can't map registers\n");
		ret = -ENODEV;
		goto out;
	}

	dma_status = __ioremap(0xfd800000, 0x1000, 0);
	if (!dma_status) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "Can't map dmastatus space\n");
		goto out;
	}

	sc->tx = (struct pasemi_fnu_txring *)
		kzalloc(sizeof(struct pasemi_fnu_txring)
			* 8, GFP_KERNEL);
	if (!sc->tx) {
		ret = -ENOMEM;
		goto out;
	}

	/* Initialize the h/w */
	out_le32(sc->dma_regs + PAS_DMA_COM_CFG,
		 (in_le32(sc->dma_regs + PAS_DMA_COM_CFG) |
		  PAS_DMA_COM_CFG_FWF));
	out_le32(sc->dma_regs + PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

	for (i = 0; i < PASEMI_FNU_CHANNELS; i++) {
		sc->sc_num_channels++;
		ret = pasemi_dma_setup_tx_resources(sc, i);
		if (ret)
			goto out;
	}

	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),
					 CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		printk(KERN_ERR DRV_NAME ": could not get crypto driver id\n");
		ret = -ENXIO;
		goto out;
	}

	/* register algorithms with the framework */
	printk(DRV_NAME ":");

	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);

	return 0;

out:
	pasemi_dma_remove(pdev);
	return ret;
}
Example #7
/*
 * pmac_ide_build_dmatable builds the DBDMA command list
 * for a transfer and sets the DBDMA channel to point to it.
 */
static int
pmac_ide_build_dmatable(ide_drive_t *drive, int ix, int wr)
{
	struct dbdma_cmd *table, *tstart;
	int count = 0;
	struct request *rq = HWGROUP(drive)->rq;
	struct buffer_head *bh = rq->bh;
	unsigned int size, addr;
	volatile struct dbdma_regs *dma = pmac_ide[ix].dma_regs;

	table = tstart = (struct dbdma_cmd *) DBDMA_ALIGN(pmac_ide[ix].dma_table);
	out_le32(&dma->control, (RUN|PAUSE|FLUSH|WAKE|DEAD) << 16);
	while (in_le32(&dma->status) & RUN)
		udelay(1);

	do {
		/*
		 * Determine addr and size of next buffer area.  We assume that
		 * individual virtual buffers are always composed linearly in
		 * physical memory.  For example, we assume that any 8kB buffer
		 * is always composed of two adjacent physical 4kB pages rather
		 * than two possibly non-adjacent physical 4kB pages.
		 */
		if (bh == NULL) {  /* paging requests have (rq->bh == NULL) */
			addr = virt_to_bus(rq->buffer);
			size = rq->nr_sectors << 9;
		} else {
			/* group sequential buffers into one large buffer */
			addr = virt_to_bus(bh->b_data);
			size = bh->b_size;
			while ((bh = bh->b_reqnext) != NULL) {
				if ((addr + size) != virt_to_bus(bh->b_data))
					break;
				size += bh->b_size;
			}
		}

		/*
		 * Fill in the next DBDMA command block.
		 * Note that one DBDMA command can transfer
		 * at most 65535 bytes.
		 */
		while (size) {
			unsigned int tc = (size < 0xfe00)? size: 0xfe00;

			if (++count >= MAX_DCMDS) {
				printk(KERN_WARNING "%s: DMA table too small\n",
				       drive->name);
				return 0; /* revert to PIO for this request */
			}
			st_le16(&table->command, wr? OUTPUT_MORE: INPUT_MORE);
			st_le16(&table->req_count, tc);
			st_le32(&table->phy_addr, addr);
			table->cmd_dep = 0;
			table->xfer_status = 0;
			table->res_count = 0;
			addr += tc;
			size -= tc;
			++table;
		}
	} while (bh != NULL);

	/* convert the last command to an input/output last command */
	if (count)
		st_le16(&table[-1].command, wr? OUTPUT_LAST: INPUT_LAST);
	else
		printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name);

	/* add the stop command to the end of the list */
	memset(table, 0, sizeof(struct dbdma_cmd));
	out_le16(&table->command, DBDMA_STOP);

	out_le32(&dma->cmdptr, virt_to_bus(tstart));
	return 1;
}
Example #8
File: bspstart.c Project: epicsdeb/rtems
/*
 * Return the current value of the Embedded Utilities Memory Block Base Address
 * Register (EUMBBAR) as read from the processor configuration register using
 * Processor Address Map B (CHRP).
 */
unsigned int get_eumbbar(void) {
  out_le32( (volatile unsigned *)0xfec00000, 0x80000078 );
  return in_le32( (volatile unsigned *)0xfee00000 );
}
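The two magic addresses in get_eumbbar() are the CHRP Address Map B configuration registers of the host bridge: 0xfec00000 is CONFIG_ADDR and 0xfee00000 is CONFIG_DATA, and 0x80000078 encodes enable bit | bus 0 | device 0 | function 0 | register 0x78, the configuration-space offset of the EUMBBAR. A hedged sketch generalizing the same indirect access to an arbitrary dword offset; the helper name is illustrative and not part of the original BSP:

static unsigned int host_bridge_config_read(unsigned int offset)
{
	/* enable bit | bus 0 | device 0 | function 0 | dword-aligned register */
	out_le32((volatile unsigned *)0xfec00000, 0x80000000u | (offset & 0xfc));
	return in_le32((volatile unsigned *)0xfee00000);
}

With this, get_eumbbar() is equivalent to host_bridge_config_read(0x78).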
static inline void mdio_lo(struct mii_bus *bus)
{
	out_le32(gpio_regs+0x10, 1 << MDIO_PIN(bus));
}
static void fixup_pci(void)
{
	struct pci_range *mem = NULL, *mmio = NULL,
	                 *io = NULL, *mem_base = NULL;
	u32 *pci_regs[3];
	u8 *soc_regs;
	int i, len;
	void *node, *parent_node;
	u32 naddr, nsize, mem_pow2, mem_mask;

	node = finddevice("/pci");
	if (!node || !dt_is_compatible(node, "fsl,pq2-pci"))
		return;

	for (i = 0; i < 3; i++)
		if (!dt_xlate_reg(node, i,
		                  (unsigned long *)&pci_regs[i], NULL))
			goto err;

	soc_regs = (u8 *)fsl_get_immr();
	if (!soc_regs)
		goto unhandled;

	dt_get_reg_format(node, &naddr, &nsize);
	if (naddr != 3 || nsize != 2)
		goto err;

	parent_node = get_parent(node);
	if (!parent_node)
		goto err;

	dt_get_reg_format(parent_node, &naddr, &nsize);
	if (naddr != 1 || nsize != 1)
		goto unhandled;

	len = getprop(node, "ranges", pci_ranges_buf,
	              sizeof(pci_ranges_buf));

	for (i = 0; i < len / sizeof(struct pci_range); i++) {
		u32 flags = pci_ranges_buf[i].flags & 0x43000000;

		if (flags == 0x42000000)
			mem = &pci_ranges_buf[i];
		else if (flags == 0x02000000)
			mmio = &pci_ranges_buf[i];
		else if (flags == 0x01000000)
			io = &pci_ranges_buf[i];
	}

	if (!mem || !mmio || !io)
		goto unhandled;
	if (mem->size[1] != mmio->size[1])
		goto unhandled;
	if (mem->size[1] & (mem->size[1] - 1))
		goto unhandled;
	if (io->size[1] & (io->size[1] - 1))
		goto unhandled;

	if (mem->phys_addr + mem->size[1] == mmio->phys_addr)
		mem_base = mem;
	else if (mmio->phys_addr + mmio->size[1] == mem->phys_addr)
		mem_base = mmio;
	else
		goto unhandled;

	out_be32(&pci_regs[1][0], mem_base->phys_addr | 1);
	out_be32(&pci_regs[2][0], ~(mem->size[1] + mmio->size[1] - 1));

	out_be32(&pci_regs[1][1], io->phys_addr | 1);
	out_be32(&pci_regs[2][1], ~(io->size[1] - 1));

	out_le32(&pci_regs[0][0], mem->pci_addr[1] >> 12);
	out_le32(&pci_regs[0][2], mem->phys_addr >> 12);
	out_le32(&pci_regs[0][4], (~(mem->size[1] - 1) >> 12) | 0xa0000000);

	out_le32(&pci_regs[0][6], mmio->pci_addr[1] >> 12);
	out_le32(&pci_regs[0][8], mmio->phys_addr >> 12);
	out_le32(&pci_regs[0][10], (~(mmio->size[1] - 1) >> 12) | 0x80000000);

	out_le32(&pci_regs[0][12], io->pci_addr[1] >> 12);
	out_le32(&pci_regs[0][14], io->phys_addr >> 12);
	out_le32(&pci_regs[0][16], (~(io->size[1] - 1) >> 12) | 0xc0000000);

	/* Inbound window: map PCI address 0 to local memory at 0 */
	out_le32(&pci_regs[0][58], 0);
	out_le32(&pci_regs[0][60], 0);

	mem_pow2 = 1 << (__ilog2_u32(bd.bi_memsize - 1) + 1);
	mem_mask = ~(mem_pow2 - 1) >> 12;
	out_le32(&pci_regs[0][62], 0xa0000000 | mem_mask);

	/* If the PCI bridge is still held in reset, release it */
	if (!(in_le32(&pci_regs[0][32]) & 1)) {
		 /* wait at least 100 ms from power valid to reset release */
		udelay(100000);

		out_le32(&pci_regs[0][32], 1);

		/* allow time from reset release to the first config access */
		udelay(1020000);
	}

	/* Enable bus master and memory access in the bridge's PCI command register */
	out_le32(&pci_regs[0][64], 0x80000004);
	out_le32(&pci_regs[0][65], in_le32(&pci_regs[0][65]) | 6);

	/* Park the bus on PCI and raise PCI's arbitration priority */
	out_8(&soc_regs[0x10028], 3);
	out_be32((u32 *)&soc_regs[0x1002c], 0x01236745);

	return;

err:
	printf("Bad PCI node -- using existing firmware setup.\r\n");
	return;

unhandled:
	printf("Unsupported PCI node -- using existing firmware setup.\r\n");
}
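The inbound window setup near the end of fixup_pci() rounds the board's memory size up to the next power of two and converts it into a 4KB-granular mask before writing it to pci_regs[0][62]. A worked example, assuming bd.bi_memsize is 64MB (0x04000000); the register indices are unchanged from the code above:

mem_pow2 = 1 << (__ilog2_u32(0x04000000 - 1) + 1); /* 1 << 26 = 0x04000000 */
mem_mask = ~(mem_pow2 - 1) >> 12;                   /* 0xfc000000 >> 12 = 0x000fc000 */
out_le32(&pci_regs[0][62], 0xa0000000 | mem_mask);  /* writes 0xa00fc000 */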
Example #11
static irqreturn_t swim3_interrupt(int irq, void *dev_id)
{
	struct floppy_state *fs = (struct floppy_state *) dev_id;
	struct swim3 __iomem *sw = fs->swim3;
	int intr, err, n;
	int stat, resid;
	struct dbdma_regs __iomem *dr;
	struct dbdma_cmd *cp;

	intr = in_8(&sw->intr);
	err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
	if ((intr & ERROR_INTR) && fs->state != do_transfer)
		printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
		       fs->state, rq_data_dir(fd_req), intr, err);
	switch (fs->state) {
	case locating:
		if (intr & SEEN_SECTOR) {
			out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
			out_8(&sw->select, RELAX);
			out_8(&sw->intr_enable, 0);
			del_timer(&fs->timeout);
			fs->timeout_pending = 0;
			if (sw->ctrack == 0xff) {
				printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
				fs->cur_cyl = -1;
				if (fs->retries > 5) {
					swim3_end_request_cur(-EIO);
					fs->state = idle;
					start_request(fs);
				} else {
					fs->state = jogging;
					act(fs);
				}
				break;
			}
			fs->cur_cyl = sw->ctrack;
			fs->cur_sector = sw->csect;
			if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
				printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
				       fs->expect_cyl, fs->cur_cyl);
			fs->state = do_transfer;
			act(fs);
		}
		break;
	case seeking:
	case jogging:
		if (sw->nseek == 0) {
			out_8(&sw->control_bic, DO_SEEK);
			out_8(&sw->select, RELAX);
			out_8(&sw->intr_enable, 0);
			del_timer(&fs->timeout);
			fs->timeout_pending = 0;
			if (fs->state == seeking)
				++fs->retries;
			fs->state = settling;
			act(fs);
		}
		break;
	case settling:
		out_8(&sw->intr_enable, 0);
		del_timer(&fs->timeout);
		fs->timeout_pending = 0;
		act(fs);
		break;
	case do_transfer:
		if ((intr & (ERROR_INTR | TRANSFER_DONE)) == 0)
			break;
		out_8(&sw->intr_enable, 0);
		out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
		out_8(&sw->select, RELAX);
		del_timer(&fs->timeout);
		fs->timeout_pending = 0;
		dr = fs->dma;
		cp = fs->dma_cmd;
		if (rq_data_dir(fd_req) == WRITE)
			++cp;
		/*
		 * Check that the main data transfer has finished.
		 * On writing, the swim3 sometimes doesn't use
		 * up all the bytes of the postamble, so we can still
		 * see DMA active here.  That doesn't matter as long
		 * as all the sector data has been transferred.
		 */
		if ((intr & ERROR_INTR) == 0 && cp->xfer_status == 0) {
			/* wait a little while for DMA to complete */
			for (n = 0; n < 100; ++n) {
				if (cp->xfer_status != 0)
					break;
				udelay(1);
				barrier();
			}
		}
		/* turn off DMA */
		out_le32(&dr->control, (RUN | PAUSE) << 16);
		stat = ld_le16(&cp->xfer_status);
		resid = ld_le16(&cp->res_count);
		if (intr & ERROR_INTR) {
			n = fs->scount - 1 - resid / 512;
			if (n > 0) {
				blk_update_request(fd_req, 0, n << 9);
				fs->req_sector += n;
			}
			if (fs->retries < 5) {
				++fs->retries;
				act(fs);
			} else {
				printk("swim3: error %sing block %ld (err=%x)\n",
				       rq_data_dir(fd_req) == WRITE? "writ": "read",
				       (long)blk_rq_pos(fd_req), err);
				swim3_end_request_cur(-EIO);
				fs->state = idle;
			}
		} else {
			if ((stat & ACTIVE) == 0 || resid != 0) {
				/* musta been an error */
				printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
				printk(KERN_ERR "  state=%d, dir=%x, intr=%x, err=%x\n",
				       fs->state, rq_data_dir(fd_req), intr, err);
				swim3_end_request_cur(-EIO);
				fs->state = idle;
				start_request(fs);
				break;
			}
			if (swim3_end_request(0, fs->scount << 9)) {
				fs->req_sector += fs->scount;
				if (fs->req_sector > fs->secpertrack) {
					fs->req_sector -= fs->secpertrack;
					if (++fs->head > 1) {
						fs->head = 0;
						++fs->req_cyl;
					}
				}
				act(fs);
			} else
				fs->state = idle;
		}
		if (fs->state == idle)
			start_request(fs);
		break;
	default:
		printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
	}
	return IRQ_HANDLED;
}
Example #12
void post_word_store(ulong value)
{
	volatile void* addr = (void *) (gd->ram_size - BOOTCOUNT_ADDR + POST_WORD_OFF);
	out_le32(addr, value);
}
Example #13
void post_word_store(ulong value)
{
	void* addr = (ulong *) (CPM_POST_WORD_ADDR);
	debug("post_word_store 0x%08lX: 0x%08lX\n", (ulong)addr, value);
	out_le32(addr, value);
}
Example #14
File: il-gen.c Project: jcbeaudoin/MKCL
static void out_opi(int op, int c)
{
    out_op1(op);
    out_le32(c);
    fprintf(il_outfile, " %s 0x%x\n", il_opcodes_str[op], c);
}
Example #15
static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
			  u_int transp, struct fb_info *info)
{
	struct offb_par *par = (struct offb_par *) info->par;

	if (!par->cmap_adr || regno > 255)
		return 1;

	red >>= 8;
	green >>= 8;
	blue >>= 8;

	switch (par->cmap_type) {
	case cmap_m64:
		writeb(regno, par->cmap_adr);
		writeb(red, par->cmap_data);
		writeb(green, par->cmap_data);
		writeb(blue, par->cmap_data);
		break;
	case cmap_M3A:
		/* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */
		out_le32(par->cmap_adr + 0x58,
			 in_le32(par->cmap_adr + 0x58) & ~0x20);
	case cmap_r128:
		/* Set palette index & data */
		out_8(par->cmap_adr + 0xb0, regno);
		out_le32(par->cmap_adr + 0xb4,
			 (red << 16 | green << 8 | blue));
		break;
	case cmap_M3B:
		/* Set PALETTE_ACCESS_CNTL in DAC_CNTL */
		out_le32(par->cmap_adr + 0x58,
			 in_le32(par->cmap_adr + 0x58) | 0x20);
		/* Set palette index & data */
		out_8(par->cmap_adr + 0xb0, regno);
		out_le32(par->cmap_adr + 0xb4, (red << 16 | green << 8 | blue));
		break;
	case cmap_radeon:
		/* Set palette index & data (could be smarter) */
		out_8(par->cmap_adr + 0xb0, regno);
		out_le32(par->cmap_adr + 0xb4, (red << 16 | green << 8 | blue));
		break;
	case cmap_gxt2000:
		out_le32((unsigned __iomem *) par->cmap_adr + regno,
			 (red << 16 | green << 8 | blue));
		break;
	}

	if (regno < 16)
		switch (info->var.bits_per_pixel) {
		case 16:
			((u16 *) (info->pseudo_palette))[regno] =
			    (regno << 10) | (regno << 5) | regno;
			break;
		case 32:
			{
				int i = (regno << 8) | regno;
				((u32 *) (info->pseudo_palette))[regno] =
				    (i << 16) | i;
				break;
			}
		}
	return 0;
}
static inline void mdc_hi(struct mii_bus *bus)
{
	out_le32(gpio_regs, 1 << MDC_PIN(bus));
}
Example #17
File: pasemi.c Project: Cribstone/linino
static int pasemi_dma_setup_tx_resources(struct pasemi_softc *sc, int chan)
{
	u32 val;
	int chan_index = chan + sc->base_chan;
	int ret;
	struct pasemi_fnu_txring *ring;

	ring = &sc->tx[chan];

	spin_lock_init(&ring->fill_lock);
	spin_lock_init(&ring->clean_lock);

	ring->desc_info = kzalloc(sizeof(struct pasemi_desc_info) *
				  TX_RING_SIZE, GFP_KERNEL);
	if (!ring->desc_info)
		return -ENOMEM;

	/* Allocate descriptors */
	ring->desc = dma_alloc_coherent(&sc->dma_pdev->dev,
					TX_RING_SIZE *
					2 * sizeof(u64),
					&ring->dma, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	memset((void *) ring->desc, 0, TX_RING_SIZE * 2 * sizeof(u64));

	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), 0x30);

	ring->total_pktcnt = 0;

	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEL(chan_index),
		 PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));

	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
	val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);

	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEU(chan_index), val);

	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_CFG(chan_index),
		 PAS_DMA_TXCHAN_CFG_TY_FUNC |
		 PAS_DMA_TXCHAN_CFG_TATTR(chan) |
		 PAS_DMA_TXCHAN_CFG_WT(2));

	/* enable tx channel */
	out_le32(sc->dma_regs +
		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
		 PAS_DMA_TXCHAN_TCMDSTA_EN);

	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_CFG(chan_index),
		 PAS_IOB_DMA_TXCH_CFG_CNTTH(1000));

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;

	snprintf(ring->irq_name, sizeof(ring->irq_name),
		 "%s%d", "crypto", chan);

	ring->irq = irq_create_mapping(NULL, sc->base_irq + chan);
	ret = request_irq(ring->irq, (irq_handler_t)
			  pasemi_intr, IRQF_DISABLED, ring->irq_name, sc);
	if (ret) {
		printk(KERN_ERR DRV_NAME ": failed to hook irq %d ret %d\n",
		       ring->irq, ret);
		ring->irq = -1;
		return ret;
	}

	setup_timer(&ring->crypto_timer, (void *) sweepup_tx, (unsigned long) sc);

	return 0;
}
static inline void mdio_active(struct mii_bus *bus)
{
	out_le32(gpio_regs+0x20, (1 << MDC_PIN(bus)) | (1 << MDIO_PIN(bus)));
}
Example #19
File: pasemi.c Project: Cribstone/linino
static void pasemi_ring_incr(struct pasemi_softc *sc, int chan_index, int incr)
{
	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_INCR(sc->base_chan + chan_index),
		 incr);
}
static inline void mdio_tristate(struct mii_bus *bus)
{
	out_le32(gpio_regs+0x30, (1 << MDIO_PIN(bus)));
}
Example #21
int pmac_ide_dmaproc(ide_dma_action_t func, ide_drive_t *drive)
{
	int ix, dstat, i = 0;
	volatile struct dbdma_regs *dma;

	/* Can we stuff a pointer to our intf structure in config_data
	 * or select_data in hwif ?
	 */
	ix = pmac_ide_find(drive);
	if (ix < 0)
		return 0;		
	dma = pmac_ide[ix].dma_regs;

	switch (func) {
	case ide_dma_off:
		printk(KERN_INFO "%s: DMA disabled\n", drive->name);
	case ide_dma_off_quietly:
		drive->using_dma = 0;
		break;
	case ide_dma_on:
	case ide_dma_check:
		pmac_ide_check_dma(drive);
		break;
	case ide_dma_read:
	case ide_dma_write:
		if (!pmac_ide_build_dmatable(drive, ix, func==ide_dma_write))
			return 1;
		drive->waiting_for_dma = 1;
		if (drive->media != ide_disk)
			return 0;
		ide_set_handler(drive, &ide_dma_intr, WAIT_CMD, NULL);
		OUT_BYTE(func==ide_dma_write? WIN_WRITEDMA: WIN_READDMA,
			 IDE_COMMAND_REG);
	case ide_dma_begin:
		out_le32(&dma->control, (RUN << 16) | RUN);
		break;
	case ide_dma_end:
		drive->waiting_for_dma = 0;
		dstat = in_le32(&dma->status);
		out_le32(&dma->control, ((RUN|WAKE|DEAD) << 16));
		/* verify good dma status */
		return (dstat & (RUN|DEAD|ACTIVE)) != RUN;
	case ide_dma_test_irq:
		if ((in_le32(&dma->status) & (RUN|ACTIVE)) == RUN)
			return 1;
		/* That's a bit ugly and dangerous, but it works in our case
		 * to work around a problem with the channel status staying
		 * active if the drive returns an error.
		 */
		if (IDE_CONTROL_REG) {
			byte stat;
			stat = GET_ALTSTAT();
			if (stat & ERR_STAT)
				return 1;
		}
		/* In some edge cases, some data may still be in the dbdma
		 * engine fifo, so we wait a bit for dbdma to complete.
		 */
		while ((in_le32(&dma->status) & (RUN|ACTIVE)) != RUN) {
			if (++i > 100)
				return 0;
			udelay(1);
		}
		return 1;

		/* Let's implement those just in case someone wants them */
	case ide_dma_bad_drive:
	case ide_dma_good_drive:
		return check_drive_lists(drive, (func == ide_dma_good_drive));
	case ide_dma_verbose:
		return report_drive_dmaing(drive);
	case ide_dma_retune:
	case ide_dma_lostirq:
	case ide_dma_timeout:
		printk(KERN_WARNING "ide_pmac_dmaproc: chipset supported %s func only: %d\n", ide_dmafunc_verbose(func),  func);
		return 1;
	default:
		printk(KERN_WARNING "ide_pmac_dmaproc: unsupported %s func: %d\n", ide_dmafunc_verbose(func), func);
		return 1;
	}
	return 0;
}
Example #22
/* pasemi_write_iob_reg - write IOB register
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_iob_reg(unsigned int reg, unsigned int val)
{
	out_le32(iob_regs+reg, val);
}
Example #23
File: mc.c Project: rosterloh/u-boot
int mc_init(u64 mc_fw_addr, u64 mc_dpc_addr)
{
	int error = 0;
	int portal_id = 0;
	struct mc_ccsr_registers __iomem *mc_ccsr_regs = MC_CCSR_BASE_ADDR;
	u64 mc_ram_addr = mc_get_dram_addr();
	u32 reg_gsr;
	u32 reg_mcfbalr;
#ifndef CONFIG_SYS_LS_MC_FW_IN_DDR
	const void *raw_image_addr;
	size_t raw_image_size = 0;
#endif
	struct mc_version mc_ver_info;
	u64 mc_ram_aligned_base_addr;
	u8 mc_ram_num_256mb_blocks;
	size_t mc_ram_size = mc_get_dram_block_size();


	error = calculate_mc_private_ram_params(mc_ram_addr,
						mc_ram_size,
						&mc_ram_aligned_base_addr,
						&mc_ram_num_256mb_blocks);
	if (error != 0)
		goto out;

	/*
	 * Management Complex cores should be held at reset out of POR.
	 * U-boot should be the first software to touch MC. To be safe,
	 * we reset all cores again by setting GCR1 to 0. It doesn't do
	 * anything if they are held at reset. After we setup the firmware
	 * we kick off MC by deasserting the reset bit for core 0, and
	 * deasserting the reset bits for Command Portal Managers.
	 * The stop bits are not touched here. They are used to stop the
	 * cores when they are active. Setting stop bits doesn't stop the
	 * cores from fetching instructions when they are released from
	 * reset.
	 */
	out_le32(&mc_ccsr_regs->reg_gcr1, 0);
	dmb();

#ifdef CONFIG_SYS_LS_MC_FW_IN_DDR
	printf("MC firmware is preloaded to %#llx\n", mc_ram_addr);
#else
	error = parse_mc_firmware_fit_image(mc_fw_addr, &raw_image_addr,
					    &raw_image_size);
	if (error != 0)
		goto out;
	/*
	 * Load the MC FW at the beginning of the MC private DRAM block:
	 */
	mc_copy_image("MC Firmware",
		      (u64)raw_image_addr, raw_image_size, mc_ram_addr);
#endif
	dump_ram_words("firmware", (void *)mc_ram_addr);

	error = load_mc_dpc(mc_ram_addr, mc_ram_size, mc_dpc_addr);
	if (error != 0)
		goto out;

	debug("mc_ccsr_regs %p\n", mc_ccsr_regs);
	dump_mc_ccsr_regs(mc_ccsr_regs);

	/*
	 * Tell MC what is the address range of the DRAM block assigned to it:
	 */
	reg_mcfbalr = (u32)mc_ram_aligned_base_addr |
		      (mc_ram_num_256mb_blocks - 1);
	out_le32(&mc_ccsr_regs->reg_mcfbalr, reg_mcfbalr);
	out_le32(&mc_ccsr_regs->reg_mcfbahr,
		 (u32)(mc_ram_aligned_base_addr >> 32));
	out_le32(&mc_ccsr_regs->reg_mcfapr, FSL_BYPASS_AMQ);

	/*
	 * Tell the MC that we want delayed DPL deployment.
	 */
	out_le32(&mc_ccsr_regs->reg_gsr, 0xDD00);

	printf("\nfsl-mc: Booting Management Complex ... ");

	/*
	 * Deassert reset and release MC core 0 to run
	 */
	out_le32(&mc_ccsr_regs->reg_gcr1, GCR1_P1_DE_RST | GCR1_M_ALL_DE_RST);
	error = wait_for_mc(true, &reg_gsr);
	if (error != 0)
		goto out;

	/*
	 * TODO: need to obtain the portal_id for the root container from the
	 * DPL
	 */
	portal_id = 0;

	/*
	 * Initialize the global default MC portal
	 * And check that the MC firmware is responding portal commands:
	 */
	root_mc_io = (struct fsl_mc_io *)malloc(sizeof(struct fsl_mc_io));
	if (!root_mc_io) {
		printf(" No memory: malloc() failed\n");
		return -ENOMEM;
	}

	root_mc_io->mmio_regs = SOC_MC_PORTAL_ADDR(portal_id);
	debug("Checking access to MC portal of root DPRC container (portal_id %d, portal physical addr %p)\n",
	      portal_id, root_mc_io->mmio_regs);

	error = mc_get_version(root_mc_io, MC_CMD_NO_FLAGS, &mc_ver_info);
	if (error != 0) {
		printf("fsl-mc: ERROR: Firmware version check failed (error: %d)\n",
		       error);
		goto out;
	}

	if (MC_VER_MAJOR != mc_ver_info.major) {
		printf("fsl-mc: ERROR: Firmware major version mismatch (found: %d, expected: %d)\n",
		       mc_ver_info.major, MC_VER_MAJOR);
		printf("fsl-mc: Update the Management Complex firmware\n");

		error = -ENODEV;
		goto out;
	}

	if (MC_VER_MINOR != mc_ver_info.minor)
		printf("fsl-mc: WARNING: Firmware minor version mismatch (found: %d, expected: %d)\n",
		       mc_ver_info.minor, MC_VER_MINOR);

	printf("fsl-mc: Management Complex booted (version: %d.%d.%d, boot status: %#x)\n",
	       mc_ver_info.major, mc_ver_info.minor, mc_ver_info.revision,
	       reg_gsr & GSR_FS_MASK);

out:
	if (error != 0)
		mc_boot_status = error;
	else
		mc_boot_status = 0;

	return error;
}
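The reg_mcfbalr write in mc_init() packs the low 32 bits of the 256MB-aligned base address of the MC private DRAM block together with the number of 256MB blocks minus one, while the upper address bits go into reg_mcfbahr. A minimal worked example, assuming a 512MB block based at physical address 0x80000000 (the size and address are illustrative, not from the original):

/* two 256MB blocks -> count field = (2 - 1); the aligned base has its low bits clear */
out_le32(&mc_ccsr_regs->reg_mcfbalr, 0x80000000u | (2 - 1)); /* 0x80000001 */
out_le32(&mc_ccsr_regs->reg_mcfbahr, 0);                      /* base address >> 32 */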
Example #24
/* pasemi_write_mac_reg - write MAC register
 * @intf: MAC interface
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val)
{
	out_le32(mac_regs[intf]+reg, val);
}
Example #25
/*
 * set the command pointer address
 */
static inline void snd_pmac_dma_set_command(struct pmac_stream *rec, struct pmac_dbdma *cmd)
{
	out_le32(&rec->dma->cmdptr, cmd->addr);
}
Example #26
void __init
bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip)
{
#ifdef CONFIG_PCI

	unsigned int bar_response, bar;
	/*
	 * Expected PCI mapping:
	 *
	 *  PLB addr             PCI memory addr
	 *  ---------------------       ---------------------
	 *  0000'0000 - 7fff'ffff <---  0000'0000 - 7fff'ffff
	 *  8000'0000 - Bfff'ffff --->  8000'0000 - Bfff'ffff
	 *
	 *  PLB addr             PCI io addr
	 *  ---------------------       ---------------------
	 *  e800'0000 - e800'ffff --->  0000'0000 - 0001'0000
	 *
	 * The following code is simplified by assuming that the bootrom
	 * has been well behaved in following this mapping.
	 */

#ifdef DEBUG
	int i;

	printk("ioremap PCLIO_BASE = 0x%x\n", pcip);
	printk("PCI bridge regs before fixup \n");
	for (i = 0; i <= 3; i++) {
		printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].ma)));
		printk(" pmm%dla\t0x%x\n", i, in_le32(&(pcip->pmm[i].la)));
		printk(" pmm%dpcila\t0x%x\n", i, in_le32(&(pcip->pmm[i].pcila)));
		printk(" pmm%dpciha\t0x%x\n", i, in_le32(&(pcip->pmm[i].pciha)));
	}
	printk(" ptm1ms\t0x%x\n", in_le32(&(pcip->ptm1ms)));
	printk(" ptm1la\t0x%x\n", in_le32(&(pcip->ptm1la)));
	printk(" ptm2ms\t0x%x\n", in_le32(&(pcip->ptm2ms)));
	printk(" ptm2la\t0x%x\n", in_le32(&(pcip->ptm2la)));

#endif

	/* added for IBM boot rom version 1.15 bios bar changes  -AK */

	/* Disable region first */
	out_le32((void *) &(pcip->pmm[0].ma), 0x00000000);
	/* PLB starting addr, PCI: 0x80000000 */
	out_le32((void *) &(pcip->pmm[0].la), 0x80000000);
	/* PCI start addr, 0x80000000 */
	out_le32((void *) &(pcip->pmm[0].pcila), PPC405_PCI_MEM_BASE);
	/* 512MB range of PLB to PCI */
	out_le32((void *) &(pcip->pmm[0].pciha), 0x00000000);
	/* Enable no pre-fetch, enable region */
	out_le32((void *) &(pcip->pmm[0].ma), ((0xffffffff -
						(PPC405_PCI_UPPER_MEM -
						 PPC405_PCI_MEM_BASE)) | 0x01));

	/* Disable region one */
	out_le32((void *) &(pcip->pmm[1].ma), 0x00000000);
	out_le32((void *) &(pcip->pmm[1].la), 0x00000000);
	out_le32((void *) &(pcip->pmm[1].pcila), 0x00000000);
	out_le32((void *) &(pcip->pmm[1].pciha), 0x00000000);
	out_le32((void *) &(pcip->pmm[1].ma), 0x00000000);
	out_le32((void *) &(pcip->ptm1ms), 0x00000001);

	/* Disable region two */
	out_le32((void *) &(pcip->pmm[2].ma), 0x00000000);
	out_le32((void *) &(pcip->pmm[2].la), 0x00000000);
	out_le32((void *) &(pcip->pmm[2].pcila), 0x00000000);
	out_le32((void *) &(pcip->pmm[2].pciha), 0x00000000);
	out_le32((void *) &(pcip->pmm[2].ma), 0x00000000);
	out_le32((void *) &(pcip->ptm2ms), 0x00000000);
	out_le32((void *) &(pcip->ptm2la), 0x00000000);

	/* Zero config bars */
	for (bar = PCI_BASE_ADDRESS_1; bar <= PCI_BASE_ADDRESS_2; bar += 4) {
		early_write_config_dword(hose, hose->first_busno,
					 PCI_FUNC(hose->first_busno), bar,
					 0x00000000);
		early_read_config_dword(hose, hose->first_busno,
					PCI_FUNC(hose->first_busno), bar,
					&bar_response);
		DBG("BUS %d, device %d, Function %d bar 0x%8.8x is 0x%8.8x\n",
		    hose->first_busno, PCI_SLOT(hose->first_busno),
		    PCI_FUNC(hose->first_busno), bar, bar_response);
	}
	/* end workaround */

#ifdef DEBUG
	printk("PCI bridge regs after fixup \n");
	for (i = 0; i <= 3; i++) {
		printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].ma)));
		printk(" pmm%dla\t0x%x\n", i, in_le32(&(pcip->pmm[i].la)));
		printk(" pmm%dpcila\t0x%x\n", i, in_le32(&(pcip->pmm[i].pcila)));
		printk(" pmm%dpciha\t0x%x\n", i, in_le32(&(pcip->pmm[i].pciha)));
	}
	printk(" ptm1ms\t0x%x\n", in_le32(&(pcip->ptm1ms)));
	printk(" ptm1la\t0x%x\n", in_le32(&(pcip->ptm1la)));
	printk(" ptm2ms\t0x%x\n", in_le32(&(pcip->ptm2ms)));
	printk(" ptm2la\t0x%x\n", in_le32(&(pcip->ptm2la)));

#endif
#endif
}
Example #27
static int
indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		      int len, u32 val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	volatile void __iomem *cfg_data;
	u8 cfg_type = 0;
	u32 bus_no, reg;

	if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK) {
		if (bus->number != hose->first_busno)
			return PCIBIOS_DEVICE_NOT_FOUND;
		if (devfn != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (ppc_md.pci_exclude_device)
		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
			return PCIBIOS_DEVICE_NOT_FOUND;

	if (hose->indirect_type & PPC_INDIRECT_TYPE_SET_CFG_TYPE)
		if (bus->number != hose->first_busno)
			cfg_type = 1;

	bus_no = (bus->number == hose->first_busno) ?
			hose->self_busno : bus->number;

	if (hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG)
		reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
	else
		reg = offset & 0xfc;

	if (hose->indirect_type & PPC_INDIRECT_TYPE_BIG_ENDIAN)
		out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
			 (devfn << 8) | reg | cfg_type));
	else
		out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
			 (devfn << 8) | reg | cfg_type));

	/* suppress setting of PCI_PRIMARY_BUS */
	if (hose->indirect_type & PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS)
		if ((offset == PCI_PRIMARY_BUS) &&
			(bus->number == hose->first_busno))
		val &= 0xffffff00;

	/* Workaround for PCI_28 Errata in 440EPx/GRx */
	if ((hose->indirect_type & PPC_INDIRECT_TYPE_BROKEN_MRM) &&
			offset == PCI_CACHE_LINE_SIZE) {
		val = 0;
	}

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	cfg_data = hose->cfg_data + (offset & 3);
	switch (len) {
	case 1:
		out_8(cfg_data, val);
		break;
	case 2:
		out_le16(cfg_data, val);
		break;
	default:
		out_le32(cfg_data, val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}
Example #28
void BSP_motload_pci_fixup(void)
{
uint32_t	b0,b1,r0,r1,lim,dis;

	/* MotLoad on the mvme5500 and mvme6100 configures the PCI
	 * busses nicely, i.e., the values read from the memory address
	 * space BARs by means of PCI config cycles directly reflect the
	 * CPU memory map. Thus, the presence of two hoses is already hidden.
	 *
	 * Unfortunately, all PCI I/O addresses are 'zero-based' i.e.,
	 * a hose-specific base address would have to be added to
	 * the values read from config space.
	 *
	 * We fix this here so I/O BARs also reflect the CPU memory map.
	 *
	 * Furthermore, the mvme5500 uses
	 *    f000.0000
	 *  ..f07f.ffff  for PCI-0 / hose0
	 *
	 *  and
	 *
	 *    f080.0000
	 *  ..f0ff.0000  for PCI-1 / hose 0
	 *
	 *  whereas the mvme6100 does it the other way round...
	 */

	b0 = in_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI0_IO_Low_Decode) );
	b1 = in_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI1_IO_Low_Decode) );

	r0 = in_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI0_IO_Remap) );
	r1 = in_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI1_IO_Remap) );

	switch ( BSP_getDiscoveryVersion(0) ) {
		case MV_64360:
			/* In case of the MV64360 the 'limit' is actually a 'size'!
			 * Disable by setting special bits in the 'BAR disable reg'.
			 */
			dis = in_le32( (volatile unsigned*)(BSP_MV64x60_BASE + MV_64360_BASE_ADDR_DISBL) );
			/* disable PCI0 I/O and PCI1 I/O */
			out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + MV_64360_BASE_ADDR_DISBL), dis | (1<<9) | (1<<14) );
			/* remap busses on hose 0; if the remap register was already set, assume
			 * that someone else [such as the bootloader] already performed the fixup
			 */
			if ( (b0 & 0xffff) && 0 == (r0 & 0xffff) ) {
				rtems_pci_io_remap( 0, BSP_pci_hose1_bus_base, (b0 & 0xffff)<<16 );
				out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI0_IO_Remap), (b0 & 0xffff) );
			}

			/* remap busses on hose 1 */
			if ( (b1 & 0xffff) && 0 == (r1 & 0xffff) ) {
				rtems_pci_io_remap( BSP_pci_hose1_bus_base, pci_bus_count(), (b1 & 0xffff)<<16 );
				out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI1_IO_Remap), (b1 & 0xffff) );
			}

			/* re-enable */
			out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + MV_64360_BASE_ADDR_DISBL), dis );
		break;

		case GT_64260_A:
		case GT_64260_B:
			
			if ( (b0 & 0xfff) && 0 == (r0 & 0xfff) ) { /* bases are only 12 bits */
				/* switch window off by setting the limit < base */
				lim = in_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI0_IO_High_Decode) );
				out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI0_IO_High_Decode), 0 );
				/* remap busses on hose 0 */
				rtems_pci_io_remap( 0, BSP_pci_hose1_bus_base, (b0 & 0xfff)<<20 );

				/* BTW: it seems that writing the base register also copies the
				 * value into the 'remap' register automatically (??)
				 */
				out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI0_IO_Remap), (b0 & 0xfff) );

				/* re-enable */
				out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI0_IO_High_Decode), lim );
			}

			if ( (b1 & 0xfff) && 0 == (r1 & 0xfff) ) { /* bases are only 12 bits */
				/* switch window off by setting the limit < base */
				lim = in_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI1_IO_High_Decode) );
				out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI1_IO_High_Decode), 0 );

				/* remap busses on hose 1 */
				rtems_pci_io_remap( BSP_pci_hose1_bus_base, pci_bus_count(), (b1 & 0xfff)<<20 );

				out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI1_IO_Remap), (b1 & 0xfff) );

				/* re-enable */
				out_le32( (volatile unsigned*)(BSP_MV64x60_BASE + GT_PCI1_IO_High_Decode), lim );
			}
		break;

		default:
			BSP_panic("Unknown discovery version; switch in file: "__FILE__" not implemented (yet)");
		break; /* never get here */
	}

	/* Fixup the IRQ lines; the mvme6100 maps them nicely into our scheme, i.e., GPP
	 * interrupts start at 64 upwards
	 *
	 * The mvme5500 is apparently initialized differently :-(. GPP interrupts start at 0
	 * Since all PCI interrupts are wired to GPP we simply check for a value < 64 and
	 * reprogram the interrupt line register.
	 */
	BSP_pciScan(0, fixup_irq_line, 0);
}
Example #29
static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
					       struct pci_controller *hose,
					       void __iomem *mbase)
{
	u32 lah, lal, pciah, pcial, sa;
	int i, j;

	/* Setup outbound memory windows */
	for (i = j = 0; i < 3; i++) {
		struct resource *res = &hose->mem_resources[i];

		/* we only care about memory windows */
		if (!(res->flags & IORESOURCE_MEM))
			continue;
		if (j > 1) {
			printk(KERN_WARNING "%s: Too many ranges\n",
			       port->node->full_name);
			break;
		}

		/* Calculate register values */
		lah = RES_TO_U32_HIGH(res->start);
		lal = RES_TO_U32_LOW(res->start);
		pciah = RES_TO_U32_HIGH(res->start - hose->pci_mem_offset);
		pcial = RES_TO_U32_LOW(res->start - hose->pci_mem_offset);
		sa = res->end + 1 - res->start;
		if (!is_power_of_2(sa) || sa < 0x100000 ||
		    sa > 0xffffffffu) {
			printk(KERN_WARNING "%s: Resource out of range\n",
			       port->node->full_name);
			continue;
		}
		sa = (0xffffffffu << ilog2(sa)) | 0x1;

		/* Program register values */
		switch (j) {
		case 0:
			out_le32(mbase + PECFG_POM0LAH, pciah);
			out_le32(mbase + PECFG_POM0LAL, pcial);
			dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
			dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, sa | 3);
			break;
		case 1:
			out_le32(mbase + PECFG_POM1LAH, pciah);
			out_le32(mbase + PECFG_POM1LAL, pcial);
			dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
			dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
			dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
			dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, sa | 3);
			break;
		}
		j++;
	}

	/* Configure IO, always 64K starting at 0 */
	if (hose->io_resource.flags & IORESOURCE_IO) {
		lah = RES_TO_U32_HIGH(hose->io_base_phys);
		lal = RES_TO_U32_LOW(hose->io_base_phys);
		out_le32(mbase + PECFG_POM2LAH, 0);
		out_le32(mbase + PECFG_POM2LAL, 0);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0xffff0000 | 3);
	}
}
Example #30
static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
			  u_int transp, struct fb_info *info)
{
	struct offb_par *par = (struct offb_par *) info->par;
	int i, depth;
	u32 *pal = info->pseudo_palette;

	depth = info->var.bits_per_pixel;
	if (depth == 16)
		depth = (info->var.green.length == 5) ? 15 : 16;

	if (regno > 255 ||
	    (depth == 16 && regno > 63) ||
	    (depth == 15 && regno > 31))
		return 1;

	if (regno < 16) {
		switch (depth) {
		case 15:
			pal[regno] = (regno << 10) | (regno << 5) | regno;
			break;
		case 16:
			pal[regno] = (regno << 11) | (regno << 5) | regno;
			break;
		case 24:
			pal[regno] = (regno << 16) | (regno << 8) | regno;
			break;
		case 32:
			i = (regno << 8) | regno;
			pal[regno] = (i << 16) | i;
			break;
		}
	}

	red >>= 8;
	green >>= 8;
	blue >>= 8;

	if (!par->cmap_adr)
		return 0;

	switch (par->cmap_type) {
	case cmap_m64:
		writeb(regno, par->cmap_adr);
		writeb(red, par->cmap_data);
		writeb(green, par->cmap_data);
		writeb(blue, par->cmap_data);
		break;
	case cmap_M3A:
		/* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */
		out_le32(par->cmap_adr + 0x58,
			 in_le32(par->cmap_adr + 0x58) & ~0x20);
	case cmap_r128:
		/* Set palette index & data */
		out_8(par->cmap_adr + 0xb0, regno);
		out_le32(par->cmap_adr + 0xb4,
			 (red << 16 | green << 8 | blue));
		break;
	case cmap_M3B:
		/* Set PALETTE_ACCESS_CNTL in DAC_CNTL */
		out_le32(par->cmap_adr + 0x58,
			 in_le32(par->cmap_adr + 0x58) | 0x20);
		/* Set palette index & data */
		out_8(par->cmap_adr + 0xb0, regno);
		out_le32(par->cmap_adr + 0xb4, (red << 16 | green << 8 | blue));
		break;
	case cmap_radeon:
		/* Set palette index & data (could be smarter) */
		out_8(par->cmap_adr + 0xb0, regno);
		out_le32(par->cmap_adr + 0xb4, (red << 16 | green << 8 | blue));
		break;
	case cmap_gxt2000:
		out_le32(((unsigned __iomem *) par->cmap_adr) + regno,
			 (red << 16 | green << 8 | blue));
		break;
	case cmap_avivo:
		/* Write to both LUTs for now */
		writel(1, par->cmap_adr + AVIVO_DC_LUT_RW_SELECT);
		writeb(regno, par->cmap_adr + AVIVO_DC_LUT_RW_INDEX);
		writel(((red) << 22) | ((green) << 12) | ((blue) << 2),
		       par->cmap_adr + AVIVO_DC_LUT_30_COLOR);
		writel(0, par->cmap_adr + AVIVO_DC_LUT_RW_SELECT);
		writeb(regno, par->cmap_adr + AVIVO_DC_LUT_RW_INDEX);
		writel(((red) << 22) | ((green) << 12) | ((blue) << 2),
		       par->cmap_adr + AVIVO_DC_LUT_30_COLOR);
		break;
	}

	return 0;
}