Example #1
/*
 * Set up the memory map and initialize registers
 */
void cpu_init_f(void)
{
	sim_t *sim = (sim_t *)(MMAP_SIM);

	out_8(&sim->sypcr, 0x00);
	out_8(&sim->swivr, 0x0f);
	out_8(&sim->swsr,  0x00);
	out_8(&sim->mpark, 0x00);

	intctrl_t *icr = (intctrl_t *)(MMAP_INTC);

	/* timer 2 not masked */
	out_be32(&icr->imr, 0xfffffbff);

	out_8(&icr->icr0, 0x00); /* sw watchdog */
	out_8(&icr->icr1, 0x00); /* timer 1     */
	out_8(&icr->icr2, 0x88); /* timer 2     */
	out_8(&icr->icr3, 0x00); /* i2c         */
	out_8(&icr->icr4, 0x00); /* uart 0      */
	out_8(&icr->icr5, 0x00); /* uart 1      */
	out_8(&icr->icr6, 0x00); /* dma  0      */
	out_8(&icr->icr7, 0x00); /* dma  1      */
	out_8(&icr->icr8, 0x00); /* dma  2      */
	out_8(&icr->icr9, 0x00); /* dma  3      */

	/* Chipselect Init */
	init_csm();

	/* enable data/instruction cache now */
	icache_enable();
}
Example #2
static void sja1000_ofp_write_reg(const struct sja1000_priv *priv,
				  int reg, u8 val)
{
	out_8(priv->reg_base + reg, val);
}
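
For symmetry, this register-accessor pattern is normally paired with a read helper built on in_8(). The sketch below is illustrative only; the function name and the assumption that priv->reg_base is the same mapped register window are not taken from the listing above.

/* Sketch of the matching read accessor (name assumed, same reg_base layout). */
static u8 sja1000_ofp_read_reg(const struct sja1000_priv *priv, int reg)
{
	return in_8(priv->reg_base + reg);
}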
Example #3
static int do_lcd_cur (cmd_tbl_t * cmdtp, int flag, int argc, char *argv[])
{
    ulong count;
    ulong dir;
    char cur_addr;

    if (argc < 3) {
        cmd_usage(cmdtp);
        return 1;
    }

    count = simple_strtoul(argv[1], NULL, 16);
    if (count > 31) {
        printf("unable to shift > 0x20\n");
        count = 0;
    }

    dir = simple_strtoul(argv[2], NULL, 16);
    cur_addr = in_8((u8 *) LCD_CMD_ADDR);
    udelay(50);

    if (dir == 0x0) {
        if (addr_flag == 0x80) {
            if (count >= (cur_addr & 0xf)) {
                out_8((u8 *) LCD_CMD_ADDR, 0x80);
                udelay(50);
                count = 0;
            }
        } else {
            if (count >= ((cur_addr & 0x0f) + 0x0f)) {
                out_8((u8 *) LCD_CMD_ADDR, 0x80);
                addr_flag = 0x80;
                udelay(50);
                count = 0x0;
            } else if (count >= ( cur_addr & 0xf)) {
                count -= cur_addr & 0xf ;
                out_8((u8 *) LCD_CMD_ADDR, 0x80 | 0xf);
                addr_flag = 0x80;
                udelay(50);
            }
        }
    } else {
        if (addr_flag == 0x80) {
            if (count >= (0x1f - (cur_addr & 0xf))) {
                count = 0x0;
                addr_flag = 0xc0;
                out_8((u8 *) LCD_CMD_ADDR, 0xc0 | 0xf);
                udelay(50);
            } else if ((count + (cur_addr & 0xf ))>=  0x0f) {
                count = count + (cur_addr & 0xf) - 0x0f;
                addr_flag = 0xc0;
                out_8((u8 *) LCD_CMD_ADDR, 0xc0);
                udelay(50);
            }
        } else if ((count + (cur_addr & 0xf )) >= 0x0f) {
            count = 0x0;
            out_8((u8 *) LCD_CMD_ADDR, 0xC0 | 0x0F);
            udelay(50);
        }
    }
    while (count--) {
        if (dir == 0)
            out_8((u8 *) LCD_CMD_ADDR, 0x10);
        else
            out_8((u8 *) LCD_CMD_ADDR, 0x14);
        udelay(50);
    }

    return 0;
}
Example #4
static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
			  u_int transp, struct fb_info *info)
{
	struct offb_par *par = (struct offb_par *) info->par;
	int i, depth;
	u32 *pal = info->pseudo_palette;

	depth = info->var.bits_per_pixel;
	if (depth == 16)
		depth = (info->var.green.length == 5) ? 15 : 16;

	if (regno > 255 ||
	    (depth == 16 && regno > 63) ||
	    (depth == 15 && regno > 31))
		return 1;

	if (regno < 16) {
		switch (depth) {
		case 15:
			pal[regno] = (regno << 10) | (regno << 5) | regno;
			break;
		case 16:
			pal[regno] = (regno << 11) | (regno << 5) | regno;
			break;
		case 24:
			pal[regno] = (regno << 16) | (regno << 8) | regno;
			break;
		case 32:
			i = (regno << 8) | regno;
			pal[regno] = (i << 16) | i;
			break;
		}
	}

	red >>= 8;
	green >>= 8;
	blue >>= 8;

	if (!par->cmap_adr)
		return 0;

	switch (par->cmap_type) {
	case cmap_m64:
		writeb(regno, par->cmap_adr);
		writeb(red, par->cmap_data);
		writeb(green, par->cmap_data);
		writeb(blue, par->cmap_data);
		break;
	case cmap_M3A:
		/* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */
		out_le32(par->cmap_adr + 0x58,
			 in_le32(par->cmap_adr + 0x58) & ~0x20);
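		/* fall through - program the palette via the r128 index/data registers */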
	case cmap_r128:
		/* Set palette index & data */
		out_8(par->cmap_adr + 0xb0, regno);
		out_le32(par->cmap_adr + 0xb4,
			 (red << 16 | green << 8 | blue));
		break;
	case cmap_M3B:
		/* Set PALETTE_ACCESS_CNTL in DAC_CNTL */
		out_le32(par->cmap_adr + 0x58,
			 in_le32(par->cmap_adr + 0x58) | 0x20);
		/* Set palette index & data */
		out_8(par->cmap_adr + 0xb0, regno);
		out_le32(par->cmap_adr + 0xb4, (red << 16 | green << 8 | blue));
		break;
	case cmap_radeon:
		/* Set palette index & data (could be smarter) */
		out_8(par->cmap_adr + 0xb0, regno);
		out_le32(par->cmap_adr + 0xb4, (red << 16 | green << 8 | blue));
		break;
	case cmap_gxt2000:
		out_le32(((unsigned __iomem *) par->cmap_adr) + regno,
			 (red << 16 | green << 8 | blue));
		break;
	case cmap_avivo:
		/* Write to both LUTs for now */
		writel(1, par->cmap_adr + AVIVO_DC_LUT_RW_SELECT);
		writeb(regno, par->cmap_adr + AVIVO_DC_LUT_RW_INDEX);
		writel(((red) << 22) | ((green) << 12) | ((blue) << 2),
		       par->cmap_adr + AVIVO_DC_LUT_30_COLOR);
		writel(0, par->cmap_adr + AVIVO_DC_LUT_RW_SELECT);
		writeb(regno, par->cmap_adr + AVIVO_DC_LUT_RW_INDEX);
		writel(((red) << 22) | ((green) << 12) | ((blue) << 2),
		       par->cmap_adr + AVIVO_DC_LUT_30_COLOR);
		break;
	}

	return 0;
}
Example #5
/*
 * Per table 27, page 58 of MPC8641HPCN spec.
 */
int set_px_sysclk(ulong sysclk)
{
	u8 sysclk_s, sysclk_r, sysclk_v, vclkh, vclkl, sysclk_aux;
	u8 *pixis_base = (u8 *)PIXIS_BASE;

	switch (sysclk) {
	case 33:
		sysclk_s = 0x04;
		sysclk_r = 0x04;
		sysclk_v = 0x07;
		sysclk_aux = 0x00;
		break;
	case 40:
		sysclk_s = 0x01;
		sysclk_r = 0x1F;
		sysclk_v = 0x20;
		sysclk_aux = 0x01;
		break;
	case 50:
		sysclk_s = 0x01;
		sysclk_r = 0x1F;
		sysclk_v = 0x2A;
		sysclk_aux = 0x02;
		break;
	case 66:
		sysclk_s = 0x01;
		sysclk_r = 0x04;
		sysclk_v = 0x04;
		sysclk_aux = 0x03;
		break;
	case 83:
		sysclk_s = 0x01;
		sysclk_r = 0x1F;
		sysclk_v = 0x4B;
		sysclk_aux = 0x04;
		break;
	case 100:
		sysclk_s = 0x01;
		sysclk_r = 0x1F;
		sysclk_v = 0x5C;
		sysclk_aux = 0x05;
		break;
	case 134:
		sysclk_s = 0x06;
		sysclk_r = 0x1F;
		sysclk_v = 0x3B;
		sysclk_aux = 0x06;
		break;
	case 166:
		sysclk_s = 0x06;
		sysclk_r = 0x1F;
		sysclk_v = 0x4B;
		sysclk_aux = 0x07;
		break;
	default:
		printf("Unsupported SYSCLK frequency.\n");
		return 0;
	}

	vclkh = (sysclk_s << 5) | sysclk_r;
	vclkl = sysclk_v;

	out_8(pixis_base + PIXIS_VCLKH, vclkh);
	out_8(pixis_base + PIXIS_VCLKL, vclkl);

	out_8(pixis_base + PIXIS_AUX, sysclk_aux);

	return 1;
}
Example #6
/**
 * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred
 */
static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
{
	size_t transfer_size = req->size - req->pos;
	struct bcom_bd *bd;
	void __iomem *reg;
	u32 *data;
	int i;
	int bit_fields;
	int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
	int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
	int poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;

	/* Set and then clear the reset bits, as recommended in the User Manual */
	out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);

	/* set master enable bit */
	out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000001);
	if (!dma) {
		/* While the FIFO can be set up for transfer sizes as large as
		 * 16M-1, the FIFO itself is only 512 bytes deep and it does
		 * not generate interrupts for FIFO full events (only transfer
		 * complete will raise an IRQ).  Therefore when not using
		 * Bestcomm to drive the FIFO it needs to either be polled, or
		 * transfers need to be constrained to the size of the FIFO.
		 *
		 * This driver restricts the size of the transfer.
		 */
		if (transfer_size > 512)
			transfer_size = 512;

		/* Load the FIFO with data */
		if (write) {
			reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
			data = req->data + req->pos;
			for (i = 0; i < transfer_size; i += 4)
				out_be32(reg, *data++);
		}

		/* Unmask both error and completion irqs */
		out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000301);
	} else {
		/* Choose the correct direction
		 *
		 * Configure the watermarks so DMA will always complete correctly.
		 * It may be worth experimenting with the ALARM value to see if
		 * there is a performance impact.  However, if it is wrong there
		 * is a risk of DMA not transferring the last chunk of data
		 */
		if (write) {
			out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1e4);
			out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 7);
			lpbfifo.bcom_cur_task = lpbfifo.bcom_tx_task;
		} else {
			out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1ff);
			out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 0);
			lpbfifo.bcom_cur_task = lpbfifo.bcom_rx_task;

			if (poll_dma) {
				if (lpbfifo.dma_irqs_enabled) {
					disable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
					lpbfifo.dma_irqs_enabled = 0;
				}
			} else {
				if (!lpbfifo.dma_irqs_enabled) {
					enable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
					lpbfifo.dma_irqs_enabled = 1;
				}
			}
		}

		bd = bcom_prepare_next_buffer(lpbfifo.bcom_cur_task);
		bd->status = transfer_size;
		if (!write) {
			/*
			 * In the DMA read case, the DMA doesn't complete,
			 * possibly due to incorrect watermarks in the ALARM
			 * and CONTROL regs. For now instead of trying to
			 * determine the right watermarks that will make this
			 * work, just increase the number of bytes the FIFO is
			 * expecting.
			 *
			 * When submitting another operation, the FIFO will get
			 * reset, so the condition of the FIFO waiting for a
			 * non-existent 4 bytes will get cleared.
			 */
			transfer_size += 4; /* BLECH! */
		}
		bd->data[0] = req->data_phys + req->pos;
		bcom_submit_next_buffer(lpbfifo.bcom_cur_task, NULL);

		/* error irq & master enabled bit */
		bit_fields = 0x00000201;

		/* Unmask irqs */
		if (write && (!poll_dma))
			bit_fields |= 0x00000100; /* completion irq too */
		out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, bit_fields);
	}

	/* Set transfer size, width, chip select and READ mode */
	out_be32(lpbfifo.regs + LPBFIFO_REG_START_ADDRESS,
		 req->offset + req->pos);
	out_be32(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, transfer_size);

	bit_fields = req->cs << 24 | 0x000008;
	if (!write)
		bit_fields |= 0x010000; /* read mode */
	out_be32(lpbfifo.regs + LPBFIFO_REG_CONTROL, bit_fields);

	/* Kick it off */
	out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
	if (dma)
		bcom_enable(lpbfifo.bcom_cur_task);
}
Example #7
static void ace_out_8(struct ace_device *ace, int reg, u16 val)
{
	void *r = ace->baseaddr + reg;
	out_8(r, val);
	out_8(r + 1, val >> 8);
}
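
ace_out_8() above splits a 16-bit value into two byte writes for an 8-bit bus attachment. A read counterpart would reassemble the value the same way; the sketch below assumes the same baseaddr layout and is not part of the listing above.

/* Illustrative counterpart: read a 16-bit register as two byte accesses. */
static u16 ace_in_8(struct ace_device *ace, int reg)
{
	void *r = ace->baseaddr + reg;

	return in_8(r) | (in_8(r + 1) << 8);
}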
Example #8
/*
 * Read a byte from an address in SRAM
 */
static u8 read_sram(struct fsl_dcm_data *dcm, u8 offset)
{
	out_8(dcm->addr, offset);

	return in_8(dcm->data);
}
Example #9
static int fsl_dcm_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_dcm_data *dcm;
	int ret;
	u8 ver;

	dcm = kzalloc(sizeof(struct fsl_dcm_data), GFP_KERNEL);
	if (!dcm)
		return -ENOMEM;

	dcm->base = of_iomap(np, 0);
	if (!dcm->base) {
		dev_err(&pdev->dev, "could not map fpga node\n");
		ret = -ENOMEM;
		goto error_kzalloc;
	}

	dcm->dev = &pdev->dev;
	dcm->board = pdev->dev.platform_data;

	/*
	 * write 0x1F to GDC register then read GDD register
	 * to get GMSA version.
	 * 0x00: v1  -> pixis
	 * 0x01: v2  -> qixis
	 */
	out_8(dcm->base + 0x16, 0x1F);
	ver = in_8(dcm->base + 0x17);
	if (ver == 0x0) {
		dcm->addr = dcm->base + 0x0a;
		dcm->data = dcm->base + 0x0d;
	} else if (ver == 0x01) {
		dcm->addr = dcm->base + 0x12;
		dcm->data = dcm->base + 0x13;
	}

	dcm->ocmd = dcm->base + 0x14;
	dcm->omsg = dcm->base + 0x15;
	dcm->mack = dcm->base + 0x18;

	/* Check to make sure the DCM is enabled and working */
	if (!is_sram_available(dcm)) {
		dev_err(&pdev->dev, "dcm is not responding\n");
		ret = -ENODEV;
		goto error_iomap;
	}

	dev_set_drvdata(&pdev->dev, dcm);

	ret = sysfs_create_group(&pdev->dev.kobj, &fsl_dcm_attr_group);
	if (ret) {
		dev_err(&pdev->dev, "could not create sysfs group\n");
		goto error_iomap;
	}

	if (!select_dcm_channels(dcm, dcm->board->mask)) {
		dev_err(&pdev->dev, "could not set crecord mask\n");
		ret = -ENODEV;
		goto error_sysfs;
	}

	/* Set the timer to the fastest supported rate. */
	if (!set_dcm_frequency(dcm, 1)) {
		dev_err(&pdev->dev, "could not set frequency\n");
		ret = -ENODEV;
		goto error_sysfs;
	}

	return 0;

error_sysfs:
	sysfs_remove_group(&pdev->dev.kobj, &fsl_dcm_attr_group);

error_iomap:
	iounmap(dcm->base);

error_kzalloc:
	kfree(dcm);

	return ret;
}
Example #10
static __inline__ void macscsi_write(struct Scsi_Host *instance, int reg, int value)
{
    out_8(instance->io_port + (reg<<4), value);
}
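
The write helper above maps NCR5380 register numbers to byte-wide I/O with a reg<<4 address stride. A matching read helper would use in_8() with the same calculation; the name and return type below are assumptions, not taken from the listing.

/* Sketch of the matching read helper; same reg<<4 address stride. */
static __inline__ char macscsi_read(struct Scsi_Host *instance, int reg)
{
    return in_8(instance->io_port + (reg<<4));
}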
Example #11
/*
 * Write a byte to an address in SRAM
 */
static void write_sram(struct fsl_dcm_data *dcm, u8 offset, u8 v)
{
	out_8(dcm->addr, offset);
	out_8(dcm->data, v);
}
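
read_sram() (Example #8) and write_sram() drive the same indirect address/data register pair, so a block transfer is simply a loop over offsets. A hedged usage sketch follows; the helper name is invented here and the loop assumes offset + len stays within the 8-bit SRAM address range.

/* Sketch: copy a buffer into SRAM one byte at a time via the
 * indirect address/data registers (helper name is illustrative). */
static void write_sram_block(struct fsl_dcm_data *dcm, u8 offset,
			     const u8 *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		write_sram(dcm, offset + i, buf[i]);
}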
Example #12
/**
 * p1022ds_set_monitor_port: switch the output to a different monitor port
 */
static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
{
	struct device_node *guts_node;
	struct device_node *lbc_node = NULL;
	struct device_node *law_node = NULL;
	struct ccsr_guts __iomem *guts;
	struct fsl_lbc_regs *lbc = NULL;
	void *ecm = NULL;
	u8 __iomem *lbc_lcs0_ba = NULL;
	u8 __iomem *lbc_lcs1_ba = NULL;
	phys_addr_t cs0_addr, cs1_addr;
	u32 br0, or0, br1, or1;
	const __be32 *iprop;
	unsigned int num_laws;
	u8 b;

	/* Map the global utilities registers. */
	guts_node = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts");
	if (!guts_node) {
		pr_err("p1022ds: missing global utilities device node\n");
		return;
	}

	guts = of_iomap(guts_node, 0);
	if (!guts) {
		pr_err("p1022ds: could not map global utilities device\n");
		goto exit;
	}

	lbc_node = of_find_compatible_node(NULL, NULL, "fsl,p1022-elbc");
	if (!lbc_node) {
		pr_err("p1022ds: missing localbus node\n");
		goto exit;
	}

	lbc = of_iomap(lbc_node, 0);
	if (!lbc) {
		pr_err("p1022ds: could not map localbus node\n");
		goto exit;
	}

	law_node = of_find_compatible_node(NULL, NULL, "fsl,ecm-law");
	if (!law_node) {
		pr_err("p1022ds: missing local access window node\n");
		goto exit;
	}

	ecm = of_iomap(law_node, 0);
	if (!ecm) {
		pr_err("p1022ds: could not map local access window node\n");
		goto exit;
	}

	iprop = of_get_property(law_node, "fsl,num-laws", NULL);
	if (!iprop) {
		pr_err("p1022ds: LAW node is missing fsl,num-laws property\n");
		goto exit;
	}
	num_laws = be32_to_cpup(iprop);

	/*
	 * Indirect mode requires both BR0 and BR1 to be set to "GPCM",
	 * otherwise writes to these addresses won't actually appear on the
	 * local bus, and so the PIXIS won't see them.
	 *
	 * In FCM mode, writes go to the NAND controller, which does not pass
	 * them to the localbus directly.  So we force BR0 and BR1 into GPCM
	 * mode, since we don't care about what's behind the localbus any
	 * more.
	 */
	br0 = in_be32(&lbc->bank[0].br);
	br1 = in_be32(&lbc->bank[1].br);
	or0 = in_be32(&lbc->bank[0].or);
	or1 = in_be32(&lbc->bank[1].or);

	/* Make sure CS0 and CS1 are programmed */
	if (!(br0 & BR_V) || !(br1 & BR_V)) {
		pr_err("p1022ds: CS0 and/or CS1 is not programmed\n");
		goto exit;
	}

	/*
	 * Use the existing BRx/ORx values if it's already GPCM. Otherwise,
	 * force the values to simple 32KB GPCM windows with the most
	 * conservative timing.
	 */
	if ((br0 & BR_MSEL) != BR_MS_GPCM) {
		br0 = (br0 & BR_BA) | BR_V;
		or0 = 0xFFFF8000 | 0xFF7;
		out_be32(&lbc->bank[0].br, br0);
		out_be32(&lbc->bank[0].or, or0);
	}
	if ((br1 & BR_MSEL) != BR_MS_GPCM) {
		br1 = (br1 & BR_BA) | BR_V;
		or1 = 0xFFFF8000 | 0xFF7;
		out_be32(&lbc->bank[1].br, br1);
		out_be32(&lbc->bank[1].or, or1);
	}

	cs0_addr = lbc_br_to_phys(ecm, num_laws, br0);
	if (!cs0_addr) {
		pr_err("p1022ds: could not determine physical address for CS0"
		       " (BR0=%08x)\n", br0);
		goto exit;
	}
	cs1_addr = lbc_br_to_phys(ecm, num_laws, br1);
	if (!cs1_addr) {
		pr_err("p1022ds: could not determine physical address for CS1"
		       " (BR1=%08x)\n", br1);
		goto exit;
	}

	lbc_lcs0_ba = ioremap(cs0_addr, 1);
	if (!lbc_lcs0_ba) {
		pr_err("p1022ds: could not ioremap CS0 address %llx\n",
		       (unsigned long long)cs0_addr);
		goto exit;
	}
	lbc_lcs1_ba = ioremap(cs1_addr, 1);
	if (!lbc_lcs1_ba) {
		pr_err("p1022ds: could not ioremap CS1 address %llx\n",
		       (unsigned long long)cs1_addr);
		goto exit;
	}

	/* Make sure we're in indirect mode first. */
	if ((in_be32(&guts->pmuxcr) & PMUXCR_ELBCDIU_MASK) !=
	    PMUXCR_ELBCDIU_DIU) {
		struct device_node *pixis_node;
		void __iomem *pixis;

		pixis_node =
			of_find_compatible_node(NULL, NULL, "fsl,p1022ds-fpga");
		if (!pixis_node) {
			pr_err("p1022ds: missing pixis node\n");
			goto exit;
		}

		pixis = of_iomap(pixis_node, 0);
		of_node_put(pixis_node);
		if (!pixis) {
			pr_err("p1022ds: could not map pixis registers\n");
			goto exit;
		}

		/* Enable indirect PIXIS mode.  */
		setbits8(pixis + PX_CTL, PX_CTL_ALTACC);
		iounmap(pixis);

		/* Switch the board mux to the DIU */
		out_8(lbc_lcs0_ba, PX_BRDCFG0);	/* BRDCFG0 */
		b = in_8(lbc_lcs1_ba);
		b |= PX_BRDCFG0_ELBC_DIU;
		out_8(lbc_lcs1_ba, b);

		/* Set the chip mux to DIU mode. */
		clrsetbits_be32(&guts->pmuxcr, PMUXCR_ELBCDIU_MASK,
				PMUXCR_ELBCDIU_DIU);
		in_be32(&guts->pmuxcr);
	}


	switch (port) {
	case FSL_DIU_PORT_DVI:
		/* Enable the DVI port, disable the DFP and the backlight */
		out_8(lbc_lcs0_ba, PX_BRDCFG1);
		b = in_8(lbc_lcs1_ba);
		b &= ~(PX_BRDCFG1_DFPEN | PX_BRDCFG1_BACKLIGHT);
		b |= PX_BRDCFG1_DVIEN;
		out_8(lbc_lcs1_ba, b);
		break;
	case FSL_DIU_PORT_LVDS:
		/*
		 * LVDS also needs backlight enabled, otherwise the display
		 * will be blank.
		 */
		/* Enable the DFP port, disable the DVI and the backlight */
		out_8(lbc_lcs0_ba, PX_BRDCFG1);
		b = in_8(lbc_lcs1_ba);
		b &= ~PX_BRDCFG1_DVIEN;
		b |= PX_BRDCFG1_DFPEN | PX_BRDCFG1_BACKLIGHT;
		out_8(lbc_lcs1_ba, b);
		break;
	default:
		pr_err("p1022ds: unsupported monitor port %i\n", port);
	}

exit:
	if (lbc_lcs1_ba)
		iounmap(lbc_lcs1_ba);
	if (lbc_lcs0_ba)
		iounmap(lbc_lcs0_ba);
	if (lbc)
		iounmap(lbc);
	if (ecm)
		iounmap(ecm);
	if (guts)
		iounmap(guts);

	of_node_put(law_node);
	of_node_put(lbc_node);
	of_node_put(guts_node);
}
Example #13
int do_caddy(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
	unsigned long base_addr;
	uint32_t ptr;
	struct caddy_cmd *caddy_cmd;
	uint32_t result[5];
	uint16_t data16;
	uint8_t data8;
	uint32_t status;
	pci_dev_t dev;
	void *pci_ptr;

	if (argc < 2) {
		puts("Missing parameter\n");
		return 1;
	}

	base_addr = simple_strtoul(argv[1], NULL, 16);
	caddy_interface = (struct caddy_interface *) base_addr;

	memset((void *)caddy_interface, 0, sizeof(struct caddy_interface));
	memcpy((void *)&caddy_interface->magic[0], &CADDY_MAGIC, 16);

	while (ctrlc() == 0) {
		if (caddy_interface->cmd_in != caddy_interface->cmd_out) {
			memset(result, 0, 5 * sizeof(result[0]));
			status = 0;
			caddy_cmd = &caddy_interface->cmd[caddy_interface->cmd_out];
			pci_ptr = (void *)CONFIG_SYS_PCI1_IO_PHYS +
				(caddy_cmd->addr & 0x001fffff);

			switch (caddy_cmd->cmd) {
			case CADDY_CMD_IO_READ_8:
				result[0] = in_8(pci_ptr);
				break;

			case CADDY_CMD_IO_READ_16:
				result[0] = in_be16(pci_ptr);
				break;

			case CADDY_CMD_IO_READ_32:
				result[0] = in_be32(pci_ptr);
				break;

			case CADDY_CMD_IO_WRITE_8:
				data8 = caddy_cmd->par[0] & 0x000000ff;
				out_8(pci_ptr, data8);
				break;

			case CADDY_CMD_IO_WRITE_16:
				data16 = caddy_cmd->par[0] & 0x0000ffff;
				out_be16(pci_ptr, data16);
				break;

			case CADDY_CMD_IO_WRITE_32:
				out_be32(pci_ptr, caddy_cmd->par[0]);
				break;

			case CADDY_CMD_CONFIG_READ_8:
				dev = PCI_BDF(caddy_cmd->par[0],
					      caddy_cmd->par[1],
					      caddy_cmd->par[2]);
				status = pci_read_config_byte(dev,
							      caddy_cmd->addr,
							      &data8);
				result[0] = data8;
				break;

			case CADDY_CMD_CONFIG_READ_16:
				dev = PCI_BDF(caddy_cmd->par[0],
					      caddy_cmd->par[1],
					      caddy_cmd->par[2]);
				status = pci_read_config_word(dev,
							      caddy_cmd->addr,
							      &data16);
				result[0] = data16;
				break;

			case CADDY_CMD_CONFIG_READ_32:
				dev = PCI_BDF(caddy_cmd->par[0],
					      caddy_cmd->par[1],
					      caddy_cmd->par[2]);
				status = pci_read_config_dword(dev,
							       caddy_cmd->addr,
							       &result[0]);
				break;

			case CADDY_CMD_CONFIG_WRITE_8:
				dev = PCI_BDF(caddy_cmd->par[0],
					      caddy_cmd->par[1],
					      caddy_cmd->par[2]);
				data8 = caddy_cmd->par[3] & 0x000000ff;
				status = pci_write_config_byte(dev,
							       caddy_cmd->addr,
							       data8);
				break;

			case CADDY_CMD_CONFIG_WRITE_16:
				dev = PCI_BDF(caddy_cmd->par[0],
					      caddy_cmd->par[1],
					      caddy_cmd->par[2]);
				data16 = caddy_cmd->par[3] & 0x0000ffff;
				status = pci_write_config_word(dev,
							       caddy_cmd->addr,
							       data16);
				break;

			case CADDY_CMD_CONFIG_WRITE_32:
				dev = PCI_BDF(caddy_cmd->par[0],
					      caddy_cmd->par[1],
					      caddy_cmd->par[2]);
				status = pci_write_config_dword(dev,
								caddy_cmd->addr,
								caddy_cmd->par[3]);
				break;

			default:
				status = 0xffffffff;
				break;
			}

			generate_answer(caddy_cmd, status, &result[0]);

			ptr = caddy_interface->cmd_out + 1;
			ptr = ptr & (CMD_SIZE - 1);
			caddy_interface->cmd_out = ptr;
		}

		caddy_interface->heartbeat++;
	}

	return 0;
}
Example #14
int indirect_write_config(struct pci_bus *bus, unsigned int devfn,
			  int offset, int len, u32 val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	volatile void __iomem *cfg_data;
	u8 cfg_type = 0;
	u32 bus_no, reg;

	if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK) {
		if (bus->number != hose->first_busno)
			return PCIBIOS_DEVICE_NOT_FOUND;
		if (devfn != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (ppc_md.pci_exclude_device)
		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
			return PCIBIOS_DEVICE_NOT_FOUND;

	if (hose->indirect_type & PPC_INDIRECT_TYPE_SET_CFG_TYPE)
		if (bus->number != hose->first_busno)
			cfg_type = 1;

	bus_no = (bus->number == hose->first_busno) ?
			hose->self_busno : bus->number;

	if (hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG)
		reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
	else
		reg = offset & 0xfc;

	if (hose->indirect_type & PPC_INDIRECT_TYPE_BIG_ENDIAN)
		out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
			 (devfn << 8) | reg | cfg_type));
	else
		out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
			 (devfn << 8) | reg | cfg_type));

	/* suppress setting of PCI_PRIMARY_BUS */
	if (hose->indirect_type & PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS)
		if ((offset == PCI_PRIMARY_BUS) &&
		    (bus->number == hose->first_busno))
			val &= 0xffffff00;

	/* Workaround for PCI_28 Errata in 440EPx/GRx */
	if ((hose->indirect_type & PPC_INDIRECT_TYPE_BROKEN_MRM) &&
			offset == PCI_CACHE_LINE_SIZE) {
		val = 0;
	}

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	cfg_data = hose->cfg_data + (offset & 3);
	switch (len) {
	case 1:
		out_8(cfg_data, val);
		break;
	case 2:
		out_le16(cfg_data, val);
		break;
	default:
		out_le32(cfg_data, val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}
Example #15
int misc_init_r(void)
{
	unsigned char *dst;
	unsigned char fctr;
	ulong len = sizeof(fpgadata);
	int status;
	int index;
	int i;

	/* adjust flash start and offset */
	gd->bd->bi_flashstart = 0 - gd->bd->bi_flashsize;
	gd->bd->bi_flashoffset = 0;

	dst = malloc(CONFIG_SYS_FPGA_MAX_SIZE);
	if (gunzip(dst, CONFIG_SYS_FPGA_MAX_SIZE,
		   (uchar *)fpgadata, &len) != 0) {
		printf("GUNZIP ERROR - must RESET board to recover\n");
		do_reset(NULL, 0, 0, NULL);
	}

	status = fpga_boot(dst, len);
	if (status != 0) {
		printf("\nFPGA: Booting failed ");
		switch (status) {
		case ERROR_FPGA_PRG_INIT_LOW:
			printf("(Timeout: INIT not low "
			       "after asserting PROGRAM*)\n");
			break;
		case ERROR_FPGA_PRG_INIT_HIGH:
			printf("(Timeout: INIT not high "
			       "after deasserting PROGRAM*)\n");
			break;
		case ERROR_FPGA_PRG_DONE:
			printf("(Timeout: DONE not high "
			       "after programming FPGA)\n");
			break;
		}

		/* display info about the FPGA image */
		index = 15;
		for (i=0; i<4; i++) {
			len = dst[index];
			printf("FPGA: %s\n", &(dst[index+1]));
			index += len+3;
		}
		putc ('\n');
		/* delayed reboot */
		for (i=20; i>0; i--) {
			printf("Rebooting in %2d seconds \r",i);
			for (index=0;index<1000;index++)
				udelay(1000);
		}
		putc('\n');
		do_reset(NULL, 0, 0, NULL);
	}

	puts("FPGA:  ");

	/* display info about the FPGA image */
	index = 15;
	for (i=0; i<4; i++) {
		len = dst[index];
		printf("%s ", &(dst[index+1]));
		index += len+3;
	}
	putc('\n');

	free(dst);

	/*
	 * Reset FPGA via FPGA_DATA pin
	 */
	SET_FPGA(FPGA_PRG | FPGA_CLK);
	udelay(1000); /* wait 1ms */
	SET_FPGA(FPGA_PRG | FPGA_CLK | FPGA_DATA);
	udelay(1000); /* wait 1ms */

	/*
	 * Reset external DUARTs
	 */
	out_be32((void*)GPIO0_OR,
		 in_be32((void*)GPIO0_OR) | CONFIG_SYS_DUART_RST);
	udelay(10);
	out_be32((void*)GPIO0_OR,
		 in_be32((void*)GPIO0_OR) & ~CONFIG_SYS_DUART_RST);
	udelay(1000);

	/*
	 * Set NAND-FLASH GPIO signals to default
	 */
	out_be32((void*)GPIO0_OR,
		 in_be32((void*)GPIO0_OR) &
		 ~(CONFIG_SYS_NAND_CLE | CONFIG_SYS_NAND_ALE));
	out_be32((void*)GPIO0_OR,
		 in_be32((void*)GPIO0_OR) | CONFIG_SYS_NAND_CE);

	/*
	 * Setup EEPROM write protection
	 */
	out_be32((void*)GPIO0_OR,
		 in_be32((void*)GPIO0_OR) | CONFIG_SYS_EEPROM_WP);
	out_be32((void*)GPIO0_TCR,
		 in_be32((void*)GPIO0_TCR) | CONFIG_SYS_EEPROM_WP);

	/*
	 * Enable interrupts in exar duart mcr[3]
	 */
	out_8((void *)DUART0_BA + 4, 0x08);
	out_8((void *)DUART1_BA + 4, 0x08);

	/*
	 * Enable auto RS485 mode in 2nd external uart
	 */
	out_8((void *)DUART1_BA + 3, 0xbf); /* write LCR */
	fctr = in_8((void *)DUART1_BA + 1); /* read FCTR */
	fctr |= 0x08;                       /* enable RS485 mode */
	out_8((void *)DUART1_BA + 1, fctr); /* write FCTR */
	out_8((void *)DUART1_BA + 3, 0);    /* write LCR */

	/*
	 * Init magnetic couplers
	 */
	if (!getenv("noinitcoupler")) {
		init_coupler(CAN0_BA);
		init_coupler(CAN1_BA);
	}
	return 0;
}
Example #16
/*
 * OF Platform Bus Binding
 */
static int __devinit mpc52xx_spi_probe(struct platform_device *op,
				       const struct of_device_id *match)
{
	struct spi_master *master;
	struct mpc52xx_spi *ms;
	void __iomem *regs;
	u8 ctrl1;
	int rc, i = 0;
	int gpio_cs;

	/* MMIO registers */
	dev_dbg(&op->dev, "probing mpc5200 SPI device\n");
	regs = of_iomap(op->dev.of_node, 0);
	if (!regs)
		return -ENODEV;

	/* initialize the device */
	ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR;
	out_8(regs + SPI_CTRL1, ctrl1);
	out_8(regs + SPI_CTRL2, 0x0);
	out_8(regs + SPI_DATADIR, 0xe);	/* Set output pins */
	out_8(regs + SPI_PORTDATA, 0x8);	/* Deassert /SS signal */

	/* Clear the status register and re-read it to check for a MODF
	 * failure.  This driver cannot currently handle multiple masters
	 * on the SPI bus.  This fault will also occur if the SPI signals
	 * are not connected to any pins (port_config setting) */
	in_8(regs + SPI_STATUS);
	out_8(regs + SPI_CTRL1, ctrl1);

	in_8(regs + SPI_DATA);
	if (in_8(regs + SPI_STATUS) & SPI_STATUS_MODF) {
		dev_err(&op->dev, "mode fault; is port_config correct?\n");
		rc = -EIO;
		goto err_init;
	}

	dev_dbg(&op->dev, "allocating spi_master struct\n");
	master = spi_alloc_master(&op->dev, sizeof *ms);
	if (!master) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	master->bus_num = -1;
	master->setup = mpc52xx_spi_setup;
	master->transfer = mpc52xx_spi_transfer;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
	master->dev.of_node = op->dev.of_node;

	dev_set_drvdata(&op->dev, master);

	ms = spi_master_get_devdata(master);
	ms->master = master;
	ms->regs = regs;
	ms->irq0 = irq_of_parse_and_map(op->dev.of_node, 0);
	ms->irq1 = irq_of_parse_and_map(op->dev.of_node, 1);
	ms->state = mpc52xx_spi_fsmstate_idle;
	ms->ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
	ms->gpio_cs_count = of_gpio_count(op->dev.of_node);
	if (ms->gpio_cs_count > 0) {
		master->num_chipselect = ms->gpio_cs_count;
		ms->gpio_cs = kmalloc(ms->gpio_cs_count * sizeof(unsigned int),
				GFP_KERNEL);
		if (!ms->gpio_cs) {
			rc = -ENOMEM;
			goto err_alloc;
		}

		for (i = 0; i < ms->gpio_cs_count; i++) {
			gpio_cs = of_get_gpio(op->dev.of_node, i);
			if (gpio_cs < 0) {
				dev_err(&op->dev,
					"could not parse the gpio field "
					"in oftree\n");
				rc = -ENODEV;
				goto err_gpio;
			}

			rc = gpio_request(gpio_cs, dev_name(&op->dev));
			if (rc) {
				dev_err(&op->dev,
					"can't request spi cs gpio #%d "
					"on gpio line %d\n", i, gpio_cs);
				goto err_gpio;
			}

			gpio_direction_output(gpio_cs, 1);
			ms->gpio_cs[i] = gpio_cs;
		}
	} else {
		master->num_chipselect = 1;
	}

	spin_lock_init(&ms->lock);
	INIT_LIST_HEAD(&ms->queue);
	INIT_WORK(&ms->work, mpc52xx_spi_wq);

	/* Decide if interrupts can be used */
	if (ms->irq0 && ms->irq1) {
		rc = request_irq(ms->irq0, mpc52xx_spi_irq, 0,
				  "mpc5200-spi-modf", ms);
		rc |= request_irq(ms->irq1, mpc52xx_spi_irq, 0,
				  "mpc5200-spi-spif", ms);
		if (rc) {
			free_irq(ms->irq0, ms);
			free_irq(ms->irq1, ms);
			ms->irq0 = ms->irq1 = 0;
		}
	} else {
		/* operate in polled mode */
		ms->irq0 = ms->irq1 = 0;
	}

	if (!ms->irq0)
		dev_info(&op->dev, "using polled mode\n");

	dev_dbg(&op->dev, "registering spi_master struct\n");
	rc = spi_register_master(master);
	if (rc)
		goto err_register;

	dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n");

	return rc;

 err_register:
	dev_err(&ms->master->dev, "initialization failed\n");
	spi_master_put(master);
 err_gpio:
	while (i-- > 0)
		gpio_free(ms->gpio_cs[i]);

	kfree(ms->gpio_cs);
 err_alloc:
 err_init:
	iounmap(regs);
	return rc;
}
Example #17
/**
 * mpc52xx_lpbfifo_irq - IRQ handler for LPB FIFO
 *
 * On transmit, the dma completion irq triggers before the fifo completion
 * triggers.  Handle the dma completion here instead of the LPB FIFO Bestcomm
 * task completion irq because everything is not really done until the LPB FIFO
 * completion irq triggers.
 *
 * In other words:
 * For DMA, on receive, the "Fat Lady" is the Bestcomm completion irq. On
 * transmit, the fifo completion irq is the "Fat Lady". The opera (or in this
 * case the DMA/FIFO operation) is not finished until the "Fat Lady" sings.
 *
 * Reasons for entering this routine:
 * 1) PIO mode rx and tx completion irq
 * 2) DMA interrupt mode tx completion irq
 * 3) DMA polled mode tx
 *
 * Exit conditions:
 * 1) Transfer aborted
 * 2) FIFO complete without DMA; more data to do
 * 3) FIFO complete without DMA; all data transferred
 * 4) FIFO complete using DMA
 *
 * Condition 1 can occur regardless of whether or not DMA is used.
 * It requires executing the callback to report the error and exiting
 * immediately.
 *
 * Condition 2 requires programming the FIFO with the next block of data
 *
 * Condition 3 requires executing the callback to report completion
 *
 * Condition 4 means the same as 3, except that we also retrieve the bcom
 * buffer so DMA doesn't get clogged up.
 *
 * To make things trickier, the spinlock must be dropped before
 * executing the callback, otherwise we could end up with a deadlock
 * or nested spinlock condition.  The out path is non-trivial, so
 * extra fiddling is done to make sure all paths lead to the same
 * outbound code.
 */
static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id)
{
	struct mpc52xx_lpbfifo_request *req;
	u32 status = in_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
	void __iomem *reg;
	u32 *data;
	int count, i;
	int do_callback = 0;
	u32 ts;
	unsigned long flags;
	int dma, write, poll_dma;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	ts = get_tbl();

	req = lpbfifo.req;
	if (!req) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		pr_err("bogus LPBFIFO IRQ\n");
		return IRQ_HANDLED;
	}

	dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
	write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
	poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;

	if (dma && !write) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		pr_err("bogus LPBFIFO IRQ (dma and not writting)\n");
		return IRQ_HANDLED;
	}

	if ((status & 0x01) == 0) {
		goto out;
	}

	/* check abort bit */
	if (status & 0x10) {
		out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
		do_callback = 1;
		goto out;
	}

	/* Read result from hardware */
	count = in_be32(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
	count &= 0x00ffffff;

	if (!dma && !write) {
		/* copy the data out of the FIFO */
		reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
		data = req->data + req->pos;
		for (i = 0; i < count; i += 4)
			*data++ = in_be32(reg);
	}

	/* Update transfer position and count */
	req->pos += count;

	/* Decide what to do next */
	if (req->size - req->pos)
		mpc52xx_lpbfifo_kick(req); /* more work to do */
	else
		do_callback = 1;

 out:
	/* Clear the IRQ */
	out_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS, 0x01);

	if (dma && (status & 0x11)) {
		/*
		 * Count the DMA as complete only when the FIFO completion
		 * status or abort bits are set.
		 *
		 * (status & 0x01) should always be the case except sometimes
		 * when using polled DMA.
		 *
		 * (status & 0x10) {transfer aborted}: This case needs more
		 * testing.
		 */
		bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL);
	}
	req->last_byte = ((u8 *)req->data)[req->size - 1];

	/* When the do_callback flag is set, it means the transfer is finished,
	 * so mark the FIFO as idle */
	if (do_callback)
		lpbfifo.req = NULL;

	if (irq != 0) /* don't increment on polled case */
		req->irq_count++;

	req->irq_ticks += get_tbl() - ts;
	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	/* Spinlock is released; it is now safe to call the callback */
	if (do_callback && req->callback)
		req->callback(req);

	return IRQ_HANDLED;
}
Example #18
phys_size_t initdram(int board_type)
{
	sdram_t *sdram = (sdram_t *)(MMAP_SDRAM);
	gpio_t *gpio = (gpio_t *)(MMAP_GPIO);
	u32 dramsize, i, dramclk;

	/*
	 * When booting from external Flash, the port-size is less than
	 * the port-size of SDRAM.  In this case it is necessary to enable
	 * Data[15:0] on Port Address/Data.
	 */
	out_8(&gpio->par_ad,
		GPIO_PAR_AD_ADDR23 | GPIO_PAR_AD_ADDR22 | GPIO_PAR_AD_ADDR21 |
		GPIO_PAR_AD_DATAL);

	/* Initialize PAR to enable SDRAM signals */
	out_8(&gpio->par_sdram,
		GPIO_PAR_SDRAM_SDWE | GPIO_PAR_SDRAM_SCAS |
		GPIO_PAR_SDRAM_SRAS | GPIO_PAR_SDRAM_SCKE |
		GPIO_PAR_SDRAM_SDCS(3));

	dramsize = CONFIG_SYS_SDRAM_SIZE * 0x100000;
	for (i = 0x13; i < 0x20; i++) {
		if (dramsize == (1 << i))
			break;
	}
	i--;

	if (!(in_be32(&sdram->dacr0) & SDRAMC_DARCn_RE)) {
		dramclk = gd->bus_clk / (CONFIG_SYS_HZ * CONFIG_SYS_HZ);

		/* Initialize DRAM Control Register: DCR */
		out_be16(&sdram->dcr, SDRAMC_DCR_RTIM_9CLKS |
			SDRAMC_DCR_RTIM_6CLKS |
			SDRAMC_DCR_RC((15 * dramclk) >> 4));

		/* Initialize DACR0 */
		out_be32(&sdram->dacr0,
			SDRAMC_DARCn_BA(CONFIG_SYS_SDRAM_BASE) |
			SDRAMC_DARCn_CASL_C1 | SDRAMC_DARCn_CBM_CMD20 |
			SDRAMC_DARCn_PS_32);
		asm("nop");

		/* Initialize DMR0 */
		out_be32(&sdram->dmr0,
			((dramsize - 1) & 0xFFFC0000) | SDRAMC_DMRn_V);
		asm("nop");

		/* Set IP (bit 3) in DACR */
		setbits_be32(&sdram->dacr0, SDRAMC_DARCn_IP);

		/* Wait 30ns to allow banks to precharge */
		for (i = 0; i < 5; i++) {
			asm("nop");
		}

		/* Write to this block to initiate precharge */
		*(u32 *) (CONFIG_SYS_SDRAM_BASE) = 0xA5A59696;

		/*  Set RE (bit 15) in DACR */
		setbits_be32(&sdram->dacr0, SDRAMC_DARCn_RE);

		/* Wait for at least 8 auto refresh cycles to occur */
		for (i = 0; i < 0x2000; i++) {
			asm("nop");
		}

		/* Finish the configuration by issuing the MRS. */
		setbits_be32(&sdram->dacr0, SDRAMC_DARCn_IMRS);
		asm("nop");

		/* Write to the SDRAM Mode Register */
		*(u32 *) (CONFIG_SYS_SDRAM_BASE + 0x400) = 0xA5A59696;
	}

	return dramsize;
}
Example #19
static void restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	fcc_t __iomem *fccp = fep->fcc.fccp;
	fcc_c_t __iomem *fcccp = fep->fcc.fcccp;
	fcc_enet_t __iomem *ep = fep->fcc.ep;
	dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
	u16 paddrh, paddrm, paddrl;
	const unsigned char *mac;
	int i;

	C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);

	/* clear everything (slow & steady does it) */
	for (i = 0; i < sizeof(*ep); i++)
		out_8((u8 __iomem *)ep + i, 0);

	/* get physical address */
	rx_bd_base_phys = fep->ring_mem_addr;
	tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;

	/* point to bds */
	W32(ep, fen_genfcc.fcc_rbase, rx_bd_base_phys);
	W32(ep, fen_genfcc.fcc_tbase, tx_bd_base_phys);

	/* Set maximum bytes per receive buffer.
	 * It must be a multiple of 32.
	 */
	W16(ep, fen_genfcc.fcc_mrblr, PKT_MAXBLR_SIZE);

	W32(ep, fen_genfcc.fcc_rstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
	W32(ep, fen_genfcc.fcc_tstate, (CPMFCR_GBL | CPMFCR_EB) << 24);

	/* Allocate space in the reserved FCC area of DPRAM for the
	 * internal buffers.  No one uses this space (yet), so we
	 * can do this.  Later, we will add resource management for
	 * this area.
	 */

	W16(ep, fen_genfcc.fcc_riptr, fpi->dpram_offset);
	W16(ep, fen_genfcc.fcc_tiptr, fpi->dpram_offset + 32);

	W16(ep, fen_padptr, fpi->dpram_offset + 64);

	/* fill with special symbol...  */
	memset_io(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32);

	W32(ep, fen_genfcc.fcc_rbptr, 0);
	W32(ep, fen_genfcc.fcc_tbptr, 0);
	W32(ep, fen_genfcc.fcc_rcrc, 0);
	W32(ep, fen_genfcc.fcc_tcrc, 0);
	W16(ep, fen_genfcc.fcc_res1, 0);
	W32(ep, fen_genfcc.fcc_res2, 0);

	/* no CAM */
	W32(ep, fen_camptr, 0);

	/* Set CRC preset and mask */
	W32(ep, fen_cmask, 0xdebb20e3);
	W32(ep, fen_cpres, 0xffffffff);

	W32(ep, fen_crcec, 0);		/* CRC Error counter       */
	W32(ep, fen_alec, 0);		/* alignment error counter */
	W32(ep, fen_disfc, 0);		/* discard frame counter   */
	W16(ep, fen_retlim, 15);	/* Retry limit threshold   */
	W16(ep, fen_pper, 0);		/* Normal persistence      */

	/* set group address */
	W32(ep, fen_gaddrh, fep->fcc.gaddrh);
	W32(ep, fen_gaddrl, fep->fcc.gaddrh);

	/* Clear hash filter tables */
	W32(ep, fen_iaddrh, 0);
	W32(ep, fen_iaddrl, 0);

	/* Clear the Out-of-sequence TxBD  */
	W16(ep, fen_tfcstat, 0);
	W16(ep, fen_tfclen, 0);
	W32(ep, fen_tfcptr, 0);

	W16(ep, fen_mflr, PKT_MAXBUF_SIZE);	/* maximum frame length register */
	W16(ep, fen_minflr, PKT_MINBUF_SIZE);	/* minimum frame length register */

	/* set address */
	mac = dev->dev_addr;
	paddrh = ((u16)mac[5] << 8) | mac[4];
	paddrm = ((u16)mac[3] << 8) | mac[2];
	paddrl = ((u16)mac[1] << 8) | mac[0];

	W16(ep, fen_paddrh, paddrh);
	W16(ep, fen_paddrm, paddrm);
	W16(ep, fen_paddrl, paddrl);

	W16(ep, fen_taddrh, 0);
	W16(ep, fen_taddrm, 0);
	W16(ep, fen_taddrl, 0);

	W16(ep, fen_maxd1, 1520);	/* maximum DMA1 length */
	W16(ep, fen_maxd2, 1520);	/* maximum DMA2 length */

	/* Clear stat counters, in case we ever enable RMON */
	W32(ep, fen_octc, 0);
	W32(ep, fen_colc, 0);
	W32(ep, fen_broc, 0);
	W32(ep, fen_mulc, 0);
	W32(ep, fen_uspc, 0);
	W32(ep, fen_frgc, 0);
	W32(ep, fen_ospc, 0);
	W32(ep, fen_jbrc, 0);
	W32(ep, fen_p64c, 0);
	W32(ep, fen_p65c, 0);
	W32(ep, fen_p128c, 0);
	W32(ep, fen_p256c, 0);
	W32(ep, fen_p512c, 0);
	W32(ep, fen_p1024c, 0);

	W16(ep, fen_rfthr, 0);	/* Suggested by manual */
	W16(ep, fen_rfcnt, 0);
	W16(ep, fen_cftype, 0);

	fs_init_bds(dev);

	/* adjust to speed (for RMII mode) */
	if (fpi->use_rmii) {
		if (fep->phydev->speed == 100)
			C8(fcccp, fcc_gfemr, 0x20);
		else
			S8(fcccp, fcc_gfemr, 0x20);
	}

	fcc_cr_cmd(fep, CPM_CR_INIT_TRX);

	/* clear events */
	W16(fccp, fcc_fcce, 0xffff);

	/* Enable interrupts we wish to service */
	W16(fccp, fcc_fccm, FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB);

	/* Set GFMR to enable Ethernet operating mode */
	W32(fccp, fcc_gfmr, FCC_GFMR_TCI | FCC_GFMR_MODE_ENET);

	/* set sync/delimiters */
	W16(fccp, fcc_fdsr, 0xd555);

	W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC);

	if (fpi->use_rmii)
		S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);

	/* adjust to duplex mode */
	if (fep->phydev->duplex)
		S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
	else
		C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);

	/* Restore multicast and promiscuous settings */
	set_multicast_list(dev);

	S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
}
Example #20
/*
 * Submitting a data frame to a specified endpoint of a USB device
 * The frame is put in the driver's transmit queue for this endpoint
 *
 * Arguments:
 * usb          A pointer to the USB structure
 * pkt          A pointer to the user frame structure
 * trans_type   Transaction type - IN, OUT or SETUP
 * dest_addr    Device address - 0~127
 * dest_ep      Endpoint number of the device - 0~16
 * trans_mode   Pipe type - ISO, interrupt, bulk or control
 * dest_speed   USB speed - Low speed or FULL speed
 * data_toggle  Data sequence toggle - 0 or 1
 */
u32 fhci_host_transaction(struct fhci_usb *usb,
			  struct packet *pkt,
			  enum fhci_ta_type trans_type,
			  u8 dest_addr,
			  u8 dest_ep,
			  enum fhci_tf_mode trans_mode,
			  enum fhci_speed dest_speed, u8 data_toggle)
{
	struct endpoint *ep = usb->ep0;
	struct usb_td __iomem *td;
	u16 extra_data;
	u16 td_status;

	fhci_usb_disable_interrupt(usb);
	/* start from the next BD that should be filled */
	td = ep->empty_td;
	td_status = in_be16(&td->status);

	if (td_status & TD_R && in_be16(&td->length)) {
		/* if the TD is not free */
		fhci_usb_enable_interrupt(usb);
		return -1;
	}

	/* get the next TD in the ring */
	ep->empty_td = next_bd(ep->td_base, ep->empty_td, td_status);
	fhci_usb_enable_interrupt(usb);
	pkt->priv_data = td;
	out_be32(&td->buf_ptr, virt_to_phys(pkt->data));
	/* sets up transaction parameters - addr,endp,dir,and type */
	extra_data = (dest_ep << TD_ENDP_SHIFT) | dest_addr;
	switch (trans_type) {
	case FHCI_TA_IN:
		extra_data |= TD_TOK_IN;
		break;
	case FHCI_TA_OUT:
		extra_data |= TD_TOK_OUT;
		break;
	case FHCI_TA_SETUP:
		extra_data |= TD_TOK_SETUP;
		break;
	}
	if (trans_mode == FHCI_TF_ISO)
		extra_data |= TD_ISO;
	out_be16(&td->extra, extra_data);

	/* sets up the buffer descriptor */
	td_status = ((td_status & TD_W) | TD_R | TD_L | TD_I | TD_CNF);
	if (!(pkt->info & PKT_NO_CRC))
		td_status |= TD_TC;

	switch (trans_type) {
	case FHCI_TA_IN:
		if (data_toggle)
			pkt->info |= PKT_PID_DATA1;
		else
			pkt->info |= PKT_PID_DATA0;
		break;
	default:
		if (data_toggle) {
			td_status |= TD_PID_DATA1;
			pkt->info |= PKT_PID_DATA1;
		} else {
			td_status |= TD_PID_DATA0;
			pkt->info |= PKT_PID_DATA0;
		}
		break;
	}

	if ((dest_speed == FHCI_LOW_SPEED) &&
	    (usb->port_status == FHCI_PORT_FULL))
		td_status |= TD_LSP;

	out_be16(&td->status, td_status);

	/* set up buffer length */
	if (trans_type == FHCI_TA_IN)
		out_be16(&td->length, pkt->len + CRC_SIZE);
	else
		out_be16(&td->length, pkt->len);

	/* put the frame to the confirmation queue */
	cq_put(ep->conf_frame_Q, pkt);

	if (cq_howmany(ep->conf_frame_Q) == 1)
		out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO);

	return 0;
}
Example #21
void ppc440spe_setup_pcie_rootpoint(struct pci_controller *hose, int port)
{
	volatile void *mbase = NULL;
	volatile void *rmbase = NULL;

	pci_set_ops(hose,
		    pcie_read_config_byte,
		    pcie_read_config_word,
		    pcie_read_config_dword,
		    pcie_write_config_byte,
		    pcie_write_config_word,
		    pcie_write_config_dword);

	switch (port) {
	case 0:
		mbase = (u32 *)CFG_PCIE0_XCFGBASE;
		rmbase = (u32 *)CFG_PCIE0_CFGBASE;
		hose->cfg_data = (u8 *)CFG_PCIE0_CFGBASE;
		break;
	case 1:
		mbase = (u32 *)CFG_PCIE1_XCFGBASE;
		rmbase = (u32 *)CFG_PCIE1_CFGBASE;
		hose->cfg_data = (u8 *)CFG_PCIE1_CFGBASE;
		break;
	case 2:
		mbase = (u32 *)CFG_PCIE2_XCFGBASE;
		rmbase = (u32 *)CFG_PCIE2_CFGBASE;
		hose->cfg_data = (u8 *)CFG_PCIE2_CFGBASE;
		break;
	}

	/*
	 * Set bus numbers on our root port
	 */
	out_8((u8 *)mbase + PCI_PRIMARY_BUS, 0);
	out_8((u8 *)mbase + PCI_SECONDARY_BUS, 1);
	out_8((u8 *)mbase + PCI_SUBORDINATE_BUS, 1);

	/*
	 * Set up outbound translation to hose->mem_space from PLB
	 * addresses at an offset of 0xd_0000_0000.  We set the low
	 * bits of the mask to 11 to turn off splitting into 8
	 * subregions and to enable the outbound translation.
	 */
	out_le32(mbase + PECFG_POM0LAH, 0x00000000);
	out_le32(mbase + PECFG_POM0LAL, 0x00000000);

	switch (port) {
	case 0:
		mtdcr(DCRN_PEGPL_OMR1BAH(PCIE0),  0x0000000d);
		mtdcr(DCRN_PEGPL_OMR1BAL(PCIE0),  CFG_PCIE_MEMBASE +
			port * CFG_PCIE_MEMSIZE);
		mtdcr(DCRN_PEGPL_OMR1MSKH(PCIE0), 0x7fffffff);
		mtdcr(DCRN_PEGPL_OMR1MSKL(PCIE0),
			~(CFG_PCIE_MEMSIZE - 1) | 3);
		break;
	case 1:
		mtdcr(DCRN_PEGPL_OMR1BAH(PCIE1),  0x0000000d);
		mtdcr(DCRN_PEGPL_OMR1BAL(PCIE1),  (CFG_PCIE_MEMBASE +
			port * CFG_PCIE_MEMSIZE));
		mtdcr(DCRN_PEGPL_OMR1MSKH(PCIE1), 0x7fffffff);
		mtdcr(DCRN_PEGPL_OMR1MSKL(PCIE1),
			~(CFG_PCIE_MEMSIZE - 1) | 3);
		break;
	case 2:
		mtdcr(DCRN_PEGPL_OMR1BAH(PCIE2),  0x0000000d);
		mtdcr(DCRN_PEGPL_OMR1BAL(PCIE2),  (CFG_PCIE_MEMBASE +
			port * CFG_PCIE_MEMSIZE));
		mtdcr(DCRN_PEGPL_OMR1MSKH(PCIE2), 0x7fffffff);
		mtdcr(DCRN_PEGPL_OMR1MSKL(PCIE2),
			~(CFG_PCIE_MEMSIZE - 1) | 3);
		break;
	}

	/* Set up 16GB inbound memory window at 0 */
	out_le32(mbase + PCI_BASE_ADDRESS_0, 0);
	out_le32(mbase + PCI_BASE_ADDRESS_1, 0);
	out_le32(mbase + PECFG_BAR0HMPA, 0x7fffffc);
	out_le32(mbase + PECFG_BAR0LMPA, 0);

	out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
	out_le32(mbase + PECFG_PIM01SAL, 0x00000000);
	out_le32(mbase + PECFG_PIM0LAL, 0);
	out_le32(mbase + PECFG_PIM0LAH, 0);
	out_le32(mbase + PECFG_PIM1LAL,  0x00000000);
	out_le32(mbase + PECFG_PIM1LAH,  0x00000004);
	out_le32(mbase + PECFG_PIMEN, 0x1);

	/* Enable I/O, Mem, and Busmaster cycles */
	out_le16((u16 *)(mbase + PCI_COMMAND),
		 in_le16((u16 *)(mbase + PCI_COMMAND)) |
		 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	printf("PCIE:%d successfully set as rootpoint\n",port);
}
Example #22
static void mpc52xx_psc_write_char(struct uart_port *port, unsigned char c)
{
	out_8(&PSC(port)->mpc52xx_psc_buffer_8, c);
}
Example #23
/*
 * Simple board reset.
 */
void pixis_reset(void)
{
	u8 *pixis_base = (u8 *)PIXIS_BASE;
	out_8(pixis_base + PIXIS_RST, 0);
}
Example #24
static void mpc512x_psc_write_char(struct uart_port *port, unsigned char c)
{
	out_8(&FIFO_512x(port)->txdata_8, c);
}
Example #25
phys_size_t initdram(int board_type)
{
	u32 dramsize;

#if defined(CONFIG_SERIAL_BOOT)
	/*
	 * Serial Boot: The dram is already initialized in start.S
	 * only require to return DRAM size
	 */
	dramsize = CONFIG_SYS_SDRAM_SIZE * 0x100000;
#else
	sdramc_t *sdram = (sdramc_t *)(MMAP_SDRAM);
	ccm_t *ccm = (ccm_t *)MMAP_CCM;
	gpio_t *gpio = (gpio_t *) MMAP_GPIO;
	pm_t *pm = (pm_t *) MMAP_PM;
	u32 i;

	dramsize = CONFIG_SYS_SDRAM_SIZE * 0x100000;

	for (i = 0x13; i < 0x20; i++) {
		if (dramsize == (1 << i))
			break;
	}

	out_8(&pm->pmcr0, 0x2E);
	out_8(&gpio->mscr_sdram, 1);

	clrbits_be16(&ccm->misccr2, CCM_MISCCR2_FBHALF);
	setbits_be16(&ccm->misccr2, CCM_MISCCR2_DDR2CLK);

	out_be32(&sdram->rcrcr, 0x40000000);
	out_be32(&sdram->padcr, 0x01030203);

	out_be32(&sdram->cr00, 0x01010101);
	out_be32(&sdram->cr01, 0x00000101);
	out_be32(&sdram->cr02, 0x01010100);
	out_be32(&sdram->cr03, 0x01010000);
	out_be32(&sdram->cr04, 0x00010101);
	out_be32(&sdram->cr06, 0x00010100);
	out_be32(&sdram->cr07, 0x00000001);
	out_be32(&sdram->cr08, 0x01000001);
	out_be32(&sdram->cr09, 0x00000100);
	out_be32(&sdram->cr10, 0x00010001);
	out_be32(&sdram->cr11, 0x00000200);
	out_be32(&sdram->cr12, 0x01000002);
	out_be32(&sdram->cr13, 0x00000000);
	out_be32(&sdram->cr14, 0x00000100);
	out_be32(&sdram->cr15, 0x02000100);
	out_be32(&sdram->cr16, 0x02000407);
	out_be32(&sdram->cr17, 0x02030007);
	out_be32(&sdram->cr18, 0x02000100);
	out_be32(&sdram->cr19, 0x0A030203);
	out_be32(&sdram->cr20, 0x00020708);
	out_be32(&sdram->cr21, 0x00050008);
	out_be32(&sdram->cr22, 0x04030002);
	out_be32(&sdram->cr23, 0x00000004);
	out_be32(&sdram->cr24, 0x020A0000);
	out_be32(&sdram->cr25, 0x0C00000E);
	out_be32(&sdram->cr26, 0x00002004);
	out_be32(&sdram->cr28, 0x00100010);
	out_be32(&sdram->cr29, 0x00100010);
	out_be32(&sdram->cr31, 0x07990000);
	out_be32(&sdram->cr40, 0x00000000);
	out_be32(&sdram->cr41, 0x00C80064);
	out_be32(&sdram->cr42, 0x44520002);
	out_be32(&sdram->cr43, 0x00C80023);
	out_be32(&sdram->cr45, 0x0000C350);
	out_be32(&sdram->cr56, 0x04000000);
	out_be32(&sdram->cr57, 0x03000304);
	out_be32(&sdram->cr58, 0x40040000);
	out_be32(&sdram->cr59, 0xC0004004);
	out_be32(&sdram->cr60, 0x0642C000);
	out_be32(&sdram->cr61, 0x00000642);
	asm("tpf");

	out_be32(&sdram->cr09, 0x01000100);

	udelay(100);
#endif
	return dramsize;
}
Example #26
static irqreturn_t swim3_interrupt(int irq, void *dev_id)
{
	struct floppy_state *fs = (struct floppy_state *) dev_id;
	struct swim3 __iomem *sw = fs->swim3;
	int intr, err, n;
	int stat, resid;
	struct dbdma_regs __iomem *dr;
	struct dbdma_cmd *cp;
	unsigned long flags;
	struct request *req = fs->cur_req;

	swim3_dbg("* interrupt, state=%d\n", fs->state);

	spin_lock_irqsave(&swim3_lock, flags);
	intr = in_8(&sw->intr);
	err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
	if ((intr & ERROR_INTR) && fs->state != do_transfer)
		swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
			  fs->state, rq_data_dir(req), intr, err);
	switch (fs->state) {
	case locating:
		if (intr & SEEN_SECTOR) {
			out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
			out_8(&sw->select, RELAX);
			out_8(&sw->intr_enable, 0);
			del_timer(&fs->timeout);
			fs->timeout_pending = 0;
			if (sw->ctrack == 0xff) {
				swim3_err("%s", "Seen sector but cyl=ff?\n");
				fs->cur_cyl = -1;
				if (fs->retries > 5) {
					swim3_end_request(fs, -EIO, 0);
					fs->state = idle;
					start_request(fs);
				} else {
					fs->state = jogging;
					act(fs);
				}
				break;
			}
			fs->cur_cyl = sw->ctrack;
			fs->cur_sector = sw->csect;
			if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
				swim3_err("Expected cyl %d, got %d\n",
					  fs->expect_cyl, fs->cur_cyl);
			fs->state = do_transfer;
			act(fs);
		}
		break;
	case seeking:
	case jogging:
		if (sw->nseek == 0) {
			out_8(&sw->control_bic, DO_SEEK);
			out_8(&sw->select, RELAX);
			out_8(&sw->intr_enable, 0);
			del_timer(&fs->timeout);
			fs->timeout_pending = 0;
			if (fs->state == seeking)
				++fs->retries;
			fs->state = settling;
			act(fs);
		}
		break;
	case settling:
		out_8(&sw->intr_enable, 0);
		del_timer(&fs->timeout);
		fs->timeout_pending = 0;
		act(fs);
		break;
	case do_transfer:
		if ((intr & (ERROR_INTR | TRANSFER_DONE)) == 0)
			break;
		out_8(&sw->intr_enable, 0);
		out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
		out_8(&sw->select, RELAX);
		del_timer(&fs->timeout);
		fs->timeout_pending = 0;
		dr = fs->dma;
		cp = fs->dma_cmd;
		if (rq_data_dir(req) == WRITE)
			++cp;
		/*
		 * Check that the main data transfer has finished.
		 * On writing, the swim3 sometimes doesn't use
		 * up all the bytes of the postamble, so we can still
		 * see DMA active here.  That doesn't matter as long
		 * as all the sector data has been transferred.
		 */
		if ((intr & ERROR_INTR) == 0 && cp->xfer_status == 0) {
			/* wait a little while for DMA to complete */
			for (n = 0; n < 100; ++n) {
				if (cp->xfer_status != 0)
					break;
				udelay(1);
				barrier();
			}
		}
		/* turn off DMA */
		out_le32(&dr->control, (RUN | PAUSE) << 16);
		stat = ld_le16(&cp->xfer_status);
		resid = ld_le16(&cp->res_count);
		if (intr & ERROR_INTR) {
			n = fs->scount - 1 - resid / 512;
			if (n > 0) {
				blk_update_request(req, 0, n << 9);
				fs->req_sector += n;
			}
			if (fs->retries < 5) {
				++fs->retries;
				act(fs);
			} else {
				swim3_err("Error %sing block %ld (err=%x)\n",
				       rq_data_dir(req) == WRITE? "writ": "read",
				       (long)blk_rq_pos(req), err);
				swim3_end_request(fs, -EIO, 0);
				fs->state = idle;
			}
		} else {
			if ((stat & ACTIVE) == 0 || resid != 0) {
				/* musta been an error */
				swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
				swim3_err("  state=%d, dir=%x, intr=%x, err=%x\n",
					  fs->state, rq_data_dir(req), intr, err);
				swim3_end_request(fs, -EIO, 0);
				fs->state = idle;
				start_request(fs);
				break;
			}
			fs->retries = 0;
			if (swim3_end_request(fs, 0, fs->scount << 9)) {
				fs->req_sector += fs->scount;
				if (fs->req_sector > fs->secpertrack) {
					fs->req_sector -= fs->secpertrack;
					if (++fs->head > 1) {
						fs->head = 0;
						++fs->req_cyl;
					}
				}
				act(fs);
			} else
				fs->state = idle;
		}
		if (fs->state == idle)
			start_request(fs);
		break;
	default:
		swim3_err("Don't know what to do in state %d\n", fs->state);
	}
	spin_unlock_irqrestore(&swim3_lock, flags);
	return IRQ_HANDLED;
}
Example #27
File: uart.c Project: AoLaD/rtems
static inline void
uwrite(int uart, int reg, unsigned int val)
{
	out_8((uint8_t*)(uart_data[uart].ioBase + reg), val);
}
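
A matching read helper for the same UART driver would use in_8() on the same ioBase + reg address; the name uread below is an assumption, shown only to mirror the write path.

/* Illustrative read counterpart to uwrite(). */
static inline unsigned int
uread(int uart, int reg)
{
	return in_8((uint8_t*)(uart_data[uart].ioBase + reg));
}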
Example #28
static int floppy_open(struct block_device *bdev, fmode_t mode)
{
	struct floppy_state *fs = bdev->bd_disk->private_data;
	struct swim3 __iomem *sw = fs->swim3;
	int n, err = 0;

	if (fs->ref_count == 0) {
		if (fs->mdev->media_bay &&
		    check_media_bay(fs->mdev->media_bay) != MB_FD)
			return -ENXIO;
		out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2);
		out_8(&sw->control_bic, 0xff);
		out_8(&sw->mode, 0x95);
		udelay(10);
		out_8(&sw->intr_enable, 0);
		out_8(&sw->control_bis, DRIVE_ENABLE | INTR_ENABLE);
		swim3_action(fs, MOTOR_ON);
		fs->write_prot = -1;
		fs->cur_cyl = -1;
		for (n = 0; n < 2 * HZ; ++n) {
			if (n >= HZ/30 && swim3_readbit(fs, SEEK_COMPLETE))
				break;
			if (signal_pending(current)) {
				err = -EINTR;
				break;
			}
			swim3_select(fs, RELAX);
			schedule_timeout_interruptible(1);
		}
		if (err == 0 && (swim3_readbit(fs, SEEK_COMPLETE) == 0
				 || swim3_readbit(fs, DISK_IN) == 0))
			err = -ENXIO;
		swim3_action(fs, SETMFM);
		swim3_select(fs, RELAX);

	} else if (fs->ref_count == -1 || mode & FMODE_EXCL)
		return -EBUSY;

	if (err == 0 && (mode & FMODE_NDELAY) == 0
	    && (mode & (FMODE_READ|FMODE_WRITE))) {
		check_disk_change(bdev);
		if (fs->ejected)
			err = -ENXIO;
	}

	if (err == 0 && (mode & FMODE_WRITE)) {
		if (fs->write_prot < 0)
			fs->write_prot = swim3_readbit(fs, WRITE_PROT);
		if (fs->write_prot)
			err = -EROFS;
	}

	if (err) {
		if (fs->ref_count == 0) {
			swim3_action(fs, MOTOR_OFF);
			out_8(&sw->control_bic, DRIVE_ENABLE | INTR_ENABLE);
			swim3_select(fs, RELAX);
		}
		return err;
	}

	if (mode & FMODE_EXCL)
		fs->ref_count = -1;
	else
		++fs->ref_count;

	return 0;
}
Example #29
static void lcd_bl_ctrl(char val)
{
    out_8((u8 *) LCD_BLK_CTRL, in_8((u8 *) LCD_BLK_CTRL) | val);
}
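
lcd_bl_ctrl() is a read-modify-write that sets bits in the backlight control register. Clearing the same bits just inverts the mask; the function name below is an assumption and is not part of the original board code.

/* Sketch: clear backlight control bits instead of setting them. */
static void lcd_bl_clear(char val)
{
    out_8((u8 *) LCD_BLK_CTRL, in_8((u8 *) LCD_BLK_CTRL) & ~val);
}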
Example #30
static void uec_init_rx_parameter(uec_private_t *uec, int num_threads_rx)
{
	u8	bmrx = 0;
	int	i;
	uec_82xx_address_filtering_pram_t	*p_af_pram;

	/* Allocate global Rx parameter RAM page */
	uec->rx_glbl_pram_offset = qe_muram_alloc(
		sizeof(uec_rx_global_pram_t), UEC_RX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_rx_glbl_pram = (uec_rx_global_pram_t *)
				qe_muram_addr(uec->rx_glbl_pram_offset);

	/* Zero Global Rx parameter RAM */
	memset(uec->p_rx_glbl_pram, 0, sizeof(uec_rx_global_pram_t));

	/* Init global Rx parameter RAM */
	/* REMODER: extended feature mode disable, VLAN disable,
	 * lossless flow control disable, receive firmware statistics disable,
	 * extended address parsing mode disable, one Rx queue,
	 * dynamic maximum/minimum frame length disable, IP checksum check
	 * disable, IP address alignment disable
	 */
	out_be32(&uec->p_rx_glbl_pram->remoder, REMODER_INIT_VALUE);

	/* RQPTR */
	uec->thread_dat_rx_offset = qe_muram_alloc(
			num_threads_rx * sizeof(uec_thread_data_rx_t),
			 UEC_THREAD_DATA_ALIGNMENT);
	uec->p_thread_data_rx = (uec_thread_data_rx_t *)
				qe_muram_addr(uec->thread_dat_rx_offset);
	out_be32(&uec->p_rx_glbl_pram->rqptr, uec->thread_dat_rx_offset);

	/* Type_or_Len */
	out_be16(&uec->p_rx_glbl_pram->typeorlen, 3072);

	/* RxRMON base pointer, we don't need it */
	out_be32(&uec->p_rx_glbl_pram->rxrmonbaseptr, 0);

	/* IntCoalescingPTR, we don't need it, no interrupt */
	out_be32(&uec->p_rx_glbl_pram->intcoalescingptr, 0);

	/* RSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_8(&uec->p_rx_glbl_pram->rstate, bmrx);

	/* MRBLR */
	out_be16(&uec->p_rx_glbl_pram->mrblr, MAX_RXBUF_LEN);

	/* RBDQPTR */
	uec->rx_bd_qs_tbl_offset = qe_muram_alloc(
				sizeof(uec_rx_bd_queues_entry_t) + \
				sizeof(uec_rx_prefetched_bds_t),
				 UEC_RX_BD_QUEUES_ALIGNMENT);
	uec->p_rx_bd_qs_tbl = (uec_rx_bd_queues_entry_t *)
				qe_muram_addr(uec->rx_bd_qs_tbl_offset);

	/* Zero it */
	memset(uec->p_rx_bd_qs_tbl, 0, sizeof(uec_rx_bd_queues_entry_t) + \
					sizeof(uec_rx_prefetched_bds_t));
	out_be32(&uec->p_rx_glbl_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
	out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
		 (u32)uec->p_rx_bd_ring);

	/* MFLR */
	out_be16(&uec->p_rx_glbl_pram->mflr, MAX_FRAME_LEN);
	/* MINFLR */
	out_be16(&uec->p_rx_glbl_pram->minflr, MIN_FRAME_LEN);
	/* MAXD1 */
	out_be16(&uec->p_rx_glbl_pram->maxd1, MAX_DMA1_LEN);
	/* MAXD2 */
	out_be16(&uec->p_rx_glbl_pram->maxd2, MAX_DMA2_LEN);
	/* ECAM_PTR */
	out_be32(&uec->p_rx_glbl_pram->ecamptr, 0);
	/* L2QT */
	out_be32(&uec->p_rx_glbl_pram->l2qt, 0);
	/* L3QT */
	for (i = 0; i < 8; i++)	{
		out_be32(&uec->p_rx_glbl_pram->l3qt[i], 0);
	}

	/* VLAN_TYPE */
	out_be16(&uec->p_rx_glbl_pram->vlantype, 0x8100);
	/* TCI */
	out_be16(&uec->p_rx_glbl_pram->vlantci, 0);

	/* Clear PQ2 style address filtering hash table */
	p_af_pram = (uec_82xx_address_filtering_pram_t *) \
			uec->p_rx_glbl_pram->addressfiltering;

	p_af_pram->iaddr_h = 0;
	p_af_pram->iaddr_l = 0;
	p_af_pram->gaddr_h = 0;
	p_af_pram->gaddr_l = 0;
}