Example #1
File: smp.c Project: AllenDou/linux
static int __cpuinit smp_85xx_kick_cpu(int nr)
{
	unsigned long flags;
	const u64 *cpu_rel_addr;
	struct epapr_spin_table __iomem *spin_table;
	struct device_node *np;
	int hw_cpu = get_hard_smp_processor_id(nr);
	int ioremappable;
	int ret = 0;

	WARN_ON(nr < 0 || nr >= NR_CPUS);
	WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);

	pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);

	np = of_get_cpu_node(nr, NULL);
	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);

	if (cpu_rel_addr == NULL) {
		printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
		return -ENOENT;
	}

	/*
	 * A secondary core could be in a spinloop in the bootpage
	 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
	 * The bootpage and highmem can be accessed via ioremap(), but
	 * we need to directly access the spinloop if it's in lowmem.
	 */
	ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);

	/* Map the spin table */
	if (ioremappable)
		spin_table = ioremap(*cpu_rel_addr,
				sizeof(struct epapr_spin_table));
	else
		spin_table = phys_to_virt(*cpu_rel_addr);

	local_irq_save(flags);
#ifdef CONFIG_PPC32
#ifdef CONFIG_HOTPLUG_CPU
	/* Corresponding to generic_set_cpu_dead() */
	generic_set_cpu_up(nr);

	if (system_state == SYSTEM_RUNNING) {
		out_be32(&spin_table->addr_l, 0);

		/*
		 * We don't set the BPTR register here since it already points
		 * to the boot page properly.
		 */
		mpic_reset_core(hw_cpu);

		/* wait until core is ready... */
		if (!spin_event_timeout(in_be32(&spin_table->addr_l) == 1,
						10000, 100)) {
			pr_err("%s: timeout waiting for core %d to reset\n",
							__func__, hw_cpu);
			ret = -ENOENT;
			goto out;
		}

		/*  clear the acknowledge status */
		__secondary_hold_acknowledge = -1;
	}
#endif
	out_be32(&spin_table->pir, hw_cpu);
	out_be32(&spin_table->addr_l, __pa(__early_start));

	if (!ioremappable)
		flush_dcache_range((ulong)spin_table,
			(ulong)spin_table + sizeof(struct epapr_spin_table));

	/* Wait a bit for the CPU to ack. */
	if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
					10000, 100)) {
		pr_err("%s: timeout waiting for core %d to ack\n",
						__func__, hw_cpu);
		ret = -ENOENT;
		goto out;
	}
out:
#else
	smp_generic_kick_cpu(nr);

	out_be32(&spin_table->pir, hw_cpu);
	out_be64((u64 *)(&spin_table->addr_h),
	  __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));

	if (!ioremappable)
		flush_dcache_range((ulong)spin_table,
			(ulong)spin_table + sizeof(struct epapr_spin_table));
#endif

	local_irq_restore(flags);

	if (ioremappable)
		iounmap(spin_table);

	return ret;
}
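
The lowmem branch above has to flush the spin table explicitly because the secondary core polls it with its caches disabled. A minimal sketch of that pattern, assuming the two-address flush_dcache_range(start, stop) used in this file (flush_object() is a hypothetical helper, not part of the kernel):

static inline void flush_object(void *obj, size_t size)
{
	unsigned long start = (unsigned long)obj;

	/* Push the freshly written fields out to RAM for the secondary. */
	flush_dcache_range(start, start + size);
}
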
Example #2
int zynq_load(Xilinx_desc *desc, const void *buf, size_t bsize)
{
	unsigned long ts; /* Timestamp */
	u32 partialbit = 0;
	u32 i, control, isr_status, status, swap, diff;
	u32 *buf_start;

	/* Detect whether we are working with a partial or full bitstream */
	if (bsize != desc->size) {
		printf("%s: Working with partial bitstream\n", __func__);
		partialbit = 1;
	}

	buf_start = check_data((u8 *)buf, bsize, &swap);
	if (!buf_start)
		return FPGA_FAIL;

	/* Check whether the data is offset from the start of the buffer */
	diff = (u32)buf_start - (u32)buf;
	if (diff) {
		printf("%s: Bitstream is not validated yet (diff %x)\n",
		       __func__, diff);
		return FPGA_FAIL;
	}

	if ((u32)buf < SZ_1M) {
		printf("%s: Bitstream has to be placed up to 1MB (%x)\n",
		       __func__, (u32)buf);
		return FPGA_FAIL;
	}

	if ((u32)buf != ALIGN((u32)buf, ARCH_DMA_MINALIGN)) {
		u32 *new_buf = (u32 *)ALIGN((u32)buf, ARCH_DMA_MINALIGN);

		printf("%s: Align buffer at %x to %x(swap %d)\n", __func__,
		       (u32)buf_start, (u32)new_buf, swap);

		for (i = 0; i < (bsize/4); i++)
			new_buf[i] = load_word(&buf_start[i], swap);

		swap = SWAP_DONE;
		buf = new_buf;
	} else if (swap != SWAP_DONE) {
		/* For bitstreams which are already aligned */
		u32 *new_buf = (u32 *)buf;

		printf("%s: Bitstream is not swapped(%d) - swap it\n", __func__,
		       swap);

		for (i = 0; i < (bsize/4); i++)
			new_buf[i] = load_word(&buf_start[i], swap);

		swap = SWAP_DONE;
	}

	/* Clear loopback bit */
	clrbits_le32(&devcfg_base->mctrl, DEVCFG_MCTRL_PCAP_LPBK);

	if (!partialbit) {
		zynq_slcr_devcfg_disable();

		/* Setting PCFG_PROG_B signal to high */
		control = readl(&devcfg_base->ctrl);
		writel(control | DEVCFG_CTRL_PCFG_PROG_B, &devcfg_base->ctrl);
		/* Setting PCFG_PROG_B signal to low */
		writel(control & ~DEVCFG_CTRL_PCFG_PROG_B, &devcfg_base->ctrl);

		/* Polling the PCAP_INIT status for Reset */
		ts = get_timer(0);
		while (readl(&devcfg_base->status) & DEVCFG_STATUS_PCFG_INIT) {
			if (get_timer(ts) > CONFIG_SYS_FPGA_WAIT) {
				printf("%s: Timeout wait for INIT to clear\n",
				       __func__);
				return FPGA_FAIL;
			}
		}

		/* Setting PCFG_PROG_B signal to high */
		writel(control | DEVCFG_CTRL_PCFG_PROG_B, &devcfg_base->ctrl);

		/* Polling the PCAP_INIT status for Set */
		ts = get_timer(0);
		while (!(readl(&devcfg_base->status) &
			DEVCFG_STATUS_PCFG_INIT)) {
			if (get_timer(ts) > CONFIG_SYS_FPGA_WAIT) {
				printf("%s: Timeout wait for INIT to set\n",
				       __func__);
				return FPGA_FAIL;
			}
		}
	}

	isr_status = readl(&devcfg_base->int_sts);

	/* Clear it all, so if Boot ROM comes back, it can proceed */
	writel(0xFFFFFFFF, &devcfg_base->int_sts);

	if (isr_status & DEVCFG_ISR_FATAL_ERROR_MASK) {
		debug("%s: Fatal errors in PCAP 0x%X\n", __func__, isr_status);

		/* If RX FIFO overflow, need to flush RX FIFO first */
		if (isr_status & DEVCFG_ISR_RX_FIFO_OV) {
			writel(DEVCFG_MCTRL_RFIFO_FLUSH, &devcfg_base->mctrl);
			writel(0xFFFFFFFF, &devcfg_base->int_sts);
		}
		return FPGA_FAIL;
	}

	status = readl(&devcfg_base->status);

	debug("%s: Status = 0x%08X\n", __func__, status);

	if (status & DEVCFG_STATUS_DMA_CMD_Q_F) {
		debug("%s: Error: device busy\n", __func__);
		return FPGA_FAIL;
	}

	debug("%s: Device ready\n", __func__);

	if (!(status & DEVCFG_STATUS_DMA_CMD_Q_E)) {
		if (!(readl(&devcfg_base->int_sts) & DEVCFG_ISR_DMA_DONE)) {
			/* Error state, transfer cannot occur */
			debug("%s: ISR indicates error\n", __func__);
			return FPGA_FAIL;
		} else {
			/* Clear out the status */
			writel(DEVCFG_ISR_DMA_DONE, &devcfg_base->int_sts);
		}
	}

	if (status & DEVCFG_STATUS_DMA_DONE_CNT_MASK) {
		/* Clear the count of completed DMA transfers */
		writel(DEVCFG_STATUS_DMA_DONE_CNT_MASK, &devcfg_base->status);
	}

	debug("%s: Source = 0x%08X\n", __func__, (u32)buf);
	debug("%s: Size = %zu\n", __func__, bsize);

	/* flush(clean & invalidate) d-cache range buf */
	flush_dcache_range((u32)buf, (u32)buf +
			   roundup(bsize, ARCH_DMA_MINALIGN));

	/* Set up the transfer */
	writel((u32)buf | 1, &devcfg_base->dma_src_addr);
	writel(0xFFFFFFFF, &devcfg_base->dma_dst_addr);
	writel(bsize >> 2, &devcfg_base->dma_src_len);
	writel(0, &devcfg_base->dma_dst_len);

	isr_status = readl(&devcfg_base->int_sts);

	/* Polling the PCAP_INIT status for Set */
	ts = get_timer(0);
	while (!(isr_status & DEVCFG_ISR_DMA_DONE)) {
		if (isr_status & DEVCFG_ISR_ERROR_FLAGS_MASK) {
			debug("%s: Error: isr = 0x%08X\n", __func__,
			      isr_status);
			debug("%s: Write count = 0x%08X\n", __func__,
			      readl(&devcfg_base->write_count));
			debug("%s: Read count = 0x%08X\n", __func__,
			      readl(&devcfg_base->read_count));

			return FPGA_FAIL;
		}
		if (get_timer(ts) > CONFIG_SYS_FPGA_PROG_TIME) {
			printf("%s: Timeout wait for DMA to complete\n",
			       __func__);
			return FPGA_FAIL;
		}
		isr_status = readl(&devcfg_base->int_sts);
	}

	debug("%s: DMA transfer is done\n", __func__);

	/* Check FPGA configuration completion */
	ts = get_timer(0);
	while (!(isr_status & DEVCFG_ISR_PCFG_DONE)) {
		if (get_timer(ts) > CONFIG_SYS_FPGA_WAIT) {
			printf("%s: Timeout wait for FPGA to config\n",
			       __func__);
			return FPGA_FAIL;
		}
		isr_status = readl(&devcfg_base->int_sts);
	}

	debug("%s: FPGA config done\n", __func__);

	/* Clear out the DMA status */
	writel(DEVCFG_ISR_DMA_DONE, &devcfg_base->int_sts);

	if (!partialbit)
		zynq_slcr_devcfg_enable();

	return FPGA_SUCCESS;
}
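
Before the PCAP DMA is started, the buffer is flushed so the engine reads current data from RAM. A hedged sketch of that pre-DMA flush, built on the U-Boot primitives used above (dma_prepare_src() is a hypothetical name):

static void dma_prepare_src(const void *buf, size_t len)
{
	/*
	 * Round out to cache-line boundaries so the flushed window
	 * covers every line the buffer touches.
	 */
	u32 start = (u32)buf & ~(ARCH_DMA_MINALIGN - 1);
	u32 end = ALIGN((u32)buf + len, ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}
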
Example #3
/**
 * Transmit one frame
 * @param[in] dev Our ethernet device to handle
 * @param[in] packet Pointer to the data to be transmitted
 * @param[in] length Data count in bytes
 * @return 0 on success
 */
static int fec_send(struct eth_device *dev, void *packet, int length)
{
	unsigned int status;
	uint32_t size, end;
	uint32_t addr;
	int timeout = FEC_XFER_TIMEOUT;
	int ret = 0;

	/*
	 * This routine transmits one frame.  This routine only accepts
	 * 6-byte Ethernet addresses.
	 */
	struct fec_priv *fec = (struct fec_priv *)dev->priv;

	/*
	 * Check for valid length of data.
	 */
	if ((length > 1500) || (length <= 0)) {
		printf("Payload (%d) too large\n", length);
		return -1;
	}

	/*
	 * Setup the transmit buffer. We are always using the first buffer for
	 * transmission, the second will be empty and only used to stop the DMA
	 * engine. We also flush the packet to RAM here to avoid cache trouble.
	 */
#ifdef CONFIG_FEC_MXC_SWAP_PACKET
	swap_packet((uint32_t *)packet, length);
#endif

	addr = (uint32_t)packet;
	end = roundup(addr + length, ARCH_DMA_MINALIGN);
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	flush_dcache_range(addr, end);

	writew(length, &fec->tbd_base[fec->tbd_index].data_length);
	writel(addr, &fec->tbd_base[fec->tbd_index].data_pointer);

	/*
	 * update BD's status now
	 * This block:
	 * - is always the last in a chain (means no chain)
	 * - should transmit the CRC
	 * - might be the last BD in the list, so the address counter should
	 *   wrap (-> keep the WRAP flag)
	 */
	status = readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_WRAP;
	status |= FEC_TBD_LAST | FEC_TBD_TC | FEC_TBD_READY;
	writew(status, &fec->tbd_base[fec->tbd_index].status);

	/*
	 * Flush data cache. This code flushes both TX descriptors to RAM.
	 * After this code, the descriptors will be safely in RAM and we
	 * can start DMA.
	 */
	size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	addr = (uint32_t)fec->tbd_base;
	flush_dcache_range(addr, addr + size);

	/*
	 * Below we read the DMA descriptor's last four bytes back from the
	 * DRAM. This is important in order to make sure that all WRITE
	 * operations on the bus that were triggered by previous cache FLUSH
	 * have completed.
	 *
	 * Otherwise, on MX28, it is possible to observe a corruption of the
	 * DMA descriptors. Please refer to schematic "Figure 1-2" in MX28RM
	 * for the bus structure of MX28. The scenario is as follows:
	 *
	 * 1) ARM core triggers a series of WRITEs on the AHB_ARB2 bus going
	 *    to DRAM due to flush_dcache_range()
	 * 2) ARM core writes the FEC registers via AHB_ARB2
	 * 3) FEC DMA starts reading/writing from/to DRAM via AHB_ARB3
	 *
	 * Note that 2) does sometimes finish before 1) due to reordering of
	 * WRITE accesses on the AHB bus, therefore triggering 3) before the
	 * DMA descriptor is fully written into DRAM. This results in occasional
	 * corruption of the DMA descriptor.
	 */
	readl(addr + size - 4);

	/*
	 * Enable SmartDMA transmit task
	 */
	fec_tx_task_enable(fec);

	/*
	 * Wait until the frame is sent. Polling TDAR in x_des_active is an
	 * uncached register read, so no cache maintenance is needed in this
	 * loop; the BD status is re-checked (with invalidation) below.
	 */
	while (--timeout) {
		if (!(readl(&fec->eth->x_des_active) & FEC_X_DES_ACTIVE_TDAR))
			break;
	}

	if (!timeout) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The TDAR bit is cleared when the descriptors are all out from TX
	 * but on mx6solox we noticed that the READY bit is still not cleared
	 * right after TDAR.
	 * These are two distinct signals, and in IC simulation we found that
	 * TDAR always gets cleared prior to the READY bit of the last BD
	 * being cleared.
	 * mx6solox uses a later version of the FEC IP, and it looks as though
	 * this intrinsic behaviour of the TDAR bit has changed in the newer
	 * version.
	 *
	 * Fix this by polling the READY bit of BD after the TDAR polling,
	 * which covers the mx6solox case and does not harm the other SoCs.
	 */
	timeout = FEC_XFER_TIMEOUT;
	while (--timeout) {
		invalidate_dcache_range(addr, addr + size);
		if (!(readw(&fec->tbd_base[fec->tbd_index].status) &
		    FEC_TBD_READY))
			break;
	}

	if (!timeout)
		ret = -EINVAL;

out:
	debug("fec_send: status 0x%x index %d ret %i\n",
			readw(&fec->tbd_base[fec->tbd_index].status),
			fec->tbd_index, ret);
	/* for next transmission use the other buffer */
	if (fec->tbd_index)
		fec->tbd_index = 0;
	else
		fec->tbd_index = 1;

	return ret;
}
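
The read-back after the descriptor flush is the essential ordering trick in this driver: it forces the posted bus writes issued by the flush to complete before the FEC registers are touched. A sketch of that pairing (publish_descriptor() is a hypothetical name):

static void publish_descriptor(void *desc, size_t size)
{
	uint32_t addr = (uint32_t)desc;

	flush_dcache_range(addr, addr + size);
	/*
	 * Read part of the descriptor back so the WRITEs are known to
	 * have reached DRAM before DMA is kicked (see the MX28 note above).
	 */
	readl(addr + size - 4);
}
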
Example #4
static int zynq_gem_init(struct udevice *dev)
{
	u32 i, nwconfig;
	int ret;
	unsigned long clk_rate = 0;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;
	struct emac_bd *dummy_tx_bd = &priv->tx_bd[TX_FREE_DESC];
	struct emac_bd *dummy_rx_bd = &priv->tx_bd[TX_FREE_DESC + 2];

	if (!priv->init) {
		/* Disable all interrupts */
		writel(0xFFFFFFFF, &regs->idr);

		/* Disable the receiver & transmitter */
		writel(0, &regs->nwctrl);
		writel(0, &regs->txsr);
		writel(0, &regs->rxsr);
		writel(0, &regs->phymntnc);

		/* Clear the Hash registers for the mac address
		 * pointed by AddressPtr
		 */
		writel(0x0, &regs->hashl);
		/* Write bits [63:32] in TOP */
		writel(0x0, &regs->hashh);

		/* Clear all counters */
		for (i = 0; i < STAT_SIZE; i++)
			readl(&regs->stat[i]);

		/* Setup RxBD space */
		memset(priv->rx_bd, 0, RX_BUF * sizeof(struct emac_bd));

		for (i = 0; i < RX_BUF; i++) {
			priv->rx_bd[i].status = 0xF0000000;
			priv->rx_bd[i].addr =
					((ulong)(priv->rxbuffers) +
							(i * PKTSIZE_ALIGN));
		}
		/* WRAP bit to last BD */
		priv->rx_bd[--i].addr |= ZYNQ_GEM_RXBUF_WRAP_MASK;
		/* Write RxBDs to IP */
		writel((ulong)priv->rx_bd, &regs->rxqbase);

		/* Setup for DMA Configuration register */
		writel(ZYNQ_GEM_DMACR_INIT, &regs->dmacr);

		/* Setup Network Control register: enable the MDIO port */
		setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_MDEN_MASK);

		/* Disable the second priority queue */
		dummy_tx_bd->addr = 0;
		dummy_tx_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
				ZYNQ_GEM_TXBUF_LAST_MASK|
				ZYNQ_GEM_TXBUF_USED_MASK;

		dummy_rx_bd->addr = ZYNQ_GEM_RXBUF_WRAP_MASK |
				ZYNQ_GEM_RXBUF_NEW_MASK;
		dummy_rx_bd->status = 0;
		flush_dcache_range((ulong)&dummy_tx_bd, (ulong)&dummy_tx_bd +
				   sizeof(dummy_tx_bd));
		flush_dcache_range((ulong)&dummy_rx_bd, (ulong)&dummy_rx_bd +
				   sizeof(dummy_rx_bd));

		writel((ulong)dummy_tx_bd, &regs->transmit_q1_ptr);
		writel((ulong)dummy_rx_bd, &regs->receive_q1_ptr);

		priv->init++;
	}

	ret = phy_startup(priv->phydev);
	if (ret)
		return ret;

	if (!priv->phydev->link) {
		printf("%s: No link.\n", priv->phydev->dev->name);
		return -1;
	}

	nwconfig = ZYNQ_GEM_NWCFG_INIT;

	if (priv->interface == PHY_INTERFACE_MODE_SGMII) {
		nwconfig |= ZYNQ_GEM_NWCFG_SGMII_ENBL |
			    ZYNQ_GEM_NWCFG_PCS_SEL;
#ifdef CONFIG_ARM64
		writel(readl(&regs->pcscntrl) | ZYNQ_GEM_PCS_CTL_ANEG_ENBL,
		       &regs->pcscntrl);
#endif
	}

	switch (priv->phydev->speed) {
	case SPEED_1000:
		writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED1000,
		       &regs->nwcfg);
		clk_rate = ZYNQ_GEM_FREQUENCY_1000;
		break;
	case SPEED_100:
		writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED100,
		       &regs->nwcfg);
		clk_rate = ZYNQ_GEM_FREQUENCY_100;
		break;
	case SPEED_10:
		clk_rate = ZYNQ_GEM_FREQUENCY_10;
		break;
	}

	/* Change the rclk and clk only when not using the EMIO interface */
	if (!priv->emio) {
#ifndef CONFIG_CLK_ZYNQMP
		zynq_slcr_gem_clk_setup((ulong)priv->iobase !=
					ZYNQ_GEM_BASEADDR0, clk_rate);
#else
		ret = clk_set_rate(&priv->clk, clk_rate);
		if (IS_ERR_VALUE(ret))
			return -1;
#endif
	}

	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
					ZYNQ_GEM_NWCTRL_TXEN_MASK);

	return 0;
}
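
Note that the dummy-descriptor flushes above operate on the descriptor itself; flushing the address of the local pointer variable instead would only clean a few bytes of stack. A minimal sketch of the correct form (flush_bd() is a hypothetical helper):

static void flush_bd(struct emac_bd *bd)
{
	/* Flush the descriptor the hardware will read, not the pointer. */
	flush_dcache_range((ulong)bd, (ulong)bd + sizeof(*bd));
}
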
Example #5
static inline void macb_flush_rx_buffer(struct macb_device *macb)
{
	flush_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
				MACB_RX_BUFFER_SIZE);
}
Example #6
static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uint32_t)info->cmd_buf;

	flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
}
Example #7
static int fec_init(struct eth_device *dev, bd_t* bd)
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	uint32_t mib_ptr = (uint32_t)&fec->eth->rmon_t_drop;
	uint32_t size;
	int i, ret;

	/* Initialize MAC address */
	fec_set_hwaddr(dev);

	/*
	 * Allocate transmit descriptors, there are two in total. This
	 * allocation respects cache alignment.
	 */
	if (!fec->tbd_base) {
		size = roundup(2 * sizeof(struct fec_bd),
				ARCH_DMA_MINALIGN);
		fec->tbd_base = memalign(ARCH_DMA_MINALIGN, size);
		if (!fec->tbd_base) {
			ret = -ENOMEM;
			goto err1;
		}
		memset(fec->tbd_base, 0, size);
		fec_tbd_init(fec);
	}

	/*
	 * Allocate receive descriptors. This allocation respects cache
	 * alignment.
	 */
	if (!fec->rbd_base) {
		size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd),
				ARCH_DMA_MINALIGN);
		fec->rbd_base = memalign(ARCH_DMA_MINALIGN, size);
		if (!fec->rbd_base) {
			ret = -ENOMEM;
			goto err2;
		}
		memset(fec->rbd_base, 0, size);
		/*
		 * Initialize RxBD ring
		 */
		if (fec_rbd_init(fec, FEC_RBD_NUM, FEC_MAX_PKT_SIZE) < 0) {
			ret = -ENOMEM;
			goto err3;
		}
		flush_dcache_range((unsigned)fec->rbd_base,
				   (unsigned)fec->rbd_base + size);
	}

	fec_reg_setup(fec);

	if (fec->xcv_type != SEVENWIRE)
		fec_mii_setspeed(fec->bus->priv);

	/*
	 * Set Opcode/Pause Duration Register
	 */
	writel(0x00010020, &fec->eth->op_pause);	/* FIXME 0xffff0020; */
	writel(0x2, &fec->eth->x_wmrk);
	/*
	 * Set multicast address filter
	 */
	writel(0x00000000, &fec->eth->gaddr1);
	writel(0x00000000, &fec->eth->gaddr2);


	/* clear MIB RAM */
	for (i = mib_ptr; i <= mib_ptr + 0xfc; i += 4)
		writel(0, i);

	/* FIFO receive start register */
	writel(0x520, &fec->eth->r_fstart);

	/* size and address of each buffer */
	writel(FEC_MAX_PKT_SIZE, &fec->eth->emrbr);
	writel((uint32_t)fec->tbd_base, &fec->eth->etdsr);
	writel((uint32_t)fec->rbd_base, &fec->eth->erdsr);

#ifndef CONFIG_PHYLIB
	if (fec->xcv_type != SEVENWIRE)
		miiphy_restart_aneg(dev);
#endif
	fec_open(dev);
	return 0;

err3:
	free(fec->rbd_base);
err2:
	free(fec->tbd_base);
err1:
	return ret;
}
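
Both rings above follow the same cache-safe allocation recipe: round the size up to ARCH_DMA_MINALIGN, allocate on that alignment, zero, and flush before the hardware sees the ring. A hedged sketch of the recipe (alloc_dma_ring() is a hypothetical name):

static void *alloc_dma_ring(size_t nelem, size_t elem_size)
{
	size_t size = roundup(nelem * elem_size, ARCH_DMA_MINALIGN);
	void *ring = memalign(ARCH_DMA_MINALIGN, size);

	if (!ring)
		return NULL;
	memset(ring, 0, size);
	/* Descriptors must be in RAM before the controller fetches them. */
	flush_dcache_range((unsigned long)ring, (unsigned long)ring + size);
	return ring;
}
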
Example #8
/*******************************************************************************
 * This function initializes the power domain topology tree by querying the
 * platform. The power domain nodes higher than the CPU are populated in the
 * array psci_non_cpu_pd_nodes[] and the CPU power domains are populated in
 * psci_cpu_pd_nodes[]. The platform exports its static topology map through the
 * populate_power_domain_topology_tree() API. The algorithm populates the
 * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
 * topology map.  On a platform that implements two clusters of 2 cpus each, and
 * supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would look
 * like this:
 *
 * ---------------------------------------------------
 * | system node | cluster 0 node  | cluster 1 node  |
 * ---------------------------------------------------
 *
 * And populated psci_cpu_pd_nodes would look like this :
 * <-    cpus cluster0   -><-   cpus cluster1   ->
 * ------------------------------------------------
 * |   CPU 0   |   CPU 1   |   CPU 2   |   CPU 3  |
 * ------------------------------------------------
 ******************************************************************************/
int psci_setup(void)
{
	const unsigned char *topology_tree;

	/* Query the topology map from the platform */
	topology_tree = plat_get_power_domain_tree_desc();

	/* Populate the power domain arrays using the platform topology map */
	populate_power_domain_tree(topology_tree);

	/* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
	psci_update_pwrlvl_limits();

	/* Populate the mpidr field of cpu node for this CPU */
	psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
		read_mpidr() & MPIDR_AFFINITY_MASK;

#if !USE_COHERENT_MEM
	/*
	 * The psci_non_cpu_pd_nodes only needs flushing when it's not allocated in
	 * coherent memory.
	 */
	flush_dcache_range((uintptr_t) &psci_non_cpu_pd_nodes,
			   sizeof(psci_non_cpu_pd_nodes));
#endif

	flush_dcache_range((uintptr_t) &psci_cpu_pd_nodes,
			   sizeof(psci_cpu_pd_nodes));

	psci_init_req_local_pwr_states();

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);

	plat_setup_psci_ops((uintptr_t)psci_entrypoint,
					&psci_plat_pm_ops);
	assert(psci_plat_pm_ops);

	/* Initialize the psci capability */
	psci_caps = PSCI_GENERIC_CAP;

	if (psci_plat_pm_ops->pwr_domain_off)
		psci_caps |=  define_psci_cap(PSCI_CPU_OFF);
	if (psci_plat_pm_ops->pwr_domain_on &&
			psci_plat_pm_ops->pwr_domain_on_finish)
		psci_caps |=  define_psci_cap(PSCI_CPU_ON_AARCH64);
	if (psci_plat_pm_ops->pwr_domain_suspend &&
			psci_plat_pm_ops->pwr_domain_suspend_finish) {
		psci_caps |=  define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
		if (psci_plat_pm_ops->get_sys_suspend_power_state)
			psci_caps |=  define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
	}
	if (psci_plat_pm_ops->system_off)
		psci_caps |=  define_psci_cap(PSCI_SYSTEM_OFF);
	if (psci_plat_pm_ops->system_reset)
		psci_caps |=  define_psci_cap(PSCI_SYSTEM_RESET);

	return 0;
}
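
One signature caveat: Trusted Firmware's flush_dcache_range(addr, size) takes a base and a length, unlike the U-Boot (start, end) convention seen in most other examples on this page. A small wrapper that makes the convention explicit (tf_flush_object() is a hypothetical name):

static inline void tf_flush_object(const void *obj, size_t size)
{
	/* Trusted Firmware convention: (base address, length in bytes). */
	flush_dcache_range((uintptr_t)obj, size);
}
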
Example #9
static int32_t tbase_init_entry()
{
  DBG_PRINTF("tbase_init\n\r");

  // Save el1 registers in case non-secure world has already been set up.
  cm_el1_sysregs_context_save(NON_SECURE);

  uint64_t mpidr = read_mpidr();
  uint32_t linear_id = platform_get_core_pos(mpidr);
  tbase_context *tbase_ctx = &secure_context[linear_id];
  
  
  // Note: mapping is 1:1, so physical and virtual addresses are the same here.
  cpu_context_t *ns_entry_context = (cpu_context_t *) cm_get_context(mpidr, NON_SECURE);  
  
  // ************************************************************************************
  // Configure parameter passing to tbase
  
  // Calculate page start addresses for register areas.
  registerFileStart[REGISTER_FILE_NWD] = page_align((uint64_t)ns_entry_context, DOWN);
  registerFileStart[REGISTER_FILE_MONITOR] = page_align((uint64_t)&msm_area, DOWN);

  // Calculate page end addresses for register areas.
  registerFileEnd[REGISTER_FILE_NWD] = (uint64_t)(&ns_entry_context[TBASE_CORE_COUNT]);
  registerFileEnd[REGISTER_FILE_MONITOR] = ((uint64_t)&msm_area) +sizeof(msm_area);

  int32_t totalPages = 0;
  for (int area=0; area<REGISTER_FILE_COUNT; area++) {
    int32_t pages = page_align(registerFileEnd[area] - registerFileStart[area], UP) / PAGE_SIZE;
    assert( pages +totalPages <= TBASE_INTERFACE_PAGES );
    tbase_init_register_file(area, totalPages, pages);
    totalPages += pages;
  }

  // ************************************************************************************
  // Create boot structure
  tbaseBootCfg.magic       = TBASE_BOOTCFG_MAGIC;
  tbaseBootCfg.length      = sizeof(bootCfg_t);
  tbaseBootCfg.version     = TBASE_MONITOR_INTERFACE_VERSION;
  
  tbaseBootCfg.dRamBase    = TBASE_NWD_DRAM_BASE;
  tbaseBootCfg.dRamSize    = TBASE_NWD_DRAM_SIZE;
  tbaseBootCfg.secDRamBase = TBASE_SWD_DRAM_BASE;
  tbaseBootCfg.secDRamSize = TBASE_SWD_DRAM_SIZE;
  tbaseBootCfg.secIRamBase = TBASE_SWD_IMEM_BASE;
  tbaseBootCfg.secIRamSize = TBASE_SWD_IMEM_SIZE;
  
  tbaseBootCfg.conf_mair_el3 = read_mair_el3();
  tbaseBootCfg.MSMPteCount = totalPages;
  tbaseBootCfg.MSMBase = (uint64_t)registerFileL2;
  
  tbaseBootCfg.gic_distributor_base = TBASE_GIC_DIST_BASE;
  tbaseBootCfg.gic_cpuinterface_base = TBASE_GIC_CPU_BASE;
  tbaseBootCfg.gic_version = TBASE_GIC_VERSION;
  
  tbaseBootCfg.total_number_spi = TBASE_SPI_COUNT;
  tbaseBootCfg.ssiq_number = TBASE_SSIQ_NRO;
  
  tbaseBootCfg.flags       = TBASE_MONITOR_FLAGS;


        DBG_PRINTF("*** tbase boot cfg ***\n\r");
        DBG_PRINTF("* magic                 : 0x%.X\n\r",tbaseBootCfg.magic);
        DBG_PRINTF("* length                : 0x%.X\n\r",tbaseBootCfg.length);
        DBG_PRINTF("* version               : 0x%.X\n\r",tbaseBootCfg.version);
        DBG_PRINTF("* dRamBase              : 0x%.X\n\r",tbaseBootCfg.dRamBase);
        DBG_PRINTF("* dRamSize              : 0x%.X\n\r",tbaseBootCfg.dRamSize);
        DBG_PRINTF("* secDRamBase           : 0x%.X\n\r",tbaseBootCfg.secDRamBase);
        DBG_PRINTF("* secDRamSize           : 0x%.X\n\r",tbaseBootCfg.secDRamSize);
        DBG_PRINTF("* secIRamBase           : 0x%.X\n\r",tbaseBootCfg.secIRamBase);
        DBG_PRINTF("* secIRamSize           : 0x%.X\n\r",tbaseBootCfg.secIRamSize);
        DBG_PRINTF("* conf_mair_el3         : 0x%.X\n\r",tbaseBootCfg.conf_mair_el3);
        DBG_PRINTF("* MSMPteCount           : 0x%.X\n\r",tbaseBootCfg.MSMPteCount);
        DBG_PRINTF("* MSMBase               : 0x%.X\n\r",tbaseBootCfg.MSMBase);
        DBG_PRINTF("* gic_distributor_base  : 0x%.X\n\r",tbaseBootCfg.gic_distributor_base);
        DBG_PRINTF("* gic_cpuinterface_base : 0x%.X\n\r",tbaseBootCfg.gic_cpuinterface_base);
        DBG_PRINTF("* gic_version           : 0x%.X\n\r",tbaseBootCfg.gic_version);
        DBG_PRINTF("* total_number_spi      : 0x%.X\n\r",tbaseBootCfg.total_number_spi);
        DBG_PRINTF("* ssiq_number           : 0x%.X\n\r",tbaseBootCfg.ssiq_number);
        DBG_PRINTF("* flags                 : 0x%.X\n\r",tbaseBootCfg.flags);

  // ************************************************************************************
  // tbaseBootCfg and l2 entries may be accessed uncached, so we must flush them.
  flush_dcache_range((unsigned long)&tbaseBootCfg, sizeof(bootCfg_t));
  flush_dcache_range((unsigned long)&registerFileL2, sizeof(registerFileL2));
  
  // ************************************************************************************
  // Set registers for tbase initialization entry
  cpu_context_t *s_entry_context = &tbase_ctx->cpu_ctx;
  gp_regs_t *s_entry_gpregs = get_gpregs_ctx(s_entry_context);
  write_ctx_reg(s_entry_gpregs, CTX_GPREG_X1, (int64_t)&tbaseBootCfg);

  
  // SPSR for SMC handling (FIQ mode)
  tbaseEntrySpsr = TBASE_ENTRY_SPSR;
  
  DBG_PRINTF("tbase init SPSR 0x%x\n\r", read_ctx_reg(get_el3state_ctx(&tbase_ctx->cpu_ctx), 
             CTX_SPSR_EL3) );
  DBG_PRINTF("tbase SMC SPSR %x\nr\r", tbaseEntrySpsr );

  // ************************************************************************************
  // Start tbase

  tbase_synchronous_sp_entry(tbase_ctx);
  tbase_ctx->state = TBASE_STATE_ON;
  
#if TBASE_PM_ENABLE
  // Register power management hooks with PSCI
  psci_register_spd_pm_hook(&tbase_pm);
#endif

  cm_el1_sysregs_context_restore(NON_SECURE);
  cm_set_next_eret_context(NON_SECURE);

  return 1;
}
Example #10
static int
cpm_enet_start_xmit(struct sk_buff *skb, struct device *dev)
{
	struct cpm_enet_private *cep = (struct cpm_enet_private *)dev->priv;
	volatile cbd_t	*bdp;
	unsigned long flags;

	/* Transmitter timeout, serious problems. */
	if (dev->tbusy) {
		int tickssofar = jiffies - dev->trans_start;
		if (tickssofar < 200)
			return 1;
		printk("%s: transmit timed out.\n", dev->name);
		cep->stats.tx_errors++;
#ifndef final_version
		{
			int	i;
			cbd_t	*bdp;
			printk(" Ring data dump: cur_tx %p%s cur_rx %p.\n",
				   cep->cur_tx, cep->tx_full ? " (full)" : "",
				   cep->cur_rx);
			bdp = cep->tx_bd_base;
			for (i = 0 ; i < TX_RING_SIZE; i++, bdp++)
				printk("%04x %04x %08x\n",
					bdp->cbd_sc,
					bdp->cbd_datlen,
					bdp->cbd_bufaddr);
			bdp = cep->rx_bd_base;
			for (i = 0 ; i < RX_RING_SIZE; i++, bdp++)
				printk("%04x %04x %08x\n",
					bdp->cbd_sc,
					bdp->cbd_datlen,
					bdp->cbd_bufaddr);
		}
#endif

		dev->tbusy=0;
		dev->trans_start = jiffies;

		return 0;
	}

	/* Block a timer-based transmit from overlapping.  This could better be
	   done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
	if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
		printk("%s: Transmitter access conflict.\n", dev->name);
		return 1;
	}

	if (test_and_set_bit(0, (void*)&cep->lock) != 0) {
		printk("%s: tx queue lock!.\n", dev->name);
		/* don't clear dev->tbusy flag. */
		return 1;
	}

	/* Fill in a Tx ring entry */
	bdp = cep->cur_tx;

#ifndef final_version
	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		/* Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since dev->tbusy should be set.
		 */
		printk("%s: tx queue full!.\n", dev->name);
		cep->lock = 0;
		return 1;
	}
#endif

	/* Clear all of the status flags.
	 */
	bdp->cbd_sc &= ~BD_ENET_TX_STATS;

	/* If the frame is short, tell CPM to pad it.
	*/
	if (skb->len <= ETH_ZLEN)
		bdp->cbd_sc |= BD_ENET_TX_PAD;
	else
		bdp->cbd_sc &= ~BD_ENET_TX_PAD;

	/* Set buffer length and buffer pointer.
	*/
	bdp->cbd_datlen = skb->len;
	bdp->cbd_bufaddr = __pa(skb->data);

	/* Save skb pointer.
	*/
	cep->tx_skbuff[cep->skb_cur] = skb;

	cep->stats.tx_bytes += skb->len;
	cep->skb_cur = (cep->skb_cur+1) & TX_RING_MOD_MASK;
	
	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	flush_dcache_range((unsigned long)skb->data,
			   (unsigned long)skb->data + skb->len);

	/* Send it on its way.  Tell the CPM it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | BD_ENET_TX_LAST | BD_ENET_TX_TC);

	dev->trans_start = jiffies;

	/* If this was the last BD in the ring, start at the beginning again.
	*/
	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
		bdp = cep->tx_bd_base;
	else
		bdp++;

	save_flags(flags);
	cli();
	cep->lock = 0;
	if (bdp->cbd_sc & BD_ENET_TX_READY)
		cep->tx_full = 1;
	else
		dev->tbusy=0;
	restore_flags(flags);

	cep->cur_tx = (cbd_t *)bdp;

	return 0;
}
Example #11
static int nand_read_page(const struct nfc_config *conf, u32 offs,
			  void *dest, int len)
{
	dma_addr_t dst = (dma_addr_t)dest;
	int nsectors = len / conf->ecc_size;
	u16 rand_seed;
	u32 val;
	int page;

	page = offs / conf->page_size;

	if (offs % conf->page_size || len % conf->ecc_size ||
	    len > conf->page_size || len < 0)
		return -EINVAL;

	/* clear ecc status */
	writel(0, SUNXI_NFC_BASE + NFC_ECC_ST);

	/* Choose correct seed */
	rand_seed = random_seed[page % conf->nseeds];

	writel((rand_seed << 16) | (conf->ecc_strength << 12) |
		(conf->randomize ? NFC_ECC_RANDOM_EN : 0) |
		(conf->ecc_size == 512 ? NFC_ECC_BLOCK_SIZE : 0) |
		NFC_ECC_EN | NFC_ECC_PIPELINE | NFC_ECC_EXCEPTION,
		SUNXI_NFC_BASE + NFC_ECC_CTL);

	flush_dcache_range(dst, ALIGN(dst + conf->ecc_size, ARCH_DMA_MINALIGN));

	/* SUNXI_DMA */
	writel(0x0, SUNXI_DMA_BASE + SUNXI_DMA_CFG_REG0); /* clr dma cmd */
	/* read from REG_IO_DATA */
	writel(SUNXI_NFC_BASE + NFC_IO_DATA,
	       SUNXI_DMA_BASE + SUNXI_DMA_SRC_START_ADDR_REG0);
	/* read to RAM */
	writel(dst, SUNXI_DMA_BASE + SUNXI_DMA_DEST_START_ADDRR_REG0);
	writel(SUNXI_DMA_DDMA_PARA_REG_SRC_WAIT_CYC |
	       SUNXI_DMA_DDMA_PARA_REG_SRC_BLK_SIZE,
	       SUNXI_DMA_BASE + SUNXI_DMA_DDMA_PARA_REG0);
	writel(len, SUNXI_DMA_BASE + SUNXI_DMA_DDMA_BC_REG0);
	writel(SUNXI_DMA_DDMA_CFG_REG_LOADING |
	       SUNXI_DMA_DDMA_CFG_REG_DMA_DEST_DATA_WIDTH_32 |
	       SUNXI_DMA_DDMA_CFG_REG_DDMA_DST_DRQ_TYPE_DRAM |
	       SUNXI_DMA_DDMA_CFG_REG_DMA_SRC_DATA_WIDTH_32 |
	       SUNXI_DMA_DDMA_CFG_REG_DMA_SRC_ADDR_MODE_IO |
	       SUNXI_DMA_DDMA_CFG_REG_DDMA_SRC_DRQ_TYPE_NFC,
	       SUNXI_DMA_BASE + SUNXI_DMA_CFG_REG0);

	writel(nsectors, SUNXI_NFC_BASE + NFC_SECTOR_NUM);
	writel(NFC_ST_DMA_INT_FLAG, SUNXI_NFC_BASE + NFC_ST);
	writel(NFC_DATA_TRANS |	NFC_PAGE_CMD | NFC_DATA_SWAP_METHOD,
	       SUNXI_NFC_BASE + NFC_CMD);

	if (!check_value(SUNXI_NFC_BASE + NFC_ST, NFC_ST_DMA_INT_FLAG,
			 DEFAULT_TIMEOUT_US)) {
		printf("Error while initializing dma interrupt\n");
		return -EIO;
	}
	writel(NFC_ST_DMA_INT_FLAG, SUNXI_NFC_BASE + NFC_ST);

	if (!check_value_negated(SUNXI_DMA_BASE + SUNXI_DMA_CFG_REG0,
				 SUNXI_DMA_DDMA_CFG_REG_LOADING,
				 DEFAULT_TIMEOUT_US)) {
		printf("Error while waiting for dma transfer to finish\n");
		return -EIO;
	}

	invalidate_dcache_range(dst,
				ALIGN(dst + conf->ecc_size, ARCH_DMA_MINALIGN));

	val = readl(SUNXI_NFC_BASE + NFC_ECC_ST);

	/* ECC error detected. */
	if (val & 0xffff)
		return -EIO;

	/*
	 * Return 1 if the page is empty.
	 * We consider the page as empty if the first ECC block is marked
	 * empty.
	 */
	return (val & 0x10000) ? 1 : 0;
}
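
This driver brackets the DMA read with both cache operations: a flush of the destination beforehand (so no dirty line can be evicted on top of the incoming data) and an invalidate afterwards (so the CPU refetches from RAM). A sketch of that pairing, with hypothetical helper names:

static void dma_read_prepare(dma_addr_t dst, size_t len)
{
	/* Clean the window so no dirty line lands on the DMA data. */
	flush_dcache_range(dst, ALIGN(dst + len, ARCH_DMA_MINALIGN));
}

static void dma_read_finish(dma_addr_t dst, size_t len)
{
	/* Drop stale lines so the CPU sees what the DMA wrote. */
	invalidate_dcache_range(dst, ALIGN(dst + len, ARCH_DMA_MINALIGN));
}
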
Example #12
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	int err = 0;
	int signal;
	unsigned long address = 0;
#ifdef CONFIG_MMU
	pmd_t *pmdp;
	pte_t *ptep;
#endif

	frame = get_sigframe(ka, regs, sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	signal = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	if (info)
		err |= copy_siginfo_to_user(&frame->info, info);

	/* Create the ucontext. */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(NULL, &frame->uc.uc_link);
	err |= __save_altstack(&frame->uc.uc_stack, regs->r1);
	err |= setup_sigcontext(&frame->uc.uc_mcontext,
			regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	/* Set up to return from userspace. If provided, use a stub
	 already in userspace. */
	/* minus 8 is offset to cater for "rtsd r15,8" */
	/* addi r12, r0, __NR_sigreturn */
	err |= __put_user(0x31800000 | __NR_rt_sigreturn,
			frame->tramp + 0);
	/* brki r14, 0x8 */
	err |= __put_user(0xb9cc0008, frame->tramp + 1);

	/* Return from sighandler will jump to the tramp.
	 Negative 8 offset because return is rtsd r15, 8 */
	regs->r15 = ((unsigned long)frame->tramp)-8;

	address = ((unsigned long)frame->tramp);
#ifdef CONFIG_MMU
	pmdp = pmd_offset(pud_offset(
			pgd_offset(current->mm, address),
					address), address);

	preempt_disable();
	ptep = pte_offset_map(pmdp, address);
	if (pte_present(*ptep)) {
		address = (unsigned long) page_address(pte_page(*ptep));
		/* MS: I need add offset in page */
		address += ((unsigned long)frame->tramp) & ~PAGE_MASK;
		/* MS address is virtual */
		address = __virt_to_phys(address);
		invalidate_icache_range(address, address + 8);
		flush_dcache_range(address, address + 8);
	}
	pte_unmap(ptep);
	preempt_enable();
#else
	flush_icache_range(address, address + 8);
	flush_dcache_range(address, address + 8);
#endif
	if (err)
		goto give_sigsegv;

	/* Set up registers for signal handler */
	regs->r1 = (unsigned long) frame;

	/* Signal handler args: */
	regs->r5 = signal; /* arg 0: signum */
	regs->r6 = (unsigned long) &frame->info; /* arg 1: siginfo */
	regs->r7 = (unsigned long) &frame->uc; /* arg2: ucontext */
	/* Offset to handle microblaze rtid r14, 0 */
	regs->pc = (unsigned long)ka->sa.sa_handler;

	set_fs(USER_DS);

#ifdef DEBUG_SIG
	pr_info("SIG deliver (%s:%d): sp=%p pc=%08lx\n",
		current->comm, current->pid, frame, regs->pc);
#endif

	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return -EFAULT;
}
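
Writing the trampoline is a self-modifying-code situation: the freshly stored instructions must leave the d-cache, and any stale i-cache lines must be discarded, before userspace can execute them. A sketch using the canonical d-cache-first order (sync_icache() is a hypothetical name wrapping the two primitives used above):

static void sync_icache(unsigned long paddr, size_t len)
{
	flush_dcache_range(paddr, paddr + len);		/* push instructions to RAM */
	invalidate_icache_range(paddr, paddr + len);	/* drop stale i-cache lines */
}
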
Example #13
static int mv_ata_exec_ata_cmd(int port, struct sata_fis_h2d *cfis,
			       u8 *buffer, u32 len, u32 iswrite)
{
	struct mv_priv *priv = (struct mv_priv *)sata_dev_desc[port].priv;
	struct crqb *req;
	int slot;
	u32 start;

	if (len >= 64 * 1024) {
		printf("We only support <64K transfers for now\n");
		return -1;
	}

	/* Initialize request */
	slot = get_reqip(port);
	memset(&priv->request[slot], 0, sizeof(struct crqb));
	req = &priv->request[slot];

	req->dtb_low = (u32)buffer;

	/* Don't use PRDs */
	req->control_flags = CRQB_CNTRLFLAGS_PRDMODE;
	req->control_flags |= iswrite ? 0 : CRQB_CNTRLFLAGS_DIR;
	req->control_flags |=
	    ((cfis->pm_port_c << CRQB_CNTRLFLAGS_PMPORTSHIFT)
	     & CRQB_CNTRLFLAGS_PMPORTMASK);

	req->drb_count = len;

	req->ata_cmd_feat = (cfis->command << CRQB_CMDFEAT_CMDSHIFT) &
		CRQB_CMDFEAT_CMDMASK;
	req->ata_cmd_feat |= (cfis->features << CRQB_CMDFEAT_FEATSHIFT) &
		CRQB_CMDFEAT_FEATMASK;

	req->ata_addr = (cfis->lba_low << CRQB_ADDR_LBA_LOWSHIFT) &
		CRQB_ADDR_LBA_LOWMASK;
	req->ata_addr |= (cfis->lba_mid << CRQB_ADDR_LBA_MIDSHIFT) &
		CRQB_ADDR_LBA_MIDMASK;
	req->ata_addr |= (cfis->lba_high << CRQB_ADDR_LBA_HIGHSHIFT) &
		CRQB_ADDR_LBA_HIGHMASK;
	req->ata_addr |= (cfis->device << CRQB_ADDR_DEVICE_SHIFT) &
		CRQB_ADDR_DEVICE_MASK;

	req->ata_addr_exp = (cfis->lba_low_exp << CRQB_ADDR_LBA_LOW_EXP_SHIFT) &
		CRQB_ADDR_LBA_LOW_EXP_MASK;
	req->ata_addr_exp |=
		(cfis->lba_mid_exp << CRQB_ADDR_LBA_MID_EXP_SHIFT) &
		CRQB_ADDR_LBA_MID_EXP_MASK;
	req->ata_addr_exp |=
		(cfis->lba_high_exp << CRQB_ADDR_LBA_HIGH_EXP_SHIFT) &
		CRQB_ADDR_LBA_HIGH_EXP_MASK;
	req->ata_addr_exp |=
		(cfis->features_exp << CRQB_ADDR_FEATURE_EXP_SHIFT) &
		CRQB_ADDR_FEATURE_EXP_MASK;

	req->ata_sect_count =
		(cfis->sector_count << CRQB_SECTCOUNT_COUNT_SHIFT) &
		CRQB_SECTCOUNT_COUNT_MASK;
	req->ata_sect_count |=
		(cfis->sector_count_exp << CRQB_SECTCOUNT_COUNT_EXP_SHIFT) &
		CRQB_SECTCOUNT_COUNT_EXP_MASK;

	/* Flush data */
	start = (u32)req & ~(ARCH_DMA_MINALIGN - 1);
	flush_dcache_range(start,
			   start + ALIGN(sizeof(*req), ARCH_DMA_MINALIGN));

	/* Trigger operation */
	slot = get_next_reqip(port);
	set_reqip(port, slot);

	/* Wait for completion */
	if (wait_dma_completion(port, slot, 10000)) {
		printf("ATA operation timed out\n");
		return -1;
	}

	process_responses(port);

	/* Invalidate data on read */
	if (buffer && len) {
		start = (u32)buffer & ~(ARCH_DMA_MINALIGN - 1);
		invalidate_dcache_range(start,
					start + ALIGN(len, ARCH_DMA_MINALIGN));
	}

	return len;
}
Example #14
/*
 * This mmu table looks as below
 * The Level 0 table contains two entries, each covering 512GB: one for
 * Level1 Table0 and the other for Level1 Table1.
 * Level1 Table0 contains entries for each 1GB from 0 to 511GB.
 * Level1 Table1 contains entries for each 1GB from 512GB to 1TB.
 * Level2 Table0, Level2 Table1, Level2 Table2 and Level2 Table3 contains
 * entries for each 2MB starting from 0GB, 1GB, 2GB and 3GB respectively.
 */
static void zynqmp_mmu_setup(void)
{
	int el;
	u32 index_attr;
	u64 i, section_l1t0, section_l1t1;
	u64 section_l2t0, section_l2t1, section_l2t2, section_l2t3;
	u64 *level0_table = (u64 *)gd->arch.tlb_addr;
	u64 *level1_table_0 = (u64 *)(gd->arch.tlb_addr + TLB_TABLE_SIZE);
	u64 *level1_table_1 = (u64 *)(gd->arch.tlb_addr + (2 * TLB_TABLE_SIZE));
	u64 *level2_table_0 = (u64 *)(gd->arch.tlb_addr + (3 * TLB_TABLE_SIZE));
	u64 *level2_table_1 = (u64 *)(gd->arch.tlb_addr + (4 * TLB_TABLE_SIZE));
	u64 *level2_table_2 = (u64 *)(gd->arch.tlb_addr + (5 * TLB_TABLE_SIZE));
	u64 *level2_table_3 = (u64 *)(gd->arch.tlb_addr + (6 * TLB_TABLE_SIZE));

	level0_table[0] =
		(u64)level1_table_0 | PMD_TYPE_TABLE;
	level0_table[1] =
		(u64)level1_table_1 | PMD_TYPE_TABLE;

	/*
	 * set level 1 table 0, covering 0 to 512GB
	 * set level 1 table 1, covering 512GB to 1TB
	 */
	section_l1t0 = 0;
	section_l1t1 = BLOCK_SIZE_L0;

	index_attr = 0;
	for (i = 0; i < 512; i++) {
		level1_table_0[i] = section_l1t0;
		level1_table_0[i] |= attr_tbll1t0[index_attr].attr;
		attr_tbll1t0[index_attr].num--;
		if (attr_tbll1t0[index_attr].num == 0)
			index_attr++;
		level1_table_1[i] = section_l1t1;
		level1_table_1[i] |= DEVICE_ATTR;
		section_l1t0 += BLOCK_SIZE_L1;
		section_l1t1 += BLOCK_SIZE_L1;
	}

	level1_table_0[0] =
		(u64)level2_table_0 | PMD_TYPE_TABLE;
	level1_table_0[1] =
		(u64)level2_table_1 | PMD_TYPE_TABLE;
	level1_table_0[2] =
		(u64)level2_table_2 | PMD_TYPE_TABLE;
	level1_table_0[3] =
		(u64)level2_table_3 | PMD_TYPE_TABLE;

	section_l2t0 = 0;
	section_l2t1 = section_l2t0 + BLOCK_SIZE_L1; /* 1GB */
	section_l2t2 = section_l2t1 + BLOCK_SIZE_L1; /* 2GB */
	section_l2t3 = section_l2t2 + BLOCK_SIZE_L1; /* 3GB */

	index_attr = 0;

	for (i = 0; i < 512; i++) {
		level2_table_0[i] = section_l2t0 | MEMORY_ATTR;
		level2_table_1[i] = section_l2t1 | MEMORY_ATTR;
		level2_table_2[i] = section_l2t2 | DEVICE_ATTR;
		level2_table_3[i] = section_l2t3 |
				    attr_tbll2t3[index_attr].attr;
		attr_tbll2t3[index_attr].num--;
		if (attr_tbll2t3[index_attr].num == 0)
			index_attr++;
		section_l2t0 += BLOCK_SIZE_L2;
		section_l2t1 += BLOCK_SIZE_L2;
		section_l2t2 += BLOCK_SIZE_L2;
		section_l2t3 += BLOCK_SIZE_L2;
	}

	/* flush new MMU table */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);

	/* point TTBR to the new table */
	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  ZYNQMP_TCR, MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
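
The tail of the function shows the required ordering when switching tables: flush the new tables to RAM, point TTBR at them, then enable the MMU, since the table walker may fetch descriptors straight from memory. A condensed sketch of those steps (install_tables() is a hypothetical name):

static void install_tables(u64 tlb_addr, size_t tlb_size, int el)
{
	/* The walker may bypass the d-cache, so the tables must be in RAM. */
	flush_dcache_range(tlb_addr, tlb_addr + tlb_size);
	set_ttbr_tcr_mair(el, tlb_addr, ZYNQMP_TCR, MEMORY_ATTRIBUTES);
	set_sctlr(get_sctlr() | CR_M);	/* enable the MMU */
}
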
Example #15
/*
 * Flush range from all levels of d-cache/unified-cache.
 * Affects the range [start, start + size - 1].
 */
__weak void flush_cache(unsigned long start, unsigned long size)
{
	flush_dcache_range(start, start + size);
}
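
Usage note: this wrapper is size-based while the primitive it calls is end-address-based, so the two calls below are equivalent (flush_buffer() and its arguments are illustrative):

static void flush_buffer(unsigned long buf_addr, unsigned long buf_len)
{
	flush_cache(buf_addr, buf_len);			/* (start, size) */
	/* ...which expands to the end-address form: */
	flush_dcache_range(buf_addr, buf_addr + buf_len);	/* (start, end) */
}
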
Example #16
static int ldpaa_eth_tx(struct eth_device *net_dev, void *buf, int len)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
	struct dpaa_fd fd;
	u64 buffer_start;
	int data_offset, err;
	u32 timeo = (CONFIG_SYS_HZ * 10) / 1000;
	u32 time_start;
	struct qbman_swp *swp = dflt_dpio->sw_portal;
	struct qbman_eq_desc ed;
	struct qbman_release_desc releasedesc;

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	data_offset = priv->tx_data_offset;

	do {
		err = qbman_swp_acquire(dflt_dpio->sw_portal,
					dflt_dpbp->dpbp_attr.bpid,
					&buffer_start, 1);
	} while (err == -EBUSY);

	if (err < 0) {
		printf("qbman_swp_acquire() failed\n");
		return -ENOMEM;
	}

	debug("TX data: malloc buffer start=0x%p\n", (u64 *)buffer_start);

	memcpy(((uint8_t *)(buffer_start) + data_offset), buf, len);

	flush_dcache_range(buffer_start, buffer_start +
					LDPAA_ETH_RX_BUFFER_SIZE);

	ldpaa_fd_set_addr(&fd, (u64)buffer_start);
	ldpaa_fd_set_offset(&fd, (uint16_t)(data_offset));
	ldpaa_fd_set_bpid(&fd, dflt_dpbp->dpbp_attr.bpid);
	ldpaa_fd_set_len(&fd, len);

	fd.simple.ctrl = LDPAA_FD_CTRL_ASAL | LDPAA_FD_CTRL_PTA |
				LDPAA_FD_CTRL_PTV1;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_qd(&ed, priv->tx_qdid, priv->tx_flow_id, 0);

	time_start = get_timer(0);

	while (get_timer(time_start) < timeo) {
		err = qbman_swp_enqueue(swp, &ed,
				(const struct qbman_fd *)(&fd));
		if (err != -EBUSY)
			break;
	}

	if (err < 0) {
		printf("error enqueueing Tx frame\n");
		goto error;
	}

	return err;

error:
	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
	time_start = get_timer(0);
	do {
		/* Release buffer into the QBMAN */
		err = qbman_swp_release(swp, &releasedesc, &buffer_start, 1);
	} while (get_timer(time_start) < timeo && err == -EBUSY);

	if (err == -EBUSY)
		printf("TX data: QBMAN buffer release fails\n");

	return err;
}
Example #17
static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uint32_t)info->data_buf;

	flush_dcache_range(addr, addr + info->data_buf_size);
}
Example #18
/*******************************************************************************
 * Generic function to load an image into the trusted RAM,
 * given a name, extents of free memory & whether the image should be loaded at
 * the bottom or top of the free memory. It updates the memory layout if the
 * load is successful. It also updates the image information and the entry point
 * information in the params passed
 ******************************************************************************/
int load_image(meminfo_t *mem_layout,
			 const char *image_name,
			 unsigned int load_type,
			 unsigned long fixed_addr,
			 image_info_t *image_data,
			 entry_point_info_t *entry_point_info)
{
	uintptr_t dev_handle;
	uintptr_t image_handle;
	uintptr_t image_spec;
	unsigned long temp_image_base = 0;
	unsigned long image_base = 0;
	long offset = 0;
	size_t image_size = 0;
	size_t bytes_read = 0;
	int io_result = IO_FAIL;

	assert(mem_layout != NULL);
	assert(image_name != NULL);
	assert(image_data->h.version >= VERSION_1);

	/* Obtain a reference to the image by querying the platform layer */
	io_result = plat_get_image_source(image_name, &dev_handle, &image_spec);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to obtain reference to image '%s' (%i)\n",
			image_name, io_result);
		return io_result;
	}

	/* Attempt to access the image */
	io_result = io_open(dev_handle, image_spec, &image_handle);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to access image '%s' (%i)\n",
			image_name, io_result);
		return io_result;
	}

	/* Find the size of the image */
	io_result = io_size(image_handle, &image_size);
	if ((io_result != IO_SUCCESS) || (image_size == 0)) {
		WARN("Failed to determine the size of the image '%s' file (%i)\n",
			image_name, io_result);
		goto exit;
	}

	/* See if we have enough space */
	if (image_size > mem_layout->free_size) {
		WARN("Cannot load '%s' file: Not enough space.\n",
			image_name);
		dump_load_info(0, image_size, mem_layout);
		goto exit;
	}

	switch (load_type) {

	case TOP_LOAD:

	  /* Load the image in the top of free memory */
	  temp_image_base = mem_layout->free_base + mem_layout->free_size;
	  temp_image_base -= image_size;

	  /* Page align base address and check whether the image still fits */
	  image_base = page_align(temp_image_base, DOWN);
	  assert(image_base <= temp_image_base);

	  if (image_base < mem_layout->free_base) {
		WARN("Cannot load '%s' file: Not enough space.\n",
			image_name);
		dump_load_info(image_base, image_size, mem_layout);
		io_result = -ENOMEM;
		goto exit;
	  }

	  /* Calculate the amount of extra memory used due to alignment */
	  offset = temp_image_base - image_base;

	  break;

	case BOT_LOAD:

	  /* Load the BL2 image in the bottom of free memory */
	  temp_image_base = mem_layout->free_base;
	  image_base = page_align(temp_image_base, UP);
	  assert(image_base >= temp_image_base);

	  /* Page align base address and check whether the image still fits */
	  if (image_base + image_size >
	      mem_layout->free_base + mem_layout->free_size) {
		WARN("Cannot load '%s' file: Not enough space.\n",
		  image_name);
		dump_load_info(image_base, image_size, mem_layout);
		io_result = -ENOMEM;
		goto exit;
	  }

	  /* Calculate the amount of extra memory used due to alignment */
	  offset = image_base - temp_image_base;

	  break;

	default:
	  assert(0);

	}

	/*
	 * Some images must be loaded at a fixed address, not a dynamic one.
	 *
	 * This has been implemented as a hack on top of the existing dynamic
	 * loading mechanism, for the time being.  If the 'fixed_addr' function
	 * argument is different from zero, then it will force the load address.
	 * So we still have this principle of top/bottom loading but the code
	 * determining the load address is bypassed and the load address is
	 * forced to the fixed one.
	 *
	 * This can result in quite a lot of wasted space because we still use
	 * a single meminfo structure to represent the extents of free memory,
	 * where we should use some sort of linked list.
	 *
	 * E.g. we want to load BL2 at address 0x04020000, the resulting memory
	 *      layout should look as follows:
	 * ------------ 0x04040000
	 * |          |  <- Free space (1)
	 * |----------|
	 * |   BL2    |
	 * |----------| 0x04020000
	 * |          |  <- Free space (2)
	 * |----------|
	 * |   BL1    |
	 * ------------ 0x04000000
	 *
	 * But in the current hacky implementation, we'll need to specify
	 * whether BL2 is loaded at the top or bottom of the free memory.
	 * E.g. if BL2 is considered as top-loaded, the meminfo structure
	 * will give the following view of the memory, hiding the chunk of
	 * free memory above BL2:
	 * ------------ 0x04040000
	 * |          |
	 * |          |
	 * |   BL2    |
	 * |----------| 0x04020000
	 * |          |  <- Free space (2)
	 * |----------|
	 * |   BL1    |
	 * ------------ 0x04000000
	 */
	if (fixed_addr != 0) {
		/* Load the image at the given address. */
		image_base = fixed_addr;

		/* Check whether the image fits. */
		if ((image_base < mem_layout->free_base) ||
		    (image_base + image_size >
		       mem_layout->free_base + mem_layout->free_size)) {
			WARN("Cannot load '%s' file: Not enough space.\n",
				image_name);
			dump_load_info(image_base, image_size, mem_layout);
			io_result = -ENOMEM;
			goto exit;
		}

		/* Check whether the fixed load address is page-aligned. */
		if (!is_page_aligned(image_base)) {
			WARN("Cannot load '%s' file at unaligned address 0x%lx\n",
				image_name, fixed_addr);
			io_result = -ENOMEM;
			goto exit;
		}

		/*
		 * Calculate the amount of extra memory used due to fixed
		 * loading.
		 */
		if (load_type == TOP_LOAD) {
			unsigned long max_addr, space_used;
			/*
			 * ------------ max_addr
			 * | /wasted/ |                 | offset
			 * |..........|..............................
			 * |  image   |                 | image_flen
			 * |----------| fixed_addr
			 * |          |
			 * |          |
			 * ------------ total_base
			 */
			max_addr = mem_layout->total_base + mem_layout->total_size;
			/*
			 * Compute the amount of memory used by the image.
			 * Corresponds to all space above the image load
			 * address.
			 */
			space_used = max_addr - fixed_addr;
			/*
			 * Calculate the amount of wasted memory within the
			 * amount of memory used by the image.
			 */
			offset = space_used - image_size;
		} else /* BOT_LOAD */
			/*
			 * ------------
			 * |          |
			 * |          |
			 * |----------|
			 * |  image   |
			 * |..........| fixed_addr
			 * | /wasted/ |                 | offset
			 * ------------ total_base
			 */
			offset = fixed_addr - mem_layout->total_base;
	}

	/* We have enough space so load the image now */
	/* TODO: Consider whether to try to recover/retry a partially successful read */
	io_result = io_read(image_handle, image_base, image_size, &bytes_read);
	if ((io_result != IO_SUCCESS) || (bytes_read < image_size)) {
		WARN("Failed to load '%s' file (%i)\n", image_name, io_result);
		goto exit;
	}

	image_data->image_base = image_base;
	image_data->image_size = image_size;

	entry_point_info->pc = image_base;

	/*
	 * File has been successfully loaded. Update the free memory
	 * data structure & flush the contents of the TZRAM so that
	 * the next EL can see it.
	 */
	/* Update the memory contents */
	flush_dcache_range(image_base, image_size);

	mem_layout->free_size -= image_size + offset;

	/* Update the base of free memory since it has moved up */
	if (load_type == BOT_LOAD)
		mem_layout->free_base += offset + image_size;

exit:
	io_close(image_handle);
	/* Ignore improbable/unrecoverable error in 'close' */

	/* TODO: Consider maintaining open device connection from this bootloader stage */
	io_dev_close(dev_handle);
	/* Ignore improbable/unrecoverable error in 'dev_close' */

	return io_result;
}
Example #19
/* -1 --- error, can't enqueue -- no space available */
static int jr_enqueue(uint32_t *desc_addr,
	       void (*callback)(uint32_t status, void *arg),
	       void *arg)
{
	struct jr_regs *regs = (struct jr_regs *)CONFIG_SYS_FSL_JR0_ADDR;
	int head = jr.head;
	uint32_t desc_word;
	int length = desc_len(desc_addr);
	int i;
#ifdef CONFIG_PHYS_64BIT
	uint32_t *addr_hi, *addr_lo;
#endif

	/* The descriptor must be submitted to SEC block as per endianness
	 * of the SEC Block.
	 * So, if the endianness of Core and SEC block is different, each word
	 * of the descriptor will be byte-swapped.
	 */
	for (i = 0; i < length; i++) {
		desc_word = desc_addr[i];
		sec_out32((uint32_t *)&desc_addr[i], desc_word);
	}

	phys_addr_t desc_phys_addr = virt_to_phys(desc_addr);

	if (sec_in32(&regs->irsa) == 0 ||
	    CIRC_SPACE(jr.head, jr.tail, jr.size) <= 0)
		return -1;

	jr.info[head].desc_phys_addr = desc_phys_addr;
	jr.info[head].callback = (void *)callback;
	jr.info[head].arg = arg;
	jr.info[head].op_done = 0;

	unsigned long start = (unsigned long)&jr.info[head] &
					~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN((unsigned long)&jr.info[head] +
				  sizeof(struct jr_info), ARCH_DMA_MINALIGN);
	flush_dcache_range(start, end);

#ifdef CONFIG_PHYS_64BIT
	/* Write the 64 bit Descriptor address on the Input Ring.
	 * The 32 bit high and low parts of the address will
	 * depend on the endianness of the SEC block.
	 */
#ifdef CONFIG_SYS_FSL_SEC_LE
	addr_lo = (uint32_t *)(&jr.input_ring[head]);
	addr_hi = (uint32_t *)(&jr.input_ring[head]) + 1;
#elif defined(CONFIG_SYS_FSL_SEC_BE)
	addr_hi = (uint32_t *)(&jr.input_ring[head]);
	addr_lo = (uint32_t *)(&jr.input_ring[head]) + 1;
#endif /* ifdef CONFIG_SYS_FSL_SEC_LE */

	sec_out32(addr_hi, (uint32_t)(desc_phys_addr >> 32));
	sec_out32(addr_lo, (uint32_t)(desc_phys_addr));

#else
	/* Write the 32 bit Descriptor address on Input Ring. */
	sec_out32(&jr.input_ring[head], desc_phys_addr);
#endif /* ifdef CONFIG_PHYS_64BIT */

	start = (unsigned long)&jr.input_ring[head] & ~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN((unsigned long)&jr.input_ring[head] +
		     sizeof(dma_addr_t), ARCH_DMA_MINALIGN);
	flush_dcache_range(start, end);

	jr.head = (head + 1) & (jr.size - 1);

	/* Invalidate output ring */
	start = (unsigned long)jr.output_ring &
					~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN((unsigned long)jr.output_ring + jr.op_size,
		     ARCH_DMA_MINALIGN);
	invalidate_dcache_range(start, end);

	sec_out32(&regs->irja, 1);

	return 0;
}
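
The ring discipline above rounds every flush and invalidate out to ARCH_DMA_MINALIGN boundaries so cache maintenance never clips a neighbouring structure that shares a line. A sketch of that rounding (ring_slot_publish() is a hypothetical name):

static void ring_slot_publish(void *slot, size_t size)
{
	unsigned long start = (unsigned long)slot & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN((unsigned long)slot + size,
				  ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}
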
Example #20
static void mxc_udc_queue_update(u8 epnum,
	u8 *data, u32 len, u32 tx)
{
	struct mxc_ep_t *ep;
	struct ep_queue_item *tqi, *head, *last;
	int send = 0;
	int in;

	head = last = NULL;
	in = ep_is_in(epnum, tx);
	ep = mxc_udc.mxc_ep + (epnum * 2 + in);
	DBG("epnum = %d,  in = %d\n", epnum, in);
	do {
		tqi = ep->ep_dtd[ep->index];
		DBG("%s, index = %d, tqi = %p\n", __func__, ep->index, tqi);
		while (mxc_tqi_is_busy(tqi))
			;
		mxc_tqi_init_page(tqi);
		DBG("%s, line = %d, len = %d\n", __func__, __LINE__, len);
		inc_index(ep->index);
		send = min(len, ep->max_pkt_size);
		if (data) {
			memcpy((void *)tqi->page_vir, (void *)data, send);
			_dump_buf((u8 *)(tqi->page_vir), send);
		}
		flush_dcache_range((unsigned long)(tqi->page_vir),
				CACHE_ALIGNED_END(tqi->page_vir, ep->max_pkt_size));
		if (!head)
			last = head = tqi;
		else {
			last->next_item_ptr = virt_to_phys(tqi);
			last->next_item_vir = tqi;
			last = tqi;
		}
		if (!tx)
			tqi->reserved[0] = send;
		/* we set IOC for every dtd */
		tqi->info = ((send << 16) | (1 << 15) | (1 << 7));
		data += send;
		len -= send;

		flush_dcache_range((unsigned long)tqi,
			CACHE_ALIGNED_END(tqi, sizeof(struct ep_queue_item)));
	} while (len);

	last->next_item_ptr = 0x1; /* end */
	flush_dcache_range((unsigned long)last,
			CACHE_ALIGNED_END(last, sizeof(struct ep_queue_item)));

	if (ep->tail) {
		ep->tail->next_item_ptr = virt_to_phys(head);
		ep->tail->next_item_vir = head;
		flush_dcache_range((unsigned long)(ep->tail),
			CACHE_ALIGNED_END(ep->tail, sizeof(struct ep_queue_item)));

		if (mxc_ep_xfer_is_working(ep, in)) {
			DBG("ep is working\n");
			goto out;
		}
	}
	mxc_update_qh(ep, head, in);
out:
	ep->tail = last;
}
Example #21
static int zynqmp_qspi_genfifo_fill_rx(struct zynqmp_qspi_priv *priv)
{
	u32 gen_fifo_cmd;
	u32 *buf;
	u32 addr;
	u32 size, len;
	u32 timeout = 10000000;
	u32 actuallen = priv->len;
	struct zynqmp_qspi_regs *regs = priv->regs;
	struct zynqmp_qspi_dma_regs *dma_regs = priv->dma_regs;

	gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
	gen_fifo_cmd |= ZYNQMP_QSPI_GFIFO_RX |
			ZYNQMP_QSPI_GFIFO_DATA_XFR_MASK;

	if (last_cmd == QUAD_OUT_READ_CMD)
		gen_fifo_cmd |= ZYNQMP_QSPI_SPI_MODE_QSPI;
	else
		gen_fifo_cmd |= ZYNQMP_QSPI_SPI_MODE_SPI;

	if (priv->stripe)
		gen_fifo_cmd |= ZYNQMP_QSPI_GFIFO_STRIPE_MASK;

	ALLOC_CACHE_ALIGN_BUFFER(u8, tmp, roundup(priv->len, 4));

	/*
	 * Use the caller's buffer only if it is word-aligned and a multiple
	 * of 4 bytes; otherwise bounce through tmp. The bounce buffer is
	 * declared at function scope so it stays valid until the final
	 * memcpy() below.
	 */
	if (!((u32)priv->rx_buf & 0x3) && !(actuallen % 4))
		buf = (u32 *)priv->rx_buf;
	else
		buf = (u32 *)tmp;
	writel((u32)buf, &dma_regs->dmadst);
	writel(roundup(priv->len, 4), &dma_regs->dmasize);
	writel(ZYNQMP_QSPI_DMA_DST_I_STS_MASK, &dma_regs->dmaier);
	addr = (u32)buf;
	size = roundup(priv->len, ARCH_DMA_MINALIGN);
	flush_dcache_range(addr, addr+size);

	while (priv->len) {
		len = zynqmp_qspi_calc_exp(priv, &gen_fifo_cmd);
		if (!(gen_fifo_cmd & ZYNQMP_QSPI_GFIFO_EXP_MASK) &&
		    (len % 4)) {
			gen_fifo_cmd &= ~(0xFF);
			gen_fifo_cmd |= (len/4 + 1) * 4;
		}
		writel(gen_fifo_cmd, &regs->genfifo);

		debug("GFIFO_CMD_RX:0x%x\n", gen_fifo_cmd);
	}

	while (timeout) {
		if (readl(&dma_regs->dmaisr) &
		    ZYNQMP_QSPI_DMA_DST_I_STS_DONE) {
			writel(ZYNQMP_QSPI_DMA_DST_I_STS_DONE,
			       &dma_regs->dmaisr);
			break;
		}
		timeout--;
	}

	debug("buf:0x%lx, rxbuf:0x%lx, *buf:0x%x len: 0x%x\n",
	      (unsigned long)buf, (unsigned long)priv->rx_buf, *buf,
	      actuallen);
	if (!timeout) {
		debug("DMA Timeout:0x%x\n", readl(&dma_regs->dmaisr));
		return -1;
	}

	if (buf != priv->rx_buf)
		memcpy(priv->rx_buf, buf, actuallen);

	return 0;
}
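The countdown loop above polls dmaisr up to ten million times with no delay. Newer U-Boot trees offer a generic helper for exactly this pattern; a sketch of an equivalent call, assuming wait_for_bit_le32() from <wait_bit.h> is available in this tree:

#include <wait_bit.h>

/* Wait up to 1000 ms for the DMA-done bit; do not treat ctrl-c as abort. */
static int zynqmp_qspi_wait_dma_done(struct zynqmp_qspi_dma_regs *dma_regs)
{
	return wait_for_bit_le32(&dma_regs->dmaisr,
				 ZYNQMP_QSPI_DMA_DST_I_STS_DONE,
				 true, 1000, false);
}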
Example No. 22
static int
ehci_submit_async(struct usb_device *dev, unsigned long pipe, void *buffer,
		   int length, struct devrequest *req)
{
	static struct QH qh __attribute__((aligned(32)));
	static struct qTD qtd[3] __attribute__((aligned (32)));
	int qtd_counter = 0;

	volatile struct qTD *vtd;
	unsigned long ts;
	uint32_t *tdp;
	uint32_t endpt, token, usbsts;
	uint32_t c, toggle;
	uint32_t cmd;
	int timeout;
	int ret = 0;

	debug("dev=%p, pipe=%lx, buffer=%p, length=%d, req=%p\n", dev, pipe,
	      buffer, length, req);
	if (req != NULL)
		debug("req=%u (%#x), type=%u (%#x), value=%u (%#x), index=%u\n",
		      req->request, req->request,
		      req->requesttype, req->requesttype,
		      le16_to_cpu(req->value), le16_to_cpu(req->value),
		      le16_to_cpu(req->index));

	memset(&qh, 0, sizeof(struct QH));
	memset(qtd, 0, sizeof(qtd));

	toggle = usb_gettoggle(dev, usb_pipeendpoint(pipe), usb_pipeout(pipe));

	/*
	 * Setup QH (3.6 in ehci-r10.pdf)
	 *
	 *   qh_link ................. 03-00 H
	 *   qh_endpt1 ............... 07-04 H
	 *   qh_endpt2 ............... 0B-08 H
	 * - qh_curtd
	 *   qh_overlay.qt_next ...... 13-10 H
	 * - qh_overlay.qt_altnext
	 */
	qh.qh_link = cpu_to_hc32((uint32_t)PHYSICAL_ADDR(&qh_list) | QH_LINK_TYPE_QH);
	c = (usb_pipespeed(pipe) != USB_SPEED_HIGH &&
	     usb_pipeendpoint(pipe) == 0) ? 1 : 0;
	endpt = (8 << 28) |
	    (c << 27) |
	    (usb_maxpacket(dev, pipe) << 16) |
	    (0 << 15) |
	    (1 << 14) |
	    (usb_pipespeed(pipe) << 12) |
	    (usb_pipeendpoint(pipe) << 8) |
	    (0 << 7) | (usb_pipedevice(pipe) << 0);
	qh.qh_endpt1 = cpu_to_hc32(endpt);
	endpt = (1 << 30) |
	    (dev->portnr << 23) |
	    (dev->parent->devnum << 16) | (0 << 8) | (0 << 0);
	qh.qh_endpt2 = cpu_to_hc32(endpt);
	qh.qh_overlay.qt_next = cpu_to_hc32(QT_NEXT_TERMINATE);

	tdp = &qh.qh_overlay.qt_next;

	if (req != NULL) {
		/*
		 * Setup request qTD (3.5 in ehci-r10.pdf)
		 *
		 *   qt_next ................ 03-00 H
		 *   qt_altnext ............. 07-04 H
		 *   qt_token ............... 0B-08 H
		 *
		 *   [ buffer, buffer_hi ] loaded with "req".
		 */
		qtd[qtd_counter].qt_next = cpu_to_hc32(QT_NEXT_TERMINATE);
		qtd[qtd_counter].qt_altnext = cpu_to_hc32(QT_NEXT_TERMINATE);
		token = (0 << 31) |
		    (sizeof(*req) << 16) |
		    (0 << 15) | (0 << 12) | (3 << 10) | (2 << 8) | (0x80 << 0);
		qtd[qtd_counter].qt_token = cpu_to_hc32(token);
		if (ehci_td_buffer(&qtd[qtd_counter], req, sizeof(*req)) != 0) {
			debug("unable construct SETUP td\n");
			goto fail;
		}
		/* Update previous qTD! */
		*tdp = cpu_to_hc32((uint32_t)PHYSICAL_ADDR(&qtd[qtd_counter]));
		tdp = &qtd[qtd_counter++].qt_next;
		toggle = 1;
	}

	if (length > 0 || req == NULL) {
		/*
		 * Setup DATA qTD (3.5 in ehci-r10.pdf)
		 *
		 *   qt_next ................ 03-00 H
		 *   qt_altnext ............. 07-04 H
		 *   qt_token ............... 0B-08 H
		 *
		 *   [ buffer, buffer_hi ] loaded with "buffer".
		 */
		qtd[qtd_counter].qt_next = cpu_to_hc32(QT_NEXT_TERMINATE);
		qtd[qtd_counter].qt_altnext = cpu_to_hc32(QT_NEXT_TERMINATE);
		token = (toggle << 31) |
		    (length << 16) |
		    ((req == NULL ? 1 : 0) << 15) |
		    (0 << 12) |
		    (3 << 10) |
		    ((usb_pipein(pipe) ? 1 : 0) << 8) | (0x80 << 0);
		qtd[qtd_counter].qt_token = cpu_to_hc32(token);
		if (ehci_td_buffer(&qtd[qtd_counter], buffer, length) != 0) {
			debug("unable construct DATA td\n");
			goto fail;
		}
		/* Update previous qTD! */
		*tdp = cpu_to_hc32((uint32_t)PHYSICAL_ADDR(&qtd[qtd_counter]));
		tdp = &qtd[qtd_counter++].qt_next;
	}

	if (req != NULL) {
		/*
		 * Setup STATUS qTD (3.5 in ehci-r10.pdf)
		 *
		 *   qt_next ................ 03-00 H
		 *   qt_altnext ............. 07-04 H
		 *   qt_token ............... 0B-08 H
		 */
		qtd[qtd_counter].qt_next = cpu_to_hc32(QT_NEXT_TERMINATE);
		qtd[qtd_counter].qt_altnext = cpu_to_hc32(QT_NEXT_TERMINATE);
		token = (toggle << 31) |
		    (0 << 16) |
		    (1 << 15) |
		    (0 << 12) |
		    (3 << 10) |
		    ((usb_pipein(pipe) ? 0 : 1) << 8) | (0x80 << 0);
		qtd[qtd_counter].qt_token = cpu_to_hc32(token);
		/* Update previous qTD! */
		*tdp = cpu_to_hc32((uint32_t)PHYSICAL_ADDR(&qtd[qtd_counter]));
		tdp = &qtd[qtd_counter++].qt_next;
	}

	qh_list.qh_link = cpu_to_hc32((uint32_t) PHYSICAL_ADDR(&qh) | QH_LINK_TYPE_QH);

	/* Flush dcache */
	flush_dcache_range((uint32_t)&qh_list,
		(uint32_t)&qh_list + sizeof(struct QH));
	flush_dcache_range((uint32_t)&qh, (uint32_t)&qh + sizeof(struct QH));
	flush_dcache_range((uint32_t)qtd, (uint32_t)qtd + sizeof(qtd));

	usbsts = ehci_readl(&hcor->or_usbsts);
	ehci_writel(&hcor->or_usbsts, (usbsts & 0x3f));

	/* Enable async. schedule. */
	cmd = ehci_readl(&hcor->or_usbcmd);
	cmd |= CMD_ASE;
	ehci_writel(&hcor->or_usbcmd, cmd);

	ret = handshake((uint32_t *)&hcor->or_usbsts, STD_ASS, STD_ASS,
			100 * 1000);
	if (ret < 0) {
		printf("EHCI fail timeout STD_ASS set\n");
		goto fail;
	}

	/* Wait for TDs to be processed. */
	ts = get_timer(0);
	vtd = &qtd[qtd_counter - 1];
	timeout = USB_TIMEOUT_MS(pipe);
	do {
		/* Invalidate dcache */
		invalidate_dcache_range((uint32_t)&qh_list,
			(uint32_t)&qh_list + sizeof(struct QH));
		invalidate_dcache_range((uint32_t)&qh,
			(uint32_t)&qh + sizeof(struct QH));
		invalidate_dcache_range((uint32_t)qtd,
			(uint32_t)qtd + sizeof(qtd));

		token = hc32_to_cpu(vtd->qt_token);
		if (!(token & 0x80))
			break;
		WATCHDOG_RESET();
	} while (get_timer(ts) < timeout);

	/* Invalidate the memory area occupied by buffer */
	if (buffer != NULL) {
		invalidate_dcache_range(((uint32_t)buffer & ~31),
			((uint32_t)buffer & ~31) + roundup(length, 32));
	}

	/* Check that the TD processing happened */
	if (token & 0x80) {
		printf("EHCI timed out on TD - token=%#x\n", token);
	}

	/* Disable async schedule. */
	cmd = ehci_readl(&hcor->or_usbcmd);
	cmd &= ~CMD_ASE;
	ehci_writel(&hcor->or_usbcmd, cmd);

	ret = handshake((uint32_t *)&hcor->or_usbsts, STD_ASS, 0,
			100 * 1000);
	if (ret < 0) {
		printf("EHCI fail timeout STD_ASS reset\n");
		goto fail;
	}

	qh_list.qh_link = cpu_to_hc32((uint32_t)PHYSICAL_ADDR(&qh_list) | QH_LINK_TYPE_QH);

	token = hc32_to_cpu(qh.qh_overlay.qt_token);
	if (!(token & 0x80)) {
		debug("TOKEN=%#x\n", token);
		switch (token & 0xfc) {
		case 0:
			toggle = token >> 31;
			usb_settoggle(dev, usb_pipeendpoint(pipe),
				       usb_pipeout(pipe), toggle);
			dev->status = 0;
			break;
		case 0x40:
			dev->status = USB_ST_STALLED;
			break;
		case 0xa0:
		case 0x20:
			dev->status = USB_ST_BUF_ERR;
			break;
		case 0x50:
		case 0x10:
			dev->status = USB_ST_BABBLE_DET;
			break;
		default:
			dev->status = USB_ST_CRC_ERR;
			if ((token & 0x40) == 0x40)
				dev->status |= USB_ST_STALLED;
			break;
		}
		dev->act_len = length - ((token >> 16) & 0x7fff);
	} else {
		/*
		 * Minimal reconstruction of the truncated tail of this
		 * listing: the last qTD was never processed, so report
		 * that no data was transferred.
		 */
		dev->act_len = 0;
	}

	return (dev->status != USB_ST_NOT_PROC) ? 0 : -1;

fail:
	printf("fail timeout\n");
	return -1;
}
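For reference, the values tested in the switch above are qTD token status bits (EHCI specification, section 3.5.3). A hedged summary of the relevant masks (the names are illustrative; the listing does not define them):

#define QT_TOKEN_STATUS_ACTIVE		0x80	/* still owned by the controller */
#define QT_TOKEN_STATUS_HALTED		0x40	/* stall or serious error */
#define QT_TOKEN_STATUS_DATBUFERR	0x20	/* data buffer over/underrun */
#define QT_TOKEN_STATUS_BABBLE		0x10	/* babble detected */
#define QT_TOKEN_STATUS_XACTERR		0x08	/* CRC, timeout or bad PID */

So case 0x40 maps to a stall, 0x20 and 0xa0 to buffer errors, and 0x10 and 0x50 to babble, as the switch encodes.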
Example No. 23
/* Flush the TF params and the TF plat params */
void bl2_plat_flush_bl31_params(void)
{
	flush_dcache_range((unsigned long)&bl31_params_mem,
			sizeof(bl2_to_bl31_params_mem_t));
}
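Note that this example comes from Trusted Firmware, where flush_dcache_range() takes a base address and a size, unlike the U-Boot examples above, which pass a start and an end address. The helper there is declared along the lines of:

void flush_dcache_range(unsigned long addr, unsigned long size);

so passing sizeof(bl2_to_bl31_params_mem_t) as the second argument is correct here.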
Example No. 24
int gmac_check_rx_done(struct eth_dma *dma, uint8_t *buf)
{
	void *bufp, *datap;
	size_t rcvlen = 0, buflen = 0;
	uint32_t stat0 = 0, stat1 = 0;
	uint32_t control, offset;
	uint8_t statbuf[HWRXOFF*2];

	int index, curr, active;
	dma64dd_t *descp = NULL;

	/* udelay(50); */

	/*
	 * This function checks whether a packet has been received.  If so,
	 * it returns the length of the frame and advances the current
	 * descriptor index to the next descriptor.  Once done with the
	 * frame, the buffer should be added back onto the descriptor and
	 * the lastdscr should be updated to this descriptor.
	 */
	index = dma->cur_rx_index;
	offset = (uint32_t)(dma->rx_desc_aligned);
	stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR) & D64_RS0_CD_MASK;
	stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR) & D64_RS0_CD_MASK;
	curr = ((stat0 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);
	active = ((stat1 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);

	/* check if any frame */
	if (index == curr)
		return -1;

	debug("received packet\n");
	debug("expect(0x%x) curr(0x%x) active(0x%x)\n", index, curr, active);
	/* silence the set-but-unused warning when debug() is compiled out */
	(void)active;

	/* get the packet pointer that corresponds to the rx descriptor */
	bufp = dma->rx_buf + index * RX_BUF_SIZE_ALIGNED;

	descp = (dma64dd_t *)(dma->rx_desc_aligned) + index;
	/*
	 * Flush (clean + invalidate on ARM) the descriptor and buffer so
	 * the CPU re-reads what the DMA engine wrote to memory.
	 */
	flush_dcache_range((unsigned long)dma->rx_desc_aligned,
			   (unsigned long)dma->rx_desc_aligned +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);
	flush_dcache_range((unsigned long)bufp,
			   (unsigned long)bufp + RX_BUF_SIZE_ALIGNED);

	buflen = (descp->ctrl2 & D64_CTRL2_BC_MASK);

	stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR);
	stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR);

	debug("bufp(0x%x) index(0x%x) buflen(0x%x) stat0(0x%x) stat1(0x%x)\n",
	      (uint32_t)bufp, index, buflen, stat0, stat1);

	dma->cur_rx_index = (index + 1) & (RX_BUF_NUM - 1);

	/* get buffer offset */
	control = readl(GMAC0_DMA_RX_CTRL_ADDR);
	offset = (control & D64_RC_RO_MASK) >> D64_RC_RO_SHIFT;
	rcvlen = *(uint16_t *)bufp;

	debug("Received %d bytes\n", rcvlen);
	/* copy status into temp buf then copy data from rx buffer */
	memcpy(statbuf, bufp, offset);
	datap = (void *)((uint32_t)bufp + offset);
	memcpy(buf, datap, rcvlen);

	/* update descriptor that is being added back on ring */
	descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
	descp->addrlow = (uint32_t)bufp;
	descp->addrhigh = 0;
	/* flush descriptor */
	flush_dcache_range((unsigned long)dma->rx_desc_aligned,
			   (unsigned long)dma->rx_desc_aligned +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);

	/* set the lastdscr for the rx ring */
	writel(((uint32_t)descp) & D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);

	return (int)rcvlen;
}
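A hypothetical caller, to illustrate the return convention above (the wrapper name and buffer handling are illustrative, not from the source):

/* Poll the RX ring and hand each completed frame to the network stack. */
static void gmac_poll_rx(struct eth_dma *dma)
{
	static uint8_t frame[RX_BUF_SIZE_ALIGNED];
	int len;

	while ((len = gmac_check_rx_done(dma, frame)) > 0)
		net_process_received_packet(frame, len);
}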
Example No. 25
static int
scc_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
{
	struct scc_enet_private *cep = (struct scc_enet_private *)rtdev->priv;
	volatile cbd_t	*bdp;

	RT_DEBUG(__FUNCTION__": ...\n");

	/* Fill in a Tx ring entry */
	bdp = cep->cur_tx;

#ifndef final_version
	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		/* Oops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since cep->tx_busy should be set.
		 */
		printk("%s: tx queue full!\n", rtdev->name);
		return 1;
	}
#endif

	/* Clear all of the status flags.
	 */
	bdp->cbd_sc &= ~BD_ENET_TX_STATS;

	/* If the frame is short, tell CPM to pad it.
	*/
	if (skb->len <= ETH_ZLEN)
		bdp->cbd_sc |= BD_ENET_TX_PAD;
	else
		bdp->cbd_sc &= ~BD_ENET_TX_PAD;

	/* Set buffer length and buffer pointer.
	*/
	bdp->cbd_datlen = skb->len;
	bdp->cbd_bufaddr = __pa(skb->data);

	/* Save skb pointer.
	*/
	cep->tx_skbuff[cep->skb_cur] = skb;

	cep->stats.tx_bytes += skb->len;
	cep->skb_cur = (cep->skb_cur+1) & TX_RING_MOD_MASK;
	
	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	flush_dcache_range((unsigned long)(skb->data),
					(unsigned long)(skb->data + skb->len));

	/* Prevent interrupts from changing the Tx ring from underneath us. */
	// *** RTnet ***
	rt_sem_wait(&rtdev->xmit_sem);
	rt_disable_irq(rtdev->irq);
	rt_spin_lock(&cep->lock);

	/* Send it on its way.  Tell the CPM it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | BD_ENET_TX_LAST | BD_ENET_TX_TC);
#if 0
	dev->trans_start = jiffies;
#endif

	/* If this was the last BD in the ring, start at the beginning again.
	*/
	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
		bdp = cep->tx_bd_base;
	else
		bdp++;

	if (bdp->cbd_sc & BD_ENET_TX_READY) {
	        rtnetif_stop_queue(rtdev);
		cep->tx_full = 1;
	}

	cep->cur_tx = (cbd_t *)bdp;

	rt_spin_unlock(&cep->lock);
	rt_enable_irq(rtdev->irq);
	rt_sem_signal(&rtdev->xmit_sem);

	return 0;
}
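The descriptor flags used above come from the 8xx CPM ethernet headers; a hedged summary of the bits this function touches, with values as found in the mainline headers (quoted for reference, not from this listing):

#define BD_ENET_TX_READY	((ushort)0x8000)	/* BD owned by the CPM */
#define BD_ENET_TX_PAD		((ushort)0x4000)	/* pad short frames */
#define BD_ENET_TX_WRAP		((ushort)0x2000)	/* last BD in the ring */
#define BD_ENET_TX_INTR		((ushort)0x1000)	/* interrupt on completion */
#define BD_ENET_TX_LAST		((ushort)0x0800)	/* last BD of the frame */
#define BD_ENET_TX_TC		((ushort)0x0400)	/* append CRC */
#define BD_ENET_TX_STATS	((ushort)0x03ff)	/* status/error bits */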
Example No. 26
static int
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fec_enet_private *fep;
	volatile fec_t	*fecp;
	volatile cbd_t	*bdp;

	fep = dev->priv;
	fecp = (volatile fec_t*)dev->base_addr;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		return 1;
	}

	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

#ifndef final_version
	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		/* Oops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since dev->tbusy should be set.
		 */
		printk("%s: tx queue full!\n", dev->name);
		return 1;
	}
#endif

	/* Clear all of the status flags.
	 */
	bdp->cbd_sc &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer.
	*/
	bdp->cbd_bufaddr = __pa(skb->data);
	bdp->cbd_datlen = skb->len;

	/* Save skb pointer.
	*/
	fep->tx_skbuff[fep->skb_cur] = skb;

	fep->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	flush_dcache_range((unsigned long)skb->data,
			   (unsigned long)skb->data + skb->len);

	/* disable interrupts while triggering transmit */
	spin_lock_irq(&fep->lock);

	/* Send it on its way.  Tell the FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */

	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);

	dev->trans_start = jiffies;

	/* Trigger transmission start */
	fecp->fec_x_des_active = 0x01000000;

	/* If this was the last BD in the ring, start at the beginning again.
	*/
	if (bdp->cbd_sc & BD_ENET_TX_WRAP) {
		bdp = fep->tx_bd_base;
	} else {
		bdp++;
	}

	if (bdp->cbd_sc & BD_ENET_TX_READY) {
		netif_stop_queue(dev);
		fep->tx_full = 1;
	}

	fep->cur_tx = (cbd_t *)bdp;

	spin_unlock_irq(&fep->lock);

	return 0;
}
Example No. 27
/**
 * Start the FEC engine
 * @param[in] edev Our device to handle
 */
static int fec_open(struct eth_device *edev)
{
	struct fec_priv *fec = (struct fec_priv *)edev->priv;
	int speed;
	uint32_t addr, size;
	int i;

	debug("fec_open: fec_open(dev)\n");
	/* full-duplex, heartbeat disabled */
	writel(1 << 2, &fec->eth->x_cntrl);
	fec->rbd_index = 0;

	/* Invalidate all descriptors */
	for (i = 0; i < FEC_RBD_NUM - 1; i++)
		fec_rbd_clean(0, &fec->rbd_base[i]);
	fec_rbd_clean(1, &fec->rbd_base[i]);

	/* Flush the descriptors into RAM */
	size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd),
			ARCH_DMA_MINALIGN);
	addr = (uint32_t)fec->rbd_base;
	flush_dcache_range(addr, addr + size);

#ifdef FEC_QUIRK_ENET_MAC
	/* Enable ENET HW endian SWAP */
	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_DBSWAP,
		&fec->eth->ecntrl);
	/* Enable ENET store and forward mode */
	writel(readl(&fec->eth->x_wmrk) | FEC_X_WMRK_STRFWD,
		&fec->eth->x_wmrk);
#endif
	/*
	 * Enable FEC-Lite controller
	 */
	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_ETHER_EN,
		&fec->eth->ecntrl);
#if defined(CONFIG_MX25) || defined(CONFIG_MX53) || defined(CONFIG_MX6SL)
	udelay(100);
	/*
	 * setup the MII gasket for RMII mode
	 */

	/* disable the gasket */
	writew(0, &fec->eth->miigsk_enr);

	/* wait for the gasket to be disabled */
	while (readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY)
		udelay(2);

	/* configure gasket for RMII, 50 MHz, no loopback, and no echo */
	writew(MIIGSK_CFGR_IF_MODE_RMII, &fec->eth->miigsk_cfgr);

	/* re-enable the gasket */
	writew(MIIGSK_ENR_EN, &fec->eth->miigsk_enr);

	/* wait until MII gasket is ready */
	int max_loops = 10;
	while ((readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY) == 0) {
		if (--max_loops <= 0) {
			printf("WAIT for MII Gasket ready timed out\n");
			break;
		}
	}
#endif

#ifdef CONFIG_PHYLIB
	{
		/* Start up the PHY */
		int ret = phy_startup(fec->phydev);

		if (ret) {
			printf("Could not initialize PHY %s\n",
			       fec->phydev->dev->name);
			return ret;
		}
		speed = fec->phydev->speed;
	}
#elif CONFIG_FEC_FIXED_SPEED
	speed = CONFIG_FEC_FIXED_SPEED;
#else
	miiphy_wait_aneg(edev);
	speed = miiphy_speed(edev->name, fec->phy_id);
	miiphy_duplex(edev->name, fec->phy_id);
#endif

#ifdef FEC_QUIRK_ENET_MAC
	{
		u32 ecr = readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_SPEED;
		u32 rcr = readl(&fec->eth->r_cntrl) & ~FEC_RCNTRL_RMII_10T;
		if (speed == _1000BASET)
			ecr |= FEC_ECNTRL_SPEED;
		else if (speed != _100BASET)
			rcr |= FEC_RCNTRL_RMII_10T;
		writel(ecr, &fec->eth->ecntrl);
		writel(rcr, &fec->eth->r_cntrl);
	}
#endif
	debug("%s:Speed=%i\n", __func__, speed);

	/*
	 * Enable SmartDMA receive task
	 */
	fec_rx_task_enable(fec);

	udelay(100000);
	return 0;
}
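fec_rbd_clean() is called above but not included in the listing. A sketch of what it must do, inferred from the call sites (the 'last' flag marks the final descriptor so the controller wraps back to the start of the ring):

static void fec_rbd_clean(int last, struct fec_bd *prbd)
{
	unsigned short flags = FEC_RBD_EMPTY;	/* hand the BD back to the MAC */

	if (last)
		flags |= FEC_RBD_WRAP;		/* wrap at the end of the ring */
	writew(flags, &prbd->status);
	writew(0, &prbd->data_length);
}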
Example No. 28
/*******************************************************************************
 * Generic function to load an image into the trusted RAM using semihosting
 * given a name, extents of free memory & whether the image should be loaded at
 * the bottom or top of the free memory. It updates the memory layout if the
 * load is successful.
 ******************************************************************************/
unsigned long load_image(meminfo *mem_layout,
			 const char *image_name,
			 unsigned int load_type,
			 unsigned long fixed_addr)
{
	unsigned long temp_image_base, image_base;
	long offset;
	int image_flen;

	/* Find the size of the image */
	image_flen = semihosting_get_flen(image_name);
	if (image_flen < 0) {
		printf("ERROR: Cannot access '%s' file (%i).\r\n",
			image_name, image_flen);
		return 0;
	}

	/* See if we have enough space */
	if (image_flen > mem_layout->free_size) {
		printf("ERROR: Cannot load '%s' file: Not enough space.\r\n",
			image_name);
		dump_load_info(0, image_flen, mem_layout);
		return 0;
	}

	switch (load_type) {

	case TOP_LOAD:

	  /* Load the image in the top of free memory */
	  temp_image_base = mem_layout->free_base + mem_layout->free_size;
	  temp_image_base -= image_flen;

	  /* Page align base address and check whether the image still fits */
	  image_base = page_align(temp_image_base, DOWN);
	  assert(image_base <= temp_image_base);

	  if (image_base < mem_layout->free_base) {
		  printf("ERROR: Cannot load '%s' file: Not enough space.\r\n",
			  image_name);
		  dump_load_info(image_base, image_flen, mem_layout);
		  return 0;
	  }

	  /* Calculate the amount of extra memory used due to alignment */
	  offset = temp_image_base - image_base;

	  break;

	case BOT_LOAD:

	  /* Load the BL2 image in the bottom of free memory */
	  temp_image_base = mem_layout->free_base;
	  image_base = page_align(temp_image_base, UP);
	  assert(image_base >= temp_image_base);

	  /* Page align base address and check whether the image still fits */
	  if (image_base + image_flen >
	      mem_layout->free_base + mem_layout->free_size) {
		  printf("ERROR: Cannot load '%s' file: Not enough space.\r\n",
			  image_name);
		  dump_load_info(image_base, image_flen, mem_layout);
		  return 0;
	  }

	  /* Calculate the amount of extra memory used due to alignment */
	  offset = image_base - temp_image_base;

	  break;

	default:
	  assert(0);

	}

	/*
	 * Some images must be loaded at a fixed address, not a dynamic one.
	 *
	 * This has been implemented as a hack on top of the existing dynamic
	 * loading mechanism, for the time being.  If the 'fixed_addr' function
	 * argument is different from zero, then it will force the load address.
	 * So we still have this principle of top/bottom loading but the code
	 * determining the load address is bypassed and the load address is
	 * forced to the fixed one.
	 *
	 * This can result in quite a lot of wasted space because we still use
	 * a single meminfo structure to represent the extents of free memory,
	 * whereas some sort of linked list would be more appropriate.
	 *
	 * E.g. we want to load BL2 at address 0x04020000, the resulting memory
	 *      layout should look as follows:
	 * ------------ 0x04040000
	 * |          |  <- Free space (1)
	 * |----------|
	 * |   BL2    |
	 * |----------| 0x04020000
	 * |          |  <- Free space (2)
	 * |----------|
	 * |   BL1    |
	 * ------------ 0x04000000
	 *
	 * But in the current hacky implementation, we'll need to specify
	 * whether BL2 is loaded at the top or bottom of the free memory.
	 * E.g. if BL2 is considered as top-loaded, the meminfo structure
	 * will give the following view of the memory, hiding the chunk of
	 * free memory above BL2:
	 * ------------ 0x04040000
	 * |          |
	 * |          |
	 * |   BL2    |
	 * |----------| 0x04020000
	 * |          |  <- Free space (2)
	 * |----------|
	 * |   BL1    |
	 * ------------ 0x04000000
	 */
	if (fixed_addr != 0) {
		/* Load the image at the given address. */
		image_base = fixed_addr;

		/* Check whether the image fits. */
		if ((image_base < mem_layout->free_base) ||
		    (image_base + image_flen >
		       mem_layout->free_base + mem_layout->free_size)) {
			printf("ERROR: Cannot load '%s' file: Not enough space.\r\n",
				image_name);
			dump_load_info(image_base, image_flen, mem_layout);
			return 0;
		}

		/* Check whether the fixed load address is page-aligned. */
		if (!is_page_aligned(image_base)) {
			printf("ERROR: Cannot load '%s' file at unaligned address 0x%lx.\r\n",
				image_name, fixed_addr);
			return 0;
		}

		/*
		 * Calculate the amount of extra memory used due to fixed
		 * loading.
		 */
		if (load_type == TOP_LOAD) {
			unsigned long max_addr, space_used;
			/*
			 * ------------ max_addr
			 * | /wasted/ |                 | offset
			 * |..........|..............................
			 * |  image   |                 | image_flen
			 * |----------| fixed_addr
			 * |          |
			 * |          |
			 * ------------ total_base
			 */
			max_addr = mem_layout->total_base + mem_layout->total_size;
			/*
			 * Compute the amount of memory used by the image.
			 * Corresponds to all space above the image load
			 * address.
			 */
			space_used = max_addr - fixed_addr;
			/*
			 * Calculate the amount of wasted memory within the
			 * amount of memory used by the image.
			 */
			offset = space_used - image_flen;
		} else /* BOT_LOAD */
			/*
			 * ------------
			 * |          |
			 * |          |
			 * |----------|
			 * |  image   |
			 * |..........| fixed_addr
			 * | /wasted/ |                 | offset
			 * ------------ total_base
			 */
			offset = fixed_addr - mem_layout->total_base;
	}

	/* We have enough space so load the image now */
	image_flen = semihosting_download_file(image_name,
					       image_flen,
					       (void *) image_base);
	if (image_flen <= 0) {
		printf("ERROR: Failed to load '%s' file from semihosting (%i).\r\n",
			image_name, image_flen);
		return 0;
	}

	/*
	 * File has been successfully loaded. Update the free memory
	 * data structure & flush the contents of the TZRAM so that
	 * the next EL can see it.
	 */
	/* Update the memory contents */
	flush_dcache_range(image_base, image_flen);

	mem_layout->free_size -= image_flen + offset;

	/* Update the base of free memory since it has moved up */
	if (load_type == BOT_LOAD)
		mem_layout->free_base += offset + image_flen;

	return image_base;
}
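page_align() and is_page_aligned() are used above but not shown. A plausible sketch assuming 4 KB pages and UP/DOWN direction constants matching the call sites (the exact definitions are an assumption):

#define PAGE_SIZE_4K	(1 << 12)

static unsigned long page_align(unsigned long value, unsigned int dir)
{
	/* Round up or down to the nearest 4 KB boundary. */
	if (value & (PAGE_SIZE_4K - 1)) {
		value &= ~((unsigned long)PAGE_SIZE_4K - 1);
		if (dir == UP)
			value += PAGE_SIZE_4K;
	}
	return value;
}

static int is_page_aligned(unsigned long addr)
{
	return (addr & (PAGE_SIZE_4K - 1)) == 0;
}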
Example No. 29
/**
 * Pull one frame from the card
 * @param[in] dev Our ethernet device to handle
 * @return Length of packet read
 */
static int fec_recv(struct eth_device *dev)
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	struct fec_bd *rbd = &fec->rbd_base[fec->rbd_index];
	unsigned long ievent;
	int frame_length, len = 0;
	uint16_t bd_status;
	uint32_t addr, size, end;
	int i;
	ALLOC_CACHE_ALIGN_BUFFER(uchar, buff, FEC_MAX_PKT_SIZE);

	/*
	 * Check if any critical events have happened
	 */
	ievent = readl(&fec->eth->ievent);
	writel(ievent, &fec->eth->ievent);
	debug("fec_recv: ievent 0x%lx\n", ievent);
	if (ievent & FEC_IEVENT_BABR) {
		fec_halt(dev);
		fec_init(dev, fec->bd);
		printf("some error: 0x%08lx\n", ievent);
		return 0;
	}
	if (ievent & FEC_IEVENT_HBERR) {
		/* Heartbeat error */
		writel(0x00000001 | readl(&fec->eth->x_cntrl),
				&fec->eth->x_cntrl);
	}
	if (ievent & FEC_IEVENT_GRA) {
		/* Graceful stop complete */
		if (readl(&fec->eth->x_cntrl) & 0x00000001) {
			fec_halt(dev);
			writel(~0x00000001 & readl(&fec->eth->x_cntrl),
					&fec->eth->x_cntrl);
			fec_init(dev, fec->bd);
		}
	}

	/*
	 * Read the buffer status. Before the status can be read, the data cache
	 * must be invalidated, because the data in RAM might have been changed
	 * by DMA. The descriptors are properly aligned to cachelines so there's
	 * no need to worry they'd overlap.
	 *
	 * WARNING: By invalidating the descriptor here, we also invalidate
	 * the descriptors surrounding this one. Therefore we can NOT change the
	 * contents of this descriptor nor the surrounding ones. The problem is
	 * that in order to mark the descriptor as processed, we need to change
	 * the descriptor. The solution is to mark the whole cache line when all
	 * descriptors in the cache line are processed.
	 */
	addr = (uint32_t)rbd;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup(sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	invalidate_dcache_range(addr, addr + size);

	bd_status = readw(&rbd->status);
	debug("fec_recv: status 0x%x\n", bd_status);

	if (!(bd_status & FEC_RBD_EMPTY)) {
		if ((bd_status & FEC_RBD_LAST) && !(bd_status & FEC_RBD_ERR) &&
			((readw(&rbd->data_length) - 4) > 14)) {
			/*
			 * Get buffer address and size
			 */
			addr = readl(&rbd->data_pointer);
			frame_length = readw(&rbd->data_length) - 4;
			/*
			 * Invalidate data cache over the buffer
			 */
			end = roundup(addr + frame_length, ARCH_DMA_MINALIGN);
			addr &= ~(ARCH_DMA_MINALIGN - 1);
			invalidate_dcache_range(addr, end);

			/*
			 *  Fill the buffer and pass it to upper layers
			 */
#ifdef CONFIG_FEC_MXC_SWAP_PACKET
			swap_packet((uint32_t *)addr, frame_length);
#endif
			memcpy(buff, (char *)addr, frame_length);
			net_process_received_packet(buff, frame_length);
			len = frame_length;
		} else {
			if (bd_status & FEC_RBD_ERR)
				printf("error frame: 0x%08x 0x%08x\n",
				       addr, bd_status);
		}

		/*
		 * Free the current buffer, restart the engine and move forward
		 * to the next buffer. Here we check if the whole cacheline of
		 * descriptors was already processed and if so, we mark it free
		 * as whole.
		 */
		size = RXDESC_PER_CACHELINE - 1;
		if ((fec->rbd_index & size) == size) {
			i = fec->rbd_index - size;
			addr = (uint32_t)&fec->rbd_base[i];
			for (; i <= fec->rbd_index ; i++) {
				fec_rbd_clean(i == (FEC_RBD_NUM - 1),
					      &fec->rbd_base[i]);
			}
			flush_dcache_range(addr,
				addr + ARCH_DMA_MINALIGN);
		}

		fec_rx_task_enable(fec);
		fec->rbd_index = (fec->rbd_index + 1) % FEC_RBD_NUM;
	}
	debug("fec_recv: stop\n");

	return len;
}
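RXDESC_PER_CACHELINE, used in the cache-line bookkeeping above, is assumed to be defined as in the mainline driver header, i.e. the number of receive descriptors sharing one cache line:

#define RXDESC_PER_CACHELINE	(ARCH_DMA_MINALIGN / sizeof(struct fec_bd))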
Example No. 30
/*
 * Consistent memory allocators. Used for DMA devices that want to
 * share uncached memory with the processor core.
 * My crufty no-MMU approach is simple. In the HW platform we can optionally
 * mirror the DDR up above the processor cacheable region.  So, memory accessed
 * in this mirror region will not be cached.  It's allocated from the same
 * pool as normal memory, but the handle we return is shifted up into the
 * uncached region.  This will no doubt cause big problems if memory allocated
 * here is not also freed properly. -- JW
 */
void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
{
	unsigned long order, vaddr;
	void *ret;
	unsigned int i, err = 0;
	struct page *page, *end;

#ifdef CONFIG_MMU
	phys_addr_t pa;
	struct vm_struct *area;
	unsigned long va;
#endif

	if (in_interrupt())
		BUG();

	/* Only allocate page size areas. */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	vaddr = __get_free_pages(gfp, order);
	if (!vaddr)
		return NULL;

	/*
	 * we need to ensure that there are no cachelines in use, or,
	 * worse, dirty, in this area.
	 */
	flush_dcache_range(virt_to_phys((void *)vaddr),
					virt_to_phys((void *)vaddr) + size);

#ifndef CONFIG_MMU
	ret = (void *)vaddr;
	/*
	 * Here's the magic!  Note if the uncached shadow is not implemented,
	 * it's up to the calling code to also test that condition and make
	 * other arrangements, such as manually flushing the cache and so on.
	 */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
# endif
	if ((unsigned int)ret > cpuinfo.dcache_base &&
				(unsigned int)ret < cpuinfo.dcache_high)
		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");

	/* dma_handle is same as physical (shadowed) address */
	*dma_handle = (dma_addr_t)ret;
#else
	/* Allocate some common virtual space to map the new pages. */
	area = get_vm_area(size, VM_ALLOC);
	if (!area) {
		free_pages(vaddr, order);
		return NULL;
	}
	va = (unsigned long) area->addr;
	ret = (void *)va;

	/* This gives us the real physical address of the first page. */
	*dma_handle = pa = __virt_to_phys(vaddr);
#endif

	/*
	 * free wasted pages.  We skip the first page since we know
	 * that it will have count = 1 and won't require freeing.
	 * We also mark the pages in use as reserved so that
	 * remap_page_range works.
	 */
	page = virt_to_page(vaddr);
	end = page + (1 << order);

	split_page(page, order);

	for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
#ifdef CONFIG_MMU
		/* MS: This is the whole magic - use cache inhibit pages */
		err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
#endif

		SetPageReserved(page);
		page++;
	}

	/* Free the otherwise unused pages. */
	while (page < end) {
		__free_page(page);
		page++;
	}

	if (err) {
		free_pages(vaddr, order);
		return NULL;
	}

	return ret;
}
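A hypothetical caller, to illustrate the contract (the wrapper is illustrative, not from the source): the returned CPU pointer is uncached, while *dma_handle is the address the device should be programmed with.

/* Allocate an uncached 4 KB area for a device descriptor ring. */
static void *alloc_uncached_ring(dma_addr_t *dma)
{
	return consistent_alloc(GFP_KERNEL, 4096, dma);
}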