/*
 * Poll worker: walks every MIC board and advances its virtual-ethernet link
 * state machine based on the magic values the card writes into scratch
 * registers 14/15.  LINKDOWN -> LINKUP hands the card the transfer-ring
 * physical address via the same two scratch registers.
 *
 * NOTE(review): this chunk appears truncated — the for-loop and function
 * closing braces are not visible here; the remainder presumably follows in
 * the original file.
 */
static void micveth_clientpoll(struct work_struct *work)
{
	micveth_info_t *veth_info;
	uint32_t transRingHi;
	uint32_t transRingLo;
	uint32_t scratch14 = 0;
	uint32_t scratch15 = 0;
	int bd;
	static int enter = 0;	/* one-shot flag so the banner prints only once */

	if (enter == 0) {
		printk("micveth is polling\n");
		enter = 1;
	}

	/* Honour a stop request: flip to STOPPED and wake the waiter. */
	mutex_lock(&micveth.lv_state_mutex);
	if (micveth.lv_pollstate == CLIENT_POLL_STOPPING) {
		micveth.lv_pollstate = CLIENT_POLL_STOPPED;
		mutex_unlock(&micveth.lv_state_mutex);
		wake_up(&micveth.lv_wq);
		return;
	}

	// Check for state changes for each board in the system
	for (bd = 0; bd < micveth.lv_num_clients; bd++) {
		veth_info = &micveth.lv_info[bd];

		// Do not poll boards that have not had the interface started.
		if (veth_info->vi_state == VETH_STATE_INITIALIZED) {
			break;
		}

#ifdef NO_SRATCHREGREAD_AFTER_CONNECT
		/* When this quirk is enabled, avoid touching the scratch
		 * registers once the link is up (the card reuses them). */
		if(veth_info->vi_state != VETH_STATE_LINKUP) {
#endif
			/* scratch14 carries the link magic; scratch15 is read
			 * alongside it but is not examined below. */
			scratch14 = readl(veth_info->vi_scratch14);
			scratch15 = readl(veth_info->vi_scratch15);
#ifdef NO_SRATCHREGREAD_AFTER_CONNECT
		}
#endif

		if (veth_info->vi_state == VETH_STATE_LINKUP) {
			/* Card signalled link loss. */
			if (scratch14 == MICVETH_LINK_DOWN_MAGIC) {
				veth_info->vi_state = VETH_STATE_LINKDOWN;
			}
		} else if (veth_info->vi_state == VETH_STATE_LINKDOWN) {
			if (scratch14 == MICVETH_LINK_UP_MAGIC) {
				// Write the transfer ring address.
				transRingHi = (uint32_t)(veth_info->vi_ring.phys >> 32);
				transRingLo = (uint32_t)(veth_info->vi_ring.phys & 0xffffffff);
				writel(transRingLo, veth_info->vi_scratch14);
				writel(transRingHi, veth_info->vi_scratch15);
				veth_info->vi_state = VETH_STATE_LINKUP;
				printk("MIC virtual ethernet up for board %d\n", bd);
#ifdef MIC_IS_EMULATION
				printk("Card wrote Magic: It must be UP!\n");
#endif
				if (mic_vnet_mode == VNET_MODE_POLL) {
					schedule_delayed_work(&veth_info->vi_poll,
						msecs_to_jiffies(MICVETH_POLL_TIMER_DELAY));
				}
				micveth.lv_num_links_remaining--;
			}
#ifdef MIC_IS_EMULATION
			/* Emulation debug aid: dump and clear any unexpected
			 * non-zero scratch value. */
			else if (scratch14) {
				printk("---> 0x%x \n", scratch14);
				writel(0x0, veth_info->vi_scratch14);
			}
#endif
		}
/*
 * adapt v9r1
 *
 * Read the current RTC counter: returns the raw 32-bit contents of the
 * CCVR register in the Balong RTC block.
 */
u32 balong_get_rtc_value (void)
{
	return readl(g_rtc_ctrl.rtc_base_addr + HI_RTC_CCVR_OFFSET);
}
/*
 * Probe one MXS application UART.
 *
 * Allocates the per-port state, acquires the clock, maps the register
 * window, requests the IRQ and registers the port with the serial core.
 * Returns 0 on success or a negative errno; all acquired resources are
 * released on every error path.
 *
 * Fixes vs. original: the ioremap() result is now checked for NULL (the
 * original would later dereference it at the AUART_VERSION read), and the
 * mapping is iounmap()ed on the error paths (it was previously leaked).
 *
 * NOTE(review): get_device(&pdev->dev) is still not balanced by put_device()
 * on failure, and pdev->id < 0 would index auart_port[] negatively — both
 * pre-existing issues left untouched to keep this change minimal.
 */
static int __devinit mxs_auart_probe(struct platform_device *pdev)
{
	struct mxs_auart_port *s;
	u32 version;
	int ret = 0;
	struct resource *r;

	s = kzalloc(sizeof(struct mxs_auart_port), GFP_KERNEL);
	if (!s) {
		ret = -ENOMEM;
		goto out;
	}

	s->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(s->clk)) {
		ret = PTR_ERR(s->clk);
		goto out_free;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		ret = -ENXIO;
		goto out_free_clk;
	}

	s->port.mapbase = r->start;
	s->port.membase = ioremap(r->start, resource_size(r));
	if (!s->port.membase) {
		/* Without this check the AUART_VERSION read below would
		 * dereference a NULL mapping. */
		ret = -ENOMEM;
		goto out_free_clk;
	}
	s->port.ops = &mxs_auart_ops;
	s->port.iotype = UPIO_MEM;
	s->port.line = pdev->id < 0 ? 0 : pdev->id;
	s->port.fifosize = 16;
	s->port.uartclk = clk_get_rate(s->clk);
	s->port.type = PORT_IMX;
	s->port.dev = s->dev = get_device(&pdev->dev);

	s->flags = 0;
	s->ctrl = 0;

	s->irq = platform_get_irq(pdev, 0);
	s->port.irq = s->irq;
	ret = request_irq(s->irq, mxs_auart_irq_handle, 0,
			  dev_name(&pdev->dev), s);
	if (ret)
		goto out_iounmap;

	platform_set_drvdata(pdev, s);

	auart_port[pdev->id] = s;

	mxs_auart_reset(&s->port);

	ret = uart_add_one_port(&auart_driver, &s->port);
	if (ret)
		goto out_free_irq;

	version = readl(s->port.membase + AUART_VERSION);
	dev_info(&pdev->dev, "Found APPUART %d.%d.%d\n",
		 (version >> 24) & 0xff, (version >> 16) & 0xff,
		 version & 0xffff);

	return 0;

out_free_irq:
	auart_port[pdev->id] = NULL;
	free_irq(s->irq, s);
out_iounmap:
	iounmap(s->port.membase);
out_free_clk:
	clk_put(s->clk);
out_free:
	kfree(s);
out:
	return ret;
}
/* Sample the key input: returns 1 when GPIO bit 3 reads low (pressed,
 * active-low), 0 otherwise. */
inline int get_key(void)
{
	unsigned int level = readl(P_AO_GPIO_I);

	return (level & (1 << 3)) == 0;
}
/*
 * Configure the Exynos5 I2S controller for transmit-only operation.
 *
 * Programs the mode register (SDF format, LR-clock frame size, bit depth,
 * bit-clock frame size) from the fields of @bus, then enables transmit and
 * flushes the TX FIFO.  Returns 0 on success, 1 on an unsupported
 * frame-size / sample-size / clock-frame-size combination.
 *
 * Fix vs. original: the four register-address expressions had been
 * mis-encoded as "®s->..." (an HTML "&reg;" entity swallowing "&reg");
 * restored to "&regs->...".
 */
static int i2s_init(Exynos5I2s *bus)
{
	Exynos5I2sRegs *regs = bus->regs;

	uint32_t mode = readl(&regs->mode);

	// Set transmit only mode.
	mode &= ~MOD_MASK;

	mode &= ~(MOD_SDF_MASK | MOD_LR_RLOW | MOD_SLAVE);
	mode |= MOD_SDF_IIS;

	// Sets the frame size for I2S LR clock.
	mode &= ~MOD_MULTI_RCLK_MASK;
	switch (bus->lr_frame_size) {
	case 768:
		mode |= MOD_MULTI_RCLK_768FS;
		break;
	case 512:
		mode |= MOD_MULTI_RCLK_512FS;
		break;
	case 384:
		mode |= MOD_MULTI_RCLK_384FS;
		break;
	case 256:
		mode |= MOD_MULTI_RCLK_256FS;
		break;
	default:
		printf("%s: Unrecognized frame size %d.\n", __func__,
		       bus->lr_frame_size);
		return 1;
	}

	// Select the bits-per-sample (bit length control).
	mode &= ~MOD_BLC_MASK;
	switch (bus->bits_per_sample) {
	case 8:
		mode |= MOD_BLC_8BIT;
		break;
	case 16:
		mode |= MOD_BLC_16BIT;
		break;
	case 24:
		mode |= MOD_BLC_24BIT;
		break;
	default:
		printf("%s: Invalid sample size input %d\n", __func__,
		       bus->bits_per_sample);
		return 1;
	}

	// Set the bit clock frame size (in multiples of LRCLK)
	mode &= ~MOD_BCLK_MASK;
	switch (bus->bits_per_sample * bus->channels) {
	case 48:
		mode |= MOD_BCLK_48FS;
		break;
	case 32:
		mode |= MOD_BCLK_32FS;
		break;
	case 24:
		mode |= MOD_BCLK_24FS;
		break;
	case 16:
		mode |= MOD_BCLK_16FS;
		break;
	default:
		printf("%s: Unrecognignized clock frame size %d.\n", __func__,
		       bus->bits_per_sample * bus->channels);
		return 1;
	}

	writel(mode, &regs->mode);

	i2s_transmit_enable(&regs->control, 0);
	i2s_flush_tx_fifo(&regs->fifo_control);

	return 0;
}
/* MMIO helper: fetch the 32-bit register at byte offset @off from the
 * card's mapped base. */
static inline u32 b1dma_readl(avmcard *card, int off)
{
	u32 val;

	val = readl(card->mbase + off);
	return val;
}
/*
 * Reset the ISL38xx device, upload its firmware image through the Direct
 * Memory Window, then restart it in RAMBoot mode.
 *
 * Returns 0 on success, the request_firmware() error code if the image
 * cannot be loaded, or -EILSEQ if the image size is not 32-bit aligned.
 *
 * Fixes vs. original: @rc is now an int (request_firmware() returns
 * negative errnos; storing them in a u32 and returning that through an int
 * function relied on implementation-defined conversion), and the
 * const-dropping cast on fw_entry->data is gone.
 */
static int isl_upload_firmware(islpci_private *priv)
{
	u32 reg;
	int rc;
	void __iomem *device_base = priv->device_base;

	/* clear the RAMBoot and the Reset bit */
	reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
	reg &= ~ISL38XX_CTRL_STAT_RESET;
	reg &= ~ISL38XX_CTRL_STAT_RAMBOOT;
	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
	wmb();
	udelay(ISL38XX_WRITEIO_DELAY);

	/* set the Reset bit without reading the register ! */
	reg |= ISL38XX_CTRL_STAT_RESET;
	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
	wmb();
	udelay(ISL38XX_WRITEIO_DELAY);

	/* clear the Reset bit */
	reg &= ~ISL38XX_CTRL_STAT_RESET;
	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
	wmb();

	/* wait a while for the device to reboot */
	mdelay(50);

	{
		const struct firmware *fw_entry = NULL;
		long fw_len;
		const u32 *fw_ptr;

		rc = request_firmware(&fw_entry, priv->firmware, PRISM_FW_PDEV);
		if (rc) {
			printk(KERN_ERR
			       "%s: request_firmware() failed for '%s'\n",
			       "prism54", priv->firmware);
			return rc;
		}
		/* prepare the Direct Memory Base register */
		reg = ISL38XX_DEV_FIRMWARE_ADDRES;
		fw_ptr = (const u32 *) fw_entry->data;
		fw_len = fw_entry->size;

		if (fw_len % 4) {
			printk(KERN_ERR
			       "%s: firmware '%s' size is not multiple of 32bit, aborting!\n",
			       "prism54", priv->firmware);
			release_firmware(fw_entry);
			return -EILSEQ; /* Illegal byte sequence  */;
		}

		while (fw_len > 0) {
			/* upload at most one memory window per iteration */
			long _fw_len =
			    (fw_len >
			     ISL38XX_MEMORY_WINDOW_SIZE) ?
			    ISL38XX_MEMORY_WINDOW_SIZE : fw_len;
			u32 __iomem *dev_fw_ptr = device_base + ISL38XX_DIRECT_MEM_WIN;

			/* set the card's base address for writing the data */
			isl38xx_w32_flush(device_base, reg,
					  ISL38XX_DIR_MEM_BASE_REG);
			wmb();	/* be paranoid */

			/* increment the write address for next iteration */
			reg += _fw_len;
			fw_len -= _fw_len;

			/* write the data to the Direct Memory Window 32bit-wise */
			/* memcpy_toio() doesn't guarantee 32bit writes :-| */
			while (_fw_len > 0) {
				/* use non-swapping writel() */
				__raw_writel(*fw_ptr, dev_fw_ptr);
				fw_ptr++, dev_fw_ptr++;
				_fw_len -= 4;
			}

			/* flush PCI posting */
			(void) readl(device_base + ISL38XX_PCI_POSTING_FLUSH);
			wmb();	/* be paranoid again */

			BUG_ON(_fw_len != 0);
		}

		BUG_ON(fw_len != 0);

		/* Firmware version is at offset 40 (also for "newmac") */
		printk(KERN_DEBUG "%s: firmware version: %.8s\n",
		       priv->ndev->name, fw_entry->data + 40);

		release_firmware(fw_entry);
	}

	/* now reset the device
	 * clear the Reset & ClkRun bit, set the RAMBoot bit */
	reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
	reg &= ~ISL38XX_CTRL_STAT_CLKRUN;
	reg &= ~ISL38XX_CTRL_STAT_RESET;
	reg |= ISL38XX_CTRL_STAT_RAMBOOT;
	isl38xx_w32_flush(device_base, reg, ISL38XX_CTRL_STAT_REG);
	wmb();
	udelay(ISL38XX_WRITEIO_DELAY);

	/* set the reset bit latches the host override and RAMBoot bits
	 * into the device for operation when the reset bit is reset */
	reg |= ISL38XX_CTRL_STAT_RESET;
	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
	/* don't do flush PCI posting here! */
	wmb();
	udelay(ISL38XX_WRITEIO_DELAY);

	/* clear the reset bit should start the whole circus */
	reg &= ~ISL38XX_CTRL_STAT_RESET;
	writel(reg, device_base + ISL38XX_CTRL_STAT_REG);
	/* don't do flush PCI posting here! */
	wmb();
	udelay(ISL38XX_WRITEIO_DELAY);

	return 0;
}
/**
 * Start the FEC engine: program the MAC, set up the RX descriptor ring,
 * (on some SoCs) configure the MII gasket for RMII, bring up the PHY and
 * apply the negotiated link speed, then enable the SmartDMA receive task.
 * @param[in] edev Our device to handle
 * @return 0 on success, or the phy_startup() error code (PHYLIB builds only)
 */
static int fec_open(struct eth_device *edev)
{
	struct fec_priv *fec = (struct fec_priv *)edev->priv;
	int speed;
	uint32_t addr, size;
	int i;

	debug("fec_open: fec_open(dev)\n");
	/* full-duplex, heartbeat disabled */
	writel(1 << 2, &fec->eth->x_cntrl);
	fec->rbd_index = 0;

	/* Invalidate all descriptors; the last one gets the WRAP flag (arg 1)
	 * so the controller cycles back to the start of the ring. */
	for (i = 0; i < FEC_RBD_NUM - 1; i++)
		fec_rbd_clean(0, &fec->rbd_base[i]);
	fec_rbd_clean(1, &fec->rbd_base[i]);

	/* Flush the descriptors into RAM so DMA sees them */
	size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd),
			ARCH_DMA_MINALIGN);
	addr = (uint32_t)fec->rbd_base;
	flush_dcache_range(addr, addr + size);

#ifdef FEC_QUIRK_ENET_MAC
	/* Enable ENET HW endian SWAP */
	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_DBSWAP,
	       &fec->eth->ecntrl);
	/* Enable ENET store and forward mode */
	writel(readl(&fec->eth->x_wmrk) | FEC_X_WMRK_STRFWD,
	       &fec->eth->x_wmrk);
#endif
	/*
	 * Enable FEC-Lite controller
	 */
	writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_ETHER_EN,
	       &fec->eth->ecntrl);
#if defined(CONFIG_MX25) || defined(CONFIG_MX53) || defined(CONFIG_MX6SL)
	udelay(100);
	/*
	 * setup the MII gasket for RMII mode
	 */

	/* disable the gasket */
	writew(0, &fec->eth->miigsk_enr);

	/* wait for the gasket to be disabled */
	while (readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY)
		udelay(2);

	/* configure gasket for RMII, 50 MHz, no loopback, and no echo */
	writew(MIIGSK_CFGR_IF_MODE_RMII, &fec->eth->miigsk_cfgr);

	/* re-enable the gasket */
	writew(MIIGSK_ENR_EN, &fec->eth->miigsk_enr);

	/* wait until MII gasket is ready (bounded; warn on timeout) */
	int max_loops = 10;
	while ((readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY) == 0) {
		if (--max_loops <= 0) {
			printf("WAIT for MII Gasket ready timed out\n");
			break;
		}
	}
#endif

#ifdef CONFIG_PHYLIB
	{
		/* Start up the PHY */
		int ret = phy_startup(fec->phydev);

		if (ret) {
			printf("Could not initialize PHY %s\n",
			       fec->phydev->dev->name);
			return ret;
		}
		speed = fec->phydev->speed;
	}
#else
	miiphy_wait_aneg(edev);
	speed = miiphy_speed(edev->name, fec->phy_id);
	miiphy_duplex(edev->name, fec->phy_id);
#endif

#ifdef FEC_QUIRK_ENET_MAC
	{
		/* Apply negotiated speed: gigabit via ECR, 10Mbit via RCR;
		 * 100Mbit is the default with both bits clear. */
		u32 ecr = readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_SPEED;
		u32 rcr = readl(&fec->eth->r_cntrl) & ~FEC_RCNTRL_RMII_10T;
		if (speed == _1000BASET)
			ecr |= FEC_ECNTRL_SPEED;
		else if (speed != _100BASET)
			rcr |= FEC_RCNTRL_RMII_10T;
		writel(ecr, &fec->eth->ecntrl);
		writel(rcr, &fec->eth->r_cntrl);
	}
#endif
	debug("%s:Speed=%i\n", __func__, speed);

	/*
	 * Enable SmartDMA receive task
	 */
	fec_rx_task_enable(fec);

	udelay(100000);
	return 0;
}
/**
 * Transmit one frame
 * @param[in] dev Our ethernet device to handle
 * @param[in] packet Pointer to the data to be transmitted
 * @param[in] length Data count in bytes
 * @return 0 on success, -1 on invalid length, -EINVAL on transmit timeout
 */
static int fec_send(struct eth_device *dev, void *packet, int length)
{
	unsigned int status;
	uint32_t size, end;
	uint32_t addr;
	int timeout = FEC_XFER_TIMEOUT;
	int ret = 0;

	/*
	 * This routine transmits one frame.  This routine only accepts
	 * 6-byte Ethernet addresses.
	 */
	struct fec_priv *fec = (struct fec_priv *)dev->priv;

	/*
	 * Check for valid length of data.
	 */
	if ((length > 1500) || (length <= 0)) {
		printf("Payload (%d) too large\n", length);
		return -1;
	}

	/*
	 * Setup the transmit buffer. We are always using the first buffer for
	 * transmission, the second will be empty and only used to stop the DMA
	 * engine. We also flush the packet to RAM here to avoid cache trouble.
	 */
#ifdef CONFIG_FEC_MXC_SWAP_PACKET
	swap_packet((uint32_t *)packet, length);
#endif

	/* Flush the packet buffer on cache-line boundaries before DMA. */
	addr = (uint32_t)packet;
	end = roundup(addr + length, ARCH_DMA_MINALIGN);
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	flush_dcache_range(addr, end);

	writew(length, &fec->tbd_base[fec->tbd_index].data_length);
	writel(addr, &fec->tbd_base[fec->tbd_index].data_pointer);

	/*
	 * update BD's status now
	 * This block:
	 * - is always the last in a chain (means no chain)
	 * - should transmitt the CRC
	 * - might be the last BD in the list, so the address counter should
	 *   wrap (-> keep the WRAP flag)
	 */
	status = readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_WRAP;
	status |= FEC_TBD_LAST | FEC_TBD_TC | FEC_TBD_READY;
	writew(status, &fec->tbd_base[fec->tbd_index].status);

	/*
	 * Flush data cache. This code flushes both TX descriptors to RAM.
	 * After this code, the descriptors will be safely in RAM and we
	 * can start DMA.
	 */
	size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	addr = (uint32_t)fec->tbd_base;
	flush_dcache_range(addr, addr + size);

	/*
	 * Read back the DMA descriptor's last four bytes from DRAM to make
	 * sure all WRITE operations triggered by the cache flush above have
	 * completed on the bus.
	 *
	 * Otherwise, on MX28, the DMA descriptors can be observed corrupted:
	 * the AHB bus may reorder WRITEs so that the FEC register write (2)
	 * finishes before the descriptor flush (1), starting DMA (3) before
	 * the descriptor is fully in DRAM.  See "Figure 1-2" in MX28RM.
	 */
	readl(addr + size - 4);

	/*
	 * Enable SmartDMA transmit task
	 */
	fec_tx_task_enable(fec);

	/*
	 * Wait until frame is sent. On each turn of the wait cycle, we must
	 * invalidate data cache to see what's really in RAM. Also, we need
	 * barrier here.
	 */
	while (--timeout) {
		if (!(readl(&fec->eth->x_des_active) & FEC_X_DES_ACTIVE_TDAR))
			break;
	}

	if (!timeout) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The TDAR bit is cleared when the descriptors are all out from TX
	 * but on mx6solox the READY bit of the last BD may still be set
	 * right after TDAR clears (newer FEC IP changed this intrinsic
	 * behaviour).  Poll the BD READY bit as well; this covers mx6solox
	 * and does not harm the other SoCs.
	 */
	timeout = FEC_XFER_TIMEOUT;
	while (--timeout) {
		invalidate_dcache_range(addr, addr + size);
		if (!(readw(&fec->tbd_base[fec->tbd_index].status) &
		    FEC_TBD_READY))
			break;
	}

	if (!timeout)
		ret = -EINVAL;

out:
	debug("fec_send: status 0x%x index %d ret %i\n",
			readw(&fec->tbd_base[fec->tbd_index].status),
			fec->tbd_index, ret);
	/* for next transmission use the other buffer */
	if (fec->tbd_index)
		fec->tbd_index = 0;
	else
		fec->tbd_index = 1;

	return ret;
}
/*
 * IRQ handler.
 *
 * If a message comes from the board we read it, construct a sk_buff
 * containing the message, queue it on the board's receive queue and
 * trigger the board's receive task queue.
 *
 * If a message ack comes from the board we can go on and send a new
 * message, so we trigger the board's send task queue.
 *
 * 	irq: the irq number
 * 	dev_id: the registered board to the irq
 * 	regs: not used.
 */
void tpam_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	tpam_card *card = (tpam_card *)dev_id;
	u32 ackupload, uploadptr;
	u32 waiting_too_long;
	u32 hpic;
	struct sk_buff *skb;
	pci_mpb mpb;
	skb_header *skbh;

	dprintk("TurboPAM(tpam_irq): IRQ received, card=%d\n", card->id);

	/* grab the board lock */
	spin_lock(&card->lock);

	/* get the message type: 0 = new message, non-zero = ack */
	ackupload = copy_from_pam_dword(card, (void *)TPAM_ACKUPLOAD_REGISTER);

	/* acknowledge the interrupt; the readl flushes the posted write */
	copy_to_pam_dword(card, (void *)TPAM_INTERRUPTACK_REGISTER, 0);
	readl(card->bar0 + TPAM_HINTACK_REGISTER);

	if (!ackupload) {
		/* it is a new message from the board */
		dprintk("TurboPAM(tpam_irq): message received, card=%d\n",
			card->id);

		/* get the upload pointer */
		uploadptr = copy_from_pam_dword(card,
						(void *)TPAM_UPLOADPTR_REGISTER);

		/* get the beginning of the message (pci_mpb part) */
		copy_from_pam(card, &mpb, (void *)uploadptr, sizeof(pci_mpb));

		/* allocate the sk_buff */
		if (!(skb = alloc_skb(sizeof(skb_header) + sizeof(pci_mpb) +
				      mpb.actualBlockTLVSize +
				      mpb.actualDataSize, GFP_ATOMIC))) {
			printk(KERN_ERR "TurboPAM(tpam_irq): "
			       "alloc_skb failed\n");
			spin_unlock(&card->lock);
			return;
		}

		/* build the skb_header */
		skbh = (skb_header *)skb_put(skb, sizeof(skb_header));
		skbh->size = sizeof(pci_mpb) + mpb.actualBlockTLVSize;
		skbh->data_size = mpb.actualDataSize;
		skbh->ack = 0;
		skbh->ack_size = 0;

		/* copy the pci_mpb into the sk_buff */
		memcpy(skb_put(skb, sizeof(pci_mpb)), &mpb, sizeof(pci_mpb));

		/* copy the TLV block into the sk_buff */
		copy_from_pam(card, skb_put(skb, mpb.actualBlockTLVSize),
			      (void *)uploadptr + sizeof(pci_mpb),
			      mpb.actualBlockTLVSize);

		/* if existent, copy the data block into the sk_buff;
		 * data lives 4096 bytes past the pci_mpb on the board */
		if (mpb.actualDataSize)
			copy_from_pam(card, skb_put(skb, mpb.actualDataSize),
				      (void *)uploadptr + sizeof(pci_mpb) + 4096,
				      mpb.actualDataSize);

		/* wait for the board to become ready (bounded busy-wait on
		 * the HPIC busy bit).
		 * NOTE(review): on the timeout path the freshly allocated
		 * skb appears to be leaked — confirm against the original
		 * driver. */
		waiting_too_long = 0;
		do {
			hpic = readl(card->bar0 + TPAM_HPIC_REGISTER);
			if (waiting_too_long++ > 0xfffffff) {
				spin_unlock(&card->lock);
				printk(KERN_ERR "TurboPAM(tpam_irq): "
				       "waiting too long...\n");
				return;
			}
		} while (hpic & 0x00000002);

		/* acknowledge the message */
		copy_to_pam_dword(card, (void *)TPAM_ACKDOWNLOAD_REGISTER,
				  0xffffffff);
		readl(card->bar0 + TPAM_DSPINT_REGISTER);

		/* release the board lock */
		spin_unlock(&card->lock);

		if (mpb.messageID == ID_U3ReadyToReceiveInd) {
			/* this message needs immediate treatment */
			tpam_recv_U3ReadyToReceiveInd(card, skb);
			kfree_skb(skb);
		}
		else {
			/* put the message in the receive queue */
			skb_queue_tail(&card->recvq, skb);
			queue_task(&card->recv_tq, &tq_immediate);
			mark_bh(IMMEDIATE_BH);
		}
		return;
	}
	else {
		/* it is a ack from the board */
		dprintk("TurboPAM(tpam_irq): message acknowledged, card=%d\n",
			card->id);

		/* board is not busy anymore */
		card->busy = 0;

		/* release the lock */
		spin_unlock(&card->lock);

		/* schedule the send queue for execution */
		queue_task(&card->send_tq, &tq_immediate);
		mark_bh(IMMEDIATE_BH);
		return;
	}
	/* not reached */
}
/*
 * Board ethernet init for AM335x: read the factory MAC address(es) from
 * e-fuse, register the CPSW switch (with MII or RGMII pinout depending on
 * the board variant from EEPROM) and/or the USB ethernet gadget.
 * Returns the number of interfaces registered.
 */
int board_eth_init(bd_t *bis)
{
	int rv, n = 0;
	uint8_t mac_addr[6];
	uint32_t mac_hi, mac_lo;
	__maybe_unused struct am335x_baseboard_id header;

	puts("board_eth_init \n");
	/* try reading mac address from efuse */
	mac_lo = readl(&cdev->macid0l);
	mac_hi = readl(&cdev->macid0h);
	/* unpack the two fuse words little-endian into the 6-byte address */
	mac_addr[0] = mac_hi & 0xFF;
	mac_addr[1] = (mac_hi & 0xFF00) >> 8;
	mac_addr[2] = (mac_hi & 0xFF0000) >> 16;
	mac_addr[3] = (mac_hi & 0xFF000000) >> 24;
	mac_addr[4] = mac_lo & 0xFF;
	mac_addr[5] = (mac_lo & 0xFF00) >> 8;

#if (defined(CONFIG_DRIVER_TI_CPSW) && !defined(CONFIG_SPL_BUILD)) || \
	(defined(CONFIG_SPL_ETH_SUPPORT) && defined(CONFIG_SPL_BUILD))
	/* Only set ethaddr from fuse if the environment doesn't have one */
	if (!getenv("ethaddr")) {
		printf("<ethaddr> not set. Validating first E-fuse MAC\n");

		if (is_valid_ether_addr(mac_addr))
			eth_setenv_enetaddr("ethaddr", mac_addr);
	}

#ifdef CONFIG_DRIVER_TI_CPSW
	/* second MAC for the second CPSW slave */
	mac_lo = readl(&cdev->macid1l);
	mac_hi = readl(&cdev->macid1h);
	mac_addr[0] = mac_hi & 0xFF;
	mac_addr[1] = (mac_hi & 0xFF00) >> 8;
	mac_addr[2] = (mac_hi & 0xFF0000) >> 16;
	mac_addr[3] = (mac_hi & 0xFF000000) >> 24;
	mac_addr[4] = mac_lo & 0xFF;
	mac_addr[5] = (mac_lo & 0xFF00) >> 8;

	if (!getenv("eth1addr")) {
		if (is_valid_ether_addr(mac_addr))
			eth_setenv_enetaddr("eth1addr", mac_addr);
	}

	if (read_eeprom(&header) < 0)
		puts("Could not get board ID.\n");

	/* Beaglebone variants use MII, the EVM boards use RGMII */
	if (board_is_bone(&header) || board_is_bone_lt(&header) ||
	    board_is_idk(&header)) {
		writel(MII_MODE_ENABLE, &cdev->miisel);
		cpsw_slaves[0].phy_if = cpsw_slaves[1].phy_if =
				PHY_INTERFACE_MODE_MII;
	} else {
		writel((RGMII_MODE_ENABLE | RGMII_INT_DELAY), &cdev->miisel);
		cpsw_slaves[0].phy_if = cpsw_slaves[1].phy_if =
				PHY_INTERFACE_MODE_RGMII;
	}

	rv = cpsw_register(&cpsw_data);
	if (rv < 0)
		printf("Error %d registering CPSW switch\n", rv);
	else
		n += rv;
#endif

	/*
	 *
	 * CPSW RGMII Internal Delay Mode is not supported in all PVT
	 * operating points.  So we must set the TX clock delay feature
	 * in the AR8051 PHY.  Since we only support a single ethernet
	 * device in U-Boot, we only do this for the first instance.
	 */
#define AR8051_PHY_DEBUG_ADDR_REG	0x1d
#define AR8051_PHY_DEBUG_DATA_REG	0x1e
#define AR8051_DEBUG_RGMII_CLK_DLY_REG	0x5
#define AR8051_RGMII_TX_CLK_DLY		0x100

	if (board_is_evm_sk(&header) || board_is_gp_evm(&header)) {
		const char *devname;
		devname = miiphy_get_current_dev();

		miiphy_write(devname, 0x0, AR8051_PHY_DEBUG_ADDR_REG,
				AR8051_DEBUG_RGMII_CLK_DLY_REG);
		miiphy_write(devname, 0x0, AR8051_PHY_DEBUG_DATA_REG,
				AR8051_RGMII_TX_CLK_DLY);
	}
#endif
#if defined(CONFIG_USB_ETHER) && \
	(!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_USBETH_SUPPORT))
	/* reuse whichever fuse MAC was read last for the USB gadget */
	if (is_valid_ether_addr(mac_addr))
		eth_setenv_enetaddr("usbnet_devaddr", mac_addr);

	rv = usb_eth_initialize(bis);
	if (rv < 0)
		printf("Error %d registering USB_ETHER\n", rv);
	else
		n += rv;
#endif
	return n;
}
/*
 * Try to send a packet from the board's send queue or from the channel's
 * send queue.
 *
 * 	card: the board.
 * 	channel: the channel (if NULL, the packet will be taken from the
 * 		board's send queue.  If not, it will be taken from the
 * 		channel's send queue.
 *
 * Return: 0 if tpam_send_tq must try another card/channel combination
 * 	(meaning that no packet has been send), 1 if no more packets
 * 	can be send at that time (a packet has been send or the card is
 * 	still busy from a previous send).
 */
static int tpam_sendpacket(tpam_card *card, tpam_channel *channel)
{
	struct sk_buff *skb;
	u32 hpic;
	u32 downloadptr;
	skb_header *skbh;
	u32 waiting_too_long;

	dprintk("TurboPAM(tpam_sendpacket), card=%d, channel=%d\n",
		card->id, channel ? channel->num : -1);

	if (channel) {
		/* dequeue a packet from the channel's send queue */
		if (!(skb = skb_dequeue(&channel->sendq))) {
			dprintk("TurboPAM(tpam_sendpacket): "
				"card=%d, channel=%d, no packet\n",
				card->id, channel->num);
			return 0;
		}

		/* if the channel is not ready to receive, requeue the packet
		 * and return 0 to give a chance to another channel */
		if (!channel->readytoreceive) {
			dprintk("TurboPAM(tpam_sendpacket): "
				"card=%d, channel=%d, channel not ready\n",
				card->id, channel->num);
			skb_queue_head(&channel->sendq, skb);
			return 0;
		}

		/* grab the board lock */
		spin_lock_irq(&card->lock);

		/* if the board is busy, requeue the packet and return 1 since
		 * there is no need to try another channel */
		if (card->busy) {
			dprintk("TurboPAM(tpam_sendpacket): "
				"card=%d, channel=%d, card busy\n",
				card->id, channel->num);
			skb_queue_head(&channel->sendq, skb);
			spin_unlock_irq(&card->lock);
			return 1;
		}
	}
	else {
		/* dequeue a packet from the board's send queue */
		if (!(skb = skb_dequeue(&card->sendq))) {
			dprintk("TurboPAM(tpam_sendpacket): "
				"card=%d, no packet\n", card->id);
			return 0;
		}

		/* grab the board lock */
		spin_lock_irq(&card->lock);

		/* if the board is busy, requeue the packet and return 1 since
		 * there is no need to try another channel */
		if (card->busy) {
			dprintk("TurboPAM(tpam_sendpacket): "
				"card=%d, card busy\n", card->id);
			skb_queue_head(&card->sendq, skb);
			spin_unlock_irq(&card->lock);
			return 1;
		}
	}

	/* wait for the board to become ready (bounded busy-wait on the HPIC
	 * busy bit).
	 * NOTE(review): on this timeout path the dequeued skb is neither
	 * requeued nor freed — looks like a leak; confirm against the
	 * original driver. */
	waiting_too_long = 0;
	do {
		hpic = readl(card->bar0 + TPAM_HPIC_REGISTER);
		if (waiting_too_long++ > 0xfffffff) {
			spin_unlock_irq(&card->lock);
			printk(KERN_ERR "TurboPAM(tpam_sendpacket): "
			       "waiting too long...\n");
			return 1;
		}
	} while (hpic & 0x00000002);

	skbh = (skb_header *)skb->data;
	dprintk("TurboPAM(tpam_sendpacket): "
		"card=%d, card ready, sending %d/%d bytes\n",
		card->id, skbh->size, skbh->data_size);

	/* get the board's download pointer */
	downloadptr = copy_from_pam_dword(card,
					  (void *)TPAM_DOWNLOADPTR_REGISTER);

	/* copy the packet to the board at the downloadptr location */
	copy_to_pam(card, (void *)downloadptr, skb->data + sizeof(skb_header),
		    skbh->size);
	if (skbh->data_size)
		/* if there is some data in the packet, copy it too;
		 * data goes 4096 bytes past the pci_mpb on the board */
		copy_to_pam(card, (void *)downloadptr + sizeof(pci_mpb) + 4096,
			    skb->data + sizeof(skb_header) + skbh->size,
			    skbh->data_size);

	/* card will become busy right now */
	card->busy = 1;

	/* interrupt the board; the readl flushes the posted write */
	copy_to_pam_dword(card, (void *)TPAM_ACKDOWNLOAD_REGISTER, 0);
	readl(card->bar0 + TPAM_DSPINT_REGISTER);

	/* release the lock */
	spin_unlock_irq(&card->lock);

	/* if a data ack was requested by the ISDN link layer, send it now.
	 * NOTE(review): channel is dereferenced here — presumably packets on
	 * the board queue (channel == NULL) never carry skbh->ack; verify. */
	if (skbh->ack) {
		isdn_ctrl ctrl;
		ctrl.driver = card->id;
		ctrl.command = ISDN_STAT_BSENT;
		ctrl.arg = channel->num;
		ctrl.parm.length = skbh->ack_size;
		(* card->interface.statcallb)(&ctrl);
	}

	/* free the sk_buff */
	kfree_skb(skb);

	return 1;
}
/* Read a 32-bit ETHOC register located @offset bytes into the device's
 * I/O window. */
static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
{
	u32 value;

	value = readl(dev->iobase + offset);
	return value;
}
/* Read an SPDIF register and trace the access through SPDIF_DEBUG_PRINT. */
static inline u32 spdif_readl(unsigned long base, u32 reg)
{
	unsigned long addr = base + reg;
	u32 val;

	val = readl(addr);
	SPDIF_DEBUG_PRINT("Spdif Read 0x%lx : %08x\n", addr, val);
	return val;
}
/*
 * Probe the PCMCIA card in @slot: walk its CIS tuple chain (tuples are laid
 * out on even bytes, hence the stride-2 pointer arithmetic), identify the
 * card, and if it is a fixed-disk function, select config index 1 and mark
 * the IDE device as found.  Returns 0 when a usable disk was found, 1
 * otherwise.
 */
static int check_ide_device(int slot, int ide_base_bus)
{
	volatile char *ident = NULL;
	volatile char *feature_p[MAX_FEATURES];
	volatile char *p, *start;
	int n_features = 0;
	uchar func_id = ~0;	/* sentinel: no FUNCID tuple seen yet */
	uchar code, len;
	ushort config_base = 0;
	int found = 0;
	int i;
	u32 socket_status;

	debug ("PCMCIA MEM: %08X\n", pcmcia_cis_ptr);

	socket_status = readl(socket_base+8);

	/* bail out when no card is present or a CardBus card is inserted */
	if ((socket_status & 6) != 0 || (socket_status & 0x20) != 0) {
		printf("no card or CardBus card\n");
		return 1;
	}

	start = p = (volatile char *) pcmcia_cis_ptr;

	while ((p - start) < MAX_TUPEL_SZ) {
		/* tuple format: code, length, then 'length' data bytes;
		 * everything is mapped on every second byte */
		code = *p;
		p += 2;

		if (code == 0xFF) { /* End of chain */
			break;
		}

		len = *p;
		p += 2;
#if defined(DEBUG) && (DEBUG > 1)
		{
			volatile uchar *q = p;
			printf ("\nTuple code %02x length %d\n\tData:",
				code, len);

			for (i = 0; i < len; ++i) {
				printf (" %02x", *q);
				q+= 2;
			}
		}
#endif /* DEBUG */
		switch (code) {
		case CISTPL_VERS_1:
			ident = p + 4;
			break;
		case CISTPL_FUNCID:
			/* Fix for broken SanDisk which may have 0x80 bit set */
			func_id = *p & 0x7F;
			break;
		case CISTPL_FUNCE:
			if (n_features < MAX_FEATURES)
				feature_p[n_features++] = p;
			break;
		case CISTPL_CONFIG:
			config_base = (*(p+6) << 8) + (*(p+4));
			debug ("\n## Config_base = %04x ###\n", config_base);
			/* fallthrough — default only breaks */
		default:
			break;
		}
		p += 2 * len;
	}

	found = identify(ident);

	if (func_id != ((uchar)~0)) {
		print_funcid (func_id);

		if (func_id == CISTPL_FUNCID_FIXED)
			found = 1;
		else
			return 1;	/* no disk drive */
	}

	for (i=0; i<n_features; ++i) {
		print_fixed(feature_p[i]);
	}

	if (!found) {
		printf("unknown card type\n");
		return 1;
	}

	/* select config index 1 */
	writeb(1, pcmcia_cis_ptr + config_base);

#if 0
	printf("Confiuration Option Register: %02x\n", readb(pcmcia_cis_ptr + config_base));
	printf("Card Confiuration and Status Register: %02x\n", readb(pcmcia_cis_ptr + config_base + 2));
	printf("Pin Replacement Register Register: %02x\n", readb(pcmcia_cis_ptr + config_base + 4));
	printf("Socket and Copy Register: %02x\n", readb(pcmcia_cis_ptr + config_base + 6));
#endif
	ide_devices_found |= (1 << (slot+ide_base_bus));

	return 0;
}
/**
 * Pull one frame from the card
 * @param[in] dev Our ethernet device to handle
 * @return Length of packet read, or 0 if nothing was received / an error
 *         interrupt forced a controller re-init
 */
static int fec_recv(struct eth_device *dev)
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	struct fec_bd *rbd = &fec->rbd_base[fec->rbd_index];
	unsigned long ievent;
	int frame_length, len = 0;
	struct nbuf *frame;
	uint16_t bd_status;
	uint32_t addr, size, end;
	int i;
	ALLOC_CACHE_ALIGN_BUFFER(uchar, buff, FEC_MAX_PKT_SIZE);

	/*
	 * Check if any critical events have happened
	 */
	ievent = readl(&fec->eth->ievent);
	writel(ievent, &fec->eth->ievent);	/* write-1-to-clear */
	debug("fec_recv: ievent 0x%lx\n", ievent);
	if (ievent & FEC_IEVENT_BABR) {
		/* babbling receiver: restart the whole controller */
		fec_halt(dev);
		fec_init(dev, fec->bd);
		printf("some error: 0x%08lx\n", ievent);
		return 0;
	}
	if (ievent & FEC_IEVENT_HBERR) {
		/* Heartbeat error */
		writel(0x00000001 | readl(&fec->eth->x_cntrl),
		       &fec->eth->x_cntrl);
	}
	if (ievent & FEC_IEVENT_GRA) {
		/* Graceful stop complete */
		if (readl(&fec->eth->x_cntrl) & 0x00000001) {
			fec_halt(dev);
			writel(~0x00000001 & readl(&fec->eth->x_cntrl),
			       &fec->eth->x_cntrl);
			fec_init(dev, fec->bd);
		}
	}

	/*
	 * Read the buffer status.  Before the status can be read, the data
	 * cache must be invalidated, because the data in RAM might have been
	 * changed by DMA.  The descriptors are properly aligned to cachelines
	 * so there's no need to worry they'd overlap.
	 *
	 * WARNING: by invalidating the descriptor here, we also invalidate
	 * the descriptors surrounding this one.  Therefore we can NOT change
	 * the contents of this descriptor nor the surrounding ones.  To mark
	 * a descriptor as processed we instead clean the whole cache line
	 * once ALL descriptors in it are processed (see below).
	 */
	addr = (uint32_t)rbd;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup(sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	invalidate_dcache_range(addr, addr + size);

	bd_status = readw(&rbd->status);
	debug("fec_recv: status 0x%x\n", bd_status);

	if (!(bd_status & FEC_RBD_EMPTY)) {
		/* accept only complete, error-free frames longer than a bare
		 * Ethernet header (length excludes the 4-byte CRC) */
		if ((bd_status & FEC_RBD_LAST) && !(bd_status & FEC_RBD_ERR) &&
		    ((readw(&rbd->data_length) - 4) > 14)) {
			/*
			 * Get buffer address and size
			 */
			frame = (struct nbuf *)readl(&rbd->data_pointer);
			frame_length = readw(&rbd->data_length) - 4;
			/*
			 * Invalidate data cache over the buffer
			 */
			addr = (uint32_t)frame;
			end = roundup(addr + frame_length, ARCH_DMA_MINALIGN);
			addr &= ~(ARCH_DMA_MINALIGN - 1);
			invalidate_dcache_range(addr, end);

			/*
			 * Fill the buffer and pass it to upper layers
			 */
#ifdef CONFIG_FEC_MXC_SWAP_PACKET
			swap_packet((uint32_t *)frame->data, frame_length);
#endif
			memcpy(buff, frame->data, frame_length);
			NetReceive(buff, frame_length);
			len = frame_length;
		} else {
			if (bd_status & FEC_RBD_ERR)
				printf("error frame: 0x%08lx 0x%08x\n",
				       (ulong)rbd->data_pointer, bd_status);
		}

		/*
		 * Free the current buffer, restart the engine and move
		 * forward to the next buffer.  Here we check if the whole
		 * cacheline of descriptors was already processed and if so,
		 * we mark it free as whole.
		 */
		size = RXDESC_PER_CACHELINE - 1;
		if ((fec->rbd_index & size) == size) {
			i = fec->rbd_index - size;
			addr = (uint32_t)&fec->rbd_base[i];
			for (; i <= fec->rbd_index ; i++) {
				fec_rbd_clean(i == (FEC_RBD_NUM - 1),
					      &fec->rbd_base[i]);
			}
			flush_dcache_range(addr,
					   addr + ARCH_DMA_MINALIGN);
		}

		fec_rx_task_enable(fec);
		fec->rbd_index = (fec->rbd_index + 1) % FEC_RBD_NUM;
	}
	debug("fec_recv: stop\n");

	return len;
}
/*
 * Power up and reset the Exynos USB HOST PHY, then configure the EHCI
 * burst/alignment controls.  When @hsic_gpio is non-zero it is also used
 * to reset the on-board HSIC hub and the HSIC PHY is brought out of
 * suspend.  The udelay()s between the reset-bit toggles are part of the
 * PHY reset sequence — do not reorder.
 */
void setup_usb_host_phy(int hsic_gpio)
{
	unsigned int hostphy_ctrl0;

	setbits_le32(&exynos_sysreg->usb20_phy_cfg, USB20_PHY_CFG_EN);
	setbits_le32(&exynos_power->usb_host_phy_ctrl, POWER_USB_PHY_CTRL_EN);

	printk(BIOS_DEBUG, "Powering up USB HOST PHY (%s HSIC)\n",
			hsic_gpio ? "with" : "without");

	hostphy_ctrl0 = readl(&exynos_usb_host_phy->usbphyctrl0);
	/* clear suspend/sleep/reset controls before programming the PHY */
	hostphy_ctrl0 &= ~(HOST_CTRL0_FSEL_MASK |
			   HOST_CTRL0_COMMONON_N |
			   /* HOST Phy setting */
			   HOST_CTRL0_PHYSWRST |
			   HOST_CTRL0_PHYSWRSTALL |
			   HOST_CTRL0_SIDDQ |
			   HOST_CTRL0_FORCESUSPEND |
			   HOST_CTRL0_FORCESLEEP);
	hostphy_ctrl0 |= (/* Setting up the ref freq */
			  CLK_24MHZ << 16 |
			  /* HOST Phy setting */
			  HOST_CTRL0_LINKSWRST |
			  HOST_CTRL0_UTMISWRST);
	writel(hostphy_ctrl0, &exynos_usb_host_phy->usbphyctrl0);
	udelay(10);
	/* de-assert the link/UTMI software resets */
	clrbits_le32(&exynos_usb_host_phy->usbphyctrl0,
		     HOST_CTRL0_LINKSWRST |
		     HOST_CTRL0_UTMISWRST);
	udelay(20);

	/* EHCI Ctrl setting */
	setbits_le32(&exynos_usb_host_phy->ehcictrl,
		     EHCICTRL_ENAINCRXALIGN |
		     EHCICTRL_ENAINCR4 |
		     EHCICTRL_ENAINCR8 |
		     EHCICTRL_ENAINCR16);

	/* HSIC USB Hub initialization. */
	if (hsic_gpio) {
		/* pulse the hub reset line, then give it 5ms to come up */
		gpio_direction_output(hsic_gpio, 0);
		udelay(100);
		gpio_direction_output(hsic_gpio, 1);
		udelay(5000);

		clrbits_le32(&exynos_usb_host_phy->hsicphyctrl1,
			     HOST_CTRL0_SIDDQ |
			     HOST_CTRL0_FORCESLEEP |
			     HOST_CTRL0_FORCESUSPEND);
		setbits_le32(&exynos_usb_host_phy->hsicphyctrl1,
			     HOST_CTRL0_PHYSWRST);
		udelay(10);
		clrbits_le32(&exynos_usb_host_phy->hsicphyctrl1,
			     HOST_CTRL0_PHYSWRST);
	}

	/* At this point we need to wait for 50ms before talking to
	 * the USB controller (PHY clock and power setup time)
	 * By the time we are actually in the payload, these 50ms
	 * will have passed.
	 */
}
static int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr, struct mii_dev *bus, int phy_id) #endif { struct eth_device *edev; struct fec_priv *fec; unsigned char ethaddr[6]; uint32_t start; int ret = 0; /* create and fill edev struct */ edev = (struct eth_device *)malloc(sizeof(struct eth_device)); if (!edev) { puts("fec_mxc: not enough malloc memory for eth_device\n"); ret = -ENOMEM; goto err1; } fec = (struct fec_priv *)malloc(sizeof(struct fec_priv)); if (!fec) { puts("fec_mxc: not enough malloc memory for fec_priv\n"); ret = -ENOMEM; goto err2; } memset(edev, 0, sizeof(*edev)); memset(fec, 0, sizeof(*fec)); ret = fec_alloc_descs(fec); if (ret) goto err3; edev->priv = fec; edev->init = fec_init; edev->send = fec_send; edev->recv = fec_recv; edev->halt = fec_halt; edev->write_hwaddr = fec_set_hwaddr; fec->eth = (struct ethernet_regs *)base_addr; fec->bd = bd; fec->xcv_type = CONFIG_FEC_XCV_TYPE; /* Reset chip. */ writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_RESET, &fec->eth->ecntrl); start = get_timer(0); while (readl(&fec->eth->ecntrl) & FEC_ECNTRL_RESET) { if (get_timer(start) > (CONFIG_SYS_HZ * 5)) { printf("FEC MXC: Timeout reseting chip\n"); goto err4; } udelay(10); } fec_reg_setup(fec); fec_set_dev_name(edev->name, dev_id); fec->dev_id = (dev_id == -1) ? 0 : dev_id; fec->bus = bus; fec_mii_setspeed(bus->priv); #ifdef CONFIG_PHYLIB fec->phydev = phydev; phy_connect_dev(phydev, edev); /* Configure phy */ phy_config(phydev); #else fec->phy_id = phy_id; #endif eth_register(edev); if (fec_get_hwaddr(edev, dev_id, ethaddr) == 0) { debug("got MAC%d address from fuse: %pM\n", dev_id, ethaddr); memcpy(edev->enetaddr, ethaddr, 6); if (!getenv("ethaddr")) eth_setenv_enetaddr("ethaddr", ethaddr); } return ret; err4: fec_free_descs(fec); err3: free(fec); err2: free(edev); err1: return ret; }
/*
 * Shared-IRQ interrupt handler for the ISL38xx WLAN device.
 *
 * Returns IRQ_NONE when the interrupt was raised by another device on
 * the shared line (device asleep, or no enabled interrupt source set);
 * IRQ_HANDLED otherwise.  Services RX management/data queues, TX queue
 * cleanup, init completion and sleep/wakeup requests, then reconciles
 * the driver power state with the device's requested powerstate.
 * All work runs under priv->slock.
 */
irqreturn_t islpci_interrupt(int irq, void *config)
{
	u32 reg;
	islpci_private *priv = config;
	struct net_device *ndev = priv->ndev;
	void __iomem *device = priv->device_base;
	int powerstate = ISL38XX_PSM_POWERSAVE_STATE;

	/* lock the interrupt handler */
	spin_lock(&priv->slock);

	/* received an interrupt request on a shared IRQ line
	 * first check whether the device is in sleep mode */
	reg = readl(device + ISL38XX_CTRL_STAT_REG);
	if (reg & ISL38XX_CTRL_STAT_SLEEPMODE)
		/* device is in sleep mode, IRQ was generated by someone else */
	{
#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n");
#endif
		spin_unlock(&priv->slock);
		return IRQ_NONE;
	}

	/* check whether there is any source of interrupt on the device */
	reg = readl(device + ISL38XX_INT_IDENT_REG);

	/* also check the contents of the Interrupt Enable Register, because this
	 * will filter out interrupt sources from other devices on the same irq ! */
	reg &= readl(device + ISL38XX_INT_EN_REG);
	reg &= ISL38XX_INT_SOURCES;

	if (reg != 0) {
		if (islpci_get_state(priv) != PRV_STATE_SLEEP)
			powerstate = ISL38XX_PSM_ACTIVE_STATE;

		/* reset the request bits in the Identification register */
		isl38xx_w32_flush(device, reg, ISL38XX_INT_ACK_REG);

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_FUNCTION_CALLS,
		      "IRQ: Identification register 0x%p 0x%x\n", device, reg);
#endif

		/* check for each bit in the register separately */
		if (reg & ISL38XX_INT_IDENT_UPDATE) {
#if VERBOSE > SHOW_ERROR_MESSAGES
			/* Queue has been updated */
			DEBUG(SHOW_TRACING, "IRQ: Update flag\n");

			DEBUG(SHOW_QUEUE_INDEXES,
			      "CB drv Qs: [%i][%i][%i][%i][%i][%i]\n",
			      le32_to_cpu(priv->control_block->driver_curr_frag[0]),
			      le32_to_cpu(priv->control_block->driver_curr_frag[1]),
			      le32_to_cpu(priv->control_block->driver_curr_frag[2]),
			      le32_to_cpu(priv->control_block->driver_curr_frag[3]),
			      le32_to_cpu(priv->control_block->driver_curr_frag[4]),
			      le32_to_cpu(priv->control_block->driver_curr_frag[5])
			    );

			DEBUG(SHOW_QUEUE_INDEXES,
			      "CB dev Qs: [%i][%i][%i][%i][%i][%i]\n",
			      le32_to_cpu(priv->control_block->device_curr_frag[0]),
			      le32_to_cpu(priv->control_block->device_curr_frag[1]),
			      le32_to_cpu(priv->control_block->device_curr_frag[2]),
			      le32_to_cpu(priv->control_block->device_curr_frag[3]),
			      le32_to_cpu(priv->control_block->device_curr_frag[4]),
			      le32_to_cpu(priv->control_block->device_curr_frag[5])
			    );
#endif

			/* cleanup the data low transmit queue */
			islpci_eth_cleanup_transmit(priv, priv->control_block);

			/* device is in active state, update the
			 * powerstate flag if necessary */
			powerstate = ISL38XX_PSM_ACTIVE_STATE;

			/* check all three queues in priority order
			 * call the PIMFOR receive function until the
			 * queue is empty */
			if (isl38xx_in_queue(priv->control_block,
					     ISL38XX_CB_RX_MGMTQ) != 0) {
#if VERBOSE > SHOW_ERROR_MESSAGES
				DEBUG(SHOW_TRACING,
				      "Received frame in Management Queue\n");
#endif
				islpci_mgt_receive(ndev);

				islpci_mgt_cleanup_transmit(ndev);

				/* Refill slots in receive queue */
				islpci_mgmt_rx_fill(ndev);

				/* no need to trigger the device, next
				   islpci_mgt_transaction does it */
			}

			while (isl38xx_in_queue(priv->control_block,
						ISL38XX_CB_RX_DATA_LQ) != 0) {
#if VERBOSE > SHOW_ERROR_MESSAGES
				DEBUG(SHOW_TRACING,
				      "Received frame in Data Low Queue\n");
#endif
				islpci_eth_receive(priv);
			}

			/* check whether the data transmit queues were full */
			if (priv->data_low_tx_full) {
				/* check whether the transmit is not full anymore */
				if (ISL38XX_CB_TX_QSIZE -
				    isl38xx_in_queue(priv->control_block,
						     ISL38XX_CB_TX_DATA_LQ) >=
				    ISL38XX_MIN_QTHRESHOLD) {
					/* nope, the driver is ready for more network frames */
					netif_wake_queue(priv->ndev);

					/* reset the full flag */
					priv->data_low_tx_full = 0;
				}
			}
		}

		if (reg & ISL38XX_INT_IDENT_INIT) {
			/* Device has been initialized */
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING,
			      "IRQ: Init flag, device initialized\n");
#endif
			/* unblock whoever is waiting in the reset path */
			wake_up(&priv->reset_done);
		}

		if (reg & ISL38XX_INT_IDENT_SLEEP) {
			/* Device intends to move to powersave state */
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "IRQ: Sleep flag\n");
#endif
			isl38xx_handle_sleep_request(priv->control_block,
						     &powerstate,
						     priv->device_base);
		}

		if (reg & ISL38XX_INT_IDENT_WAKEUP) {
			/* Device has been woken up to active state */
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "IRQ: Wakeup flag\n");
#endif
			isl38xx_handle_wakeup(priv->control_block,
					      &powerstate, priv->device_base);
		}
	} else {
#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n");
#endif
		spin_unlock(&priv->slock);
		return IRQ_NONE;
	}

	/* sleep -> ready */
	if (islpci_get_state(priv) == PRV_STATE_SLEEP
	    && powerstate == ISL38XX_PSM_ACTIVE_STATE)
		islpci_set_state(priv, PRV_STATE_READY);

	/* !sleep -> sleep */
	if (islpci_get_state(priv) != PRV_STATE_SLEEP
	    && powerstate == ISL38XX_PSM_POWERSAVE_STATE)
		islpci_set_state(priv, PRV_STATE_SLEEP);

	/* unlock the interrupt handler */
	spin_unlock(&priv->slock);

	return IRQ_HANDLED;
}
/*
 * Scheduler clock: sample the free-running GPT2 counter and convert
 * the raw 32-bit cycle count via the cached clock descriptor.
 */
unsigned long long notrace sched_clock(void)
{
	return cyc_to_sched_clock(&cd,
				  readl(U300_TIMER_APP_VBASE +
					U300_TIMER_APP_GPT2CC),
				  (u32)~0);
}
/* Read the 32-bit SSBI controller register at byte offset @reg. */
static inline u32 ssbi_readl(struct msm_ssbi *ssbi, u32 reg)
{
	u32 val;

	val = readl(ssbi->base + reg);
	return val;
}
/*
 * Periodic sched_clock update hook: feed the current GPT2 counter
 * value into the shared clock descriptor.
 */
static void notrace u300_update_sched_clock(void)
{
	update_sched_clock(&cd,
			   readl(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC),
			   (u32)~0);
}
/*
 * Write one command into an ADSP RTOS queue.
 *
 * @module:         target ADSP module (must be ADSP_STATE_ENABLED)
 * @dsp_queue_addr: queue index, < QDSP_MAX_NUM_QUEUES
 * @cmd_buf:        command payload; first 16/32-bit word is the command ID
 * @cmd_size:       payload size in bytes
 *
 * Implements the write-side mailbox handshake: poll the control word
 * until the DSP is ready, claim the mutex bits and publish the queue
 * address, interrupt the DSP, wait for it to release the mutex, then
 * copy the payload into the DSP address it returned and signal
 * write-done.  Returns 0 on success, -EINVAL/-ENODEV/-ENXIO on bad
 * arguments or state, -EIO on handshake timeout, -EAGAIN when the DSP
 * reports an error status.  Runs entirely under adsp_write_lock.
 */
int __msm_adsp_write(struct msm_adsp_module *module, unsigned dsp_queue_addr,
		     void *cmd_buf, size_t cmd_size)
{
	uint32_t ctrl_word;
	uint32_t dsp_q_addr;
	uint32_t dsp_addr;
	uint32_t cmd_id = 0;	/* NOTE(review): captured but never read below */
	int cnt = 0;
	int ret_status = 0;
	unsigned long flags;
	struct adsp_info *info;

	if (!module || !cmd_buf) {
		MM_ERR("Called with NULL parameters\n");
		return -EINVAL;
	}
	info = module->info;
	spin_lock_irqsave(&adsp_write_lock, flags);

	if (module->state != ADSP_STATE_ENABLED) {
		spin_unlock_irqrestore(&adsp_write_lock, flags);
		MM_ERR("module %s not enabled before write\n", module->name);
		return -ENODEV;
	}
	if (adsp_validate_module(module->id)) {
		spin_unlock_irqrestore(&adsp_write_lock, flags);
		MM_ERR("module id validation failed %s  %d\n",
				module->name, module->id);
		return -ENXIO;
	}
	if (dsp_queue_addr >= QDSP_MAX_NUM_QUEUES) {
		spin_unlock_irqrestore(&adsp_write_lock, flags);
		MM_ERR("Invalid Queue Index: %d\n", dsp_queue_addr);
		return -ENXIO;
	}
	if (adsp_validate_queue(module->id, dsp_queue_addr, cmd_size)) {
		spin_unlock_irqrestore(&adsp_write_lock, flags);
		return -EINVAL;
	}
	dsp_q_addr = adsp_get_queue_offset(info, dsp_queue_addr);
	dsp_q_addr &= ADSP_RTOS_WRITE_CTRL_WORD_DSP_ADDR_M;

	/* Poll until the ADSP is ready to accept a command.
	 * Wait for 100us, return error if it's not responding.
	 * If this returns an error, we need to disable ALL modules and
	 * then retry.
	 */
	while (((ctrl_word = readl(info->write_ctrl)) &
		ADSP_RTOS_WRITE_CTRL_WORD_READY_M) !=
		ADSP_RTOS_WRITE_CTRL_WORD_READY_V) {
		if (cnt > 50) {
			MM_ERR("timeout waiting for DSP write ready\n");
			ret_status = -EIO;
			goto fail;
		}
		MM_DBG("waiting for DSP write ready\n");
		udelay(2);
		cnt++;
	}

	/* Set the mutex bits */
	ctrl_word &= ~(ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_M);
	ctrl_word |= ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_NAVAIL_V;

	/* Clear the command bits */
	ctrl_word &= ~(ADSP_RTOS_WRITE_CTRL_WORD_CMD_M);

	/* Set the queue address bits */
	ctrl_word &= ~(ADSP_RTOS_WRITE_CTRL_WORD_DSP_ADDR_M);
	ctrl_word |= dsp_q_addr;

	writel(ctrl_word, info->write_ctrl);

	/* Generate an interrupt to the DSP.  This notifies the DSP that
	 * we are about to send a command on this particular queue.  The
	 * DSP will in response change its state.
	 */
	writel(1, info->send_irq);

	/* Poll until the adsp responds to the interrupt; this does not
	 * generate an interrupt from the adsp.  This should happen within
	 * 5ms.
	 */
	cnt = 0;
	while ((readl(info->write_ctrl) &
		ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_M) ==
		ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_NAVAIL_V) {
		if (cnt > 2500) {
			MM_ERR("timeout waiting for adsp ack\n");
			ret_status = -EIO;
			goto fail;
		}
		udelay(2);
		cnt++;
	}

	/* Read the ctrl word */
	ctrl_word = readl(info->write_ctrl);

	if ((ctrl_word & ADSP_RTOS_WRITE_CTRL_WORD_STATUS_M) !=
	    ADSP_RTOS_WRITE_CTRL_WORD_NO_ERR_V) {
		ret_status = -EAGAIN;
		goto fail;
	} else {
		/* No error */
		/* Get the DSP buffer address */
		dsp_addr = (ctrl_word & ADSP_RTOS_WRITE_CTRL_WORD_DSP_ADDR_M) +
			   (uint32_t)MSM_AD5_BASE;

		/* Addresses below the RAMC boundary take 16-bit copies;
		 * everything above takes 32-bit copies. */
		if (dsp_addr < (uint32_t)(MSM_AD5_BASE + QDSP_RAMC_OFFSET)) {
			uint16_t *buf_ptr = (uint16_t *) cmd_buf;
			uint16_t *dsp_addr16 = (uint16_t *)dsp_addr;
			cmd_size /= sizeof(uint16_t);

			/* Save the command ID */
			cmd_id = (uint32_t) buf_ptr[0];

			/* Copy the command to DSP memory */
			cmd_size++;
			while (--cmd_size)
				*dsp_addr16++ = *buf_ptr++;
		} else {
			uint32_t *buf_ptr = (uint32_t *) cmd_buf;
			uint32_t *dsp_addr32 = (uint32_t *)dsp_addr;
			cmd_size /= sizeof(uint32_t);

			/* Save the command ID */
			cmd_id = buf_ptr[0];

			cmd_size++;
			while (--cmd_size)
				*dsp_addr32++ = *buf_ptr++;
		}

		/* Set the mutex bits */
		ctrl_word &= ~(ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_M);
		ctrl_word |= ADSP_RTOS_WRITE_CTRL_WORD_MUTEX_NAVAIL_V;

		/* Set the command bits to write done */
		ctrl_word &= ~(ADSP_RTOS_WRITE_CTRL_WORD_CMD_M);
		ctrl_word |= ADSP_RTOS_WRITE_CTRL_WORD_CMD_WRITE_DONE_V;

		/* Set the queue address bits */
		ctrl_word &= ~(ADSP_RTOS_WRITE_CTRL_WORD_DSP_ADDR_M);
		ctrl_word |= dsp_q_addr;

		writel(ctrl_word, info->write_ctrl);

		/* Generate an interrupt to the DSP.  It does not respond with
		 * an interrupt, and we do not need to wait for it to
		 * acknowledge, because it will hold the mutex lock until it's
		 * ready to receive more commands again.
		 */
		writel(1, info->send_irq);

		module->num_commands++;
	} /* Ctrl word status bits were 00, no error in the ctrl word */

fail:
	spin_unlock_irqrestore(&adsp_write_lock, flags);
	return ret_status;
}
#include <mach/io.h>
#include <plat/io.h>
#include <asm/io.h>

#include <linux/mali/mali_utgard.h>
#include <common/mali_kernel_common.h>
#include <common/mali_pmu.h>
#include "meson_main.h"

#if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON8
/*
 * Sysfs read handler reporting the GPU power-domain status.
 * Reads AO-bus register at offset 0xf0, keeps the low byte and
 * formats its upper nibble as a hex string into @buf.
 * Returns the number of bytes written.
 */
static ssize_t domain_stat_read(struct class *class,
			struct class_attribute *attr, char *buf)
{
	unsigned int val;

	val = readl((u32 *)(IO_AOBUS_BASE + 0xf0)) & 0xff;
	return sprintf(buf, "%x\n", val>>4);
}

/* Commands accepted by the mpgpu sysfs write handler below. */
#define PREHEAT_CMD	"preheat"
#define PLL2_CMD	"mpl2"  /* mpl2 [11] or [0xxxxxxx] */
#define SCMPP_CMD	"scmpp" /* scmpp [number of pp your want in most of time]. */
#define BSTGPU_CMD	"bstgpu" /* bstgpu [0-256] */
#define BSTPP_CMD	"bstpp" /* bstpp [0-256] */
#define LIMIT_CMD	"lmt"   /* lmt [0 or 1] */
#define MAX_TOKEN 20
#define FULL_UTILIZATION 256

/* Sysfs write handler; body continues beyond this chunk. */
static ssize_t mpgpu_write(struct class *class,
			struct class_attribute *attr, const char *buf, size_t count)
{
/*
 * Flush the I2S transmit FIFO by pulsing the TXFLUSH bit: write the
 * control register with the bit set, then restore it with the bit
 * cleared, leaving the other control bits untouched.
 */
static void i2s_flush_tx_fifo(uint32_t *fifo_control_ptr)
{
	uint32_t saved = readl(fifo_control_ptr);

	writel(saved | FIC_TXFLUSH, fifo_control_ptr);
	writel(saved & ~FIC_TXFLUSH, fifo_control_ptr);
}
/* Spin until the CPLD status register's busy bit (bit 0) clears. */
static inline void cpld_wait(struct fb_info *info)
{
	while (readl(info->screen_base + CPLD_STATUS) & 1)
		;
}
/*
 * Apply termios settings to the MXS application UART: word length,
 * parity, stop bits, RX enable, CTS/RTS flow control and baud rate.
 *
 * @u:       port whose LINECTRL/CTRL2 registers are programmed
 * @termios: requested settings
 * @old:     previous settings, passed to uart_get_baud_rate()
 *
 * Returns early (programming nothing) on an unsupported CSIZE.
 */
static void mxs_auart_settermios(struct uart_port *u,
				 struct ktermios *termios,
				 struct ktermios *old)
{
	u32 bm, ctrl, ctrl2, div;
	unsigned int cflag, baud;

	cflag = termios->c_cflag;

	/* FIFOs stay enabled; everything else rebuilt from termios. */
	ctrl = AUART_LINECTRL_FEN;
	ctrl2 = readl(u->membase + AUART_CTRL2);

	/* byte size */
	switch (cflag & CSIZE) {
	case CS5:
		bm = 0;
		break;
	case CS6:
		bm = 1;
		break;
	case CS7:
		bm = 2;
		break;
	case CS8:
		bm = 3;
		break;
	default:
		return;
	}

	ctrl |= AUART_LINECTRL_WLEN(bm);

	/* parity */
	if (cflag & PARENB) {
		ctrl |= AUART_LINECTRL_PEN;
		if ((cflag & PARODD) == 0)
			ctrl |= AUART_LINECTRL_EPS;
	}

	u->read_status_mask = 0;

	if (termios->c_iflag & INPCK)
		u->read_status_mask |= AUART_STAT_PERR;
	if (termios->c_iflag & (BRKINT | PARMRK))
		u->read_status_mask |= AUART_STAT_BERR;

	/*
	 * Characters to ignore
	 */
	u->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		u->ignore_status_mask |= AUART_STAT_PERR;
	if (termios->c_iflag & IGNBRK) {
		u->ignore_status_mask |= AUART_STAT_BERR;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			u->ignore_status_mask |= AUART_STAT_OERR;
	}

	/*
	 * ignore all characters if CREAD is not set
	 */
	if (cflag & CREAD)
		ctrl2 |= AUART_CTRL2_RXE;
	else
		ctrl2 &= ~AUART_CTRL2_RXE;

	/* figure out the stop bits requested */
	if (cflag & CSTOPB)
		ctrl |= AUART_LINECTRL_STP2;

	/* figure out the hardware flow control settings */
	if (cflag & CRTSCTS)
		ctrl2 |= AUART_CTRL2_CTSEN;
	else
		ctrl2 &= ~AUART_CTRL2_CTSEN;

	/* set baud rate */
	/* 32x oversampled divisor: low 6 bits fractional, rest integer */
	baud = uart_get_baud_rate(u, termios, old, 0, u->uartclk);
	div = u->uartclk * 32 / baud;
	ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F);
	ctrl |= AUART_LINECTRL_BAUD_DIVINT(div >> 6);

	writel(ctrl, u->membase + AUART_LINECTRL);
	writel(ctrl2, u->membase + AUART_CTRL2);
}
/*
 * Initialize a PC-Card / CardBus socket on a TI PCI1410A or PCI1510
 * bridge and probe for an IDE device in the slot.
 *
 * @ide_base_bus: IDE bus number passed through to check_ide_device()
 *
 * Locates the bridge on PCI, reports the detected card type and
 * voltage, programs the ExCA I/O and memory windows to the configured
 * CIS/IO regions, powers the slot, and probes for IDE.  Returns 0 on
 * success, 1 on any failure.  Uses the file-scope globals devbusfn,
 * socket_base and pcmcia_cis_ptr.
 */
int pcmcia_on(int ide_base_bus)
{
	u16 dev_id;
	u32 socket_status;
	int slot = 0;
	int cis_len;
	u16 io_base;
	u16 io_len;

	/*
	 * Find the CardBus PCI device(s).
	 */
	if ((devbusfn = pci_find_devices(supported, 0)) < 0) {
		printf("Ti CardBus: not found\n");
		return 1;
	}
	pci_read_config_word(devbusfn, PCI_DEVICE_ID, &dev_id);

	/* 0xac56 is the PCI1510; anything else here is the PCI1410A. */
	if (dev_id == 0xac56) {
		debug("Enable PCMCIA Ti PCI1510\n");
	} else {
		debug("Enable PCMCIA Ti PCI1410A\n");
	}

	pcmcia_cis_ptr = CONFIG_SYS_PCMCIA_CIS_WIN;
	cis_len = CONFIG_SYS_PCMCIA_CIS_WIN_SIZE;
	io_base = CONFIG_SYS_PCMCIA_IO_WIN;
	io_len = CONFIG_SYS_PCMCIA_IO_WIN_SIZE;

	/*
	 * Setup the PCI device.
	 */
	pci_read_config_dword(devbusfn, PCI_BASE_ADDRESS_0, &socket_base);
	socket_base &= ~0xf;

	/* Socket present-state register: card detect and voltage bits. */
	socket_status = readl(socket_base+8);
	if ((socket_status & 6) == 0) {
		printf("Card Present: ");

		switch (socket_status & 0x3c00) {
		case 0x400:
			printf("5V ");
			break;
		case 0x800:
			printf("3.3V ");
			break;
		case 0xc00:
			printf("3.3/5V ");
			break;
		default:
			printf("unsupported Vcc ");
			break;
		}
		switch (socket_status & 0x30) {
		case 0x10:
			printf("16bit PC-Card\n");
			break;
		case 0x20:
			printf("32bit CardBus Card\n");
			break;
		default:
			printf("8bit PC-Card\n");
			break;
		}
	}

	writeb(0x41, socket_base + 0x806); /* Enable I/O window 0 and memory window 0 */
	writeb(0x0e, socket_base + 0x807); /* Reset I/O window options */

	/* Careful: the linux yenta driver do not seem to reset the offset
	 * in the i/o windows, so leaving them non-zero is a problem */
	writeb(io_base & 0xff, socket_base + 0x808); /* I/O window 0 base address */
	writeb(io_base>>8, socket_base + 0x809);
	writeb((io_base + io_len - 1) & 0xff, socket_base + 0x80a); /* I/O window 0 end address */
	writeb((io_base + io_len - 1)>>8, socket_base + 0x80b);
	writeb(0x00, socket_base + 0x836); /* I/O window 0 offset address 0x000 */
	writeb(0x00, socket_base + 0x837);

	writeb((pcmcia_cis_ptr&0x000ff000) >> 12,
	       socket_base + 0x810); /* Memory window 0 start address bits 19-12 */
	writeb((pcmcia_cis_ptr&0x00f00000) >> 20,
	       socket_base + 0x811); /* Memory window 0 start address bits 23-20 */
	writeb(((pcmcia_cis_ptr+cis_len-1) & 0x000ff000) >> 12,
	       socket_base + 0x812); /* Memory window 0 end address bits 19-12*/
	writeb(((pcmcia_cis_ptr+cis_len-1) & 0x00f00000) >> 20,
	       socket_base + 0x813); /* Memory window 0 end address bits 23-20*/
	writeb(0x00, socket_base + 0x814); /* Memory window 0 offset bits 19-12 */
	writeb(0x40, socket_base + 0x815); /* Memory window 0 offset bits 23-20 and
					    * options (read/write, attribute access) */
	writeb(0x00, socket_base + 0x816); /* ExCA card-detect and general control */
	writeb(0x00, socket_base + 0x81e); /* ExCA global control (interrupt modes) */

	writeb((pcmcia_cis_ptr & 0xff000000) >> 24,
	       socket_base + 0x840); /* Memory window address bits 31-24 */

	/* turn off voltage */
	if (voltage_set(slot, 0, 0)) {
		return 1;
	}

	/* Enable external hardware */
	if (hardware_enable(slot)) {
		return 1;
	}

	if (check_ide_device(slot, ide_base_bus)) {
		return 1;
	}

	return 0;
}
static int sdio_detect(unsigned port) { setbits_le32(P_PREG_PAD_GPIO5_EN_N,1<<29);//CARD_6 return readl(P_PREG_PAD_GPIO5_I)&(1<<29)?1:0; }
/*
 * Refresh one entry of the SPM register shadow cache from hardware,
 * using the per-register offset table to locate the MMIO word.
 */
static inline void msm_spm_load_shadow(
		struct msm_spm_device *dev, unsigned int reg_index)
{
	uint32_t offset = msm_spm_reg_offsets[reg_index];

	dev->reg_shadow[reg_index] = readl(dev->reg_base_addr + offset);
}