int setcorepll(int clock, int freq)
{
    int pll;
    int index;
    struct owl_pll *node;
    struct owl_clocknode *core_clk;
    int swap;
    unsigned long flags;

    if (clock == CLOCK__COREPLL) {
        if (clocks[clock].frequency == freq) {
            return 0;
        }
        pll = clock - CLOCK__COREPLL;
        if (pll != 0) {
            goto fail;
        }
        node = &pllnode[pll];
        index = freq / node->freq.step.step;
        if (node->freq.step.step * index != freq) {
            goto fail;
        }
        if (index < node->range_from) {
            goto fail;
        }
        if (index > node->range_to) {
            index = node->range_to;
            goto fail;
        }
        node->sel = index;

        spin_lock_irqsave(&cpu_lock, flags);

        core_clk = &clocks[CLOCK__CORE_CLK];
        swap = 0;
        if (core_clk->parent == &clocks[clock]) {
            sourcesel(CLOCK__CORE_CLK, COREPLL_CHANGE_MIDDLE_CLK);
            swap = 1;
        }
        if (node->reg_pllfreq) {
            write_clkreg_val(node->reg_pllfreq, node->sel);
            if (node->delay == 0)
                udelay((PLLDELAY + COREPLL_CHANGE_DELAY_DIV - 1) / COREPLL_CHANGE_DELAY_DIV);
            else
                udelay((node->delay + COREPLL_CHANGE_DELAY_DIV - 1) / COREPLL_CHANGE_DELAY_DIV);
        }
        clocks[clock].frequency = freq;
        changeclock(clock);
        if (swap) {
            sourcesel(CLOCK__CORE_CLK, CLOCK__COREPLL);
        }

        spin_unlock_irqrestore(&cpu_lock, flags);
        return 0;
    }

fail:
    return -1;
}
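/*
 * A minimal sketch, not from the driver above: the two udelay() arguments in
 * setcorepll() are the open-coded ceiling division (a + b - 1) / b, which the
 * kernel already provides as DIV_ROUND_UP() in <linux/kernel.h>. The helper
 * name below is illustrative only; PLLDELAY and COREPLL_CHANGE_DELAY_DIV are
 * assumed to be the same macros used by setcorepll().
 */
#include <linux/kernel.h>   /* DIV_ROUND_UP() */
#include <linux/delay.h>    /* udelay() */

static inline void corepll_settle_delay(unsigned int delay_us)
{
    /* Round up so integer division never shortens the settle time. */
    udelay(DIV_ROUND_UP(delay_us ? delay_us : PLLDELAY,
                        COREPLL_CHANGE_DELAY_DIV));
}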
static int rx_sync_cmd(struct aac_dev *dev, u32 command,
    u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
    u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
{
    unsigned long start;
    int ok;

    /*
     * Write the command into Mailbox 0
     */
    writel(command, &dev->IndexRegs->Mailbox[0]);
    /*
     * Write the parameters into Mailboxes 1 - 6
     */
    writel(p1, &dev->IndexRegs->Mailbox[1]);
    writel(p2, &dev->IndexRegs->Mailbox[2]);
    writel(p3, &dev->IndexRegs->Mailbox[3]);
    writel(p4, &dev->IndexRegs->Mailbox[4]);

    /*
     * Clear the synch command doorbell to start on a clean slate.
     */
    rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);

    /*
     * Disable doorbell interrupts
     */
    rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);

    /*
     * Force the completion of the mask register write before issuing
     * the interrupt.
     */
    rx_readb(dev, MUnit.OIMR);

    /*
     * Signal that there is a new synch command
     */
    rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);

    ok = 0;
    start = jiffies;

    /*
     * Wait up to 30 seconds
     */
    while (time_before(jiffies, start + 30 * HZ)) {
        udelay(5);  /* Delay 5 microseconds to let Mon960 get info. */
        /*
         * Mon960 will set doorbell0 bit when it has completed the command.
         */
        if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
            /*
             * Clear the doorbell.
             */
            rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
            ok = 1;
            break;
        }
        /*
         * Yield the processor in case we are slow
         */
        msleep(1);
    }
    if (unlikely(ok != 1)) {
        /*
         * Restore interrupt mask even though we timed out
         */
        aac_adapter_enable_int(dev);
        return -ETIMEDOUT;
    }
    /*
     * Pull the synch status from Mailbox 0.
     */
    if (status)
        *status = readl(&dev->IndexRegs->Mailbox[0]);
    if (r1)
        *r1 = readl(&dev->IndexRegs->Mailbox[1]);
    if (r2)
        *r2 = readl(&dev->IndexRegs->Mailbox[2]);
    if (r3)
        *r3 = readl(&dev->IndexRegs->Mailbox[3]);
    if (r4)
        *r4 = readl(&dev->IndexRegs->Mailbox[4]);

    /*
     * Clear the synch command doorbell.
     */
    rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);

    /*
     * Restore interrupt mask
     */
    aac_adapter_enable_int(dev);
    return 0;
}
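/*
 * A minimal sketch of the completion poll used in rx_sync_cmd() above:
 * bounded by a jiffies deadline, with a short udelay() before each check and
 * an msleep(1) to yield between checks. The helper name is illustrative; the
 * real function also clears the doorbell and restores the interrupt mask.
 */
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

static int wait_for_doorbell(struct aac_dev *dev, unsigned long timeout_secs)
{
    unsigned long deadline = jiffies + timeout_secs * HZ;

    while (time_before(jiffies, deadline)) {
        udelay(5);  /* give the adapter firmware time to respond */
        if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0)
            return 0;
        msleep(1);  /* yield the processor between polls */
    }
    return -ETIMEDOUT;
}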
static int msm_hsic_resume(struct msm_hsic_hcd *mehci)
{
    struct usb_hcd *hcd = hsic_to_hcd(mehci);
    int cnt = 0, ret;
    unsigned temp;
    int min_vol, max_vol;

    if (!atomic_read(&mehci->in_lpm)) {
        dev_dbg(mehci->dev, "%s called in !in_lpm\n", __func__);
        return 0;
    }

    if (mehci->wakeup_irq_enabled) {
        disable_irq_wake(mehci->wakeup_irq);
        disable_irq_nosync(mehci->wakeup_irq);
        mehci->wakeup_irq_enabled = 0;
    }

    wake_lock(&mehci->wlock);

    if (mehci->bus_perf_client && debug_bus_voting_enabled) {
        mehci->bus_vote = true;
        queue_work(ehci_wq, &mehci->bus_vote_w);
    }

    min_vol = vdd_val[mehci->vdd_type][VDD_MIN];
    max_vol = vdd_val[mehci->vdd_type][VDD_MAX];

    ret = regulator_set_voltage(mehci->hsic_vddcx, min_vol, max_vol);
    if (ret < 0)
        dev_err(mehci->dev, "unable to set nominal vddcx voltage (no VDD MIN)\n");

    clk_prepare_enable(mehci->core_clk);
    clk_prepare_enable(mehci->phy_clk);
    clk_prepare_enable(mehci->cal_clk);
    clk_prepare_enable(mehci->ahb_clk);

    temp = readl_relaxed(USB_USBCMD);
    temp &= ~ASYNC_INTR_CTRL;
    temp &= ~ULPI_STP_CTRL;
    writel_relaxed(temp, USB_USBCMD);

    if (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD))
        goto skip_phy_resume;

    temp = readl_relaxed(USB_PORTSC);
    temp &= ~(PORT_RWC_BITS | PORTSC_PHCD);
    writel_relaxed(temp, USB_PORTSC);

    while (cnt < PHY_RESUME_TIMEOUT_USEC) {
        if (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD) &&
            (readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_SYNC_STATE))
            break;
        udelay(1);
        cnt++;
    }

    if (cnt >= PHY_RESUME_TIMEOUT_USEC) {
        /*
         * This is a fatal error. Reset the link and
         * PHY to make hsic working.
         */
        dev_err(mehci->dev, "Unable to resume USB. Reset the hsic\n");
        msm_hsic_config_gpios(mehci, 0);
        msm_hsic_reset(mehci);
    }

skip_phy_resume:

    usb_hcd_resume_root_hub(hcd);

    atomic_set(&mehci->in_lpm, 0);

    if (mehci->async_int) {
        mehci->async_int = false;
        pm_runtime_put_noidle(mehci->dev);
        enable_irq(hcd->irq);
    }

    if (atomic_read(&mehci->pm_usage_cnt)) {
        atomic_set(&mehci->pm_usage_cnt, 0);
        pm_runtime_put_noidle(mehci->dev);
    }

    dev_dbg(mehci->dev, "HSIC-USB exited from low power mode\n");

    return 0;
}
static int setpll(int clock, int freq)
{
    int pll;
    int index;
    struct owl_pll *node;

    if (clock >= 0 && clock < CLOCK__MAX) {
        if (clocks[clock].type == TYPE_PLL) {
            pll = clock - CLOCK__COREPLL;
            if (pll < 0 || pll >= PLL__MAX) {
                goto fail;
            }
            node = &pllnode[pll];
            switch (node->type) {
            case PLL_T_STEP:
            case PLL_T_D4DYN:
                index = (freq / node->freq.step.step) - node->freq.step.offset;
                if ((node->freq.step.step + node->freq.step.offset) * index != freq) {
                    goto fail;
                }
                if (index < node->range_from) {
                    goto fail;
                }
                if (index > node->range_to) {
                    goto fail;
                }
                node->sel = index;
                break;
            case PLL_T_FREQ:
                for (index = node->range_from; index <= node->range_to; index++) {
                    if (node->freq.freqtab[index] == freq) {
                        goto found;
                    }
                }
                goto fail;
found:
                node->sel = index;
                break;
            default:
                break;
            }
            if (clocks[clock].frequency == freq) {
                return 0;
            }
            if (node->reg_pllfreq) {
                pllsub_putaway(clock);
                write_clkreg_val(node->reg_pllfreq, node->sel);
                if (node->delay == 0)
                    udelay(PLLDELAY);
                else
                    udelay(node->delay);
                pllsub_resume(clock);
            }
            clocks[clock].frequency = freq;
            changeclock(clock);
            if (pll == PLL__TVOUTPLL) {
                pllnode[PLL__DEEPCOLORPLL].freq.step.step = freq / 4;
                clocks[CLOCK__DEEPCOLORPLL].frequency =
                    pllnode[PLL__DEEPCOLORPLL].freq.step.step *
                    pllnode[PLL__DEEPCOLORPLL].sel;
                clocks[CLOCK__DEEPCOLORPLL].changed = 0;
            }
            return 0;
        }
    }

fail:
    return -1;
}
static int send_pcc_cmd(u16 cmd)
{
    int ret = -EIO;
    struct acpi_pcct_shared_memory *generic_comm_base =
        (struct acpi_pcct_shared_memory *) pcc_comm_addr;
    static ktime_t last_cmd_cmpl_time, last_mpar_reset;
    static int mpar_count;
    unsigned int time_delta;

    /*
     * For CMD_WRITE we know for a fact the caller should have checked
     * the channel before writing to PCC space
     */
    if (cmd == CMD_READ) {
        ret = check_pcc_chan();
        if (ret)
            return ret;
    }

    /*
     * Handle the Minimum Request Turnaround Time(MRTT)
     * "The minimum amount of time that OSPM must wait after the completion
     * of a command before issuing the next command, in microseconds"
     */
    if (pcc_mrtt) {
        time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
        if (pcc_mrtt > time_delta)
            udelay(pcc_mrtt - time_delta);
    }

    /*
     * Handle the non-zero Maximum Periodic Access Rate(MPAR)
     * "The maximum number of periodic requests that the subspace channel can
     * support, reported in commands per minute. 0 indicates no limitation."
     *
     * This parameter should be ideally zero or large enough so that it can
     * handle maximum number of requests that all the cores in the system can
     * collectively generate. If it is not, we will follow the spec and just
     * not send the request to the platform after hitting the MPAR limit in
     * any 60s window
     */
    if (pcc_mpar) {
        if (mpar_count == 0) {
            time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
            if (time_delta < 60 * MSEC_PER_SEC) {
                pr_debug("PCC cmd not sent due to MPAR limit");
                return -EIO;
            }
            last_mpar_reset = ktime_get();
            mpar_count = pcc_mpar;
        }
        mpar_count--;
    }

    /* Write to the shared comm region. */
    writew_relaxed(cmd, &generic_comm_base->command);

    /* Flip CMD COMPLETE bit */
    writew_relaxed(0, &generic_comm_base->status);

    /* Ring doorbell */
    ret = mbox_send_message(pcc_channel, &cmd);
    if (ret < 0) {
        pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
               cmd, ret);
        return ret;
    }

    /*
     * For READs we need to ensure the cmd completed to ensure
     * the ensuing read()s can proceed. For WRITEs we dont care
     * because the actual write()s are done before coming here
     * and the next READ or WRITE will check if the channel
     * is busy/free at the entry of this call.
     *
     * If Minimum Request Turnaround Time is non-zero, we need
     * to record the completion time of both READ and WRITE
     * command for proper handling of MRTT, so we need to check
     * for pcc_mrtt in addition to CMD_READ
     */
    if (cmd == CMD_READ || pcc_mrtt) {
        ret = check_pcc_chan();
        if (pcc_mrtt)
            last_cmd_cmpl_time = ktime_get();
    }

    mbox_client_txdone(pcc_channel, ret);
    return ret;
}
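/*
 * A minimal sketch of the MRTT wait used in send_pcc_cmd() above, shown in
 * isolation. It assumes an MRTT value in microseconds and the recorded
 * completion time of the previous command; the helper name is illustrative
 * and not part of the driver.
 */
#include <linux/ktime.h>
#include <linux/delay.h>

static void pcc_wait_mrtt(ktime_t last_cmd_cmpl_time, unsigned int pcc_mrtt_us)
{
    s64 elapsed_us = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);

    /* Busy-wait only for the portion of the turnaround time still owed. */
    if (pcc_mrtt_us > elapsed_us)
        udelay(pcc_mrtt_us - elapsed_us);
}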
static int msm_hsic_suspend(struct msm_hsic_hcd *mehci)
{
    struct usb_hcd *hcd = hsic_to_hcd(mehci);
    int cnt = 0, ret;
    u32 val;
    int none_vol, max_vol;

    if (atomic_read(&mehci->in_lpm)) {
        dev_dbg(mehci->dev, "%s called in lpm\n", __func__);
        return 0;
    }

    if (!(readl_relaxed(USB_PORTSC) & PORT_PE)) {
        dev_dbg(mehci->dev, "%s:port is not enabled skip suspend\n",
            __func__);
        return -EAGAIN;
    }

    disable_irq(hcd->irq);

    /* make sure we don't race against a remote wakeup */
    if (test_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags) ||
        readl_relaxed(USB_PORTSC) & PORT_RESUME) {
        dev_dbg(mehci->dev, "wakeup pending, aborting suspend\n");
        enable_irq(hcd->irq);
        return -EBUSY;
    }

    /*
     * PHY may take some time or even fail to enter into low power
     * mode (LPM). Hence poll for 500 msec and reset the PHY and link
     * in failure case.
     */
    val = readl_relaxed(USB_PORTSC);
    val &= ~PORT_RWC_BITS;
    val |= PORTSC_PHCD;
    writel_relaxed(val, USB_PORTSC);

    while (cnt < PHY_SUSPEND_TIMEOUT_USEC) {
        if (readl_relaxed(USB_PORTSC) & PORTSC_PHCD)
            break;
        udelay(1);
        cnt++;
    }

    if (cnt >= PHY_SUSPEND_TIMEOUT_USEC) {
        dev_err(mehci->dev, "Unable to suspend PHY\n");
        msm_hsic_config_gpios(mehci, 0);
        msm_hsic_reset(mehci);
    }

    /*
     * PHY has capability to generate interrupt asynchronously in low
     * power mode (LPM). This interrupt is level triggered. So USB IRQ
     * line must be disabled till async interrupt enable bit is cleared
     * in USBCMD register. Assert STP (ULPI interface STOP signal) to
     * block data communication from PHY.
     */
    writel_relaxed(readl_relaxed(USB_USBCMD) | ASYNC_INTR_CTRL |
            ULPI_STP_CTRL, USB_USBCMD);

    /*
     * Ensure that hardware is put in low power mode before
     * clocks are turned OFF and VDD is allowed to minimize.
     */
    mb();

    clk_disable_unprepare(mehci->core_clk);
    clk_disable_unprepare(mehci->phy_clk);
    clk_disable_unprepare(mehci->cal_clk);
    clk_disable_unprepare(mehci->ahb_clk);

    none_vol = vdd_val[mehci->vdd_type][VDD_NONE];
    max_vol = vdd_val[mehci->vdd_type][VDD_MAX];

    ret = regulator_set_voltage(mehci->hsic_vddcx, none_vol, max_vol);
    if (ret < 0)
        dev_err(mehci->dev, "unable to set vddcx voltage for VDD MIN\n");

    if (mehci->bus_perf_client && debug_bus_voting_enabled) {
        mehci->bus_vote = false;
        queue_work(ehci_wq, &mehci->bus_vote_w);
    }

    atomic_set(&mehci->in_lpm, 1);
    enable_irq(hcd->irq);

    mehci->wakeup_irq_enabled = 1;
    enable_irq_wake(mehci->wakeup_irq);
    enable_irq(mehci->wakeup_irq);

    wake_unlock(&mehci->wlock);

    dev_dbg(mehci->dev, "HSIC-USB in low power mode\n");

    return 0;
}
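/*
 * Both msm_hsic_suspend() and msm_hsic_resume() poll PORTSC once per
 * microsecond up to a fixed budget. A minimal, generic sketch of that
 * pattern, assuming readl_relaxed() semantics; the helper name is
 * illustrative and not part of the driver.
 */
#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>

static bool poll_reg_bit_set(void __iomem *reg, u32 mask,
                 unsigned int timeout_us)
{
    unsigned int cnt;

    for (cnt = 0; cnt < timeout_us; cnt++) {
        if (readl_relaxed(reg) & mask)
            return true;    /* bit observed within the budget */
        udelay(1);
    }
    return false;           /* caller resets PHY/link on timeout */
}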
/*!
 * The function is registered in the adapter structure. It is called when an MXC
 * driver wishes to transfer data to a device connected to the I2C bus.
 *
 * @param adap adapter structure for the MXC i2c device
 * @param msgs[] array of messages to be transferred to the device
 * @param num number of messages to be transferred to the device
 *
 * @return The function returns the number of messages transferred,
 *         \b -EREMOTEIO on I2C failure and 0 if the num argument is
 *         less than 1.
 */
static int mxc_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
    mxc_i2c_device *dev = (mxc_i2c_device *) (i2c_get_adapdata(adap));
    int i, ret = 0, addr_comp = 0;
    volatile unsigned int sr;
    int retry = 5;

    if (dev->low_power) {
        dev_err(&dev->adap.dev, "I2C Device in low power mode\n");
        return -EREMOTEIO;
    }

    if (num < 1) {
        return 0;
    }

    mxc_i2c_module_en(dev, msgs[0].flags);
    sr = readw(dev->membase + MXC_I2SR);
    /*
     * Check bus state
     */
    while ((sr & MXC_I2SR_IBB) && retry--) {
        udelay(5);
        sr = readw(dev->membase + MXC_I2SR);
    }
    if ((sr & MXC_I2SR_IBB) && retry < 0) {
        mxc_i2c_module_dis(dev);
        dev_err(&dev->adap.dev, "Bus busy\n");
        return -EREMOTEIO;
    }
    //gpio_i2c_active(dev->adap.id);
    dev->transfer_done = false;
    dev->tx_success = false;
    for (i = 0; i < num && ret >= 0; i++) {
        addr_comp = 0;
        /*
         * Send the slave address and transfer direction in the
         * address cycle
         */
        if (i == 0) {
            /*
             * Send a start or repeat start signal
             */
            if (mxc_i2c_start(dev, &msgs[0]))
                return -EREMOTEIO;
            /* Wait for the address cycle to complete */
            if (mxc_i2c_wait_for_tc(dev, msgs[0].flags)) {
                mxc_i2c_stop(dev);
                //gpio_i2c_inactive(dev->adap.id);
                mxc_i2c_module_dis(dev);
                return -EREMOTEIO;
            }
            addr_comp = 1;
        } else {
            /*
             * Generate repeat start only if required i.e the address
             * changed or the transfer direction changed
             */
            if ((msgs[i].addr != msgs[i - 1].addr) ||
                ((msgs[i].flags & I2C_M_RD) !=
                 (msgs[i - 1].flags & I2C_M_RD))) {
                mxc_i2c_repstart(dev, &msgs[i]);
                /* Wait for the address cycle to complete */
                if (mxc_i2c_wait_for_tc(dev, msgs[i].flags)) {
                    mxc_i2c_stop(dev);
                    //gpio_i2c_inactive(dev->adap.id);
                    mxc_i2c_module_dis(dev);
                    return -EREMOTEIO;
                }
                addr_comp = 1;
            }
        }

        /* Transfer the data */
        if (msgs[i].flags & I2C_M_RD) {
            /* Read the data */
            ret = mxc_i2c_readbytes(dev, &msgs[i], (i + 1 == num),
                        addr_comp);
            if (ret < 0) {
                dev_err(&dev->adap.dev, "mxc_i2c_readbytes:"
                    " fail.\n");
                break;
            }
        } else {
            /* Write the data */
            ret = mxc_i2c_writebytes(dev, &msgs[i], (i + 1 == num));
            if (ret < 0) {
                dev_err(&dev->adap.dev, "mxc_i2c_writebytes:"
                    " fail.\n");
                break;
            }
        }
    }

    //gpio_i2c_inactive(dev->adap.id);
    mxc_i2c_module_dis(dev);
    /*
     * Decrease by 1 as we do not want Start message to be included in
     * the count
     */
    return (i < 0 ? ret : i);
}
static int xiic_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[], int num)
{
    struct xiic_data *dev = (struct xiic_data *) i2c_adap;
    struct i2c_msg *pmsg;
    u32 options;
    int i, retries;
    u32 Status;
    u32 writeop;

    for (i = 0; i < num; i++) {
        pmsg = &msgs[i];
        if (!pmsg->len)     /* If length is zero */
            continue;       /* on to the next request. */

        /*
         * This code checks up to 4 times for the
         * bus busy condition.
         */
        retries = 4;
        while ((XIic_IsIicBusy(&dev->Iic) == TRUE) && (retries-- != 0)) {
            set_current_state(TASK_INTERRUPTIBLE);
            schedule_timeout(HZ / 250);
        }

        /* If bus is still busy, bail */
        if (XIic_IsIicBusy(&dev->Iic) == TRUE) {
            printk(KERN_WARNING
                   "%s #%d: Could not talk to device 0x%2x (%d), bus always busy, trying to reset\n",
                   dev->adap.name, dev->index, pmsg->addr,
                   dev->status_intr_flag);
            /* Try stopping, resetting and starting device to clear condition */
            if (XIic_Stop(&dev->Iic) != XST_SUCCESS) {
                /* The bus was in use.. */
                printk(KERN_WARNING
                       "%s #%d: Could not stop device. Restart from higher layer.\n",
                       dev->adap.name, dev->index);
                return -ENXIO;
            } else {
                XIic_Reset(&dev->Iic);
                if (XIic_Start(&dev->Iic) != XST_SUCCESS) {
                    printk(KERN_ERR "%s #%d: Could not start device.\n",
                           dev->adap.name, dev->index);
                    return -ENODEV;
                }
                return -ENXIO;
            }
        }

        options = 0;
        if (pmsg->flags & I2C_M_TEN)
            options |= XII_SEND_10_BIT_OPTION;
        XIic_SetOptions(&dev->Iic, options);

        if (XIic_SetAddress(&dev->Iic, XII_ADDR_TO_SEND_TYPE,
                    pmsg->addr) != XST_SUCCESS) {
            printk(KERN_WARNING "%s #%d: Could not set address to 0x%2x.\n",
                   dev->adap.name, dev->index, pmsg->addr);
            return -EIO;
        }
        dev->transmit_intr_flag = 0xFFFFFFFF;
        dev->receive_intr_flag = 0xFFFFFFFF;
        dev->status_intr_flag = 0xFFFFFFFF;

        /* set the writeop flag to 0 so the adapter does not wait
         * at bottom of loop */
        writeop = 0;

        dev->Iic.Stats.TxErrors = 0;
        if (pmsg->flags & I2C_M_RD) {
            Status = XIic_MasterRecv(&dev->Iic, pmsg->buf, pmsg->len);
        } else {
            Status = XIic_MasterSend(&dev->Iic, pmsg->buf, pmsg->len);
        }

        if (Status != XST_SUCCESS) {
            printk(KERN_WARNING "%s #%d: Unexpected error %d.\n",
                   dev->adap.name, dev->index, (int)Status);
            return -EIO;
        }

        /*
         * Wait till the data is transmitted or received. If there is an error
         * retry for 10 times.
         */
        retries = 10;
        if (pmsg->flags & I2C_M_RD) {
            while ((((volatile int)(dev->receive_intr_flag)) != 0) &&
                   (retries != 0)) {
                if (dev->Iic.Stats.TxErrors != 0) {
                    udelay(25);
                    Status = XIic_MasterRecv(&dev->Iic, pmsg->buf, pmsg->len);
                    dev->Iic.Stats.TxErrors = 0;
                    retries--;
                }
                /* the udelay was not working for Microblaze
                 * and this seems like a better solution */
                schedule_timeout_interruptible(1);
            }
        } else {
            while ((((volatile int)(dev->transmit_intr_flag)) != 0) &&
                   (retries != 0)) {
                if (dev->Iic.Stats.TxErrors != 0) {
                    udelay(25);
                    Status = XIic_MasterSend(&dev->Iic, pmsg->buf, pmsg->len);
                    dev->Iic.Stats.TxErrors = 0;
                    retries--;
                }
                /* the udelay was not working for Microblaze
                 * and this seems like a better solution */
                schedule_timeout_interruptible(1);
            }
        }

        if (retries == 0) {
            printk("Unable to talk to Device\n");
            printk("Wrong Slave address or Slave device Busy\n");
        }
    }
    return num;
}
static void __init ap4evb_init(void)
{
    u32 srcr4;
    struct clk *clk;

    sh7372_pinmux_init();

    /* enable SCIFA0 */
    gpio_request(GPIO_FN_SCIFA0_TXD, NULL);
    gpio_request(GPIO_FN_SCIFA0_RXD, NULL);

    /* enable SMSC911X */
    gpio_request(GPIO_FN_CS5A, NULL);
    gpio_request(GPIO_FN_IRQ6_39, NULL);

    /* enable Debug switch (S6) */
    gpio_request(GPIO_PORT32, NULL);
    gpio_request(GPIO_PORT33, NULL);
    gpio_request(GPIO_PORT34, NULL);
    gpio_request(GPIO_PORT35, NULL);
    gpio_direction_input(GPIO_PORT32);
    gpio_direction_input(GPIO_PORT33);
    gpio_direction_input(GPIO_PORT34);
    gpio_direction_input(GPIO_PORT35);
    gpio_export(GPIO_PORT32, 0);
    gpio_export(GPIO_PORT33, 0);
    gpio_export(GPIO_PORT34, 0);
    gpio_export(GPIO_PORT35, 0);

    /* SDHI0 */
    gpio_request(GPIO_FN_SDHICD0, NULL);
    gpio_request(GPIO_FN_SDHIWP0, NULL);
    gpio_request(GPIO_FN_SDHICMD0, NULL);
    gpio_request(GPIO_FN_SDHICLK0, NULL);
    gpio_request(GPIO_FN_SDHID0_3, NULL);
    gpio_request(GPIO_FN_SDHID0_2, NULL);
    gpio_request(GPIO_FN_SDHID0_1, NULL);
    gpio_request(GPIO_FN_SDHID0_0, NULL);

    /* SDHI1 */
    gpio_request(GPIO_FN_SDHICMD1, NULL);
    gpio_request(GPIO_FN_SDHICLK1, NULL);
    gpio_request(GPIO_FN_SDHID1_3, NULL);
    gpio_request(GPIO_FN_SDHID1_2, NULL);
    gpio_request(GPIO_FN_SDHID1_1, NULL);
    gpio_request(GPIO_FN_SDHID1_0, NULL);

    /* MMCIF */
    gpio_request(GPIO_FN_MMCD0_0, NULL);
    gpio_request(GPIO_FN_MMCD0_1, NULL);
    gpio_request(GPIO_FN_MMCD0_2, NULL);
    gpio_request(GPIO_FN_MMCD0_3, NULL);
    gpio_request(GPIO_FN_MMCD0_4, NULL);
    gpio_request(GPIO_FN_MMCD0_5, NULL);
    gpio_request(GPIO_FN_MMCD0_6, NULL);
    gpio_request(GPIO_FN_MMCD0_7, NULL);
    gpio_request(GPIO_FN_MMCCMD0, NULL);
    gpio_request(GPIO_FN_MMCCLK0, NULL);

    /* USB enable */
    gpio_request(GPIO_FN_VBUS0_1, NULL);
    gpio_request(GPIO_FN_IDIN_1_18, NULL);
    gpio_request(GPIO_FN_PWEN_1_115, NULL);
    gpio_request(GPIO_FN_OVCN_1_114, NULL);
    gpio_request(GPIO_FN_EXTLP_1, NULL);
    gpio_request(GPIO_FN_OVCN2_1, NULL);

    /* setup USB phy */
    __raw_writew(0x8a0a, 0xE6058130);   /* USBCR4 */

    /* enable FSI2 port A (ak4643) */
    gpio_request(GPIO_FN_FSIAIBT, NULL);
    gpio_request(GPIO_FN_FSIAILR, NULL);
    gpio_request(GPIO_FN_FSIAISLD, NULL);
    gpio_request(GPIO_FN_FSIAOSLD, NULL);
    gpio_request(GPIO_PORT161, NULL);
    gpio_direction_output(GPIO_PORT161, 0); /* slave */

    gpio_request(GPIO_PORT9, NULL);
    gpio_request(GPIO_PORT10, NULL);
    gpio_no_direction(GPIO_PORT9CR);  /* FSIAOBT needs no direction */
    gpio_no_direction(GPIO_PORT10CR); /* FSIAOLR needs no direction */

    /* card detect pin for MMC slot (CN7) */
    gpio_request(GPIO_PORT41, NULL);
    gpio_direction_input(GPIO_PORT41);

    /* setup FSI2 port B (HDMI) */
    gpio_request(GPIO_FN_FSIBCK, NULL);
    __raw_writew(__raw_readw(USCCR1) & ~(1 << 6), USCCR1); /* use SPDIF */

    /* set SPU2 clock to 119.6 MHz */
    clk = clk_get(NULL, "spu_clk");
    if (!IS_ERR(clk)) {
        clk_set_rate(clk, clk_round_rate(clk, 119600000));
        clk_put(clk);
    }

    /*
     * set irq priority, to avoid sound chopping
     * when NFS rootfs is used
     * FSI(3) > SMSC911X(2)
     */
    intc_set_priority(IRQ_FSI, 3);

    i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices));
    i2c_register_board_info(1, i2c1_devices, ARRAY_SIZE(i2c1_devices));

#ifdef CONFIG_AP4EVB_QHD
    /*
     * For QHD Panel (MIPI-DSI, CONFIG_AP4EVB_QHD=y) and
     * IRQ28 for Touch Panel, set dip switches S3, S43 as OFF, ON.
     */

    /* enable KEYSC */
    gpio_request(GPIO_FN_KEYOUT0, NULL);
    gpio_request(GPIO_FN_KEYOUT1, NULL);
    gpio_request(GPIO_FN_KEYOUT2, NULL);
    gpio_request(GPIO_FN_KEYOUT3, NULL);
    gpio_request(GPIO_FN_KEYOUT4, NULL);
    gpio_request(GPIO_FN_KEYIN0_136, NULL);
    gpio_request(GPIO_FN_KEYIN1_135, NULL);
    gpio_request(GPIO_FN_KEYIN2_134, NULL);
    gpio_request(GPIO_FN_KEYIN3_133, NULL);
    gpio_request(GPIO_FN_KEYIN4, NULL);

    /* enable TouchScreen */
    irq_set_irq_type(IRQ28, IRQ_TYPE_LEVEL_LOW);

    tsc_device.irq = IRQ28;
    i2c_register_board_info(1, &tsc_device, 1);

    /* LCDC0 */
    lcdc_info.clock_source = LCDC_CLK_PERIPHERAL;
    lcdc_info.ch[0].interface_type = RGB24;
    lcdc_info.ch[0].clock_divider = 1;
    lcdc_info.ch[0].flags = LCDC_FLAGS_DWPOL;
    lcdc_info.ch[0].lcd_size_cfg.width = 44;
    lcdc_info.ch[0].lcd_size_cfg.height = 79;

    platform_add_devices(qhd_devices, ARRAY_SIZE(qhd_devices));
#else
    /*
     * For WVGA Panel (18-bit RGB, CONFIG_AP4EVB_WVGA=y) and
     * IRQ7 for Touch Panel, set dip switches S3, S43 to ON, OFF.
     */
    gpio_request(GPIO_FN_LCDD17, NULL);
    gpio_request(GPIO_FN_LCDD16, NULL);
    gpio_request(GPIO_FN_LCDD15, NULL);
    gpio_request(GPIO_FN_LCDD14, NULL);
    gpio_request(GPIO_FN_LCDD13, NULL);
    gpio_request(GPIO_FN_LCDD12, NULL);
    gpio_request(GPIO_FN_LCDD11, NULL);
    gpio_request(GPIO_FN_LCDD10, NULL);
    gpio_request(GPIO_FN_LCDD9, NULL);
    gpio_request(GPIO_FN_LCDD8, NULL);
    gpio_request(GPIO_FN_LCDD7, NULL);
    gpio_request(GPIO_FN_LCDD6, NULL);
    gpio_request(GPIO_FN_LCDD5, NULL);
    gpio_request(GPIO_FN_LCDD4, NULL);
    gpio_request(GPIO_FN_LCDD3, NULL);
    gpio_request(GPIO_FN_LCDD2, NULL);
    gpio_request(GPIO_FN_LCDD1, NULL);
    gpio_request(GPIO_FN_LCDD0, NULL);
    gpio_request(GPIO_FN_LCDDISP, NULL);
    gpio_request(GPIO_FN_LCDDCK, NULL);

    gpio_request(GPIO_PORT189, NULL); /* backlight */
    gpio_direction_output(GPIO_PORT189, 1);

    gpio_request(GPIO_PORT151, NULL); /* LCDDON */
    gpio_direction_output(GPIO_PORT151, 1);

    lcdc_info.clock_source = LCDC_CLK_BUS;
    lcdc_info.ch[0].interface_type = RGB18;
    lcdc_info.ch[0].clock_divider = 3;
    lcdc_info.ch[0].flags = 0;
    lcdc_info.ch[0].lcd_size_cfg.width = 152;
    lcdc_info.ch[0].lcd_size_cfg.height = 91;

    /* enable TouchScreen */
    irq_set_irq_type(IRQ7, IRQ_TYPE_LEVEL_LOW);

    tsc_device.irq = IRQ7;
    i2c_register_board_info(0, &tsc_device, 1);
#endif /* CONFIG_AP4EVB_QHD */

    /* CEU */

    /*
     * TODO: reserve memory for V4L2 DMA buffers, when a suitable API
     * becomes available
     */

    /* MIPI-CSI stuff */
    gpio_request(GPIO_FN_VIO_CKO, NULL);

    clk = clk_get(NULL, "vck1_clk");
    if (!IS_ERR(clk)) {
        clk_set_rate(clk, clk_round_rate(clk, 13000000));
        clk_enable(clk);
        clk_put(clk);
    }

    sh7372_add_standard_devices();

    /* HDMI */
    gpio_request(GPIO_FN_HDMI_HPD, NULL);
    gpio_request(GPIO_FN_HDMI_CEC, NULL);

    /* Reset HDMI, must be held at least one EXTALR (32768Hz) period */
#define SRCR4 0xe61580bc
    srcr4 = __raw_readl(SRCR4);
    __raw_writel(srcr4 | (1 << 13), SRCR4);
    udelay(50);
    __raw_writel(srcr4 & ~(1 << 13), SRCR4);

    platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices));

    hdmi_init_pm_clock();
    fsi_init_pm_clock();
    sh7372_pm_init();
}
static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
    struct mmc_omap_slot *slot = mmc_priv(mmc);
    struct mmc_omap_host *host = slot->host;
    int i, dsor;
    int clk_enabled;

    mmc_omap_select_slot(slot, 0);

    dsor = mmc_omap_calc_divisor(mmc, ios);

    if (ios->vdd != slot->vdd)
        slot->vdd = ios->vdd;

    clk_enabled = 0;
    switch (ios->power_mode) {
    case MMC_POWER_OFF:
        mmc_omap_set_power(slot, 0, ios->vdd);
        break;
    case MMC_POWER_UP:
        /* Cannot touch dsor yet, just power up MMC */
        mmc_omap_set_power(slot, 1, ios->vdd);
        goto exit;
    case MMC_POWER_ON:
        mmc_omap_fclk_enable(host, 1);
        clk_enabled = 1;
        dsor |= 1 << 11;
        break;
    }

    if (slot->bus_mode != ios->bus_mode) {
        if (slot->pdata->set_bus_mode != NULL)
            slot->pdata->set_bus_mode(mmc_dev(mmc), slot->id,
                          ios->bus_mode);
        slot->bus_mode = ios->bus_mode;
    }

    /* On insanely high arm_per frequencies something sometimes
     * goes somehow out of sync, and the POW bit is not being set,
     * which results in the while loop below getting stuck.
     * Writing to the CON register twice seems to do the trick. */
    for (i = 0; i < 2; i++)
        OMAP_MMC_WRITE(host, CON, dsor);
    slot->saved_con = dsor;
    if (ios->power_mode == MMC_POWER_ON) {
        /* worst case at 400kHz, 80 cycles makes 200 microsecs */
        int usecs = 250;

        /* Send clock cycles, poll completion */
        OMAP_MMC_WRITE(host, IE, 0);
        OMAP_MMC_WRITE(host, STAT, 0xffff);
        OMAP_MMC_WRITE(host, CMD, 1 << 7);
        while (usecs > 0 && (OMAP_MMC_READ(host, STAT) & 1) == 0) {
            udelay(1);
            usecs--;
        }
        OMAP_MMC_WRITE(host, STAT, 1);
    }

exit:
    mmc_omap_release_slot(slot, clk_enabled);
}
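/*
 * The 250 us budget in mmc_omap_set_ios() follows from the comment above it:
 * the 80 initialization clock cycles sent by writing CMD bit 7 take
 * 80 / 400000 Hz = 200 us at the worst-case 400 kHz card clock, and the loop
 * simply adds some margin. A hedged sketch of that arithmetic; the helper
 * name is illustrative, not part of the driver.
 */
#include <linux/kernel.h>   /* DIV_ROUND_UP() */

static inline unsigned int mmc_cycles_to_usecs(unsigned int cycles,
                           unsigned int clk_hz)
{
    /* e.g. DIV_ROUND_UP(80 * 1000000, 400000) = 200 us */
    return DIV_ROUND_UP(cycles * 1000000u, clk_hz);
}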
/* port->lock is not held.  */
static void sc26xx_set_termios(struct uart_port *port, struct ktermios *termios,
                   struct ktermios *old)
{
    unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
    unsigned int quot = uart_get_divisor(port, baud);
    unsigned int iflag, cflag;
    unsigned long flags;
    u8 mr1, mr2, csr;

    spin_lock_irqsave(&port->lock, flags);

    while ((READ_SC_PORT(port, SR) & ((1 << 3) | (1 << 2))) != 0xc)
        udelay(2);

    WRITE_SC_PORT(port, CR, CR_DIS_TX | CR_DIS_RX);

    iflag = termios->c_iflag;
    cflag = termios->c_cflag;

    port->read_status_mask = SR_OVERRUN;
    if (iflag & INPCK)
        port->read_status_mask |= SR_PARITY | SR_FRAME;
    if (iflag & (BRKINT | PARMRK))
        port->read_status_mask |= SR_BREAK;

    port->ignore_status_mask = 0;
    if (iflag & IGNBRK)
        port->ignore_status_mask |= SR_BREAK;
    if ((cflag & CREAD) == 0)
        port->ignore_status_mask |= SR_BREAK | SR_FRAME |
                        SR_PARITY | SR_OVERRUN;

    switch (cflag & CSIZE) {
    case CS5:
        mr1 = 0x00;
        break;
    case CS6:
        mr1 = 0x01;
        break;
    case CS7:
        mr1 = 0x02;
        break;
    default:
    case CS8:
        mr1 = 0x03;
        break;
    }
    mr2 = 0x07;
    if (cflag & CSTOPB)
        mr2 = 0x0f;
    if (cflag & PARENB) {
        if (cflag & PARODD)
            mr1 |= (1 << 2);
    } else
        mr1 |= (2 << 3);

    switch (baud) {
    case 50:
        csr = 0x00;
        break;
    case 110:
        csr = 0x11;
        break;
    case 134:
        csr = 0x22;
        break;
    case 200:
        csr = 0x33;
        break;
    case 300:
        csr = 0x44;
        break;
    case 600:
        csr = 0x55;
        break;
    case 1200:
        csr = 0x66;
        break;
    case 2400:
        csr = 0x88;
        break;
    case 4800:
        csr = 0x99;
        break;
    default:
    case 9600:
        csr = 0xbb;
        break;
    case 19200:
        csr = 0xcc;
        break;
    }

    WRITE_SC_PORT(port, CR, CR_RES_MR);
    WRITE_SC_PORT(port, MRx, mr1);
    WRITE_SC_PORT(port, MRx, mr2);

    WRITE_SC(port, ACR, 0x80);
    WRITE_SC_PORT(port, CSR, csr);

    /* reset tx and rx */
    WRITE_SC_PORT(port, CR, CR_RES_RX);
    WRITE_SC_PORT(port, CR, CR_RES_TX);

    WRITE_SC_PORT(port, CR, CR_ENA_TX | CR_ENA_RX);

    while ((READ_SC_PORT(port, SR) & ((1 << 3) | (1 << 2))) != 0xc)
        udelay(2);

    /* XXX */
    uart_update_timeout(port, cflag, (port->uartclk / (16 * quot)));

    spin_unlock_irqrestore(&port->lock, flags);
}
static int pc300_pci_init_one(struct pci_dev *pdev,
                  const struct pci_device_id *ent)
{
    card_t *card;
    u32 __iomem *p;
    int i;
    u32 ramsize;
    u32 ramphys;    /* buffer memory base */
    u32 scaphys;    /* SCA memory base */
    u32 plxphys;    /* PLX registers memory base */

    i = pci_enable_device(pdev);
    if (i)
        return i;

    i = pci_request_regions(pdev, "PC300");
    if (i) {
        pci_disable_device(pdev);
        return i;
    }

    card = kzalloc(sizeof(card_t), GFP_KERNEL);
    if (card == NULL) {
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        return -ENOBUFS;
    }
    pci_set_drvdata(pdev, card);

    if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE ||
        pci_resource_len(pdev, 2) != PC300_SCA_SIZE ||
        pci_resource_len(pdev, 3) < 16384) {
        pr_err("invalid card EEPROM parameters\n");
        pc300_pci_remove_one(pdev);
        return -EFAULT;
    }

    plxphys = pci_resource_start(pdev, 0) & PCI_BASE_ADDRESS_MEM_MASK;
    card->plxbase = ioremap(plxphys, PC300_PLX_SIZE);

    scaphys = pci_resource_start(pdev, 2) & PCI_BASE_ADDRESS_MEM_MASK;
    card->scabase = ioremap(scaphys, PC300_SCA_SIZE);

    ramphys = pci_resource_start(pdev, 3) & PCI_BASE_ADDRESS_MEM_MASK;
    card->rambase = pci_ioremap_bar(pdev, 3);

    if (card->plxbase == NULL ||
        card->scabase == NULL ||
        card->rambase == NULL) {
        pr_err("ioremap() failed\n");
        pc300_pci_remove_one(pdev);
    }

    /* PLX PCI 9050 workaround for local configuration register read bug */
    pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, scaphys);
    card->init_ctrl_value = readl(&((plx9050 __iomem *)card->scabase)->init_ctrl);
    pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, plxphys);

    if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 ||
        pdev->device == PCI_DEVICE_ID_PC300_TE_2)
        card->type = PC300_TE; /* not fully supported */
    else if (card->init_ctrl_value & PC300_CTYPE_MASK)
        card->type = PC300_X21;
    else
        card->type = PC300_RSV;

    if (pdev->device == PCI_DEVICE_ID_PC300_RX_1 ||
        pdev->device == PCI_DEVICE_ID_PC300_TE_1)
        card->n_ports = 1;
    else
        card->n_ports = 2;

    for (i = 0; i < card->n_ports; i++)
        if (!(card->ports[i].netdev = alloc_hdlcdev(&card->ports[i]))) {
            pr_err("unable to allocate memory\n");
            pc300_pci_remove_one(pdev);
            return -ENOMEM;
        }

    /* Reset PLX */
    p = &card->plxbase->init_ctrl;
    writel(card->init_ctrl_value | 0x40000000, p);
    readl(p);   /* Flush the write - do not use sca_flush */
    udelay(1);

    writel(card->init_ctrl_value, p);
    readl(p);   /* Flush the write - do not use sca_flush */
    udelay(1);

    /* Reload Config. Registers from EEPROM */
    writel(card->init_ctrl_value | 0x20000000, p);
    readl(p);   /* Flush the write - do not use sca_flush */
    udelay(1);

    writel(card->init_ctrl_value, p);
    readl(p);   /* Flush the write - do not use sca_flush */
    udelay(1);

    ramsize = sca_detect_ram(card, card->rambase,
                 pci_resource_len(pdev, 3));

    if (use_crystal_clock)
        card->init_ctrl_value &= ~PC300_CLKSEL_MASK;
    else
        card->init_ctrl_value |= PC300_CLKSEL_MASK;

    writel(card->init_ctrl_value, &card->plxbase->init_ctrl);

    /* number of TX + RX buffers for one port */
    i = ramsize / (card->n_ports * (sizeof(pkt_desc) + HDLC_MAX_MRU));
    card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
    card->rx_ring_buffers = i - card->tx_ring_buffers;

    card->buff_offset = card->n_ports * sizeof(pkt_desc) *
        (card->tx_ring_buffers + card->rx_ring_buffers);

    pr_info("PC300/%s, %u KB RAM at 0x%x, IRQ%u, using %u TX + %u RX packets rings\n",
        card->type == PC300_X21 ? "X21" :
        card->type == PC300_TE ? "TE" : "RSV",
        ramsize / 1024, ramphys, pdev->irq,
        card->tx_ring_buffers, card->rx_ring_buffers);

    if (card->tx_ring_buffers < 1) {
        pr_err("RAM test failed\n");
        pc300_pci_remove_one(pdev);
        return -EFAULT;
    }

    /* Enable interrupts on the PCI bridge, LINTi1 active low */
    writew(0x0041, &card->plxbase->intr_ctrl_stat);

    /* Allocate IRQ */
    if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pc300", card)) {
        pr_warn("could not allocate IRQ%d\n", pdev->irq);
        pc300_pci_remove_one(pdev);
        return -EBUSY;
    }
    card->irq = pdev->irq;

    sca_init(card, 0);

    // COTE not set - allows better TX DMA settings
    // sca_out(sca_in(PCR, card) | PCR_COTE, PCR, card);

    sca_out(0x10, BTCR, card);

    for (i = 0; i < card->n_ports; i++) {
        port_t *port = &card->ports[i];
        struct net_device *dev = port->netdev;
        hdlc_device *hdlc = dev_to_hdlc(dev);
        port->chan = i;

        spin_lock_init(&port->lock);
        dev->irq = card->irq;
        dev->mem_start = ramphys;
        dev->mem_end = ramphys + ramsize - 1;
        dev->tx_queue_len = 50;
        dev->netdev_ops = &pc300_ops;
        hdlc->attach = sca_attach;
        hdlc->xmit = sca_xmit;
        port->settings.clock_type = CLOCK_EXT;
        port->card = card;
        if (card->type == PC300_X21)
            port->iface = IF_IFACE_X21;
        else
            port->iface = IF_IFACE_V35;

        sca_init_port(port);
        if (register_hdlc_device(dev)) {
            pr_err("unable to register hdlc device\n");
            port->card = NULL;
            pc300_pci_remove_one(pdev);
            return -ENOBUFS;
        }

        netdev_info(dev, "PC300 channel %d\n", port->chan);
    }
    return 0;
}
static irqreturn_t dmfe_interrupt(int irq, void *dev_id) /* for kernel 2.6.20 */
{
    struct net_device *dev = dev_id;
    board_info_t *db;
    int int_status, i;
    u8 reg_save;

    DMFE_DBUG(0, "dmfe_interrupt()", 0);

    /* A real interrupt coming */
    db = netdev_priv(dev);
    spin_lock(&db->lock);

    /* Save previous register address */
    reg_save = inb(db->io_addr);

    /* Disable all interrupt */
    iow(db, DM9KS_IMR, DM9KS_DISINTR);

    /* Got DM9000/DM9010 interrupt status */
    int_status = ior(db, DM9KS_ISR);    /* Got ISR */
    iow(db, DM9KS_ISR, int_status);     /* Clear ISR status */

    /* Link status change */
    if (int_status & DM9KS_LINK_INTR) {
        netif_stop_queue(dev);
        for (i = 0; i < 500; i++) { /* wait link OK, waiting time = 0.5s */
            phy_read(db, 0x1);
            if (phy_read(db, 0x1) & 0x4) { /* Link OK */
                /* wait for detected Speed */
                for (i = 0; i < 200; i++)
                    udelay(1000);
                /* set media speed */
                if (phy_read(db, 0) & 0x2000)
                    db->Speed = 100;
                else
                    db->Speed = 10;
                break;
            }
            udelay(1000);
        }
        netif_wake_queue(dev);
        //printk("[INTR]i=%d speed=%d\n",i, (int)(db->Speed));
    }

    /* Received the coming packet */
    if (int_status & DM9KS_RX_INTR)
        dmfe_packet_receive(dev);

    /* Transmit Interrupt check */
    if (int_status & DM9KS_TX_INTR)
        dmfe_tx_done(0);

    if (db->cont_rx_pkt_cnt >= CONT_RX_PKT_CNT) {
        iow(db, DM9KS_IMR, 0xa2);
    } else {
        /* Re-enable interrupt mask */
        iow(db, DM9KS_IMR, DM9KS_REGFF);
    }

    /* Restore previous register address */
    outb(reg_save, db->io_addr);

    spin_unlock(&db->lock);

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
    return IRQ_HANDLED;
#endif
}
/* Initialize dm9000 board */
static void dmfe_init_dm9000(struct net_device *dev)
{
    board_info_t *db = netdev_priv(dev);

    DMFE_DBUG(0, "dmfe_init_dm9000()", 0);

    spin_lock_init(&db->lock);

    iow(db, DM9KS_GPR, 0);  /* GPR (reg_1Fh) bit GPIO0=0 pre-activate PHY */
    mdelay(20);             /* wait for PHY power-on ready */

    /* do a software reset and wait 20us */
    iow(db, DM9KS_NCR, 3);
    udelay(20);             /* wait 20us at least for software reset ok */
    iow(db, DM9KS_NCR, 3);  /* NCR (reg_00h) bit[0] RST=1 & Loopback=1, reset on */
    udelay(20);             /* wait 20us at least for software reset ok */

    /* I/O mode */
    db->io_mode = ior(db, DM9KS_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */

    /* Set PHY */
    db->op_mode = media_mode;
    set_PHY_mode(db);

    /* Program operating register */
    iow(db, DM9KS_NCR, 0);
    iow(db, DM9KS_TCR, 0);        /* TX Polling clear */
    iow(db, DM9KS_BPTR, 0x3f);    /* Less 3kb, 600us */
    iow(db, DM9KS_SMCR, 0);       /* Special Mode */
    iow(db, DM9KS_NSR, 0x2c);     /* clear TX status */
    iow(db, DM9KS_ISR, 0x0f);     /* Clear interrupt status */
    iow(db, DM9KS_TCR2, 0x80);    /* Set LED mode 1 */
    if (db->chip_revision == 0x1A) {
        /* Data bus current driving/sinking capability */
        iow(db, DM9KS_BUSCR, 0x01); /* default: 2mA */
    }
#ifdef FLOW_CONTROL
    iow(db, DM9KS_BPTR, 0x37);
    iow(db, DM9KS_FCTR, 0x38);
    iow(db, DM9KS_FCR, 0x29);
#endif

#ifdef DM8606
    iow(db, 0x34, 1);
#endif

    if (dev->features & NETIF_F_HW_CSUM) {
        printk(KERN_INFO "DM9KS:enable TX checksum\n");
        iow(db, DM9KS_TCCR, 0x07);  /* TX UDP/TCP/IP checksum enable */
    }
    if (db->rx_csum) {
        printk(KERN_INFO "DM9KS:enable RX checksum\n");
        iow(db, DM9KS_RCSR, 0x02);  /* RX checksum enable */
    }

#ifdef ETRANS
    /*
     * If TX loading is heavy, the driver can try to enable "early transmit".
     * The programmer can tune the "Early Transmit Threshold"
     * (DM9KS_ETXCSR.[1-0]) to get the optimization.
     * Side effect: "Transmit under-run" may occur. If TX under-run keeps
     * happening, increase the value of the "Early Transmit Threshold".
     */
    iow(db, DM9KS_ETXCSR, 0x83);
#endif

    /* Set address filter table */
    dm9000_hash_table(dev);

    /* Activate DM9000/DM9010 */
    iow(db, DM9KS_IMR, DM9KS_REGFF);    /* Enable TX/RX interrupt mask */
    iow(db, DM9KS_RXCR, DM9KS_REG05 | 1);   /* RX enable */

    /* Init Driver variable */
    db->tx_pkt_cnt = 0;

    netif_carrier_on(dev);
}
/* omap_start_ehc
 *  - Start the TI USBHOST controller
 */
static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(1000);
    u8 tll_ch_mask = 0;
    unsigned reg = 0;
    int ret = 0;

    printk("omap_start_ehc kernel 2.6.35 V0.7 [09/02/2011] \n");

    gpio_direction_output(23, 0);
    PHY_reset = 0;

#ifndef CONFIG_MODEM_SMS
    gpio_direction_output(21, 1);
    gpio_direction_output(35, 1);
    gpio_direction_output(36, 1);

    EP10_HW_ID = ep_get_hardware_id();
    if (EP10_HW_ID == BOARD_VERSION_UNKNOWN) {
        EP10_HW_ID = BOARD_ID_DVT1;
    }
    if (EP10_HW_ID >= BOARD_ID_DVT1) {
        gpio_direction_output(109, 1);
        //printk("GPIO-109 enabled \n");
    }
    //printk("HW-ver= %d\n",EP10_HW_ID);
    modem_PW = 1;
#endif

    dev_dbg(omap->dev, "starting TI EHCI USB Controller\n");

    /* Get all the clock handles we need */
    omap->usbhost_ick = clk_get(omap->dev, "usbhost_ick");
    if (IS_ERR(omap->usbhost_ick)) {
        dev_err(omap->dev, "could not get usbhost_ick\n");
        ret = PTR_ERR(omap->usbhost_ick);
        goto err_host_ick;
    }

    omap->usbhost2_120m_fck = clk_get(omap->dev, "usbhost_120m_fck");
    if (IS_ERR(omap->usbhost2_120m_fck)) {
        dev_err(omap->dev, "could not get usbhost2_120m_fck\n");
        ret = PTR_ERR(omap->usbhost2_120m_fck);
        goto err_host_120m_fck;
    }

    omap->usbhost1_48m_fck = clk_get(omap->dev, "usbhost_48m_fck");
    if (IS_ERR(omap->usbhost1_48m_fck)) {
        dev_err(omap->dev, "could not get usbhost_48m_fck\n");
        ret = PTR_ERR(omap->usbhost1_48m_fck);
        goto err_host_48m_fck;
    }

    if (omap->phy_reset) {
        printk(KERN_DEBUG "reset the phys\n");
        /* Refer: ISSUE1 */
        if (gpio_is_valid(omap->reset_gpio_port[0])) {
            gpio_request(omap->reset_gpio_port[0], "USB1 PHY reset");
            gpio_direction_output(omap->reset_gpio_port[0], 0);
        }

        if (gpio_is_valid(omap->reset_gpio_port[1])) {
            gpio_request(omap->reset_gpio_port[1], "USB2 PHY reset");
            gpio_direction_output(omap->reset_gpio_port[1], 0);
        }

        /* Hold the PHY in RESET for enough time till DIR is high */
        udelay(10);
    }

    omap->usbtll_fck = clk_get(omap->dev, "usbtll_fck");
    if (IS_ERR(omap->usbtll_fck)) {
        dev_err(omap->dev, "could not get usbtll_fck\n");
        ret = PTR_ERR(omap->usbtll_fck);
        goto err_tll_fck;
    }

    omap->usbtll_ick = clk_get(omap->dev, "usbtll_ick");
    if (IS_ERR(omap->usbtll_ick)) {
        dev_err(omap->dev, "could not get usbtll_ick\n");
        ret = PTR_ERR(omap->usbtll_ick);
        goto err_tll_ick;
    }

    /* Now enable all the clocks in the correct order */
    ehci_omap_clock_power(omap, 1);

    /* Put UHH in NoIdle/NoStandby mode */
    reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG);
    reg |= OMAP_UHH_SYSCONFIG_CACTIVITY |
           OMAP_UHH_SYSCONFIG_AUTOIDLE |
           OMAP_UHH_SYSCONFIG_ENAWAKEUP;
    reg &= ~(OMAP_UHH_SYSCONFIG_SIDLEMASK | OMAP_UHH_SYSCONFIG_MIDLEMASK);
    reg |= OMAP_UHH_SYSCONFIG_NOIDLE | OMAP_UHH_SYSCONFIG_NOSTDBY;
    ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);

    reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_HOSTCONFIG);

    /* setup ULPI bypass and burst configurations */
    reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN |
            OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN |
            OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN);
    reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;

    if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_UNKNOWN)
        reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
    if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_UNKNOWN)
        reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
    if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_UNKNOWN)
        reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;

    /* Bypass the TLL module for PHY mode operation */
    if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) {
        dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n");
        if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY) ||
            (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY) ||
            (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY))
            reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
        else
            reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
    } else {
        dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n");
        if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY)
            reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
        else if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL)
            reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;

        if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY)
            reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
        else if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL)
            reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;

        if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY)
            reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
        else if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)
            reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
    }
    ehci_omap_writel(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
    dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg);

    /*
     * An undocumented "feature" in the OMAP3 EHCI controller,
     * causes suspended ports to be taken out of suspend when
     * the USBCMD.Run/Stop bit is cleared (for example when
     * we do ehci_bus_suspend).
     * This breaks suspend-resume if the root-hub is allowed
     * to suspend. Writing 1 to this undocumented register bit
     * disables this feature and restores normal behavior.
     */
    ehci_omap_writel(omap->ehci_base, EHCI_INSNREG04,
             EHCI_INSNREG04_DISABLE_UNSUSPEND);

    if (omap->phy_reset) {
        printk(KERN_DEBUG "unreset the phys\n");
        /* Refer ISSUE1:
         * Hold the PHY in RESET for enough time till
         * PHY is settled and ready
         */
        udelay(10);

        if (gpio_is_valid(omap->reset_gpio_port[0]))
            gpio_set_value(omap->reset_gpio_port[0], 1);

        if (gpio_is_valid(omap->reset_gpio_port[1]))
            gpio_set_value(omap->reset_gpio_port[1], 1);
    }

    //gpio_direction_output(21, 1);
    //gpio_direction_output(36, 1);
    //gpio_direction_output(35, 1);
    //msleep(10);

    printk("RESET USB PHY\n");
    gpio_direction_output(23, 1);
    PHY_reset = 1;

    return 0;

err_sys_status:
    ehci_omap_clock_power(omap, 0);
    clk_put(omap->usbtll_ick);

err_tll_ick:
    clk_put(omap->usbtll_fck);

err_tll_fck:
    clk_put(omap->usbhost1_48m_fck);

    if (omap->phy_reset) {
        if (gpio_is_valid(omap->reset_gpio_port[0]))
            gpio_free(omap->reset_gpio_port[0]);

        if (gpio_is_valid(omap->reset_gpio_port[1]))
            gpio_free(omap->reset_gpio_port[1]);
    }

err_host_48m_fck:
    clk_put(omap->usbhost2_120m_fck);

err_host_120m_fck:
    clk_put(omap->usbhost_ick);

err_host_ick:
    return ret;
}