/*
 * Set up the primary PCI controller on a TX4927-based board.
 *
 * Performs a PCI bus + PCIC reset, optionally switching to a 66MHz PCI
 * clock, then registers and initializes the primary controller.  When
 * the clock option is "auto" and a 66MHz-capable bus is detected, the
 * whole reset/clock-setup/reinit sequence is repeated at 66MHz.
 */
static void __init tx4927_pci_setup(void)
{
	/* External arbiter is in use when the internal PCI arbiter is off. */
	int extarb = !(__raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_PCIARB);
	struct pci_controller *c = &txx9_primary_pcic;

	register_pci_controller(c);
	if (__raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_PCI66)
		txx9_pci_option =
			(txx9_pci_option & ~TXX9_PCI_OPT_CLK_MASK) |
			TXX9_PCI_OPT_CLK_66; /* already configured */
	/* Reset PCI Bus */
	writeb(1, rbtx4927_pcireset_addr);
	/* Reset PCIC */
	txx9_set64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST);
	if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
	    TXX9_PCI_OPT_CLK_66)
		tx4927_pciclk66_setup();
	mdelay(10);
	/* clear PCIC reset */
	txx9_clear64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST);
	writeb(0, rbtx4927_pcireset_addr);
	iob();
	tx4927_report_pciclk();
	tx4927_pcic_setup(tx4927_pcicptr, c, extarb);
	if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
	    TXX9_PCI_OPT_CLK_AUTO &&
	    txx9_pci66_check(c, 0, 0)) {
		/* Reset PCI Bus */
		writeb(1, rbtx4927_pcireset_addr);
		/* Reset PCIC */
		txx9_set64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST);
		tx4927_pciclk66_setup();
		mdelay(10);
		/* clear PCIC reset */
		txx9_clear64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST);
		writeb(0, rbtx4927_pcireset_addr);
		iob();
		/* Reinitialize PCIC */
		tx4927_report_pciclk();
		tx4927_pcic_setup(tx4927_pcicptr, c, extarb);
	}
	tx4927_setup_pcierr_irq();
}
/*
 * Clocksource read callback: the SB1250 timer counts down, so the raw
 * count is subtracted from the initial value to yield an up-counter.
 */
static cycle_t sb1250_hpt_read(struct clocksource *cs)
{
	unsigned int remaining =
		G_SCD_TIMER_CNT(__raw_readq(IOADDR(A_SCD_TIMER_REGISTER(
			SB1250_HPT_NUM, R_SCD_TIMER_CNT))));

	return SB1250_HPT_VALUE - remaining;
}
/*
 * Read a UART register via 64-bit MMIO, keeping only the low byte, then
 * route the value through the driver's MSR quirk handling.
 */
static unsigned int dw8250_serial_inq(struct uart_port *p, int offset)
{
	unsigned int raw;

	raw = (u8)__raw_readq(p->membase + (offset << p->regshift));

	return dw8250_modify_msr(p, offset, raw);
}
/*
 * Top-level MIPS interrupt dispatch for the SB1250: look at the pending
 * cause bits (masked by the status register) and route each source to
 * its handler; IP[2] fans out through the 1250 interrupt mapper.
 */
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending;

#ifdef CONFIG_SIBYTE_SB1250_PROF
	/* Set compare to count to silence count/compare timer interrupts */
	write_c0_compare(read_c0_count());
#endif

	/*
	 * What a pain. We have to be really careful saving the upper 32 bits
	 * of any register across function calls if we don't want them
	 * trashed--since we're running in -o32, the calling routine never
	 * saves the full 64 bits of a register across a function call.
	 * Being the interrupt handler, we're guaranteed that interrupts are
	 * disabled during this code so we don't have to worry about random
	 * interrupts blasting the high 32 bits.
	 */
	pending = read_c0_cause() & read_c0_status() & ST0_IM;

#ifdef CONFIG_SIBYTE_SB1250_PROF
	if (pending & CAUSEF_IP7)	/* Cpu performance counter interrupt */
		sbprof_cpu_intr();
	else
#endif
	if (pending & CAUSEF_IP4)
		sb1250_timer_interrupt();
#ifdef CONFIG_SMP
	else if (pending & CAUSEF_IP3)
		sb1250_mailbox_interrupt();
#endif
#ifdef CONFIG_KGDB
	else if (pending & CAUSEF_IP6)	/* KGDB (uart 1) */
		sb1250_kgdb_interrupt();
#endif
	else if (pending & CAUSEF_IP2) {
		unsigned long long mask;

		/*
		 * Default...we've hit an IP[2] interrupt, which means we've
		 * got to check the 1250 interrupt registers to figure out what
		 * to do.  Need to detect which CPU we're on, now that
		 * smp_affinity is supported.
		 */
		mask = __raw_readq(IOADDR(A_IMR_REGISTER(smp_processor_id(),
					  R_IMR_INTERRUPT_STATUS_BASE)));
		if (mask)
			do_IRQ(fls64(mask) - 1);
		else
			spurious_interrupt();
	} else
		spurious_interrupt();
}
/**
 * octeon_i2c_reg_write - write an I2C core register
 * @i2c: The struct octeon_i2c
 * @eop_reg: Register selector
 * @data: Value to be written
 *
 * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
 */
static void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8 data)
{
	u64 tmp;

	__raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI);
	do {
		/*
		 * Busy-poll until hardware clears the valid bit, which
		 * signals completion of the indirect write.
		 */
		tmp = __raw_readq(i2c->twsi_base + SW_TWSI);
	} while ((tmp & SW_TWSI_V) != 0);
}
/*
 * Switch the TX4938 PCI clock to 66MHz operation.
 *
 * Asserts M66EN and, when PCICLK is internally generated, halves the
 * PCI clock divider (doubling PCICLK).  Returns the resulting PCI clock
 * rate in Hz, or -1 when the PCI clock is supplied externally.
 */
int __init tx4938_pciclk66_setup(void)
{
	int pciclk;

	/* Assert M66EN */
	tx4938_ccfg_set(TX4938_CCFG_PCI66);
	/* Double PCICLK (if possible) */
	if (__raw_readq(&tx4938_ccfgptr->pcfg) & TX4938_PCFG_PCICLKEN_ALL) {
		unsigned int pcidivmode = 0;
		u64 ccfg = __raw_readq(&tx4938_ccfgptr->ccfg);

		pcidivmode = (unsigned long)ccfg &
			TX4938_CCFG_PCIDIVMODE_MASK;
		/* Map each current divider onto its halved (66MHz) setting. */
		switch (pcidivmode) {
		case TX4938_CCFG_PCIDIVMODE_8:
		case TX4938_CCFG_PCIDIVMODE_4:
			pcidivmode = TX4938_CCFG_PCIDIVMODE_4;
			pciclk = txx9_cpu_clock / 4;
			break;
		case TX4938_CCFG_PCIDIVMODE_9:
		case TX4938_CCFG_PCIDIVMODE_4_5:
			pcidivmode = TX4938_CCFG_PCIDIVMODE_4_5;
			pciclk = txx9_cpu_clock * 2 / 9;
			break;
		case TX4938_CCFG_PCIDIVMODE_10:
		case TX4938_CCFG_PCIDIVMODE_5:
			pcidivmode = TX4938_CCFG_PCIDIVMODE_5;
			pciclk = txx9_cpu_clock / 5;
			break;
		case TX4938_CCFG_PCIDIVMODE_11:
		case TX4938_CCFG_PCIDIVMODE_5_5:
		default:
			pcidivmode = TX4938_CCFG_PCIDIVMODE_5_5;
			pciclk = txx9_cpu_clock * 2 / 11;
			break;
		}
		tx4938_ccfg_change(TX4938_CCFG_PCIDIVMODE_MASK, pcidivmode);
		printk(KERN_DEBUG "PCICLK: ccfg:%08lx\n",
		       (unsigned long)__raw_readq(&tx4938_ccfgptr->ccfg));
	} else
		pciclk = -1;
	return pciclk;
}
/*
 * Map a PCIC1 slot to its interrupt number.
 *
 * Returns the ETH0/ETH1 IRQ for the corresponding internal slot when
 * that Ethernet controller is selected, 0 for other PCIC1 slots, and
 * -1 when the device is not behind PCIC1 at all.
 */
int __init tx4938_pcic1_map_irq(const struct pci_dev *dev, u8 slot)
{
	u64 pcfg;

	if (get_tx4927_pcicptr(dev->bus->sysdata) != tx4938_pcic1ptr)
		return -1;

	pcfg = __raw_readq(&tx4938_ccfgptr->pcfg);
	if (slot == TX4927_PCIC_IDSEL_AD_TO_SLOT(31)) {
		if (pcfg & TX4938_PCFG_ETH0_SEL)
			return TXX9_IRQ_BASE + TX4938_IR_ETH0;
	} else if (slot == TX4927_PCIC_IDSEL_AD_TO_SLOT(30)) {
		if (pcfg & TX4938_PCFG_ETH1_SEL)
			return TXX9_IRQ_BASE + TX4938_IR_ETH1;
	}
	return 0;
}
/**
 * octeon_i2c_read_sw - read an I2C core register.
 * @i2c: The struct octeon_i2c.
 * @eop_reg: Register selector.
 *
 * Returns the data.
 *
 * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
 */
static u8 octeon_i2c_read_sw(struct octeon_i2c *i2c, u64 eop_reg)
{
	u64 tmp;

	__raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI);
	do {
		/*
		 * Busy-poll until the valid bit clears; the result is then
		 * available in the low byte of the CSR.
		 */
		tmp = __raw_readq(i2c->twsi_base + SW_TWSI);
	} while ((tmp & SW_TWSI_V) != 0);

	return tmp & 0xFF;
}
/*
 * Log the PCIC1 clock rate at boot.  PCICLK is the GBUS clock divided
 * by 4 or 2 depending on the PCI1DMD configuration bit.
 */
void __init tx4938_report_pci1clk(void)
{
	__u64 ccfg = __raw_readq(&tx4938_ccfgptr->ccfg);
	unsigned int divisor = (ccfg & TX4938_CCFG_PCI1DMD) ? 4 : 2;
	unsigned int pciclk = txx9_gbus_clock / divisor;
	unsigned int rounded = pciclk + 50000;

	printk(KERN_INFO "PCIC1 -- %sPCICLK:%u.%uMHz\n",
	       (ccfg & TX4938_CCFG_PCI1_66) ? "PCI66 " : "",
	       rounded / 1000000, (rounded / 100000) % 10);
}
/*
 * 64-bit MMIO write for a DW8250 UART register.  Only the low byte of
 * @value is written; a read-back of LCR flushes the posted write, and
 * LCR writes on non-16550-compatible parts get extra verification.
 */
static void dw8250_serial_outq(struct uart_port *p, int offset, int value)
{
	struct dw8250_data *d = p->private_data;

	value &= 0xff;
	__raw_writeq(value, p->membase + (offset << p->regshift));
	/* Read back to ensure register write ordering. */
	__raw_readq(p->membase + (UART_LCR << p->regshift));

	if (offset == UART_LCR && !d->uart_16550_compatible)
		dw8250_check_lcr(p, value);
}
/*
 * Derive the CPU clock from the strapped PCIDIVMODE field: the 2.5 and
 * 5 dividers imply a 166MHz part, anything else a 200MHz part.
 */
static void __init rbtx4927_clock_init(void)
{
	unsigned long divmode = (unsigned long)
		__raw_readq(&tx4927_ccfgptr->ccfg) &
		TX4927_CCFG_PCIDIVMODE_MASK;

	if (divmode == TX4927_CCFG_PCIDIVMODE_2_5 ||
	    divmode == TX4927_CCFG_PCIDIVMODE_5)
		txx9_cpu_clock = 166666666;
	else
		txx9_cpu_clock = 200000000;
}
/*
 * Write byte @b to Xicor X1241 RTC register @addr over the SiByte SMBus.
 * Returns 0 on success or -1 on a bus error (the error bit is cleared
 * before returning).
 */
static int xicor_write(uint8_t addr, int b)
{
	/* Wait for any in-progress SMBus transaction to finish. */
	while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
		;

	__raw_writeq(addr, SMB_CSR(R_SMB_CMD));
	/* Data word: register address in the low byte, data byte above it. */
	__raw_writeq((addr & 0xff) | ((b & 0xff) << 8), SMB_CSR(R_SMB_DATA));
	__raw_writeq(V_SMB_ADDR(X1241_CCR_ADDRESS) | V_SMB_TT_WR3BYTE,
		     SMB_CSR(R_SMB_START));

	while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
		;

	if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) {
		/* Clear error bit by writing a 1 */
		__raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS));
		return -1;
	} else {
		return 0;
	}
}
/**
 * mc_read_response - reads the response for the last MC command from a
 * Management Complex (MC) portal
 *
 * @portal: pointer to an MC portal
 * @resp: pointer to command response buffer
 *
 * Returns MC_CMD_STATUS_OK on Success; Error code otherwise.
 */
static inline enum mc_cmd_status mc_read_response(struct mc_command __iomem *
						  portal,
						  struct mc_command *resp)
{
	int i;
	enum mc_cmd_status status;

	/* Copy command response header from MC portal: */
	__iormb();
	resp->header = __raw_readq(&portal->header);
	__iormb();
	status = mc_cmd_hdr_read_status(resp);
	if (status != MC_CMD_STATUS_OK)
		return status;

	/* Copy command response data from MC portal: */
	for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
		resp->params[i] = __raw_readq(&portal->params[i]);
	/* Read barrier orders the raw parameter reads before later accesses. */
	__iormb();

	return status;
}
/*
 * Report the TX4938 primary PCI clock configuration at boot.
 * Returns the internal PCICLK rate in Hz, 0 if the divider field is not
 * recognized, or -1 when an external PCI clock is in use.
 */
int __init tx4938_report_pciclk(void)
{
	int pciclk = 0;

	printk(KERN_INFO "PCIC --%s PCICLK:",
	       (__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCI66) ?
	       " PCI66" : "");
	if (__raw_readq(&tx4938_ccfgptr->pcfg) & TX4938_PCFG_PCICLKEN_ALL) {
		u64 ccfg = __raw_readq(&tx4938_ccfgptr->ccfg);

		/* PCICLK is derived from the CPU clock via PCIDIVMODE. */
		switch ((unsigned long)ccfg & TX4938_CCFG_PCIDIVMODE_MASK) {
		case TX4938_CCFG_PCIDIVMODE_4:
			pciclk = txx9_cpu_clock / 4;
			break;
		case TX4938_CCFG_PCIDIVMODE_4_5:
			pciclk = txx9_cpu_clock * 2 / 9;
			break;
		case TX4938_CCFG_PCIDIVMODE_5:
			pciclk = txx9_cpu_clock / 5;
			break;
		case TX4938_CCFG_PCIDIVMODE_5_5:
			pciclk = txx9_cpu_clock * 2 / 11;
			break;
		case TX4938_CCFG_PCIDIVMODE_8:
			pciclk = txx9_cpu_clock / 8;
			break;
		case TX4938_CCFG_PCIDIVMODE_9:
			pciclk = txx9_cpu_clock / 9;
			break;
		case TX4938_CCFG_PCIDIVMODE_10:
			pciclk = txx9_cpu_clock / 10;
			break;
		case TX4938_CCFG_PCIDIVMODE_11:
			pciclk = txx9_cpu_clock / 11;
			break;
		}
		printk("Internal(%u.%uMHz)",
		       (pciclk + 50000) / 1000000,
		       ((pciclk + 50000) / 100000) % 10);
	} else {
		printk("External");
		pciclk = -1;
	}
	printk("\n");
	return pciclk;
}
/*
 * Zero a page using the SB1 data mover (DM) engine.
 *
 * Builds a zero-memory DMA descriptor for this CPU's channel and
 * busy-waits for completion.  Pages outside KSEG0 fall back to the CPU
 * clearing loop.
 */
void clear_page(void *page)
{
	u64 to_phys = CPHYSADDR((unsigned long)page);
	unsigned int cpu = smp_processor_id();

	/* if the page is not in KSEG0, use old way */
	if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
		return clear_page_cpu(page);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
				 M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	/* Kick the data mover: one descriptor queued. */
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu,
						  R_DM_DSCR_BASE_DEBUG))) &
		 M_DM_DSCR_BASE_INTERRUPT))
		;
	/*
	 * NOTE(review): the final read of DSCR_BASE appears to acknowledge
	 * the completion status as a read side effect - confirm against the
	 * SB1250 data mover documentation.
	 */
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}
/*
 * Acknowledge a BCM1480 interrupt.  HT (LDT) interrupts must have their
 * pending bit cleared explicitly - on every CPU, so an affinity change
 * never sees stale state - and an EOI generated; the source is then
 * masked on its owner CPU.
 */
static void ack_bcm1480_irq(unsigned int irq)
{
	u64 pending;
	unsigned int irq_dirty;
	int k;

	/*
	 * If the interrupt was an HT interrupt, now is the time to
	 * clear it.  NOTE: we assume the HT bridge was set up to
	 * deliver the interrupts to all CPUs (which makes affinity
	 * changing easier for us)
	 */
	/* High-half IRQ numbers are folded down to their low-half bit. */
	irq_dirty = irq;
	if ((irq_dirty >= BCM1480_NR_IRQS_HALF) &&
	    (irq_dirty <= BCM1480_NR_IRQS)) {
		irq_dirty -= BCM1480_NR_IRQS_HALF;
	}

	for (k = 0; k < 2; k++) { /* Loop through high and low LDT interrupts */
		pending = __raw_readq(IOADDR(A_BCM1480_IMR_REGISTER(
			bcm1480_irq_owner[irq],
			R_BCM1480_IMR_LDT_INTERRUPT_H +
			(k * BCM1480_IMR_HL_SPACING))));
		pending &= ((u64)1 << (irq_dirty));
		if (pending) {
#ifdef CONFIG_SMP
			int i;
			for (i = 0; i < NR_CPUS; i++) {
				/*
				 * Clear for all CPUs so an affinity switch
				 * doesn't find an old status
				 */
				__raw_writeq(pending,
					     IOADDR(A_BCM1480_IMR_REGISTER(
						cpu_logical_map(i),
						R_BCM1480_IMR_LDT_INTERRUPT_CLR_H +
						(k * BCM1480_IMR_HL_SPACING))));
			}
#else
			__raw_writeq(pending,
				     IOADDR(A_BCM1480_IMR_REGISTER(0,
					R_BCM1480_IMR_LDT_INTERRUPT_CLR_H +
					(k * BCM1480_IMR_HL_SPACING))));
#endif

			/*
			 * Generate EOI.  For Pass 1 parts, EOI is a nop.  For
			 * Pass 2, the LDT world may be edge-triggered, but
			 * this EOI shouldn't hurt.  If they are
			 * level-sensitive, the EOI is required.
			 */
#ifdef CONFIG_PCI
			if (ht_eoi_space)
				*(uint32_t *)(ht_eoi_space + (irq << 16) +
					      (7 << 2)) = 0;
#endif
		}
	}
	bcm1480_mask_irq(bcm1480_irq_owner[irq], irq);
}
/*
 * Register and initialize the RBTX4939 PCI controllers: the primary
 * PCIC always, plus PCIC1 when ATA1 is not claiming its pins and at
 * least one of the ET0/ET1 Ethernet modes is enabled.
 */
static void __init rbtx4939_pci_setup(void)
{
#ifdef CONFIG_PCI
	/* External arbiter is in use when the internal PCI arbiter is off. */
	int extarb = !(__raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_PCIARB);
	struct pci_controller *c = &txx9_primary_pcic;

	register_pci_controller(c);
	tx4939_report_pciclk();
	tx4927_pcic_setup(tx4939_pcicptr, c, extarb);
	if (!(__raw_readq(&tx4939_ccfgptr->pcfg) & TX4939_PCFG_ATA1MODE) &&
	    (__raw_readq(&tx4939_ccfgptr->pcfg) &
	     (TX4939_PCFG_ET0MODE | TX4939_PCFG_ET1MODE))) {
		tx4939_report_pci1clk();

		/* mem:64K(max), io:64K(max) (enough for ETH0,ETH1) */
		c = txx9_alloc_pci_controller(NULL, 0, 0x10000, 0, 0x10000);
		register_pci_controller(c);
		tx4927_pcic_setup(tx4939_pcic1ptr, c, 0);
	}
	tx4939_setup_pcierr_irq();
#endif /* CONFIG_PCI */
}
/*
 * IP[2] handler: an SB1250 interrupt mapper source fired.  Read this
 * CPU's masked status register and dispatch the highest pending bit.
 */
static inline void dispatch_ip2(void)
{
	unsigned long long status;
	unsigned int me = smp_processor_id();

	status = __raw_readq(IOADDR(A_IMR_REGISTER(me,
				    R_IMR_INTERRUPT_STATUS_BASE)));
	if (status)
		do_IRQ(fls64(status) - 1);
}
/*
 * IP[2] handler: a BCM1480 interrupt mapper source fired.  Read both
 * halves of this CPU's status registers; a high word of exactly 1
 * indicates the real source lives in the low word.
 */
static inline void dispatch_ip2(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long base = A_BCM1480_IMR_MAPPER(cpu);
	unsigned long long mask_h, mask_l;

	mask_h = __raw_readq(IOADDR(base +
				    R_BCM1480_IMR_INTERRUPT_STATUS_BASE_H));
	mask_l = __raw_readq(IOADDR(base +
				    R_BCM1480_IMR_INTERRUPT_STATUS_BASE_L));

	if (!mask_h)
		return;
	if (mask_h != 1)
		do_IRQ(fls64(mask_h) - 1);
	else if (mask_l)
		do_IRQ(63 + fls64(mask_l));
}
/*
 * Read one register of the M41T81 RTC over the SiByte SMBus.
 * Returns the register value (0-255), or -1 on a bus error (the error
 * bit is cleared before returning).
 */
static int m41t81_read(uint8_t addr)
{
	/* Wait for any in-progress SMBus transaction to finish. */
	while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
		;

	/* First send the register address with a one-byte write... */
	__raw_writeq(addr & 0xff, SMB_CSR(R_SMB_CMD));
	__raw_writeq((V_SMB_ADDR(M41T81_CCR_ADDRESS) | V_SMB_TT_WR1BYTE),
		     SMB_CSR(R_SMB_START));

	while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
		;

	/* ...then issue a one-byte read from the device. */
	__raw_writeq((V_SMB_ADDR(M41T81_CCR_ADDRESS) | V_SMB_TT_RD1BYTE),
		     SMB_CSR(R_SMB_START));

	while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
		;

	if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) {
		/* Clear error bit by writing a 1 */
		__raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS));
		return -1;
	}

	return (__raw_readq(SMB_CSR(R_SMB_DATA)) & 0xff);
}
/*
 * Derive the CPU clock from the strapped PCIDIVMODE field: each pair of
 * divider settings corresponds to one of the supported CPU speeds.
 */
static void __init rbtx4937_clock_init(void)
{
	unsigned long divmode = (unsigned long)
		__raw_readq(&tx4938_ccfgptr->ccfg) &
		TX4938_CCFG_PCIDIVMODE_MASK;

	if (divmode == TX4938_CCFG_PCIDIVMODE_8 ||
	    divmode == TX4938_CCFG_PCIDIVMODE_4)
		txx9_cpu_clock = 266666666;
	else if (divmode == TX4938_CCFG_PCIDIVMODE_9 ||
		 divmode == TX4938_CCFG_PCIDIVMODE_4_5)
		txx9_cpu_clock = 300000000;
	else
		txx9_cpu_clock = 333333333;
}
/*
 * Watchdog character-device ioctl handler implementing the standard
 * WDIOC get/set operations for the SiByte watchdog.
 */
static long sbwdog_ioctl(struct file *file, unsigned int cmd,
						unsigned long arg)
{
	int ret = -ENOTTY;
	unsigned long time;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		ret = copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
		break;

	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		ret = put_user(0, p);
		break;

	case WDIOC_KEEPALIVE:
		sbwdog_pet(user_dog);
		ret = 0;
		break;

	case WDIOC_SETTIMEOUT:
		ret = get_user(time, p);
		if (ret)
			break;

		/* The hardware timeout register counts in microseconds. */
		time *= 1000000;
		if (time > 0x7fffffUL) {
			ret = -EINVAL;
			break;
		}
		timeout = time;
		sbwdog_set(user_dog, timeout);
		sbwdog_pet(user_dog);

		/* fall through - SETTIMEOUT also reports the new timeout */

	case WDIOC_GETTIMEOUT:
		/*
		 * get the remaining count from the ... count register
		 * which is 1*8 before the config register
		 */
		ret = put_user(__raw_readq(user_dog - 8) / 1000000, p);
		break;
	}
	return ret;
}
/*
 * Copy @count bytes from IO memory space to normal memory.
 *
 * When source and destination share alignment, data is moved in 8-, 4-,
 * or 2-byte chunks; whatever remains goes a byte at a time.  Ends with
 * a full memory barrier.
 */
void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
{
	/* Try the widest co-aligned chunk size first, then narrower ones. */
	if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
		count -= 8;
		do {
			*(u64 *)to = __raw_readq(from);
			count -= 8;
			to += 8;
			from += 8;
		} while (count >= 0);
		count += 8;
	}

	if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
		count -= 4;
		do {
			*(u32 *)to = __raw_readl(from);
			count -= 4;
			to += 4;
			from += 4;
		} while (count >= 0);
		count += 4;
	}

	if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
		count -= 2;
		do {
			*(u16 *)to = __raw_readw(from);
			count -= 2;
			to += 2;
			from += 2;
		} while (count >= 0);
		count += 2;
	}

	while (count > 0) {
		*(u8 *) to = __raw_readb(from);
		count--;
		to++;
		from++;
	}
	mb();
}
/*
 * Copy data from IO memory space to "real" memory space.
 * This needs to be optimized.
 */
void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
{
	/* Optimize co-aligned transfers.  Everything else gets handled
	   a byte at a time. */

	if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
		count -= 8;
		do {
			*(u64 *)to = __raw_readq(from);
			count -= 8;
			to += 8;
			from += 8;
		} while (count >= 0);
		/* Undo the final over-decrement; the remainder continues below. */
		count += 8;
	}

	if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
		count -= 4;
		do {
			*(u32 *)to = __raw_readl(from);
			count -= 4;
			to += 4;
			from += 4;
		} while (count >= 0);
		count += 4;
	}

	if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
		count -= 2;
		do {
			*(u16 *)to = __raw_readw(from);
			count -= 2;
			to += 2;
			from += 2;
		} while (count >= 0);
		count += 2;
	}

	while (count > 0) {
		*(u8 *) to = __raw_readb(from);
		count--;
		to++;
		from++;
	}
	/* Full barrier so all IO reads are visible before the caller proceeds. */
	mb();
}
/*
 * Map read op: fetch one bankwidth-sized item at @ofs into x[0] of the
 * returned map_word; all remaining words are zeroed.
 */
static map_word ichxrom_read(struct map_info *map, unsigned long ofs)
{
	map_word val;
	int i;

	/* Zero every word up front; only x[0] is filled from hardware. */
	for (i = 0; i < map_words(map); i++)
		val.x[i] = 0;

	switch (map->bankwidth) {
	case 1:
		val.x[0] = __raw_readb(addr(map, ofs));
		break;
	case 2:
		val.x[0] = __raw_readw(addr(map, ofs));
		break;
	case 4:
		val.x[0] = __raw_readl(addr(map, ofs));
		break;
#if BITS_PER_LONG >= 64
	case 8:
		val.x[0] = __raw_readq(addr(map, ofs));
		break;
#endif
	default:
		break;
	}

	return val;
}
/*
 * Acknowledge an SB1250 interrupt.  HT (LDT) interrupts must have their
 * pending bit cleared on every CPU - so an affinity switch never sees
 * stale state - followed by an EOI; the source is then masked on its
 * owner CPU.
 */
static void ack_sb1250_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;
#ifdef CONFIG_SIBYTE_HAS_LDT
	u64 pending;

	/*
	 * If the interrupt was an HT interrupt, now is the time to
	 * clear it.  NOTE: we assume the HT bridge was set up to
	 * deliver the interrupts to all CPUs (which makes affinity
	 * changing easier for us)
	 */
	pending = __raw_readq(IOADDR(A_IMR_REGISTER(sb1250_irq_owner[irq],
						    R_IMR_LDT_INTERRUPT)));
	pending &= ((u64)1 << (irq));
	if (pending) {
		int i;
		for (i = 0; i < NR_CPUS; i++) {
			int cpu;
#ifdef CONFIG_SMP
			cpu = cpu_logical_map(i);
#else
			cpu = i;
#endif
			/*
			 * Clear for all CPUs so an affinity switch
			 * doesn't find an old status
			 */
			__raw_writeq(pending,
				     IOADDR(A_IMR_REGISTER(cpu,
						R_IMR_LDT_INTERRUPT_CLR)));
		}

		/*
		 * Generate EOI.  For Pass 1 parts, EOI is a nop.  For
		 * Pass 2, the LDT world may be edge-triggered, but
		 * this EOI shouldn't hurt.  If they are
		 * level-sensitive, the EOI is required.
		 */
		*(uint32_t *)(ldt_eoi_space + (irq << 16) + (7 << 2)) = 0;
	}
#endif
	sb1250_mask_irq(sb1250_irq_owner[irq], irq);
}
/* Set txx9_cpu_clock from the strapped PCIDIVMODE (see table below). */
static void __init rbtx4927_clock_init(void)
{
	/*
	 * ASSUMPTION: PCIDIVMODE is configured for PCI 33MHz or 66MHz.
	 *
	 * For TX4927:
	 * PCIDIVMODE[12:11]'s initial value is given by S9[4:3] (ON:0, OFF:1).
	 * CPU 166MHz: PCI 66MHz : PCIDIVMODE: 00 (1/2.5)
	 * CPU 200MHz: PCI 66MHz : PCIDIVMODE: 01 (1/3)
	 * CPU 166MHz: PCI 33MHz : PCIDIVMODE: 10 (1/5)
	 * CPU 200MHz: PCI 33MHz : PCIDIVMODE: 11 (1/6)
	 * i.e. S9[3]: ON (83MHz), OFF (100MHz)
	 */
	switch ((unsigned long)__raw_readq(&tx4927_ccfgptr->ccfg) &
		TX4927_CCFG_PCIDIVMODE_MASK) {
	case TX4927_CCFG_PCIDIVMODE_2_5:
	case TX4927_CCFG_PCIDIVMODE_5:
		txx9_cpu_clock = 166666666;	/* 166MHz */
		break;
	default:
		txx9_cpu_clock = 200000000;	/* 200MHz */
	}
}
/*
 * 64-bit MMIO write for a DW8250 UART register.  The DesignWare UART
 * can ignore LCR writes while busy, so LCR writes are read back and
 * retried (forcing the UART idle between attempts) until they stick.
 */
static void dw8250_serial_outq(struct uart_port *p, int offset, int value)
{
	value &= 0xff;
	__raw_writeq(value, p->membase + (offset << p->regshift));
	/* Read back to ensure register write ordering. */
	__raw_readq(p->membase + (UART_LCR << p->regshift));

	/* Make sure LCR write wasn't ignored */
	if (offset == UART_LCR) {
		int tries = 1000;
		while (tries--) {
			/* Compare ignoring the SPAR bit. */
			unsigned int lcr = p->serial_in(p, UART_LCR);
			if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
				return;
			dw8250_force_idle(p);
			__raw_writeq(value & 0xff,
				     p->membase + (UART_LCR << p->regshift));
		}
		/*
		 * FIXME: this deadlocks if port->lock is already held
		 * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
		 */
	}
}
/*
 * Program the TX4938 external-bus controller timing for the given IDE
 * PIO mode: derive wait-cycle (WT) and setup/hold (SHWT) counts from
 * the ide_timing table and the EBUS clock, then update the channel's
 * control register.
 */
static void tx4938ide_tune_ebusc(unsigned int ebus_ch,
				 unsigned int gbus_clock,
				 u8 pio)
{
	struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
	u64 cr = __raw_readq(&tx4938_ebuscptr->cr[ebus_ch]);
	unsigned int sp = (cr >> 4) & 3;
	/* EBUS clock is the GBUS clock divided by (4 - SP). */
	unsigned int clock = gbus_clock / (4 - sp);
	unsigned int cycle = 1000000000 / clock;	/* cycle time in ns */
	unsigned int shwt;
	int wt;

	/* Minimum DIOx- active time */
	wt = DIV_ROUND_UP(t->act8b, cycle) - 2;
	/* IORDY setup time: 35ns */
	wt = max_t(int, wt, DIV_ROUND_UP(35, cycle));
	/* actual wait-cycle is max(wt & ~1, 1) */
	if (wt > 2 && (wt & 1))
		wt++;
	wt &= ~1;
	/* Address-valid to DIOR/DIOW setup */
	shwt = DIV_ROUND_UP(t->setup, cycle);

	/* -DIOx recovery time (SHWT * 4) and cycle time requirement */
	while ((shwt * 4 + wt + (wt ? 2 : 3)) * cycle < t->cycle)
		shwt++;
	if (shwt > 7) {
		pr_warning("tx4938ide: SHWT violation (%d)\n", shwt);
		shwt = 7;
	}
	pr_debug("tx4938ide: ebus %d, bus cycle %dns, WT %d, SHWT %d\n",
		 ebus_ch, cycle, wt, shwt);

	/* Merge the new WT and SHWT fields into the control register. */
	__raw_writeq((cr & ~0x3f007ull) | (wt << 12) | shwt,
		     &tx4938_ebuscptr->cr[ebus_ch]);
}
/*
 * swarm_ide_probe - if the board header indicates the existence of
 * Generic Bus IDE, allocate a HWIF for it.
 */
static int __devinit swarm_ide_probe(struct device *dev)
{
	ide_hwif_t *hwif;
	u8 __iomem *base;
	phys_t offset, size;
	int i;

	if (!SIBYTE_HAVE_IDE)
		return -ENODEV;

	/* Find an empty slot. */
	for (i = 0; i < MAX_HWIFS; i++)
		if (!ide_hwifs[i].io_ports[IDE_DATA_OFFSET])
			break;
	if (i >= MAX_HWIFS) {
		printk(KERN_ERR DRV_NAME ": no free slot for interface\n");
		return -ENOMEM;
	}
	hwif = ide_hwifs + i;

	/* Read the GenBus chip-select window for the IDE CS, then unmap. */
	base = ioremap(A_IO_EXT_BASE, 0x800);
	offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS));
	size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS));
	iounmap(base);

	offset = G_IO_START_ADDR(offset) << S_IO_ADDRBASE;
	size = (G_IO_MULT_SIZE(size) + 1) << S_IO_REGSIZE;
	if (offset < A_PHYS_GENBUS || offset >= A_PHYS_GENBUS_END) {
		printk(KERN_INFO DRV_NAME ": IDE interface at GenBus disabled\n");
		return -EBUSY;
	}

	printk(KERN_INFO DRV_NAME ": IDE interface at GenBus slot %i\n",
	       IDE_CS);

	swarm_ide_resource.start = offset;
	swarm_ide_resource.end = offset + size - 1;
	if (request_resource(&iomem_resource, &swarm_ide_resource)) {
		printk(KERN_ERR DRV_NAME
		       ": can't request I/O memory resource\n");
		return -EBUSY;
	}

	base = ioremap(offset, size);

	/* Setup MMIO ops. */
	default_hwif_mmiops(hwif);
	/* Prevent resource map manipulation. */
	hwif->mmio = 2;
	hwif->noprobe = 0;

	/*
	 * Registers are spaced 32 bytes apart on this board (hence the
	 * << 5 shift applied to the legacy 0x1f0/0x3f6 offsets).
	 */
	for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
		hwif->hw.io_ports[i] =
				(unsigned long)(base + ((0x1f0 + i) << 5));
	hwif->hw.io_ports[IDE_CONTROL_OFFSET] =
				(unsigned long)(base + (0x3f6 << 5));
	hwif->hw.irq = K_INT_GB_IDE;

	memcpy(hwif->io_ports, hwif->hw.io_ports, sizeof(hwif->io_ports));
	hwif->irq = hwif->hw.irq;

	dev_set_drvdata(dev, hwif);

	return 0;
}