int pci_fixup_pcic(void) { unsigned long bcr1, mcr; bcr1 = ctrl_inl(SH7751_BCR1); bcr1 |= 0x40080000; /* Enable Bit 19 BREQEN, set PCIC to slave */ pci_write_reg(bcr1, SH4_PCIBCR1); /* Enable all interrupts, so we known what to fix */ pci_write_reg(0x0000c3ff, SH4_PCIINTM); pci_write_reg(0x0000380f, SH4_PCIAINTM); pci_write_reg(0xfb900047, SH7751_PCICONF1); pci_write_reg(0xab000001, SH7751_PCICONF4); mcr = ctrl_inl(SH7751_MCR); mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF; pci_write_reg(mcr, SH4_PCIMCR); pci_write_reg(0x0c000000, SH7751_PCICONF5); pci_write_reg(0xd0000000, SH7751_PCICONF6); pci_write_reg(0x0c000000, SH4_PCILAR0); pci_write_reg(0x00000000, SH4_PCILAR1); return 0; }
/*
 * Report the number of transfer units still outstanding on a channel.
 * A channel whose CHCR.DE (enable) bit is clear has no residue.
 */
static int sh_dmac_get_dma_residue(struct dma_channel *chan)
{
	unsigned long chcr = ctrl_inl(CHCR[chan->chan]);

	if (chcr & CHCR_DE)
		return ctrl_inl(DMATCR[chan->chan]) << calc_xmit_shift(chan);

	return 0;
}
/* Chip Reset */ static void sh_eth_reset(struct net_device *ndev) { u32 ioaddr = ndev->base_addr; #if defined(CONFIG_CPU_SUBTYPE_SH7763) int cnt = 100; ctrl_outl(EDSR_ENALL, ioaddr + EDSR); ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR); while (cnt > 0) { if (!(ctrl_inl(ioaddr + EDMR) & 0x3)) break; mdelay(1); cnt--; } if (cnt < 0) printk(KERN_ERR "Device reset fail\n"); /* Table Init */ ctrl_outl(0x0, ioaddr + TDLAR); ctrl_outl(0x0, ioaddr + TDFAR); ctrl_outl(0x0, ioaddr + TDFXR); ctrl_outl(0x0, ioaddr + TDFFR); ctrl_outl(0x0, ioaddr + RDLAR); ctrl_outl(0x0, ioaddr + RDFAR); ctrl_outl(0x0, ioaddr + RDFXR); ctrl_outl(0x0, ioaddr + RDFFR); #else ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR); mdelay(3); ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR); #endif }
static int pmb_seq_show(struct seq_file *file, void *iter) { int i; seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n" "CB: Copy-Back, B: Buffered, UB: Unbuffered\n"); seq_printf(file, "ety vpn ppn size flags\n"); for (i = 0; i < NR_PMB_ENTRIES; i++) { unsigned long addr, data; unsigned int size; char *sz_str = NULL; addr = ctrl_inl(mk_pmb_addr(i)); data = ctrl_inl(mk_pmb_data(i)); size = data & PMB_SZ_MASK; sz_str = (size == PMB_SZ_16M) ? " 16MB": (size == PMB_SZ_64M) ? " 64MB": (size == PMB_SZ_128M) ? "128MB": "512MB"; /* 02: V 0x88 0x08 128MB C CB B */ seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n", i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ', (addr >> 24) & 0xff, (data >> 24) & 0xff, sz_str, (data & PMB_C) ? 'C' : ' ', (data & PMB_WT) ? "WT" : "CB", (data & PMB_UB) ? "UB" : " B"); } return 0; }
/*
 * clear_pmb_entry - invalidate one hardware PMB slot.
 * @pmbe: software descriptor naming the slot to clear.
 *
 * Must run from uncached space: the code that pokes the PMB cannot
 * itself rely on a PMB translation that may be going away.
 */
void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	/*
	 * Don't allow clearing of wired init entries, P1 or P2 access
	 * without a corresponding mapping in the PMB will lead to reset
	 * by the TLB.
	 */
	if (unlikely(entry < ARRAY_SIZE(pmb_init_map) ||
		     entry >= NR_PMB_ENTRIES))
		return;

	jump_to_uncached();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	back_to_cached();

	/* Release the slot in the software allocation bitmap. */
	clear_bit(entry, &pmb_map);
}
/*********************************************************************** * pwm_irq_enable() * * Enable interrupt generation by the PWM counter. */ void pwm_irq_enable(void) { u32 reg = 0; /* * TODO : Just enable & ack all the sources for now! */ reg = ctrl_inl(pwm->base + PWM_INT_ACK_REG); reg |= CMP1_INT_EN; // reg |= CMP0_INT_EN; reg |= CPT1_INT_EN; // reg |= CPT0_INT_EN; // reg |= PWM_INT_EN; ctrl_outl(reg, pwm->base + PWM_INT_ACK_REG); reg = ctrl_inl(pwm->base + PWM_INT_EN_REG); reg |= CMP1_INT_EN; // reg |= CMP0_INT_EN; reg |= CPT1_INT_EN; // reg |= CPT0_INT_EN; // reg |= PWM_INT_EN; ctrl_outl(reg, pwm->base + PWM_INT_EN_REG); return; }
/*
 * migor_setup - Migo-R board setup: pin-function and module setup for
 * the on-board peripherals (Ethernet IRQ, keypad, NAND, I2C, touch).
 */
static void __init migor_setup(char **cmdline_p)
{
	/* SMC91C111 - Enable IRQ0 */
	ctrl_outw(ctrl_inw(PORT_PJCR) & ~0x0003, PORT_PJCR);

	/* KEYSC */
	ctrl_outw(ctrl_inw(PORT_PYCR) & ~0x0fff, PORT_PYCR);
	ctrl_outw(ctrl_inw(PORT_PZCR) & ~0x0ff0, PORT_PZCR);
	ctrl_outw(ctrl_inw(PORT_PSELA) & ~0x4100, PORT_PSELA);
	ctrl_outw(ctrl_inw(PORT_HIZCRA) & ~0x4000, PORT_HIZCRA);
	ctrl_outw(ctrl_inw(PORT_HIZCRC) & ~0xc000, PORT_HIZCRC);
	/* clear bit 14 of MSTPCR2 — presumably the KEYSC module-stop
	 * bit (enables its clock); confirm against the SoC datasheet */
	ctrl_outl(ctrl_inl(MSTPCR2) & ~0x00004000, MSTPCR2);

	/* NAND Flash */
	ctrl_outw(ctrl_inw(PORT_PXCR) & 0x0fff, PORT_PXCR);
	/* adjust CS6AB bus timing/width bits for the NAND part */
	ctrl_outl((ctrl_inl(BSC_CS6ABCR) & ~0x00000600) | 0x00000200,
		  BSC_CS6ABCR);

	/* I2C - clear bit 9 of MSTPCR1 (module-stop; enables I2C) */
	ctrl_outl(ctrl_inl(MSTPCR1) & ~0x00000200, MSTPCR1);

	/* Touch Panel - Enable IRQ6 */
	ctrl_outw(ctrl_inw(PORT_PZCR) & ~0xc, PORT_PZCR);
	ctrl_outw((ctrl_inw(PORT_PSELA) | 0x8000), PORT_PSELA);
	ctrl_outw((ctrl_inw(PORT_HIZCRC) & ~0x4000), PORT_HIZCRC);
}
static int sh4202_read_vcr(unsigned long base, struct superhyway_vcr_info *vcr) { u32 vcrh, vcrl; u64 tmp; /* * XXX: Even though the SH4-202 Evaluation Device documentation * indicates that VCRL is mapped first with VCRH at a + 0x04 * offset, the opposite seems to be true. * * Some modules (PBR and ePBR for instance) also appear to have * VCRL/VCRH flipped in the documentation, but on the SH4-202 * itself it appears that these are all consistently mapped with * VCRH preceding VCRL. * * Do not trust the documentation, for it is evil. */ vcrh = ctrl_inl(base); vcrl = ctrl_inl(base + sizeof(u32)); tmp = ((u64)vcrh << 32) | vcrl; memcpy(vcr, &tmp, sizeof(u64)); return 0; }
/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	u32 ioaddr = ndev->base_addr;
	int new_state = 0;	/* set when anything worth logging changed */

	if (phydev->link != PHY_DOWN) {
		/* Mirror a duplex change from the PHY into the MAC. */
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
			if (mdp->duplex) { /* FULL */
				ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM,
					  ioaddr + ECMR);
			} else { /* Half */
				ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM,
					  ioaddr + ECMR);
			}
#endif
		}

		/* Mirror a speed change (GECMR exists on SH7763 only). */
		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
			switch (mdp->speed) {
			case 10: /* 10BASE */
				ctrl_outl(GECMR_10, ioaddr + GECMR);
				break;
			case 100:/* 100BASE */
				ctrl_outl(GECMR_100, ioaddr + GECMR);
				break;
			case 1000: /* 1000BASE */
				ctrl_outl(GECMR_1000, ioaddr + GECMR);
				break;
			default:
				break;
			}
#endif
		}

		/* Link just came up: clear ECMR_TXF, set ECMR_DM. */
		if (mdp->link == PHY_DOWN) {
			ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
				  | ECMR_DM, ioaddr + ECMR);
			new_state = 1;
			mdp->link = phydev->link;
		}
	} else if (mdp->link) {
		/* Link went down: reset the cached PHY state. */
		new_state = 1;
		mdp->link = PHY_DOWN;
		mdp->speed = 0;
		mdp->duplex = -1;
	}

	if (new_state)
		phy_print_status(phydev);
}
/*
 * Read one of the two SH7750 performance counters.  Each counter is
 * 48 bits wide: 16 significant bits in the high register, 32 in the low.
 */
static u64 sh7750_read_counter(int counter)
{
	u32 hi, lo;

	if (counter == 0) {
		hi = ctrl_inl(PMCTR1H);
		lo = ctrl_inl(PMCTR1L);
	} else {
		hi = ctrl_inl(PMCTR2H);
		lo = ctrl_inl(PMCTR2L);
	}

	return ((u64)(hi & 0xffff) << 32) | lo;
}
/*
 * 32-bit port read for the HS7751RVoIP board: on-chip addresses and
 * PCI I/O space are read directly; anything else is flagged as bad.
 */
unsigned int hs7751rvoip_inl(unsigned long port)
{
	if (PXSEG(port))
		return ctrl_inl(port);

	if (is_pci_ioaddr(port) || shifted_port(port))
		return ctrl_inl(pci_ioaddr(port));

	maybebadio(port);
	return 0;
}
/* * Get MAC address from SuperH MAC address register * * SuperH's Ethernet device doesn't have 'ROM' to MAC address. * This driver get MAC address that use by bootloader(U-boot or sh-ipl+g). * When you want use this device, you must set MAC address in bootloader. * */ static void read_mac_address(struct net_device *ndev) { u32 ioaddr = ndev->base_addr; ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24); ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF; ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF; ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF); ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF; ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF); }
/*
 * 32-bit port read for the Landisk board: handles on-chip and PCI
 * I/O addresses, complains about anything else.
 */
u32 landisk_inl(unsigned long port)
{
	if (PXSEG(port))
		return ctrl_inl(port);

	if (is_pci_ioaddr(port))
		return ctrl_inl(pci_ioaddr(port));

	maybebadio(port);
	return 0;
}
/*
 * 32-bit port read for the Titan board: on-chip and PCI I/O space are
 * read directly; ports >= 0x2000 go through the board address
 * translation as a 16-bit access; anything else is flagged as bad.
 */
u32 titan_inl(unsigned long port)
{
	if (PXSEG(port))
		return ctrl_inl(port);

	if (is_pci_ioaddr(port))
		return ctrl_inl(pci_ioaddr(port));

	if (port >= 0x2000)
		return ctrl_inw(port2adr(port));

	maybebadio(port);
	return 0;
}
/*
 * dma_err - DMAC error interrupt handler.
 *
 * Logs the error, clears the error conditions (NMI flag and address
 * error), re-enables the DMAC, then masks this irq so a stuck error
 * condition cannot storm.
 */
static irqreturn_t dma_err(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long dmaor = ctrl_inl(DMAOR);

	printk("DMAE: DMAOR=%lx\n", dmaor);

	/*
	 * Clear NMIF and AE and re-assert DME in a single
	 * read-modify-write instead of three back-to-back RMW cycles
	 * on DMAOR.
	 */
	ctrl_outl((ctrl_inl(DMAOR) & ~(DMAOR_NMIF | DMAOR_AE)) | DMAOR_DME,
		  DMAOR);

	disable_irq(irq);

	return IRQ_HANDLED;
}
/* Multicast reception directions set */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;
	u32 ecmr = ctrl_inl(ioaddr + ECMR);

	if (ndev->flags & IFF_PROMISC) {
		/* Set promiscuous. */
		ecmr = (ecmr & ~ECMR_MCT) | ECMR_PRM;
	} else {
		/* Normal, unicast/broadcast-only mode. */
		ecmr = (ecmr & ~ECMR_PRM) | ECMR_MCT;
	}

	ctrl_outl(ecmr, ioaddr + ECMR);
}
/*
 * disable_systemh_irq - mask a SystemH interrupt source and latch a
 * request for it in the request register.
 *
 * NOTE(review): the in-line comment below speaks of the "irq"th bit,
 * but the mask is hard-coded to bit 1 regardless of the irq argument —
 * confirm whether this board only routes a single source here or the
 * mask should be derived from irq.
 */
static void disable_systemh_irq(unsigned int irq)
{
	if (systemh_irq_mask_register) {
		unsigned long val, mask = 0x01 << 1;

		/* Clear the "irq"th bit in the mask and set it in the request */
		val = ctrl_inl((unsigned long)systemh_irq_mask_register);
		val &= ~mask;
		ctrl_outl(val, (unsigned long)systemh_irq_mask_register);

		val = ctrl_inl((unsigned long)systemh_irq_request_register);
		val |= mask;
		ctrl_outl(val, (unsigned long)systemh_irq_request_register);
	}
}
/**
 * aica_rtc_gettimeofday - Get the time from the AICA RTC
 * @tv: pointer to resulting timeval
 *
 * Grabs the current RTC seconds counter and adjusts it to the Unix Epoch.
 */
void aica_rtc_gettimeofday(struct timeval *tv)
{
	unsigned long first, second;

	/*
	 * The 32-bit seconds counter is split across two 16-bit
	 * registers; sample it twice and retry until both samples agree,
	 * to guard against a carry between the two halves mid-read.
	 */
	do {
		first  = ((ctrl_inl(AICA_RTC_SECS_H) & 0xffff) << 16) |
			  (ctrl_inl(AICA_RTC_SECS_L) & 0xffff);
		second = ((ctrl_inl(AICA_RTC_SECS_H) & 0xffff) << 16) |
			  (ctrl_inl(AICA_RTC_SECS_L) & 0xffff);
	} while (first != second);

	tv->tv_sec = first - TWENTY_YEARS;

	/* Can't get microseconds with just a seconds counter. */
	tv->tv_usec = 0;
}
/*
 * __flush_wback_region - write back (without invalidating) any dirty
 * cache lines covering [start, start + size).
 *
 * Probes the operand-cache address array directly: for each line-sized
 * address in the range, all four ways are checked and a matching line
 * has its UPDATED (dirty) bit cleared, forcing a write back while the
 * line stays valid.
 */
void __flush_wback_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;
	unsigned long flags;

	/* Round the range out to whole cache lines. */
	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	local_irq_save(flags);
	jump_to_uncached();	/* cache arrays must be touched uncached */

	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		/* entry-offset bits of the address array index */
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0);
		int way;

		/* 4-way cache: way select lives in bits [12:11]. */
		for (way = 0; way < 4; way++) {
			unsigned long data = ctrl_inl(addr | (way << 11));

			if ((data & CACHE_PHYSADDR_MASK) ==
			    (v & CACHE_PHYSADDR_MASK)) {
				/* clear dirty bit: write back, keep valid */
				data &= ~SH_CACHE_UPDATED;
				ctrl_outl(data, addr | (way << 11));
			}
		}
	}

	back_to_cached();
	local_irq_restore(flags);
}
/*
 * __flush_invalidate_region - invalidate the cache lines covering
 * [start, start + size), discarding any dirty data.
 */
void __flush_invalidate_region(void *start, int size)
{
#ifdef CONFIG_CACHE_WRITEBACK
	/*
	 * SH-2 does not support individual line invalidation, only a
	 * global invalidate.
	 */
	unsigned long ccr;
	unsigned long flags;

	local_irq_save(flags);
	jump_to_uncached();	/* CCR must be written from uncached space */

	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_INVALIDATE;
	ctrl_outl(ccr, CCR);

	back_to_cached();
	local_irq_restore(flags);
#else
	unsigned long v;
	unsigned long begin, end;

	/* Round the range out to whole cache lines. */
	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	/*
	 * Write-through mode: per-line associative purge through the
	 * address array (bit 3 selects associative access).
	 */
	for (v = begin; v < end; v+=L1_CACHE_BYTES)
		ctrl_outl((v & CACHE_PHYSADDR_MASK),
			  CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0)
			  | 0x00000008);
#endif
}
/*
 * __flush_invalidate_region - invalidate both I- and O-cache over
 * [start, start + size), discarding any dirty data.
 */
void __flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;
	unsigned long flags;

	/* Round the range out to whole cache lines. */
	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	local_irq_save(flags);
	jump_to_uncached();	/* cache arrays must be touched uncached */

#ifdef CONFIG_CACHE_WRITEBACK
	/*
	 * Write-back mode: no per-line O-cache op here, so invalidate
	 * the whole operand cache via CCR.
	 */
	ctrl_outl(ctrl_inl(CCR) | CCR_OCACHE_INVALIDATE, CCR);

	/* I-cache invalidate */
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		ctrl_outl((v & CACHE_PHYSADDR_MASK),
			  CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0)
			  | 0x00000008);
	}
#else
	/* Write-through: per-line associative purge of both caches. */
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		ctrl_outl((v & CACHE_PHYSADDR_MASK),
			  CACHE_IC_ADDRESS_ARRAY | (v & 0x000007f0)
			  | 0x00000008);
		ctrl_outl((v & CACHE_PHYSADDR_MASK),
			  CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0)
			  | 0x00000008);
	}
#endif

	back_to_cached();
	local_irq_restore(flags);
}
/* Board IRQ setup for x3proto: IRL3210 pin mode plus level-mode ICR0. */
static void __init x3proto_init_irq(void)
{
	unsigned long icr0;

	plat_irq_setup_pins(IRQ_MODE_IRL3210);

	/* Set ICR0.LVLMODE */
	icr0 = ctrl_inl(0xfe410000);
	ctrl_outl(icr0 | (1 << 21), 0xfe410000);
}
/*
 * __flush_wback_region - write back any dirty cache lines covering
 * [start, start + size) without invalidating them.
 */
void __flush_wback_region(void *start, int size)
{
	unsigned long v, j;
	unsigned long begin, end;
	unsigned long flags;

	/* Round the range out to whole cache lines. */
	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		/* Probe every way for a line holding this address. */
		for (j=0; j<CACHE_OC_NUM_WAYS; j++) {
			unsigned long data, addr, p;

			p = __pa(v);
			addr = CACHE_OC_ADDRESS_ARRAY|(j<<CACHE_OC_WAY_SHIFT)|
				(v&CACHE_OC_ENTRY_MASK);

			/* tag read-modify-write must not be interrupted */
			save_and_cli(flags);
			data = ctrl_inl(addr);

			if ((data & CACHE_PHYSADDR_MASK) ==
			    (p & CACHE_PHYSADDR_MASK)) {
				/* clear dirty bit: write back, keep valid */
				data &= ~CACHE_UPDATED;
				ctrl_outl(data, addr);
				restore_flags(flags);
				break;	/* at most one way can match */
			}
			restore_flags(flags);
		}
	}
}
/* WBack O-Cache and flush I-Cache */
void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long v;
	unsigned long flags;

	/* Round the range out to whole cache lines. */
	start = start & ~(L1_CACHE_BYTES-1);
	end = (end + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);

	local_irq_save(flags);
	jump_to_uncached();	/* cache arrays must be touched uncached */

	for (v = start; v < end; v+=L1_CACHE_BYTES) {
		/* entry-offset bits shared by both cache address arrays */
		unsigned long addr = (v & 0x000007f0);
		int way;

		/* O-Cache writeback */
		for (way = 0; way < 4; way++) {	/* way select: bits [12:11] */
			unsigned long data = ctrl_inl(CACHE_OC_ADDRESS_ARRAY | addr | (way << 11));

			if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
				/* clear dirty bit: write back, keep valid */
				data &= ~SH_CACHE_UPDATED;
				ctrl_outl(data, CACHE_OC_ADDRESS_ARRAY | addr | (way << 11));
			}
		}

		/*
		 * I-Cache invalidate: associative write (bit 3 set).
		 * NOTE(review): the data written is the entry offset
		 * "addr" rather than a tag derived from "v"; for an
		 * associative purge the data field normally carries the
		 * compare tag — verify against the CPU manual.
		 */
		ctrl_outl(addr, CACHE_IC_ADDRESS_ARRAY | addr | 0x00000008);
	}

	back_to_cached();
	local_irq_restore(flags);
}
/* * Setup an INTC2 style interrupt. * NOTE: Unlike IPR interrupts, parameters are not shifted by this code, * allowing the use of the numbers straight out of the datasheet. * For example: * PIO1 which is INTPRI00[19,16] and INTMSK00[13] * would be: ^ ^ ^ ^ * | | | | * make_intc2_irq(84, 0, 16, 0, 13); */ void make_intc2_irq(unsigned int irq, unsigned int ipr_offset, unsigned int ipr_shift, unsigned int msk_offset, unsigned int msk_shift, unsigned int priority) { int irq_offset = irq - INTC2_FIRST_IRQ; unsigned int flags; unsigned long ipr; if((irq_offset<0) || (irq_offset>=NR_INTC2_IRQS)) return; disable_irq_nosync(irq); /* Fill the data we need */ intc2_data[irq_offset].msk_offset = msk_offset; intc2_data[irq_offset].msk_shift = msk_shift; #ifdef CONFIG_CPU_SUBTYPE_ST40 intc2_data[irq_offset].clear_irq = NULL; #endif /* Set the priority level */ local_irq_save(flags); ipr=ctrl_inl(INTC2_BASE+INTC2_INTPRI_OFFSET+ipr_offset); ipr&=~(0xf<<ipr_shift); ipr|=(priority)<<ipr_shift; ctrl_outl(ipr, INTC2_BASE+INTC2_INTPRI_OFFSET+ipr_offset); local_irq_restore(flags); irq_desc[irq].handler=&intc2_irq_type; disable_intc2_irq(irq); }
/*
 * sh3__flush_wback_region - write back any dirty SH-3 cache lines
 * covering [start, start + size), leaving them valid.
 */
static void sh3__flush_wback_region(void *start, int size)
{
	unsigned long v, j;
	unsigned long begin, end;
	unsigned long flags;

	/* Round the range out to whole cache lines. */
	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		unsigned long addrstart = CACHE_OC_ADDRESS_ARRAY;

		/* Walk every way looking for a tag match. */
		for (j = 0; j < current_cpu_data.dcache.ways; j++) {
			unsigned long data, addr, p;

			p = __pa(v);
			addr = addrstart |
				(v & current_cpu_data.dcache.entry_mask);

			/* tag read-modify-write must not be interrupted */
			local_irq_save(flags);
			data = ctrl_inl(addr);

			if ((data & CACHE_PHYSADDR_MASK) ==
			    (p & CACHE_PHYSADDR_MASK)) {
				/* clear dirty bit: write back, keep valid */
				data &= ~SH_CACHE_UPDATED;
				ctrl_outl(data, addr);
				local_irq_restore(flags);
				break;	/* at most one way can match */
			}
			local_irq_restore(flags);
			addrstart += current_cpu_data.dcache.way_incr;
		}
	}
}
/*
 * plat_irq_setup_pins - configure how the external IRL/IRQ pins are
 * decoded, according to the board's requested mode.
 */
void __init plat_irq_setup_pins(int mode)
{
	switch (mode) {
	case IRQ_MODE_IRQ:
		/* select IRQ mode for IRL3-0 + IRL7-4 */
		ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
		register_intc_controller(&intc_irq_desc);
		break;
	case IRQ_MODE_IRL7654:
		/* enable IRL7-4 but don't provide any masking */
		ctrl_outl(0x40000000, INTC_INTMSKCLR1);
		ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
		break;
	case IRQ_MODE_IRL3210:
		/* enable IRL0-3 but don't provide any masking */
		ctrl_outl(0x80000000, INTC_INTMSKCLR1);
		ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
		break;
	case IRQ_MODE_IRL7654_MASK:
		/* enable IRL7-4 and mask using cpu intc controller */
		ctrl_outl(0x40000000, INTC_INTMSKCLR1);
		register_intc_controller(&intc_irl7654_desc);
		break;
	case IRQ_MODE_IRL3210_MASK:
		/* enable IRL0-3 and mask using cpu intc controller */
		ctrl_outl(0x80000000, INTC_INTMSKCLR1);
		register_intc_controller(&intc_irl3210_desc);
		break;
	default:
		/* an unknown mode is a caller bug */
		BUG();
	}
}
/*
 * The 32KB cache on the SH7705 suffers from the same synonym problem
 * as SH4 CPUs
 */
static inline void cache_wback_all(void)
{
	unsigned long ways, waysize, addrstart;

	ways = current_cpu_data.dcache.ways;
	waysize = current_cpu_data.dcache.sets <<
		  current_cpu_data.dcache.entry_shift;
	addrstart = CACHE_OC_ADDRESS_ARRAY;

	do {
		unsigned long addr;

		for (addr = addrstart; addr < addrstart + waysize;
		     addr += current_cpu_data.dcache.linesz) {
			int dirty_valid = SH_CACHE_UPDATED | SH_CACHE_VALID;
			unsigned long data = ctrl_inl(addr);

			/* Write back and invalidate dirty, valid lines. */
			if ((data & dirty_valid) == dirty_valid)
				ctrl_outl(data & ~dirty_valid, addr);
		}

		addrstart += current_cpu_data.dcache.way_incr;
	} while (--ways);
}
/*
 * plat_irq_setup - initial interrupt controller bring-up: mask all
 * external IRQ/IRL sources, put the pins into IRL mode, and register
 * the cpu intc controller.
 */
void __init plat_irq_setup(void)
{
	/* disable IRQ3-0 + IRQ7-4 */
	ctrl_outl(0xff000000, INTC_INTMSK0);

	/* disable IRL3-0 + IRL7-4 */
	ctrl_outl(0xc0000000, INTC_INTMSK1);
	ctrl_outl(0xfffefffe, INTC_INTMSK2);

	/* select IRL mode for IRL3-0 + IRL7-4 */
	ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);

	/* disable holding function, ie enable "SH-4 Mode" */
	ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0);

	register_intc_controller(&intc_desc);
}
/*
 * We determine the correct shift size based off of the CHCR transmit size
 * for the given channel. Since we know that it will take:
 *
 *	info->count >> ts_shift[transmit_size]
 *
 * iterations to complete the transfer.
 */
static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
{
	/* The TS (transmit size) field sits in CHCR bits [6:4]. */
	return ts_shift[(ctrl_inl(CHCR[chan->chan]) >> 4) & 0x0007];
}