static int ppi_start(struct ppi_if *ppi) { const struct ppi_info *info = ppi->info; /* enable DMA */ enable_dma(info->dma_ch); /* enable PPI */ ppi->ppi_control |= PORT_EN; switch (info->type) { case PPI_TYPE_PPI: { struct bfin_ppi_regs *reg = info->base; bfin_write16(®->control, ppi->ppi_control); break; } case PPI_TYPE_EPPI: { struct bfin_eppi_regs *reg = info->base; bfin_write32(®->control, ppi->ppi_control); break; } case PPI_TYPE_EPPI3: { struct bfin_eppi3_regs *reg = info->base; bfin_write32(®->ctl, ppi->ppi_control); break; } default: return -EINVAL; } SSYNC(); return 0; }
/*
 * early_dma_memcpy - copy memory via the on-chip memory-DMA engines,
 * usable very early in boot (before the console is up).
 *
 * @pdst: destination address (must be 4-byte aligned)
 * @psrc: source address (must be 4-byte aligned)
 * @size: byte count (must be a multiple of 4)
 *
 * Picks whichever MDMA stream (0 or 1) is free, programs the source and
 * destination channel registers, and kicks off a 32-bit-word transfer.
 * The copy is started but NOT waited on here; completion is presumably
 * reaped elsewhere (the destination channel raises DMA_DONE) -- confirm
 * against the caller.
 */
void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
{
	unsigned long dst = (unsigned long)pdst;
	unsigned long src = (unsigned long)psrc;
	struct dma_register *dst_ch, *src_ch;

	early_shadow_stamp();

	/* We assume that everything is 4 byte aligned, so include
	 * a basic sanity check
	 */
	BUG_ON(dst % 4);
	BUG_ON(src % 4);
	BUG_ON(size % 4);

	src_ch = 0;
	/* Find an available memDMA channel */
	while (1) {
		/* Alternate between stream 0 and stream 1 each iteration. */
		if (src_ch == (struct dma_register *)MDMA_S0_NEXT_DESC_PTR) {
			dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR;
		} else {
			dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
		}

		/* Idle channel (cfg == 0) is free; a finished one can be
		 * reclaimed by clearing its config.
		 */
		if (!bfin_read16(&src_ch->cfg))
			break;
		else if (bfin_read16(&dst_ch->irq_status) & DMA_DONE) {
			bfin_write16(&src_ch->cfg, 0);
			break;
		}
	}

	/* Force a sync in case a previous config reset on this channel
	 * occurred. This is needed so subsequent writes to DMA registers
	 * are not spuriously lost/corrupted.
	 */
	__builtin_bfin_ssync();

	/* Destination: size>>2 32-bit words, stride of 4 bytes; W1C the
	 * stale DONE/ERR status bits before starting.
	 */
	bfin_write32(&dst_ch->start_addr, dst);
	bfin_write16(&dst_ch->x_count, size >> 2);
	bfin_write16(&dst_ch->x_modify, 1 << 2);
	bfin_write16(&dst_ch->irq_status, DMA_DONE | DMA_ERR);

	/* Source: mirror of the destination programming. */
	bfin_write32(&src_ch->start_addr, src);
	bfin_write16(&src_ch->x_count, size >> 2);
	bfin_write16(&src_ch->x_modify, 1 << 2);
	bfin_write16(&src_ch->irq_status, DMA_DONE | DMA_ERR);

	/* Enable: source first, then destination (WNR = memory write). */
	bfin_write16(&src_ch->cfg, DMAEN | WDSIZE_32);
	bfin_write16(&dst_ch->cfg, WNR | DI_EN | DMAEN | WDSIZE_32);

	/* Since we are atomic now, don't use the workaround ssync */
	__builtin_bfin_ssync();
}
int bf609_nor_flash_init(struct platform_device *pdev) { #define CONFIG_SMC_GCTL_VAL 0x00000010 bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL); bfin_write32(SMC_B0CTL, 0x01002011); bfin_write32(SMC_B0TIM, 0x08170977); bfin_write32(SMC_B0ETIM, 0x00092231); return 0; }
int bf609_nor_flash_init(struct platform_device *pdev) { #define CONFIG_SMC_GCTL_VAL 0x00000010 if (!devm_pinctrl_get_select_default(&pdev->dev)) return -EBUSY; bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL); bfin_write32(SMC_B0CTL, 0x01002011); bfin_write32(SMC_B0TIM, 0x08170977); bfin_write32(SMC_B0ETIM, 0x00092231); return 0; }
/*
 * init_cplbtables - populate the instruction/data CPLB tables.
 *
 * NOTE(review): this definition is truncated in the visible chunk -- the
 * body continues beyond what is shown here; only the declarations and
 * the icplb_add() helper are visible.
 */
void init_cplbtables(void)
{
	uint32_t *ICPLB_ADDR, *ICPLB_DATA;
	uint32_t *DCPLB_ADDR, *DCPLB_DATA;
	uint32_t extern_memory;
	size_t i;

	/* GCC nested-function extension: writes CPLB entry slot `i'
	 * (captured from the enclosing scope) into the ICPLB MMRs.
	 */
	void icplb_add(uint32_t addr, uint32_t data)
	{
		bfin_write32(ICPLB_ADDR + i, addr);
		bfin_write32(ICPLB_DATA + i, data);
	}
/*
 * early_dma_memcpy - copy memory via the on-chip memory-DMA engines,
 * usable very early in boot.
 *
 * @pdst: destination address (must be 4-byte aligned)
 * @psrc: source address (must be 4-byte aligned)
 * @size: byte count (must be a multiple of 4)
 *
 * Selects a free MDMA stream (0 or 1), programs both channel halves and
 * starts a 32-bit-word transfer.  Completion is not waited on here.
 */
void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
{
	unsigned long dst = (unsigned long)pdst;
	unsigned long src = (unsigned long)psrc;
	struct dma_register *dst_ch, *src_ch;

	early_shadow_stamp();

	/* Everything must be 4-byte aligned. */
	BUG_ON(dst % 4);
	BUG_ON(src % 4);
	BUG_ON(size % 4);

	src_ch = 0;
	/* Find an available memDMA channel, alternating between the two
	 * streams; a finished channel (DMA_DONE set on its destination
	 * half) is reclaimed by zeroing its config.
	 */
	while (1) {
		if (src_ch == (struct dma_register *)MDMA_S0_NEXT_DESC_PTR) {
			dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR;
		} else {
			dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
		}

		if (!bfin_read16(&src_ch->cfg))
			break;
		else if (bfin_read16(&dst_ch->irq_status) & DMA_DONE) {
			bfin_write16(&src_ch->cfg, 0);
			break;
		}
	}

	/* Sync so the config reset above cannot corrupt the register
	 * writes that follow.
	 */
	__builtin_bfin_ssync();

	/* Destination: size>>2 words of 4 bytes; clear stale DONE/ERR. */
	bfin_write32(&dst_ch->start_addr, dst);
	bfin_write16(&dst_ch->x_count, size >> 2);
	bfin_write16(&dst_ch->x_modify, 1 << 2);
	bfin_write16(&dst_ch->irq_status, DMA_DONE | DMA_ERR);

	/* Source: mirror of the destination programming. */
	bfin_write32(&src_ch->start_addr, src);
	bfin_write16(&src_ch->x_count, size >> 2);
	bfin_write16(&src_ch->x_modify, 1 << 2);
	bfin_write16(&src_ch->irq_status, DMA_DONE | DMA_ERR);

	/* Enable: source first, then destination (WNR = memory write). */
	bfin_write16(&src_ch->cfg, DMAEN | WDSIZE_32);
	bfin_write16(&dst_ch->cfg, WNR | DI_EN | DMAEN | WDSIZE_32);

	__builtin_bfin_ssync();
}
/*
 * write_icplb_data - install one instruction-CPLB entry on @cpu.
 *
 * The ICPLBs are disabled around the update so the core never matches a
 * half-written entry; the DATA register is written before ADDR.
 */
static inline void write_icplb_data(int cpu, int idx, unsigned long data,
				    unsigned long addr)
{
	_disable_icplb();
	bfin_write32(ICPLB_DATA0 + idx * 4, data);
	bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
	_enable_icplb();

#ifdef CONFIG_CPLB_INFO
	/* Mirror the hardware entry into the software shadow table. */
	icplb_tbl[cpu][idx].addr = addr;
	icplb_tbl[cpu][idx].data = data;
#endif
}
/*
 * write_dcplb_data - install one data-CPLB entry on @cpu.
 *
 * DCPLB matching is switched off (ENDCPLB cleared in DMEM_CONTROL) for
 * the duration of the update so the core never uses a half-written
 * entry; the DATA register is written before ADDR, then the previous
 * DMEM_CONTROL value is restored.
 */
static inline void write_dcplb_data(int cpu, int idx, unsigned long data,
				    unsigned long addr)
{
	unsigned long ctrl = bfin_read_DMEM_CONTROL();

	bfin_write_DMEM_CONTROL_SSYNC(ctrl & ~ENDCPLB);
	bfin_write32(DCPLB_DATA0 + idx * 4, data);
	bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
	bfin_write_DMEM_CONTROL_SSYNC(ctrl);

#ifdef CONFIG_CPLB_INFO
	/* Mirror the hardware entry into the software shadow table. */
	dcplb_tbl[cpu][idx].addr = addr;
	dcplb_tbl[cpu][idx].data = data;
#endif
}
/*
 * bfin_cache_init - program a full bank of CPLB entries from @cplb_tbl
 * into the MMR pair starting at @cplb_addr/@cplb_data, then enable the
 * cache via _enable_cplb() with the given control word and mask.
 */
static void bfin_cache_init(struct cplb_entry *cplb_tbl,
			    unsigned long cplb_addr, unsigned long cplb_data,
			    unsigned long mem_control, unsigned long mem_mask)
{
	unsigned long off = 0;
	int slot;

	/* Each CPLB slot occupies one 32-bit MMR in each array. */
	for (slot = 0; slot < MAX_CPLBS; slot++, off += 4) {
		bfin_write32(cplb_addr + off, cplb_tbl[slot].addr);
		bfin_write32(cplb_data + off, cplb_tbl[slot].data);
	}

	_enable_cplb(mem_control, mem_mask);
}
void spi_release_bus(struct spi_slave *slave) { struct bfin_spi_slave *bss = to_bfin_spi_slave(slave); debug("%s: bus:%i cs:%i\n", __func__, slave->bus, slave->cs); peripheral_free_list(pins[slave->bus]); if (is_gpio_cs(slave->cs)) gpio_free(gpio_cs(slave->cs)); bfin_write32(&bss->regs->rx_control, 0x0); bfin_write32(&bss->regs->tx_control, 0x0); bfin_write32(&bss->regs->control, 0x0); SSYNC(); }
static int ppi_stop(struct ppi_if *ppi) { const struct ppi_info *info = ppi->info; ppi->ppi_control &= ~PORT_EN; switch (info->type) { case PPI_TYPE_PPI: { struct bfin_ppi_regs *reg = info->base; bfin_write16(®->control, ppi->ppi_control); break; } case PPI_TYPE_EPPI: { struct bfin_eppi_regs *reg = info->base; bfin_write32(®->control, ppi->ppi_control); break; } default: return -EINVAL; } clear_dma_irqstat(info->dma_ch); disable_dma(info->dma_ch); SSYNC(); return 0; }
/*
 * bfin_icache_init - load the per-core instruction CPLB table into the
 * ICPLB MMRs and enable the instruction cache (IMC | ENICPLB).
 */
void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl)
{
	unsigned long ctrl;
	int i;

	/* Program every ICPLB slot from the supplied table. */
	for (i = 0; i < MAX_CPLBS; i++) {
		bfin_write32(ICPLB_ADDR0 + i * 4, icplb_tbl[i].addr);
		bfin_write32(ICPLB_DATA0 + i * 4, icplb_tbl[i].data);
	}

	ctrl = bfin_read_IMEM_CONTROL();
	ctrl |= IMC | ENICPLB;

	/* CSYNC to ensure load store ordering */
	CSYNC();

	bfin_write_IMEM_CONTROL(ctrl);
	SSYNC();
}
/* Read-modify-write helper: set the @mask bits in 32-bit MMR @reg. */
static void clk_reg_set_bits(u32 reg, uint32_t mask)
{
	bfin_write32(reg, bfin_read32(reg) | mask);
}
int bf609_nor_flash_init(struct platform_device *dev) { #define CONFIG_SMC_GCTL_VAL 0x00000010 const unsigned short pins[] = { P_A3, P_A4, P_A5, P_A6, P_A7, P_A8, P_A9, P_A10, P_A11, P_A12, P_A13, P_A14, P_A15, P_A16, P_A17, P_A18, P_A19, P_A20, P_A21, P_A22, P_A23, P_A24, P_A25, P_NORCK, 0, }; peripheral_request_list(pins, "smc0"); bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL); bfin_write32(SMC_B0CTL, 0x01002011); bfin_write32(SMC_B0TIM, 0x08170977); bfin_write32(SMC_B0ETIM, 0x00092231); return 0; }
/* Read-modify-write helper: clear the @mask bits in 32-bit MMR @reg. */
static void clk_reg_clear_bits(u32 reg, uint32_t mask)
{
	bfin_write32(reg, bfin_read32(reg) & ~mask);
}
/*
 * Read-modify-write helper: replace the @mask field of 32-bit MMR @reg
 * with @val (caller supplies @val already shifted into the field).
 */
static void clk_reg_write_mask(u32 reg, uint32_t val, uint32_t mask)
{
	bfin_write32(reg, (bfin_read32(reg) & ~mask) | val);
}
/*
 * bfin_dcache_init - load the data-CPLB table into the DCPLB MMRs and
 * switch the data memory controller on (DMEM_CNTR).
 */
void bfin_dcache_init(void)
{
	const unsigned long *entry = dcplb_table;
	int slot;

	/* The table stores (addr, data) pairs, terminated by addr == -1
	 * or MAX_CPLBS entries, whichever comes first.
	 */
	for (slot = 0; slot < MAX_CPLBS; slot++) {
		unsigned long vaddr = entry[0];
		unsigned long flags = entry[1];

		if (vaddr == (unsigned long)-1)
			break;

		entry += 2;
		bfin_write32(DCPLB_ADDR0 + slot * 4, vaddr);
		bfin_write32(DCPLB_DATA0 + slot * 4, flags);
	}

	bfin_write_DMEM_CONTROL(bfin_read_DMEM_CONTROL() | DMEM_CNTR);
}
/*
 * bfin_icache_init - load the instruction-CPLB table into the ICPLB
 * MMRs and enable the instruction cache (IMC | ENICPLB).
 */
void __init bfin_icache_init(void)
{
	const unsigned long *entry = icplb_table;
	int slot;

	/* The table stores (addr, data) pairs, terminated by addr == -1
	 * or MAX_CPLBS entries, whichever comes first.
	 */
	for (slot = 0; slot < MAX_CPLBS; slot++) {
		unsigned long vaddr = entry[0];
		unsigned long flags = entry[1];

		if (vaddr == (unsigned long)-1)
			break;

		entry += 2;
		bfin_write32(ICPLB_ADDR0 + slot * 4, vaddr);
		bfin_write32(ICPLB_DATA0 + slot * 4, flags);
	}

	bfin_write_IMEM_CONTROL(bfin_read_IMEM_CONTROL() | IMC | ENICPLB);
	SSYNC();
}
static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params) { const struct ppi_info *info = ppi->info; int dma32 = 0; int dma_config, bytes_per_line, lines_per_frame; bytes_per_line = params->width * params->bpp / 8; lines_per_frame = params->height; if (params->int_mask == 0xFFFFFFFF) ppi->err_int = false; else ppi->err_int = true; dma_config = (DMA_FLOW_STOP | WNR | RESTART | DMA2D | DI_EN); ppi->ppi_control = params->ppi_control & ~PORT_EN; switch (info->type) { case PPI_TYPE_PPI: { struct bfin_ppi_regs *reg = info->base; if (params->ppi_control & DMA32) dma32 = 1; bfin_write16(®->control, ppi->ppi_control); bfin_write16(®->count, bytes_per_line - 1); bfin_write16(®->frame, lines_per_frame); break; } case PPI_TYPE_EPPI: { struct bfin_eppi_regs *reg = info->base; if ((params->ppi_control & PACK_EN) || (params->ppi_control & 0x38000) > DLEN_16) dma32 = 1; bfin_write32(®->control, ppi->ppi_control); bfin_write16(®->line, bytes_per_line + params->blank_clocks); bfin_write16(®->frame, lines_per_frame); bfin_write16(®->hdelay, 0); bfin_write16(®->vdelay, 0); bfin_write16(®->hcount, bytes_per_line); bfin_write16(®->vcount, lines_per_frame); break; } default: return -EINVAL; } if (dma32) { dma_config |= WDSIZE_32; set_dma_x_count(info->dma_ch, bytes_per_line >> 2); set_dma_x_modify(info->dma_ch, 4); set_dma_y_modify(info->dma_ch, 4); } else {
/*
 * sport_set - store @val into the SPORT MMR at @mmr, using a 16- or
 * 32-bit write according to the register's width.  The access runs with
 * local interrupts masked.  Always returns 0.
 */
static int sport_set(void *mmr, u64 val)
{
	unsigned long irqflags;
	int narrow;

	local_irq_save(irqflags);
	narrow = sport_width(mmr) <= 16;
	if (narrow)
		bfin_write16(mmr, val);
	else
		bfin_write32(mmr, val);
	local_irq_restore(irqflags);

	return 0;
}
void bf609_nor_flash_exit(struct platform_device *dev) { const unsigned short pins[] = { P_A3, P_A4, P_A5, P_A6, P_A7, P_A8, P_A9, P_A10, P_A11, P_A12, P_A13, P_A14, P_A15, P_A16, P_A17, P_A18, P_A19, P_A20, P_A21, P_A22, P_A23, P_A24, P_A25, P_NORCK, 0, }; peripheral_free_list(pins); bfin_write32(SMC_GCTL, 0); }
/*
 * bfin_core1_start - release core 1 (core B) from reset so it starts
 * executing.  On BF561 this is a single SYSCR bit; on newer parts the
 * Reset Control Unit sequence below is used.
 */
void bfin_core1_start(void)
{
#ifdef BF561_FAMILY
	/* Enable core 1 */
	bfin_write_SYSCR(bfin_read_SYSCR() & ~0x0020);
#else
	/* Point core 1's reset vector at its L1 code image, then pulse
	 * bit 1 of the core-reset control register to release it.
	 */
	bfin_write32(RCU0_SVECT1, COREB_L1_CODE_START);
	bfin_write32(RCU0_CRCTL, 0);
	bfin_write32(RCU0_CRCTL, 0x2);

	/* Busy-wait until CRSTAT reports core 1 out of reset. */
	while (!(bfin_read32(RCU0_CRSTAT) & 0x2))
		continue;

	bfin_write32(RCU0_CRCTL, 0);

	/* flag to notify cces core 1 application */
	bfin_write32(SDU0_MSG_SET, (1 << 19));
#endif
}
/*
 * bfin_dcache_init - load the per-core data CPLB table into the DCPLB
 * MMRs and enable the data cache.
 */
void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl)
{
	unsigned long ctrl;
	int i;

	/* Program every DCPLB slot from the supplied table. */
	for (i = 0; i < MAX_CPLBS; i++) {
		bfin_write32(DCPLB_ADDR0 + i * 4, dcplb_tbl[i].addr);
		bfin_write32(DCPLB_DATA0 + i * 4, dcplb_tbl[i].data);
	}

	ctrl = bfin_read_DMEM_CONTROL();

	/*
	 * Anomaly notes:
	 * 05000287 - We implement workaround #2 - Change the DMEM_CONTROL
	 * register, so that the port preferences for DAG0 and DAG1 are set
	 * to port B
	 */
	ctrl |= DMEM_CNTR | PORT_PREF0 | (ANOMALY_05000287 ? PORT_PREF1 : 0);

	/* CSYNC to ensure load store ordering */
	CSYNC();

	bfin_write_DMEM_CONTROL(ctrl);
	SSYNC();
}
/*
 * spi_cs_deactivate - drop the chip-select for @slave, either by
 * driving its GPIO inactive or via the controller's SSEL register.
 */
void spi_cs_deactivate(struct spi_slave *slave)
{
	struct bfin_spi_slave *priv = to_bfin_spi_slave(slave);

	if (is_gpio_cs(slave->cs)) {
		/* GPIO CS: drive the pin to its inactive level. */
		gpio_set_value(gpio_cs(slave->cs), !priv->cs_pol);
	} else {
		u32 ssel = bfin_read32(&priv->regs->ssel);

		/* deassert cs */
		if (priv->cs_pol)
			ssel &= ~(BIT(8) << slave->cs);
		else
			ssel |= BIT(8) << slave->cs;
		bfin_write32(&priv->regs->ssel, ssel);
		SSYNC();

		/* disable cs */
		ssel &= ~(1 << slave->cs);
		bfin_write32(&priv->regs->ssel, ssel);
	}
	SSYNC();
}
int spi_claim_bus(struct spi_slave *slave) { struct bfin_spi_slave *bss = to_bfin_spi_slave(slave); debug("%s: bus:%i cs:%i\n", __func__, slave->bus, slave->cs); if (is_gpio_cs(slave->cs)) { unsigned int cs = gpio_cs(slave->cs); gpio_request(cs, "bfin-spi"); gpio_direction_output(cs, !bss->cs_pol); pins[slave->bus][0] = P_DONTCARE; } else pins[slave->bus][0] = cs_pins[slave->bus][slave->cs - 1]; peripheral_request_list(pins[slave->bus], "bfin-spi"); bfin_write32(&bss->regs->control, bss->control); bfin_write32(&bss->regs->clock, bss->clock); bfin_write32(&bss->regs->delay, 0x0); bfin_write32(&bss->regs->rx_control, SPI_RXCTL_REN); bfin_write32(&bss->regs->tx_control, SPI_TXCTL_TEN | SPI_TXCTL_TTI); SSYNC(); return 0; }
static irqreturn_t ppi_irq_err(int irq, void *dev_id) { struct ppi_if *ppi = dev_id; const struct ppi_info *info = ppi->info; switch (info->type) { case PPI_TYPE_PPI: { struct bfin_ppi_regs *reg = info->base; unsigned short status; /* register on bf561 is cleared when read * others are W1C */ status = bfin_read16(®->status); if (status & 0x3000) ppi->err = true; bfin_write16(®->status, 0xff00); break; } case PPI_TYPE_EPPI: { struct bfin_eppi_regs *reg = info->base; unsigned short status; status = bfin_read16(®->status); if (status & 0x2) ppi->err = true; bfin_write16(®->status, 0xffff); break; } case PPI_TYPE_EPPI3: { struct bfin_eppi3_regs *reg = info->base; unsigned long stat; stat = bfin_read32(®->stat); if (stat & 0x2) ppi->err = true; bfin_write32(®->stat, 0xc0ff); break; } default: break; } return IRQ_HANDLED; }
/*
 * spi_cs_activate - assert the chip-select for @slave, either by
 * driving its GPIO active or via the controller's SSEL register.
 */
void spi_cs_activate(struct spi_slave *slave)
{
	struct bfin_spi_slave *priv = to_bfin_spi_slave(slave);

	if (is_gpio_cs(slave->cs)) {
		/* GPIO CS: drive the pin to its active level. */
		gpio_set_value(gpio_cs(slave->cs), priv->cs_pol);
	} else {
		u32 ssel = bfin_read32(&priv->regs->ssel);

		/* Enable the select line, then set its level per cs_pol. */
		ssel |= 1 << slave->cs;
		if (priv->cs_pol)
			ssel |= (1 << 8) << slave->cs;
		else
			ssel &= ~((1 << 8) << slave->cs);
		bfin_write32(&priv->regs->ssel, ssel);
	}
	SSYNC();
}
/*
 * probe_kernel_write - safely write @size bytes from @src to kernel
 * address @dst, choosing the access method by memory region.
 *
 * MMR space only accepts naturally-aligned 16- or 32-bit stores; other
 * regions go through the core, DMA, or ITEST copy paths depending on
 * what validate_memory_access_address() reports.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
long probe_kernel_write(void *dst, const void *src, size_t size)
{
	unsigned long addr = (unsigned long)dst;
	int access = validate_memory_access_address(addr, size);

	if (access < 0)
		return access;

	if (addr >= SYSMMR_BASE) {
		if (size == 2 && addr % 2 == 0) {
			u16 val16;

			memcpy(&val16, src, sizeof(val16));
			bfin_write16(dst, val16);
			return 0;
		}
		if (size == 4 && addr % 4 == 0) {
			u32 val32;

			memcpy(&val32, src, sizeof(val32));
			bfin_write32(dst, val32);
			return 0;
		}
		/* Unsupported size/alignment for MMR space. */
		return -EFAULT;
	}

	switch (access) {
	case BFIN_MEM_ACCESS_CORE:
	case BFIN_MEM_ACCESS_CORE_ONLY:
		return __probe_kernel_write(dst, src, size);
	case BFIN_MEM_ACCESS_DMA:
		if (dma_memcpy(dst, src, size))
			return 0;
		break;
	case BFIN_MEM_ACCESS_ITEST:
		if (isram_memcpy(dst, src, size))
			return 0;
		break;
	}
	return -EFAULT;
}
MGR_ATTR static noinline int dcplb_protection_fault(int cpu) { int status = bfin_read_DCPLB_STATUS(); nr_dcplb_prot[cpu]++; if (likely(status & FAULT_RW)) { int idx = faulting_cplb_index(status); unsigned long regaddr = DCPLB_DATA0 + idx * 4; unsigned long data = bfin_read32(regaddr); /* Check if fault is to dirty a clean page */ if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) && write_permitted(status, data)) { dcplb_tbl[cpu][idx].data = data; bfin_write32(regaddr, data); return CPLB_RELOADED; } } return CPLB_PROT_VIOL; }
/*
 * spi_pio_xfer - full-duplex PIO transfer of @bytes bytes.
 *
 * @tx may be NULL (idle value is clocked out instead); @rx may be NULL
 * (received bytes are discarded).  Returns 0 on success, -1 if the user
 * aborts with ctrl-c while waiting for receive data.
 */
static int spi_pio_xfer(struct bfin_spi_slave *bss, const u8 *tx, u8 *rx,
			uint bytes)
{
	u8 byte;

	/* discard invalid rx data and empty rfifo */
	while (!(bfin_read32(&bss->regs->status) & SPI_STAT_RFE))
		bfin_read32(&bss->regs->rfifo);

	for (; bytes; bytes--) {
		byte = tx ? *tx++ : CONFIG_BFIN_SPI_IDLE_VAL;
		debug("%s: tx:%x ", __func__, byte);

		bfin_write32(&bss->regs->tfifo, byte);
		SSYNC();

		/* Wait for the response byte, bailing out on ctrl-c. */
		while (bfin_read32(&bss->regs->status) & SPI_STAT_RFE)
			if (ctrlc())
				return -1;

		byte = bfin_read32(&bss->regs->rfifo);
		if (rx)
			*rx++ = byte;
		debug("rx:%x\n", byte);
	}

	return 0;
}