int pindown_pages(struct page **pg_list, struct iovec *iovec, int iovec_count, int flushDcache, int rw){ int count,err=0; struct iovec *tiovec; int x=0,i; struct page **page_list=pg_list; int pg_count=0; char *iov_base; /* Acquire the mm page semaphore. */ down_read(¤t->mm->mmap_sem); for ( count = 0, tiovec = iovec; count < iovec_count; count++, tiovec++){ int nr_pages = (((u_long)tiovec->iov_base + tiovec->iov_len + PAGE_SIZE - 1)/PAGE_SIZE)- ((u_long)tiovec->iov_base/PAGE_SIZE); if ( rw == READ ){ iov_base = (u_long) tiovec->iov_base - ((u_long) tiovec->iov_base & ~PAGE_MASK); for ( i = 0; i < nr_pages; i++, iov_base += PAGE_SIZE){ if ( __get_user(x, iov_base) || __put_user(x, iov_base)) BUG(); } } err = get_user_pages(current, current->mm, (unsigned long int)tiovec->iov_base, nr_pages, rw==READ, /* read access only for in data */ 0, /* no force */ &page_list[pg_count], NULL); if (err < 0 || err < nr_pages ) goto err_out; pg_count += err; } page_list[pg_count]=NULL; if ( flushDcache ) { flush_dcache_all(); } err_out: if (err < 0) { unlock_pages(pg_list); if (flushDcache ) { flush_dcache_all(); } up_read(¤t->mm->mmap_sem); return err; } up_read(¤t->mm->mmap_sem); return 0; }
/*
 * sunxi_flush_allcaches - write back the data cache, then turn both
 * the instruction and data caches off.
 */
void sunxi_flush_allcaches(void)
{
	icache_disable();
	flush_dcache_all();
	dcache_disable();
}
void sgat_dio_exit_req(struct sgat_dio_fops *fops, struct sgat_dio_req *dio_req, int flushDcache) { if ( dio_req) { if (dio_req->iobuf_inuse && dio_req->kiobp) { unlock_pages(dio_req->kiobp->maplist); if ( flushDcache){ flush_dcache_all(); //flush_dcache(); } iobuf_release(dio_req->kiobp); dio_req->iobuf_inuse = 0; } if ( dio_req->kiobp ){ sgat_dio_free(dio_req->kiobp); dio_req->kiobp = NULL; } if ( dio_req->sclist ){ fops->sgat_dio_free((void *)dio_req->sclist); dio_req->sclist = NULL; } if ( dio_req->k_io_hdrp ){ fops->sgat_dio_free((void *)dio_req->k_io_hdrp); dio_req->k_io_hdrp = NULL; } } }
/*
 * Publish the secondary-core entry point.  All cores have their reset
 * vector pointing to address 0, so the boot address is written there
 * and the data cache is flushed so the other cores observe the value
 * in memory.
 */
void smp_set_core_boot_addr(unsigned long addr, int corenr)
{
	writel(addr, (void __iomem *)RESET_VECTOR_ADDR);
	flush_dcache_all();
}
/*
 * Cold-boot initialization: relocate the warm crt0, flush caches, set up
 * translation tables and enable the MMU, then relocate the remaining
 * sections.
 *
 * NOTE(review): the statement order here is load-bearing -- pre- and
 * post-MMU relocations deliberately bracket the MMU enable, and the
 * cache maintenance between them must not be moved.
 */
void coldboot_init(coldboot_crt0_reloc_list_t *reloc_list, uintptr_t start_cold) {
    //MAILBOX_NX_SECMON_BOOT_TIME = TIMERUS_CNTR_1US_0;

    /* Custom approach */
    reloc_list->reloc_base = start_cold;
    /* TODO: Set NX BOOTLOADER clock time field */

    /* This at least copies .warm_crt0 to its VMA. */
    for(size_t i = 0; i < reloc_list->nb_relocs_pre_mmu_init; i++) {
        do_relocation(reloc_list, i);
    }
    /* At this point, we can (and will) access functions located in .warm_crt0 */

    /*
        From https://events.static.linuxfound.org/sites/events/files/slides/slides_17.pdf :
        Caches may write back dirty lines at any time:
            - To make space for new allocations
            - Even if MMU is off
            - Even if Cacheable accesses are disabled (caches are never 'off')
        It should be fine to clear that here and not before.
    */
    flush_dcache_all();
    invalidate_icache_all();

    /* Set target firmware. */
    g_exosphere_target_firmware_for_init = exosphere_get_target_firmware_for_init();

    /* Initialize DMA controllers, and write to AHB_GIZMO_TZRAM. */
    /* TZRAM accesses should work normally after this point. */
    init_dma_controllers(g_exosphere_target_firmware_for_init);

    configure_ttbls();
    set_memory_registers_enable_mmu();

    /* Copy or clear the remaining sections */
    for(size_t i = 0; i < reloc_list->nb_relocs_post_mmu_init; i++) {
        do_relocation(reloc_list, reloc_list->nb_relocs_pre_mmu_init + i);
    }

    /* Second flush: make the post-MMU relocations visible/executable. */
    flush_dcache_all();
    invalidate_icache_all();
    /* At this point we can access all the mapped segments (all other functions, data...) normally */
}
/*
 * Early architecture CPU init hook.  Flushes the data cache in case a
 * first-stage loader (e.g. the S1L kickstart bootloader) handed over
 * with dirty cache lines.  Always succeeds.
 */
int arch_cpu_init(void)
{
	flush_dcache_all();
	return 0;
}
/*
 * Idle-loop hook for Dynamic Power Management: account the idle entry,
 * then choose between a basic idle, a quick idle, or a full operating-
 * point transition depending on whether DPM is enabled and which
 * operating points the active policy selects.
 *
 * NOTE(review): runs with interrupts disabled between
 * critical_save_and_cli() and critical_restore_flags(); the ordering of
 * the state/policy checks inside that window is significant.
 */
void dpm_idle(void)
{
	unsigned long flags;
	struct dpm_idle_parms *idle_parms = &dpm_idle_parms;
	struct dpm_opt *idle_task_opt, *idle_opt;

	/* Leave whatever task state we were in and enter idle-task state. */
	current->dpm_state = DPM_NO_STATE;
	dpm_set_os(DPM_IDLE_TASK_STATE);

	dpm_md_idle_set_parms(&idle_parms->md);
#ifdef EXTREME_WORST_CASE
	/* Pessimistic measurement mode: start from cold caches and TLB. */
	flush_instruction_cache();
	flush_dcache_all();
	local_flush_tlb_all();
#endif

	critical_save_and_cli(flags);

	if (!current->need_resched) {
		incr_stat(idles);
		stat_start_time(idle_parms);

		if (!dpm_enabled) {
			basic_idle(idle_parms);
		} else if (dpm_active_state != DPM_IDLE_TASK_STATE) {
			/* The OS state changed under us; count and bail. */
			incr_stat(interrupted_idles);
		} else {
			idle_task_opt = dpm_active_policy->
				classes[DPM_IDLE_TASK_STATE]->opt;
			idle_opt = dpm_active_policy->
				classes[DPM_IDLE_STATE]->opt;

			/*
			 * NOTE(review): dpm_trylock() returning nonzero
			 * appears to mean "could not take the lock" (the
			 * else path calls dpm_unlock() before full_idle)
			 * -- confirm against the dpm lock API.
			 */
			if ((dpm_active_opt != idle_task_opt) ||
			    (idle_task_opt == idle_opt) ||
			    dpm_trylock()) {
				quick_idle(idle_parms);
			} else {
				dpm_unlock();
				full_idle(idle_parms, idle_task_opt, idle_opt);
			}
		}
		latency_stats(idle_parms);
	}
	critical_restore_flags(flags);
}
static int board_try_boot_standby(void) { uint func_addr = (uint)boot_standby_mode; standby_func boot_standby_func; //cal the real function address of boot_standby_mode flush_dcache_all(); boot_standby_func = (standby_func)(func_addr - gd->reloc_off); return boot_standby_func(); }
/* * Send a data block via Ethernet */ static int owl_mac_send(struct eth_device *dev, void *pkt, int len) { struct buffer_descriptor *bdp; unsigned long status; struct owl_mac_info *owl_info = dev->priv; int i; u32 reg_val = 0; if(len <=0 || len > ETH_PKG_MAX){ printf("owl_mac : bad tx pkt len (%d)\n",len); } /* */ //printf("owl_mac_send: len %d pkt %p\n",len,pkt); /* */ owl_prepare_tx_bds(dev); bdp = &owl_info->tx_bd_base[owl_info->tx_cur_idx]; status = bdp->status; if(status & TXBD_STAT_OWN){ printf("owl_mac tx error: tx is full\n"); return 0; } bdp->buf_addr = dma_map_single(pkt, len, DMA_TO_DEVICE); bdp->status = 0; bdp->control &= TXBD_CTRL_IC | TXBD_CTRL_TER; /* clear others */ bdp->control |= TXBD_CTRL_TBS1(len); bdp->control |= TXBD_CTRL_FS | TXBD_CTRL_LS; bdp->status = TXBD_STAT_OWN; flush_dcache_all(); writel(readl(MAC_CSR6)| EC_OPMODE_ST, MAC_CSR6); writel(EC_TXPOLL_ST,MAC_CSR1); /* wait for finish then return */ for(i=0; i< OWL_MAC_TX_TIMEOUT; i++){ reg_val = readl(MAC_CSR5); if(reg_val & EC_STATUS_TI ){ reg_val = reg_val & (~EC_STATUS_TI); writel(reg_val,MAC_CSR5); break; } udelay(10); } dma_unmap_single(pkt, len, bdp->buf_addr); if(i >= OWL_MAC_TX_TIMEOUT){ invalidate_dcache_range((uint32_t)bdp, (uint32_t)bdp + roundup(sizeof(*bdp) * TX_RING_SIZE, ARCH_DMA_MINALIGN)); printf("owl_mac : Tx timeout 0x%lx \n",bdp->status); } owl_info->tx_cur_idx = (owl_info->tx_cur_idx + 1) % TX_RING_SIZE; //printf("owl_mac_send >> FINISH\n"); return len; }
static void owl_prepare_tx_bds(struct eth_device *dev) { struct owl_mac_info *owl_info = dev->priv; int i; for(i = 0; i < TX_RING_SIZE; i++){ owl_info->tx_bd_base[i].buf_addr = 0; owl_info->tx_bd_base[i].reserved = 0; owl_info->tx_bd_base[i].status = 0; owl_info->tx_bd_base[i].control = TXBD_CTRL_IC; } owl_info->tx_bd_base[i - 1].control |= TXBD_CTRL_TER; owl_info->tx_cur_idx = 0; flush_dcache_all(); }
static void comm_d2(void) { void *dest; /* set BB_F_SLP in PMUM_SCCR to vote VCXO_SD */ *(uint32_t *)0xd4050038 |= (0x1 << 2); dest = (void *)0x06000000; if (cpu_is_pxa1928_a0()) memcpy(dest, sg_d2_a0_ops, (ARRAY_SIZE(sg_d2_a0_ops) * 4)); else memcpy(dest, sg_d2_b0_ops, (ARRAY_SIZE(sg_d2_b0_ops) * 4)); dest = (void *)0x07000000; memcpy(dest, msa_d2_ops, (ARRAY_SIZE(msa_d2_ops) * 4)); flush_dcache_all(); release_seagull(); }
/*
 * Send a MIPI-DSI long packet: stage the payload into the DSI DMA
 * buffer, program DMA channel 0 to feed the DSI FIFO, write the packet
 * header/config, kick the transfer, and poll for completion.
 *
 * NOTE(review): the register write order (DMA setup before the
 * DSI_PACK_CFG kick) appears load-bearing -- do not reorder.
 */
void dsihw_send_long_packet(int data_type, int word_cnt, int * send_data, int trans_mode)
{
	int tmp;
	unsigned long *src_addr;
	int i;
	int cnt = 100;	/* completion-poll budget (1us per iteration) */

	printf("send long start\n");

	/* Clear bit 12 of DSI_CTRL before a long-packet transfer. */
	tmp = readl(DSI_CTRL);
	tmp &= 0xffffefff;
	writel(tmp, DSI_CTRL);

#if 1
	/*
	 * Copy the payload into the DMA-visible buffer.
	 * NOTE(review): '<=' copies word_cnt/4 + 1 words, one more than
	 * the (word_cnt+3)/4 words the DMA sends when word_cnt is a
	 * multiple of 4 -- possible over-read of send_data; confirm intent.
	 */
	src_addr = dsi_addr.vaddr;
	for(i = 0; i <= word_cnt / 4; i++) {
		*(src_addr + i) = *(send_data + i);
	}
#endif
	/* Write the payload back to memory so the DMA engine sees it. */
	flush_dcache_all();

	writel(0x00010224, DMA0_MODE);
	writel(0x0, DMA0_CHAINED_CTL);
	writel(0x3, DMA_IRQ_PD0);
	writel(dsi_addr.paddr, DMA0_SOURCE);
	writel(DSI_FIFO_ODAT, DMA0_DESTINATION);
	writel(((word_cnt+3)/4)*4, DMA0_FRAME_LEN);	/* round up to whole words */
	writel(0x1, DMA0_FRAME_CNT);
	writel(0x1, DMA0_INT_CTL);
	writel(0x1, DMA0_START);

	/* Packet header is the byte count; config carries type/mode bits. */
	tmp = word_cnt;
	writel(tmp, DSI_PACK_HEADER);
	tmp = ((data_type << 8) | 0x40000 | (trans_mode << 14));
	writel(tmp, DSI_PACK_CFG);

	/* Kick the transfer (bit 0 of DSI_PACK_CFG). */
	tmp = readl(DSI_PACK_CFG);
	tmp |= 1 ;
	writel(tmp, DSI_PACK_CFG);

	/* Wait for the done flag (DSI_TR_STA bit 19), then clear it. */
	while ((!(readl(DSI_TR_STA) & (1 << 19))) && --cnt)
		udelay(1);
	writel(0x80000, DSI_TR_STA);

	printf("send long end\n");
}
/*
 * reset_cpu: quiesce NAND and caches, drop the PLLs into slow (bypass)
 * mode, then trigger a whole-chip soft reset through CRU_GLB_SRST_SND.
 */
void reset_cpu(ulong ignored)
{
	disable_interrupts();
	FW_NandDeInit();
#ifndef CONFIG_SYS_L2CACHE_OFF
	v7_outer_cache_disable();
#endif
#ifndef CONFIG_SYS_DCACHE_OFF
	/* Write back dirty lines before the caches are disabled. */
	flush_dcache_all();
#endif
#ifndef CONFIG_SYS_ICACHE_OFF
	invalidate_icache_all();
#endif
#ifndef CONFIG_SYS_DCACHE_OFF
	dcache_disable();
#endif
#ifndef CONFIG_SYS_ICACHE_OFF
	icache_disable();
#endif
#if defined(CONFIG_RKCHIP_RK3288)
	/* pll enter slow mode */
	writel(PLL_MODE_SLOW(APLL_ID) | PLL_MODE_SLOW(GPLL_ID)
		| PLL_MODE_SLOW(CPLL_ID) | PLL_MODE_SLOW(NPLL_ID),
		RKIO_GRF_PHYS + CRU_MODE_CON);
	/* soft reset */
	writel(0xeca8, RKIO_CRU_PHYS + CRU_GLB_SRST_SND);
#elif defined(CONFIG_RKCHIP_RK3036)
	/* pll enter slow mode */
	writel(PLL_MODE_SLOW(APLL_ID) | PLL_MODE_SLOW(GPLL_ID),
		RKIO_GRF_PHYS + CRU_MODE_CON);
	/* soft reset */
	writel(0xeca8, RKIO_CRU_PHYS + CRU_GLB_SRST_SND);
#elif defined(CONFIG_RKCHIP_RK3126) || defined(CONFIG_RKCHIP_RK3128)
	/* pll enter slow mode */
	writel(PLL_MODE_SLOW(APLL_ID) | PLL_MODE_SLOW(CPLL_ID) | PLL_MODE_SLOW(GPLL_ID),
		RKIO_GRF_PHYS + CRU_MODE_CON);
	/* soft reset */
	writel(0xeca8, RKIO_CRU_PHYS + CRU_GLB_SRST_SND);
#else
#error "PLS config platform for reset.c!"
#endif /* CONFIG_RKPLATFORM */
}
static void owl_prepare_rx_bds(struct eth_device *dev) { struct owl_mac_info *owl_info = dev->priv; int i; for(i = 0; i < RX_RING_SIZE; i++){ void *buff = memalign(ARCH_DMA_MINALIGN, PKG_MAX_LEN); if(!buff) panic("owl_mac: fail to create rx's buff\n"); owl_info->rx_bd_base[i].buf_addr = (unsigned long)buff; owl_info->rx_bd_base[i].reserved = 0; owl_info->rx_bd_base[i].status = RXBD_STAT_OWN; owl_info->rx_bd_base[i].control = RXBD_CTRL_RBS1(PKG_MAX_LEN); } owl_info->rx_bd_base[i-1].control |= RXBD_CTRL_RER; owl_info->rx_cur_idx = 0; flush_dcache_all(); //flush_dcache_range((unsigned int)owl_info->rx_bd_base, (unsigned int)owl_info->rx_bd_base + sizeof(struct buffer_descriptor) * RX_RING_SIZE); }
static inline void __flush_cache_range( unsigned long start, unsigned long end, unsigned long value) { unsigned long i,flags; if ((end - start) > 0x1000) { if (value | INS_CACHE) { flush_icache_all(); } if (value | DATA_CACHE) { flush_dcache_all(); } return; } flags = irqsave(); if (value & INS_CACHE) { dis_icache(); } for (i = start; i < end; i += L1_CACHE_BYTES) { set_cr22(i); set_cr17(CACHE_OMS | value); } if (end & (L1_CACHE_BYTES - 1)) { set_cr22(end); set_cr17(CACHE_OMS | value); } if (value & INS_CACHE) { en_icache(); } irqrestore(flags); }
void reset_cpu(ulong ignored) { disable_interrupts(); FW_NandDeInit(); #ifndef CONFIG_SYS_L2CACHE_OFF v7_outer_cache_disable(); #endif #ifndef CONFIG_SYS_DCACHE_OFF flush_dcache_all(); #endif #ifndef CONFIG_SYS_ICACHE_OFF invalidate_icache_all(); #endif #ifndef CONFIG_SYS_DCACHE_OFF dcache_disable(); #endif #ifndef CONFIG_SYS_ICACHE_OFF icache_disable(); #endif #if defined(CONFIG_RKCHIP_RK3368) /* pll enter slow mode */ cru_writel(((0x00 << 8) && (0x03 << 24)), PLL_CONS(APLLB_ID, 3)); cru_writel(((0x00 << 8) && (0x03 << 24)), PLL_CONS(APLLL_ID, 3)); cru_writel(((0x00 << 8) && (0x03 << 24)), PLL_CONS(GPLL_ID, 3)); cru_writel(((0x00 << 8) && (0x03 << 24)), PLL_CONS(CPLL_ID, 3)); cru_writel(((0x00 << 8) && (0x03 << 24)), PLL_CONS(NPLL_ID, 3)); /* soft reset */ writel(0xeca8, RKIO_CRU_PHYS + CRU_GLB_SRST_SND); #else #error "PLS config platform for reset.c!" #endif /* CONFIG_RKPLATFORM */ }
/*
 * reset_cpu (RK3288 variant with remap): quiesce NAND and caches,
 * disable the address remap, force the PLLs into slow (bypass) mode,
 * then trigger a whole-chip soft reset through CRU_GLB_SRST_SND.
 */
void reset_cpu(ulong ignored)
{
	disable_interrupts();
	FW_NandDeInit();
#ifndef CONFIG_SYS_L2CACHE_OFF
	v7_outer_cache_disable();
#endif
#ifndef CONFIG_SYS_DCACHE_OFF
	/* Write back dirty lines before the caches are disabled. */
	flush_dcache_all();
#endif
#ifndef CONFIG_SYS_ICACHE_OFF
	invalidate_icache_all();
#endif
#ifndef CONFIG_SYS_DCACHE_OFF
	dcache_disable();
#endif
#ifndef CONFIG_SYS_ICACHE_OFF
	icache_disable();
#endif
#if (CONFIG_RKCHIPTYPE == CONFIG_RK3288)
	/* disable remap */
	/* rk3288 address remap control bit: SGRF soc con0 bit 11 */
	/* Upper half-word is the write-enable mask for the lower bits. */
	writel(1 << (11 + 16), RKIO_SECURE_GRF_PHYS + SGRF_SOC_CON0);
	/* pll enter slow mode */
	writel(PLL_MODE_SLOW(APLL_ID) | PLL_MODE_SLOW(GPLL_ID)
		| PLL_MODE_SLOW(CPLL_ID) | PLL_MODE_SLOW(NPLL_ID),
		RKIO_GRF_PHYS + CRU_MODE_CON);
	/* soft reset */
	writel(0xeca8, RKIO_CRU_PHYS + CRU_GLB_SRST_SND);
#else
#error "PLS config platform for reset.c!"
#endif /* CONFIG_RKPLATFORM */
}
static unsigned int pfla02_detect_chiptype(void) { u32 *p, *p1; unsigned int offset = MIN_BANK_SIZE; int i; for (i = 0; i < 2; i++) { p = (u32 *)PHYS_SDRAM; p1 = (u32 *)(PHYS_SDRAM + (i + 1) * offset); *p1 = 0; *p = RAM_TEST_PATTERN; /* * This is required to detect mirroring * else we read back values from cache */ flush_dcache_all(); if (*p == *p1) return i; } return RAM_MT256K; }
/*
 * Whole-dcache flush wrapper.
 * This can be used ONLY by the M4U driver!
 */
void inner_dcache_flush_all(void)
{
	flush_dcache_all();
}
/*
 * update_mmc [dev no] <type> 'mem' 'addr' 'length' [load addr]
 *
 * Write a memory image to an MMC device.  <type> selects the layout:
 *   2ndboot - image already carries a boot header; patch its port number
 *   boot    - build a boot header and prepend it in front of the image
 *   raw     - write the image as-is at byte offset 'addr'
 *   part    - 'addr' is a partition number; the destination comes from
 *             the partition table (compressed ext4 images are handled by
 *             write_compressed_ext4())
 */
int do_update_mmc(cmd_tbl_t * cmdtp, int flag, int argc, char * const argv[])
{
	block_dev_desc_t *desc;
	uint64_t dst_addr = 0, mem_len = 0;
	unsigned int mem_addr = 0;
	unsigned char *p;
	char cmd[32];
	lbaint_t blk, cnt;
	int ret, dev;

	if (6 > argc)
		goto usage;

	ret = get_device("mmc", argv[1], &desc);
	if (0 > ret) {
		printf ("** Not find device mmc.%s **\n", argv[1]);
		return 1;
	}

	/* Select the MMC device through the standard command interface. */
	dev = simple_strtoul (argv[1], NULL, 10);
	sprintf(cmd, "mmc dev %d", dev);
	if (0 > run_command(cmd, 0)) /* mmc device */
		return -1;

	if (0 != strcmp(argv[2], "2ndboot") &&
	    0 != strcmp(argv[2], "boot") &&
	    0 != strcmp(argv[2], "raw") &&
	    0 != strcmp(argv[2], "part"))
		goto usage;

	/* Source buffer address, destination byte offset, length (hex). */
	mem_addr = simple_strtoul (argv[3], NULL, 16);
	dst_addr = simple_strtoull(argv[4], NULL, 16);
	mem_len = simple_strtoull(argv[5], NULL, 16);

	p = (unsigned char *)mem_addr;
	blk = (dst_addr/MMC_BLOCK_SIZE);
	/* Round the block count up to cover a partial trailing block. */
	cnt = (mem_len/MMC_BLOCK_SIZE) + ((mem_len & (MMC_BLOCK_SIZE-1)) ? 1 : 0);

	/* Make sure the image data is in memory before the MMC write. */
	flush_dcache_all();

	if (! strcmp(argv[2], "2ndboot")) {
		/* Image already has a header; just patch the boot port. */
		struct boot_dev_head *bh = (struct boot_dev_head *)mem_addr;
		struct boot_dev_mmc *bd = (struct boot_dev_mmc *)&bh->bdi;
		bd->port_no = dev; /* set u-boot device port num */
		printf("head boot dev = %d\n", bd->port_no);
		goto do_write;
	}

	if (! strcmp(argv[2], "boot")) {
		/* Build a fresh boot header and prepend it to the image. */
		struct boot_dev_head head;
		struct boot_dev_head *bh = &head;
		struct boot_dev_mmc *bd = (struct boot_dev_mmc *)&bh->bdi;
		int len = sizeof(head);
		unsigned int load = CONFIG_SYS_TEXT_BASE;

		if (argc == 7)
			load = simple_strtoul (argv[6], NULL, 16);

		memset((void*)&head, 0x00, len);
		bh->load_addr = (unsigned int)load;
		bh->jump_addr = bh->load_addr;
		bh->load_size = (unsigned int)mem_len;
		bh->signature = SIGNATURE_ID;
		bd->port_no = dev;

		printf("head boot dev = %d\n", bd->port_no);
		printf("head load addr = 0x%08x\n", bh->load_addr);
		printf("head load size = 0x%08x\n", bh->load_size);
		/* NOTE(review): "gignature" below looks like a typo for
		 * "signature"; left untouched here since it is runtime
		 * output text. */
		printf("head gignature = 0x%08x\n", bh->signature);

		/* Place the header immediately before the image in memory.
		 * NOTE(review): assumes 'len' bytes before 'p' are writable
		 * scratch space -- confirm against caller setup. */
		p -= len;
		memcpy(p, bh, len);
		mem_len += MMC_BLOCK_SIZE;
		cnt = (mem_len/MMC_BLOCK_SIZE) + ((mem_len & (MMC_BLOCK_SIZE-1)) ? 1 : 0);
		goto do_write;
	}

	if (strcmp(argv[2], "part") == 0) {
		uint64_t parts[4][2] = { {0,0}, };
		uint64_t part_len = 0;
		int partno = (int)dst_addr; /* 'addr' arg is the partition number here */
		int num = 0;

		if (0 > mmc_get_part_table(desc, parts, &num))
			return 1;

		if (partno > num || 1 > partno) {
			printf ("** Invalid mmc.%d partition number %d (1 ~ %d) **\n",
				dev, partno, num);
			return 1;
		}

		dst_addr = parts[partno-1][0]; /* set write addr from part table */
		part_len = parts[partno-1][1];
		blk = (dst_addr/MMC_BLOCK_SIZE);

		/* Compressed (sparse) ext4 images take a dedicated writer. */
		if (0 == check_compress_ext4((char*)p, part_len)) {
			printf("update mmc.%d compressed ext4 = 0x%llx(%d) ~ 0x%llx(%d): ",
				dev, dst_addr, (unsigned int)blk, mem_len, (unsigned int)cnt);
			ret = write_compressed_ext4((char*)p, blk);
			printf("%s\n", ret?"Fail":"Done");
			return 1;
		}
		goto do_write;
	}

do_write:
	/* Refuse to write block 0 (MBR zone). */
	if (! blk) {
		printf("-- Fail: start %d block(0x%llx) is in MBR zone (0x200) --\n",
			(int)blk, dst_addr);
		return -1;
	}

	printf("update mmc.%d type %s = 0x%llx(0x%x) ~ 0x%llx(0x%x): ",
		dev, argv[2], dst_addr, (unsigned int)blk, mem_len, (unsigned int)cnt);
	/* NOTE(review): mmc_bwrite()'s result is printed as Done when
	 * truthy and also returned directly -- confirm its convention. */
	ret = mmc_bwrite(dev, blk, cnt, (void const*)p);
	printf("%s\n", ret?"Done":"Fail");

	return ret;

usage:
	cmd_usage(cmdtp);
	return 1;
}
/*
 * cache_flush - invalidate the instruction cache and write back the
 * data cache.
 */
void cache_flush(void)
{
	invalidate_icache_all();
	flush_dcache_all();
}