/* DMA channel 0 terminal-count interrupt handler for PCM playback.
 *
 * Refills the linked-list-item (LLI) chain that feeds the I2S TX FIFO.
 * The buffer is split so that the final `lastsize` bytes live in a small
 * double buffer (`dblbuf`), allowing the next refill to be chained in
 * before the hardware runs dry (watermark scheme).
 *
 * NOTE(review): exact ordering matters throughout — the LLI descriptors
 * must be fully written and cleaned to memory (clean_dcache) BEFORE the
 * DMA controller is (re)started, since the controller fetches them by
 * physical address. Do not reorder statements here. */
void INT_DMAC0C0(void) {
    /* Acknowledge the terminal-count interrupt. */
    DMAC0INTTCCLR = 1;

    /* Out of data: ask the playback layer for the next buffer. */
    if (!pcm_remaining) {
        pcm_play_get_more_callback((void**)&dataptr, &pcm_remaining);
        pcm_chunksize = pcm_remaining;
    }

    /* Still nothing: terminate the chain (no next LLI) and stop. */
    if (!pcm_remaining) {
        pcm_lli->nextlli = NULL;
        pcm_lli->control = 0x75249000;   /* control word without TC-IRQ/last flag */
        clean_dcache();
        return;
    }

    /* Size of the trailing piece that goes through the double buffer.
     * Clamped to half the remaining data (+1) and forced even (& ~1)
     * since transfers are counted in 16-bit units below. */
    uint32_t lastsize = MIN(PCM_WATERMARK * 4, pcm_remaining / 2 + 1) & ~1;
    pcm_remaining -= lastsize;

    /* If more data follows, the trailing LLI is the reserved final slot;
     * otherwise it is the first (and only) descriptor. */
    if (pcm_remaining) lastlli = &pcm_lli[ARRAYLEN(pcm_lli) - 1];
    else lastlli = pcm_lli;

    /* Main chunk for this round, capped so at least PCM_WATERMARK*8 bytes
     * are left over for the next refill (keeps the pipeline fed). */
    uint32_t chunksize = MIN(PCM_CHUNKSIZE * 4 - lastsize, pcm_remaining);
    if (pcm_remaining > chunksize && chunksize > pcm_remaining - PCM_WATERMARK * 8)
        chunksize = pcm_remaining - PCM_WATERMARK * 8;
    pcm_remaining -= chunksize;

    bool last = !chunksize;   /* true => this refill drains the buffer */
    int i = 0;

    /* Build the LLI chain for the main chunk, PCM_LLIMAX*4 bytes per
     * descriptor; the last one links to `lastlli`. */
    while (chunksize) {
        uint32_t thislli = MIN(PCM_LLIMAX * 4, chunksize);
        chunksize -= thislli;
        pcm_lli[i].srcaddr = (void*)dataptr;
        pcm_lli[i].dstaddr = (void*)((int)&I2STXDB0);
        pcm_lli[i].nextlli = chunksize ? &pcm_lli[i + 1] : lastlli;
        /* 0xf5249000 variant raises the TC interrupt; transfer count is
         * in halfwords, hence thislli / 2. */
        pcm_lli[i].control = (chunksize ? 0x75249000 : 0xf5249000) | (thislli / 2);
        dataptr += thislli;
        i++;
    }

    /* Trailing piece: if the source buffer is exhausted, copy its tail
     * into the double buffer so the caller may reuse/free its buffer
     * while DMA is still reading; otherwise stream directly. */
    if (!pcm_remaining) {
        memcpy(dblbuf[active_dblbuf], dataptr, lastsize);
        lastlli->srcaddr = dblbuf[active_dblbuf];
        active_dblbuf ^= 1;
    } else lastlli->srcaddr = dataptr;
    lastlli->dstaddr = (void*)((int)&I2STXDB0);
    lastlli->nextlli = last ? NULL : pcm_lli;   /* loop back unless finished */
    lastlli->control = (last ? 0xf5249000 : 0x75249000) | (lastsize / 2);
    dataptr += lastsize;

    /* Push descriptors (and dblbuf contents) to memory before the DMA
     * controller fetches them. */
    clean_dcache();

    /* If the channel is stopped and the first descriptor has a nonzero
     * transfer count, program and start it; otherwise just chain the new
     * list onto the running transfer. */
    if (!(DMAC0C0CONFIG & 1) && (pcm_lli[0].control & 0xfff)) {
        DMAC0C0LLI = pcm_lli[0];
        DMAC0C0CONFIG = 0x8a81;
    } else DMAC0C0NEXTLLI = pcm_lli;

    pcm_play_dma_started_callback();
}
/* Reset one half of a terminal control block: zero the head index word at
 * `term` and the tail index word 64 bytes beyond it, then clean both cache
 * lines so the cleared values are visible past the data cache. */
void term_init_half(unsigned term)
{
    volatile uint32_t *const head_word = (uint32_t *)term;
    volatile uint32_t *const tail_word = (uint32_t *)(term + 64);

    *head_word = 0;
    *tail_word = 0;

    clean_dcache((uint32_t *)term);
    clean_dcache((uint32_t *)(term + 64));
}
//TODO: this looks hideous, clean it up void printstr(const char* str){ unsigned count=0; int i; unsigned tid = get_tid(); unsigned idx = *(unsigned*)(2*tid*TERM_ALIGN + TERM_BASE); char *data = (char*)(TERM_BASE + (2*tid*TERM_ALIGN) + 128); while(*str){ data[(idx+count++)%TERM_BUF] = *str++; } for(i=0; i < count; i+=64){ clean_dcache(&data[(idx+i)%TERM_BUF]); } if( ((unsigned)&data[(idx+i-64)%TERM_BUF]) >> 6 != ((unsigned)&data[(idx+count)%TERM_BUF]) >> 6) clean_dcache(&data[(idx+count)%TERM_BUF]); idx+=count; *(unsigned*)(2*tid*TERM_ALIGN + TERM_BASE) = idx%TERM_BUF; clean_dcache((void*)(2*tid*TERM_ALIGN + TERM_BASE)); }
/* Build a stage-2 (p2m) LPAE page-table entry mapping machine frame `mfn`
 * with memory attributes `mattr`, p2m type `t` and access permissions `a`.
 * Returns the assembled entry by value; the caller writes it into the
 * table (see p2m_write_pte below). */
static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
                               p2m_type_t t, p2m_access_t a)
{
    paddr_t pa = ((paddr_t) mfn) << PAGE_SHIFT;
    /* sh, xn and write bit will be defined in the following switches
     * based on mattr and t. */
    lpae_t e = (lpae_t) {
        .p2m.af = 1,        /* access flag set so no access faults are taken */
        .p2m.read = 1,
        .p2m.mattr = mattr,
        .p2m.table = 1,
        .p2m.valid = 1,
        .p2m.type = t,
    };

    /* The 4-bit type field must be able to hold every real p2m type. */
    BUILD_BUG_ON(p2m_max_real_type > (1 << 4));

    /* Shareability follows the memory attribute: inner-shareable for
     * normal memory, outer-shareable for device memory. */
    switch (mattr)
    {
    case MATTR_MEM:
        e.p2m.sh = LPAE_SH_INNER;
        break;
    case MATTR_DEV:
        e.p2m.sh = LPAE_SH_OUTER;
        break;
    default:
        BUG();
        break;
    }

    /* Fill in write/xn permission bits from the type and access policy. */
    p2m_set_permission(&e, t, a);

    /* The address must be page-aligned and within the supported PA range. */
    ASSERT(!(pa & ~PAGE_MASK));
    ASSERT(!(pa & ~PADDR_MASK));

    e.bits |= pa;

    return e;
}

/* Write a p2m entry in place, optionally cleaning the cache line so page
 * walkers that do not snoop the data cache observe the update. */
static inline void p2m_write_pte(lpae_t *p, lpae_t pte, bool_t flush_cache)
{
    write_pte(p, pte);
    if ( flush_cache )
        clean_dcache(*p);
}
/* Arm endpoint `ep` to transmit `length` bytes from `ptr` via DMA.
 *
 * Register order matters: transfer size and DMA address are programmed
 * before the endpoint is enabled, and the cache is cleaned so the DMA
 * engine reads the caller's data, not stale lines.
 *
 * NOTE(review): the DIEP* register names indicate a device IN endpoint
 * (device-to-host), although the original comments said "OUT" — TODO
 * confirm against the controller manual. */
static void ep_send(int ep, const void *ptr, int length)
{
    endpoints[ep].busy = true;
    endpoints[ep].size = length;
    DIEPCTL(ep) |= 0x8000; /* EPx ACTIVE (original said "OUT ACTIVE"; DIEP* suggests IN — verify) */

    /* Max packet size depends on negotiated bus speed. */
    int blocksize = usb_drv_port_speed() ? 512 : 64;
    int packets = (length + blocksize - 1) / blocksize;

    if (!length)
    {
        /* Zero-length transfer: one empty packet, no DMA source. */
        DIEPTSIZ(ep) = 1 << 19; /* one empty packet */
        DIEPDMA(ep) = NULL;
    }
    else
    {
        /* Byte count in the low bits, packet count at bit 19. */
        DIEPTSIZ(ep) = length | (packets << 19);
        DIEPDMA(ep) = ptr;
    }

    /* Make the payload visible to the DMA engine before enabling. */
    clean_dcache();

    DIEPCTL(ep) |= 0x84000000; /* EPx ENABLE + CLEARNAK (original said "OUT" — verify) */
}
/* Bring up a remote CPU */
/* Returns 0 on success, a negative error code if the secondary's page
 * tables cannot be built, the platform bringup call fails, or the CPU
 * never reports itself online within the 1 s deadline (-EIO).
 *
 * NOTE(review): the clean_dcache(smp_up_cpu) calls push the gate variable
 * to memory so the secondary — possibly running with caches off — sees it;
 * keep them immediately after the corresponding stores. */
int __cpu_up(unsigned int cpu)
{
    int rc;
    s_time_t deadline;

    printk("Bringing up CPU%d\n", cpu);

    rc = init_secondary_pagetables(cpu);
    if ( rc < 0 )
        return rc;

    console_start_sync(); /* Secondary may use early_printk */

    /* Tell the remote CPU which stack to boot on. */
    init_data.stack = idle_vcpu[cpu]->arch.stack;

    /* Tell the remote CPU what its logical CPU ID is. */
    init_data.cpuid = cpu;

    /* Open the gate for this CPU */
    smp_up_cpu = cpu_logical_map(cpu);
    clean_dcache(smp_up_cpu);

    rc = arch_cpu_up(cpu);

    console_end_sync();

    if ( rc < 0 )
    {
        printk("Failed to bring up CPU%d\n", cpu);
        return rc;
    }

    deadline = NOW() + MILLISECS(1000);

    /* Poll for the CPU to mark itself online, letting softirqs run so
     * the bringup path isn't starved. */
    while ( !cpu_online(cpu) && NOW() < deadline )
    {
        cpu_relax();
        process_pending_softirqs();
    }

    /*
     * Nuke start of day info before checking one last time if the CPU
     * actually came online. If it is not online it may still be
     * trying to come up and may show up later unexpectedly.
     *
     * This doesn't completely avoid the possibility of the supposedly
     * failed CPU trying to progress with another CPUs stack settings
     * etc, but better than nothing, hopefully.
     */
    init_data.stack = NULL;
    init_data.cpuid = ~0;
    smp_up_cpu = MPIDR_INVALID;
    clean_dcache(smp_up_cpu);

    if ( !cpu_online(cpu) )
    {
        printk("CPU%d never came online\n", cpu);
        return -EIO;
    }

    return 0;
}
/* Clean the entire data cache (virtual-address variant, whole cache).
 * Thin wrapper forwarding to the no-argument clean_dcache(). */
void Mmu<Flush_area, Ram>::clean_vdcache()
{
    clean_dcache();
}
/* Clean the data cache over the virtual address range [start, end).
 * Thin wrapper forwarding to the ranged clean_dcache() overload.
 * NOTE(review): whether `end` is inclusive or exclusive depends on the
 * underlying clean_dcache(start, end) — confirm at its definition. */
void Mmu<Flush_area, Ram>::clean_vdcache(void const *start, void const *end)
{
    clean_dcache(start, end);
}
/* One-time (and per-CPU) setup for the low-power idle/reset path.
 *
 * On the boot call (`from_boot`), parses the `jtag`/`wfi` boot-args,
 * installs the low exception vectors at the base of physical memory
 * (skipping the sleep-token area), and patches the physical copies of
 * the reset handler's pointers (boot args, CPU data entries) so a CPU
 * resuming with the MMU off can find them. On every call, records the
 * resume handler's physical address in this CPU's data area and cleans
 * it to memory for the same MMU-off reader.
 *
 * NOTE(review): the bcopy_phys fixups write 4 bytes at physical offsets
 * computed relative to ExceptionLowVectorsBase — 32-bit-only arithmetic;
 * do not reorder or "simplify" these. */
void cpu_machine_idle_init(boolean_t from_boot)
{
    /* static: their *addresses* are handed to bcopy_phys below, so the
     * values must outlive this call. */
    static const unsigned int *BootArgs_paddr = (unsigned int *)NULL;
    static const unsigned int *CpuDataEntries_paddr = (unsigned int *)NULL;
    static unsigned int resume_idle_cpu_paddr = (unsigned int )NULL;
    cpu_data_t *cpu_data_ptr = getCpuDatap();

    if (from_boot) {
        unsigned int jtag = 0;
        unsigned int wfi;

        /* jtag=<nonzero> disables the idle path (keeps JTAG usable). */
        if (PE_parse_boot_argn("jtag", &jtag, sizeof (jtag))) {
            if (jtag != 0)
                idle_enable = FALSE;
            else
                idle_enable = TRUE;
        } else
            idle_enable = TRUE;

        if (!PE_parse_boot_argn("wfi", &wfi, sizeof (wfi)))
            wfi = 1;

        /* wfi=0: patch the WFI instruction to a NOP in physical memory.
         * wfi=2: disable the fast-WFI variant. */
        if (wfi == 0)
            bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&patch_to_nop),
                       (addr64_t)ml_static_vtop((vm_offset_t)&wfi_inst),
                       sizeof(unsigned));
        if (wfi == 2)
            wfi_fast = 0;

        /* Map the first physical page so the vectors can be written. */
        LowExceptionVectorsAddr = (void *)ml_io_map(ml_vtophys((vm_offset_t)gPhysBase), PAGE_SIZE);

        /* Copy Exception Vectors low, but don't touch the sleep token
         * (bytes 0x90..0xA0 are skipped). */
        bcopy((void *)&ExceptionLowVectorsBase, (void *)LowExceptionVectorsAddr, 0x90);
        bcopy(((void *)(((vm_offset_t)&ExceptionLowVectorsBase) + 0xA0)),
              ((void *)(((vm_offset_t)LowExceptionVectorsAddr) + 0xA0)),
              ARM_PGBYTES - 0xA0);

        start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);

        /* Patch ResetHandlerData.boot_args (in the low-physical copy of
         * the vectors page) with the physical boot-args pointer. */
        BootArgs_paddr = (unsigned int *)ml_static_vtop((vm_offset_t)BootArgs);
        bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&BootArgs_paddr),
                   (addr64_t)((unsigned int)(gPhysBase) +
                              ((unsigned int)&(ResetHandlerData.boot_args) -
                               (unsigned int)&ExceptionLowVectorsBase)),
                   4);

        /* Same fixup for ResetHandlerData.cpu_data_entries. */
        CpuDataEntries_paddr = (unsigned int *)ml_static_vtop((vm_offset_t)CpuDataEntries);
        bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&CpuDataEntries_paddr),
                   (addr64_t)((unsigned int)(gPhysBase) +
                              ((unsigned int)&(ResetHandlerData.cpu_data_entries) -
                               (unsigned int)&ExceptionLowVectorsBase)),
                   4);

        /* Clean the patched page to the point of coherency for MMU-off
         * readers. */
        CleanPoC_DcacheRegion((vm_offset_t) phystokv(gPhysBase), PAGE_SIZE);

        resume_idle_cpu_paddr = (unsigned int)ml_static_vtop((vm_offset_t)&resume_idle_cpu);
    }

    if (cpu_data_ptr == &BootCpuData) {
        bcopy(((const void *)running_signature), (void *)(IOS_STATE), IOS_STATE_SIZE);
    };

    /* Point this CPU's reset handler at the physical resume entry and
     * clean the cpu_data so a resuming (cache-cold) CPU sees it. */
    cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
    clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}