/*
 * Attach the Cortex-A9 MPCore Snoop Control Unit (SCU): map its
 * registers, count the CPUs it reports, and (on MP kernels) enable it.
 */
void ampscu_attach(struct device *parent, struct device *self, void *args)
{
	struct ampscu_softc *sc = (struct ampscu_softc *)self;
	struct cortex_attach_args *ca = args;

	sc->sc_iot = ca->ca_iot;

	/* The SCU lives at a fixed offset from the private peripheral base. */
	if (bus_space_map(sc->sc_iot, ca->ca_periphbase + SCU_ADDR,
	    SCU_SIZE, 0, &sc->sc_ioh))
		panic("ampscu_attach: bus_space_map failed!");

	/* Record how many CPUs the SCU knows about for later MP bringup. */
	ncpusfound = ampscu_ncpus(sc);
	printf(": %d CPUs\n", ncpusfound);

#ifdef MULTIPROCESSOR
	/*
	 * ARM Errata 764369: on Cortex-A9 set bit 0 of the register at
	 * offset 0x30 (presumably the SCU diagnostic control register
	 * named by the errata — confirm against the errata notice)
	 * before enabling the SCU.
	 */
	if ((curcpu()->ci_arm_cpuid & CPU_ID_CORTEX_A9_MASK) == CPU_ID_CORTEX_A9)
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0x30,
		    bus_space_read_4(sc->sc_iot, sc->sc_ioh, 0x30) | 1);

	/* Set the enable bit (bit 0) in the SCU control register. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, SCU_CTRL,
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, SCU_CTRL) | 1);

	/* Flush ALL the caches. */
	cpu_drain_writebuf();
	cpu_idcache_wbinv_all();
	cpu_sdcache_wbinv_all();
	cpu_drain_writebuf();
#endif
}
/*
 * sysarch(2) handler: drain the CPU write buffer on behalf of
 * userland.  The operation carries no arguments and always succeeds.
 */
static int
arm32_drain_writebuf(struct thread *td, void *args)
{
	/* Nothing to decode in 'args' for this operation. */
	cpu_drain_writebuf();
	td->td_retval[0] = 0;
	return (0);
}
static int arm32_drain_writebuf(struct thread *td, void *args) { /* No args. */ #if __ARM_ARCH < 6 cpu_drain_writebuf(); #else dsb(); cpu_l2cache_drain_writebuf(); #endif td->td_retval[0] = 0; return (0); }
/*
 * Tentatively read an 8, 16, or 32-bit value from 'addr'.
 * On success the value is stored through 'rptr' (if non-NULL) and
 * zero is returned; otherwise EFAULT is returned.
 */
int
badaddr_read(void *addr, size_t size, void *rptr)
{
	extern int badaddr_read_1(const uint8_t *, uint8_t *);
	extern int badaddr_read_2(const uint16_t *, uint16_t *);
	extern int badaddr_read_4(const uint32_t *, uint32_t *);
	union {
		uint8_t v1;
		uint16_t v2;
		uint32_t v4;
	} val;
	int error, spl;

	/* Make sure pending stores have reached memory first. */
	cpu_drain_writebuf();

	/* Block interrupts while we poke the possibly-bad address. */
	spl = splhigh();

	switch (size) {
	case sizeof(uint8_t):
		error = badaddr_read_1(addr, &val.v1);
		if (error == 0 && rptr != NULL)
			*(uint8_t *)rptr = val.v1;
		break;

	case sizeof(uint16_t):
		error = badaddr_read_2(addr, &val.v2);
		if (error == 0 && rptr != NULL)
			*(uint16_t *)rptr = val.v2;
		break;

	case sizeof(uint32_t):
		error = badaddr_read_4(addr, &val.v4);
		if (error == 0 && rptr != NULL)
			*(uint32_t *)rptr = val.v4;
		break;

	default:
		panic("%s: invalid size (%zu)", __func__, size);
	}

	splx(spl);

	/* Return EFAULT if the address was invalid, else zero */
	return (error);
}
/* * Tentatively read an 8, 16, or 32-bit value from 'addr'. * If the read succeeds, the value is written to 'rptr' and zero is returned. * Else, return EFAULT. */ int badaddr_read(void *addr, size_t size, void *rptr) { union { uint8_t v1; uint16_t v2; uint32_t v4; } u; int rv; cpu_drain_writebuf(); /* Read from the test address. */ switch (size) { case sizeof(uint8_t): rv = badaddr_read_1(addr, &u.v1); if (rv == 0 && rptr) *(uint8_t *) rptr = u.v1; break; case sizeof(uint16_t): rv = badaddr_read_2(addr, &u.v2); if (rv == 0 && rptr) *(uint16_t *) rptr = u.v2; break; case sizeof(uint32_t): rv = badaddr_read_4(addr, &u.v4); if (rv == 0 && rptr) *(uint32_t *) rptr = u.v4; break; default: panic("badaddr: invalid size (%lu)", (u_long) size); } /* Return EFAULT if the address was invalid, else zero */ return (rv); }
/*
 * void cpu_reboot(int howto, char *bootstr)
 *
 * Reboots the system
 *
 * Deal with any syncing, unmounting, dumping and shutdown hooks,
 * then reset the CPU.
 */
void
cpu_reboot(int howto, char *bootstr)
{
	/*
	 * If we are still cold then hit the air brakes
	 * and crash to earth fast: light the red status LED and
	 * halt without attempting any filesystem activity.
	 */
	if (cold) {
		*(volatile uint8_t *)HDLG_LEDCTRL |= LEDCTRL_STAT_RED;
		howto |= RB_HALT;
		goto haltsys;
	}

	/* Disable console buffering */

	/*
	 * If RB_NOSYNC was not specified sync the discs.
	 * Note: Unless cold is set to 1 here, syslogd will die during the
	 * unmount.  It looks like syslogd is getting woken up only to find
	 * that it cannot page part of the binary in as the filesystem has
	 * been unmounted.
	 */
	if ((howto & RB_NOSYNC) == 0) {
		bootsync();
		/*resettodr();*/
	}

	/* wait 1s */
	delay(1 * 1000 * 1000);

	/* Say NO to interrupts */
	splhigh();

	/* Do a dump if requested (but not when also halting). */
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP) {
		dumpsys();
	}

haltsys:
	/* Run any shutdown hooks */
	doshutdownhooks();

	/* Make sure IRQ's are disabled */
	IRQdisable;

	if (howto & RB_HALT) {
		/* Ask the power-management register to power off the box. */
		*(volatile uint8_t *)HDLG_PWRMNG = PWRMNG_POWOFF;
		delay(3 * 1000 * 1000);	/* wait 3s */
		printf("SHUTDOWN FAILED!\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
	}

	printf("rebooting...\n\r");

	/* Disable IRQs/FIQs, flush the caches, then pull the reset line. */
	(void)disable_interrupts(I32_bit|F32_bit);
	cpu_idcache_wbinv_all();
	cpu_drain_writebuf();
	*(volatile uint8_t *)HDLG_PWRMNG = PWRMNG_RESET;
	delay(1 * 1000 * 1000);	/* wait 1s */

	/* ...and if that didn't work, just croak. */
	printf("RESET FAILED!\n");
	for (;;) {
		continue;
	}
}
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 *
 * Allocates kernel virtual space for the segments, enters wired
 * read/write mappings for each physical page, and — when
 * BUS_DMA_COHERENT is requested — makes the mapping uncacheable.
 * Returns 0 on success or ENOMEM if no KVA is available; the mapped
 * address is returned through 'kvap'.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	pt_entry_t *ptep/*, pte*/;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n",
	    t, segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	/* Walk every page of every segment and wire it into the KVA. */
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", addr, va);
#endif	/* DEBUG_DMA */
			/* Segments must not exceed the rounded-up size. */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
			/*
			 * If the memory must remain coherent with the
			 * cache then we must make the memory uncacheable
			 * in order to maintain virtual cache coherency.
			 * We must also guarantee the cache does not already
			 * contain the virtual addresses we are making
			 * uncacheable.
			 */
			if (flags & BUS_DMA_COHERENT) {
				/* Evict the page from the cache, then
				 * clear the cacheable bits in its PTE. */
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
				cpu_drain_writebuf();
				ptep = vtopte(va);
				*ptep &= ~L2_S_CACHE_MASK;
				PTE_SYNC(ptep);
				tlb_flush();
			}
#ifdef DEBUG_DMA
			ptep = vtopte(va);
			printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif	/* DEBUG_DMA */
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return (0);
}
/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 *
 * This version works for the Virtually Indexed Virtually Tagged
 * cache found on 32-bit ARM processors.
 *
 * XXX Should have separate versions for write-through vs.
 * XXX write-back caches.  We currently assume write-back
 * XXX here, which is not as efficient as it could be for
 * XXX the write-through case.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{

#ifdef DEBUG_DMA
	printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
	    t, map, offset, len, ops);
#endif	/* DEBUG_DMA */

	/*
	 * Mixing of PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	/* Sanity-check the requested range against the mapping. */
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)",
		    offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	/*
	 * For a virtually-indexed write-back cache, we need
	 * to do the following things:
	 *
	 *	PREREAD -- Invalidate the D-cache.  We do this
	 *	here in case a write-back is required by the back-end.
	 *
	 *	PREWRITE -- Write-back the D-cache.  Note that if
	 *	we are doing a PREREAD|PREWRITE, we can collapse
	 *	the whole thing into a single Wb-Inv.
	 *
	 *	POSTREAD -- Invalidate the D-Cache.  Contents of
	 *	the cache could be from before a device wrote
	 *	to the memory.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	/* Skip cache frobbing if mapping was COHERENT. */
	if (map->_dm_flags & ARM32_DMAMAP_COHERENT) {
		/* Drain the write buffer. */
		cpu_drain_writebuf();
		return;
	}

	/*
	 * If the mapping belongs to a non-kernel vmspace, and the
	 * vmspace has not been active since the last time a full
	 * cache flush was performed, we don't need to do anything.
	 */
	if (__predict_false(map->_dm_proc != NULL &&
	    map->_dm_proc->p_vmspace->vm_map.pmap->pm_cstate.cs_cache_d == 0))
		return;

	/* Dispatch on how the buffer was originally loaded. */
	switch (map->_dm_buftype) {
	case ARM32_BUFTYPE_LINEAR:
		_bus_dmamap_sync_linear(t, map, offset, len, ops);
		break;

	case ARM32_BUFTYPE_MBUF:
		_bus_dmamap_sync_mbuf(t, map, offset, len, ops);
		break;

	case ARM32_BUFTYPE_UIO:
		_bus_dmamap_sync_uio(t, map, offset, len, ops);
		break;

	case ARM32_BUFTYPE_INVALID:
		panic("_bus_dmamap_sync: ARM32_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", map->_dm_buftype);
		panic("_bus_dmamap_sync");
	}

	/* Drain the write buffer. */
	cpu_drain_writebuf();
}