Example #1
int
dmar_init_qi(struct dmar_unit *unit)
{
	uint64_t iqa;
	uint32_t ics;
	int qi_sz;

	if (!DMAR_HAS_QI(unit) || (unit->hw_cap & DMAR_CAP_CM) != 0)
		return (0);
	unit->qi_enabled = 1;
	TUNABLE_INT_FETCH("hw.dmar.qi", &unit->qi_enabled);
	if (!unit->qi_enabled)
		return (0);

	TAILQ_INIT(&unit->tlb_flush_entries);
	TASK_INIT(&unit->qi_task, 0, dmar_qi_task, unit);
	unit->qi_taskqueue = taskqueue_create_fast("dmarqf", M_WAITOK,
	    taskqueue_thread_enqueue, &unit->qi_taskqueue);
	taskqueue_start_threads(&unit->qi_taskqueue, 1, PI_AV,
	    "dmar%d qi taskq", unit->unit);

	unit->inv_waitd_gen = 0;
	unit->inv_waitd_seq = 1;

	qi_sz = DMAR_IQA_QS_DEF;
	TUNABLE_INT_FETCH("hw.dmar.qi_size", &qi_sz);
	if (qi_sz > DMAR_IQA_QS_MAX)
		qi_sz = DMAR_IQA_QS_MAX;
	unit->inv_queue_size = (1ULL << qi_sz) * PAGE_SIZE;
	/* Reserve one descriptor to prevent wraparound. */
	unit->inv_queue_avail = unit->inv_queue_size - DMAR_IQ_DESCR_SZ;

	/* Reads of the invalidation queue by the DMAR are always coherent. */
	unit->inv_queue = kmem_alloc_contig(kernel_arena, unit->inv_queue_size,
	    M_WAITOK | M_ZERO, 0, dmar_high, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	unit->inv_waitd_seq_hw_phys = pmap_kextract(
	    (vm_offset_t)&unit->inv_waitd_seq_hw);

	DMAR_LOCK(unit);
	dmar_write8(unit, DMAR_IQT_REG, 0);
	iqa = pmap_kextract(unit->inv_queue);
	iqa |= qi_sz;
	dmar_write8(unit, DMAR_IQA_REG, iqa);
	dmar_enable_qi(unit);
	ics = dmar_read4(unit, DMAR_ICS_REG);
	if ((ics & DMAR_ICS_IWC) != 0) {
		ics = DMAR_ICS_IWC;
		dmar_write4(unit, DMAR_ICS_REG, ics);
	}
	dmar_enable_qi_intr(unit);
	DMAR_UNLOCK(unit);

	return (0);
}
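The pattern in dmar_init_qi() — allocate wired, physically contiguous memory with kmem_alloc_contig(), recover its physical address with pmap_kextract(), and program that address into a device register — recurs throughout these examples. A minimal hedged sketch of the same idea for a hypothetical device; RING_SIZE, RING_BASE_REG, struct mydev_softc and mydev_write8() are illustrative placeholders, not part of the DMAR driver:

/*
 * Hedged sketch only: allocate a wired, physically contiguous ring,
 * recover its physical address, and hand it to the device.  RING_SIZE,
 * RING_BASE_REG, struct mydev_softc and mydev_write8() are
 * illustrative placeholders.
 */
static int
mydev_init_ring(struct mydev_softc *sc)
{
	vm_paddr_t ring_pa;

	sc->ring_va = kmem_alloc_contig(kernel_arena, RING_SIZE,
	    M_WAITOK | M_ZERO, 0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0,
	    VM_MEMATTR_DEFAULT);
	/* The VA is wired kernel memory, so pmap_kextract() is safe. */
	ring_pa = pmap_kextract(sc->ring_va);
	mydev_write8(sc, RING_BASE_REG, ring_pa);
	return (0);
}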
Example #2
int
uart_cpu_eqres(struct uart_bas *b1, struct uart_bas *b2)
{

	if (b1->bst != b2->bst)
		return (0);
	if (pmap_kextract(b1->bsh) == 0)
		return (0);
	if (pmap_kextract(b2->bsh) == 0)
		return (0);
	return ((pmap_kextract(b1->bsh) == pmap_kextract(b2->bsh)) ? 1 : 0);
}
Example #3
void
platform_mp_start_ap(void)
{
	bus_space_handle_t ocm_handle;

	/* Map in magic location to give entry address to CPU1. */
	if (bus_space_map(fdtbus_bs_tag, ZYNQ7_CPU1_ENTRY, 4,
	    0, &ocm_handle) != 0)
		panic("platform_mp_start_ap: Couldn't map OCM\n");

	/* Write start address for CPU1. */
	bus_space_write_4(fdtbus_bs_tag, ocm_handle, 0,
	    pmap_kextract((vm_offset_t)mpentry));

	/*
	 * The SCU is enabled by the BOOTROM but I think the second CPU doesn't
	 * turn on filtering until after the wake-up below. I think that's why
	 * things don't work if I don't put these cache ops here.  Also, the
	 * magic location, 0xfffffff0, isn't in the SCU's filtering range so it
	 * needs a write-back too.
	 */
	cpu_idcache_wbinv_all();
	cpu_l2cache_wbinv_all();

	/* Wake up CPU1. */
	armv7_sev();

	bus_space_unmap(fdtbus_bs_tag, ocm_handle, 4);
}
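Several of the SMP examples below repeat the three-step recipe seen here: publish the physical address of mpentry, write the caches back so the not-yet-coherent secondary core can see it, then signal the core. A condensed hedged sketch; SOC_ENTRY_ADDR stands in for the SoC-specific mailbox address, everything else is taken from the examples in this section:

/*
 * Hedged sketch of the recurring AP-startup recipe; SOC_ENTRY_ADDR is
 * a hypothetical SoC-specific mailbox address.
 */
static void
generic_mp_start_ap(void)
{
	bus_space_handle_t mbox;

	if (bus_space_map(fdtbus_bs_tag, SOC_ENTRY_ADDR, 4, 0, &mbox) != 0)
		panic("Couldn't map the AP entry mailbox\n");

	/* 1. Publish the physical address of the MP trampoline. */
	bus_space_write_4(fdtbus_bs_tag, mbox, 0,
	    pmap_kextract((vm_offset_t)mpentry));

	/* 2. Write the caches back so the not-yet-coherent AP sees it. */
	cpu_idcache_wbinv_all();
	cpu_l2cache_wbinv_all();

	/* 3. Wake the AP, which is spinning in WFE. */
	armv7_sev();

	bus_space_unmap(fdtbus_bs_tag, mbox, 4);
}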
Example #4
physAddress_t
XX_VirtToPhys(void *addr)
{
	vm_paddr_t paddr;
	int cpu;

	cpu = PCPU_GET(cpuid);

	/* Handle NULL address */
	if (addr == NULL)
		return (-1);

	/* Check CCSR */
	if ((vm_offset_t)addr >= ccsrbar_va &&
	    (vm_offset_t)addr < ccsrbar_va + ccsrbar_size)
		return (((vm_offset_t)addr - ccsrbar_va) + ccsrbar_pa);

	/* Handle BMAN mappings */
	if (((vm_offset_t)addr >= XX_PInfo.portal_ce_va[BM_PORTAL]) &&
	    ((vm_offset_t)addr < XX_PInfo.portal_ce_va[BM_PORTAL] +
	    XX_PInfo.portal_ce_size[BM_PORTAL][cpu]))
		return (XX_PInfo.portal_ce_pa[BM_PORTAL][cpu] +
		    (vm_offset_t)addr - XX_PInfo.portal_ce_va[BM_PORTAL]);

	if (((vm_offset_t)addr >= XX_PInfo.portal_ci_va[BM_PORTAL]) &&
	    ((vm_offset_t)addr < XX_PInfo.portal_ci_va[BM_PORTAL] +
	    XX_PInfo.portal_ci_size[BM_PORTAL][cpu]))
		return (XX_PInfo.portal_ci_pa[BM_PORTAL][cpu] +
		    (vm_offset_t)addr - XX_PInfo.portal_ci_va[BM_PORTAL]);

	/* Handle QMAN mappings */
	if (((vm_offset_t)addr >= XX_PInfo.portal_ce_va[QM_PORTAL]) &&
	    ((vm_offset_t)addr < XX_PInfo.portal_ce_va[QM_PORTAL] +
	    XX_PInfo.portal_ce_size[QM_PORTAL][cpu]))
		return (XX_PInfo.portal_ce_pa[QM_PORTAL][cpu] +
		    (vm_offset_t)addr - XX_PInfo.portal_ce_va[QM_PORTAL]);

	if (((vm_offset_t)addr >= XX_PInfo.portal_ci_va[QM_PORTAL]) &&
	    ((vm_offset_t)addr < XX_PInfo.portal_ci_va[QM_PORTAL] +
	    XX_PInfo.portal_ci_size[QM_PORTAL][cpu]))
		return (XX_PInfo.portal_ci_pa[QM_PORTAL][cpu] +
		    (vm_offset_t)addr - XX_PInfo.portal_ci_va[QM_PORTAL]);

	if (PMAP_HAS_DMAP && (vm_offset_t)addr >= DMAP_BASE_ADDRESS &&
	    (vm_offset_t)addr <= DMAP_MAX_ADDRESS)
		return (DMAP_TO_PHYS((vm_offset_t)addr));
	else
		paddr = pmap_kextract((vm_offset_t)addr);

	if (paddr == 0)
		printf("NetCommSW: "
		    "Unable to translate virtual address %p!\n", addr);
	else
		pmap_track_page(kernel_pmap, (vm_offset_t)addr);

	return (paddr);
}
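Callers of XX_VirtToPhys() are expected to treat the 0 and -1 returns as failed translations before handing the address to hardware. A hedged usage sketch; enqueue_buf() and fd_set_addr() are illustrative names, not part of the NetCommSW API:

/*
 * Hedged usage sketch: treat the 0 and -1 returns as failed
 * translations.  enqueue_buf() and fd_set_addr() are illustrative
 * names only.
 */
static int
enqueue_buf(void *fd, void *buf)
{
	physAddress_t pa;

	pa = XX_VirtToPhys(buf);
	if (pa == 0 || pa == (physAddress_t)-1)
		return (EFAULT);	/* untranslatable address */
	fd_set_addr(fd, pa);
	return (0);
}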
Example #5
static void
vclrmgfifo(vm_offset_t va)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page in vclrmgfifo!"));
	p->pageq.tqe_next = NULL;
}
Example #6
/*
 * Convert kernel VA to physical address
 */
vm_paddr_t
kvtop(void *addr)
{
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return (pa);
}
Example #7
static void
vsetmgfifo(vm_offset_t va, struct memguard_fifo *mgfifo)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page in vsetmgfifo!"));
	p->pageq.tqe_next = (vm_page_t)mgfifo;
}
Example #8
void
aw_mp_start_ap(platform_t plat)
{
	bus_space_handle_t cpucfg;
	bus_space_handle_t prcm;
	int i, j, soc_family;
	uint32_t val;

	soc_family = allwinner_soc_family();
	if (soc_family == ALLWINNERSOC_SUN7I) {
		if (bus_space_map(fdtbus_bs_tag, A20_CPUCFG_BASE, CPUCFG_SIZE,
		    0, &cpucfg) != 0)
			panic("Couldn't map the CPUCFG\n");
	} else {
		if (bus_space_map(fdtbus_bs_tag, CPUCFG_BASE, CPUCFG_SIZE,
		    0, &cpucfg) != 0)
			panic("Couldn't map the CPUCFG\n");
		if (bus_space_map(fdtbus_bs_tag, PRCM_BASE, PRCM_SIZE, 0,
		    &prcm) != 0)
			panic("Couldn't map the PRCM\n");
	}

	dcache_wbinv_poc_all();

	bus_space_write_4(fdtbus_bs_tag, cpucfg, CPUCFG_P_REG0,
	    pmap_kextract((vm_offset_t)mpentry));

	/*
	 * Assert nCOREPORESET low and set L1RSTDISABLE low.
	 * Ensure DBGPWRDUP is set to LOW to prevent any external
	 * debug access to the processor.
	 */
	for (i = 1; i < mp_ncpus; i++)
		bus_space_write_4(fdtbus_bs_tag, cpucfg, CPU_RST_CTL(i), 0);

	/* Set L1RSTDISABLE low */
	val = bus_space_read_4(fdtbus_bs_tag, cpucfg, CPUCFG_GENCTL);
	for (i = 1; i < mp_ncpus; i++)
		val &= ~(1 << i);
	bus_space_write_4(fdtbus_bs_tag, cpucfg, CPUCFG_GENCTL, val);

	/* Set DBGPWRDUP low */
	val = bus_space_read_4(fdtbus_bs_tag, cpucfg, CPUCFG_DBGCTL1);
	for (i = 1; i < mp_ncpus; i++)
		val &= ~(1 << i);
	bus_space_write_4(fdtbus_bs_tag, cpucfg, CPUCFG_DBGCTL1, val);

	/* Release power clamp */
	for (i = 1; i < mp_ncpus; i++)
		for (j = 0; j <= CPU_PWR_CLAMP_STEPS; j++) {
			if (soc_family != ALLWINNERSOC_SUN7I) {
				bus_space_write_4(fdtbus_bs_tag, prcm,
				    CPU_PWR_CLAMP(i), 0xff >> j);
			} else {
Example #9
void
platform_mp_start_ap(void)
{
	bus_space_handle_t scu, rst, ram;
	int reg;

	if (bus_space_map(fdtbus_bs_tag, SCU_PHYSBASE,
					SCU_SIZE, 0, &scu) != 0)
		panic("Couldn't map the SCU\n");
	if (bus_space_map(fdtbus_bs_tag, RSTMGR_PHYSBASE,
					RSTMGR_SIZE, 0, &rst) != 0)
		panic("Couldn't map the reset manager (RSTMGR)\n");
	if (bus_space_map(fdtbus_bs_tag, RAM_PHYSBASE,
					RAM_SIZE, 0, &ram) != 0)
		panic("Couldn't map the first physram page\n");

	/* Invalidate SCU cache tags */
	bus_space_write_4(fdtbus_bs_tag, scu,
		SCU_INV_TAGS_REG, 0x0000ffff);

	/*
	 * Erratum ARM/MP: 764369 (problems with cache maintenance).
	 * Setting the "disable-migratory bit" in the undocumented SCU
	 * Diagnostic Control Register helps work around the problem.
	 */
	reg = bus_space_read_4(fdtbus_bs_tag, scu, SCU_DIAG_CONTROL);
	reg |= (SCU_DIAG_DISABLE_MIGBIT);
	bus_space_write_4(fdtbus_bs_tag, scu, SCU_DIAG_CONTROL, reg);

	/* Put CPU1 to reset state */
	bus_space_write_4(fdtbus_bs_tag, rst, MPUMODRST, MPUMODRST_CPU1);

	/* Enable the SCU, then clean the cache on this core */
	reg = bus_space_read_4(fdtbus_bs_tag, scu, SCU_CONTROL_REG);
	reg |= (SCU_CONTROL_ENABLE);
	bus_space_write_4(fdtbus_bs_tag, scu, SCU_CONTROL_REG, reg);

	/* Set up trampoline code */
	mpentry_addr = (char *)pmap_kextract((vm_offset_t)mpentry);
	bus_space_write_region_4(fdtbus_bs_tag, ram, 0,
	    (uint32_t *)&socfpga_trampoline, 8);

	cpu_idcache_wbinv_all();
	cpu_l2cache_wbinv_all();

	/* Put CPU1 out from reset */
	bus_space_write_4(fdtbus_bs_tag, rst, MPUMODRST, 0);

	armv7_sev();

	bus_space_unmap(fdtbus_bs_tag, scu, SCU_SIZE);
	bus_space_unmap(fdtbus_bs_tag, rst, RSTMGR_SIZE);
	bus_space_unmap(fdtbus_bs_tag, ram, RAM_SIZE);
}
Example #10
static int
xsd_dev_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{

	if (offset != 0)
		return (EINVAL);

	*paddr = pmap_kextract((vm_offset_t)xen_store);

	return (0);
}
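A d_mmap handler like xsd_dev_mmap() only takes effect once it is wired into a cdevsw and a device node is created. A minimal hedged sketch of that hookup, assuming the standard make_dev() interface; the device name and permissions are illustrative:

/*
 * Hedged sketch: wiring the d_mmap handler into a character device.
 * The device name and permissions are illustrative.
 */
static struct cdevsw xsd_dev_cdevsw = {
	.d_version =	D_VERSION,
	.d_mmap =	xsd_dev_mmap,
	.d_name =	"xenstore",
};

static struct cdev *xsd_dev;

static void
xsd_dev_create(void)
{
	xsd_dev = make_dev(&xsd_dev_cdevsw, 0, UID_ROOT, GID_WHEEL,
	    0400, "xen/xenstore");
}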
Example #11
/*
 * vtomgfifo() converts a virtual address of the first page allocated for
 * an item to a memguard_fifo_pool reference for the corresponding item's
 * size.
 *
 * vsetmgfifo() sets a reference in an underlying page for the specified
 * virtual address to an appropriate memguard_fifo_pool.
 *
 * These routines are very similar to those defined by UMA in uma_int.h.
 * The difference is that these routines store the mgfifo in one of the
 * page's fields that is unused when the page is wired rather than the
 * object field, which is used.
 */
static struct memguard_fifo *
vtomgfifo(vm_offset_t va)
{
	vm_page_t p;
	struct memguard_fifo *mgfifo;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page in vtomgfifo!"));
	mgfifo = (struct memguard_fifo *)p->pageq.tqe_next;
	return mgfifo;
}
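Taken together, vsetmgfifo(), vtomgfifo() and vclrmgfifo() give MemGuard a per-page side table keyed by an item's first page. A hedged sketch of the round trip over an item's lifetime:

/*
 * Hedged sketch: the mgfifo back-pointer across an item's lifetime.
 * 'va' is the first page of a MemGuard item, 'pool' its fifo pool.
 */
static void
mgfifo_roundtrip(vm_offset_t va, struct memguard_fifo *pool)
{
	vsetmgfifo(va, pool);		/* at allocation time */
	KASSERT(vtomgfifo(va) == pool, ("mgfifo back-pointer mismatch"));
	vclrmgfifo(va);			/* before the page is freed */
}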
Example #12
void
tegra124_mp_start_ap(platform_t plat)
{
	bus_space_handle_t pmc;
	bus_space_handle_t exvec;
	int i;
	uint32_t val;
	uint32_t mask;

	if (bus_space_map(fdtbus_bs_tag, PMC_PHYSBASE, PMC_SIZE, 0, &pmc) != 0)
		panic("Couldn't map the PMC\n");
	if (bus_space_map(fdtbus_bs_tag, TEGRA_EXCEPTION_VECTORS_BASE,
	    TEGRA_EXCEPTION_VECTORS_SIZE, 0, &exvec) != 0)
		panic("Couldn't map the exception vectors\n");

	bus_space_write_4(fdtbus_bs_tag, exvec, TEGRA_EXCEPTION_VECTOR_ENTRY,
	    pmap_kextract((vm_offset_t)mpentry));
	bus_space_read_4(fdtbus_bs_tag, exvec, TEGRA_EXCEPTION_VECTOR_ENTRY);

	/* Wait until POWERGATE is ready (max 20 APB cycles). */
	do {
		val = bus_space_read_4(fdtbus_bs_tag, pmc,
		    PMC_PWRGATE_TOGGLE);
	} while ((val & PCM_PWRGATE_TOGGLE_START) != 0);

	for (i = 1; i < mp_ncpus; i++) {
		val = bus_space_read_4(fdtbus_bs_tag, pmc, PMC_PWRGATE_STATUS);
		mask = 1 << (i + 8);	/* cpu mask */
		if ((val & mask) == 0) {
			/* Wait until POWERGATE is ready (max 20 APB cycles). */
			do {
				val = bus_space_read_4(fdtbus_bs_tag, pmc,
				    PMC_PWRGATE_TOGGLE);
			} while ((val & PCM_PWRGATE_TOGGLE_START) != 0);
			bus_space_write_4(fdtbus_bs_tag, pmc,
			    PMC_PWRGATE_TOGGLE,
			    PCM_PWRGATE_TOGGLE_START | (8 + i));

			/* Wait until CPU is powered */
			do {
				val = bus_space_read_4(fdtbus_bs_tag, pmc,
				    PMC_PWRGATE_STATUS);
			} while ((val & mask) == 0);
		}
	}
	dsb();
	sev();
	bus_space_unmap(fdtbus_bs_tag, pmc, PMC_SIZE);
	bus_space_unmap(fdtbus_bs_tag, exvec, TEGRA_EXCEPTION_VECTORS_SIZE);
}
Example #13
static int
jzlcd_allocfb(struct jzlcd_softc *sc)
{
	sc->vaddr = kmem_alloc_contig(kernel_arena, sc->fbsize,
	    M_NOWAIT | M_ZERO, 0, ~0, FB_ALIGN, 0, VM_MEMATTR_WRITE_COMBINING);
	if (sc->vaddr == 0) {
		device_printf(sc->dev, "failed to allocate FB memory\n");
		return (ENOMEM);
	}
	sc->paddr = pmap_kextract(sc->vaddr);

	return (0);
}
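Because kmem_alloc_contig() returns a physically contiguous region, extracting the physical address of the first byte describes the whole framebuffer. The matching teardown releases the same mapping; a hedged sketch using the kmem_free() interface that pairs with this allocator (jzlcd_freefb() is an illustrative name):

/*
 * Hedged sketch: the teardown that pairs with jzlcd_allocfb() above.
 * jzlcd_freefb() is an illustrative name.
 */
static void
jzlcd_freefb(struct jzlcd_softc *sc)
{
	kmem_free(kernel_arena, sc->vaddr, sc->fbsize);
	sc->vaddr = 0;
	sc->paddr = 0;
}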
Example #14
static u_long *
v2sizev(vm_offset_t va)
{
	vm_paddr_t pa;
	struct vm_page *p;

	pa = pmap_kextract(va);
	if (pa == 0)
		panic("MemGuard detected double-free of %p", (void *)va);
	p = PHYS_TO_VM_PAGE(pa);
	KASSERT(p->wire_count != 0 && p->queue == PQ_NONE,
	    ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
	return (&p->plinks.memguard.v);
}
Example #15
static int
htif_enumerate(struct htif_softc *sc)
{
    char id[HTIF_ID_LEN] __aligned(HTIF_ALIGN);
    uint64_t paddr;
    uint64_t data;
    uint64_t cmd;
    int len;
    int i;

    device_printf(sc->dev, "Enumerating devices\n");

    for (i = 0; i < HTIF_NDEV; i++) {
        paddr = pmap_kextract((vm_offset_t)&id);
        data = (paddr << IDENTIFY_PADDR_SHIFT);
        data |= IDENTIFY_IDENT;

        sc->identify_id = i;
        sc->identify_done = 0;

        cmd = i;
        cmd <<= HTIF_DEV_ID_SHIFT;
        cmd |= (HTIF_CMD_IDENTIFY << HTIF_CMD_SHIFT);
        cmd |= data;

        htif_command(cmd);

        /* Poll, since interrupts are not enabled yet. */
        while (sc->identify_done == 0) {
            htif_handle_entry(sc);
        }

        len = strnlen(id, sizeof(id));
        if (len <= 0)
            break;

        if (bootverbose)
            printf(" %d %s\n", i, id);

        if (strncmp(id, "disk", 4) == 0)
            htif_add_device(sc, i, id, "htif_blk");
        else if (strncmp(id, "bcd", 3) == 0)
            htif_add_device(sc, i, id, "htif_console");
        else if (strncmp(id, "syscall_proxy", 13) == 0)
            htif_add_device(sc, i, id, "htif_syscall_proxy");
    }

    return (bus_generic_attach(sc->dev));
}
Example #16
vm_offset_t
sgmap_overflow_page(void)
{
	/*
	 * Allocate the overflow page if necessary.
	 */
	if (!overflow_page) {
		overflow_page = malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT);
		if (!overflow_page)
			panic("sgmap_alloc_region: can't allocate overflow page");
		overflow_page_pa = pmap_kextract((vm_offset_t) overflow_page);
	}

	return overflow_page_pa;
}
Example #17
static void
dcons_crom_expose_idt(struct dcons_crom_softc *sc)
{
	static off_t idt_paddr;

	/* XXX */
#ifdef __amd64__
	idt_paddr = (char *)idt - (char *)KERNBASE;
#else /* __i386__ */
	idt_paddr = (off_t)pmap_kextract((vm_offset_t)idt);
#endif

	crom_add_entry(&sc->unit, DCONS_CSR_KEY_RESET_HI, ADDR_HI(idt_paddr));
	crom_add_entry(&sc->unit, DCONS_CSR_KEY_RESET_LO, ADDR_LO(idt_paddr));
}
Example #18
static boolean_t
virt_start_ap(u_int id, phandle_t node, u_int addr_cells, pcell_t *reg)
{
	int err;

	if (running_cpus >= mp_ncpus)
		return (false);
	running_cpus++;

	err = psci_cpu_on(*reg, pmap_kextract((vm_offset_t)mpentry), id);

	if (err != PSCI_RETVAL_SUCCESS)
		return (false);

	return (true);
}
Example #19
void
exynos5_mp_start_ap(platform_t plat)
{
	bus_addr_t sysram, pmu;
	int err, i, j;
	int status;
	int reg;

	err = bus_space_map(fdtbus_bs_tag, EXYNOS_PMU_BASE, 0x20000, 0, &pmu);
	if (err != 0)
		panic("Couldn't map pmu\n");

	if (exynos_get_soc_id() == EXYNOS5420_SOC_ID)
		reg = EXYNOS5420_SYSRAM_NS;
	else
		reg = EXYNOS_SYSRAM;

	err = bus_space_map(fdtbus_bs_tag, reg, 0x100, 0, &sysram);
	if (err != 0)
		panic("Couldn't map sysram\n");

	/* Give power to CPUs */
	for (i = 1; i < mp_ncpus; i++) {
		bus_space_write_4(fdtbus_bs_tag, pmu, CORE_CONFIG(i),
		    CORE_PWR_EN);

		for (j = 10; j >= 0; j--) {
			status = bus_space_read_4(fdtbus_bs_tag, pmu,
			    CORE_STATUS(i));
			if ((status & CORE_PWR_EN) == CORE_PWR_EN)
				break;
			DELAY(10);
			if (j == 0)
				printf("Can't power on CPU%d\n", i);
		}
	}

	bus_space_write_4(fdtbus_bs_tag, sysram, 0x0,
	    pmap_kextract((vm_offset_t)mpentry));

	dcache_wbinv_poc_all();

	dsb();
	sev();
	bus_space_unmap(fdtbus_bs_tag, sysram, 0x100);
	bus_space_unmap(fdtbus_bs_tag, pmu, 0x20000);
}
Example #20
void
platform_mp_start_ap(void)
{
	bus_addr_t scu_addr;

	if (bus_space_map(fdtbus_bs_tag, 0x48240000, 0x1000, 0, &scu_addr) != 0)
		panic("Couldn't map the SCU\n");
	/* Enable the SCU */
	*(volatile unsigned int *)scu_addr |= 1;
	//*(volatile unsigned int *)(scu_addr + 0x30) |= 1;
	cpu_idcache_wbinv_all();
	cpu_l2cache_wbinv_all();
	ti_smc0(0x200, 0xfffffdff, MODIFY_AUX_CORE_0);
	ti_smc0(pmap_kextract((vm_offset_t)mpentry), 0, WRITE_AUX_CORE_1);
	armv7_sev();
	bus_space_unmap(fdtbus_bs_tag, scu_addr, 0x1000);
}
Example #21
/*
 * Initialize an XIO given a kernelspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes and the page offset
 * will be stored in xio_offset.
 */
int
xio_init_kbuf(xio_t xio, void *kbase, size_t kbytes)
{
    vm_offset_t addr;
    vm_paddr_t paddr;
    vm_page_t m;
    int i;
    int n;

    addr = trunc_page((vm_offset_t)kbase);
    xio->xio_flags = 0;
    xio->xio_offset = (vm_offset_t)kbase & PAGE_MASK;
    xio->xio_bytes = 0;
    xio->xio_pages = xio->xio_internal_pages;
    xio->xio_error = 0;
    if ((n = PAGE_SIZE - xio->xio_offset) > kbytes)
	n = kbytes;
    lwkt_gettoken(&vm_token);
    crit_enter();
    for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
	if ((paddr = pmap_kextract(addr)) == 0)
	    break;
	m = PHYS_TO_VM_PAGE(paddr);
	vm_page_hold(m);
	xio->xio_pages[i] = m;
	kbytes -= n;
	xio->xio_bytes += n;
	if ((n = kbytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	addr += PAGE_SIZE;
    }
    crit_exit();
    lwkt_reltoken(&vm_token);
    xio->xio_npages = i;

    /*
     * If a failure occurred, clean out what we loaded and return EFAULT.
     * Return 0 on success.
     */
    if (i < XIO_INTERNAL_PAGES && n) {
	xio_release(xio);
	xio->xio_error = EFAULT;
    }
    return(xio->xio_error);
}
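A hedged usage sketch for xio_init_kbuf(): build an XIO over a kernel buffer, propagate the partial-mapping error, and release the held pages when done. consume_pages() stands in for whatever the caller does with the pages:

/*
 * Hedged usage sketch; consume_pages() is an illustrative name.
 */
static int
use_kbuf(void *kbuf, size_t len)
{
    struct xio xio;
    int error;

    error = xio_init_kbuf(&xio, kbuf, len);
    if (error)
	return (error);		/* EFAULT: buffer not fully mapped */
    consume_pages(xio.xio_pages, xio.xio_npages);
    xio_release(&xio);		/* drop the page holds */
    return (0);
}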
Example #22
/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static int
nexus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vm_offset_t vaddr = (vm_offset_t)buf;

	if (segs == NULL)
		segs = dmat->dt_segments;

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap == kernel_pmap)
			curaddr = pmap_kextract(vaddr);
		else
			curaddr = pmap_extract(pmap, vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->dt_maxsegsz)
			sgsize = dmat->dt_maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		sgsize = nexus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;

		vaddr += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
Example #23
void
platform_mp_start_ap(void)
{
	bus_space_handle_t scu_handle;
	bus_space_handle_t ocm_handle;
	uint32_t scu_ctrl;

	/* Map in SCU control register. */
	if (bus_space_map(fdtbus_bs_tag, SCU_CONTROL_REG, 4,
			  0, &scu_handle) != 0)
		panic("platform_mp_start_ap: Couldn't map SCU config reg\n");

	/* Set SCU enable bit. */
	scu_ctrl = bus_space_read_4(fdtbus_bs_tag, scu_handle, 0);
	scu_ctrl |= SCU_CONTROL_ENABLE;
	bus_space_write_4(fdtbus_bs_tag, scu_handle, 0, scu_ctrl);

	bus_space_unmap(fdtbus_bs_tag, scu_handle, 4);

	/* Map in magic location to give entry address to CPU1. */
	if (bus_space_map(fdtbus_bs_tag, ZYNQ7_CPU1_ENTRY, 4,
	    0, &ocm_handle) != 0)
		panic("platform_mp_start_ap: Couldn't map OCM\n");

	/* Write start address for CPU1. */
	bus_space_write_4(fdtbus_bs_tag, ocm_handle, 0,
	    pmap_kextract((vm_offset_t)mpentry));

	bus_space_unmap(fdtbus_bs_tag, ocm_handle, 4);

	/*
	 * The SCU is enabled above but I think the second CPU doesn't
	 * turn on filtering until after the wake-up below. I think that's why
	 * things don't work if I don't put these cache ops here.  Also, the
	 * magic location, 0xfffffff0, isn't in the SCU's filtering range so it
	 * needs a write-back too.
	 */
	cpu_idcache_wbinv_all();
	cpu_l2cache_wbinv_all();

	/* Wake up CPU1. */
	armv7_sev();
}
Example #24
/*
 * Add a header to be used by libkvm to get the va to pa delta
 */
int
dumpsys_write_aux_headers(struct dumperinfo *di)
{
    Elf_Phdr phdr;
    int error;

    bzero(&phdr, sizeof(phdr));
    phdr.p_type = PT_DUMP_DELTA;
    phdr.p_flags = PF_R;			/* XXX */
    phdr.p_offset = 0;
    phdr.p_vaddr = KERNVIRTADDR;
    phdr.p_paddr = pmap_kextract(KERNVIRTADDR);
    phdr.p_filesz = 0;
    phdr.p_memsz = 0;
    phdr.p_align = PAGE_SIZE;

    error = dumpsys_buf_write(di, (char*)&phdr, sizeof(phdr));
    return (error);
}
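The consumer of this header is libkvm, which derives a constant VA-to-PA delta from the two recorded addresses. A hedged sketch of the arithmetic the header implies; dump_va_to_pa() is illustrative, not libkvm's actual interface:

/*
 * Hedged sketch: the header records one (va, pa) pair, from which a
 * constant delta translates any linearly mapped kernel VA in the dump.
 */
static vm_paddr_t
dump_va_to_pa(const Elf_Phdr *phdr, vm_offset_t va)
{
    vm_offset_t delta;

    delta = phdr->p_vaddr - phdr->p_paddr;	/* KERNVIRTADDR - its PA */
    return (va - delta);	/* valid for the kernel's linear range */
}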
Example #25
void
sgmap_load_region(struct sgmap *sgmap,
		  bus_addr_t sba,
		  vm_offset_t va,
		  bus_size_t size)
{
	bus_addr_t ba, eba;

	/*
	 * Call the chipset to map each page in the mapped range to
	 * the correct physical page.
	 */
	for (ba = sba, eba = sba + size; ba < eba;
	     ba += PAGE_SIZE, va += PAGE_SIZE) {
		vm_offset_t pa = pmap_kextract(va);
		sgmap->map(sgmap->arg, ba, pa);
	}
	sgmap->map(sgmap->arg, ba, overflow_page_pa);
}
Example #26
int
pmsu_boot_secondary_cpu(void)
{
	bus_space_handle_t vaddr;
	int rv;

	rv = bus_space_map(fdtbus_bs_tag, (bus_addr_t)MV_PMSU_BASE, MV_PMSU_REGS_LEN,
	    0, &vaddr);
	if (rv != 0)
		return (rv);

	/* Boot cpu1 */
	bus_space_write_4(fdtbus_bs_tag, vaddr, PMSU_BOOT_ADDR_REDIRECT_OFFSET(1),
	    pmap_kextract((vm_offset_t)mpentry));

	dcache_wbinv_poc_all();
	armv7_sev();

	bus_space_unmap(fdtbus_bs_tag, vaddr, MV_PMSU_REGS_LEN);

	return (0);
}
Example #27
void
platform_mp_start_ap(void)
{
	uint32_t reg, *ptr, cpu_num;

	/* Copy boot code to SRAM */
	*((unsigned int*)(0xf1020240)) = 0xffff0101;
	*((unsigned int*)(0xf1008500)) = 0xffff0003;

	pmap_kenter_nocache(0x880f0000, 0xffff0000);
	reg = 0x880f0000;

	for (ptr = (uint32_t *)mptramp; ptr < (uint32_t *)mpentry;
	    ptr++, reg += 4)
		*((uint32_t *)reg) = *ptr;

	if (mp_ncpus > 1) {
		reg = read_cpu_clkdiv(CPU_DIVCLK_CTRL2_RATIO_FULL0);
		reg &= 0x00ffffff;
		reg |= 0x01000000;
		write_cpu_clkdiv(CPU_DIVCLK_CTRL2_RATIO_FULL0, reg);
	}
	if (mp_ncpus > 2) {
		reg = read_cpu_clkdiv(CPU_DIVCLK_CTRL2_RATIO_FULL1);
		reg &= 0xff00ffff;
		reg |= 0x00010000;
		write_cpu_clkdiv(CPU_DIVCLK_CTRL2_RATIO_FULL1, reg);
	}
	if (mp_ncpus > 3) {
		reg = read_cpu_clkdiv(CPU_DIVCLK_CTRL2_RATIO_FULL1);
		reg &= 0x00ffffff;
		reg |= 0x01000000;
		write_cpu_clkdiv(CPU_DIVCLK_CTRL2_RATIO_FULL1, reg);
	}

	reg = read_cpu_clkdiv(CPU_DIVCLK_CTRL0);
	reg |= ((0x1 << (mp_ncpus - 1)) - 1) << 21;
	write_cpu_clkdiv(CPU_DIVCLK_CTRL0, reg);
	reg = read_cpu_clkdiv(CPU_DIVCLK_CTRL0);
	reg |= 0x01000000;
	write_cpu_clkdiv(CPU_DIVCLK_CTRL0, reg);

	DELAY(100);
	reg &= ~(0xf << 21);
	write_cpu_clkdiv(CPU_DIVCLK_CTRL0, reg);
	DELAY(100);

	bus_space_write_4(fdtbus_bs_tag, MV_BASE, CPU_RESUME_CONTROL, 0);

	for (cpu_num = 1; cpu_num < mp_ncpus; cpu_num++)
		bus_space_write_4(fdtbus_bs_tag, CPU_PMU(cpu_num), CPU_PMU_BOOT,
		    pmap_kextract((vm_offset_t)mpentry));

	cpu_idcache_wbinv_all();

	for (cpu_num = 1; cpu_num < mp_ncpus; cpu_num++)
		bus_space_write_4(fdtbus_bs_tag, MP, MP_SW_RESET(cpu_num), 0);

	/* XXX: Temporary workaround for hangup after releasing AP's */
	wmb();
	DELAY(10);

	initialize_coherency_fabric();
}
Example #28
void
platform_mp_start_ap(void)
{
	uint32_t physaddr;
	vm_offset_t vaddr;
	uint32_t val;
	uint32_t start_mask;
	u_long cpu_resume_base;
	u_long nb_base;
	u_long cpu_resume_size;
	u_long nb_size;
	bus_addr_t cpu_resume_baddr;
	bus_addr_t nb_baddr;
	int a;

	if (alpine_get_cpu_resume_base(&cpu_resume_base, &cpu_resume_size))
		panic("Couldn't resolve cpu_resume_base address\n");

	if (alpine_get_nb_base(&nb_base, &nb_size))
		panic("Couldn't resolve_nb_base address\n");

	/* Proceed with start addresses for additional CPUs */
	if (bus_space_map(fdtbus_bs_tag, al_devmap_pa + cpu_resume_base,
	    cpu_resume_size, 0, &cpu_resume_baddr))
		panic("Couldn't map CPU-resume area");
	if (bus_space_map(fdtbus_bs_tag, al_devmap_pa + nb_base,
	    nb_size, 0, &nb_baddr))
		panic("Couldn't map NB-service area");

	/* Proceed with start addresses for additional CPUs */
	val = bus_space_read_4(fdtbus_bs_tag, cpu_resume_baddr,
	    AL_CPU_RESUME_WATERMARK_REG);
	if (((val & AL_CPU_RESUME_MAGIC_NUM_MASK) != AL_CPU_RESUME_MAGIC_NUM) ||
	    ((val & AL_CPU_RESUME_MIN_VER_MASK) < AL_CPU_RESUME_MIN_VER)) {
		panic("CPU-resume device is not compatible");
	}

	vaddr = (vm_offset_t)mpentry;
	physaddr = pmap_kextract(vaddr);

	for (a = 1; a < platform_mp_get_core_cnt(); a++) {
		/* Power up the core */
		bus_space_write_4(fdtbus_bs_tag, nb_baddr,
		    AL_NB_CONFIG_STATUS_PWR_CTRL(a), 0);
		mb();

		/* Enable resume */
		val = bus_space_read_4(fdtbus_bs_tag, cpu_resume_baddr,
		    AL_CPU_RESUME_PCPU_FLAGS(a));
		val &= ~AL_CPU_RESUME_FLG_PERCPU_DONT_RESUME;
		bus_space_write_4(fdtbus_bs_tag, cpu_resume_baddr,
		    AL_CPU_RESUME_PCPU_FLAGS(a), val);
		mb();

		/* Set resume physical address */
		bus_space_write_4(fdtbus_bs_tag, cpu_resume_baddr,
		    AL_CPU_RESUME_PCPU_RADDR_REG(a), physaddr);
		mb();
	}

	/* Release cores from reset */
	if (bus_space_map(fdtbus_bs_tag, al_devmap_pa + nb_base,
	    nb_size, 0, &nb_baddr))
		panic("Couldn't map NB-service area");

	start_mask = (1 << platform_mp_get_core_cnt()) - 1;

	/* Release cores from reset */
	val = bus_space_read_4(fdtbus_bs_tag, nb_baddr, AL_NB_INIT_CONTROL);
	val |= start_mask;
	bus_space_write_4(fdtbus_bs_tag, nb_baddr, AL_NB_INIT_CONTROL, val);
	dsb();

	bus_space_unmap(fdtbus_bs_tag, nb_baddr, nb_size);
	bus_space_unmap(fdtbus_bs_tag, cpu_resume_baddr, cpu_resume_size);
}
Example #29
void
platform_mp_start_ap(void)
{
	bus_space_handle_t scu;
	bus_space_handle_t src;

	uint32_t val;
	int i;

	if (bus_space_map(fdtbus_bs_tag, SCU_PHYSBASE, SCU_SIZE, 0, &scu) != 0)
		panic("Couldn't map the SCU\n");
	if (bus_space_map(fdtbus_bs_tag, SRC_PHYSBASE, SRC_SIZE, 0, &src) != 0)
		panic("Couldn't map the system reset controller (SRC)\n");

	/*
	 * Invalidate SCU cache tags.  The 0x0000ffff constant invalidates all
	 * ways on all cores 0-3.  Per the ARM docs, it's harmless to write to
	 * the bits for cores that are not present.
	 */
	bus_space_write_4(fdtbus_bs_tag, scu, SCU_INV_TAGS_REG, 0x0000ffff);

	/*
	 * Erratum ARM/MP: 764369 (problems with cache maintenance).
	 * Setting the "disable-migratory bit" in the undocumented SCU
	 * Diagnostic Control Register helps work around the problem.
	 */
	val = bus_space_read_4(fdtbus_bs_tag, scu, SCU_DIAG_CONTROL);
	bus_space_write_4(fdtbus_bs_tag, scu, SCU_DIAG_CONTROL, 
	    val | SCU_DIAG_DISABLE_MIGBIT);

	/*
	 * Enable the SCU, then clean the cache on this core.  After these two
	 * operations the cache tag ram in the SCU is coherent with the contents
	 * of the cache on this core.  The other cores aren't running yet so
	 * their caches can't contain valid data yet, but we've initialized
	 * their SCU tag ram above, so they will be coherent from startup.
	 */
	val = bus_space_read_4(fdtbus_bs_tag, scu, SCU_CONTROL_REG);
	bus_space_write_4(fdtbus_bs_tag, scu, SCU_CONTROL_REG, 
	    val | SCU_CONTROL_ENABLE);
	dcache_wbinv_poc_all();

	/*
	 * For each AP core, set the entry point address and argument registers,
	 * and set the core-enable and core-reset bits in the control register.
	 */
	val = bus_space_read_4(fdtbus_bs_tag, src, SRC_CONTROL_REG);
	for (i = 1; i < mp_ncpus; i++) {
		bus_space_write_4(fdtbus_bs_tag, src, SRC_GPR0_C1FUNC + 8*i,
		    pmap_kextract((vm_offset_t)mpentry));
		bus_space_write_4(fdtbus_bs_tag, src, SRC_GPR1_C1ARG + 8*i, 0);

		val |= ((1 << (SRC_CONTROL_C1ENA_SHIFT - 1 + i)) |
		    (1 << (SRC_CONTROL_C1RST_SHIFT - 1 + i)));
	}
	bus_space_write_4(fdtbus_bs_tag, src, SRC_CONTROL_REG, val);

	dsb();
	sev();

	bus_space_unmap(fdtbus_bs_tag, scu, SCU_SIZE);
	bus_space_unmap(fdtbus_bs_tag, src, SRC_SIZE);
}
Example #30
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    void *buf, bus_size_t buflen, struct thread *td, int flags,
    vm_offset_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
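The boundary clip above is easiest to follow with concrete numbers; a hedged worked example assuming a 64 KB boundary:

/*
 * Worked example of the boundary clip, assuming
 * dmat->boundary == 0x10000 (64 KB):
 *
 *	curaddr = 0x1fffc00
 *	bmask   = ~(bus_addr_t)(0x10000 - 1)	= 0xffff0000
 *	baddr   = (curaddr + 0x10000) & bmask	= 0x2000000
 *	sgsize  = min(sgsize, baddr - curaddr)	= 0x400
 *
 * so at most 0x400 bytes are taken, and the next segment starts
 * exactly on the 0x2000000 boundary.
 */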