Example 1
/* 
 * Convert a kernel virtual address to a Physical Address/Length List.
 */
alenlist_t
kvaddr_to_alenlist(alenlist_t alenlist, caddr_t kvaddr, size_t length, unsigned flags)
{
	alenaddr_t paddr;
	long offset;
	size_t piece_length;
	int created_alenlist;

	if (length <= 0)
		return(NULL);

	/* If caller supplied a List, use it.  Otherwise, allocate one. */
	if (alenlist == NULL) {
		alenlist = alenlist_create(0);
		created_alenlist = 1;
	} else {
		alenlist_clear(alenlist);
		created_alenlist = 0;
	}

	paddr = kvtophys(kvaddr);
	offset = poff(kvaddr);

	/* Handle first page */
	piece_length = min((size_t)(NBPP - offset), length);
	if (alenlist_append(alenlist, paddr, piece_length, flags) == ALENLIST_FAILURE)
		goto failure;
	length -= piece_length;
	kvaddr += piece_length;

	/* Handle middle pages */
	while (length >= NBPP) {
		paddr = kvtophys(kvaddr);
		if (alenlist_append(alenlist, paddr, NBPP, flags) == ALENLIST_FAILURE)
			goto failure;
		length -= NBPP;
		kvaddr += NBPP;
	}

	/* Handle last page */
	if (length) {
		ASSERT(length < NBPP);
		paddr = kvtophys(kvaddr);
		if (alenlist_append(alenlist, paddr, length, flags) == ALENLIST_FAILURE)
			goto failure;
	}

	alenlist_cursor_init(alenlist, 0, NULL);
	return(alenlist);

failure:
	if (created_alenlist)
		alenlist_destroy(alenlist);
	return(NULL);
}
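A minimal caller sketch, not part of the original file: passing NULL as the first argument asks kvaddr_to_alenlist() to allocate the list itself, and the caller then owns it and destroys it with alenlist_destroy() from the same alenlist API used above. The start_dma_for_buffer() wrapper name is invented for illustration.

static int
start_dma_for_buffer(caddr_t buf, size_t len, unsigned flags)
{
	/* Let kvaddr_to_alenlist() allocate and fill a fresh list. */
	alenlist_t alenlist = kvaddr_to_alenlist(NULL, buf, len, flags);

	if (alenlist == NULL)
		return -1;	/* zero length, or an append failed */

	/* ... walk the physical address/length pairs and program the device ... */

	alenlist_destroy(alenlist);
	return 0;
}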
Example 2
void board_fill_memory_map( amap_t *ram_map )
{
    extern char end[];

    addr_t uptokernel = kvtophys(&end);

    int len = 256*1024*1024; // Hardcode 256M of RAM
    assert( 0 == amap_modify( ram_map, uptokernel, len-uptokernel, MEM_MAP_HI_RAM) );

    assert( 0 == amap_modify( ram_map, kvtophys(BOARD_ISA_IO), BOARD_ISA_IO_LEN, MEM_MAP_DEV_MEM) );
}
Example 3
void
dmac3_start(struct dmac3_softc *sc, vaddr_t addr, int len, int direction)
{
	struct dmac3reg *reg = sc->sc_reg;
	paddr_t pa;
	vaddr_t start, end, v;
	volatile uint32_t *p;

	if (reg->csr & DMAC3_CSR_ENABLE)
		dmac3_reset(sc);

	start = mips_trunc_page(addr);
	end   = mips_round_page(addr + len);
	p = sc->sc_dmamap;
	for (v = start; v < end; v += PAGE_SIZE) {
		pa = kvtophys(v);
		mips_dcache_wbinv_range(MIPS_PHYS_TO_KSEG0(pa), PAGE_SIZE);
		*p++ = 0;
		*p++ = (pa >> PGSHIFT) | 0xc0000000;
	}
	*p++ = 0;
	*p++ = 0x003fffff;

	addr &= PGOFSET;
	addr += sc->sc_dmaaddr;

	reg->len = len;
	reg->addr = addr;
	reg->intr = DMAC3_INTR_EOPIE | DMAC3_INTR_INTEN;
	reg->csr = DMAC3_CSR_ENABLE | direction | BURST_MODE | APAD_MODE;
}
Example 4
static epte_t
epte_get(epte_t *page_table, un index, bool alloc_missing)
{
	epte_t epte = page_table[index];
	if (bit_test(epte, EPT_VALID) || !alloc_missing) {
		return epte;
	}

	epte_t *new_table = (epte_t *)page_alloc();
	if (!new_table) {
		return 0;
	}

	epte_t new_epte = mkepte(kvtophys(new_table), EPT_PERM_ALL);

	if (atomic_cx(&page_table[index], epte, new_epte) != epte) {
		page_free((un)new_table);
		epte = page_table[index];
	} else {
		epte = new_epte;
		iommu_flush_cache((un)new_table, VM_PAGE_SIZE);
		iommu_flush_cache((un)&page_table[index], sizeof(epte_t));
	}	

	return epte;
}
Example 5
int
booted_ni(device_t dev, void *aux)
{
	struct bi_attach_args *ba = aux;

	if (jmfr("ni", dev, BDEV_NI) || (kvtophys(ba->ba_ioh) != rpb.csrphy))
		return 0;

	return 1;
}
Example 6
int
ubtest(void *aux)
{
	paddr_t p;

	p = kvtophys(((struct uba_attach_args *)aux)->ua_ioh);
	if (rpb.csrphy != p)
		return 1;
	return 0;
}
Example 7
int
octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *sc, struct mbuf *m0,
    uint64_t *gbuf, int *rsegs)
{
	struct mbuf *m;
	int segs = 0;
	uint32_t laddr, rlen, nlen;

	for (m = m0; m != NULL; m = m->m_next) {

		if (__predict_false(m->m_len == 0))
			continue;

#if 0	
		OCTEON_ETH_KASSERT(((uint32_t)m->m_data & (PAGE_SIZE - 1))
		   == (kvtophys((vaddr_t)m->m_data) & (PAGE_SIZE - 1)));
#endif

		/*
		 * aligned 4k
		 */
		laddr = (uintptr_t)m->m_data & (PAGE_SIZE - 1);

		if (laddr + m->m_len > PAGE_SIZE) {
			/* XXX */
			rlen = PAGE_SIZE - laddr;
			nlen = m->m_len - rlen;
			*(gbuf + segs) = octeon_eth_send_makecmd_w1(rlen,
			    KVTOPHYS(m->m_data));
			segs++;
			if (segs > 63) {
				return 1;
			}
			/* XXX */
		} else {
			rlen = 0;
			nlen = m->m_len;
		}

		*(gbuf + segs) = octeon_eth_send_makecmd_w1(nlen,
		    KVTOPHYS((caddr_t)m->m_data + rlen));
		segs++;
		if (segs > 63) {
			return 1;
		}
	}

	OCTEON_ETH_KASSERT(m == NULL);

	*rsegs = segs;

	return 0;
}
Example 8
int ptab_alloc(oskit_addr_t *out_ptab_pa)
{
	void *ptab_va = lmm_alloc_page(&malloc_lmm, 0);
	if (ptab_va == 0)
		return -1;

	/* Clear it out to make sure all entries are invalid.  */
	memset(ptab_va, 0, 4096);

	*out_ptab_pa = kvtophys(ptab_va);
	return 0;
}
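A hedged usage sketch, not from the original source: because ptab_alloc() returns the table's physical address, the usual next step is to point a page-directory entry at it. pdir_map_slot() is an invented helper; 0x1 and 0x2 are the architectural x86 PDE present and writable bits, used here purely for illustration.

static int pdir_map_slot(oskit_addr_t *pdir_va, int pdi)
{
	oskit_addr_t ptab_pa;

	/* Grab a zeroed page table and get its physical address. */
	if (ptab_alloc(&ptab_pa) < 0)
		return -1;

	/* Point the directory slot at it, marked present and writable. */
	pdir_va[pdi] = ptab_pa | 0x1 | 0x2;
	return 0;
}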
Example 9
int dos_rename(const char *oldpath, const char *newpath)
{
	/* This function isn't fully atomic like on Unix,
	   but it's as close as I know how to do under DOS.  */

	struct trap_state ts;
	oskit_addr_t dos_buf_phys = (oskit_addr_t)kvtophys(dos_buf);
	int err;

	init_ts(&ts);

	/* Put the oldpath in the first half of dos_buf,
	   and the newpath in the second half.  */
	if ((strlen(oldpath)+1 > DOS_BUF_SIZE/2)
	    || (strlen(newpath)+1 > DOS_BUF_SIZE/2))
		{ errno = E2BIG; return -1; }
	strcpy(dos_buf, oldpath);
	strcpy(dos_buf+DOS_BUF_SIZE/2, newpath);

	/* Try once to rename the file.  */
	ts.trapno = 0x21;
	ts.eax = 0x5600;
	ts.v86_ds = dos_buf_phys >> 4;
	ts.edx = dos_buf_phys & 15;
	ts.v86_es = dos_buf_phys >> 4;
	ts.edi = (dos_buf_phys & 15) + DOS_BUF_SIZE/2;
	base_real_int(&ts);

	/* If that failed, delete newpath and then retry the rename.
	   We _hope_ the failure was because newpath already existed;
	   the DOS error codes I'm getting back seem to be bogus.  */
	if ((err = dos_check_err(&ts)) != 0)
	{
		ts.trapno = 0x21;
		ts.eax = 0x4100;
		ts.v86_ds = dos_buf_phys >> 4;
		ts.edx = (dos_buf_phys & 15) + DOS_BUF_SIZE/2;
		base_real_int(&ts);
		if ((err = dos_check_err(&ts)) != 0)
			return err;

		ts.trapno = 0x21;
		ts.eax = 0x5600;
		ts.v86_ds = dos_buf_phys >> 4;
		ts.edx = dos_buf_phys & 15;
		ts.v86_es = dos_buf_phys >> 4;
		ts.edi = (dos_buf_phys & 15) + DOS_BUF_SIZE/2;
		base_real_int(&ts);
		err = dos_check_err(&ts);
	}

	return err;
}
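The >> 4 and & 15 pair used to load DS and DX above is just the real-mode segment:offset encoding of a physical address below 1 MB: segment * 16 + offset recovers the linear address. A small worked illustration with a made-up address:

	oskit_addr_t phys = 0x23456;	/* any address below 0x100000 */
	unsigned seg = phys >> 4;	/* 0x2345 */
	unsigned off = phys & 15;	/* 0x6    */
	assert(seg * 16 + off == phys);	/* segment:offset round-trips */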
Example 10
int dos_read(dos_fd_t fd, void *buf, oskit_size_t size, oskit_size_t *out_actual)
{
	int err;
	int actual = 0;
	struct trap_state ts;
	oskit_addr_t dos_buf_phys = (oskit_addr_t)kvtophys(dos_buf);

	assert(dos_buf); assert(dos_buf_phys);
	assert(dos_buf_phys < 0x100000);

	init_ts(&ts);

	while (size > 0)
	{
		int little_size = size;
		int little_actual;

		if (little_size > DOS_BUF_SIZE)
			little_size = DOS_BUF_SIZE;

		ts.trapno = 0x21;
		ts.eax = 0x3f00;
		ts.ebx = fd;
		ts.ecx = little_size;
		ts.v86_ds = dos_buf_phys >> 4;
		ts.edx = dos_buf_phys & 15;
		base_real_int(&ts);
		if ((err = dos_check_err(&ts)) != 0)
			return err;
		little_actual = ts.eax & 0xffff;
		assert(little_actual <= little_size);

		/* XXX don't copy if buf is <1MB */
		memcpy(buf, dos_buf, little_actual);

		buf += little_actual;
		size -= little_actual;
		actual += little_actual;

		if (little_actual < little_size)
			break;
	}

	*out_actual = actual;
	return 0;
}
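A hedged mirror-image sketch, not taken from this listing: writing through the same low-memory bounce buffer would use DOS int 21h function AH=40h (write to handle) and copy into dos_buf before the real-mode call instead of out of it afterwards. init_ts(), base_real_int(), dos_check_err() and DOS_BUF_SIZE are assumed from the surrounding file; dos_write() itself is reconstructed here for illustration.

int dos_write(dos_fd_t fd, const void *buf, oskit_size_t size,
	      oskit_size_t *out_actual)
{
	int err;
	int actual = 0;
	struct trap_state ts;
	oskit_addr_t dos_buf_phys = (oskit_addr_t)kvtophys(dos_buf);

	init_ts(&ts);

	while (size > 0)
	{
		int little_size = size > DOS_BUF_SIZE ? DOS_BUF_SIZE : size;
		int little_actual;

		/* Bounce the next chunk through the <1MB buffer. */
		memcpy(dos_buf, buf, little_size);

		ts.trapno = 0x21;
		ts.eax = 0x4000;		/* AH=40h: write to handle */
		ts.ebx = fd;
		ts.ecx = little_size;
		ts.v86_ds = dos_buf_phys >> 4;
		ts.edx = dos_buf_phys & 15;
		base_real_int(&ts);
		if ((err = dos_check_err(&ts)) != 0)
			return err;
		little_actual = ts.eax & 0xffff;

		buf = (const char *)buf + little_actual;
		size -= little_actual;
		actual += little_actual;

		if (little_actual < little_size)
			break;
	}

	*out_actual = actual;
	return 0;
}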
Example 11
int dos_read(dos_fd_t fd, void *buf, vm_size_t size, vm_size_t *out_actual)
{
	int err;
	int actual = 0;
	struct real_call_data real_call_data;
	vm_offset_t dos_buf_phys = (vm_offset_t)kvtophys(dos_buf);

	assert(dos_buf); assert(dos_buf_phys);
	assert(dos_buf_phys < 0x100000);

	dos_init_rcd(&real_call_data);

	while (size > 0)
	{
		int little_size = size;
		int little_actual;

		if (little_size > DOS_BUF_SIZE)
			little_size = DOS_BUF_SIZE;

		real_call_data.eax = 0x3f00;
		real_call_data.ebx = fd;
		real_call_data.ecx = little_size;
		real_call_data.ds = dos_buf_phys >> 4;
		real_call_data.edx = dos_buf_phys & 15;
		real_int(0x21, &real_call_data);
		if ((err = dos_check_err(&real_call_data)) != 0)
			return err;
		little_actual = real_call_data.eax & 0xffff;
		assert(little_actual <= little_size);

		/* XXX don't copy if buf is <1MB */
		memcpy(buf, dos_buf, little_actual);

		buf += little_actual;
		size -= little_actual;
		actual += little_actual;

		if (little_actual < little_size)
			break;
	}

	*out_actual = actual;
	return 0;
}
Example 12
void
ept_invalidate_addr(paddr_t gpaddr)
{
	/* See Vol3B 24.3.3 */
	if (TRUNC_PAGE(gpaddr) == kvtophys(dma_test_page)) {
		kprintf("ept_invalidate_addr>gpaddr = "PFMT"\n",
			gpaddr);
	}

#ifdef HYP_PAE
	notify_all((nb_func_t)__ept_invalidate_addr,
		   (nb_arg_t)bits(gpaddr, 31, 0),
		   (nb_arg_t)bits(gpaddr, 63, 32));
#else
	notify_all((nb_func_t)__ept_invalidate_addr,
		   (nb_arg_t)gpaddr, (nb_arg_t)0);
#endif
}
Example 13
int
booted_ra(struct device *dev, void *aux)
{
	struct drive_attach_args *da = aux;
	struct mscp_softc *pdev = (void *)dev->dv_parent;
	paddr_t ioaddr;

	if (jmfr("ra", dev, BDEV_UDA))
		return 0;

	if (da->da_mp->mscp_unit != rpb.unit)
		return 0; /* Wrong unit number */

	ioaddr = kvtophys(pdev->mi_iph); /* Get phys addr of CSR */
	if (rpb.devtyp == BDEV_UDA && rpb.csrphy == ioaddr)
		return 1; /* Did match CSR */

	return 0;
}
Example 14
void
ept_table_free(epte_t *page_table, un level)
{
	if (level > 1) {
		kprintf("ept_table_free>level=%ld page_table=0x%lx/0x%lx\n",
			level, (un)page_table, kvtophys(page_table));
		epte_t *start = page_table;
		epte_t *finish = start + 512;
		for (epte_t *p = start; p < finish; p++) {
			if (bit_test(*p, EPT_VALID) &&
			    !bit_test(*p, EPT_2MB_PAGE)) {
				epte_t *table = epte_to_kv(*p);
				ept_table_free(table, level - 1);
				*p = 0;
			}
		}
	}

	page_free((un)page_table);
}
Example 15
// This is sample code
void
ept_table_walk(epte_t *page_table, un level)
{
	if (level > 1) {
		kprintf("ept_table_walk>level=%ld "
			"page_table=0x%lx/0x%lx\n",
			level, (un)page_table, kvtophys(page_table));
	}
	epte_t *start = page_table;
	epte_t *finish = start + 512;
	for (epte_t *p = start; p < finish; p++) {
		if (!bit_test(*p, EPT_VALID)) {
			continue;
		}
		if ((level > 1) && !bit_test(*p, EPT_2MB_PAGE)) {
			epte_t *table = epte_to_kv(*p);
			ept_table_walk(table, level - 1);
		} else {
			// function(*p);
		}
	}
}
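The commented-out function(*p) suggests the walk is meant to apply some operation to each valid leaf entry. A hedged variant with an explicit visitor callback (epte_visit_fn and ept_table_visit are invented names; bit_test(), epte_to_kv() and the EPT_* bits are assumed from the surrounding hypervisor code):

typedef void (*epte_visit_fn)(epte_t epte, un level);

static void
ept_table_visit(epte_t *page_table, un level, epte_visit_fn visit)
{
	epte_t *finish = page_table + 512;	/* 512 entries per table */

	for (epte_t *p = page_table; p < finish; p++) {
		if (!bit_test(*p, EPT_VALID)) {
			continue;
		}
		if ((level > 1) && !bit_test(*p, EPT_2MB_PAGE)) {
			/* Recurse into the next-level table. */
			ept_table_visit(epte_to_kv(*p), level - 1, visit);
		} else {
			/* Valid leaf mapping: hand it to the caller. */
			visit(*p, level);
		}
	}
}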
Example 16
static void
#ifdef HYP_PAE
__ept_invalidate_addr(vm_t *v, u32 gpaddr0, u32 gpaddr1)
{
	paddr_t gpaddr = ((paddr_t)gpaddr1 << 32) | gpaddr0;
#else
__ept_invalidate_addr(vm_t *v, paddr_t gpaddr)
{
#endif
	/* There is currently no option to invept to invalidate
	 * a particular page, so gpaddr is ignored */
	u64 eptp = vmread64(VMCE_EPT_PTR);
	un err = invept(INVEPT_TYPE_SINGLE, eptp);
	if (err) {
		kprintf("__ept_invalidate_addr>ERROR eptp 0x%llx\n", eptp);
		return;
	}
	if (TRUNC_PAGE(gpaddr) == kvtophys(dma_test_page)) {
	  kprintf("__ept_invalidate_addr>succeeded gpaddr ="PFMT"\n",
		  gpaddr);
	}
}
Example 17
File: adu.c Project: Prajna/mach
void simple_lock( long *l )
{
	int             nloops = 0;

	if (simple_lock_try(l))
		return;

	if (use_Simple_lock)
		Simple_lock(l);
	else
		while (!simple_lock_try(l)) {
			vm_offset_t     phys = kvtophys((vm_offset_t)l);

			/* flush Dcache every now and then */
			if ((nloops & 0x1f) == 0)
				alphacache_Dflush(phys);

			if (++nloops > 100000) {
				gimmeabreak();
				nloops = 0;
			}
		}
}
Example 18
int
booted_hp(device_t dev, void *aux)
{
	static int mbaaddr;

	/* Save last adapter address */
	if (jmfr("mba", dev, BDEV_HP) == 0) {
		struct sbi_attach_args *sa = aux;

		mbaaddr = kvtophys(sa->sa_ioh);
		return 0;
	}

	if (jmfr("hp", dev, BDEV_HP))
		return 0;

	if (((struct mba_attach_args *)aux)->ma_unit != rpb.unit)
		return 0;

	if (mbaaddr != rpb.adpphy)
		return 0;

	return 1;
}
Example 19
bool
vm_exit_ept(registers_t *regs)
{
	u64 gpaddr = vmread64(VMCE_GUEST_PHYS_ADDR); 
	un xq = vmread(VMCE_EXIT_QUALIFICATION);
	un gladdr = vmread(VMCE_GUEST_LINEAR_ADDR);

	bool dump = false;

	if (TRUNC_PAGE(gpaddr) == kvtophys(dma_test_page)) {
		dump = true;
	}

	epte_t *p = epte_ptr_get(gpaddr, false);
	if ((p == NULL) || (!bit_test(*p, EPT_VALID))) {
		u8 mt = mtrr_type(gpaddr);
		if (mt != MT_UC) {
			kprintf("vm_exit_ept>attempted access "
				"to unmapped, non IO page 0x%llx, MT %d\n",
				gpaddr, mt);
			goto protection_violation;
		}

		/* This is a MMIO page that hasn't yet
		 * been set up.
		 */
		epte_t epte = mkepte(gpaddr, EPT_PERM_RW|(mt << EPT_MT_SHIFT));

		if (p == NULL) {
			p = epte_ptr_get(gpaddr, true);
		}
		if (p == NULL) {
			kprintf("vm_exit_ept>page_table alloc failed\n");
			vmx_clear_exec_cpu2(VMEXEC_CPU2_ENABLE_EPT);
			return false;
		}
		epte_t old_epte = *p;
		epte_t result = atomic_cx(p, old_epte, epte);
		if (result == old_epte && result != epte) {
			/* Update succeeded, so flush needed */
			iommu_flush_cache((un)p, sizeof(epte_t));
		}
		return true;
	}

	epte_t old_epte = *p;
	assert(bit_test(old_epte, EPT_VALID));

	if (bit_test(old_epte, EPT_HYP)) {

		kprintf("vm_exit_ept>attempted access "
			"to hyp page 0x%llx\n", gpaddr);
		goto protection_violation;
	}

	if (!bit_test(old_epte, EPT_GUEST)) {
		kprintf("vm_exit_ept>attempted access "
			"to non-guest page 0x%llx\n", gpaddr);
		goto protection_violation;
	}

	if (vm_nx_is_enabled()) {
		return vm_exit_ept_nx(regs, gpaddr, p, xq);
	}

	if (bit_test(xq, EPT_XQ_ACCESS_EXECUTE) &&
	    !bit_test(xq, EPT_XQ_PERM_EXECUTE)) {
		epte_t epte = *p;
		un prot = epte & EPT_PERM_ALL;
		bit_set(prot, EPT_X);
		if (vm_nx_is_enabled()) {
			/* Not yet.  Need a hook to set W again when no longer
			 * executable.  Catching write fault won't work if the
			 * first write is a DMA write */
			bit_clear(prot, EPT_W);
		}
		ret_t ret = vm_protect_page(gpaddr, prot, VMPF_FLUSH);
		if (ret) {
			kprintf("vm_exit_ept>vm_protect_page(0x%llx, 0x%lx) "
				"returned %ld\n",
				gpaddr, prot, ret);
			return false;
		}
		return true;
	} else if (bit_test(xq, EPT_XQ_ACCESS_WRITE) &&
	    !bit_test(xq, EPT_XQ_PERM_WRITE)) {
#ifdef NOTDEF
		epte_t epte = *p;
		static un count = 0;
		un n;
		if (((n = atomic_inc(&count)) < 5) || (n % 100 == 0)) {
			kprintf("vm_exit_ept>write attempt %ld "
				"but no write permission\n", n);
			kprintf("vm_exit_ept>epte = 0x%llx\n", epte);
			dump = true;
		}
#endif
		ret_t ret = vm_protect_page(gpaddr, EPT_PERM_RW, VMPF_FLUSH);
		if (ret) {
			kprintf("vm_exit_ept>vm_protect_page(0x%llx, 0x%lx) "
				"returned %ld\n",
				gpaddr, (un)EPT_PERM_RW, ret);
			return false;
		}
		return true;
	}

protection_violation:
#ifdef NOTDEF
	vmx_clear_exec_cpu2(VMEXEC_CPU2_ENABLE_EPT);
#else
	vm_entry_inject_exception(VEC_GP, 0);
#endif
	dump = true;

	if (dump) {
		kprintf("vm_exit_ept>access type %s%s%s\n",
			bit_test(xq, EPT_XQ_ACCESS_READ) ? "R" : "",
			bit_test(xq, EPT_XQ_ACCESS_WRITE) ? "W" : "",
			bit_test(xq, EPT_XQ_ACCESS_EXECUTE) ? "X" : "");
		kprintf("vm_exit_ept>permission  %s%s%s\n",
			bit_test(xq, EPT_XQ_PERM_READ) ? "R" : "",
			bit_test(xq, EPT_XQ_PERM_WRITE) ? "W" : "",
			bit_test(xq, EPT_XQ_PERM_EXECUTE) ? "X" : "");

		if (bit_test(xq, EPT_XQ_GUEST_LADDR_VALID)) {
			kprintf("vm_exit_ept>guest linear address 0x%lx\n",
				gladdr);
			if (bit_test(xq, EPT_XQ_NOT_PT_ACCESS)) {
				kprintf("vm_exit_ept>"
					"access to guest physical address\n");

			} else {
				kprintf("vm_exit_ept>access to page table\n");
			}
		}
		kprintf("vm_exit_ept>guest physical address 0x%llx\n", gpaddr);
	}

	return true;
}
Example 20
void mb_util_lmm (mbinfo_t *mbi, lmm_t *lmm)
{
	vm_offset_t min;
	extern char _start[], end[];

	/* Memory regions to skip.  */
	vm_offset_t cmdline_start_pa = mbi->flags & MULTIBOOT_CMDLINE
		? mbi->cmdline : 0;
	vm_offset_t cmdline_end_pa = cmdline_start_pa
		? cmdline_start_pa+strlen((char*)phystokv(cmdline_start_pa))+1
		: 0;

	/* Initialize the base memory allocator
	   according to the PC's physical memory regions.  */
	lmm_init(lmm);

    /* Do the x86 init dance to build our initial regions */
    lmm_add_region(&malloc_lmm, &reg1mb,
            (void*)phystokv(0x00000000), 0x00100000,
            LMMF_1MB | LMMF_16MB, LMM_PRI_1MB);
    lmm_add_region(&malloc_lmm, &reg16mb,
            (void*)phystokv(0x00100000), 0x00f00000,
            LMMF_16MB, LMM_PRI_16MB);
    lmm_add_region(&malloc_lmm, &reghigh,
            (void*)phystokv(0x01000000), 0xfeffffff,
            0, LMM_PRI_HIGH);

	/* Add to the free list all the memory the boot loader told us about,
	   carefully avoiding the areas occupied by boot information.
	   as well as our own executable code, data, and bss.
	   Start at the end of the BIOS data area.  */
	min = 0x500;
	do
	{
		vm_offset_t max = 0xffffffff;

		/* Skip the I/O and ROM area.  */
		skip(mbi->mem_lower * 1024, 0x100000);

		/* Stop at the end of upper memory.  */
		skip(0x100000 + mbi->mem_upper * 1024, 0xffffffff);

		/* Skip our own text, data, and bss.  */
		skip(kvtophys(_start), kvtophys(end));

		/* FIXME: temporary state of affairs */
		extern char __kimg_start[];
		skip(kvtophys(__kimg_start), kvtophys(end));

		/* Skip the important stuff the bootloader passed to us.  */
		skip(cmdline_start_pa, cmdline_end_pa);
		if ((mbi->flags & MULTIBOOT_MODS)
		    && (mbi->mods_count > 0))
		{
			struct multiboot_module *m = (struct multiboot_module*)
				phystokv(mbi->mods_addr);
			unsigned i;

			skip(mbi->mods_addr,
			     mbi->mods_addr +
			     mbi->mods_count * sizeof(*m));
			for (i = 0; i < mbi->mods_count; i++)
			{
				if (m[i].string != 0)
				{
					char *s = (char*)phystokv(m[i].string);
					unsigned len = strlen(s);
					skip(m[i].string, m[i].string+len+1);
				}
				skip(m[i].mod_start, m[i].mod_end);
			}
		}

		/* We actually found a contiguous memory block
		   that doesn't conflict with anything else!  Whew!
		   Add it to the free list.  */
		lmm_add_free(&malloc_lmm, (void *) min, max - min);

		/* Continue searching just past the end of this region.  */
		min = max;

		/* The skip() macro jumps to this label
		   to restart with a different (higher) min address.  */
		retry:;
	}
	while (min < 0xffffffff);
}
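Both this function and base_multiboot_init_mem() below rely on a skip() macro that the listing does not show. Reconstructed roughly from the oskit/GNU Mach sources (an assumption, not a verbatim copy): it clips the candidate region [min, max) around each reserved hole, and when min itself has to move it jumps back to the retry: label so all holes are re-tested against the new start address.

/*
 * Sketch of the skip() helper assumed above: if the current candidate
 * region [min, max) overlaps the hole [hole_min, hole_max), either clip
 * max down to the start of the hole, or push min past the hole and
 * restart the loop body.
 */
#define skip(hole_min, hole_max)					\
	if ((max > (vm_offset_t)(hole_min)) &&				\
	    (min < (vm_offset_t)(hole_max)))				\
	{								\
		if (min < (vm_offset_t)(hole_min))			\
			max = (vm_offset_t)(hole_min);			\
		else							\
		{							\
			min = (vm_offset_t)(hole_max);			\
			goto retry;					\
		}							\
	}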
Example 21
/*
 * Return the physical address
 */
oskit_addr_t
osenv_mem_get_phys(oskit_addr_t addr)
{
	return ((oskit_addr_t)kvtophys(addr));
}
Example 22
/*
 * Utility function to load a linear buffer.  segp contains the starting
 * segment on entrance, and the ending segment on exit. first indicates
 * if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map,
    void *buf, bus_size_t buflen, struct vmspace *vm, int flags,
    int *segp, bool first)
{
	bus_size_t sgsize;
	paddr_t baddr, curaddr, lastaddr;
	vaddr_t vaddr = (vaddr_t)buf, lastvaddr;
	int seg = *segp;
	bus_dma_segment_t *ds = &map->dm_segs[seg];
	bus_dma_segment_t * const eds = &map->dm_segs[map->_dm_segcnt];
	const bool d_cache_coherent =
	    (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT) != 0;

	lastaddr = ds->ds_addr + ds->ds_len;
	lastvaddr = ds->_ds_vaddr + ds->ds_len;
	const bus_size_t bmask = ~(map->_dm_boundary - 1);

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm_map_pmap(&vm->vm_map), vaddr,
			    &curaddr);
		else
			curaddr = kvtophys(vaddr);

		/*
		 * If we're beyond the current DMA window, indicate
		 * that and try to fall back onto something else.
		 */
		if (curaddr < t->_bounce_alloc_lo
		    || (t->_bounce_alloc_hi != 0
			&& curaddr >= t->_bounce_alloc_hi))
			return (EINVAL);
#if BUS_DMA_DEBUG
		printf("dma: addr %#"PRIxPADDR" -> %#"PRIxPADDR"\n", curaddr,
		    (curaddr - t->_bounce_alloc_lo) + t->_wbase);
#endif
		curaddr = (curaddr - t->_bounce_alloc_lo) + t->_wbase;

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((uintptr_t)vaddr & PGOFSET);
		if (sgsize > buflen) {
			sgsize = buflen;
		}
		if (sgsize > map->dm_maxsegsz) {
			sgsize = map->dm_maxsegsz;
		}

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > baddr - curaddr) {
				sgsize = baddr - curaddr;
			}
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			ds->ds_addr = curaddr;
			ds->ds_len = sgsize;
			ds->_ds_vaddr = vaddr;
			first = false;
		} else if (curaddr == lastaddr
		    && (d_cache_coherent || lastvaddr == vaddr)
		    && ds->ds_len + sgsize <= map->dm_maxsegsz
		    && (map->_dm_boundary == 0
			|| ((ds->ds_addr ^ curaddr) & bmask) == 0)) {
			ds->ds_len += sgsize;
		} else {
			if (++ds >= eds)
				break;
			ds->ds_addr = curaddr;
			ds->ds_len = sgsize;
			ds->_ds_vaddr = vaddr;
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
		lastvaddr = vaddr;
	}

	*segp = ds - map->dm_segs;

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		/*
		 * If there is a chained window, we will automatically
		 * fall back to it.
		 */
		return (EFBIG);		/* XXX better return value here? */
	}

	return (0);
}
Example 23
File: bus.c Project: ryo/netbsd-src
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dmamap_t map, void *buf, bus_size_t buflen,
    struct vmspace *vm, int flags, vaddr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	paddr_t pa;
	size_t seg;

	lastaddr = *lastaddrp;
	bmask  = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm_map_pmap(&vm->vm_map),
			    vaddr, &pa);
		else
			pa = kvtophys(vaddr);
		curaddr = pa;

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_vaddr = vaddr;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_vaddr = vaddr;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return EFBIG;		/* XXX Better return value here? */

	return 0;
}
Example 24
void bootstrap_create(void)
{
  int compat;
  int n = 0;
#ifdef	MACH_XEN
  struct multiboot_module *bmods = ((struct multiboot_module *)
                                   boot_info.mod_start);
  if (bmods)
    for (n = 0; bmods[n].mod_start; n++) {
      bmods[n].mod_start = kvtophys(bmods[n].mod_start + (vm_offset_t) bmods);
      bmods[n].mod_end = kvtophys(bmods[n].mod_end + (vm_offset_t) bmods);
      bmods[n].string = kvtophys(bmods[n].string + (vm_offset_t) bmods);
    }
  boot_info.mods_count = n;
  boot_info.flags |= MULTIBOOT_MODS;
#else	/* MACH_XEN */
  struct multiboot_module *bmods = ((struct multiboot_module *)
				    phystokv(boot_info.mods_addr));

#endif	/* MACH_XEN */
  if (!(boot_info.flags & MULTIBOOT_MODS)
      || (boot_info.mods_count == 0))
    panic ("No bootstrap code loaded with the kernel!");

  compat = boot_info.mods_count == 1;
  if (compat)
    {
      char *p = strchr((char*)phystokv(bmods[0].string), ' ');
      if (p != 0)
	do
	  ++p;
	while (*p == ' ' || *p == '\n');
      compat = p == 0 || *p == '\0';
    }

  if (compat)
    {
      printf("Loading single multiboot module in compat mode: %s\n",
	     (char*)phystokv(bmods[0].string));
      bootstrap_exec_compat(&bmods[0]);
    }
  else
    {
      int i, losers;

      /* Initialize boot script variables.  We leak these send rights.  */
      losers = boot_script_set_variable
	("host-port", VAL_PORT,
	 (long) realhost.host_priv_self);
      if (losers)
	panic ("cannot set boot-script variable host-port: %s",
	       boot_script_error_string (losers));
      losers = boot_script_set_variable
	("device-port", VAL_PORT,
	 (long) master_device_port);
      if (losers)
	panic ("cannot set boot-script variable device-port: %s",
	       boot_script_error_string (losers));

      losers = boot_script_set_variable ("kernel-command-line", VAL_STR,
					 (long) kernel_cmdline);
      if (losers)
	panic ("cannot set boot-script variable %s: %s",
	       "kernel-command-line", boot_script_error_string (losers));

      {
	/* Set the same boot script variables that the old Hurd's
	   serverboot did, so an old Hurd and boot script previously
	   used with serverboot can be used directly with this kernel.  */

	char *flag_string = alloca(1024);
	char *root_string = alloca(1024);

	/*
	 * Get the (compatibility) boot flags and root name strings.
	 */
	get_compat_strings(flag_string, root_string);

	losers = boot_script_set_variable ("boot-args", VAL_STR,
					   (long) flag_string);
	if (losers)
	  panic ("cannot set boot-script variable %s: %s",
		 "boot-args", boot_script_error_string (losers));
	losers = boot_script_set_variable ("root-device", VAL_STR,
					   (long) root_string);
	if (losers)
	  panic ("cannot set boot-script variable %s: %s",
		 "root-device", boot_script_error_string (losers));
      }

#if OSKIT_MACH
      {
	/* The oskit's "environ" array contains all the words from
	   the multiboot command line that looked like VAR=VAL.
	   We set each of these as boot-script variables, which
	   can be used for things like ${root}.  */

	extern char **environ;
	char **ep;
	for (ep = environ; *ep != 0; ++ep)
	  {
	    size_t len = strlen (*ep) + 1;
	    char *var = memcpy (alloca (len), *ep, len);
	    char *val = strchr (var, '=');
	    *val++ = '\0';
	    losers = boot_script_set_variable (var, VAL_STR, (long) val);
	    if (losers)
	      panic ("cannot set boot-script variable %s: %s",
		     var, boot_script_error_string (losers));
	  }
      }
#else  /* GNUmach, not oskit-mach */
      {
	/* Turn each `FOO=BAR' word in the command line into a boot script
	   variable ${FOO} with value BAR.  This matches what we get from
	   oskit's environ in the oskit-mach case (above).  */

	int len = strlen (kernel_cmdline) + 1;
	char *s = memcpy (alloca (len), kernel_cmdline, len);
	char *word;
	while ((word = strsep (&s, " \t")) != 0)
	  {
	    char *eq = strchr (word, '=');
	    if (eq == 0)
	      continue;
	    *eq++ = '\0';
	    losers = boot_script_set_variable (word, VAL_STR, (long) eq);
	    if (losers)
	      panic ("cannot set boot-script variable %s: %s",
		     word, boot_script_error_string (losers));
	  }
      }
#endif

      for (i = 0; i < boot_info.mods_count; ++i)
	{
	  int err;
	  char *line = (char*)phystokv(bmods[i].string);
	  printf ("module %d: %s\n", i, line);
	  err = boot_script_parse_line (&bmods[i], line);
	  if (err)
	    {
	      printf ("\n\tERROR: %s", boot_script_error_string (err));
	      ++losers;
	    }
	}
      printf ("%d multiboot modules\n", i);
      if (losers)
	panic ("%d of %d boot script commands could not be parsed",
	       losers, boot_info.mods_count);
      losers = boot_script_exec ();
      if (losers)
	panic ("ERROR in executing boot script: %s",
	       boot_script_error_string (losers));
    }
  /* XXX we could free the memory used
     by the boot loader's descriptors and such.  */
  for (n = 0; n < boot_info.mods_count; n++)
    vm_page_create(bmods[n].mod_start, bmods[n].mod_end);
}
Example 25
void base_multiboot_init_mem(void)
{
	unsigned int min;
	extern char _start_of_kernel[], end[];

	/* Memory regions to skip.  */
	unsigned int cmdline_start_pa = boot_info.flags & MULTIBOOT_CMDLINE
		? boot_info.cmdline : 0;
	unsigned int cmdline_end_pa = cmdline_start_pa
		? cmdline_start_pa+strlen((char*)phystokv(cmdline_start_pa))+1
		: 0;

	/* Initialize the base memory allocator
	   according to the PC's physical memory regions.  */
	phys_lmm_init();

	/* Add to the free list all the memory the boot loader told us about,
	   carefully avoiding the areas occupied by boot information.
	   as well as our own executable code, data, and bss.
	   Start at the end of the BIOS data area.  */
	min = 0x500;
	do
	{
		unsigned int max = 0xffffffff;

		/* Skip the I/O and ROM area.  */
		skip(boot_info.mem_lower * 1024, 0x100000);

		/* Stop at the end of upper memory.  */
		skip(0x100000 + boot_info.mem_upper * 1024, 0xffffffff);

		/* Skip our own text, data, and bss.  */
		skip(kvtophys(_start_of_kernel), kvtophys(end));

		/* Skip the important stuff the bootloader passed to us.  */
		skip(cmdline_start_pa, cmdline_end_pa);
		if ((boot_info.flags & MULTIBOOT_MODS)
		    && (boot_info.mods_count > 0))
		{
			struct multiboot_module *m = (struct multiboot_module*)
				phystokv(boot_info.mods_addr);
			unsigned i;

			skip(boot_info.mods_addr,
			     boot_info.mods_addr +
			     boot_info.mods_count * sizeof(*m));
			for (i = 0; i < boot_info.mods_count; i++)
			{
				if (m[i].string != 0)
				{
					char *s = (char*)phystokv(m[i].string);
					unsigned len = strlen(s);
					skip(m[i].string, m[i].string+len+1);
				}
				skip(m[i].mod_start, m[i].mod_end);
			}
		}

		/* We actually found a contiguous memory block
		   that doesn't conflict with anything else!  Whew!
		   Add it to the free list.  */
		phys_lmm_add(min, max - min);

		/* Continue searching just past the end of this region.  */
		min = max;

		/* The skip() macro jumps to this label
		   to restart with a different (higher) min address.  */
		retry:
		min = min;
	}
	while (min < 0xffffffff);
}
Example 26
/*
 * Get the next I/O request started
 */
static void
icapstart(struct icap_softc *sc)
{
	paddr_t phys, phys2;
	vaddr_t virt;
	size_t count;
	uint32_t fl;
	struct buf *bp = sc->sc_bp;

	DEBUG_PRINT(("icapstart %p %p\n",sc,bp), DEBUG_FUNCS);

    /* Were we idle?
     */
 recheck:
    if (bp == NULL) {

        /* Yes, get the next request if any
         */
        bp = bufq_get(sc->sc_buflist);
        DEBUG_PRINT(("icapnext: %p\n",bp), DEBUG_XFERS);
        if (bp == NULL)
            return;
    }

    /* Done with this request?
     */
    if ((bp->b_resid == 0) || bp->b_error) {

        /* Yes, complete and move to next, if any
         */
        sc->sc_bp = NULL;
        biodone(bp);
        DEBUG_PRINT(("icapdone %p\n",bp), DEBUG_XFERS);
        bp = NULL;
        goto recheck;
    }

    /* If new request init the xfer info
     */
    if (sc->sc_bp == NULL) {
        sc->sc_bp = bp;
        sc->sc_data = bp->b_data;
        sc->sc_count = bp->b_resid;
    }

    /* Loop filling as many buffers as will fit in the FIFO
     */
    fl = (bp->b_flags & B_READ) ? ICAPS_F_RECV : ICAPS_F_XMIT;
    for (;;) {

        /* Make sure there's still room in the FIFO, no errors.
         */
        if (sc->sc_dp->Control & (ICAPC_IF_FULL|ICAPC_ERROR))
            break;

        /* How much data do we xfer and where
         */
        virt = (vaddr_t)sc->sc_data;
        phys = kvtophys(virt);
        count = round_page(virt) - virt;
        if (count == 0) count = PAGE_SIZE;/* could(will) be aligned */

        /* How much of it is contiguous
         */
        while (count < sc->sc_count) {
            phys2 = kvtophys(virt + count);
            if (phys2 != (phys + count)) {

                /* No longer contig, ship it
                 */
                break;
            }
            count += PAGE_SIZE;
        }

        /* Trim if we went too far 
         */
        if (count > sc->sc_count)
            count = sc->sc_count;

        /* Ship it
         */
        DEBUG_PRINT(("icapship %" PRIxPADDR " %d\n",phys,count), DEBUG_XFERS);
        sc->sc_dp->SizeAndFlags = fl | count;
        sc->sc_dp->BufferAddressHi32 = 0; /* BUGBUG 64bit */
        sc->sc_dp->BufferAddressLo32 = phys; /* this pushes the fifo */

        /* Adjust pointers and continue 
         */
        sc->sc_data  += count;
        sc->sc_count -= count;

        if (sc->sc_count <= 0)
            break;
    }
}
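The loop that probes forward with kvtophys() until the physical addresses stop being consecutive can be factored into a small helper. A hedged sketch of that factoring (contig_run_len() is an invented name; vaddr_t, paddr_t, round_page(), PAGE_SIZE and kvtophys() are the same ones the snippet already uses):

static size_t
contig_run_len(vaddr_t va, size_t maxlen)
{
	paddr_t pa = kvtophys(va);
	size_t len = round_page(va) - va;	/* rest of the first page */

	if (len == 0)
		len = PAGE_SIZE;		/* va was already page aligned */

	/* Extend while the next page is physically adjacent. */
	while (len < maxlen && kvtophys(va + len) == pa + len)
		len += PAGE_SIZE;

	return (len > maxlen) ? maxlen : len;
}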