Example 1
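/*
 *	bcopy_phys: copy bytes between two physical addresses through
 *	temporary per-CPU mapping windows.  Source and destination must
 *	each lie within a single page; otherwise the routine panics.
 */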
void
bcopy_phys(
	   addr64_t src64,
	   addr64_t dst64,
	   vm_size_t bytes)
{
        mapwindow_t *src_map, *dst_map;

	/* ensure we stay within a page */
	if ( ((((uint32_t)src64 & (NBPG-1)) + bytes) > NBPG) || ((((uint32_t)dst64 & (NBPG-1)) + bytes) > NBPG) ) {
	        panic("bcopy_phys alignment");
	}
	mp_disable_preemption();

	src_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | ((pmap_paddr_t)src64 & PG_FRAME) | INTEL_PTE_REF));
	dst_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)dst64 & PG_FRAME) |
						  INTEL_PTE_REF | INTEL_PTE_MOD));

	bcopy((void *) ((uintptr_t)src_map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK)),
	      (void *) ((uintptr_t)dst_map->prv_CADDR | ((uint32_t)dst64 & INTEL_OFFMASK)), bytes);

	pmap_put_mapwindow(src_map);
	pmap_put_mapwindow(dst_map);

	mp_enable_preemption();
}
Example 2
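/*
 *	ml_phys_write_data: write a 1-, 2- or 4-byte (default) value to
 *	the given physical address through a writable mapping window.
 */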
static void
ml_phys_write_data(pmap_paddr_t paddr, unsigned long data, int size)
{
        mapwindow_t *map;

	mp_disable_preemption();

	map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) | 
					  INTEL_PTE_REF | INTEL_PTE_MOD));

        switch (size) {
        case 1:
	    *(unsigned char *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = (unsigned char)data;
            break;
        case 2:
	    *(unsigned short *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = (unsigned short)data;
            break;
        case 4:
        default:
	    *(unsigned int *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = (unsigned int)data;
            break;
        }
        pmap_put_mapwindow(map);

	mp_enable_preemption();
}
Example 3
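/*
 *	ml_phys_read_data: read a 1-, 2- or 4-byte (default) value from
 *	the given physical address through a read-only mapping window.
 */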
static unsigned int
ml_phys_read_data(pmap_paddr_t paddr, int size )
{
        mapwindow_t *map;
	unsigned int result;

	mp_disable_preemption();

	map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF));

        unsigned char s1;
        unsigned short s2;

        switch (size) {
        case 1:
            s1 = *(unsigned char *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));
            result = s1;
            break;
        case 2:
            s2 = *(unsigned short *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));
            result = s2;
            break;
        case 4:
        default:
            result = *(unsigned int *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));
            break;
        }
        pmap_put_mapwindow(map);

	mp_enable_preemption();

        return result;
}
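A minimal usage sketch, not from the original listing: since ml_phys_read_data() and ml_phys_write_data() are static, a caller in the same file could perform a 4-byte read-modify-write of a physical location as shown below (phys_set_bit0 and reg_pa are hypothetical names).

/* Hypothetical illustration only: set bit 0 of the 32-bit word at
 * physical address reg_pa using the helpers shown above. */
static void
phys_set_bit0(pmap_paddr_t reg_pa)
{
	unsigned int v;

	v = ml_phys_read_data(reg_pa, 4);	/* 4-byte read  */
	ml_phys_write_data(reg_pa, v | 1, 4);	/* 4-byte write */
}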
Example 4
/*
 *	copy_from_phys(src_addr_p, dst_addr_v, count)
 *
 *	Copy physical memory to virtual memory.  The virtual memory
 *	is assumed to be present (e.g. the buffer pool).
 */
void
copy_from_phys(
	vm_offset_t 	src_addr_p, 
	vm_offset_t 	dst_addr_v,
	int 		count)
{
	vm_offset_t src_addr_v;
	pmap_mapwindow_t *src_map;
	boolean_t mapped = src_addr_p >= phys_last_addr;
	assert(src_addr_p != vm_page_fictitious_addr);
	assert(pa_to_pte(src_addr_p + count-1) == pa_to_pte(src_addr_p));

	if (mapped)
	{
		src_map = pmap_get_mapwindow(INTEL_PTE_R(src_addr_p));
		src_addr_v = src_map->vaddr;
	}
	else
		src_addr_v = phystokv(src_addr_p);

	memcpy((void *)dst_addr_v, (void *)src_addr_v, count);

	if (mapped)
		pmap_put_mapwindow(src_map);
}
Example 5
/*
 *	pmap_copy_page copies the specified (machine independent) pages.
 */
void
pmap_copy_page(
	vm_offset_t src, 
	vm_offset_t dst)
{
	vm_offset_t src_addr_v, dst_addr_v;
	pmap_mapwindow_t *src_map = NULL;
	pmap_mapwindow_t *dst_map;
	boolean_t src_mapped = src >= phys_last_addr;
	boolean_t dst_mapped = dst >= phys_last_addr;
	assert(src != vm_page_fictitious_addr);
	assert(dst != vm_page_fictitious_addr);

	if (src_mapped)
	{
		src_map = pmap_get_mapwindow(INTEL_PTE_R(src));
		src_addr_v = src_map->vaddr;
	}
	else
		src_addr_v = phystokv(src);

	if (dst_mapped)
	{
		dst_map = pmap_get_mapwindow(INTEL_PTE_W(dst));
		dst_addr_v = dst_map->vaddr;
	}
	else
		dst_addr_v = phystokv(dst);

	memcpy((void *) dst_addr_v, (void *) src_addr_v, PAGE_SIZE);

	if (src_mapped)
		pmap_put_mapwindow(src_map);
	if (dst_mapped)
		pmap_put_mapwindow(dst_map);
}
Example 6
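/*
 *	ml_phys_write_long_long: 64-bit variant of the physical write,
 *	storing an 8-byte value through a writable mapping window.
 */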
static void
ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data)
{
        mapwindow_t *map;

	mp_disable_preemption();

	map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | (paddr & PG_FRAME) | 
					      INTEL_PTE_REF | INTEL_PTE_MOD));

	*(unsigned long long *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK)) = data;

        pmap_put_mapwindow(map);

	mp_enable_preemption();
}
Example 7
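/*
 *	bzero_phys: zero a range of physical memory through a writable
 *	mapping window.  Only the page containing src64 is mapped, so
 *	the range must not cross a page boundary.
 */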
void
bzero_phys(
	   addr64_t src64,
	   uint32_t bytes)
{
        mapwindow_t *map;

        mp_disable_preemption();

	map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)src64 & PG_FRAME) | INTEL_PTE_REF | INTEL_PTE_MOD));

	bzero((void *)((uintptr_t)map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK)), bytes);

	pmap_put_mapwindow(map);

	mp_enable_preemption();
}
Example 8
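/*
 *	ml_phys_read_long_long: 64-bit variant of the physical read,
 *	loading an 8-byte value through a mapping window.
 */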
static unsigned long long
ml_phys_read_long_long(pmap_paddr_t paddr )
{
        mapwindow_t *map;
	unsigned long long result;

	mp_disable_preemption();

	map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | (paddr & PG_FRAME) | INTEL_PTE_REF));

	result = *(unsigned long long *)((uintptr_t)map->prv_CADDR | ((uint32_t)paddr & INTEL_OFFMASK));

        pmap_put_mapwindow(map);

	mp_enable_preemption();

        return result;
}
Example 9
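/*
 *	fillPage: fill the entire physical page given by page number pa
 *	with the 32-bit pattern 'fill' through a writable mapping window.
 */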
void fillPage(ppnum_t pa, unsigned int fill)
{
        mapwindow_t *map;
	pmap_paddr_t src;
	int i;
	int cnt = PAGE_SIZE/sizeof(unsigned int);
	unsigned int *addr;

	mp_disable_preemption();

	src = i386_ptob(pa);
	map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) | 
					      INTEL_PTE_REF | INTEL_PTE_MOD));

	for (i = 0, addr = (unsigned int *)map->prv_CADDR; i < cnt ; i++ )
	        *addr++ = fill;

	pmap_put_mapwindow(map);

	mp_enable_preemption();
}
Example 10
/*
 *	pmap_zero_page zeros the specified (machine independent) page.
 */
void
pmap_zero_page(vm_offset_t p)
{
	assert(p != vm_page_fictitious_addr);
	vm_offset_t v;
	pmap_mapwindow_t *map;
	boolean_t mapped = p >= phys_last_addr;

	if (mapped)
	{
		map = pmap_get_mapwindow(INTEL_PTE_W(p));
		v = map->vaddr;
	}
	else
		v = phystokv(p);

	memset((void*) v, 0, PAGE_SIZE);

	if (mapped)
		pmap_put_mapwindow(map);
}
Example 11
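/*
 *	ml_copy_phys: copy between two physical addresses, using mapping
 *	windows when NCOPY_WINDOWS > 0, and the x86_64 physmap (or the
 *	debugger window for frames outside it) otherwise.  The copy must
 *	not span a page boundary; 2-, 4- and 8-byte copies use
 *	width-specific routines, and the status of the underlying copy
 *	routine is returned.
 */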
__private_extern__ int ml_copy_phys(addr64_t src64, addr64_t dst64, vm_size_t bytes) {
	void *src, *dst;
	int err = 0;

	mp_disable_preemption();
#if NCOPY_WINDOWS > 0
	mapwindow_t *src_map, *dst_map;
	/* We rely on MTRRs here */
	src_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | ((pmap_paddr_t)src64 & PG_FRAME) | INTEL_PTE_REF));
	dst_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)dst64 & PG_FRAME) | INTEL_PTE_REF | INTEL_PTE_MOD));
	src = (void *) ((uintptr_t)src_map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK));
	dst = (void *) ((uintptr_t)dst_map->prv_CADDR | ((uint32_t)dst64 & INTEL_OFFMASK));
#elif defined(__x86_64__)
	addr64_t debug_pa = 0;

	/* If either destination or source are outside the
	 * physical map, establish a physical window onto the target frame.
	 */
	assert(physmap_enclosed(src64) || physmap_enclosed(dst64));

	if (physmap_enclosed(src64) == FALSE) {
		src = (void *)(debugger_window_kva | (src64 & INTEL_OFFMASK));
		dst = PHYSMAP_PTOV(dst64);
		debug_pa = src64 & PG_FRAME;
	} else if (physmap_enclosed(dst64) == FALSE) {
		src = PHYSMAP_PTOV(src64);
		dst = (void *)(debugger_window_kva | (dst64 & INTEL_OFFMASK));
		debug_pa = dst64 & PG_FRAME;
	} else {
		src = PHYSMAP_PTOV(src64);
		dst = PHYSMAP_PTOV(dst64);
	}
	/* DRK: debugger only routine, we don't bother checking for an
	 * identical mapping.
	 */
	if (debug_pa) {
		if (debugger_window_kva == 0)
			panic("%s: invoked in non-debug mode", __FUNCTION__);
		/* Establish a cache-inhibited physical window; some platforms
		 * may not cover arbitrary ranges with MTRRs
		 */
		pmap_store_pte(debugger_ptep, debug_pa | INTEL_PTE_NCACHE | INTEL_PTE_RW | INTEL_PTE_REF| INTEL_PTE_MOD | INTEL_PTE_VALID);
		flush_tlb_raw();
#if	DEBUG
		kprintf("Remapping debugger physical window at %p to 0x%llx\n", (void *)debugger_window_kva, debug_pa);
#endif
	}
#endif
	/* ensure we stay within a page */
	if (((((uint32_t)src64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES) || ((((uint32_t)dst64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES) ) {
	        panic("ml_copy_phys spans pages, src: 0x%llx, dst: 0x%llx", src64, dst64);
	}

	/*
	 * For device register access from the debugger,
	 * 2-byte/16-bit, 4-byte/32-bit and 8-byte/64-bit copies are handled
	 * by assembly routines ensuring the required access widths.
	 * 1-byte and other copies are handled by the regular _bcopy.
	 */
	switch (bytes) {
	case 2:
		err = _bcopy2(src, dst);
		break;
	case 4:
		err = _bcopy4(src, dst);
		break;
	case 8:
		err = _bcopy8(src, dst);
		break;
	case 1:
	default:
		err = _bcopy(src, dst, bytes);
		break;
	}

#if NCOPY_WINDOWS > 0
	pmap_put_mapwindow(src_map);
	pmap_put_mapwindow(dst_map);
#endif
	mp_enable_preemption();

	return err;
}