Example #1
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_hpcmips_bd_mem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary,
    bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;
#ifdef DIAGNOSTIC
	extern paddr_t avail_start, avail_end;		/* XXX */

	/* Clamp the caller's range to the pages UVM actually manages. */
	high = high < (avail_end - PAGE_SIZE) ? high : (avail_end - PAGE_SIZE);
	low = low > avail_start ? low : avail_start;
#endif
	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.queue.tqe_next;

	for (; m != NULL; m = m->pageq.queue.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_hpcmips_bd_mem_alloc");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
Example #2
void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
    void *va;
    vm_page_t m;
    int pflags;

    *flags = UMA_SLAB_PRIV;
    pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;

    for (;;) {
        m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
        if (m == NULL) {
            if (wait & M_NOWAIT)
                return (NULL);
            VM_WAIT;
        } else
            break;
    }

    /* With a direct map the physical address doubles as the VA. */
    va = (void *)VM_PAGE_TO_PHYS(m);

    if (!hw_direct_map)
        pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));

    if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
        bzero(va, PAGE_SIZE);
    atomic_add_int(&hw_uma_mdpages, 1);

    return (va);
}
Example #3
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, vaddr_t low, vaddr_t high)
{
	vaddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error, plaflag;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	if (flags & BUS_DMA_ZERO)
		plaflag |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high,
	    alignment, boundary, &mlist, nsegs, plaflag);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("vm_page_alloc_memory returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("dmamem_alloc_range");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
Example #4
/*
 * Allocate memory safe for DMA.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	struct pglist mlist;
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	int curseg, error, plaflag;

	DPRINTF(("bus_dmamem_alloc: t = %p, size = %ld, alignment = %ld, boundary = %ld, segs = %p, nsegs = %d, rsegs = %p, flags = %x\n", t, size, alignment, boundary, segs, nsegs, rsegs, flags));

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate the pages from the VM system.
	 */
	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	if (flags & BUS_DMA_ZERO)
		plaflag |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, 0, -1, alignment, boundary,
	    &mlist, nsegs, plaflag);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;

	DPRINTF(("bus_dmamem_alloc: m = %p, lastaddr = 0x%08lx\n",m,lastaddr));

	while ((m = TAILQ_NEXT(m, pageq)) != NULL) {
		curaddr = VM_PAGE_TO_PHYS(m);
		DPRINTF(("bus_dmamem_alloc: m = %p, curaddr = 0x%08lx, lastaddr = 0x%08lx\n", m, curaddr, lastaddr));
		if (curaddr == (lastaddr + PAGE_SIZE)) {
			segs[curseg].ds_len += PAGE_SIZE;
		} else {
			DPRINTF(("bus_dmamem_alloc: new segment\n"));
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	DPRINTF(("bus_dmamem_alloc: curseg = %d, *rsegs = %d\n",curseg,*rsegs));

	return (0);
}
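For context, a minimal hedged sketch (not from the original sources) of how
a driver typically consumes this interface: allocate DMA-safe pages, then
map the returned segments into kernel VA.  The "sc->sc_dmat" tag, the size,
and the flag choices are assumptions for illustration.

	bus_dma_segment_t seg;
	int rseg, error;
	caddr_t kva;

	/* Ask for DMA-safe pages; one segment keeps them contiguous. */
	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error)
		return (error);

	/* Map the segment into kernel virtual address space. */
	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
	    &kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		return (error);
	}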
Example #5
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/* Note: the caller-supplied high bound is overridden here. */
	high = avail_end - PAGE_SIZE;

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return error;

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg]._ds_paddr = VM_PAGE_TO_PHYS(m);
	/* The bus address is the CPU physical address plus the tag's offset. */
	segs[curseg].ds_addr = segs[curseg]._ds_paddr + t->dma_offset;
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < avail_start || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%llx\n", (long long)curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr + t->dma_offset;
			segs[curseg].ds_len = PAGE_SIZE;
			segs[curseg]._ds_paddr = curaddr;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return 0;
}
Example #6
/*
 * _bus_dmamem_alloc_range_common --
 *	Allocate physical memory from the specified physical address range.
 */
int
_bus_dmamem_alloc_range_common(bus_dma_tag_t t,
			       bus_size_t size,
			       bus_size_t alignment,
			       bus_size_t boundary,
			       bus_dma_segment_t *segs,
			       int nsegs,
			       int *rsegs,
			       int flags,
			       paddr_t low,
			       paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/* Allocate pages from the VM system. */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
				&mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (__predict_false(error != 0))
		return (error);
	
	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM system.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
		KASSERT(curaddr >= low);
		KASSERT(curaddr < high);
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
Example #7
static pt_entry_t *
efi_1t1_pte(vm_offset_t va)
{
	pml4_entry_t *pml4e;
	pdp_entry_t *pdpe;
	pd_entry_t *pde;
	pt_entry_t *pte;
	vm_page_t m;
	vm_pindex_t pml4_idx, pdp_idx, pd_idx;
	vm_paddr_t mphys;

	/* Walk PML4 -> PDP -> PD, allocating each level on demand. */
	pml4_idx = pmap_pml4e_index(va);
	pml4e = &efi_pml4[pml4_idx];
	if (*pml4e == 0) {
		m = efi_1t1_page(1 + pml4_idx);
		mphys = VM_PAGE_TO_PHYS(m);
		*pml4e = mphys | X86_PG_RW | X86_PG_V;
	} else {
		mphys = *pml4e & ~PAGE_MASK;
	}

	pdpe = (pdp_entry_t *)PHYS_TO_DMAP(mphys);
	pdp_idx = pmap_pdpe_index(va);
	pdpe += pdp_idx;
	if (*pdpe == 0) {
		m = efi_1t1_page(1 + NPML4EPG + (pml4_idx + 1) * (pdp_idx + 1));
		mphys = VM_PAGE_TO_PHYS(m);
		*pdpe = mphys | X86_PG_RW | X86_PG_V;
	} else {
		mphys = *pdpe & ~PAGE_MASK;
	}

	pde = (pd_entry_t *)PHYS_TO_DMAP(mphys);
	pd_idx = pmap_pde_index(va);
	pde += pd_idx;
	if (*pde == 0) {
		m = efi_1t1_page(1 + NPML4EPG + NPML4EPG * NPDPEPG +
		    (pml4_idx + 1) * (pdp_idx + 1) * (pd_idx + 1));
		mphys = VM_PAGE_TO_PHYS(m);
		*pde = mphys | X86_PG_RW | X86_PG_V;
	} else {
		mphys = *pde & ~PAGE_MASK;
	}

	pte = (pt_entry_t *)PHYS_TO_DMAP(mphys);
	pte += pmap_pte_index(va);
	KASSERT(*pte == 0, ("va %#jx *pt %#jx", va, *pte));

	return (pte);
}
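A hedged sketch of a call site for the walker above (the attribute bits
mirror those used inside the function; the helper name is invented): the
returned PTE slot is filled to establish one 1:1 runtime mapping.

static void
efi_1t1_map_one(vm_offset_t va)
{
	pt_entry_t *pte;

	pte = efi_1t1_pte(va);
	/* Identity mapping: the VA doubles as the PA. */
	*pte = (vm_paddr_t)va | X86_PG_RW | X86_PG_V;
}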
Example #8
static pt_entry_t *
efi_1t1_l3(vm_offset_t va)
{
	pd_entry_t *l0, *l1, *l2;
	pt_entry_t *l3;
	vm_pindex_t l0_idx, l1_idx, l2_idx;
	vm_page_t m;
	vm_paddr_t mphys;

	/* Walk L0 -> L1 -> L2, allocating each table page on demand. */
	l0_idx = pmap_l0_index(va);
	l0 = &efi_l0[l0_idx];
	if (*l0 == 0) {
		m = efi_1t1_page(1 + l0_idx);
		mphys = VM_PAGE_TO_PHYS(m);
		*l0 = mphys | L0_TABLE;
	} else {
		mphys = *l0 & ~ATTR_MASK;
	}

	l1 = (pd_entry_t *)PHYS_TO_DMAP(mphys);
	l1_idx = pmap_l1_index(va);
	l1 += l1_idx;
	if (*l1 == 0) {
		m = efi_1t1_page(1 + L0_ENTRIES + (l0_idx + 1) * (l1_idx + 1));
		mphys = VM_PAGE_TO_PHYS(m);
		*l1 = mphys | L1_TABLE;
	} else {
		mphys = *l1 & ~ATTR_MASK;
	}

	l2 = (pd_entry_t *)PHYS_TO_DMAP(mphys);
	l2_idx = pmap_l2_index(va);
	l2 += l2_idx;
	if (*l2 == 0) {
		m = efi_1t1_page(1 + L0_ENTRIES + L0_ENTRIES * Ln_ENTRIES +
		    (l0_idx + 1) * (l1_idx + 1) * (l2_idx + 1));
		mphys = VM_PAGE_TO_PHYS(m);
		*l2 = mphys | L2_TABLE;
	} else {
		mphys = *l2 & ~ATTR_MASK;
	}

	l3 = (pt_entry_t *)PHYS_TO_DMAP(mphys);
	l3 += pmap_l3_index(va);
	KASSERT(*l3 == 0, ("%s: Already mapped: va %#jx *pt %#jx", __func__,
	    va, *l3));

	return (l3);
}
Example #9
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	vm_object_t obj;
	vm_page_t from_page, to_page;
	int i, ret, rv;

	obj = ttm->swap_storage;

	VM_OBJECT_LOCK(obj);
	vm_object_pip_add(obj, 1);
	/* Copy each page of swap storage back into the ttm's page array. */
	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL |
						 VM_ALLOC_RETRY);
		if (from_page->valid != VM_PAGE_BITS_ALL) {
			if (vm_pager_has_page(obj, i)) {
				rv = vm_pager_get_page(obj, &from_page, 1);
				if (rv != VM_PAGER_OK) {
					vm_page_free(from_page);
					ret = -EIO;
					goto err_ret;
				}
			} else {
				vm_page_zero_invalid(from_page, TRUE);
			}
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			vm_page_wakeup(from_page);
			goto err_ret;
		}
		pmap_copy_page(VM_PAGE_TO_PHYS(from_page),
			       VM_PAGE_TO_PHYS(to_page));
		vm_page_wakeup(from_page);
	}
	vm_object_pip_wakeup(obj);
	VM_OBJECT_UNLOCK(obj);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		vm_object_deallocate(obj);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
	return (0);

err_ret:
	vm_object_pip_wakeup(obj);
	VM_OBJECT_UNLOCK(obj);
	return (ret);
}
Example #10
/*
 * Grow the GDT.
 */
void
gdt_grow(int which)
{
    size_t old_len, new_len;
    CPU_INFO_ITERATOR cii;
    struct cpu_info *ci;
    struct vm_page *pg;
    vaddr_t va;

    old_len = gdt_size[which] * sizeof(gdt[0]);
    gdt_size[which] <<= 1;
    new_len = old_len << 1;

#ifdef XEN
    if (which != 0) {
        size_t max_len = MAXGDTSIZ * sizeof(gdt[0]);
        if (old_len == 0) {
            gdt_size[which] = MINGDTSIZ;
            new_len = gdt_size[which] * sizeof(gdt[0]);
        }
        for (va = (vaddr_t)(cpu_info_primary.ci_gdt) + old_len + max_len;
                va < (vaddr_t)(cpu_info_primary.ci_gdt) + new_len + max_len;
                va += PAGE_SIZE) {
            while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO))
                    == NULL) {
                uvm_wait("gdt_grow");
            }
            pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
                           VM_PROT_READ | VM_PROT_WRITE);
        }
        return;
    }
#endif

    for (CPU_INFO_FOREACH(cii, ci)) {
        for (va = (vaddr_t)(ci->ci_gdt) + old_len;
                va < (vaddr_t)(ci->ci_gdt) + new_len;
                va += PAGE_SIZE) {
            while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO)) ==
                    NULL) {
                uvm_wait("gdt_grow");
            }
            pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
                           VM_PROT_READ | VM_PROT_WRITE);
        }
    }

    pmap_update(pmap_kernel());
}
Example #11
void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	static vm_pindex_t color;
	void *va;
	vm_page_t m;
	int pflags;

	*flags = UMA_SLAB_PRIV;
	if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
	else
		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
	if (wait & M_ZERO)
		pflags |= VM_ALLOC_ZERO;

	for (;;) {
		m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			VM_WAIT;
		} else
			break;
	}

	va = (void *)IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m));
	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero(va, PAGE_SIZE);
	return (va);
}
Example #12
void *
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
{
	vm_paddr_t pa;
	vm_page_t m;
	int pflags;
	void *va;

	*flags = UMA_SLAB_PRIV;
	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;

	for (;;) {
		m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, pflags);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			else
				pmap_grow_direct_page_cache();
		} else
			break;
	}

	pa = VM_PAGE_TO_PHYS(m);
	va = (void *)MIPS_PHYS_TO_DIRECT(pa);
	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero(va, PAGE_SIZE);
	return (va);
}
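These uma_small_alloc() implementations normally sit behind UMA as the
machine-dependent small-allocation backend, but a zone can also be pointed
at such an allocator explicitly.  A minimal sketch, with the zone name and
item size invented for illustration:

static uma_zone_t example_zone;

static void
example_zone_init(void)
{
	example_zone = uma_zcreate("example", 64,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	/* Route slab page allocations through the MD allocator. */
	uma_zone_set_allocf(example_zone, uma_small_alloc);
}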
Example #13
/*
 * Load from block io.
 */
static int
_bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
                     int *nsegs, int flags)
{
    vm_paddr_t paddr;
    bus_size_t len, tlen;
    int error, i, ma_offs;

    if ((bio->bio_flags & BIO_UNMAPPED) == 0) {
        error = _bus_dmamap_load_buffer(dmat, map, bio->bio_data,
                                        bio->bio_bcount, kernel_pmap, flags, NULL, nsegs);
        return (error);
    }

    error = 0;
    tlen = bio->bio_bcount;
    ma_offs = bio->bio_ma_offset;
    /* Load the bio one physically contiguous page at a time. */
    for (i = 0; tlen > 0; i++, tlen -= len) {
        len = min(PAGE_SIZE - ma_offs, tlen);
        paddr = VM_PAGE_TO_PHYS(bio->bio_ma[i]) + ma_offs;
        error = _bus_dmamap_load_phys(dmat, map, paddr, len,
                                      flags, NULL, nsegs);
        if (error != 0)
            break;
        ma_offs = 0;
    }
    return (error);
}
Example #14
/*
 * Initialize the GDT subsystem.  Called from autoconf().
 */
void
gdt_init(void)
{
	struct vm_page *pg;
	vaddr_t va;
	struct cpu_info *ci = &cpu_info_primary;

	gdt_next = NGDT;
	gdt_free = GNULL_SEL;

	gdt = (union descriptor *)uvm_km_valloc(kernel_map, MAXGDTSIZ);
	for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + MAXGDTSIZ;
	    va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL)
			panic("gdt_init: no pages");
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    PROT_READ | PROT_WRITE);
	}
	bcopy(bootstrap_gdt, gdt, NGDT * sizeof(union descriptor));
	ci->ci_gdt = gdt;
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
	    SDT_MEMRWA, SEL_KPL, 0, 0);

	gdt_init_cpu(ci);
}
Example #15
/*
 * Create an environment for the EFI runtime code call.  The most
 * important part is creating the required 1:1 physical->virtual
 * mappings for the runtime segments.  To do that, we manually create
 * a page table which unmaps userspace but gives the correct kernel
 * mapping.  The 1:1 mappings for runtime segments usually occupy the
 * low 4G of the physical address map.
 *
 * The 1:1 mappings were chosen over the SetVirtualAddressMap() EFI RT
 * service because there are some BIOSes which fail to correctly
 * relocate themselves on the call, requiring both 1:1 and virtual
 * mappings.  As a result, we must provide the 1:1 mapping anyway, so
 * there is no reason to bother with the virtual map, and no need to
 * add complexity to the loader.
 *
 * The fpu_kern_enter() call allows firmware to use the FPU, as
 * mandated by the specification.  In particular, the CR0.TS bit is
 * cleared.  It also enters a critical section, giving us the
 * necessary protection against context switches.
 *
 * There is no need to disable interrupts around the change of %cr3,
 * since the kernel mappings are correct, while we only grabbed the
 * userspace portion of the VA.  Interrupt handlers must not access
 * userspace.  Having interrupts enabled avoids problems with long
 * firmware/SMM operations, which would otherwise negatively affect
 * IPIs, esp. TLB shootdown requests.
 */
int
efi_arch_enter(void)
{
	pmap_t curpmap;

	curpmap = PCPU_GET(curpmap);
	PMAP_LOCK_ASSERT(curpmap, MA_OWNED);

	/*
	 * IPI TLB shootdown handler invltlb_pcid_handler() reloads
	 * %cr3 from the curpmap->pm_cr3, which would disable runtime
	 * segments mappings.  Block the handler's action by setting
	 * curpmap to impossible value.  See also comment in
	 * pmap.c:pmap_activate_sw().
	 */
	if (pmap_pcid_enabled && !invpcid_works)
		PCPU_SET(curpmap, NULL);

	load_cr3(VM_PAGE_TO_PHYS(efi_pml4_page) | (pmap_pcid_enabled ?
	    curpmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid : 0));
	/*
	 * If PCID is enabled, the cleared CR3_PCID_SAVE bit in the
	 * loaded %cr3 causes TLB invalidation.
	 */
	if (!pmap_pcid_enabled)
		invltlb();
	return (0);
}
Example #16
void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	vm_paddr_t pa;
	vm_page_t m;
	int pflags;
	void *va;

	*flags = UMA_SLAB_PRIV;

	if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT;
	else
		pflags = VM_ALLOC_SYSTEM;

	for (;;) {
		m = pmap_alloc_direct_page(0, pflags);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			else
				pmap_grow_direct_page_cache();
		} else
			break;
	}

	pa = VM_PAGE_TO_PHYS(m);
	va = (void *)MIPS_PHYS_TO_DIRECT(pa);
	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero(va, PAGE_SIZE);
	return (va);
}
Example #17
void *
cpu_uarea_alloc(bool system)
{
	struct pglist pglist;
	int error;

	/*
	 * Allocate a new physically contiguous uarea which can be
	 * direct-mapped.
	 */
	error = uvm_pglistalloc(USPACE, 0, ptoa(physmem), 0, 0, &pglist, 1, 1);
	if (error) {
		return NULL;
	}

	/*
	 * Get the physical address from the first page.
	 */
	const struct vm_page * const pg = TAILQ_FIRST(&pglist);
	KASSERT(pg != NULL);
	const paddr_t pa = VM_PAGE_TO_PHYS(pg);

	/*
	 * We need to return a direct-mapped VA for the pa.
	 */

	return (void *)PMAP_MAP_POOLPAGE(pa);
}
Example #18
/*
 * Initialize the GDT.
 */
void
gdt_init(void)
{
	char *old_gdt;
	struct vm_page *pg;
	vaddr_t va;
	struct cpu_info *ci = &cpu_info_primary;

	gdt_next = 0;
	gdt_free = GNULL_SEL;

	old_gdt = gdtstore;
	gdtstore = (char *)uvm_km_valloc(kernel_map, MAXGDTSIZ);
	for (va = (vaddr_t)gdtstore; va < (vaddr_t)gdtstore + MAXGDTSIZ;
	    va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL) {
			panic("gdt_init: no pages");
		}
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
	}
	bcopy(old_gdt, gdtstore, DYNSEL_START);
	ci->ci_gdt = gdtstore;
	set_sys_segment(GDT_ADDR_SYS(gdtstore, GLDT_SEL), ldtstore,
	    LDT_SIZE - 1, SDT_SYSLDT, SEL_KPL, 0);

	gdt_init_cpu(ci);
}
Example #19
int
mbus_dmamem_alloc(void *v, bus_size_t size, bus_size_t alignment,
		  bus_size_t boundary, bus_dma_segment_t *segs, int nsegs,
		  int *rsegs, int flags)
{
	struct pglist pglist;
	struct vm_page *pg;
	int plaflag;

	size = round_page(size);

	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	if (flags & BUS_DMA_ZERO)
		plaflag |= UVM_PLA_ZERO;

	TAILQ_INIT(&pglist);
	if (uvm_pglistalloc(size, (paddr_t)0, (paddr_t)-1, alignment, boundary,
	    &pglist, 1, plaflag))
		return (ENOMEM);

	pg = TAILQ_FIRST(&pglist);
	segs[0]._ds_va = segs[0].ds_addr = VM_PAGE_TO_PHYS(pg);
	segs[0].ds_len = size;
	*rsegs = 1;

	for (; pg; pg = TAILQ_NEXT(pg, pageq))
		/* XXX for now */
		pmap_changebit(pg, PTE_UNCACHABLE, 0);
	pmap_update(pmap_kernel());

	return (0);
}
Example #20
vm_page_t
shmem_read_mapping_page(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;
	int rv;

	VM_OBJECT_LOCK_ASSERT_OWNED(object);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		if (vm_pager_has_page(object, pindex)) {
			rv = vm_pager_get_page(object, &m, 1);
			m = vm_page_lookup(object, pindex);
			if (m == NULL)
				return ERR_PTR(-ENOMEM);
			if (rv != VM_PAGER_OK) {
				vm_page_free(m);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
			m->valid = VM_PAGE_BITS_ALL;
			m->dirty = 0;
		}
	}
	vm_page_wire(m);
	vm_page_wakeup(m);
	return (m);
}
Example #21
/*
 * Allocate shadow GDT for a slave CPU.
 */
void
gdt_alloc_cpu(struct cpu_info *ci)
{
    int max_len = MAXGDTSIZ * sizeof(gdt[0]);
    int min_len = MINGDTSIZ * sizeof(gdt[0]);
    struct vm_page *pg;
    vaddr_t va;

    ci->ci_gdt = (union descriptor *)uvm_km_alloc(kernel_map, max_len,
                 0, UVM_KMF_VAONLY);
    for (va = (vaddr_t)ci->ci_gdt; va < (vaddr_t)ci->ci_gdt + min_len;
            va += PAGE_SIZE) {
        while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO))
                == NULL) {
            uvm_wait("gdt_alloc_cpu");
        }
        pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
                       VM_PROT_READ | VM_PROT_WRITE);
    }
    pmap_update(pmap_kernel());
    memset(ci->ci_gdt, 0, min_len);
    memcpy(ci->ci_gdt, gdt, gdt_count[0] * sizeof(gdt[0]));
    setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, 0xfffff,
               SDT_MEMRWA, SEL_KPL, 1, 1);
}
Example #22
/*
 * Grow the GDT.
 */
void
gdt_grow()
{
	size_t old_len, new_len;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct vm_page *pg;
	vaddr_t va;

	old_len = gdt_size * sizeof(union descriptor);
	gdt_size <<= 1;
	new_len = old_len << 1;

	CPU_INFO_FOREACH(cii, ci) {
		for (va = (vaddr_t)(ci->ci_gdt) + old_len;
		     va < (vaddr_t)(ci->ci_gdt) + new_len;
		     va += PAGE_SIZE) {
			while (
			    (pg =
			    uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO)) ==
			    NULL) {
				uvm_wait("gdt_grow");
			}
			pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE);
		}
	}
}
Example #23
/*
 * Initialize the GDT subsystem.  Called from autoconf().
 */
void
gdt_init()
{
	size_t max_len, min_len;
	struct vm_page *pg;
	vaddr_t va;
	struct cpu_info *ci = &cpu_info_primary;

	simple_lock_init(&gdt_simplelock);
	lockinit(&gdt_lock_store, PZERO, "gdtlck", 0, 0);

	max_len = MAXGDTSIZ * sizeof(union descriptor);
	min_len = MINGDTSIZ * sizeof(union descriptor);

	gdt_size = MINGDTSIZ;
	gdt_count = NGDT;
	gdt_next = NGDT;
	gdt_free = GNULL_SEL;

	gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
	for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + min_len; va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL)
			panic("gdt_init: no pages");
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
	}
	bcopy(bootstrap_gdt, gdt, NGDT * sizeof(union descriptor));
	ci->ci_gdt = gdt;
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
	    SDT_MEMRWA, SEL_KPL, 0, 0);

	gdt_init_cpu(ci);
}
Example #24
/*
 * Populate the iomap from an mlist.  See note for iommu_dvmamap_load()
 * regarding page entry exhaustion of the iomap.
 */
int
viommu_dvmamap_load_mlist(bus_dma_tag_t t, struct iommu_state *is,
    bus_dmamap_t map, struct pglist *mlist, int flags,
    bus_size_t size, bus_size_t boundary)
{
	struct vm_page *m;
	paddr_t pa;
	int err;

	/*
	 * This was allocated with bus_dmamem_alloc.
	 * The pages are on an `mlist'.
	 */
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq)) {
		pa = VM_PAGE_TO_PHYS(m);

		err = viommu_dvmamap_append_range(t, map, pa, PAGE_SIZE,
		    flags, boundary);
		if (err == EFBIG)
			return (err);
		if (err) {
			printf("iomap load seg page: %d for pa 0x%lx "
			    "(%lx - %lx for %d/%x\n", err, pa, pa,
			    pa + PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			return (err);
		}
	}

	return (0);
}
Example #25
/*
 * uvm_pagermapin: map pages into KVA for I/O that needs mappings
 *
 * We basically just km_valloc a blank map entry to reserve the space in the
 * kernel map and then use pmap_enter() to put the mappings in by hand.
 */
vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vaddr_t kva, cva;
	vm_prot_t prot;
	vsize_t size;
	struct vm_page *pp;

	prot = VM_PROT_READ;
	/* A pagein (device -> memory) must be able to write the pages. */
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;
	size = ptoa(npages);

	KASSERT(size <= MAXBSIZE);

	kva = uvm_pseg_get(flags);
	if (kva == 0)
		return 0;

	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->pg_flags & PG_BUSY);
		/* Allow pmap_enter to fail. */
		if (pmap_enter(pmap_kernel(), cva, VM_PAGE_TO_PHYS(pp),
		    prot, PMAP_WIRED | PMAP_CANFAIL | prot) != 0) {
			pmap_remove(pmap_kernel(), kva, cva);
			pmap_update(pmap_kernel());
			uvm_pseg_release(kva);
			return 0;
		}
	}
	pmap_update(pmap_kernel());
	return kva;
}
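A hedged sketch of the usual pairing; uvm_pagermapout() is the counterpart
that releases the mapping, and the I/O itself is elided:

	vaddr_t kva;

	kva = uvm_pagermapin(pps, npages, UVMPAGER_MAPIN_WAITOK);
	if (kva == 0)
		return (VM_PAGER_AGAIN);
	/* ... perform the I/O through the mapping at kva ... */
	uvm_pagermapout(kva, npages);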
Example #26
static void
free_pagelist(PAGELIST_T *pagelist, int actual)
{
	vm_page_t *pages;
	unsigned int num_pages, i;

	vcos_log_trace("free_pagelist - %x, %d", (unsigned int)pagelist, actual);

	num_pages = (pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
	    PAGE_SIZE;

	pages = (vm_page_t *)(pagelist->addrs + num_pages);

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
		FRAGMENTS_T *fragments =
			 g_fragments_base + (pagelist->type -
					PAGELIST_READ_WITH_FRAGMENTS);
		int head_bytes, tail_bytes;

		if (actual >= 0)
		{
			/* XXXBSD: might be inefficient */
			void *page_address = pmap_mapdev(VM_PAGE_TO_PHYS(pages[0]), PAGE_SIZE*num_pages);
			if ((head_bytes = (CACHE_LINE_SIZE - pagelist->offset) & (CACHE_LINE_SIZE - 1)) != 0) {
				if (head_bytes > actual)
					head_bytes = actual;

				memcpy((char *)page_address +
						 pagelist->offset, fragments->headbuf,
						 head_bytes);
			}
			if ((head_bytes < actual) &&
				(tail_bytes =
				(pagelist->offset + actual) & (CACHE_LINE_SIZE -
										1)) != 0) {
				memcpy((char *)page_address + PAGE_SIZE*(num_pages - 1) +
						 ((pagelist->offset + actual) & (PAGE_SIZE -
									1) & ~(CACHE_LINE_SIZE - 1)),
						 fragments->tailbuf, tail_bytes);
			}
			pmap_qremove((vm_offset_t)page_address, PAGE_SIZE*num_pages);
		}

		mtx_lock(&g_free_fragments_mutex);
		*(FRAGMENTS_T **) fragments = g_free_fragments;
		g_free_fragments = fragments;
		mtx_unlock(&g_free_fragments_mutex);
		sema_post(&g_free_fragments_sema);
	}

	for (i = 0; i < num_pages; i++) {
		if (pagelist->type != PAGELIST_WRITE)
			vm_page_dirty(pages[i]);
	}

	vm_page_unhold_pages(pages, num_pages);

	free(pagelist, M_VCPAGELIST);
}
Example #27
void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	void *va;
	vm_page_t m;
	int pflags;

	*flags = UMA_SLAB_PRIV;
	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;

	for (;;) {
		m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			VM_WAIT;
		} else
			break;
	}

	va = (void *)IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m));
	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero(va, PAGE_SIZE);
	return (va);
}
Example #28
static inline void
drm_clflush_pages(struct vm_page *pages[], unsigned long num_pages)
{
	unsigned long i;

	for (i = 0; i < num_pages; i++)
		pmap_flush_page(VM_PAGE_TO_PHYS(pages[i]));
}
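A hedged call-site sketch (the object and its fields are assumptions):
flush the CPU caches over an object's backing pages before handing them to
a device that does not snoop.

	drm_clflush_pages(obj->pages, obj->size >> PAGE_SHIFT);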
Example #29
/*
 * Map the DVMA mappings into the kernel pmap.
 * Check the flags to see whether we're streaming or coherent.
 */
int
viommu_dvmamem_map(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dma_segment_t *segs,
    int nsegs, size_t size, caddr_t *kvap, int flags)
{
	struct vm_page *m;
	vaddr_t va;
	bus_addr_t addr;
	struct pglist *mlist;
	bus_addr_t cbit = 0;

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: segp %p nsegs %d size %lx\n",
	    segs, nsegs, size));

	/*
	 * Allocate some space in the kernel map, and then map these pages
	 * into this space.
	 */
	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	/* 
	 * digest flags:
	 */
#if 0
	if (flags & BUS_DMA_COHERENT)	/* Disable vcache */
		cbit |= PMAP_NVC;
#endif
	if (flags & BUS_DMA_NOCACHE)	/* side effects */
		cbit |= PMAP_NC;

	/*
	 * Now take this and map it into the CPU.
	 */
	mlist = segs[0]._ds_mlist;
	TAILQ_FOREACH(m, mlist, pageq) {
#ifdef DIAGNOSTIC
		if (size == 0)
			panic("iommu_dvmamem_map: size botch");
#endif
		addr = VM_PAGE_TO_PHYS(m);
		DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: "
		    "mapping va %lx at %llx\n", va,
		    (unsigned long long)addr | cbit));
		pmap_enter(pmap_kernel(), va, addr | cbit,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return (0);
}
Example #30
int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
{
	vm_object_t obj;
	vm_page_t from_page, to_page;
	int i;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		obj = swap_pager_alloc(NULL,
		    IDX_TO_OFF(ttm->num_pages), VM_PROT_DEFAULT, 0);
		if (obj == NULL) {
			pr_err("Failed allocating swap storage\n");
			return (-ENOMEM);
		}
	} else
		obj = persistent_swap_storage;

	VM_OBJECT_LOCK(obj);
	vm_object_pip_add(obj, 1);
	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL |
					       VM_ALLOC_RETRY);
		pmap_copy_page(VM_PAGE_TO_PHYS(from_page),
					VM_PAGE_TO_PHYS(to_page));
		to_page->valid = VM_PAGE_BITS_ALL;
		vm_page_dirty(to_page);
		vm_page_wakeup(to_page);
	}
	vm_object_pip_wakeup(obj);
	VM_OBJECT_UNLOCK(obj);

	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	ttm->swap_storage = obj;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
}