Example #1
int
mbus_dmamem_alloc(void *v, bus_size_t size, bus_size_t alignment,
		  bus_size_t boundary, bus_dma_segment_t *segs, int nsegs,
		  int *rsegs, int flags)
{
	struct pglist pglist;
	struct vm_page *pg;
	int plaflag;

	size = round_page(size);

	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	if (flags & BUS_DMA_ZERO)
		plaflag |= UVM_PLA_ZERO;

	TAILQ_INIT(&pglist);
	if (uvm_pglistalloc(size, (paddr_t)0, (paddr_t)-1, alignment, boundary,
	    &pglist, 1, plaflag))
		return (ENOMEM);

	pg = TAILQ_FIRST(&pglist);
	segs[0]._ds_va = segs[0].ds_addr = VM_PAGE_TO_PHYS(pg);
	segs[0].ds_len = size;
	*rsegs = 1;

	for (; pg != NULL; pg = TAILQ_NEXT(pg, pageq))
		/* XXX for now */
		pmap_changebit(pg, PTE_UNCACHABLE, 0);
	pmap_update(pmap_kernel());

	return (0);
}
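The allocator above always returns a single physically contiguous segment, which keeps the free path simple. Below is a minimal sketch of what that path could look like, assuming the common OpenBSD idiom of rebuilding the page list with PHYS_TO_VM_PAGE(9) and releasing it through uvm_pglistfree(9); the helper name is hypothetical.

/* Hypothetical helper: release one segment allocated as above. */
static void
mbus_dmamem_free_segment(bus_dma_segment_t *segs)
{
	struct pglist pglist;
	paddr_t pa;

	TAILQ_INIT(&pglist);
	for (pa = segs[0].ds_addr; pa < segs[0].ds_addr + segs[0].ds_len;
	    pa += PAGE_SIZE) {
		/* recover the vm_page backing each physical page */
		struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
		TAILQ_INSERT_TAIL(&pglist, pg, pageq);
	}
	uvm_pglistfree(&pglist);
}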
Example #2
void *
cpu_uarea_alloc(bool system)
{
	struct pglist pglist;
	int error;

	/*
	 * Allocate a new physically contiguous uarea which can be
	 * direct-mapped.
	 */
	error = uvm_pglistalloc(USPACE, 0, ptoa(physmem), 0, 0, &pglist, 1, 1);
	if (error) {
		return NULL;
	}

	/*
	 * Get the physical address from the first page.
	 */
	const struct vm_page * const pg = TAILQ_FIRST(&pglist);
	KASSERT(pg != NULL);
	const paddr_t pa = VM_PAGE_TO_PHYS(pg);

	/*
	 * We need to return a direct-mapped VA for the pa.
	 */

	return (void *)PMAP_MAP_POOLPAGE(pa);
}
Example #3
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_hpcmips_bd_mem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary,
    bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	vaddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;
#ifdef DIAGNOSTIC
	extern paddr_t avail_start, avail_end;		/* XXX */

	high = high < (avail_end - PAGE_SIZE) ? high : (avail_end - PAGE_SIZE);
	low = low > avail_start ? low : avail_start;
#endif
	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.queue.tqe_next;

	for (; m != NULL; m = m->pageq.queue.tqe_next) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_hpcmips_bd_mem_alloc");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
Example #4
File: bus_dma.c  Project: ajinkya93/OpenBSD
/*
 * Allocate memory safe for DMA.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	struct pglist mlist;
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	int curseg, error, plaflag;

	DPRINTF(("bus_dmamem_alloc: t = %p, size = %ld, alignment = %ld, boundary = %ld, segs = %p, nsegs = %d, rsegs = %p, flags = %x\n", t, size, alignment, boundary, segs, nsegs, rsegs, flags));

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate the pages from the VM system.
	 */
	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	if (flags & BUS_DMA_ZERO)
		plaflag |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, 0, -1, alignment, boundary,
	    &mlist, nsegs, plaflag);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;

	DPRINTF(("bus_dmamem_alloc: m = %p, lastaddr = 0x%08lx\n",m,lastaddr));

	while ((m = TAILQ_NEXT(m, pageq)) != NULL) {
		curaddr = VM_PAGE_TO_PHYS(m);
		DPRINTF(("bus_dmamem_alloc: m = %p, curaddr = 0x%08lx, lastaddr = 0x%08lx\n", m, curaddr, lastaddr));
		if (curaddr == (lastaddr + PAGE_SIZE)) {
			segs[curseg].ds_len += PAGE_SIZE;
		} else {
			DPRINTF(("bus_dmamem_alloc: new segment\n"));
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	DPRINTF(("bus_dmamem_alloc: curseg = %d, *rsegs = %d\n",curseg,*rsegs));

	return (0);
}
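From a driver's point of view, this function sits behind bus_dmamem_alloc(9) and is usually paired with bus_dmamem_map(9) to obtain a kernel VA. The following is a hedged usage sketch in the OpenBSD style of this example; the wrapper name and the one-page size are illustrative, not from the source above.

/* Illustrative wrapper: allocate one page of DMA-safe memory and map it. */
int
example_alloc_ring(bus_dma_tag_t dmat, bus_dma_segment_t *seg, caddr_t *kvap)
{
	int rseg, error;

	/* one segment of wired, zeroed, DMA-safe memory */
	error = bus_dmamem_alloc(dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error)
		return (error);
	/* map the segment into kernel virtual address space */
	error = bus_dmamem_map(dmat, seg, rseg, PAGE_SIZE, kvap,
	    BUS_DMA_NOWAIT);
	if (error)
		bus_dmamem_free(dmat, seg, rseg);
	return (error);
}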
Example #5
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, vaddr_t low, vaddr_t high)
{
	vaddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error, plaflag;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	plaflag = flags & BUS_DMA_NOWAIT ? UVM_PLA_NOWAIT : UVM_PLA_WAITOK;
	if (flags & BUS_DMA_ZERO)
		plaflag |= UVM_PLA_ZERO;

	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high,
	    alignment, boundary, &mlist, nsegs, plaflag);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("vm_page_alloc_memory returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("dmamem_alloc_range");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
Example #6
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	high = avail_end - PAGE_SIZE;

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return error;

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg]._ds_paddr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_addr = segs[curseg]._ds_paddr + t->dma_offset;
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < avail_start || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%llx\n", (long long)curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr + t->dma_offset;
			segs[curseg].ds_len = PAGE_SIZE;
			segs[curseg]._ds_paddr = curaddr;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return 0;
}
Example #7
void *
cpu_uarea_alloc(bool system)
{
	struct pglist pglist;
#ifdef _LP64
	const paddr_t high = mips_avail_end;
#else
	const paddr_t high = MIPS_KSEG1_START - MIPS_KSEG0_START;
	/*
	 * Don't allocate a direct-mapped uarea if we aren't allocating for a
	 * system lwp and we have memory that can't be mapped via KSEG0.
	 */
	if (!system && mips_avail_end > high)
		return NULL;
#endif
	int error;

	/*
	 * Allocate a new physically contiguous uarea which can be
	 * direct-mapped.
	 */
	error = uvm_pglistalloc(USPACE, mips_avail_start, high,
	    USPACE_ALIGN, 0, &pglist, 1, 1);
	if (error) {
#ifdef _LP64
		if (!system)
			return NULL;
#endif
		panic("%s: uvm_pglistalloc failed: %d", __func__, error);
	}

	/*
	 * Get the physical address from the first page.
	 */
	const struct vm_page * const pg = TAILQ_FIRST(&pglist);
	KASSERT(pg != NULL);
	const paddr_t pa = VM_PAGE_TO_PHYS(pg);
	KASSERTMSG(pa >= mips_avail_start,
	    "pa (%#"PRIxPADDR") < mips_avail_start (%#"PRIxPADDR")",
	     pa, mips_avail_start);
	KASSERTMSG(pa < mips_avail_end,
	    "pa (%#"PRIxPADDR") >= mips_avail_end (%#"PRIxPADDR")",
	     pa, mips_avail_end);

	/*
	 * we need to return a direct-mapped VA for the pa.
	 */
#ifdef _LP64
	const vaddr_t va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
#else
	const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa);
#endif

	return (void *)va;
}
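The release path is not part of this example. What follows is a reconstructed sketch of how a matching cpu_uarea_free could undo the direct mapping, assuming the same KSEG0/XKPHYS conventions as above; it is not taken verbatim from the source.

/* Sketch: free a direct-mapped uarea allocated as above. */
bool
cpu_uarea_free(void *va)
{
#ifdef _LP64
	if (!MIPS_XKPHYS_P((vaddr_t)va))
		return false;
	paddr_t pa = MIPS_XKPHYS_TO_PHYS((vaddr_t)va);
#else
	if (!MIPS_KSEG0_P(va))
		return false;
	paddr_t pa = MIPS_KSEG0_TO_PHYS(va);
#endif
	/* hand each physical page backing the uarea back to UVM */
	for (const paddr_t epa = pa + USPACE; pa < epa; pa += PAGE_SIZE) {
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg != NULL);
		uvm_pagefree(pg);
	}
	return true;
}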
Example #8
/*
 * _bus_dmamem_alloc_range_common --
 *	Allocate physical memory from the specified physical address range.
 */
int
_bus_dmamem_alloc_range_common(bus_dma_tag_t t,
			       bus_size_t size,
			       bus_size_t alignment,
			       bus_size_t boundary,
			       bus_dma_segment_t *segs,
			       int nsegs,
			       int *rsegs,
			       int flags,
			       paddr_t low,
			       paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/* Allocate pages from the VM system. */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
				&mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (__predict_false(error != 0))
		return (error);
	
	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM system.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
		KASSERT(curaddr >= low);
		KASSERT(curaddr < high);
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
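The loop above implements the segment-coalescing rule shared by nearly all of these examples: a page that immediately follows its predecessor extends the current segment, anything else starts a new one. Here is a standalone user-space illustration of just that rule, with made-up page addresses; it mirrors the kernel logic but is not kernel code.

#include <stdio.h>

#define PAGE_SIZE 4096UL

int
main(void)
{
	/* physical page addresses as uvm_pglistalloc might return them */
	unsigned long pages[] = { 0x1000, 0x2000, 0x3000, 0x8000, 0x9000 };
	size_t npages = sizeof(pages) / sizeof(pages[0]);
	unsigned long seg_addr = pages[0], seg_len = PAGE_SIZE;
	unsigned long lastaddr = pages[0];
	int nsegs = 1;
	size_t i;

	for (i = 1; i < npages; i++) {
		if (pages[i] == lastaddr + PAGE_SIZE) {
			/* physically contiguous: extend current segment */
			seg_len += PAGE_SIZE;
		} else {
			/* gap: emit the segment and start a new one */
			printf("seg %d: addr 0x%lx len 0x%lx\n",
			    nsegs, seg_addr, seg_len);
			seg_addr = pages[i];
			seg_len = PAGE_SIZE;
			nsegs++;
		}
		lastaddr = pages[i];
	}
	printf("seg %d: addr 0x%lx len 0x%lx (%d segments total)\n",
	    nsegs, seg_addr, seg_len, nsegs);
	return 0;
}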
Example #9
File: bus.c  Project: yazshel/netbsd-kernel
/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int 
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	vaddr_t low, high;
	struct pglist *mlist;
	int error;
extern	paddr_t avail_start;
extern	paddr_t avail_end;

	/* Always round the size. */
	size = m68k_round_page(size);
	low = avail_start;
	high = avail_end;

	if ((mlist = malloc(sizeof(*mlist), M_DEVBUF,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	/*
	 * Allocate physical pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, 0, 0,
				mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error) {
		free(mlist, M_DEVBUF);
		return (error);
	}

	/*
	 * Simply keep a pointer around to the linked list, so
	 * bus_dmamem_free() can return it.
	 *
	 * NOBODY SHOULD TOUCH THE pageq.queue FIELDS WHILE THESE PAGES
	 * ARE IN OUR CUSTODY.
	 */
	segs[0]._ds_mlist = mlist;

	/*
	 * We now have physical pages, but no DVMA addresses yet. These
	 * will be allocated in bus_dmamap_load*() routines. Hence we
	 * save any alignment and boundary requirements in this DMA
	 * segment.
	 */
	segs[0].ds_addr = 0;
	segs[0].ds_len = 0;
	segs[0]._ds_va = 0;
	*rsegs = 1;
	return (0);
}
Example #10
void
viomb_inflate(struct viomb_softc *sc)
{
	struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_virtio;
	struct balloon_req *b;
	struct vm_page *p;
	struct virtqueue *vq = &sc->sc_vq[VQ_INFLATE];
	u_int32_t nvpages;
	int slot, error, i = 0;

	nvpages = sc->sc_npages - sc->sc_actual;
	if (nvpages > PGS_PER_REQ)
		nvpages = PGS_PER_REQ;
	b = &sc->sc_req;

	if ((error = uvm_pglistalloc(nvpages * PAGE_SIZE, 0,
				     dma_constraint.ucr_high,
				     0, 0, &b->bl_pglist, nvpages,
				     UVM_PLA_NOWAIT))) {
		printf("%s unable to allocate %u physmem pages,"
		    "error %d\n", DEVNAME(sc), nvpages, error);
		return;
	}

	b->bl_nentries = nvpages;
	TAILQ_FOREACH(p, &b->bl_pglist, pageq)
		b->bl_pages[i++] = p->phys_addr / VIRTIO_PAGE_SIZE;

	KASSERT(i == nvpages);

	if (virtio_enqueue_prep(vq, &slot) > 0) {
		printf("%s:virtio_enqueue_prep() vq_num %d\n",
		       DEVNAME(sc), vq->vq_num);
		goto err;
	}
	if (virtio_enqueue_reserve(vq, slot, 1)) {
		printf("%s:virtio_enqueue_reserve vq_num %d\n",
		       DEVNAME(sc), vq->vq_num);
		goto err;
	}
	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
			sizeof(u_int32_t) * nvpages, BUS_DMASYNC_PREWRITE);
	virtio_enqueue_p(vq, slot, b->bl_dmamap, 0,
			 sizeof(u_int32_t) * nvpages, VRING_READ);
	virtio_enqueue_commit(vsc, vq, slot, VRING_NOTIFY);
	return;
err:
	uvm_pglistfree(&b->bl_pglist);
	return;
}
Example #11
struct page *
alloc_page(gfp_t gfp)
{
	paddr_t low = 0;
	paddr_t high = ~(paddr_t)0;
	struct pglist pglist;
	struct vm_page *vm_page;
	int error;

	if (ISSET(gfp, __GFP_DMA32))
		high = 0xffffffff;

	error = uvm_pglistalloc(PAGE_SIZE, low, high, PAGE_SIZE, PAGE_SIZE,
	    &pglist, 1, ISSET(gfp, __GFP_WAIT));
	if (error)
		return NULL;

	vm_page = TAILQ_FIRST(&pglist);
	TAILQ_REMOVE(&pglist, vm_page, pageq.queue);	/* paranoia */
	KASSERT(TAILQ_EMPTY(&pglist));

	return container_of(vm_page, struct page, p_vmp);
}
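The release side is not shown here. Below is a hedged sketch of the counterpart, assuming the page came from alloc_page() above and reusing the embedded p_vmp layout; the exact signature is an assumption, not taken from the compat layer's source.

/* Sketch: return a page allocated by alloc_page() to UVM. */
void
__free_page(struct page *page)
{
	struct pglist pglist;

	/* rebuild a one-entry pglist so uvm_pglistfree can take it back */
	TAILQ_INIT(&pglist);
	TAILQ_INSERT_TAIL(&pglist, &page->p_vmp, pageq.queue);
	uvm_pglistfree(&pglist);
}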
Example #12
File: cpu_subr.c  Project: ryo/netbsd-src
struct cpu_info *
cpu_info_alloc(struct pmap_tlb_info *ti, cpuid_t cpu_id, cpuid_t cpu_package_id,
	cpuid_t cpu_core_id, cpuid_t cpu_smt_id)
{
	KASSERT(cpu_id < MAXCPUS);

#ifdef MIPS64_OCTEON
	vaddr_t exc_page = MIPS_UTLB_MISS_EXC_VEC + 0x1000*cpu_id;
	__CTASSERT(sizeof(struct cpu_info) + sizeof(struct pmap_tlb_info) <= 0x1000 - 0x280);

	struct cpu_info * const ci = ((struct cpu_info *)(exc_page + 0x1000)) - 1;
	memset((void *)exc_page, 0, PAGE_SIZE);

	if (ti == NULL) {
		ti = ((struct pmap_tlb_info *)ci) - 1;
		pmap_tlb_info_init(ti);
	}
#else
	const vaddr_t cpu_info_offset = (vaddr_t)&cpu_info_store & PAGE_MASK;
	struct pglist pglist;
	int error;

	/*
	 * Grab a page from the first 512MB (mappable by KSEG0) to use to store
	 * exception vectors and cpu_info for this cpu.
	 */
	error = uvm_pglistalloc(PAGE_SIZE,
	    0, MIPS_KSEG1_START - MIPS_KSEG0_START,
	    PAGE_SIZE, PAGE_SIZE, &pglist, 1, false);
	if (error)
		return NULL;

	const paddr_t pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
	const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa);
	struct cpu_info * const ci = (void *) (va + cpu_info_offset);
	memset((void *)va, 0, PAGE_SIZE);

	/*
	 * If we weren't passed a pmap_tlb_info to use, the caller wants us
	 * to take care of that for him.  Since we have room left over in the
	 * page we just allocated, just use a piece of that for it.
	 */
	if (ti == NULL) {
		if (cpu_info_offset >= sizeof(*ti)) {
			ti = (void *) va;
		} else {
			KASSERT(PAGE_SIZE - cpu_info_offset + sizeof(*ci) >= sizeof(*ti));
			ti = (struct pmap_tlb_info *)(va + PAGE_SIZE) - 1;
		}
		pmap_tlb_info_init(ti);
	}

	/*
	 * Attach its TLB info (which must be direct-mapped)
	 */
#ifdef _LP64
	KASSERT(MIPS_KSEG0_P(ti) || MIPS_XKPHYS_P(ti));
#else
	KASSERT(MIPS_KSEG0_P(ti));
#endif
#endif /* MIPS64_OCTEON */

	KASSERT(cpu_id != 0);
	ci->ci_cpuid = cpu_id;
	ci->ci_pmap_kern_segtab = &pmap_kern_segtab;
	ci->ci_data.cpu_package_id = cpu_package_id;
	ci->ci_data.cpu_core_id = cpu_core_id;
	ci->ci_data.cpu_smt_id = cpu_smt_id;
	ci->ci_cpu_freq = cpu_info_store.ci_cpu_freq;
	ci->ci_cctr_freq = cpu_info_store.ci_cctr_freq;
	ci->ci_cycles_per_hz = cpu_info_store.ci_cycles_per_hz;
	ci->ci_divisor_delay = cpu_info_store.ci_divisor_delay;
	ci->ci_divisor_recip = cpu_info_store.ci_divisor_recip;
	ci->ci_cpuwatch_count = cpu_info_store.ci_cpuwatch_count;

	pmap_md_alloc_ephemeral_address_space(ci);

	mi_cpu_attach(ci);

	pmap_tlb_info_attach(ti, ci);

	return ci;
}
Example #13
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
	bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
	int flags)
{
	paddr_t curaddr, lastaddr, pa;
	vaddr_t vacookie;
	size_t sizecookie;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	sizecookie = size;

	/*
	 * Allocate pages from the VM system.
	 */
#if 0
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
#else
	/* XXX: ignores boundary, nsegs, etc. */
	//printf("dma allocation %lx %lx %d\n", alignment, boundary, nsegs);
	error = rumpcomp_pci_dmalloc(size, alignment, &pa, &vacookie);
#endif
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = pa;
	segs[curseg].ds_len = PAGE_SIZE;
	segs[curseg]._ds_vacookie = vacookie;
	segs[curseg]._ds_sizecookie = sizecookie;
	size -= PAGE_SIZE;
	pa += PAGE_SIZE;
	vacookie += PAGE_SIZE;

	for (; size;
	    pa += PAGE_SIZE, vacookie += PAGE_SIZE, size -= PAGE_SIZE) {
		curaddr = pa;
		if (curaddr == (lastaddr + PAGE_SIZE) &&
		    (lastaddr & boundary) == (curaddr & boundary)) {
			segs[curseg].ds_len += PAGE_SIZE;
		} else {
			curseg++;
			if (curseg >= nsegs)
				return EFBIG;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
			segs[curseg]._ds_vacookie = vacookie;
			segs[curseg]._ds_sizecookie = sizecookie;
		}
		lastaddr = curaddr;
	}
	*rsegs = curseg + 1;

	return (0);
}
Example #14
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

#ifdef DEBUG_DMA
	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif	/* DEBUG_DMA */

	/* Always round the size. */
	size = round_page(size);

	TAILQ_INIT(&mlist);
	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
	printf("alloc: page %lx\n", lastaddr);
#endif	/* DEBUG_DMA */
	m = TAILQ_NEXT(m, pageq);

	for (; m != TAILQ_END(&mlist); m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif	/* DIAGNOSTIC */
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", curaddr);
#endif	/* DEBUG_DMA */
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
Example #15
struct cpu_info *
cpu_info_alloc(struct pmap_tlb_info *ti, cpuid_t cpu_id, cpuid_t cpu_package_id,
	cpuid_t cpu_core_id, cpuid_t cpu_smt_id)
{
	vaddr_t cpu_info_offset = (vaddr_t)&cpu_info_store & PAGE_MASK; 
	struct pglist pglist;
	int error;

	/*
	 * Grab a page from the first 512MB (mappable by KSEG0) to use to store
	 * exception vectors and cpu_info for this cpu.
	 */
	error = uvm_pglistalloc(PAGE_SIZE,
	    0, MIPS_KSEG1_START - MIPS_KSEG0_START,
	    PAGE_SIZE, PAGE_SIZE, &pglist, 1, false);
	if (error)
		return NULL;

	const paddr_t pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
	const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa);
	struct cpu_info * const ci = (void *) (va + cpu_info_offset);
	memset((void *)va, 0, PAGE_SIZE);

	/*
	 * If we weren't passed a pmap_tlb_info to use, the caller wants us
	 * to take care of that for him.  Since we have room left over in the
	 * page we just allocated, just use a piece of that for it.
	 */
	if (ti == NULL) {
		if (cpu_info_offset >= sizeof(*ti)) {
			ti = (void *) va;
		} else {
			KASSERT(PAGE_SIZE - cpu_info_offset + sizeof(*ci) >= sizeof(*ti));
			ti = (struct pmap_tlb_info *)(va + PAGE_SIZE) - 1;
		}
		pmap_tlb_info_init(ti);
	}

	ci->ci_cpuid = cpu_id;
	ci->ci_data.cpu_package_id = cpu_package_id;
	ci->ci_data.cpu_core_id = cpu_core_id;
	ci->ci_data.cpu_smt_id = cpu_smt_id;
	ci->ci_cpu_freq = cpu_info_store.ci_cpu_freq;
	ci->ci_cctr_freq = cpu_info_store.ci_cctr_freq;
	ci->ci_cycles_per_hz = cpu_info_store.ci_cycles_per_hz;
	ci->ci_divisor_delay = cpu_info_store.ci_divisor_delay;
	ci->ci_divisor_recip = cpu_info_store.ci_divisor_recip;
	ci->ci_cpuwatch_count = cpu_info_store.ci_cpuwatch_count;

	/*
	 * Attach its TLB info (which must be direct-mapped)
	 */
#ifdef _LP64
	KASSERT(MIPS_KSEG0_P(ti) || MIPS_XKPHYS_P(ti));
#else
	KASSERT(MIPS_KSEG0_P(ti));
#endif

#ifndef _LP64
	/*
	 * If we have more memory than can be mapped by KSEG0, we need to
	 * allocate enough VA so we can map pages with the right color
	 * (to avoid cache alias problems).
	 */
	if (mips_avail_end > MIPS_KSEG1_START - MIPS_KSEG0_START) {
		ci->ci_pmap_dstbase = uvm_km_alloc(kernel_map,
		    uvmexp.ncolors * PAGE_SIZE, 0, UVM_KMF_VAONLY);
		KASSERT(ci->ci_pmap_dstbase);
		ci->ci_pmap_srcbase = uvm_km_alloc(kernel_map,
		    uvmexp.ncolors * PAGE_SIZE, 0, UVM_KMF_VAONLY);
		KASSERT(ci->ci_pmap_srcbase);
	}
#endif

	mi_cpu_attach(ci);

	pmap_tlb_info_attach(ti, ci);

	return ci;
}