Example #1
/*
 * Initialize the GDT.
 */
void
gdt_init(void)
{
	char *old_gdt;
	struct vm_page *pg;
	vaddr_t va;
	struct cpu_info *ci = &cpu_info_primary;

	gdt_next = 0;
	gdt_free = GNULL_SEL;

	old_gdt = gdtstore;
	gdtstore = (char *)uvm_km_valloc(kernel_map, MAXGDTSIZ);
	for (va = (vaddr_t)gdtstore; va < (vaddr_t)gdtstore + MAXGDTSIZ;
	    va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL) {
			panic("gdt_init: no pages");
		}
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
	}
	bcopy(old_gdt, gdtstore, DYNSEL_START);
	ci->ci_gdt = gdtstore;
	set_sys_segment(GDT_ADDR_SYS(gdtstore, GLDT_SEL), ldtstore,
	    LDT_SIZE - 1, SDT_SYSLDT, SEL_KPL, 0);

	gdt_init_cpu(ci);
}
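
A recurring pattern in these examples: uvm_km_valloc() reserves kernel virtual address space only, and the caller then wires physical pages behind it with uvm_pagealloc() + pmap_kenter_pa(). Note that gdt_init() above never checks the uvm_km_valloc() return value and issues no pmap_update(). A minimal hedged sketch of the same idiom with the allocation failure handled (kva_alloc_wired() is a hypothetical helper, not an existing kernel API):

vaddr_t
kva_alloc_wired(vsize_t size)
{
	struct vm_page *pg;
	vaddr_t va, sva;

	size = round_page(size);
	sva = uvm_km_valloc(kernel_map, size);	/* VA reservation only */
	if (sva == 0)
		return (0);			/* kernel VA exhausted */

	for (va = sva; va < sva + size; va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL)
			panic("kva_alloc_wired: no pages");
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
	}
	pmap_update(pmap_kernel());		/* make mappings visible */
	return (sva);
}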
Example #2
int
vme_map_r(const struct vme_range *r, paddr_t pa, psize_t len, int flags,
    vm_prot_t prot, vaddr_t *rva)
{
	vaddr_t ova, va;
	u_int pg;

	ova = va = uvm_km_valloc(kernel_map, len);
	if (va == 0)
		return ENOMEM;

	pa += r->vr_base;
	for (pg = atop(len); pg != 0; pg--) {
		pmap_kenter_pa(va, pa, prot);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	}
	if (flags & BUS_SPACE_MAP_CACHEABLE)
		pmap_cache_ctrl(ova, ova + len, CACHE_GLOBAL);
	pmap_update(pmap_kernel());

	*rva = ova;

	return 0;
}
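
No unmap path is shown for vme_map_r(). As a hedged sketch, the reverse of this uvm_km_valloc() + pmap_kenter_pa() pairing would look like the following; vme_unmap_r() is an assumed name, and len must be the same page-rounded length used at map time:

void
vme_unmap_r(vaddr_t va, psize_t len)
{
	pmap_kremove(va, len);			/* drop the wired translations */
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, len);	/* release the VA range */
}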
Example #3
int
obio_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	const struct pmap_devmap *pd;
	paddr_t startpa, endpa, pa, offset;
	vaddr_t va;
	pt_entry_t *pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return (0);
	}

	endpa = round_page(bpa + size);
	offset = bpa & PAGE_MASK;
	startpa = trunc_page(bpa);
		
	va = uvm_km_valloc(kernel_map, endpa - startpa);
	if (va == 0)
		return ENOMEM;

	*bshp = va + offset;

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		pte = vtopte(va);
		*pte &= ~L2_S_CACHE_MASK;
		PTE_SYNC(pte);
	}
	pmap_update(pmap_kernel());

	return (0);
}
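
The trunc_page()/round_page() arithmetic above is how sub-page bus addresses are honored: the mapping is built on page boundaries and the returned handle re-adds the in-page offset. A worked illustration, assuming 4 KB pages:

/*
 * Illustration only, assuming PAGE_SIZE == 4096:
 *
 *	bpa     = 0x10000a30, size = 0x100
 *	startpa = trunc_page(bpa)        = 0x10000000
 *	offset  = bpa & PAGE_MASK        = 0xa30
 *	endpa   = round_page(bpa + size) = 0x10001000
 *
 * One page is mapped, and *bshp = va + 0xa30, so the caller sees the
 * device registers at their original sub-page offset.
 */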
Example #4
/*
 * Initialize the GDT subsystem.  Called from autoconf().
 */
void
gdt_init(void)
{
	struct vm_page *pg;
	vaddr_t va;
	struct cpu_info *ci = &cpu_info_primary;

	gdt_next = NGDT;
	gdt_free = GNULL_SEL;

	gdt = (union descriptor *)uvm_km_valloc(kernel_map, MAXGDTSIZ);
	for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + MAXGDTSIZ;
	    va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL)
			panic("gdt_init: no pages");
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    PROT_READ | PROT_WRITE);
	}
	bcopy(bootstrap_gdt, gdt, NGDT * sizeof(union descriptor));
	ci->ci_gdt = gdt;
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
	    SDT_MEMRWA, SEL_KPL, 0, 0);

	gdt_init_cpu(ci);
}
Example #5
/*
 * Initialize the GDT subsystem.  Called from autoconf().
 */
void
gdt_init()
{
	size_t max_len, min_len;
	struct vm_page *pg;
	vaddr_t va;
	struct cpu_info *ci = &cpu_info_primary;

	simple_lock_init(&gdt_simplelock);
	lockinit(&gdt_lock_store, PZERO, "gdtlck", 0, 0);

	max_len = MAXGDTSIZ * sizeof(union descriptor);
	min_len = MINGDTSIZ * sizeof(union descriptor);

	gdt_size = MINGDTSIZ;
	gdt_count = NGDT;
	gdt_next = NGDT;
	gdt_free = GNULL_SEL;

	gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
	for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + min_len; va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL)
			panic("gdt_init: no pages");
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
	}
	bcopy(bootstrap_gdt, gdt, NGDT * sizeof(union descriptor));
	ci->ci_gdt = gdt;
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
	    SDT_MEMRWA, SEL_KPL, 0, 0);

	gdt_init_cpu(ci);
}
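
Note the asymmetry above: max_len bytes of VA are reserved but only min_len are backed by pages, so the GDT can later grow in place. A hedged sketch of the grow step, modeled on the historical i386 code and reusing the names from this example (not verbatim source):

void
gdt_grow(void)
{
	size_t old_len, new_len;
	struct vm_page *pg;
	vaddr_t va;

	old_len = gdt_size * sizeof(union descriptor);
	gdt_size <<= 1;				/* double the table */
	new_len = old_len << 1;

	/* wire pages behind the newly exposed half of the reservation */
	for (va = (vaddr_t)gdt + old_len; va < (vaddr_t)gdt + new_len;
	    va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL)
			panic("gdt_grow: no pages");
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
	}
}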
Example #6
/*
 * Map the DVMA mappings into the kernel pmap.
 * Check the flags to see whether we're streaming or coherent.
 */
int
viommu_dvmamem_map(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dma_segment_t *segs,
    int nsegs, size_t size, caddr_t *kvap, int flags)
{
	struct vm_page *m;
	vaddr_t va;
	bus_addr_t addr;
	struct pglist *mlist;
	bus_addr_t cbit = 0;

	DPRINTF(IDB_BUSDMA, ("viommu_dvmamem_map: segp %p nsegs %d size %lx\n",
	    segs, nsegs, size));

	/*
	 * Allocate some space in the kernel map, and then map these pages
	 * into this space.
	 */
	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	/* 
	 * digest flags:
	 */
#if 0
	if (flags & BUS_DMA_COHERENT)	/* Disable vcache */
		cbit |= PMAP_NVC;
#endif
	if (flags & BUS_DMA_NOCACHE)	/* side effects */
		cbit |= PMAP_NC;

	/*
	 * Now take this and map it into the CPU.
	 */
	mlist = segs[0]._ds_mlist;
	TAILQ_FOREACH(m, mlist, pageq) {
#ifdef DIAGNOSTIC
		if (size == 0)
			panic("iommu_dvmamem_map: size botch");
#endif
		addr = VM_PAGE_TO_PHYS(m);
		DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: "
		    "mapping va %lx at %llx\n", va,
		    (unsigned long long)addr | cbit));
		pmap_enter(pmap_kernel(), va, addr | cbit,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return (0);
}
Example #7
int
obio_iomem_add_mapping(bus_addr_t bpa, bus_size_t size, int type,
    bus_space_handle_t *bshp)
{
	u_long pa, endpa;
	vaddr_t va;
	pt_entry_t *pte;
	unsigned int m = 0;
	int io_type = type & ~OBIO_IOMEM_PCMCIA_8BIT;

	pa = trunc_page(bpa);
	endpa = round_page(bpa + size);

#ifdef DIAGNOSTIC
	if (endpa <= pa)
		panic("obio_iomem_add_mapping: overflow");
#endif

	va = uvm_km_valloc(kernel_map, endpa - pa);
	if (va == 0)
		return (ENOMEM);

	*bshp = (bus_space_handle_t)(va + (bpa & PGOFSET));

#define MODE(t, s)							\
	((t) & OBIO_IOMEM_PCMCIA_8BIT) ?				\
		_PG_PCMCIA_ ## s ## 8 :					\
		_PG_PCMCIA_ ## s ## 16
	switch (io_type) {
	default:
		panic("unknown pcmcia space.");
		/* NOTREACHED */
	case OBIO_IOMEM_PCMCIA_IO:
		m = MODE(type, IO);
		break;
	case OBIO_IOMEM_PCMCIA_MEM:
		m = MODE(type, MEM);
		break;
	case OBIO_IOMEM_PCMCIA_ATT:
		m = MODE(type, ATTR);
		break;
	}
#undef MODE

	for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
		pte = __pmap_kpte_lookup(va);
		KDASSERT(pte);
		*pte |= m;  /* PTEA PCMCIA assistant bit */
		sh_tlb_update(0, va, *pte);
	}

	return (0);
}
Example #8
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	paddr_t pa;
	bus_addr_t addr;
	int curseg, error;

	if (nsegs == 1) {
		pa = (*t->_device_to_pa)(segs[0].ds_addr);
		if (flags & BUS_DMA_COHERENT)
			*kvap = (caddr_t)PHYS_TO_UNCACHED(pa);
		else
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_CACHED);
		return (0);
	}

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_dmamem_map: size botch");
			pa = (*t->_device_to_pa)(addr);
			error = pmap_enter(pmap_kernel(), va, pa,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
			    VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
			if (error) {
				pmap_update(pmap_kernel());
				uvm_km_free(kernel_map, sva, ssize);
				return (error);
			}

			if (flags & BUS_DMA_COHERENT)
				pmap_page_cache(PHYS_TO_VM_PAGE(pa),
				    PV_UNCACHED);
		}
		pmap_update(pmap_kernel());
	}

	return (0);
}
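
From a driver's point of view, _dmamem_map() sits behind bus_dmamem_map(). A hedged usage sketch of the usual alloc/map pairing and its unwind order; the softc layout (sc_dmat, sc_kva, sc_seg) and function name are illustrative:

int
example_dma_setup(struct example_softc *sc, bus_size_t size)
{
	bus_dma_segment_t seg;
	int rseg, error;
	caddr_t kva;

	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error)
		return (error);
	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
	    &kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		/* unwind in reverse order */
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		return (error);
	}
	sc->sc_kva = kva;
	sc->sc_seg = seg;
	return (0);
}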
Example #9
/*
 * Allocate shadow GDT for a slave cpu.
 */
void
gdt_alloc_cpu(struct cpu_info *ci)
{
	int max_len = MAXGDTSIZ * sizeof(union descriptor);
	int min_len = MINGDTSIZ * sizeof(union descriptor);

	ci->ci_gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
	uvm_map_pageable(kernel_map, (vaddr_t)ci->ci_gdt,
	    (vaddr_t)ci->ci_gdt + min_len, FALSE, FALSE);
	bzero(ci->ci_gdt, min_len);
	bcopy(gdt, ci->ci_gdt, gdt_count * sizeof(union descriptor));
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
	    SDT_MEMRWA, SEL_KPL, 0, 0);
}
Example #10
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	bus_addr_t addr;
	int curseg, pmapflags = 0, error;

	if (nsegs == 1 && (flags & BUS_DMA_NOCACHE) == 0) {
		*kvap = (caddr_t)PMAP_DIRECT_MAP(segs[0].ds_addr);
		return (0);
	}

	if (flags & BUS_DMA_NOCACHE)
		pmapflags |= PMAP_NOCACHE;

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			error = pmap_enter(pmap_kernel(), va, addr | pmapflags,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
			    VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
			if (error) {
				pmap_update(pmap_kernel());
				uvm_km_free(kernel_map, sva, ssize);
				return (error);
			}
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
Example #11
/*
 * Initialize the GDT subsystem.  Called from autoconf().
 */
void
gdt_init()
{
	size_t max_len, min_len;
	struct region_descriptor region;

	max_len = MAXGDTSIZ * sizeof(union descriptor);
	min_len = MINGDTSIZ * sizeof(union descriptor);
	gdt_size = MINGDTSIZ;

	dynamic_gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
	uvm_map_pageable(kernel_map, (vaddr_t)dynamic_gdt,
	    (vaddr_t)dynamic_gdt + min_len, FALSE, FALSE);
	bcopy(gdt, dynamic_gdt, NGDT * sizeof(union descriptor));

	setregion(&region, dynamic_gdt, max_len - 1);
	lgdt(&region);
}
Example #12
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	bus_addr_t addr;
	int curseg, error;

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			error = pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
			    VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
			if (error) {
				/*
				 * Clean up after ourselves.
				 * XXX uvm_wait on WAITOK
				 */
				pmap_update(pmap_kernel());
				uvm_km_free(kernel_map, sva, ssize);
				return (error);
			}
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
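
The XXX note in the error path suggests sleeping for pages when the caller can tolerate it. A hedged sketch of one way to honor that, replacing the single pmap_enter() call in the inner loop (illustrative only; note that BUS_DMA_WAITOK is conventionally 0, so the test must be against BUS_DMA_NOWAIT):

			for (;;) {
				error = pmap_enter(pmap_kernel(), va, addr,
				    VM_PROT_READ | VM_PROT_WRITE,
				    VM_PROT_READ | VM_PROT_WRITE |
				    PMAP_WIRED | PMAP_CANFAIL);
				if (error == 0)
					break;
				if (error == ENOMEM &&
				    (flags & BUS_DMA_NOWAIT) == 0) {
					uvm_wait("dmamap"); /* wait for pages */
					continue;
				}
				pmap_update(pmap_kernel());
				uvm_km_free(kernel_map, sva, ssize);
				return (error);
			}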
Example #13
int
armv7_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
	      int flag, bus_space_handle_t *bshp)
{
	u_long startpa, endpa, pa;
	vaddr_t va;
	pt_entry_t *pte;

	if ((u_long)bpa > (u_long)KERNEL_BASE) {
		/* Some IO registers (ex. UART ports for console)
		   are mapped to fixed address by board specific
		   routine. */
		*bshp = bpa;
		return(0);
	}

	startpa = trunc_page(bpa);
	endpa = round_page(bpa + size);

	/* XXX use extent manager to check duplicate mapping */

	va = uvm_km_valloc(kernel_map, endpa - startpa);
	if (va == 0)
		return(ENOMEM);

	*bshp = (bus_space_handle_t)(va + (bpa - startpa));

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0) {
			pte = vtopte(va);
			*pte &= ~L2_S_CACHE_MASK;
			PTE_SYNC(pte);
			/* XXX: pmap_kenter_pa() also does PTE_SYNC(). a bit of
			 *      waste.
			 */
		}
	}
	pmap_update(pmap_kernel());

	return(0);
}
Example #14
/*
 * Allocate shadow GDT for a slave cpu.
 */
void
gdt_alloc_cpu(struct cpu_info *ci)
{
	struct vm_page *pg;
	vaddr_t va;

	ci->ci_gdt = (char *)uvm_km_valloc(kernel_map, MAXGDTSIZ);
	uvm_map_pageable(kernel_map, (vaddr_t)ci->ci_gdt,
            (vaddr_t)ci->ci_gdt + MAXGDTSIZ, FALSE, FALSE);
	for (va = (vaddr_t)ci->ci_gdt; va < (vaddr_t)ci->ci_gdt + MAXGDTSIZ;
	    va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL)
			panic("gdt_init: no pages");
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
	}
	bzero(ci->ci_gdt, MAXGDTSIZ);
	bcopy(gdtstore, ci->ci_gdt, MAXGDTSIZ);
}
Example #15
/*
 * Allocate kva for pipe circular buffer, the space is pageable.
 * This routine will 'realloc' the size of a pipe safely, if it fails
 * it will retain the old buffer.
 * If it fails it will return ENOMEM.
 */
int
pipespace(struct pipe *cpipe, u_int size)
{
	caddr_t buffer;

	buffer = (caddr_t)uvm_km_valloc(kernel_map, size);
	if (buffer == NULL) {
		return (ENOMEM);
	}

	/* free old resources if we are resizing */
	pipe_free_kmem(cpipe);
	cpipe->pipe_buffer.buffer = buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;

	amountpipekva += cpipe->pipe_buffer.size;

	return (0);
}
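
pipe_free_kmem() is referenced but not shown. As a hedged sketch, the counterpart would release the KVA and adjust the global accounting; field names follow pipespace(), and the real function may differ:

void
pipe_free_kmem(struct pipe *cpipe)
{
	if (cpipe->pipe_buffer.buffer != NULL) {
		amountpipekva -= cpipe->pipe_buffer.size;
		uvm_km_free(kernel_map, (vaddr_t)cpipe->pipe_buffer.buffer,
		    cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
	}
}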
Example #16
/*
 * Map a chunk of memory read-only and return an appropriately
 * const'ed pointer.
 */
const void *
mpbios_map(paddr_t pa, int len, struct mp_map *handle)
{
	paddr_t pgpa = trunc_page(pa);
	paddr_t endpa = round_page(pa + len);
	vaddr_t va = uvm_km_valloc(kernel_map, endpa - pgpa);
	vaddr_t retva = va + (pa & PGOFSET);

	handle->pa = pa;
	handle->pg = pgpa;
	handle->psize = len;
	handle->baseva = va;
	handle->vsize = endpa - pgpa;

	do {
		pmap_kenter_pa(va, pgpa, VM_PROT_READ);
		va += PAGE_SIZE;
		pgpa += PAGE_SIZE;
	} while (pgpa < endpa);

	return ((const void *)retva);
}
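
The handle records the page-aligned base and size precisely so that teardown needs no rounding (note also that this example never checks for a failed uvm_km_valloc()). A hedged sketch of the matching unmap, assumed for symmetry:

void
mpbios_unmap(struct mp_map *handle)
{
	pmap_kremove(handle->baseva, handle->vsize);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, handle->baseva, handle->vsize);
}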
Example #17
/*
 * Allocate shadow GDT for a slave cpu.
 */
void
gdt_alloc_cpu(struct cpu_info *ci)
{
	struct vm_page *pg;
	vaddr_t va;

	ci->ci_gdt = (union descriptor *)uvm_km_valloc(kernel_map, MAXGDTSIZ);
	uvm_map_pageable(kernel_map, (vaddr_t)ci->ci_gdt,
	    (vaddr_t)ci->ci_gdt + MAXGDTSIZ, FALSE, FALSE);
	for (va = (vaddr_t)ci->ci_gdt; va < (vaddr_t)ci->ci_gdt + MAXGDTSIZ;
	    va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL)
			panic("gdt_init: no pages");
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    PROT_READ | PROT_WRITE);
	}
	bzero(ci->ci_gdt, MAXGDTSIZ);
	bcopy(gdt, ci->ci_gdt, MAXGDTSIZ);
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
	    SDT_MEMRWA, SEL_KPL, 0, 0);
}
Example #18
int
x86_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	paddr_t pa, endpa;
	vaddr_t va;
	bus_size_t map_size;
	int pmap_flags = PMAP_NOCACHE;

	pa = trunc_page(bpa);
	endpa = round_page(bpa + size);

#ifdef DIAGNOSTIC
	if (endpa <= pa && endpa != 0)
		panic("bus_mem_add_mapping: overflow");
#endif

	map_size = endpa - pa;

	va = uvm_km_valloc(kernel_map, map_size);
	if (va == 0)
		return (ENOMEM);

	*bshp = (bus_space_handle_t)(va + (bpa & PGOFSET));

	if (flags & BUS_SPACE_MAP_CACHEABLE)
		pmap_flags = 0;
	else if (flags & BUS_SPACE_MAP_PREFETCHABLE)
		pmap_flags = PMAP_WC;

	for (; map_size > 0;
	    pa += PAGE_SIZE, va += PAGE_SIZE, map_size -= PAGE_SIZE)
		pmap_kenter_pa(va, pa | pmap_flags,
		    VM_PROT_READ | VM_PROT_WRITE);
	pmap_update(pmap_kernel());

	return 0;
}
Example #19
int
acpi_map(paddr_t pa, size_t len, struct acpi_mem_map *handle)
{
	paddr_t pgpa = trunc_page(pa);
	paddr_t endpa = round_page(pa + len);
	vaddr_t va = uvm_km_valloc(kernel_map, endpa - pgpa);

	if (va == 0)
		return (ENOMEM);

	handle->baseva = va;
	handle->va = (u_int8_t *)(va + (pa & PGOFSET));
	handle->vsize = endpa - pgpa;
	handle->pa = pa;

	do {
		pmap_kenter_pa(va, pgpa, VM_PROT_READ | VM_PROT_WRITE);
		va += NBPG;
		pgpa += NBPG;
	} while (pgpa < endpa);

	return 0;
}
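
acpi_map() stores the same baseva/vsize pair, so its teardown follows the identical pattern. A hedged sketch of the assumed counterpart:

void
acpi_unmap(struct acpi_mem_map *handle)
{
	pmap_kremove(handle->baseva, handle->vsize);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, handle->baseva, handle->vsize);
}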
Example #20
/*
 * Machine-dependent startup code
 */
void
cpu_startup(void)
{
	vaddr_t v;
	vsize_t sz;
	int x;
	vaddr_t minaddr, maxaddr;

	/*
	 * Initialize error message buffer (at end of core).
	 */
	msgbuf_vaddr = uvm_km_valloc(kernel_map, x86_round_page(MSGBUFSIZE));
	if (msgbuf_vaddr == 0)
		panic("failed to valloc msgbuf_vaddr");

	/* msgbuf_paddr was init'd in pmap */
	for (x = 0; x < btoc(MSGBUFSIZE); x++)
		pmap_kenter_pa((vaddr_t)msgbuf_vaddr + x * PAGE_SIZE,
		    msgbuf_paddr + x * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);
	pmap_update(pmap_kernel());

	initmsgbuf((caddr_t)msgbuf_vaddr, round_page(MSGBUFSIZE));

	printf("%s", version);

	printf("real mem = %u (%uK)\n", ctob(physmem), ctob(physmem)/1024);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	sz = allocsys(0);
	if ((v = uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v) - v != sz)
		panic("startup: table size inconsistency");

	/*
	 * Now allocate buffers proper. They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	setup_buffers(&maxaddr);

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	minaddr = vm_map_min(kernel_map);
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   VM_PHYS_SIZE, 0, FALSE, NULL);

	printf("avail mem = %lu (%luK)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free)/1024);
	printf("using %u buffers containing %u bytes (%uK) of memory\n",
	    nbuf, bufpages * PAGE_SIZE, bufpages * PAGE_SIZE / 1024);

	bufinit();

	if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
		user_config();
#else
		printf("kernel does not support - c; continuing..\n");
#endif
	}

	/* Safe for i/o port / memory space allocation to use malloc now. */
	x86_bus_space_mallocok();
}
Example #21
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	paddr_t pa;
	bus_addr_t addr;
	int curseg, error, pmap_flags;

#if defined(TGT_INDIGO2)
	/*
	 * On ECC MC systems, which do not allow uncached writes to memory
	 * during regular operation, fail requests for uncached (coherent)
	 * memory, unless the caller tells us it is aware of this and will
	 * do the right thing, by passing BUS_DMA_BUS1 as well.
	 */
	if ((flags & (BUS_DMA_COHERENT | BUS_DMA_BUS1)) == BUS_DMA_COHERENT &&
	    ip22_ecc)
		return EINVAL;
#endif

#ifdef TGT_COHERENT
	/* coherent mappings do not need to be uncached on these platforms */
	flags &= ~BUS_DMA_COHERENT;
#endif

	if (nsegs == 1) {
		pa = (*t->_device_to_pa)(segs[0].ds_addr);
		if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_NC);
		else
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_CACHED);
		return (0);
	}

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	pmap_flags = PMAP_WIRED | PMAP_CANFAIL;
	if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
		pmap_flags |= PMAP_NOCACHE;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
#ifdef DIAGNOSTIC
			if (size == 0)
				panic("_dmamem_map: size botch");
#endif
			pa = (*t->_device_to_pa)(addr);
			error = pmap_enter(pmap_kernel(), va, pa,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
			    VM_PROT_WRITE | pmap_flags);
			if (error) {
				pmap_update(pmap_kernel());
				uvm_km_free(kernel_map, sva, ssize);
				return (error);
			}

			/*
			 * This is redundant with what pmap_enter() did
			 * above, but will take care of forcing other
			 * mappings of the same page (if any) to be
			 * uncached.
			 * If there are no multiple mappings of that
			 * page, this amounts to a noop.
			 */
			if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
				pmap_page_cache(PHYS_TO_VM_PAGE(pa),
				    PV_UNCACHED);
		}
		pmap_update(pmap_kernel());
	}

	return (0);
}
Example #22
0
void
le_fwio_attach(struct device *parent, struct device *self, void *aux)
{
	struct fwio_attach_args *faa = (struct fwio_attach_args *)aux;
	struct le_fwio_softc *lsc = (struct le_fwio_softc *)self;
	struct lance_softc *sc = &lsc->sc_am7990.lsc;
	unsigned int vec;
	uint32_t *esar;
	int i;

	vec = faa->faa_vecbase + FBIC_DEVIRQ1 * 4;
	printf(" vec %d", vec);

	/*
	 * Map registers.
	 */

	lsc->sc_rdp = (volatile uint16_t *)
	    vax_map_physmem(faa->faa_base + FWIO_LANCE_REG_OFFSET, 1);
	lsc->sc_rap = lsc->sc_rdp + 2;

	/*
	 * Register access functions.
	 */

	sc->sc_rdcsr = le_fwio_rdcsr;
	sc->sc_wrcsr = le_fwio_wrcsr;

	/*
	 * Map buffers.
	 */

	sc->sc_mem = (void *)uvm_km_valloc(kernel_map, FWIO_LANCE_BUF_SIZE);
	if (sc->sc_mem == NULL) {
		vax_unmap_physmem(faa->faa_base + FWIO_LANCE_REG_OFFSET, 1);
		printf(": can't map buffers\n");
		return;
	}

	ioaccess((vaddr_t)sc->sc_mem, faa->faa_base +
	    FWIO_LANCE_BUF_OFFSET, FWIO_LANCE_BUF_SIZE >> VAX_PGSHIFT);

	sc->sc_addr = FWIO_LANCE_BUF_OFFSET;
	sc->sc_memsize = FWIO_LANCE_BUF_SIZE;
	sc->sc_conf3 = 0;

	sc->sc_copytodesc = lance_copytobuf_contig;
	sc->sc_copyfromdesc = lance_copyfrombuf_contig;
	sc->sc_copytobuf = lance_copytobuf_contig;
	sc->sc_copyfrombuf = lance_copyfrombuf_contig;
	sc->sc_zerobuf = lance_zerobuf_contig;

	/*
	 * Get the Ethernet address from the Station Address ROM.
	 */

	esar = (uint32_t *)vax_map_physmem(faa->faa_base + FWIO_ESAR_OFFSET, 1);
	for (i = 0; i < 6; i++)
		sc->sc_arpcom.ac_enaddr[i] =
		    (esar[i] & FWIO_ESAR_MASK) >> FWIO_ESAR_SHIFT;
	vax_unmap_physmem((vaddr_t)esar, 1);

	/*
	 * Register interrupt handler.
	 */

	if (mbus_intr_establish(vec, IPL_NET, le_fwio_intr, sc,
	    self->dv_xname) != 0) {
		vax_unmap_physmem(faa->faa_base + FWIO_LANCE_REG_OFFSET, 1);
		uvm_km_free(kernel_map, (vaddr_t)sc->sc_mem,
		    FWIO_LANCE_BUF_SIZE);
		printf(": can't establish interrupt\n");
		return;
	}

	/*
	 * Complete attachment.
	 */

	am7990_config(&lsc->sc_am7990);
}
Example #23
/*
 * Machine-dependent startup code
 */
void
cpu_startup()
{
	caddr_t v, v2;
	unsigned long sz;
	int x;
	vaddr_t minaddr, maxaddr;
	vsize_t size;
	char buf[160];				/* about 2 lines */
	char pbuf[9];

	/*
	 * Initialize error message buffer (at end of core).
	 */
	msgbuf_vaddr = uvm_km_valloc(kernel_map, x86_64_round_page(MSGBUFSIZE));
	if (msgbuf_vaddr == 0)
		panic("failed to valloc msgbuf_vaddr");

	/* msgbuf_paddr was init'd in pmap */
	for (x = 0; x < btoc(MSGBUFSIZE); x++)
		pmap_kenter_pa((vaddr_t)msgbuf_vaddr + x * PAGE_SIZE,
		    msgbuf_paddr + x * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);

	initmsgbuf((caddr_t)msgbuf_vaddr, round_page(MSGBUFSIZE));

	printf("%s", version);

	printf("cpu0: %s", cpu_model);
	if (cpu_tsc_freq != 0)
		printf(", %ld.%02ld MHz", (cpu_tsc_freq + 4999) / 1000000,
		    ((cpu_tsc_freq + 4999) / 10000) % 100);
	printf("\n");

	if ((cpu_feature & CPUID_MASK1) != 0) {
		bitmask_snprintf(cpu_feature, CPUID_FLAGS1,
		    buf, sizeof(buf));
		printf("cpu0: features %s\n", buf);
	}
	if ((cpu_feature & CPUID_MASK2) != 0) {
		bitmask_snprintf(cpu_feature, CPUID_FLAGS2,
		    buf, sizeof(buf));
		printf("cpu0: features %s\n", buf);
	}

	if (cpuid_level >= 3 && ((cpu_feature & CPUID_PN) != 0)) {
		printf("cpu0: serial number %04X-%04X-%04X-%04X-%04X-%04X\n",
			cpu_serial[0] / 65536, cpu_serial[0] % 65536,
			cpu_serial[1] / 65536, cpu_serial[1] % 65536,
			cpu_serial[2] / 65536, cpu_serial[2] % 65536);
	}

	format_bytes(pbuf, sizeof(pbuf), ptoa(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	sz = (unsigned long)allocsys(NULL, NULL);
	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	v2 = allocsys(v, NULL);
	if ((v2 - v) != sz)
		panic("startup: table size inconsistency");

	/*
	 * Allocate virtual address space for the buffers.  The area
	 * is not managed by the VM system.
	 */
	size = MAXBSIZE * nbuf;
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
				UVM_ADV_NORMAL, 0)) != 0)
		panic("cpu_startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;
	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		/* don't want to alloc more physical mem than needed */
		bufpages = btoc(MAXBSIZE) * nbuf;
	}

	/*
	 * XXX We defer allocation of physical pages for buffers until
	 * XXX after autoconfiguration has run.  We must do this because
	 * XXX on system with large amounts of memory or with large
	 * XXX user-configured buffer caches, the buffer cache will eat
	 * XXX up all of the lower 16M of RAM.  This prevents ISA DMA
	 * XXX maps from allocating bounce pages.
	 *
	 * XXX Note that nothing can use buffer cache buffers until after
	 * XXX autoconfiguration completes!!
	 *
	 * XXX This is a hack, and needs to be replaced with a better
	 * XXX solution!  [email protected], December 6, 1997
	 */

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    nmbclusters * mclbytes, VM_MAP_INTRSAFE, FALSE, NULL);

	/*
	 * XXX Buffer cache pages haven't yet been allocated, so
	 * XXX we need to account for those pages when printing
	 * XXX the amount of free memory.
	 */
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free - bufpages));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
	printf("using %d buffers containing %s of memory\n", nbuf, pbuf);

	/* Safe for i/o port / memory space allocation to use malloc now. */
	x86_64_bus_space_mallocok();
}
Example #24
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	paddr_t pa;
	bus_addr_t addr;
	int curseg, error, pmap_flags;

	if (nsegs == 1) {
		pa = (*t->_device_to_pa)(segs[0].ds_addr);
		if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_NC);
		else
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_CACHED);
		return (0);
	}

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	pmap_flags = PMAP_WIRED | PMAP_CANFAIL;
	if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
		pmap_flags |= PMAP_NOCACHE;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_dmamem_map: size botch");
			pa = (*t->_device_to_pa)(addr);
			error = pmap_enter(pmap_kernel(), va, pa,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
			    VM_PROT_WRITE | pmap_flags);
			if (error) {
				pmap_update(pmap_kernel());
				uvm_km_free(kernel_map, sva, ssize);
				return (error);
			}

			/*
			 * This is redundant with what pmap_enter() did 
			 * above, but will take care of forcing other
			 * mappings of the same page (if any) to be
			 * uncached. 
			 * If there are no multiple mappings of that 
			 * page, this amounts to a noop.
			 */
			if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE)) 
				pmap_page_cache(PHYS_TO_VM_PAGE(pa),
				    PV_UNCACHED);
		}
		pmap_update(pmap_kernel());
	}

	return (0);
}
Example #25
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	pt_entry_t *ptep/*, pte*/;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", addr, va);
#endif	/* DEBUG_DMA */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
			/*
			 * If the memory must remain coherent with the
			 * cache then we must make the memory uncacheable
			 * in order to maintain virtual cache coherency.
			 * We must also guarantee the cache does not already
			 * contain the virtual addresses we are making
			 * uncacheable.
			 */
			if (flags & BUS_DMA_COHERENT) {
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
				cpu_drain_writebuf();
				ptep = vtopte(va);
				*ptep &= ~L2_S_CACHE_MASK;
				PTE_SYNC(ptep);
				tlb_flush();
			}
#ifdef DEBUG_DMA
			ptep = vtopte(va);
			printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif	/* DEBUG_DMA */
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return (0);
}
Example #26
/*
 * Build a component buffer header.
 */
long
ccdbuffer(struct ccd_softc *cs, struct buf *bp, daddr_t bn, caddr_t addr,
    long bcount, struct ccdbuf **cbpp, int old_io)
{
	struct ccdcinfo *ci, *ci2 = NULL;
	struct ccdbuf *cbp;
	daddr_t cbn, cboff, sblk;
	int ccdisk, ccdisk2, off;
	long old_bcount, cnt;
	struct ccdiinfo *ii;
	struct buf *nbp;

	CCD_DPRINTF(CCDB_IO, ("ccdbuffer(%p, %p, %d, %p, %ld, %p)\n",
	    cs, bp, bn, addr, bcount, cbpp));

	/*
	 * Determine which component bn falls in.
	 */
	cbn = bn;
	cboff = 0;

	if (cs->sc_ileave == 0) {
		/*
		 * Serially concatenated
		 */
		sblk = 0;
		for (ccdisk = 0, ci = &cs->sc_cinfo[ccdisk];
		    cbn >= sblk + ci->ci_size;
		    ccdisk++, ci = &cs->sc_cinfo[ccdisk])
			sblk += ci->ci_size;
		cbn -= sblk;
	} else {
		/*
		 * Interleaved
		 */
		cboff = cbn % cs->sc_ileave;
		cbn /= cs->sc_ileave;
		for (ii = cs->sc_itable; ii->ii_ndisk; ii++)
			if (ii->ii_startblk > cbn)
				break;
		ii--;
		off = cbn - ii->ii_startblk;
		if (ii->ii_ndisk == 1) {
			ccdisk = ii->ii_index[0];
			cbn = ii->ii_startoff + off;
		} else {
			ccdisk = ii->ii_index[off % ii->ii_ndisk];
			cbn = ii->ii_startoff + off / ii->ii_ndisk;
		}
		if (cs->sc_cflags & CCDF_MIRROR) {
			/* Mirrored data */
			ccdisk2 = ccdisk + ii->ii_ndisk;
			ci2 = &cs->sc_cinfo[ccdisk2];
			/* spread the read over both parts */
			if (bp->b_flags & B_READ &&
			    bcount > bp->b_bcount / 2 &&
			    (!(ci2->ci_flags & CCIF_FAILED) ||
			      ci->ci_flags & CCIF_FAILED))
				ccdisk = ccdisk2;
		}
		cbn *= cs->sc_ileave;
		ci = &cs->sc_cinfo[ccdisk];
		CCD_DPRINTF(CCDB_IO, ("ccdisk %d cbn %d ci %p ci2 %p\n",
		    ccdisk, cbn, ci, ci2));
	}

	/* Limit the operation at next component border */
	if (cs->sc_ileave == 0)
		cnt = dbtob(ci->ci_size - cbn);
	else
		cnt = dbtob(cs->sc_ileave - cboff);
	if (cnt < bcount)
		bcount = cnt;

	if (old_io || cbpp[ccdisk] == NULL) {
		/*
		 * Setup new component buffer.
		 */
		cbp = cbpp[old_io ? 0 : ccdisk] = getccdbuf();
		cbp->cb_flags = old_io ? CBF_OLD : 0;
		nbp = &cbp->cb_buf;
		nbp->b_flags = bp->b_flags | B_CALL;
		nbp->b_iodone = ccdiodone;
		nbp->b_proc = bp->b_proc;
		nbp->b_dev = ci->ci_dev;		/* XXX */
		nbp->b_blkno = cbn + cboff;
		nbp->b_vp = ci->ci_vp;
		nbp->b_bcount = bcount;
		LIST_INIT(&nbp->b_dep);

		/*
		 * context for ccdiodone
		 */
		cbp->cb_obp = bp;
		cbp->cb_sc = cs;
		cbp->cb_comp = ccdisk;

		/* Deal with the different algorithms */
		if (old_io)
			nbp->b_data = addr;
		else {
			do {
				nbp->b_data = (caddr_t) uvm_km_valloc(ccdmap,
				    bp->b_bcount);

				/*
				 * XXX Instead of sleeping, we might revert
				 * XXX to old I/O policy for this buffer set.
				 */
				if (nbp->b_data == NULL) {
					ccd_need_kvm++;
					tsleep(ccdmap, PRIBIO, "ccdbuffer", 0);
				}
			} while (nbp->b_data == NULL);
			cbp->cb_sgcnt = 0;
			old_bcount = 0;
		}

		/*
		 * Mirrors have an additional write operation that is nearly
		 * identical to the first.
		 */
		if ((cs->sc_cflags & CCDF_MIRROR) &&
		    !(ci2->ci_flags & CCIF_FAILED) &&
		    ((cbp->cb_buf.b_flags & B_READ) == 0)) {
			struct ccdbuf *cbp2;
			cbpp[old_io? 1 : ccdisk2] = cbp2 = getccdbuf();
			*cbp2 = *cbp;
			cbp2->cb_flags = CBF_MIRROR | (old_io ? CBF_OLD : 0);
			cbp2->cb_buf.b_dev = ci2->ci_dev;	/* XXX */
			cbp2->cb_buf.b_vp = ci2->ci_vp;
			LIST_INIT(&cbp2->cb_buf.b_dep);
			cbp2->cb_comp = ccdisk2;
			cbp2->cb_dep = cbp;
			cbp->cb_dep = cbp2;
		}
	} else {
		/*
		 * Continue on an already started component buffer
		 */
		cbp = cbpp[ccdisk];
		nbp = &cbp->cb_buf;

		/*
		 * Map the new pages at the end of the buffer.
		 */
		old_bcount = nbp->b_bcount;
		nbp->b_bcount += bcount;
	}

	if (!old_io) {
		CCD_DPRINTF(CCDB_IO, ("ccdbuffer: sg %d (%p/%x) off %x\n",
		    cbp->cb_sgcnt, addr, bcount, old_bcount));

		pagemove(addr, nbp->b_data + old_bcount, round_page(bcount));
		nbp->b_bufsize += round_page(bcount);
		cbp->cb_sg[cbp->cb_sgcnt].cs_sgaddr = addr;
		cbp->cb_sg[cbp->cb_sgcnt].cs_sglen = bcount;
		cbp->cb_sgcnt++;
	}

	CCD_DPRINTF(CCDB_IO, (" dev %x(u%d): cbp %p bn %d addr %p bcnt %ld\n",
	    ci->ci_dev, ci-cs->sc_cinfo, cbp, bp->b_blkno,
	    bp->b_data, bp->b_bcount));

	return (bcount);
}
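
The tsleep() loop above parks the caller on the ccdmap channel when component-buffer KVA runs out, so the completion path that returns KVA to ccdmap must issue the matching wakeup. A hedged sketch of that handshake; its placement in the I/O-done path is illustrative:

	/* after uvm_km_free()ing component buffer KVA back to ccdmap */
	if (ccd_need_kvm) {
		ccd_need_kvm = 0;
		wakeup(ccdmap);
	}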