Example 1
File: n5.c  Project: aksr/heirloom
void
casehypp(void)
{
	float	t;

	if (skip(0))
		hypp = hypp2 = hypp3 = 0;
	else {
		t = atop();
		if (!nonumb)
			hypp = t;
		if (skip(0))
			hypp2 = hypp3 = 0;
		else {
			t = atop();
			if (!nonumb)
				hypp2 = t;
			if (skip(0))
				hypp3 = 0;
			else {
				t = atop();
				if (!nonumb)
					hypp3 = t;
			}
		}
	}
}
Example 2
int
ia64_physmem_fini(void)
{
	vm_paddr_t base, lim, size;
	u_int idx;

	idx = 0;
	while (phys_avail[idx + 1] != 0) {
		base = round_page(phys_avail[idx]);
		lim = trunc_page(phys_avail[idx + 1]);
		if (base < lim) {
			phys_avail[idx] = base;
			phys_avail[idx + 1] = lim;
			size = lim - base;
			physmem += atop(size);
			paddr_max = lim;
			idx += 2;
		} else
			ia64_physmem_remove(idx);
	}

	/*
	 * Round realmem to a multiple of 128MB. Hopefully that compensates
	 * for any loss of DRAM that isn't accounted for in the memory map.
	 * I'm thinking legacy BIOS or VGA here. In any case, it's ok if
	 * we got it wrong, because we don't actually use realmem. It's
	 * just for show...
	 */
	size = 1U << 27;
	realmem = (realmem + size - 1) & ~(size - 1);
	realmem = atop(realmem);
	return (0);
}
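The mask expression above is the usual round-up-to-a-power-of-two idiom: (x + size - 1) & ~(size - 1) rounds x up to the next multiple of size when size is a power of two. A minimal standalone sketch with an illustrative input value (the 500 MB figure is not taken from the example):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t size = 1ULL << 27;		/* 128 MB, a power of two */
	uint64_t realmem = 500ULL << 20;	/* illustrative input: 500 MB */

	/* round realmem up to the next multiple of size */
	realmem = (realmem + size - 1) & ~(size - 1);
	printf("%llu MB\n", (unsigned long long)(realmem >> 20));	/* prints "512 MB" */
	return 0;
}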
Example 3
void
dumpconf(void)
{
	int nblks;

	if (dumpdev == NODEV ||
	    (nblks = (bdevsw[major(dumpdev)].d_psize)(dumpdev)) == 0)
		return;
	if (nblks <= ctod(1))
		return;

	dumpsize = physmem;
	if (dumpsize > atop(dbtob(nblks - dumplo)))
		dumpsize = atop(dbtob(nblks - dumplo));
	else if (dumplo == 0)
		dumplo = nblks - btodb(ptoa(dumpsize));

	/*
	 * Don't dump on the first block in case the dump
	 * device includes a disk label.
	 */
	if (dumplo < btodb(PAGE_SIZE))
		dumplo = btodb(PAGE_SIZE);

	/* Put dump at the end of partition, and make it fit. */
	if (dumpsize + 1 > dtoc(nblks - dumplo))
		dumpsize = dtoc(nblks - dumplo) - 1;
	if (dumplo < nblks - ctod(dumpsize) - 1)
		dumplo = nblks - ctod(dumpsize) - 1;

	/* memory is contiguous on vax */
	cpu_kcore_hdr.ram_segs[0].start = 0;
	cpu_kcore_hdr.ram_segs[0].size = ptoa(physmem);
	cpu_kcore_hdr.sysmap = (vaddr_t)Sysmap;
}
Example 4
/*
 * Map a user I/O request into kernel virtual address space.
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;	/* kernel VA for the new mapping */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	vaddr_t uva = mips_trunc_page(bp->b_data);
	const vaddr_t off = (vaddr_t)bp->b_data - uva;
	len = mips_round_page(off + len);

	kva = uvm_km_alloc(phys_map, len, atop(uva) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
	KASSERT((atop(kva ^ uva) & uvmexp.colormask) == 0);
	bp->b_saveaddr = bp->b_data;
	bp->b_data = (void *)(kva + off);
	struct pmap * const upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	do {
		paddr_t pa;	/* physical address */
		if (pmap_extract(upmap, uva, &pa) == false)
			panic("vmapbuf: null page frame");
		pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE,
		    PMAP_WIRED);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(pmap_kernel());

	return 0;
}
Example 5
/*
 * Early initialization, right before main is called.
 */
void
mvme68k_init()
{
	int i;

	/*
	 * Tell the VM system about available physical memory.
	 */
	for (i = 0; i < mem_cluster_cnt; i++) {
		if (phys_seg_list[i].ps_start == phys_seg_list[i].ps_end) {
			/*
			 * Segment has been completely gobbled up.
			 */
			continue;
		}
		/*
		 * Note the index of the mem cluster is the free
		 * list we want to put the memory on (0 == default,
		 * 1 == VME).  There can only be two.
		 */
		uvm_page_physload(atop(phys_seg_list[i].ps_start),
				 atop(phys_seg_list[i].ps_end),
				 atop(phys_seg_list[i].ps_start),
				 atop(phys_seg_list[i].ps_end), i);
	}

	/* Initialize interrupt handlers. */
	isrinit();

	switch (machineid) {
#ifdef MVME147
	case MVME_147:
		mvme147_init();
		break;
#endif
#ifdef MVME162
	case MVME_162:
		mvme162_init();
		break;
#endif
#ifdef MVME167
	case MVME_167:
		mvme167_init();
		break;
#endif
	default:
		panic("mvme68k_init: impossible machineid");
	}

	/*
	 * Initialize error message buffer (at end of core).
	 */
	for (i = 0; i < btoc(round_page(MSGBUFSIZE)); i++)
		pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * NBPG,
		    msgbufpa + i * NBPG, VM_PROT_READ|VM_PROT_WRITE, TRUE,
		    VM_PROT_READ|VM_PROT_WRITE);
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));
}
Example 6
/*
 * Early initialization, before main() is called.
 */
void
luna68k_init()
{
	volatile unsigned char *pio0 = (void *)0x49000000;
	int sw1, i;
	char *cp;
	extern char bootarg[64];

	extern paddr_t avail_start, avail_end;

	/*
	 * Tell the VM system about available physical memory.  The
	 * luna68k only has one segment.
	 */
	uvm_page_physload(atop(avail_start), atop(avail_end),
	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);

	/*
	 * Initialize error message buffer (at end of core).
	 * avail_end was pre-decremented in pmap_bootstrap to compensate.
	 */
	for (i = 0; i < btoc(MSGBUFSIZE); i++)
		pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * PAGE_SIZE,
		    avail_end + i * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE));


	pio0[3] = 0xb6;
	pio0[2] = 1 << 6;		/* enable parity check */
	pio0[3] = 0xb6;
	sw1 = pio0[0];			/* dipsw1 value */
	sw1 ^= 0xff;
	sysconsole = !(sw1 & 0x2);	/* console selection */

	boothowto = 0;
	i = 0;
	/*
	 * 'bootarg' holds:
	 *   "<args of x command> ENADDR=<addr> HOST=<host> SERVER=<name>"
	 * where <addr> is the MAC address the network loader used (not
	 * necessarily the same as the one at 0x4101.FFE0), and <host> and
	 * <name> are the values of the HOST and SERVER environment variables.
	 *
	 * NetBSD/luna68k only cares about the first argument; any of "sda".
	 */
	for (cp = bootarg; *cp != ' '; cp++) {
		BOOT_FLAG(*cp, boothowto);
		if (i++ >= sizeof(bootarg))
			break;
	}
#if 0 /* overload 1:sw1, which now means 'go ROM monitor' after poweron */
	if (boothowto == 0)
		boothowto = (sw1 & 0x1) ? RB_SINGLE : 0;
#endif
}
Example 7
static int
tdfx_mmap(dev_t dev, vm_offset_t offset, int nprot)
{
	/* 
	 * mmap(2) is called by a user process to request that an area of memory
	 * associated with this device be mapped for the process to work with. Nprot
	 * holds the protections requested, PROT_READ, PROT_WRITE, or both.
	 */

	/**** OLD GET CONFIG ****/
	/* struct tdfx_softc* tdfx_info; */
	
	/* Get the configuration for our card XXX*/
	/*tdfx_info = (struct tdfx_softc*)devclass_get_softc(tdfx_devclass,
			UNIT(minor(dev)));*/
	/************************/

	struct tdfx_softc* tdfx_info[2];
	
	tdfx_info[0] = (struct tdfx_softc*)devclass_get_softc(tdfx_devclass, 0);

	/* If, for some reason, it's not configured, we bail out */
	if(tdfx_info[0] == NULL) {
#ifdef	DEBUG
	   printf("tdfx: tdfx_info (softc) is NULL\n");
#endif
	   return -1;
	}

	/* We must stay within the bound of our address space */
	if((offset & 0xff000000) == tdfx_info[0]->addr0) {
		offset &= 0xffffff;
		return atop(rman_get_start(tdfx_info[0]->memrange) + offset);
	}
	
	if(tdfx_count > 1) {
		tdfx_info[1] = (struct tdfx_softc*)devclass_get_softc(tdfx_devclass, 1);
		if((offset & 0xff000000) == tdfx_info[1]->addr0) {
			offset &= 0xffffff;
			return atop(rman_get_start(tdfx_info[1]->memrange) + offset);
		}
	}

	/* See if the Banshee/V3 LFB is being requested */
	/*if(tdfx_info->memrange2 != NULL && (offset & 0xff000000) ==
			tdfx_info->addr1) {
	  	offset &= 0xffffff;
		return atop(rman_get_start(tdfx_info[1]->memrange2) + offset);
	}*/ /* VoodooNG code */

	/* The ret call */
	/* atop -> address to page
	 * rman_get_start, get the (struct resource*)->r_start member,
	 * the mapping base address.
	 */
	return -1;
}
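The closing comment reads atop as "address to page": the mmap handler must return a physical page frame number, not a byte address. On the BSD kernels these examples come from, atop() and its inverse ptoa() are, roughly, shifts by PAGE_SHIFT; the following is only a sketch of that convention, not the definition from any particular machine/param.h:

/* Approximate definitions; the real macros differ in casts between ports. */
#define PAGE_SHIFT	12					/* assuming 4 KB pages */
#define atop(x)		((unsigned long)(x) >> PAGE_SHIFT)	/* address to pages */
#define ptoa(x)		((unsigned long)(x) << PAGE_SHIFT)	/* pages to address */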
Example 8
/*
 *	Allocates a region from the kernel address map and physically
 *	contiguous pages within the specified address range to the kernel
 *	object.  Creates a wired mapping from this region to these pages, and
 *	returns the region's starting virtual address.  If M_ZERO is specified
 *	through the given flags, then the pages are zeroed before they are
 *	mapped.
 */
vm_offset_t
kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
	vmem_t *vmem;
	vm_object_t object = kernel_object;
	vm_offset_t addr, offset, tmp;
	vm_page_t end_m, m;
	u_long npages;
	int pflags, tries;
 
	size = round_page(size);
	vmem = vm_dom[domain].vmd_kernel_arena;
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
	pflags |= VM_ALLOC_NOWAIT;
	npages = atop(size);
	VM_OBJECT_WLOCK(object);
	tries = 0;
retry:
	m = vm_page_alloc_contig_domain(object, atop(offset), domain, pflags,
	    npages, low, high, alignment, boundary, memattr);
	if (m == NULL) {
		VM_OBJECT_WUNLOCK(object);
		if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
			if (!vm_page_reclaim_contig_domain(domain, pflags,
			    npages, low, high, alignment, boundary) &&
			    (flags & M_WAITOK) != 0)
				vm_wait_domain(domain);
			VM_OBJECT_WLOCK(object);
			tries++;
			goto retry;
		}
		vmem_free(vmem, addr, size);
		return (0);
	}
	KASSERT(vm_phys_domain(m) == domain,
	    ("kmem_alloc_contig_domain: Domain mismatch %d != %d",
	    vm_phys_domain(m), domain));
	end_m = m + npages;
	tmp = addr;
	for (; m < end_m; m++) {
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, tmp, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
		tmp += PAGE_SIZE;
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}
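As a point of reference, here is a hedged sketch of a call site using the signature above: it asks for 64 KB of zeroed, physically contiguous kernel memory below 4 GB from domain 0. The function name, domain number, size, bounds and alignment are illustrative assumptions, not values taken from the example.

static vm_offset_t
example_alloc_contig_buffer(void)
{
	/* Hypothetical caller; all parameter values are illustrative. */
	return (kmem_alloc_contig_domain(0, 64 * 1024, M_WAITOK | M_ZERO,
	    0, (vm_paddr_t)0xffffffff, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT));
}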
Example 9
/*
 *	kmem_back:
 *
 *	Allocate physical pages for the specified virtual address range.
 */
int
kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
    vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_page_t m, mpred;
	int pflags;

	KASSERT(object == kernel_object,
	    ("kmem_back_domain: only supports kernel object."));

	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
	if (flags & M_WAITOK)
		pflags |= VM_ALLOC_WAITFAIL;

	i = 0;
	VM_OBJECT_WLOCK(object);
retry:
	mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i));
	for (; i < size; i += PAGE_SIZE, mpred = m) {
		m = vm_page_alloc_domain_after(object, atop(offset + i),
		    domain, pflags, mpred);

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0)
				goto retry;
			VM_OBJECT_WUNLOCK(object);
			kmem_unback(object, addr, i);
			return (KERN_NO_SPACE);
		}
		KASSERT(vm_phys_domain(m) == domain,
		    ("kmem_back_domain: Domain mismatch %d != %d",
		    vm_phys_domain(m), domain));
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
		    ("kmem_malloc: page %p is managed", m));
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);

	return (KERN_SUCCESS);
}
Example 10
int
vme_map(struct vme_softc *sc, struct extent *ext, u_int awidth,
    bus_addr_t addr, bus_size_t size, int flags, vaddr_t *rva)
{
	const struct vme_range *r;
	int rc;
	paddr_t pa;
	psize_t offs, len;

	/*
	 * Since we need to map VME address ranges on demand, we will allocate
	 * with a page granularity.
	 */
	pa = trunc_page(addr);
	offs = addr - pa;
	len = round_page(addr + size) - pa;

	/*
	 * Check that the mapping fits within the available address ranges.
	 */
	for (r = sc->sc_ranges; r->vr_width != 0; r++) {
		if (r->vr_width == awidth &&
		    r->vr_start <= addr && r->vr_end >= addr + size - 1)
			break;
	}
	if (r->vr_width == 0)
		return EINVAL;

	/*
	 * Register this range in the per-width extent.
	 */
	if (ext != NULL) {
		rc = extent_alloc_region(ext, atop(pa), atop(len),
		    EX_NOWAIT | EX_MALLOCOK);
		if (rc != 0)
			return rc;
	}

	/*
	 * Allocate virtual memory for the range and map it.
	 */
	rc = vme_map_r(r, pa, len, flags, UVM_PROT_RW, rva);
	if (rc != 0) {
		if (ext != NULL)
			(void)extent_free(ext, atop(pa), atop(len),
			    EX_NOWAIT | EX_MALLOCOK);
		return rc;
	}

	*rva += offs;
	return 0;
}
Example 11
static int rtR0MemObjFreeBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                           size_t cb,
                                           RTHCPHYS PhysHighest, size_t uAlignment,
                                           bool fContiguous, int rcNoMem)
{
    uint32_t   cPages = atop(cb);
    vm_paddr_t VmPhysAddrHigh;

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
                                                                       enmType, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    pMemFreeBSD->pObject = vm_object_allocate(OBJT_PHYS, atop(cb));

    if (PhysHighest != NIL_RTHCPHYS)
        VmPhysAddrHigh = PhysHighest;
    else
        VmPhysAddrHigh = ~(vm_paddr_t)0;

    int rc = rtR0MemObjFreeBSDPhysAllocHelper(pMemFreeBSD->pObject, cPages, VmPhysAddrHigh,
                                              uAlignment, fContiguous, true, rcNoMem);
    if (RT_SUCCESS(rc))
    {
        if (fContiguous)
        {
            Assert(enmType == RTR0MEMOBJTYPE_PHYS);
#if __FreeBSD_version >= 1000030
            VM_OBJECT_WLOCK(pMemFreeBSD->pObject);
#else
            VM_OBJECT_LOCK(pMemFreeBSD->pObject);
#endif
            pMemFreeBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(vm_page_find_least(pMemFreeBSD->pObject, 0));
#if __FreeBSD_version >= 1000030
            VM_OBJECT_WUNLOCK(pMemFreeBSD->pObject);
#else
            VM_OBJECT_UNLOCK(pMemFreeBSD->pObject);
#endif
            pMemFreeBSD->Core.u.Phys.fAllocated = true;
        }

        *ppMem = &pMemFreeBSD->Core;
    }
    else
    {
        vm_object_deallocate(pMemFreeBSD->pObject);
        rtR0MemObjDelete(&pMemFreeBSD->Core);
    }

    return rc;
}
Example 12
__dead void
landisk_startup(int howto, char *_esym)
{
	u_int32_t ramsize;

	/* Start to determine heap area */
	esym = _esym;
	kernend = (vaddr_t)round_page((vaddr_t)esym);

	boothowto = howto;

	ramsize = getramsize();

	/* Initialize CPU ops. */
	sh_cpu_init(CPU_ARCH_SH4, CPU_PRODUCT_7751R);	

	/* Initialize early console */
	consinit();

	/* Load memory to UVM */
	if (ramsize == 0 || ramsize > 512 * 1024 * 1024)
		ramsize = IOM_RAM_SIZE;
	physmem = atop(ramsize);
	kernend = atop(round_page(SH3_P1SEG_TO_PHYS(kernend)));
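	/*
	 * uvm_page_physload() arguments are page frame numbers:
	 * start, end, avail_start, avail_end, free list.  Pages below
	 * kernend hold the kernel image and are not marked available.
	 */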
	uvm_page_physload(atop(IOM_RAM_BEGIN),
	    atop(IOM_RAM_BEGIN + ramsize), kernend,
	    atop(IOM_RAM_BEGIN + ramsize), 0);
	cpu_init_kcore_hdr();	/* need to be done before pmap_bootstrap */

	/* Initialize proc0 u-area */
	sh_proc0_init();

	/* Initialize pmap and start to address translation */
	pmap_bootstrap();

#if defined(DDB)
	db_machine_init();
	ddb_init();
	if (boothowto & RB_KDB) {
		Debugger();
	}
#endif

	/* Jump to main */
	__asm volatile(
		"jmp	@%0\n\t"
		" mov	%1, sp"
		:: "r" (main), "r" (proc0.p_md.md_pcb->pcb_sf.sf_r7_bank));
	/* NOTREACHED */
	for (;;) ;
}
Example 13
void
dreamcast_startup()
{
	extern char edata[], end[];
	paddr_t kernend;

	/* Clear bss */
	memset(edata, 0, end - edata);

	/* Initialize CPU ops. */
	sh_cpu_init(CPU_ARCH_SH4, CPU_PRODUCT_7750);

	/* Console */
	consinit();

	/* Load memory to UVM */
	physmem = atop(IOM_RAM_SIZE);
	kernend = atop(round_page(SH3_P1SEG_TO_PHYS(end)));
	uvm_page_physload(
		kernend, atop(IOM_RAM_BEGIN + IOM_RAM_SIZE),
		kernend, atop(IOM_RAM_BEGIN + IOM_RAM_SIZE),
		VM_FREELIST_DEFAULT);

	/* Initialize proc0 u-area */
	sh_proc0_init();

	/* Initialize pmap and start to address translation */
	pmap_bootstrap();

	/* Debugger. */
#ifdef DDB
	ddb_init(0, NULL, NULL);
#endif
#if defined(KGDB) && (NSCIF > 0)
	if (scif_kgdb_init() == 0) {
		kgdb_debug_init = 1;
		kgdb_connect(1);
	}
#endif /* KGDB && NSCIF > 0 */

	/* Jump to main */
	__asm__ __volatile__(
		"jmp	@%0;"
		"mov	%1, sp"
		:: "r"(main),"r"(proc0.p_md.md_pcb->pcb_sf.sf_r7_bank));
	/* NOTREACHED */
	while (1)
		;
}
Example 14
File: ct.c  Project: EdKeith/core
static A jttayamp(J jt,A w,B nf,A x,A h){A y;B ng=!nf;I j,n;V*v=VAV(h);
 ASSERT(AR(x)<=(nf?v->lr:v->rr),EVRANK);
 switch(v->id){
  case CPLUS:  R tpoly(over(x,one));
  case CMINUS: R tpoly(nf?over(x,num[-1]):over(negate(x),one));
  case CSTAR:  R tpoly(over(zero,x));
  case CDIV:   ASSERT(ng,EVDOMAIN); R tpoly(over(zero,recip(x)));
  case CJDOT:  R tpoly(nf?over(x,a0j1):over(jdot1(x),one));
  case CPOLY:  ASSERT(nf,EVDOMAIN); R tpoly(BOX&AT(x)?poly1(x):x);
  case CHGEOM: ASSERT(nf,EVDOMAIN); RE(j=i0(x)); ASSERT(0<=j,EVDOMAIN);
               y=IX(j);
               R tpoly(divide(hgcoeff(y,h),fact(y)));
  case CBANG:  ASSERT(nf,EVDOMAIN); RE(j=i0(x)); ASSERT(0<=j,EVDOMAIN); 
               R tpoly(divide(poly1(box(iota(x))),fact(x)));
  case CEXP:   if(nf)R eva(x,"(^.x)&^ % !");
               RE(n=i0(x));   
               R 0<=n?tpoly(over(reshape(x,zero),one)):atop(ds(CDIV),amp(h,sc(-n))); 
  case CFIT:   ASSERT(nf&&CPOLY==ID(v->f),EVDOMAIN);
               y=over(x,IX(IC(x)));
               R tpoly(mdiv(df2(x,y,h),atab(CEXP,y,IX(IC(x)))));
  case CCIRCLE:
   switch(i0(x)){
    case 1:    R eval("{&0 1 0 _1@(4&|) % !");
    case -3:   R eval("{&0 1 0 _1@(4&|) % ]");
    case 2:    R eval("{&1 0 _1 0@(4&|) % !");
    case 5:    R eval("2&|    % !");
    case -7:   R eval("2&|    % ]");
    case 6:    R eval("2&|@>: % !");
    case -1:   R eval("(2&|              % ]) * ([: */ (1&+ % 2&+)@(i.@<.&.-:))\"0");
    case -5:   R eval("({&0 1 0 _1@(4&|) % ]) * ([: */ (1&+ % 2&+)@(i.@<.&.-:))\"0");
 }}
 ASSERT(0,EVDOMAIN);
}
Example 15
int
configure_spamd(u_short dport, char *name, char *message,
    struct cidr *blacklists)
{
	int lport = IPPORT_RESERVED - 1, s;
	struct sockaddr_in sin;
	FILE* sdc;

	s = rresvport(&lport);
	if (s == -1)
		return (-1);
	memset(&sin, 0, sizeof sin);
	sin.sin_len = sizeof(sin);
	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(dport);
	if (connect(s, (struct sockaddr *)&sin, sizeof sin) == -1)
		return (-1);
	sdc = fdopen(s, "w");
	if (sdc == NULL) {
		close(s);
		return (-1);
	}
	fprintf(sdc, "%s", name);
	do_message(sdc, message);
	while (blacklists->addr != 0) {
		fprintf(sdc, ";%s/%u", atop(blacklists->addr),
		    blacklists->bits);
		blacklists++;
	}
	fputc('\n', sdc);
	fclose(sdc);
	close(s);
	return (0);
}
Example 16
paddr_t
drmmmap(dev_t kdev, off_t offset, int prot)
{
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	drm_local_map_t *map;
	struct drm_file *priv;
	drm_map_type_t type;
	paddr_t phys;

	DRM_LOCK();
	priv = drm_find_file_by_minor(dev, minor(kdev));
	DRM_UNLOCK();
	if (priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		return (EINVAL);
	}

	if (!priv->authenticated)
		return (EACCES);

	if (dev->dma && offset >= 0 && offset < ptoa(dev->dma->page_count)) {
		drm_device_dma_t *dma = dev->dma;

		DRM_SPINLOCK(&dev->dma_lock);

		if (dma->pagelist != NULL) {
			unsigned long page = offset >> PAGE_SHIFT;
			unsigned long phys = dma->pagelist[page];

			DRM_SPINUNLOCK(&dev->dma_lock);
			return (atop(phys));
		} else {
Example 17
/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif	/* DIAGNOSTIC */
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (atop(segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}
Example 18
/*
 *	kmem_unback:
 *
 *	Unmap and free the physical pages underlying the specified virtual
 *	address range.
 *
 *	A physical page must exist within the specified object at each index
 *	that is being unmapped.
 */
static int
_kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
	vm_page_t m, next;
	vm_offset_t end, offset;
	int domain;

	KASSERT(object == kernel_object,
	    ("kmem_unback: only supports kernel object."));

	if (size == 0)
		return (0);
	pmap_remove(kernel_pmap, addr, addr + size);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	end = offset + size;
	VM_OBJECT_WLOCK(object);
	m = vm_page_lookup(object, atop(offset)); 
	domain = vm_phys_domain(m);
	for (; offset < end; offset += PAGE_SIZE, m = next) {
		next = vm_page_next(m);
		vm_page_unwire(m, PQ_NONE);
		vm_page_free(m);
	}
	VM_OBJECT_WUNLOCK(object);

	return (domain);
}
Example 19
/* This code was originally stolen from the alpha port. */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t pa;
	struct proc *p;
	vm_prot_t prot;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	p = bp->b_proc;
	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(taddr + off);
	len = atop(len);
	prot = bp->b_flags & B_READ ? VM_PROT_READ | VM_PROT_WRITE :
				      VM_PROT_READ;
	while (len--) {
		if (pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map), faddr,
		    &pa) == false)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), taddr, trunc_page(pa),
		    prot, prot | PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}
	pmap_update(vm_map_pmap(phys_map));

	return 0;
}
Example 20
int drm_mmap(struct dev_mmap_args *ap)
{
	struct cdev *kdev = ap->a_head.a_dev;
	vm_offset_t offset = ap->a_offset;
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	struct drm_file *file_priv = NULL;
	drm_local_map_t *map;
	enum drm_map_type type;
	vm_paddr_t phys;

	DRM_LOCK();
	file_priv = drm_find_file_by_proc(dev, DRM_CURPROC);
	DRM_UNLOCK();

	if (file_priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		return EINVAL;
	}

	if (!file_priv->authenticated)
		return EACCES;

	if (dev->dma && offset < ptoa(dev->dma->page_count)) {
		drm_device_dma_t *dma = dev->dma;

		DRM_SPINLOCK(&dev->dma_lock);

		if (dma->pagelist != NULL) {
			unsigned long page = offset >> PAGE_SHIFT;
			unsigned long phys = dma->pagelist[page];
			ap->a_result = atop(phys);
			DRM_SPINUNLOCK(&dev->dma_lock);
			return 0;
		} else {
Example 21
int
viommu_dvmamap_create(bus_dma_tag_t t, bus_dma_tag_t t0,
    struct iommu_state *is, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags,
    bus_dmamap_t *dmamap)
{
	int ret;
	bus_dmamap_t map;
	struct iommu_map_state *ims;

	BUS_DMA_FIND_PARENT(t, _dmamap_create);
	ret = (*t->_dmamap_create)(t, t0, size, nsegments, maxsegsz, boundary,
	    flags, &map);

	if (ret)
		return (ret);

	ims = viommu_iomap_create(atop(round_page(size)));

	if (ims == NULL) {
		bus_dmamap_destroy(t0, map);
		return (ENOMEM);
	}

	ims->ims_iommu = is;
	map->_dm_cookie = ims;

	*dmamap = map;

	return (0);
}
Example 22
void
x86bios_free(void *addr, size_t size)
{
	vm_paddr_t paddr;
	int i, nfree;

	if (addr == NULL || size == 0)
		return;
	paddr = vtophys(addr);
	if (paddr >= X86BIOS_MEM_SIZE || (paddr & PAGE_MASK) != 0)
		return;
	mtx_lock(&x86bios_lock);
	for (i = 0; i < x86bios_vmc.npages; i++)
		if (x86bios_vmc.pmap[i].kva == (vm_offset_t)addr)
			break;
	if (i >= x86bios_vmc.npages) {
		mtx_unlock(&x86bios_lock);
		return;
	}
	nfree = atop(round_page(size));
	bzero(x86bios_vmc.pmap + i, sizeof(*x86bios_vmc.pmap) * nfree);
	if (i + nfree == x86bios_vmc.npages) {
		x86bios_vmc.npages -= nfree;
		while (--i >= 0 && x86bios_vmc.pmap[i].kva == 0)
			x86bios_vmc.npages--;
	}
	mtx_unlock(&x86bios_lock);
	contigfree(addr, size, M_DEVBUF);
}
Example 23
void
buf_free_pages(struct buf *bp)
{
	struct uvm_object *uobj = bp->b_pobj;
	struct vm_page *pg;
	voff_t off, i;
	int s;

	KASSERT(bp->b_data == NULL);
	KASSERT(uobj != NULL);

	s = splbio();

	off = bp->b_poffs;
	bp->b_pobj = NULL;
	bp->b_poffs = 0;

	mtx_enter(&uobj->vmobjlock);
	for (i = 0; i < atop(bp->b_bufsize); i++) {
		pg = uvm_pagelookup(uobj, off + ptoa(i));
		KASSERT(pg != NULL);
		KASSERT(pg->wire_count == 1);
		pg->wire_count = 0;
		/* Never on a pageq, no pageqlock needed.  */
		uvm_pagefree(pg);
		bcstats.numbufpages--;
	}
	mtx_leave(&uobj->vmobjlock);
	splx(s);
}
Example 24
void
buf_free_pages(struct buf *bp)
{
	struct uvm_object *uobj = bp->b_pobj;
	struct vm_page *pg;
	voff_t off, i;
	int s;

	KASSERT(bp->b_data == NULL);
	KASSERT(uobj != NULL);

	s = splbio();

	off = bp->b_poffs;
	bp->b_pobj = NULL;
	bp->b_poffs = 0;

	for (i = 0; i < atop(bp->b_bufsize); i++) {
		pg = uvm_pagelookup(uobj, off + ptoa(i));
		KASSERT(pg != NULL);
		KASSERT(pg->wire_count == 1);
		pg->wire_count = 0;
		uvm_pagefree(pg);
		bcstats.numbufpages--;
	}
	splx(s);
}
Example 25
void
dec_eb64plus_init(void)
{
	uint64_t variation;

	platform.family = "EB64+";

	if ((platform.model = alpha_dsr_sysname()) == NULL) {
		variation = hwrpb->rpb_variation & SV_ST_MASK;
		if ((platform.model = alpha_variation_name(variation,
		    dec_eb64plus_variations)) == NULL)
			platform.model = alpha_unknown_sysname();
	}

	platform.iobus = "apecs";
	platform.cons_init = dec_eb64plus_cons_init;
	platform.device_register = dec_eb64plus_device_register;

	/*
	 * EB64+ systems can have 512K, 1M, or 2M secondary
	 * caches.  Default to middle-of-the-road.
	 *
	 * XXX Need to dynamically size it!
	 */
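	/* ncolors: number of page colors, i.e. the assumed cache size in pages. */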
	uvmexp.ncolors = atop(1 * 1024 * 1024);
}
Example 26
void
dumpconf(void)
{
	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
	u_int dumpextra, totaldumpsize;		/* in disk blocks */
	u_int seg, nblks;

	if (dumpdev == NODEV ||
	    (nblks = (bdevsw[major(dumpdev)].d_psize)(dumpdev)) == 0)
		return;
	if (nblks <= ctod(1))
		return;

	dumpsize = 0;
	for (seg = 0; seg < h->kcore_nsegs; seg++)
		dumpsize += atop(h->kcore_segs[seg].size);
	dumpextra = cpu_dumpsize();

	/* Always skip the first block, in case there is a label there. */
	if (dumplo < btodb(1))
		dumplo = btodb(1);

	/* Put dump at the end of the partition, and make it fit. */
	totaldumpsize = ctod(dumpsize) + dumpextra;
	if (totaldumpsize > nblks - dumplo) {
		totaldumpsize = dbtob(nblks - dumplo);
		dumpsize = dtoc(totaldumpsize - dumpextra);
	}
	if (dumplo < nblks - totaldumpsize)
		dumplo = nblks - totaldumpsize;
}
Example 27
int
vmcmd_map_zero(struct lwp *l, struct exec_vmcmd *cmd)
{
	struct proc *p = l->l_proc;
	int error;
	long diff;
	vm_prot_t prot, maxprot;

	diff = cmd->ev_addr - trunc_page(cmd->ev_addr);
	cmd->ev_addr -= diff;			/* required by uvm_map */
	cmd->ev_len += diff;

	prot = cmd->ev_prot;
	maxprot = UVM_PROT_ALL;
#ifdef PAX_MPROTECT
	pax_mprotect(l, &prot, &maxprot);
#endif /* PAX_MPROTECT */

	error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
			round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
			UVM_MAPFLAG(prot, maxprot, UVM_INH_COPY,
			UVM_ADV_NORMAL,
			UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
	if (cmd->ev_flags & VMCMD_STACK)
		curproc->p_vmspace->vm_issize += atop(round_page(cmd->ev_len));
	return error;
}
Example 28
/*
 * bool __mm_mem_addr(paddr_t pa):
 *	Check whether the specified physical address belongs to a memory device.
 */
bool
__mm_mem_addr(paddr_t pa)
{

	return ((atop(pa) < vm_physmem[0].start || PHYS_TO_VM_PAGE(pa) != NULL)
	    ? true : false);
}
Example 29
paddr_t drm_mmap(dev_t kdev, off_t offset, int prot)
{
	DRM_DEVICE;
	drm_local_map_t *map;
	drm_file_t *priv;
	drm_map_type_t type;
	paddr_t phys;
	uintptr_t roffset;

	DRM_LOCK();
	priv = drm_find_file_by_proc(dev, DRM_CURPROC);
	DRM_UNLOCK();
	if (priv == NULL) {
		DRM_ERROR("can't find authenticator\n");
		return -1;
	}

	if (!priv->authenticated)
		return -1;

	if (dev->dma && offset >= 0 && offset < ptoa(dev->dma->page_count)) {
		drm_device_dma_t *dma = dev->dma;

		DRM_SPINLOCK(&dev->dma_lock);

		if (dma->pagelist != NULL) {
			unsigned long page = offset >> PAGE_SHIFT;
			unsigned long pphys = dma->pagelist[page];

#ifdef macppc
			return pphys;
#else
			return atop(pphys);
#endif
		} else {
Example 30
int
vme_map_r(const struct vme_range *r, paddr_t pa, psize_t len, int flags,
    vm_prot_t prot, vaddr_t *rva)
{
	vaddr_t ova, va;
	u_int pg;

	ova = va = uvm_km_valloc(kernel_map, len);
	if (va == 0)
		return ENOMEM;

	pa += r->vr_base;
	for (pg = atop(len); pg != 0; pg--) {
		pmap_kenter_pa(va, pa, prot);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	}
	if (flags & BUS_SPACE_MAP_CACHEABLE)
		pmap_cache_ctrl(ova, ova + len, CACHE_GLOBAL);
	pmap_update(pmap_kernel());

	*rva = ova;

	return 0;
}