Example #1
/*
 * Vnode op for VM getpages.
 * Wish we could get rid of the multiple I/O routines.
 *
 * smbfs_getpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
 *		  int a_reqpage, vm_ooffset_t a_offset)
 */
int
smbfs_getpages(struct vop_getpages_args *ap)
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, npages;
	int doclose;
	size_t size, toff, nextoff, count;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td = curthread;	/* XXX */
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_page_t *pages;

	KKASSERT(td->td_proc);

	vp = ap->a_vp;
	cred = td->td_proc->p_ucred;
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = (size_t)ap->a_count;

	if (vp->v_object == NULL) {
		kprintf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}
	smb_makescred(&scred, td, cred);

	bp = getpbuf_kva(&smbfs_pbuf_freecnt);
	npages = btoc(count);
	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	/*
	 * This is kinda nasty.  Since smbfs is physically closing the
	 * fid on close(), we have to reopen it if necessary.  There are
	 * other races here too, such as if another process opens the same
	 * file while we are blocked in read. XXX
	 */
	error = 0;
	doclose = 0;
	if (np->n_opencount == 0) {
		error = smbfs_smb_open(np, SMB_AM_OPENREAD, &scred);
		if (error == 0)
			doclose = 1;
	}
	if (error == 0)
		error = smb_read(smp->sm_share, np->n_fid, &uio, &scred);
	if (doclose)
		smbfs_smb_close(smp->sm_share, np->n_fid, NULL, &scred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		kprintf("smbfs_getpages: error %d\n",error);
		for (i = 0; i < npages; i++) {
			if (ap->a_reqpage != i)
				vnode_pager_freepage(pages[i]);
		}
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		/*
		 * NOTE: pmap dirty bit should have already been cleared.
		 *	 We do not clear it here.
		 */
		if (nextoff <= size) {
			m->valid = VM_PAGE_BITS_ALL;
			m->dirty = 0;
		} else {
			int nvalid = ((size + DEV_BSIZE - 1) - toff) &
				      ~(DEV_BSIZE - 1);
			vm_page_set_validclean(m, 0, nvalid);
		}
		
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results show that deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_REFERENCED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vnode_pager_freepage(m);
			}
		}
	}
	return 0;
#endif /* SMBFS_RWGENERIC */
}
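The nvalid computation above rounds the byte count of a short read up to a DEV_BSIZE boundary before marking the tail page partially valid. A minimal userland sketch of that arithmetic, with assumed values PAGE_SIZE = 4096 and DEV_BSIZE = 512 (not taken from any kernel header):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096	/* assumed */
#define DEV_BSIZE 512	/* assumed */

/*
 * For the page at byte offset 'toff' within a transfer that returned
 * 'size' bytes, compute how many leading bytes of the page are valid,
 * rounded up to a DEV_BSIZE boundary as in smbfs_getpages().
 */
static size_t
page_valid_bytes(size_t size, size_t toff)
{
	if (toff + PAGE_SIZE <= size)
		return PAGE_SIZE;	/* page is fully valid */
	if (toff >= size)
		return 0;		/* nothing was read into this page */
	return ((size + DEV_BSIZE - 1) - toff) & ~(DEV_BSIZE - 1);
}

int
main(void)
{
	size_t size = 9000, toff;	/* 3 pages requested, short read */

	for (toff = 0; toff < 3 * PAGE_SIZE; toff += PAGE_SIZE)
		printf("page at %5zu: %4zu valid bytes\n",
		    toff, page_valid_bytes(size, toff));
	return 0;
}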
Example #2
static void
octeon_memory_init(void)
{
	vm_paddr_t phys_end;
	int64_t addr;
	unsigned i, j;

	phys_end = round_page(MIPS_KSEG0_TO_PHYS((vm_offset_t)&end));

	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) {
		/* On the simulator, limit memory to 96MB. */
		phys_avail[0] = phys_end;
		phys_avail[1] = 96 << 20;

		dump_avail[0] = phys_avail[0];
		dump_avail[1] = phys_avail[1];

		realmem = physmem = btoc(phys_avail[1] - phys_avail[0]);
		return;
	}

	/*
	 * Allocate memory from bootmem 1MB at a time and merge
	 * adjacent entries.
	 */
	i = 0;
	while (i < PHYS_AVAIL_ENTRIES) {
		/*
		 * If there is less than 2MB of memory available in 128-byte
		 * blocks, do not steal any more memory.  We need to leave some
		 * memory for the command queues to be allocated out of.
		 */
		if (cvmx_bootmem_available_mem(128) < 2 << 20)
			break;

		addr = cvmx_bootmem_phy_alloc(1 << 20, phys_end,
					      ~(vm_paddr_t)0, PAGE_SIZE, 0);
		if (addr == -1)
			break;

		/*
		 * The SDK needs to be able to easily map any memory that might
		 * come to it e.g. in the form of an mbuf.  Because on !n64 we
		 * can't direct-map some addresses and we don't want to manage
		 * temporary mappings within the SDK, don't feed memory that
		 * can't be direct-mapped to the kernel.
		 */
#if !defined(__mips_n64)
		if (!MIPS_DIRECT_MAPPABLE(addr + (1 << 20) - 1))
			continue;
#endif

		physmem += btoc(1 << 20);

		if (i > 0 && phys_avail[i - 1] == addr) {
			phys_avail[i - 1] += 1 << 20;
			continue;
		}

		phys_avail[i + 0] = addr;
		phys_avail[i + 1] = addr + (1 << 20);

		i += 2;
	}

	for (j = 0; j < i; j++)
		dump_avail[j] = phys_avail[j];

	realmem = physmem;
}
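The allocation loop above grows the previous phys_avail entry in place whenever a new 1MB chunk starts exactly where the last region ended. A standalone model of that start/end pair bookkeeping, using a fake allocator in place of the Octeon SDK's cvmx_bootmem_phy_alloc():

#include <stdio.h>
#include <stdint.h>

#define CHUNK (1 << 20)			/* 1MB, as in octeon_memory_init() */
#define PHYS_AVAIL_ENTRIES 10		/* assumed table size */

static uint64_t phys_avail[PHYS_AVAIL_ENTRIES + 2];

/* Fake bootmem: hands out ascending 1MB chunks with one hole. */
static int64_t
fake_alloc(unsigned n)
{
	if (n >= 6)
		return -1;			/* out of memory */
	return (int64_t)n * CHUNK + (n >= 3 ? 4 * CHUNK : 0);
}

int
main(void)
{
	unsigned i = 0, n, j;
	int64_t addr;

	for (n = 0; i < PHYS_AVAIL_ENTRIES; n++) {
		if ((addr = fake_alloc(n)) == -1)
			break;
		if (i > 0 && phys_avail[i - 1] == (uint64_t)addr) {
			phys_avail[i - 1] += CHUNK;	/* merge with previous */
			continue;
		}
		phys_avail[i + 0] = addr;		/* start a new region */
		phys_avail[i + 1] = addr + CHUNK;
		i += 2;
	}
	for (j = 0; j < i; j += 2)
		printf("region %u: 0x%07jx - 0x%07jx\n", j / 2,
		    (uintmax_t)phys_avail[j], (uintmax_t)phys_avail[j + 1]);
	return 0;
}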
Example #3
/*
 * Do all the stuff that locore normally does before calling main().
 */
void
mach_init(long fwhandle, long magic, long bootdata, long reserved)
{
	void *kernend, *p0;
	u_long first, last;
	extern char edata[], end[];
	int i;
	uint32_t config;

	/* XXX this code must run on the target CPU */
	config = mips3_cp0_config_read();
	config &= ~MIPS3_CONFIG_K0_MASK;
	config |= 0x05;				/* XXX.  cacheable coherent */
	mips3_cp0_config_write(config);

	/* Zero BSS.  XXXCGD: uh, is this really necessary still?  */
	memset(edata, 0, end - edata);

	/*
	 * Copy the bootinfo structure from the boot loader.
	 * This has to be done before mips_vector_init is
	 * called because we may need CFE's TLB handler.
	 */

	if (magic == BOOTINFO_MAGIC)
		memcpy(&bootinfo, (struct bootinfo_v1 *)bootdata,
		    sizeof bootinfo);
	else if (reserved == CFE_EPTSEAL) {
		magic = BOOTINFO_MAGIC;
		bzero(&bootinfo, sizeof bootinfo);
		bootinfo.version = BOOTINFO_VERSION;
		bootinfo.fwhandle = fwhandle;
		bootinfo.fwentry = bootdata;
		bootinfo.ssym = (vaddr_t)end;
		bootinfo.esym = (vaddr_t)end;
	}

	kernend = (void *)mips_round_page(end);
#if NKSYMS || defined(DDB) || defined(LKM)
	if (magic == BOOTINFO_MAGIC) {
		ksym_start = (void *)bootinfo.ssym;
		ksym_end   = (void *)bootinfo.esym;
		kernend = (void *)mips_round_page((vaddr_t)ksym_end);
	}
#endif

	consinit();

	uvm_setpagesize();

	/*
	 * Copy exception-dispatch code down to exception vector.
	 * Initialize locore-function vector.
	 * Clear out the I and D caches.
	 */
	mips_vector_init();

#ifdef DEBUG
	printf("fwhandle=%08X magic=%08X bootdata=%08X reserved=%08X\n",
	    (u_int)fwhandle, (u_int)magic, (u_int)bootdata, (u_int)reserved);
#endif

	strcpy(cpu_model, "sb1250");

	if (magic == BOOTINFO_MAGIC) {
		int idx;
		int added;
		uint64_t start, len, type;

		cfe_init(bootinfo.fwhandle, bootinfo.fwentry);
		cfe_present = 1;

		idx = 0;
		physmem = 0;
		mem_cluster_cnt = 0;
		while (cfe_enummem(idx, 0, &start, &len, &type) == 0) {
			added = 0;
			printf("Memory Block #%d start %08"PRIx64"X len %08"PRIx64"X: %s: ",
			    idx, start, len, (type == CFE_MI_AVAILABLE) ?
			    "Available" : "Reserved");
			if ((type == CFE_MI_AVAILABLE) &&
			    (mem_cluster_cnt < VM_PHYSSEG_MAX)) {
				/*
				 * XXX Ignore memory above 256MB for now, it
				 * XXX needs special handling.
				 */
				if (start < (256*1024*1024)) {
				    physmem += btoc(((int) len));
				    mem_clusters[mem_cluster_cnt].start =
					(long) start;
				    mem_clusters[mem_cluster_cnt].size =
					(long) len;
				    mem_cluster_cnt++;
				    added = 1;
				}
			}
			if (added)
				printf("added to map\n");
			else
				printf("not added to map\n");
			idx++;
		}

	} else {
		/*
		 * Handle the case of not being called from the firmware.
		 */
		/* XXX hardwire to 32MB; should be kernel config option */
		physmem = 32 * 1024 * 1024 / 4096;
		mem_clusters[0].start = 0;
		mem_clusters[0].size = ctob(physmem);
		mem_cluster_cnt = 1;
	}


	for (i = 0; i < sizeof(bootinfo.boot_flags); i++) {
		switch (bootinfo.boot_flags[i]) {
		case '\0':
			break;
		case ' ':
			continue;
		case '-':
			while (bootinfo.boot_flags[i] != ' ' &&
			    bootinfo.boot_flags[i] != '\0') {
				switch (bootinfo.boot_flags[i]) {
				case 'a':
					boothowto |= RB_ASKNAME;
					break;
				case 'd':
					boothowto |= RB_KDB;
					break;
				case 's':
					boothowto |= RB_SINGLE;
					break;
				}
				i++;
			}
		}
	}

	/*
	 * Load the rest of the available pages into the VM system.
	 * The first chunk is tricky because we have to avoid the
	 * kernel, but the rest are easy.
	 */
	first = round_page(MIPS_KSEG0_TO_PHYS(kernend));
	last = mem_clusters[0].start + mem_clusters[0].size;
	uvm_page_physload(atop(first), atop(last), atop(first), atop(last),
		VM_FREELIST_DEFAULT);

	for (i = 1; i < mem_cluster_cnt; i++) {
		first = round_page(mem_clusters[i].start);
		last = mem_clusters[i].start + mem_clusters[i].size;
		uvm_page_physload(atop(first), atop(last), atop(first),
		    atop(last), VM_FREELIST_DEFAULT);
	}

	/*
	 * Initialize error message buffer (at end of core).
	 */
	mips_init_msgbuf();

	/*
	 * Allocate space for proc0's USPACE
	 */
	p0 = (void *)pmap_steal_memory(USPACE, NULL, NULL);
	lwp0.l_addr = proc0paddr = (struct user *)p0;
	lwp0.l_md.md_regs = (struct frame *)((char *)p0 + USPACE) - 1;
	proc0paddr->u_pcb.pcb_context[11] =
	    MIPS_INT_MASK | MIPS_SR_INT_IE; /* SR */

	pmap_bootstrap();

	/*
	 * Initialize debuggers, and break into them, if appropriate.
	 */
#if NKSYMS || defined(DDB) || defined(LKM)
	ksyms_init(((uintptr_t)ksym_end - (uintptr_t)ksym_start),
	    ksym_start, ksym_end);
#endif

	if (boothowto & RB_KDB) {
#if defined(DDB)
		Debugger();
#endif
	}
}
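The boot_flags scan near the end of mach_init() only interprets characters inside a '-' word and skips everything else. The same state machine, lifted into a standalone sketch over a plain string (the RB_* values here are illustrative, not NetBSD's actual definitions):

#include <stdio.h>
#include <string.h>

#define RB_ASKNAME	0x01	/* illustrative values only */
#define RB_KDB		0x02
#define RB_SINGLE	0x04

static int
parse_boot_flags(const char *flags)
{
	int boothowto = 0;
	size_t i, n = strlen(flags);

	for (i = 0; i < n; i++) {
		switch (flags[i]) {
		case ' ':
			continue;
		case '-':
			/* consume one '-xyz' word, setting a bit per letter */
			while (flags[i] != ' ' && flags[i] != '\0') {
				switch (flags[i]) {
				case 'a': boothowto |= RB_ASKNAME; break;
				case 'd': boothowto |= RB_KDB; break;
				case 's': boothowto |= RB_SINGLE; break;
				}
				i++;
			}
		}
	}
	return boothowto;
}

int
main(void)
{
	printf("boothowto = 0x%x\n", parse_boot_flags("-ds noroot"));
	return 0;
}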
Example #4
/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_PUTPAGES.
 *
 * This is typically called indirectly via the pageout daemon and
 * clustering has already typically occurred, so in general we ask the
 * underlying filesystem to write the data out asynchronously rather
 * than delayed.
 */
int
vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
    int flags, int *rtvals)
{
	int i;
	vm_object_t object;
	vm_page_t m;
	int count;

	int maxsize, ncount;
	vm_ooffset_t poffset;
	struct uio auio;
	struct iovec aiov;
	int error;
	int ioflags;
	int ppscheck = 0;
	static struct timeval lastfail;
	static int curfail;

	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_ERROR;

	if ((int64_t)ma[0]->pindex < 0) {
		printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%lx)\n",
		    (long)ma[0]->pindex, (u_long)ma[0]->dirty);
		rtvals[0] = VM_PAGER_BAD;
		return VM_PAGER_BAD;
	}

	maxsize = count * PAGE_SIZE;
	ncount = count;

	poffset = IDX_TO_OFF(ma[0]->pindex);

	/*
	 * If the page-aligned write is larger than the actual file we
	 * have to invalidate pages occurring beyond the file EOF.  However,
	 * there is an edge case where a file may not be page-aligned where
	 * the last page is partially invalid.  In this case the filesystem
	 * may not properly clear the dirty bits for the entire page (which
	 * could be VM_PAGE_BITS_ALL due to the page having been mmap()d).
	 * With the page locked we are free to fix-up the dirty bits here.
	 *
	 * We do not under any circumstances truncate the valid bits, as
	 * this will screw up bogus page replacement.
	 */
	VM_OBJECT_WLOCK(object);
	if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
		if (object->un_pager.vnp.vnp_size > poffset) {
			int pgoff;

			maxsize = object->un_pager.vnp.vnp_size - poffset;
			ncount = btoc(maxsize);
			if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
				/*
				 * If the object is locked and the following
				 * conditions hold, then the page's dirty
				 * field cannot be concurrently changed by a
				 * pmap operation.
				 */
				m = ma[ncount - 1];
				vm_page_assert_sbusied(m);
				KASSERT(!pmap_page_is_write_mapped(m),
		("vnode_pager_generic_putpages: page %p is not read-only", m));
				vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
				    pgoff);
			}
		} else {
			maxsize = 0;
			ncount = 0;
		}
		if (ncount < count) {
			for (i = ncount; i < count; i++) {
				rtvals[i] = VM_PAGER_BAD;
			}
		}
	}
	VM_OBJECT_WUNLOCK(object);

	/*
	 * pageouts are already clustered, use IO_ASYNC to force a bawrite()
	 * rather than a bdwrite() to prevent paging I/O from saturating
	 * the buffer cache.  Dummy-up the sequential heuristic to cause
	 * large ranges to cluster.  If neither IO_SYNC nor IO_ASYNC is set,
	 * the system decides how to cluster.
	 */
	ioflags = IO_VMIO;
	if (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL))
		ioflags |= IO_SYNC;
	else if ((flags & VM_PAGER_CLUSTER_OK) == 0)
		ioflags |= IO_ASYNC;
	ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL: 0;
	ioflags |= IO_SEQMAX << IO_SEQSHIFT;

	aiov.iov_base = (caddr_t) 0;
	aiov.iov_len = maxsize;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = poffset;
	auio.uio_segflg = UIO_NOCOPY;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = maxsize;
	auio.uio_td = (struct thread *) 0;
	error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, ncount);

	if (error) {
		if ((ppscheck = ppsratecheck(&lastfail, &curfail, 1)))
			printf("vnode_pager_putpages: I/O error %d\n", error);
	}
	if (auio.uio_resid) {
		if (ppscheck || ppsratecheck(&lastfail, &curfail, 1))
			printf("vnode_pager_putpages: residual I/O %zd at %lu\n",
			    auio.uio_resid, (u_long)ma[0]->pindex);
	}
	for (i = 0; i < ncount; i++) {
		rtvals[i] = VM_PAGER_OK;
	}
	return rtvals[0];
}
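The EOF handling above clamps the transfer to the file size and, when the file ends mid-page, clears the dirty bits beyond the end. A userland sketch of just that arithmetic, assuming 4KB pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096			/* assumed */
#define PAGE_MASK (PAGE_SIZE - 1)
#define btoc(x) (((uintmax_t)(x) + PAGE_MASK) / PAGE_SIZE)	/* bytes to pages */

int
main(void)
{
	uintmax_t vnp_size = 10000;		/* file size */
	uintmax_t poffset = 8192;		/* write starts at page 2 */
	uintmax_t maxsize = 4 * PAGE_SIZE;	/* four pages handed in */
	uintmax_t ncount = 4;
	int pgoff;

	if (maxsize + poffset > vnp_size) {
		if (vnp_size > poffset) {
			maxsize = vnp_size - poffset;	/* clamp to EOF */
			ncount = btoc(maxsize);
			if ((pgoff = (int)(maxsize & PAGE_MASK)) != 0)
				printf("clear dirty bytes %d..%d of last page\n",
				    pgoff, PAGE_SIZE - 1);
		} else {
			maxsize = 0;		/* write entirely past EOF */
			ncount = 0;
		}
	}
	printf("maxsize=%ju ncount=%ju\n", maxsize, ncount);
	return 0;
}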
Example #5
static void
mips_init(void)
{
	int i, j;

	printf("entry: mips_init()\n");

#ifdef CFE
	/*
	 * Query DRAM memory map from CFE.
	 */
	physmem = 0;
	for (i = 0; i < 10; i += 2) {
		int result;
		uint64_t addr, len, type;

		result = cfe_enummem(i / 2, 0, &addr, &len, &type);
		if (result < 0) {
			BCM_TRACE("There is no phys memory for: %d\n", i);
			phys_avail[i] = phys_avail[i + 1] = 0;
			break;
		}
		if (type != CFE_MI_AVAILABLE) {
			BCM_TRACE("phys memory is not available: %d\n", i);
			continue;
		}

		phys_avail[i] = addr;
		if (i == 0 && addr == 0) {
			/*
			 * If this is the first physical memory segment probed
			 * from CFE, omit the region at the start of physical
			 * memory where the kernel has been loaded.
			 */
			phys_avail[i] += MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
		}
		
		BCM_TRACE("phys memory is available for: %d\n", i);
		BCM_TRACE(" => addr =  %jx\n", addr);
		BCM_TRACE(" => len =  %jd\n", len);

		phys_avail[i + 1] = addr + len;
		physmem += len;
	}

	BCM_TRACE("Total phys memory is : %ld\n", physmem);
	realmem = btoc(physmem);
#endif

	for (j = 0; j < i; j++)
		dump_avail[j] = phys_avail[j];

	physmem = realmem;

	init_param1();
	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
Example #6
void
platform_start(__register_t a0, __register_t a1,  __register_t a2, 
    __register_t a3)
{
	struct bootinfo *bootinfop;
	vm_offset_t kernend;
	uint64_t platform_counter_freq;
	int argc = a0;
	char **argv = (char **)a1;
	char **envp = (char **)a2;
	long memsize;
#ifdef FDT
	char buf[2048];		/* early stack supposedly big enough */
	vm_offset_t dtbp = 0;
	phandle_t chosen;
	void *kmdp;
	int dtb_needs_swap = 0; /* error */
	size_t dtb_size = 0;
#ifndef FDT_DTB_STATIC_ONLY
	struct fdt_header *dtb_rom, *dtb;
	uint32_t *swapptr;
#endif
	int fdt_source = FDT_SOURCE_NONE;
#endif
	int i;

	/* clear the BSS and SBSS segments */
	kernend = (vm_offset_t)&end;
	memset(&edata, 0, kernend - (vm_offset_t)(&edata));

	mips_postboot_fixup();

	mips_pcpu0_init();

	/*
	 * Over time, we've changed our boot-time binary interface for the
	 * kernel.  Miniboot simply provides a 'memsize' in a3, whereas the
	 * FreeBSD boot loader provides a 'bootinfo *' in a3.  While slightly
	 * grody, we support both here by detecting 'pointer-like' values in
	 * a3 and assuming physical memory can never be that big.
	 *
	 * XXXRW: Pull more values than memsize out of bootinfop -- e.g.,
	 * module information.
	 */
	if (a3 >= 0x9800000000000000ULL) {
		bootinfop = (void *)a3;
		memsize = bootinfop->bi_memsize;
		preload_metadata = (caddr_t)bootinfop->bi_modulep;
	} else {
		bootinfop = NULL;
		memsize = a3;
	}

	kmdp = preload_search_by_type("elf kernel");
	/*
	 * Configure more boot-time parameters passed in by loader.
	 */
	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);


#ifdef FDT
#ifndef FDT_DTB_STATIC_ONLY
	/*
	 * Find the dtb passed in by the boot loader (currently fictional).
	 *
	 * Prefer a dtb provided as a module to one from bootinfo as we may
	 * have loaded an alternative one or created a modified version.
	 */
	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
	if (dtbp == (vm_offset_t)NULL &&
	    bootinfop != NULL && bootinfop->bi_dtb != (bi_ptr_t)NULL) {
		dtbp = bootinfop->bi_dtb;
		fdt_source = FDT_SOURCE_LOADER;
	}

	/* Try to find an FDT directly in the hardware */
	if (dtbp == (vm_offset_t)NULL) {
		dtb_rom = (void*)(intptr_t)0x900000007f010000;
		if (dtb_rom->magic == FDT_MAGIC) {
			dtb_needs_swap = 0;
			dtb_size = dtb_rom->totalsize;
		} else if (dtb_rom->magic == bswap32(FDT_MAGIC)) {
			dtb_needs_swap = 1;
			dtb_size = bswap32(dtb_rom->totalsize);
		}
		if (dtb_size != 0) {
			/* Steal a bit of memory... */
			dtb = (void *)kernel_kseg0_end;
			/* Round alignment from linker script. */
			kernel_kseg0_end += roundup2(dtb_size, 64 / 8);
			memcpy(dtb, dtb_rom, dtb_size);
			if (dtb_needs_swap)
				for (swapptr = (uint32_t *)dtb;
				    swapptr < (uint32_t *)dtb + (dtb_size/sizeof(*swapptr));
				    swapptr++)
					*swapptr = bswap32(*swapptr);
			dtbp = (vm_offset_t)dtb;
			fdt_source = FDT_SOURCE_ROM;
		}
	}
#endif /* !FDT_DTB_STATIC_ONLY */

#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL) {
		dtbp = (vm_offset_t)&fdt_static_dtb;
		fdt_source = FDT_SOURCE_STATIC;
	}
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		while (1);
	if (OF_init((void *)dtbp) != 0)
		while (1);

	/*
	 * Get bootargs from FDT if specified.
	 */
	chosen = OF_finddevice("/chosen");
	if (OF_getprop(chosen, "bootargs", buf, sizeof(buf)) != -1)
		_parse_bootargs(buf);
#endif

	/*
	 * XXXRW: We have no way to compare wallclock time to cycle rate on
	 * BERI, so for now assume we run at the MALTA default (100MHz).
	 */
	platform_counter_freq = MIPS_DEFAULT_HZ;
	mips_timer_early_init(platform_counter_freq);

	cninit();
	printf("entry: platform_start()\n");

#ifdef FDT
	if (dtbp != (vm_offset_t)NULL) {
		printf("Using FDT at %p from ", (void *)dtbp);
		switch (fdt_source) {
		case FDT_SOURCE_LOADER:
			printf("loader");
			break;
		case FDT_SOURCE_ROM:
			printf("ROM");
			break;
		case FDT_SOURCE_STATIC:
			printf("kernel");
			break;
		default:
			printf("unknown source %d", fdt_source);
			break;
		}
		printf("\n");
	}
	if (dtb_size != 0 && dtb_needs_swap)
		printf("FDT was byteswapped\n");
#endif

	bootverbose = 1;
	if (bootverbose) {
		printf("cmd line: ");
		for (i = 0; i < argc; i++)
			printf("%s ", argv[i]);
		printf("\n");

		printf("envp:\n");
		for (i = 0; envp[i]; i += 2)
			printf("\t%s = %s\n", envp[i], envp[i+1]);

		if (bootinfop != NULL)
			printf("bootinfo found at %p\n", bootinfop);

		printf("memsize = %p\n", (void *)memsize);
	}

	realmem = btoc(memsize);
	mips_init();

	mips_timer_init_params(platform_counter_freq, 0);
}
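When the ROM copy of the device tree has the opposite byte order, the loop above swaps every 32-bit word in place; note the bound is expressed in words (sizeof(*swapptr)), not header structs. The same loop in a self-contained form, with a private byte-swap helper so nothing platform-specific is assumed:

#include <stdio.h>
#include <stdint.h>

static uint32_t
my_bswap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0xff00) |
	    ((v << 8) & 0xff0000) | (v << 24);
}

int
main(void)
{
	/* A tiny stand-in "blob"; 0xd00dfeed is the FDT magic number. */
	uint32_t dtb[3] = { 0xd00dfeed, 0x00000010, 0x11223344 };
	size_t dtb_size = sizeof(dtb), i;
	uint32_t *swapptr;

	for (swapptr = dtb;
	    swapptr < dtb + dtb_size / sizeof(*swapptr); swapptr++)
		*swapptr = my_bswap32(*swapptr);

	for (i = 0; i < 3; i++)
		printf("word %zu: 0x%08x\n", i, dtb[i]);
	return 0;
}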
Example #7
/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize cpu, and do autoconfiguration.
 */
cpu_startup()
{
	register unsigned i;
	register caddr_t v, firstaddr;
	int base, residual;
	vm_offset_t minaddr, maxaddr;
	vm_size_t size;
#ifdef BUFFERS_UNMANAGED
	vm_offset_t bufmemp;
	caddr_t buffermem;
	int ix;
#endif
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;

	pmapdebug = 0;
#endif

	/*
	 * Initialize error message buffer (at end of core).
	 * avail_end was pre-decremented in pmap_bootstrap to compensate.
	 */
	for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
		pmap_enter(kernel_pmap, (vm_offset_t)msgbufp,
		    avail_end + i * NBPG, VM_PROT_ALL, TRUE);
	msgbufmapped = 1;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	identifycpu();
	printf("real mem = %d\n", ctob(physmem));

	/*
	 * Allocate space for system data structures.
	 * The first available real memory address is in "firstaddr".
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */
	/*
	 * Make two passes.  The first pass calculates how much memory is
	 * needed and allocates it.  The second pass assigns virtual
	 * addresses to the various data structures.
	 */
	firstaddr = 0;
again:
	v = (caddr_t)firstaddr;

#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
	valloc(cfree, struct cblock, nclist);
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, struct map, nswapmap = maxproc * 2);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif
	
	/*
	 * Determine how many buffers to allocate.
	 * Since HPs tend to be long on memory and short on disk speed,
	 * we allocate more buffer space than the BSD standard of
	 * 10% of memory for the first 2MB and 5% of the remainder.
	 * We just allocate a flat 10%.  Ensure a minimum of 16 buffers.
	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
	if (bufpages == 0)
		bufpages = physmem / 10 / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);
	/*
	 * End of first pass, size has been calculated so allocate memory
	 */
	if (firstaddr == 0) {
		size = (vm_size_t)(v - firstaddr);
		firstaddr = (caddr_t) kmem_alloc(kernel_map, round_page(size));
		if (firstaddr == 0)
			panic("startup: no room for tables");
#ifdef BUFFERS_UNMANAGED
		buffermem = (caddr_t) kmem_alloc(kernel_map, bufpages*CLBYTES);
		if (buffermem == 0)
			panic("startup: no room for buffers");
#endif
		goto again;
	}
	/*
	 * End of second pass, addresses have been assigned
	 */
	if ((vm_size_t)(v - firstaddr) != size)
		panic("startup: table size inconsistency");
	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
				   &maxaddr, size, TRUE);
	minaddr = (vm_offset_t)buffers;
	if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
			&minaddr, size, FALSE) != KERN_SUCCESS)
		panic("startup: cannot allocate buffers");
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
#ifdef BUFFERS_UNMANAGED
	bufmemp = (vm_offset_t) buffermem;
#endif
	for (i = 0; i < nbuf; i++) {
		vm_size_t curbufsize;
		vm_offset_t curbuf;

		/*
		 * First <residual> buffers get (base+1) physical pages
		 * allocated for them.  The rest get (base) physical pages.
		 *
		 * The rest of each buffer occupies virtual space,
		 * but has no physical memory allocated for it.
		 */
		curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
		curbufsize = CLBYTES * (i < residual ? base+1 : base);
#ifdef BUFFERS_UNMANAGED
		/*
		 * Move the physical pages over from buffermem.
		 */
		for (ix = 0; ix < curbufsize/CLBYTES; ix++) {
			vm_offset_t pa;

			pa = pmap_extract(kernel_pmap, bufmemp);
			if (pa == 0)
				panic("startup: unmapped buffer");
			pmap_remove(kernel_pmap, bufmemp, bufmemp+CLBYTES);
			pmap_enter(kernel_pmap,
				   (vm_offset_t)(curbuf + ix * CLBYTES),
				   pa, VM_PROT_READ|VM_PROT_WRITE, TRUE);
			bufmemp += CLBYTES;
		}
#else
		vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
		vm_map_simplify(buffer_map, curbuf);
#endif
	}
#ifdef BUFFERS_UNMANAGED
#if 0
	/*
	 * We would like to free the (now empty) original address range
	 * but too many bad things will happen if we try.
	 */
	kmem_free(kernel_map, (vm_offset_t)buffermem, bufpages*CLBYTES);
#endif
#endif
	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				 16*NCARGS, TRUE);
	/*
	 * Allocate a submap for physio
	 */
	phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				 VM_PHYS_SIZE, TRUE);

	/*
	 * Finally, allocate mbuf pool.  Since mclrefcnt is an odd size
	 * we use the more space-efficient malloc in place of kmem_alloc.
	 */
	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
				   M_MBUF, M_NOWAIT);
	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
	mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
			       VM_MBUF_SIZE, FALSE);
	/*
	 * Initialize callouts
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];
	callout[i-1].c_next = NULL;

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	printf("avail mem = %d\n", ptoa(cnt.v_free_count));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);
	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/*
	 * Configure the system.
	 */
	configure();
}
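The valloc() macros above implement the classic two-pass sizing trick: with v starting at address zero the first pass only measures how much space the tables need, then real memory is allocated and the second pass hands out actual addresses. A userland model of the pattern (pointer arithmetic from NULL is technically undefined in modern C, but this mirrors the historical idiom):

#include <stdio.h>
#include <stdlib.h>

#define valloc(name, type, num) \
	do { (name) = (type *)v; v = (char *)((name) + (num)); } while (0)

struct callout { struct callout *c_next; };

int
main(void)
{
	char *v, *firstaddr = NULL;
	struct callout *callout;
	int *swapmap;
	size_t size = 0;
	int ncallout = 16, nswapmap = 32;

again:
	v = firstaddr;			/* NULL on the sizing pass */
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, int, nswapmap);

	if (firstaddr == NULL) {
		size = (size_t)(v - firstaddr);	/* total bytes needed */
		if ((firstaddr = malloc(size)) == NULL) {
			fprintf(stderr, "startup: no room for tables\n");
			return 1;
		}
		goto again;		/* second pass assigns addresses */
	}
	printf("tables need %zu bytes; callout=%p swapmap=%p\n",
	    size, (void *)callout, (void *)swapmap);
	free(firstaddr);
	return 0;
}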
Example #8
void
mach_init(void)
{
	void *kernend;
	uint32_t memsize;
	extern char edata[], end[];	/* XXX */

	/* clear the BSS segment */
	kernend = (void *)mips_round_page(end);

	memset(edata, 0, (char *)kernend - edata);

	/* setup early console */
	ingenic_putchar_init();

	/* set CPU model info for sysctl_hw */
	cpu_setmodel("Ingenic XBurst");
	mips_vector_init(NULL, false);
	cal_timer();
	uvm_setpagesize();
	/*
	 * Look at arguments passed to us and compute boothowto.
	 */
	boothowto = RB_AUTOBOOT;
#ifdef KADB
	boothowto |= RB_KDB;
#endif

	/*
	 * Determine the memory size.
	 *
	 * Note: Reserve the first page!  That's where the trap
	 * vectors are located.
	 */
	memsize = 0x40000000;

	printf("Memory size: 0x%08x\n", memsize);
	physmem = btoc(memsize);

	/*
	 * Memory is at 0x20000000, with the first 256MB mirrored to
	 * 0x00000000 so we can see it through KSEG*.  Assume 1GB for now;
	 * the SoC can theoretically support up to 3GB.
	 */
	mem_clusters[0].start = PAGE_SIZE;
	mem_clusters[0].size = 0x10000000 - PAGE_SIZE;
	mem_clusters[1].start = 0x30000000;
	mem_clusters[1].size = 0x30000000;
	mem_cluster_cnt = 2;

	/*
	 * Load the available pages into the VM system.
	 */
	mips_page_physload(MIPS_KSEG0_START, (vaddr_t)kernend,
	    mem_clusters, mem_cluster_cnt, NULL, 0);

	/*
	 * Initialize message buffer (at end of core).
	 */
	mips_init_msgbuf();

	/*
	 * Initialize the virtual memory system.
	 */
	pmap_bootstrap();

	/*
	 * Allocate uarea page for lwp0 and set it.
	 */
	mips_init_lwp0_uarea();

#ifdef MULTIPROCESSOR
	mutex_init(&ingenic_ipi_lock, MUTEX_DEFAULT, IPL_HIGH);
	mips_locoresw.lsw_send_ipi = ingenic_send_ipi;
	mips_locoresw.lsw_cpu_init = ingenic_cpu_init;
#endif

	apbus_init();
	/*
	 * Initialize debuggers, and break into them, if appropriate.
	 */
#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif
}
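The cluster table above describes two discontiguous RAM ranges, holding back page 0 because the trap vectors live there. A small sketch of that bookkeeping and the resulting page count, assuming 4KB pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096			/* assumed */

struct cluster { uint64_t start, size; };

int
main(void)
{
	struct cluster mem_clusters[2];
	uint64_t physmem = 0;
	unsigned mem_cluster_cnt, i;

	/* First 256MB window, minus page 0 (trap vectors). */
	mem_clusters[0].start = PAGE_SIZE;
	mem_clusters[0].size = 0x10000000 - PAGE_SIZE;
	/* Second bank at 0x30000000, 768MB. */
	mem_clusters[1].start = 0x30000000;
	mem_clusters[1].size = 0x30000000;
	mem_cluster_cnt = 2;

	for (i = 0; i < mem_cluster_cnt; i++) {
		physmem += mem_clusters[i].size / PAGE_SIZE;
		printf("cluster %u: 0x%09jx - 0x%09jx\n", i,
		    (uintmax_t)mem_clusters[i].start,
		    (uintmax_t)(mem_clusters[i].start + mem_clusters[i].size));
	}
	printf("physmem = %ju pages\n", (uintmax_t)physmem);
	return 0;
}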
Example #9
void
platform_start(__register_t a0 __unused, __register_t a1 __unused, 
    __register_t a2 __unused, __register_t a3 __unused)
{
	uint64_t platform_counter_freq;
	int argc, i;
	char **argv, **envp;
	vm_offset_t kernend;

	/* 
	 * Clear the BSS and SBSS segments; this should be the first thing
	 * done in this function.
	 */
	kernend = (vm_offset_t)&end;
	memset(&edata, 0, kernend - (vm_offset_t)(&edata));

	mips_postboot_fixup();

	/* Initialize pcpu stuff */
	mips_pcpu0_init();

	argc = a0;
	argv = (char**)a1;
	envp = (char**)a2;
	/* 
	 * Protect ourselves from garbage in registers 
	 */
	if (MIPS_IS_VALID_PTR(envp)) {
		for (i = 0; envp[i]; i += 2) {
			if (strcmp(envp[i], "memsize") == 0)
				realmem = btoc(strtoul(envp[i+1], NULL, 16));
		}
	}

	/*
	 * Just a wild guess: RedBoot let us down and didn't report
	 * the memory size.
	 */
	if (realmem == 0)
		realmem = btoc(32*1024*1024);

	/*
	 * Allow a build-time override in case RedBoot lies,
	 * or in other situations (e.g. where there's U-Boot)
	 * where there isn't (yet) a convenient method of
	 * being told how much RAM is available.
	 *
	 * This happens on at least the Ubiquiti LS-SR71A
	 * board, where RedBoot says there's 16MB of RAM
	 * but in fact there's 32MB.
	 */
#if	defined(AR71XX_REALMEM)
		realmem = btoc(AR71XX_REALMEM);
#endif

	/* phys_avail regions are in bytes */
	phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
	phys_avail[1] = ctob(realmem);

	dump_avail[0] = phys_avail[0];
	dump_avail[1] = phys_avail[1] - phys_avail[0];

	physmem = realmem;

	/*
	 * The ns8250 uart code uses DELAY, so the ticker should be
	 * initialized before cninit.  And tick_init_params refers to hz,
	 * so init_param1 should be called first.
	 */
	init_param1();

	/* Detect the system type - this is needed for subsequent chipset-specific calls */
	ar71xx_detect_sys_type();
	ar71xx_detect_sys_frequency();

	platform_counter_freq = ar71xx_cpu_freq();
	mips_timer_init_params(platform_counter_freq, 1);
	cninit();
	init_static_kenv(boot1_env, sizeof(boot1_env));

	printf("CPU platform: %s\n", ar71xx_get_system_type());
	printf("CPU Frequency=%d MHz\n", u_ar71xx_cpu_freq / 1000000);
	printf("CPU DDR Frequency=%d MHz\n", u_ar71xx_ddr_freq / 1000000);
	printf("CPU AHB Frequency=%d MHz\n", u_ar71xx_ahb_freq / 1000000); 

	printf("platform frequency: %lld\n", platform_counter_freq);
	printf("arguments: \n");
	printf("  a0 = %08x\n", a0);
	printf("  a1 = %08x\n", a1);
	printf("  a2 = %08x\n", a2);
	printf("  a3 = %08x\n", a3);

	printf("Cmd line:");
	if (MIPS_IS_VALID_PTR(argv)) {
		for (i = 0; i < argc; i++) {
			printf(" %s", argv[i]);
			parse_argv(argv[i]);
		}
	}
	else
		printf ("argv is invalid");
	printf("\n");

	printf("Environment:\n");
	if (MIPS_IS_VALID_PTR(envp)) {
		for (i = 0; envp[i]; i+=2) {
			printf("  %s = %s\n", envp[i], envp[i+1]);
			setenv(envp[i], envp[i+1]);
		}
	}
	else 
		printf ("envp is invalid\n");

	/* Redboot if_arge MAC address is in the environment */
	ar71xx_redboot_get_macaddr();

	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();

	/*
	 * Reset USB devices 
	 */
	ar71xx_init_usb_peripheral();

	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
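RedBoot passes its environment as NULL-terminated key/value pairs, which the loop above walks two slots at a time, converting the "memsize" value from hex into pages. The same parse as a standalone sketch, assuming 4KB pages:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096		/* assumed */
#define btoc(x) (((unsigned long)(x) + PAGE_SIZE - 1) / PAGE_SIZE)

int
main(void)
{
	/* key/value pairs, NULL terminated, RedBoot style */
	const char *envp[] = {
		"console", "ttyS0",
		"memsize", "0x2000000",		/* 32MB, in hex */
		NULL
	};
	unsigned long realmem = 0;
	int i;

	for (i = 0; envp[i]; i += 2) {
		if (strcmp(envp[i], "memsize") == 0)
			realmem = btoc(strtoul(envp[i + 1], NULL, 16));
	}
	if (realmem == 0)
		realmem = btoc(32 * 1024 * 1024);	/* wild-guess fallback */
	printf("realmem = %lu pages\n", realmem);
	return 0;
}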
Example #10
void
cpu_startup()
{
	caddr_t		v;
	int		sz;
	vaddr_t		minaddr, maxaddr;
	extern unsigned int avail_end;
	extern char	cpu_model[];

	/*
	 * Initialize error message buffer.
	 */
	initmsgbuf((caddr_t)msgbufp, round_page(MSGBUFSIZE));

	/*
	 * Good {morning,afternoon,evening,night}.
	 * Also call CPU init on systems that need that.
	 */
	printf("%s%s [%08X %08X]\n", version, cpu_model, vax_cpudata, vax_siedata);
	if (dep_call->cpu_conf)
		(*dep_call->cpu_conf)();

	printf("real mem = %u (%uMB)\n", avail_end,
	    avail_end/1024/1024);
	physmem = btoc(avail_end);
	mtpr(AST_NO, PR_ASTLVL);
	spl0();

	/*
	 * Find out how much space we need, allocate it, and then give
	 * everything true virtual addresses.
	 */

	sz = (int) allocsys((caddr_t)0);
	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (((unsigned long)allocsys(v) - (unsigned long)v) != sz)
		panic("startup: table size inconsistency");

	/*
	 * Determine how many buffers to allocate.
	 * We allocate bufcachepercent% of memory for buffer space.
	 */
	if (bufpages == 0)
		bufpages = physmem * bufcachepercent / 100;

	/* Restrict to at most 25% filled kvm */
	if (bufpages >
	    (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) 
		bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) /
		    PAGE_SIZE / 4;

	/*
	 * Allocate a submap for exec arguments.  This map effectively limits
	 * the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

#if VAX46 || VAX48 || VAX49 || VAX53
	/*
	 * Allocate a submap for physio.  This map effectively limits the
	 * number of processes doing physio at any one time.
	 *
	 * Note that machines on which all mass storage I/O controllers
	 * can perform address translation do not need this.
	 */
	if (vax_boardtype == VAX_BTYP_46 || vax_boardtype == VAX_BTYP_48 ||
	    vax_boardtype == VAX_BTYP_49 || vax_boardtype == VAX_BTYP_1303)
		phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
		    VM_PHYS_SIZE, 0, FALSE, NULL);
#endif

	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free)/1024/1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */

	bufinit();
#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/*
	 * Configure the system.
	 */
	if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
		user_config();
#else
		printf("kernel does not support -c; continuing..\n");
#endif
	}
}
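bufpages above is sized as bufcachepercent% of physical memory and then clamped so the buffer cache can fill at most a quarter of the kernel virtual address space. The arithmetic in isolation, with assumed address-space bounds:

#include <stdio.h>

#define PAGE_SIZE		4096		/* assumed */
#define VM_MIN_KERNEL_ADDRESS	0xc0000000UL	/* assumed: 256MB of KVA */
#define VM_MAX_KERNEL_ADDRESS	0xd0000000UL

int
main(void)
{
	unsigned long physmem = 131072;		/* pages: 512MB of RAM */
	unsigned long bufcachepercent = 10;
	unsigned long bufpages, kvmcap;

	bufpages = physmem * bufcachepercent / 100;

	/* Restrict to at most 25% filled kvm. */
	kvmcap = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
	    / PAGE_SIZE / 4;
	if (bufpages > kvmcap)
		bufpages = kvmcap;

	printf("bufpages = %lu (cap %lu)\n", bufpages, kvmcap);
	return 0;
}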
Example #11
static void
mips_init(void)
{
	int i, j, cfe_mem_idx, tmp;
	uint64_t maxmem;

#ifdef CFE_ENV
	cfe_env_init();
#endif

	TUNABLE_INT_FETCH("boothowto", &boothowto);

	if (boothowto & RB_VERBOSE)
		bootverbose++;

#ifdef MAXMEM
	tmp = MAXMEM;
#else
	tmp = 0;
#endif
	TUNABLE_INT_FETCH("hw.physmem", &tmp);
	maxmem = (uint64_t)tmp * 1024;

	/*
	 * XXX
	 * If we used vm_paddr_t consistently in pmap, etc., we could
	 * use 64-bit page numbers on !n64 systems, too, like i386
	 * does with PAE.
	 */
#if !defined(__mips_n64)
	if (maxmem == 0 || maxmem > 0xffffffff)
		maxmem = 0xffffffff;
#endif

#ifdef CFE
	/*
	 * Query DRAM memory map from CFE.
	 */
	physmem = 0;
	cfe_mem_idx = 0;
	for (i = 0; i < 10; i += 2) {
		int result;
		uint64_t addr, len, type;

		result = cfe_enummem(cfe_mem_idx++, 0, &addr, &len, &type);
		if (result < 0) {
			phys_avail[i] = phys_avail[i + 1] = 0;
			break;
		}

		KASSERT(type == CFE_MI_AVAILABLE,
			("CFE DRAM region is not available?"));

		if (bootverbose)
			printf("cfe_enummem: 0x%016jx/%ju.\n", addr, len);

		if (maxmem != 0) {
			if (addr >= maxmem) {
				printf("Ignoring %ju bytes of memory at 0x%jx "
				       "that is above maxmem %dMB\n",
				       len, addr,
				       (int)(maxmem / (1024 * 1024)));
				continue;
			}

			if (addr + len > maxmem) {
				printf("Ignoring %ju bytes of memory "
				       "that is above maxmem %dMB\n",
				       (addr + len) - maxmem,
				       (int)(maxmem / (1024 * 1024)));
				len = maxmem - addr;
			}
		}

		phys_avail[i] = addr;
		if (i == 0 && addr == 0) {
			/*
			 * If this is the first physical memory segment probed
			 * from CFE, omit the region at the start of physical
			 * memory where the kernel has been loaded.
			 */
			phys_avail[i] += MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
		}
		phys_avail[i + 1] = addr + len;
		physmem += len;
	}

	realmem = btoc(physmem);
#endif

	for (j = 0; j < i; j++)
		dump_avail[j] = phys_avail[j];

	physmem = realmem;

	init_param1();
	init_param2(physmem);
	mips_cpu_init();

	/*
	 * Sibyte has a L1 data cache coherent with DMA. This includes
	 * on-chip network interfaces as well as PCI/HyperTransport bus
	 * masters.
	 */
	cpuinfo.cache_coherent_dma = TRUE;

	/*
	 * XXX
	 * The kernel is running in 32-bit mode but the CFE is running in
	 * 64-bit mode. So the SR_KX bit in the status register is turned
	 * on by the CFE every time we call into it - e.g. for CFE_CONSOLE.
	 *
	 * This means that if we get a TLB miss for any address above 0xc0000000
	 * and the SR_KX bit is set then we will end up in the XTLB exception
	 * vector.
	 *
	 * For now work around this by copying the TLB exception handling
	 * code to the XTLB exception vector.
	 */
	{
		bcopy(MipsTLBMiss, (void *)MIPS_XTLB_MISS_EXC_VEC,
		      MipsTLBMissEnd - MipsTLBMiss);

		mips_icache_sync_all();
		mips_dcache_wbinv_all();
	}

	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();

	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
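Each CFE region above is dropped outright if it starts at or above maxmem and truncated if it crosses the limit. That clamp as a standalone helper:

#include <stdio.h>
#include <stdint.h>

/*
 * Clamp a region [addr, addr + len) against a maxmem limit, as in
 * mips_init(): skip regions entirely above the limit, truncate regions
 * that cross it.  Returns the usable length.
 */
static uint64_t
clamp_region(uint64_t addr, uint64_t len, uint64_t maxmem)
{
	if (maxmem == 0)
		return len;		/* no limit configured */
	if (addr >= maxmem)
		return 0;		/* entirely above the limit */
	if (addr + len > maxmem)
		len = maxmem - addr;	/* truncate at the limit */
	return len;
}

int
main(void)
{
	uint64_t maxmem = 256ULL << 20;		/* 256MB limit */

	printf("%ju\n", (uintmax_t)clamp_region(0, 128ULL << 20, maxmem));
	printf("%ju\n", (uintmax_t)clamp_region(192ULL << 20, 128ULL << 20, maxmem));
	printf("%ju\n", (uintmax_t)clamp_region(512ULL << 20, 64ULL << 20, maxmem));
	return 0;
}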
Example #12
void
platform_start(__register_t a0, __register_t a1,  __register_t a2, 
    __register_t a3)
{
	struct bootinfo *bootinfop;
	vm_offset_t kernend;
	uint64_t platform_counter_freq;
	int argc = a0;
	char **argv = (char **)a1;
	char **envp = (char **)a2;
	long memsize;
#ifdef FDT
	char buf[2048];		/* early stack supposedly big enough */
	vm_offset_t dtbp;
	phandle_t chosen;
	void *kmdp;
#endif
	int i;

	/* clear the BSS and SBSS segments */
	kernend = (vm_offset_t)&end;
	memset(&edata, 0, kernend - (vm_offset_t)(&edata));

	mips_postboot_fixup();

	mips_pcpu0_init();

	/*
	 * Over time, we've changed our boot-time binary interface for the
	 * kernel.  Miniboot simply provides a 'memsize' in a3, whereas the
	 * FreeBSD boot loader provides a 'bootinfo *' in a3.  While slightly
	 * grody, we support both here by detecting 'pointer-like' values in
	 * a3 and assuming physical memory can never be that big.
	 *
	 * XXXRW: Pull more values than memsize out of bootinfop -- e.g.,
	 * module information.
	 */
	if (a3 >= 0x9800000000000000ULL) {
		bootinfop = (void *)a3;
		memsize = bootinfop->bi_memsize;
		preload_metadata = (caddr_t)bootinfop->bi_modulep;
	} else {
		bootinfop = NULL;
		memsize = a3;
	}

#ifdef FDT
	/*
	 * Find the dtb passed in by the boot loader (currently fictional).
	 */
	kmdp = preload_search_by_type("elf kernel");
	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);

#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#else
#error	"Non-static FDT not yet supported on BERI"
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		while (1);
	if (OF_init((void *)dtbp) != 0)
		while (1);

	/*
	 * Configure more boot-time parameters passed in by loader.
	 */
	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);

	/*
	 * Get bootargs from FDT if specified.
	 */
	chosen = OF_finddevice("/chosen");
	if (OF_getprop(chosen, "bootargs", buf, sizeof(buf)) != -1)
		_parse_bootargs(buf);
#endif

	/*
	 * XXXRW: We have no way to compare wallclock time to cycle rate on
	 * BERI, so for now assume we run at the MALTA default (100MHz).
	 */
	platform_counter_freq = MIPS_DEFAULT_HZ;
	mips_timer_early_init(platform_counter_freq);

	cninit();
	printf("entry: platform_start()\n");

	bootverbose = 1;
	if (bootverbose) {
		printf("cmd line: ");
		for (i = 0; i < argc; i++)
			printf("%s ", argv[i]);
		printf("\n");

		printf("envp:\n");
		for (i = 0; envp[i]; i += 2)
			printf("\t%s = %s\n", envp[i], envp[i+1]);

		if (bootinfop != NULL)
			printf("bootinfo found at %p\n", bootinfop);

		printf("memsize = %p\n", (void *)memsize);
	}

	realmem = btoc(memsize);
	mips_init();

	mips_timer_init_params(platform_counter_freq, 0);
}
Example #13
void
platform_start(__register_t a0, __register_t a1,  __register_t a2, 
    __register_t a3)
{
	vm_offset_t kernend;
	uint64_t platform_counter_freq;
	int argc = a0;
	char **argv = (char **)a1;
	char **envp = (char **)a2;
	unsigned int memsize = a3;
	int i;

	/* clear the BSS and SBSS segments */
	kernend = round_page((vm_offset_t)&end);
	memset(&edata, 0, kernend - (vm_offset_t)(&edata));

	cninit();
	printf("entry: platform_start()\n");

	bootverbose = 1;
	if (bootverbose) {
		printf("cmd line: ");
		for (i = 0; i < argc; i++)
			printf("%s ", argv[i]);
		printf("\n");

		printf("envp:\n");
		for (i = 0; envp[i]; i += 2)
			printf("\t%s = %s\n", envp[i], envp[i+1]);

		printf("memsize = %08x\n", memsize);
	}

	realmem = btoc(memsize);
	mips_init();

	do {
#if defined(TICK_USE_YAMON_FREQ)
		/*
		 * If we are running on a board which uses YAMON firmware,
		 * then query CPU pipeline clock from the syscon object.
		 * If unsuccessful, use hard-coded default.
		 */
		platform_counter_freq = yamon_getcpufreq();
		if (platform_counter_freq == 0)
			platform_counter_freq = MIPS_DEFAULT_HZ;

#elif defined(TICK_USE_MALTA_RTC)
		/*
		 * If we are running on a board with the MC146818 RTC,
		 * use it to determine CPU pipeline clock frequency.
		 */
		u_int64_t counterval[2];

		/* Set RTC to binary mode. */
		writertc(RTC_STATUSB, (rtcin(RTC_STATUSB) | RTCSB_BCD));

		/* Busy-wait for falling edge of RTC update. */
		while (((rtcin(RTC_STATUSA) & RTCSA_TUP) == 0))
			;
		while (((rtcin(RTC_STATUSA)& RTCSA_TUP) != 0))
			;
		counterval[0] = mips_rd_count();

		/* Busy-wait for falling edge of RTC update. */
		while (((rtcin(RTC_STATUSA) & RTCSA_TUP) == 0))
			;
		while (((rtcin(RTC_STATUSA)& RTCSA_TUP) != 0))
			;
		counterval[1] = mips_rd_count();

		platform_counter_freq = counterval[1] - counterval[0];
#endif
	} while(0);

	mips_timer_init_params(platform_counter_freq, 0);
}
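The TICK_USE_MALTA_RTC branch above calibrates the CPU clock by sampling the cycle counter at two consecutive RTC update edges, which are one second apart. A simulation of that technique with stubbed hardware (the real code polls the MC146818 status register; the constants here are invented for the demo):

#include <stdio.h>
#include <stdint.h>

static uint64_t cycles;		/* simulated 100MHz cycle counter */

static uint32_t
mips_rd_count(void)
{
	return (uint32_t)cycles;
}

static int
rtc_update_in_progress(void)
{
	cycles += 1000;		/* time advances on every poll */
	/* the "update" flag is set briefly once every 10^8 cycles */
	return (cycles % 100000000) < 5000;
}

int
main(void)
{
	uint32_t counterval[2];
	int i;

	for (i = 0; i < 2; i++) {
		/* Busy-wait for falling edge of RTC update. */
		while (!rtc_update_in_progress())
			;
		while (rtc_update_in_progress())
			;
		counterval[i] = mips_rd_count();
	}
	printf("measured ~%u cycles/second\n", counterval[1] - counterval[0]);
	return 0;
}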
Example #14
/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in sync mode.
 * Note that vop_close always invalidates pages before close, so it's
 * not necessary to open the vnode.
 *
 * smbfs_putpages(struct vnode *a_vp, vm_page_t *a_m, int a_count, int a_sync,
 *		  int *a_rtvals, vm_ooffset_t a_offset)
 */
int
smbfs_putpages(struct vop_putpages_args *ap)
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td = curthread;	/* XXX */
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;
	VOP_OPEN(vp, FWRITE, cred, NULL);
	error = vop_stdputpages(ap);
	VOP_CLOSE(vp, FWRITE, cred);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int doclose;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_page_t *pages;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;
/*	VOP_OPEN(vp, FWRITE, cred, NULL);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}

	bp = getpbuf_kva(&smbfs_pbuf_freecnt);
	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%d,resid=%d\n",(int)uio.uio_offset, uio.uio_resid);

	smb_makescred(&scred, td, cred);

	/*
	 * This is kinda nasty.  Since smbfs is physically closing the
	 * fid on close(), we have to reopen it if necessary.  There are
	 * other races here too, such as if another process opens the same
	 * file while we are blocked in read, or the file is open read-only
	 * XXX
	 */
	error = 0;
	doclose = 0;
	if (np->n_opencount == 0) {
		error = smbfs_smb_open(np, SMB_AM_OPENRW, &scred);
		if (error == 0)
			doclose = 1;
	}
	if (error == 0)
		error = smb_write(smp->sm_share, np->n_fid, &uio, &scred);
	if (doclose)
		smbfs_smb_close(smp->sm_share, np->n_fid, NULL, &scred);
/*	VOP_CLOSE(vp, FWRITE, cred);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);
	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
	}
	return rtvals[0];
#endif /* SMBFS_RWGENERIC */
}
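The success path above converts the bytes actually consumed by the write into a page count by rounding up, then marks that many pages clean. The rounding in isolation, assuming 4KB pages:

#include <stdio.h>

#define PAGE_SIZE 4096			/* assumed */
#define round_page(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int
main(void)
{
	int count = 3 * PAGE_SIZE;	/* bytes handed to the pager */
	int resid = 1000;		/* bytes the write left over */
	int nwritten = round_page(count - resid) / PAGE_SIZE;

	/* pages 0 .. nwritten-1 would get VM_PAGER_OK and be undirtied */
	printf("nwritten = %d of %d pages\n", nwritten, count / PAGE_SIZE);
	return 0;
}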
Example #15
File: trap.c Project: MarginC/kame
void
m88100_trap(unsigned type, struct trapframe *frame)
{
	struct proc *p;
	u_quad_t sticks = 0;
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;
	int fault_type, pbus_type;
	u_long fault_code;
	unsigned nss, fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
	int s;
#endif
	int sig = 0;

	extern struct vm_map *kernel_map;
	extern caddr_t guarded_access_start;
	extern caddr_t guarded_access_end;
	extern caddr_t guarded_access_bad;

	uvmexp.traps++;
	if ((p = curproc) == NULL)
		p = &proc0;

	if (USERMODE(frame->tf_epsr)) {
		sticks = p->p_sticks;
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
	}
	fault_type = 0;
	fault_code = 0;
	fault_addr = frame->tf_sxip & XIP_ADDR;

	switch (type) {
	default:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

#if defined(DDB)
	case T_KDB_BREAK:
		s = splhigh();
		db_enable_interrupt();
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		db_disable_interrupt();
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		db_enable_interrupt();
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		db_disable_interrupt();
		splx(s);
		return;
#endif /* DDB */
	case T_ILLFLT:
		printf("Unimplemented opcode!\n");
		panictrap(frame->tf_vector, frame);
		break;
	case T_INT:
	case T_INT+T_USER:
		/*
		 * This function pointer is set in machdep.c.
		 * It calls m188_ext_int or sbc_ext_int depending
		 * on the value of brdtyp. - smurph
		 */
		(*md.interrupt_func)(T_INT, frame);
		return;

	case T_MISALGNFLT:
		printf("kernel misaligned access exception @ 0x%08x\n",
		    frame->tf_sxip);
		panictrap(frame->tf_vector, frame);
		break;

	case T_INSTFLT:
		/* kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
		printf("Kernel Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %d\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif
		panictrap(frame->tf_vector, frame);
		break;

	case T_DATAFLT:
		/* kernel mode data fault */

		/* data fault on the user address? */
		if ((frame->tf_dmt0 & DMT_DAS) == 0) {
			type = T_DATAFLT + T_USER;
			goto user_fault;
		}

		fault_addr = frame->tf_dma0;
		if (frame->tf_dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);
		if (va == 0) {
			panic("trap: bad kernel access at %x", fault_addr);
		}

		vm = p->p_vmspace;
		map = kernel_map;

		pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
		printf("Kernel Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %d\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif

		switch (pbus_type) {
		case CMMU_PFSR_BERROR:
			/*
		 	 * If it is a guarded access, bus error is OK.
		 	 */
			if ((frame->tf_sxip & XIP_ADDR) >=
			      (unsigned)&guarded_access_start &&
			    (frame->tf_sxip & XIP_ADDR) <=
			      (unsigned)&guarded_access_end) {
				frame->tf_snip =
				  ((unsigned)&guarded_access_bad    ) | NIP_V;
				frame->tf_sfip =
				  ((unsigned)&guarded_access_bad + 4) | FIP_V;
				frame->tf_sxip = 0;
				/* We sort of resolved the fault ourselves
				 * because we know where it came from
				 * [guarded_access()]. But we must still think
				 * about the other possible transactions in
				 * dmt1 & dmt2.  Mark dmt0 so that
				 * data_access_emulation skips it.  XXX smurph
				 */
				frame->tf_dmt0 |= DMT_SKIP;
				data_access_emulation((unsigned *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
				return;
			}
			break;
		case CMMU_PFSR_SUCCESS:
			/*
			 * The fault was resolved. Call data_access_emulation
			 * to drain the data unit pipe line and reset dmt0
			 * so that trap won't get called again.
			 */
			data_access_emulation((unsigned *)frame);
			frame->tf_dpfsr = 0;
			frame->tf_dmt0 = 0;
			return;
		case CMMU_PFSR_SFAULT:
		case CMMU_PFSR_PFAULT:
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			if (result == 0) {
				/*
				 * We could resolve the fault. Call
				 * data_access_emulation to drain the data
				 * unit pipe line and reset dmt0 so that trap
				 * won't get called again.
				 */
				data_access_emulation((unsigned *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
				return;
			}
			break;
		}
#ifdef TRAPDEBUG
		printf("PBUS Fault %d (%s) va = 0x%x\n", pbus_type,
		    pbus_exception_type[pbus_type], va);
#endif
		panictrap(frame->tf_vector, frame);
		/* NOTREACHED */
	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
user_fault:
		if (type == T_INSTFLT + T_USER) {
			pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
#ifdef TRAPDEBUG
			printf("User Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %d\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		} else {
			fault_addr = frame->tf_dma0;
			pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
			printf("User Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %d\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		}

		if (frame->tf_dmt0 & (DMT_WRITE | DMT_LOCKBAR)) {
			ftype = VM_PROT_READ | VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);

		vm = p->p_vmspace;
		map = &vm->vm_map;
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/* Call uvm_fault() to resolve non-bus error faults */
		switch (pbus_type) {
		case CMMU_PFSR_SUCCESS:
			result = 0;
			break;
		case CMMU_PFSR_BERROR:
			result = EACCES;
			break;
		default:
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			if (result == EACCES)
				result = EFAULT;
			break;
		}

		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;

		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (result == 0) {
				nss = btoc(USRSTACK - va);	/* XXX check this */
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			}
		}

		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			frame->tf_snip = pcb_onfault | NIP_V;
			frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
			frame->tf_sxip = 0;
			/*
			 * Continue as if the fault had been resolved, but
			 * do not try to complete the faulting access.
			 */
			frame->tf_dmt0 |= DMT_SKIP;
			result = 0;
		}

		if (result == 0) {
			if (type == T_DATAFLT+T_USER) {
				/*
			 	 * We could resolve the fault. Call
			 	 * data_access_emulation to drain the data unit
			 	 * pipe line and reset dmt0 so that trap won't
			 	 * get called again.
			 	 */
				data_access_emulation((unsigned *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
			} else {
				/*
				 * back up SXIP, SNIP,
				 * clearing the Error bit
				 */
				frame->tf_sfip = frame->tf_snip & ~FIP_E;
				frame->tf_snip = frame->tf_sxip & ~NIP_E;
				frame->tf_ipfsr = 0;
			}
		} else {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		break;
	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame);
		fault_type = BUS_ADRALN;
		break;
	case T_PRIVINFLT+T_USER:
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;
	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		break;
	case T_FPEPFLT+T_USER:
	case T_FPEIFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;
	case T_SIGTRAP+T_USER:
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
		break;
	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed.  When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			unsigned va;
			unsigned instr;
			unsigned pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(unsigned));
#if 0
			printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n",
			       p->p_comm, p->p_pid, instr, pc,
			       p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */
#endif
			/* check and see if we got here by accident */
			if ((p->p_md.md_ss_addr != pc &&
			     p->p_md.md_ss_taken_addr != pc) ||
			    instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}
			/* restore original instruction and clear BP  */
			va = p->p_md.md_ss_addr;
			if (va != 0) {
				instr = p->p_md.md_ss_instr;
				ss_put_value(p, va, instr, sizeof(instr));
			}

			/* branch taken instruction */
			instr = p->p_md.md_ss_taken_instr;
			if (instr != 0) {
				va = p->p_md.md_ss_taken_addr;
				ss_put_value(p, va, instr, sizeof(instr));
			}
#if 1
			frame->tf_sfip = frame->tf_snip;
			frame->tf_snip = pc | NIP_V;
#endif
			p->p_md.md_ss_addr = 0;
			p->p_md.md_ss_instr = 0;
			p->p_md.md_ss_taken_addr = 0;
			p->p_md.md_ss_taken_instr = 0;
			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;

	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging.  When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		frame->tf_sfip = frame->tf_snip;
		frame->tf_snip = frame->tf_sxip;
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;

	case T_ASTFLT+T_USER:
		uvmexp.softs++;
		want_ast = 0;
		if (p->p_flag & P_OWEUPC) {
			p->p_flag &= ~P_OWEUPC;
			ADDUPROF(p);
		}
		break;
	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
		sv.sival_int = fault_addr;
		trapsignal(p, sig, fault_code, fault_type, sv);
		/*
		 * don't want multiple faults - we are going to
		 * deliver signal.
		 */
		frame->tf_dmt0 = 0;
		frame->tf_ipfsr = frame->tf_dpfsr = 0;
	}

	userret(p, frame, sticks);
}
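For faults below the stack top, the handler above records stack growth by converting the distance from USRSTACK down to the faulting page into pages. A sketch of that bookkeeping with assumed constants:

#include <stdio.h>

#define PAGE_SIZE 4096				/* assumed */
#define USRSTACK  0x80000000UL			/* assumed stack top */
#define btoc(x) (((unsigned long)(x) + PAGE_SIZE - 1) / PAGE_SIZE)
#define trunc_page(x) ((x) & ~(unsigned long)(PAGE_SIZE - 1))

int
main(void)
{
	unsigned long vm_ssize = 4;		/* current stack size, pages */
	unsigned long fault_addr = USRSTACK - 6 * PAGE_SIZE + 100;
	unsigned long va = trunc_page(fault_addr);
	unsigned long nss = btoc(USRSTACK - va);

	if (nss > vm_ssize)
		vm_ssize = nss;			/* grow recorded stack size */
	printf("fault page at 0x%lx -> stack now %lu pages\n", va, vm_ssize);
	return 0;
}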
Example #16
void
platform_start(__register_t a0, __register_t a1,
    __register_t a2 __unused, __register_t a3 __unused)
{
	uint64_t platform_counter_freq;
	vm_offset_t kernend;
	int argc = a0;
	char **argv = (char **)a1;
	int i, mem;


	/* clear the BSS and SBSS segments */
	kernend = (vm_offset_t)&end;
	memset(&edata, 0, kernend - (vm_offset_t)(&edata));

	mips_postboot_fixup();

	/* Initialize pcpu stuff */
	mips_pcpu0_init();

	/*
	 * Looking for mem=XXM argument
	 */
	mem = 0; /* Just something to start with */
	for (i=0; i < argc; i++) {
		if (strncmp(argv[i], "mem=", 4) == 0) {
			mem = strtol(argv[i] + 4, NULL, 0);
			break;
		}
	}

	bootverbose = 1;
	if (mem > 0)
		realmem = btoc(mem << 20);
	else
		realmem = btoc(32 << 20);

	for (i = 0; i < 10; i++) {
		phys_avail[i] = 0;
	}

	/* phys_avail regions are in bytes */
	phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
	phys_avail[1] = ctob(realmem);

	dump_avail[0] = phys_avail[0];
	dump_avail[1] = phys_avail[1];

	physmem = realmem;

	/* 
	 * The ns8250 uart code uses DELAY, so the ticker should be
	 * initialized before cninit.  And tick_init_params refers to hz,
	 * so init_param1 should be called first.
	 */
	init_param1();
	/* TODO: parse argc,argv */
	platform_counter_freq = 330000000UL;
	mips_timer_init_params(platform_counter_freq, 1);
	cninit();
	/* Panic here, after cninit */ 
	if (mem == 0)
		panic("No mem=XX parameter in arguments");

	printf("cmd line: ");
	for (i=0; i < argc; i++)
		printf("%s ", argv[i]);
	printf("\n");

	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
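
The mem=XXM scan above is simple enough to exercise on its own. A self-contained sketch of the same loop, with btoc() defined locally (rounding bytes up to pages, as the kernel macro does; PAGE_SIZE assumed to be 4096):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define btoc(x) (((unsigned long)(x) + PAGE_SIZE - 1) / PAGE_SIZE)

int
main(int argc, char **argv)
{
	long mem = 0;		/* megabytes from the command line */
	unsigned long realmem;
	int i;

	for (i = 1; i < argc; i++) {
		if (strncmp(argv[i], "mem=", 4) == 0) {
			mem = strtol(argv[i] + 4, NULL, 0);
			break;
		}
	}
	/* fall back to 32 MB, as platform_start() does */
	realmem = btoc((unsigned long)(mem > 0 ? mem : 32) << 20);
	printf("realmem = %lu pages\n", realmem);
	return 0;
}

Running it as './a.out mem=128' prints 32768 pages.
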
Example #17
File: trap.c Project: MarginC/kame
void
m88110_trap(unsigned type, struct trapframe *frame)
{
	struct proc *p;
	u_quad_t sticks = 0;
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;
	int fault_type;
	u_long fault_code;
	unsigned nss, fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
        int s;
#endif
	int sig = 0;
	pt_entry_t *pte;

	extern struct vm_map *kernel_map;
	extern unsigned guarded_access_start;
	extern unsigned guarded_access_end;
	extern unsigned guarded_access_bad;
	extern pt_entry_t *pmap_pte(pmap_t, vaddr_t);

	uvmexp.traps++;
	if ((p = curproc) == NULL)
		p = &proc0;

	if (USERMODE(frame->tf_epsr)) {
		sticks = p->p_sticks;
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
	}
	fault_type = 0;
	fault_code = 0;
	fault_addr = frame->tf_exip & XIP_ADDR;

	switch (type) {
	default:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

	case T_197_READ+T_USER:
	case T_197_READ:
		printf("DMMU read miss: Hardware Table Searches should be enabled!\n");
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/
	case T_197_WRITE+T_USER:
	case T_197_WRITE:
		printf("DMMU write miss: Hardware Table Searches should be enabled!\n");
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/
	case T_197_INST+T_USER:
	case T_197_INST:
		printf("IMMU miss: Hardware Table Searches should be enabled!\n");
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/
#ifdef DDB
	case T_KDB_TRACE:
		s = splhigh();
		db_enable_interrupt();
		ddb_break_trap(T_KDB_TRACE, (db_regs_t*)frame);
		db_disable_interrupt();
		splx(s);
		return;
	case T_KDB_BREAK:
		s = splhigh();
		db_enable_interrupt();
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		db_disable_interrupt();
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		db_enable_interrupt();
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		db_disable_interrupt();
		/* skip one instruction */
		if (frame->tf_exip & 1)
			frame->tf_exip = frame->tf_enip;
		else
			frame->tf_exip += 4;
		splx(s);
		return;
#if 0
	case T_ILLFLT:
		s = splhigh();
		db_enable_interrupt();
		ddb_error_trap(type == T_ILLFLT ? "unimplemented opcode" :
		       "error fault", (db_regs_t*)frame);
		db_disable_interrupt();
		splx(s);
		return;
#endif /* 0 */
#endif /* DDB */
	case T_ILLFLT:
		printf("Unimplemented opcode!\n");
		panictrap(frame->tf_vector, frame);
		break;
	case T_NON_MASK:
	case T_NON_MASK+T_USER:
		(*md.interrupt_func)(T_NON_MASK, frame);
		return;
	case T_INT:
	case T_INT+T_USER:
		(*md.interrupt_func)(T_INT, frame);
		return;
	case T_MISALGNFLT:
		printf("kernel mode misaligned access exception @ 0x%08x\n",
		    frame->tf_exip);
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

	case T_INSTFLT:
		/* kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		printf("Kernel Instruction fault exip %x isr %x ilar %x\n",
		    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

	case T_DATAFLT:
		/* kernel mode data fault */

		/* data fault on the user address? */
		if ((frame->tf_dsr & CMMU_DSR_SU) == 0) {
			type = T_DATAFLT + T_USER;
			goto m88110_user_fault;
		}

#ifdef TRAPDEBUG
		printf("Kernel Data access fault exip %x dsr %x dlar %x\n",
		    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif

		fault_addr = frame->tf_dlar;
		if (frame->tf_dsr & CMMU_DSR_RW) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		} else {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		}

		va = trunc_page((vaddr_t)fault_addr);
		if (va == 0) {
			panic("trap: bad kernel access at %x", fault_addr);
		}

		vm = p->p_vmspace;
		map = kernel_map;

		if (frame->tf_dsr & CMMU_DSR_BE) {
			/*
			 * If it is a guarded access, bus error is OK.
			 */
			if ((frame->tf_exip & XIP_ADDR) >=
			      (unsigned)&guarded_access_start &&
			    (frame->tf_exip & XIP_ADDR) <=
			      (unsigned)&guarded_access_end) {
				frame->tf_exip = (unsigned)&guarded_access_bad;
				return;
			}
		}
		if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
			frame->tf_dsr &= ~CMMU_DSR_WE;	/* undefined */
			/*
			 * On a segment or a page fault, call uvm_fault() to
			 * resolve the fault.
			 */
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			if (result == 0)
				return;
		}
		if (frame->tf_dsr & CMMU_DSR_WE) {	/* write fault  */
			/*
			 * This could be a write protection fault or an
			 * exception to set the used and modified bits
			 * in the pte. Basically, if we got a write error,
			 * then we already have a pte entry that faulted
			 * in from a previous seg fault or page fault.
			 * Get the pte and check the status of the
			 * modified and valid bits to determine if this is
			 * indeed a real write fault.  XXX smurph
			 */
			pte = pmap_pte(map->pmap, va);
#ifdef DEBUG
			if (pte == PT_ENTRY_NULL)
				panic("NULL pte on write fault??");
#endif
			if (!(*pte & PG_M) && !(*pte & PG_RO)) {
				/* Set modified bit and try the write again. */
#ifdef TRAPDEBUG
				printf("Corrected kernel write fault, map %x pte %x\n",
				    map->pmap, *pte);
#endif
				*pte |= PG_M;
				return;
#if 1	/* shouldn't happen */
			} else {
				/* must be a real wp fault */
#ifdef TRAPDEBUG
				printf("Uncorrected kernel write fault, map %x pte %x\n",
				    map->pmap, *pte);
#endif
				if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
					p->p_addr->u_pcb.pcb_onfault = 0;
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
				if (result == 0)
					return;
#endif
			}
		}
		panictrap(frame->tf_vector, frame);
		/* NOTREACHED */
	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
m88110_user_fault:
		if (type == T_INSTFLT+T_USER) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
#ifdef TRAPDEBUG
			printf("User Instruction fault exip %x isr %x ilar %x\n",
			    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		} else {
			fault_addr = frame->tf_dlar;
			if (frame->tf_dsr & CMMU_DSR_RW) {
				ftype = VM_PROT_READ;
				fault_code = VM_PROT_READ;
			} else {
				ftype = VM_PROT_READ|VM_PROT_WRITE;
				fault_code = VM_PROT_WRITE;
			}
#ifdef TRAPDEBUG
			printf("User Data access fault exip %x dsr %x dlar %x\n",
			    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif
		}

		va = trunc_page((vaddr_t)fault_addr);

		vm = p->p_vmspace;
		map = &vm->vm_map;
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/*
		 * Call uvm_fault() to resolve non-bus error faults
		 * whenever possible.
		 */
		if (type == T_DATAFLT+T_USER) {
			/* data faults */
			if (frame->tf_dsr & CMMU_DSR_BE) {
				/* bus error */
				result = EACCES;
			} else
			if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
				if (result == EACCES)
					result = EFAULT;
			} else
			if (frame->tf_dsr & (CMMU_DSR_CP | CMMU_DSR_WA)) {
				/* copyback or write allocate error */
				result = EACCES;
			} else
			if (frame->tf_dsr & CMMU_DSR_WE) {
				/* write fault  */
				/* This could be a write protection fault or an
				 * exception to set the used and modified bits
				 * in the pte. Basically, if we got a write
				 * error, then we already have a pte entry that
				 * faulted in from a previous seg fault or page
				 * fault.
				 * Get the pte and check the status of the
				 * modified and valid bits to determine if this
				 * is indeed a real write fault.  XXX smurph
				 */
				pte = pmap_pte(vm_map_pmap(map), va);
#ifdef DEBUG
				if (pte == PT_ENTRY_NULL)
					panic("NULL pte on write fault??");
#endif
				if (!(*pte & PG_M) && !(*pte & PG_RO)) {
					/*
					 * Set modified bit and try the
					 * write again.
					 */
#ifdef TRAPDEBUG
					printf("Corrected userland write fault, map %x pte %x\n",
					    map->pmap, *pte);
#endif
					*pte |= PG_M;
					/*
					 * invalidate ATCs to force
					 * table search
					 */
					set_dcmd(CMMU_DCMD_INV_UATC);
					return;
				} else {
					/* must be a real wp fault */
#ifdef TRAPDEBUG
					printf("Uncorrected userland write fault, map %x pte %x\n",
					    map->pmap, *pte);
#endif
					result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
					p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
					if (result == EACCES)
						result = EFAULT;
				}
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Data access fault dsr %x\n",
				    frame->tf_dsr);
#endif
				panictrap(frame->tf_vector, frame);
			}
		} else {
			/* instruction faults */
			if (frame->tf_isr &
			    (CMMU_ISR_BE | CMMU_ISR_SP | CMMU_ISR_TBE)) {
				/* bus error, supervisor protection */
				result = EACCES;
			} else
			if (frame->tf_isr & (CMMU_ISR_SI | CMMU_ISR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
				if (result == EACCES)
					result = EFAULT;
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Instruction fault isr %x\n",
				    frame->tf_isr);
#endif
				panictrap(frame->tf_vector, frame);
			}
		}

		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (result == 0) {
				nss = btoc(USRSTACK - va);/* XXX check this */
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			}
		}

		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			frame->tf_exip = pcb_onfault;
			/*
			 * Continue as if the fault had been resolved.
			 */
			result = 0;
		}

		if (result != 0) {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		break;
	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame);
		fault_type = BUS_ADRALN;
		break;
	case T_PRIVINFLT+T_USER:
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
	case T_KDB_TRACE:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;
	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		break;
	case T_FPEPFLT+T_USER:
	case T_FPEIFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;
	case T_SIGTRAP+T_USER:
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
		break;
	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed.  When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			unsigned instr;
			unsigned pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(unsigned));
#if 0
			printf("trap: %s (%d) breakpoint %x at %x: (adr %x ins %x)\n",
			       p->p_comm, p->p_pid, instr, pc,
			       p->p_md.md_ss_addr, p->p_md.md_ss_instr); /* XXX */
#endif
			/* check and see if we got here by accident */
#ifdef notyet
			if (p->p_md.md_ss_addr != pc || instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}
#endif
			/* restore original instruction and clear BP  */
			instr = p->p_md.md_ss_instr;
			if (instr != 0)
				ss_put_value(p, pc, instr, sizeof(instr));

			p->p_md.md_ss_addr = 0;
			p->p_md.md_ss_instr = 0;
			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;
	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging.  When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;

	case T_ASTFLT+T_USER:
		uvmexp.softs++;
		want_ast = 0;
		if (p->p_flag & P_OWEUPC) {
			p->p_flag &= ~P_OWEUPC;
			ADDUPROF(p);
		}
		break;
	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
		sv.sival_int = fault_addr;
		trapsignal(p, sig, fault_code, fault_type, sv);
	}

	userret(p, frame, sticks);
}
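
The write-fault handling above turns on the idea that a DSR write exception is often just the CMMU asking for the modified bit: if the PTE is writable (no PG_RO) and not yet marked modified (no PG_M), setting PG_M and retrying fixes it; otherwise it is a genuine protection fault for uvm_fault(). That decision, lifted out as a pure function (the bit values and the function name are invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define PG_M	0x01u		/* modified; assumed bit position */
#define PG_RO	0x02u		/* read-only; assumed bit position */

typedef uint32_t pt_entry_t;

/*
 * Returns 1 if the write fault is "corrected" by setting PG_M,
 * 0 if it is a real write-protection fault for uvm_fault().
 */
static int
fix_write_fault(pt_entry_t *pte)
{
	if (!(*pte & PG_M) && !(*pte & PG_RO)) {
		*pte |= PG_M;	/* just record the modification */
		return 1;
	}
	return 0;
}

int
main(void)
{
	pt_entry_t writable = 0, readonly = PG_RO;

	printf("writable pte: %s\n",
	    fix_write_fault(&writable) ? "corrected" : "real wp fault");
	printf("readonly pte: %s\n",
	    fix_write_fault(&readonly) ? "corrected" : "real wp fault");
	return 0;
}
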
Example #18
/*
 * Vnode op for VM getpages.
 * Wish we could get rid of the multiple I/O routines ...
 *
 * nwfs_getpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
 *		 int a_reqpage, vm_ooffset_t a_offset)
 */
int
nwfs_getpages(struct vop_getpages_args *ap)
{
#ifndef NWFS_RWCACHE
	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
					    ap->a_reqpage, ap->a_seqaccess);
#else
	int i, error, npages;
	size_t nextoff, toff;
	size_t count;
	size_t size;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td = curthread;	/* XXX */
	struct ucred *cred;
	struct nwmount *nmp;
	struct nwnode *np;
	vm_page_t *pages;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;

	vp = ap->a_vp;
	np = VTONW(vp);
	nmp = VFSTONWFS(vp->v_mount);
	pages = ap->a_m;
	count = (size_t)ap->a_count;

	if (vp->v_object == NULL) {
		kprintf("nwfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	bp = getpbuf_kva(&nwfs_pbuf_freecnt);
	npages = btoc(count);
	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, &uio,cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &nwfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		kprintf("nwfs_getpages: error %d\n",error);
		for (i = 0; i < npages; i++) {
			if (ap->a_reqpage != i)
				vnode_pager_freepage(pages[i]);
		}
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		/*
		 * NOTE: pmap dirty bit should have already been cleared.
		 *	 We do not clear it here.
		 */
		if (nextoff <= size) {
			m->valid = VM_PAGE_BITS_ALL;
			m->dirty = 0;
		} else {
			int nvalid = ((size + DEV_BSIZE - 1) - toff) &
				      ~(DEV_BSIZE - 1);
			vm_page_set_validclean(m, 0, nvalid);
		}
		
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).
			 * Empirical results show that deactivating
			 * pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_REFERENCED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vnode_pager_freepage(m);
			}
		}
	}
	return 0;
#endif /* NWFS_RWCACHE */
}
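
The tail-page arithmetic above is worth tabulating: a short read leaves the last page only partially valid, and the valid byte count is rounded up to a DEV_BSIZE boundary by ((size + DEV_BSIZE - 1) - toff) & ~(DEV_BSIZE - 1). A sketch that walks two pages of a 5000-byte read (PAGE_SIZE 4096 and DEV_BSIZE 512 assumed, the conventional values):

#include <stdio.h>

#define PAGE_SIZE 4096
#define DEV_BSIZE 512

int
main(void)
{
	size_t size = 5000;	/* bytes actually read (count - uio_resid) */
	size_t toff, nextoff;
	int i;

	for (i = 0, toff = 0; toff < 2 * PAGE_SIZE; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		if (nextoff <= size) {
			printf("page %d: fully valid\n", i);
		} else {
			/* round the valid tail up to a device block */
			size_t nvalid = ((size + DEV_BSIZE - 1) - toff) &
			    ~(size_t)(DEV_BSIZE - 1);
			printf("page %d: %zu bytes valid\n", i, nvalid);
		}
	}
	return 0;
}

Page 0 comes out fully valid; page 1 holds 904 bytes of data but is marked valid (and clean) up to 1024 bytes, the next 512-byte boundary.
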
Example #19
void
mach_init(void)
{
	void *kernend;
	uint32_t memsize;

	extern char edata[], end[];	/* XXX */

	/* clear the BSS segment */
	kernend = (void *)mips_round_page(end);

	memset(edata, 0, (char *)kernend - edata);

	/* setup early console */
	atheros_set_platformsw();

	/* set CPU model info for sysctl_hw */
	snprintf(cpu_model, 64, "Atheros %s", atheros_get_cpuname());

	/*
	 * Set up the exception vectors and CPU-specific function
	 * vectors early on.  We need the wbflush() vector set up
	 * before comcnattach() is called (or at least before the
	 * first printf() after that is called).
	 * Sets up mips_cpu_flags that may be queried by other
	 * functions called during startup.
	 * Also clears the I+D caches.
	 */
	mips_vector_init(NULL, false);

	/*
	 * Calibrate timers.
	 */
	cal_timer();

	/*
	 * Set the VM page size.
	 */
	uvm_setpagesize();

	/*
	 * Look at arguments passed to us and compute boothowto.
	 */
	boothowto = RB_AUTOBOOT;
#ifdef KADB
	boothowto |= RB_KDB;
#endif

	/*
	 * This would be a good place to parse a boot command line, if
	 * we got one from the bootloader.  Right now we have no way to
	 * get one from e.g. vxworks.
	 */

	/*
	 * Determine the memory size.
	 *
	 * Note: Reserve the first page!  That's where the trap
	 * vectors are located.
	 */
	memsize = atheros_get_memsize();

	printf("Memory size: 0x%08x\n", memsize);
	physmem = btoc(memsize);

	mem_clusters[mem_cluster_cnt].start = PAGE_SIZE;
	mem_clusters[mem_cluster_cnt].size =
	    memsize - mem_clusters[mem_cluster_cnt].start;
	mem_cluster_cnt++;

	/*
	 * Load the available pages into the VM system.
	 */
	mips_page_physload(MIPS_KSEG0_START, (vaddr_t)kernend,
	    mem_clusters, mem_cluster_cnt, NULL, 0);

	/*
	 * Initialize message buffer (at end of core).
	 */
	mips_init_msgbuf();

	/*
	 * Initialize the virtual memory system.
	 */
	pmap_bootstrap();

	/*
	 * Allocate uarea page for lwp0 and set it.
	 */
	mips_init_lwp0_uarea();

	/*
	 * Initialize busses.
	 */
	atheros_bus_init();

	/*
	 * Turn off (ignore) the hardware watchdog.  If we got this
	 * far, then we shouldn't need it anymore.
	 */
	atheros_wdog_reload(0);

	/*
	 * Turn off watchpoint that may have been enabled by the
	 * PROM.  VxWorks bootloader seems to leave one set.
	 */ 
	__asm volatile (
		"mtc0	$0, $%0\n\t"
		"nop\n\t"
		"nop\n\t" :: "n"(MIPS_COP_0_WATCH_LO));

	/*
	 * Initialize debuggers, and break into them, if appropriate.
	 */
#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif
}
Example #20
/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in sync mode.
 * Note that vop_close always invalidates pages before close, so it's
 * not necessary to open the vnode.
 *
 * nwfs_putpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
 *		 int a_sync, int *a_rtvals, vm_ooffset_t a_offset)
 */
int
nwfs_putpages(struct vop_putpages_args *ap)
{
	int error;
	struct thread *td = curthread;	/* XXX */
	struct vnode *vp = ap->a_vp;
	struct ucred *cred;

#ifndef NWFS_RWCACHE
	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, NULL);
	error = vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
		ap->a_sync, ap->a_rtvals);
	VOP_CLOSE(vp, FWRITE, cred);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct nwmount *nmp;
	struct nwnode *np;
	vm_page_t *pages;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;		/* XXX */

/*	VOP_OPEN(vp, FWRITE, cred, NULL);*/
	np = VTONW(vp);
	nmp = VFSTONWFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}

	bp = getpbuf_kva(&nwfs_pbuf_freecnt);
	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	NCPVNDEBUG("ofs=%d,resid=%d\n",(int)uio.uio_offset, uio.uio_resid);

	error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, &uio, cred);
/*	VOP_CLOSE(vp, FWRITE, cred);*/
	NCPVNDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);
	relpbuf(bp, &nwfs_pbuf_freecnt);

	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
	}
	return rtvals[0];
#endif /* NWFS_RWCACHE */
}
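
The cleanup above marks as written every page the reply covered, rounding the byte count up to whole pages first: round_page(count - uio_resid) / PAGE_SIZE. A small sketch of that bookkeeping (the VM_PAGER_* values are placeholders, not the kernel's):

#include <stdio.h>

#define PAGE_SIZE	4096
#define round_page(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define VM_PAGER_AGAIN	0	/* placeholder values */
#define VM_PAGER_OK	1

int
main(void)
{
	int count = 3 * PAGE_SIZE;	/* bytes handed to putpages */
	int resid = PAGE_SIZE / 2;	/* bytes the write did not consume */
	int rtvals[3] = { VM_PAGER_AGAIN, VM_PAGER_AGAIN, VM_PAGER_AGAIN };
	int i, nwritten = round_page(count - resid) / PAGE_SIZE;

	for (i = 0; i < nwritten; i++)
		rtvals[i] = VM_PAGER_OK;
	for (i = 0; i < 3; i++)
		printf("rtvals[%d] = %s\n", i,
		    rtvals[i] == VM_PAGER_OK ? "OK" : "AGAIN");
	return 0;
}

Note the round-up: the half-written third page is still reported VM_PAGER_OK, exactly as the kernel loop does.
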
Example #21
int
physio(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cdevsw *csw;
	struct buf *pbuf;
	struct bio *bp;
	struct vm_page **pages;
	caddr_t sa;
	u_int iolen, poff;
	int error, i, npages, maxpages;
	vm_prot_t prot;

	csw = dev->si_devsw;
	npages = 0;
	sa = NULL;
	/* check if character device is being destroyed */
	if (csw == NULL)
		return (ENXIO);

	/* XXX: sanity check */
	if (dev->si_iosize_max < PAGE_SIZE) {
		printf("WARNING: %s si_iosize_max=%d, using DFLTPHYS.\n",
		    devtoname(dev), dev->si_iosize_max);
		dev->si_iosize_max = DFLTPHYS;
	}

	/*
	 * If the driver does not want I/O to be split, that means that we
	 * need to reject any requests that will not fit into one buffer.
	 */
	if (dev->si_flags & SI_NOSPLIT &&
	    (uio->uio_resid > dev->si_iosize_max || uio->uio_resid > MAXPHYS ||
	    uio->uio_iovcnt > 1)) {
		/*
		 * Tell the user why his I/O was rejected.
		 */
		if (uio->uio_resid > dev->si_iosize_max)
			uprintf("%s: request size=%zd > si_iosize_max=%d; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_resid, dev->si_iosize_max);
		if (uio->uio_resid > MAXPHYS)
			uprintf("%s: request size=%zd > MAXPHYS=%d; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_resid, MAXPHYS);
		if (uio->uio_iovcnt > 1)
			uprintf("%s: request vectors=%d > 1; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_iovcnt);
		return (EFBIG);
	}

	/*
	 * Keep the process UPAGES from being swapped.  Processes swapped
	 * out while holding pbufs, used by swapper, may lead to deadlock.
	 */
	PHOLD(curproc);

	bp = g_alloc_bio();
	if (uio->uio_segflg != UIO_USERSPACE) {
		pbuf = NULL;
		pages = NULL;
	} else if ((dev->si_flags & SI_UNMAPPED) && unmapped_buf_allowed) {
		pbuf = NULL;
		maxpages = btoc(MIN(uio->uio_resid, MAXPHYS)) + 1;
		pages = malloc(sizeof(*pages) * maxpages, M_DEVBUF, M_WAITOK);
	} else {
		pbuf = uma_zalloc(pbuf_zone, M_WAITOK);
		sa = pbuf->b_data;
		maxpages = btoc(MAXPHYS);
		pages = pbuf->b_pages;
	}
	prot = VM_PROT_READ;
	if (uio->uio_rw == UIO_READ)
		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
	error = 0;
	for (i = 0; i < uio->uio_iovcnt; i++) {
#ifdef RACCT
		if (racct_enable) {
			PROC_LOCK(curproc);
			if (uio->uio_rw == UIO_READ) {
				racct_add_force(curproc, RACCT_READBPS,
				    uio->uio_iov[i].iov_len);
				racct_add_force(curproc, RACCT_READIOPS, 1);
			} else {
				racct_add_force(curproc, RACCT_WRITEBPS,
				    uio->uio_iov[i].iov_len);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
			}
			PROC_UNLOCK(curproc);
		}
#endif /* RACCT */

		while (uio->uio_iov[i].iov_len) {
			g_reset_bio(bp);
			if (uio->uio_rw == UIO_READ) {
				bp->bio_cmd = BIO_READ;
				curthread->td_ru.ru_inblock++;
			} else {
				bp->bio_cmd = BIO_WRITE;
				curthread->td_ru.ru_oublock++;
			}
			bp->bio_offset = uio->uio_offset;
			bp->bio_data = uio->uio_iov[i].iov_base;
			bp->bio_length = uio->uio_iov[i].iov_len;
			if (bp->bio_length > dev->si_iosize_max)
				bp->bio_length = dev->si_iosize_max;
			if (bp->bio_length > MAXPHYS)
				bp->bio_length = MAXPHYS;

			/*
			 * Make sure the pbuf can map the request.
			 * The pbuf has kvasize = MAXPHYS, so a request
			 * larger than MAXPHYS - PAGE_SIZE must be
			 * page aligned or it will be fragmented.
			 */
			poff = (vm_offset_t)bp->bio_data & PAGE_MASK;
			if (pbuf && bp->bio_length + poff > pbuf->b_kvasize) {
				if (dev->si_flags & SI_NOSPLIT) {
					uprintf("%s: request ptr %p is not "
					    "on a page boundary; cannot split "
					    "request\n", devtoname(dev),
					    bp->bio_data);
					error = EFBIG;
					goto doerror;
				}
				bp->bio_length = pbuf->b_kvasize;
				if (poff != 0)
					bp->bio_length -= PAGE_SIZE;
			}

			bp->bio_bcount = bp->bio_length;
			bp->bio_dev = dev;

			if (pages) {
				if ((npages = vm_fault_quick_hold_pages(
				    &curproc->p_vmspace->vm_map,
				    (vm_offset_t)bp->bio_data, bp->bio_length,
				    prot, pages, maxpages)) < 0) {
					error = EFAULT;
					goto doerror;
				}
				if (pbuf && sa) {
					pmap_qenter((vm_offset_t)sa,
					    pages, npages);
					bp->bio_data = sa + poff;
				} else {
					bp->bio_ma = pages;
					bp->bio_ma_n = npages;
					bp->bio_ma_offset = poff;
					bp->bio_data = unmapped_buf;
					bp->bio_flags |= BIO_UNMAPPED;
				}
			}

			csw->d_strategy(bp);
			if (uio->uio_rw == UIO_READ)
				biowait(bp, "physrd");
			else
				biowait(bp, "physwr");

			if (pages) {
				if (pbuf)
					pmap_qremove((vm_offset_t)sa, npages);
				vm_page_unhold_pages(pages, npages);
			}

			iolen = bp->bio_length - bp->bio_resid;
			if (iolen == 0 && !(bp->bio_flags & BIO_ERROR))
				goto doerror;	/* EOF */
			uio->uio_iov[i].iov_len -= iolen;
			uio->uio_iov[i].iov_base =
			    (char *)uio->uio_iov[i].iov_base + iolen;
			uio->uio_resid -= iolen;
			uio->uio_offset += iolen;
			if (bp->bio_flags & BIO_ERROR) {
				error = bp->bio_error;
				goto doerror;
			}
		}
	}
doerror:
	if (pbuf)
		uma_zfree(pbuf_zone, pbuf);
	else if (pages)
		free(pages, M_DEVBUF);
	g_destroy_bio(bp);
	PRELE(curproc);
	return (error);
}
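
The trickiest part of physio() above is the clamp that keeps a request, plus the page offset of its user buffer, inside the pbuf's KVA window; a misaligned buffer costs one page of capacity so the mapping cannot overrun. That rule as a stand-alone helper (clamp_bio_length is a hypothetical name; MAXPHYS assumed to be the traditional 128 KB):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096
#define PAGE_MASK	(PAGE_SIZE - 1)
#define MAXPHYS		(128 * 1024)	/* assumed pbuf kvasize */

/*
 * Clamp an I/O of 'len' bytes at user address 'addr' so the mapped
 * request fits in a MAXPHYS-sized KVA window.
 */
static size_t
clamp_bio_length(uintptr_t addr, size_t len)
{
	size_t poff = addr & PAGE_MASK;

	if (len + poff > MAXPHYS) {
		len = MAXPHYS;
		if (poff != 0)
			len -= PAGE_SIZE;	/* misalignment costs a page */
	}
	return len;
}

int
main(void)
{
	printf("aligned 1MB    -> %zu\n", clamp_bio_length(0x10000, 1 << 20));
	printf("misaligned 1MB -> %zu\n", clamp_bio_length(0x10203, 1 << 20));
	return 0;
}
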
Example #22
/*
 * Machine-dependent startup code
 */
void
cpu_startup(void)
{
	vaddr_t v;
	vsize_t sz;
	vaddr_t minaddr, maxaddr;

	msgbuf_vaddr = PMAP_DIRECT_MAP(msgbuf_paddr);
	initmsgbuf((caddr_t)msgbuf_vaddr, round_page(MSGBUFSIZE));

	printf("%s", version);

	printf("real mem = %u (%uK)\n", ctob(physmem), ctob(physmem)/1024);

	if (physmem >= btoc(1ULL << 32)) {
		extern int amdgart_enable;

		amdgart_enable = 1;
	}

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	sz = allocsys(0);
	if ((v = uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v) - v != sz)
		panic("startup: table size inconsistency");

	/*
	 * Now allocate buffers proper. They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	setup_buffers(&maxaddr);

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	minaddr = vm_map_min(kernel_map);
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   VM_PHYS_SIZE, 0, FALSE, NULL);

	printf("avail mem = %lu (%luK)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free)/1024);
	printf("using %u buffers containing %u bytes (%uK) of memory\n",
	    nbuf, bufpages * PAGE_SIZE, bufpages * PAGE_SIZE / 1024);

	bufinit();

	if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
		user_config();
#else
		printf("kernel does not support - c; continuing..\n");
#endif
	}

	/* Safe for i/o port / memory space allocation to use malloc now. */
	x86_bus_space_mallocok();
}
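
cpu_startup() above leans on the two classic conversions: ctob() turns clicks (pages) into bytes and btoc() turns bytes into clicks, rounding up, which is how physmem >= btoc(1ULL << 32) detects more than 4 GB of RAM. A sketch of both, assuming 4 KB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define ctob(x)	((unsigned long long)(x) << PAGE_SHIFT)
#define btoc(x)	(((unsigned long long)(x) + (1ULL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)

int
main(void)
{
	unsigned long long physmem = btoc(1ULL << 32);	/* pages in 4 GB */

	printf("4 GB = %llu pages\n", physmem);
	printf("%llu pages = %llu bytes (%lluK)\n",
	    physmem, ctob(physmem), ctob(physmem) / 1024);
	return 0;
}
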
Example #23
/*
 * Early initialization, before main() is called.
 */
void
luna68k_init(void)
{
	volatile uint8_t *pio0 = (void *)0x49000000;
	int sw1, i;
	char *cp;
	extern char bootarg[64];

	extern paddr_t avail_start, avail_end;

	/* initialize cn_tab for early console */
#if 1
	cn_tab = &syscons;
#else
	cn_tab = &romcons;
#endif

	/*
	 * Tell the VM system about available physical memory.  The
	 * luna68k only has one segment.
	 */
	uvm_page_physload(atop(avail_start), atop(avail_end),
	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);

	/*
	 * Initialize error message buffer (at end of core).
	 * avail_end was pre-decremented in pmap_bootstrap to compensate.
	 */
	for (i = 0; i < btoc(MSGBUFSIZE); i++)
		pmap_kenter_pa((vaddr_t)msgbufaddr + i * PAGE_SIZE,
		    avail_end + i * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, 0);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE));


	pio0[3] = 0xb6;
	pio0[2] = 1 << 6;		/* enable parity check */
	pio0[3] = 0xb6;
	sw1 = pio0[0];			/* dip sw1 value */
	sw1 ^= 0xff;
	sysconsole = !(sw1 & 0x2);	/* console selection */

	/*
	 * Check if boothowto and bootdev values are passed by our bootloader.
	 */
	if ((bootdev & B_MAGICMASK) == B_DEVMAGIC) {
		/* Valid value is set; no need to parse bootarg. */
		return;
	}

	/*
	 * No valid bootdev value is set.
	 * Assume we are booted by ROM monitor directly using a.out kernel
	 * and we have to parse bootarg passed from the monitor to set
	 * proper boothowto and check netboot.
	 */

	/* set default to "sd0a" with no howto flags */
	bootdev = MAKEBOOTDEV(0, LUNA68K_BOOTADPT_SPC, 0, 0, 0);
	boothowto = 0;

	/*
	 * 'bootarg' on LUNA has:
	 *   "<args of x command> ENADDR=<addr> HOST=<host> SERVER=<name>"
	 * where <addr> is MAC address of which network loader used (not
	 * necessarily same as one at 0x4101.FFE0), <host> and <name>
	 * are the values of HOST and SERVER environment variables.
	 *
	 * 'bootarg' on LUNA-II has "<args of x command>" only.
	 *
	 * NetBSD/luna68k cares only about the first argument; any of "sda".
	 */
	bootarg[63] = '\0';
	for (cp = bootarg; *cp != '\0'; cp++) {
		if (*cp == '-') {
			char c;
			while ((c = *cp) != '\0' && c != ' ') {
				BOOT_FLAG(c, boothowto);
				cp++;
			}
		} else if (*cp == 'E' && memcmp("ENADDR=", cp, 7) == 0) {
			bootdev =
			    MAKEBOOTDEV(0, LUNA68K_BOOTADPT_LANCE, 0, 0, 0);
		}
	}
}
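
The flag-parsing loop above folds each '-xyz' word into boothowto one character at a time through BOOT_FLAG() (the real macro lives in <sys/boot_flag.h>). A minimal stand-in that can be compiled and run as-is; the flag letters and bit assignments below are assumptions, not NetBSD's full table:

#include <stdio.h>

#define RB_SINGLE	0x01	/* assumed bit assignments */
#define RB_ASKNAME	0x02
#define RB_KDB		0x04

#define BOOT_FLAG(ch, howto) do {				\
	switch (ch) {						\
	case 's': (howto) |= RB_SINGLE;  break;			\
	case 'a': (howto) |= RB_ASKNAME; break;			\
	case 'd': (howto) |= RB_KDB;     break;			\
	}							\
} while (0)

int
main(void)
{
	const char *bootarg = "-sd ENADDR=00:00:4c:00:00:01";
	const char *cp;
	int boothowto = 0;

	for (cp = bootarg; *cp != '\0'; cp++) {
		if (*cp == '-') {
			char c;
			while ((c = *cp) != '\0' && c != ' ') {
				BOOT_FLAG(c, boothowto);
				cp++;
			}
			if (*cp == '\0')	/* don't step past the end */
				break;
		}
	}
	printf("boothowto = 0x%x\n", boothowto);	/* 0x5: single + kdb */
	return 0;
}
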
Example #24
void
platform_start(__register_t a0 __unused, __register_t a1 __unused, 
    __register_t a2 __unused, __register_t a3 __unused)
{
	uint64_t platform_counter_freq;
	int argc = 0, i;
	char **argv = NULL, **envp = NULL;
	vm_offset_t kernend;

	/* 
	 * clear the BSS and SBSS segments, this should be first call in
	 * the function
	 */
	kernend = (vm_offset_t)&end;
	memset(&edata, 0, kernend - (vm_offset_t)(&edata));

	mips_postboot_fixup();

	/* Initialize pcpu stuff */
	mips_pcpu0_init();

	/*
	 * Until we have some more sensible abstraction for uboot/redboot
	 * environment handling, we have to make this a compile-time
	 * hack.  The existing code handles the uboot environment
	 * very incorrectly, so we should just skip initialising
	 * the relevant pointers.
	 */
#ifndef	AR71XX_ENV_UBOOT
	argc = a0;
	argv = (char**)a1;
	envp = (char**)a2;
#endif
	/* 
	 * Protect ourselves from garbage in registers 
	 */
	if (MIPS_IS_VALID_PTR(envp)) {
		for (i = 0; envp[i]; i += 2) {
			if (strcmp(envp[i], "memsize") == 0)
				realmem = btoc(strtoul(envp[i+1], NULL, 16));
			else if (strcmp(envp[i], "bootverbose") == 0)
				bootverbose = btoc(strtoul(envp[i+1], NULL, 10));
		}
	}
	bootverbose = 1;

#ifdef	AR71XX_ENV_ROUTERBOOT
	/*
	 * RouterBoot informs the board memory as a command line argument.
	 */
	if (realmem == 0)
		realmem = ar71xx_routerboot_get_mem(argc, argv);
#endif

	/*
	 * Just a wild guess: RedBoot let us down and didn't report
	 * the memory size.
	 */
	if (realmem == 0)
		realmem = btoc(32*1024*1024);

	/*
	 * Allow build-time override in case Redboot lies
	 * or in other situations (e.g. where there's u-boot)
	 * where there isn't (yet) a convenient method of
	 * being told how much RAM is available.
	 *
	 * This happens on at least the Ubiquiti LS-SR71A
	 * board, where redboot says there's 16mb of RAM
	 * but in fact there's 32mb.
	 */
#if	defined(AR71XX_REALMEM)
		realmem = btoc(AR71XX_REALMEM);
#endif

	/* phys_avail regions are in bytes */
	phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
	phys_avail[1] = ctob(realmem);

	dump_avail[0] = phys_avail[0];
	dump_avail[1] = phys_avail[1] - phys_avail[0];

	physmem = realmem;

	/*
	 * The ns8250 uart code uses DELAY, so the ticker should be initialized
	 * before cninit.  And tick_init_params refers to hz, so init_param1
	 * should be called first.
	 */
	init_param1();

	/* Detect the system type - this is needed for subsequent chipset-specific calls */
	ar71xx_detect_sys_type();
	ar71xx_detect_sys_frequency();

	platform_counter_freq = ar71xx_cpu_freq();
	mips_timer_init_params(platform_counter_freq, 1);
	cninit();
	init_static_kenv(boot1_env, sizeof(boot1_env));

	printf("CPU platform: %s\n", ar71xx_get_system_type());
	printf("CPU Frequency=%d MHz\n", u_ar71xx_cpu_freq / 1000000);
	printf("CPU DDR Frequency=%d MHz\n", u_ar71xx_ddr_freq / 1000000);
	printf("CPU AHB Frequency=%d MHz\n", u_ar71xx_ahb_freq / 1000000);
	printf("platform frequency: %lld MHz\n", platform_counter_freq / 1000000);
	printf("CPU reference clock: %d MHz\n", u_ar71xx_refclk / 1000000);
	printf("CPU MDIO clock: %d MHz\n", u_ar71xx_mdio_freq / 1000000);
	printf("arguments: \n");
	printf("  a0 = %08x\n", a0);
	printf("  a1 = %08x\n", a1);
	printf("  a2 = %08x\n", a2);
	printf("  a3 = %08x\n", a3);

	strcpy(cpu_model, ar71xx_get_system_type());

	/*
	 * XXX this code is very redboot specific.
	 */
	printf("Cmd line:");
	if (MIPS_IS_VALID_PTR(argv)) {
		for (i = 0; i < argc; i++) {
			printf(" %s", argv[i]);
			parse_argv(argv[i]);
		}
	}
	else
		printf ("argv is invalid");
	printf("\n");

	printf("Environment:\n");
	if (MIPS_IS_VALID_PTR(envp)) {
		for (i = 0; envp[i]; i+=2) {
			printf("  %s = %s\n", envp[i], envp[i+1]);
			kern_setenv(envp[i], envp[i+1]);
		}
	}
	else 
		printf ("envp is invalid\n");

	/* Platform setup */
	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();

	/*
	 * Reset USB devices 
	 */
	ar71xx_init_usb_peripheral();

	/*
	 * Reset internal ethernet switch, if one exists
	 */
	ar71xx_reset_ethernet_switch();

	/*
	 * Initialise the gmac driver.
	 */
	ar71xx_init_gmac();

	/* Redboot if_arge MAC address is in the environment */
	(void) ar71xx_redboot_get_macaddr();

	/* Various other boards need things to come out of EEPROM */
	(void) ar71xx_platform_read_eeprom_mac();

	/* Initialise the MAC address hint map */
	ar71xx_platform_check_mac_hints();

	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
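
The environment walk above treats envp as a NULL-terminated array of alternating key/value strings; hence the i += 2 stride. The same scan against a canned environment, with memsize parsed as hex just as the loop above does:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	/* alternating key/value strings, NULL-terminated */
	const char *envp[] = {
		"memsize", "0x2000000",
		"bootverbose", "1",
		NULL
	};
	unsigned long memsize = 0;
	int i;

	for (i = 0; envp[i]; i += 2) {
		printf("  %s = %s\n", envp[i], envp[i + 1]);
		if (strcmp(envp[i], "memsize") == 0)
			memsize = strtoul(envp[i + 1], NULL, 16);
	}
	printf("memsize = %lu bytes\n", memsize);
	return 0;
}
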
Example #25
/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(struct buf *bp)
{
	struct bqueues *bufq;
	int s;

	/* Block disk interrupts. */
	s = splbio();

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);

		if (ISSET(bp->b_flags, B_DELWRI)) {
			CLR(bp->b_flags, B_DELWRI);
		}

		if (bp->b_vp) {
			reassignbuf(bp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0) {
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
			numemptybufs++;
		} else {
			/* invalid data */
			bufq = &bufqueues[BQ_CLEAN];
			numfreepages += btoc(bp->b_bufsize);
			numcleanpages += btoc(bp->b_bufsize);
		}
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else {
			numfreepages += btoc(bp->b_bufsize);
			if (!ISSET(bp->b_flags, B_DELWRI)) {
				numcleanpages += btoc(bp->b_bufsize);
				bufq = &bufqueues[BQ_CLEAN];
			} else {
				numdirtypages += btoc(bp->b_bufsize);
				bufq = &bufqueues[BQ_DIRTY];
			}
		}
		if (ISSET(bp->b_flags, B_AGE))
			binsheadfree(bp, bufq);
		else
			binstailfree(bp, bufq);
	}

	/* Unlock the buffer. */
	CLR(bp->b_flags, (B_AGE | B_ASYNC | B_BUSY | B_NOCACHE | B_DEFERRED));


	/* Wake up syncer and cleaner processes waiting for buffers */
	if (nobuffers) {
		wakeup(&nobuffers);
		nobuffers = 0;
	}

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer && (numcleanpages > locleanpages)) {
		needbuffer--;
		wakeup_one(&needbuffer);
	}

	splx(s);

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}
}
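
brelse() above is essentially a decision tree over the buffer's flags and size: empty buffers go to BQ_EMPTY, invalid ones to BQ_CLEAN, locked ones to BQ_LOCKED, and the rest split between BQ_CLEAN and BQ_DIRTY on B_DELWRI. Lifted out as a pure function it is easy to test (pick_queue is a hypothetical name and the flag bits are placeholders):

#include <stdio.h>

#define B_LOCKED	0x1	/* placeholder flag bits */
#define B_INVAL		0x2
#define B_DELWRI	0x4

enum bqueue { BQ_EMPTY, BQ_CLEAN, BQ_DIRTY, BQ_LOCKED };

static enum bqueue
pick_queue(int flags, long bufsize)
{
	if (bufsize <= 0)
		return BQ_EMPTY;		/* no data */
	if (flags & B_INVAL)
		return BQ_CLEAN;		/* invalid data */
	if (flags & B_LOCKED)
		return BQ_LOCKED;		/* locked in core */
	return (flags & B_DELWRI) ? BQ_DIRTY : BQ_CLEAN;
}

int
main(void)
{
	const char *name[] = { "EMPTY", "CLEAN", "DIRTY", "LOCKED" };

	printf("%s\n", name[pick_queue(0, 0)]);			/* EMPTY */
	printf("%s\n", name[pick_queue(B_INVAL, 8192)]);	/* CLEAN */
	printf("%s\n", name[pick_queue(B_DELWRI, 8192)]);	/* DIRTY */
	printf("%s\n", name[pick_queue(B_LOCKED, 8192)]);	/* LOCKED */
	return 0;
}
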
Example #26
/*
    struct vnop_getpages_args {
        struct vnode *a_vp;
        vm_page_t *a_m;
        int a_count;
        int a_reqpage;
        vm_ooffset_t a_offset;
    };
*/
static int
fuse_vnop_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	vm_page_t *pages;

	FS_DEBUG2G("heh\n");

	vp = ap->a_vp;
	KASSERT(vp->v_object, ("objectless vp passed to getpages"));
	td = curthread;			/* XXX */
	cred = curthread->td_ucred;	/* XXX */
	pages = ap->a_m;
	count = ap->a_count;

	if (!fsess_opt_mmap(vnode_mount(vp))) {
		FS_DEBUG("called on non-cacheable vnode??\n");
		return (VM_PAGER_ERROR);
	}
	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */

	VM_OBJECT_WLOCK(vp->v_object);
	fuse_vm_page_lock_queues();
	if (pages[ap->a_reqpage]->valid != 0) {
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				fuse_vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				fuse_vm_page_unlock(pages[i]);
			}
		}
		fuse_vm_page_unlock_queues();
		VM_OBJECT_WUNLOCK(vp->v_object);
		return 0;
	}
	fuse_vm_page_unlock_queues();
	VM_OBJECT_WUNLOCK(vp->v_object);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&fuse_pbuf_freecnt);

	kva = (vm_offset_t)bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	iov.iov_base = (caddr_t)kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = fuse_io_dispatch(vp, &uio, IO_DIRECT, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &fuse_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		FS_DEBUG("error %d\n", error);
		VM_OBJECT_WLOCK(vp->v_object);
		fuse_vm_page_lock_queues();
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				fuse_vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				fuse_vm_page_unlock(pages[i]);
			}
		}
		fuse_vm_page_unlock_queues();
		VM_OBJECT_WUNLOCK(vp->v_object);
		return VM_PAGER_ERROR;
	}
	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;
	VM_OBJECT_WLOCK(vp->v_object);
	fuse_vm_page_lock_queues();
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;

		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("fuse_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("fuse_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occured
			 * we may have hit a zero-fill section.   We simply
			 * leave valid set to 0.
			 */
			;
		}
		if (i != ap->a_reqpage)
			vm_page_readahead_finish(m);
	}
	fuse_vm_page_unlock_queues();
	VM_OBJECT_WUNLOCK(vp->v_object);
	return 0;
}
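
The iovec/uio pair set up above is the kernel's scatter/gather descriptor; the closest userland analogue is POSIX readv(2). A sketch that fills two separate buffers with one system call (any readable file will do for the demo):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/uio.h>

int
main(void)
{
	char a[8], b[8];
	struct iovec iov[2];
	ssize_t n;
	int fd = open("/etc/passwd", O_RDONLY);	/* any readable file */

	if (fd < 0)
		return 1;
	iov[0].iov_base = a; iov[0].iov_len = sizeof(a);
	iov[1].iov_base = b; iov[1].iov_len = sizeof(b);
	n = readv(fd, iov, 2);		/* one call, two buffers */
	printf("read %zd bytes\n", n);
	close(fd);
	return 0;
}
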
Example #27
/*
 * Do all the stuff that locore normally does before calling main().
 * Process arguments passed to us by the prom monitor.
 * Return the first page address following the system.
 */
void
mach_init(int x_boothowto, int x_bootdev, int x_bootname, int x_maxmem)
{
	u_long first, last;
	char *kernend;
	struct btinfo_magic *bi_magic;
	struct btinfo_bootarg *bi_arg;
	struct btinfo_systype *bi_systype;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	struct btinfo_symtab *bi_sym;
	int nsym = 0;
	char *ssym, *esym;

	ssym = esym = NULL;	/* XXX: gcc */
#endif
	bi_arg = NULL;

	bootinfo = (void *)BOOTINFO_ADDR;	/* XXX */
	bi_magic = lookup_bootinfo(BTINFO_MAGIC);
	if (bi_magic && bi_magic->magic == BOOTINFO_MAGIC) {
		bi_arg = lookup_bootinfo(BTINFO_BOOTARG);
		if (bi_arg) {
			x_boothowto = bi_arg->howto;
			x_bootdev = bi_arg->bootdev;
			x_maxmem = bi_arg->maxmem;
		}
#if NKSYMS || defined(DDB) || defined(MODULAR)
		bi_sym = lookup_bootinfo(BTINFO_SYMTAB);
		if (bi_sym) {
			nsym = bi_sym->nsym;
			ssym = (void *)bi_sym->ssym;
			esym = (void *)bi_sym->esym;
		}
#endif

		bi_systype = lookup_bootinfo(BTINFO_SYSTYPE);
		if (bi_systype)
			systype = bi_systype->type;
	} else {
		/*
		 * Running kernel is loaded by non-native loader;
		 * clear the BSS segment here.
		 */
		memset(edata, 0, end - edata);
	}

	if (systype == 0) 
		systype = NEWS3400;	/* XXX compatibility for old boot */

#ifdef news5000
	if (systype == NEWS5000) {
		int i;
		char *bootspec = (char *)x_bootdev;

		if (bi_arg == NULL)
			panic("news5000 requires BTINFO_BOOTARG to boot");

		_sip = (void *)bi_arg->sip;
		x_maxmem = _sip->apbsi_memsize;
		x_maxmem -= 0x00100000;	/* reserve 1MB for ROM monitor */
		if (strncmp(bootspec, "scsi", 4) == 0) {
			x_bootdev = (5 << 28) | 0;	 /* magic, sd */
			bootspec += 4;
			if (*bootspec != '(' /*)*/)
				goto bootspec_end;
			i = strtoul(bootspec + 1, &bootspec, 10);
			x_bootdev |= (i << 24);		/* bus */
			if (*bootspec != ',')
				goto bootspec_end;
			i = strtoul(bootspec + 1, &bootspec, 10);
			x_bootdev |= (i / 10) << 20;	/* controller */
			x_bootdev |= (i % 10) << 16;	/* unit */
			if (*bootspec != ',')
				goto bootspec_end;
			i = strtoul(bootspec + 1, &bootspec, 10);
			x_bootdev |= (i << 8);		/* partition */
		}
 bootspec_end:
		consinit();
	}
#endif

	/*
	 * Save parameters into kernel work area.
	 */
	*(int *)(MIPS_PHYS_TO_KSEG1(MACH_MAXMEMSIZE_ADDR)) = x_maxmem;
	*(int *)(MIPS_PHYS_TO_KSEG1(MACH_BOOTDEV_ADDR)) = x_bootdev;
	*(int *)(MIPS_PHYS_TO_KSEG1(MACH_BOOTSW_ADDR)) = x_boothowto;

	kernend = (char *)mips_round_page(end);
#if NKSYMS || defined(DDB) || defined(MODULAR)
	if (nsym)
		kernend = (char *)mips_round_page(esym);
#endif

	/*
	 * Set the VM page size.
	 */
	uvm_setpagesize();

	boothowto = x_boothowto;
	bootdev = x_bootdev;
	physmem = btoc(x_maxmem);

	/*
	 * Now that we know how much memory we have, initialize the
	 * mem cluster array.
	 */
	mem_clusters[0].start = 0;		/* XXX is this correct? */
	mem_clusters[0].size  = ctob(physmem);
	mem_cluster_cnt = 1;

	/*
	 * Copy exception-dispatch code down to exception vector.
	 * Initialize locore-function vector.
	 * Clear out the I and D caches.
	 */
	mips_vector_init(NULL, false);

	/*
	 * We know the CPU type now.  Initialize our DMA tags (might
	 * need this early).
	 */
	newsmips_bus_dma_init();

#if NKSYMS || defined(DDB) || defined(MODULAR)
	if (nsym)
		ksyms_addsyms_elf(esym - ssym, ssym, esym);
#endif

#ifdef KADB
	boothowto |= RB_KDB;
#endif

	/*
	 * Check to see if a mini-root was loaded into memory. It resides
	 * at the start of the next page just after the end of BSS.
	 */
	if (boothowto & RB_MINIROOT)
		kernend += round_page(mfs_initminiroot(kernend));

	/*
	 * Load the rest of the available pages into the VM system.
	 */
	first = round_page(MIPS_KSEG0_TO_PHYS(kernend));
	last = mem_clusters[0].start + mem_clusters[0].size;
	uvm_page_physload(atop(first), atop(last), atop(first), atop(last),
	    VM_FREELIST_DEFAULT);

	/*
	 * Initialize error message buffer (at end of core).
	 */
	mips_init_msgbuf();

	/*
	 * Initialize the virtual memory system.
	 */
	pmap_bootstrap();

	/*
	 * Allocate uarea page for lwp0 and set it.
	 */
	mips_init_lwp0_uarea();

	/*
	 * Determine what model of computer we are running on.
	 */
	switch (systype) {
#ifdef news3400
	case NEWS3400:
		news3400_init();
		strcpy(cpu_model, idrom.id_machine);
		if (strcmp(cpu_model, "news3400") == 0 ||
		    strcmp(cpu_model, "news3200") == 0 ||
		    strcmp(cpu_model, "news3700") == 0) {
			/*
			 * Set up interrupt handling and I/O addresses.
			 */
			hardware_intr = news3400_intr;
			cpuspeed = 10;
		} else {
			printf("kernel not configured for machine %s\n",
			    cpu_model);
		}
		break;
#endif

#ifdef news5000
	case NEWS5000:
		news5000_init();
		strcpy(cpu_model, idrom.id_machine);
		if (strcmp(cpu_model, "news5000") == 0 ||
		    strcmp(cpu_model, "news5900") == 0) {
			/*
			 * Set up interrupt handling and I/O addresses.
			 */
			hardware_intr = news5000_intr;
			cpuspeed = 50;	/* ??? XXX */
		} else {
			printf("kernel not configured for machine %s\n",
			    cpu_model);
		}
		break;
#endif

	default:
		printf("kernel not configured for systype %d\n", systype);
		break;
	}
}
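
The news5000 bootspec parser above packs the boot device into x_bootdev a field at a time: magic in bits 28-31, bus in 24-27, controller in 20-23, unit in 16-19, and partition in 8-15. Pack/unpack helpers for that layout, read off the shifts above rather than from any header (pack_bootdev is a hypothetical name):

#include <stdio.h>

static unsigned
pack_bootdev(unsigned magic, unsigned bus, unsigned ctlr,
    unsigned unit, unsigned part)
{
	return (magic << 28) | (bus << 24) | (ctlr << 20) |
	    (unit << 16) | (part << 8);
}

int
main(void)
{
	/* "scsi(0,10,0)"-style spec: bus 0, SCSI id 10 -> ctlr 1, unit 0 */
	unsigned dev = pack_bootdev(5, 0, 10 / 10, 10 % 10, 0);

	printf("bootdev = 0x%08x\n", dev);
	printf("magic=%u bus=%u ctlr=%u unit=%u part=%u\n",
	    (dev >> 28) & 0xf, (dev >> 24) & 0xf, (dev >> 20) & 0xf,
	    (dev >> 16) & 0xf, (dev >> 8) & 0xff);
	return 0;
}
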
Example #28
/*
    struct vnop_putpages_args {
        struct vnode *a_vp;
        vm_page_t *a_m;
        int a_count;
        int a_sync;
        int *a_rtvals;
        vm_ooffset_t a_offset;
    };
*/
static int
fuse_vnop_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	vm_page_t *pages;
	vm_ooffset_t fsize;

	FS_DEBUG2G("heh\n");

	vp = ap->a_vp;
	KASSERT(vp->v_object, ("objectless vp passed to putpages"));
	fsize = vp->v_object->un_pager.vnp.vnp_size;
	td = curthread;			/* XXX */
	cred = curthread->td_ucred;	/* XXX */
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	if (!fsess_opt_mmap(vnode_mount(vp))) {
		FS_DEBUG("called on non-cacheable vnode??\n");
	}
	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	/*
	 * When putting pages, do not extend file past EOF.
	 */

	if (offset + count > fsize) {
		count = fsize - offset;
		if (count < 0)
			count = 0;
	}
	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&fuse_pbuf_freecnt);

	kva = (vm_offset_t)bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t)kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	error = fuse_io_dispatch(vp, &uio, IO_DIRECT, cred);

	pmap_qremove(kva, npages);
	relpbuf(bp, &fuse_pbuf_freecnt);

	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;

		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			VM_OBJECT_WLOCK(pages[i]->object);
			vm_page_undirty(pages[i]);
			VM_OBJECT_WUNLOCK(pages[i]->object);
		}
	}
	return rtvals[0];
}
Example #29
/*
 * Do all the stuff that locore normally does before calling main().
 */
void
mach_init(int32_t memsize32, u_int bim, int32_t bip32)
{
	intptr_t memsize = (int32_t)memsize32;
	char *kernend;
	char *bip = (char *)(intptr_t)(int32_t)bip32;
	u_long first, last;
	extern char edata[], end[];
	const char *bi_msg;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	char *ssym = 0;
	struct btinfo_symtab *bi_syms;
#endif
	struct btinfo_howto *bi_howto;

	/*
	 * Clear the BSS segment (if needed).
	 */
	if (memcmp(((Elf_Ehdr *)end)->e_ident, ELFMAG, SELFMAG) == 0 &&
	    ((Elf_Ehdr *)end)->e_ident[EI_CLASS] == ELFCLASS) {
		esym = end;
#if NKSYMS || defined(DDB) || defined(MODULAR)
		esym += ((Elf_Ehdr *)end)->e_entry;
#endif
		kernend = (char *)mips_round_page(esym);
		/*
		 * We don't have to clear BSS here
		 * since our bootloader already does it.
		 */
#if 0
		memset(edata, 0, end - edata);
#endif
	} else {
		kernend = (void *)mips_round_page(end);
		/*
		 * No symbol table, so assume we are loaded by
		 * the firmware directly with "bfd" command.
		 * The firmware loader doesn't clear BSS of
		 * a loaded kernel, so do it here.
		 */
		memset(edata, 0, kernend - edata);

	}

	/*
	 * Copy exception-dispatch code down to exception vector.
	 * Initialize locore-function vector.
	 * Clear out the I and D caches.
	 */
	mips_vector_init(NULL, false);

	/* Check for valid bootinfo passed from bootstrap */
	if (bim == BOOTINFO_MAGIC) {
		struct btinfo_magic *bi_magic;

		bootinfo = bip;
		bi_magic = lookup_bootinfo(BTINFO_MAGIC);
		if (bi_magic == NULL) {
			bi_msg = "missing bootinfo structure";
			bim = (uintptr_t)bip;
		} else if (bi_magic->magic != BOOTINFO_MAGIC) {
			bi_msg = "invalid bootinfo structure";
			bim = bi_magic->magic;
		} else
			bi_msg = NULL;
	} else {
		bi_msg = "invalid bootinfo (standalone boot?)";
	}

#if NKSYMS || defined(DDB) || defined(MODULAR)
	bi_syms = lookup_bootinfo(BTINFO_SYMTAB);

	/* Load symbol table if present */
	if (bi_syms != NULL) {
		ssym = (void *)(intptr_t)bi_syms->ssym;
		esym = (void *)(intptr_t)bi_syms->esym;
		kernend = (void *)mips_round_page(esym);
	}
#endif

	bi_howto = lookup_bootinfo(BTINFO_HOWTO);
	if (bi_howto != NULL)
		boothowto = bi_howto->bi_howto;

	cobalt_id = read_board_id();
	if (cobalt_id >= COBALT_MODELS || cobalt_model[cobalt_id] == NULL)
		cpu_setmodel("Cobalt unknown model (board ID %u)",
		    cobalt_id);
	else
		cpu_setmodel("%s", cobalt_model[cobalt_id]);

	switch (cobalt_id) {
	case COBALT_ID_QUBE2700:
	case COBALT_ID_RAQ:
		cpuspeed = 150; /* MHz */
		break;
	case COBALT_ID_QUBE2:
	case COBALT_ID_RAQ2:
		cpuspeed = 250; /* MHz */
		break;
	default:
		/* assume the fastest, so that delay(9) works */
		cpuspeed = 250;
		break;
	}
	curcpu()->ci_cpu_freq = cpuspeed * 1000 * 1000;
	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;
	curcpu()->ci_divisor_delay =
	    ((curcpu()->ci_cpu_freq + (1000000 / 2)) / 1000000);
	/* all models have Rm5200, which is CPU_MIPS_DOUBLE_COUNT */
	curcpu()->ci_cycles_per_hz /= 2;
	curcpu()->ci_divisor_delay /= 2;

	physmem = btoc(memsize - MIPS_KSEG0_START);

	consinit();

	KASSERT(&lwp0 == curlwp);
	if (bi_msg != NULL)
		printf("%s: magic=%#x bip=%p\n", bi_msg, bim, bip);

	uvm_setpagesize();

	/*
	 * The boot command is passed in the top 512 bytes,
	 * so don't clobber that.
	 */
	mem_clusters[0].start = 0;
	mem_clusters[0].size = ctob(physmem) - 512;
	mem_cluster_cnt = 1;

	memcpy(bootstring, (char *)(memsize - 512), 512);
	memset((char *)(memsize - 512), 0, 512);
	bootstring[511] = '\0';

	decode_bootstring();

#if NKSYMS || defined(DDB) || defined(MODULAR)
	/* init symbols if present */
	if ((bi_syms != NULL) && (esym != NULL))
		ksyms_addsyms_elf(esym - ssym, ssym, esym);
#endif
	KASSERT(&lwp0 == curlwp);
#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif
#ifdef KGDB
	if (boothowto & RB_KDB)
		kgdb_connect(0);
#endif

	/*
	 * Load the rest of the available pages into the VM system.
	 */
	first = round_page(MIPS_KSEG0_TO_PHYS(kernend));
	last = mem_clusters[0].start + mem_clusters[0].size;
	uvm_page_physload(atop(first), atop(last), atop(first), atop(last),
	    VM_FREELIST_DEFAULT);

	/*
	 * Initialize error message buffer (at end of core).
	 */
	mips_init_msgbuf();

	pmap_bootstrap();

	/*
	 * Allocate space for proc0's USPACE.
	 */
	mips_init_lwp0_uarea();
}
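
The clock setup above rounds to nearest by adding half the divisor before dividing, then halves both results because the Rm5200 cycle counter ticks once per two CPU cycles. The arithmetic on its own (hz = 100 assumed):

#include <stdio.h>

int
main(void)
{
	unsigned long freq = 250UL * 1000 * 1000;	/* 250 MHz */
	unsigned hz = 100;

	/* rounded division: add half the divisor first */
	unsigned long cycles_per_hz = (freq + hz / 2) / hz;
	unsigned long divisor_delay = (freq + 1000000 / 2) / 1000000;

	/* double-count CPU: the counter ticks every other cycle */
	cycles_per_hz /= 2;
	divisor_delay /= 2;

	printf("cycles_per_hz = %lu\n", cycles_per_hz);	/* 1250000 */
	printf("divisor_delay = %lu\n", divisor_delay);	/* 125 */
	return 0;
}
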
Example #30
void
mach_init(int argc, char **argv, yamon_env_var *envp, u_long memsize)
{
	bus_space_handle_t sh;
	void *kernend;
	const char *cp;
	u_long first, last;
	void *v;
	int freqok, howto, i;
	const struct alchemy_board *board;

	extern char edata[], end[];	/* XXX */

	board = board_info();
	KASSERT(board != NULL);

	/* clear the BSS segment */
	kernend = (void *)mips_round_page(end);
	memset(edata, 0, (char *)kernend - edata);

	/* set CPU model info for sysctl_hw */
	strcpy(cpu_model, board->ab_name);

	/* save the yamon environment pointer */
	yamon_envp = envp;

	/* Use YAMON callbacks for early console I/O */
	cn_tab = &yamon_promcd;

	/*
	 * Set up the exception vectors and CPU-specific function
	 * vectors early on.  We need the wbflush() vector set up
	 * before comcnattach() is called (or at least before the
	 * first printf() after that is called).
	 * Sets up mips_cpu_flags that may be queried by other
	 * functions called during startup.
	 * Also clears the I+D caches.
	 */
	mips_vector_init();

	/*
	 * Set the VM page size.
	 */
	uvm_setpagesize();

	/*
	 * Use YAMON's CPU frequency if available.
	 */
	freqok = yamon_setcpufreq(1);

	/*
	 * Initialize bus space tags.
	 */
	au_cpureg_bus_mem_init(&alchemy_cpuregt, &alchemy_cpuregt);
	aubus_st = &alchemy_cpuregt;

	/*
	 * Calibrate the timer if YAMON failed to tell us.
	 */
	if (!freqok) {
		bus_space_map(aubus_st, PC_BASE, PC_SIZE, 0, &sh);
		au_cal_timers(aubus_st, sh);
		bus_space_unmap(aubus_st, sh, PC_SIZE);
	}

	/*
	 * Perform board-specific initialization.
	 */
	board->ab_init();

	/*
	 * Bring up the console.
	 */
#if NCOM > 0
#ifdef CONSPEED
	if (aucomcnrate == 0)
		aucomcnrate = CONSPEED;
#else /* !CONSPEED */
	/*
	 * Learn default console speed.  We use the YAMON environment,
	 * though we could probably also figure it out by checking the
	 * aucom registers directly.
	 */
	if ((aucomcnrate == 0) && ((cp = yamon_getenv("modetty0")) != NULL))
		aucomcnrate = strtoul(cp, NULL, 0);

	if (aucomcnrate == 0) {
		printf("FATAL: `modetty0' YAMON variable not set.  Set it\n");
		printf("       to the speed of the console and try again.\n");
		printf("       Or, build a kernel with the `CONSPEED' "
		    "option.\n");
		panic("mach_init");
	}
#endif /* CONSPEED */

	/*
	 * Delay to allow firmware putchars to complete.
	 * FIFO depth * character time.
	 * character time = (1000000 / (defaultrate / 10))
	 */
	delay(160000000 / aucomcnrate);
	if (com_aubus_cnattach(UART0_BASE, aucomcnrate) != 0)
		panic("mach_init: unable to initialize serial console");

#else /* NCOM > 0 */
	panic("mach_init: not configured to use serial console");
#endif /* NCOM > 0 */

	/*
	 * Look at arguments passed to us and compute boothowto.
	 */
	boothowto = RB_AUTOBOOT;
#ifdef KADB
	boothowto |= RB_KDB;
#endif
	for (i = 1; i < argc; i++) {
		for (cp = argv[i]; *cp; cp++) {
			/* Ignore superfluous '-', if there is one */
			if (*cp == '-')
				continue;

			howto = 0;
			BOOT_FLAG(*cp, howto);
			if (! howto)
				printf("bootflag '%c' not recognised\n", *cp);
			else
				boothowto |= howto;
		}
	}

	/*
	 * Determine the memory size.  Use the `memsize' PMON
	 * variable.  If that's not available, panic.
	 *
	 * Note: Reserve the first page!  That's where the trap
	 * vectors are located.
	 */

#if defined(MEMSIZE)
	memsize = MEMSIZE;
#else
	if (memsize == 0) {
		if ((cp = yamon_getenv("memsize")) != NULL)
			memsize = strtoul(cp, NULL, 0);
		else {
			printf("FATAL: `memsize' YAMON variable not set.  Set it to\n");
			printf("       the amount of memory (in MB) and try again.\n");
			printf("       Or, build a kernel with the `MEMSIZE' "
			    "option.\n");
			panic("mach_init");
		}
	}
#endif /* MEMSIZE */
	printf("Memory size: 0x%08lx\n", memsize);
	physmem = btoc(memsize);

	mem_clusters[mem_cluster_cnt].start = PAGE_SIZE;
	mem_clusters[mem_cluster_cnt].size =
	    memsize - mem_clusters[mem_cluster_cnt].start;
	mem_cluster_cnt++;

	/*
	 * Load the rest of the available pages into the VM system.
	 */
	first = round_page(MIPS_KSEG0_TO_PHYS(kernend));
	last = mem_clusters[0].start + mem_clusters[0].size;
	uvm_page_physload(atop(first), atop(last), atop(first), atop(last),
	    VM_FREELIST_DEFAULT);

	/*
	 * Initialize message buffer (at end of core).
	 */
	mips_init_msgbuf();

	/*
	 * Initialize the virtual memory system.
	 */
	pmap_bootstrap();

	/*
	 * Init mapping for u page(s) for proc0.
	 */
	v = (void *) uvm_pageboot_alloc(USPACE);
	lwp0.l_addr = proc0paddr = (struct user *)v;
	lwp0.l_md.md_regs = (struct frame *)((char *)v + USPACE) - 1;
	proc0paddr->u_pcb.pcb_context[11] =
	    MIPS_INT_MASK | MIPS_SR_INT_IE; /* SR */

	/*
	 * Initialize debuggers, and break into them, if appropriate.
	 */
#if NKSYMS || defined(DDB) || defined(LKM)
	ksyms_init(0, 0, 0);
#endif
#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif
}
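
A closing note on the delay(160000000 / aucomcnrate) idiom above: it is the drain time, in microseconds, of a 16-entry UART FIFO, since each character costs 10 bit times (start + 8 data + stop) and 16 * 10 * 1000000 / baud = 160000000 / baud. The derivation, spelled out (FIFO depth assumed to be 16):

#include <stdio.h>

int
main(void)
{
	unsigned baud = 115200;
	unsigned fifo_depth = 16;	/* assumed UART FIFO depth */
	unsigned bits_per_char = 10;	/* start + 8 data + stop */

	/* microseconds for the FIFO to drain at this baud rate */
	unsigned long usec =
	    (unsigned long)fifo_depth * bits_per_char * 1000000UL / baud;

	printf("drain delay = %lu us (same as %lu)\n",
	    usec, 160000000UL / baud);
	return 0;
}
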