Example No. 1
0
void
ibd_init(void)
{
	pnode_t	chosen;
	char	*mtuprop = "ipib-frame-size";
	char	*bcastprop = "ipib-broadcast";
	char	*addrprop = "ipib-address";
	char	*cidprop = "client-id";
	int	cidlen;
	uint8_t	dhcpcid[DHCP_MAX_CID_LEN];

	mac_state.mac_addr_len = IPOIB_ADDRL;
	mac_state.mac_addr_buf = bkmem_alloc(mac_state.mac_addr_len);
	if (mac_state.mac_addr_buf == NULL)
		prom_panic("ibd_init: Cannot allocate memory.");

	chosen = prom_finddevice("/chosen");
	if (chosen == OBP_NONODE || chosen == OBP_BADNODE)
		prom_panic("ibd_init: Cannot find /chosen.");

	if (prom_getprop(chosen, addrprop, (caddr_t)mac_state.mac_addr_buf) !=
	    IPOIB_ADDRL)
		prom_panic("ibd_init: Cannot find /chosen:ipib-address\n.");

	if (prom_getprop(chosen, bcastprop, (caddr_t)&ibdbroadcastaddr) !=
	    IPOIB_ADDRL)
		prom_panic("ibd_init: Cannot find /chosen:ipib-broadcast\n.");

	if (((cidlen = prom_getproplen(chosen, cidprop)) <= 0) ||
	    (cidlen > DHCP_MAX_CID_LEN) || (prom_getprop(chosen, cidprop,
	    (caddr_t)&dhcpcid) != cidlen))
		prom_panic("ibd_init: Invalid /chosen:client-id\n.");
	dhcp_set_client_id(dhcpcid, cidlen);

	/*
	 * Note that prom reports mtu including 20 bytes of
	 * addressing information.
	 */
	if (prom_getprop(chosen, mtuprop,
	    (caddr_t)&mac_state.mac_mtu) <= 0)
		mac_state.mac_mtu = IBDSIZE + IPOIB_ADDRL;

	/*
	 * Tell upper layers that we can support a little
	 * more. We will be taking off these 20 bytes at
	 * the start before we invoke prom_write() to send
	 * over the wire.
	 */
	mac_state.mac_arp_timeout = IBD_ARP_TIMEOUT;
	mac_state.mac_in_timeout = IBD_IN_TIMEOUT;

	mac_state.mac_arp = ibd_arp;
	mac_state.mac_rarp = ibd_revarp;
	mac_state.mac_header_len = ibd_header_len;
	mac_state.mac_input = ibd_input;
	mac_state.mac_output = ibd_output;
}
Example No. 2
0
/*
 * prom_walk_devs() implements a generic walker for the OBP tree; this
 * implementation uses an explicitly managed stack in order to save the
 * overhead of a recursive implementation.
 */
void
prom_walk_devs(pnode_t node, int (*cb)(pnode_t, void *, void *), void *arg,
    void *result)
{
	pnode_t stack[OBP_STACKDEPTH];
	int stackidx = 0;

	if (node == OBP_NONODE || node == OBP_BADNODE) {
		prom_panic("Invalid node specified as root of prom tree walk");
	}

	stack[0] = node;

	for (;;) {
		pnode_t curnode = stack[stackidx];
		pnode_t child;

		/*
		 * We're out of stuff to do at this level, bump back up a level
		 * in the tree, and move to the next node;  if the new level
		 * will be level -1, we're done.
		 */
		if (curnode == OBP_NONODE || curnode == OBP_BADNODE) {
			stackidx--;

			if (stackidx < 0)
				return;

			stack[stackidx] = prom_nextnode(stack[stackidx]);
			continue;
		}

		switch ((*cb)(curnode, arg, result)) {

		case PROM_WALK_TERMINATE:
			return;

		case PROM_WALK_CONTINUE:
			/*
			 * If curnode has a child, traverse to it,
			 * otherwise move to curnode's sibling.
			 */
			child = prom_childnode(curnode);
			if (child != OBP_NONODE && child != OBP_BADNODE) {
				stackidx++;
				stack[stackidx] = child;
			} else {
				stack[stackidx] =
				    prom_nextnode(stack[stackidx]);
			}
			break;

		default:
			prom_panic("unrecognized walk directive");
		}
	}
}
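The walker above avoids recursion by keeping one "current node" per tree level in a small array. Below is a self-contained sketch of the same pattern over a toy first-child/next-sibling tree standing in for the OBP device tree; the node type, the WALK_* directives, and walk_tree() are illustrative stand-ins, not PROM interfaces.

#include <stdio.h>

#define	WALK_CONTINUE	0
#define	WALK_TERMINATE	1
#define	STACKDEPTH	64

/* Toy tree node: first child / next sibling, like the OBP device tree. */
struct node {
	const char *name;
	struct node *child;
	struct node *sibling;
};

/*
 * Iterative pre-order walk driven by an explicit stack holding the current
 * node at each level; a NULL entry means that level is exhausted.
 */
static void
walk_tree(struct node *root, int (*cb)(struct node *, void *), void *arg)
{
	struct node *stack[STACKDEPTH];
	int idx = 0;

	stack[0] = root;

	for (;;) {
		struct node *cur = stack[idx];

		if (cur == NULL) {
			if (--idx < 0)
				return;		/* back above the root: done */
			stack[idx] = stack[idx]->sibling;
			continue;
		}

		if (cb(cur, arg) == WALK_TERMINATE)
			return;

		if (cur->child != NULL)
			stack[++idx] = cur->child;	/* descend */
		else
			stack[idx] = cur->sibling;	/* next at this level */
	}
}

static int
print_cb(struct node *n, void *arg)
{
	(void) arg;
	printf("%s\n", n->name);
	return (WALK_CONTINUE);
}

int
main(void)
{
	struct node leaf = { "leaf", NULL, NULL };
	struct node mid = { "mid", &leaf, NULL };
	struct node root = { "root", &mid, NULL };

	walk_tree(&root, print_cb, NULL);	/* prints root, mid, leaf */
	return (0);
}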
Example No. 3
0
void
resalloc_init(void)
{
	char iarch[128];

	if (impl_name(iarch, sizeof (iarch)) < 0) {
		dprintf("boot: resalloc_init: failed to read iarch\n");
		return;
	}

	dprintf("boot: resalloc_init: got iarch %s\n", iarch);

	/*
	 * Some versions of SG/LW8 firmware can actually handle the
	 * entire 10MB, but we don't have the ability to check for the
	 * firmware version here.
	 */
	if (strcmp(iarch, "SUNW,Sun-Fire") == 0 ||
	    strcmp(iarch, "SUNW,Netra-T12") == 0) {
		is_sg = 1;
		sg_addr = MAPPEDMEM_MINTOP;
		sg_len = MAPPEDMEM_FULLTOP - MAPPEDMEM_MINTOP;
		if (prom_alloc(sg_addr, sg_len, 1) != sg_addr)
			prom_panic("can't extend sg bootmem");
	}

	top_bootmem = MAPPEDMEM_FULLTOP;

	dprintf("boot: resalloc_init: boosted top_bootmem to %p\n",
	    (void *)top_bootmem);
}
Example No. 4
0
/*
 * ARP client side
 * Broadcasts to determine MAC address given network order IP address.
 * See RFC 826
 *
 * Returns TRUE if successful, FALSE otherwise.
 */
static int
ibd_arp(struct in_addr *ip, void *hap, uint32_t timeout)
{
	ipoib_mac_t *ep = (ipoib_mac_t *)hap;
	struct arp_packet out;
	int result;

	if (!initialized)
		prom_panic("IPoIB device is not initialized.");

	bzero((char *)&out, sizeof (struct arp_packet));

	out.arp_eh.ipoib_rhdr.ipoib_type = htons(ETHERTYPE_ARP);
	out.arp_ea.arp_op = htons(ARPOP_REQUEST);
	bcopy((caddr_t)&ibdbroadcastaddr, (caddr_t)&out.arp_ea.arp_tha,
	    IPOIB_ADDRL);
	bcopy((caddr_t)ip, (caddr_t)out.arp_ea.arp_tpa,
	    sizeof (struct in_addr));

	result = ibd_comarp(&out, timeout);

	if (result && (ep != NULL)) {
		bcopy((caddr_t)&out.arp_ea.arp_sha, (caddr_t)ep, IPOIB_ADDRL);
	}
	return (result);
}
Example No. 5
0
/*
 * Given the boot path in the native firmware format, use
 * the redirection string to mutate the boot path to the new device.
 * Fix up the 'v2path' so that it matches the new firmware path.
 */
void
redirect_boot_path(char *bpath, char *redirect)
{
	char slicec = *redirect;
	char *p = bpath + strlen(bpath);

	/*
	 * If the redirection character doesn't fall in this
	 * range, something went horribly wrong.
	 */
	if (slicec < '0' || slicec > '7') {
		printf("boot: bad redirection slice '%c'\n", slicec);
		return;
	}

	/*
	 * Handle fully qualified OpenBoot pathname.
	 */
	while (--p >= bpath && *p != '@' && *p != '/')
		if (*p == ':')
			break;
	if (*p++ == ':') {
		/*
		 * Convert slice number to partition 'letter'.
		 */
		*p++ = 'a' + slicec - '0';
		*p = '\0';
		v2path = bpath;
		return;
	}
	prom_panic("redirect_boot_path: mangled boot path!");
}
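A standalone sketch of the conversion above: the redirection character '0' through '7' selects a slice, which is rewritten as the partition letter 'a' through 'h' following the ':' in the device path. The buffer handling and the redirect_slice() name are hypothetical; only the arithmetic mirrors the routine above.

#include <stdio.h>
#include <string.h>

/*
 * Rewrite the partition letter at the end of a "...:X" style path so it
 * names slice 'slicec' ('0'..'7' -> 'a'..'h').  Returns 0 on success.
 */
static int
redirect_slice(char *path, size_t pathsz, char slicec)
{
	char *colon;

	if (slicec < '0' || slicec > '7')
		return (-1);			/* bad redirection slice */

	colon = strrchr(path, ':');
	if (colon == NULL || (size_t)(colon - path) + 3 > pathsz)
		return (-1);			/* no ':' or no room */

	colon[1] = 'a' + (slicec - '0');	/* slice number -> letter */
	colon[2] = '\0';
	return (0);
}

int
main(void)
{
	char path[64] = "/pci@1f,0/ide@d/disk@0,0:a";

	if (redirect_slice(path, sizeof (path), '3') == 0)
		printf("%s\n", path);		/* ...disk@0,0:d */
	return (0);
}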
Example No. 6
0
/*ARGSUSED*/
int
bootprog(char *bpath, char *bargs, boolean_t user_specified_filename)
{
	boolean_t	once = B_FALSE;

	systype = set_fstype(v2path, bpath);

loop:
	/*
	 * Beware: the following code may be executed twice, with different
	 * bpath's if we discover a redirection file.
	 */

	if (verbosemode) {
		printf("device path '%s'\n", bpath);
		if (strcmp(bpath, v2path) != 0)
			printf("client path '%s'\n", v2path);
	}

	if (mountroot(bpath) != SUCCESS)
		prom_panic("Could not mount filesystem.");

	/*
	 * kernname (default-name) might have changed if mountroot() called
	 * boot_nfs_mountroot(), and it called set_default_filename().
	 */
	if (!user_specified_filename)
		(void) strcpy(filename, kernname);

	if (verbosemode)
		printf("standalone = `%s', args = `%s'\n", filename, bargs);

	set_client_bootargs(filename, bargs);

	if (!once &&
	    (strcmp(systype, "ufs") == 0 || strcmp(systype, "hsfs") == 0)) {
		char redirect[OBP_MAXPATHLEN];

		post_mountroot(filename, redirect);

		/*
		 * If we return at all, it's because we discovered
		 * a redirection file - the 'redirect' string now contains
		 * the name of the disk slice we should be looking at.
		 *
		 * Unmount the filesystem, tweak the boot path and retry
		 * the whole operation one more time.
		 */
		closeall(1);
		once = B_TRUE;
		redirect_boot_path(bpath, redirect);
		if (verbosemode)
			printf("%sboot: using '%s'\n", systype, bpath);

		goto loop;
		/*NOTREACHED*/
	}

	return (0);
}
Example No. 7
0
int
set_bcache(fileid_t *fp)
{
	/*
	 *  Insert Disk Block Cache Entry:
	 *
	 *  In this case, we actually read the requested block into a
	 *  dynamically allocated buffer before inserting it into the
	 *  cache.  If the read fails, we return a non-zero value.
	 *
	 *  The search keys for disk blocks are the block number and
	 *  buffer size.  The data associated with each entry is the
	 *  corresponding data buffer.
	 */
	bc_t *bcp;

	if (fp->fi_memp = bkmem_alloc(x_len = fp->fi_count)) {
		/*
		 *  We were able to successfully allocate an input
		 *  buffer, now read the data into it.
		 */
		if (diskread(fp) != 0) {
			/*
			 * I/O error on read. Free the input buffer,
			 * print an error message, and bail out.
			 */
			bkmem_free(fp->fi_memp, x_len);
			printf("disk read error\n");
			return (-1);
		}

		x_blkno = fp->fi_blocknum;
		x_dev = fp->fi_devp->di_dcookie;
		bcp = (bc_t *)
			set_cache(&bc_hash[BC_HASH(x_dev, x_blkno, x_len)],
								&bc_head, 0);
		bcp->bc_blk = x_blkno;
		bcp->bc_hdr.dev = x_dev;
		bcp->bc_hdr.size = x_len;
		bcp->bc_hdr.data = (void *)fp->fi_memp;

	} else {
		/*
		 * We could be a bit more conservative here by
		 * calling "set_cache" before we try to allocate a
		 * buffer (thereby giving us a chance to re-use a
		 * previously allocated buffer) but the error recovery
		 * is a bit trickier, and if we're that short on memory
		 * we'll have trouble elsewhere anyway!
		 */
		prom_panic("can't read - no memory");
	}

	return (0);
}
Example No. 8
0
/*
 * Compare the version of boot that boot says it is against
 * the version of boot the kernel expects.
 */
int
check_boot_version(int boots_version)
{
	if (boots_version == BO_VERSION)
		return (0);

	prom_printf("Wrong boot interface - kernel needs v%d found v%d\n",
	    BO_VERSION, boots_version);
	prom_panic("halting");
	/*NOTREACHED*/
}
Example No. 9
0
/*
 * Initializes our "clock" to the creation date of /timestamp, which is
 * made on the fly for us by the web server. Thereafter, time() will keep
 * time sort of up to date.
 */
void
init_boot_time(void)
{
	struct stat sb;

	if (start_time == 0) {
		if (stat("/timestamp", &sb) < 0)
			prom_panic("init_boot_time: cannot stat /timestamp");

		start_time = sb.st_ctim.tv_sec;
		secs_since_boot = prom_gettime() / 1000;
	}
}
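The time() routine the comment refers to is not shown in this excerpt, but given the two values saved above, one plausible sketch of a clock built on them looks like this; prom_gettime() is stubbed out and all names here are illustrative.

#include <stdio.h>
#include <stdint.h>
#include <time.h>

static time_t start_time;		/* wall-clock seconds at init */
static uint32_t secs_since_boot;	/* ms-counter / 1000 at init */

static uint32_t prom_ms;		/* stand-in for prom_gettime() */

static uint32_t
prom_gettime_stub(void)
{
	return (prom_ms);
}

static void
init_boot_time_sketch(time_t timestamp_ctime)
{
	if (start_time == 0) {
		start_time = timestamp_ctime;
		secs_since_boot = prom_gettime_stub() / 1000;
	}
}

/*
 * Approximate "now" as the reference time plus the seconds elapsed since
 * the reference was captured.
 */
static time_t
time_sketch(void)
{
	return (start_time +
	    (prom_gettime_stub() / 1000 - secs_since_boot));
}

int
main(void)
{
	prom_ms = 4000;				/* 4s after power-on */
	init_boot_time_sketch((time_t)1700000000);
	prom_ms = 9000;				/* 5s later */
	printf("%ld\n", (long)time_sketch());	/* 1700000005 */
	return (0);
}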
Example No. 10
0
void *
bkmem_alloc(size_t s)
{
	static caddr_t next;
	caddr_t ret;

	if (next == NULL)
		next = (caddr_t)roundup((uintptr_t)&_end, MINALLOC);
	ret = next;
	next += roundup(s, MINALLOC);
	if (next >= TOPMEM)
		prom_panic("out of memory");
	return (ret);
}
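The allocator above is a classic bump allocator: round the request up to the allocation granularity, hand out the current cursor, and advance it until the region is exhausted. Here is a self-contained sketch over a toy arena; the arena, the MINALLOC value, and the failure behavior are illustrative, whereas the real code carves the region between _end and TOPMEM and panics on exhaustion.

#include <stdio.h>
#include <stddef.h>

#define	MINALLOC	8u			/* allocation granularity */
#define	ARENASIZE	4096u			/* toy arena size */
#define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

static unsigned char arena[ARENASIZE];

/* Bump allocator: no free list, just a cursor moving through a fixed region. */
static void *
bump_alloc(size_t s)
{
	static size_t next;
	size_t need = ROUNDUP(s, MINALLOC);
	void *ret;

	if (next + need > ARENASIZE)
		return (NULL);			/* out of memory */
	ret = arena + next;
	next += need;
	return (ret);
}

int
main(void)
{
	char *a = bump_alloc(13);		/* rounds up to 16 bytes */
	char *b = bump_alloc(32);

	printf("gap between allocations: %td bytes\n", b - a);
	return (0);
}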
Example No. 11
0
/*
 *	This routine will find the next PAGESIZE chunk in the
 *	low MAPPEDMEM_MINTOP.  It is analogous to valloc(). It is only for boot
 *	scratch memory, because child scratch memory goes up in
 *	the high memory.  We just need to verify that the
 *	pages are on the list.  The calling routine will actually
 *	remove them.
 */
static caddr_t
get_low_vpage(size_t numpages, enum RESOURCES type)
{
	size_t bytes;
	caddr_t v;

	if (!numpages)
		return (0);

	/*
	 * We know the page is mapped because the first MAPPEDMEM_MINTOP
	 * bytes are mapped 1:1.
	 */
	bytes = numpages * pagesize;
	if (scratchmemp + bytes <= top_bootmem) {
		v = scratchmemp;
		scratchmemp += bytes;
		return (v);
	}

	/*
	 * If we run out of scratch memory, look in the freelist
	 */
	if ((v = vpage_from_freelist(bytes)) != NULL)
		return (v);

	/*
	 * Try really hard for allocations that can't fail.  Look in the area
	 * that we've reserved for them.
	 */
	if (type == RES_BOOTSCRATCH_NOFAIL) {
		if (scratchresvp + bytes <= top_resvmem) {
			v = scratchresvp;
			scratchresvp += bytes;
			dprintf("using %lu bytes of reserved mem (%lu left)\n",
			    bytes, top_resvmem - scratchresvp);
			return (v);
		} else {
			printf("boot: failed to allocate %lu bytes from "
			    "reserved scratch memory\n", bytes);
			prom_panic("boot: scratch memory overflow.\n");
		}
	}

	return (NULL);
}
Example No. 12
0
/*
 * Get pages from boot and hash them into the kernel's vp.
 * Used after page structs have been allocated, but before segkmem is ready.
 */
void *
boot_alloc(void *inaddr, size_t size, uint_t align)
{
	caddr_t addr = inaddr;

	if (bootops == NULL)
		prom_panic("boot_alloc: attempt to allocate memory after "
		    "BOP_GONE");

	size = ptob(btopr(size));
#ifdef __sparc
	if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
		panic("boot_alloc: bop_alloc_chunk failed");
#else
	if (BOP_ALLOC(bootops, addr, size, align) != addr)
		panic("boot_alloc: BOP_ALLOC failed");
#endif
	boot_mapin((caddr_t)addr, size);
	return (addr);
}
Example No. 13
0
int
set_rdcache(int dev, char *name, int pnum, int inum)
{
	/*
	 * Reliably set the dcache
	 *
	 * This routine is the same as set_dcache except that it
	 * returns 1 if the entry could not be entered into
	 * the cache without a purge.
	 */
	int len = strlen(name) + 1;
	dc_t *dcp =
		(dc_t *)set_cache(&dc_hash[DC_HASH(dev, name, len)],
								&dc_head, 1);

	if (dcp == NULL)
		return (1);

	if ((dcp->dc_hdr.data = (void *)bkmem_alloc(len)) == NULL) {
		/*
		 * Not enough memory to make a copy of the name!
		 * There's probably not enough to do much else either!
		 */
		prom_panic("no memory for directory cache");
		/* NOTREACHED */
	}

	/*
	 * Allocate a buffer for the pathname component, and
	 * make this the "data" portion of the generalized
	 * "cache_t" struct. Also fill in the cache-specific
	 * fields (pnum, inum).
	 */
	dcp->dc_pnum = pnum;
	dcp->dc_inum = inum;
	dcp->dc_hdr.dev = dev;
	dcp->dc_hdr.size = len;
	bcopy(name, (char *)dcp->dc_hdr.data, len);

	return (0);
}
Example No. 14
0
void
set_dcache(int dev, char *name, int pnum, int inum)
{
	/*
	 *  Build Directory Cache Entry:
	 *
	 *  This routine creates directory cache entries to be retrieved later
	 *  via "get_dcache".  The cache key is composed of three parts: The
	 *  device specifier, the file name ("name"), and the file number of
	 *  the directory containing that name ("pnum").  The data portion of
	 *  the entry consists of the file number ("inum").
	 */

	int len = strlen(name)+1;
	dc_t *dcp =
	    (dc_t *)set_cache(&dc_hash[DC_HASH(dev, name, len)], &dc_head, 0);

	if (dcp->dc_hdr.data = (void *)bkmem_alloc(len)) {
		/*
		 * Allocate a buffer for the pathname component, and
		 * make this the "data" portion of the generalized
		 * "cache_t" struct. Also fill in the cache-specific
		 * fields (pnum, inum).
		 */
		dcp->dc_pnum = pnum;
		dcp->dc_inum = inum;
		dcp->dc_hdr.dev = dev;
		dcp->dc_hdr.size = len;
		bcopy(name, (char *)dcp->dc_hdr.data, len);

	} else {
		/*
		 * Not enough memory to make a copy of the name!
		 * There's probably not enough to do much else either!
		 */
		prom_panic("no memory for directory cache");
	}
}
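The key structure described in the comment (device, parent directory file number, and name identify an entry; the child file number is the cached value) can be shown with a much smaller toy cache. The sketch below uses a fixed array with round-robin replacement instead of the hash-chained cache_t machinery used above; all names are illustrative.

#include <stdio.h>
#include <string.h>

#define	DC_SIZE	16

/*
 * Toy directory-name cache: the lookup key is (dev, parent inode, name)
 * and the cached value is the child inode number.
 */
struct dc_ent {
	int	dev;
	int	pnum;			/* parent directory file number */
	int	inum;			/* cached result */
	char	name[32];
	int	valid;
};

static struct dc_ent dcache[DC_SIZE];
static int dc_next;

static void
dc_set(int dev, const char *name, int pnum, int inum)
{
	struct dc_ent *e = &dcache[dc_next++ % DC_SIZE];	/* reuse oldest */

	e->dev = dev;
	e->pnum = pnum;
	e->inum = inum;
	snprintf(e->name, sizeof (e->name), "%s", name);
	e->valid = 1;
}

static int
dc_get(int dev, const char *name, int pnum)
{
	for (int i = 0; i < DC_SIZE; i++) {
		struct dc_ent *e = &dcache[i];

		if (e->valid && e->dev == dev && e->pnum == pnum &&
		    strcmp(e->name, name) == 0)
			return (e->inum);
	}
	return (-1);			/* not cached */
}

int
main(void)
{
	dc_set(0, "kernel", 2, 37);
	printf("%d\n", dc_get(0, "kernel", 2));	/* 37 */
	printf("%d\n", dc_get(0, "etc", 2));	/* -1 */
	return (0);
}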
Example No. 15
0
/*
 * This routine remaps the kernel using large ttes
 * All entries except locked ones will be removed from the tlb.
 * It assumes that both the text and data segments reside in a separate
 * 4MB virtual and physical contiguous memory chunk.  This routine
 * is only executed by the first cpu.  The remaining cpus execute
 * sfmmu_mp_startup() instead.
 * XXX It assumes that the start of the text segment is KERNELBASE.  It should
 * actually be based on start.
 */
void
sfmmu_remap_kernel(void)
{
	pfn_t	pfn;
	uint_t	attr;
	int	flags;

	extern char end[];
	extern struct as kas;

	textva = (caddr_t)(KERNELBASE & MMU_PAGEMASK4M);
	pfn = va_to_pfn(textva);
	if (pfn == PFN_INVALID)
		prom_panic("can't find kernel text pfn");
	pfn &= TTE_PFNMASK(TTE4M);

	attr = PROC_TEXT | HAT_NOSYNC;
	flags = HAT_LOAD_LOCK | SFMMU_NO_TSBLOAD;
	sfmmu_memtte(&ktext_tte, pfn, attr, TTE4M);
	/*
	 * We set the lock bit in the tte to lock the translation in
	 * the tlb.
	 */
	TTE_SET_LOCKED(&ktext_tte);
	sfmmu_tteload(kas.a_hat, &ktext_tte, textva, NULL, flags);

	datava = (caddr_t)((uintptr_t)end & MMU_PAGEMASK4M);
	pfn = va_to_pfn(datava);
	if (pfn == PFN_INVALID)
		prom_panic("can't find kernel data pfn");
	pfn &= TTE_PFNMASK(TTE4M);

	attr = PROC_DATA | HAT_NOSYNC;
	sfmmu_memtte(&kdata_tte, pfn, attr, TTE4M);
	/*
	 * We set the lock bit in the tte to lock the translation in
	 * the tlb.  We also set the mod bit to avoid taking dirty bit
	 * traps on kernel data.
	 */
	TTE_SET_LOCKED(&kdata_tte);
	TTE_SET_LOFLAGS(&kdata_tte, 0, TTE_HWWR_INT);
	sfmmu_tteload(kas.a_hat, &kdata_tte, datava,
	    (struct page *)NULL, flags);

	/*
	 * create bigktsb ttes if necessary.
	 */
	if (enable_bigktsb) {
		int i = 0;
		caddr_t va = ktsb_base;
		size_t tsbsz = ktsb_sz;
		tte_t tte;

		ASSERT(va >= datava + MMU_PAGESIZE4M);
		ASSERT(tsbsz >= MMU_PAGESIZE4M);
		ASSERT(IS_P2ALIGNED(tsbsz, tsbsz));
		ASSERT(IS_P2ALIGNED(va, tsbsz));
		attr = PROC_DATA | HAT_NOSYNC;
		while (tsbsz != 0) {
			ASSERT(i < MAX_BIGKTSB_TTES);
			pfn = va_to_pfn(va);
			ASSERT(pfn != PFN_INVALID);
			ASSERT((pfn & ~TTE_PFNMASK(TTE4M)) == 0);
			sfmmu_memtte(&tte, pfn, attr, TTE4M);
			ASSERT(TTE_IS_MOD(&tte));
			/*
			 * No need to lock if we use physical addresses.
			 * Since we invalidate the kernel TSB using virtual
			 * addresses, it's an optimization to load them now
			 * so that we won't have to load them later.
			 */
			if (!ktsb_phys) {
				TTE_SET_LOCKED(&tte);
			}
			sfmmu_tteload(kas.a_hat, &tte, va, NULL, flags);
			bigktsb_ttes[i] = tte;
			va += MMU_PAGESIZE4M;
			tsbsz -= MMU_PAGESIZE4M;
			i++;
		}
		bigktsb_nttes = i;
	}

	sfmmu_set_tlb();
}
Example No. 16
0
/*
 * Handle an IP datagram addressed to our MAC address or to the link
 * layer broadcast address. Also respond to ARP requests. Generates
 * inetgrams as long as there's data and the mac level IP timeout timer
 * hasn't expired. As soon as there is no data, we try up to
 * IBD_INPUT_ATTEMPTS more times, then exit the loop, even if there is time
 * left, since we expect to have data waiting for us when we're called; we
 * just don't know how much.
 *
 * We work around slow proms (some proms have hard sleeps of as much as
 * 3 msec) even when there is data waiting.
 *
 * Returns the total number of MEDIA_LVL frames placed on the socket.
 * Caller is expected to free up the inetgram resources.
 */
static int
ibd_input(int index)
{
	struct inetgram		*inp;
	ipoib_ptxhdr_t		*eh;
	int		frames = 0;	/* successful frames */
	int		attempts = 0;	/* failed attempts after success */
	int16_t		len = 0, data_len;
	uint32_t	timeout, reltime;
	uint32_t	pre_pr, post_pr; /* prom_read interval */

#ifdef	DEBUG
	int		failures = 0;		/* total failures */
	int		total_attempts = 0;	/* total prom_read */
	int		no_data = 0;		/* no data in prom */
	int		arps = 0;		/* arp requests processed */
	uint32_t	tot_pr = 0;		/* prom_read time */
	uint32_t	tot_pc = 0;		/* inetgram creation time */
	uint32_t	pre_pc;
	uint32_t	now;
#endif	/* DEBUG */

	if (!initialized)
		prom_panic("IPoIB device is not initialized.");

	if ((reltime = sockets[index].in_timeout) == 0)
		reltime = mac_state.mac_in_timeout;
	timeout = prom_gettime() + reltime;

	do {
		if (frames > IBD_MAX_FRAMES) {
			/* someone is trying a denial of service attack */
			break;
		}

		/*
		 * The following is being paranoid about possible bugs
		 * where prom_read() returns a nonzero length, even when
		 * it hasn't read a packet; it zeroes out the header to
		 * compensate. Paranoia from calvin prom (V2) days.
		 */
		bzero(mac_state.mac_buf, sizeof (ipoib_ptxhdr_t));

		/*
		 * Prom_read() will return 0 or -2 if no data is present. A
		 * return value of -1 means an error has occurred. We adjust
		 * the timeout by calling the time spent in prom_read() "free".
		 * prom_read() returns the number of bytes actually read, but
		 * will only copy "len" bytes into our buffer. Adjust in
		 * case the MTU is wrong.
		 */
		pre_pr = prom_gettime();
		len = prom_read(mac_state.mac_dev, mac_state.mac_buf,
		    mac_state.mac_mtu, 0, NETWORK);
		post_pr = prom_gettime();
		timeout += (post_pr - pre_pr);
#ifdef	DEBUG
		tot_pr += (post_pr - pre_pr);
		total_attempts++;
#endif	/* DEBUG */

		if (len > mac_state.mac_mtu) {
			dprintf("ibd_input: adjusting MTU %d -> %d\n",
			    mac_state.mac_mtu, len);
			bkmem_free(mac_state.mac_buf, mac_state.mac_mtu);
			mac_state.mac_mtu = len;
			mac_state.mac_buf = bkmem_alloc(mac_state.mac_mtu);
			if (mac_state.mac_buf == NULL) {
				prom_panic("ibd_input: Cannot reallocate "
				    "netbuf memory.");
			}
			len = 0; /* pretend there was no data */
		}

		if (len == -1) {
#ifdef	DEBUG
			failures++;
#endif	/* DEBUG */
			break;
		}
		if (len == 0 || len == -2) {
			if (frames != 0)
				attempts++;
#ifdef	DEBUG
			no_data++;
#endif	/* DEBUG */
			continue;
		}

		eh = (ipoib_ptxhdr_t *)mac_state.mac_buf;
		if (eh->ipoib_rhdr.ipoib_type == ntohs(ETHERTYPE_IP) &&
		    len >= (sizeof (ipoib_ptxhdr_t) + sizeof (struct ip))) {

			int offset;
#ifdef	DEBUG
			pre_pc = prom_gettime();
#endif	/* DEBUG */

			inp = (struct inetgram *)bkmem_zalloc(
			    sizeof (struct inetgram));
			if (inp == NULL) {
				errno = ENOMEM;
				return (frames == 0 ? -1 : frames);
			}
			offset = sizeof (ipoib_ptxhdr_t);
			data_len = len - offset;
			inp->igm_mp = allocb(data_len, 0);
			if (inp->igm_mp == NULL) {
				errno = ENOMEM;
				bkmem_free((caddr_t)inp,
				    sizeof (struct inetgram));
				return (frames == 0 ? -1 : frames);
			}
			bcopy((caddr_t)(mac_state.mac_buf + offset),
			    inp->igm_mp->b_rptr, data_len);
			inp->igm_mp->b_wptr += data_len;
			inp->igm_level = NETWORK_LVL;
			add_grams(&sockets[index].inq, inp);
			frames++;
			attempts = 0;
#ifdef	DEBUG
			tot_pc += prom_gettime() - pre_pc;
#endif	/* DEBUG */
			continue;
		}

		if (eh->ipoib_rhdr.ipoib_type == ntohs(ETHERTYPE_ARP) &&
		    len >= sizeof (struct arp_packet)) {

			struct in_addr		ip;
			struct ibd_arp		*ea;

#ifdef	DEBUG
			printf("ibd_input: ARP message received\n");
			arps++;
#endif	/* DEBUG */

			ea = (struct ibd_arp *)(mac_state.mac_buf +
			    sizeof (ipoib_ptxhdr_t));
			if (ea->arp_pro != ntohs(ETHERTYPE_IP))
				continue;

			ipv4_getipaddr(&ip);
			ip.s_addr = ntohl(ip.s_addr);

			if (ea->arp_op == ntohs(ARPOP_REQUEST) &&
			    ip.s_addr != INADDR_ANY &&
			    (bcmp((caddr_t)ea->arp_tpa, (caddr_t)&ip,
			    sizeof (struct in_addr)) == 0)) {
				ea->arp_op = htons(ARPOP_REPLY);
				bcopy((caddr_t)&ea->arp_sha,
				    (caddr_t)&eh->ipoib_dest, IPOIB_ADDRL);
				bcopy((caddr_t)&ea->arp_sha,
				    (caddr_t)&ea->arp_tha, IPOIB_ADDRL);
				bcopy((caddr_t)ea->arp_spa,
				    (caddr_t)ea->arp_tpa,
				    sizeof (struct in_addr));
				bcopy(mac_state.mac_addr_buf,
				    (caddr_t)&ea->arp_sha,
				    mac_state.mac_addr_len);
				bcopy((caddr_t)&ip, (caddr_t)ea->arp_spa,
				    sizeof (struct in_addr));
				(void) prom_write(mac_state.mac_dev,
				    mac_state.mac_buf,
				    sizeof (struct arp_packet), 0, NETWORK);
				/* don't charge for ARP replies */
				timeout += reltime;
			}
		}
	} while (attempts < IBD_INPUT_ATTEMPTS &&
#ifdef	DEBUG
	    (now = prom_gettime()) < timeout);
#else
	    prom_gettime() < timeout);
#endif	/* DEBUG */

#ifdef	DEBUG
	printf("ibd_input(%d): T/S/N/A/F/P/M: %d/%d/%d/%d/%d/%d/%d "
	    "T/O: %d < %d = %s\n", index, total_attempts, frames, no_data,
	    arps, failures, tot_pr, tot_pc, now, timeout,
	    (now < timeout) ? "TRUE" : "FALSE");
#endif	/* DEBUG */
	return (frames);
}
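Two pieces of the loop above are easy to isolate: the deadline is pushed out by exactly the interval spent inside prom_read(), so slow PROM reads don't eat the caller's budget, and once a frame has been seen, a bounded number of consecutive empty polls ends the loop early. A self-contained sketch with stubbed time and read routines follows; the stub behavior and all names are invented for the demo.

#include <stdio.h>
#include <stdint.h>

#define	MAX_EMPTY_ATTEMPTS	8

static uint32_t now_ms;				/* fake clock */

static uint32_t
gettime_ms(void)
{
	return (now_ms);
}

/*
 * Pretend the device delivers five frames and then goes quiet; each poll
 * burns 3 ms inside the "PROM".
 */
static int
read_frame(void)
{
	static int calls;

	now_ms += 3;
	return (++calls <= 5);
}

/*
 * Poll until the deadline passes, or until we have seen data and then
 * MAX_EMPTY_ATTEMPTS consecutive empty polls.  Time spent inside the read
 * itself is "free": the deadline is pushed out by that interval.
 */
static int
poll_frames(uint32_t reltime_ms)
{
	uint32_t deadline = gettime_ms() + reltime_ms;
	int frames = 0, empty = 0;

	do {
		uint32_t pre = gettime_ms();
		int got = read_frame();
		uint32_t post = gettime_ms();

		deadline += post - pre;		/* don't charge read time */

		if (got) {
			frames++;
			empty = 0;
		} else if (frames != 0) {
			empty++;
		}
	} while (empty < MAX_EMPTY_ATTEMPTS && gettime_ms() < deadline);

	return (frames);
}

int
main(void)
{
	printf("frames received: %d\n", poll_frames(50));
	return (0);
}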
Example No. 17
0
/*
 * Reverse ARP client side
 * Determine our Internet address given our MAC address
 * See RFC 903
 */
static void
ibd_revarp(void)
{
	prom_panic("IPoIB can not boot with RARP.");
}
Example No. 18
0
static int
amd64_config_cpu(void)
{
	struct amd64_cpuid_regs __vcr, *vcr = &__vcr;
	uint32_t maxeax;
	uint32_t max_maxeax = 0x100;
	char vendor[13];
	int isamd64 = 0;
	uint32_t stdfeatures = 0, xtdfeatures = 0;
	uint64_t efer;

	/*
	 * This check may seem silly, but if the C preprocessor symbol __amd64
	 * is #defined during compilation, something that may outwardly seem
	 * like a good idea, uts/common/sys/isa_defs.h will #define _LP64,
	 * which will cause uts/common/sys/int_types.h to typedef uint64_t as
	 * an unsigned long - which is only 4 bytes in size when using a 32-bit
	 * compiler.
	 *
	 * If that happens, all the page table translation routines will fail
	 * horribly, so check the size of uint64_t just to ensure some degree
	 * of sanity in future operations.
	 */
	/*LINTED [sizeof result is invariant]*/
	if (sizeof (uint64_t) != 8)
		prom_panic("grub compiled improperly, unable to boot "
		    "64-bit AMD64 executables");

	/*
	 * If the CPU doesn't support the CPUID instruction, it's definitely
	 * not an AMD64.
	 */
	if (amd64_cpuid_supported() == 0)
		return (0);

	amd64_cpuid_insn(0, vcr);

	maxeax = vcr->r_eax;
	{
		/*LINTED [vendor string from cpuid data]*/
		uint32_t *iptr = (uint32_t *)vendor;

		*iptr++ = vcr->r_ebx;
		*iptr++ = vcr->r_edx;
		*iptr++ = vcr->r_ecx;

		vendor[12] = '\0';
	}

	if (maxeax > max_maxeax) {
		grub_printf("cpu: warning, maxeax was 0x%x -> 0x%x\n",
		    maxeax, max_maxeax);
		maxeax = max_maxeax;
	}

	if (maxeax < 1)
		return (0);	/* no additional functions, not an AMD64 */
	else {
		uint_t family, model, step;

		amd64_cpuid_insn(1, vcr);

		/*
		 * All AMD64/IA32e processors technically SHOULD report
		 * themselves as being in family 0xf, but for some reason
		 * Simics doesn't, and this may change in the future, so
		 * don't error out if it's not true.
		 */
		if ((family = BITX(vcr->r_eax, 11, 8)) == 0xf)
			family += BITX(vcr->r_eax, 27, 20);

		if ((model = BITX(vcr->r_eax, 7, 4)) == 0xf)
			model += BITX(vcr->r_eax, 19, 16) << 4;
		step = BITX(vcr->r_eax, 3, 0);

		grub_printf("cpu: '%s' family %d model %d step %d\n",
		    vendor, family, model, step);
		stdfeatures = vcr->r_edx;
	}

	amd64_cpuid_insn(0x80000000, vcr);

	if (vcr->r_eax & 0x80000000) {
		uint32_t xmaxeax = vcr->r_eax;
		const uint32_t max_xmaxeax = 0x80000100;

		if (xmaxeax > max_xmaxeax) {
			grub_printf("amd64: warning, xmaxeax was "
			    "0x%x -> 0x%x\n", xmaxeax, max_xmaxeax);
			xmaxeax = max_xmaxeax;
		}

		if (xmaxeax >= 0x80000001) {
			amd64_cpuid_insn(0x80000001, vcr);
			xtdfeatures = vcr->r_edx;
		}
	}

	if (BITX(xtdfeatures, 29, 29))		/* long mode */
		isamd64++;
	else
		grub_printf("amd64: CPU does NOT support long mode\n");

	if (!BITX(stdfeatures, 0, 0)) {
		grub_printf("amd64: CPU does NOT support FPU\n");
		isamd64--;
	}

	if (!BITX(stdfeatures, 4, 4)) {
		grub_printf("amd64: CPU does NOT support TSC\n");
		isamd64--;
	}

	if (!BITX(stdfeatures, 5, 5)) {
		grub_printf("amd64: CPU does NOT support MSRs\n");
		isamd64--;
	}

	if (!BITX(stdfeatures, 6, 6)) {
		grub_printf("amd64: CPU does NOT support PAE\n");
		isamd64--;
	}

	if (!BITX(stdfeatures, 8, 8)) {
		grub_printf("amd64: CPU does NOT support CX8\n");
		isamd64--;
	}

	if (!BITX(stdfeatures, 13, 13)) {
		grub_printf("amd64: CPU does NOT support PGE\n");
		isamd64--;
	}

	if (!BITX(stdfeatures, 19, 19)) {
		grub_printf("amd64: CPU does NOT support CLFSH\n");
		isamd64--;
	}

	if (!BITX(stdfeatures, 23, 23)) {
		grub_printf("amd64: CPU does NOT support MMX\n");
		isamd64--;
	}

	if (!BITX(stdfeatures, 24, 24)) {
		grub_printf("amd64: CPU does NOT support FXSR\n");
		isamd64--;
	}

	if (!BITX(stdfeatures, 25, 25)) {
		grub_printf("amd64: CPU does NOT support SSE\n");
		isamd64--;
	}

	if (!BITX(stdfeatures, 26, 26)) {
		grub_printf("amd64: CPU does NOT support SSE2\n");
		isamd64--;
	}

	if (isamd64 < 1) {
		grub_printf("amd64: CPU does not support amd64 executables.\n");
		return (0);
	}

	amd64_rdmsr(MSR_AMD_EFER, &efer);
	if (efer & AMD_EFER_SCE)
		grub_printf("amd64: EFER_SCE (syscall/sysret) already "
		    "enabled\n");
	if (efer & AMD_EFER_NXE)
		grub_printf("amd64: EFER_NXE (no-exec prot) already enabled\n");
	if (efer & AMD_EFER_LME)
		grub_printf("amd64: EFER_LME (long mode) already enabled\n");

	return (detect_target_operating_mode());
}
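The feature tests above all lean on a BITX(value, hi, lo) bit-field extractor whose definition is not shown in this excerpt. The sketch below uses a stand-in macro with the same shape and applies it to the family/model/stepping layout of the CPUID function 1 %eax value, mirroring the decode in the routine above.

#include <stdio.h>
#include <stdint.h>

/* Extract bits [hi:lo] of x; a stand-in for the BITX() used above. */
#define	BITX(x, hi, lo) \
	(((x) >> (lo)) & ((1u << ((hi) - (lo) + 1)) - 1))

int
main(void)
{
	uint32_t eax = 0x00100f22;	/* sample CPUID function 1 %eax */
	uint32_t family = BITX(eax, 11, 8);
	uint32_t model = BITX(eax, 7, 4);
	uint32_t step = BITX(eax, 3, 0);

	/* Family 0xf and model 0xf are extended by the upper fields. */
	if (family == 0xf)
		family += BITX(eax, 27, 20);
	if (model == 0xf)
		model += BITX(eax, 19, 16) << 4;

	printf("family %u model %u step %u\n", family, model, step);
	return (0);
}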
Example No. 19
0
/*
 * Standalone's approximation of abort().
 */
void
abort(void)
{
	prom_panic("fatal error; aborting");
}
Example No. 20
0
/* ARGSUSED */
static int
ibd_output(int index, struct inetgram *ogp)
{
	int			header_len, result;
	ipoib_ptxhdr_t		eh;
	struct ip		*ip;
	struct in_addr		tmpip, ipdst;
	int			broadcast = FALSE;
	int			size;
	mblk_t			*mp;

	if (!initialized)
		prom_panic("IPoIB device is not initialized.");

	if (ogp->igm_level != MEDIA_LVL) {
		dprintf("ibd_output: frame type wrong: socket: %d\n",
		    index * SOCKETTYPE);
		errno = EINVAL;
		return (-1);
	}

	header_len = IPOIB_HDRSIZE + IPOIB_ADDRL;
	mp = ogp->igm_mp;
	size = mp->b_wptr - mp->b_rptr;
	if (size > (mac_state.mac_mtu - IPOIB_ADDRL)) {
		dprintf("ibd_output: frame size too big: %d\n", size);
		errno = E2BIG;
		return (-1);
	}

	size += header_len;
	ip = (struct ip *)(mp->b_rptr);

	eh.ipoib_rhdr.ipoib_type = htons(ETHERTYPE_IP);
	eh.ipoib_rhdr.ipoib_mbz = 0;
	bcopy((caddr_t)&ip->ip_dst, (caddr_t)&ipdst, sizeof (ipdst));

	if (ipdst.s_addr == htonl(INADDR_BROADCAST))
		broadcast = TRUE; /* limited broadcast */

	if (!broadcast) {
		struct in_addr mask;

		ipv4_getnetmask(&mask);
		mask.s_addr = htonl(mask.s_addr);
		if (mask.s_addr != htonl(INADDR_BROADCAST) &&
		    (ipdst.s_addr & ~mask.s_addr) == 0) {
			broadcast = TRUE; /* directed broadcast */
		} else {
			if (ogp->igm_router.s_addr != htonl(INADDR_ANY))
				tmpip.s_addr = ogp->igm_router.s_addr;
			else
				tmpip.s_addr = ipdst.s_addr;

			result = mac_get_arp(&tmpip, (void *)&eh.ipoib_dest,
			    IPOIB_ADDRL, mac_state.mac_arp_timeout);
			if (!result) {
				errno = ETIMEDOUT;
				dprintf("ibd_output: ARP request for %s "
				    "timed out.\n", inet_ntoa(tmpip));
				return (-1);
			}
		}
	}

	if (broadcast)
		bcopy((caddr_t)&ibdbroadcastaddr, (caddr_t)&eh.ipoib_dest,
		    IPOIB_ADDRL);

	/* add the ibd header */
	mp->b_rptr -= sizeof (eh);
	bcopy((caddr_t)&eh, mp->b_rptr, sizeof (eh));

#ifdef	DEBUG
	printf("ibd_output(%d): level(%d) frame(0x%x) len(%d)\n",
	    index, ogp->igm_level, mp->b_rptr, size);
#endif	/* DEBUG */

	return (prom_write(mac_state.mac_dev, (char *)mp->b_rptr, size,
	    0, NETWORK));
}