Example #1
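Two routines from what appears to be the DragonFly BSD kernel: the PCI attach hook of the sbsh(4) SHDSL network driver, followed by mmrw(), the read/write handler behind the kernel memory special files.
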
static int
sbsh_attach(device_t dev)
{
	struct sbsh_softc	*sc;
	struct ifnet		*ifp;
	int			unit, error = 0, rid;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

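	/* map the card's register/memory window through its second PCI BAR */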
	rid = PCIR_MAPS + 4;
	sc->mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
					0, ~0, 4096, RF_ACTIVE);

	if (sc->mem_res == NULL) {
		kprintf("sbsh%d: couldn't map memory\n", unit);
		error = ENXIO;
		goto fail;
	}

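	/* allocate the interrupt; the line may be shared with other devices */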
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->irq_res == NULL) {
		kprintf("sbsh%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

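	/* remember the window's kernel virtual address and reset the hardware */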
	sc->mem_base = rman_get_virtual(sc->mem_res);
	init_card(sc);
	/* generate ethernet MAC address */
	*(u_int32_t *)sc->arpcom.ac_enaddr = htonl(0x00ff0192);
	read_random_unlimited(sc->arpcom.ac_enaddr + 4, 2);

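	/* fill in the generic network interface structure */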
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	if_initname(ifp, "sbsh", unit);
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sbsh_ioctl;
	ifp->if_start = sbsh_start;
	ifp->if_watchdog = sbsh_watchdog;
	ifp->if_init = sbsh_init;
	ifp->if_baudrate = 4600000;
	ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, sc->arpcom.ac_enaddr, NULL);

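	/* process the transmit queue on the CPU that services the interrupt */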
	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->irq_res));

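	/* hook up the interrupt handler; it runs under the interface serializer */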
	error = bus_setup_intr(dev, sc->irq_res, INTR_MPSAFE,
				sbsh_intr, sc, &sc->intr_hand,
				ifp->if_serializer);
	if (error) {
		ether_ifdetach(ifp);
		kprintf("sbsh%d: couldn't set up irq\n", unit);
		goto fail;
	}

	return (0);

fail:
	sbsh_detach(dev);
	return (error);
}
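
The second routine, mmrw(), is the read/write handler for the kernel's memory special files (/dev/mem, /dev/kmem, /dev/null, /dev/random, /dev/urandom and /dev/zero); it dispatches on the device minor number.
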
static int
mmrw(cdev_t dev, struct uio *uio, int flags)
{
	int o;
	u_int c;
	u_int poolsize;
	u_long v;
	struct iovec *iov;
	int error = 0;
	caddr_t buf = NULL;

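	/* walk the iovec list, copying one chunk per iteration until done or an error occurs */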
	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {
		case 0:
			/*
			 * minor device 0 is physical memory, /dev/mem 
			 */
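			/*
			 * Map the physical page at the transient ptvmmap
			 * slot, copy at most one page worth, then tear the
			 * mapping down again.
			 */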
			v = uio->uio_offset;
			v &= ~(long)PAGE_MASK;
			pmap_kenter((vm_offset_t)ptvmmap, v);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((uintptr_t)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
			pmap_kremove((vm_offset_t)ptvmmap);
			continue;

		case 1: {
			/*
			 * minor device 1 is kernel memory, /dev/kmem 
			 */
			vm_offset_t saddr, eaddr;
			int prot;

			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently 
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			saddr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);
			if (saddr > eaddr)
				return (EFAULT);

			/*
			 * Make sure the kernel addresses are mapped.
			 * platform_direct_mapped() can be used to bypass
			 * default mapping via the page table (virtual kernels
			 * contain a lot of out-of-band data).
			 */
			prot = VM_PROT_READ;
			if (uio->uio_rw != UIO_READ)
				prot |= VM_PROT_WRITE;
			error = kvm_access_check(saddr, eaddr, prot);
			if (error)
				return (error);
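			/* uio_offset is a kernel virtual address; copy directly through it */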
			error = uiomove((caddr_t)(vm_offset_t)uio->uio_offset,
					(int)c, uio);
			continue;
		}
		case 2:
			/*
			 * minor device 2 (/dev/null) is EOF/RATHOLE
			 */
			if (uio->uio_rw == UIO_READ)
				return (0);
			c = iov->iov_len;
			break;
		case 3:
			/*
			 * minor device 3 (/dev/random) is source of filth
			 * on read, seeder on write
			 */
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
			c = min(iov->iov_len, PAGE_SIZE);
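			/*
			 * Writes seed the RNG, but only when seeding is
			 * enabled and the securelevel allows it.
			 */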
			if (uio->uio_rw == UIO_WRITE) {
				error = uiomove(buf, (int)c, uio);
				if (error == 0 &&
				    seedenable &&
				    securelevel <= 0) {
					error = add_buffer_randomness_src(buf, c, RAND_SRC_SEEDING);
				} else if (error == 0) {
					error = EPERM;
				}
			} else {
				poolsize = read_random(buf, c);
				if (poolsize == 0) {
					if (buf)
						kfree(buf, M_TEMP);
					if ((flags & IO_NDELAY) != 0)
						return (EWOULDBLOCK);
					return (0);
				}
				c = min(c, poolsize);
				error = uiomove(buf, (int)c, uio);
			}
			continue;
		case 4:
			/*
			 * minor device 4 (/dev/urandom) is source of muck
			 * on read, writes are disallowed.
			 */
			c = min(iov->iov_len, PAGE_SIZE);
			if (uio->uio_rw == UIO_WRITE) {
				error = EPERM;
				break;
			}
			if (CURSIG(curthread->td_lwp) != 0) {
				/*
				 * Use tsleep() to get the error code right.
				 * It should return immediately.
				 */
				error = tsleep(&rand_bolt, PCATCH, "urand", 1);
				if (error != 0 && error != EWOULDBLOCK)
					continue;
			}
			if (buf == NULL)
				buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
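			/* urandom never blocks; take whatever the generator hands back */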
			poolsize = read_random_unlimited(buf, c);
			c = min(c, poolsize);
			error = uiomove(buf, (int)c, uio);
			continue;
		case 12:
			/*
			 * minor device 12 (/dev/zero) is a source of nulls
			 * on read; writes are silently discarded.
			 */
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
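			/* lazily allocate one zero-filled page shared by all readers */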
			if (zbuf == NULL) {
				zbuf = (caddr_t)kmalloc(PAGE_SIZE, M_TEMP,
				    M_WAITOK | M_ZERO);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zbuf, (int)c, uio);
			continue;
		default:
			return (ENODEV);
		}
		if (error)
			break;
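		/* account for the bytes consumed in this iteration */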
		iov->iov_base = (char *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (buf)
		kfree(buf, M_TEMP);
	return (error);
}