Code Example #1
File: machine.c  Project: JackieXie168/xnu
kern_return_t
processor_shutdown(
	processor_t			processor)
{
	processor_set_t		pset;
	spl_t				s;

	s = splsched();
	pset = processor->processor_set;
	pset_lock(pset);
	if (processor->state == PROCESSOR_OFF_LINE) {
		/*
		 * Success if already shutdown.
		 */
		pset_unlock(pset);
		splx(s);

		return (KERN_SUCCESS);
	}

	if (processor->state == PROCESSOR_START) {
		/*
		 * Failure if currently being started.
		 */
		pset_unlock(pset);
		splx(s);

		return (KERN_FAILURE);
	}

	/*
	 * If the processor is dispatching, let it finish.
	 */
	while (processor->state == PROCESSOR_DISPATCHING) {
		pset_unlock(pset);
		splx(s);
		delay(1);
		s = splsched();
		pset_lock(pset);
	}

	/*
	 * Success if already being shutdown.
	 */
	if (processor->state == PROCESSOR_SHUTDOWN) {
		pset_unlock(pset);
		splx(s);

		return (KERN_SUCCESS);
	}

	if (processor->state == PROCESSOR_IDLE)
		remqueue((queue_entry_t)processor);
	else
	if (processor->state == PROCESSOR_RUNNING)
		remqueue((queue_entry_t)processor);

	processor->state = PROCESSOR_SHUTDOWN;

	pset_unlock(pset);

	processor_doshutdown(processor);
	splx(s);

	cpu_exit_wait(processor->cpu_id);

	return (KERN_SUCCESS);
}
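
All of the examples collected on this page share the same BSD interrupt-priority-level discipline: save the current level while raising it with one of the spl*() routines (splsched, splbio, splnet, splsoftnet, ...), do the critical work, and restore the saved level with splx(). A minimal sketch of the idiom, assuming a BSD-style kernel that provides spl_t, splsched() and splx() (the function name and body are illustrative placeholders, not code from any of the projects above):

static void
critical_update(void)
{
	spl_t s;

	s = splsched();		/* raise IPL: keep interrupt-level code away */
	/* ... modify state shared with interrupt context ... */
	splx(s);		/* restore the previously saved IPL */
}
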
Code Example #2
File: rccide.c  Project: eyberg/rumpkernel-netbsd-src
static void
serverworks_setup_channel(struct ata_channel *chp)
{
	struct ata_drive_datas *drvp;
	struct atac_softc *atac = chp->ch_atac;
	struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);
	int channel = chp->ch_channel;
	int drive, unit, s;
	u_int32_t pio_time, dma_time, pio_mode, udma_mode;
	u_int32_t idedma_ctl;
	static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
	static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
	dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
	pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
	udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);

	pio_time &= ~(0xffff << (16 * channel));
	dma_time &= ~(0xffff << (16 * channel));
	pio_mode &= ~(0xff << (8 * channel + 16));
	udma_mode &= ~(0xff << (8 * channel + 16));
	udma_mode &= ~(3 << (2 * channel));

	idedma_ctl = 0;

	/* Per drive settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if (drvp->drive_type == ATA_DRIVET_NONE)
			continue;
		unit = drive + 2 * channel;
		/* add timing values, setup DMA if needed */
		pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
		pio_mode |= drvp->PIO_mode << (4 * unit + 16);
		if ((atac->atac_cap & ATAC_CAP_UDMA) &&
		    (drvp->drive_flags & ATA_DRIVE_UDMA)) {
			/* use Ultra/DMA, check for 80-pin cable */
			if (drvp->UDMA_mode > 2 &&
			    (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
			    			       PCI_SUBSYS_ID_REG))
			     & (1 << (14 + channel))) == 0)
				drvp->UDMA_mode = 2;
			dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
			udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
			udma_mode |= 1 << unit;
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if ((atac->atac_cap & ATAC_CAP_DMA) &&
		    (drvp->drive_flags & ATA_DRIVE_DMA)) {
			/* use Multiword DMA */
			s = splbio();
			drvp->drive_flags &= ~ATA_DRIVE_UDMA;
			splx(s);
			dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			/* PIO only */
			s = splbio();
			drvp->drive_flags &= ~(ATA_DRIVE_UDMA | ATA_DRIVE_DMA);
			splx(s);
		}
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
		pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);

	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
		    idedma_ctl);
	}
}
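
serverworks_setup_channel() above relies on the usual read-modify-write idiom for packed per-channel register fields: read the register, clear this channel's field with an inverted mask, OR in the new bits, and write the result back. An illustrative sketch of that idiom (reg_read/reg_write are hypothetical accessors, not part of the NetBSD API):

#include <stdint.h>

uint32_t reg_read(int reg);		/* hypothetical accessor */
void reg_write(int reg, uint32_t val);	/* hypothetical accessor */

/* Replace the 8-bit field belonging to `channel` in a packed register. */
static void
set_channel_field(int reg, int channel, uint8_t value)
{
	uint32_t v = reg_read(reg);

	v &= ~(0xffU << (8 * channel));		/* clear the old field */
	v |= (uint32_t)value << (8 * channel);	/* insert the new value */
	reg_write(reg, v);
}
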
Code Example #3
static int
j720lcd_param(void *ctx, int type, long id, void *msg)
{
	struct j720lcd_softc *sc = ctx;
	struct j720ssp_softc *ssp = sc->sc_ssp;
	uint32_t data[2], len;
	const int maxval = 255;
	int i, s;

	switch (type) {
	case CONFIG_HOOK_GET:
		switch (id) {
		case CONFIG_HOOK_BRIGHTNESS_MAX:
		case CONFIG_HOOK_CONTRAST_MAX:
			*(int *)msg = maxval;
			return 1;
		case CONFIG_HOOK_BRIGHTNESS:
			data[0] = 0xd6;
			data[1] = 0x11;
			len = 2;
			break;
		case CONFIG_HOOK_CONTRAST:
			data[0] = 0xd4;
			data[1] = 0x11;
			len = 2;
			break;
		default:
			return 0;
		}
		break;

	case CONFIG_HOOK_SET:
		switch (id) {
		case CONFIG_HOOK_BRIGHTNESS:
			if (*(int *)msg >= 0) {
				data[0] = 0xd3;
				data[1] = maxval - *(int *)msg;
				len = 2;
			} else {
				/* XXX hack */
				data[0] = 0xdf;
				len = 1;
			}
			break;
		case CONFIG_HOOK_CONTRAST:
			data[0] = 0xd1;
			data[1] = maxval - *(int *)msg;
			len = 2;
			break;
		default:
			return 0;
		}
		break;

	default:
		return 0;
	}

	s = splbio();
	bus_space_write_4(ssp->sc_iot, ssp->sc_gpioh, SAGPIO_PCR, 0x2000000);

	for (i = 0; i < len; i++) {
		if (j720ssp_readwrite(ssp, 1, data[i], &data[i], 500) < 0)
			goto out;
	}
	bus_space_write_4(ssp->sc_iot, ssp->sc_gpioh, SAGPIO_PSR, 0x2000000);
	splx(s);

	if (type == CONFIG_HOOK_SET)
		return 1;

	*(int *)msg = maxval - data[1];

	return 1;

out:
	bus_space_write_4(ssp->sc_iot, ssp->sc_gpioh, SAGPIO_PSR, 0x2000000);

	/* reset SSP */
	bus_space_write_4(ssp->sc_iot, ssp->sc_ssph, SASSP_CR0, 0x307);
	delay(100);
	bus_space_write_4(ssp->sc_iot, ssp->sc_ssph, SASSP_CR0, 0x387);

	splx(s);

	DPRINTF(("j720lcd_param: error %x %x\n", data[0], data[1]));
	return 0;
}
Code Example #4
/*
 * Set up the system's time, given a `reasonable' time value.
 */
void
inittodr(time_t base)
{
	bool badbase = false;
	bool waszero = (base == 0);
	bool goodtime = false;
	bool badrtc = false;
	int s;
	struct timespec ts;
	struct timeval tv;

	rnd_add_data(NULL, &base, sizeof(base), 0);

	if (base < 5 * SECS_PER_COMMON_YEAR) {
		struct clock_ymdhms basedate;

		/*
		 * If base is 0, assume filesystem time is just unknown
		 * instead of preposterous. Don't bark.
		 */
		if (base != 0)
			printf("WARNING: preposterous time in file system\n");
		/* not going to use it anyway, if the chip is readable */
		basedate.dt_year = 2010;
		basedate.dt_mon = 1;
		basedate.dt_day = 1;
		basedate.dt_hour = 12;
		basedate.dt_min = 0;
		basedate.dt_sec = 0;
		base = clock_ymdhms_to_secs(&basedate);
		badbase = true;
	}

	/*
	 * Some ports need to be supplied base in order to fabricate a time_t.
	 */
	if (todr_handle)
		todr_handle->base_time = base;

	if ((todr_handle == NULL) ||
	    (todr_gettime(todr_handle, &tv) != 0) ||
	    (tv.tv_sec < (25 * SECS_PER_COMMON_YEAR))) {

		if (todr_handle != NULL)
			printf("WARNING: preposterous TOD clock time\n");
		else
			printf("WARNING: no TOD clock present\n");
		badrtc = true;
	} else {
		time_t deltat = tv.tv_sec - base;

		if (deltat < 0)
			deltat = -deltat;

		if (!badbase && deltat >= 2 * SECS_PER_DAY) {
			
			if (tv.tv_sec < base) {
				/*
				 * The clock should never go backwards
				 * relative to filesystem time.  If it
				 * does by more than the threshold,
				 * believe the filesystem.
				 */
				printf("WARNING: clock lost %" PRId64 " days\n",
				    deltat / SECS_PER_DAY);
				badrtc = true;
			} else {
				aprint_verbose("WARNING: clock gained %" PRId64
				    " days\n", deltat / SECS_PER_DAY);
				goodtime = true;
			}
		} else {
			goodtime = true;
		}

		rnd_add_data(NULL, &tv, sizeof(tv), 0);
	}

	/* if the rtc time is bad, use the filesystem time */
	if (badrtc) {
		if (badbase) {
			printf("WARNING: using default initial time\n");
		} else {
			printf("WARNING: using filesystem time\n");
		}
		tv.tv_sec = base;
		tv.tv_usec = 0;
	}

	timeset = true;

	ts.tv_sec = tv.tv_sec;
	ts.tv_nsec = tv.tv_usec * 1000;
	s = splclock();
	tc_setclock(&ts);
	splx(s);

	if (waszero || goodtime)
		return;

	printf("WARNING: CHECK AND RESET THE DATE!\n");
}
Code Example #5
static void
nfs_decode_args(struct mount *mp, struct nfsmount *nmp, struct nfs_args *argp,
	const char *hostname)
{
	int s;
	int adjsock;
	int maxio;
	char *p;
	char *secname;
	char *principal;

	s = splnet();

	/*
	 * Set read-only flag if requested; otherwise, clear it if this is
	 * an update.  If this is not an update, then either the read-only
	 * flag is already clear, or this is a root mount and it was set
	 * intentionally at some previous point.
	 */
	if (vfs_getopt(mp->mnt_optnew, "ro", NULL, NULL) == 0) {
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_RDONLY;
		MNT_IUNLOCK(mp);
	} else if (mp->mnt_flag & MNT_UPDATE) {
		MNT_ILOCK(mp);
		mp->mnt_flag &= ~MNT_RDONLY;
		MNT_IUNLOCK(mp);
	}

	/*
	 * Silently clear NFSMNT_NOCONN if it's a TCP mount, it makes
	 * no sense in that context.  Also, set up appropriate retransmit
	 * and soft timeout behavior.
	 */
	if (argp->sotype == SOCK_STREAM) {
		nmp->nm_flag &= ~NFSMNT_NOCONN;
		nmp->nm_flag |= NFSMNT_DUMBTIMR;
		nmp->nm_timeo = NFS_MAXTIMEO;
		nmp->nm_retry = NFS_RETRANS_TCP;
	}

	/* Also clear RDIRPLUS if not NFSv3, it crashes some servers */
	if ((argp->flags & NFSMNT_NFSV3) == 0)
		nmp->nm_flag &= ~NFSMNT_RDIRPLUS;

	/* Re-bind if rsrvd port requested and wasn't on one */
	adjsock = !(nmp->nm_flag & NFSMNT_RESVPORT)
		  && (argp->flags & NFSMNT_RESVPORT);
	/* Also re-bind if we're switching to/from a connected UDP socket */
	adjsock |= ((nmp->nm_flag & NFSMNT_NOCONN) !=
		    (argp->flags & NFSMNT_NOCONN));

	/* Update flags atomically.  Don't change the lock bits. */
	nmp->nm_flag = argp->flags | nmp->nm_flag;
	splx(s);

	if ((argp->flags & NFSMNT_TIMEO) && argp->timeo > 0) {
		nmp->nm_timeo = (argp->timeo * NFS_HZ + 5) / 10;
		if (nmp->nm_timeo < NFS_MINTIMEO)
			nmp->nm_timeo = NFS_MINTIMEO;
		else if (nmp->nm_timeo > NFS_MAXTIMEO)
			nmp->nm_timeo = NFS_MAXTIMEO;
	}

	if ((argp->flags & NFSMNT_RETRANS) && argp->retrans > 1) {
		nmp->nm_retry = argp->retrans;
		if (nmp->nm_retry > NFS_MAXREXMIT)
			nmp->nm_retry = NFS_MAXREXMIT;
	}

	if (argp->flags & NFSMNT_NFSV3) {
		if (argp->sotype == SOCK_DGRAM)
			maxio = NFS_MAXDGRAMDATA;
		else
			maxio = NFS_MAXDATA;
	} else
		maxio = NFS_V2MAXDATA;

	if ((argp->flags & NFSMNT_WSIZE) && argp->wsize > 0) {
		nmp->nm_wsize = argp->wsize;
		/* Round down to multiple of blocksize */
		nmp->nm_wsize &= ~(NFS_FABLKSIZE - 1);
		if (nmp->nm_wsize <= 0)
			nmp->nm_wsize = NFS_FABLKSIZE;
	}
	if (nmp->nm_wsize > maxio)
		nmp->nm_wsize = maxio;
	if (nmp->nm_wsize > MAXBSIZE)
		nmp->nm_wsize = MAXBSIZE;

	if ((argp->flags & NFSMNT_RSIZE) && argp->rsize > 0) {
		nmp->nm_rsize = argp->rsize;
		/* Round down to multiple of blocksize */
		nmp->nm_rsize &= ~(NFS_FABLKSIZE - 1);
		if (nmp->nm_rsize <= 0)
			nmp->nm_rsize = NFS_FABLKSIZE;
	}
	if (nmp->nm_rsize > maxio)
		nmp->nm_rsize = maxio;
	if (nmp->nm_rsize > MAXBSIZE)
		nmp->nm_rsize = MAXBSIZE;

	if ((argp->flags & NFSMNT_READDIRSIZE) && argp->readdirsize > 0) {
		nmp->nm_readdirsize = argp->readdirsize;
	}
	if (nmp->nm_readdirsize > maxio)
		nmp->nm_readdirsize = maxio;
	if (nmp->nm_readdirsize > nmp->nm_rsize)
		nmp->nm_readdirsize = nmp->nm_rsize;

	if ((argp->flags & NFSMNT_ACREGMIN) && argp->acregmin >= 0)
		nmp->nm_acregmin = argp->acregmin;
	else
		nmp->nm_acregmin = NFS_MINATTRTIMO;
	if ((argp->flags & NFSMNT_ACREGMAX) && argp->acregmax >= 0)
		nmp->nm_acregmax = argp->acregmax;
	else
		nmp->nm_acregmax = NFS_MAXATTRTIMO;
	if ((argp->flags & NFSMNT_ACDIRMIN) && argp->acdirmin >= 0)
		nmp->nm_acdirmin = argp->acdirmin;
	else
		nmp->nm_acdirmin = NFS_MINDIRATTRTIMO;
	if ((argp->flags & NFSMNT_ACDIRMAX) && argp->acdirmax >= 0)
		nmp->nm_acdirmax = argp->acdirmax;
	else
		nmp->nm_acdirmax = NFS_MAXDIRATTRTIMO;
	if (nmp->nm_acdirmin > nmp->nm_acdirmax)
		nmp->nm_acdirmin = nmp->nm_acdirmax;
	if (nmp->nm_acregmin > nmp->nm_acregmax)
		nmp->nm_acregmin = nmp->nm_acregmax;

	if ((argp->flags & NFSMNT_MAXGRPS) && argp->maxgrouplist >= 0) {
		if (argp->maxgrouplist <= NFS_MAXGRPS)
			nmp->nm_numgrps = argp->maxgrouplist;
		else
			nmp->nm_numgrps = NFS_MAXGRPS;
	}
	if ((argp->flags & NFSMNT_READAHEAD) && argp->readahead >= 0) {
		if (argp->readahead <= NFS_MAXRAHEAD)
			nmp->nm_readahead = argp->readahead;
		else
			nmp->nm_readahead = NFS_MAXRAHEAD;
	}
	if ((argp->flags & NFSMNT_WCOMMITSIZE) && argp->wcommitsize >= 0) {
		if (argp->wcommitsize < nmp->nm_wsize)
			nmp->nm_wcommitsize = nmp->nm_wsize;
		else
			nmp->nm_wcommitsize = argp->wcommitsize;
	}
	if ((argp->flags & NFSMNT_DEADTHRESH) && argp->deadthresh >= 0) {
		if (argp->deadthresh <= NFS_MAXDEADTHRESH)
			nmp->nm_deadthresh = argp->deadthresh;
		else
			nmp->nm_deadthresh = NFS_MAXDEADTHRESH;
	}

	adjsock |= ((nmp->nm_sotype != argp->sotype) ||
		    (nmp->nm_soproto != argp->proto));
	nmp->nm_sotype = argp->sotype;
	nmp->nm_soproto = argp->proto;

	if (nmp->nm_client && adjsock) {
		nfs_safedisconnect(nmp);
		if (nmp->nm_sotype == SOCK_DGRAM)
			while (nfs_connect(nmp)) {
				printf("nfs_args: retrying connect\n");
				(void) tsleep(&fake_wchan, PSOCK, "nfscon", hz);
			}
	}

	if (hostname) {
		strlcpy(nmp->nm_hostname, hostname,
		    sizeof(nmp->nm_hostname));
		p = strchr(nmp->nm_hostname, ':');
		if (p)
			*p = '\0';
	}

	if (vfs_getopt(mp->mnt_optnew, "sec",
		(void **) &secname, NULL) == 0) {
		nmp->nm_secflavor = nfs_sec_name_to_num(secname);
	} else {
		nmp->nm_secflavor = AUTH_SYS;
	}

	if (vfs_getopt(mp->mnt_optnew, "principal",
		(void **) &principal, NULL) == 0) {
		strlcpy(nmp->nm_principal, principal,
		    sizeof(nmp->nm_principal));
	} else {
		snprintf(nmp->nm_principal, sizeof(nmp->nm_principal),
		    "nfs@%s", nmp->nm_hostname);
	}
}
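
nfs_decode_args() applies the same sizing idiom to wsize and rsize: round the requested size down to a multiple of the block size with x &= ~(NFS_FABLKSIZE - 1) (valid because the block size is a power of two), then clamp it against maxio and MAXBSIZE. A compact sketch of that idiom with hypothetical names:

/*
 * Round `x` down to a multiple of `align` (a power of two), then
 * clamp the result into [align, maxval].  Mirrors the wsize/rsize
 * handling above; the helper itself is illustrative only.
 */
static int
clamp_xfer_size(int x, int align, int maxval)
{
	x &= ~(align - 1);
	if (x <= 0)
		x = align;
	if (x > maxval)
		x = maxval;
	return x;
}
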
Code Example #6
File: ip6_mroute.c  Project: orumin/openbsd-efivars
/*
 * Add an mfc entry
 */
int
add_m6fc(struct mf6cctl *mfccp)
{
	struct mf6c *rt;
	u_long hash;
	struct rtdetq *rte;
	u_short nstl;
	char orig[INET6_ADDRSTRLEN], mcast[INET6_ADDRSTRLEN];
	int s;

	MF6CFIND(mfccp->mf6cc_origin.sin6_addr,
		 mfccp->mf6cc_mcastgrp.sin6_addr, rt);

	/* If an entry already exists, just update the fields */
	if (rt) {
#ifdef MRT6DEBUG
		if (mrt6debug & DEBUG_MFC) {
			log(LOG_DEBUG,"add_m6fc update o %s g %s p %x\n",
			    inet_ntop(AF_INET6,
				&mfccp->mf6cc_origin.sin6_addr,
				orig, sizeof(orig)),
			    inet_ntop(AF_INET6,
				&mfccp->mf6cc_mcastgrp.sin6_addr,
				mcast, sizeof(mcast)),
			    mfccp->mf6cc_parent);
		}
#endif

		s = splsoftnet();

		rt->mf6c_parent = mfccp->mf6cc_parent;
		rt->mf6c_ifset = mfccp->mf6cc_ifset;
		splx(s);
		return 0;
	}

	/*
	 * Find the entry for which the upcall was made and update
	 */
	s = splsoftnet();

	hash = MF6CHASH(mfccp->mf6cc_origin.sin6_addr,
			mfccp->mf6cc_mcastgrp.sin6_addr);
	for (rt = mf6ctable[hash], nstl = 0; rt; rt = rt->mf6c_next) {
		if (IN6_ARE_ADDR_EQUAL(&rt->mf6c_origin.sin6_addr,
				       &mfccp->mf6cc_origin.sin6_addr) &&
		    IN6_ARE_ADDR_EQUAL(&rt->mf6c_mcastgrp.sin6_addr,
				       &mfccp->mf6cc_mcastgrp.sin6_addr) &&
		    (rt->mf6c_stall != NULL)) {

			if (nstl++)
				log(LOG_ERR,
				    "add_m6fc: %s o %s g %s p %x dbx %p\n",
				    "multiple kernel entries",
				    inet_ntop(AF_INET6,
					&mfccp->mf6cc_origin.sin6_addr,
					orig, sizeof(orig)),
				    inet_ntop(AF_INET6,
					&mfccp->mf6cc_mcastgrp.sin6_addr,
					mcast, sizeof(mcast)),
				    mfccp->mf6cc_parent, rt->mf6c_stall);

#ifdef MRT6DEBUG
			if (mrt6debug & DEBUG_MFC)
				log(LOG_DEBUG,
				    "add_m6fc o %s g %s p %x dbg %x\n",
				    inet_ntop(AF_INET6,
					&mfccp->mf6cc_origin.sin6_addr,
					orig, sizeof(orig)),
				    inet_ntop(AF_INET6,
					&mfccp->mf6cc_mcastgrp.sin6_addr,
					mcast, sizeof(mcast)),
				    mfccp->mf6cc_parent, rt->mf6c_stall);
#endif

			rt->mf6c_origin     = mfccp->mf6cc_origin;
			rt->mf6c_mcastgrp   = mfccp->mf6cc_mcastgrp;
			rt->mf6c_parent     = mfccp->mf6cc_parent;
			rt->mf6c_ifset	    = mfccp->mf6cc_ifset;
			/* initialize pkt counters per src-grp */
			rt->mf6c_pkt_cnt    = 0;
			rt->mf6c_byte_cnt   = 0;
			rt->mf6c_wrong_if   = 0;

			rt->mf6c_expire = 0;	/* Don't clean this guy up */
			n6expire[hash]--;

			/* free packets Qed at the end of this entry */
			for (rte = rt->mf6c_stall; rte != NULL; ) {
				struct rtdetq *n = rte->next;
				if (rte->ifp) {
					ip6_mdq(rte->m, rte->ifp, rt);
				}
				m_freem(rte->m);
				free(rte, M_MRTABLE, 0);
				rte = n;
			}
			rt->mf6c_stall = NULL;
		}
	}

	/*
	 * It is possible that an entry is being inserted without an upcall
	 */
	if (nstl == 0) {
#ifdef MRT6DEBUG
		if (mrt6debug & DEBUG_MFC)
			log(LOG_DEBUG,
			    "add_m6fc no upcall h %d o %s g %s p %x\n",
			    hash,
			    inet_ntop(AF_INET6,
				&mfccp->mf6cc_origin.sin6_addr,
				orig, sizeof(orig)),
			    inet_ntop(AF_INET6,
				&mfccp->mf6cc_mcastgrp.sin6_addr,
				mcast, sizeof(mcast)),
			    mfccp->mf6cc_parent);
#endif

		for (rt = mf6ctable[hash]; rt; rt = rt->mf6c_next) {

			if (IN6_ARE_ADDR_EQUAL(&rt->mf6c_origin.sin6_addr,
					       &mfccp->mf6cc_origin.sin6_addr)&&
			    IN6_ARE_ADDR_EQUAL(&rt->mf6c_mcastgrp.sin6_addr,
					       &mfccp->mf6cc_mcastgrp.sin6_addr)) {

				rt->mf6c_origin     = mfccp->mf6cc_origin;
				rt->mf6c_mcastgrp   = mfccp->mf6cc_mcastgrp;
				rt->mf6c_parent     = mfccp->mf6cc_parent;
				rt->mf6c_ifset	    = mfccp->mf6cc_ifset;
				/* initialize pkt counters per src-grp */
				rt->mf6c_pkt_cnt    = 0;
				rt->mf6c_byte_cnt   = 0;
				rt->mf6c_wrong_if   = 0;

				if (rt->mf6c_expire)
					n6expire[hash]--;
				rt->mf6c_expire	   = 0;
			}
		}
		if (rt == NULL) {
			/* no upcall, so make a new entry */
			rt = (struct mf6c *)malloc(sizeof(*rt), M_MRTABLE,
						  M_NOWAIT);
			if (rt == NULL) {
				splx(s);
				return ENOBUFS;
			}

			/* insert new entry at head of hash chain */
			rt->mf6c_origin     = mfccp->mf6cc_origin;
			rt->mf6c_mcastgrp   = mfccp->mf6cc_mcastgrp;
			rt->mf6c_parent     = mfccp->mf6cc_parent;
			rt->mf6c_ifset	    = mfccp->mf6cc_ifset;
			/* initialize pkt counters per src-grp */
			rt->mf6c_pkt_cnt    = 0;
			rt->mf6c_byte_cnt   = 0;
			rt->mf6c_wrong_if   = 0;
			rt->mf6c_expire     = 0;
			rt->mf6c_stall = NULL;

			/* link into table */
			rt->mf6c_next  = mf6ctable[hash];
			mf6ctable[hash] = rt;
		}
	}
	splx(s);
	return 0;
}
Code Example #7
File: radeon_kms.c  Project: ajinkya93/OpenBSD
/**
 * radeon_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * It calls radeon_device_init() to set up the non-display
 * parts of the chip (asic init, CP, writeback, etc.), and
 * radeon_modeset_init() to set up the display parts
 * (crtcs, encoders, hotplug detect, etc.).
 * Returns 0 on success, error on failure.
 */
void
radeondrm_attach_kms(struct device *parent, struct device *self, void *aux)
{
	struct radeon_device	*rdev = (struct radeon_device *)self;
	struct drm_device	*dev;
	struct pci_attach_args	*pa = aux;
	const struct drm_pcidev *id_entry;
	int			 is_agp;
	pcireg_t		 type;
	uint8_t			 iobar;
#if !defined(__sparc64__)
	pcireg_t		 addr, mask;
	int			 s;
#endif

#if defined(__sparc64__) || defined(__macppc__)
	extern int fbnode;
#endif

	id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
	    PCI_PRODUCT(pa->pa_id), radeondrm_pciidlist);
	rdev->flags = id_entry->driver_data;
	rdev->pc = pa->pa_pc;
	rdev->pa_tag = pa->pa_tag;
	rdev->iot = pa->pa_iot;
	rdev->memt = pa->pa_memt;
	rdev->dmat = pa->pa_dmat;

#if defined(__sparc64__) || defined(__macppc__)
	if (fbnode == PCITAG_NODE(rdev->pa_tag))
		rdev->console = 1;
#else
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_DISPLAY_VGA &&
	    (pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG)
	    & (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE))
	    == (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE)) {
		rdev->console = 1;
#if NVGA > 0
		vga_console_attached = 1;
#endif
	}
#if NEFIFB > 0
	if (efifb_is_console(pa)) {
		rdev->console = 1;
		efifb_cndetach();
	}
#endif
#endif

#define RADEON_PCI_MEM		0x10
#define RADEON_PCI_IO		0x14
#define RADEON_PCI_MMIO		0x18
#define RADEON_PCI_IO2		0x20

	type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, RADEON_PCI_MEM);
	if (PCI_MAPREG_TYPE(type) != PCI_MAPREG_TYPE_MEM ||
	    pci_mapreg_info(pa->pa_pc, pa->pa_tag, RADEON_PCI_MEM,
	    type, &rdev->fb_aper_offset, &rdev->fb_aper_size, NULL)) {
		printf(": can't get frambuffer info\n");
		return;
	}

	if (PCI_MAPREG_MEM_TYPE(type) != PCI_MAPREG_MEM_TYPE_64BIT)
		iobar = RADEON_PCI_IO;
	else
		iobar = RADEON_PCI_IO2;
	
	if (pci_mapreg_map(pa, iobar, PCI_MAPREG_TYPE_IO, 0,
	    NULL, &rdev->rio_mem, NULL, &rdev->rio_mem_size, 0)) {
		printf(": can't map IO space\n");
		return;
	}

	type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, RADEON_PCI_MMIO);
	if (PCI_MAPREG_TYPE(type) != PCI_MAPREG_TYPE_MEM ||
	    pci_mapreg_map(pa, RADEON_PCI_MMIO, type, 0, NULL,
	    &rdev->rmmio, &rdev->rmmio_base, &rdev->rmmio_size, 0)) {
		printf(": can't map mmio space\n");
		return;
	}

#if !defined(__sparc64__)
	/*
	 * Make sure we have a base address for the ROM such that we
	 * can map it later.
	 */
	s = splhigh();
	addr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
	mask = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, addr);
	splx(s);

	if (addr == 0 && PCI_ROM_SIZE(mask) != 0 && pa->pa_memex) {
		bus_size_t size, start, end;
		bus_addr_t base;

		size = PCI_ROM_SIZE(mask);
		start = max(PCI_MEM_START, pa->pa_memex->ex_start);
		end = min(PCI_MEM_END, pa->pa_memex->ex_end);
		if (extent_alloc_subregion(pa->pa_memex, start, end, size,
		    size, 0, 0, 0, &base) == 0)
			pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, base);
	}
#endif

#ifdef notyet
	mtx_init(&rdev->swi_lock, IPL_TTY);
#endif

	/* update BUS flag */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_AGP, NULL, NULL)) {
		rdev->flags |= RADEON_IS_AGP;
	} else if (pci_get_capability(pa->pa_pc, pa->pa_tag,
	    PCI_CAP_PCIEXPRESS, NULL, NULL)) {
		rdev->flags |= RADEON_IS_PCIE;
	} else {
		rdev->flags |= RADEON_IS_PCI;
	}

	DRM_DEBUG("%s card detected\n",
		 ((rdev->flags & RADEON_IS_AGP) ? "AGP" :
		 (((rdev->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));

	is_agp = pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_AGP,
	    NULL, NULL);

	printf("\n");

	kms_driver.num_ioctls = radeon_max_kms_ioctl;

	dev = (struct drm_device *)drm_attach_pci(&kms_driver, pa, is_agp,
	    rdev->console, self);
	rdev->ddev = dev;
	rdev->pdev = dev->pdev;

	rdev->family = rdev->flags & RADEON_FAMILY_MASK;
	if (!radeon_msi_ok(rdev))
		pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED;

	rdev->msi_enabled = 0;
	if (pci_intr_map_msi(pa, &rdev->intrh) == 0)
		rdev->msi_enabled = 1;
	else if (pci_intr_map(pa, &rdev->intrh) != 0) {
		printf(": couldn't map interrupt\n");
		return;
	}
	printf("%s: %s\n", rdev->dev.dv_xname,
	    pci_intr_string(pa->pa_pc, rdev->intrh));

	rdev->irqh = pci_intr_establish(pa->pa_pc, rdev->intrh, IPL_TTY,
	    radeon_driver_irq_handler_kms, rdev->ddev, rdev->dev.dv_xname);
	if (rdev->irqh == NULL) {
		printf("%s: couldn't establish interrupt\n",
		    rdev->dev.dv_xname);
		return;
	}

#ifdef __sparc64__
{
	struct rasops_info *ri;
	int node, console;

	node = PCITAG_NODE(pa->pa_tag);
	console = (fbnode == node);

	fb_setsize(&rdev->sf, 8, 1152, 900, node, 0);

	/*
	 * The firmware sets up the framebuffer such that it starts at
	 * an offset from the start of video memory.
	 */
	rdev->fb_offset =
	    bus_space_read_4(rdev->memt, rdev->rmmio, RADEON_CRTC_OFFSET);
	if (bus_space_map(rdev->memt, rdev->fb_aper_offset + rdev->fb_offset,
	    rdev->sf.sf_fbsize, BUS_SPACE_MAP_LINEAR, &rdev->memh)) {
		printf("%s: can't map video memory\n", rdev->dev.dv_xname);
		return;
	}

	ri = &rdev->sf.sf_ro;
	ri->ri_bits = bus_space_vaddr(rdev->memt, rdev->memh);
	ri->ri_hw = rdev;
	ri->ri_updatecursor = NULL;

	fbwscons_init(&rdev->sf, RI_VCONS | RI_WRONLY | RI_BSWAP, console);
	if (console)
		fbwscons_console_init(&rdev->sf, -1);
}
#endif

	rdev->shutdown = true;
	config_mountroot(self, radeondrm_attachhook);
}
Code Example #8
/*
 * General trap (exception) handling function for mips.
 * This is called by the assembly-language exception handler once
 * the trapframe has been set up.
 */
void
mips_trap(struct trapframe *tf)
{
	uint32_t code;
	bool isutlb, iskern;
	int spl;

	/* The trap frame is supposed to be 37 registers long. */
	KASSERT(sizeof(struct trapframe)==(37*4));

	/*
	 * Extract the exception code info from the register fields.
	 */
	code = (tf->tf_cause & CCA_CODE) >> CCA_CODESHIFT;
	isutlb = (tf->tf_cause & CCA_UTLB) != 0;
	iskern = (tf->tf_status & CST_KUp) == 0;

	KASSERT(code < NTRAPCODES);

	/* Make sure we haven't run off our stack */
	if (curthread != NULL && curthread->t_stack != NULL) {
		KASSERT((vaddr_t)tf > (vaddr_t)curthread->t_stack);
		KASSERT((vaddr_t)tf < (vaddr_t)(curthread->t_stack
						+ STACK_SIZE));
	}

	/* Interrupt? Call the interrupt handler and return. */
	if (code == EX_IRQ) {
		int old_in;
		bool doadjust;

		old_in = curthread->t_in_interrupt;
		curthread->t_in_interrupt = 1;

		/*
		 * The processor has turned interrupts off; if the
		 * currently recorded interrupt state is interrupts on
		 * (spl of 0), adjust the recorded state to match, and
		 * restore after processing the interrupt.
		 *
		 * How can we get an interrupt if the recorded state
		 * is interrupts off? Well, as things currently stand
		 * when the CPU finishes idling it flips interrupts on
		 * and off to allow things to happen, but leaves
		 * curspl high while doing so.
		 *
		 * While we're here, assert that the interrupt
		 * handling code hasn't leaked a spinlock or an
		 * splhigh().
		 */

		if (curthread->t_curspl == 0) {
			KASSERT(curthread->t_curspl == 0);
			KASSERT(curthread->t_iplhigh_count == 0);
			curthread->t_curspl = IPL_HIGH;
			curthread->t_iplhigh_count++;
			doadjust = true;
		}
		else {
			doadjust = false;
		}

		mainbus_interrupt(tf);

		if (doadjust) {
			KASSERT(curthread->t_curspl == IPL_HIGH);
			KASSERT(curthread->t_iplhigh_count == 1);
			curthread->t_iplhigh_count--;
			curthread->t_curspl = 0;
		}

		curthread->t_in_interrupt = old_in;
		goto done2;
	}

	/*
	 * The processor turned interrupts off when it took the trap.
	 *
	 * While we're in the kernel, and not actually handling an
	 * interrupt, restore the interrupt state to where it was in
	 * the previous context, which may be low (interrupts on).
	 *
	 * Do this by forcing splhigh(), which may do a redundant
	 * cpu_irqoff() but forces the stored MI interrupt state into
	 * sync, then restoring the previous state.
	 */
	spl = splhigh();
	splx(spl);

	/* Syscall? Call the syscall handler and return. */
	if (code == EX_SYS) {
		/* Interrupts should have been on while in user mode. */
		KASSERT(curthread->t_curspl == 0);
		KASSERT(curthread->t_iplhigh_count == 0);

		DEBUG(DB_SYSCALL, "syscall: #%d, args %x %x %x %x\n", 
		      tf->tf_v0, tf->tf_a0, tf->tf_a1, tf->tf_a2, tf->tf_a3);

		syscall(tf);
		goto done;
	}

	/*
	 * Ok, it wasn't any of the really easy cases.
	 * Call vm_fault on the TLB exceptions.
	 * Panic on the bus error exceptions.
	 */
	switch (code) {
	case EX_MOD:
		if (vm_fault(VM_FAULT_READONLY, tf->tf_vaddr)==0) {
            //kprintf("Fault type: readonly\n");
			goto done;
		}
		break;
	case EX_TLBL:
		if (vm_fault(VM_FAULT_READ, tf->tf_vaddr)==0) {
            //kprintf("Fault type: read\n");
			goto done;
		}
		break;
	case EX_TLBS:
		if (vm_fault(VM_FAULT_WRITE, tf->tf_vaddr)==0) {
            //kprintf("Fault type: write\n");
			goto done;
		}
		break;
	case EX_IBE:
	case EX_DBE:
		/*
		 * This means you loaded invalid TLB entries, or 
		 * touched invalid parts of the direct-mapped 
		 * segments. These are serious kernel errors, so
		 * panic.
		 * 
		 * The MIPS won't even tell you what invalid address
		 * caused the bus error.
		 */
		panic("Bus error exception, PC=0x%x\n", tf->tf_epc);
		break;
	}

	/*
	 * If we get to this point, it's a fatal fault - either it's
	 * one of the other exceptions, like illegal instruction, or
	 * it was a page fault we couldn't handle.
	 */

	if (!iskern) {
		/*
		 * Fatal fault in user mode.
		 * Kill the current user process.
		 */
		kill_curthread(tf->tf_epc, code, tf->tf_vaddr);
		goto done;
	}

	/*
	 * Fatal fault in kernel mode.
	 *
	 * If pcb_badfaultfunc is set, we do not panic; badfaultfunc is
	 * set by copyin/copyout and related functions to signify that
	 * the addresses they're accessing are userlevel-supplied and
	 * not trustable. What we actually want to do is resume
	 * execution at the function pointed to by badfaultfunc. That's 
	 * going to be "copyfail" (see copyinout.c), which longjmps 
	 * back to copyin/copyout or wherever and returns EFAULT.
	 *
	 * Note that we do not just *call* this function, because that
	 * won't necessarily do anything. We want the control flow
	 * that is currently executing in copyin (or whichever), and
	 * is stopped while we process the exception, to *teleport* to
	 * copyfail.
	 *
	 * This is accomplished by changing tf->tf_epc and returning
	 * from the exception handler.
	 */

	if (curthread != NULL &&
	    curthread->t_machdep.tm_badfaultfunc != NULL) {
		tf->tf_epc = (vaddr_t) curthread->t_machdep.tm_badfaultfunc;
		goto done;
	}

	/*
	 * Really fatal kernel-mode fault.
	 */

	kprintf("panic: Fatal exception %u (%s) in kernel mode\n", code,
		trapcodenames[code]);
	kprintf("panic: EPC 0x%x, exception vaddr 0x%x\n", 
		tf->tf_epc, tf->tf_vaddr);

	panic("I can't handle this... I think I'll just die now...\n");

 done:
	/*
	 * Turn interrupts off on the processor, without affecting the
	 * stored interrupt state.
	 */
	cpu_irqoff();
 done2:

	/*
	 * The boot thread can get here (e.g. on interrupt return) but
	 * since it doesn't go to userlevel, it can't be returning to
	 * userlevel, so there's no need to set cputhreads[] and
	 * cpustacks[]. Just return.
	 */
	if (curthread->t_stack == NULL) {
		return;
	}

	cputhreads[curcpu->c_number] = (vaddr_t)curthread;
	cpustacks[curcpu->c_number] = (vaddr_t)curthread->t_stack + STACK_SIZE;

	/*
	 * This assertion will fail if either
	 *   (1) curthread->t_stack is corrupted, or
	 *   (2) the trap frame is somehow on the wrong kernel stack.
	 *
	 * If cpustacks[] is corrupted, the next trap back to the
	 * kernel will (most likely) hang the system, so it's better
	 * to find out now.
	 */
	KASSERT(SAME_STACK(cpustacks[curcpu->c_number]-1, (vaddr_t)tf));
}
Code Example #9
STATIC int
test_mod(test_pars_t *tp, pdu_t *pdu, iscsi_pdu_kind_t kind, int rxtx, int err)
{
	mod_desc_t *mod;
	uint32_t mpoff, off;
	int i, rc = 0, s;

	tp->pdu_count[kind][rxtx]++;
	tp->pdu_count[ANY_PDU][rxtx]++;

	do {
		if ((mod = TAILQ_FIRST(&tp->mods)) == NULL) {
			return check_loss(tp, rxtx);
		}
		if (mod->pars.which_pdu != ANY_PDU &&
		    mod->pars.which_pdu != kind) {
			return check_loss(tp, rxtx);
		}
		mpoff = mod->pars.pdu_offset;

		switch (mod->pars.which_offset) {
		case ABSOLUTE_ANY:
			off = tp->pdu_count[ANY_PDU][CNT_TX] +
				  tp->pdu_count[ANY_PDU][CNT_RX];
			break;
		case RELATIVE_ANY:
			off = (tp->pdu_count[ANY_PDU][CNT_TX] +
				   tp->pdu_count[ANY_PDU][CNT_RX]) -
				(tp->pdu_last[ANY_PDU][CNT_TX] + tp->pdu_last[ANY_PDU][CNT_RX]);
			break;

		case ABSOLUTE_PDUKIND:
			off = tp->pdu_count[kind][rxtx];
			break;
		case RELATIVE_PDUKIND:
			off = tp->pdu_count[kind][rxtx] - tp->pdu_last[kind][rxtx];
			break;

		case ABSOLUTE_TX:
			if (rxtx != CNT_TX)
				return check_loss(tp, rxtx);
			off = tp->pdu_count[ANY_PDU][CNT_TX];
			break;
		case RELATIVE_TX:
			if (rxtx != CNT_TX)
				return check_loss(tp, rxtx);
			off = tp->pdu_count[ANY_PDU][CNT_TX] -
				  tp->pdu_last[ANY_PDU][CNT_TX];
			break;

		case ABSOLUTE_RX:
			if (rxtx != CNT_RX)
				return check_loss(tp, rxtx);
			off = tp->pdu_count[ANY_PDU][CNT_RX];
			break;
		case RELATIVE_RX:
			if (rxtx != CNT_RX)
				return check_loss(tp, rxtx);
			off = tp->pdu_count[ANY_PDU][CNT_RX] -
				  tp->pdu_last[ANY_PDU][CNT_RX];
			break;

		default:
			/* bad offset - skip this entry */
			mpoff = off = 0;
			break;
		}

		DEB(1, ("test_mod: kind=%d, rxtx=%d, pdukind=%d, mpoff=%d, "
				"whichoff=%d, off=%d\n", kind, rxtx, mod->pars.which_pdu,
				mpoff, mod->pars.which_offset, off));

		if (!off || (mpoff != 0 && mpoff < off)) {
			/* This might happen in some cases. Just discard the modification. */
			s = splbio();
			TAILQ_REMOVE(&tp->mods, mod, link);
			splx(s);

			update_options(tp, mod);

			if (mod->pars.options & ISCSITEST_OPT_WAIT_FOR_COMPLETION) {
				mod->pars.status = ISCSI_STATUS_TEST_MODIFICATION_SKIPPED;
				wakeup(mod);
			}
			free(mod, M_TEMP);
		}
	} while (mpoff && mpoff < off);

	if (mpoff > off)
		return check_loss(tp, rxtx);

	DEB(1, ("test_mod: opt=%x, pdu_ptr=%x, num_mods=%d\n", mod->pars.options,
			(int) mod->pdu_ptr, mod->pars.num_pdu_mods));

	if (mod->pdu_ptr)
		test_get(pdu, mod, err);

	if (mod->pars.options & ISCSITEST_OPT_DISCARD_PDU)
		rc = 1;
	else if (check_loss(tp, rxtx))
		rc = 1;
	else if (mod->pars.num_pdu_mods) {
		if (!(mod->pars.options & ISCSITEST_OPT_MOD_PERMANENT)) {
			/*
			 * Note: if the PDU is later resent, the unmodified one
			 * will be used as resend_pdu restores the original io
			 * vector.
			 */
			pdu->mod_pdu = pdu->pdu;
			pdu->io_vec[0].iov_base = &pdu->mod_pdu;
		}
		for (i = 0; i < mod->pars.num_pdu_mods; i++) {
			mod_pdu(pdu, &mod->mods[i]);
		}
	}

	if (rxtx == CNT_TX) {
		if (mod->pars.options & ISCSITEST_OPT_NO_RESPONSE_PDU) {
			ccb_t *ccb = pdu->owner;

			DEB(1, ("test_mod: No response expected, completing CCB %x\n",
					(int)ccb));

			if (ccb != NULL &&
				(ccb->disp == CCBDISP_WAIT || ccb->disp == CCBDISP_SCSIPI)) {
				/* simulate timeout */
				wake_ccb(ccb, ISCSI_STATUS_TIMEOUT);
			}
		}

		if ((mod->pars.options & ISCSITEST_SFLAG_UPDATE_FIELDS) &&
			mod->pars.num_pdu_mods) {
			connection_t *conn = pdu->connection;

			if (conn->HeaderDigest &&
				!(mod->pars.options & ISCSITEST_SFLAG_NO_HEADER_DIGEST))
				pdu->pdu.HeaderDigest = gen_digest(&pdu->pdu, BHS_SIZE);

			if (pdu->uio.uio_iovcnt > 1 && conn->DataDigest &&
				!(mod->pars.options & ISCSITEST_SFLAG_NO_DATA_DIGEST))
				pdu->data_digest = gen_digest_2(
						pdu->io_vec[1].iov_base,
						pdu->io_vec[1].iov_len,
						pdu->io_vec[2].iov_base,
						pdu->io_vec[2].iov_len);
		}
	}

	s = splbio();
	TAILQ_REMOVE(&tp->mods, mod, link);
	update_options(tp, mod);
	/* we've modified a PDU - copy current count into last count */
	memcpy(tp->pdu_last, tp->pdu_count, sizeof(tp->pdu_last));
	splx(s);

	if (mod->pars.options & ISCSITEST_OPT_WAIT_FOR_COMPLETION) {
		wakeup(mod);
	}
	if (mod->pars.options & ISCSITEST_KILL_CONNECTION) {
		kill_connection(tp->connection,
				ISCSI_STATUS_TEST_CONNECTION_CLOSED,
				NO_LOGOUT, TRUE);
	}
	free(mod, M_TEMP);

	return rc;
}
Code Example #10
void
nfs_decode_args(struct nfsmount *nmp, struct nfs_args *argp, struct lwp *l)
{
	int s;
	int adjsock;
	int maxio;

	s = splsoftnet();

	/*
	 * Silently clear NFSMNT_NOCONN if it's a TCP mount, it makes
	 * no sense in that context.
	 */
	if (argp->sotype == SOCK_STREAM)
		argp->flags &= ~NFSMNT_NOCONN;

	/*
	 * Cookie translation is not needed for v2, silently ignore it.
	 */
	if ((argp->flags & (NFSMNT_XLATECOOKIE|NFSMNT_NFSV3)) ==
	    NFSMNT_XLATECOOKIE)
		argp->flags &= ~NFSMNT_XLATECOOKIE;

	/* Re-bind if rsrvd port requested and wasn't on one */
	adjsock = !(nmp->nm_flag & NFSMNT_RESVPORT)
		  && (argp->flags & NFSMNT_RESVPORT);
	/* Also re-bind if we're switching to/from a connected UDP socket */
	adjsock |= ((nmp->nm_flag & NFSMNT_NOCONN) !=
		    (argp->flags & NFSMNT_NOCONN));

	/* Update flags. */
	nmp->nm_flag = argp->flags;
	splx(s);

	if ((argp->flags & NFSMNT_TIMEO) && argp->timeo > 0) {
		nmp->nm_timeo = (argp->timeo * NFS_HZ + 5) / 10;
		if (nmp->nm_timeo < NFS_MINTIMEO)
			nmp->nm_timeo = NFS_MINTIMEO;
		else if (nmp->nm_timeo > NFS_MAXTIMEO)
			nmp->nm_timeo = NFS_MAXTIMEO;
	}

	if ((argp->flags & NFSMNT_RETRANS) && argp->retrans > 1) {
		nmp->nm_retry = argp->retrans;
		if (nmp->nm_retry > NFS_MAXREXMIT)
			nmp->nm_retry = NFS_MAXREXMIT;
	}

#ifndef NFS_V2_ONLY
	if (argp->flags & NFSMNT_NFSV3) {
		if (argp->sotype == SOCK_DGRAM)
			maxio = NFS_MAXDGRAMDATA;
		else
			maxio = NFS_MAXDATA;
	} else
#endif
		maxio = NFS_V2MAXDATA;

	if ((argp->flags & NFSMNT_WSIZE) && argp->wsize > 0) {
		int osize = nmp->nm_wsize;
		nmp->nm_wsize = argp->wsize;
		/* Round down to multiple of blocksize */
		nmp->nm_wsize &= ~(NFS_FABLKSIZE - 1);
		if (nmp->nm_wsize <= 0)
			nmp->nm_wsize = NFS_FABLKSIZE;
		adjsock |= (nmp->nm_wsize != osize);
	}
	if (nmp->nm_wsize > maxio)
		nmp->nm_wsize = maxio;
	if (nmp->nm_wsize > MAXBSIZE)
		nmp->nm_wsize = MAXBSIZE;

	if ((argp->flags & NFSMNT_RSIZE) && argp->rsize > 0) {
		int osize = nmp->nm_rsize;
		nmp->nm_rsize = argp->rsize;
		/* Round down to multiple of blocksize */
		nmp->nm_rsize &= ~(NFS_FABLKSIZE - 1);
		if (nmp->nm_rsize <= 0)
			nmp->nm_rsize = NFS_FABLKSIZE;
		adjsock |= (nmp->nm_rsize != osize);
	}
	if (nmp->nm_rsize > maxio)
		nmp->nm_rsize = maxio;
	if (nmp->nm_rsize > MAXBSIZE)
		nmp->nm_rsize = MAXBSIZE;

	if ((argp->flags & NFSMNT_READDIRSIZE) && argp->readdirsize > 0) {
		nmp->nm_readdirsize = argp->readdirsize;
		/* Round down to multiple of minimum blocksize */
		nmp->nm_readdirsize &= ~(NFS_DIRFRAGSIZ - 1);
		if (nmp->nm_readdirsize < NFS_DIRFRAGSIZ)
			nmp->nm_readdirsize = NFS_DIRFRAGSIZ;
		/* Bigger than buffer size makes no sense */
		if (nmp->nm_readdirsize > NFS_DIRBLKSIZ)
			nmp->nm_readdirsize = NFS_DIRBLKSIZ;
	} else if (argp->flags & NFSMNT_RSIZE)
		nmp->nm_readdirsize = nmp->nm_rsize;

	if (nmp->nm_readdirsize > maxio)
		nmp->nm_readdirsize = maxio;

	if ((argp->flags & NFSMNT_MAXGRPS) && argp->maxgrouplist >= 0 &&
		argp->maxgrouplist <= NFS_MAXGRPS)
		nmp->nm_numgrps = argp->maxgrouplist;
	if ((argp->flags & NFSMNT_READAHEAD) && argp->readahead >= 0 &&
		argp->readahead <= NFS_MAXRAHEAD)
		nmp->nm_readahead = argp->readahead;
	if ((argp->flags & NFSMNT_DEADTHRESH) && argp->deadthresh >= 1 &&
		argp->deadthresh <= NFS_NEVERDEAD)
		nmp->nm_deadthresh = argp->deadthresh;

	adjsock |= ((nmp->nm_sotype != argp->sotype) ||
		    (nmp->nm_soproto != argp->proto));
	nmp->nm_sotype = argp->sotype;
	nmp->nm_soproto = argp->proto;

	if (nmp->nm_so && adjsock) {
		nfs_safedisconnect(nmp);
		if (nmp->nm_sotype == SOCK_DGRAM)
			while (nfs_connect(nmp, (struct nfsreq *)0, l)) {
				printf("nfs_args: retrying connect\n");
				kpause("nfscn3", false, hz, NULL);
			}
	}
}
Code Example #11
File: vnd.c  Project: orumin/openbsd-efivars
void
vndstrategy(struct buf *bp)
{
	int unit = DISKUNIT(bp->b_dev);
	struct vnd_softc *sc;
	struct partition *p;
	off_t off;
	long origbcount;
	int s;

	DNPRINTF(VDB_FOLLOW, "vndstrategy(%p): unit %d\n", bp, unit);

	if (unit >= numvnd) {
		bp->b_error = ENXIO;
		goto bad;
	}
	sc = &vnd_softc[unit];

	if ((sc->sc_flags & VNF_HAVELABEL) == 0) {
		bp->b_error = ENXIO;
		goto bad;
	}

	/*
	 * Many of the distrib scripts assume they can issue arbitrary
	 * sized requests to raw vnd devices irrespective of the
	 * emulated disk geometry.
	 *
	 * To continue supporting this, round the block count up to a
	 * multiple of d_secsize for bounds_check_with_label(), and
	 * then restore afterwards.
	 *
	 * We only do this for non-encrypted vnd, because encryption
	 * requires operating on blocks at a time.
	 */
	origbcount = bp->b_bcount;
	if (sc->sc_keyctx == NULL) {
		u_int32_t secsize = sc->sc_dk.dk_label->d_secsize;
		bp->b_bcount = ((origbcount + secsize - 1) & ~(secsize - 1));
#ifdef DIAGNOSTIC
		if (bp->b_bcount != origbcount) {
			struct proc *pr = curproc;
			printf("%s: sloppy %s from proc %d (%s): "
			    "blkno %lld bcount %ld\n", sc->sc_dev.dv_xname,
			    (bp->b_flags & B_READ) ? "read" : "write",
			    pr->p_pid, pr->p_comm, (long long)bp->b_blkno,
			    origbcount);
		}
#endif
	}

	if (bounds_check_with_label(bp, sc->sc_dk.dk_label) == -1) {
		bp->b_resid = bp->b_bcount = origbcount;
		goto done;
	}

	if (origbcount < bp->b_bcount)
		bp->b_bcount = origbcount;

	p = &sc->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
	off = DL_GETPOFFSET(p) * sc->sc_dk.dk_label->d_secsize +
	    (u_int64_t)bp->b_blkno * DEV_BSIZE;

	if (sc->sc_keyctx && !(bp->b_flags & B_READ))
		vndencryptbuf(sc, bp, 1);

	/*
	 * Use IO_NOLIMIT because upper layer has already checked I/O
	 * for limits, so there is no need to do it again.
	 */
	bp->b_error = vn_rdwr((bp->b_flags & B_READ) ? UIO_READ : UIO_WRITE,
	    sc->sc_vp, bp->b_data, bp->b_bcount, off, UIO_SYSSPACE, IO_NOLIMIT,
	    sc->sc_cred, &bp->b_resid, curproc);
	if (bp->b_error)
		bp->b_flags |= B_ERROR;

	/* Data in buffer cache needs to be in clear */
	if (sc->sc_keyctx)
		vndencryptbuf(sc, bp, 0);

	goto done;

 bad:
	bp->b_flags |= B_ERROR;
	bp->b_resid = bp->b_bcount;
 done:
	s = splbio();
	biodone(bp);
	splx(s);
}
Code Example #12
File: cardslot.c  Project: avsm/openbsd-xen-sys
/*
 * static void cardslot_event_thread(void *arg)
 *
 *   This function is the main routine handling cardslot events such as
 *   insertions and removals.
 *
 */
static void
cardslot_event_thread(void *arg)
{
	struct cardslot_softc *sc = arg;
	struct cardslot_event *ce;
	int s;
	static int antonym_ev[4] = {
		CARDSLOT_EVENT_REMOVAL_16, CARDSLOT_EVENT_INSERTION_16,
		CARDSLOT_EVENT_REMOVAL_CB, CARDSLOT_EVENT_INSERTION_CB
	};

	while (sc->sc_th_enable) {
		s = spltty();
		if ((ce = SIMPLEQ_FIRST(&sc->sc_events)) == NULL) {
			splx(s);
			(void) tsleep(&sc->sc_events, PWAIT, "cardslotev", 0);
			continue;
		}
		SIMPLEQ_REMOVE_HEAD(&sc->sc_events, ce_q);
		splx(s);

		if (IS_CARDSLOT_INSERT_REMOVE_EV(ce->ce_type)) {
			/* Chattering suppression */
			s = spltty();
			while (1) {
				struct cardslot_event *ce1, *ce2;

				if ((ce1 = SIMPLEQ_FIRST(&sc->sc_events)) ==
				    NULL)
					break;
				if (ce1->ce_type != antonym_ev[ce->ce_type])
					break;
				if ((ce2 = SIMPLEQ_NEXT(ce1, ce_q)) == NULL)
					break;
				if (ce2->ce_type == ce->ce_type) {
					SIMPLEQ_REMOVE_HEAD(&sc->sc_events,
					    ce_q);
					free(ce1, M_TEMP);
					SIMPLEQ_REMOVE_HEAD(&sc->sc_events,
					    ce_q);
					free(ce2, M_TEMP);
				}
			}
			splx(s);
		}

		switch (ce->ce_type) {
		case CARDSLOT_EVENT_INSERTION_CB:
			if ((CARDSLOT_CARDTYPE(sc->sc_status) ==
			     CARDSLOT_STATUS_CARD_CB) ||
			    (CARDSLOT_CARDTYPE(sc->sc_status) ==
			     CARDSLOT_STATUS_CARD_16)) {
				if (CARDSLOT_WORK(sc->sc_status) ==
				    CARDSLOT_STATUS_WORKING) {
					/* A card has already been inserted
					 * and works.
					 */
					break;
				}
			}

			if (sc->sc_cb_softc) {
				CARDSLOT_SET_CARDTYPE(sc->sc_status,
				    CARDSLOT_STATUS_CARD_CB);
				if (cardbus_attach_card(sc->sc_cb_softc) > 0) {
					/* At least one function works */
					CARDSLOT_SET_WORK(sc->sc_status,
					    CARDSLOT_STATUS_WORKING);
				} else {
					/* No functions work or this card is
					 * not known
					 */
					CARDSLOT_SET_WORK(sc->sc_status,
					    CARDSLOT_STATUS_NOTWORK);
				}
			} else {
				panic("no cardbus on %s", sc->sc_dev.dv_xname);
			}

			break;

		case CARDSLOT_EVENT_INSERTION_16:
			if ((CARDSLOT_CARDTYPE(sc->sc_status) ==
			     CARDSLOT_STATUS_CARD_CB) ||
			    (CARDSLOT_CARDTYPE(sc->sc_status) ==
			     CARDSLOT_STATUS_CARD_16)) {
				if (CARDSLOT_WORK(sc->sc_status) ==
				    CARDSLOT_STATUS_WORKING) {
					/* A card has already been inserted
					 * and works.
					 */
					break;
				}
			}
			if (sc->sc_16_softc) {
				CARDSLOT_SET_CARDTYPE(sc->sc_status,
				    CARDSLOT_STATUS_CARD_16);
				if (pcmcia_card_attach(
				    (struct device *)sc->sc_16_softc)) {
					/* Do not attach */
					CARDSLOT_SET_WORK(sc->sc_status,
					    CARDSLOT_STATUS_NOTWORK);
				} else {
					/* working */
					CARDSLOT_SET_WORK(sc->sc_status,
					    CARDSLOT_STATUS_WORKING);
				}
			} else {
				panic("no 16-bit pcmcia on %s",
				    sc->sc_dev.dv_xname);
			}

			break;

		case CARDSLOT_EVENT_REMOVAL_CB:
			if (CARDSLOT_CARDTYPE(sc->sc_status) ==
			    CARDSLOT_STATUS_CARD_CB) {
				/* CardBus card has not been inserted. */
				if (CARDSLOT_WORK(sc->sc_status) ==
				    CARDSLOT_STATUS_WORKING) {
					cardbus_detach_card(sc->sc_cb_softc);
					CARDSLOT_SET_WORK(sc->sc_status,
					    CARDSLOT_STATUS_NOTWORK);
					CARDSLOT_SET_WORK(sc->sc_status,
					    CARDSLOT_STATUS_CARD_NONE);
				}
				CARDSLOT_SET_CARDTYPE(sc->sc_status,
				    CARDSLOT_STATUS_CARD_NONE);
			} else if (CARDSLOT_CARDTYPE(sc->sc_status) !=
			    CARDSLOT_STATUS_CARD_16) {
				/* Unknown card... */
				CARDSLOT_SET_CARDTYPE(sc->sc_status,
				    CARDSLOT_STATUS_CARD_NONE);
			}
			CARDSLOT_SET_WORK(sc->sc_status,
			    CARDSLOT_STATUS_NOTWORK);
			break;

		case CARDSLOT_EVENT_REMOVAL_16:
			DPRINTF(("%s: removal event\n", sc->sc_dev.dv_xname));
			if (CARDSLOT_CARDTYPE(sc->sc_status) !=
			    CARDSLOT_STATUS_CARD_16) {
				/* 16-bit card has not been inserted. */
				break;
			}
			if ((sc->sc_16_softc != NULL) &&
			    (CARDSLOT_WORK(sc->sc_status) ==
			     CARDSLOT_STATUS_WORKING)) {
				struct pcmcia_softc *psc = sc->sc_16_softc;

				pcmcia_card_deactivate((struct device *)psc);
				pcmcia_chip_socket_disable(psc->pct, psc->pch);
				pcmcia_card_detach((struct device *)psc,
				    DETACH_FORCE);
			}
			CARDSLOT_SET_CARDTYPE(sc->sc_status,
			    CARDSLOT_STATUS_CARD_NONE);
			CARDSLOT_SET_WORK(sc->sc_status,
			    CARDSLOT_STATUS_NOTWORK);
			break;

		default:
			panic("cardslot_event_thread: unknown event %d",
			    ce->ce_type);
		}
		free(ce, M_TEMP);
	}

	sc->sc_event_thread = NULL;

	/* In case the parent device is waiting for us to exit. */
	wakeup(sc);

	kthread_exit(0);
}
Code Example #13
/* 
 * Note that we are called from the keyboard software interrupt!
 */
void
mouse_soft(REL_MOUSE *rel_ms, int size, int type)
{
	struct ms_softc		*ms = &ms_softc[0];
	struct firm_event	*fe, *fe2;
	REL_MOUSE		fake_mouse;
	int			get, put;
	int			sps;
	u_char			mbut, bmask;
	int			flush_buttons;

	if (ms->ms_events.ev_io == NULL)
		return;

	switch (type) {
	    case KBD_JOY1_PKG:
		/*
		 * Ignore if in emulation mode
		 */
		if (ms->ms_emul3b)
			return;

		/*
		 * There are some mice that have their middle button
		 * wired to the 'up' bit of joystick 1....
		 * Simulate a mouse packet with dx = dy = 0, the middle
		 * button state set by UP and the other buttons unchanged.
		 * Flush all button changes.
		 */
		flush_buttons = 1;
		fake_mouse.id = (rel_ms->dx & 1 ? 4 : 0) | (ms->ms_buttons & 3);
		fake_mouse.dx = fake_mouse.dy = 0;
		rel_ms = &fake_mouse;
		break;
	    case KBD_TIMEO_PKG:
		/*
		 * Timeout package. No button changes and no movement.
		 * Flush all button changes.
		 */
		flush_buttons = 1;
		fake_mouse.id = ms->ms_buttons;
		fake_mouse.dx = fake_mouse.dy = 0;
		rel_ms = &fake_mouse;
		break;
	    case KBD_RMS_PKG:
		/*
		 * Normal mouse package. Always copy the middle button
		 * status. The emulation code decides if button changes
		 * must be flushed.
		 */
		rel_ms->id = (ms->ms_buttons & 4) | (rel_ms->id & 3);
		flush_buttons = (ms->ms_emul3b) ? 0 : 1;
		break;
	    default:
		return;
	}

	sps = splev();
	get = ms->ms_events.ev_get;
	put = ms->ms_events.ev_put;
	fe  = &ms->ms_events.ev_q[put];

	if ((type != KBD_TIMEO_PKG) && ms->ms_emul3b && ms->ms_bq_idx)
		callout_stop(&ms->ms_delay_ch);

	/*
	 * Button states are encoded in the lower 3 bits of 'id'
	 */
	if (!(mbut = (rel_ms->id ^ ms->ms_buttons)) && (put != get)) {
		/*
		 * Compact dx/dy messages. Always generate an event when
		 * a button is pressed or the event queue is empty.
		 */
		ms->ms_dx += rel_ms->dx;
		ms->ms_dy += rel_ms->dy;
		goto out;
	}
	rel_ms->dx += ms->ms_dx;
	rel_ms->dy += ms->ms_dy;
	ms->ms_dx = ms->ms_dy = 0;

	/*
	 * Output location events _before_ button events ie. make sure
	 * the button is pressed at the correct location.
	 */
	if (rel_ms->dx) {
		if ((++put) % EV_QSIZE == get) {
			put--;
			goto out;
		}
		fe->id    = LOC_X_DELTA;
		fe->value = rel_ms->dx;
		firm_gettime(fe);
		if (put >= EV_QSIZE) {
			put = 0;
			fe  = &ms->ms_events.ev_q[0];
		}
		else fe++;
	}
	if (rel_ms->dy) {
		if ((++put) % EV_QSIZE == get) {
			put--;
			goto out;
		}
		fe->id    = LOC_Y_DELTA;
		fe->value = rel_ms->dy;
		firm_gettime(fe);
		if (put >= EV_QSIZE) {
			put = 0;
			fe  = &ms->ms_events.ev_q[0];
		}
		else fe++;
	}
	if (mbut && (type != KBD_TIMEO_PKG)) {
		for (bmask = 1; bmask < 0x08; bmask <<= 1) {
			if (!(mbut & bmask))
				continue;
			fe2 = &ms->ms_bq[ms->ms_bq_idx++];
			if (bmask == 1)
				fe2->id = MS_RIGHT;
			else if (bmask == 2)
				fe2->id = MS_LEFT;
			else fe2->id = MS_MIDDLE;
			fe2->value = rel_ms->id & bmask ? VKEY_DOWN : VKEY_UP;
			firm_gettime(fe2);
		}
	}

	/*
	 * Handle 3rd button emulation.
	 */
	if (ms->ms_emul3b && ms->ms_bq_idx && (type != KBD_TIMEO_PKG)) {
		/*
		 * If the middle button is pressed, any change to
		 * one of the other buttons releases all.
		 */
		if ((ms->ms_buttons & 4) && (mbut & 3)) {
			ms->ms_bq[0].id = MS_MIDDLE;
			ms->ms_bq_idx   = 1;
			rel_ms->id      = 0;
			flush_buttons   = 1;
			goto out;
		}
	    	if (ms->ms_bq_idx == 2) {
			if (ms->ms_bq[0].value == ms->ms_bq[1].value) {
				/* Must be 2 button presses! */
				ms->ms_bq[0].id = MS_MIDDLE;
				ms->ms_bq_idx   = 1;
				rel_ms->id      = 7;
			}
		}
		else if (ms->ms_bq[0].value == VKEY_DOWN) {
			callout_reset(&ms->ms_delay_ch, 10,
			    (FPV)ms_3b_delay, (void *)ms);
			goto out;
		}
		flush_buttons   = 1;
	}
out:
	if (flush_buttons) {
		int	i;

		for (i = 0; i < ms->ms_bq_idx; i++) {
			if ((++put) % EV_QSIZE == get) {
				ms->ms_bq_idx = 0;
				put--;
				goto out;
			}
			*fe = ms->ms_bq[i];
			if (put >= EV_QSIZE) {
				put = 0;
				fe  = &ms->ms_events.ev_q[0];
			}
			else fe++;
		}
		ms->ms_bq_idx = 0;
	}
	ms->ms_events.ev_put = put;
	ms->ms_buttons       = rel_ms->id;
	splx(sps);
	EV_WAKEUP(&ms->ms_events);
}
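
mouse_soft() above feeds a fixed-size event ring (ev_q) with independent put/get indices, dropping events when the next put position would collide with get. The enqueue-with-wraparound logic, stripped of the mouse-specific details, looks roughly like this (EV_QSIZE and struct event are stand-ins for the real definitions):

#define EV_QSIZE 256			/* stand-in for the real queue size */

struct event { int id; int value; };

struct evq {
	struct event q[EV_QSIZE];
	int get;			/* consumer index */
	int put;			/* producer index */
};

/* Enqueue one event; returns 0 and drops it if the ring is full. */
static int
evq_put(struct evq *evq, struct event ev)
{
	int next = (evq->put + 1) % EV_QSIZE;

	if (next == evq->get)		/* full: would overrun the reader */
		return 0;
	evq->q[evq->put] = ev;
	evq->put = next;
	return 1;
}
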
Code Example #14
File: if_mpw.c  Project: DavidAlphaFox/openbsd-kernel
int
mpw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *) data;
	struct mpw_softc *sc = ifp->if_softc;
	struct sockaddr_in *sin;
	struct sockaddr_in *sin_nexthop;
	int error = 0;
	int s;
	struct ifmpwreq imr;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < MPE_MTU_MIN ||
		    ifr->ifr_mtu > MPE_MTU_MAX)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP))
			ifp->if_flags |= IFF_RUNNING;
		else
			ifp->if_flags &= ~IFF_RUNNING;
		break;

	case SIOCSETMPWCFG:
		error = suser(curproc, 0);
		if (error != 0)
			break;

		error = copyin(ifr->ifr_data, &imr, sizeof(imr));
		if (error != 0)
			break;

		/* Teardown all configuration if got no nexthop */
		sin = (struct sockaddr_in *) &imr.imr_nexthop;
		if (sin->sin_addr.s_addr == 0) {
			s = splsoftnet();
			if (rt_ifa_del(&sc->sc_ifa, RTF_MPLS | RTF_UP,
			    smplstosa(&sc->sc_smpls)) == 0)
				sc->sc_smpls.smpls_label = 0;
			splx(s);

			memset(&sc->sc_rshim, 0, sizeof(sc->sc_rshim));
			memset(&sc->sc_nexthop, 0, sizeof(sc->sc_nexthop));
			sc->sc_flags = 0;
			sc->sc_type = 0;
			break;
		}

		/* Validate input */
		if (sin->sin_family != AF_INET ||
		    imr.imr_lshim.shim_label > MPLS_LABEL_MAX ||
		    imr.imr_lshim.shim_label <= MPLS_LABEL_RESERVED_MAX ||
		    imr.imr_rshim.shim_label > MPLS_LABEL_MAX ||
		    imr.imr_rshim.shim_label <= MPLS_LABEL_RESERVED_MAX) {
			error = EINVAL;
			break;
		}

		/* Setup labels and create inbound route */
		imr.imr_lshim.shim_label =
		    htonl(imr.imr_lshim.shim_label << MPLS_LABEL_OFFSET);
		imr.imr_rshim.shim_label =
		    htonl(imr.imr_rshim.shim_label << MPLS_LABEL_OFFSET);

		if (sc->sc_smpls.smpls_label != imr.imr_lshim.shim_label) {
			s = splsoftnet();
			if (sc->sc_smpls.smpls_label)
				rt_ifa_del(&sc->sc_ifa, RTF_MPLS | RTF_UP,
				    smplstosa(&sc->sc_smpls));

			sc->sc_smpls.smpls_label = imr.imr_lshim.shim_label;
			error = rt_ifa_add(&sc->sc_ifa, RTF_MPLS | RTF_UP,
			    smplstosa(&sc->sc_smpls));
			splx(s);
			if (error != 0) {
				sc->sc_smpls.smpls_label = 0;
				break;
			}
		}

		/* Apply configuration */
		sc->sc_flags = imr.imr_flags;
		sc->sc_type = imr.imr_type;
		sc->sc_rshim.shim_label = imr.imr_rshim.shim_label;
		sc->sc_rshim.shim_label |= MPLS_BOS_MASK;

		memset(&sc->sc_nexthop, 0, sizeof(sc->sc_nexthop));
		sin_nexthop = (struct sockaddr_in *) &sc->sc_nexthop;
		sin_nexthop->sin_family = sin->sin_family;
		sin_nexthop->sin_len = sizeof(struct sockaddr_in);
		sin_nexthop->sin_addr.s_addr = sin->sin_addr.s_addr;
		break;

	case SIOCGETMPWCFG:
		imr.imr_flags = sc->sc_flags;
		imr.imr_type = sc->sc_type;
		imr.imr_lshim.shim_label =
		    ((ntohl(sc->sc_smpls.smpls_label & MPLS_LABEL_MASK)) >>
			MPLS_LABEL_OFFSET);
		imr.imr_rshim.shim_label =
		    ((ntohl(sc->sc_rshim.shim_label & MPLS_LABEL_MASK)) >>
			MPLS_LABEL_OFFSET);
		memcpy(&imr.imr_nexthop, &sc->sc_nexthop,
		    sizeof(imr.imr_nexthop));

		error = copyout(&imr, ifr->ifr_data, sizeof(imr));
		break;

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}
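
mpw_ioctl() above stores MPLS labels in wire format: on SET it shifts the 20-bit label to its position in the shim header and converts with htonl(), and on GET it reverses the transformation. A small, self-contained round-trip of that encoding (LABEL_OFFSET and LABEL_MASK approximate the kernel's MPLS_LABEL_* constants):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define LABEL_OFFSET	12		/* label sits above the EXP/S/TTL bits */
#define LABEL_MASK	0xfffff000U	/* 20-bit label field, host order */

int
main(void)
{
	uint32_t label = 42;

	/* host-order label -> network-order shim word (the SET path) */
	uint32_t shim = htonl(label << LABEL_OFFSET);

	/* network-order shim word -> host-order label (the GET path) */
	uint32_t back = (ntohl(shim) & LABEL_MASK) >> LABEL_OFFSET;

	printf("%u -> 0x%08x -> %u\n", (unsigned)label, (unsigned)shim,
	    (unsigned)back);
	return 0;
}
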
Code Example #15
File: ip6_mroute.c  Project: orumin/openbsd-efivars
/*
 * Disable multicast routing
 */
int
ip6_mrouter_done(void)
{
	mifi_t mifi;
	int i;
	struct ifnet *ifp;
	struct in6_ifreq ifr;
	struct mf6c *rt;
	struct rtdetq *rte;
	int s;

	s = splsoftnet();

	/*
	 * For each phyint in use, disable promiscuous reception of all IPv6
	 * multicasts.
	 */
#ifdef MROUTING
	/*
	 * If the IPv4 multicast routing daemon is still running, we leave
	 * the interfaces configured to receive all multicast packets.
	 * XXX: there may be an interface in which the IPv4 multicast
	 * daemon is not interested...
	 */
	if (!ip_mrouter)
#endif
	{
		for (mifi = 0; mifi < nummifs; mifi++) {
			if (mif6table[mifi].m6_ifp &&
			    !(mif6table[mifi].m6_flags & MIFF_REGISTER)) {
				memset(&ifr, 0, sizeof(ifr));
				ifr.ifr_addr.sin6_family = AF_INET6;
				ifr.ifr_addr.sin6_addr= in6addr_any;
				ifp = mif6table[mifi].m6_ifp;
				(*ifp->if_ioctl)(ifp, SIOCDELMULTI,
						 (caddr_t)&ifr);
			}
		}
	}
#ifdef notyet
	bzero((caddr_t)qtable, sizeof(qtable));
	bzero((caddr_t)tbftable, sizeof(tbftable));
#endif
	bzero((caddr_t)mif6table, sizeof(mif6table));
	nummifs = 0;

	pim6 = 0; /* used to stub out/in pim specific code */

	timeout_del(&expire_upcalls6_ch);

	/*
	 * Free all multicast forwarding cache entries.
	 */
	for (i = 0; i < MF6CTBLSIZ; i++) {
		rt = mf6ctable[i];
		while (rt) {
			struct mf6c *frt;

			for (rte = rt->mf6c_stall; rte != NULL; ) {
				struct rtdetq *n = rte->next;

				m_freem(rte->m);
				free(rte, M_MRTABLE, 0);
				rte = n;
			}
			frt = rt;
			rt = rt->mf6c_next;
			free(frt, M_MRTABLE, 0);
		}
	}

	bzero((caddr_t)mf6ctable, sizeof(mf6ctable));

	/*
	 * Reset de-encapsulation cache
	 */
	reg_mif_num = -1;

	ip6_mrouter = NULL;
	ip6_mrouter_ver = 0;

	splx(s);

#ifdef MRT6DEBUG
	if (mrt6debug)
		log(LOG_DEBUG, "ip6_mrouter_done\n");
#endif

	return 0;
}
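
Both teardown loops in ip6_mrouter_done() use the save-next idiom: the successor pointer is captured before the current node is freed, so the walk never dereferences freed memory. Reduced to its essence (a generic singly linked node, not the kernel's rtdetq/mf6c types):

#include <stdlib.h>

struct node {
	struct node *next;
	/* ... payload ... */
};

static void
free_list(struct node *head)
{
	while (head != NULL) {
		struct node *n = head->next;	/* save before freeing */

		free(head);
		head = n;
	}
}
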
Code Example #16
void
test_send_pdu(struct proc *p, iscsi_test_send_pdu_parameters_t *par)
{
	static uint8_t pad_bytes[4] = { 0 };
	test_pars_t *tp;
	connection_t *conn;
	pdu_t *pdu;
	uint32_t psize = par->pdu_size;
	void *pdu_ptr = par->pdu_ptr;
	struct uio *uio;
	uint32_t i, pad, dsl, size;
	int s;

	if ((tp = find_test_id(par->test_id)) == NULL) {
		par->status = ISCSI_STATUS_INVALID_ID;
		return;
	}
	if (!psize || pdu_ptr == NULL ||
		((par->options & ISCSITEST_SFLAG_UPDATE_FIELDS) && psize < BHS_SIZE)) {
		par->status = ISCSI_STATUS_PARAMETER_INVALID;
		return;
	}
	if ((conn = tp->connection) == NULL) {
		par->status = ISCSI_STATUS_TEST_INACTIVE;
		return;
	}
	if ((pdu = get_pdu(conn, TRUE)) == NULL) {
		par->status = ISCSI_STATUS_TEST_CONNECTION_CLOSED;
		return;
	}
	DEB(1, ("Test Send PDU, id %d\n", par->test_id));

	if ((par->status = map_databuf(p, &pdu_ptr, psize)) != 0) {
		free_pdu(pdu);
		return;
	}

	i = 1;
	if (!par->options) {
		pdu->io_vec[0].iov_base = pdu_ptr;
		pdu->io_vec[0].iov_len = size = psize;
	} else {
		memcpy(&pdu->pdu, pdu_ptr, BHS_SIZE);

		if (!(pdu->pdu.Opcode & OP_IMMEDIATE))
			conn->session->CmdSN++;
		pdu->pdu.p.command.CmdSN = htonl(conn->session->CmdSN);

		dsl = psize - BHS_SIZE;
		size = BHS_SIZE;

		hton3(dsl, pdu->pdu.DataSegmentLength);

		if (conn->HeaderDigest &&
			!(par->options & ISCSITEST_SFLAG_NO_HEADER_DIGEST)) {
			pdu->pdu.HeaderDigest = gen_digest(&pdu->pdu, BHS_SIZE);
			size += 4;
		}

		pdu->io_vec[0].iov_base = &pdu->pdu;
		pdu->io_vec[0].iov_len = size;

		if (dsl) {
			pdu->io_vec[1].iov_base = &pdu_ptr[BHS_SIZE];
			pdu->io_vec[1].iov_len = dsl;
			i++;
			size += dsl;

			/* Pad to next multiple of 4 */
			pad = (par->options & ISCSITEST_SFLAG_NO_PADDING) ? 0 : size & 0x03;

			if (pad) {
				pad = 4 - pad;
				pdu->io_vec[i].iov_base = pad_bytes;
				pdu->io_vec[i].iov_len = pad;
				i++;
				size += pad;
			}

			if (conn->DataDigest &&
				!(par->options & ISCSITEST_SFLAG_NO_DATA_DIGEST)) {
				pdu->data_digest = gen_digest_2(
					(uint8_t *)pdu_ptr + BHS_SIZE, dsl,
					pad_bytes, pad);
				pdu->io_vec[i].iov_base = &pdu->data_digest;
				pdu->io_vec[i].iov_len = 4;
				i++;
				size += 4;
			}
		}
	}
	uio = &pdu->uio;
	uio->uio_iov = pdu->io_vec;
	UIO_SETUP_SYSSPACE(uio);
	uio->uio_rw = UIO_WRITE;
	uio->uio_iovcnt = i;
	uio->uio_resid = size;

	pdu->disp = PDUDISP_SIGNAL;
	pdu->flags = PDUF_BUSY | PDUF_NOUPDATE;

	s = splbio();
	/* Enqueue for sending */
	if (pdu->pdu.Opcode & OP_IMMEDIATE)
		TAILQ_INSERT_HEAD(&conn->pdus_to_send, pdu, send_chain);
	else
		TAILQ_INSERT_TAIL(&conn->pdus_to_send, pdu, send_chain);

	wakeup(&conn->pdus_to_send);
	tsleep(pdu, PINOD, "test_send_pdu", 0);
	splx(s);

	unmap_databuf(p, pdu_ptr, psize);
	par->status = ISCSI_STATUS_SUCCESS;
	if (par->options & ISCSITEST_KILL_CONNECTION)
		kill_connection(conn, ISCSI_STATUS_TEST_CONNECTION_CLOSED, NO_LOGOUT,
						TRUE);
}
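
The PDU builder above pads the data segment to the next multiple of four (size & 0x03 gives the remainder, and 4 - pad bytes of zeros complete the word) before the optional data digest is appended. The rule in isolation, as a hypothetical standalone helper:

#include <stdio.h>

/* Bytes of zero padding needed to reach the next multiple of four,
 * mirroring the size & 0x03 test above. */
static unsigned pad_to_4(unsigned size)
{
	unsigned pad = size & 0x03;

	return pad ? 4 - pad : 0;
}

int main(void)
{
	unsigned size;

	for (size = 48; size <= 52; size++)
		printf("size %2u -> pad %u -> padded %u\n",
		    size, pad_to_4(size), size + pad_to_4(size));
	return 0;
}
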
Code example #17
File: ip6_mroute.c Project: orumin/openbsd-efivars
/*
 * Add a mif to the mif table
 */
int
add_m6if(struct mif6ctl *mifcp)
{
	struct mif6 *mifp;
	struct ifnet *ifp;
	struct in6_ifreq ifr;
	int error, s;
#ifdef notyet
	struct tbf *m_tbf = tbftable + mifcp->mif6c_mifi;
#endif

	if (mifcp->mif6c_mifi >= MAXMIFS)
		return EINVAL;
	mifp = mif6table + mifcp->mif6c_mifi;
	if (mifp->m6_ifp)
		return EADDRINUSE; /* XXX: is it appropriate? */
	ifp = if_get(mifcp->mif6c_pifi);
	if (!ifp)
		return ENXIO;

	if (mifcp->mif6c_flags & MIFF_REGISTER) {
		if (reg_mif_num == (mifi_t)-1) {
			strlcpy(multicast_register_if.if_xname,
			    "register_mif",
			    sizeof multicast_register_if.if_xname); /* XXX */
			multicast_register_if.if_flags |= IFF_LOOPBACK;
			multicast_register_if.if_index = mifcp->mif6c_mifi;
			reg_mif_num = mifcp->mif6c_mifi;
		}

		if_put(ifp);
		ifp = if_ref(&multicast_register_if);

	} /* if REGISTER */
	else {
		/* Make sure the interface supports multicast */
		if ((ifp->if_flags & IFF_MULTICAST) == 0) {
			if_put(ifp);
			return EOPNOTSUPP;
		}

		s = splsoftnet();

		/*
		 * Enable promiscuous reception of all IPv6 multicasts
		 * from the interface.
		 */
		memset(&ifr, 0, sizeof(ifr));
		ifr.ifr_addr.sin6_family = AF_INET6;
		ifr.ifr_addr.sin6_addr = in6addr_any;
		error = (*ifp->if_ioctl)(ifp, SIOCADDMULTI, (caddr_t)&ifr);

		splx(s);
		if (error) {
			if_put(ifp);
			return error;
		}
	}

	s = splsoftnet();

	mifp->m6_flags     = mifcp->mif6c_flags;
	mifp->m6_ifp       = ifp;
#ifdef notyet
	/* scaling up here allows division by 1024 in critical code */
	mifp->m6_rate_limit = mifcp->mif6c_rate_limit * 1024 / 1000;
#endif
	/* initialize per mif pkt counters */
	mifp->m6_pkt_in    = 0;
	mifp->m6_pkt_out   = 0;
	mifp->m6_bytes_in  = 0;
	mifp->m6_bytes_out = 0;
	splx(s);

	/* Adjust nummifs up if the mifi is higher than nummifs */
	if (nummifs <= mifcp->mif6c_mifi)
		nummifs = mifcp->mif6c_mifi + 1;

#ifdef MRT6DEBUG
	if (mrt6debug)
		log(LOG_DEBUG,
		    "add_mif #%d, phyint %s\n",
		    mifcp->mif6c_mifi,
		    ifp->if_xname);
#endif

	if_put(ifp);

	return 0;
}
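
The #ifdef notyet block stores the configured rate limit pre-scaled by 1024/1000, so the critical path can divide by 1024, which compilers turn into a shift, instead of by 1000. A toy illustration of that trade-off; the units and values here are made up:

#include <stdio.h>

int main(void)
{
	unsigned rate_limit = 1000;                   /* configured limit */
	unsigned scaled = rate_limit * 1024 / 1000;   /* stored form */

	/* the hot path may now use a cheap shift instead of dividing by 1000 */
	printf("scaled %u, per-unit %u\n", scaled, scaled >> 10);
	return 0;
}
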
Code example #18
File: darwin-tcpip.c Project: Lezval/lustre
/*
 * XXX Liang: timeout for write is not supported yet.
 */
int
libcfs_sock_write (struct socket *sock, void *buffer, int nob, int timeout)
{
        int            rc;
        CFS_DECL_NET_DATA;

        while (nob > 0) {
                struct iovec  iov = {
                        .iov_base = buffer,
                        .iov_len  = nob
                };
                struct  uio suio = {
                        .uio_iov        = &iov,
                        .uio_iovcnt     = 1,
                        .uio_offset     = 0,
                        .uio_resid      = nob,
                        .uio_segflg     = UIO_SYSSPACE,
                        .uio_rw         = UIO_WRITE,
                        .uio_procp      = NULL
                };
                                
                CFS_NET_IN;
                rc = sosend(sock, NULL, &suio, (struct mbuf *)0, (struct mbuf *)0, 0);
                CFS_NET_EX;
                                
                if (rc != 0) {
                        if (suio.uio_resid != nob &&
                            (rc == ERESTART || rc == EINTR || rc == EWOULDBLOCK))
                                rc = 0;
                        if (rc != 0)
                                return -rc;
                        rc = nob - suio.uio_resid;
                        buffer = ((char *)buffer) + rc;
                        nob = suio.uio_resid;
                        continue;
                }
                break;
        }
        return (0);
}

/*
 * XXX Liang: timeout for read is not supported yet.
 */
int
libcfs_sock_read (struct socket *sock, void *buffer, int nob, int timeout)
{
        int            rc;
        CFS_DECL_NET_DATA;

        while (nob > 0) {
                struct iovec  iov = {
                        .iov_base = buffer,
                        .iov_len  = nob
                };
                struct uio  ruio = {
                        .uio_iov        = &iov,
                        .uio_iovcnt     = 1,
                        .uio_offset     = 0,
                        .uio_resid      = nob,
                        .uio_segflg     = UIO_SYSSPACE,
                        .uio_rw         = UIO_READ,
                        .uio_procp      = NULL
                };
                
                CFS_NET_IN;
                rc = soreceive(sock, (struct sockaddr **)0, &ruio, (struct mbuf **)0, (struct mbuf **)0, (int *)0);
                CFS_NET_EX;
                
                if (rc != 0) {
                        if (ruio.uio_resid != nob &&
                            (rc == ERESTART || rc == EINTR || rc == EWOULDBLOCK))
                                rc = 0;
                        if (rc != 0)
                                return -rc;
                        rc = nob - ruio.uio_resid;
                        buffer = ((char *)buffer) + rc;
                        nob = ruio.uio_resid;
                        continue;
                }
                break;
        }
        return (0);
}
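
Both loops above resume after a partial transfer: when the uio shows progress (uio_resid != nob) and the error is restartable (ERESTART, EINTR, EWOULDBLOCK), the buffer pointer is advanced by the bytes already moved and only the remainder is retried. A user-space analogue of the same pattern, assuming nothing beyond POSIX read(2):

#include <errno.h>
#include <unistd.h>

/* Read exactly nob bytes, retrying after restartable interruptions;
 * a user-space cousin of the loop above, not the Lustre code itself. */
static int read_full(int fd, void *buffer, size_t nob)
{
	char *p = buffer;

	while (nob > 0) {
		ssize_t rc = read(fd, p, nob);

		if (rc < 0) {
			if (errno == EINTR)
				continue;      /* interrupted: retry the rest */
			return -errno;
		}
		if (rc == 0)
			return -EPIPE;         /* EOF before nob bytes arrived */
		p += rc;                       /* advance past what was moved */
		nob -= rc;
	}
	return 0;
}
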

int
libcfs_sock_setbuf (struct socket *sock, int txbufsize, int rxbufsize)
{
        struct sockopt  sopt;
        int             rc = 0;
        int             option;
        CFS_DECL_NET_DATA;

        bzero(&sopt, sizeof sopt);
        sopt.sopt_dir = SOPT_SET;
        sopt.sopt_level = SOL_SOCKET;
        sopt.sopt_val = &option;
        sopt.sopt_valsize = sizeof(option);

        if (txbufsize != 0) {
                option = txbufsize;
                if (option > KSOCK_MAX_BUF)
                        option = KSOCK_MAX_BUF;
        
                sopt.sopt_name = SO_SNDBUF;
                CFS_NET_IN;
                rc = sosetopt(sock, &sopt);
                CFS_NET_EX;
                if (rc != 0) {
                        CERROR ("Can't set send buffer %d: %d\n",
                                option, rc);
                        
                        return -rc;
                }
        }
                
        if (rxbufsize != 0) {
                option = rxbufsize;
                sopt.sopt_name = SO_RCVBUF;
                CFS_NET_IN;
                rc = sosetopt(sock, &sopt);
                CFS_NET_EX;
                if (rc != 0) {
                        CERROR ("Can't set receive buffer %d: %d\n",
                                option, rc);
                        return -rc;
                }
        }
        return 0;
}
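
libcfs_sock_setbuf() drives Darwin's in-kernel sosetopt(); the same effect from user space goes through the standard setsockopt(2) call. A minimal sketch, with the KSOCK_MAX_BUF clamping of the kernel version omitted:

#include <sys/socket.h>

/* Size a socket's buffers from user space; SOL_SOCKET, SO_SNDBUF and
 * SO_RCVBUF are standard POSIX options.
 * usage: sock_setbuf(fd, 256 * 1024, 256 * 1024); */
static int sock_setbuf(int fd, int txbufsize, int rxbufsize)
{
	if (txbufsize != 0 &&
	    setsockopt(fd, SOL_SOCKET, SO_SNDBUF,
	        &txbufsize, sizeof(txbufsize)) != 0)
		return -1;
	if (rxbufsize != 0 &&
	    setsockopt(fd, SOL_SOCKET, SO_RCVBUF,
	        &rxbufsize, sizeof(rxbufsize)) != 0)
		return -1;
	return 0;
}
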

int
libcfs_sock_getaddr (struct socket *sock, int remote, __u32 *ip, int *port)
{
        struct sockaddr_in *sin;
        struct sockaddr    *sa = NULL;
        int                rc;
        CFS_DECL_NET_DATA;

        if (remote != 0) {
                CFS_NET_IN;
                rc = sock->so_proto->pr_usrreqs->pru_peeraddr(sock, &sa);
                CFS_NET_EX;

                if (rc != 0) {
                        if (sa) FREE(sa, M_SONAME);
                        CERROR ("Error %d getting sock peer IP\n", rc);
                        return -rc;
                }
        } else {
                CFS_NET_IN;
                rc = sock->so_proto->pr_usrreqs->pru_sockaddr(sock, &sa);
                CFS_NET_EX;
                if (rc != 0) {
                        if (sa) FREE(sa, M_SONAME);
                        CERROR ("Error %d getting sock local IP\n", rc);
                        return -rc;
                }
        }
        if (sa != NULL) {
                sin = (struct sockaddr_in *)sa;
                if (ip != NULL)
                        *ip = ntohl (sin->sin_addr.s_addr);
                if (port != NULL)
                        *port = ntohs (sin->sin_port);
                if (sa) 
                        FREE(sa, M_SONAME);
        }
        return 0;
}

int
libcfs_sock_getbuf (struct socket *sock, int *txbufsize, int *rxbufsize)
{
        struct sockopt  sopt;
        int rc;
        CFS_DECL_NET_DATA;

        bzero(&sopt, sizeof sopt);
        sopt.sopt_dir = SOPT_GET;
        sopt.sopt_level = SOL_SOCKET;

        if (txbufsize != NULL) {
                sopt.sopt_val = txbufsize;
                sopt.sopt_valsize = sizeof(*txbufsize);
                sopt.sopt_name = SO_SNDBUF;
                CFS_NET_IN;
                rc = sogetopt(sock, &sopt);
                CFS_NET_EX;
                if (rc != 0) {
                        CERROR ("Can't get send buffer size: %d\n", rc);
                        return -rc;
                }
        }

        if (rxbufsize != NULL) {
                sopt.sopt_val = rxbufsize;
                sopt.sopt_valsize = sizeof(*rxbufsize);
                sopt.sopt_name = SO_RCVBUF;
                CFS_NET_IN;
                rc = sogetopt(sock, &sopt);
                CFS_NET_EX;
                if (rc != 0) {
                        CERROR ("Can't get receive buffer size: %d\n", rc);
                        return -rc;
                }
        }
        return 0;
}

int
libcfs_sock_connect (struct socket **sockp, int *fatal,
                     __u32 local_ip, int local_port,
                     __u32 peer_ip, int peer_port)
{
        struct sockaddr_in  srvaddr;
        struct socket      *so;
        int                 s;
        int                 rc; 
        CFS_DECL_FUNNEL_DATA;
        
        rc = libcfs_sock_create(sockp, fatal, local_ip, local_port);
        if (rc != 0)
                return rc;
        so = *sockp;
        bzero(&srvaddr, sizeof(srvaddr));
        srvaddr.sin_len = sizeof(struct sockaddr_in);
        srvaddr.sin_family = AF_INET;
        srvaddr.sin_port = htons (peer_port);
        srvaddr.sin_addr.s_addr = htonl (peer_ip);

        CFS_NET_IN;
        rc = soconnect(so, (struct sockaddr *)&srvaddr);
        if (rc != 0) {
                CFS_NET_EX;
                if (rc != EADDRNOTAVAIL && rc != EADDRINUSE)
                        CNETERR("Error %d connecting %u.%u.%u.%u/%d -> %u.%u.%u.%u/%d\n", rc,
                               HIPQUAD(local_ip), local_port, HIPQUAD(peer_ip), peer_port);
                goto out;
        }
        s = splnet();
        while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
                CDEBUG(D_NET, "ksocknal sleep for waiting auto_connect.\n");
                (void) tsleep((caddr_t)&so->so_timeo, PSOCK, "ksocknal_conn", hz);
        }
        if ((rc = so->so_error) != 0) {
                so->so_error = 0;
                splx(s);
                CFS_NET_EX;
                CNETERR("Error %d connecting %u.%u.%u.%u/%d -> %u.%u.%u.%u/%d\n", rc,
                       HIPQUAD(local_ip), local_port, HIPQUAD(peer_ip), peer_port);
                goto out;
        }
        LASSERT(so->so_state & SS_ISCONNECTED);
        splx(s);
        CFS_NET_EX;
        if (sockp)
                *sockp = so;
        return (0);
out:
        CFS_NET_IN;
        soshutdown(so, 2);
        soclose(so);
        CFS_NET_EX;
        return (-rc);
}

void
libcfs_sock_release (struct socket *sock)
{
        CFS_DECL_FUNNEL_DATA;
        CFS_NET_IN;
        soshutdown(sock, 0);
        CFS_NET_EX;
}
Code example #19
File: locks.c Project: MACasuba/MACasuba-Utils-git
/*
 * Routine: 	lck_mtx_lock_wait
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * returns it unlocked.
 */
void
lck_mtx_lock_wait (
	lck_mtx_t			*lck,
	thread_t			holder)
{
	thread_t		self = current_thread();
	lck_mtx_t		*mutex;
	integer_t		priority;
	spl_t			s = splsched();
#if	CONFIG_DTRACE
	uint64_t		sleep_start = 0;

	if (lockstat_probemap[LS_LCK_MTX_LOCK_BLOCK] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_BLOCK]) {
		sleep_start = mach_absolute_time();
	}
#endif

	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	priority = self->sched_pri;
	if (priority < self->priority)
		priority = self->priority;
	if (priority < BASEPRI_DEFAULT)
		priority = BASEPRI_DEFAULT;

	thread_lock(holder);
	if (mutex->lck_mtx_pri == 0)
		holder->promotions++;
	holder->sched_mode |= TH_MODE_PROMOTED;
	if (		mutex->lck_mtx_pri < priority	&&
				holder->sched_pri < priority		) {
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
					holder->sched_pri, priority, (int)holder, (int)lck, 0);

		set_sched_pri(holder, priority);
	}
	thread_unlock(holder);
	splx(s);

	if (mutex->lck_mtx_pri < priority)
		mutex->lck_mtx_pri = priority;
	if (self->pending_promoter[self->pending_promoter_index] == NULL) {
		self->pending_promoter[self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}
	else
	if (self->pending_promoter[self->pending_promoter_index] != mutex) {
		self->pending_promoter[++self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}

	assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
	lck_mtx_ilk_unlock(mutex);

	thread_block(THREAD_CONTINUE_NULL);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
#if	CONFIG_DTRACE
	/*
	 * Record the Dtrace lockstat probe for blocking, block time
	 * measured from when we were entered.
	 */
	if (sleep_start) {
		if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
			LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_BLOCK, lck,
			    mach_absolute_time() - sleep_start);
		} else {
			LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_BLOCK, lck,
			    mach_absolute_time() - sleep_start);
		}
	}
#endif
}
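
The promotion logic above clamps the waiter's priority to at least its base priority and BASEPRI_DEFAULT before deciding whether the holder needs a boost. The clamping step on its own; the floor value 31 is treated here as an assumption:

#include <stdio.h>

#define BASEPRI_DEFAULT 31     /* assumed floor value */

/* Effective priority of a waiter: its current scheduled priority,
 * never below its base priority or the default floor. */
static int effective_priority(int sched_pri, int base_pri)
{
	int priority = sched_pri;

	if (priority < base_pri)
		priority = base_pri;
	if (priority < BASEPRI_DEFAULT)
		priority = BASEPRI_DEFAULT;
	return priority;
}

int main(void)
{
	printf("%d\n", effective_priority(20, 25));  /* -> 31, floor wins */
	printf("%d\n", effective_priority(40, 25));  /* -> 40 */
	return 0;
}
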
Code example #20
File: darwin-tcpip.c Project: Lezval/lustre
int
libcfs_sock_accept (struct socket **newsockp, struct socket *sock)
{
        struct socket *so;
        struct sockaddr *sa;
        int error, s;
        CFS_DECL_FUNNEL_DATA;

        CFS_NET_IN;
        s = splnet();
        if ((sock->so_options & SO_ACCEPTCONN) == 0) {
                splx(s);
                CFS_NET_EX;
                return (-EINVAL);
        }

        if ((sock->so_state & SS_NBIO) && sock->so_comp.tqh_first == NULL) {
                splx(s);
                CFS_NET_EX;
                return (-EWOULDBLOCK);
        }

        error = 0;
        while (TAILQ_EMPTY(&sock->so_comp) && sock->so_error == 0) {
                if (sock->so_state & SS_CANTRCVMORE) {
                        sock->so_error = ECONNABORTED;
                        break;
                }
                error = tsleep((caddr_t)&sock->so_timeo, PSOCK | PCATCH,
                                "accept", 0);
                if (error) {
                        splx(s);
                        CFS_NET_EX;
                        return (-error);
                }
        }
        if (sock->so_error) {
                error = sock->so_error;
                sock->so_error = 0;
                splx(s);
                CFS_NET_EX;
                return (-error);
        }

        /*
         * At this point we know that there is at least one connection
         * ready to be accepted. Remove it from the queue prior to
         * allocating the file descriptor for it since falloc() may
         * block allowing another process to accept the connection
         * instead.
         */
        so = TAILQ_FIRST(&sock->so_comp);
        TAILQ_REMOVE(&sock->so_comp, so, so_list);
        sock->so_qlen--;

        so->so_state &= ~SS_COMP;
        so->so_head = NULL;
        sa = 0;
        (void) soaccept(so, &sa);

        *newsockp = so;
        FREE(sa, M_SONAME);
        splx(s);
        CFS_NET_EX;
        return (-error);
}
Code example #21
File: rp.c Project: MarginC/kame
static _INLINE_ void rp_do_receive(struct rp_port *rp, struct tty *tp,
			CHANNEL_t *cp, unsigned int ChanStatus)
{
	int	spl;
	unsigned	int	CharNStat;
	int	ToRecv, wRecv, ch, ttynocopy;

	ToRecv = sGetRxCnt(cp);
	if(ToRecv == 0)
		return;

/*	If status indicates there are errored characters in the
	FIFO, then enter status mode (a word in FIFO holds
	characters and status)
*/

	if(ChanStatus & (RXFOVERFL | RXBREAK | RXFRAME | RXPARITY)) {
		if(!(ChanStatus & STATMODE)) {
			ChanStatus |= STATMODE;
			sEnRxStatusMode(cp);
		}
	}
/*
	if we previously entered status mode then read down the
	FIFO one word at a time, pulling apart the character and
	the status. Update error counters depending on status.
*/
	if(ChanStatus & STATMODE) {
		while(ToRecv) {
			if(tp->t_state & TS_TBLOCK) {
				break;
			}
			CharNStat = rp_readch2(cp,sGetTxRxDataIO(cp));
			ch = CharNStat & 0xff;

			if((CharNStat & STMBREAK) || (CharNStat & STMFRAMEH))
				ch |= TTY_FE;
			else if (CharNStat & STMPARITYH)
				ch |= TTY_PE;
			else if (CharNStat & STMRCVROVRH)
				rp->rp_overflows++;

			(*linesw[tp->t_line].l_rint)(ch, tp);
			ToRecv--;
		}
/*
	After emptying the FIFO in status mode, turn off status mode
*/

		if(sGetRxCnt(cp) == 0) {
			sDisRxStatusMode(cp);
		}
	} else {
		/*
		 * Avoid the grotesquely inefficient lineswitch routine
		 * (ttyinput) in "raw" mode.  It usually takes about 450
		 * instructions (that's without canonical processing or echo!).
		 * slinput is reasonably fast (usually 40 instructions plus
		 * call overhead).
		 */
		ToRecv = sGetRxCnt(cp);
		if ( tp->t_state & TS_CAN_BYPASS_L_RINT ) {
			if ( ToRecv > RXFIFO_SIZE ) {
				ToRecv = RXFIFO_SIZE;
			}
			wRecv = ToRecv >> 1;
			if ( wRecv ) {
				rp_readmultich2(cp,sGetTxRxDataIO(cp),(u_int16_t *)rp->RxBuf,wRecv);
			}
			if ( ToRecv & 1 ) {
				((unsigned char *)rp->RxBuf)[(ToRecv-1)] = (u_char) rp_readch1(cp,sGetTxRxDataIO(cp));
			}
			tk_nin += ToRecv;
			tk_rawcc += ToRecv;
			tp->t_rawcc += ToRecv;
			ttynocopy = b_to_q((char *)rp->RxBuf, ToRecv, &tp->t_rawq);
			ttwakeup(tp);
		} else {
			while (ToRecv) {
				if(tp->t_state & TS_TBLOCK) {
					break;
				}
				ch = (u_char) rp_readch1(cp,sGetTxRxDataIO(cp));
				spl = spltty();
				(*linesw[tp->t_line].l_rint)(ch, tp);
				splx(spl);
				ToRecv--;
			}
		}
	}
}
Code example #22
File: trap.c Project: avsm/openbsd-xen-sys
void
m88100_trap(unsigned type, struct trapframe *frame)
{
	struct proc *p;
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;
	int fault_type, pbus_type;
	u_long fault_code;
	unsigned fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
	int s;
	u_int psr;
#endif
	int sig = 0;

	extern struct vm_map *kernel_map;

	uvmexp.traps++;
	if ((p = curproc) == NULL)
		p = &proc0;

	if (USERMODE(frame->tf_epsr)) {
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
	}
	fault_type = 0;
	fault_code = 0;
	fault_addr = frame->tf_sxip & XIP_ADDR;

	switch (type) {
	default:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

#if defined(DDB)
	case T_KDB_BREAK:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
#endif /* DDB */
	case T_ILLFLT:
		printf("Unimplemented opcode!\n");
		panictrap(frame->tf_vector, frame);
		break;
	case T_INT:
	case T_INT+T_USER:
		curcpu()->ci_intrdepth++;
		md_interrupt_func(T_INT, frame);
		curcpu()->ci_intrdepth--;
		return;

	case T_MISALGNFLT:
		printf("kernel misaligned access exception @ 0x%08x\n",
		    frame->tf_sxip);
		panictrap(frame->tf_vector, frame);
		break;

	case T_INSTFLT:
		/* kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
		printf("Kernel Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif
		panictrap(frame->tf_vector, frame);
		break;

	case T_DATAFLT:
		/* kernel mode data fault */

		/* data fault on the user address? */
		if ((frame->tf_dmt0 & DMT_DAS) == 0) {
			type = T_DATAFLT + T_USER;
			goto user_fault;
		}

		fault_addr = frame->tf_dma0;
		if (frame->tf_dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);
		if (va == 0) {
			panic("trap: bad kernel access at %x", fault_addr);
		}

		KERNEL_LOCK(LK_CANRECURSE | LK_EXCLUSIVE);
		vm = p->p_vmspace;
		map = kernel_map;

		pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
		printf("Kernel Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif

		switch (pbus_type) {
		case CMMU_PFSR_SUCCESS:
			/*
			 * The fault was resolved. Call data_access_emulation
			 * to drain the data unit pipe line and reset dmt0
			 * so that trap won't get called again.
			 */
			data_access_emulation((unsigned *)frame);
			frame->tf_dpfsr = 0;
			frame->tf_dmt0 = 0;
			KERNEL_UNLOCK();
			return;
		case CMMU_PFSR_SFAULT:
		case CMMU_PFSR_PFAULT:
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			if (result == 0) {
				/*
				 * We could resolve the fault. Call
				 * data_access_emulation to drain the data
				 * unit pipe line and reset dmt0 so that trap
				 * won't get called again.
				 */
				data_access_emulation((unsigned *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
				KERNEL_UNLOCK();
				return;
			}
			break;
		}
#ifdef TRAPDEBUG
		printf("PBUS Fault %d (%s) va = 0x%x\n", pbus_type,
		    pbus_exception_type[pbus_type], va);
#endif
		KERNEL_UNLOCK();
		panictrap(frame->tf_vector, frame);
		/* NOTREACHED */
	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
user_fault:
		if (type == T_INSTFLT + T_USER) {
			pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
#ifdef TRAPDEBUG
			printf("User Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		} else {
			fault_addr = frame->tf_dma0;
			pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
			printf("User Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		}

		if (frame->tf_dmt0 & (DMT_WRITE | DMT_LOCKBAR)) {
			ftype = VM_PROT_READ | VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);

		KERNEL_PROC_LOCK(p);
		vm = p->p_vmspace;
		map = &vm->vm_map;
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/* Call uvm_fault() to resolve non-bus error faults */
		switch (pbus_type) {
		case CMMU_PFSR_SUCCESS:
			result = 0;
			break;
		case CMMU_PFSR_BERROR:
			result = EACCES;
			break;
		default:
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			break;
		}

		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;

		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (result == 0)
				uvm_grow(p, va);
			else if (result == EACCES)
				result = EFAULT;
		}
		KERNEL_PROC_UNLOCK(p);

		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			frame->tf_snip = pcb_onfault | NIP_V;
			frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
			frame->tf_sxip = 0;
			/*
			 * Continue as if the fault had been resolved, but
			 * do not try to complete the faulting access.
			 */
			frame->tf_dmt0 |= DMT_SKIP;
			result = 0;
		}

		if (result == 0) {
			if (type == T_DATAFLT+T_USER) {
				/*
			 	 * We could resolve the fault. Call
			 	 * data_access_emulation to drain the data unit
			 	 * pipe line and reset dmt0 so that trap won't
			 	 * get called again.
			 	 */
				data_access_emulation((unsigned *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
			} else {
				/*
				 * back up SXIP, SNIP,
				 * clearing the Error bit
				 */
				frame->tf_sfip = frame->tf_snip & ~FIP_E;
				frame->tf_snip = frame->tf_sxip & ~NIP_E;
				frame->tf_ipfsr = 0;
			}
		} else {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		break;
	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame);
		fault_type = BUS_ADRALN;
		break;
	case T_PRIVINFLT+T_USER:
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;
	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		break;
	case T_FPEPFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;
	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed.  When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			u_int instr;
			vaddr_t pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(u_int));

			/* check and see if we got here by accident */
			if ((p->p_md.md_bp0va != pc &&
			     p->p_md.md_bp1va != pc) ||
			    instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}

			/* restore original instruction and clear breakpoint */
			if (p->p_md.md_bp0va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp0save);
				p->p_md.md_bp0va = 0;
			}
			if (p->p_md.md_bp1va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp1save);
				p->p_md.md_bp1va = 0;
			}

#if 1
			frame->tf_sfip = frame->tf_snip;
			frame->tf_snip = pc | NIP_V;
#endif
			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;

	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging.  When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		frame->tf_sfip = frame->tf_snip;
		frame->tf_snip = frame->tf_sxip;
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;

	case T_ASTFLT+T_USER:
		uvmexp.softs++;
		p->p_md.md_astpending = 0;
		if (p->p_flag & P_OWEUPC) {
			KERNEL_PROC_LOCK(p);
			ADDUPROF(p);
			KERNEL_PROC_UNLOCK(p);
		}
		if (curcpu()->ci_want_resched)
			preempt(NULL);
		break;
	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
		sv.sival_int = fault_addr;
		KERNEL_PROC_LOCK(p);
		trapsignal(p, sig, fault_code, fault_type, sv);
		KERNEL_PROC_UNLOCK(p);
		/*
		 * don't want multiple faults - we are going to
		 * deliver signal.
		 */
		frame->tf_dmt0 = 0;
		frame->tf_ipfsr = frame->tf_dpfsr = 0;
	}

	userret(p);
}
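
Both fault paths round the faulting address down to its page base with trunc_page() before handing it to uvm_fault(). The operation in isolation, assuming 4 KB pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL                          /* assumed page size */
#define trunc_page(va) ((va) & ~(PAGE_SIZE - 1))  /* round down to page base */

int main(void)
{
	unsigned long fault_addr = 0x12345abcUL;

	printf("va = 0x%lx\n", trunc_page(fault_addr));  /* 0x12345000 */
	return 0;
}
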
Code example #23
void
smsc_init(void *xsc)
{
	struct smsc_softc	*sc = xsc;
	struct ifnet		*ifp = &sc->sc_ac.ac_if;
	struct smsc_chain	*c;
	usbd_status		 err;
	int			 s, i;
	
	s = splnet();

	/* Cancel pending I/O */
	smsc_stop(sc);

	/* Reset the ethernet interface. */
	smsc_reset(sc);

	/* Init RX ring. */
	if (smsc_rx_list_init(sc) == ENOBUFS) {
		printf("%s: rx list init failed\n", sc->sc_dev.dv_xname);
		splx(s);
		return;
	}

	/* Init TX ring. */
	if (smsc_tx_list_init(sc) == ENOBUFS) {
		printf("%s: tx list init failed\n", sc->sc_dev.dv_xname);
		splx(s);
		return;
	}

	/* Program promiscuous mode and multicast filters. */
	smsc_iff(sc);

	/* Open RX and TX pipes. */
	err = usbd_open_pipe(sc->sc_iface, sc->sc_ed[SMSC_ENDPT_RX],
	    USBD_EXCLUSIVE_USE, &sc->sc_ep[SMSC_ENDPT_RX]);
	if (err) {
		printf("%s: open rx pipe failed: %s\n",
		    sc->sc_dev.dv_xname, usbd_errstr(err));
		splx(s);
		return;
	}

	err = usbd_open_pipe(sc->sc_iface, sc->sc_ed[SMSC_ENDPT_TX],
	    USBD_EXCLUSIVE_USE, &sc->sc_ep[SMSC_ENDPT_TX]);
	if (err) {
		printf("%s: open tx pipe failed: %s\n",
		    sc->sc_dev.dv_xname, usbd_errstr(err));
		splx(s);
		return;
	}

	/* Start up the receive pipe. */
	for (i = 0; i < SMSC_RX_LIST_CNT; i++) {
		c = &sc->sc_cdata.rx_chain[i];
		usbd_setup_xfer(c->sc_xfer, sc->sc_ep[SMSC_ENDPT_RX],
		    c, c->sc_buf, sc->sc_bufsz,
		    USBD_SHORT_XFER_OK | USBD_NO_COPY,
		    USBD_NO_TIMEOUT, smsc_rxeof);
		usbd_transfer(c->sc_xfer);
	}

	/* TCP/UDP checksum offload engines. */
	smsc_sethwcsum(sc);

	/* Indicate we are up and running. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	timeout_add_sec(&sc->sc_stat_ch, 1);

	splx(s);
}
Code example #24
File: trap.c Project: avsm/openbsd-xen-sys
void
m88110_trap(unsigned type, struct trapframe *frame)
{
	struct proc *p;
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;
	int fault_type;
	u_long fault_code;
	unsigned fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
        int s;
	u_int psr;
#endif
	int sig = 0;
	pt_entry_t *pte;

	extern struct vm_map *kernel_map;
	extern pt_entry_t *pmap_pte(pmap_t, vaddr_t);

	uvmexp.traps++;
	if ((p = curproc) == NULL)
		p = &proc0;

	if (USERMODE(frame->tf_epsr)) {
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
	}
	fault_type = 0;
	fault_code = 0;
	fault_addr = frame->tf_exip & XIP_ADDR;

	switch (type) {
	default:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

	case T_110_DRM+T_USER:
	case T_110_DRM:
#ifdef DEBUG
		printf("DMMU read miss: Hardware Table Searches should be enabled!\n");
#endif
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/
	case T_110_DWM+T_USER:
	case T_110_DWM:
#ifdef DEBUG
		printf("DMMU write miss: Hardware Table Searches should be enabled!\n");
#endif
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/
	case T_110_IAM+T_USER:
	case T_110_IAM:
#ifdef DEBUG
		printf("IMMU miss: Hardware Table Searches should be enabled!\n");
#endif
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

#ifdef DDB
	case T_KDB_TRACE:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_TRACE, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_BREAK:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		set_psr(psr);
		/* skip one instruction */
		if (frame->tf_exip & 1)
			frame->tf_exip = frame->tf_enip;
		else
			frame->tf_exip += 4;
		splx(s);
		return;
#if 0
	case T_ILLFLT:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_error_trap(type == T_ILLFLT ? "unimplemented opcode" :
		       "error fault", (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
#endif /* 0 */
#endif /* DDB */
	case T_ILLFLT:
		printf("Unimplemented opcode!\n");
		panictrap(frame->tf_vector, frame);
		break;
	case T_NON_MASK:
	case T_NON_MASK+T_USER:
		curcpu()->ci_intrdepth++;
		md_interrupt_func(T_NON_MASK, frame);
		curcpu()->ci_intrdepth--;
		return;
	case T_INT:
	case T_INT+T_USER:
		curcpu()->ci_intrdepth++;
		md_interrupt_func(T_INT, frame);
		curcpu()->ci_intrdepth--;
		return;
	case T_MISALGNFLT:
		printf("kernel mode misaligned access exception @ 0x%08x\n",
		    frame->tf_exip);
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

	case T_INSTFLT:
		/* kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		printf("Kernel Instruction fault exip %x isr %x ilar %x\n",
		    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

	case T_DATAFLT:
		/* kernel mode data fault */

		/* data fault on the user address? */
		if ((frame->tf_dsr & CMMU_DSR_SU) == 0) {
			type = T_DATAFLT + T_USER;
			goto m88110_user_fault;
		}

#ifdef TRAPDEBUG
		printf("Kernel Data access fault exip %x dsr %x dlar %x\n",
		    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif

		fault_addr = frame->tf_dlar;
		if (frame->tf_dsr & CMMU_DSR_RW) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		} else {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		}

		va = trunc_page((vaddr_t)fault_addr);
		if (va == 0) {
			panic("trap: bad kernel access at %x", fault_addr);
		}

		KERNEL_LOCK(LK_CANRECURSE | LK_EXCLUSIVE);
		vm = p->p_vmspace;
		map = kernel_map;

		if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
			frame->tf_dsr &= ~CMMU_DSR_WE;	/* undefined */
			/*
			 * On a segment or a page fault, call uvm_fault() to
			 * resolve the fault.
			 */
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			if (result == 0) {
				KERNEL_UNLOCK();
				return;
			}
		}
		if (frame->tf_dsr & CMMU_DSR_WE) {	/* write fault  */
			/*
			 * This could be a write protection fault or an
			 * exception to set the used and modified bits
			 * in the pte. Basically, if we got a write error,
			 * then we already have a pte entry that faulted
			 * in from a previous seg fault or page fault.
			 * Get the pte and check the status of the
			 * modified and valid bits to determine if this is
			 * indeed a real write fault.  XXX smurph
			 */
			pte = pmap_pte(map->pmap, va);
#ifdef DEBUG
			if (pte == NULL) {
				KERNEL_UNLOCK();
				panic("NULL pte on write fault??");
			}
#endif
			if (!(*pte & PG_M) && !(*pte & PG_RO)) {
				/* Set modified bit and try the write again. */
#ifdef TRAPDEBUG
				printf("Corrected kernel write fault, map %x pte %x\n",
				    map->pmap, *pte);
#endif
				*pte |= PG_M;
				KERNEL_UNLOCK();
				return;
#if 1	/* shouldn't happen */
			} else {
				/* must be a real wp fault */
#ifdef TRAPDEBUG
				printf("Uncorrected kernel write fault, map %x pte %x\n",
				    map->pmap, *pte);
#endif
				if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
					p->p_addr->u_pcb.pcb_onfault = 0;
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
				if (result == 0) {
					KERNEL_UNLOCK();
					return;
				}
#endif
			}
		}
		KERNEL_UNLOCK();
		panictrap(frame->tf_vector, frame);
		/* NOTREACHED */
	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
m88110_user_fault:
		if (type == T_INSTFLT+T_USER) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
#ifdef TRAPDEBUG
			printf("User Instruction fault exip %x isr %x ilar %x\n",
			    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		} else {
			fault_addr = frame->tf_dlar;
			if (frame->tf_dsr & CMMU_DSR_RW) {
				ftype = VM_PROT_READ;
				fault_code = VM_PROT_READ;
			} else {
				ftype = VM_PROT_READ|VM_PROT_WRITE;
				fault_code = VM_PROT_WRITE;
			}
#ifdef TRAPDEBUG
			printf("User Data access fault exip %x dsr %x dlar %x\n",
			    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif
		}

		va = trunc_page((vaddr_t)fault_addr);

		KERNEL_PROC_LOCK(p);
		vm = p->p_vmspace;
		map = &vm->vm_map;
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/*
		 * Call uvm_fault() to resolve non-bus error faults
		 * whenever possible.
		 */
		if (type == T_DATAFLT+T_USER) {
			/* data faults */
			if (frame->tf_dsr & CMMU_DSR_BE) {
				/* bus error */
				result = EACCES;
			} else
			if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			} else
			if (frame->tf_dsr & (CMMU_DSR_CP | CMMU_DSR_WA)) {
				/* copyback or write allocate error */
				result = EACCES;
			} else
			if (frame->tf_dsr & CMMU_DSR_WE) {
				/* write fault  */
				/* This could be a write protection fault or an
				 * exception to set the used and modified bits
				 * in the pte. Basically, if we got a write
				 * error, then we already have a pte entry that
				 * faulted in from a previous seg fault or page
				 * fault.
				 * Get the pte and check the status of the
				 * modified and valid bits to determine if this is
				 * indeed a real write fault.  XXX smurph
				 */
				pte = pmap_pte(vm_map_pmap(map), va);
#ifdef DEBUG
				if (pte == NULL) {
					KERNEL_PROC_UNLOCK(p);
					panic("NULL pte on write fault??");
				}
#endif
				if (!(*pte & PG_M) && !(*pte & PG_RO)) {
					/*
					 * Set modified bit and try the
					 * write again.
					 */
#ifdef TRAPDEBUG
					printf("Corrected userland write fault, map %x pte %x\n",
					    map->pmap, *pte);
#endif
					*pte |= PG_M;
					/*
					 * invalidate ATCs to force
					 * table search
					 */
					set_dcmd(CMMU_DCMD_INV_UATC);
					KERNEL_PROC_UNLOCK(p);
					return;
				} else {
					/* must be a real wp fault */
#ifdef TRAPDEBUG
					printf("Uncorrected userland write fault, map %x pte %x\n",
					    map->pmap, *pte);
#endif
					result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
					p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
				}
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Data access fault dsr %x\n",
				    frame->tf_dsr);
#endif
				KERNEL_PROC_UNLOCK(p);
				panictrap(frame->tf_vector, frame);
			}
		} else {
			/* instruction faults */
			if (frame->tf_isr &
			    (CMMU_ISR_BE | CMMU_ISR_SP | CMMU_ISR_TBE)) {
				/* bus error, supervisor protection */
				result = EACCES;
			} else
			if (frame->tf_isr & (CMMU_ISR_SI | CMMU_ISR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Instruction fault isr %x\n",
				    frame->tf_isr);
#endif
				KERNEL_PROC_UNLOCK(p);
				panictrap(frame->tf_vector, frame);
			}
		}

		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (result == 0)
				uvm_grow(p, va);
			else if (result == EACCES)
				result = EFAULT;
		}
		KERNEL_PROC_UNLOCK(p);

		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			frame->tf_exip = pcb_onfault;
			/*
			 * Continue as if the fault had been resolved.
			 */
			result = 0;
		}

		if (result != 0) {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		break;
	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame);
		fault_type = BUS_ADRALN;
		break;
	case T_PRIVINFLT+T_USER:
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
	case T_KDB_TRACE:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;
	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		break;
	case T_FPEPFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;
	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed.  When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			u_int instr;
			vaddr_t pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(u_int));

			/* check and see if we got here by accident */
			if ((p->p_md.md_bp0va != pc &&
			     p->p_md.md_bp1va != pc) ||
			    instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}

			/* restore original instruction and clear breakpoint */
			if (p->p_md.md_bp0va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp0save);
				p->p_md.md_bp0va = 0;
			}
			if (p->p_md.md_bp1va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp1save);
				p->p_md.md_bp1va = 0;
			}

			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;
	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging.  When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;

	case T_ASTFLT+T_USER:
		uvmexp.softs++;
		p->p_md.md_astpending = 0;
		if (p->p_flag & P_OWEUPC) {
			KERNEL_PROC_LOCK(p);
			ADDUPROF(p);
			KERNEL_PROC_UNLOCK(p);
		}
		if (curcpu()->ci_want_resched)
			preempt(NULL);
		break;
	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
		sv.sival_int = fault_addr;
		KERNEL_PROC_LOCK(p);
		trapsignal(p, sig, fault_code, fault_type, sv);
		KERNEL_PROC_UNLOCK(p);
	}

	userret(p);
}
Code example #25
// this function waits for space to be available in the transport
// buffers and then prints the specified line to the CDCACM transport
// console.
void
cdcacm_print(const byte *buffer, int length)
{
    int a;
    int n;
    int m;
    int x;

    ASSERT(length);

    if (! cdcacm_attached || discard) {
        return;
    }

    // figure out how many buffers we need
    n = (length+sizeof(rx[0])-1)/sizeof(rx[0])+1;

    x = ilsplx(7);

    // forever...
    m = 0;
    for (;;) {
        // compute the number of available buffers
        a = (rx_out+NRX-rx_in)%NRX;
        if (! a) {
            a = NRX;
        }

        // if we have as many as we need...
        if (a >= n) {
            // we're ready to go
            break;
        }

#if ! INTERRUPT
        usb_isr();
#else
        splx(x);
        //delay(1);
        //if (m++ > 1000) {
        //    discard = true;
        //    return;
        //}
        x = splx(7);
#endif
    }

    // while there is more data to send...
    do {
        // append to next rx_in(s)
        m = MIN(length, sizeof(rx[rx_in])-rx_length[rx_in]);

        assert(rx_length[rx_in]+m <= sizeof(rx[rx_in]));
        ilmemcpy(rx[rx_in]+rx_length[rx_in], buffer, m);
        rx_length[rx_in] += m;

        buffer += m;
        length -= m;

        // if this is the first buffer of the transfer or if the transfer will need more buffers...
        if (a == NRX || length) {
            // advance to the next buffer
            assert(length ? rx_length[rx_in] == sizeof(rx[rx_in]) : true);
            rx_in = (rx_in+1)%NRX;
            assert(rx_in != rx_out);
            assert(! rx_length[rx_in]);
        }
    } while (length);

    // if this is the first buffer of the transfer...
    if (a == NRX) {
        // start the rx ball rolling
        assert(rx_out != rx_in);
        assert(rx_length[rx_out] > 0 && rx_length[rx_out] < PACKET_SIZE);
        usb_device_enqueue(bulk_in_ep, 1, rx[rx_out], rx_length[rx_out]);
    }

    ilsplx(x);
}
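
The availability computation in cdcacm_print() measures ring distance as (rx_out + NRX - rx_in) % NRX and remaps a result of zero to NRX, because coinciding indices mean the ring is completely free in this scheme. Isolated below with a deliberately small NRX:

#include <stdio.h>

#define NRX 4                  /* toy ring size; the driver's is larger */

/* Free slots between in and out; zero distance means the whole ring
 * is available in this scheme, hence the remap to NRX. */
static int avail(int rx_in, int rx_out)
{
	int a = (rx_out + NRX - rx_in) % NRX;

	return a ? a : NRX;
}

int main(void)
{
	printf("%d\n", avail(0, 0));   /* empty ring: all 4 free */
	printf("%d\n", avail(3, 1));   /* 2 free */
	return 0;
}
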
Code example #26
File: altq_priq.c Project: oza/FreeBSD-7.3-dyntick
static struct priq_class *
priq_class_create(struct priq_if *pif, int pri, int qlimit, int flags, int qid)
{
	struct priq_class *cl;
	int s;

#ifndef ALTQ_RED
	if (flags & PRCF_RED) {
#ifdef ALTQ_DEBUG
		printf("priq_class_create: RED not configured for PRIQ!\n");
#endif
		return (NULL);
	}
#endif

	if ((cl = pif->pif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
#ifdef __NetBSD__
		s = splnet();
#else
		s = splimp();
#endif
		IFQ_LOCK(cl->cl_pif->pif_ifq);
		if (!qempty(cl->cl_q))
			priq_purgeq(cl);
		IFQ_UNLOCK(cl->cl_pif->pif_ifq);
		splx(s);
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	} else {
		MALLOC(cl, struct priq_class *, sizeof(struct priq_class),
		       M_DEVBUF, M_WAITOK);
		if (cl == NULL)
			return (NULL);
		bzero(cl, sizeof(struct priq_class));

		MALLOC(cl->cl_q, class_queue_t *, sizeof(class_queue_t),
		       M_DEVBUF, M_WAITOK);
		if (cl->cl_q == NULL)
			goto err_ret;
		bzero(cl->cl_q, sizeof(class_queue_t));
	}

	pif->pif_classes[pri] = cl;
	if (flags & PRCF_DEFAULTCLASS)
		pif->pif_default = cl;
	if (qlimit == 0)
		qlimit = 50;  /* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > pif->pif_maxpri)
		pif->pif_maxpri = pri;
	cl->cl_pif = pif;
	cl->cl_handle = qid;

#ifdef ALTQ_RED
	if (flags & (PRCF_RED|PRCF_RIO)) {
		int red_flags, red_pkttime;

		red_flags = 0;
		if (flags & PRCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & PRCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (pif->pif_bandwidth < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)pif->pif_ifq->altq_ifp->if_mtu
			  * 1000 * 1000 * 1000 / (pif->pif_bandwidth / 8);
#ifdef ALTQ_RIO
		if (flags & PRCF_RIO) {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
						red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		} else
#endif
		if (flags & PRCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
	}
#endif /* ALTQ_RED */

	return (cl);

 err_ret:
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_q != NULL)
		FREE(cl->cl_q, M_DEVBUF);
	FREE(cl, M_DEVBUF);
	return (NULL);
}
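
The RED setup above estimates the transmission time of one MTU-sized packet in nanoseconds: bytes divided by bytes per second (bandwidth / 8), scaled by 10^9. The arithmetic on its own, with illustrative numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t bandwidth = 10ULL * 1000 * 1000;   /* 10 Mbit/s, made up */
	uint64_t mtu = 1500;                        /* bytes */
	uint64_t pkttime_ns = mtu * 1000000000ULL / (bandwidth / 8);

	/* 1500 bytes at 10 Mbit/s: 1.2 ms per packet */
	printf("%llu ns per packet\n", (unsigned long long)pkttime_ns);
	return 0;
}
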
Code example #27
/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 */
void
vm_pageout_scan()
{
	register vm_page_t	m, next;
	register int		page_shortage;
	register int		s;
	register int		pages_freed;
	int			free;
	vm_object_t		object;

	/*
	 *	Only continue when we want more pages to be "free"
	 */

	cnt.v_rev++;

	s = splimp();
	simple_lock(&vm_page_queue_free_lock);
	free = cnt.v_free_count;
	simple_unlock(&vm_page_queue_free_lock);
	splx(s);

	if (free < cnt.v_free_target) {
		swapout_threads();

		/*
		 *	Be sure the pmap system is updated so
		 *	we can scan the inactive queue.
		 */

		pmap_update();
	}

	/*
	 *	Acquire the resident page system lock,
	 *	as we may be changing what's resident quite a bit.
	 */
	vm_page_lock_queues();

	/*
	 *	Start scanning the inactive queue for pages we can free.
	 *	We keep scanning until we have enough free pages or
	 *	we have scanned through the entire queue.  If we
	 *	encounter dirty pages, we start cleaning them.
	 */

	pages_freed = 0;
	for (m = vm_page_queue_inactive.tqh_first; m != NULL; m = next) {
		s = splimp();
		simple_lock(&vm_page_queue_free_lock);
		free = cnt.v_free_count;
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);
		if (free >= cnt.v_free_target)
			break;

		cnt.v_scan++;
		next = m->pageq.tqe_next;

		/*
		 * If the page has been referenced, move it back to the
		 * active queue.
		 */
		if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
			vm_page_activate(m);
			cnt.v_reactivated++;
			continue;
		}

		/*
		 * If the page is clean, free it up.
		 */
		if (m->flags & PG_CLEAN) {
			object = m->object;
			if (vm_object_lock_try(object)) {
				pmap_page_protect(VM_PAGE_TO_PHYS(m),
						  VM_PROT_NONE);
				vm_page_free(m);
				pages_freed++;
				cnt.v_dfree++;
				vm_object_unlock(object);
			}
			continue;
		}

		/*
		 * If the page is dirty but already being washed, skip it.
		 */
		if ((m->flags & PG_LAUNDRY) == 0)
			continue;

		/*
		 * Otherwise the page is dirty and still in the laundry,
		 * so we start the cleaning operation and remove it from
		 * the laundry.
		 */
		object = m->object;
		if (!vm_object_lock_try(object))
			continue;
		cnt.v_pageouts++;
#ifdef CLUSTERED_PAGEOUT
		if (object->pager &&
		    vm_pager_cancluster(object->pager, PG_CLUSTERPUT))
			vm_pageout_cluster(m, object);
		else
#endif
		vm_pageout_page(m, object);
		thread_wakeup(object);
		vm_object_unlock(object);
		/*
		 * Former next page may no longer even be on the inactive
		 * queue (due to potential blocking in the pager with the
		 * queues unlocked).  If it isn't, we just start over.
		 */
		if (next && (next->flags & PG_INACTIVE) == 0)
			next = vm_page_queue_inactive.tqh_first;
	}
	
	/*
	 *	Compute the page shortage.  If we are still very low on memory
	 *	be sure that we will move a minimal amount of pages from active
	 *	to inactive.
	 */

	page_shortage = cnt.v_inactive_target - cnt.v_inactive_count;
	if (page_shortage <= 0 && pages_freed == 0)
		page_shortage = 1;

	while (page_shortage > 0) {
		/*
		 *	Move some more pages from active to inactive.
		 */

		if ((m = vm_page_queue_active.tqh_first) == NULL)
			break;
		vm_page_deactivate(m);
		page_shortage--;
	}

	vm_page_unlock_queues();
}
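
The shortage computation near the end of the scan guarantees forward progress: even when the inactive target is already met, if this pass freed nothing, at least one page is still moved from active to inactive. As a hypothetical standalone helper:

#include <stdio.h>

static int page_shortage(int inactive_target, int inactive_count,
    int pages_freed)
{
	int shortage = inactive_target - inactive_count;

	if (shortage <= 0 && pages_freed == 0)
		shortage = 1;   /* freed nothing: force one deactivation */
	return shortage;
}

int main(void)
{
	printf("%d\n", page_shortage(100, 80, 0));   /* 20 short */
	printf("%d\n", page_shortage(100, 120, 0));  /* met, but force 1 */
	printf("%d\n", page_shortage(100, 120, 5));  /* met and freed: -20 */
	return 0;
}
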
Code example #28
File: ip6_mroute.c Project: orumin/openbsd-efivars
int
ip6_mforward(struct ip6_hdr *ip6, struct ifnet *ifp, struct mbuf *m)
{
	struct mf6c *rt;
	struct mif6 *mifp;
	struct mbuf *mm;
	int s;
	mifi_t mifi;
	struct sockaddr_in6 sin6;
	char src[INET6_ADDRSTRLEN], dst[INET6_ADDRSTRLEN];

	inet_ntop(AF_INET6, &ip6->ip6_src, src, sizeof(src));
	inet_ntop(AF_INET6, &ip6->ip6_dst, dst, sizeof(dst));
#ifdef MRT6DEBUG
	if (mrt6debug & DEBUG_FORWARD)
		log(LOG_DEBUG, "ip6_mforward: src %s, dst %s, ifindex %d\n",
		    src, dst, ifp->if_index);
#endif

	/*
	 * Don't forward a packet with Hop limit of zero or one,
	 * or a packet destined to a local-only group.
	 */
	if (ip6->ip6_hlim <= 1 || IN6_IS_ADDR_MC_INTFACELOCAL(&ip6->ip6_dst) ||
	    IN6_IS_ADDR_MC_LINKLOCAL(&ip6->ip6_dst))
		return 0;
	ip6->ip6_hlim--;

	/*
	 * Source address check: do not forward packets with unspecified
	 * source. It was discussed in July 2000, on ipngwg mailing list.
	 * This is rather more serious than unicast cases, because some
	 * MLD packets can be sent with the unspecified source address
	 * (although such packets must normally set 1 to the hop limit field).
	 */
	if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
		ip6stat.ip6s_cantforward++;
		if (ip6_log_time + ip6_log_interval < time_second) {
			ip6_log_time = time_second;
			log(LOG_DEBUG,
			    "cannot forward "
			    "from %s to %s nxt %d received on interface %u\n",
			    src, dst,
			    ip6->ip6_nxt,
			    m->m_pkthdr.ph_ifidx);
		}
		return 0;
	}

	/*
	 * Determine forwarding mifs from the forwarding cache table
	 */
	s = splsoftnet();
	MF6CFIND(ip6->ip6_src, ip6->ip6_dst, rt);

	/* Entry exists, so forward if necessary */
	if (rt) {
		splx(s);
		return (ip6_mdq(m, ifp, rt));
	} else {
		/*
		 * If we don't have a route for packet's origin,
		 * Make a copy of the packet &
		 * send message to routing daemon
		 */

		struct mbuf *mb0;
		struct rtdetq *rte;
		u_long hash;

		mrt6stat.mrt6s_no_route++;
#ifdef MRT6DEBUG
		if (mrt6debug & (DEBUG_FORWARD | DEBUG_MFC))
			log(LOG_DEBUG, "ip6_mforward: no rte s %s g %s\n",
			    src, dst);
#endif

		/*
		 * Allocate mbufs early so that we don't do extra work if we
		 * are just going to fail anyway.
		 */
		rte = (struct rtdetq *)malloc(sizeof(*rte), M_MRTABLE,
					      M_NOWAIT);
		if (rte == NULL) {
			splx(s);
			return ENOBUFS;
		}
		mb0 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
		/*
		 * Pullup packet header if needed before storing it,
		 * as other references may modify it in the meantime.
		 */
		if (mb0 &&
		    (M_READONLY(mb0) || mb0->m_len < sizeof(struct ip6_hdr)))
			mb0 = m_pullup(mb0, sizeof(struct ip6_hdr));
		if (mb0 == NULL) {
			free(rte, M_MRTABLE, 0);
			splx(s);
			return ENOBUFS;
		}

		/* is there an upcall waiting for this packet? */
		hash = MF6CHASH(ip6->ip6_src, ip6->ip6_dst);
		for (rt = mf6ctable[hash]; rt; rt = rt->mf6c_next) {
			if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_src,
					       &rt->mf6c_origin.sin6_addr) &&
			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
					       &rt->mf6c_mcastgrp.sin6_addr) &&
			    (rt->mf6c_stall != NULL))
				break;
		}

		if (rt == NULL) {
			struct mrt6msg *im;

			/* no upcall, so make a new entry */
			rt = (struct mf6c *)malloc(sizeof(*rt), M_MRTABLE,
						  M_NOWAIT);
			if (rt == NULL) {
				free(rte, M_MRTABLE, 0);
				m_freem(mb0);
				splx(s);
				return ENOBUFS;
			}
			/*
			 * Make a copy of the header to send to the user
			 * level process
			 */
			mm = m_copym(mb0, 0, sizeof(struct ip6_hdr), M_NOWAIT);

			if (mm == NULL) {
				free(rte, M_MRTABLE, 0);
				m_freem(mb0);
				free(rt, M_MRTABLE, 0);
				splx(s);
				return ENOBUFS;
			}

			/*
			 * Send message to routing daemon
			 */
			(void)memset(&sin6, 0, sizeof(sin6));
			sin6.sin6_len = sizeof(sin6);
			sin6.sin6_family = AF_INET6;
			sin6.sin6_addr = ip6->ip6_src;

			im = NULL;
			switch (ip6_mrouter_ver) {
			case MRT6_INIT:
				im = mtod(mm, struct mrt6msg *);
				im->im6_msgtype = MRT6MSG_NOCACHE;
				im->im6_mbz = 0;
				break;
			default:
				free(rte, M_MRTABLE, 0);
				m_freem(mb0);
				free(rt, M_MRTABLE, 0);
				splx(s);
				return EINVAL;
			}

#ifdef MRT6DEBUG
			if (mrt6debug & DEBUG_FORWARD)
				log(LOG_DEBUG,
				    "getting the iif info in the kernel\n");
#endif

			for (mifp = mif6table, mifi = 0;
			     mifi < nummifs && mifp->m6_ifp != ifp;
			     mifp++, mifi++)
				;

			switch (ip6_mrouter_ver) {
			case MRT6_INIT:
				im->im6_mif = mifi;
				break;
			}

			if (socket6_send(ip6_mrouter, mm, &sin6) < 0) {
				log(LOG_WARNING, "ip6_mforward: ip6_mrouter "
				    "socket queue full\n");
				mrt6stat.mrt6s_upq_sockfull++;
				free(rte, M_MRTABLE, 0);
				m_freem(mb0);
				free(rt, M_MRTABLE, 0);
				splx(s);
				return ENOBUFS;
			}

			mrt6stat.mrt6s_upcalls++;

			/* insert new entry at head of hash chain */
			bzero(rt, sizeof(*rt));
			rt->mf6c_origin.sin6_family = AF_INET6;
			rt->mf6c_origin.sin6_len = sizeof(struct sockaddr_in6);
			rt->mf6c_origin.sin6_addr = ip6->ip6_src;
			rt->mf6c_mcastgrp.sin6_family = AF_INET6;
			rt->mf6c_mcastgrp.sin6_len = sizeof(struct sockaddr_in6);
			rt->mf6c_mcastgrp.sin6_addr = ip6->ip6_dst;
			rt->mf6c_expire = UPCALL_EXPIRE;
			n6expire[hash]++;
			rt->mf6c_parent = MF6C_INCOMPLETE_PARENT;

			/* link into table */
			rt->mf6c_next  = mf6ctable[hash];
			mf6ctable[hash] = rt;
			/* Add this entry to the end of the queue */
			rt->mf6c_stall = rte;
		} else {
			/* determine if q has overflowed */
			struct rtdetq **p;
			int npkts = 0;

			for (p = &rt->mf6c_stall; *p != NULL; p = &(*p)->next)
				if (++npkts > MAX_UPQ6) {
					mrt6stat.mrt6s_upq_ovflw++;
					free(rte, M_MRTABLE, 0);
					m_freem(mb0);
					splx(s);
					return 0;
				}

			/* Add this entry to the end of the queue */
			*p = rte;
		}

		rte->next = NULL;
		rte->m = mb0;
		rte->ifp = ifp;
		splx(s);

		return 0;
	}
}
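
The overflow check in the upcall path walks the stall queue with a pointer-to-pointer, counting queued packets, and drops the new one once more than MAX_UPQ6 are already waiting; otherwise the tail slot the walk stopped at receives the entry. A simplified, self-contained version, with the types and the bound shrunk for illustration:

#include <stdio.h>

#define MAX_UPQ6 4             /* reduced bound for illustration */

struct rtdetq {
	struct rtdetq *next;
};

/* Append rte at the tail unless more than MAX_UPQ6 entries are
 * already queued; returns -1 on overflow so the caller can drop. */
static int append_bounded(struct rtdetq **head, struct rtdetq *rte)
{
	struct rtdetq **p;
	int npkts = 0;

	for (p = head; *p != NULL; p = &(*p)->next)
		if (++npkts > MAX_UPQ6)
			return -1;
	rte->next = NULL;
	*p = rte;
	return 0;
}

int main(void)
{
	struct rtdetq *head = NULL, nodes[7];
	int i;

	for (i = 0; i < 7; i++)
		printf("append %d -> %d\n", i, append_bounded(&head, &nodes[i]));
	return 0;
}
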
Code example #29
/*
 * Do an I/O operation to/from a cache block.
 */
int
smbfs_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
{
    struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
    struct smbnode *np = VTOSMB(vp);
    struct uio uio, *uiop = &uio;
    struct iovec io;
    struct smb_cred scred;
    int error = 0;

    uiop->uio_iov = &io;
    uiop->uio_iovcnt = 1;
    uiop->uio_segflg = UIO_SYSSPACE;
    uiop->uio_td = td;

    smb_makescred(&scred, td, cr);

    if (bp->b_iocmd == BIO_READ) {
        io.iov_len = uiop->uio_resid = bp->b_bcount;
        io.iov_base = bp->b_data;
        uiop->uio_rw = UIO_READ;
        switch (vp->v_type) {
        case VREG:
            /* b_blkno is in DEV_BSIZE units; convert to a byte offset. */
            uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
            error = smb_read(smp->sm_share, np->n_fid, uiop, &scred);
            if (error)
                break;
            if (uiop->uio_resid) {
                /* Short read: zero the tail of the buffer so stale
                 * data never reaches the buffer cache. */
                int left = uiop->uio_resid;
                int nread = bp->b_bcount - left;
                if (left > 0)
                    bzero((char *)bp->b_data + nread, left);
            }
            break;
        default:
            printf("smbfs_doio:  type %x unexpected\n",vp->v_type);
            break;
        };
        if (error) {
            bp->b_error = error;
            bp->b_ioflags |= BIO_ERROR;
        }
    } else { /* write */
        if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size)
            bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);

        if (bp->b_dirtyend > bp->b_dirtyoff) {
            io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff;
            uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
            io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
            uiop->uio_rw = UIO_WRITE;
            error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);

            /*
             * For an interrupted write, the buffer is still valid
             * and the write hasn't been pushed to the server yet,
             * so we can't set BIO_ERROR and report the interruption
             * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
             * is not relevant, so the rpc attempt is essentially
             * a noop.  For the case of a V3 write rpc not being
             * committed to stable storage, the block is still
             * dirty and requires either a commit rpc or another
             * write rpc with iomode == NFSV3WRITE_FILESYNC before
             * the block is reused.  This is indicated by setting
             * the B_DELWRI and B_NEEDCOMMIT flags.  (The V3/commit
             * language is inherited from the NFS client code this
             * routine was modeled on; the buffer-state handling is
             * the same for smbfs.)
             */
            if (error == EINTR
                    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
                int s;

                s = splbio();
                bp->b_flags &= ~(B_INVAL|B_NOCACHE);
                if ((bp->b_flags & B_ASYNC) == 0)
                    bp->b_flags |= B_EINTR;
                if ((bp->b_flags & B_PAGING) == 0) {
                    bdirty(bp);
                    bp->b_flags &= ~B_DONE;
                }
                splx(s);
            } else {
                if (error) {
                    bp->b_ioflags |= BIO_ERROR;
                    bp->b_error = error;
                }
                bp->b_dirtyoff = bp->b_dirtyend = 0;
            }
        } else {
            bp->b_resid = 0;
            bufdone(bp);
            return 0;
        }
    }
    bp->b_resid = uiop->uio_resid;
    bufdone(bp);
    return error;
}
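Two pieces of arithmetic above are worth isolating: b_blkno counts DEV_BSIZE (512-byte) cache blocks, so the byte offset in the file is b_blkno * DEV_BSIZE, and on a write only the dirty byte range [b_dirtyoff, b_dirtyend) of the block is pushed to the server. A self-contained sketch under those assumptions; the names here (buf_sketch, dirty_range, DEV_BSIZE_SKETCH) are illustrative, not from smbfs:

#include <sys/types.h>

#define DEV_BSIZE_SKETCH 512    /* stands in for the kernel's DEV_BSIZE */

struct buf_sketch {
    daddr_t b_blkno;            /* block number, in DEV_BSIZE units */
    int     b_dirtyoff;         /* first dirty byte within the block */
    int     b_dirtyend;         /* one past the last dirty byte */
};

/* File offset and length of the write smbfs_doio() would issue. */
static void
dirty_range(const struct buf_sketch *bp, off_t *offp, size_t *lenp)
{
    *offp = (off_t)bp->b_blkno * DEV_BSIZE_SKETCH + bp->b_dirtyoff;
    *lenp = (size_t)(bp->b_dirtyend - bp->b_dirtyoff);
}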
コード例 #30
0
ファイル: kern_timeout.c プロジェクト: UnitedMarsupials/kame
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock()
{
	register struct callout *c;
	register struct callout_tailq *bucket;
	register int s;
	register int curticks;
	register int steps;	/* #steps since we last allowed interrupts */

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	steps = 0;
	s = splhigh();
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					splx(s);
					s = splhigh();
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c->c_func = NULL;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				splx(s);
				c_func(c_arg);
				s = splhigh();
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	nextsoftcheck = NULL;
	splx(s);
}
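softclock() is the drain side of a hashed timing wheel: each tick owns the bucket callwheel[tick & callwheelmask], so only candidates for that tick are scanned, and entries whose c_time belongs to a later turn of the wheel are skipped. The insertion side is a one-line hash plus a tail insert. The sketch below is modeled loosely on the classic BSD timeout() of the same era and reuses the declarations visible in the example above (struct callout, callwheel, callwheelmask, ticks); it omits the callfree list and cancellation of an already-pending callout, and assumes callwheelmask == callwheelsize - 1 with callwheelsize a power of two.

static void
timeout_sketch(struct callout *c, void (*func)(void *), void *arg,
	       int to_ticks)
{
	int s = splhigh();

	c->c_func = func;
	c->c_arg = arg;
	c->c_flags |= CALLOUT_PENDING;
	c->c_time = ticks + to_ticks;	/* absolute expiry, in ticks */
	/*
	 * Hash on the low bits of the expiry time; softclock() compares
	 * c_time against curticks to skip entries for later wheel turns.
	 */
	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
	    c, c_links.tqe);
	splx(s);
}

This layout makes insertion O(1) and keeps the per-tick work proportional to the population of one bucket rather than the whole timeout set, which is why the MAX_SOFTCLOCK_STEPS escape hatch above only has to yield the CPU during pathologically long bucket chains.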