Example 1
/*
 * Stir our S-box.
 */
static void
arc4_randomstir(void)
{
	u_int8_t key[ARC4_KEYBYTES];
	int n;
	struct timeval tv_now;

	/*
	 * XXX: FIX!! This isn't brilliant. Need more confidence.
	 * This returns zero entropy before random(4) is seeded.
	 */
	(void)read_random(key, ARC4_KEYBYTES);
	getmicrouptime(&tv_now);
	mtx_lock(&arc4_mtx);
	for (n = 0; n < 256; n++) {
		arc4_j = (arc4_j + arc4_sbox[n] + key[n % ARC4_KEYBYTES]) % 256;
		arc4_swap(&arc4_sbox[n], &arc4_sbox[arc4_j]);
	}
	arc4_i = arc4_j = 0;
	/* Reset for next reseed cycle. */
	arc4_t_reseed = tv_now.tv_sec + ARC4_RESEED_SECONDS;
	arc4_numruns = 0;
	/*
	 * Throw away the first N words of output, as suggested in the
	 * paper "Weaknesses in the Key Scheduling Algorithm of RC4"
	 * by Fluhrer, Mantin, and Shamir.  (N = 256 in our case.)
	 *
	 * http://dl.acm.org/citation.cfm?id=646557.694759
	 */
	for (n = 0; n < 256*4; n++)
		arc4_randbyte();
	mtx_unlock(&arc4_mtx);
}
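This stir routine (and the variant in Example 9 below) warms up by discarding arc4_randbyte() output, but that helper is not shown in these snippets. The following is a minimal sketch of the standard RC4 output step, assuming the arc4_i/arc4_j indices and arc4_sbox[] S-box used above; the real kernel helper may differ in detail.

/*
 * Sketch: produce one byte with the RC4 PRGA, updating the shared
 * state.  Callers are assumed to hold arc4_mtx, as in the code above.
 */
static u_int8_t
arc4_randbyte(void)
{
	u_int8_t arc4_t;

	arc4_i = (arc4_i + 1) % 256;
	arc4_j = (arc4_j + arc4_sbox[arc4_i]) % 256;

	arc4_t = arc4_sbox[arc4_i];
	arc4_sbox[arc4_i] = arc4_sbox[arc4_j];
	arc4_sbox[arc4_j] = arc4_t;

	return (arc4_sbox[(arc4_sbox[arc4_i] + arc4_sbox[arc4_j]) % 256]);
}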
Example 2
static int
hwmp_send_perr(struct ieee80211_node *ni,
    const uint8_t sa[IEEE80211_ADDR_LEN],
    const uint8_t da[IEEE80211_ADDR_LEN],
    struct ieee80211_meshperr_ie *perr)
{
	struct ieee80211_hwmp_state *hs = ni->ni_vap->iv_hwmp;

	/*
	 * Enforce PERR interval.
	 */
	if (ratecheck(&hs->hs_lastperr, &ieee80211_hwmp_perrminint) == 0)
		return EALREADY;
	getmicrouptime(&hs->hs_lastperr);

	/*
	 * mesh perr action frame format
	 *     [6] da
	 *     [6] sa
	 *     [6] addr3 = sa
	 *     [1] action
	 *     [1] category
	 *     [tlv] mesh path error
	 */
	perr->perr_ie = IEEE80211_ELEMID_MESHPERR;
	return hwmp_send_action(ni, sa, da, (uint8_t *)perr,
	    sizeof(struct ieee80211_meshperr_ie));
}
Example 3
static int
hwmp_send_preq(struct ieee80211_node *ni,
    const uint8_t sa[IEEE80211_ADDR_LEN],
    const uint8_t da[IEEE80211_ADDR_LEN],
    struct ieee80211_meshpreq_ie *preq)
{
	struct ieee80211_hwmp_state *hs = ni->ni_vap->iv_hwmp;

	/*
	 * Enforce PREQ interval.
	 */
	if (ratecheck(&hs->hs_lastpreq, &ieee80211_hwmp_preqminint) == 0)
		return EALREADY;
	getmicrouptime(&hs->hs_lastpreq);

	/*
	 * mesh preq action frame format
	 *     [6] da
	 *     [6] sa 
	 *     [6] addr3 = sa
	 *     [1] action
	 *     [1] category
	 *     [tlv] mesh path request
	 */
	preq->preq_ie = IEEE80211_ELEMID_MESHPREQ;
	return hwmp_send_action(ni, sa, da, (uint8_t *)preq,
	    sizeof(struct ieee80211_meshpreq_ie));
}
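Both HWMP senders above (Examples 2 and 3) throttle themselves with ratecheck(9). The sketch below illustrates that helper's contract under the simplifying assumption that the uptime never goes backwards; it is not the actual kern_time.c implementation.

/*
 * Simplified sketch of the ratecheck(9) contract: return nonzero and
 * update *lasttime when at least *mininterval has elapsed, or zero to
 * suppress the event (the callers above then return EALREADY).
 */
static int
ratecheck_sketch(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval now, delta;

	getmicrouptime(&now);
	delta = now;
	timevalsub(&delta, lasttime);
	if (timevalcmp(&delta, mininterval, <))
		return (0);
	*lasttime = now;
	return (1);
}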
Example 4
static void
nfs_feedback(int type, int proc, void *arg)
{
	struct nfs_feedback_arg *nf = (struct nfs_feedback_arg *) arg;
	struct nfsmount *nmp = nf->nf_mount;
	struct timeval now;

	getmicrouptime(&now);

	switch (type) {
	case FEEDBACK_REXMIT2:
	case FEEDBACK_RECONNECT:
		if (nf->nf_lastmsg + nmp->nm_tprintf_delay < now.tv_sec) {
			nfs_down(nmp, nf->nf_td,
			    "not responding", 0, NFSSTA_TIMEO);
			nf->nf_tprintfmsg = TRUE;
			nf->nf_lastmsg = now.tv_sec;
		}
		break;

	case FEEDBACK_OK:
		nfs_up(nf->nf_mount, nf->nf_td,
		    "is alive again", NFSSTA_TIMEO, nf->nf_tprintfmsg);
		break;
	}
}
Example 5
static struct timeval get_drm_timestamp(void)
{
	struct timeval now;

	getmicrouptime(&now);
#ifdef notyet
	if (!drm_timestamp_monotonic)
		now = ktime_sub(now, ktime_get_monotonic_offset());
#endif

	return (now);
}
Example 6
void
nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg)
{
	struct nvme_io_test		*io_test;
	struct nvme_io_test_internal	*io_test_internal;
	void				(*fn)(void *);
	int				i;

	io_test = (struct nvme_io_test *)arg;

	if ((io_test->opc != NVME_OPC_READ) &&
	    (io_test->opc != NVME_OPC_WRITE))
		return;

	if (io_test->size % nvme_ns_get_sector_size(ns))
		return;

	io_test_internal = malloc(sizeof(*io_test_internal), M_NVME,
	    M_WAITOK | M_ZERO);
	io_test_internal->opc = io_test->opc;
	io_test_internal->ns = ns;
	io_test_internal->td_active = io_test->num_threads;
	io_test_internal->time = io_test->time;
	io_test_internal->size = io_test->size;
	io_test_internal->flags = io_test->flags;

	if (cmd == NVME_IO_TEST)
		fn = nvme_ns_io_test;
	else
		fn = nvme_ns_bio_test;

	getmicrouptime(&io_test_internal->start);

	for (i = 0; i < io_test->num_threads; i++)
#if __FreeBSD_version >= 800004
		kthread_add(fn, io_test_internal,
		    NULL, NULL, 0, 0, "nvme_io_test[%d]", i);
#else
		kthread_create(fn, io_test_internal,
		    NULL, 0, 0, "nvme_io_test[%d]", i);
#endif

	tsleep(io_test_internal, 0, "nvme_test", io_test->time * 2 * hz);

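	/*
	 * tsleep() above may wake early or time out; spin until every
	 * worker thread has dropped td_active on its way out.
	 */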
	while (io_test_internal->td_active > 0)
		DELAY(10);

	memcpy(io_test->io_completed, io_test_internal->io_completed,
	    sizeof(io_test->io_completed));

	free(io_test_internal, M_NVME);
}
Example 7
/*
 * Initialize the vnode associated with a new inode, handle aliased
 * vnodes.
 */
void
ulfs_vinit(struct mount *mntp, int (**specops)(void *), int (**fifoops)(void *),
	struct vnode **vpp)
{
	struct timeval	tv;
	struct inode	*ip;
	struct vnode	*vp;
	dev_t		rdev;
	struct ulfsmount *ump;

	vp = *vpp;
	ip = VTOI(vp);
	switch(vp->v_type = IFTOVT(ip->i_mode)) {
	case VCHR:
	case VBLK:
		vp->v_op = specops;
		ump = ip->i_ump;
		// XXX clean this up
		if (ump->um_fstype == ULFS1)
			rdev = (dev_t)ulfs_rw32(ip->i_din->u_32.di_rdev,
			    ULFS_MPNEEDSWAP(ump->um_lfs));
		else
			rdev = (dev_t)ulfs_rw64(ip->i_din->u_64.di_rdev,
			    ULFS_MPNEEDSWAP(ump->um_lfs));
		spec_node_init(vp, rdev);
		break;
	case VFIFO:
		vp->v_op = fifoops;
		break;
	case VNON:
	case VBAD:
	case VSOCK:
	case VLNK:
	case VDIR:
	case VREG:
		break;
	}
	if (ip->i_number == ULFS_ROOTINO)
                vp->v_vflag |= VV_ROOT;
	/*
	 * Initialize modrev times: seconds go in the high 32 bits and
	 * the microseconds, scaled by 4294 (about 2^32 / 10^6), fill
	 * the low 32.
	 */
	getmicrouptime(&tv);
	ip->i_modrev = (uint64_t)(uint)tv.tv_sec << 32
			| tv.tv_usec * 4294u;
	*vpp = vp;
}
Example 8
/*
 * MPSAFE
 */
void
arc4rand(void *ptr, u_int len, int reseed)
{
    u_char *p;
    struct timeval tv;

    getmicrouptime(&tv);
    if (reseed ||
            (arc4_numruns > ARC4_RESEED_BYTES) ||
            (tv.tv_sec > arc4_t_reseed))
        arc4_randomstir();

    mtx_lock(&arc4_mtx);
    arc4_numruns += len;
    p = ptr;
    while (len--)
        *p++ = arc4_randbyte();
    mtx_unlock(&arc4_mtx);
}
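A minimal usage sketch of arc4rand(); the function and buffer below are illustrative, not from the source:

static void
example_fill_nonce(void)
{
    u_char nonce[16];

    /* reseed = 0: leave restirring to the byte-count and time triggers. */
    arc4rand(nonce, sizeof(nonce), 0);
}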
Example 9
/*
 * Stir our S-box.
 */
static void
arc4_randomstir (void)
{
    u_int8_t key[256];
    int r, n;
    struct timeval tv_now;

    /*
     * XXX read_random() returns unsafe numbers if the entropy
     * device is not loaded -- MarkM.
     */
    r = read_random(key, ARC4_KEYBYTES);
    getmicrouptime(&tv_now);
    mtx_lock(&arc4_mtx);
    /* If r == 0 || -1, just use what was on the stack. */
    if (r > 0)
    {
        for (n = r; n < sizeof(key); n++)
            key[n] = key[n % r];
    }

    for (n = 0; n < 256; n++)
    {
        arc4_j = (arc4_j + arc4_sbox[n] + key[n]) % 256;
        arc4_swap(&arc4_sbox[n], &arc4_sbox[arc4_j]);
    }

    /* Reset for next reseed cycle. */
    arc4_t_reseed = tv_now.tv_sec + ARC4_RESEED_SECONDS;
    arc4_numruns = 0;

    /*
     * Throw away the first N words of output, as suggested in the
     * paper "Weaknesses in the Key Scheduling Algorithm of RC4"
     * by Fluhrer, Mantin, and Shamir.  (N = 256 in our case.)
     */
    for (n = 0; n < 256 * 4; n++)
        arc4_randbyte();
    mtx_unlock(&arc4_mtx);
}
Example 10
static void
nvme_ns_io_test_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_io_test_thread	*tth = arg;
	struct timeval			t;

	tth->io_completed++;

	if (nvme_completion_is_error(cpl)) {
		printf("%s: error occurred\n", __func__);
		wakeup_one(tth);
		return;
	}

	getmicrouptime(&t);
	timevalsub(&t, &tth->start);

	if (t.tv_sec >= tth->time) {
		wakeup_one(tth);
		return;
	}

	switch (tth->opc) {
	case NVME_OPC_WRITE:
		nvme_ns_cmd_write(tth->ns, tth->buf, tth->idx * 2048,
		    tth->size/nvme_ns_get_sector_size(tth->ns),
		    nvme_ns_io_test_cb, tth);
		break;
	case NVME_OPC_READ:
		nvme_ns_cmd_read(tth->ns, tth->buf, tth->idx * 2048,
		    tth->size/nvme_ns_get_sector_size(tth->ns),
		    nvme_ns_io_test_cb, tth);
		break;
	default:
		break;
	}
}
Example 11
void
tap_attach(device_t parent, device_t self, void *aux)
{
	struct tap_softc *sc = device_private(self);
	struct ifnet *ifp;
#if defined(COMPAT_40) || defined(MODULAR)
	const struct sysctlnode *node;
	int error;
#endif
	uint8_t enaddr[ETHER_ADDR_LEN] =
	    { 0xf2, 0x0b, 0xa4, 0xff, 0xff, 0xff };
	char enaddrstr[3 * ETHER_ADDR_LEN];
	struct timeval tv;
	uint32_t ui;

	sc->sc_dev = self;
	sc->sc_sih = softint_establish(SOFTINT_CLOCK, tap_softintr, sc);
	getnanotime(&sc->sc_btime);
	sc->sc_atime = sc->sc_mtime = sc->sc_btime;

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	/*
	 * In order to obtain a unique initial Ethernet address on a host,
	 * do some randomisation using the current uptime.  It's not meant
	 * for anything but avoiding hard-coding an address.
	 */
	getmicrouptime(&tv);
	ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff;
	memcpy(enaddr+3, (uint8_t *)&ui, 3);

	aprint_verbose_dev(self, "Ethernet address %s\n",
	    ether_snprintf(enaddrstr, sizeof(enaddrstr), enaddr));

	/*
	 * Why 1000baseT? Why not? You can add more.
	 *
	 * Note that there are 3 steps: init, one or several additions to
	 * list of supported media, and in the end, the selection of one
	 * of them.
	 */
	ifmedia_init(&sc->sc_im, 0, tap_mediachange, tap_mediastatus);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_im, IFM_ETHER|IFM_AUTO);

	/*
	 * One should note that an interface must do multicast in order
	 * to support IPv6.
	 */
	ifp = &sc->sc_ec.ec_if;
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_softc	= sc;
	ifp->if_flags	= IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl	= tap_ioctl;
	ifp->if_start	= tap_start;
	ifp->if_stop	= tap_stop;
	ifp->if_init	= tap_init;
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;

	/* Those steps are mandatory for an Ethernet driver, the first call
	 * being common to all network interface drivers. */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	sc->sc_flags = 0;

#if defined(COMPAT_40) || defined(MODULAR)
	/*
	 * Add a sysctl node for that interface.
	 *
	 * The pointer transmitted is not a string, but instead a pointer to
	 * the softc structure, which we can use to build the string value on
	 * the fly in the helper function of the node.  See the comments for
	 * tap_sysctl_handler for details.
	 *
	 * Usually sysctl_createv is called with CTL_CREATE as the before-last
	 * component.  However, we can allocate a number ourselves, as we are
	 * the only consumer of the net.link.<iface> node.  In this case, the
	 * unit number is conveniently used to number the node.  CTL_CREATE
	 * would just work, too.
	 */
	if ((error = sysctl_createv(NULL, 0, NULL,
	    &node, CTLFLAG_READWRITE,
	    CTLTYPE_STRING, device_xname(self), NULL,
	    tap_sysctl_handler, 0, sc, 18,
	    CTL_NET, AF_LINK, tap_node, device_unit(sc->sc_dev),
	    CTL_EOL)) != 0)
		aprint_error_dev(self, "sysctl_createv returned %d, ignoring\n",
		    error);
#endif

	/*
	 * Initialize the two locks for the device.
	 *
	 * We need a lock here because even though the tap device can be
	 * opened only once, the file descriptor might be passed to another
	 * process, say a fork(2)ed child.
	 *
	 * The Giant saves us from most of the hassle, but since the read
	 * operation can sleep, we don't want two processes to wake up at
	 * the same moment and both try and dequeue a single packet.
	 *
	 * The queue for event listeners (used by kqueue(9), see below) has
	 * to be protected, too, but we don't need the same level of
	 * complexity for that lock, so a simple spinning lock is fine.
	 */
	mutex_init(&sc->sc_rdlock, MUTEX_DEFAULT, IPL_NONE);
	simple_lock_init(&sc->sc_kqlock);

	selinit(&sc->sc_rsel);
}
Example 12
static int
ukbd_interrupt(keyboard_t *kbd, void *arg)
{
	usbd_status status = (usbd_status)arg;
	ukbd_state_t *state;
	struct ukbd_data *ud;
	struct timeval tv;
	u_long now;
	int mod, omod;
	int key, c;
	int i, j;

	DPRINTFN(5, ("ukbd_intr: status=%d\n", status));
	if (status == USBD_CANCELLED)
		return 0;

	state = (ukbd_state_t *)kbd->kb_data;
	ud = &state->ks_ndata;

	if (status != USBD_NORMAL_COMPLETION) {
		DPRINTF(("ukbd_intr: status=%d\n", status));
		if (status == USBD_STALLED)
		    usbd_clear_endpoint_stall_async(state->ks_intrpipe);
		return 0;
	}

	if (ud->keycode[0] == KEY_ERROR) {
		return 0;		/* ignore  */
	}

	getmicrouptime(&tv);
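	/* Collapse the uptime into milliseconds for the repeat deadlines. */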
	now = (u_long)tv.tv_sec*1000 + (u_long)tv.tv_usec/1000;

#define ADDKEY1(c) 		\
	if (state->ks_inputs < INPUTBUFSIZE) {				\
		state->ks_input[state->ks_inputtail] = (c);		\
		++state->ks_inputs;					\
		state->ks_inputtail = (state->ks_inputtail + 1)%INPUTBUFSIZE; \
	}

	mod = ud->modifiers;
	omod = state->ks_odata.modifiers;
	if (mod != omod) {
		for (i = 0; i < NMOD; i++)
			if (( mod & ukbd_mods[i].mask) !=
			    (omod & ukbd_mods[i].mask))
				ADDKEY1(ukbd_mods[i].key |
				       (mod & ukbd_mods[i].mask
					  ? KEY_PRESS : KEY_RELEASE));
	}

	/* Check for released keys. */
	for (i = 0; i < NKEYCODE; i++) {
		key = state->ks_odata.keycode[i];
		if (key == 0)
			continue;
		for (j = 0; j < NKEYCODE; j++) {
			if (ud->keycode[j] == 0)
				continue;
			if (key == ud->keycode[j])
				goto rfound;
		}
		ADDKEY1(key | KEY_RELEASE);
	rfound:
		;
	}

	/* Check for pressed keys. */
	for (i = 0; i < NKEYCODE; i++) {
		key = ud->keycode[i];
		if (key == 0)
			continue;
		state->ks_ntime[i] = now + kbd->kb_delay1;
		for (j = 0; j < NKEYCODE; j++) {
			if (state->ks_odata.keycode[j] == 0)
				continue;
			if (key == state->ks_odata.keycode[j]) {
				state->ks_ntime[i] = state->ks_otime[j];
				if (state->ks_otime[j] > now)
					goto pfound;
				state->ks_ntime[i] = now + kbd->kb_delay2;
				break;
			}
		}
		ADDKEY1(key | KEY_PRESS);
		/*
		 * If any other key is presently down, force its repeat to be
		 * well in the future (100s).  This makes the last key to be
		 * pressed do the autorepeat.
		 */
		for (j = 0; j < NKEYCODE; j++) {
			if (j != i)
				state->ks_ntime[j] = now + 100 * 1000;
		}
	pfound:
		;
	}

	state->ks_odata = *ud;
	bcopy(state->ks_ntime, state->ks_otime, sizeof(state->ks_ntime));
	if (state->ks_inputs <= 0) {
		return 0;
	}

#ifdef USB_DEBUG
	for (i = state->ks_inputhead, j = 0; j < state->ks_inputs; ++j,
		i = (i + 1)%INPUTBUFSIZE) {
		c = state->ks_input[i];
		DPRINTF(("0x%x (%d) %s\n", c, c,
			(c & KEY_RELEASE) ? "released":"pressed"));
	}
	if (ud->modifiers)
		DPRINTF(("mod:0x%04x ", ud->modifiers));
        for (i = 0; i < NKEYCODE; i++) {
		if (ud->keycode[i])
			DPRINTF(("%d ", ud->keycode[i]));
	}
	DPRINTF(("\n"));
#endif /* USB_DEBUG */

	if (state->ks_polling) {
		return 0;
	}

	if (KBD_IS_ACTIVE(kbd) && KBD_IS_BUSY(kbd)) {
		/* let the callback function process the input */
		(*kbd->kb_callback.kc_func)(kbd, KBDIO_KEYINPUT,
					    kbd->kb_callback.kc_arg);
	} else {
		/* read and discard the input; no one is waiting for it */
		do {
			c = ukbd_read_char(kbd, FALSE);
		} while (c != NOKEY);
	}

	return 0;
}
Example 13
static void
nvme_ns_bio_test(void *arg)
{
	struct nvme_io_test_internal	*io_test = arg;
	struct cdevsw			*csw;
	struct mtx			*mtx;
	struct bio			*bio;
	struct cdev			*dev;
	void				*buf;
	struct timeval			t;
	uint64_t			offset;
	uint32_t			idx, io_completed = 0;
#if __FreeBSD_version >= 900017
	int				ref;
#endif

	buf = malloc(io_test->size, M_NVME, M_WAITOK);
	idx = atomic_fetchadd_int(&io_test->td_idx, 1);
	dev = io_test->ns->cdev;

	offset = idx * 2048 * nvme_ns_get_sector_size(io_test->ns);

	while (1) {

		bio = g_alloc_bio();

		memset(bio, 0, sizeof(*bio));
		bio->bio_cmd = (io_test->opc == NVME_OPC_READ) ?
		    BIO_READ : BIO_WRITE;
		bio->bio_done = nvme_ns_bio_test_cb;
		bio->bio_dev = dev;
		bio->bio_offset = offset;
		bio->bio_data = buf;
		bio->bio_bcount = io_test->size;

		if (io_test->flags & NVME_TEST_FLAG_REFTHREAD) {
#if __FreeBSD_version >= 900017
			csw = dev_refthread(dev, &ref);
#else
			csw = dev_refthread(dev);
#endif
		} else
			csw = dev->si_devsw;

		mtx = mtx_pool_find(mtxpool_sleep, bio);
		mtx_lock(mtx);
		(*csw->d_strategy)(bio);
		msleep(bio, mtx, PRIBIO, "biotestwait", 0);
		mtx_unlock(mtx);

		if (io_test->flags & NVME_TEST_FLAG_REFTHREAD) {
#if __FreeBSD_version >= 900017
			dev_relthread(dev, ref);
#else
			dev_relthread(dev);
#endif
		}

		if ((bio->bio_flags & BIO_ERROR) || (bio->bio_resid > 0))
			break;

		g_destroy_bio(bio);

		io_completed++;

		getmicrouptime(&t);
		timevalsub(&t, &io_test->start);

		if (t.tv_sec >= io_test->time)
			break;

		offset += io_test->size;
		if ((offset + io_test->size) > nvme_ns_get_size(io_test->ns))
			offset = 0;
	}

	io_test->io_completed[idx] = io_completed;
	wakeup_one(io_test);

	free(buf, M_NVME);

	atomic_subtract_int(&io_test->td_active, 1);
	mb();

#if __FreeBSD_version >= 800000
	kthread_exit();
#else
	kthread_exit(0);
#endif
}
Example 14
/*
 * Buffer cleaning daemon.
 */
void
buf_daemon(struct proc *p)
{
	struct timeval starttime, timediff;
	struct buf *bp = NULL;
	int s, pushed = 0;

	cleanerproc = curproc;

	s = splbio();
	for (;;) {
		if (bp == NULL || (pushed >= 16 &&
		    UNCLEAN_PAGES < hidirtypages &&
		    bcstats.kvaslots_avail > 2 * RESERVE_SLOTS)){
			pushed = 0;
			/*
			 * Wake up anyone who was waiting for buffers
			 * to be released.
			 */
			if (needbuffer) {
				needbuffer = 0;
				wakeup(&needbuffer);
			}
			tsleep(&bd_req, PRIBIO - 7, "cleaner", 0);
		}

		getmicrouptime(&starttime);

		while ((bp = bufcache_getdirtybuf())) {
			struct timeval tv;

			if (UNCLEAN_PAGES < lodirtypages &&
			    bcstats.kvaslots_avail > 2 * RESERVE_SLOTS &&
			    pushed >= 16)
				break;

			bufcache_take(bp);
			buf_acquire(bp);
			splx(s);

			if (ISSET(bp->b_flags, B_INVAL)) {
				brelse(bp);
				s = splbio();
				continue;
			}
#ifdef DIAGNOSTIC
			if (!ISSET(bp->b_flags, B_DELWRI))
				panic("Clean buffer on dirty queue");
#endif
			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    !ISSET(bp->b_flags, B_DEFERRED) &&
			    buf_countdeps(bp, 0, 0)) {
				SET(bp->b_flags, B_DEFERRED);
				s = splbio();
				bufcache_release(bp);
				buf_release(bp);
				continue;
			}

			bawrite(bp);
			pushed++;

			/* Never allow processing to run for more than 1 sec */
			getmicrouptime(&tv);
			timersub(&tv, &starttime, &timediff);
			s = splbio();
			if (timediff.tv_sec)
				break;

		}
	}
}
Example 15
void
pmsinput(void *vsc, int data)
{
	struct pms_softc *sc = vsc;
	u_int changed;
	int dx, dy, dz = 0;
	int newbuttons = 0;

	if (!sc->sc_enabled) {
		/* Interrupts are not expected.	 Discard the byte. */
		return;
	}

	getmicrouptime(&sc->current);

	if (sc->inputstate > 0) {
		struct timeval diff;

		timersub(&sc->current, &sc->last, &diff);
		/*
		 * Empirically, the delay should be about 1700us on a standard
		 * PS/2 port.  I have seen delays as large as 4500us (rarely)
		 * in regular use.  When using a confused mouse, I generally
		 * see delays at least as large as 30,000us.  -seebs
		 *
		 * The thinkpad trackball returns at 22-23ms. So we use
		 * >= 40ms. In the future, I'll implement adaptable timeout
		 * by increasing the timeout if the mouse reset happens
		 * too frequently -christos
		 */
		if (diff.tv_sec > 0 || diff.tv_usec >= 40000) {
			DPRINTF(("pms_input: unusual delay (%ld.%06ld s), "
			    "scheduling reset\n",
			    (long)diff.tv_sec, (long)diff.tv_usec));
			sc->inputstate = 0;
			sc->sc_enabled = 0;
			wakeup(&sc->sc_enabled);
			return;
		}
	}
	sc->last = sc->current;

	if (sc->inputstate == 0) {
		/*
		 * Some devices (seen on trackballs anytime, and on
		 * some mice shortly after reset) output garbage bytes
		 * between packets.  Just ignore them.
		 */
		if ((data & 0xc0) != 0)
			return;	/* not in sync yet, discard input */
	}

	sc->packet[sc->inputstate++] = data & 0xff;
	switch (sc->inputstate) {
	case 0:
		/* no useful processing can be done yet */
		break;

	case 1:
		/*
		 * Why should we test for bit 0x8 and insist on it here?
		 * The old (psm.c and psm_intelli.c) drivers didn't do
		 * it, and there are devices where it does harm (that's
		 * why it is not used if using PMS_STANDARD protocol).
		 * Anyway, it does not seem to cause any harm to accept packets
		 * without this bit.
		 */
#if 0
		if (sc->protocol == PMS_STANDARD)
			break;
		if (!(sc->packet[0] & 0x8)) {
			DPRINTF(("pmsinput: 0x8 not set in first byte "
			    "[0x%02x], resetting\n", sc->packet[0]));
			sc->inputstate = 0;
			sc->sc_enabled = 0;
			wakeup(&sc->sc_enabled);
			return;
		}
#endif
		break;

	case 2:
		break;

	case 4:
		/* Case 4 is a superset of case 3. This is *not* an accident. */
		if (sc->protocol == PMS_SCROLL3) {
			dz = sc->packet[3];
			if (dz >= 128)
				dz -= 256;
			if (dz == -128)
				dz = -127;
		} else if (sc->protocol == PMS_SCROLL5) {
			dz = sc->packet[3] & 0xf;
			if (dz >= 8)
				dz -= 16;
			if (sc->packet[3] & PMS_4BUTMASK)
				newbuttons |= 0x8;
			if (sc->packet[3] & PMS_5BUTMASK)
				newbuttons |= 0x10;
		} else {
			DPRINTF(("pmsinput: why am I looking at this byte?\n"));
			dz = 0;
		}
		/* FALLTHROUGH */
	case 3:
		/*
		 * This is only an endpoint for scroll protocols with 4
		 * bytes, or the standard protocol with 3.
		 */
		if (sc->protocol != PMS_STANDARD && sc->inputstate == 3)
			break;

		newbuttons |= ((sc->packet[0] & PMS_LBUTMASK) ? 0x1 : 0) |
		    ((sc->packet[0] & PMS_MBUTMASK) ? 0x2 : 0) |
		    ((sc->packet[0] & PMS_RBUTMASK) ? 0x4 : 0);

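		/* Interpret the raw bytes as two's-complement deltas;
		 * -128 is clamped to -127 to keep the range symmetric. */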
		dx = sc->packet[1];
		if (dx >= 128)
			dx -= 256;
		if (dx == -128)
			dx = -127;

		dy = sc->packet[2];
		if (dy >= 128)
			dy -= 256;
		if (dy == -128)
			dy = -127;

		sc->inputstate = 0;
		changed = (sc->buttons ^ newbuttons);
		sc->buttons = newbuttons;

#ifdef PMSDEBUG
		if (sc->protocol == PMS_STANDARD) {
			DPRINTF(("pms: packet: 0x%02x%02x%02x\n",
			    sc->packet[0], sc->packet[1], sc->packet[2]));
		} else {
			DPRINTF(("pms: packet: 0x%02x%02x%02x%02x\n",
			    sc->packet[0], sc->packet[1], sc->packet[2],
			    sc->packet[3]));
		}
#endif
		if (dx || dy || dz || changed) {
#ifdef PMSDEBUG
			DPRINTF(("pms: x %+03d y %+03d z %+03d "
			    "buttons 0x%02x\n",	dx, dy, dz, sc->buttons));
#endif
			wsmouse_input(sc->sc_wsmousedev,
			    sc->buttons, dx, dy, dz, 0,
			    WSMOUSE_INPUT_DELTA);
		}
		memset(sc->packet, 0, 4);
		break;

	/* If we get here, we have problems. */
	default:
		printf("pmsinput: very confused.  resetting.\n");
		sc->inputstate = 0;
		sc->sc_enabled = 0;
		wakeup(&sc->sc_enabled);
		return;
	}
}
Example 16
          /*
	   | Add header digest to the iSCSI hdr mbuf
	   | XXX Assert: (mh->m_pkthdr.len + 4) < MHLEN
	   */
          bcopy(&pp->hdr_dig, (mh->m_data + mh->m_len), sizeof(int));
          mh->m_len += sizeof(int);
          mh->m_pkthdr.len += sizeof(int);
     }
     mp = &mh->m_next;
     if(pq->pdu.ds) {
          struct mbuf   *md;
          int           off = 0;

          len = pp->ds_len;
	  while(len & 03) // the specs say it must be int aligned
	       len++;
          while(len > 0) {
                int       l;

	       MGET(md, MB_TRYWAIT, MT_DATA);
	       pq->refcnt++;

                l = min(MCLBYTES, len);
	       debug(5, "setting ext_free(arg=%p len/l=%d/%d)", pq->buf, len, l);
	       md->m_ext.ext_buf = pq->buf;
	       md->m_ext.ext_free = ext_free;
	       md->m_ext.ext_ref = ext_ref;
	       md->m_ext.ext_arg = pq;
	       md->m_ext.ext_size = l;
	       md->m_flags |= M_EXT;
	       md->m_data = pp->ds + off;
	       md->m_len = l;
	       md->m_next = NULL;
	       mh->m_pkthdr.len += l;
	       *mp = md;
	       mp = &md->m_next;
	       len -= l;
	       off += l;
          }
     }
     if(sp->dataDigest) {
          struct mbuf   *me;

	  pp->ds_dig = sp->dataDigest(pp->ds, pp->ds_len, 0);

	  MGET(me, MB_TRYWAIT, MT_DATA);
          me->m_len = sizeof(int);
          MH_ALIGN(mh, sizeof(int));
          bcopy(&pp->ds_dig, me->m_data, sizeof(int));
          me->m_next = NULL;
          mh->m_pkthdr.len += sizeof(int);
          *mp = me;
     }
     if((error = sosend(sp->soc, NULL, NULL, mh, 0, 0, curthread)) != 0) {
	  sdebug(3, "error=%d", error);
	  return error;
     }
     sp->stats.nsent++;
     getmicrouptime(&sp->stats.t_sent);
     return 0;
}
#else /* NO_USE_MBUF */
int
isc_sendPDU(isc_session_t *sp, pduq_t *pq)
{
     struct uio *uio = &pq->uio;
     struct iovec *iv;
     pdu_t	*pp = &pq->pdu;
     int	len, error;

     debug_called(8);

     bzero(uio, sizeof(struct uio));
     uio->uio_rw = UIO_WRITE;
     uio->uio_segflg = UIO_SYSSPACE;
     uio->uio_td = curthread;
     uio->uio_iov = iv = pq->iov;

     iv->iov_base = &pp->ipdu;
     iv->iov_len = sizeof(union ipdu_u);
     uio->uio_resid = pq->len;
     iv++;
     if(sp->hdrDigest)
	  pq->pdu.hdr_dig = sp->hdrDigest(&pp->ipdu, sizeof(union ipdu_u), 0);
     if(pp->ahs_len) {
	  iv->iov_base = pp->ahs;
	  iv->iov_len = pp->ahs_len;
	  iv++;

	  if(sp->hdrDigest)
	       pq->pdu.hdr_dig = sp->hdrDigest(&pp->ahs, pp->ahs_len, pq->pdu.hdr_dig);
     }
     if(sp->hdrDigest) {
	  debug(2, "hdr_dig=%x", pq->pdu.hdr_dig);
	  iv->iov_base = &pp->hdr_dig;
	  iv->iov_len = sizeof(int);
	  iv++;
     }
     if(pq->pdu.ds) {
	  iv->iov_base = pp->ds;
	  iv->iov_len = pp->ds_len;
	  while(iv->iov_len & 03) // the specs say it must be int aligned
	       iv->iov_len++;
	  iv++;
     }
     if(sp->dataDigest) {
	  pp->ds_dig = sp->dataDigest(pp->ds, pp->ds_len, 0);
	  iv->iov_base = &pp->ds_dig;
	  iv->iov_len = sizeof(int);
	  iv++;
     }
     uio->uio_iovcnt	= iv - pq->iov;
     sdebug(5, "opcode=%x iovcnt=%d uio_resid=%d itt=%x",
	    pp->ipdu.bhs.opcode, uio->uio_iovcnt, uio->uio_resid,
	    ntohl(pp->ipdu.bhs.itt));
     sdebug(5, "sp=%p sp->soc=%p uio=%p sp->td=%p",
	    sp, sp->soc, uio, sp->td);
     do {
	  len = uio->uio_resid;
	  error = sosend(sp->soc, NULL, uio, 0, 0, 0, curthread);
	  if(uio->uio_resid == 0 || error || len == uio->uio_resid) {
	       if(uio->uio_resid) {
		    sdebug(2, "uio->uio_resid=%d uio->uio_iovcnt=%d error=%d len=%d",
			   uio->uio_resid, uio->uio_iovcnt, error, len);
		    if(error == 0)
			 error = EAGAIN; // 35
	       }
	       break;
	  }
	  /*
	   | XXX: untested code
	   */
	  sdebug(1, "uio->uio_resid=%d uio->uio_iovcnt=%d",
		uio->uio_resid, uio->uio_iovcnt);
	  iv = uio->uio_iov;
	  len -= uio->uio_resid;
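	  /* Consume fully-sent iovecs, then trim the partially-sent one
	     before retrying the sosend(). */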
	  while(uio->uio_iovcnt > 0) {
	       if(iv->iov_len > len) {
		    caddr_t	bp = (caddr_t)iv->iov_base;

		    iv->iov_len -= len;
		    iv->iov_base = (void *)&bp[len];
		    break;
	       }
	       len -= iv->iov_len;
	       uio->uio_iovcnt--;
	       uio->uio_iov++;
	       iv++;
	  }
     } while(uio->uio_resid);

     if(error == 0) {
	  sp->stats.nsent++;
	  getmicrouptime(&sp->stats.t_sent);

     }

     return error;
}
Example 17
/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
int
nfs_request(struct vnode *vp, struct mbuf *mreq, int procnum,
    struct thread *td, struct ucred *cred, struct mbuf **mrp,
    struct mbuf **mdp, caddr_t *dposp)
{
	struct mbuf *mrep;
	u_int32_t *tl;
	struct nfsmount *nmp;
	struct mbuf *md;
	time_t waituntil;
	caddr_t dpos;
	int error = 0;
	struct timeval now;
	AUTH *auth = NULL;
	enum nfs_rto_timer_t timer;
	struct nfs_feedback_arg nf;
	struct rpc_callextra ext;
	enum clnt_stat stat;
	struct timeval timo;

	/* Reject requests while attempting a forced unmount. */
	if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
		m_freem(mreq);
		return (ESTALE);
	}
	nmp = VFSTONFS(vp->v_mount);
	bzero(&nf, sizeof(struct nfs_feedback_arg));
	nf.nf_mount = nmp;
	nf.nf_td = td;
	getmicrouptime(&now);
	nf.nf_lastmsg = now.tv_sec -
	    ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));

	/*
	 * XXX if not already connected call nfs_connect now.  Longer
	 * term, change nfs_mount to call nfs_connect unconditionally
	 * and let clnt_reconnect_create handle reconnects.
	 */
	if (!nmp->nm_client)
		nfs_connect(nmp);

	auth = nfs_getauth(nmp, cred);
	if (!auth) {
		m_freem(mreq);
		return (EACCES);
	}
	bzero(&ext, sizeof(ext));
	ext.rc_auth = auth;

	ext.rc_feedback = nfs_feedback;
	ext.rc_feedback_arg = &nf;

	/*
	 * Use a conservative timeout for RPCs other than getattr,
	 * lookup, read or write.  The justification for doing "other"
	 * this way is that these RPCs happen so infrequently that
	 * timer est. would probably be stale.  Also, since many of
	 * these RPCs are non-idempotent, a conservative timeout is
	 * desired.
	 */
	timer = nfs_rto_timer(procnum);
	if (timer != NFS_DEFAULT_TIMER)
		ext.rc_timers = &nmp->nm_timers[timer - 1];
	else
		ext.rc_timers = NULL;

#ifdef KDTRACE_HOOKS
	if (dtrace_nfsclient_nfs23_start_probe != NULL) {
		uint32_t probe_id;
		int probe_procnum;

		if (nmp->nm_flag & NFSMNT_NFSV3) {
			probe_id = nfsclient_nfs3_start_probes[procnum];
			probe_procnum = procnum;
		} else {
			probe_id = nfsclient_nfs2_start_probes[procnum];
			probe_procnum = nfsv2_procid[procnum];
		}
		if (probe_id != 0)
			(dtrace_nfsclient_nfs23_start_probe)(probe_id, vp,
			    mreq, cred, probe_procnum);
	}
#endif

	nfsstats.rpcrequests++;
tryagain:
	timo.tv_sec = nmp->nm_timeo / NFS_HZ;
	timo.tv_usec = (nmp->nm_timeo * 1000000) / NFS_HZ;
	mrep = NULL;
	stat = CLNT_CALL_MBUF(nmp->nm_client, &ext,
	    (nmp->nm_flag & NFSMNT_NFSV3) ? procnum : nfsv2_procid[procnum],
	    mreq, &mrep, timo);

	/*
	 * If there was a successful reply and a tprintf msg.
	 * tprintf a response.
	 */
	if (stat == RPC_SUCCESS)
		error = 0;
	else if (stat == RPC_TIMEDOUT)
		error = ETIMEDOUT;
	else if (stat == RPC_VERSMISMATCH)
		error = EOPNOTSUPP;
	else if (stat == RPC_PROGVERSMISMATCH)
		error = EPROTONOSUPPORT;
	else
		error = EACCES;
	if (error)
		goto nfsmout;

	KASSERT(mrep != NULL, ("mrep shouldn't be NULL if no error\n"));

	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	error = nfs_realign(&mrep, M_DONTWAIT);
	if (error == ENOMEM) {
		m_freem(mrep);
		AUTH_DESTROY(auth);
		return (error);
	}

	md = mrep;
	dpos = mtod(mrep, caddr_t);
	tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
	if (*tl != 0) {
		error = fxdr_unsigned(int, *tl);
		if ((nmp->nm_flag & NFSMNT_NFSV3) &&
		    error == NFSERR_TRYLATER) {
			m_freem(mrep);
			error = 0;
			waituntil = time_second + nfs3_jukebox_delay;
			while (time_second < waituntil)
				(void)tsleep(&fake_wchan, PSOCK, "nqnfstry",
				    hz);
			goto tryagain;
		}

		/*
		 * If the File Handle was stale, invalidate the lookup
		 * cache, just in case.
		 */
		if (error == ESTALE)
			nfs_purgecache(vp);
		/*
		 * Skip wcc data on NFS errors for now.  NetApp filers
		 * return corrupt postop attrs in the wcc data for NFS
		 * err EROFS.  Not sure if they could return corrupt
		 * postop attrs for others errors.
		 */
		if ((nmp->nm_flag & NFSMNT_NFSV3) &&
		    !nfs_skip_wcc_data_onerr) {
			*mrp = mrep;
			*mdp = md;
			*dposp = dpos;
			error |= NFSERR_RETERR;
		} else
			m_freem(mrep);
		goto nfsmout;
	}
Example 18
/*
 * The timer handler for dummynet.  Time is computed in ticks, but
 * the code is tolerant to the actual rate at which this is called.
 * Once complete, the function reschedules itself for the next tick.
 */
void
dummynet_task(void *context, int pending)
{
	struct timeval t;
	struct mq q = { NULL, NULL }; /* queue to accumulate results */

	CURVNET_SET((struct vnet *)context);

	DN_BH_WLOCK();

	/* Update number of lost(coalesced) ticks. */
	tick_lost += pending - 1;

	getmicrouptime(&t);
	/* Last tick duration (usec). */
	tick_last = (t.tv_sec - dn_cfg.prev_t.tv_sec) * 1000000 +
	    (t.tv_usec - dn_cfg.prev_t.tv_usec);
	/* Last tick vs standard tick difference (usec). */
	tick_delta = (tick_last * hz - 1000000) / hz;
	/* Accumulated tick difference (usec). */
	tick_delta_sum += tick_delta;

	dn_cfg.prev_t = t;

	/*
	 * Adjust curr_time if the accumulated tick difference is
	 * greater than the 'standard' tick.  Since curr_time should
	 * be monotonically increasing, we do positive adjustments
	 * as required, and throttle curr_time in case of negative
	 * adjustment.
	 */
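	/*
	 * Worked example (illustrative): at hz = 1000 the standard tick
	 * is 1000 us, so four consecutive 1250 us ticks accumulate
	 * tick_delta_sum = 1000 us >= tick; the block below then bumps
	 * curr_time by one extra unit and carries the remainder over.
	 */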
	dn_cfg.curr_time++;
	if (tick_delta_sum - tick >= 0) {
		int diff = tick_delta_sum / tick;

		dn_cfg.curr_time += diff;
		tick_diff += diff;
		tick_delta_sum %= tick;
		tick_adjustment++;
	} else if (tick_delta_sum + tick <= 0) {
		dn_cfg.curr_time--;
		tick_diff--;
		tick_delta_sum += tick;
		tick_adjustment++;
	}

	/* serve pending events, accumulate in q */
	for (;;) {
		struct dn_id *p;    /* generic parameter to handler */

		if (dn_cfg.evheap.elements == 0 ||
		    DN_KEY_LT(dn_cfg.curr_time, HEAP_TOP(&dn_cfg.evheap)->key))
			break;
		p = HEAP_TOP(&dn_cfg.evheap)->object;
		heap_extract(&dn_cfg.evheap, NULL);

		if (p->type == DN_SCH_I) {
			serve_sched(&q, (struct dn_sch_inst *)p, dn_cfg.curr_time);
		} else { /* extracted a delay line */
			transmit_event(&q, (struct delay_line *)p, dn_cfg.curr_time);
		}
	}
	if (dn_cfg.expire && ++dn_cfg.expire_cycle >= dn_cfg.expire) {
		dn_cfg.expire_cycle = 0;
		dn_drain_scheduler();
		dn_drain_queue();
	}

	DN_BH_WUNLOCK();
	dn_reschedule();
	if (q.head != NULL)
		dummynet_send(q.head);
	CURVNET_RESTORE();
}
Example 19
/**
 * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
 * drivers. Implements calculation of exact vblank timestamps from
 * given drm_display_mode timings and current video scanout position
 * of a crtc. This can be called from within get_vblank_timestamp()
 * implementation of a kms driver to implement the actual timestamping.
 *
 * Should return timestamps conforming to the OML_sync_control OpenML
 * extension specification. The timestamp corresponds to the end of
 * the vblank interval, aka start of scanout of topmost-leftmost display
 * pixel in the following video frame.
 *
 * Requires support for optional dev->driver->get_scanout_position()
 * in kms driver, plus a bit of setup code to provide a drm_display_mode
 * that corresponds to the true scanout timing.
 *
 * The current implementation only handles standard video modes. It
 * returns as no operation if a doublescan or interlaced video mode is
 * active. Higher level code is expected to handle this.
 *
 * @dev: DRM device.
 * @crtc: Which crtc's vblank timestamp to retrieve.
 * @max_error: Desired maximum allowable error in timestamps (nanosecs).
 *             On return contains true maximum error of timestamp.
 * @vblank_time: Pointer to struct timeval which should receive the timestamp.
 * @flags: Flags to pass to driver:
 *         0 = Default.
 *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
 * @refcrtc: drm_crtc* of crtc which defines scanout timing.
 *
 * Returns negative value on error, failure or if not supported in current
 * video mode:
 *
 * -EINVAL   - Invalid crtc.
 * -EAGAIN   - Temporary unavailable, e.g., called before initial modeset.
 * -ENOTSUPP - Function not supported in current display mode.
 * -EIO      - Failed, e.g., due to failed scanout position query.
 *
 * Returns or'ed positive status flags on success:
 *
 * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping.
 * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
 *
 */
int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
					  int *max_error,
					  struct timeval *vblank_time,
					  unsigned flags,
					  struct drm_crtc *refcrtc)
{
	struct timeval stime, etime;
#ifdef notyet
	struct timeval mono_time_offset;
#endif
	struct drm_display_mode *mode;
	int vbl_status, vtotal, vdisplay;
	int vpos, hpos, i;
	s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
	bool invbl;

	if (crtc < 0 || crtc >= dev->num_crtcs) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	/* Scanout position query not supported? Should not happen. */
	if (!dev->driver->get_scanout_position) {
		DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
		return -EIO;
	}

	mode = &refcrtc->hwmode;
	vtotal = mode->crtc_vtotal;
	vdisplay = mode->crtc_vdisplay;

	/* Durations of frames, lines, pixels in nanoseconds. */
	framedur_ns = refcrtc->framedur_ns;
	linedur_ns  = refcrtc->linedur_ns;
	pixeldur_ns = refcrtc->pixeldur_ns;

	/* If mode timing undefined, just return as no-op:
	 * Happens during initial modesetting of a crtc.
	 */
	if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
		DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
		return -EAGAIN;
	}

	/* Get current scanout position with system timestamp.
	 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
	 * if single query takes longer than max_error nanoseconds.
	 *
	 * This guarantees a tight bound on maximum error if
	 * code gets preempted or delayed for some reason.
	 */
	for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
		/* Disable preemption to make it very likely to
		 * succeed in the first iteration even on PREEMPT_RT kernel.
		 */
#ifdef notyet
		preempt_disable();
#endif

		/* Get system timestamp before query. */
		getmicrouptime(&stime);

		/* Get vertical and horizontal scanout pos. vpos, hpos. */
		vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);

		/* Get system timestamp after query. */
		getmicrouptime(&etime);
#ifdef notyet
		if (!drm_timestamp_monotonic)
			mono_time_offset = ktime_get_monotonic_offset();

		preempt_enable();
#endif

		/* Return as no-op if scanout query unsupported or failed. */
		if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
			DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
				  crtc, vbl_status);
			return -EIO;
		}

		duration_ns = timeval_to_ns(&etime) - timeval_to_ns(&stime);

		/* Accept result with <  max_error nsecs timing uncertainty. */
		if (duration_ns <= (s64) *max_error)
			break;
	}

	/* Noisy system timing? */
	if (i == DRM_TIMESTAMP_MAXRETRIES) {
		DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
			  crtc, (int) duration_ns/1000, *max_error/1000, i);
	}

	/* Return upper bound of timestamp precision error. */
	*max_error = (int) duration_ns;

	/* Check if in vblank area:
	 * vpos is >=0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */
	invbl = vbl_status & DRM_SCANOUTPOS_INVBL;

	/* Convert scanout position into elapsed time at raw_time query
	 * since start of scanout at first display scanline. delta_ns
	 * can be negative if start of scanout hasn't happened yet.
	 */
	delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;

	/* Is vpos outside nominal vblank area, but less than
	 * 1/100 of a frame height away from start of vblank?
	 * If so, assume this isn't a massively delayed vblank
	 * interrupt, but a vblank interrupt that fired a few
	 * microseconds before true start of vblank. Compensate
	 * by adding a full frame duration to the final timestamp.
	 * Happens, e.g., on ATI R500, R600.
	 *
	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
	 */
	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
	    ((vdisplay - vpos) < vtotal / 100)) {
		delta_ns = delta_ns - framedur_ns;

		/* Signal this correction as "applied". */
		vbl_status |= 0x8;
	}

#ifdef notyet
	if (!drm_timestamp_monotonic)
		etime = ktime_sub(etime, mono_time_offset);
#endif

	/* Subtract time delta from raw timestamp to get final
	 * vblank_time timestamp for end of vblank.
	 */
	*vblank_time = ns_to_timeval(timeval_to_ns(&etime) - delta_ns);

	DPRINTF("crtc %d : v %d p(%d,%d)@ %lld.%ld -> %lld.%ld [e %d us, %d rep]\n",
		  crtc, (int)vbl_status, hpos, vpos,
		  (long long)etime.tv_sec, (long)etime.tv_usec,
		  (long long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
		  (int)duration_ns/1000, i);

	vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
	if (invbl)
		vbl_status |= DRM_VBLANKTIME_INVBL;

	return vbl_status;
}
Example 20
int
isc_sendPDU(isc_session_t *sp, pduq_t *pq)
{
     struct mbuf *mh, **mp;
     pdu_t		*pp = &pq->pdu;
     int		len, error;

     debug_called(8);
     /*
      | mbuf for the iSCSI header
      */
     MGETHDR(mh, MB_TRYWAIT, MT_DATA);
     mh->m_len = mh->m_pkthdr.len = sizeof(union ipdu_u);
     mh->m_pkthdr.rcvif = NULL;
     MH_ALIGN(mh, sizeof(union ipdu_u));
     bcopy(&pp->ipdu, mh->m_data, sizeof(union ipdu_u));
     mh->m_next = NULL;

     if(sp->hdrDigest)
	  pq->pdu.hdr_dig = sp->hdrDigest(&pp->ipdu, sizeof(union ipdu_u), 0);
     if(pp->ahs_len) {
          /*
	   | Add any AHS to the iSCSI hdr mbuf
           |  XXX Assert: (mh->m_pkthdr.len + pp->ahs_len) < MHLEN
	   */
          bcopy(pp->ahs, (mh->m_data + mh->m_len), pp->ahs_len);
          mh->m_len += pp->ahs_len;
          mh->m_pkthdr.len += pp->ahs_len;

	  if(sp->hdrDigest)
	       pq->pdu.hdr_dig = sp->hdrDigest(&pp->ahs, pp->ahs_len, pq->pdu.hdr_dig);
     }
     if(sp->hdrDigest) {
	  debug(2, "hdr_dig=%x", pq->pdu.hdr_dig);
          /*
	   | Add header digest to the iSCSI hdr mbuf
	   | XXX Assert: (mh->m_pkthdr.len + 4) < MHLEN
	   */
          bcopy(&pp->hdr_dig, (mh->m_data + mh->m_len), sizeof(int));
          mh->m_len += sizeof(int);
          mh->m_pkthdr.len += sizeof(int);
     }
     mp = &mh->m_next;
     if(pq->pdu.ds) {
          struct mbuf   *md;
          int           off = 0;

          len = pp->ds_len;
	  while(len & 03) // the specs say it must be int aligned
	       len++;
          while(len > 0) {
                int       l;

	       MGET(md, MB_TRYWAIT, MT_DATA);
	       pq->refcnt++;

                l = min(MCLBYTES, len);
	       debug(5, "setting ext_free(arg=%p len/l=%d/%d)", pq->buf, len, l);
	       md->m_ext.ext_buf = pq->buf;
	       md->m_ext.ext_free = ext_free;
	       md->m_ext.ext_ref = ext_ref;
	       md->m_ext.ext_arg = pq;
	       md->m_ext.ext_size = l;
	       md->m_flags |= M_EXT;
	       md->m_data = pp->ds + off;
	       md->m_len = l;
	       md->m_next = NULL;
	       mh->m_pkthdr.len += l;
	       *mp = md;
	       mp = &md->m_next;
	       len -= l;
	       off += l;
          }
     }
     if(sp->dataDigest) {
          struct mbuf   *me;

	  pp->ds_dig = sp->dataDigest(pp->ds, pp->ds_len, 0);

	  MGET(me, MB_TRYWAIT, MT_DATA);
          me->m_len = sizeof(int);
          MH_ALIGN(mh, sizeof(int));
          bcopy(&pp->ds_dig, me->m_data, sizeof(int));
          me->m_next = NULL;
          mh->m_pkthdr.len += sizeof(int);
          *mp = me;
     }
     if((error = sosend(sp->soc, NULL, NULL, mh, 0, 0, curthread)) != 0) {
	  sdebug(3, "error=%d", error);
	  return error;
     }
     sp->stats.nsent++;
     getmicrouptime(&sp->stats.t_sent);
     return 0;
}
Example 21
/*
 * Initialize the vnode associated with a new inode, handle aliased
 * vnodes.
 */
int
ufs_vinit(struct mount *mntp, struct vops *specops, struct vops *fifoops,
    struct vnode **vpp)
{
	struct inode *ip;
	struct vnode *vp, *nvp;
	struct timeval mtv;

	vp = *vpp;
	ip = VTOI(vp);
	switch(vp->v_type = IFTOVT(DIP(ip, mode))) {
	case VCHR:
	case VBLK:
		vp->v_op = specops;
		if ((nvp = checkalias(vp, DIP(ip, rdev), mntp)) != NULL) {
			/*
			 * Discard unneeded vnode, but save its inode.
			 * Note that the lock is carried over in the inode
			 * to the replacement vnode.
			 */
			nvp->v_data = vp->v_data;
			vp->v_data = NULL;
			vp->v_op = &spec_vops;
#ifdef VFSLCKDEBUG
			vp->v_flag &= ~VLOCKSWORK;
#endif
			vrele(vp);
			vgone(vp);
			/*
			 * Reinitialize aliased inode.
			 */
			vp = nvp;
			ip->i_vnode = vp;
		}
		break;
	case VFIFO:
#ifdef FIFO
		vp->v_op = fifoops;
		break;
#else
		return (EOPNOTSUPP);
#endif
	case VNON:
	case VBAD:
	case VSOCK:
	case VLNK:
	case VDIR:
	case VREG:
		break;
	}
	if (ip->i_number == ROOTINO)
                vp->v_flag |= VROOT;
	/*
	 * Initialize modrev times: seconds in the high 32 bits,
	 * microseconds scaled by 4294 (about 2^32 / 10^6) in the low.
	 */
	getmicrouptime(&mtv);
	SETHIGH(ip->i_modrev, mtv.tv_sec);
	SETLOW(ip->i_modrev, mtv.tv_usec * 4294);
	*vpp = vp;
	return (0);
}
Example 22
ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
{
#ifndef ACPI_NO_SEMAPHORES
    ACPI_STATUS			result;
    struct acpi_semaphore	*as = (struct acpi_semaphore *)Handle;
    int				rv, tmo;
    struct timeval		timeouttv, currenttv, timelefttv;
    AS_LOCK_DECL;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
	return_ACPI_STATUS (AE_BAD_PARAMETER);

    if (cold)
	return_ACPI_STATUS (AE_OK);

#if 0
    if (as->as_units < Units && as->as_timeouts > 10) {
	kprintf("%s: semaphore %p too many timeouts, resetting\n", __func__, as);
	AS_LOCK(as);
	as->as_units = as->as_maxunits;
	if (as->as_pendings)
	    as->as_resetting = 1;
	as->as_timeouts = 0;
	wakeup(as);
	AS_UNLOCK(as);
	return_ACPI_STATUS (AE_TIME);
    }

    if (as->as_resetting)
	return_ACPI_STATUS (AE_TIME);
#endif

    /* a timeout of ACPI_WAIT_FOREVER means "forever" */
    if (Timeout == ACPI_WAIT_FOREVER) {
	tmo = 0;
	timeouttv.tv_sec = ((0xffff/1000) + 1);	/* cf. ACPI spec */
	timeouttv.tv_usec = 0;
    } else {
	/* compute timeout using microseconds per tick */
	tmo = (Timeout * 1000) / (1000000 / hz);
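	/* e.g., Timeout = 50 ms at hz = 100: tmo = 50000 / 10000 = 5 ticks */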
	if (tmo <= 0)
	    tmo = 1;
	timeouttv.tv_sec  = Timeout / 1000;
	timeouttv.tv_usec = (Timeout % 1000) * 1000;
    }

    /* calculate timeout value in timeval */
    getmicrouptime(&currenttv);
    timevaladd(&timeouttv, &currenttv);

    AS_LOCK(as);
    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	"get %d units from semaphore %p (has %d), timeout %d\n",
	Units, as, as->as_units, Timeout));
    for (;;) {
	if (as->as_maxunits == ACPI_NO_UNIT_LIMIT) {
	    result = AE_OK;
	    break;
	}
	if (as->as_units >= Units) {
	    as->as_units -= Units;
	    result = AE_OK;
	    break;
	}

	/* limit the number of pending threads */
	if (as->as_pendings >= ACPI_SEMAPHORES_MAX_PENDING) {
	    result = AE_TIME;
	    break;
	}

	/* if a timeout value of zero is specified, return immediately */
	if (Timeout == 0) {
	    result = AE_TIME;
	    break;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "semaphore blocked, calling ssleep(%p, %p, %d, \"acsem\", %d)\n",
	    as, &as->as_spin, PCATCH, tmo));

	as->as_pendings++;

	if (acpi_semaphore_debug) {
	    kprintf("%s: Sleep %jd, pending %jd, semaphore %p, thread %jd\n",
		__func__, (intmax_t)Timeout,
		(intmax_t)as->as_pendings, as,
		(intmax_t)AcpiOsGetThreadId());
	}

	rv = ssleep(as, &as->as_spin, PCATCH, "acsem", tmo);

	as->as_pendings--;

#if 0
	if (as->as_resetting) {
	    /* semaphore reset, return immediately */
	    if (as->as_pendings == 0) {
		as->as_resetting = 0;
	    }
	    result = AE_TIME;
	    break;
	}
#endif

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "ssleep(%d) returned %d\n", tmo, rv));
	if (rv == EWOULDBLOCK) {
	    result = AE_TIME;
	    break;
	}

	/* check whether we have already waited long enough */
	timelefttv = timeouttv;
	getmicrouptime(&currenttv);
	timevalsub(&timelefttv, &currenttv);
	if (timelefttv.tv_sec < 0) {
	    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "await semaphore %p timeout\n",
		as));
	    result = AE_TIME;
	    break;
	}

	/* adjust timeout for the next sleep */
	tmo = (timelefttv.tv_sec * 1000000 + timelefttv.tv_usec) /
	    (1000000 / hz);
	if (tmo <= 0)
	    tmo = 1;

	if (acpi_semaphore_debug) {
	    kprintf("%s: Wakeup timeleft(%ju, %ju), tmo %ju, sem %p, thread %jd\n",
		__func__,
		(intmax_t)timelefttv.tv_sec, (intmax_t)timelefttv.tv_usec,
		(intmax_t)tmo, as, (intmax_t)AcpiOsGetThreadId());
	}
    }

    if (acpi_semaphore_debug) {
	if (result == AE_TIME && Timeout > 0) {
	    kprintf("%s: Timeout %d, pending %d, semaphore %p\n",
		__func__, Timeout, as->as_pendings, as);
	}
	if (ACPI_SUCCESS(result) &&
	    (as->as_timeouts > 0 || as->as_pendings > 0))
	{
	    kprintf("%s: Acquire %d, units %d, pending %d, sem %p, thread %jd\n",
		__func__, Units, as->as_units, as->as_pendings, as,
		(intmax_t)AcpiOsGetThreadId());
	}
    }

    if (result == AE_TIME)
	as->as_timeouts++;
    else
	as->as_timeouts = 0;

    AS_UNLOCK(as);
    return_ACPI_STATUS (result);
#else
    return_ACPI_STATUS (AE_OK);
#endif /* !ACPI_NO_SEMAPHORES */
}
Example 23
/* send everything in-context since it's just a matter of mem-to-mem copy */
static void
shmif_start(struct ifnet *ifp)
{
	struct shmif_sc *sc = ifp->if_softc;
	struct shmif_mem *busmem = sc->sc_busmem;
	struct mbuf *m, *m0;
	uint32_t dataoff;
	uint32_t pktsize, pktwrote;
	bool wrote = false;
	bool wrap;

	ifp->if_flags |= IFF_OACTIVE;

	for (;;) {
		struct shmif_pkthdr sp;
		struct timeval tv;

		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL) {
			break;
		}

		pktsize = 0;
		for (m = m0; m != NULL; m = m->m_next) {
			pktsize += m->m_len;
		}
		KASSERT(pktsize <= ETHERMTU + ETHER_HDR_LEN);

		getmicrouptime(&tv);
		sp.sp_len = pktsize;
		sp.sp_sec = tv.tv_sec;
		sp.sp_usec = tv.tv_usec;

		bpf_mtap(ifp, m0);

		shmif_lockbus(busmem);
		KASSERT(busmem->shm_magic == SHMIF_MAGIC);
		busmem->shm_last = shmif_nextpktoff(busmem, busmem->shm_last);

		wrap = false;
		dataoff = shmif_buswrite(busmem,
		    busmem->shm_last, &sp, sizeof(sp), &wrap);
		pktwrote = 0;
		for (m = m0; m != NULL; m = m->m_next) {
			pktwrote += m->m_len;
			dataoff = shmif_buswrite(busmem, dataoff,
			    mtod(m, void *), m->m_len, &wrap);
		}
		KASSERT(pktwrote == pktsize);
		if (wrap) {
			busmem->shm_gen++;
			DPRINTF(("bus generation now %" PRIu64 "\n",
			    busmem->shm_gen));
		}
		shmif_unlockbus(busmem);

		m_freem(m0);
		wrote = true;

		DPRINTF(("shmif_start: send %d bytes at off %d\n",
		    pktsize, busmem->shm_last));
	}

	ifp->if_flags &= ~IFF_OACTIVE;

	/* wakeup? */
	if (wrote) {
		dowakeup(sc);
	}
}