Example #1
void
elink_cb_dbg(struct bxe_softc *sc,
             char             *fmt)
{
    char buf[128];
    if (__predict_false(sc->debug & DBG_PHY)) {
        snprintf(buf, sizeof(buf), "ELINK: %s", fmt);
        device_printf(sc->dev, "%s", buf);
    }
}
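All of the examples in this collection rely on the same branch-prediction hints. For reference, a minimal sketch of how __predict_false() and __predict_true() are commonly defined, modelled on the BSD <sys/cdefs.h> wrappers around GCC's __builtin_expect(); the exact version guards vary by system:

/* Sketch of the usual definitions behind the hint macros used in
 * these examples.  Non-GCC-compatible compilers simply get no-ops. */
#if defined(__GNUC__)
#define	__predict_true(exp)	__builtin_expect((exp) != 0, 1)
#define	__predict_false(exp)	__builtin_expect((exp) != 0, 0)
#else
#define	__predict_true(exp)	(exp)
#define	__predict_false(exp)	(exp)
#endif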
Example #2
void
sfxge_rx_qrefill(struct sfxge_rxq *rxq)
{

	if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED))
		return;

	/* Make sure the queue is full */
	sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_TRUE);
}
Example #3
/*
 * Runtime implementation of __builtin____strcpy_chk.
 *
 * See
 *   http://gcc.gnu.org/onlinedocs/gcc/Object-Size-Checking.html
 *   http://gcc.gnu.org/ml/gcc-patches/2004-09/msg02055.html
 * for details.
 *
 * This strcpy check is called if _FORTIFY_SOURCE is defined and
 * greater than 0.
 */
extern "C" char *__strcpy_chk (char *dest, const char *src, size_t dest_len) {
    // TODO: optimize so we don't scan src twice.
    size_t src_len = strlen(src) + 1;
    if (__predict_false(src_len > dest_len)) {
        __fortify_chk_fail("strcpy prevented write past end of buffer",
                             BIONIC_EVENT_STRCPY_BUFFER_OVERFLOW);
    }

    return strcpy(dest, src);
}
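For context, a hypothetical caller of the fortified path (function name and buffer size are assumptions for illustration):

#include <stdio.h>
#include <string.h>

/* When this is compiled with -D_FORTIFY_SOURCE=2 and optimization,
 * the strcpy() below is rewritten into roughly
 * __builtin___strcpy_chk(name, src, sizeof(name)), so an over-long
 * src reaches the runtime helper above and aborts the process
 * instead of overflowing 'name'. */
void
print_name(const char *src)
{
    char name[16];

    strcpy(name, src);
    printf("hello, %s\n", name);
}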
Example #4
void
netbsd32_syscall(struct trapframe *frame)
{
	char *params;
	const struct sysent *callp;
	struct proc *p;
	struct lwp *l;
	int error;
	int i;
	register32_t code, args[2 + SYS_MAXSYSARGS];
	register_t rval[2];
	register_t args64[SYS_MAXSYSARGS];

	l = curlwp;
	p = l->l_proc;

	code = frame->tf_rax & (SYS_NSYSENT - 1);
	callp = p->p_emul->e_sysent + code;

	LWP_CACHE_CREDS(l, p);

	SYSCALL_COUNT(syscall_counts, code);
	SYSCALL_TIME_SYS_ENTRY(l, syscall_times, code);

	params = (char *)frame->tf_rsp + sizeof(int);

	if (callp->sy_argsize) {
		error = copyin(params, args, callp->sy_argsize);
		if (__predict_false(error != 0))
			goto bad;
		/* Recover 'code' - not in a register */
		code = frame->tf_rax & (SYS_NSYSENT - 1);
	}

	if (__predict_false(p->p_trace_enabled)
	    && !__predict_false(callp->sy_flags & SYCALL_INDIRECT)) {
		int narg = callp->sy_argsize >> 2;
		for (i = 0; i < narg; i++)
			args64[i] = args[i];
		error = trace_enter(code, args64, narg);
		if (__predict_false(error != 0))
			goto out;
	}
Example #5
/*
 * callout_halt:
 *
 *	Cancel a pending callout.  If in-flight, block until it completes.
 *	May not be called from a hard interrupt handler.  If the callout
 * 	can take locks, the caller of callout_halt() must not hold any of
 *	those locks, otherwise the two could deadlock.  If 'interlock' is
 *	non-NULL and we must wait for the callout to complete, it will be
 *	released and re-acquired before returning.
 */
bool
callout_halt(callout_t *cs, void *interlock)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	struct lwp *l;
	kmutex_t *lock, *relock;
	bool expired;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(!cpu_intr_p());

	lock = callout_lock(c);
	relock = NULL;

	expired = ((c->c_flags & CALLOUT_FIRED) != 0);
	if ((c->c_flags & CALLOUT_PENDING) != 0)
		CIRCQ_REMOVE(&c->c_list);
	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);

	l = curlwp;
	for (;;) {
		cc = c->c_cpu;
		if (__predict_true(cc->cc_active != c || cc->cc_lwp == l))
			break;
		if (interlock != NULL) {
			/*
			 * Avoid potential scheduler lock order problems by
			 * dropping the interlock without the callout lock
			 * held.
			 */
			mutex_spin_exit(lock);
			mutex_exit(interlock);
			relock = interlock;
			interlock = NULL;
		} else {
			/* XXX Better to do priority inheritance. */
			KASSERT(l->l_wchan == NULL);
			cc->cc_nwait++;
			cc->cc_ev_block.ev_count++;
			l->l_kpriority = true;
			sleepq_enter(&cc->cc_sleepq, l, cc->cc_lock);
			sleepq_enqueue(&cc->cc_sleepq, cc, "callout",
			    &sleep_syncobj);
			sleepq_block(0, false);
		}
		lock = callout_lock(c);
	}

	mutex_spin_exit(lock);
	if (__predict_false(relock != NULL))
		mutex_enter(relock);

	return expired;
}
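A hypothetical detach-path sketch of the interlock convention the comment describes: when the callout handler takes the driver lock, that same lock is passed as 'interlock' so callout_halt() can drop and re-take it while waiting for an in-flight handler (struct mydrv_softc and its fields are assumptions):

struct mydrv_softc {			/* hypothetical driver state */
	kmutex_t	sc_lock;
	callout_t	sc_tick_ch;
	bool		sc_stopping;
};

static void
mydrv_stop(struct mydrv_softc *sc)
{
	mutex_enter(&sc->sc_lock);
	sc->sc_stopping = true;
	/* The tick handler takes sc_lock too, so hand it over as the
	 * interlock: callout_halt() releases and re-acquires it while
	 * waiting, avoiding the deadlock described above. */
	callout_halt(&sc->sc_tick_ch, &sc->sc_lock);
	mutex_exit(&sc->sc_lock);
}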
Example #6
/*
 * uvm_emap_produce: increment emap generation counter.
 *
 * => pmap updates must be globally visible.
 * => caller must have already entered mappings.
 * => may be called from both interrupt and thread context.
 */
u_int
uvm_emap_produce(void)
{
	u_int gen;
again:
	gen = atomic_inc_uint_nv(&uvm_emap_gen);
	if (__predict_false(gen == UVM_EMAP_INACTIVE)) {
		goto again;
	}
	return gen;
}
Example #7
void
rw_exit_read(struct rwlock *rwl)
{
	unsigned long owner = rwl->rwl_owner;

	rw_assert_rdlock(rwl);

	if (__predict_false((owner & RWLOCK_WAIT) ||
	    rw_cas(&rwl->rwl_owner, owner, owner - RWLOCK_READ_INCR)))
		rw_exit(rwl);
}
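The fast path above only holds when there are no waiters and the compare-and-swap that drops one reader reference wins; otherwise rw_exit() takes the slow path. A minimal sketch of the CAS helper being assumed here (hypothetical; the same helper is assumed by rw_exit_write() in Example #14):

static inline int
rw_cas(volatile unsigned long *p, unsigned long o, unsigned long n)
{
	/* Non-zero means the swap lost a race, sending the caller
	 * down the slow path. */
	return (atomic_cas_ulong(p, o, n) != o);
}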
Example #8
void
amap_wipeout(struct vm_amap *amap)
{
	int lcv, slot;
	struct vm_anon *anon;
	UVMHIST_FUNC("amap_wipeout"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(amap=0x%x)", amap, 0,0,0);

	KASSERT(amap->am_ref == 0);

	if (__predict_false((amap->am_flags & AMAP_SWAPOFF) != 0)) {
		/*
		 * amap_swap_off will call us again.
		 */
		amap_unlock(amap);
		return;
	}
	amap_list_remove(amap);
	amap_unlock(amap);

	for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
		int refs;

		slot = amap->am_slots[lcv];
		anon = amap->am_anon[slot];

		if (anon == NULL || anon->an_ref == 0)
			panic("amap_wipeout: corrupt amap");

		mutex_enter(&anon->an_lock);
		UVMHIST_LOG(maphist,"  processing anon 0x%x, ref=%d", anon,
		    anon->an_ref, 0, 0);
		refs = --anon->an_ref;
		mutex_exit(&anon->an_lock);
		if (refs == 0) {

			/*
			 * we had the last reference to a vm_anon. free it.
			 */
			uvm_anfree(anon);
		}

		if (curlwp->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt();
	}

	/*
	 * now we free the map
	 */

	amap->am_nused = 0;
	amap_free(amap);	/* will unlock and free amap */
	UVMHIST_LOG(maphist,"<- done!", 0,0,0,0);
}
Example #9
int
do_sys_connect(struct lwp *l, int fd, struct mbuf *nam)
{
	struct socket	*so;
	int		error;
	int		interrupted = 0;

	if ((error = fd_getsock(fd, &so)) != 0) {
		m_freem(nam);
		return (error);
	}
	solock(so);
	MCLAIM(nam, so->so_mowner);
	if ((so->so_state & SS_ISCONNECTING) != 0) {
		error = EALREADY;
		goto out;
	}

	error = soconnect(so, nam, l);
	if (error)
		goto bad;
	if ((so->so_state & (SS_NBIO|SS_ISCONNECTING)) ==
	    (SS_NBIO|SS_ISCONNECTING)) {
		error = EINPROGRESS;
		goto out;
	}
	while ((so->so_state & SS_ISCONNECTING) != 0 && so->so_error == 0) {
		error = sowait(so, true, 0);
		if (__predict_false((so->so_state & SS_ISABORTING) != 0)) {
			error = EPIPE;
			interrupted = 1;
			break;
		}
		if (error) {
			if (error == EINTR || error == ERESTART)
				interrupted = 1;
			break;
		}
	}
	if (error == 0) {
		error = so->so_error;
		so->so_error = 0;
	}
 bad:
	if (!interrupted)
		so->so_state &= ~SS_ISCONNECTING;
	if (error == ERESTART)
		error = EINTR;
 out:
	sounlock(so);
	fd_putfile(fd);
	m_freem(nam);
	return error;
}
Example #10
static int
wake_waiting(void *arg)
{
	if (__predict_false(xenstored_ready == 0 && xendomain_is_dom0())) {
		xenstored_ready = 1;
		wakeup(&xenstored_ready);
	} 

	wakeup(&xenstore_interface);
	return 1;
}
Example #11
/*
 * nc_jump: helper function to jump to specified line (32 bit word)
 * in the n-code, fetch a word, and update the instruction pointer.
 */
static inline const void *
nc_jump(const void *iptr, int n, u_int *lcount)
{

	/* Detect infinite loops. */
	if (__predict_false(*lcount == 0)) {
		return NULL;
	}
	*lcount = *lcount - 1;
	return (const uint32_t *)iptr + n;
}
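A hypothetical interpreter-loop fragment showing the intended use: each packet gets a fixed jump budget, and a NULL return from nc_jump() aborts processing instead of spinning on malicious n-code (the surrounding names and the budget value are assumptions):

	u_int lcount = 128;		/* assumed per-packet jump budget */

	/* ...inside the n-code dispatch loop... */
	iptr = nc_jump(iptr, jump_words, &lcount);
	if (__predict_false(iptr == NULL))
		return -1;		/* too many jumps: fail the program */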
Example #12
/*
 * _bus_dmamem_alloc_range_common --
 *	Allocate physical memory from the specified physical address range.
 */
int
_bus_dmamem_alloc_range_common(bus_dma_tag_t t,
			       bus_size_t size,
			       bus_size_t alignment,
			       bus_size_t boundary,
			       bus_dma_segment_t *segs,
			       int nsegs,
			       int *rsegs,
			       int flags,
			       paddr_t low,
			       paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/* Allocate pages from the VM system. */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
				&mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (__predict_false(error != 0))
		return (error);
	
	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM system.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
		KASSERT(curaddr >= low);
		KASSERT(curaddr < high);
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
Example #13
struct sockaddr *
sockaddr_copy(struct sockaddr *dst, socklen_t socklen,
    const struct sockaddr *src)
{
	if (__predict_false(socklen < src->sa_len)) {
		panic("%s: source too long, %d < %d bytes", __func__, socklen,
		    src->sa_len);
	}
	memcpy(dst, src, src->sa_len);
	return dst;
}
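A small hypothetical caller: copying into a struct sockaddr_storage, which is by definition large enough for any supported address family, so the panic branch above cannot fire.

	struct sockaddr_storage ss;

	/* sizeof(ss) >= src->sa_len for every supported family. */
	sockaddr_copy((struct sockaddr *)&ss, sizeof(ss), src);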
Example #14
void
rw_exit_write(struct rwlock *rwl)
{
	unsigned long owner = rwl->rwl_owner;

	rw_assert_wrlock(rwl);

	if (__predict_false((owner & RWLOCK_WAIT) ||
	    rw_cas(&rwl->rwl_owner, owner, 0)))
		rw_exit(rwl);
}
Example #15
/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage1(struct vm_map *map, struct uvm_object *obj,
    boolean_t waitok)
{
#if defined(__HAVE_PMAP_DIRECT)
	struct vm_page *pg;
	vaddr_t va;

 again:
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = pmap_map_direct(pg);
	if (__predict_false(va == 0))
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splvm
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splvm in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splvm();
	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
	splx(s);
	return (va);
#endif /* __HAVE_PMAP_DIRECT */
}
Example #16
static int
wake_waiting(void *arg)
{
	if (__predict_false(xenstored_ready == 0 &&
	    xen_start_info.flags & SIF_INITDOMAIN)) {
		xenstored_ready = 1; 
		wakeup(&xenstored_ready);
	} 

	wakeup(&xenstore_interface);
	return 1;
}
Example #17
static inline struct rumpcpu *
getnextcpu(void)
{
    unsigned newcpu;

    newcpu = atomic_inc_uint_nv(&nextcpu);
    if (__predict_false(ncpu > UINT_MAX/2))
        atomic_and_uint(&nextcpu, 0);
    newcpu = newcpu % ncpu;

    return &rcpu_storage[newcpu];
}
Example #18
void
elink_cb_dbg1(struct bxe_softc *sc,
              char             *fmt,
              uint32_t         arg1)
{
    char tmp[128], buf[128];
    if (__predict_false(sc->debug & DBG_PHY)) {
        snprintf(tmp, sizeof(tmp), "ELINK: %s", fmt);
        snprintf(buf, sizeof(buf), tmp, arg1);
        device_printf(sc->dev, "%s", buf);
    }
}
Example #19
int
octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *sc, struct mbuf *m0,
    uint64_t *gbuf, int *rsegs)
{
	struct mbuf *m;
	int segs = 0;
	uint32_t laddr, rlen, nlen;

	for (m = m0; m != NULL; m = m->m_next) {

		if (__predict_false(m->m_len == 0))
			continue;

#if 0	
		OCTEON_ETH_KASSERT(((uint32_t)m->m_data & (PAGE_SIZE - 1))
		   == (kvtophys((vaddr_t)m->m_data) & (PAGE_SIZE - 1)));
#endif

		/*
		 * aligned 4k
		 */
		laddr = (uintptr_t)m->m_data & (PAGE_SIZE - 1);

		if (laddr + m->m_len > PAGE_SIZE) {
			/* XXX */
			rlen = PAGE_SIZE - laddr;
			nlen = m->m_len - rlen;
			*(gbuf + segs) = octeon_eth_send_makecmd_w1(rlen,
			    KVTOPHYS(m->m_data));
			segs++;
			if (segs > 63) {
				return 1;
			}
			/* XXX */
		} else {
			rlen = 0;
			nlen = m->m_len;
		}

		*(gbuf + segs) = octeon_eth_send_makecmd_w1(nlen,
		    KVTOPHYS((caddr_t)m->m_data + rlen));
		segs++;
		if (segs > 63) {
			return 1;
		}
	}

	OCTEON_ETH_KASSERT(m == NULL);

	*rsegs = segs;

	return 0;
}
Example #20
static inline int
read_timer(struct s3c24x0_softc *sc)
{
	int count;

	do {
		count = bus_space_read_2(sc->sc_sx.sc_iot, sc->sc_timer_ioh,
		    TIMER_TCNTO(4));
	} while ( __predict_false(count > timer4_reload_value) );

	return count;
}
Example #21
/*
 * Allocate icl_pdu with empty BHS to fill up by the caller.
 */
static struct icl_pdu *
icl_cxgbei_conn_new_pdu(struct icl_conn *ic, int flags)
{
	struct icl_pdu *ip;

	ip = icl_cxgbei_new_pdu(flags);
	if (__predict_false(ip == NULL))
		return (NULL);
	icl_cxgbei_new_pdu_set_conn(ip, ic);

	return (ip);
}
Example #22
File: xbd.c Project: MarginC/kame
static void
xbdresume(void)
{
	struct xbdreq *pxr, *xr;
	struct xbd_softc *xs;
	struct buf *bp;

	while ((pxr = SIMPLEQ_FIRST(&xbdr_suspended)) != NULL) {
		DPRINTF(XBDB_IO, ("xbdstart: resuming xbdreq %p for bp %p\n",
		    pxr, pxr->xr_bp));
		bp = pxr->xr_bp;
		xs = getxbd_softc(bp->b_dev);
		if (xs == NULL || xs->sc_shutdown) {
			bp->b_flags |= B_ERROR;
			bp->b_error = EIO;
		}
		if (bp->b_flags & B_ERROR) {
			pxr->xr_bdone -= pxr->xr_bqueue;
			pxr->xr_bqueue = 0;
			if (pxr->xr_bdone == 0) {
				bp->b_resid = bp->b_bcount;
				if (pxr->xr_aligned)
					unmap_align(pxr);
				PUT_XBDREQ(pxr);
				if (xs)
				{
					disk_unbusy(&xs->sc_dksc.sc_dkdev,
					    (bp->b_bcount - bp->b_resid),
					    (bp->b_flags & B_READ));
#if NRND > 0
					rnd_add_uint32(&xs->rnd_source,
					    bp->b_blkno);
#endif
				}
				biodone(bp);
			}
			continue;
		}
		while (__predict_true(pxr->xr_bqueue > 0)) {
			GET_XBDREQ(xr);
			if (__predict_false(xr == NULL))
				goto out;
			xr->xr_parent = pxr;
			fill_ring(xr);
		}
		DPRINTF(XBDB_IO, ("xbdstart: resumed xbdreq %p for bp %p\n",
		    pxr, bp));
		SIMPLEQ_REMOVE_HEAD(&xbdr_suspended, xr_suspended);
	}

 out:
	return;
}
Example #23
int
gnttab_alloc_grant_references(uint16_t count, grant_ref_t *head)
{
	int ref, error;

	error = get_free_entries(count, &ref);
	if (__predict_false(error))
		return (error);

	*head = ref;
	return (0);
}
Example #24
void
_thr_cancel_enter2(struct pthread *curthread, int maycancel)
{
	curthread->cancel_point = 1;
	if (__predict_false(SHOULD_CANCEL(curthread) &&
	    !THR_IN_CRITICAL(curthread))) {
		if (!maycancel)
			thr_wake(curthread->tid);
		else
			_pthread_exit(PTHREAD_CANCELED);
	}
}
Example #25
/*
 * nbuf_ensure_contig: check whether the specified length from the current
 * point in the nbuf is contiguous.  If not, rearrange the chain to be so.
 *
 * => Returns pointer to the data at the current offset in the buffer.
 * => Returns NULL on failure and nbuf becomes invalid.
 */
void *
nbuf_ensure_contig(nbuf_t *nbuf, size_t len)
{
	const struct mbuf * const n = nbuf->nb_mbuf;
	const size_t off = (uintptr_t)nbuf->nb_nptr - mtod(n, uintptr_t);

	KASSERT(off <= m_buflen(n));

	if (__predict_false(m_buflen(n) < (off + len))) {
		struct mbuf *m = nbuf->nb_mbuf0;
		const size_t foff = nbuf_offset(nbuf);
		const size_t plen = m_length(m);
		const size_t mlen = m_buflen(m);
		size_t target;
		bool success;

		//npf_stats_inc(npf, NPF_STAT_NBUF_NONCONTIG);

		/* Attempt to round-up to NBUF_ENSURE_ALIGN bytes. */
		if ((target = NBUF_ENSURE_ROUNDUP(foff + len)) > plen) {
			target = foff + len;
		}

		/* Rearrange the chain to be contiguous. */
		KASSERT(m_flags_p(m, M_PKTHDR));
		success = m_ensure_contig(&m, target);
		KASSERT(m != NULL);

		/* If no change in the chain: return what we have. */
		if (m == nbuf->nb_mbuf0 && m_buflen(m) == mlen) {
			return success ? nbuf->nb_nptr : NULL;
		}

		/*
		 * The mbuf chain was re-arranged.  Update the pointers
		 * accordingly and indicate that the references to the data
		 * might need a reset.
		 */
		KASSERT(m_flags_p(m, M_PKTHDR));
		nbuf->nb_mbuf0 = m;
		nbuf->nb_mbuf = m;

		KASSERT(foff < m_buflen(m) && foff < m_length(m));
		nbuf->nb_nptr = mtod(m, uint8_t *) + foff;
		nbuf->nb_flags |= NBUF_DATAREF_RESET;

		if (!success) {
			//npf_stats_inc(npf, NPF_STAT_NBUF_CONTIG_FAIL);
			return NULL;
		}
	}
	return nbuf->nb_nptr;
}
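A hypothetical caller sketch in the style of NPF's protocol inspectors: require that the whole TCP header be contiguous before dereferencing it, and treat a NULL return as a packet that must be dropped (the surrounding names are assumptions):

	struct tcphdr *th;

	th = nbuf_ensure_contig(nbuf, sizeof(struct tcphdr));
	if (__predict_false(th == NULL)) {
		/* nbuf is now invalid; drop the packet. */
		return false;
	}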
Example #26
static struct mbuf *
rtwn_report_intr(struct rtwn_usb_softc *uc, struct usb_xfer *xfer,
    struct rtwn_data *data)
{
	struct rtwn_softc *sc = &uc->uc_sc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t *buf;
	int len;

	usbd_xfer_status(xfer, &len, NULL, NULL, NULL);

	if (__predict_false(len < sizeof(struct r92c_rx_stat))) {
		counter_u64_add(ic->ic_ierrors, 1);
		return (NULL);
	}

	buf = data->buf;
	switch (rtwn_classify_intr(sc, buf, len)) {
	case RTWN_RX_DATA:
		return (rtwn_rxeof(sc, buf, len));
	case RTWN_RX_TX_REPORT:
		if (sc->sc_ratectl != RTWN_RATECTL_NET80211) {
			/* shouldn't happen */
			device_printf(sc->sc_dev,
			    "%s called while ratectl = %d!\n",
			    __func__, sc->sc_ratectl);
			break;
		}

		RTWN_NT_LOCK(sc);
		rtwn_handle_tx_report(sc, buf, len);
		RTWN_NT_UNLOCK(sc);

#ifdef IEEE80211_SUPPORT_SUPERG
		/*
		 * NB: this will be executed only when 'report' bit is set.
		 */
		if (sc->sc_tx_n_active > 0 && --sc->sc_tx_n_active <= 1)
			rtwn_cmd_sleepable(sc, NULL, 0, rtwn_ff_flush_all);
#endif
		break;
	case RTWN_RX_OTHER:
		rtwn_handle_c2h_report(sc, buf, len);
		break;
	default:
		/* NOTREACHED */
		KASSERT(0, ("unknown Rx classification code"));
		break;
	}

	return (NULL);
}
Example #27
int
move_panel(PANEL *p, int y, int x)
{
	int oldy, oldx;

	if (__predict_false(p == NULL))
		return ERR;

	getbegyx(p->win, oldy, oldx);
	if (__predict_false(y == oldy && x == oldx))
		return OK;

	if (!PANEL_HIDDEN(p)) {
		PANEL *other;

		/* touch exposed areas at the old location now */
		FOREACH_PANEL (other) {
			if (other != p) {
				touchoverlap(p->win, other->win);
			}
		}
	}
Example #28
int
reallocarr(void *ptr, size_t number, size_t size)
{
	int saved_errno, result;
	void *optr;
	void *nptr;

	saved_errno = errno;
	memcpy(&optr, ptr, sizeof(ptr));
	if (number == 0 || size == 0) {
		free(optr);
		nptr = NULL;
		memcpy(ptr, &nptr, sizeof(ptr));
		errno = saved_errno;
		return 0;
	}

	/*
	 * Try to avoid division here.
	 *
	 * It isn't possible to overflow during multiplication if neither
	 * operand uses any of the most significant half of the bits.
	 */
	if (__predict_false((number|size) >= SQRT_SIZE_MAX &&
	                    number > SIZE_MAX / size)) {
		errno = saved_errno;
		return EOVERFLOW;
	}

	nptr = realloc(optr, number * size);
	if (__predict_false(nptr == NULL)) {
		result = errno;
	} else {
		result = 0;
		memcpy(ptr, &nptr, sizeof(ptr));
	}
	errno = saved_errno;
	return result;
}
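A minimal usage sketch, assuming a NetBSD-style libc where reallocarr() is declared in <stdlib.h>: growing an array of structs without the multiplication overflow a bare realloc(n * sizeof(*arr)) would risk. Note that reallocarr() returns an errno value rather than setting errno.

#include <err.h>
#include <stdlib.h>
#include <string.h>

struct item { int key; int val; };	/* hypothetical element type */

static struct item *
grow_items(struct item *arr, size_t n)
{
	int error = reallocarr(&arr, n, sizeof(*arr));

	if (error != 0)
		errx(1, "reallocarr: %s", strerror(error));
	return arr;			/* resized to n elements */
}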
Example #29
/* us and them, after all we're only ordinary seconds */
static void
rump_delay(unsigned int us)
{
	uint64_t sec, nsec;

	sec = us / 1000000;
	nsec = (us % 1000000) * 1000;

	if (__predict_false(sec != 0))
		printf("WARNING: over 1s delay\n");

	rumpuser_clock_sleep(RUMPUSER_CLOCK_RELWALL, sec, nsec);
}
Example #30
/*
 * nbuf_advance: advance in nbuf or chain by specified amount of bytes and,
 * if requested, ensure that the area *after* advance is contiguous.
 *
 * => Returns new pointer to data in nbuf or NULL if offset is invalid.
 * => Current nbuf and the offset is stored in the nbuf metadata.
 */
void *
nbuf_advance(nbuf_t *nbuf, size_t len, size_t ensure)
{
	struct mbuf *m = nbuf->nb_mbuf;
	u_int off, wmark;
	uint8_t *d;

	/* Offset with amount to advance. */
	off = (uintptr_t)nbuf->nb_nptr - mtod(m, uintptr_t) + len;
	wmark = m_buflen(m);

	/* Find the mbuf according to offset. */
	while (__predict_false(wmark <= off)) {
		m = m_next(m);
		if (__predict_false(m == NULL)) {
			/*
			 * If end of the chain, then the offset is
			 * higher than packet length.
			 */
			return NULL;
		}
		wmark += m_buflen(m);
	}
	KASSERT(off < m_length(nbuf->nb_mbuf0));

	/* Offset in mbuf data. */
	d = mtod(m, uint8_t *);
	KASSERT(off >= (wmark - m_buflen(m)));
	d += (off - (wmark - m_buflen(m)));

	nbuf->nb_mbuf = m;
	nbuf->nb_nptr = d;

	if (ensure) {
		/* Ensure contiguousness (may change nbuf chain). */
		d = nbuf_ensure_contig(nbuf, ensure);
	}
	return d;
}
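A hypothetical caller combining both nbuf helpers from Examples #25 and #30: skip the IP header and, in the same call, require that the following TCP header be contiguous; a NULL return means the offset ran past the packet or the rearrangement failed (variable names are assumptions):

	struct tcphdr *th;

	th = nbuf_advance(nbuf, ip_hlen, sizeof(struct tcphdr));
	if (__predict_false(th == NULL))
		return false;	/* offset past end of packet, or re-chain failed */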