Example #1
int
t4_init_l2t(struct adapter *sc, int flags)
{
	int i, l2t_size;
	struct l2t_data *d;

	l2t_size = sc->vres.l2t.size;
	if (l2t_size < 2)	/* At least 1 bucket for IP and 1 for IPv6 */
		return (EINVAL);

	d = malloc(sizeof(*d) + l2t_size * sizeof (struct l2t_entry), M_CXGBE,
	    M_ZERO | flags);
	if (!d)
		return (ENOMEM);

	d->l2t_size = l2t_size;
	d->rover = d->l2tab;
	atomic_store_rel_int(&d->nfree, l2t_size);
	rw_init(&d->lock, "L2T");

	for (i = 0; i < l2t_size; i++) {
		struct l2t_entry *e = &d->l2tab[i];

		e->idx = i;
		e->state = L2T_STATE_UNUSED;
		mtx_init(&e->lock, "L2T_E", NULL, MTX_DEF);
		STAILQ_INIT(&e->wr_list);
		atomic_store_rel_int(&e->refcnt, 0);
	}

	sc->l2t = d;
	t4_register_cpl_handler(sc, CPL_L2T_WRITE_RPL, do_l2t_write_rpl);

	return (0);
}
Example #2
File: thr_once.c Project: coyizumi/cs111
int
_pthread_once(pthread_once_t *once_control, void (*init_routine) (void))
{
	struct pthread *curthread;
	int state;

	_thr_check_init();

	for (;;) {
		state = once_control->state;
		if (state == ONCE_DONE)
			return (0);
		if (state == ONCE_NEVER_DONE) {
			if (atomic_cmpset_acq_int(&once_control->state, state, ONCE_IN_PROGRESS))
				break;
		} else if (state == ONCE_IN_PROGRESS) {
			if (atomic_cmpset_acq_int(&once_control->state, state, ONCE_WAIT))
				_thr_umtx_wait_uint(&once_control->state, ONCE_WAIT, NULL, 0);
		} else if (state == ONCE_WAIT) {
			_thr_umtx_wait_uint(&once_control->state, state, NULL, 0);
		} else
			return (EINVAL);
        }

	curthread = _get_curthread();
	THR_CLEANUP_PUSH(curthread, once_cancel_handler, once_control);
	init_routine();
	THR_CLEANUP_POP(curthread, 0);
	if (atomic_cmpset_rel_int(&once_control->state, ONCE_IN_PROGRESS, ONCE_DONE))
		return (0);
	atomic_store_rel_int(&once_control->state, ONCE_DONE);
	_thr_umtx_wake(&once_control->state, INT_MAX, 0);
	return (0);
}
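For context, the snippet below is a minimal userspace sketch (not taken from the FreeBSD tree) of the caller side of the once pattern implemented above: pthread_once() runs init_routine exactly once no matter how many threads race to call it. The names init_once, init_state and worker are illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_once_t init_once = PTHREAD_ONCE_INIT;
static int init_state;			/* written only by init_routine() */

static void
init_routine(void)
{
	init_state = 42;		/* one-time initialization */
}

static void *
worker(void *arg)
{
	(void)pthread_once(&init_once, init_routine);
	printf("state = %d\n", init_state);	/* always sees 42 */
	return (NULL);
}

int
main(void)
{
	pthread_t t[4];
	int i;

	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return (0);
}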
Example #3
void
vfs_mountroot(void)
{
	struct mount *mp;
	struct sbuf *sb;
	struct thread *td;
	time_t timebase;
	int error;

	td = curthread;

	vfs_mountroot_wait();

	sb = sbuf_new_auto();
	vfs_mountroot_conf0(sb);
	sbuf_finish(sb);

	error = vfs_mountroot_devfs(td, &mp);
	while (!error) {
		error = vfs_mountroot_parse(sb, mp);
		if (!error) {
			error = vfs_mountroot_shuffle(td, mp);
			if (!error) {
				sbuf_clear(sb);
				error = vfs_mountroot_readconf(td, sb);
				sbuf_finish(sb);
			}
		}
	}

	sbuf_delete(sb);

	/*
	 * Iterate over all currently mounted file systems and use
	 * the time stamp found to check and/or initialize the RTC.
	 * Call inittodr() only once and pass it the largest of the
	 * timestamps we encounter.
	 */
	timebase = 0;
	mtx_lock(&mountlist_mtx);
	mp = TAILQ_FIRST(&mountlist);
	while (mp != NULL) {
		if (mp->mnt_time > timebase)
			timebase = mp->mnt_time;
		mp = TAILQ_NEXT(mp, mnt_list);
	}
	mtx_unlock(&mountlist_mtx);
	inittodr(timebase);

	/* Keep prison0's root in sync with the global rootvnode. */
	mtx_lock(&prison0.pr_mtx);
	prison0.pr_root = rootvnode;
	vref(prison0.pr_root);
	mtx_unlock(&prison0.pr_mtx);

	mtx_lock(&mountlist_mtx);
	atomic_store_rel_int(&root_mount_complete, 1);
	wakeup(&root_mount_complete);
	mtx_unlock(&mountlist_mtx);
}
Example #4
/*
 * The GPE handler is called when IBE/OBF or SCI events occur.  We are
 * called from an unknown lock context.
 */
static UINT32
EcGpeHandler(ACPI_HANDLE GpeDevice, UINT32 GpeNumber, void *Context)
{
    struct acpi_ec_softc *sc = Context;
    ACPI_STATUS		       Status;
    EC_STATUS		       EcStatus;

    KASSERT(Context != NULL, ("EcGpeHandler called with NULL"));
    CTR0(KTR_ACPI, "ec gpe handler start");

    /*
     * Notify EcWaitEvent() that the status register is now fresh.  If we
     * didn't do this, it wouldn't be possible to distinguish an old IBE
     * from a new one, for example when doing a write transaction (writing
     * address and then data values.)
     */
    atomic_add_int(&sc->ec_gencount, 1);
    wakeup(sc);

    /*
     * If the EC_SCI bit of the status register is set, queue a query handler.
     * It will run the query and _Qxx method later, under the lock.
     */
    EcStatus = EC_GET_CSR(sc);
    if ((EcStatus & EC_EVENT_SCI) &&
	atomic_fetchadd_int(&sc->ec_sci_pend, 1) == 0) {
	CTR0(KTR_ACPI, "ec gpe queueing query handler");
	Status = AcpiOsExecute(OSL_GPE_HANDLER, EcGpeQueryHandler, Context);
	if (ACPI_FAILURE(Status)) {
	    printf("EcGpeHandler: queuing GPE query handler failed\n");
	    atomic_store_rel_int(&sc->ec_sci_pend, 0);
	}
    }
    return (ACPI_REENABLE_GPE);
}
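The interesting idiom above is the ec_sci_pend flag: only the event that takes it from 0 to 1 queues the deferred query handler, and a failed queue attempt drops the claim with a release store. A rough standalone C11 sketch of the same idea follows; event(), queue_work() and work_pend are illustrative names, not ACPI or FreeBSD interfaces.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint work_pend;		/* plays the role of sc->ec_sci_pend */

/* Stand-in for AcpiOsExecute(): just report that the handler was queued. */
static bool
queue_work(void)
{
	printf("query handler queued\n");
	return (true);
}

/* Queue at most one instance of the work, however many events arrive. */
static void
event(void)
{
	if (atomic_fetch_add(&work_pend, 1) != 0)
		return;			/* a handler is already pending */
	if (!queue_work()) {
		/* Queuing failed: release the claim so a later event retries. */
		atomic_store_explicit(&work_pend, 0, memory_order_release);
	}
}

int
main(void)
{
	event();			/* queues the handler */
	event();			/* no-op: handler already pending */
	/* The real handler clears the flag with a release store when done. */
	atomic_store_explicit(&work_pend, 0, memory_order_release);
	return (0);
}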
Example #5
/*
 * Allocate an L2T entry for use by a switching rule.  Such entries need to
 * be explicitly freed; while busy they are not on any hash chain, so normal
 * address resolution updates do not see them.
 */
struct l2t_entry *
t4_l2t_alloc_switching(struct adapter *sc, uint16_t vlan, uint8_t port,
    uint8_t *eth_addr)
{
	struct l2t_data *d = sc->l2t;
	struct l2t_entry *e;
	int rc;

	rw_wlock(&d->lock);
	e = find_or_alloc_l2e(d, vlan, port, eth_addr);
	if (e) {
		if (atomic_load_acq_int(&e->refcnt) == 0) {
			mtx_lock(&e->lock);    /* avoid race with t4_l2t_free */
			e->wrq = &sc->sge.ctrlq[0];
			e->iqid = sc->sge.fwq.abs_id;
			e->state = L2T_STATE_SWITCHING;
			e->vlan = vlan;
			e->lport = port;
			memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
			atomic_store_rel_int(&e->refcnt, 1);
			atomic_subtract_int(&d->nfree, 1);
			rc = t4_write_l2e(e, 0);
			mtx_unlock(&e->lock);
			if (rc != 0)
				e = NULL;
		} else {
			MPASS(e->vlan == vlan);
			MPASS(e->lport == port);
			atomic_add_int(&e->refcnt, 1);
		}
	}
	rw_wunlock(&d->lock);
	return (e);
}
Example #6
File: thr_once.c Project: coyizumi/cs111
static void
once_cancel_handler(void *arg)
{
	pthread_once_t *once_control = arg;

	if (atomic_cmpset_rel_int(&once_control->state, ONCE_IN_PROGRESS, ONCE_NEVER_DONE))
		return;
	atomic_store_rel_int(&once_control->state, ONCE_NEVER_DONE);
	_thr_umtx_wake(&once_control->state, INT_MAX, 0);
}
Example #7
/*
 * The TOE wants an L2 table entry that it can use to reach the next hop over
 * the specified port.  Produce such an entry - create one if needed.
 *
 * Note that the ifnet could be a pseudo-device like if_vlan, if_lagg, etc. on
 * top of the real cxgbe interface.
 */
struct l2t_entry *
t4_l2t_get(struct port_info *pi, struct ifnet *ifp, struct sockaddr *sa)
{
	struct l2t_entry *e;
	struct adapter *sc = pi->adapter;
	struct l2t_data *d = sc->l2t;
	u_int hash, smt_idx = pi->port_id;

	KASSERT(sa->sa_family == AF_INET || sa->sa_family == AF_INET6,
	    ("%s: sa %p has unexpected sa_family %d", __func__, sa,
	    sa->sa_family));

#ifndef VLAN_TAG
	if (ifp->if_type == IFT_L2VLAN)
		return (NULL);
#endif

	hash = l2_hash(d, sa, ifp->if_index);
	rw_wlock(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next) {
		if (l2_cmp(sa, e) == 0 && e->ifp == ifp &&
		    e->smt_idx == smt_idx) {
			l2t_hold(d, e);
			goto done;
		}
	}

	/* Need to allocate a new entry */
	e = t4_alloc_l2e(d);
	if (e) {
		mtx_lock(&e->lock);          /* avoid race with t4_l2t_free */
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;

		e->state = L2T_STATE_RESOLVING;
		l2_store(sa, e);
		e->ifp = ifp;
		e->smt_idx = smt_idx;
		e->hash = hash;
		e->lport = pi->lport;
		e->wrq = &sc->sge.ctrlq[pi->port_id];
		e->iqid = sc->sge.ofld_rxq[pi->vi[0].first_ofld_rxq].iq.abs_id;
		atomic_store_rel_int(&e->refcnt, 1);
#ifdef VLAN_TAG
		if (ifp->if_type == IFT_L2VLAN)
			VLAN_TAG(ifp, &e->vlan);
		else
			e->vlan = VLAN_NONE;
#endif
		mtx_unlock(&e->lock);
	}
done:
	rw_wunlock(&d->lock);
	return e;
}
Example #8
void
smp_rendezvous_cpus(cpumask_t map,
	void (* setup_func)(void *), 
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int i, ncpus = 0;

	if (!smp_started) {
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	CPU_FOREACH(i) {
		if (((1 << i) & map) != 0)
			ncpus++;
	}
	if (ncpus == 0)
		panic("ncpus is 0 with map=0x%x", map);

	/* obtain rendezvous lock */
	mtx_lock_spin(&smp_ipi_mtx);

	/* set static function pointers */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/* signal other processors, which will enter the IPI with interrupts off */
	ipi_selected(map & ~(1 << curcpu), IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if ((map & (1 << curcpu)) != 0)
		smp_rendezvous_action();

	if (teardown_func == smp_no_rendevous_barrier)
		while (atomic_load_acq_int(&smp_rv_waiters[2]) < ncpus)
			cpu_spinwait();

	/* release lock */
	mtx_unlock_spin(&smp_ipi_mtx);
}
Example #9
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);

	if (!IS_ERR(pd)) {
		pd->device  = device;
		pd->uobject = NULL;
		atomic_store_rel_int(&pd->usecnt, 0);
	}

	return pd;
}
Example #10
File: drm_gem.c Project: coyizumi/cs111
/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
int drm_gem_private_object_init(struct drm_device *dev,
                                struct drm_gem_object *obj, size_t size)
{
    MPASS((size & (PAGE_SIZE - 1)) == 0);

    obj->dev = dev;
    obj->vm_obj = NULL;

    obj->refcount = 1;
    atomic_store_rel_int(&obj->handle_count, 0);
    obj->size = size;

    return 0;
}
Example #11
/*
 * Allocate an L2T entry for use by a switching rule.  Such entries need to
 * be explicitly freed; while busy they are not on any hash chain, so normal
 * address resolution updates do not see them.
 */
struct l2t_entry *
t4_l2t_alloc_switching(struct l2t_data *d)
{
	struct l2t_entry *e;

	rw_wlock(&d->lock);
	e = t4_alloc_l2e(d);
	if (e) {
		mtx_lock(&e->lock);          /* avoid race with t4_l2t_free */
		e->state = L2T_STATE_SWITCHING;
		atomic_store_rel_int(&e->refcnt, 1);
		mtx_unlock(&e->lock);
	}
	rw_wunlock(&d->lock);
	return e;
}
Example #12
/*
 * Reconfigure specified per-CPU timer on other CPU. Called from IPI handler.
 */
inline static int
doconfigtimer(int i)
{
	tc *conf;

	conf = DPCPU_PTR(configtimer);
	if (atomic_load_acq_int(*conf + i)) {
		if (i == 0 ? timer1hz : timer2hz)
			et_start(timer[i], NULL, &timerperiod[i]);
		else
			et_stop(timer[i]);
		atomic_store_rel_int(*conf + i, 0);
		return (1);
	}
	return (0);
}
Example #13
/*
 * MP-friendly version of ppsratecheck().
 *
 * Returns non-negative if we are in the rate, negative otherwise.
 *  0 - rate limit not reached.
 * -1 - rate limit reached.
 * >0 - rate limit was reached before, and was just reset. The return value
 *      is number of events since last reset.
 */
int64_t
counter_ratecheck(struct counter_rate *cr, int64_t limit)
{
	int64_t val;
	int now;

	val = cr->cr_over;
	now = ticks;

	if (now - cr->cr_ticks >= hz) {
		/*
		 * Time to clear the structure, we are in the next second.
		 * First try unlocked read, and then proceed with atomic.
		 */
		if ((cr->cr_lock == 0) &&
		    atomic_cmpset_acq_int(&cr->cr_lock, 0, 1)) {
			/*
			 * Check whether another thread has just gone through
			 * the reset sequence before us.
			 */
			if (now - cr->cr_ticks >= hz) {
				val = counter_u64_fetch(cr->cr_rate);
				counter_u64_zero(cr->cr_rate);
				cr->cr_over = 0;
				cr->cr_ticks = now;
				if (val <= limit)
					val = 0;
			}
			atomic_store_rel_int(&cr->cr_lock, 0);
		} else
			/*
			 * We failed to lock, in this case other thread may
			 * be running counter_u64_zero(), so it is not safe
			 * to do an update, we skip it.
			 */
			return (val);
	}

	counter_u64_add(cr->cr_rate, 1);
	if (cr->cr_over != 0)
		return (-1);
	if (counter_u64_fetch(cr->cr_rate) > limit)
		val = cr->cr_over = -1;

	return (val);
}
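cr_lock above acts as a one-word try-lock: an unlocked read first, an acquire compare-and-set to take it, and a release store to drop it. A small standalone C11 sketch of that idiom is shown below; reset_lock and try_reset are illustrative names, not FreeBSD interfaces.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int reset_lock;		/* 0 = free, 1 = held */

static bool
try_reset(void)
{
	int expected = 0;

	/* Cheap unlocked read first, then the atomic acquire CAS. */
	if (atomic_load_explicit(&reset_lock, memory_order_relaxed) != 0)
		return (false);
	if (!atomic_compare_exchange_strong_explicit(&reset_lock, &expected, 1,
	    memory_order_acquire, memory_order_relaxed))
		return (false);

	/* ... exclusive section: zero the counters, start a new epoch ... */

	atomic_store_explicit(&reset_lock, 0, memory_order_release);
	return (true);
}

int
main(void)
{
	return (try_reset() ? 0 : 1);
}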
Example #14
static void
ng_ksocket_incoming(struct socket *so, void *arg, int waitflag)
{
	const node_p node = arg;
	const priv_p priv = NG_NODE_PRIVATE(node);
	int wait = ((waitflag & M_WAITOK) ? NG_WAITOK : 0) | NG_QUEUE;

	/*
	 * Even if the node is not locked, as soon as we are called we assume
	 * it exists and its private area is valid.  With some care we can
	 * access it.  Mark the node that an incoming event was sent for it,
	 * to avoid unneeded queue thrashing.
	 */
	if (atomic_cmpset_int(&priv->fn_sent, 0, 1) &&
	    ng_send_fn1(node, NULL, &ng_ksocket_incoming2, so, 0, wait)) {
		atomic_store_rel_int(&priv->fn_sent, 0);
	}
}
Example #15
/*
 * Called by a CPU to restart stopped CPUs. 
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(cpumask_t map)
{

	if (!smp_started)
		return 0;

	CTR1(KTR_SMP, "restart_cpus(%x)", map);

	/* signal other cpus to restart */
	atomic_store_rel_int(&started_cpus, map);

	/* wait for each to clear its bit */
	while ((stopped_cpus & map) != 0)
		cpu_spinwait();

	return 1;
}
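The release store to started_cpus publishes the restart request to CPUs spinning on that word, and the release/acquire pairing is what makes everything written beforehand visible to them. A minimal userspace analogue with C11 atomics and pthreads is sketched below; go, payload and waiter are illustrative names, not kernel interfaces.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint go;			/* the published flag */
static int payload;			/* ordinary data published by the flag */

static void *
waiter(void *arg)
{
	while (atomic_load_explicit(&go, memory_order_acquire) == 0)
		;			/* cpu_spinwait() equivalent */
	printf("payload = %d\n", payload);	/* guaranteed to print 7 */
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	payload = 7;				/* plain store... */
	atomic_store_explicit(&go, 1, memory_order_release);	/* ...published here */
	pthread_join(t, NULL);
	return (0);
}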
Example #16
static void
smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	int cpu, ncpu, othercpus;
	struct _call_data data;

	othercpus = mp_ncpus - 1;
	if (CPU_ISFULLSET(&mask)) {
		if (othercpus < 1)
			return;
	} else {
		CPU_CLR(PCPU_GET(cpuid), &mask);
		if (CPU_EMPTY(&mask))
			return;
	}
	if (!(read_eflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	KASSERT(call_data == NULL, ("call_data isn't null?!"));
	call_data = &data;		
	call_data->func_id = vector;
	call_data->arg1 = addr1;
	call_data->arg2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (CPU_ISFULLSET(&mask)) {
		ncpu = othercpus;
		ipi_all_but_self(vector);
	} else {
		ncpu = 0;
		while ((cpu = cpusetobj_ffs(&mask)) != 0) {
			cpu--;
			CPU_CLR(cpu, &mask);
			CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu,
			    vector);
			ipi_send_cpu(cpu, vector);
			ncpu++;
		}
	}
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	call_data = NULL;
	mtx_unlock_spin(&smp_ipi_mtx);
}
Example #17
/*
 * Reconfigure specified timer.
 * For per-CPU timers use IPI to make other CPUs to reconfigure.
 */
static void
configtimer(int i)
{
#ifdef SMP
	tc *conf;
	int cpu;

	critical_enter();
#endif
	/* Start/stop global timer or per-CPU timer of this CPU. */
	if (i == 0 ? timer1hz : timer2hz)
		et_start(timer[i], NULL, &timerperiod[i]);
	else
		et_stop(timer[i]);
#ifdef SMP
	if ((timer[i]->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
		critical_exit();
		return;
	}
	/* Set reconfigure flags for other CPUs. */
	CPU_FOREACH(cpu) {
		conf = DPCPU_ID_PTR(cpu, configtimer);
		atomic_store_rel_int(*conf + i, (cpu == curcpu) ? 0 : 1);
	}
	/* Send reconfigure IPI. */
	ipi_all_but_self(i == 0 ? IPI_HARDCLOCK : IPI_STATCLOCK);
	/* Wait for reconfiguration completed. */
restart:
	cpu_spinwait();
	CPU_FOREACH(cpu) {
		if (cpu == curcpu)
			continue;
		conf = DPCPU_ID_PTR(cpu, configtimer);
		if (atomic_load_acq_int(*conf + i))
			goto restart;
	}
	critical_exit();
#endif
}
Example #18
File: adb_bus.c Project: 2asoft/freebsd
u_int
adb_receive_raw_packet(device_t dev, u_char status, u_char command, int len, 
    u_char *data) 
{
	struct adb_softc *sc = device_get_softc(dev);
	u_char addr = command >> 4;

	if (len > 0 && (command & 0x0f) == ((ADB_COMMAND_TALK << 2) | 3)) {
		memcpy(&sc->devinfo[addr].register3,data,2);
		sc->devinfo[addr].handler_id = data[1];
	}

	if (sc->sync_packet == command)  {
		memcpy(sc->syncreg,data,(len > 8) ? 8 : len);
		atomic_store_rel_int(&sc->packet_reply,len + 1);
		wakeup(sc);
	}

	if (sc->children[addr] != NULL) {
		ADB_RECEIVE_PACKET(sc->children[addr],status,
			(command & 0x0f) >> 2,command & 0x03,len,data);
	}

	return (0);
}
Example #19
/*
 * Flush the TLB on all other CPU's
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;
	struct _call_data data;

	ncpu = mp_ncpus - 1;	/* does not shootdown self */
	if (ncpu < 1)
		return;		/* no other cpus */
	if (!(read_eflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	KASSERT(call_data == NULL, ("call_data isn't null?!"));
	call_data = &data;
	call_data->func_id = vector;
	call_data->arg1 = addr1;
	call_data->arg2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	call_data = NULL;
	mtx_unlock_spin(&smp_ipi_mtx);
}
Example #20
/*
 * When incoming data is appended to the socket, we get notified here.
 * This is also called whenever a significant event occurs for the socket.
 * Our original caller may have queued this event some time ago and
 * we cannot trust that it even still exists.  The node, however, is being
 * held with a reference by the queueing code and is guaranteed to be valid.
 */
static void
ng_ksocket_incoming2(node_p node, hook_p hook, void *arg1, int arg2)
{
	struct socket *so = arg1;
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct ng_mesg *response;
	struct uio auio;
	int flags, error;

	KASSERT(so == priv->so, ("%s: wrong socket", __func__));

	/* Allow next incoming event to be queued. */
	atomic_store_rel_int(&priv->fn_sent, 0);

	/* Check whether a pending connect operation has completed */
	if (priv->flags & KSF_CONNECTING) {
		if ((error = so->so_error) != 0) {
			so->so_error = 0;
			so->so_state &= ~SS_ISCONNECTING;
		}
		if (!(so->so_state & SS_ISCONNECTING)) {
			NG_MKMESSAGE(response, NGM_KSOCKET_COOKIE,
			    NGM_KSOCKET_CONNECT, sizeof(int32_t), M_NOWAIT);
			if (response != NULL) {
				response->header.flags |= NGF_RESP;
				response->header.token = priv->response_token;
				*(int32_t *)response->data = error;
				/*
				 * send an async "response" message
				 * to the node that set us up
				 * (if it still exists)
				 */
				NG_SEND_MSG_ID(error, node,
				    response, priv->response_addr, 0);
			}
			priv->flags &= ~KSF_CONNECTING;
		}
	}

	/* Check whether a pending accept operation has completed */
	if (priv->flags & KSF_ACCEPTING) {
		error = ng_ksocket_check_accept(priv);
		if (error != EWOULDBLOCK)
			priv->flags &= ~KSF_ACCEPTING;
		if (error == 0)
			ng_ksocket_finish_accept(priv);
	}

	/*
	 * If we don't have a hook, we must handle data events later.  When
	 * the hook gets created and is connected, this upcall function
	 * will be called again.
	 */
	if (priv->hook == NULL)
		return;

	/* Read and forward available mbuf's */
	auio.uio_td = NULL;
	auio.uio_resid = MJUMPAGESIZE;	/* XXXGL: sane limit? */
	flags = MSG_DONTWAIT;
	while (1) {
		struct sockaddr *sa = NULL;
		struct mbuf *m;

		/* Try to get next packet from socket */
		if ((error = soreceive(so, (so->so_state & SS_ISCONNECTED) ?
		    NULL : &sa, &auio, &m, NULL, &flags)) != 0)
			break;

		/* See if we got anything */
		if (m == NULL) {
			if (sa != NULL)
				free(sa, M_SONAME);
			break;
		}

		KASSERT(m->m_nextpkt == NULL, ("%s: nextpkt", __func__));

		/*
		 * Stream sockets do not have packet boundaries, so
		 * we have to allocate a header mbuf and attach the
		 * stream of data to it.
		 */
		if (so->so_type == SOCK_STREAM) {
			struct mbuf *mh;

			mh = m_gethdr(M_NOWAIT, MT_DATA);
			if (mh == NULL) {
				m_freem(m);
				if (sa != NULL)
					free(sa, M_SONAME);
				break;
			}

			mh->m_next = m;
			for (; m; m = m->m_next)
				mh->m_pkthdr.len += m->m_len;
			m = mh;
		}

		/* Put peer's socket address (if any) into a tag */
		if (sa != NULL) {
			struct sa_tag	*stag;

			stag = (struct sa_tag *)m_tag_alloc(NGM_KSOCKET_COOKIE,
			    NG_KSOCKET_TAG_SOCKADDR, sizeof(ng_ID_t) +
			    sa->sa_len, M_NOWAIT);
			if (stag == NULL) {
				free(sa, M_SONAME);
				goto sendit;
			}
			bcopy(sa, &stag->sa, sa->sa_len);
			free(sa, M_SONAME);
			stag->id = NG_NODE_ID(node);
			m_tag_prepend(m, &stag->tag);
		}

sendit:		/* Forward data with optional peer sockaddr as packet tag */
		NG_SEND_DATA_ONLY(error, priv->hook, m);
	}

	/*
	 * If the peer has closed the connection, forward a 0-length mbuf
	 * to indicate end-of-file.
	 */
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE &&
	    !(priv->flags & KSF_EOFSEEN)) {
		struct mbuf *m;

		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (m != NULL)
			NG_SEND_DATA_ONLY(error, priv->hook, m);
		priv->flags |= KSF_EOFSEEN;
	}
}
Example #21
/*
 * AP CPU's call this to initialize themselves.
 */
void
init_secondary(void)
{
	vm_offset_t addr;
	u_int	cpuid;
	int	gsel_tss;
	
	
	/* bootAP is set in start_ap() to our ID. */
	PCPU_SET(currentldt, _default_ldt);
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
#if 0
	gdt[bootAP * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
#endif
	PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
#if 0
	PCPU_SET(tss_gdt, &gdt[bootAP * NGDT + GPROC0_SEL].sd);

	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
#endif
	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	/*
	 * signal our startup to the BSP.
	 */
	mp_naps++;

	/* Spin until the BSP releases the AP's. */
	while (!aps_ready)
		ia32_pause();

	/* BSP may have changed PTD while we were waiting */
	invltlb();
	for (addr = 0; addr < NKPT * NBPDR - 1; addr += PAGE_SIZE)
		invlpg(addr);

	/* set up FPU state on the AP */
	npxinit();
#if 0
	
	/* set up SSE registers */
	enable_sse();
#endif
#if 0 && defined(PAE)
	/* Enable the PTE no-execute bit. */
	if ((amd_feature & AMDID_NX) != 0) {
		uint64_t msr;

		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
	}
#endif
#if 0
	/* A quick check from sanity claus */
	if (PCPU_GET(apic_id) != lapic_id()) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: actual apic_id = %d\n", lapic_id());
		printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
		panic("cpuid mismatch! boom!!");
	}
#endif
	
	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);
#if 0
	
	/* Init local apic for irq's */
	lapic_setup(1);
#endif
	smp_cpus++;

	cpuid = PCPU_GET(cpuid);
	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid);
	printf("SMP: AP CPU #%d Launched!\n", cpuid);

	/* Determine if we are a logical CPU. */
	if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
		CPU_SET(cpuid, &logical_cpus_mask);
	
	/* Determine if we are a hyperthread. */
	if (hyperthreading_cpus > 1 &&
	    PCPU_GET(apic_id) % hyperthreading_cpus != 0)
		CPU_SET(cpuid, &hyperthreading_cpus_mask);
#if 0
	if (bootverbose)
		lapic_dump("AP");
#endif
	if (smp_cpus == mp_ncpus) {
		/* enable IPI's, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	 /* historic */
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the AP's are up */
	while (smp_started == 0)
		ia32_pause();

	PCPU_SET(curthread, PCPU_GET(idlethread));

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	/* enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}
Example #22
File: subr_smp.c Project: AhmadTux/freebsd
void
smp_rendezvous_cpus(cpuset_t map,
	void (* setup_func)(void *), 
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int curcpumap, i, ncpus = 0;

	/* See the comments in the !SMP case. */
	if (!smp_started) {
		spinlock_enter();
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		spinlock_exit();
		return;
	}

	CPU_FOREACH(i) {
		if (CPU_ISSET(i, &map))
			ncpus++;
	}
	if (ncpus == 0)
		panic("ncpus is 0 with non-zero map");

	mtx_lock_spin(&smp_ipi_mtx);

	/* Pass rendezvous parameters via global variables. */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	smp_rv_waiters[3] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/*
	 * Signal other processors, which will enter the IPI with
	 * interrupts off.
	 */
	curcpumap = CPU_ISSET(curcpu, &map);
	CPU_CLR(curcpu, &map);
	ipi_selected(map, IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if (curcpumap != 0)
		smp_rendezvous_action();

	/*
	 * Ensure that the master CPU waits for all the other
	 * CPUs to finish the rendezvous, so that smp_rv_*
	 * pseudo-structure and the arg are guaranteed to not
	 * be in use.
	 */
	while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
		cpu_spinwait();

	mtx_unlock_spin(&smp_ipi_mtx);
}
Example #23
void
init_secondary(int cpu)
{
	struct pcpu *pc;
	uint32_t loop_counter;
#ifndef INTRNG
	int start = 0, end = 0;
#endif
	uint32_t actlr_mask, actlr_set;

	pmap_set_tex();
	cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set);
	reinit_mmu(pmap_kern_ttb, actlr_mask, actlr_set);
	cpu_setup();

	/* Provide stack pointers for other processor modes. */
	set_stackptrs(cpu);

	enable_interrupts(PSR_A);
	pc = &__pcpu[cpu];

	/*
	 * pcpu_init() updates queue, so it should not be executed in parallel
	 * on several cores
	 */
	while(mp_naps < (cpu - 1))
		;

	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu[cpu - 1], cpu);
#if __ARM_ARCH >= 6 && defined(DDB)
	dbg_monitor_init_secondary();
#endif
	/* Signal our startup to BSP */
	atomic_add_rel_32(&mp_naps, 1);

	/* Spin until the BSP releases the APs */
	while (!atomic_load_acq_int(&aps_ready)) {
#if __ARM_ARCH >= 7
		__asm __volatile("wfe");
#endif
	}

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pc->pc_curthread = pc->pc_idlethread;
	pc->pc_curpcb = pc->pc_idlethread->td_pcb;
	set_curthread(pc->pc_idlethread);
#ifdef VFP
	vfp_init();
#endif

	/* Configure the interrupt controller */
	intr_pic_init_secondary();

	mtx_lock_spin(&ap_boot_mtx);

	atomic_add_rel_32(&smp_cpus, 1);

	if (smp_cpus == mp_ncpus) {
		/* enable IPI's, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

#ifndef INTRNG
	/* Enable ipi */
#ifdef IPI_IRQ_START
	start = IPI_IRQ_START;
#ifdef IPI_IRQ_END
	end = IPI_IRQ_END;
#else
	end = IPI_IRQ_START;
#endif
#endif

	for (int i = start; i <= end; i++)
		arm_unmask_irq(i);
#endif /* INTRNG */
	enable_interrupts(PSR_I);

	loop_counter = 0;
	while (smp_started == 0) {
		DELAY(100);
		loop_counter++;
		if (loop_counter == 1000)
			CTR0(KTR_SMP, "AP still wait for smp_started");
	}
	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	CTR0(KTR_SMP, "go into scheduler");

	/* Enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}
Example #24
/* _mcount; may be static, inline, etc */
_MCOUNT_DECL(uintfptr_t frompc, uintfptr_t selfpc)
{
#ifdef GUPROF
	u_int delta;
#endif
	fptrdiff_t frompci;
	u_short *frompcindex;
	struct tostruct *top, *prevtop;
	struct gmonparam *p;
	long toindex;
#ifdef _KERNEL
	MCOUNT_DECL(s)
#endif

	p = &_gmonparam;
#ifndef GUPROF			/* XXX */
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#endif
#ifdef _KERNEL
	MCOUNT_ENTER(s);
#else
	if (!atomic_cmpset_acq_int(&p->state, GMON_PROF_ON, GMON_PROF_BUSY))
		return;
#endif
	frompci = frompc - p->lowpc;

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompci may be
	 * for a user address.  Convert such frompci's to the index of
	 * user() to merge all user counts.
	 */
	if (frompci >= p->textsize) {
		if (frompci + p->lowpc
		    >= (uintfptr_t)(VM_MAXUSER_ADDRESS + UPAGES * PAGE_SIZE))
			goto done;
		frompci = (uintfptr_t)user - p->lowpc;
		if (frompci >= p->textsize)
		    goto done;
	}
#endif

#ifdef GUPROF
	if (p->state != GMON_PROF_HIRES)
		goto skip_guprof_stuff;
	/*
	 * Look at the clock and add the count of clock cycles since the
	 * clock was last looked at to a counter for frompc.  This
	 * solidifies the count for the function containing frompc and
	 * effectively starts another clock for the current function.
	 * The count for the new clock will be solidified when another
	 * function call is made or the function returns.
	 *
	 * We use the usual sampling counters since they can be located
	 * efficiently.  4-byte counters are usually necessary.
	 *
	 * There are many complications for subtracting the profiling
	 * overheads from the counts for normal functions and adding
	 * them to the counts for mcount(), mexitcount() and cputime().
	 * We attempt to handle fractional cycles, but the overheads
	 * are usually underestimated because they are calibrated for
	 * a simpler than usual setup.
	 */
	delta = cputime() - p->mcount_overhead;
	p->cputime_overhead_resid += p->cputime_overhead_frac;
	p->mcount_overhead_resid += p->mcount_overhead_frac;
	if ((int)delta < 0)
		*p->mcount_count += delta + p->mcount_overhead
				    - p->cputime_overhead;
	else if (delta != 0) {
		if (p->cputime_overhead_resid >= CALIB_SCALE) {
			p->cputime_overhead_resid -= CALIB_SCALE;
			++*p->cputime_count;
			--delta;
		}
		if (delta != 0) {
			if (p->mcount_overhead_resid >= CALIB_SCALE) {
				p->mcount_overhead_resid -= CALIB_SCALE;
				++*p->mcount_count;
				--delta;
			}
			KCOUNT(p, frompci) += delta;
		}
		*p->mcount_count += p->mcount_overhead_sub;
	}
	*p->cputime_count += p->cputime_overhead;
skip_guprof_stuff:
#endif /* GUPROF */

#ifdef _KERNEL
	/*
	 * When we are called from an exception handler, frompc is faked
	 * to be for where the exception occurred.  We've just solidified
	 * the count for there.  Now convert frompci to the index of btrap()
	 * for trap handlers and bintr() for interrupt handlers to make
	 * exceptions appear in the call graph as calls from btrap() and
	 * bintr() instead of calls from all over.
	 */
	if ((uintfptr_t)selfpc >= (uintfptr_t)btrap
	    && (uintfptr_t)selfpc < (uintfptr_t)eintr) {
		if ((uintfptr_t)selfpc >= (uintfptr_t)bintr)
			frompci = (uintfptr_t)bintr - p->lowpc;
		else
			frompci = (uintfptr_t)btrap - p->lowpc;
	}
#endif

	/*
	 * check that frompc is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 */
	if (frompci >= p->textsize)
		goto done;

	frompcindex = &p->froms[frompci / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 *	first time traversing this arc
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain.
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}

	}
done:
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#else
	atomic_store_rel_int(&p->state, GMON_PROF_ON);
#endif
	return;
overflow:
	atomic_store_rel_int(&p->state, GMON_PROF_ERROR);
#ifdef _KERNEL
	MCOUNT_EXIT(s);
#endif
	return;
}
Example #25
void
init_secondary(int cpu)
{
	struct pcpu *pc;
	uint32_t loop_counter;
	int start = 0, end = 0;

	cpu_setup(NULL);
	setttb(pmap_pa);
	cpu_tlb_flushID();

	pc = &__pcpu[cpu];

	/*
	 * pcpu_init() updates queue, so it should not be executed in parallel
	 * on several cores
	 */
	while(mp_naps < (cpu - 1))
		;

	pcpu_init(pc, cpu, sizeof(struct pcpu));
	dpcpu_init(dpcpu[cpu - 1], cpu);

	/* Provide stack pointers for other processor modes. */
	set_stackptrs(cpu);

	/* Signal our startup to BSP */
	atomic_add_rel_32(&mp_naps, 1);

	/* Spin until the BSP releases the APs */
	while (!aps_ready)
		;

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pc->pc_curthread = pc->pc_idlethread;
	pc->pc_curpcb = pc->pc_idlethread->td_pcb;
	set_curthread(pc->pc_idlethread);
#ifdef VFP
	pc->pc_cpu = cpu;

	vfp_init();
#endif

	mtx_lock_spin(&ap_boot_mtx);

	atomic_add_rel_32(&smp_cpus, 1);

	if (smp_cpus == mp_ncpus) {
		/* enable IPI's, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* Enable ipi */
#ifdef IPI_IRQ_START
	start = IPI_IRQ_START;
#ifdef IPI_IRQ_END
  	end = IPI_IRQ_END;
#else
	end = IPI_IRQ_START;
#endif
#endif
				
	for (int i = start; i <= end; i++)
		arm_unmask_irq(i);
	enable_interrupts(PSR_I);

	loop_counter = 0;
	while (smp_started == 0) {
		DELAY(100);
		loop_counter++;
		if (loop_counter == 1000)
			CTR0(KTR_SMP, "AP still wait for smp_started");
	}
	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	CTR0(KTR_SMP, "go into scheduler");
	platform_mp_init_secondary();

	/* Enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}
Example #26
File: lock.c Project: coyizumi/cs111
/*
 * Release a lock.
 */
void
_lock_release(struct lock *lck, struct lockuser *lu)
{
	struct lockuser *lu_tmp, *lu_h;
	struct lockreq *myreq;
	int prio_h;
	int lval;

	/**
	 * XXX - We probably want to remove these checks to optimize
	 *       performance.  It is also a bug if any one of the 
	 *       checks fail, so it's probably better to just let it
	 *       SEGV and fix it.
	 */
#if 0
	if ((lck == NULL) || (lu == NULL))
		return;
#endif
	if ((lck->l_type & LCK_PRIORITY) != 0) {
		prio_h = 0;
		lu_h = NULL;

		/* Update tail if our request is last. */
		if (lu->lu_watchreq->lr_owner == NULL) {
			atomic_store_rel_ptr((volatile uintptr_t *)
			    (void *)&lck->l_tail,
			    (uintptr_t)lu->lu_myreq);
			atomic_store_rel_ptr((volatile uintptr_t *)
			    (void *)&lu->lu_myreq->lr_owner,
			    (uintptr_t)NULL);
		} else {
			/* Remove ourselves from the list. */
			atomic_store_rel_ptr((volatile uintptr_t *)
			    (void *)&lu->lu_myreq->lr_owner,
			    (uintptr_t)lu->lu_watchreq->lr_owner);
			atomic_store_rel_ptr((volatile uintptr_t *)
			    (void *)&lu->lu_watchreq->lr_owner->lu_myreq,
			    (uintptr_t)lu->lu_myreq);
		}
		/*
		 * The watch request now becomes our own because we've
		 * traded away our previous request.  Save our previous
		 * request so that we can grant the lock.
		 */
		myreq = lu->lu_myreq;
		lu->lu_myreq = lu->lu_watchreq;
		lu->lu_watchreq = NULL;
		lu->lu_myreq->lr_locked = 1;
		lu->lu_myreq->lr_owner = lu;
		lu->lu_myreq->lr_watcher = NULL;
		/*
		 * Traverse the list of lock requests in reverse order
		 * looking for the user with the highest priority.
		 */
		for (lu_tmp = lck->l_tail->lr_watcher; lu_tmp != NULL;
		     lu_tmp = lu_tmp->lu_myreq->lr_watcher) {
			if (lu_tmp->lu_priority > prio_h) {
				lu_h = lu_tmp;
				prio_h = lu_tmp->lu_priority;
			}
		}
		if (lu_h != NULL) {
			/* Give the lock to the highest priority user. */
			if (lck->l_wakeup != NULL) {
				atomic_swap_int(
				    &lu_h->lu_watchreq->lr_locked,
				    0, &lval);
				if (lval == 2)
					/* Notify the sleeper */
					lck->l_wakeup(lck,
					    lu_h->lu_myreq->lr_watcher);
			}
			else
				atomic_store_rel_int(
				    &lu_h->lu_watchreq->lr_locked, 0);
		} else {
			if (lck->l_wakeup != NULL) {
				atomic_swap_int(&myreq->lr_locked,
				    0, &lval);
				if (lval == 2)
					/* Notify the sleeper */
					lck->l_wakeup(lck, myreq->lr_watcher);
			}
			else
				/* Give the lock to the previous request. */
				atomic_store_rel_int(&myreq->lr_locked, 0);
		}
	} else {
		/*
		 * The watch request now becomes our own because we've
		 * traded away our previous request.  Save our previous
		 * request so that we can grant the lock.
		 */
		myreq = lu->lu_myreq;
		lu->lu_myreq = lu->lu_watchreq;
		lu->lu_watchreq = NULL;
		lu->lu_myreq->lr_locked = 1;
		if (lck->l_wakeup) {
			atomic_swap_int(&myreq->lr_locked, 0, &lval);
			if (lval == 2)
				/* Notify the sleeper */
				lck->l_wakeup(lck, myreq->lr_watcher);
		}
		else
			/* Give the lock to the previous request. */
			atomic_store_rel_int(&myreq->lr_locked, 0);
	}
	lu->lu_myreq->lr_active = 0;
}
Example #27
static void atomic_set(sp_counted_base_atomic_type volatile *pw,int v)
{
    atomic_store_rel_int(&pw->ui,v);
}
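For readers more used to standard C, atomic_store_rel_int() corresponds to a C11 release store; a rough equivalent of the wrapper above, assuming the counter word is an atomic_uint, might look like this (atomic_set_c11 is an illustrative name):

#include <stdatomic.h>

static void
atomic_set_c11(atomic_uint *ui, unsigned int v)
{
	atomic_store_explicit(ui, v, memory_order_release);	/* release store */
}

int
main(void)
{
	atomic_uint refcnt;

	atomic_set_c11(&refcnt, 0);
	return ((int)atomic_load(&refcnt));
}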
Example #28
/*
 * When incoming data is appended to the socket, we get notified here.
 * This is also called whenever a significant event occurs for the socket.
 * Our original caller may have queued this event some time ago and
 * we cannot trust that it even still exists.  The node, however, is being
 * held with a reference by the queueing code and is guaranteed to be valid.
 */
static void
ng_ksocket_incoming2(node_p node, hook_p hook, void *arg1, int arg2)
{
	struct socket *so = arg1;
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct mbuf *m;
	struct ng_mesg *response;
	struct uio auio;
	int s, flags, error;

	s = splnet();

	/* so = priv->so; *//* XXX could have derived this like so */
	KASSERT(so == priv->so, ("%s: wrong socket", __func__));
	
	/* Allow next incoming event to be queued. */
	atomic_store_rel_int(&priv->fn_sent, 0);

	/* Check whether a pending connect operation has completed */
	if (priv->flags & KSF_CONNECTING) {
		if ((error = so->so_error) != 0) {
			so->so_error = 0;
			soclrstate(so, SS_ISCONNECTING);
		}
		if (!(so->so_state & SS_ISCONNECTING)) {
			NG_MKMESSAGE(response, NGM_KSOCKET_COOKIE,
			    NGM_KSOCKET_CONNECT, sizeof(int32_t), M_WAITOK | M_NULLOK);
			if (response != NULL) {
				response->header.flags |= NGF_RESP;
				response->header.token = priv->response_token;
				*(int32_t *)response->data = error;
				/* 
				 * send an async "response" message
				 * to the node that set us up
				 * (if it still exists)
				 */
				NG_SEND_MSG_ID(error, node,
				    response, priv->response_addr, 0);
			}
			priv->flags &= ~KSF_CONNECTING;
		}
	}

	/* Check whether a pending accept operation has completed */
	if (priv->flags & KSF_ACCEPTING) {
		error = ng_ksocket_check_accept(priv);
		if (error != EWOULDBLOCK)
			priv->flags &= ~KSF_ACCEPTING;
		if (error == 0)
			ng_ksocket_finish_accept(priv);
	}

	/*
	 * If we don't have a hook, we must handle data events later.  When
	 * the hook gets created and is connected, this upcall function
	 * will be called again.
	 */
	if (priv->hook == NULL) {
		splx(s);
		return;
	}

	/* Read and forward available mbuf's */
	auio.uio_td = NULL;
	auio.uio_resid = 1000000000;
	flags = MSG_DONTWAIT;
	while (1) {
		struct sockaddr *sa = NULL;
		struct mbuf *n;

		/* Try to get next packet from socket */
		if ((error = soreceive(so, (so->so_state & SS_ISCONNECTED) ?
		    NULL : &sa, &auio, &m, NULL, &flags)) != 0)
			break;

		/* See if we got anything */
		if (m == NULL) {
			if (sa != NULL)
				kfree(sa, M_SONAME);
			break;
		}

		/*
		 * Don't trust the various socket layers to get the
		 * packet header and length correct (e.g. kern/15175).
		 *
		 * Also, do not trust that soreceive() will clear m_nextpkt
		 * for us (e.g. kern/84952, kern/82413).
		 */
		m->m_pkthdr.csum_flags = 0;
		for (n = m, m->m_pkthdr.len = 0; n != NULL; n = n->m_next) {
			m->m_pkthdr.len += n->m_len;
			n->m_nextpkt = NULL;
		}

		/* Put peer's socket address (if any) into a tag */
		if (sa != NULL) {
			struct sa_tag	*stag;

			stag = (struct sa_tag *)m_tag_alloc(NGM_KSOCKET_COOKIE,
			    NG_KSOCKET_TAG_SOCKADDR, sizeof(ng_ID_t) +
			    sa->sa_len, MB_DONTWAIT);
			if (stag == NULL) {
				kfree(sa, M_SONAME);
				goto sendit;
			}
			bcopy(sa, &stag->sa, sa->sa_len);
			kfree(sa, M_SONAME);
			stag->id = NG_NODE_ID(node);
			m_tag_prepend(m, &stag->tag);
		}

sendit:		/* Forward data with optional peer sockaddr as packet tag */
		NG_SEND_DATA_ONLY(error, priv->hook, m);
	}

	/*
	 * If the peer has closed the connection, forward a 0-length mbuf
	 * to indicate end-of-file.
	 */
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE && !(priv->flags & KSF_EOFSEEN)) {
		MGETHDR(m, MB_DONTWAIT, MT_DATA);
		if (m != NULL) {
			m->m_len = m->m_pkthdr.len = 0;
			NG_SEND_DATA_ONLY(error, priv->hook, m);
		}
		priv->flags |= KSF_EOFSEEN;
	}
	splx(s);
}