Example #1
struct process *
process_get(pid_t pid)
{
    rw_rlock(pidt_rw);
    struct process *p = pid_table[pid];
    rw_runlock(pidt_rw);
    return p;
}
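The lookup above only read-locks the table around a single array read. A hedged sketch of what the writer side of the same hypothetical table might look like; pidt_rw and pid_table come from the example, while process_register is an illustrative name, not from any real source:

/* Hypothetical writer counterpart for the same table (not from the listing). */
void
process_register(pid_t pid, struct process *p)
{
    rw_wlock(pidt_rw);          /* exclusive: no readers while we update */
    pid_table[pid] = p;
    rw_wunlock(pidt_rw);
}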
Example #2
void
lock_rw(struct lock_object *lock, uintptr_t how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_rlock(rw);
	else
		rw_wlock(rw);
}
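lock_rw() is shaped like a lock-class callback: the how argument says whether to reacquire the lock shared (non-zero) or exclusive (zero). A hedged sketch of a matching release helper that reports how the lock was held; rw_wowned() is the only call assumed beyond those in the example, and the function name is hypothetical:

/* Hypothetical counterpart: drop the lock and report how it was held. */
static uintptr_t
unlock_rw_sketch(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (rw_wowned(rw)) {		/* write-held by this thread */
		rw_wunlock(rw);
		return (0);		/* lock_rw(lock, 0) => rw_wlock() */
	}
	rw_runlock(rw);
	return (1);			/* lock_rw(lock, 1) => rw_rlock() */
}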
Example #3
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
					       uint32_t key)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *base;
	struct drm_hash_item *hash;
	int ret;

	rw_rlock(&tdev->object_lock);
	ret = drm_ht_find_item(&tdev->object_hash, key, &hash);

	if (ret == 0) {
		base = drm_hash_entry(hash, struct ttm_base_object, hash);
		refcount_acquire(&base->refcount);
	}
	rw_runlock(&tdev->object_lock);

	/* The listing truncated the function here; assumed minimal ending. */
	return (ret == 0 ? base : NULL);
}
Example #4
File: memutil.c  Project: coyizumi/cs111
int
mem_range_attr_get(struct mem_range_desc *mrd, int *arg)
{
	int nd;

	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);
	nd = *arg;
	rw_rlock(&mr_lock);
	if (nd == 0)
		*arg = mem_range_softc.mr_ndesc;
	else
		bcopy(mem_range_softc.mr_desc, mrd, nd * sizeof(*mrd));
	rw_runlock(&mr_lock);
	return (0);
}
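The function supports a two-step query: a call with *arg == 0 only reports the descriptor count, and a follow-up call with *arg set copies that many descriptors out. A hedged caller sketch; dump_mem_ranges and the M_TEMP allocation are illustrative, not from the listing:

/* Hypothetical in-kernel caller: query the count, then fetch the entries. */
static int
dump_mem_ranges(void)
{
	struct mem_range_desc *mrd;
	int error, nd = 0;

	error = mem_range_attr_get(NULL, &nd);	/* nd == 0: just get count */
	if (error != 0 || nd == 0)
		return (error);

	mrd = malloc(nd * sizeof(*mrd), M_TEMP, M_WAITOK);
	error = mem_range_attr_get(mrd, &nd);	/* copy out nd descriptors */
	/* ... use mrd[0..nd-1] ... */
	free(mrd, M_TEMP);
	return (error);
}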
Example #5
void
loginclass_racct_foreach(void (*callback)(struct racct *racct,
    void *arg2, void *arg3), void (*pre)(void), void (*post)(void),
    void *arg2, void *arg3)
{
	struct loginclass *lc;

	rw_rlock(&loginclasses_lock);
	if (pre != NULL)
		(pre)();
	LIST_FOREACH(lc, &loginclasses, lc_next)
		(callback)(lc->lc_racct, arg2, arg3);
	if (post != NULL)
		(post)();
	rw_runlock(&loginclasses_lock);
}
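loginclass_racct_foreach() walks every login class under the read lock and hands each lc_racct to the callback, with optional pre/post hooks bracketing the walk. A hedged caller sketch; the callback body and both function names are illustrative:

/* Hypothetical callback: runs with loginclasses_lock read-held, so it
 * must not sleep or try to take loginclasses_lock again. */
static void
racct_report_cb(struct racct *racct, void *arg2, void *arg3)
{
	/* per-class work goes here */
	(void)racct;
	(void)arg2;
	(void)arg3;
}

/* Hypothetical invocation with no pre/post hooks and unused extra args. */
static void
report_all_loginclass_racct(void)
{
	loginclass_racct_foreach(racct_report_cb, NULL, NULL, NULL, NULL);
}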
Example #6
int
sblock(struct sockbuf *sb, int flags)
{

	KASSERT((flags & SBL_VALID) == flags,
	    ("sblock: flags invalid (0x%x)", flags));

	if (flags & SBL_WAIT) {
		rw_rlock(&sb->sb_rwlock);
		return (0);
	} else {
		if (rw_try_rlock(&sb->sb_rwlock) == 0)
			return (EWOULDBLOCK);
		return (0);
	}
}
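In this variant sblock() takes sb_rwlock shared, so the matching release would presumably be a plain read unlock. A minimal hedged sketch; sb_rwlock is taken from the example above, the body is assumed:

/* Hypothetical release matching the sblock() variant above. */
void
sbunlock(struct sockbuf *sb)
{

	rw_runlock(&sb->sb_rwlock);
}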
Example #7
/*
 * Return loginclass structure with a corresponding name.  Not
 * performance critical, as it's used mainly by setloginclass(2),
 * which happens once per login session.  Caller has to use
 * loginclass_free() on the returned value when it's no longer
 * needed.
 */
struct loginclass *
loginclass_find(const char *name)
{
	struct loginclass *lc, *new_lc;

	if (name[0] == '\0' || strlen(name) >= MAXLOGNAME)
		return (NULL);

	lc = curthread->td_ucred->cr_loginclass;
	if (strcmp(name, lc->lc_name) == 0) {
		loginclass_hold(lc);
		return (lc);
	}

	rw_rlock(&loginclasses_lock);
	lc = loginclass_lookup(name);
	rw_runlock(&loginclasses_lock);
	if (lc != NULL)
		return (lc);

	new_lc = malloc(sizeof(*new_lc), M_LOGINCLASS, M_ZERO | M_WAITOK);
	racct_create(&new_lc->lc_racct);
	refcount_init(&new_lc->lc_refcount, 1);
	strcpy(new_lc->lc_name, name);

	rw_wlock(&loginclasses_lock);
	/*
	 * There's a chance someone created our loginclass while we
	 * were in malloc and not holding the lock, so we have to
	 * make sure we don't insert a duplicate loginclass.
	 */
	if ((lc = loginclass_lookup(name)) == NULL) {
		LIST_INSERT_HEAD(&loginclasses, new_lc, lc_next);
		rw_wunlock(&loginclasses_lock);
		lc = new_lc;
	} else {
		rw_wunlock(&loginclasses_lock);
		racct_destroy(&new_lc->lc_racct);
		free(new_lc, M_LOGINCLASS);
	}

	return (lc);
}
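Per the comment above, every successful loginclass_find() must be balanced by loginclass_free() once the reference is no longer needed. A hedged usage sketch; the surrounding function is hypothetical, while lc_name and lc_racct appear in the code above:

/* Hypothetical caller: look up a class, use it, then drop the reference. */
static int
report_loginclass(const char *name)
{
	struct loginclass *lc;

	lc = loginclass_find(name);	/* returns a held reference or NULL */
	if (lc == NULL)
		return (EINVAL);
	printf("login class %s, racct %p\n", lc->lc_name, lc->lc_racct);
	loginclass_free(lc);		/* balance the reference */
	return (0);
}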
Example #8
/*
 * Called when the host's ARP layer makes a change to some entry that is loaded
 * into the HW L2 table.
 */
void
t4_l2_update(struct toedev *tod, struct ifnet *ifp, struct sockaddr *sa,
    uint8_t *lladdr, uint16_t vtag)
{
	struct adapter *sc = tod->tod_softc;
	struct l2t_entry *e;
	struct l2t_data *d = sc->l2t;
	u_int hash;

	KASSERT(d != NULL, ("%s: no L2 table", __func__));

	hash = l2_hash(d, sa, ifp->if_index);
	rw_rlock(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next) {
		if (l2_cmp(sa, e) == 0 && e->ifp == ifp) {
			mtx_lock(&e->lock);
			if (atomic_load_acq_int(&e->refcnt))
				goto found;
			e->state = L2T_STATE_STALE;
			mtx_unlock(&e->lock);
			break;
		}
	}
	rw_runlock(&d->lock);

	/*
	 * This is of no interest to us.  We've never had an offloaded
	 * connection to this destination, and we aren't attempting one right
	 * now.
	 */
	return;

found:
	rw_runlock(&d->lock);

	KASSERT(e->state != L2T_STATE_UNUSED,
	    ("%s: unused entry in the hash.", __func__));

	update_entry(sc, e, lladdr, vtag);
	mtx_unlock(&e->lock);
}
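The lookup above shows a two-level locking pattern: the table-wide rwlock is only read-locked while walking the hash chain, and the per-entry mutex is taken before the table lock is dropped on the found: path, so the entry cannot disappear while it is being updated. A generic hedged sketch of the same idea with hypothetical types and names (assumes the usual <sys/lock.h>, <sys/mutex.h>, <sys/rwlock.h> setup):

/* Generic sketch: rwlock over the table, mutex per entry. */
struct entry {
	struct mtx	 e_lock;
	struct entry	*e_next;
	int		 e_key;
};

struct table {
	struct rwlock	 t_lock;
	struct entry	*t_head;
};

static struct entry *
table_lookup_and_lock(struct table *t, int key)
{
	struct entry *e;

	rw_rlock(&t->t_lock);
	for (e = t->t_head; e != NULL; e = e->e_next) {
		if (e->e_key == key) {
			mtx_lock(&e->e_lock);	/* pin the entry ... */
			break;
		}
	}
	rw_runlock(&t->t_lock);			/* ... then drop the table lock */
	return (e);				/* caller drops e_lock when done */
}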
Example #9
/*
 * active open (soconnect).
 *
 * State of affairs on entry:
 * soisconnecting (so_state |= SS_ISCONNECTING)
 * tcbinfo not locked (This has changed - used to be WLOCKed)
 * inp WLOCKed
 * tp->t_state = TCPS_SYN_SENT
 * rtalloc1, RT_UNLOCK on rt.
 */
int
t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
    struct sockaddr *nam)
{
	struct adapter *sc = tod->tod_softc;
	struct toepcb *toep = NULL;
	struct wrqe *wr = NULL;
	struct ifnet *rt_ifp = rt->rt_ifp;
	struct vi_info *vi;
	int mtu_idx, rscale, qid_atid, rc, isipv6, txqid, rxqid;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	int reason;
	struct offload_settings settings;
	uint16_t vid = 0xfff, pcp = 0;

	INP_WLOCK_ASSERT(inp);
	KASSERT(nam->sa_family == AF_INET || nam->sa_family == AF_INET6,
	    ("%s: dest addr %p has family %u", __func__, nam, nam->sa_family));

	if (rt_ifp->if_type == IFT_ETHER)
		vi = rt_ifp->if_softc;
	else if (rt_ifp->if_type == IFT_L2VLAN) {
		struct ifnet *ifp = VLAN_TRUNKDEV(rt_ifp);

		vi = ifp->if_softc;
		VLAN_TAG(rt_ifp, &vid);
		VLAN_PCP(rt_ifp, &pcp);
	} else if (rt_ifp->if_type == IFT_IEEE8023ADLAG)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOSYS); /* XXX: implement lagg+TOE */
	else
		DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

	rw_rlock(&sc->policy_lock);
	settings = *lookup_offload_policy(sc, OPEN_TYPE_ACTIVE, NULL,
	    EVL_MAKETAG(vid, pcp, 0), inp);
	rw_runlock(&sc->policy_lock);
	if (!settings.offload)
		DONT_OFFLOAD_ACTIVE_OPEN(EPERM);

	if (settings.txq >= 0 && settings.txq < vi->nofldtxq)
		txqid = settings.txq;
	else
		txqid = arc4random() % vi->nofldtxq;
	txqid += vi->first_ofld_txq;
	if (settings.rxq >= 0 && settings.rxq < vi->nofldrxq)
		rxqid = settings.rxq;
	else
		rxqid = arc4random() % vi->nofldrxq;
	rxqid += vi->first_ofld_rxq;

	toep = alloc_toepcb(vi, txqid, rxqid, M_NOWAIT | M_ZERO);
	if (toep == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->tid = alloc_atid(sc, toep);
	if (toep->tid < 0)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->l2te = t4_l2t_get(vi->pi, rt_ifp,
	    rt->rt_flags & RTF_GATEWAY ? rt->rt_gateway : nam);
	if (toep->l2te == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	isipv6 = nam->sa_family == AF_INET6;
	wr = alloc_wrqe(act_open_cpl_size(sc, isipv6), toep->ctrlq);
	if (wr == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->vnet = so->so_vnet;
	set_ulp_mode(toep, select_ulp_mode(so, sc, &settings));
	SOCKBUF_LOCK(&so->so_rcv);
	/* opt0 rcv_bufsiz initially, assumes its normal meaning later */
	toep->rx_credits = min(select_rcv_wnd(so) >> 10, M_RCV_BUFSIZ);
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * The kernel sets request_r_scale based on sb_max whereas we need to
	 * take hardware's MAX_RCV_WND into account too.  This is normally a
	 * no-op as MAX_RCV_WND is much larger than the default sb_max.
	 */
	if (tp->t_flags & TF_REQ_SCALE)
		rscale = tp->request_r_scale = select_rcv_wscale();
	else
		rscale = 0;
	mtu_idx = find_best_mtu_idx(sc, &inp->inp_inc, &settings);
	qid_atid = V_TID_QID(toep->ofld_rxq->iq.abs_id) | V_TID_TID(toep->tid) |
	    V_TID_COOKIE(CPL_COOKIE_TOM);

	if (isipv6) {
		struct cpl_act_open_req6 *cpl = wrtod(wr);
		struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
		struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

		if ((inp->inp_vflag & INP_IPV6) == 0)
			DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

		toep->ce = t4_hold_lip(sc, &inp->in6p_laddr, NULL);
		if (toep->ce == NULL)
			DONT_OFFLOAD_ACTIVE_OPEN(ENOENT);

		switch (chip_id(sc)) {
		case CHELSIO_T4:
			INIT_TP_WR(cpl, 0);
			cpl->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T5:
			INIT_TP_WR(cpl5, 0);
			cpl5->iss = htobe32(tp->iss);
			cpl5->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T6:
		default:
			INIT_TP_WR(cpl6, 0);
			cpl6->iss = htobe32(tp->iss);
			cpl6->params = select_ntuple(vi, toep->l2te);
			break;
		}
		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
		    qid_atid));
		cpl->local_port = inp->inp_lport;
		cpl->local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0];
		cpl->local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8];
		cpl->peer_port = inp->inp_fport;
		cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0];
		cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8];
		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
		    toep->rx_credits, toep->ulp_mode, &settings);
		cpl->opt2 = calc_opt2a(so, toep, &settings);
	} else {
		struct cpl_act_open_req *cpl = wrtod(wr);
		struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
		struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

		switch (chip_id(sc)) {
		case CHELSIO_T4:
			INIT_TP_WR(cpl, 0);
			cpl->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T5:
			INIT_TP_WR(cpl5, 0);
			cpl5->iss = htobe32(tp->iss);
			cpl5->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T6:
		default:
			INIT_TP_WR(cpl6, 0);
			cpl6->iss = htobe32(tp->iss);
			cpl6->params = select_ntuple(vi, toep->l2te);
			break;
		}
		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
		    qid_atid));
		inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,
		    &cpl->peer_ip, &cpl->peer_port);
		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
		    toep->rx_credits, toep->ulp_mode, &settings);
		cpl->opt2 = calc_opt2a(so, toep, &settings);
	}

	CTR5(KTR_CXGBE, "%s: atid %u (%s), toep %p, inp %p", __func__,
	    toep->tid, tcpstates[tp->t_state], toep, inp);

	offload_socket(so, toep);
	rc = t4_l2t_send(sc, wr, toep->l2te);
	if (rc == 0) {
		toep->flags |= TPF_CPL_PENDING;
		return (0);
	}

	undo_offload_socket(so);
	reason = __LINE__;
failed:
	CTR3(KTR_CXGBE, "%s: not offloading (%d), rc %d", __func__, reason, rc);

	if (wr)
		free_wrqe(wr);

	if (toep) {
		if (toep->tid >= 0)
			free_atid(sc, toep->tid);
		if (toep->l2te)
			t4_l2t_release(toep->l2te);
		if (toep->ce)
			t4_release_lip(sc, toep->ce);
		free_toepcb(toep);
	}

	return (rc);
}
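The snippet uses DONT_OFFLOAD_ACTIVE_OPEN() without showing its definition. Given how reason, rc, and the failed: label are used, it is presumably a small goto wrapper along these lines; this is a hedged reconstruction, not copied from the driver source:

/* Assumed shape of the macro, inferred from its uses above. */
#define	DONT_OFFLOAD_ACTIVE_OPEN(x)	do {				\
	reason = __LINE__;		/* which bailout site fired */	\
	rc = (x);			/* errno to return */		\
	goto failed;			/* shared cleanup path */	\
} while (0)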