Example #1
/*
 * An interrupt thread is ending a time slice, so compute the interval it
 * ran for and update the statistic for its PIL.
 */
void
cpu_intr_swtch_enter(kthread_id_t t)
{
	uint64_t	interval;
	uint64_t	start;
	cpu_t		*cpu;

	ASSERT((t->t_flag & T_INTR_THREAD) != 0);
	ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

	/*
	 * We could be here with a zero timestamp. This could happen if:
	 * an interrupt thread which no longer has a pinned thread underneath
	 * it (i.e. it blocked at some point in its past) has finished running
	 * its handler. intr_thread() updated the interrupt statistic for its
	 * PIL and zeroed its timestamp. Since there was no pinned thread to
	 * return to, swtch() gets called and we end up here.
	 *
	 * Note that we use atomic ops below (cas64 and atomic_add_64), which
	 * we don't use in the functions above, because we're not called
	 * with interrupts blocked, but the epilog/prolog functions are.
	 */
	if (t->t_intr_start) {
		do {
			start = t->t_intr_start;
			interval = tsc_read() - start;
		} while (cas64(&t->t_intr_start, start, 0) != start);
		cpu = CPU;
		cpu->cpu_m.intrstat[t->t_pil][0] += interval;

		atomic_add_64((uint64_t *)&cpu->cpu_intracct[cpu->cpu_mstate],
		    interval);
	} else
		ASSERT(t->t_intr == NULL);
}
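The cas64() retry loop above is an instance of a general read-and-reset pattern: atomically snapshot a counter and zero it, retrying if another CPU changed it in between. A minimal user-space sketch of the same pattern with C11 atomics (fetch_and_zero is illustrative, not part of the kernel code):

#include <stdatomic.h>
#include <stdint.h>

/*
 * Atomically read the current value of *ctr and reset it to zero,
 * mirroring the cas64() retry loop in the example above.
 */
static uint64_t
fetch_and_zero(_Atomic uint64_t *ctr)
{
	uint64_t old = atomic_load(ctr);

	/* A failed CAS reloads 'old' with the current value. */
	while (!atomic_compare_exchange_weak(ctr, &old, 0))
		;
	return (old);
}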
Example #2
template <typename PtrT, typename ValueT>
inline void sync_add_and_fetch(PtrT *ptr, const ValueT& val)
{
#if ELEVELDB_IS_SOLARIS
    atomic_add_64(ptr, val);
#else
    __sync_add_and_fetch(ptr, val);
#endif
}
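A hedged usage sketch (the counter and caller are illustrative, not part of eleveldb; the call site must be C++ so the template parameters can be deduced):

static volatile uint64_t g_op_count = 0;    /* illustrative counter */

inline void count_op()
{
    /* Atomically bumps the counter via whichever primitive the
     * platform check above selected. */
    sync_add_and_fetch(&g_op_count, (uint64_t)1);
}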
Example #3
void
syscall_mstate(int fromms, int toms)
{
	kthread_t *t = curthread;
	zone_t *z = ttozone(t);
	struct mstate *ms;
	hrtime_t *mstimep;
	hrtime_t curtime;
	klwp_t *lwp;
	hrtime_t newtime;
	cpu_t *cpu;
	uint16_t gen;

	if ((lwp = ttolwp(t)) == NULL)
		return;

	ASSERT(fromms < NMSTATES);
	ASSERT(toms < NMSTATES);

	ms = &lwp->lwp_mstate;
	mstimep = &ms->ms_acct[fromms];
	curtime = gethrtime_unscaled();
	newtime = curtime - ms->ms_state_start;
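	/*
	 * Unscaled hrtime values read on successive iterations may come
	 * from CPUs whose time sources are not perfectly synchronized,
	 * so the computed delta can transiently appear negative; the
	 * loop below retries the read until it is non-negative.
	 */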
	while (newtime < 0) {
		curtime = gethrtime_unscaled();
		newtime = curtime - ms->ms_state_start;
	}
	*mstimep += newtime;
	if (fromms == LMS_USER)
		atomic_add_64(&z->zone_utime, newtime);
	else if (fromms == LMS_SYSTEM)
		atomic_add_64(&z->zone_stime, newtime);
	t->t_mstate = toms;
	ms->ms_state_start = curtime;
	ms->ms_prev = fromms;
	kpreempt_disable(); /* don't change CPU while changing CPU's state */
	cpu = CPU;
	ASSERT(cpu == t->t_cpu);
	if ((toms != LMS_USER) && (cpu->cpu_mstate != CMS_SYSTEM)) {
		NEW_CPU_MSTATE(CMS_SYSTEM);
	} else if ((toms == LMS_USER) && (cpu->cpu_mstate != CMS_USER)) {
		NEW_CPU_MSTATE(CMS_USER);
	}
	kpreempt_enable();
}
Example #4
void
m_do_tx_compl_callback(struct mbuf *m, struct ifnet *ifp)
{
	int i;

	if (m == NULL)
		return;

	if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0)
		return;

#if (DEBUG || DEVELOPMENT)
	if (mbuf_tx_compl_debug != 0 && ifp != NULL &&
	    (ifp->if_xflags & IFXF_TIMESTAMP_ENABLED) != 0 &&
	    (m->m_pkthdr.pkt_flags & PKTF_DRV_TS_VALID) == 0) {
		struct timespec now;

		nanouptime(&now);
		net_timernsec(&now, &m->m_pkthdr.pkt_timestamp);
	}
#endif /* (DEBUG || DEVELOPMENT) */

	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
		mbuf_tx_compl_func callback;

		if ((m->m_pkthdr.pkt_compl_callbacks & (1 << i)) == 0)
			continue;

		lck_rw_lock_shared(mbuf_tx_compl_tbl_lock);
		callback = mbuf_tx_compl_table[i];
		lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock);

		if (callback != NULL) {
			callback(m->m_pkthdr.pkt_compl_context,
			    ifp, m->m_pkthdr.pkt_timestamp,
			    m->m_pkthdr.drv_tx_compl_arg,
			    m->m_pkthdr.drv_tx_compl_data,
			    m->m_pkthdr.drv_tx_status);
		}
	}
	m->m_pkthdr.pkt_compl_callbacks = 0;

#if (DEBUG || DEVELOPMENT)
	if (mbuf_tx_compl_debug != 0) {
		OSDecrementAtomic64(&mbuf_tx_compl_outstanding);
		if (ifp == NULL)
			atomic_add_64(&mbuf_tx_compl_aborted, 1);
	}
#endif /* (DEBUG || DEVELOPMENT) */
}
Example #5
static void
apix_dispatch_pending_autovect(uint_t ipl)
{
	uint32_t cpuid = psm_get_cpu_id();
	apix_impl_t *apixp = apixs[cpuid];
	struct autovec *av;

	while ((av = apix_remove_pending_av(apixp, ipl)) != NULL) {
		uint_t r;
		uint_t (*intr)() = av->av_vector;
		caddr_t arg1 = av->av_intarg1;
		caddr_t arg2 = av->av_intarg2;
		dev_info_t *dip = av->av_dip;
		uchar_t vector = av->av_flags & AV_PENTRY_VECTMASK;

		if (intr == NULL)
			continue;

		/* Don't enable interrupts during x-calls */
		if (ipl != XC_HI_PIL)
			sti();

		DTRACE_PROBE4(interrupt__start, dev_info_t *, dip,
		    void *, intr, caddr_t, arg1, caddr_t, arg2);
		r = (*intr)(arg1, arg2);
		DTRACE_PROBE4(interrupt__complete, dev_info_t *, dip,
		    void *, intr, caddr_t, arg1, uint_t, r);

		if (av->av_ticksp && av->av_prilevel <= LOCK_LEVEL)
			atomic_add_64(av->av_ticksp, intr_get_time());

		cli();

		if (vector) {
			if ((av->av_flags & AV_PENTRY_PEND) == 0)
				av->av_flags &= ~AV_PENTRY_VECTMASK;

			apix_post_hardint(vector);
		}

		/* mark it as idle */
		av->av_flags &= ~AV_PENTRY_ONPROC;
	}
}
Example #6
static void *
osif_malloc(sa_size_t size)
{
#ifdef IN_KERNEL
	void *tr;
	kern_return_t kr;
	
	kr = kernel_memory_allocate(kernel_map, &tr, size, 0, 0);
	
	if (kr == KERN_SUCCESS) {
		atomic_add_64(&bmalloc_allocated_total, size);
		return (tr);
	} else {
		return (NULL);
	}
#else
	return ((void*)malloc(size));
#endif /* IN_KERNEL */
}
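A matching release path would subtract the size from the same counter. A minimal sketch, assuming the usual kernel_map release call; osif_free itself is illustrative, not part of the example above:

static void
osif_free(void *buf, sa_size_t size)
{
#ifdef IN_KERNEL
	kmem_free(kernel_map, (vm_offset_t)buf, size);
	atomic_sub_64(&bmalloc_allocated_total, size);
#else
	free(buf);
#endif /* IN_KERNEL */
}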
Example #7
/*
 * dcopy_cmd_post()
 */
int
dcopy_cmd_post(dcopy_cmd_t cmd)
{
	dcopy_handle_t channel;
	int e;


	channel = cmd->dp_private->pr_channel;

	atomic_inc_64(&channel->ch_stat.cs_cmd_post.value.ui64);
	if (cmd->dp_cmd == DCOPY_CMD_COPY) {
		atomic_add_64(&channel->ch_stat.cs_bytes_xfer.value.ui64,
		    cmd->dp.copy.cc_size);
	}
	e = channel->ch_cb->cb_cmd_post(channel->ch_channel_private, cmd);
	if (e != DCOPY_SUCCESS) {
		return (e);
	}

	return (DCOPY_SUCCESS);
}
Example #8
/*
 * An interrupt thread is ending a time slice, so compute the interval it
 * ran for and update the statistic for its PIL.
 */
void
cpu_intr_swtch_enter(kthread_id_t t)
{
	uint64_t	interval;
	uint64_t	start;
	cpu_t		*cpu;

	ASSERT((t->t_flag & T_INTR_THREAD) != 0);
	ASSERT(t->t_pil > 0 && t->t_pil <= LOCK_LEVEL);

	/*
	 * We could be here with a zero timestamp. This could happen if:
	 * an interrupt thread which no longer has a pinned thread underneath
	 * it (i.e. it blocked at some point in its past) has finished running
	 * its handler. intr_thread() updated the interrupt statistic for its
	 * PIL and zeroed its timestamp. Since there was no pinned thread to
	 * return to, swtch() gets called and we end up here.
	 *
	 * It can also happen if an interrupt thread in intr_thread() calls
	 * preempt. It will have already taken care of updating stats. In
	 * this event, the interrupt thread will be runnable.
	 */
	if (t->t_intr_start) {
		do {
			start = t->t_intr_start;
			interval = CLOCK_TICK_COUNTER() - start;
		} while (atomic_cas_64(&t->t_intr_start, start, 0) != start);
		cpu = CPU;
		if (cpu->cpu_m.divisor > 1)
			interval *= cpu->cpu_m.divisor;
		cpu->cpu_m.intrstat[t->t_pil][0] += interval;

		atomic_add_64((uint64_t *)&cpu->cpu_intracct[cpu->cpu_mstate],
		    interval);
	} else
		ASSERT(t->t_intr == NULL || t->t_state == TS_RUN);
}
Example #9
/* ARGSUSED */
int
tswtcl_process(mblk_t **mpp, tswtcl_data_t *tswtcl_data,
    ipp_action_id_t *next_action)
{
	ipha_t *ipha;
	hrtime_t now;
	ip6_t *ip6_hdr;
	uint32_t pkt_len;
	mblk_t *mp = *mpp;
	hrtime_t deltaT;
	uint64_t bitsinwin;
	uint32_t min = 0, additive, rnd;
	tswtcl_cfg_t *cfg_parms = tswtcl_data->cfg_parms;

	if (mp == NULL) {
		tswtcl0dbg(("tswtcl_process: null mp!\n"));
		atomic_add_64(&tswtcl_data->epackets, 1);
		return (EINVAL);
	}

	if (mp->b_datap->db_type != M_DATA) {
		if ((mp->b_cont != NULL) &&
		    (mp->b_cont->b_datap->db_type == M_DATA)) {
			mp = mp->b_cont;
		} else {
			tswtcl0dbg(("tswtcl_process: no data\n"));
			atomic_add_64(&tswtcl_data->epackets, 1);
			return (EINVAL);
		}
	}

	/* Figure out the ToS/Traffic Class and length from the message */
	if ((mp->b_wptr - mp->b_rptr) < IP_SIMPLE_HDR_LENGTH) {
		if (!pullupmsg(mp, IP_SIMPLE_HDR_LENGTH)) {
			tswtcl0dbg(("tswtcl_process: pullup error\n"));
			atomic_add_64(&tswtcl_data->epackets, 1);
			return (EINVAL);
		}
	}
	ipha = (ipha_t *)mp->b_rptr;
	if (IPH_HDR_VERSION(ipha) == IPV4_VERSION) {
		pkt_len = ntohs(ipha->ipha_length);
	} else {
		ip6_hdr = (ip6_t *)mp->b_rptr;
		pkt_len = ntohs(ip6_hdr->ip6_plen) +
		    ip_hdr_length_v6(mp, ip6_hdr);
	}

	/* Convert into bits */
	pkt_len <<= 3;

	/* Get current time */
	now = gethrtime();

	/* Update the avg_rate and win_front tswtcl_data */
	mutex_enter(&tswtcl_data->tswtcl_lock);

	/* avg_rate = bits/sec and window in msec */
	bitsinwin = ((uint64_t)tswtcl_data->avg_rate * cfg_parms->window /
	    1000) + pkt_len;

	deltaT = now - tswtcl_data->win_front + cfg_parms->nsecwindow;

	tswtcl_data->avg_rate = (uint64_t)bitsinwin * METER_SEC_TO_NSEC /
	    deltaT;
	tswtcl_data->win_front = now;

	if (tswtcl_data->avg_rate <= cfg_parms->committed_rate) {
		*next_action = cfg_parms->green_action;
	} else if (tswtcl_data->avg_rate <= cfg_parms->peak_rate) {
		/*
		 * Compute the probability:
		 *
		 * p0 = (avg_rate - committed_rate) / avg_rate
		 *
		 * Yellow with probability p0
		 * Green with probability (1 - p0)
		 *
		 */
		uint32_t aminusc;

		/* Get a random no. between 0 and avg_rate */
		(void) random_get_pseudo_bytes((uint8_t *)&additive,
		    sizeof (additive));
		rnd = min + (additive % (tswtcl_data->avg_rate - min + 1));

		aminusc = tswtcl_data->avg_rate - cfg_parms->committed_rate;
		if (aminusc >= rnd) {
			*next_action = cfg_parms->yellow_action;
		} else {
			*next_action = cfg_parms->green_action;
		}
	} else {
		/*
		 * Compute the probability:
		 *
		 * p1 = (avg_rate - peak_rate) / avg_rate
		 * p2 = (peak_rate - committed_rate) / avg_rate
		 *
		 * Red with probability p1
		 * Yellow with probability p2
		 * Green with probability (1 - (p1 + p2))
		 *
		 */
		uint32_t  aminusp;

		/* Get a random no. between 0 and avg_rate */
		(void) random_get_pseudo_bytes((uint8_t *)&additive,
		    sizeof (additive));
		rnd = min + (additive % (tswtcl_data->avg_rate - min + 1));

		aminusp = tswtcl_data->avg_rate - cfg_parms->peak_rate;

		if (aminusp >= rnd) {
			*next_action = cfg_parms->red_action;
		} else if ((cfg_parms->pminusc + aminusp) >= rnd) {
			*next_action = cfg_parms->yellow_action;
		} else {
			*next_action = cfg_parms->green_action;
		}

	}
	mutex_exit(&tswtcl_data->tswtcl_lock);

	/* Update Stats */
	if (*next_action == cfg_parms->green_action) {
		atomic_add_64(&tswtcl_data->green_packets, 1);
		atomic_add_64(&tswtcl_data->green_bits, pkt_len);
	} else if (*next_action == cfg_parms->yellow_action) {
		atomic_add_64(&tswtcl_data->yellow_packets, 1);
		atomic_add_64(&tswtcl_data->yellow_bits, pkt_len);
	} else {
		ASSERT(*next_action == cfg_parms->red_action);
		atomic_add_64(&tswtcl_data->red_packets, 1);
		atomic_add_64(&tswtcl_data->red_bits, pkt_len);
	}
	return (0);
}
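Note how the meter realizes its probabilities without a division: rnd is drawn uniformly from [0, avg_rate], so testing aminusc >= rnd succeeds with (approximately) the probability p0 from the comment. A standalone sketch of the same draw-and-compare technique, assuming den <= RAND_MAX (the names and the rand() source are illustrative):

#include <stdint.h>
#include <stdlib.h>

/*
 * Return 1 with probability roughly num/den (0 <= num <= den <= RAND_MAX),
 * by drawing uniformly from [0, den] and comparing, as the meter above
 * does with its pseudo-random bytes.
 */
static int
with_probability(uint32_t num, uint32_t den)
{
	uint32_t rnd = (uint32_t)rand() % (den + 1);

	return (num >= rnd);
}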
Example #10
static void
splat_atomic_work(void *priv)
{
	atomic_priv_t *ap;
	atomic_op_t op;
	int i;

	ap = (atomic_priv_t *)priv;
	ASSERT(ap->ap_magic == SPLAT_ATOMIC_TEST_MAGIC);

	spin_lock(&ap->ap_lock);
	op = ap->ap_op;
	wake_up(&ap->ap_waitq);
	spin_unlock(&ap->ap_lock);

	splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
	    "Thread %d successfully started: %lu/%lu\n", op,
	    (long unsigned)ap->ap_atomic,
	    (long unsigned)ap->ap_atomic_exited);

	for (i = 0; i < SPLAT_ATOMIC_INIT_VALUE / 10; i++) {

		/* Periodically sleep to mix up the ordering */
		if ((i % (SPLAT_ATOMIC_INIT_VALUE / 100)) == 0) {
			splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
			    "Thread %d sleeping: %lu/%lu\n", op,
			    (long unsigned)ap->ap_atomic,
			    (long unsigned)ap->ap_atomic_exited);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 100);
		}

		switch (op) {
			case SPLAT_ATOMIC_INC_64:
				atomic_inc_64(&ap->ap_atomic);
				break;
			case SPLAT_ATOMIC_DEC_64:
				atomic_dec_64(&ap->ap_atomic);
				break;
			case SPLAT_ATOMIC_ADD_64:
				atomic_add_64(&ap->ap_atomic, 3);
				break;
			case SPLAT_ATOMIC_SUB_64:
				atomic_sub_64(&ap->ap_atomic, 3);
				break;
			case SPLAT_ATOMIC_ADD_64_NV:
				atomic_add_64_nv(&ap->ap_atomic, 5);
				break;
			case SPLAT_ATOMIC_SUB_64_NV:
				atomic_sub_64_nv(&ap->ap_atomic, 5);
				break;
			default:
				PANIC("Undefined op %d\n", op);
		}
	}

	atomic_inc_64(&ap->ap_atomic_exited);

	splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
	    "Thread %d successfully exited: %lu/%lu\n", op,
	    (long unsigned)ap->ap_atomic,
	    (long unsigned)ap->ap_atomic_exited);

	wake_up(&ap->ap_waitq);
	thread_exit();
}
Example #11
File: if_pflog.c Project: argp/xnu
int
pflog_packet(struct pfi_kif *kif, pbuf_t *pbuf, sa_family_t af, u_int8_t dir,
    u_int8_t reason, struct pf_rule *rm, struct pf_rule *am,
    struct pf_ruleset *ruleset, struct pf_pdesc *pd)
{
#if NBPFILTER > 0
	struct ifnet *ifn;
	struct pfloghdr hdr;
	struct mbuf *m;

	LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED);

	if (kif == NULL || !pbuf_is_valid(pbuf) || rm == NULL || pd == NULL)
		return (-1);

	if (rm->logif >= PFLOGIFS_MAX ||
	    (ifn = pflogifs[rm->logif]) == NULL || !ifn->if_bpf) {
		return (0);
	}

	if ((m = pbuf_to_mbuf(pbuf, FALSE)) == NULL)
		return (0);

	bzero(&hdr, sizeof (hdr));
	hdr.length = PFLOG_REAL_HDRLEN;
	hdr.af = af;
	hdr.action = rm->action;
	hdr.reason = reason;
	memcpy(hdr.ifname, kif->pfik_name, sizeof (hdr.ifname));

	if (am == NULL) {
		hdr.rulenr = htonl(rm->nr);
		hdr.subrulenr = -1;
	} else {
		hdr.rulenr = htonl(am->nr);
		hdr.subrulenr = htonl(rm->nr);
		if (ruleset != NULL && ruleset->anchor != NULL)
			strlcpy(hdr.ruleset, ruleset->anchor->name,
			    sizeof (hdr.ruleset));
	}
	if (rm->log & PF_LOG_SOCKET_LOOKUP && !pd->lookup.done)
		pd->lookup.done = pf_socket_lookup(dir, pd);
	if (pd->lookup.done > 0) {
		hdr.uid = pd->lookup.uid;
		hdr.pid = pd->lookup.pid;
	} else {
		hdr.uid = UID_MAX;
		hdr.pid = NO_PID;
	}
	hdr.rule_uid = rm->cuid;
	hdr.rule_pid = rm->cpid;
	hdr.dir = dir;

#if INET
	if (af == AF_INET && dir == PF_OUT) {
		struct ip *ip;

		ip = mtod(m, struct ip *);
		ip->ip_sum = 0;
		ip->ip_sum = in_cksum(m, ip->ip_hl << 2);
	}
#endif /* INET */

	atomic_add_64(&ifn->if_opackets, 1);
	atomic_add_64(&ifn->if_obytes, m->m_pkthdr.len);

	switch (dir) {
	case PF_IN:
		bpf_tap_in(ifn, DLT_PFLOG, m, &hdr, PFLOG_HDRLEN);
		break;

	case PF_OUT:
		bpf_tap_out(ifn, DLT_PFLOG, m, &hdr, PFLOG_HDRLEN);
		break;

	default:
		break;
	}
#endif /* NBPFILTER > 0 */
	return (0);
}
Example #12
void
atomic_dec_64(volatile uint64_t *addr)
{

	atomic_add_64(addr, -1);
}
Example #13
void
atomic_inc_64(volatile uint64_t *addr)
{

	atomic_add_64(addr, 1);
}
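Examples #12 and #13 build decrement and increment from atomic_add_64() with deltas of -1 and +1; the negative delta works because unsigned 64-bit addition wraps around. A hedged C11 equivalent of the underlying primitive (illustrative, not the kernel implementation):

#include <stdatomic.h>
#include <stdint.h>

static inline void
add_64(_Atomic uint64_t *addr, int64_t delta)
{
	/* Two's-complement wraparound makes delta == -1 a decrement. */
	atomic_fetch_add(addr, (uint64_t)delta);
}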
Example #14
/*
 * This is the common dispatch function for SMB2, used for both
 * synchronous and asynchronous requests.  In the async case,
 * this runs twice: once for the initial processing where the
 * initial handler returns NT_STATUS_PENDING, and then a second
 * time (with async_func != NULL) for the "real work".
 * Note the async_func == NULL for "normal" calls, and the
 * handler function is taken from the dispatch table.
 */
static int
smb2sr_dispatch(smb_request_t *sr,
	smb_sdrc_t	(*async_func)(smb_request_t *))
{
	const smb_disp_entry_t	*sdd;
	smb_disp_stats_t	*sds;
	smb_session_t		*session;
	smb_server_t		*server;
	boolean_t		related;
	int			rc = 0;

	session = sr->session;
	server = session->s_server;

	/*
	 * Validate the command code, get dispatch table entries.
	 * [MS-SMB2] 3.3.5.2.6 Handling Incorrectly Formatted...
	 *
	 * The last slot in the dispatch table is used to handle
	 * invalid commands.  Same for statistics.
	 */
	if (sr->smb2_cmd_code < SMB2_INVALID_CMD) {
		sdd = &smb2_disp_table[sr->smb2_cmd_code];
		sds = &server->sv_disp_stats2[sr->smb2_cmd_code];
	} else {
		sdd = &smb2_disp_table[SMB2_INVALID_CMD];
		sds = &server->sv_disp_stats2[SMB2_INVALID_CMD];
	}

	if (sr->smb2_hdr_flags & SMB2_FLAGS_SERVER_TO_REDIR) {
		smb2sr_put_error(sr, NT_STATUS_INVALID_PARAMETER);
		goto done;
	}

	/*
	 * If this command is NOT "related" to the previous,
	 * clear out the UID, TID, FID state that might be
	 * left over from the previous command.
	 *
	 * Also, if the command IS related, but is declining to
	 * inherit the previous UID or TID, then clear out the
	 * previous session or tree now.  This simplifies the
	 * inheritance logic below.  Similar logic for FIDs
	 * happens in smb2sr_lookup_fid()
	 */
	related = (sr->smb2_hdr_flags & SMB2_FLAGS_RELATED_OPERATIONS);
	if (!related &&
	    sr->fid_ofile != NULL) {
		smb_ofile_request_complete(sr->fid_ofile);
		smb_ofile_release(sr->fid_ofile);
		sr->fid_ofile = NULL;
	}
	if ((!related || sr->smb_tid != INHERIT_ID) &&
	    sr->tid_tree != NULL) {
		smb_tree_release(sr->tid_tree);
		sr->tid_tree = NULL;
	}
	if ((!related || sr->smb_uid != INHERIT_ID) &&
	    sr->uid_user != NULL) {
		smb_user_release(sr->uid_user);
		sr->uid_user = NULL;
	}

	/*
	 * Make sure we have a user and tree as needed
	 * according to the flags for this command.
	 * In a compound, a "related" command may inherit
	 * the UID, TID, and FID from previous commands
	 * using the special INHERIT_ID (all ones).
	 */

	if ((sdd->sdt_flags & SDDF_SUPPRESS_UID) == 0) {
		/*
		 * This command requires a user session.
		 */
		if (related && sr->smb_uid == INHERIT_ID &&
		    sr->uid_user != NULL) {
			sr->smb_uid = sr->uid_user->u_uid;
		} else {
			ASSERT3P(sr->uid_user, ==, NULL);
			sr->uid_user = smb_session_lookup_uid(session,
			    sr->smb_uid);
		}
		if (sr->uid_user == NULL) {
			/* [MS-SMB2] 3.3.5.2.9 Verifying the Session */
			smb2sr_put_error(sr, NT_STATUS_USER_SESSION_DELETED);
			goto done;
		}
		sr->user_cr = smb_user_getcred(sr->uid_user);
	}

	if ((sdd->sdt_flags & SDDF_SUPPRESS_TID) == 0) {
		/*
		 * This command requires a tree connection.
		 */
		if (related && sr->smb_tid == INHERIT_ID &&
		    sr->tid_tree != NULL) {
			sr->smb_tid = sr->tid_tree->t_tid;
		} else {
			ASSERT3P(sr->tid_tree, ==, NULL);
			sr->tid_tree = smb_session_lookup_tree(session,
			    sr->smb_tid);
		}
		if (sr->tid_tree == NULL) {
			/* [MS-SMB2] 3.3.5.2.11 Verifying the Tree Connect */
			smb2sr_put_error(sr, NT_STATUS_NETWORK_NAME_DELETED);
			goto done;
		}
	}

	/*
	 * The real work: call the SMB2 command handler.
	 */
	sr->sr_time_start = gethrtime();
	if (async_func != NULL) {
		rc = (*async_func)(sr);
	} else {
		/* NB: not using pre_op */
		rc = (*sdd->sdt_function)(sr);
		/* NB: not using post_op */
	}

	MBC_FLUSH(&sr->raw_data);

done:
	/*
	 * Pad the reply to align(8) if necessary.
	 */
	if (sr->reply.chain_offset & 7) {
		int padsz = 8 - (sr->reply.chain_offset & 7);
		(void) smb_mbc_encodef(&sr->reply, "#.", padsz);
	}
	ASSERT((sr->reply.chain_offset & 7) == 0);

	/*
	 * Record some statistics: latency, rx bytes, tx bytes
	 */
	smb_latency_add_sample(&sds->sdt_lat,
	    gethrtime() - sr->sr_time_start);
	atomic_add_64(&sds->sdt_rxb,
	    (int64_t)(sr->command.chain_offset - sr->smb2_cmd_hdr));
	atomic_add_64(&sds->sdt_txb,
	    (int64_t)(sr->reply.chain_offset - sr->smb2_reply_hdr));

	return (rc);
}
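The epilogue pads the reply by padsz = 8 - (offset & 7) bytes whenever the chain offset is not already 8-byte aligned. The same computation as a standalone helper (a hedged sketch; the name is illustrative):

#include <stdint.h>

/* Number of pad bytes needed to round 'off' up to a multiple of 8. */
static inline uint32_t
pad8(uint32_t off)
{
	return ((8 - (off & 7)) & 7);
}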
Example #15
/*ARGSUSED*/
static void
fipe_idle_enter(void *arg, cpu_idle_callback_context_t ctx,
    cpu_idle_check_wakeup_t check_func, void* check_arg)
{
	hrtime_t ts;
	uint32_t cnt;
	uint64_t iowait;
	cpu_t *cp = CPU;
	struct fipe_cpu_state *sp;

	sp = &fipe_cpu_states[cp->cpu_id];
	ts = cpu_idle_prop_get_hrtime(fipe_idle_ctrl.prop_enter, ctx);

	if (fipe_pm_policy != FIPE_PM_POLICY_DISABLE &&
	    fipe_ioat_ctrl.ioat_ready &&
	    sp->state_ready && sp->throttle_ts <= ts) {
		/* Adjust iowait count for local CPU. */
		iowait = CPU_STATS(cp, sys.iowait);
		if (iowait != sp->last_iowait) {
			atomic_add_64(&fipe_gbl_ctrl.io_waiters,
			    iowait - sp->last_iowait);
			sp->last_iowait = iowait;
		}

		/* Check current CPU status. */
		if (fipe_check_cpu(sp, ctx, ts)) {
			/* Increase count of CPU ready for power saving. */
			do {
				cnt = fipe_gbl_ctrl.cpu_count;
				ASSERT(cnt < ncpus);
			} while (atomic_cas_32(&fipe_gbl_ctrl.cpu_count,
			    cnt, cnt + 1) != cnt);

			/*
			 * Enable power saving if all CPUs are idle.
			 */
			if (cnt + 1 == ncpus) {
				if (fipe_gbl_ctrl.io_waiters == 0) {
					fipe_gbl_ctrl.enter_ts = ts;
					fipe_enable(fipe_pm_throttle_level,
					    check_func, check_arg);
				/* There are ongoing block io operations. */
				} else {
					FIPE_KSTAT_DETAIL_INC(bio_busy_cnt);
				}
			}
		}
	} else if (fipe_pm_policy == FIPE_PM_POLICY_DISABLE ||
	    fipe_ioat_ctrl.ioat_ready == B_FALSE) {
		if (sp->cond_ready == B_TRUE) {
			sp->cond_ready = B_FALSE;
		}
	} else if (sp->state_ready == B_FALSE) {
		sp->cond_ready = B_FALSE;
		sp->state_ready = B_TRUE;
		sp->throttle_ts = 0;
		sp->next_ts = ts + fipe_idle_ctrl.tick_interval;
		sp->last_busy = cpu_idle_prop_get_hrtime(
		    fipe_idle_ctrl.prop_busy, ctx);
		sp->last_idle = cpu_idle_prop_get_hrtime(
		    fipe_idle_ctrl.prop_idle, ctx);
		sp->last_intr = cpu_idle_prop_get_hrtime(
		    fipe_idle_ctrl.prop_intr, ctx);
		sp->idle_count = 0;
	}
}
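The counting loop in fipe_idle_enter() is a hand-rolled fetch-and-increment, kept open-coded so the ASSERT can check the old value on every retry. Without that invariant check, a one-line equivalent using the Solaris atomic API would be (a hedged sketch):

	cnt = atomic_inc_32_nv(&fipe_gbl_ctrl.cpu_count) - 1;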
Example #16
/* ARGSUSED */
static int
ipgpc_invoke_action(ipp_action_id_t aid, ipp_packet_t *packet)
{
	ipgpc_class_t *out_class;
	hrtime_t start, end;
	mblk_t *mp = NULL;
	ip_priv_t *priv = NULL;
	ill_t *ill = NULL;
	ipha_t *ipha;
	ip_proc_t callout_pos;
	int af;
	int rc;
	ipgpc_packet_t pkt;
	uint_t ill_idx;

	/* extract packet data */
	mp = ipp_packet_get_data(packet);
	ASSERT(mp != NULL);

	priv = (ip_priv_t *)ipp_packet_get_private(packet);
	ASSERT(priv != NULL);

	callout_pos = priv->proc;
	ill_idx = priv->ill_index;

	/* If we don't get an M_DATA, then return an error */
	if (mp->b_datap->db_type != M_DATA) {
		if ((mp->b_cont != NULL) &&
		    (mp->b_cont->b_datap->db_type == M_DATA)) {
			mp = mp->b_cont; /* jump over the M_CTL into M_DATA */
		} else {
			ipgpc0dbg(("ipgpc_invoke_action: no data\n"));
			atomic_add_64(&ipgpc_epackets, 1);
			return (EINVAL);
		}
	}

	/*
	 * Translate the callout_pos into the direction the packet is traveling
	 */
	if (callout_pos != IPP_LOCAL_IN) {
		if (callout_pos & IPP_LOCAL_OUT) {
			callout_pos = IPP_LOCAL_OUT;
		} else if (callout_pos & IPP_FWD_IN) {
			callout_pos = IPP_FWD_IN;
		} else {	/* IPP_FWD_OUT */
			callout_pos = IPP_FWD_OUT;
		}
	}

	/* parse the packet from the message block */
	ipha = (ipha_t *)mp->b_rptr;
	/* Determine IP Header Version */
	if (IPH_HDR_VERSION(ipha) == IPV4_VERSION) {
		parse_packet(&pkt, mp);
		af = AF_INET;
	} else {
		parse_packet6(&pkt, mp);
		af = AF_INET6;
	}

	pkt.direction = callout_pos; /* set packet direction */

	/* The ill_index could be 0 when called from forwarding (read) path */
	if (ill_idx > 0)
		ill = ill_lookup_on_ifindex_global_instance(ill_idx, B_FALSE);

	if (ill != NULL) {
		/*
		 * Since all IPP actions in an IPMP group are performed
		 * relative to the IPMP group interface, if this is an
		 * underlying interface in an IPMP group, use the IPMP
		 * group interface's index.
		 */
		if (IS_UNDER_IPMP(ill))
			pkt.if_index = ipmp_ill_get_ipmp_ifindex(ill);
		else
			pkt.if_index = ill->ill_phyint->phyint_ifindex;
		/* Got the field from the ILL, go ahead and refrele */
		ill_refrele(ill);
	} else {
		/* unknown if_index */
		pkt.if_index = IPGPC_UNSPECIFIED;
	}

	if (ipgpc_debug > 5) {
		/* print pkt under high debug level */
#ifdef	IPGPC_DEBUG
		print_packet(af, &pkt);
#endif
	}
	if (ipgpc_debug > 3) {
		start = gethrtime(); /* start timer */
	}

	/* classify this packet */
	out_class = ipgpc_classify(af, &pkt);

	if (ipgpc_debug > 3) {
		end = gethrtime(); /* stop timer */
	}

	/* ipgpc_classify will only return NULL if a memory error occurred */
	if (out_class == NULL) {
		atomic_add_64(&ipgpc_epackets, 1);
		return (ENOMEM);
	}

	ipgpc1dbg(("ipgpc_invoke_action: class = %s", out_class->class_name));
	/* print time to classify(..) */
	ipgpc2dbg(("ipgpc_invoke_action: time = %lld nsec\n", (end - start)));

	if ((rc = ipp_packet_add_class(packet, out_class->class_name,
	    out_class->next_action)) != 0) {
		atomic_add_64(&ipgpc_epackets, 1);
		ipgpc0dbg(("ipgpc_invoke_action: ipp_packet_add_class " \
		    "failed with error %d", rc));
		return (rc);
	}
	return (ipp_packet_next(packet, IPP_ACTION_CONT));
}
Example #17
static uri_desc_t *
http_mkresponse(uri_desc_t *req, uri_desc_t *res, char *proto, int sz)
{
	http_t		*qhttp = req->scheme;
	http_t		*shttp = res->scheme;
	uri_desc_t	*uri = kmem_cache_alloc(nl7c_uri_kmc, KM_SLEEP);
	char		*alloc;
	char		*cp;
	char		*ep = &proto[sz];
	uri_rd_t	*rdp;
	int		cnt;

	char		hdr_etag[] = "ETag: ";

	/* Any optional header(s) */
	if (shttp->etag.cp != NULL) {
		/* Response has an ETag:, count it */
		sz += sizeof (hdr_etag) - 1 +
		    (shttp->etag.ep - shttp->etag.cp) + 2;
	}
	sz += 2;
	alloc = kmem_alloc(sz, KM_SLEEP);

	/* Minimum temp uri initialization as needed by uri_response() */
	REF_INIT(uri, 1, nl7c_uri_inactive, nl7c_uri_kmc);
	uri->hash = URI_TEMP;
	uri->tail = NULL;
	uri->scheme = NULL;
	uri->reqmp = NULL;
	uri->count = 0;
	cv_init(&uri->waiting, NULL, CV_DEFAULT, NULL);
	mutex_init(&uri->proclock, NULL, MUTEX_DEFAULT, NULL);

	URI_RD_ADD(uri, rdp, sz, -1);
	rdp->data.kmem = alloc;
	atomic_add_64(&nl7c_uri_bytes, sz);

	cp = alloc;
	if (qhttp->major == 1) {
		/*
		 * Full response format.
		 *
		 * Copy to first sub char '#'.
		 */
		while (proto < ep) {
			if (*proto == '#')
				break;
			*cp++ = *proto++;
		}

		/* Process the HTTP version substitutions */
		if (*proto != '#') goto bad;
		*cp++ = '0' + qhttp->major;
		proto++;
		while (proto < ep) {
			if (*proto == '#')
				break;
			*cp++ = *proto++;
		}
		if (*proto != '#') goto bad;
		*cp++ = '0' + qhttp->minor;
		proto++;

		/* Copy to the next sub char '#' */
		while (proto < ep) {
			if (*proto == '#')
				break;
			*cp++ = *proto++;
		}

		/* Process the "Date: " substitution */
		if (*proto != '#') goto bad;
		http_today(cp);

		/* Skip to the next nonsub char '#' */
		while (proto < ep) {
			if (*proto != '#')
				break;
			cp++;
			proto++;
		}

		/* Copy to the next sub char '#' */
		while (proto < ep) {
			if (*proto == '#')
				break;
			*cp++ = *proto++;
		}

		/* Process the NCA version substitutions */
		if (*proto != '#') goto bad;
		*cp++ = '0' + nca_major_version;
		proto++;
		while (proto < ep) {
			if (*proto == '#')
				break;
			*cp++ = *proto++;
		}
		if (*proto != '#') goto bad;
		*cp++ = '0' + nca_minor_version;
		proto++;

		/* Copy remainder of HTTP header */
		while (proto < ep) {
			*cp++ = *proto++;
		}
	} else {
		goto bad;
	}
	/* Any optional header(s) */
	if (shttp->etag.cp != NULL) {
		/* Response has an ETag:, add it */
		cnt = sizeof (hdr_etag) - 1;
		bcopy(hdr_etag, cp, cnt);
		cp += cnt;
		cnt = (shttp->etag.ep - shttp->etag.cp);
		bcopy(shttp->etag.cp, cp, cnt);
		cp += cnt;
		*cp++ = '\r';
		*cp++ = '\n';
	}
	/* Last, add empty line */
	uri->eoh = cp;
	*cp++ = '\r';
	*cp = '\n';

	return (uri);

bad:
	/*
	 * Free any resources allocated here. Note that while we could
	 * use uri_inactive() to free the uri by doing a REF_RELE(),
	 * we instead free it here, as the URI may be in less than a
	 * fully initialized state.
	 */
	kmem_free(alloc, sz);
	kmem_cache_free(nl7c_uri_kmc, uri);
	return (NULL);
}