Example 1
/*
 * dm2s_start - Start transmission function.
 *
 * Send all queued messages. If the mailbox is busy, then
 * start a timeout as a polling mechanism. The timeout ensures
 * that we do not rely entirely on the SCF_MB_SPACE event.
 */
void
dm2s_start(queue_t *wq, dm2s_t *dm2sp)
{
	mblk_t *mp;
	int ret;

	DPRINTF(DBG_DRV, ("dm2s_start: called\n"));
	ASSERT(dm2sp != NULL);
	ASSERT(MUTEX_HELD(&dm2sp->ms_lock));

	while ((mp = getq(wq)) != NULL) {
		switch (mp->b_datap->db_type) {

		case M_DATA:
			ret = dm2s_transmit(wq, mp, dm2sp->ms_target,
			    dm2sp->ms_key);
			if (ret == EBUSY || ret == ENOSPC || ret == EAGAIN) {
				DPRINTF(DBG_MBOX,
				    ("dm2s_start: recoverable err=%d\n", ret));
				/*
				 * Start a timeout to retry later.
				 */
				if (dm2sp->ms_wq_timeoutid == 0) {
					DTRACE_PROBE1(dm2s_wqtimeout__start,
					    dm2s_t, dm2sp);
					dm2sp->ms_wq_timeoutid = qtimeout(wq,
					    dm2s_wq_timeout, (void *)dm2sp,
					    dm2s_timeout_val(ret));
				}
				return;
			} else if (ret != 0) {
				mutex_exit(&dm2sp->ms_lock);
				/*
				 * An error occurred during transmission;
				 * flush pending messages and initiate a
				 * hangup.
				 */
				flushq(wq, FLUSHDATA);
				(void) putnextctl(RD(wq), M_HANGUP);
				DTRACE_PROBE1(dm2s_hangup, dm2s_t, dm2sp);
				DPRINTF(DBG_WARN,
				    ("dm2s_start: hangup transmit err=%d\n",
				    ret));
				mutex_enter(&dm2sp->ms_lock);
			}
			break;
		default:
			/*
			 * At this point, we don't expect any other messages.
			 */
			freemsg(mp);
			break;
		}
	}
}
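
The retry path above relies on a qtimeout() callback and a backoff helper that are not part of this excerpt. A minimal sketch of what they could look like, following the STREAMS conventions used above (only the names dm2s_wq_timeout and dm2s_timeout_val come from the call site; the bodies are assumptions):

/*
 * Sketch (assumption): when the poll timer fires, clear the
 * timeout id and re-enable the write queue so that dm2s_wsrv()
 * calls dm2s_start() again.
 */
static void
dm2s_wq_timeout(void *arg)
{
	dm2s_t *dm2sp = (dm2s_t *)arg;

	mutex_enter(&dm2sp->ms_lock);
	dm2sp->ms_wq_timeoutid = 0;
	if (dm2sp->ms_wq != NULL)
		qenable(dm2sp->ms_wq);	/* re-run the service procedure */
	mutex_exit(&dm2sp->ms_lock);
}

/*
 * Sketch (assumption): poll sooner when the mailbox was merely
 * busy, later when it reported no space.
 */
static clock_t
dm2s_timeout_val(int error)
{
	return (drv_usectohz(error == EBUSY ? 10000 : 50000));
}
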
Example 2
/* ARGSUSED */
int
dm2s_close(queue_t *rq, int flag, cred_t *cred)
{
	dm2s_t *dm2sp = (dm2s_t *)rq->q_ptr;

	DPRINTF(DBG_DRV, ("dm2s_close: called\n"));
	if (dm2sp == NULL) {
		/* Already closed once */
		return (ENODEV);
	}

	/* Close the lower layer first */
	mutex_enter(&dm2sp->ms_lock);
	(void) scf_mb_flush(dm2sp->ms_target, dm2sp->ms_key, MB_FLUSH_ALL);
	dm2s_mbox_fini(dm2sp);
	mutex_exit(&dm2sp->ms_lock);

	/*
	 * Now we can assume that no asynchronous callbacks exist.
	 * Poison the stream head so that we can't be pushed again.
	 */
	(void) putnextctl(rq, M_HANGUP);
	qprocsoff(rq);
	if (dm2sp->ms_rbufcid != 0) {
		qunbufcall(rq, dm2sp->ms_rbufcid);
		dm2sp->ms_rbufcid = 0;
	}
	if (dm2sp->ms_rq_timeoutid != 0) {
		DTRACE_PROBE1(dm2s_rqtimeout__cancel, dm2s_t, dm2sp);
		(void) quntimeout(dm2sp->ms_rq, dm2sp->ms_rq_timeoutid);
		dm2sp->ms_rq_timeoutid = 0;
	}
	if (dm2sp->ms_wq_timeoutid != 0) {
		DTRACE_PROBE1(dm2s_wqtimeout__cancel, dm2s_t, dm2sp);
		(void) quntimeout(dm2sp->ms_wq, dm2sp->ms_wq_timeoutid);
		dm2sp->ms_wq_timeoutid = 0;
	}
	/*
	 * Now we can really mark it closed.
	 */
	mutex_enter(&dm2sp->ms_lock);
	dm2sp->ms_rq = dm2sp->ms_wq = NULL;
	dm2sp->ms_state &= ~DM2S_OPENED;
	mutex_exit(&dm2sp->ms_lock);

	rq->q_ptr = WR(rq)->q_ptr = NULL;
	(void) qassociate(rq, -1);
	DPRINTF(DBG_DRV, ("dm2s_close: successfully closed\n"));
	return (0);
}
Example 3
/*
 * XDR decode the long reply write chunk.
 */
bool_t
xdr_decode_reply_wchunk(XDR *xdrs, struct clist **clist)
{
	bool_t		have_rchunk = FALSE;
	struct clist	*first = NULL, *ncl = NULL;
	uint32_t	num_wclist;
	uint32_t	i;

	if (!xdr_bool(xdrs, &have_rchunk))
		return (FALSE);

	if (have_rchunk == FALSE)
		return (TRUE);

	if (!xdr_uint32(xdrs, &num_wclist)) {
		DTRACE_PROBE(krpc__e__xdrrdma__replywchunk__listlength);
		return (FALSE);
	}

	if (num_wclist == 0) {
		return (FALSE);
	}

	first = ncl = clist_alloc();

	for (i = 0; i < num_wclist; i++) {

		if (i > 0) {
			ncl->c_next = clist_alloc();
			ncl = ncl->c_next;
		}

		if (!xdr_uint32(xdrs, &ncl->c_dmemhandle.mrc_rmr))
			goto err_out;
		if (!xdr_uint32(xdrs, &ncl->c_len))
			goto err_out;
		if (!xdr_uint64(xdrs, &ncl->u.c_daddr))
			goto err_out;

		if (ncl->c_len > MAX_SVC_XFER_SIZE) {
			DTRACE_PROBE(
			    krpc__e__xdrrdma__replywchunk__chunklist_toobig);
			ncl->c_len = MAX_SVC_XFER_SIZE;
		}
		if (!(ncl->c_dmemhandle.mrc_rmr &&
		    (ncl->c_len > 0) && ncl->u.c_daddr))
			DTRACE_PROBE(
			    krpc__e__xdrrdma__replywchunk__invalid_segaddr);

		DTRACE_PROBE1(krpc__i__xdr_decode_reply_wchunk_c_len,
		    uint32_t, ncl->c_len);

	}
	*clist = first;
	return (TRUE);

err_out:
	clist_free(first);
	return (FALSE);
}
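
A decoder like this is driven from an XDR stream positioned over the received message. A usage sketch, assuming a flat receive buffer (decode_wchunk_from_buf is a hypothetical helper; xdrmem_create() is the standard way to overlay an XDR stream on memory):

static struct clist *
decode_wchunk_from_buf(caddr_t buf, uint_t len)
{
	XDR xdrs;
	struct clist *cl = NULL;

	/* Overlay a decoding XDR stream on the raw buffer. */
	xdrmem_create(&xdrs, buf, len, XDR_DECODE);
	if (!xdr_decode_reply_wchunk(&xdrs, &cl))
		return (NULL);	/* on failure the decoder freed any partial list */
	return (cl);
}
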
Example 4
static	bool_t
xdrrdma_getint32(XDR *xdrs, int32_t *int32p)
{
	xrdma_private_t	*xdrp = (xrdma_private_t *)(xdrs->x_private);
	int chunked = 0;

	if ((xdrs->x_handy -= (int)sizeof (int32_t)) < 0) {
		/*
		 * check if rest of the rpc message is in a chunk
		 */
		if (!xdrrdma_read_a_chunk(xdrs, &xdrp->xp_conn)) {
			return (FALSE);
		}
		chunked = 1;
	}

	/* LINTED pointer alignment */
	*int32p = (int32_t)ntohl((uint32_t)(*((int32_t *)(xdrp->xp_offp))));

	DTRACE_PROBE1(krpc__i__xdrrdma_getint32, int32_t, *int32p);

	xdrp->xp_offp += sizeof (int32_t);

	if (chunked)
		xdrs->x_handy -= (int)sizeof (int32_t);

	if (xdrp->xp_off != 0) {
		xdrp->xp_off += sizeof (int32_t);
	}

	return (TRUE);
}
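
For contrast, a minimal sketch of the plain in-memory accounting that this RDMA variant extends: x_handy counts the bytes remaining, and when it underflows there is no chunk to fall back on. (Simplified assumption; the real xdrmem code is part of the shared XDR implementation.)

static bool_t
xdrbuf_getint32(XDR *xdrs, int32_t *int32p)
{
	if ((xdrs->x_handy -= (int)sizeof (int32_t)) < 0)
		return (FALSE);	/* buffer exhausted, no chunk fallback */

	/* LINTED pointer alignment */
	*int32p = (int32_t)ntohl(*((uint32_t *)xdrs->x_private));
	xdrs->x_private += sizeof (int32_t);
	return (TRUE);
}
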
Example 5
/*
 * Transition the current processor to the requested state.
 */
static void
pwrnow_pstate_transition(uint32_t req_state)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_pstate_t *req_pstate;
	uint32_t ctrl;

	req_pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
	req_pstate += req_state;

	DTRACE_PROBE1(pwrnow_transition_freq, uint32_t,
	    CPU_ACPI_FREQ(req_pstate));

	/*
	 * Initiate the processor p-state change.
	 */
	ctrl = CPU_ACPI_PSTATE_CTRL(req_pstate);
	write_ctrl(handle, ctrl);

	if (mach_state->ms_turbo != NULL)
		cpupm_record_turbo_info(mach_state->ms_turbo,
		    mach_state->ms_pstate.cma_state.pstate, req_state);

	mach_state->ms_pstate.cma_state.pstate = req_state;
	cpu_set_curr_clock((uint64_t)CPU_ACPI_FREQ(req_pstate) * 1000000);
}
Example 6
/*
*   This probe will fire each time a new TCP connection is created
*
*   arg0 -> The address of the conn_rec structure, from which the
*           client's IP address can be obtained
*/
int apache_accept_connection(conn_rec *c, void *csd)
{
    DTRACE_PROBE1(apache,
                  accept__connection,
                  c);

    return DECLINED;
}
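
When argument setup for a probe is expensive, the header generated by dtrace -h also provides an is-enabled predicate per probe (the sdt_misc example further down uses this pattern). A hypothetical variant of the hook above; the APACHE_ACCEPT_CONNECTION* macros are assumed for illustration, not taken from the Apache source:

int apache_accept_connection_guarded(conn_rec *c, void *csd)
{
    /*
     * Assumption: APACHE_ACCEPT_CONNECTION{,_ENABLED} come from a
     * dtrace -h generated header for an "apache" provider.  The
     * guard makes the probe cost almost nothing when no DTrace
     * consumer has enabled it.
     */
    if (APACHE_ACCEPT_CONNECTION_ENABLED())
        APACHE_ACCEPT_CONNECTION(c);

    return DECLINED;
}
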
Example 7
/*
*   This probe will fire when the authentication stage is encountered
*
*   arg0 -> The address of the request_rec structure
*
*/
int apache_check_user(request_rec *r)
{
    DTRACE_PROBE1(apache,
                  check__user__credentials,
                  r);

    return DECLINED;
}
Example 8
/*
*   This probe will fire when the access checking stage is encountered
*
*   arg0 -> The address of the request_rec structure
*
*/
int apache_check_access(request_rec *r)
{
    DTRACE_PROBE1(apache,
                  check__access,
                  r);

    return DECLINED;
}
Example 9
/*
*   This probe will fire when the authorization checking stage is encountered
*
*   arg0 -> The address of the request_rec structure
*
*/
int apache_check_authorization(request_rec *r)
{
    DTRACE_PROBE1(apache,
                  check__authorization,
                  r);

    return DECLINED;
}
Example 10
/*
*   Probe Function Purpose:
*   This probe will fire each time a request is sent to the server.
*
*   arg0 -> The address of the request_rec structure
*/
int apache_receive_request(request_rec *r)
{
    DTRACE_PROBE1(apache, 
                  receive__request, 
                  r);
   
    return DECLINED;
}
Example 11
/* ARGSUSED */
static size_t
iscsi_net_sendmsg(void *socket, struct msghdr *msg)
{
	ksocket_t ks = (ksocket_t)socket;
	size_t sent = 0;
	int flag = msg->msg_flags;
	(void) ksocket_sendmsg(ks, msg, flag, &sent, CRED());
	DTRACE_PROBE1(ksocket_sendmsg, size_t, sent);
	return (sent);
}
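
A usage sketch for the wrapper above (send_buf is a hypothetical caller in the same file): wrap a flat buffer in a single-element iovec and msghdr, mirroring the receive-side setup shown later in rcv_radius_response().

static size_t
send_buf(void *socket, void *buf, size_t len)
{
	struct msghdr msg;
	struct iovec iov[1];

	iov[0].iov_base = (char *)buf;
	iov[0].iov_len = len;

	/* Zero the header so unused fields (name, control) are NULL. */
	bzero(&msg, sizeof (msg));
	msg.msg_iov = iov;
	msg.msg_iovlen = 1;

	return (iscsi_net_sendmsg(socket, &msg));
}
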
Example 12
/*
 * Write the ctrl register.
 */
static void
write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
{
	cpu_acpi_pct_t *pct_ctrl;
	uint64_t reg;

	pct_ctrl = CPU_ACPI_PCT_CTRL(handle);

	switch (pct_ctrl->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		reg = ctrl;
		wrmsr(PWRNOW_PERF_CTL_MSR, reg);
		break;

	default:
		DTRACE_PROBE1(pwrnow_ctrl_unsupported_type, uint8_t,
		    pct_ctrl->cr_addrspace_id);
		return;
	}

	DTRACE_PROBE1(pwrnow_ctrl_write, uint32_t, ctrl);
}
Example 13
/*
 * Conditionally decode an RDMA WRITE chunk list from an XDR stream.
 *
 * If the next boolean in the XDR stream is false, there is no
 * RDMA WRITE chunk list present. Otherwise iterate over the
 * array and, for each entry, allocate a struct clist and decode
 * into it. Pass back an indication via wlist_exists if we have
 * seen an RDMA WRITE chunk list.
 */
bool_t
xdr_decode_wlist(XDR *xdrs, struct clist **w, bool_t *wlist_exists)
{
	struct clist	*tmp;
	bool_t		more = FALSE;
	uint32_t	seg_array_len;
	uint32_t	i;

	if (!xdr_bool(xdrs, &more))
		return (FALSE);

	/* is there a wlist? */
	if (more == FALSE) {
		*wlist_exists = FALSE;
		return (TRUE);
	}
	*wlist_exists = TRUE;

	if (!xdr_uint32(xdrs, &seg_array_len))
		return (FALSE);

	tmp = *w = clist_alloc();
	for (i = 0; i < seg_array_len; i++) {

		if (!xdr_uint32(xdrs, &tmp->c_dmemhandle.mrc_rmr))
			return (FALSE);
		if (!xdr_uint32(xdrs, &tmp->c_len))
			return (FALSE);

		DTRACE_PROBE1(krpc__i__xdr_decode_wlist_len,
		    uint_t, tmp->c_len);

		if (!xdr_uint64(xdrs, &tmp->u.c_daddr))
			return (FALSE);
		if (i < seg_array_len - 1) {
			tmp->c_next = clist_alloc();
			tmp = tmp->c_next;
		} else {
			tmp->c_next = NULL;
		}
	}

	more = FALSE;
	if (!xdr_bool(xdrs, &more))
		return (FALSE);

	return (TRUE);
}
Example 14
void
buz (int parm)
{
  struct astruct
  {
    int a;
    int b;
    int *c;
  };
  /* c is implicitly zero-initialized by the partial initializer.  */
  struct astruct bstruct = { parm, parm + 1 };
  struct astruct *cstruct = &bstruct;

  if (parm == 0)
    parm = 1000;
  /* Skip the argument setup entirely when no consumer is attached.  */
  if (SDT_MISC_TEST_PROBE_4_ENABLED ())
    DTRACE_PROBE1 (sdt_misc, test_probe_4, &bstruct);
  /* Fires with a boolean argument: whether c is non-NULL
     (here c is NULL, so the argument is 0).  */
  SDT_MISC_TEST_PROBE_1 (cstruct->c != ((void *) 0));
}
Example 15
bool_t
xdr_encode_wlist(XDR *xdrs, clist *w)
{
	bool_t		vfalse = FALSE, vtrue = TRUE;
	int		i;
	uint_t		num_segment = 0;
	struct clist	*cl;

	/* does a wlist exist? */
	if (w == NULL) {
		return (xdr_bool(xdrs, &vfalse));
	}
	/* Encode N consecutive segments, 1, N, HLOO, ..., HLOO, 0 */
	if (!xdr_bool(xdrs, &vtrue))
		return (FALSE);

	for (cl = w; cl != NULL; cl = cl->c_next) {
		num_segment++;
	}

	if (!xdr_uint32(xdrs, &num_segment))
		return (FALSE);
	for (i = 0; i < num_segment; i++) {

		DTRACE_PROBE1(krpc__i__xdr_encode_wlist_len, uint_t, w->c_len);

		if (!xdr_uint32(xdrs, &w->c_dmemhandle.mrc_rmr))
			return (FALSE);

		if (!xdr_uint32(xdrs, &w->c_len))
			return (FALSE);

		if (!xdr_uint64(xdrs, &w->u.c_daddr))
			return (FALSE);

		w = w->c_next;
	}

	if (!xdr_bool(xdrs, &vfalse))
		return (FALSE);

	return (TRUE);
}
Example 16
/*ARGSUSED*/
static void
balloon_handler(struct xenbus_watch *watch, const char **vec, uint_t len)
{
	ulong_t new_target_kb;
	pgcnt_t	new_target_pages;
	int rv;
	static uchar_t warning_cnt = 0;

	rv = xenbus_scanf(0, "memory", "target", "%lu", &new_target_kb);
	if (rv != 0) {
		return;
	}

	/* new_target is in kB - change this to pages */
	new_target_pages = kbtop(new_target_kb);

	DTRACE_PROBE1(balloon__new__target, pgcnt_t, new_target_pages);

	/*
	 * Unfortunately, dom0 may give us a target that is larger than
	 * our max limit.  Re-check the limit, and, if the new target is
	 * too large, adjust it downwards.
	 */
	mutex_enter(&bln_mutex);
	if (new_target_pages > bln_stats.bln_max_pages) {
		DTRACE_PROBE2(balloon__target__too__large, pgcnt_t,
		    new_target_pages, pgcnt_t, bln_stats.bln_max_pages);
		if (!DOMAIN_IS_INITDOMAIN(xen_info) || warning_cnt != 0) {
			cmn_err(CE_WARN, "New balloon target (0x%lx pages) is "
			    "larger than original memory size (0x%lx pages). "
			    "Ballooning beyond original memory size is not "
			    "allowed.",
			    new_target_pages, bln_stats.bln_max_pages);
		}
		warning_cnt = 1;
		bln_stats.bln_new_target = bln_stats.bln_max_pages;
	} else {
		bln_stats.bln_new_target = new_target_pages;
	}

	mutex_exit(&bln_mutex);
	cv_signal(&bln_cv);
}
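
kbtop() converts the kilobyte target read from the xenstore into pages. A plausible sketch, assuming the standard DDI btop() bytes-to-pages macro (the real kbtop() definition lives in the balloon headers and may differ):

/* Assumption: 1 kB = 1024 bytes; btop() rounds bytes down to pages. */
#define	kbtop(kb)	btop((kb) * 1024)

With 4 KB pages, for example, a target of 1048576 kB becomes 262144 pages.
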
Example 17
/*
 * dm2s_wsrv - Streams write side service procedure.
 *
 * All messages are transmitted in the service procedure
 * only. This is done to simplify the streams synchronization.
 */
int
dm2s_wsrv(queue_t *wq)
{
	dm2s_t *dm2sp = (dm2s_t *)wq->q_ptr;

	DPRINTF(DBG_DRV, ("dm2s_wsrv: called\n"));
	ASSERT(dm2sp != NULL);
	/* Let's cancel any timeout that is waiting to fire. */
	if (dm2sp->ms_wq_timeoutid != 0) {
		DTRACE_PROBE1(dm2s_wqtimeout__cancel, dm2s_t, dm2sp);
		(void) quntimeout(dm2sp->ms_wq, dm2sp->ms_wq_timeoutid);
		dm2sp->ms_wq_timeoutid = 0;
	}
	mutex_enter(&dm2sp->ms_lock);
	dm2s_start(wq, dm2sp);
	mutex_exit(&dm2sp->ms_lock);
	DPRINTF(DBG_DRV, ("dm2s_wsrv: return\n"));
	return (0);
}
Example 18
bool_t
xdr_encode_reply_wchunk(XDR *xdrs,
    struct clist *cl_longreply, uint32_t seg_array_len)
{
	int		i;
	bool_t		long_reply_exists = TRUE;
	uint32_t	length;
	uint64		offset;

	if (seg_array_len > 0) {
		if (!xdr_bool(xdrs, &long_reply_exists))
			return (FALSE);
		if (!xdr_uint32(xdrs, &seg_array_len))
			return (FALSE);

		for (i = 0; i < seg_array_len; i++) {
			if (!cl_longreply)
				return (FALSE);
			length = cl_longreply->c_len;
			offset = (uint64) cl_longreply->u.c_daddr;

			DTRACE_PROBE1(
			    krpc__i__xdr_encode_reply_wchunk_c_len,
			    uint32_t, length);

			if (!xdr_uint32(xdrs,
			    &cl_longreply->c_dmemhandle.mrc_rmr))
				return (FALSE);
			if (!xdr_uint32(xdrs, &length))
				return (FALSE);
			if (!xdr_uint64(xdrs, &offset))
				return (FALSE);
			cl_longreply = cl_longreply->c_next;
		}
	} else {
		long_reply_exists = FALSE;
		if (!xdr_bool(xdrs, &long_reply_exists))
			return (FALSE);
	}
	return (TRUE);
}
Example 19
/* ARGSUSED */
static size_t
iscsi_net_recvmsg(void *socket, struct msghdr *msg, int timeout)
{
	int		prflag	    = msg->msg_flags;
	ksocket_t	ks	    = (ksocket_t)socket;
	size_t 		recv	    = 0;

	/* Set recv timeout */
	if (get_udatamodel() == DATAMODEL_NONE ||
	    get_udatamodel() == DATAMODEL_NATIVE) {
		struct timeval tl;

		tl.tv_sec = timeout;
		tl.tv_usec = 0;
		if (ksocket_setsockopt(ks, SOL_SOCKET, SO_RCVTIMEO, &tl,
		    sizeof (struct timeval), CRED()))
			return (0);
	} else {
		struct timeval32 tl;

		tl.tv_sec = timeout;
		tl.tv_usec = 0;
		if (ksocket_setsockopt(ks, SOL_SOCKET, SO_RCVTIMEO, &tl,
		    sizeof (struct timeval32), CRED()))
			return (0);
	}
	/*
	 * Receive the requested data.  Block until all
	 * data is received or timeout.
	 */
	ksocket_hold(ks);
	(void) ksocket_recvmsg(ks, msg, prflag, &recv, CRED());
	ksocket_rele(ks);
	DTRACE_PROBE1(ksocket_recvmsg, size_t, recv);
	return (recv);
}
Example 20
/*
 * Server-side RDMA WRITE list decode.
 * The XDR context is memory ops.
 */
bool_t
xdr_decode_wlist_svc(XDR *xdrs, struct clist **wclp, bool_t *wwl,
    uint32_t *total_length, CONN *conn)
{
	struct clist	*first, *ncl;
	char		*memp;
	uint32_t	num_wclist;
	uint32_t	wcl_length = 0;
	uint32_t	i;
	bool_t		more = FALSE;

	*wclp = NULL;
	*wwl = FALSE;
	*total_length = 0;

	if (!xdr_bool(xdrs, &more)) {
		return (FALSE);
	}

	if (more == FALSE) {
		return (TRUE);
	}

	*wwl = TRUE;

	if (!xdr_uint32(xdrs, &num_wclist)) {
		DTRACE_PROBE(krpc__e__xdrrdma__wlistsvc__listlength);
		return (FALSE);
	}

	first = ncl = clist_alloc();

	for (i = 0; i < num_wclist; i++) {

		if (!xdr_uint32(xdrs, &ncl->c_dmemhandle.mrc_rmr))
			goto err_out;
		if (!xdr_uint32(xdrs, &ncl->c_len))
			goto err_out;
		if (!xdr_uint64(xdrs, &ncl->u.c_daddr))
			goto err_out;

		if (ncl->c_len > MAX_SVC_XFER_SIZE) {
			DTRACE_PROBE(
			    krpc__e__xdrrdma__wlistsvc__chunklist_toobig);
			ncl->c_len = MAX_SVC_XFER_SIZE;
		}

		DTRACE_PROBE1(krpc__i__xdr_decode_wlist_svc_len,
		    uint_t, ncl->c_len);

		wcl_length += ncl->c_len;

		if (i < num_wclist - 1) {
			ncl->c_next = clist_alloc();
			ncl = ncl->c_next;
		}
	}

	if (!xdr_bool(xdrs, &more))
		goto err_out;

	first->rb_longbuf.type = RDMA_LONG_BUFFER;
	first->rb_longbuf.len =
	    wcl_length > WCL_BUF_LEN ? wcl_length : WCL_BUF_LEN;

	if (rdma_buf_alloc(conn, &first->rb_longbuf)) {
		clist_free(first);
		return (FALSE);
	}

	memp = first->rb_longbuf.addr;

	ncl = first;
	for (i = 0; i < num_wclist; i++) {
		ncl->w.c_saddr3 = (caddr_t)memp;
		memp += ncl->c_len;
		ncl = ncl->c_next;
	}

	*wclp = first;
	*total_length = wcl_length;
	return (TRUE);

err_out:
	clist_free(first);
	return (FALSE);
}
Example 21
/*
 * dm2s_receive - Read all messages from the mailbox.
 *
 * This function is called from the read service procedure, to
 * receive the messages awaiting in the mailbox.
 */
void
dm2s_receive(dm2s_t *dm2sp)
{
	queue_t	*rq = dm2sp->ms_rq;
	mblk_t	*mp;
	int	ret;
	uint32_t len;

	DPRINTF(DBG_DRV, ("dm2s_receive: called\n"));
	ASSERT(dm2sp != NULL);
	ASSERT(MUTEX_HELD(&dm2sp->ms_lock));
	if (rq == NULL) {
		return;
	}
	/*
	 * As the number of messages in the mailbox is quite limited,
	 * it is safe to process all of them in one loop.
	 */
	while (DM2S_MBOX_READY(dm2sp) && ((ret = scf_mb_canget(dm2sp->ms_target,
	    dm2sp->ms_key, &len)) == 0)) {
		DPRINTF(DBG_MBOX, ("dm2s_receive: mb_canget len=%d\n", len));
		if (len == 0) {
			break;
		}
		mp = allocb(len, BPRI_MED);
		if (mp == NULL) {
			DPRINTF(DBG_WARN, ("dm2s_receive: allocb failed\n"));
			/*
			 * Start a bufcall so that we can retry
			 * when memory becomes available.
			 */
			dm2sp->ms_rbufcid = qbufcall(rq, len, BPRI_MED,
			    dm2s_bufcall_rcv, dm2sp);
			if (dm2sp->ms_rbufcid == 0) {
				DPRINTF(DBG_WARN,
				    ("dm2s_receive: qbufcall failed\n"));
				/*
				 * If the bufcall fails, start a timeout
				 * to retry after some time.
				 */
				DTRACE_PROBE1(dm2s_rqtimeout__start,
				    dm2s_t, dm2sp);
				dm2sp->ms_rq_timeoutid = qtimeout(rq,
				    dm2s_rq_timeout, (void *)dm2sp,
				    drv_usectohz(DM2S_SM_TOUT));
			}
			break;
		}

		/*
		 * Only a single scatter/gather element is enough here.
		 */
		dm2sp->ms_sg_rcv.msc_dptr = (caddr_t)mp->b_wptr;
		dm2sp->ms_sg_rcv.msc_len = len;
		DPRINTF(DBG_MBOX, ("dm2s_receive: calling getmsg\n"));
		ret = scf_mb_getmsg(dm2sp->ms_target, dm2sp->ms_key, len, 1,
		    &dm2sp->ms_sg_rcv, 0);
		DPRINTF(DBG_MBOX, ("dm2s_receive: getmsg ret=%d\n", ret));
		if (ret != 0) {
			freemsg(mp);
			break;
		}
		DMPBYTES("dm2s: Getmsg: ", len, 1, &dm2sp->ms_sg_rcv);
		mp->b_wptr += len;
		/*
		 * Queue the messages in the rq, so that the service
		 * procedure handles sending the messages up the stream.
		 */
		putq(rq, mp);
	}

	if ((!DM2S_MBOX_READY(dm2sp)) || (ret != ENOMSG && ret != EMSGSIZE)) {
		/*
		 * Something went wrong; flush pending messages
		 * and initiate a hangup.
		 * Note: flushing the wq initiates a faster close.
		 */
		mutex_exit(&dm2sp->ms_lock);
		flushq(WR(rq), FLUSHDATA);
		(void) putnextctl(rq, M_HANGUP);
		DTRACE_PROBE1(dm2s_hangup, dm2s_t, dm2sp);
		mutex_enter(&dm2sp->ms_lock);
		DPRINTF(DBG_WARN, ("dm2s_receive: encountered unknown "
		    "condition - hangup ret=%d\n", ret));
	}
}
Example 22
/*
 * See radius_packet.h.
 */
int
rcv_radius_response(void *socket, uint8_t *shared_secret,
    uint32_t shared_secret_len, uint8_t *req_authenticator,
    radius_packet_data_t *resp_data)
{
	int			rcv_len = 0;
	radius_packet_t		*packet;
	MD5_CTX			context;
	uint8_t			*tmp_data;
	uint8_t			md5_digest[16]; /* MD5 Digest Length 16 */
	uint16_t		declared_len = 0;
	ushort_t		len;
	struct nmsghdr		msg;
	struct iovec		iov[1];

	tmp_data = kmem_zalloc(MAX_RAD_PACKET_LEN, KM_SLEEP);

	iov[0].iov_base	    = (char *)tmp_data;
	iov[0].iov_len	    = MAX_RAD_PACKET_LEN;

	bzero(&msg, sizeof (msg));
	msg.msg_name	    = NULL;
	msg.msg_namelen	    = 0;
	msg.msg_control	    = NULL;
	msg.msg_controllen  = 0;
	msg.msg_flags	    = MSG_WAITALL;
	msg.msg_iov	    = iov;
	msg.msg_iovlen	    = 1;

	rcv_len = iscsi_net->recvmsg(socket, &msg, RAD_RCV_TIMEOUT);
	if (rcv_len == 0) {
		kmem_free(tmp_data, MAX_RAD_PACKET_LEN);
		return (RAD_RSP_RCVD_NO_DATA);
	}

	DTRACE_PROBE1(rcv_rad_resp_summary, int, rcv_len);

	packet = (radius_packet_t *)tmp_data;
	bcopy(packet->length, &len, sizeof (ushort_t));
	declared_len = ntohs(len);

	DTRACE_PROBE1(rcv_rad_resp_data, uint16_t, declared_len);

	/*
	 * Check if the received packet length is within allowable range.
	 * RFC 2865 section 3.
	 */
	if (rcv_len < MIN_RAD_PACKET_LEN) {
		kmem_free(tmp_data, MAX_RAD_PACKET_LEN);
		return (RAD_RSP_RCVD_PROTOCOL_ERR);
	} else if (rcv_len > MAX_RAD_PACKET_LEN) {
		kmem_free(tmp_data, MAX_RAD_PACKET_LEN);
		return (RAD_RSP_RCVD_PROTOCOL_ERR);
	}

	/*
	 * Check if the declared packet length is within allowable range.
	 * RFC 2865 section 3.
	 */
	if (declared_len < MIN_RAD_PACKET_LEN) {
		kmem_free(tmp_data, MAX_RAD_PACKET_LEN);
		return (RAD_RSP_RCVD_PROTOCOL_ERR);
	} else if (declared_len > MAX_RAD_PACKET_LEN) {
		kmem_free(tmp_data, MAX_RAD_PACKET_LEN);
		return (RAD_RSP_RCVD_PROTOCOL_ERR);
	}

	/*
	 * Discard packet with received length shorter than declared
	 * length. RFC 2865 section 3.
	 */
	if (rcv_len < declared_len) {
		kmem_free(tmp_data, MAX_RAD_PACKET_LEN);
		return (RAD_RSP_RCVD_PROTOCOL_ERR);
	}

	/*
	 * Authenticate the incoming packet, using the following algorithm
	 * (RFC 2865 section 3):
	 *
	 * 	MD5(Code+ID+Length+RequestAuth+Attributes+Secret)
	 *
	 * Code = RADIUS packet code
	 * ID = RADIUS packet identifier
	 * Length = Declared length of the packet
	 * RequestAuth = The request authenticator
	 * Attributes = The response attributes
	 * Secret = The shared secret
	 */
	MD5Init(&context);
	bzero(&md5_digest, 16);
	MD5Update(&context, &packet->code, 1);
	MD5Update(&context, &packet->identifier, 1);
	MD5Update(&context, packet->length, 2);
	MD5Update(&context, req_authenticator, RAD_AUTHENTICATOR_LEN);
	/* Include response attributes only if there is a payload */
	if (declared_len > RAD_PACKET_HDR_LEN) {
		/* Response Attributes */
		MD5Update(&context, packet->data,
		    declared_len - RAD_PACKET_HDR_LEN);
	}
	MD5Update(&context, shared_secret, shared_secret_len);
	MD5Final(md5_digest, &context);

	if (bcmp(md5_digest, packet->authenticator, RAD_AUTHENTICATOR_LEN)
	    != 0) {
		kmem_free(tmp_data, MAX_RAD_PACKET_LEN);
		return (RAD_RSP_RCVD_AUTH_FAILED);
	}

	/*
	 * If the received length is greater than the declared length,
	 * trust the declared length and shorten the packet (i.e., to
	 * treat the octets outside the range of the Length field as
	 * padding - RFC 2865 section 3).
	 */
	if (rcv_len > declared_len) {
		/* Clear the padding data. */
		bzero(tmp_data + declared_len, rcv_len - declared_len);
		rcv_len = declared_len;
	}

	/*
	 * Annotate the RADIUS packet data with the data we received from
	 * the server.
	 */
	resp_data->code = packet->code;
	resp_data->identifier = packet->identifier;

	kmem_free(tmp_data, MAX_RAD_PACKET_LEN);
	return (RAD_RSP_RCVD_SUCCESS);
}
Example 23
/*
 * dm2s_event_handler - Mailbox event handler.
 */
void
dm2s_event_handler(scf_event_t event, void *arg)
{
	dm2s_t *dm2sp = (dm2s_t *)arg;
	queue_t	*rq;

	ASSERT(dm2sp != NULL);
	mutex_enter(&dm2sp->ms_lock);
	if (!(dm2sp->ms_state & DM2S_MB_INITED)) {
		/*
		 * Ignore all events if the state flag indicates that the
		 * mailbox is not initialized; this may happen during close.
		 */
		mutex_exit(&dm2sp->ms_lock);
		DPRINTF(DBG_MBOX,
		    ("Event(0x%X) received - Mailbox not inited\n", event));
		return;
	}
	switch (event) {
	case SCF_MB_CONN_OK:
		/*
		 * Now the mailbox is ready to use; let's wake up
		 * anyone waiting for this event.
		 */
		dm2sp->ms_state |= DM2S_MB_CONN;
		cv_broadcast(&dm2sp->ms_wait);
		DPRINTF(DBG_MBOX, ("Event received = CONN_OK\n"));
		break;

	case SCF_MB_MSG_DATA:
		if (!DM2S_MBOX_READY(dm2sp)) {
			DPRINTF(DBG_MBOX,
			    ("Event(MSG_DATA) received - Mailbox not READY\n"));
			break;
		}
		/*
		 * A message is available in the mailbox.
		 * Let's enable the read service procedure
		 * to receive this message.
		 */
		if (dm2sp->ms_rq != NULL) {
			qenable(dm2sp->ms_rq);
		}
		DPRINTF(DBG_MBOX, ("Event received = MSG_DATA\n"));
		break;

	case SCF_MB_SPACE:
		if (!DM2S_MBOX_READY(dm2sp)) {
			DPRINTF(DBG_MBOX,
			    ("Event(MB_SPACE) received - Mailbox not READY\n"));
			break;
		}

		/*
		 * Now the mailbox is ready to transmit; let's
		 * schedule the write service procedure.
		 */
		if (dm2sp->ms_wq != NULL) {
			qenable(dm2sp->ms_wq);
		}
		DPRINTF(DBG_MBOX, ("Event received = MB_SPACE\n"));
		break;
	case SCF_MB_DISC_ERROR:
		dm2sp->ms_state |= DM2S_MB_DISC;
		if (dm2sp->ms_state & DM2S_MB_CONN) {
			/*
			 * If it was previously connected,
			 * then send a hangup message.
			 */
			rq = dm2sp->ms_rq;
			if (rq != NULL) {
				mutex_exit(&dm2sp->ms_lock);
				/*
				 * Send a hangup message to indicate
				 * disconnect event.
				 */
				(void) putctl(rq, M_HANGUP);
				DTRACE_PROBE1(dm2s_hangup, dm2s_t, dm2sp);
				mutex_enter(&dm2sp->ms_lock);
			}
		} else {
			/*
			 * Signal if the open is waiting for a
			 * connection.
			 */
			cv_broadcast(&dm2sp->ms_wait);
		}
		DPRINTF(DBG_MBOX, ("Event received = DISC_ERROR\n"));
		break;
	default:
		cmn_err(CE_WARN, "Unexpected event received\n");
		break;
	}
	mutex_exit(&dm2sp->ms_lock);
}
Example 24
/*
 * smb2sr_work
 *
 * This function processes each SMB command in the current request
 * (which may be a compound request) building a reply containing
 * SMB reply messages, one-to-one with the SMB commands.  Some SMB
 * commands (change notify, blocking pipe read) may require both an
 * "interim response" and a later "async response" at completion.
 * In such cases, we'll encode the interim response in the reply
 * compound we're building, and put the (now async) command on a
 * list of commands that need further processing.  After we've
 * finished processing the commands in this compound and building
 * the compound reply, we'll send the compound reply, and finally
 * process the list of async commands.
 *
 * As we work our way through the compound request and reply,
 * we need to keep track of the bounds of the current request
 * and reply.  For the request, this uses an MBC_SHADOW_CHAIN
 * that begins at smb2_cmd_hdr.  The reply is appended to the
 * sr->reply chain starting at smb2_reply_hdr.
 *
 * This function must always free the smb request.
 */
void
smb2sr_work(struct smb_request *sr)
{
	smb_session_t		*session;
	uint32_t		msg_len;
	int			rc;
	boolean_t		disconnect = B_FALSE;

	session = sr->session;

	ASSERT(sr->tid_tree == 0);
	ASSERT(sr->uid_user == 0);
	ASSERT(sr->fid_ofile == 0);
	sr->smb_fid = (uint16_t)-1;

	/* temporary until we identify a user */
	sr->user_cr = zone_kcred();

	mutex_enter(&sr->sr_mutex);
	switch (sr->sr_state) {
	case SMB_REQ_STATE_SUBMITTED:
	case SMB_REQ_STATE_CLEANED_UP:
		sr->sr_state = SMB_REQ_STATE_ACTIVE;
		break;
	default:
		ASSERT(0);
		/* FALLTHROUGH */
	case SMB_REQ_STATE_CANCELED:
		goto complete_unlock_free;
	}
	mutex_exit(&sr->sr_mutex);

cmd_start:
	/*
	 * Reserve space for the reply header, and save the offset.
	 * The reply header will be overwritten later.
	 */
	sr->smb2_reply_hdr = sr->reply.chain_offset;
	(void) smb_mbc_encodef(&sr->reply, "#.", SMB2_HDR_SIZE);

	/*
	 * Decode the request header
	 *
	 * Most problems with decoding will result in the error
	 * STATUS_INVALID_PARAMETER.  If the decoding problem
	 * prevents continuing, we'll close the connection.
	 * [MS-SMB2] 3.3.5.2.6 Handling Incorrectly Formatted...
	 */
	sr->smb2_status = 0;
	sr->smb2_cmd_hdr = sr->command.chain_offset;
	if ((rc = smb2_decode_header(sr)) != 0) {
		cmn_err(CE_WARN, "clnt %s bad SMB2 header",
		    session->ip_addr_str);
		disconnect = B_TRUE;
		goto cleanup;
	}

	/*
	 * Figure out the length of data following the SMB2 header.
	 * It ends at either the next SMB2 header if there is one
	 * (smb2_next_command != 0) or at the end of the message.
	 */
	if (sr->smb2_next_command != 0) {
		/* [MS-SMB2] says this is 8-byte aligned */
		msg_len = sr->smb2_next_command;
		if ((msg_len & 7) != 0 || (msg_len < SMB2_HDR_SIZE) ||
		    ((sr->smb2_cmd_hdr + msg_len) > sr->command.max_bytes)) {
			cmn_err(CE_WARN, "clnt %s bad SMB2 next cmd",
			    session->ip_addr_str);
			disconnect = B_TRUE;
			goto cleanup;
		}
	} else {
		msg_len = sr->command.max_bytes - sr->smb2_cmd_hdr;
	}

	/*
	 * Setup a shadow chain for this SMB2 command, starting
	 * with the header and ending at either the next command
	 * or the end of the message.  Note that we've already
	 * decoded the header, so chain_offset is now positioned
	 * at the end of the header.  The signing check needs the
	 * entire SMB2 command, so we'll shadow starting at the
	 * smb2_cmd_hdr offset.  After the signing check, we'll
	 * move chain_offset up to the end of the header.
	 */
	(void) MBC_SHADOW_CHAIN(&sr->smb_data, &sr->command,
	    sr->smb2_cmd_hdr, msg_len);

	/*
	 * Verify SMB signature if signing is enabled and active now.
	 * [MS-SMB2] 3.3.5.2.4 Verifying the Signature
	 */
	if ((sr->smb2_hdr_flags & SMB2_FLAGS_SIGNED) != 0) {
		rc = smb2_sign_check_request(sr);
		if (rc != 0) {
			DTRACE_PROBE1(smb2__sign__check, smb_request_t, sr);
			if (session->signing.flags & SMB_SIGNING_CHECK) {
				smb2sr_put_error(sr, NT_STATUS_ACCESS_DENIED);
				goto cmd_finish;
			}
		}
	}

	/*
	 * Now that the signing check is done with smb_data,
	 * advance past the SMB2 header we decoded above.
	 * This leaves sr->smb_data correctly positioned
	 * for command-specific decoding in the dispatch
	 * function called next.
	 */
	sr->smb_data.chain_offset = sr->smb2_cmd_hdr + SMB2_HDR_SIZE;

	/*
	 * Default credit response.  Command handler may modify.
	 */
	sr->smb2_credit_response = sr->smb2_credit_request;

	/*
	 * Common dispatch (for sync & async)
	 */
	rc = smb2sr_dispatch(sr, NULL);
	switch (rc) {
	case SDRC_SUCCESS:
		break;
	default:
		/*
		 * SMB2 does not use the other dispatch return codes.
		 * If we see something else, log an event so we'll
		 * know something is returning bogus status codes.
		 * If you see these in the log, use dtrace to find
		 * the code returning something else.
		 */
#ifdef	DEBUG
		cmn_err(CE_NOTE, "smb2sr_dispatch -> 0x%x", rc);
#endif
		/* FALLTHROUGH */
	case SDRC_ERROR:
		if (sr->smb2_status == 0)
			sr->smb2_status = NT_STATUS_INTERNAL_ERROR;
		break;
	case SDRC_DROP_VC:
		disconnect = B_TRUE;
		goto cleanup;
	}

	/*
	 * If there's a next command, figure out where it starts,
	 * and fill in the next command offset for the reply.
	 * Note: We sanity checked smb2_next_command above
	 * (the offset to the next command).  Similarly set
	 * smb2_next_reply as the offset to the next reply.
	 */
cmd_finish:
	if (sr->smb2_next_command != 0) {
		sr->command.chain_offset =
		    sr->smb2_cmd_hdr + sr->smb2_next_command;
		sr->smb2_next_reply =
		    sr->reply.chain_offset - sr->smb2_reply_hdr;
	} else {
		sr->smb2_next_reply = 0;
	}

	/*
	 * Overwrite the SMB2 header for the response of
	 * this command (possibly part of a compound).
	 */
	sr->smb2_hdr_flags |= SMB2_FLAGS_SERVER_TO_REDIR;
	(void) smb2_encode_header(sr, B_TRUE);

	if (sr->smb2_hdr_flags & SMB2_FLAGS_SIGNED)
		smb2_sign_reply(sr);

	if (sr->smb2_next_command != 0)
		goto cmd_start;

	/*
	 * We've done all the commands in this compound.
	 * Send it out.
	 */
	smb2_send_reply(sr);

	/*
	 * If any of the requests "went async", process those now.
	 */
	if (sr->sr_async_req != NULL) {
		smb2sr_do_async(sr);
	}

cleanup:
	if (disconnect) {
		smb_rwx_rwenter(&session->s_lock, RW_WRITER);
		switch (session->s_state) {
		case SMB_SESSION_STATE_DISCONNECTED:
		case SMB_SESSION_STATE_TERMINATED:
			break;
		default:
			smb_soshutdown(session->sock);
			session->s_state = SMB_SESSION_STATE_DISCONNECTED;
			break;
		}
		smb_rwx_rwexit(&session->s_lock);
	}

	mutex_enter(&sr->sr_mutex);
complete_unlock_free:
	sr->sr_state = SMB_REQ_STATE_COMPLETED;
	mutex_exit(&sr->sr_mutex);

	smb_request_free(sr);
}