Example #1
/*
 * Dispatch a work request to the thread pool.
 * If there are idle workers, awaken one.
 * Else, if the maximum number of workers has
 * not been reached, spawn a new worker thread.
 * Else just return with the job added to the queue.
 */
int
tpool_dispatch(tpool_t *tpool, void (*func)(void *), void *arg)
{
	tpool_job_t *job;

	ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));

	if ((job = lmalloc(sizeof (*job))) == NULL)
		return (-1);
	job->tpj_next = NULL;
	job->tpj_func = func;
	job->tpj_arg = arg;

	sig_mutex_lock(&tpool->tp_mutex);

	if (tpool->tp_head == NULL)
		tpool->tp_head = job;
	else
		tpool->tp_tail->tpj_next = job;
	tpool->tp_tail = job;
	tpool->tp_njobs++;

	if (!(tpool->tp_flags & TP_SUSPEND)) {
		if (tpool->tp_idle > 0)
			(void) cond_signal(&tpool->tp_workcv);
		else if (tpool->tp_current < tpool->tp_maximum &&
		    create_worker(tpool) == 0)
			tpool->tp_current++;
	}

	sig_mutex_unlock(&tpool->tp_mutex);
	return (0);
}
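
For context, here is a minimal caller sketch. It assumes the private illumos thread_pool.h interface, where tpool_create() takes the minimum and maximum worker counts, an idle-linger timeout in seconds, and optional thread attributes; names other than the tpool_* calls are illustrative.

#include <stdio.h>
#include <stdint.h>
#include <thread_pool.h>	/* private illumos header (assumption) */

static void
print_job(void *arg)
{
	(void) printf("job %d\n", (int)(uintptr_t)arg);
}

int
main(void)
{
	/* 2 to 8 workers; idle workers linger for 10 seconds */
	tpool_t *tpool = tpool_create(2, 8, 10, NULL);
	int i;

	if (tpool == NULL)
		return (1);
	for (i = 0; i < 100; i++) {
		/* fails only if job allocation fails */
		if (tpool_dispatch(tpool, print_job,
		    (void *)(uintptr_t)i) != 0)
			break;
	}
	tpool_wait(tpool);	/* block until the queue drains */
	tpool_destroy(tpool);
	return (0);
}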
Example #2
/*
 * Worker thread is terminating.
 */
static void
worker_cleanup(tpool_t *tpool)
{
	ASSERT(MUTEX_HELD(&tpool->tp_mutex));

	if (--tpool->tp_current == 0 &&
	    (tpool->tp_flags & (TP_DESTROY | TP_ABANDON))) {
		if (tpool->tp_flags & TP_ABANDON) {
			sig_mutex_unlock(&tpool->tp_mutex);
			delete_pool(tpool);
			return;
		}
		if (tpool->tp_flags & TP_DESTROY)
			(void) cond_broadcast(&tpool->tp_busycv);
	}
	sig_mutex_unlock(&tpool->tp_mutex);
}
Example #3
/*
 * Like tpool_destroy(), but don't cancel workers or wait for them to finish.
 * The last worker to terminate will delete the pool.
 */
void
tpool_abandon(tpool_t *tpool)
{
	ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));

	sig_mutex_lock(&tpool->tp_mutex);
	if (tpool->tp_current == 0) {
		/* no workers, just delete the pool */
		sig_mutex_unlock(&tpool->tp_mutex);
		delete_pool(tpool);
	} else {
		/* wake up all workers, last one will delete the pool */
		tpool->tp_flags |= TP_ABANDON;
		tpool->tp_flags &= ~TP_SUSPEND;
		(void) cond_broadcast(&tpool->tp_workcv);
		sig_mutex_unlock(&tpool->tp_mutex);
	}
}
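
A sketch of where abandonment fits, under the same header assumption as above: detaching from a pool that still has fire-and-forget work queued. flush_cache() is a hypothetical job.

#include <thread_pool.h>	/* private illumos header (assumption) */

extern void flush_cache(void *);	/* hypothetical background job */

void
detach_pool(tpool_t *tpool)
{
	/*
	 * Queue one last fire-and-forget job, then abandon the pool.
	 * tpool_abandon() returns immediately; the last worker to
	 * terminate deletes the pool, so tpool is dead after this.
	 */
	(void) tpool_dispatch(tpool, flush_cache, NULL);
	tpool_abandon(tpool);
}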
Example #4
int
tpool_member(tpool_t *tpool)
{
	pthread_t my_tid = pthread_self();
	tpool_active_t *activep;

	ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));

	sig_mutex_lock(&tpool->tp_mutex);
	for (activep = tpool->tp_active; activep; activep = activep->tpa_next) {
		if (activep->tpa_tid == my_tid) {
			sig_mutex_unlock(&tpool->tp_mutex);
			return (1);
		}
	}
	sig_mutex_unlock(&tpool->tp_mutex);
	return (0);
}
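
One plausible use, sketched with hypothetical names (g_pool, follow_up): a routine that may be called both from application threads and from inside a worker can use tpool_member() to decide whether waiting is safe, since a worker that calls tpool_wait() on its own pool would wait for itself and deadlock.

#include <thread_pool.h>	/* private illumos header (assumption) */

extern tpool_t *g_pool;			/* hypothetical shared pool */
extern void follow_up(void *);		/* hypothetical job function */

void
schedule_follow_up(void *arg)
{
	(void) tpool_dispatch(g_pool, follow_up, arg);
	/*
	 * Wait for completion only when we are not ourselves a
	 * worker of g_pool; a worker blocking in tpool_wait() on
	 * its own pool would deadlock.
	 */
	if (!tpool_member(g_pool))
		tpool_wait(g_pool);
}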
Example #5
void
tpool_suspend(tpool_t *tpool)
{
	ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));

	sig_mutex_lock(&tpool->tp_mutex);
	tpool->tp_flags |= TP_SUSPEND;
	sig_mutex_unlock(&tpool->tp_mutex);
}
Example #6
void
tpool_resume(tpool_t *tpool)
{
	int excess;

	ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));

	sig_mutex_lock(&tpool->tp_mutex);
	if (!(tpool->tp_flags & TP_SUSPEND)) {
		sig_mutex_unlock(&tpool->tp_mutex);
		return;
	}
	tpool->tp_flags &= ~TP_SUSPEND;
	(void) cond_broadcast(&tpool->tp_workcv);
	excess = tpool->tp_njobs - tpool->tp_idle;
	while (excess-- > 0 && tpool->tp_current < tpool->tp_maximum) {
		if (create_worker(tpool) != 0)
			break;		/* pthread_create() failed */
		tpool->tp_current++;
	}
	sig_mutex_unlock(&tpool->tp_mutex);
}
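
A sketch of the intended suspend/resume pairing (same header assumption; process() is hypothetical): suspend the pool, queue a batch without waking anyone, then let tpool_resume() wake the idle workers and create up to tp_maximum workers for the backlog.

#include <thread_pool.h>	/* private illumos header (assumption) */

extern void process(void *);	/* hypothetical job function */

void
dispatch_batch(tpool_t *tpool, void **items, int n)
{
	int i;

	tpool_suspend(tpool);	/* queued jobs will not start yet */
	for (i = 0; i < n; i++)
		(void) tpool_dispatch(tpool, process, items[i]);
	if (tpool_suspended(tpool))	/* always 1 here; see Example #7 */
		tpool_resume(tpool);	/* wake idle workers, spawn more */
}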
Example #7
int
tpool_suspended(tpool_t *tpool)
{
	int suspended;

	ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));

	sig_mutex_lock(&tpool->tp_mutex);
	suspended = (tpool->tp_flags & TP_SUSPEND) != 0;
	sig_mutex_unlock(&tpool->tp_mutex);

	return (suspended);
}
Example #8
/* ARGSUSED */
static void
_lio_listio_cleanup(aio_lio_t *head)
{
	int freeit = 0;

	ASSERT(MUTEX_HELD(&head->lio_mutex));
	if (head->lio_refcnt == 0) {
		ASSERT(head->lio_nent == 0);
		freeit = 1;
	}
	head->lio_waiting = 0;
	sig_mutex_unlock(&head->lio_mutex);
	if (freeit)
		_aio_lio_free(head);
}
Example #9
static struct clnt_ops *
clnt_door_ops(void)
{
	static struct clnt_ops	ops;
	extern mutex_t		ops_lock;

	sig_mutex_lock(&ops_lock);
	if (ops.cl_call == NULL) {
		ops.cl_call = clnt_door_call;
		ops.cl_send = clnt_door_send;
		ops.cl_abort = clnt_door_abort;
		ops.cl_geterr = clnt_door_geterr;
		ops.cl_freeres = clnt_door_freeres;
		ops.cl_destroy = clnt_door_destroy;
		ops.cl_control = clnt_door_control;
	}
	sig_mutex_unlock(&ops_lock);
	return (&ops);
}
Example #10
static struct clnt_ops *
clnt_dg_ops(void)
{
	static struct clnt_ops ops;
	extern mutex_t	ops_lock;

/* VARIABLES PROTECTED BY ops_lock: ops */

	sig_mutex_lock(&ops_lock);
	if (ops.cl_call == NULL) {
		ops.cl_call = clnt_dg_call;
		ops.cl_send = clnt_dg_send;
		ops.cl_abort = clnt_dg_abort;
		ops.cl_geterr = clnt_dg_geterr;
		ops.cl_freeres = clnt_dg_freeres;
		ops.cl_destroy = clnt_dg_destroy;
		ops.cl_control = clnt_dg_control;
	}
	sig_mutex_unlock(&ops_lock);
	return (&ops);
}
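
Examples #9 and #10 are the same idiom: lazily populate a static ops vector under ops_lock, using the first slot (cl_call) as the "already initialized" flag so no caller can observe a half-filled struct. The same effect can be had with pthread_once(); a minimal sketch of that alternative (not what libnsl does), with hypothetical my_call/my_destroy implementations:

#include <pthread.h>

struct ops {
	void (*op_call)(void);
	void (*op_destroy)(void);
};

static struct ops ops;
static pthread_once_t ops_once = PTHREAD_ONCE_INIT;

extern void my_call(void);	/* hypothetical implementations */
extern void my_destroy(void);

static void
ops_init(void)
{
	ops.op_call = my_call;
	ops.op_destroy = my_destroy;
}

static struct ops *
get_ops(void)
{
	/* pthread_once() guarantees ops_init() runs exactly once */
	(void) pthread_once(&ops_once, ops_init);
	return (&ops);
}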
Example #11
int
_tx_listen(int fd, struct t_call *call, int api_semantics)
{
	struct strbuf ctlbuf;
	struct strbuf databuf;
	int retval;
	union T_primitives *pptr;
	struct _ti_user *tiptr;
	int sv_errno;
	int didalloc, didralloc;
	int flg = 0;

	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL)
		return (-1);

	sig_mutex_lock(&tiptr->ti_lock);

	if (tiptr->ti_servtype == T_CLTS) {
		sv_errno = errno;
		t_errno = TNOTSUPPORT;
		sig_mutex_unlock(&tiptr->ti_lock);
		errno = sv_errno;
		return (-1);
	}
	if (_T_IS_XTI(api_semantics)) {
		/*
		 * User-level state verification is done only for XTI
		 * because doing it for TLI may break existing applications.
		 */
		if (!(tiptr->ti_state == T_IDLE ||
		    tiptr->ti_state == T_INCON)) {
			t_errno = TOUTSTATE;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}

		if (tiptr->ti_qlen == 0) {
			t_errno = TBADQLEN;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}

		if (tiptr->ti_ocnt == tiptr->ti_qlen) {
			if (!(tiptr->ti_flags & TX_TQFULL_NOTIFIED)) {
				tiptr->ti_flags |= TX_TQFULL_NOTIFIED;
				t_errno = TQFULL;
				sig_mutex_unlock(&tiptr->ti_lock);
				return (-1);
			}
		}

	}

	/*
	 * check if something is in the look buffer
	 */
	if (tiptr->ti_lookcnt > 0) {
		t_errno = TLOOK;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (-1);
	}

	/*
	 * Acquire ctlbuf for use in sending/receiving control part
	 * of the message.
	 */
	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
		sv_errno = errno;
		sig_mutex_unlock(&tiptr->ti_lock);
		errno = sv_errno;
		return (-1);
	}
	/*
	 * Acquire databuf for use in sending/receiving data part
	 */
	if (_t_acquire_databuf(tiptr, &databuf, &didralloc) < 0) {
		int sv_errno = errno;

		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		sig_mutex_unlock(&tiptr->ti_lock);
		errno = sv_errno;
		return (-1);
	}

	/*
	 * This call may block indefinitely, so in the MT case we drop
	 * the lock, allow signals, and reacquire the lock afterward.
	 * The error path should roll back any state changes made above
	 * (there happen to be none here).
	 */
	sig_mutex_unlock(&tiptr->ti_lock);
	if ((retval = getmsg(fd, &ctlbuf, &databuf, &flg)) < 0) {
		if (errno == EAGAIN)
			t_errno = TNODATA;
		else
			t_errno = TSYSERR;
		sv_errno = errno;
		sig_mutex_lock(&tiptr->ti_lock);
		errno = sv_errno;
		goto err_out;
	}
	sig_mutex_lock(&tiptr->ti_lock);

	if (databuf.len == -1) databuf.len = 0;

	/*
	 * did I get the entire message?
	 */
	if (retval > 0) {
		t_errno = TSYSERR;
		errno = EIO;
		goto err_out;
	}

	/*
	 * is the ctl part large enough to determine the type?
	 */
	if (ctlbuf.len < (int)sizeof (t_scalar_t)) {
		t_errno = TSYSERR;
		errno = EPROTO;
		goto err_out;
	}

	/* LINTED pointer cast */
	pptr = (union T_primitives *)ctlbuf.buf;

	switch (pptr->type) {

	case T_CONN_IND:
		if ((ctlbuf.len < (int)sizeof (struct T_conn_ind)) ||
		    (ctlbuf.len < (int)(pptr->conn_ind.OPT_length
		    + pptr->conn_ind.OPT_offset))) {
			t_errno = TSYSERR;
			errno = EPROTO;
			goto err_out;
		}
		/*
		 * Change state, increment the outstanding connection
		 * indication count, and instantiate the "sequence"
		 * return parameter.
		 * Note: According to the spec, it is correct to do this
		 * despite the possibility of a TBUFOVFLW error later.
		 * The spec treats TBUFOVFLW in general as a special case
		 * which can be ignored by applications that do not
		 * really need the data returned in 'netbuf' structures.
		 */
		_T_TX_NEXTSTATE(T_LISTN, tiptr,
				"t_listen:invalid state event T_LISTN");
		tiptr->ti_ocnt++;
		call->sequence = pptr->conn_ind.SEQ_number;

		if (_T_IS_TLI(api_semantics) || call->addr.maxlen > 0) {
			if (TLEN_GT_NLEN(pptr->conn_ind.SRC_length,
			    call->addr.maxlen)) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(call->addr.buf, ctlbuf.buf +
			    (size_t)pptr->conn_ind.SRC_offset,
			    (unsigned int)pptr->conn_ind.SRC_length);
			call->addr.len = pptr->conn_ind.SRC_length;
		}
		if (_T_IS_TLI(api_semantics) || call->opt.maxlen > 0) {
			if (TLEN_GT_NLEN(pptr->conn_ind.OPT_length,
			    call->opt.maxlen)) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(call->opt.buf, ctlbuf.buf +
			    pptr->conn_ind.OPT_offset,
			    (size_t)pptr->conn_ind.OPT_length);
			call->opt.len = pptr->conn_ind.OPT_length;
		}
		if (_T_IS_TLI(api_semantics) || call->udata.maxlen > 0) {
			if (databuf.len > (int)call->udata.maxlen) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(call->udata.buf, databuf.buf,
			    (size_t)databuf.len);
			call->udata.len = databuf.len;
		}

		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		if (didralloc)
			free(databuf.buf);
		else
			tiptr->ti_rcvbuf = databuf.buf;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (0);

	case T_DISCON_IND:
		/*
		 * Append to the events in the "look buffer"
		 * list of events. This routine may defer signals.
		 */
		if (_t_register_lookevent(tiptr, databuf.buf, databuf.len,
		    ctlbuf.buf, ctlbuf.len) < 0) {
			t_errno = TSYSERR;
			errno = ENOMEM;
			goto err_out;
		}
		t_errno = TLOOK;
		goto err_out;

	default:
		break;
	}

	t_errno = TSYSERR;
	errno = EPROTO;
err_out:
	sv_errno = errno;

	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	if (didralloc)
		free(databuf.buf);
	else
		tiptr->ti_rcvbuf = databuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	errno = sv_errno;
	return (-1);
}
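
_tx_listen() is the library backend of the documented t_listen(3NSL) entry point. A minimal server-side sketch against the public XTI/TLI API; it assumes fd is already opened and bound with qlen > 0:

#include <xti.h>

/*
 * Sketch: wait for one connect indication on a listening endpoint.
 * t_alloc() sizes the netbufs in *call from the transport's t_info,
 * so t_listen() can fill them in.  Returns the sequence number to
 * pass to t_accept(), or -1 (check t_errno: TLOOK, TNODATA, ...).
 */
int
wait_for_connect(int fd)
{
	struct t_call *call;
	int seq;

	if ((call = (struct t_call *)t_alloc(fd, T_CALL, T_ALL)) == NULL)
		return (-1);
	if (t_listen(fd, call) < 0) {
		(void) t_free((char *)call, T_CALL);
		return (-1);
	}
	seq = call->sequence;
	(void) t_free((char *)call, T_CALL);
	return (seq);
}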
Example #12
int
_tx_rcvuderr(int fd, struct t_uderr *uderr, int api_semantics)
{
	struct strbuf ctlbuf, databuf;
	int flg;
	int retval;
	union T_primitives *pptr;
	struct _ti_user *tiptr;
	int sv_errno;
	int didalloc;
	int use_lookbufs = 0;
	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL)
		return (-1);
	sig_mutex_lock(&tiptr->ti_lock);

	if (tiptr->ti_servtype != T_CLTS) {
		t_errno = TNOTSUPPORT;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (-1);
	}
	/*
	 * is there a unitdata error indication in the look buffer?
	 */
	if (tiptr->ti_lookcnt > 0) {
		ctlbuf.len = tiptr->ti_lookbufs.tl_lookclen;
		ctlbuf.buf = tiptr->ti_lookbufs.tl_lookcbuf;
		/* Note: ctlbuf.maxlen not used in this case */

		/* LINTED pointer cast */
		assert(((union T_primitives *)ctlbuf.buf)->type ==
		    T_UDERROR_IND);

		databuf.maxlen = 0;
		databuf.len = 0;
		databuf.buf = NULL;

		use_lookbufs = 1;

	} else {
		if ((retval = _t_look_locked(fd, tiptr, 0,
		    api_semantics)) < 0) {
			sv_errno = errno;
			sig_mutex_unlock(&tiptr->ti_lock);
			errno = sv_errno;
			return (-1);
		}
		if (retval != T_UDERR) {
			t_errno = TNOUDERR;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}

		/*
		 * Acquire ctlbuf for use in sending/receiving control part
		 * of the message.
		 */
		if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
			sv_errno = errno;
			sig_mutex_unlock(&tiptr->ti_lock);
			errno = sv_errno;
			return (-1);
		}

		databuf.maxlen = 0;
		databuf.len = 0;
		databuf.buf = NULL;

		flg = 0;

		/*
		 * Since we already verified that a unitdata error
		 * indication is pending, we assume that this getmsg()
		 * will not block indefinitely.
		 */
		if ((retval = getmsg(fd, &ctlbuf, &databuf, &flg)) < 0) {
			t_errno = TSYSERR;
			goto err_out;
		}
		/*
		 * did I get the entire message?
		 */
		if (retval > 0) {
			t_errno = TSYSERR;
			errno = EIO;
			goto err_out;
		}

	}

	/* LINTED pointer cast */
	pptr = (union T_primitives *)ctlbuf.buf;

	if ((ctlbuf.len < (int)sizeof (struct T_uderror_ind)) ||
	    (pptr->type != T_UDERROR_IND)) {
		t_errno = TSYSERR;
		errno = EPROTO;
		goto err_out;
	}

	if (uderr) {
		if (_T_IS_TLI(api_semantics) || uderr->addr.maxlen > 0) {
			if (TLEN_GT_NLEN(pptr->uderror_ind.DEST_length,
			    uderr->addr.maxlen)) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(uderr->addr.buf, ctlbuf.buf +
			    pptr->uderror_ind.DEST_offset,
			    (size_t)pptr->uderror_ind.DEST_length);
			uderr->addr.len =
			    (unsigned int)pptr->uderror_ind.DEST_length;
		}
		if (_T_IS_TLI(api_semantics) || uderr->opt.maxlen > 0) {
			if (TLEN_GT_NLEN(pptr->uderror_ind.OPT_length,
			    uderr->opt.maxlen)) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(uderr->opt.buf, ctlbuf.buf +
			    pptr->uderror_ind.OPT_offset,
			    (size_t)pptr->uderror_ind.OPT_length);
			uderr->opt.len =
			    (unsigned int)pptr->uderror_ind.OPT_length;
		}
		uderr->error = pptr->uderror_ind.ERROR_type;
	}

	_T_TX_NEXTSTATE(T_RCVUDERR, tiptr,
			"t_rcvuderr: invalid state event T_RCVUDERR");
	if (use_lookbufs)
		_t_free_looklist_head(tiptr);
	else {
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
	}
	sig_mutex_unlock(&tiptr->ti_lock);
	return (0);

err_out:
	sv_errno = errno;

	if (use_lookbufs)
		_t_free_looklist_head(tiptr);
	else {
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
	}
	sig_mutex_unlock(&tiptr->ti_lock);
	errno = sv_errno;
	return (-1);
}
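
At the application level this is t_rcvuderr(3NSL), typically called after t_sndudata()/t_rcvudata() fails with TLOOK. A hedged sketch:

#include <xti.h>
#include <stdio.h>

/*
 * Sketch: after t_look() reports T_UDERR, consume the error
 * indication so the endpoint can continue receiving datagrams.
 */
void
drain_uderr(int fd)
{
	struct t_uderr *uderr;

	uderr = (struct t_uderr *)t_alloc(fd, T_UDERROR, T_ALL);
	if (uderr == NULL)
		return;
	if (t_rcvuderr(fd, uderr) == 0)
		(void) fprintf(stderr, "datagram error: %ld\n",
		    (long)uderr->error);
	(void) t_free((char *)uderr, T_UDERROR);
}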
Example #13
/*
 * Connectionless client creation returns a client handle with the given
 * parameters. Default options are set, which the user can change using
 * clnt_control(). fd should be open and bound.
 * NB: The rpch->cl_auth is initialized to null authentication.
 * 	Caller may wish to set this to something more useful.
 *
 * sendsz and recvsz are the maximum allowable packet sizes that can be
 * sent and received. Normally they are the same, but they can be
 * changed to improve the program efficiency and buffer allocation.
 * If they are 0, use the transport default.
 *
 * If svcaddr is NULL, returns NULL.
 */
CLIENT *
clnt_dg_create(const int fd, struct netbuf *svcaddr, const rpcprog_t program,
	const rpcvers_t version, const uint_t sendsz, const uint_t recvsz)
{
	CLIENT *cl = NULL;		/* client handle */
	struct cu_data *cu = NULL;	/* private data */
	struct t_unitdata *tr_data;
	struct t_info tinfo;
	struct timeval now;
	struct rpc_msg call_msg;
	uint_t ssz;
	uint_t rsz;

	sig_mutex_lock(&dgtbl_lock);
	if ((dgtbl == NULL) && ((dgtbl = rpc_fd_init()) == NULL)) {
		sig_mutex_unlock(&dgtbl_lock);
		goto err1;
	}
	sig_mutex_unlock(&dgtbl_lock);

	if (svcaddr == NULL) {
		rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
		return (NULL);
	}
	if (t_getinfo(fd, &tinfo) == -1) {
		rpc_createerr.cf_stat = RPC_TLIERROR;
		rpc_createerr.cf_error.re_errno = 0;
		rpc_createerr.cf_error.re_terrno = t_errno;
		return (NULL);
	}
	/*
	 * Set up to receive datagram errors. We ignore any errors returned
	 * from __rpc_tli_set_options() because SO_DGRAM_ERRIND is only
	 * relevant to the udp/udp6 transports, and at this point in the code
	 * we only know that we are using a connectionless transport.
	 */
	if (tinfo.servtype == T_CLTS)
		(void) __rpc_tli_set_options(fd, SOL_SOCKET, SO_DGRAM_ERRIND,
		    1);
	/*
	 * Find the receive and the send size
	 */
	ssz = __rpc_get_t_size((int)sendsz, tinfo.tsdu);
	rsz = __rpc_get_t_size((int)recvsz, tinfo.tsdu);
	if ((ssz == 0) || (rsz == 0)) {
		rpc_createerr.cf_stat = RPC_TLIERROR; /* XXX */
		rpc_createerr.cf_error.re_errno = 0;
		rpc_createerr.cf_error.re_terrno = 0;
		return (NULL);
	}

	if ((cl = malloc(sizeof (CLIENT))) == NULL)
		goto err1;
	/*
	 * Should be multiple of 4 for XDR.
	 */
	ssz = ((ssz + 3) / 4) * 4;
	rsz = ((rsz + 3) / 4) * 4;
	cu = malloc(sizeof (*cu) + ssz + rsz);
	if (cu == NULL)
		goto err1;
	if ((cu->cu_raddr.buf = malloc(svcaddr->len)) == NULL)
		goto err1;
	(void) memcpy(cu->cu_raddr.buf, svcaddr->buf, (size_t)svcaddr->len);
	cu->cu_raddr.len = cu->cu_raddr.maxlen = svcaddr->len;
	cu->cu_outbuf_start = &cu->cu_inbuf[rsz];
	/* Other values can also be set through clnt_control() */
	cu->cu_wait.tv_sec = 15;	/* heuristically chosen */
	cu->cu_wait.tv_usec = 0;
	cu->cu_total.tv_sec = -1;
	cu->cu_total.tv_usec = -1;
	cu->cu_sendsz = ssz;
	cu->cu_recvsz = rsz;
	(void) gettimeofday(&now, NULL);
	call_msg.rm_xid = getpid() ^ now.tv_sec ^ now.tv_usec;
	call_msg.rm_call.cb_prog = program;
	call_msg.rm_call.cb_vers = version;
	xdrmem_create(&(cu->cu_outxdrs), cu->cu_outbuf, ssz, XDR_ENCODE);
	if (!xdr_callhdr(&(cu->cu_outxdrs), &call_msg)) {
		rpc_createerr.cf_stat = RPC_CANTENCODEARGS;  /* XXX */
		rpc_createerr.cf_error.re_errno = 0;
		rpc_createerr.cf_error.re_terrno = 0;
		goto err2;
	}
	cu->cu_xdrpos = XDR_GETPOS(&(cu->cu_outxdrs));
	XDR_DESTROY(&(cu->cu_outxdrs));
	xdrmem_create(&(cu->cu_outxdrs), cu->cu_outbuf_start, ssz, XDR_ENCODE);
/* LINTED pointer alignment */
	tr_data = (struct t_unitdata *)t_alloc(fd, T_UNITDATA, T_ADDR | T_OPT);
	if (tr_data == NULL) {
		goto err1;
	}
	tr_data->udata.maxlen = cu->cu_recvsz;
	tr_data->udata.buf = cu->cu_inbuf;
	cu->cu_tr_data = tr_data;

	/*
	 * By default, closeit is always FALSE. It is the user's
	 * responsibility to do a t_close() on the fd; alternatively, the
	 * user may use clnt_control() to let clnt_destroy() do it.
	 */
	cu->cu_closeit = FALSE;
	cu->cu_fd = fd;
	cl->cl_ops = clnt_dg_ops();
	cl->cl_private = (caddr_t)cu;
	cl->cl_auth = authnone_create();
	cl->cl_tp = NULL;
	cl->cl_netid = NULL;
	cu->pfdp.fd = cu->cu_fd;
	cu->pfdp.events = MASKVAL;
	return (cl);
err1:
	(void) syslog(LOG_ERR, mem_err_clnt_dg);
	rpc_createerr.cf_stat = RPC_SYSTEMERROR;
	rpc_createerr.cf_error.re_errno = errno;
	rpc_createerr.cf_error.re_terrno = 0;
err2:
	if (cl) {
		free(cl);
		if (cu) {
			free(cu->cu_raddr.buf);
			free(cu);
		}
	}
	return (NULL);
}
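
A caller sketch for the documented clnt_dg_create(3NSL) entry point. It assumes fd was opened with t_open() and bound, and that svcaddr came from elsewhere (e.g. rpcb_getaddr()); passing 0 for sendsz/recvsz selects the transport defaults, as the header comment says.

#include <rpc/rpc.h>

CLIENT *
make_dg_client(int fd, struct netbuf *svcaddr,
    rpcprog_t prognum, rpcvers_t versnum)
{
	struct timeval total = { 30, 0 };
	CLIENT *clnt;

	clnt = clnt_dg_create(fd, svcaddr, prognum, versnum, 0, 0);
	if (clnt == NULL) {
		clnt_pcreateerror("clnt_dg_create");
		return (NULL);
	}
	/* override the -1/-1 "no total timeout" default set above */
	(void) CLNT_CONTROL(clnt, CLSET_TIMEOUT, (char *)&total);
	return (clnt);
}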
Example #14
int
_tx_sndrel(int fd, int api_semantics)
{
	struct T_ordrel_req orreq;
	struct strbuf ctlbuf;
	struct _ti_user *tiptr;

	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL)
		return (-1);
	sig_mutex_lock(&tiptr->ti_lock);

	if (tiptr->ti_servtype != T_COTS_ORD) {
		t_errno = TNOTSUPPORT;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (-1);
	}

	if (_T_IS_XTI(api_semantics)) {
		/*
		 * User-level state verification is done only for XTI
		 * because doing it for TLI may break existing applications.
		 */
		if (!(tiptr->ti_state == T_DATAXFER ||
		    tiptr->ti_state == T_INREL)) {
			t_errno = TOUTSTATE;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}

		if (_t_look_locked(fd, tiptr, 0,
		    api_semantics) == T_DISCONNECT) {
			t_errno = TLOOK;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}

	}

	orreq.PRIM_type = T_ORDREL_REQ;
	ctlbuf.maxlen = (int)sizeof (struct T_ordrel_req);
	ctlbuf.len = (int)sizeof (struct T_ordrel_req);
	ctlbuf.buf = (caddr_t)&orreq;

	/*
	 * Calls to send data (write() or putmsg()) can potentially
	 * block; for the MT case, we drop the lock, enable signals
	 * here, and reacquire the lock afterward.
	 */
	sig_mutex_unlock(&tiptr->ti_lock);
	if (putmsg(fd, &ctlbuf, NULL, 0) < 0) {
		if (errno == EAGAIN)
			t_errno = TFLOW;
		else
			t_errno = TSYSERR;
		return (-1);
	}
	sig_mutex_lock(&tiptr->ti_lock);
	_T_TX_NEXTSTATE(T_SNDREL, tiptr,
				"t_sndrel: invalid state on event T_SNDREL");
	sig_mutex_unlock(&tiptr->ti_lock);
	return (0);
}
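
The public entry point is t_sndrel(3NSL). A sketch of an orderly-release shutdown from the initiating side of a T_COTS_ORD connection; the buffer size is illustrative:

#include <xti.h>

/*
 * Sketch: after t_sndrel() we may no longer send data, but we keep
 * reading until the peer's release indication arrives, then consume
 * it with t_rcvrel() and close the endpoint.
 */
int
orderly_shutdown(int fd)
{
	char buf[512];
	int flags;

	if (t_sndrel(fd) < 0)
		return (-1);	/* t_errno: TFLOW, TNOTSUPPORT, ... */
	while (t_rcv(fd, buf, sizeof (buf), &flags) >= 0)
		;
	if (t_errno == TLOOK && t_look(fd) == T_ORDREL &&
	    t_rcvrel(fd) == 0)
		return (t_close(fd));
	return (-1);
}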
Example #15
int
_tx_rcvv(int fd, struct t_iovec *tiov, unsigned int tiovcount,  int *flags,
    int api_semantics)
{
	struct strbuf ctlbuf, databuf;
	int retval, flg = 0;
	int msglen;
	union T_primitives *pptr;
	struct _ti_user *tiptr;
	int sv_errno;
	int didalloc;
	unsigned int nbytes;
	char *dataptr;

	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL)
		return (-1);
	sig_mutex_lock(&tiptr->ti_lock);

	if (tiptr->ti_servtype == T_CLTS) {
		t_errno = TNOTSUPPORT;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (-1);
	}

	assert(api_semantics == TX_XTI_XNS5_API);

	if (tiovcount == 0 || tiovcount > T_IOV_MAX) {
		t_errno = TBADDATA;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (-1);
	}

	if (!(tiptr->ti_state == T_DATAXFER ||
	    tiptr->ti_state == T_OUTREL)) {
		t_errno = TOUTSTATE;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (-1);
	}

	/*
	 * Check the look buffer for pending events
	 */
	if (tiptr->ti_lookcnt > 0) {
		/*
		 * Implied preference rules give priority to
		 * T_DISCON_IND over T_ORDREL_IND. Also, certain errors, like
		 * data received after a T_ORDREL_IND or a duplicate
		 * T_ORDREL_IND after a T_ORDREL_IND, have priority over TLOOK.
		 * This manifests in the following code behavior:
		 *
		 * (1)  If something is in the look buffer, then check
		 *	the stream head also. This may result
		 *	in returning a TLOOK error, but only if
		 *	  - there is a message at the stream head but the
		 *	    look buffer has a T_DISCON_IND event, or
		 *	  - there are no messages on the stream head.
		 *
		 * (2)  If there are messages on the stream head and
		 *	all events in the look buffer are T_ORDREL_IND
		 *	(i.e. no message in the look buffer is T_DISCON_IND),
		 *	there could be data on the stream head to be picked
		 *	up, so we work on the stream head and do not return
		 *	TLOOK. We remove the event on the stream head and
		 *	queue it.
		 *
		 */
		do {
			retval = ioctl(fd, I_NREAD, &msglen);
		} while (retval < 0 && errno == EINTR);

		if (retval < 0) {
			sv_errno = errno;
			t_errno = TSYSERR;
			sig_mutex_unlock(&tiptr->ti_lock);
			errno = sv_errno;
			return (-1);
		}

		if (retval > 0) {
			/*
			 * If any T_DISCON_IND event in look buffer
			 * list then return TLOOK. Else continue
			 * processing as what could be on the stream
			 * head might be a possible T_DISCON_IND (which
			 * would have priority over the T_ORDREL_INDs
			 * on the look buffer.)
			 */
			struct _ti_lookbufs *tlbs;

			tlbs = &tiptr->ti_lookbufs;
			do {
				/* LINTED pointer cast */
				if (*((t_scalar_t *)tlbs->tl_lookcbuf)
				    == T_DISCON_IND) {
					t_errno = TLOOK;
					sig_mutex_unlock(&tiptr->ti_lock);
					return (-1);
				}
			} while ((tlbs = tlbs->tl_next) != NULL);

		} else {	/* retval == 0 */
			/*
			 * Nothing on stream head so whatever in
			 * look buffer has nothing that might override
			 * it.
			 */
			t_errno = TLOOK;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}
	}

	/*
	 * Acquire ctlbuf for use in sending/receiving control part
	 * of the message.
	 */
	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
		sv_errno = errno;
		sig_mutex_unlock(&tiptr->ti_lock);
		errno = sv_errno;
		return (-1);
	}

	nbytes = _t_bytecount_upto_intmax(tiov, tiovcount);
	dataptr = NULL;
	if (nbytes != 0 && ((dataptr = malloc(nbytes)) == NULL)) {
		sv_errno = errno;
		t_errno = TSYSERR;
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		sig_mutex_unlock(&tiptr->ti_lock);
		errno = sv_errno;
		return (-1);
	}

	databuf.maxlen = (int)nbytes;
	databuf.len = 0;
	databuf.buf = dataptr;

	*flags = 0;

	/*
	 * This call may block indefinitely, so in the MT case we drop
	 * the lock, allow signals, and reacquire the lock afterward.
	 * The error path should roll back any state changes made above
	 * (there happen to be none here).
	 */
	sig_mutex_unlock(&tiptr->ti_lock);
	if ((retval = getmsg(fd, &ctlbuf, &databuf, &flg)) < 0) {
		if (errno == EAGAIN)
			t_errno = TNODATA;
		else
			t_errno = TSYSERR;
		sv_errno = errno;
		sig_mutex_lock(&tiptr->ti_lock);
		errno = sv_errno;
		goto err_out;
	}
	sig_mutex_lock(&tiptr->ti_lock);

	assert((retval & MORECTL) == 0); /* MORECTL should not be on */

	if (databuf.len == -1) databuf.len = 0;

	if (ctlbuf.len > 0) {
		if (ctlbuf.len < (int)sizeof (t_scalar_t)) {
			t_errno = TSYSERR;
			errno = EPROTO;
			goto err_out;
		}

		/* LINTED pointer cast */
		pptr = (union T_primitives *)ctlbuf.buf;

		switch (pptr->type) {

		case T_EXDATA_IND:
			*flags |= T_EXPEDITED;
			if (retval > 0)
				tiptr->ti_flags |= EXPEDITED;
			/* FALLTHROUGH */
		case T_DATA_IND:
			/*
			 * Uses the fact that T_DATA_IND and T_EXDATA_IND
			 * are the same size.
			 */
			if ((ctlbuf.len < (int)sizeof (struct T_data_ind)) ||
			    (tiptr->ti_lookcnt > 0)) {
				/*
				 * ti_lookcnt > 0 implies data
				 * received after T_DISCON_IND or
				 * T_ORDREL_IND hence error
				 */
				t_errno = TSYSERR;
				errno = EPROTO;
				goto err_out;
			}

			if ((pptr->data_ind.MORE_flag) || retval)
				*flags |= T_MORE;
			if ((pptr->data_ind.MORE_flag) && retval)
				tiptr->ti_flags |= MORE;
			/*
			 * No real state change on T_RCV event (noop)
			 *
			 * We invoke the macro only for error logging
			 * part of its capabilities when in a bad state.
			 */
			_T_TX_NEXTSTATE(T_RCV, tiptr,
					"t_rcvv: invalid state event T_RCV");
			if (didalloc)
				free(ctlbuf.buf);
			else
				tiptr->ti_ctlbuf = ctlbuf.buf;
			_t_scatter(&databuf, tiov, tiovcount);
			if (dataptr != NULL)
				free(dataptr);
			sig_mutex_unlock(&tiptr->ti_lock);
			return (databuf.len);

		case T_ORDREL_IND:
			if (tiptr->ti_lookcnt > 0) {
				/*
				 * ti_lookcnt > 0 implies T_ORDREL_IND
				 * received after T_DISCON_IND or
				 * another T_ORDREL_IND hence error.
				 */
				t_errno = TSYSERR;
				errno = EPROTO;
				goto err_out;
			}
			/* FALLTHROUGH */
		case T_DISCON_IND:
			/*
			 * Post event (T_ORDREL_IND/T_DISCON_IND) to
			 * the lookbuffer list.
			 */

			if (_t_register_lookevent(tiptr, databuf.buf,
			    databuf.len, ctlbuf.buf, ctlbuf.len) < 0) {
				t_errno = TSYSERR;
				errno = ENOMEM;
				goto err_out;
			}
			/*
			 * We know that T_DISCON_IND is stored in
			 * last look buffer. If there is more data
			 * that follows, we try to append it to
			 * the same look buffer
			 */
			if (retval & MOREDATA) {
				ctlbuf.maxlen = 0; /* XXX why ? */
				ctlbuf.len = 0;

				/*
				 * XXX Will break (-ve maxlen) for
				 * transport provider with unbounded
				 * T_DISCON_IND data part (-1).
				 */
				databuf.maxlen = tiptr->ti_rcvsize -
				    databuf.len;

				databuf.len = 0;
				databuf.buf =
				    tiptr->ti_lookbufs.tl_lookdbuf +
				    tiptr->ti_lookbufs.tl_lookdlen;
				*flags = 0;

				/*
				 * Since MOREDATA was set, we assume
				 * that this getmsg will not block
				 * indefinitely
				 */
				do {
					retval = getmsg(fd, &ctlbuf,
					    &databuf, &flg);
				} while (retval < 0 && errno == EINTR);

				if (retval < 0) {
					t_errno = TSYSERR;
					goto err_out;
				}
				if (databuf.len == -1) databuf.len = 0;
				if (retval > 0) {
					/* MORECTL should not be on */
					assert((retval & MORECTL) == 0);
					/*
					 * XXX - Why ?
					 * No support for unbounded data
					 * on T_DISCON_IND ?
					 */
					t_errno = TSYSERR;
					errno = EPROTO;
					goto err_out;
				}
				tiptr->ti_lookbufs.tl_lookdlen += databuf.len;
			}

			t_errno = TLOOK;
			goto err_out;

		default:
			break;
		}

		t_errno = TSYSERR;
		errno = EPROTO;
		goto err_out;

	} else {		/* else for "if (ctlbuf.len > 0)" */
		if (!retval && (tiptr->ti_flags & MORE)) {
			*flags |= T_MORE;
			tiptr->ti_flags &= ~MORE;
		}
		if (retval & MOREDATA)
			*flags |= T_MORE;

		/*
		 * If inside an ETSDU, set the expedited flag and turn
		 * off the internal flag when we reach the end of the ETSDU.
		 */
		if (tiptr->ti_flags & EXPEDITED) {
			*flags |= T_EXPEDITED;
			if (!retval)
				tiptr->ti_flags &= ~EXPEDITED;
		}

		/*
		 * No real state change on T_RCV events (It is a NOOP)
		 *
		 * We invoke the macro only for error logging
		 * part of its capabilities when in a bad state.
		 */
		_T_TX_NEXTSTATE(T_RCV, tiptr,
			"t_rcvv: state invalid T_RCV event");
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		_t_scatter(&databuf, tiov, tiovcount);
		if (dataptr != NULL)
			free(dataptr);
		sig_mutex_unlock(&tiptr->ti_lock);
		return (databuf.len);
	}
	/* NOTREACHED */

err_out:
	sv_errno = errno;
	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	if (dataptr != NULL)
		free(dataptr);
	sig_mutex_unlock(&tiptr->ti_lock);

	errno = sv_errno;
	return (-1);
}
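
The public face of this routine is t_rcvv(3NSL) (XNS 5 only), which scatters one receive across multiple buffers; compare the _t_scatter() call above. A minimal sketch:

#include <stddef.h>
#include <xti.h>

/*
 * Sketch: scatter a single receive into a header and a payload
 * buffer.  T_MORE in flags means more of the same TSDU follows.
 * Returns the byte count across both buffers, or -1 (check
 * t_errno: TLOOK, TNODATA, TSYSERR, ...).
 */
int
recv_split(int fd, void *hdr, size_t hdrlen, void *body, size_t bodylen)
{
	struct t_iovec iov[2];
	int flags = 0;
	int n;

	iov[0].iov_base = hdr;
	iov[0].iov_len = hdrlen;
	iov[1].iov_base = body;
	iov[1].iov_len = bodylen;

	if ((n = t_rcvv(fd, iov, 2, &flags)) < 0)
		return (-1);
	if (flags & T_EXPEDITED) {
		/* expedited data; handle out-of-band semantics */
	}
	return (n);
}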
Example #16
int
_tx_open(const char *path, int flags, struct t_info *info, int api_semantics)
{
	int retval, fd, sv_errno;
	int sv_terrno;
	int sv_errno_global;
	struct _ti_user *tiptr;
	sigset_t mask;
	int t_create_first_attempt = 1;
	int ticap_ioctl_failed = 0;

	if (!(flags & O_RDWR)) {
		t_errno = TBADFLAG;
		return (-1);
	}

	sv_errno_global = errno;
	sv_terrno = t_errno;

retry:
	if ((fd = open(path, flags)) < 0) {
		t_errno = TSYSERR;
		if (_T_IS_XTI(api_semantics) && errno == ENOENT)
			/* XTI only */
			t_errno = TBADNAME;
		return (-1);
	}
	/*
	 * is the module already pushed?
	 */
	do {
		retval = ioctl(fd, I_FIND, "timod");
	} while (retval < 0 && errno == EINTR);

	if (retval < 0) {
		sv_errno = errno;

		t_errno = TSYSERR;
		(void) close(fd);
		errno = sv_errno;
		return (-1);
	}

	if (retval == 0) {
		/*
		 * "timod" not already on stream, then push it
		 */
		do {
			/*
			 * Assumes (correctly) that I_PUSH is
			 * atomic w.r.t. signals (EINTR error)
			 */
			retval = ioctl(fd, I_PUSH, "timod");
		} while (retval < 0 && errno == EINTR);

		if (retval < 0) {
			int sv_errno = errno;

			t_errno = TSYSERR;
			(void) close(fd);
			errno = sv_errno;
			return (-1);
		}
	}

	/*
	 * _t_create() requires that all signals be blocked.
	 * Note that sig_mutex_lock() only defers signals, it does not
	 * block them, so interruptible syscalls could still get EINTR.
	 */
	(void) thr_sigsetmask(SIG_SETMASK, &fillset, &mask);
	sig_mutex_lock(&_ti_userlock);
	/*
	 * The call to _t_create() may fail either because the transport
	 * doesn't understand T_CAPABILITY_REQ or for some other reason.
	 * It is nearly impossible to distinguish between these cases, so
	 * it is implicitly assumed that it is always safe to close and
	 * reopen the same stream and that open/close doesn't have side
	 * effects. _t_create() may fail only once if its failure is
	 * caused by an unimplemented T_CAPABILITY_REQ.
	 */
	tiptr = _t_create(fd, info, api_semantics, &ticap_ioctl_failed);
	if (tiptr == NULL) {
		/*
		 * If _t_create() failed because the TI_CAPABILITY ioctl
		 * failed, we may try to reopen the stream in the hope that
		 * timod will emulate TI_CAPABILITY and it will succeed when
		 * called again.
		 */
		if (t_create_first_attempt == 1 && ticap_ioctl_failed == 1) {
			t_create_first_attempt = 0;
			(void) close(fd);
			errno = sv_errno_global;
			t_errno = sv_terrno;
			sig_mutex_unlock(&_ti_userlock);
			(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
			goto retry;
		} else {
			int sv_errno = errno;
			(void) close(fd);
			sig_mutex_unlock(&_ti_userlock);
			(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
			errno = sv_errno;
			return (-1);
		}
	}

	/*
	 * _t_create() synchronizes state with the kernel timod and
	 * already sets it to T_UNBND, which is what it needs to be
	 * on the T_OPEN event. No _T_TX_NEXTSTATE needed here.
	 */
	sig_mutex_unlock(&_ti_userlock);
	(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);

	do {
		retval = ioctl(fd, I_FLUSH, FLUSHRW);
	} while (retval < 0 && errno == EINTR);

	/*
	 * We ignore other error cases (retval < 0) - the assumption is
	 * that an I_FLUSH failure is temporary (e.g. ENOSR) or an
	 * otherwise benign failure on this newly opened file
	 * descriptor, and not a critical failure.
	 */

	return (fd);
}
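
The documented wrapper is t_open(3NSL). A minimal endpoint-creation sketch; the /dev/tcp device path follows Solaris convention and is illustrative:

#include <xti.h>
#include <fcntl.h>
#include <stdio.h>

/*
 * Sketch: open a TCP endpoint and report the transport's
 * characteristics.  O_RDWR is mandatory, as enforced above.
 */
int
open_tcp_endpoint(void)
{
	struct t_info info;
	int fd;

	if ((fd = t_open("/dev/tcp", O_RDWR, &info)) < 0) {
		(void) t_error("t_open");	/* prints t_errno text */
		return (-1);
	}
	(void) printf("servtype %ld, max TSDU %ld\n",
	    (long)info.servtype, (long)info.tsdu);
	return (fd);
}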
Example #17
int
_tx_rcvdis(int fd, struct t_discon *discon, int api_semantics)
{
	struct strbuf ctlbuf;
	struct strbuf databuf;
	int retval;
	union T_primitives *pptr;
	struct _ti_user *tiptr;
	int sv_errno;
	int flg = 0;
	int didalloc, didralloc;
	int use_lookbufs = 0;

	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL)
		return (-1);

	/*
	 * Acquire the per-thread lock.
	 * Note: The lock is held across most of this routine,
	 * including the blocking getmsg() call. This is fine
	 * because it is first verified that an event is pending.
	 */
	sig_mutex_lock(&tiptr->ti_lock);

	if (tiptr->ti_servtype == T_CLTS) {
		t_errno = TNOTSUPPORT;
		sig_mutex_unlock(&tiptr->ti_lock);
		return (-1);
	}

	if (_T_IS_XTI(api_semantics)) {
		/*
		 * User-level state verification is done only for XTI
		 * because doing it for TLI may break existing applications.
		 */
		if (!(tiptr->ti_state == T_DATAXFER ||
		    tiptr->ti_state == T_OUTCON ||
		    tiptr->ti_state == T_OUTREL ||
		    tiptr->ti_state == T_INREL ||
		    (tiptr->ti_state == T_INCON && tiptr->ti_ocnt > 0))) {
			t_errno = TOUTSTATE;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}
	}
	/*
	 * Handle the likely scenario as a special case:
	 * if a discon is the first event in the look buffer,
	 * just get it.
	 */
	if ((tiptr->ti_lookcnt > 0) &&
	    /* LINTED pointer cast */
	    (*((t_scalar_t *)tiptr->ti_lookbufs.tl_lookcbuf) == T_DISCON_IND)) {
		/*
		 * The T_DISCON_IND is already in the look buffer
		 */
		ctlbuf.len = tiptr->ti_lookbufs.tl_lookclen;
		ctlbuf.buf = tiptr->ti_lookbufs.tl_lookcbuf;
		/* Note: ctlbuf.maxlen not used in this case */

		databuf.len = tiptr->ti_lookbufs.tl_lookdlen;
		databuf.buf = tiptr->ti_lookbufs.tl_lookdbuf;
		/* Note databuf.maxlen not used in this case */

		use_lookbufs = 1;

	} else {

		if ((retval = _t_look_locked(fd, tiptr, 0,
		    api_semantics)) < 0) {
			sv_errno = errno;
			sig_mutex_unlock(&tiptr->ti_lock);
			errno = sv_errno;
			return (-1);
		}

		if (retval != T_DISCONNECT) {
			t_errno = TNODIS;
			sig_mutex_unlock(&tiptr->ti_lock);
			return (-1);
		}

		/*
		 * get disconnect off read queue.
		 * use ctl and rcv buffers
		 *
		 * Acquire ctlbuf for use in sending/receiving control part
		 * of the message.
		 */
		if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
			sv_errno = errno;
			sig_mutex_unlock(&tiptr->ti_lock);
			errno = sv_errno;
			return (-1);
		}

		/*
		 * Acquire databuf for use in sending/receiving data part
		 */
		if (_t_acquire_databuf(tiptr, &databuf, &didralloc) < 0) {
			sv_errno = errno;
			if (didalloc)
				free(ctlbuf.buf);
			else
				tiptr->ti_ctlbuf = ctlbuf.buf;
			sig_mutex_unlock(&tiptr->ti_lock);
			errno = sv_errno;
			return (-1);
		}

		/*
		 * Since we already verified that a disconnect event
		 * is present, we assume that this getmsg() cannot
		 * block indefinitely
		 */
		do {
			retval = getmsg(fd, &ctlbuf, &databuf, &flg);
		} while (retval < 0 && errno == EINTR);

		if (retval < 0) {
			t_errno = TSYSERR;
			goto err_out;
		}
		if (databuf.len == -1) databuf.len = 0;

		/*
		 * did I get the entire message?
		 */
		if (retval > 0) {
			t_errno = TSYSERR;
			errno = EIO;
			goto err_out;
		}
	}
	/* LINTED pointer cast */
	pptr = (union T_primitives *)ctlbuf.buf;

	if ((ctlbuf.len < (int)sizeof (struct T_discon_ind)) ||
	    (pptr->type != T_DISCON_IND)) {
		t_errno = TSYSERR;
		errno = EPROTO;
		goto err_out;
	}

	/*
	 * clear more and expedited flags
	 */
	tiptr->ti_flags &= ~(MORE | EXPEDITED);

	if (tiptr->ti_ocnt <= 0) {
		_T_TX_NEXTSTATE(T_RCVDIS1, tiptr,
				"t_rcvdis: invalid state event T_RCVDIS1");
	} else {
		if (tiptr->ti_ocnt == 1) {
			_T_TX_NEXTSTATE(T_RCVDIS2, tiptr,
				"t_rcvdis: invalid state event T_RCVDIS2");
		} else {
			_T_TX_NEXTSTATE(T_RCVDIS3, tiptr,
				"t_rcvdis: invalid state event T_RCVDIS3");
		}
		tiptr->ti_ocnt--;
		tiptr->ti_flags &= ~TX_TQFULL_NOTIFIED;
	}

	if (discon != NULL) {
		if (_T_IS_TLI(api_semantics) || discon->udata.maxlen > 0) {
			if (databuf.len > (int)discon->udata.maxlen) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(discon->udata.buf, databuf.buf,
			    (size_t)databuf.len);
			discon->udata.len = databuf.len;
		}
		discon->reason = pptr->discon_ind.DISCON_reason;
		discon->sequence = pptr->discon_ind.SEQ_number;
	}
	if (use_lookbufs)
		_t_free_looklist_head(tiptr);
	else {
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		if (didralloc)
			free(databuf.buf);
		else
			tiptr->ti_rcvbuf = databuf.buf;
	}
	sig_mutex_unlock(&tiptr->ti_lock);
	return (0);

err_out:
	sv_errno = errno;

	if (use_lookbufs)
		_t_free_looklist_head(tiptr);
	else {
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		if (didralloc)
			free(databuf.buf);
		else
			tiptr->ti_rcvbuf = databuf.buf;
	}
	sig_mutex_unlock(&tiptr->ti_lock);
	errno = sv_errno;
	return (-1);
}
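
Applications reach this code through t_rcvdis(3NSL), usually after some other call failed with TLOOK. A hedged event-handling sketch; the udata size is illustrative:

#include <xti.h>
#include <stdio.h>

/*
 * Sketch: after a call fails with t_errno == TLOOK, consume a
 * pending disconnect so the endpoint returns to a sane state.
 * Returns 1 if a disconnect was consumed, 0 if some other event
 * is pending, -1 on error.
 */
int
handle_look(int fd)
{
	struct t_discon discon;
	char udata[64];

	if (t_look(fd) != T_DISCONNECT)
		return (0);

	discon.udata.buf = udata;
	discon.udata.maxlen = sizeof (udata);
	if (t_rcvdis(fd, &discon) < 0)
		return (-1);
	(void) fprintf(stderr, "disconnected, reason %d\n",
	    discon.reason);
	return (1);
}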
Example #18
int
_tx_bind(
	int fd,
	const struct t_bind *req,
	struct t_bind *ret,
	int api_semantics
)
{
	struct T_bind_req *bind_reqp;
	struct T_bind_ack *bind_ackp;
	int size, sv_errno, retlen;
	struct _ti_user *tiptr;
	sigset_t mask;

	int didalloc;
	int use_xpg41tpi;
	struct strbuf ctlbuf;

	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == NULL)
		return (-1);

	/*
	 * We block all signals since TI_BIND, which sends a TPI message
	 * O_T_BIND_REQ down, is not an idempotent operation.
	 * Note that sig_mutex_lock() only defers signals, it does not
	 * block them, so interruptible syscalls could still get EINTR.
	 */
	(void) thr_sigsetmask(SIG_SETMASK, &fillset, &mask);
	sig_mutex_lock(&tiptr->ti_lock);
	if (_T_IS_XTI(api_semantics)) {
		/*
		 * User level state verification only done for XTI
		 * because doing for TLI may break existing applications
		 */
		if (tiptr->ti_state != T_UNBND) {
			t_errno = TOUTSTATE;
			sig_mutex_unlock(&tiptr->ti_lock);
			(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
			return (-1);
		}
	}
	/*
	 * Acquire buffer for use in sending/receiving the message.
	 * Note: assumes (correctly) that ti_ctlsize is large enough
	 * to hold sizeof (struct T_bind_req/ack)
	 */
	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
		sv_errno = errno;
		sig_mutex_unlock(&tiptr->ti_lock);
		(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
		errno = sv_errno;
		return (-1);
	}

	/* LINTED pointer cast */
	bind_reqp = (struct T_bind_req *)ctlbuf.buf;
	size = (int)sizeof (struct T_bind_req);

	use_xpg41tpi = (_T_IS_XTI(api_semantics)) &&
	    ((tiptr->ti_prov_flag & XPG4_1) != 0);
	if (use_xpg41tpi)
		/* XTI call and provider knows the XTI inspired TPI */
		bind_reqp->PRIM_type = T_BIND_REQ;
	else
		/* TLI caller old TPI provider */
		bind_reqp->PRIM_type = O_T_BIND_REQ;

	bind_reqp->ADDR_length = (req == NULL ? 0 : req->addr.len);
	bind_reqp->ADDR_offset = 0;
	bind_reqp->CONIND_number = (req == NULL ? 0 : req->qlen);
	if (bind_reqp->ADDR_length) {
		if (_t_aligned_copy(&ctlbuf, (int)bind_reqp->ADDR_length, size,
		    req->addr.buf, &bind_reqp->ADDR_offset) < 0) {
			/*
			 * Aligned copy will overflow buffer allocated based
			 * on transport maximum address length.
			 * return error.
			 */
			t_errno = TBADADDR;
			goto err_out;
		}
		size = bind_reqp->ADDR_offset + bind_reqp->ADDR_length;
	}

	if (_t_do_ioctl(fd, ctlbuf.buf, size, TI_BIND, &retlen) < 0) {
		goto err_out;
	}

	if (retlen < (int)sizeof (struct T_bind_ack)) {
		t_errno = TSYSERR;
		errno = EIO;
		goto err_out;
	}

	/* LINTED pointer cast */
	bind_ackp = (struct T_bind_ack *)ctlbuf.buf;

	if ((req != NULL) && req->addr.len != 0 &&
	    (use_xpg41tpi == 0) && (_T_IS_XTI(api_semantics))) {
		/*
		 * Best effort to do XTI on old TPI.
		 *
		 * Match address requested or unbind and fail with
		 * TADDRBUSY.
		 *
		 * XXX - Hack alert! Should we do this at all?
		 * Not "supported", as it may not work if the encoding of
		 * the returned address is different. This will also have
		 * trouble with TCP/UDP wildcard port requests.
		 */
		if ((req->addr.len != bind_ackp->ADDR_length) ||
		    (memcmp(req->addr.buf, ctlbuf.buf +
		    bind_ackp->ADDR_offset, req->addr.len) != 0)) {
			(void) _tx_unbind_locked(fd, tiptr, &ctlbuf);
			t_errno = TADDRBUSY;
			goto err_out;
		}
	}

	tiptr->ti_ocnt = 0;
	tiptr->ti_flags &= ~TX_TQFULL_NOTIFIED;

	_T_TX_NEXTSTATE(T_BIND, tiptr, "t_bind: invalid state event T_BIND");

	if (ret != NULL) {
		if (_T_IS_TLI(api_semantics) || ret->addr.maxlen > 0) {
			if (TLEN_GT_NLEN(bind_ackp->ADDR_length,
			    ret->addr.maxlen)) {
				t_errno = TBUFOVFLW;
				goto err_out;
			}
			(void) memcpy(ret->addr.buf,
			    ctlbuf.buf + bind_ackp->ADDR_offset,
			    (size_t)bind_ackp->ADDR_length);
			ret->addr.len = bind_ackp->ADDR_length;
		}
		ret->qlen = bind_ackp->CONIND_number;
	}

	tiptr->ti_qlen = (uint_t)bind_ackp->CONIND_number;

	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	return (0);
	/* NOTREACHED */
err_out:
	sv_errno = errno;
	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	errno = sv_errno;
	return (-1);
}
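
The corresponding public call is t_bind(3NSL). A sketch that lets the transport pick the local address and requests a connect-indication queue, making the endpoint a listener; the address buffer size is illustrative:

#include <xti.h>

/*
 * Sketch: qlen > 0 marks this endpoint as a listener.  The
 * transport may grant fewer pending connect indications than
 * requested, so return the qlen actually granted.
 */
int
bind_listener(int fd)
{
	struct t_bind req, ret;
	char addrbuf[128];

	req.addr.len = 0;	/* no preferred address */
	req.addr.buf = NULL;
	req.addr.maxlen = 0;
	req.qlen = 5;		/* requested connect-indication queue */

	ret.addr.buf = addrbuf;
	ret.addr.maxlen = sizeof (addrbuf);

	if (t_bind(fd, &req, &ret) < 0)
		return (-1);	/* t_errno: TOUTSTATE, TADDRBUSY, ... */
	return ((int)ret.qlen);
}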
Example #19
static void *
tpool_worker(void *arg)
{
	tpool_t *tpool = (tpool_t *)arg;
	int elapsed;
	tpool_job_t *job;
	void (*func)(void *);
	tpool_active_t active;

	sig_mutex_lock(&tpool->tp_mutex);
	pthread_cleanup_push(worker_cleanup, tpool);

	/*
	 * This is the worker's main loop.
	 * It is left only if a timeout or an error has occurred.
	 */
	active.tpa_tid = pthread_self();
	for (;;) {
		elapsed = 0;
		tpool->tp_idle++;
		if (tpool->tp_flags & TP_WAIT)
			notify_waiters(tpool);
		while ((tpool->tp_head == NULL ||
		    (tpool->tp_flags & TP_SUSPEND)) &&
		    !(tpool->tp_flags & (TP_DESTROY | TP_ABANDON))) {
			if (tpool->tp_current <= tpool->tp_minimum ||
			    tpool->tp_linger == 0) {
				(void) sig_cond_wait(&tpool->tp_workcv,
				    &tpool->tp_mutex);
			} else {
				timestruc_t timeout;

				timeout.tv_sec = tpool->tp_linger;
				timeout.tv_nsec = 0;
				if (sig_cond_reltimedwait(&tpool->tp_workcv,
				    &tpool->tp_mutex, &timeout) != 0) {
					elapsed = 1;
					break;
				}
			}
		}
		tpool->tp_idle--;
		if (tpool->tp_flags & TP_DESTROY)
			break;
		if (tpool->tp_flags & TP_ABANDON) {
			/* can't abandon a suspended pool */
			if (tpool->tp_flags & TP_SUSPEND) {
				tpool->tp_flags &= ~TP_SUSPEND;
				(void) cond_broadcast(&tpool->tp_workcv);
			}
			if (tpool->tp_head == NULL)
				break;
		}
		if ((job = tpool->tp_head) != NULL &&
		    !(tpool->tp_flags & TP_SUSPEND)) {
			elapsed = 0;
			func = job->tpj_func;
			arg = job->tpj_arg;
			tpool->tp_head = job->tpj_next;
			if (job == tpool->tp_tail)
				tpool->tp_tail = NULL;
			tpool->tp_njobs--;
			active.tpa_next = tpool->tp_active;
			tpool->tp_active = &active;
			sig_mutex_unlock(&tpool->tp_mutex);
			pthread_cleanup_push(job_cleanup, tpool);
			lfree(job, sizeof (*job));
			/*
			 * Call the specified function.
			 */
			func(arg);
			/*
			 * We don't know what this thread has been doing,
			 * so we reset its signal mask and cancellation
			 * state back to the initial values.
			 */
			(void) pthread_sigmask(SIG_SETMASK, &maskset, NULL);
			(void) pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED,
			    NULL);
			(void) pthread_setcancelstate(PTHREAD_CANCEL_ENABLE,
			    NULL);
			pthread_cleanup_pop(1);
		}
		if (elapsed && tpool->tp_current > tpool->tp_minimum) {
			/*
			 * We timed out and there is no work to be done
			 * and the number of workers exceeds the minimum.
			 * Exit now to reduce the size of the pool.
			 */
			break;
		}
	}
	pthread_cleanup_pop(1);
	return (arg);
}
Example #20
int
_tx_getinfo(int fd, struct t_info *info, int api_semantics)
{
	struct T_info_req *inforeqp;
	struct T_info_ack *infoackp;
	int retlen;
	struct _ti_user *tiptr;
	int retval, sv_errno, didalloc;
	struct strbuf ctlbuf;

	if ((tiptr = _t_checkfd(fd, 0, api_semantics)) == 0)
		return (-1);
	sig_mutex_lock(&tiptr->ti_lock);

	/*
	 * Acquire buffer for use in sending/receiving the message.
	 * Note: assumes (correctly) that ti_ctlsize is large enough
	 * to hold sizeof (struct T_info_req/ack)
	 */
	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0) {
		sv_errno = errno;
		sig_mutex_unlock(&tiptr->ti_lock);
		errno = sv_errno;
		return (-1);
	}

	/* LINTED pointer cast */
	inforeqp = (struct T_info_req *)ctlbuf.buf;
	inforeqp->PRIM_type = T_INFO_REQ;

	do {
		retval = _t_do_ioctl(fd, ctlbuf.buf,
		    (int)sizeof (struct T_info_req), TI_GETINFO, &retlen);
	} while (retval < 0 && errno == EINTR);

	if (retval < 0)
		goto err_out;

	if (retlen != (int)sizeof (struct T_info_ack)) {
		t_errno = TSYSERR;
		errno = EIO;
		goto err_out;
	}

	/* LINTED pointer cast */
	infoackp = (struct T_info_ack *)ctlbuf.buf;

	info->addr = infoackp->ADDR_size;
	info->options = infoackp->OPT_size;
	info->tsdu = infoackp->TSDU_size;
	info->etsdu = infoackp->ETSDU_size;
	info->connect = infoackp->CDATA_size;
	info->discon = infoackp->DDATA_size;
	info->servtype = infoackp->SERV_type;

	if (_T_IS_XTI(api_semantics)) {
		/* XTI ONLY - TLI t_info struct does not have "flags" */
		info->flags = 0;
		if (infoackp->PROVIDER_flag & (SENDZERO|OLD_SENDZERO))
			info->flags |= T_SENDZERO;
	}
	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	return (0);

err_out:
	sv_errno = errno;
	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	sig_mutex_unlock(&tiptr->ti_lock);
	errno = sv_errno;
	return (-1);
}
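
The matching public call is t_getinfo(3NSL); a common use is sizing buffers from the reported TSDU limit, much as clnt_dg_create() in Example #13 does through __rpc_get_t_size(). A sketch:

#include <xti.h>
#include <stdlib.h>

/*
 * Sketch: size a receive buffer from the transport's TSDU limit.
 * tsdu > 0 is a real limit; 0 means a byte stream, and negative
 * means unlimited (-1) or unsupported (-2), so fall back to a
 * caller-supplied default in those cases.
 */
char *
alloc_tsdu_buf(int fd, size_t dflt, size_t *sizep)
{
	struct t_info info;

	if (t_getinfo(fd, &info) < 0)
		return (NULL);
	*sizep = (info.tsdu > 0) ? (size_t)info.tsdu : dflt;
	return (malloc(*sizep));
}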