Exemple #1
0
/* 
 *  PUTP and SRVP routines
 */
static streamscall void
pckt_enable(long arg)
{
	queue_t *q = (queue_t *) arg;
	bcid_t bc;
	struct pckt *p;

	if ((p = PCKT_PRIV(q)) && (bc = xchg(&p->bufcall, 0))) {
		qenable(q);
		enableok(q);
	}
}
Exemple #2
0
/*
 * Read-side timeout callback: clear the pending timer id and, when the
 * timer was still armed, re-enable and schedule the queue.
 */
STATIC void streamscall
spm_r_timeout(caddr_t data)
{
	queue_t *rq = (queue_t *) data;
	spm_t *sp = PRIV(rq);

	if (xchg(&sp->rtim, 0) == 0)
		return;		/* timer had already been cancelled */
	enableok(rq);
	qenable(rq);
}
Exemple #3
0
/************************************************************************
 *
 * Function Name: ip2xinet_lwsrv
 * Title: IP2XINET Lower Write Service routine
 *
 * Description:
 *      Send all of the messages on this queue down to the next driver.
 *      If we discover that we can't do a put, then stop the Linux
 *      devices from sending us stuff.
 *
 ************************************************************************/
STATIC streamscall int
ip2xinet_lwsrv(queue_t *q)
{
	mblk_t *mp;
	int allsent = 1;	/* cleared when downstream flow-controls us */
	int i;
	struct ip2xinet_dev *dev = ip2xinet_devs;
	struct ip2xinet_priv *privp;

	while ((mp = getq(q))) {
		/* M_PROTO's should be last on the list.  If it is something else, then it should
		   be ahead, and we can just go ahead and put it down. */
		if (mp->b_datap->db_type == M_PROTO) {
			if (canputnext(q)) {
				putnext(q, mp);
			} else {
				/* Congested downstream: requeue the message and stop.
				   noenable/enableok bracket the putbq so that putting the
				   message back does not immediately reschedule this same
				   service procedure. */
				noenable(q);
				if (!putbq(q, mp))
					freemsg(mp);	/* FIXME */
				enableok(q);
				allsent = 0;
				break;
			}
		} else {
			putnext(q, mp);
		}
	}

	/* Handle the flow control.  If we were able to send everything then it is ok for the
	   kernel to send us more stuff.  Otherwise it is not ok.  Go through all of the devices
	   and set the appropriate state. */
	spin_lock(&ip2xinet_lock);
	for (i = 0; i < NUMIP2XINET; i++, dev++) {
		privp = &dev->priv;
		if (privp->state == 1 && ip2xinet_status.ip2x_dlstate == DL_IDLE) {
			if (allsent) {
				netif_start_queue(&dev->dev);	/* kernel can transmit */
			} else {
				netif_stop_queue(&dev->dev);	/* We are flow controlled. */
			}
		}
	}
	spin_unlock(&ip2xinet_lock);

	return (0);
}
Exemple #4
0
int main(){
    int flag = 0;
    t_mb *mb;
    long width, height;

    struct t_gpio gp;
    gpioinit(gp);
    enableok(gp);
    uartinit(gp);
    struct t_mb mb;
    mailboxinit(gp);

    //mb->data = *($1000);

    mb->data[0] = 8*4;
    mb->data[1] = 0;
    mb->data[2] = $40003;
    mb->data[3] = 8;
    mb->data[4] = 0;
    mb->data[5] = 0;
    mb->data[6] = 0;
    mb->data[7] = 0;

    mailboxwrite(mb, 8, $1000);

    width = mb->data[5];
    height = mb->data[6];

    okon(gp);

    /*frameBufferinit();
    frameBufferfill();

    while(flag)
    {
        uart_puts("resolution");
        uart_puts(to_hex(width));
        uart_puts("|");
        uart_puts(to_hex(height));
        uart_puts("end resolution");

        frameBufferdebug();
    }*/

    return 0;
}
Exemple #5
0
/*
 * Bufcall callback for telmod: clear the bufcall id recorded for the
 * side (read or write) this queue belongs to, then restart servicing.
 */
static void
telmod_buffer(void *arg)
{
	queue_t *q = arg;
	struct telmod_info *tmip = (struct telmod_info *)q->q_ptr;

	ASSERT(tmip);

	/* QREADR identifies the read-side queue of the pair. */
	if ((q->q_flag & QREADR) != 0) {
		ASSERT(tmip->rbufcid);
		tmip->rbufcid = 0;
	} else {
		ASSERT(tmip->wbufcid);
		tmip->wbufcid = 0;
	}

	enableok(q);
	qenable(q);
}
/*
 * mouse8042_reset_timeout: watchdog for the mouse reset sequence.  If
 * the interrupt handler has not completed the reset by the time this
 * fires, fake a resend (MSERESEND) reply upstream, release the 8042
 * exclusive-access lock and re-enable the write queue so message
 * processing can resume.
 */
static void
mouse8042_reset_timeout(void *argp)
{
	struct mouse_state *state = (struct mouse_state *)argp;
	mblk_t *mp;

	mutex_enter(&state->reset_mutex);

	/*
	 * If the interrupt handler hasn't completed the reset handling
	 * (reset_state would be IDLE or FAILED in that case), then
	 * drop the 8042 lock, and send a faked retry reply upstream,
	 * then enable the queue for further message processing.
	 */
	if (state->reset_state != MSE_RESET_IDLE &&
	    state->reset_state != MSE_RESET_FAILED) {

		state->reset_tid = 0;
		state->reset_state = MSE_RESET_IDLE;
		cv_signal(&state->reset_cv);

		/* Release the 8042 exclusive-access lock. */
		(void) ddi_get8(state->ms_handle, state->ms_addr +
		    I8042_UNLOCK);

		/* Reply message was pre-allocated; append the faked byte. */
		mp = state->reply_mp;
		*mp->b_wptr++ = MSERESEND;
		state->reply_mp = NULL;

		if (state->ms_rqp != NULL)
			putnext(state->ms_rqp, mp);
		else
			freemsg(mp);

		ASSERT(state->ms_wqp != NULL);

		enableok(state->ms_wqp);
		qenable(state->ms_wqp);
	}

	mutex_exit(&state->reset_mutex);
}
Exemple #7
0
/*
 * telmodwput:
 * M_DATA is processed and forwarded if we aren't stopped awaiting the daemon
 * to process something.  M_CTL's are data from the daemon bound for the
 * network.  We forward them immediately.  There are two classes of ioctl's
 * we must handle here also.  One is ioctl's forwarded by ptem which we
 * ignore.  The other is ioctl's issued by the daemon to control us.
 * Process them appropriately.  M_PROTO's we pass along, figuring they
 * are TPI operations for TCP.  M_FLUSH requires careful processing, since
 * telnet cannot tolerate flushing its protocol requests.  Also the flushes
 * can be running either daemon<->TCP or application<->telmod.  We must
 * carefully deal with this.
 */
static void
telmodwput(
	queue_t *q,	/* Pointer to the write queue */
	mblk_t *mp)	/* Pointer to current message block */
{
	struct telmod_info	*tmip;
	struct iocblk *ioc;
	mblk_t *savemp;
	int rw;
	int error;

	tmip = (struct telmod_info *)q->q_ptr;

	switch (mp->b_datap->db_type) {
	case M_DATA:
		if (!canputnext(q) || (tmip->flags & TEL_STOPPED) ||
			(q->q_first)) {
			/*
			 * Flow controlled, stopped, or data already queued:
			 * queue this message to preserve ordering.
			 */
			noenable(q);
			(void) putq(q, mp);
			break;
		}
		/*
		 * This routine parses data generating from ptm side.
		 * Insert a null character if carriage return
		 * is not followed by line feed unless we are in binary mode.
		 * Also, duplicate IAC if found in the data.
		 */
		(void) snd_parse(q, mp);
		break;

	case M_CTL:
		/*
		 * Strip the M_CTL header block when it is just the one-byte
		 * magic marker, then forward what remains.
		 */
		if (((mp->b_wptr - mp->b_rptr) == 1) &&
			(*(mp->b_rptr) == M_CTL_MAGIC_NUMBER)) {
			savemp = mp->b_cont;
			freeb(mp);
			mp = savemp;
		}
		putnext(q, mp);
		break;

	case M_IOCTL:
		ioc = (struct iocblk *)mp->b_rptr;
		switch (ioc->ioc_cmd) {

		/*
		 * This ioctl is issued by user level daemon to
		 * request one more message block to process protocol
		 */
		case TEL_IOC_GETBLK:
			if (!(tmip->flags & TEL_STOPPED)) {
				miocnak(q, mp, 0, EINVAL);
				break;
			}
			tmip->flags |= TEL_GETBLK;
			qenable(RD(q));
			enableok(RD(q));

			miocack(q, mp, 0, 0);
			break;

		/*
		 * This ioctl is issued by user level daemon to reenable the
		 * read and write queues. This is issued during startup time
		 * after setting up the mux links and also after processing
		 * the protocol.  It is also issued after each time an
		 * an unrecognized telnet option is forwarded to the daemon.
		 */
		case TEL_IOC_ENABLE:

			/*
			 * Send negative ack if TEL_STOPPED flag is not set
			 */
			if (!(tmip->flags & TEL_STOPPED)) {
				miocnak(q, mp, 0, EINVAL);
				break;
			}
			tmip->flags &= ~TEL_STOPPED;
			if (mp->b_cont) {
				(void) putbq(RD(q), mp->b_cont);
				mp->b_cont = 0;
			}

			qenable(RD(q));
			enableok(RD(q));
			qenable(q);
			enableok(q);

			miocack(q, mp, 0, 0);
			break;

		/*
		 * Set binary/normal mode for input and output
		 * according to the instructions from the daemon.
		 */
		case TEL_IOC_MODE:
			error = miocpullup(mp, sizeof (uchar_t));
			if (error != 0) {
				miocnak(q, mp, 0, error);
				break;
			}
			tmip->flags |= *(mp->b_cont->b_rptr) &
			    (TEL_BINARY_IN|TEL_BINARY_OUT);
			miocack(q, mp, 0, 0);
			break;

#ifdef DEBUG
		case TCSETAF:
		case TCSETSF:
		case TCSETA:
		case TCSETAW:
		case TCSETS:
		case TCSETSW:
		case TCSBRK:
		case TIOCSTI:
		case TIOCSWINSZ:
			miocnak(q, mp, 0, EINVAL);
			break;
#endif
		case CRYPTPASSTHRU:
			error = miocpullup(mp, sizeof (uchar_t));
			if (error != 0) {
				miocnak(q, mp, 0, error);
				break;
			}
			if (*(mp->b_cont->b_rptr) == 0x01)
				tmip->flags |= TEL_IOCPASSTHRU;
			else
				tmip->flags &= ~TEL_IOCPASSTHRU;

			miocack(q, mp, 0, 0);
			break;

		default:
			if (tmip->flags & TEL_IOCPASSTHRU) {
				putnext(q, mp);
			} else {
#ifdef DEBUG
				cmn_err(CE_NOTE,
				"telmodwput: unexpected ioctl type 0x%x",
					ioc->ioc_cmd);
#endif
				miocnak(q, mp, 0, EINVAL);
			}
			break;
		}
		break;

	case M_FLUSH:
		/*
		 * Flushing is tricky:  We try to flush all we can, but certain
		 * data cannot be flushed.  Telnet protocol sequences cannot
		 * be flushed.  So, TCP's queues cannot be flushed since we
		 * cannot tell what might be telnet protocol data.  Then we
		 * must take care to create and forward out-of-band data
		 * indicating the flush to the far side.
		 */
		rw = *mp->b_rptr;
		if (rw & FLUSHR) {
			/*
			 * We cannot flush our read queue, since there may
			 * be telnet protocol bits in the queue, awaiting
			 * processing.  However, once it leaves this module
			 * it's guaranteed that all protocol data is in
			 * M_CTL, so we do flush read data beyond us, expecting
			 * them (actually logindmux) to do FLUSHDATAs also.
			 */
			*mp->b_rptr = rw & ~FLUSHW;
			qreply(q, mp);
		} else {
			freemsg(mp);
		}
		if (rw & FLUSHW) {
			/*
			 * Since all telnet protocol data comes from the
			 * daemon, stored as M_CTL messages, flushq will
			 * do exactly what's needed:  Flush bytes which do
			 * not have telnet protocol data.
			 */
			flushq(q, FLUSHDATA);
		}
		break;

	case M_PCPROTO:
		putnext(q, mp);
		break;

	case M_PROTO:
		/* We may receive T_DISCON_REQ from the mux */
		if (!canputnext(q) || q->q_first != NULL)
			(void) putq(q, mp);
		else
			putnext(q, mp);
		break;

	default:
#ifdef DEBUG
		cmn_err(CE_NOTE,
		    "telmodwput: unexpected msg type 0x%x",
		    mp->b_datap->db_type);
#endif
		freemsg(mp);
		break;
	}
}
Exemple #8
0
/*
 * cvc_wsrv()
 *	cvc_wsrv handles mblks that have been queued by cvc_wput either because
 *	the IOSRAM path was selected or the queue contained preceding mblks.  To
 *	optimize processing (particularly if the IOSRAM path is selected), all
 *	mblks are pulled off of the queue and chained together.  Then, if there
 *	are any mblks on the chain, they are either forwarded to cvcredir or
 *	sent for IOSRAM processing as appropriate given current circumstances.
 *	IOSRAM processing may not be able to handle all of the data in the
 *	chain, in which case the remaining data is placed back on the queue and
 *	a timeout routine is registered to reschedule cvc_wsrv in the future.
 *	Automatic scheduling of the queue is disabled (noenable(q)) while
 *	cvc_wsrv is running to avoid superfluous calls.
 */
static int
cvc_wsrv(queue_t *q)
{
	mblk_t *total_mp = NULL;
	mblk_t *mp;

	/* Nothing to do while the driver is stopped or suspended. */
	if (cvc_stopped == 1 || cvc_suspended == 1) {
		return (0);
	}

	rw_enter(&cvclock, RW_READER);
	noenable(q);

	/*
	 * If there's already a timeout registered for scheduling this routine
	 * in the future, it's a safe bet that we don't want to run right now.
	 */
	if (cvc_timeout_id != (timeout_id_t)-1) {
		enableok(q);
		rw_exit(&cvclock);
		return (0);
	}

	/*
	 * Start by linking all of the queued M_DATA mblks into a single chain
	 * so we can flush as much as possible to IOSRAM (if we choose that
	 * route).
	 */
	while ((mp = getq(q)) != NULL) {
		/*
		 * Technically, certain IOCTLs are supposed to be processed only
		 * after all preceding data has completely "drained".  In an
		 * attempt to support that, we delay processing of those IOCTLs
		 * until this point.  It is still possible that an IOCTL will be
		 * processed before all preceding data is drained, for instance
		 * in the case where not all of the preceding data would fit
		 * into IOSRAM and we have to place it back on the queue.
		 * However, since none of these IOCTLs really appear to have any
		 * relevance for cvc, and we weren't supporting delayed
		 * processing at _all_ previously, this partial implementation
		 * should suffice.  (Fully implementing the delayed IOCTL
		 * processing would be unjustifiably difficult given the nature
		 * of the underlying IOSRAM console protocol.)
		 */
		if (mp->b_datap->db_type == M_IOCTL) {
			cvc_ioctl(q, mp);
			continue;
		}

		/*
		 * We know that only M_IOCTL and M_DATA blocks are placed on our
		 * queue.  Since this block isn't an M_IOCTL, it must be M_DATA.
		 */
		if (total_mp != NULL) {
			linkb(total_mp, mp);
		} else {
			total_mp = mp;
		}
	}

	/*
	 * Do we actually have anything to do?
	 */
	if (total_mp == NULL) {
		enableok(q);
		rw_exit(&cvclock);
		return (0);
	}

	/*
	 * Yes, we do, so send the data to either cvcredir or IOSRAM as
	 * appropriate.  In the latter case, we might not be able to transmit
	 * everything right now, so re-queue the remainder.
	 */
	if (cvcoutput_q != NULL && !via_iosram) {
		CVC_DBG0(CVC_DBG_NETWORK_WR, "Sending to cvcredir.");
		/*
		 * XXX - should canputnext be called here?  Starfire's cvc
		 * doesn't do that, and it appears to work anyway.
		 */
		(void) putnext(cvcoutput_q, total_mp);
	} else {
		CVC_DBG0(CVC_DBG_IOSRAM_WR, "Send to IOSRAM.");
		/* cvc_send_to_iosram consumes what it can; leftover chain
		 * (if any) comes back through total_mp. */
		cvc_send_to_iosram(&total_mp);
		if (total_mp != NULL) {
			(void) putbq(q, total_mp);
		}
	}

	/*
	 * If there is still data queued at this point, make sure the queue
	 * gets scheduled again after an appropriate delay (which has been
	 * somewhat arbitrarily selected as half of the SC's input polling
	 * frequency).
	 */
	enableok(q);
	if (q->q_first != NULL) {
		if (cvc_timeout_id == (timeout_id_t)-1) {
			cvc_timeout_id = timeout(cvc_flush_queue,
			    NULL, drv_usectohz(CVC_IOSRAM_POLL_USECS / 2));
		}
	}
	rw_exit(&cvclock);
	return (0);
}
Exemple #9
0
/*
 * ptem_wput: - write put procedure.
 * M_DATA is passed downstream immediately when nothing blocks it,
 * otherwise queued to preserve ordering under flow control.  Terminal
 * ioctls that must wait for output to drain (TCSETAW/TCSETAF/TCSETSW/
 * TCSETSF/TCSBRK) are queued; other ioctls and messages are handled at
 * once via ptem_w_msg().  M_STOP/M_START toggle output flow control by
 * flipping PTEM_OUTPUT_STOPPED and the queue's enable state.
 */
static streamscall __hot_put int
ptem_wput(queue_t *q, mblk_t *mp)
{
	struct ptem *p = PTEM_PRIV(q);

	/* fast path */
	if (likely(mp->b_datap->db_type == M_DATA)) {
	      m_data:
		/* free zero-length messages */
		if (msgdsize(mp) != 0) {
			if ((p->flags & PTEM_OUTPUT_STOPPED)
			    || (q->q_first != NULL)
			    || (q->q_flag & QSVCBUSY)
			    || (!bcanputnext(q, mp->b_band))) {
				/* Note, the only reason for failing putq() is the lack of a queue 
				   band, in which case the band is empty and no loss of order will
				   result from putting it to the next queue. */
				if (putq(q, mp))
					return (0);
			}
			putnext(q, mp);
			return (0);
		}
		freemsg(mp);
		return (0);
	}

	switch (mp->b_datap->db_type) {
	case M_DATA:
		goto m_data;
	case M_IOCTL:
	{
		struct iocblk *ioc = (struct iocblk *) mp->b_rptr;

		/* The Stream head is set to recognize all transparent terminal input-output
		   controls and pass them downstream as though they were I_STR input-output
		   controls.  There is also the opportunity to register input-output controls with
		   the Stream head using the TIOC_REPLY message. */
		if (unlikely(ioc->ioc_count == TRANSPARENT))
			goto do_it;

		switch (ioc->ioc_cmd) {
		case TCSETAW:
		case TCSETAF:
		case TCSETSW:
		case TCSETSF:
		case TCSBRK:
			/* These need to wait for the output to drain before being processed, queue 
			   them. */
			putq(q, mp);
			break;
		default:
			/* Process others immediately, regardless of whether there is any data or
			   other messages in queue. */
			goto do_it;
		}
		break;
	}
	case M_DELAY:
	case M_READ:
		freemsg(mp);
		break;
	case M_STOP:
		if (canenable(q)) {
			noenable(q);
			p->flags |= PTEM_OUTPUT_STOPPED;
		}
		putnext(q, mp);
		break;
	case M_START:
		if (!canenable(q)) {
			p->flags &= ~PTEM_OUTPUT_STOPPED;
			enableok(q);
			qenable(q);
		}
		putnext(q, mp);
		break;
	case M_STOPI:
	case M_STARTI:
		/* We have no read side queue so we cannot queue in this direction.  Tell master so 
		   that pckt(4) can tell master not to send anything more. */
		putnext(q, mp);
		break;
	default:
	      do_it:
		/* ptem_w_msg() returning nonzero means the message still needs
		   handling; queue it, or free it if even putq() fails. */
		if (ptem_w_msg(q, mp) && !putq(q, mp))
			freemsg(mp);
		break;
	}
	return (0);
}
/*
 * mouse8042_intr: 8042 interrupt service routine for the mouse port.
 * Drains all available bytes from the controller.  While a reset
 * sequence is in progress, each byte feeds the reset state machine and
 * replies are synthesized upstream; otherwise each byte is forwarded
 * upstream in a one-byte mblk.  Returns DDI_INTR_CLAIMED when at least
 * one byte was consumed, DDI_INTR_UNCLAIMED otherwise.
 */
static uint_t
mouse8042_intr(caddr_t arg)
{
	unsigned char    mdata;
	mblk_t *mp;
	struct mouse_state *state = (struct mouse_state *)arg;
	int rc;

	mutex_enter(&state->ms_mutex);

	rc = DDI_INTR_UNCLAIMED;

	for (;;) {

		if (ddi_get8(state->ms_handle,
		    state->ms_addr + I8042_INT_INPUT_AVAIL) == 0) {
			break;
		}

		mdata = ddi_get8(state->ms_handle,
		    state->ms_addr + I8042_INT_INPUT_DATA);

		rc = DDI_INTR_CLAIMED;

		/*
		 * If we're not ready for this data, discard it.
		 */
		if (!state->ready)
			continue;

		mutex_enter(&state->reset_mutex);
		if (state->reset_state != MSE_RESET_IDLE) {

			if (mdata == MSEERROR || mdata == MSERESET) {
				state->reset_state = MSE_RESET_FAILED;
			} else {
				state->reset_state =
				    mouse8042_reset_fsm(state->reset_state,
				    mdata);
			}

			if (state->reset_state == MSE_RESET_ACK) {

			/*
			 * We received an ACK from the mouse, so
			 * send it upstream immediately so that
			 * consumers depending on the immediate
			 * ACK don't time out.
			 */
				if (state->reset_ack_mp != NULL) {

					mp = state->reset_ack_mp;

					state->reset_ack_mp = NULL;

					if (state->ms_rqp != NULL) {
						*mp->b_wptr++ = MSE_ACK;
						putnext(state->ms_rqp, mp);
					} else
						freemsg(mp);
				}

				if (state->ms_wqp != NULL) {
					enableok(state->ms_wqp);
					qenable(state->ms_wqp);
				}

			} else if (state->reset_state == MSE_RESET_IDLE ||
			    state->reset_state == MSE_RESET_FAILED) {

			/*
			 * If we transitioned back to the idle reset state (or
			 * the reset failed), disable the timeout, release the
			 * 8042 exclusive-access lock, then send the response
			 * to the upper-level modules. Finally, enable the
			 * queue and schedule queue service procedures so that
			 * upper-level modules can process the response.
			 * Otherwise, if we're still in the middle of the
			 * reset sequence, do not send the data up (since the
			 * response is sent at the end of the sequence, or
			 * on timeout/error).
			 */

				/* quntimeout may block; drop reset_mutex
				 * around it to avoid deadlock with the
				 * timeout handler. */
				mutex_exit(&state->reset_mutex);
				(void) quntimeout(state->ms_wqp,
				    state->reset_tid);
				mutex_enter(&state->reset_mutex);

				(void) ddi_get8(state->ms_handle,
				    state->ms_addr + I8042_UNLOCK);

				state->reset_tid = 0;
				if (state->reply_mp != NULL) {
					mp = state->reply_mp;
					if (state->reset_state ==
					    MSE_RESET_FAILED) {
						*mp->b_wptr++ = mdata;
					} else {
						*mp->b_wptr++ = MSE_AA;
						*mp->b_wptr++ = MSE_00;
					}
					state->reply_mp = NULL;
				} else {
					mp = NULL;
				}

				state->reset_state = MSE_RESET_IDLE;
				cv_signal(&state->reset_cv);

				if (mp != NULL) {
					if (state->ms_rqp != NULL)
						putnext(state->ms_rqp, mp);
					else
						freemsg(mp);
				}

				if (state->ms_wqp != NULL) {
					enableok(state->ms_wqp);
					qenable(state->ms_wqp);
				}
			}

			mutex_exit(&state->reset_mutex);
			mutex_exit(&state->ms_mutex);
			return (rc);
		}
		mutex_exit(&state->reset_mutex);

		/* Normal path: forward the byte upstream in a 1-byte mblk;
		 * silently dropped if allocation fails or no read queue. */
		if (state->ms_rqp != NULL && (mp = allocb(1, BPRI_MED))) {
			*mp->b_wptr++ = mdata;
			putnext(state->ms_rqp, mp);
		}
	}
	mutex_exit(&state->ms_mutex);

	return (rc);
}