/* * vml_ioctl() - * Handle the M_IOCTL message associated with a request * to validate a module list. */ static void vml_ioctl( queue_t *qp, /* pointer to write queue */ mblk_t *mp) /* message pointer */ { struct iocblk *iocp; iocp = (struct iocblk *)mp->b_rptr; if (iocp->ioc_count != TRANSPARENT) { miocnak(qp, mp, 0, EINVAL); return; } ASSERT(iocp->ioc_cmd == SAD_VML); mcopyin(mp, (void *)GETSTRUCT, SIZEOF_STRUCT(str_list, iocp->ioc_flag), NULL); qreply(qp, mp); }
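/*
 * Usage sketch (not part of the driver): the TRANSPARENT check above means
 * SAD_VML is issued as a plain ioctl against /dev/sad/user with a str_list
 * (see sad(7D) and stropts.h).  Per the man page a return of 0 means every
 * listed module is installed; treat the error handling here as illustrative.
 */
#include <sys/types.h>
#include <stropts.h>
#include <sys/sad.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int
validate_modules(void)
{
	struct str_mlist mods[2];
	struct str_list list;
	int fd, rval;

	(void) strcpy(mods[0].l_name, "ptem");
	(void) strcpy(mods[1].l_name, "ldterm");
	list.sl_nmods = 2;
	list.sl_modlist = mods;

	if ((fd = open("/dev/sad/user", O_RDWR)) < 0)
		return (-1);
	rval = ioctl(fd, SAD_VML, &list);
	(void) close(fd);
	return (rval);	/* 0: all listed modules are valid */
}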
/* * This routine is symmetric for master and slave, so it handles both without * splitting up the codepath. * * If there are messages on this queue that can be sent to the other, send * them via putnext(). Else, if queued messages cannot be sent, leave them * on this queue. */ static void zc_wsrv(queue_t *qp) { mblk_t *mp; DBG1("zc_wsrv master (%s) side", zc_side(qp)); /* * Partner has no read queue, so take the data, and throw it away. */ if (zc_switch(RD(qp)) == NULL) { DBG("zc_wsrv: other side isn't listening"); while ((mp = getq(qp)) != NULL) { if (mp->b_datap->db_type == M_IOCTL) miocnak(qp, mp, 0, 0); else freemsg(mp); } flushq(qp, FLUSHALL); return; } /* * while there are messages on this write queue... */ while ((mp = getq(qp)) != NULL) { /* * Due to the way zc_wput is implemented, we should never * see a control message here. */ ASSERT(mp->b_datap->db_type < QPCTL); if (bcanputnext(RD(zc_switch(qp)), mp->b_band)) { DBG("wsrv: send message to other side\n"); putnext(RD(zc_switch(qp)), mp); } else { DBG("wsrv: putting msg back on queue\n"); (void) putbq(qp, mp); break; } } }
/* * cvc_ioctl() * handle normal console ioctls. */ static void cvc_ioctl(register queue_t *q, register mblk_t *mp) { register cvc_t *cp = q->q_ptr; int datasize; int error = 0; /* * Let ttycommon_ioctl take the first shot at processing the ioctl. If * it fails because it can't allocate memory, schedule processing of the * ioctl later when a proper buffer is available. The mblk that * couldn't be processed will have been stored in the tty structure by * ttycommon_ioctl. */ datasize = ttycommon_ioctl(&cp->cvc_tty, q, mp, &error); if (datasize != 0) { if (cp->cvc_wbufcid) { unbufcall(cp->cvc_wbufcid); } cp->cvc_wbufcid = bufcall(datasize, BPRI_HI, cvc_reioctl, cp); return; } /* * ttycommon_ioctl didn't do anything, but there's nothing we really * support either with the exception of TCSBRK, which is supported * only to appear a bit more like a serial device for software that * expects TCSBRK to work. */ if (error != 0) { struct iocblk *iocp = (struct iocblk *)mp->b_rptr; if (iocp->ioc_cmd == TCSBRK) { miocack(q, mp, 0, 0); } else { miocnak(q, mp, 0, EINVAL); } } else { qreply(q, mp); } }
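/*
 * Sketch of the bufcall callback scheduled above.  The real cvc_reioctl()
 * may differ in detail; this follows the usual ttycommon "reioctl" pattern,
 * re-running the ioctl that ttycommon_ioctl() parked in t_iocpending once
 * buffers are available again.
 */
static void
cvc_reioctl(void *unit)
{
	queue_t *q;
	mblk_t *mp;
	cvc_t *cp = (cvc_t *)unit;

	/* The bufcall has fired, so it is no longer outstanding. */
	cp->cvc_wbufcid = 0;

	if ((q = cp->cvc_tty.t_writeq) == NULL)
		return;

	/* Re-run the ioctl that could not be processed earlier. */
	if ((mp = cp->cvc_tty.t_iocpending) != NULL) {
		cp->cvc_tty.t_iocpending = NULL;
		cvc_ioctl(q, mp);
	}
}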
/* * set the q_ptr of the 'q' to the conn_t pointer passed in */ static void ip_helper_share_conn(queue_t *q, mblk_t *mp, cred_t *crp) { conn_t *connp = *((conn_t **)mp->b_cont->b_rptr); /* * This operation is allowed only on helper streams with kcred */ if (kcred != crp || msgdsize(mp->b_cont) != sizeof (void *)) { miocnak(q, mp, 0, EINVAL); return; } connp->conn_helper_info->iphs_minfo = q->q_ptr; connp->conn_helper_info->iphs_rq = RD(q); connp->conn_helper_info->iphs_wq = WR(q); WR(q)->q_ptr = RD(q)->q_ptr = (void *)connp; connp->conn_rq = RD(q); connp->conn_wq = WR(q); miocack(q, mp, 0, 0); }
/* * ptemrput - Module read queue put procedure. * * This is called from the module or driver downstream. */ static void ptemrput(queue_t *q, mblk_t *mp) { struct iocblk *iocp; /* M_IOCTL data */ struct copyresp *resp; /* transparent ioctl response struct */ int error; switch (mp->b_datap->db_type) { case M_DELAY: case M_READ: freemsg(mp); break; case M_IOCTL: iocp = (struct iocblk *)mp->b_rptr; switch (iocp->ioc_cmd) { case TCSBRK: /* * Send a break message upstream. * * XXX: Shouldn't the argument come into play in * determining whether or not so send an M_BREAK? * It certainly does in the write-side direction. */ error = miocpullup(mp, sizeof (int)); if (error != 0) { miocnak(q, mp, 0, error); break; } if (!(*(int *)mp->b_cont->b_rptr)) { if (!putnextctl(q, M_BREAK)) { /* * Send an NAK reply back */ miocnak(q, mp, 0, EAGAIN); break; } } /* * ACK it. */ mioc2ack(mp, NULL, 0, 0); qreply(q, mp); break; case JWINSIZE: case TIOCGWINSZ: case TIOCSWINSZ: ptioc(q, mp, RDSIDE); break; case TIOCSIGNAL: /* * The following subtle logic is due to the fact that * `mp' may be in any one of three distinct formats: * * 1. A transparent M_IOCTL with an intptr_t-sized * payload containing the signal number. * * 2. An I_STR M_IOCTL with an int-sized payload * containing the signal number. * * 3. An M_IOCDATA with an int-sized payload * containing the signal number. */ if (iocp->ioc_count == TRANSPARENT) { intptr_t sig = *(intptr_t *)mp->b_cont->b_rptr; if (sig < 1 || sig >= NSIG) { /* * it's transparent with pointer * to the arg */ mcopyin(mp, NULL, sizeof (int), NULL); qreply(q, mp); break; } } ptioc(q, mp, RDSIDE); break; case TIOCREMOTE: if (iocp->ioc_count != TRANSPARENT) ptioc(q, mp, RDSIDE); else { mcopyin(mp, NULL, sizeof (int), NULL); qreply(q, mp); } break; default: putnext(q, mp); break; } break; case M_IOCDATA: resp = (struct copyresp *)mp->b_rptr; if (resp->cp_rval) { /* * Just free message on failure. */ freemsg(mp); break; } /* * Only need to copy data for the SET case. */ switch (resp->cp_cmd) { case TIOCSWINSZ: case TIOCSIGNAL: case TIOCREMOTE: ptioc(q, mp, RDSIDE); break; case JWINSIZE: case TIOCGWINSZ: mp->b_datap->db_type = M_IOCACK; mioc2ack(mp, NULL, 0, 0); qreply(q, mp); break; default: freemsg(mp); break; } break; case M_IOCACK: case M_IOCNAK: /* * We only pass write-side ioctls through to the master that * we've already ACKed or NAKed to the stream head. Thus, we * discard ones arriving from below, since they're redundant * from the point of view of modules above us. */ freemsg(mp); break; case M_HANGUP: /* * clear blocked state. */ { struct ptem *ntp = (struct ptem *)q->q_ptr; if (ntp->state & OFLOW_CTL) { ntp->state &= ~OFLOW_CTL; qenable(WR(q)); } } default: putnext(q, mp); break; } }
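/*
 * Usage sketch: TIOCSIGNAL arriving on ptem's read side originates from the
 * master device.  A master-side process can post a signal on the slave's
 * stream either transparently (format 1 in the comment above) or with I_STR
 * (format 2), as below.  The header placement of TIOCSIGNAL is an assumption
 * of this example.
 */
#include <stropts.h>
#include <sys/termios.h>
#include <signal.h>
#include <unistd.h>

static int
signal_slave(int master_fd)
{
	int sig = SIGINT;
	struct strioctl ic;

	ic.ic_cmd = TIOCSIGNAL;
	ic.ic_timout = 0;
	ic.ic_len = sizeof (sig);
	ic.ic_dp = (char *)&sig;
	return (ioctl(master_fd, I_STR, &ic));
}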
static int log_wput(queue_t *q, mblk_t *mp) { log_t *lp = (log_t *)q->q_ptr; struct iocblk *iocp; mblk_t *mp2; cred_t *cr = DB_CRED(mp); zoneid_t zoneid; /* * Default to global zone if dblk doesn't have a valid cred. * Calls to syslog() go through putmsg(), which does set up * the cred. */ zoneid = (cr != NULL) ? crgetzoneid(cr) : GLOBAL_ZONEID; switch (DB_TYPE(mp)) { case M_FLUSH: if (*mp->b_rptr & FLUSHW) { flushq(q, FLUSHALL); *mp->b_rptr &= ~FLUSHW; } if (*mp->b_rptr & FLUSHR) { flushq(RD(q), FLUSHALL); qreply(q, mp); return (0); } break; case M_IOCTL: iocp = (struct iocblk *)mp->b_rptr; if (lp->log_major != LOG_LOGMIN) { /* write-only device */ miocnak(q, mp, 0, EINVAL); return (0); } if (iocp->ioc_count == TRANSPARENT) { miocnak(q, mp, 0, EINVAL); return (0); } if (lp->log_flags) { miocnak(q, mp, 0, EBUSY); return (0); } freemsg(lp->log_data); lp->log_data = mp->b_cont; mp->b_cont = NULL; switch (iocp->ioc_cmd) { case I_CONSLOG: log_update(lp, RD(q), SL_CONSOLE, log_console); break; case I_TRCLOG: if (lp->log_data == NULL) { miocnak(q, mp, 0, EINVAL); return (0); } log_update(lp, RD(q), SL_TRACE, log_trace); break; case I_ERRLOG: log_update(lp, RD(q), SL_ERROR, log_error); break; default: miocnak(q, mp, 0, EINVAL); return (0); } miocack(q, mp, 0, 0); return (0); case M_PROTO: if (MBLKL(mp) == sizeof (log_ctl_t) && mp->b_cont != NULL) { log_ctl_t *lc = (log_ctl_t *)mp->b_rptr; /* This code is used by savecore to log dump msgs */ if (mp->b_band != 0 && secpolicy_sys_config(CRED(), B_FALSE) == 0) { (void) putq(log_consq, mp); return (0); } if ((lc->pri & LOG_FACMASK) == LOG_KERN) lc->pri |= LOG_USER; mp2 = log_makemsg(LOG_MID, LOG_CONSMIN, lc->level, lc->flags, lc->pri, mp->b_cont->b_rptr, MBLKL(mp->b_cont) + 1, 0); if (mp2 != NULL) log_sendmsg(mp2, zoneid); } break; case M_DATA: mp2 = log_makemsg(LOG_MID, LOG_CONSMIN, 0, SL_CONSOLE, LOG_USER | LOG_INFO, mp->b_rptr, MBLKL(mp) + 1, 0); if (mp2 != NULL) log_sendmsg(mp2, zoneid); break; } freemsg(mp); return (0); }
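/*
 * Usage sketch (after log(7D)): because the handler above rejects TRANSPARENT
 * requests, a console-log reader registers with an I_STR ioctl on /dev/log
 * and then collects each record -- a log_ctl_t in the control part and the
 * message text in the data part -- with getmsg().
 */
#include <stropts.h>
#include <sys/strlog.h>
#include <sys/log.h>
#include <fcntl.h>
#include <unistd.h>

static int
open_console_log(void)
{
	struct strioctl ic;
	int fd;

	if ((fd = open("/dev/log", O_RDONLY)) < 0)
		return (-1);

	ic.ic_cmd = I_CONSLOG;
	ic.ic_timout = 0;
	ic.ic_len = 0;
	ic.ic_dp = NULL;
	if (ioctl(fd, I_STR, &ic) < 0) {
		(void) close(fd);
		return (-1);
	}
	return (fd);	/* read records with getmsg() */
}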
static int
zap_m_ioctl(struct zt_chan *chan, queue_t *q, mblk_t *mp)
{
	struct iocblk *ioc = (typeof(ioc)) mp->b_rptr;
	mblk_t *dp = mp->b_cont;
	int err;

	switch (ioc->ioc_cmd) {
	case ZT_GET_BLOCKSIZE:
	case ZT_SET_BLOCKSIZE:
	case ZT_FLUSH:
	case ZT_SYNC:
	case ZT_GET_PARAMS:
	case ZT_SET_PARAMS:
	case ZT_HOOK:
	case ZT_SPANSTAT:
	case ZT_MAINT:
	case ZT_GETCONF:
	case ZT_SETCONF:
	case ZT_CONFLINK:
	case ZT_CONFDIAG:
	case ZT_GETGAINS:
	case ZT_SETGAINS:
	case ZT_SPANCONFIG:
	case ZT_CHANCONFIG:
	case ZT_CONFMUTE:
	case ZT_SENDTONE:
	case ZT_SETTONEZONE:
	case ZT_GETTONEZONE:
	case ZT_DEFAULTZONE:
	case ZT_LOADZONE:
	case ZT_FREEZONE:
	case ZT_SET_BUFINFO:
	case ZT_GET_BUFINFO:
	case ZT_GET_DIALPARAMS:
	case ZT_SET_DIALPARAMS:
	case ZT_DIAL:
	case ZT_AUDIOMODE:
	case ZT_ECHOCANCEL:
	case ZT_CHANNO:
	case ZT_DIALING:
	case ZT_HDLCRAWMODE:
	case ZT_HDLCFCSMODE:
	case ZT_SPECIFY: {
		int channo = *(int *)dp->b_rptr;

		if ((channo < 1) || (channo > ZT_MAX_CHANNELS)) {
			miocnak(q, mp, 0, EINVAL);
			return (0);
		}
		if ((err = zt_specchan_open(NULL, qstream(q)->sd_file,
		    channo, 0)) == 0) {
			RD(q)->q_ptr = WR(q)->q_ptr = &chans[channo];
			miocack(q, mp, 0, 0);
		} else {
			miocnak(q, mp, 0, (err < 0 ? -err : err));
		}
		return (0);
	}
	case ZT_SETLAW:
	case ZT_SETLINEAR:
	case ZT_HDLCPPP:
	case ZT_SETCADENCE:
	case ZT_SETTXBITS:
	case ZT_CHANDIAG:
	case ZT_GETRXBITS:
	case ZT_SFCONFIG:
	case ZT_TIMERCONFIG:
	case ZT_TIMERACK:
	case ZT_GETCONFMUTE:
	case ZT_ECHOTRAIN:
	case ZT_ONHOOKTRANSFER:
	case ZT_TIMERPING:
	case ZT_TIMERPONG:
	case ZT_SIGFREEZE:
	case ZT_GETSIGFREEZE:
		mi_copyout(q, mp, NULL, sizeof (int));
		return (0);
	case ZT_INDIRECT:
	case ZT_DYNAMIC_CREATE:
	case ZT_DYNAMIC_DESTROY:
	case ZT_TONEDETECT:
	case ZT_SETPOLARITY:
	case ZT_STARTUP:
	case ZT_SHUTDOWN:
	default:
		/* Not handled here; reject so the ioctl is not left hanging. */
		miocnak(q, mp, 0, EINVAL);
		return (0);
	}
}
/* * Pass on M_IOCTL messages passed to the DLD, and support * private IOCTLs for debugging and ndd. */ void ixgbe_m_ioctl(void *arg, queue_t *q, mblk_t *mp) { ixgbe_t *ixgbe = (ixgbe_t *)arg; struct iocblk *iocp; enum ioc_reply status; iocp = (struct iocblk *)(uintptr_t)mp->b_rptr; iocp->ioc_error = 0; mutex_enter(&ixgbe->gen_lock); if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { mutex_exit(&ixgbe->gen_lock); miocnak(q, mp, 0, EINVAL); return; } mutex_exit(&ixgbe->gen_lock); switch (iocp->ioc_cmd) { case LB_GET_INFO_SIZE: case LB_GET_INFO: case LB_GET_MODE: case LB_SET_MODE: status = ixgbe_loopback_ioctl(ixgbe, iocp, mp); break; default: status = IOC_INVAL; break; } /* * Decide how to reply */ switch (status) { default: case IOC_INVAL: /* * Error, reply with a NAK and EINVAL or the specified error */ miocnak(q, mp, 0, iocp->ioc_error == 0 ? EINVAL : iocp->ioc_error); break; case IOC_DONE: /* * OK, reply already sent */ break; case IOC_ACK: /* * OK, reply with an ACK */ miocack(q, mp, 0, 0); break; case IOC_REPLY: /* * OK, send prepared reply as ACK or NAK */ mp->b_datap->db_type = iocp->ioc_error == 0 ? M_IOCACK : M_IOCNAK; qreply(q, mp); break; } }
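/*
 * Usage sketch (assumptions noted): LB_GET_MODE and friends come from
 * <sys/netlb.h> and are normally driven by a diagnostic tool.  The device
 * path and the use of I_STR on the opened node are assumptions of this
 * example, not something ixgbe_m_ioctl() itself mandates.
 */
#include <sys/types.h>
#include <stropts.h>
#include <sys/netlb.h>
#include <fcntl.h>
#include <unistd.h>

static int
get_loopback_mode(const char *dev_path, uint32_t *modep)
{
	struct strioctl ic;
	int fd, rc;

	if ((fd = open(dev_path, O_RDWR)) < 0)
		return (-1);

	ic.ic_cmd = LB_GET_MODE;
	ic.ic_timout = 0;
	ic.ic_len = sizeof (*modep);
	ic.ic_dp = (char *)modep;
	rc = ioctl(fd, I_STR, &ic);
	(void) close(fd);
	return (rc);
}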
static void kb8042_ioctlmsg(struct kb8042 *kb8042, queue_t *qp, mblk_t *mp) { struct iocblk *iocp; mblk_t *datap; int error; int tmp; iocp = (struct iocblk *)mp->b_rptr; switch (iocp->ioc_cmd) { case CONSOPENPOLLEDIO: error = miocpullup(mp, sizeof (struct cons_polledio *)); if (error != 0) { miocnak(qp, mp, 0, error); return; } /* * We are given an appropriate-sized data block, * and return a pointer to our structure in it. */ *(struct cons_polledio **)mp->b_cont->b_rptr = &kb8042->polledio; mp->b_datap->db_type = M_IOCACK; iocp->ioc_error = 0; qreply(qp, mp); break; case CONSCLOSEPOLLEDIO: miocack(qp, mp, 0, 0); break; case CONSSETABORTENABLE: if (iocp->ioc_count != TRANSPARENT) { miocnak(qp, mp, 0, EINVAL); return; } kb8042->debugger.enabled = *(intptr_t *)mp->b_cont->b_rptr; miocack(qp, mp, 0, 0); break; /* * Valid only in TR_UNTRANS_MODE mode. */ case CONSSETKBDTYPE: error = miocpullup(mp, sizeof (int)); if (error != 0) { miocnak(qp, mp, 0, error); return; } tmp = *(int *)mp->b_cont->b_rptr; if (tmp != KB_PC && tmp != KB_USB) { miocnak(qp, mp, 0, EINVAL); break; } kb8042->simulated_kbd_type = tmp; miocack(qp, mp, 0, 0); break; case KIOCLAYOUT: if (kb8042->w_kblayout == -1) { miocnak(qp, mp, 0, EINVAL); return; } if ((datap = allocb(sizeof (int), BPRI_HI)) == NULL) { miocnak(qp, mp, 0, ENOMEM); return; } if (kb8042->simulated_kbd_type == KB_USB) *(int *)datap->b_wptr = KBTRANS_USBKB_DEFAULT_LAYOUT; else *(int *)datap->b_wptr = kb8042->w_kblayout; datap->b_wptr += sizeof (int); if (mp->b_cont) freemsg(mp->b_cont); mp->b_cont = datap; iocp->ioc_count = sizeof (int); mp->b_datap->db_type = M_IOCACK; iocp->ioc_error = 0; qreply(qp, mp); break; case KIOCSLAYOUT: if (iocp->ioc_count != TRANSPARENT) { miocnak(qp, mp, 0, EINVAL); return; } kb8042->w_kblayout = *(intptr_t *)mp->b_cont->b_rptr; miocack(qp, mp, 0, 0); break; case KIOCCMD: error = miocpullup(mp, sizeof (int)); if (error != 0) { miocnak(qp, mp, 0, error); return; } kb8042_type4_cmd(kb8042, *(int *)mp->b_cont->b_rptr); miocack(qp, mp, 0, 0); break; default: #ifdef DEBUG1 cmn_err(CE_NOTE, "!kb8042_ioctlmsg %x", iocp->ioc_cmd); #endif miocnak(qp, mp, 0, EINVAL); return; } }
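/*
 * Usage sketch: KIOCLAYOUT (from <sys/kbio.h>) reports the keyboard layout
 * as an int, which is what the handler above packs into its M_IOCACK reply.
 * Querying it through the keyboard stream looks like this (device path
 * assumed):
 */
#include <sys/kbio.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int
print_layout(void)
{
	int fd, layout;

	if ((fd = open("/dev/kbd", O_RDWR)) < 0)
		return (-1);
	if (ioctl(fd, KIOCLAYOUT, &layout) < 0) {
		(void) close(fd);
		return (-1);
	}
	(void) printf("keyboard layout %d\n", layout);
	(void) close(fd);
	return (0);
}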
/* * wput(9E) is symmetric for master and slave sides, so this handles both * without splitting the codepath. (The only exception to this is the * processing of zcons ioctls, which is restricted to the master side.) * * zc_wput() looks at the other side; if there is no process holding that * side open, it frees the message. This prevents processes from hanging * if no one is holding open the console. Otherwise, it putnext's high * priority messages, putnext's normal messages if possible, and otherwise * enqueues the messages; in the case that something is enqueued, wsrv(9E) * will take care of eventually shuttling I/O to the other side. */ static void zc_wput(queue_t *qp, mblk_t *mp) { unsigned char type = mp->b_datap->db_type; zc_state_t *zcs; struct iocblk *iocbp; file_t *slave_filep; struct snode *slave_snodep; int slave_fd; ASSERT(qp->q_ptr); DBG1("entering zc_wput, %s side", zc_side(qp)); /* * Process zcons ioctl messages if qp is the master console's write * queue. */ zcs = (zc_state_t *)qp->q_ptr; if (zcs->zc_master_rdq != NULL && qp == WR(zcs->zc_master_rdq) && type == M_IOCTL) { iocbp = (struct iocblk *)(void *)mp->b_rptr; switch (iocbp->ioc_cmd) { case ZC_HOLDSLAVE: /* * Hold the slave's vnode and increment the refcount * of the snode. If the vnode is already held, then * indicate success. */ if (iocbp->ioc_count != TRANSPARENT) { miocack(qp, mp, 0, EINVAL); return; } if (zcs->zc_slave_vnode != NULL) { miocack(qp, mp, 0, 0); return; } /* * The process that passed the ioctl must be running in * the global zone. */ if (curzone != global_zone) { miocack(qp, mp, 0, EINVAL); return; } /* * The calling process must pass a file descriptor for * the slave device. */ slave_fd = (int)(intptr_t)*(caddr_t *)(void *)mp->b_cont-> b_rptr; slave_filep = getf(slave_fd); if (slave_filep == NULL) { miocack(qp, mp, 0, EINVAL); return; } if (ZC_STATE_TO_SLAVEDEV(zcs) != slave_filep->f_vnode->v_rdev) { releasef(slave_fd); miocack(qp, mp, 0, EINVAL); return; } /* * Get a reference to the slave's vnode. Also bump the * reference count on the associated snode. */ ASSERT(vn_matchops(slave_filep->f_vnode, spec_getvnodeops())); zcs->zc_slave_vnode = slave_filep->f_vnode; VN_HOLD(zcs->zc_slave_vnode); slave_snodep = VTOCS(zcs->zc_slave_vnode); mutex_enter(&slave_snodep->s_lock); ++slave_snodep->s_count; mutex_exit(&slave_snodep->s_lock); releasef(slave_fd); miocack(qp, mp, 0, 0); return; case ZC_RELEASESLAVE: /* * Release the master's handle on the slave's vnode. * If there isn't a handle for the vnode, then indicate * success. */ if (iocbp->ioc_count != TRANSPARENT) { miocack(qp, mp, 0, EINVAL); return; } if (zcs->zc_slave_vnode == NULL) { miocack(qp, mp, 0, 0); return; } /* * The process that passed the ioctl must be running in * the global zone. */ if (curzone != global_zone) { miocack(qp, mp, 0, EINVAL); return; } /* * The process that passed the ioctl must have provided * a file descriptor for the slave device. Make sure * this is correct. */ slave_fd = (int)(intptr_t)*(caddr_t *)(void *)mp->b_cont-> b_rptr; slave_filep = getf(slave_fd); if (slave_filep == NULL) { miocack(qp, mp, 0, EINVAL); return; } if (zcs->zc_slave_vnode->v_rdev != slave_filep->f_vnode->v_rdev) { releasef(slave_fd); miocack(qp, mp, 0, EINVAL); return; } /* * Decrement the snode's reference count and release the * vnode. 
*/ ASSERT(vn_matchops(slave_filep->f_vnode, spec_getvnodeops())); slave_snodep = VTOCS(zcs->zc_slave_vnode); mutex_enter(&slave_snodep->s_lock); --slave_snodep->s_count; mutex_exit(&slave_snodep->s_lock); VN_RELE(zcs->zc_slave_vnode); zcs->zc_slave_vnode = NULL; releasef(slave_fd); miocack(qp, mp, 0, 0); return; default: break; } } if (zc_switch(RD(qp)) == NULL) { DBG1("wput to %s side (no one listening)", zc_side(qp)); switch (type) { case M_FLUSH: handle_mflush(qp, mp); break; case M_IOCTL: miocnak(qp, mp, 0, 0); break; default: freemsg(mp); break; } return; } if (type >= QPCTL) { DBG1("(hipri) wput, %s side", zc_side(qp)); switch (type) { case M_READ: /* supposedly from ldterm? */ DBG("zc_wput: tossing M_READ\n"); freemsg(mp); break; case M_FLUSH: handle_mflush(qp, mp); break; default: /* * Put this to the other side. */ ASSERT(zc_switch(RD(qp)) != NULL); putnext(zc_switch(RD(qp)), mp); break; } DBG1("done (hipri) wput, %s side", zc_side(qp)); return; } /* * Only putnext if there isn't already something in the queue. * otherwise things would wind up out of order. */ if (qp->q_first == NULL && bcanputnext(RD(zc_switch(qp)), mp->b_band)) { DBG("wput: putting message to other side\n"); putnext(RD(zc_switch(qp)), mp); } else { DBG("wput: putting msg onto queue\n"); (void) putq(qp, mp); } DBG1("done wput, %s side", zc_side(qp)); }
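/*
 * Usage sketch (private zcons interface, normally exercised by zoneadmd;
 * illustrative only): the global-zone caller opens both sides and hands the
 * slave's file descriptor to the master with a transparent ioctl, matching
 * the TRANSPARENT/getf() handling above.
 */
#include <sys/types.h>
#include <sys/zcons.h>
#include <fcntl.h>
#include <unistd.h>

static int
hold_slave(const char *master_path, const char *slave_path)
{
	int mfd, sfd;

	if ((mfd = open(master_path, O_RDWR)) < 0)
		return (-1);
	if ((sfd = open(slave_path, O_RDWR)) < 0) {
		(void) close(mfd);
		return (-1);
	}
	/* The descriptor itself is the argument; the driver getf()s it. */
	return (ioctl(mfd, ZC_HOLDSLAVE, (caddr_t)(intptr_t)sfd));
}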
/*ARGSUSED*/ static void vnic_m_ioctl(void *arg, queue_t *q, mblk_t *mp) { miocnak(q, mp, 0, ENOTSUP); }
static void sdp_gen_ioctl(queue_t *q, mblk_t *mp) { struct iocblk *iocp; int32_t enable = 0; int ret; boolean_t priv = B_TRUE; /* LINTED */ iocp = (struct iocblk *)mp->b_rptr; switch (iocp->ioc_cmd) { int32_t send_enable; case SIOCSENABLESDP: bcopy(mp->b_cont->b_rptr, &enable, sizeof (int)); send_enable = enable; /* * Check for root privs. * if not net config privs - return state of system SDP */ if (secpolicy_net_config(CRED(), B_FALSE) != 0) { priv = B_FALSE; } /* * The sdpib driver is loaded if root enables sdp the * first time (sdp_transport_handle is NULL). It is * unloaded during the following first disable. At all * other times for root as well as non-root users, the * action of enabling/disabling sdp is simply acked. */ rw_enter(&sdp_transport_lock, RW_READER); if ((send_enable == 1) && (sdp_transport_handle == NULL) && (priv == B_TRUE)) { /* Initialize sdpib transport driver */ rw_exit(&sdp_transport_lock); ret = sdp_open_sdpib_driver(); rw_enter(&sdp_transport_lock, RW_READER); if (ret != 0) { /* Transport failed to load */ rw_exit(&sdp_transport_lock); enable = 0; goto done; } (void) ldi_ioctl(sdp_transport_handle, iocp->ioc_cmd, (intptr_t)&send_enable, FKIOCTL, CRED(), (int *)&enable); } else if (sdp_transport_handle != NULL) { (void) ldi_ioctl(sdp_transport_handle, iocp->ioc_cmd, (intptr_t)&send_enable, FKIOCTL, CRED(), (int *)&enable); if (send_enable == 0 && priv == B_TRUE) { (void) ldi_close(sdp_transport_handle, FNDELAY, kcred); sdp_transport_handle = NULL; } } else { enable = 0; } rw_exit(&sdp_transport_lock); done: bcopy(&enable, mp->b_cont->b_rptr, sizeof (int)); /* ACK the ioctl */ mp->b_datap->db_type = M_IOCACK; iocp->ioc_count = sizeof (int); qreply(q, mp); break; default: miocnak(q, mp, 0, ENOTSUP); } }
/*
 * 10G is the only loopback mode for Hydra.
 */
void
hxge_loopback_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp,
    struct iocblk *iocp)
{
	p_lb_property_t lb_props;
	size_t size;
	int i;

	if (mp->b_cont == NULL) {
		/* No payload; NAK and bail out before touching b_cont below. */
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	switch (iocp->ioc_cmd) {
	case LB_GET_MODE:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_GET_LB_MODE command"));
		if (hxgep != NULL) {
			*(lb_info_sz_t *)mp->b_cont->b_rptr =
			    hxgep->statsp->port_stats.lb_mode;
			miocack(wq, mp, sizeof (hxge_lb_t), 0);
		} else
			miocnak(wq, mp, 0, EINVAL);
		break;

	case LB_SET_MODE:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_SET_LB_MODE command"));
		if (iocp->ioc_count != sizeof (uint32_t)) {
			miocack(wq, mp, 0, 0);
			break;
		}
		if ((hxgep != NULL) && hxge_set_lb(hxgep, wq, mp->b_cont)) {
			miocack(wq, mp, 0, 0);
		} else {
			miocnak(wq, mp, 0, EPROTO);
		}
		break;

	case LB_GET_INFO_SIZE:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "LB_GET_INFO_SIZE command"));
		if (hxgep != NULL) {
			size = sizeof (lb_normal) + sizeof (lb_mac10g);
			*(lb_info_sz_t *)mp->b_cont->b_rptr = size;

			HXGE_DEBUG_MSG((hxgep, IOC_CTL,
			    "HXGE_GET_LB_INFO command: size %d", size));
			miocack(wq, mp, sizeof (lb_info_sz_t), 0);
		} else
			miocnak(wq, mp, 0, EINVAL);
		break;

	case LB_GET_INFO:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_GET_LB_INFO command"));
		if (hxgep != NULL) {
			size = sizeof (lb_normal) + sizeof (lb_mac10g);
			HXGE_DEBUG_MSG((hxgep, IOC_CTL,
			    "HXGE_GET_LB_INFO command: size %d", size));
			if (size == iocp->ioc_count) {
				i = 0;
				lb_props = (p_lb_property_t)mp->b_cont->b_rptr;
				lb_props[i++] = lb_normal;
				lb_props[i++] = lb_mac10g;
				miocack(wq, mp, size, 0);
			} else
				miocnak(wq, mp, 0, EINVAL);
		} else {
			miocnak(wq, mp, 0, EINVAL);
			cmn_err(CE_NOTE, "hxge_hw_ioctl: invalid command 0x%x",
			    iocp->ioc_cmd);
		}
		break;
	}
}
/* * apush_iocdata() - * Handle the M_IOCDATA messages associated with * the autopush feature. */ static void apush_iocdata( queue_t *qp, /* pointer to write queue */ mblk_t *mp) /* message pointer */ { int i, ret; struct copyresp *csp; struct strapush *sap; struct autopush *ap; struct saddev *sadp; uint_t size; csp = (struct copyresp *)mp->b_rptr; if (csp->cp_rval) { /* if there was an error */ freemsg(mp); return; } if (mp->b_cont) /* sap needed only if mp->b_cont is set */ sap = (struct strapush *)mp->b_cont->b_rptr; switch (SAD_CMD(csp->cp_cmd)) { case SAD_CMD(SAD_SAP): switch ((long)csp->cp_private) { case GETSTRUCT: switch (sap->sap_cmd) { case SAP_ONE: case SAP_RANGE: case SAP_ALL: if ((sap->sap_npush == 0) || (sap->sap_npush > MAXAPUSH) || (sap->sap_npush > nstrpush)) { /* invalid number of modules to push */ miocnak(qp, mp, 0, EINVAL); break; } if (ret = valid_major(sap->sap_major)) { miocnak(qp, mp, 0, ret); break; } if ((sap->sap_cmd == SAP_RANGE) && (sap->sap_lastminor <= sap->sap_minor)) { /* bad range */ miocnak(qp, mp, 0, ERANGE); break; } /* * Validate that the specified list of * modules exist. */ for (i = 0; i < sap->sap_npush; i++) { sap->sap_list[i][FMNAMESZ] = '\0'; if (fmodsw_find(sap->sap_list[i], FMODSW_LOAD) == NULL) { miocnak(qp, mp, 0, EINVAL); return; } } mutex_enter(&sad_lock); if (ap_hfind(sap->sap_major, sap->sap_minor, sap->sap_lastminor, sap->sap_cmd)) { mutex_exit(&sad_lock); /* already configured */ miocnak(qp, mp, 0, EEXIST); break; } if ((ap = ap_alloc()) == NULL) { mutex_exit(&sad_lock); /* no autopush structures */ miocnak(qp, mp, 0, ENOSR); break; } ap->ap_cnt++; ap->ap_common = sap->sap_common; if (SAD_VER(csp->cp_cmd) > 0) ap->ap_anchor = sap->sap_anchor; else ap->ap_anchor = 0; for (i = 0; i < ap->ap_npush; i++) (void) strcpy(ap->ap_list[i], sap->sap_list[i]); ap_hadd(ap); mutex_exit(&sad_lock); miocack(qp, mp, 0, 0); break; case SAP_CLEAR: if (ret = valid_major(sap->sap_major)) { miocnak(qp, mp, 0, ret); break; } mutex_enter(&sad_lock); if ((ap = ap_hfind(sap->sap_major, sap->sap_minor, sap->sap_lastminor, sap->sap_cmd)) == NULL) { mutex_exit(&sad_lock); /* not configured */ miocnak(qp, mp, 0, ENODEV); break; } if ((ap->ap_type == SAP_RANGE) && (sap->sap_minor != ap->ap_minor)) { mutex_exit(&sad_lock); /* starting minors do not match */ miocnak(qp, mp, 0, ERANGE); break; } if ((ap->ap_type == SAP_ALL) && (sap->sap_minor != 0)) { mutex_exit(&sad_lock); /* SAP_ALL must have minor == 0 */ miocnak(qp, mp, 0, EINVAL); break; } ap_hrmv(ap); if (--(ap->ap_cnt) <= 0) ap_free(ap); mutex_exit(&sad_lock); miocack(qp, mp, 0, 0); break; default: miocnak(qp, mp, 0, EINVAL); break; } /* switch (sap_cmd) */ break; default: cmn_err(CE_WARN, "apush_iocdata: cp_private bad in SAD_SAP: %p", (void *)csp->cp_private); freemsg(mp); break; } /* switch (cp_private) */ break; case SAD_CMD(SAD_GAP): switch ((long)csp->cp_private) { case GETSTRUCT: { if (ret = valid_major(sap->sap_major)) { miocnak(qp, mp, 0, ret); break; } mutex_enter(&sad_lock); if ((ap = ap_hfind(sap->sap_major, sap->sap_minor, sap->sap_lastminor, SAP_ONE)) == NULL) { mutex_exit(&sad_lock); /* not configured */ miocnak(qp, mp, 0, ENODEV); break; } sap->sap_common = ap->ap_common; if (SAD_VER(csp->cp_cmd) > 0) sap->sap_anchor = ap->ap_anchor; for (i = 0; i < ap->ap_npush; i++) (void) strcpy(sap->sap_list[i], ap->ap_list[i]); for (; i < MAXAPUSH; i++) bzero(sap->sap_list[i], FMNAMESZ + 1); mutex_exit(&sad_lock); if (SAD_VER(csp->cp_cmd) == 1) size = STRAPUSH_V1_LEN; else size = STRAPUSH_V0_LEN; sadp = 
(struct saddev *)qp->q_ptr; mcopyout(mp, (void *)GETRESULT, size, sadp->sa_addr, NULL); qreply(qp, mp); break; } case GETRESULT: miocack(qp, mp, 0, 0); break; default: cmn_err(CE_WARN, "apush_iocdata: cp_private bad case SAD_GAP: %p", (void *)csp->cp_private); freemsg(mp); break; } /* switch (cp_private) */ break; default: /* can't happen */ ASSERT(0); freemsg(mp); break; } /* switch (cp_cmd) */ }
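/*
 * Usage sketch (per sad(7D)): SAD_GAP fills in a strapush describing one
 * device's autopush configuration; the GETSTRUCT/GETRESULT copy handshake
 * above is what services this transparent ioctl.
 */
#include <sys/types.h>
#include <sys/sad.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int
show_autopush(major_t maj, minor_t min)
{
	struct strapush sap;
	int fd;
	uint_t i;

	if ((fd = open("/dev/sad/user", O_RDWR)) < 0)
		return (-1);

	(void) memset(&sap, 0, sizeof (sap));
	sap.sap_cmd = SAP_ONE;
	sap.sap_major = maj;
	sap.sap_minor = min;
	if (ioctl(fd, SAD_GAP, &sap) < 0) {
		(void) close(fd);
		return (-1);
	}
	for (i = 0; i < sap.sap_npush; i++)
		(void) printf("%s\n", sap.sap_list[i]);
	(void) close(fd);
	return (0);
}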
/* * sadwput() - * Write side put procedure. */ static int sadwput( queue_t *qp, /* pointer to write queue */ mblk_t *mp) /* message pointer */ { struct iocblk *iocp; switch (mp->b_datap->db_type) { case M_FLUSH: if (*mp->b_rptr & FLUSHR) { *mp->b_rptr &= ~FLUSHW; qreply(qp, mp); } else freemsg(mp); break; case M_IOCTL: iocp = (struct iocblk *)mp->b_rptr; switch (SAD_CMD(iocp->ioc_cmd)) { case SAD_CMD(SAD_SAP): case SAD_CMD(SAD_GAP): apush_ioctl(qp, mp); break; case SAD_VML: vml_ioctl(qp, mp); break; default: miocnak(qp, mp, 0, EINVAL); break; } break; case M_IOCDATA: iocp = (struct iocblk *)mp->b_rptr; switch (SAD_CMD(iocp->ioc_cmd)) { case SAD_CMD(SAD_SAP): case SAD_CMD(SAD_GAP): apush_iocdata(qp, mp); break; case SAD_VML: vml_iocdata(qp, mp); break; default: cmn_err(CE_WARN, "sadwput: invalid ioc_cmd in case M_IOCDATA: %d", iocp->ioc_cmd); freemsg(mp); break; } break; default: freemsg(mp); break; } /* switch (db_type) */ return (0); }
/* * This routine is called from both ptemwput and ptemwsrv to do the * actual work of dealing with mp. ptmewput will have already * dealt with high priority messages. * * Return 1 if the message was processed completely and 0 if not. */ static int ptemwmsg(queue_t *q, mblk_t *mp) { struct ptem *ntp = (struct ptem *)q->q_ptr; struct iocblk *iocp; /* outgoing ioctl structure */ struct termio *termiop; struct termios *termiosp; mblk_t *dack_ptr; /* disconnect message ACK block */ mblk_t *pckt_msgp; /* message sent to the PCKT module */ mblk_t *dp; /* ioctl reply data */ tcflag_t cflags; int error; switch (mp->b_datap->db_type) { case M_IOCTL: /* * Note: for each "set" type operation a copy * of the M_IOCTL message is made and passed * downstream. Eventually the PCKT module, if * it has been pushed, should pick up this message. * If the PCKT module has not been pushed the master * side stream head will free it. */ iocp = (struct iocblk *)mp->b_rptr; switch (iocp->ioc_cmd) { case TCSETAF: case TCSETSF: /* * Flush the read queue. */ if (putnextctl1(q, M_FLUSH, FLUSHR) == 0) { miocnak(q, mp, 0, EAGAIN); break; } /* FALLTHROUGH */ case TCSETA: case TCSETAW: case TCSETS: case TCSETSW: switch (iocp->ioc_cmd) { case TCSETAF: case TCSETA: case TCSETAW: error = miocpullup(mp, sizeof (struct termio)); if (error != 0) { miocnak(q, mp, 0, error); goto out; } cflags = ((struct termio *) mp->b_cont->b_rptr)->c_cflag; ntp->cflags = (ntp->cflags & 0xffff0000 | cflags); break; case TCSETSF: case TCSETS: case TCSETSW: error = miocpullup(mp, sizeof (struct termios)); if (error != 0) { miocnak(q, mp, 0, error); goto out; } cflags = ((struct termios *) mp->b_cont->b_rptr)->c_cflag; ntp->cflags = cflags; break; } if ((cflags & CBAUD) == B0) { /* * Hang-up: Send a zero length message. */ dack_ptr = ntp->dack_ptr; if (dack_ptr) { ntp->dack_ptr = NULL; /* * Send a zero length message * downstream. */ putnext(q, dack_ptr); } } else { /* * Make a copy of this message and pass it on * to the PCKT module. */ if ((pckt_msgp = copymsg(mp)) == NULL) { miocnak(q, mp, 0, EAGAIN); break; } putnext(q, pckt_msgp); } /* * Send ACK upstream. */ mioc2ack(mp, NULL, 0, 0); qreply(q, mp); out: break; case TCGETA: dp = allocb(sizeof (struct termio), BPRI_MED); if (dp == NULL) { miocnak(q, mp, 0, EAGAIN); break; } termiop = (struct termio *)dp->b_rptr; termiop->c_cflag = (ushort_t)ntp->cflags; mioc2ack(mp, dp, sizeof (struct termio), 0); qreply(q, mp); break; case TCGETS: dp = allocb(sizeof (struct termios), BPRI_MED); if (dp == NULL) { miocnak(q, mp, 0, EAGAIN); break; } termiosp = (struct termios *)dp->b_rptr; termiosp->c_cflag = ntp->cflags; mioc2ack(mp, dp, sizeof (struct termios), 0); qreply(q, mp); break; case TCSBRK: error = miocpullup(mp, sizeof (int)); if (error != 0) { miocnak(q, mp, 0, error); break; } /* * Need a copy of this message to pass it on to * the PCKT module. */ if ((pckt_msgp = copymsg(mp)) == NULL) { miocnak(q, mp, 0, EAGAIN); break; } /* * Send a copy of the M_IOCTL to the PCKT module. */ putnext(q, pckt_msgp); /* * TCSBRK meaningful if data part of message is 0 * cf. termio(7). */ if (!(*(int *)mp->b_cont->b_rptr)) (void) putnextctl(q, M_BREAK); /* * ACK the ioctl. */ mioc2ack(mp, NULL, 0, 0); qreply(q, mp); break; case JWINSIZE: case TIOCGWINSZ: case TIOCSWINSZ: ptioc(q, mp, WRSIDE); break; case TIOCSTI: /* * Simulate typing of a character at the terminal. In * all cases, we acknowledge the ioctl and pass a copy * of it along for the PCKT module to encapsulate. 
If * not in remote mode, we also process the ioctl * itself, looping the character given as its argument * back around to the read side. */ /* * Need a copy of this message to pass on to the PCKT * module. */ if ((pckt_msgp = copymsg(mp)) == NULL) { miocnak(q, mp, 0, EAGAIN); break; } if ((ntp->state & REMOTEMODE) == 0) { mblk_t *bp; error = miocpullup(mp, sizeof (char)); if (error != 0) { freemsg(pckt_msgp); miocnak(q, mp, 0, error); break; } /* * The permission checking has already been * done at the stream head, since it has to be * done in the context of the process doing * the call. */ if ((bp = allocb(1, BPRI_MED)) == NULL) { freemsg(pckt_msgp); miocnak(q, mp, 0, EAGAIN); break; } /* * XXX: Is EAGAIN really the right response to * flow control blockage? */ if (!bcanputnext(RD(q), mp->b_band)) { freemsg(bp); freemsg(pckt_msgp); miocnak(q, mp, 0, EAGAIN); break; } *bp->b_wptr++ = *mp->b_cont->b_rptr; qreply(q, bp); } putnext(q, pckt_msgp); mioc2ack(mp, NULL, 0, 0); qreply(q, mp); break; case PTSSTTY: if (ntp->state & IS_PTSTTY) { miocnak(q, mp, 0, EEXIST); } else { ntp->state |= IS_PTSTTY; mioc2ack(mp, NULL, 0, 0); qreply(q, mp); } break; default: /* * End of the line. The slave driver doesn't see any * ioctls that we don't explicitly pass along to it. */ miocnak(q, mp, 0, EINVAL); break; } break; case M_DELAY: /* tty delays not supported */ freemsg(mp); break; case M_DATA: if ((mp->b_wptr - mp->b_rptr) < 0) { /* * Free all bad length messages. */ freemsg(mp); break; } else if ((mp->b_wptr - mp->b_rptr) == 0) { if (!(ntp->state & IS_PTSTTY)) { freemsg(mp); break; } } if (ntp->state & OFLOW_CTL) return (0); default: putnext(q, mp); break; } return (1); }
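/*
 * Usage sketch (termio(7I)): an argument of 0 asks for a break, which the
 * TCSBRK case above converts into an M_BREAK; a nonzero argument only waits
 * for output to drain before the ACK.
 */
#include <sys/termios.h>
#include <unistd.h>

static int
send_break(int fd)
{
	return (ioctl(fd, TCSBRK, 0));
}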
/*
 * Message must be of type M_IOCTL or M_IOCDATA for this routine to be called.
 */
static void
ptioc(queue_t *q, mblk_t *mp, int qside)
{
	struct ptem *tp;
	struct iocblk *iocp;
	struct winsize *wb;
	struct jwinsize *jwb;
	mblk_t *tmp;
	mblk_t *pckt_msgp;	/* message sent to the PCKT module */
	int error;

	iocp = (struct iocblk *)mp->b_rptr;
	tp = (struct ptem *)q->q_ptr;

	switch (iocp->ioc_cmd) {

	case JWINSIZE:
		/*
		 * For compatibility: If all zeros, NAK the message for dumb
		 * terminals.
		 */
		if ((tp->wsz.ws_row == 0) && (tp->wsz.ws_col == 0) &&
		    (tp->wsz.ws_xpixel == 0) && (tp->wsz.ws_ypixel == 0)) {
			miocnak(q, mp, 0, EINVAL);
			return;
		}

		tmp = allocb(sizeof (struct jwinsize), BPRI_MED);
		if (tmp == NULL) {
			miocnak(q, mp, 0, EAGAIN);
			return;
		}

		if (iocp->ioc_count == TRANSPARENT)
			mcopyout(mp, NULL, sizeof (struct jwinsize), NULL, tmp);
		else
			mioc2ack(mp, tmp, sizeof (struct jwinsize), 0);

		jwb = (struct jwinsize *)mp->b_cont->b_rptr;
		jwb->bytesx = tp->wsz.ws_col;
		jwb->bytesy = tp->wsz.ws_row;
		jwb->bitsx = tp->wsz.ws_xpixel;
		jwb->bitsy = tp->wsz.ws_ypixel;

		qreply(q, mp);
		return;

	case TIOCGWINSZ:
		/*
		 * If all zeros NAK the message for dumb terminals.
		 */
		if ((tp->wsz.ws_row == 0) && (tp->wsz.ws_col == 0) &&
		    (tp->wsz.ws_xpixel == 0) && (tp->wsz.ws_ypixel == 0)) {
			miocnak(q, mp, 0, EINVAL);
			return;
		}

		tmp = allocb(sizeof (struct winsize), BPRI_MED);
		if (tmp == NULL) {
			miocnak(q, mp, 0, EAGAIN);
			return;
		}

		mioc2ack(mp, tmp, sizeof (struct winsize), 0);

		wb = (struct winsize *)mp->b_cont->b_rptr;
		wb->ws_row = tp->wsz.ws_row;
		wb->ws_col = tp->wsz.ws_col;
		wb->ws_xpixel = tp->wsz.ws_xpixel;
		wb->ws_ypixel = tp->wsz.ws_ypixel;

		qreply(q, mp);
		return;

	case TIOCSWINSZ:
		error = miocpullup(mp, sizeof (struct winsize));
		if (error != 0) {
			miocnak(q, mp, 0, error);
			return;
		}

		wb = (struct winsize *)mp->b_cont->b_rptr;
		/*
		 * Send a SIGWINCH signal if the row/col information has
		 * changed.
		 */
		if ((tp->wsz.ws_row != wb->ws_row) ||
		    (tp->wsz.ws_col != wb->ws_col) ||
		    (tp->wsz.ws_xpixel != wb->ws_xpixel) ||
		    (tp->wsz.ws_ypixel != wb->ws_ypixel)) {
			/*
			 * SIGWINCH is always sent upstream.
			 */
			if (qside == WRSIDE)
				(void) putnextctl1(RD(q), M_SIG, SIGWINCH);
			else if (qside == RDSIDE)
				(void) putnextctl1(q, M_SIG, SIGWINCH);
			/*
			 * Message may have come in as an M_IOCDATA; pass it
			 * to the master side as an M_IOCTL.
			 */
			mp->b_datap->db_type = M_IOCTL;
			if (qside == WRSIDE) {
				/*
				 * Need a copy of this message to pass on to
				 * the PCKT module, only if the M_IOCTL
				 * originated from the slave side.
				 */
				if ((pckt_msgp = copymsg(mp)) == NULL) {
					miocnak(q, mp, 0, EAGAIN);
					return;
				}
				putnext(q, pckt_msgp);
			}
			tp->wsz.ws_row = wb->ws_row;
			tp->wsz.ws_col = wb->ws_col;
			tp->wsz.ws_xpixel = wb->ws_xpixel;
			tp->wsz.ws_ypixel = wb->ws_ypixel;
		}

		mioc2ack(mp, NULL, 0, 0);
		qreply(q, mp);
		return;

	case TIOCSIGNAL: {
		/*
		 * This ioctl can emanate from the master side in remote
		 * mode only.
		 */
		int sig;

		if (DB_TYPE(mp) == M_IOCTL && iocp->ioc_count != TRANSPARENT) {
			error = miocpullup(mp, sizeof (int));
			if (error != 0) {
				miocnak(q, mp, 0, error);
				return;
			}
		}

		if (DB_TYPE(mp) == M_IOCDATA || iocp->ioc_count != TRANSPARENT)
			sig = *(int *)mp->b_cont->b_rptr;
		else
			sig = (int)*(intptr_t *)mp->b_cont->b_rptr;

		if (sig < 1 || sig >= NSIG) {
			miocnak(q, mp, 0, EINVAL);
			return;
		}

		/*
		 * Send an M_PCSIG message up the slave's read side and
		 * respond back to the master with an ACK or NAK as
		 * appropriate.
		 */
		if (putnextctl1(q, M_PCSIG, sig) == 0) {
			miocnak(q, mp, 0, EAGAIN);
			return;
		}

		mioc2ack(mp, NULL, 0, 0);
		qreply(q, mp);
		return;
	}

	case TIOCREMOTE: {
		int onoff;
		mblk_t *mctlp;

		if (DB_TYPE(mp) == M_IOCTL) {
			error = miocpullup(mp, sizeof (int));
			if (error != 0) {
				miocnak(q, mp, 0, error);
				return;
			}
		}

		onoff = *(int *)mp->b_cont->b_rptr;

		/*
		 * Send M_CTL up using the iocblk format.
		 */
		mctlp = mkiocb(onoff ? MC_NO_CANON : MC_DO_CANON);
		if (mctlp == NULL) {
			miocnak(q, mp, 0, EAGAIN);
			return;
		}
		mctlp->b_datap->db_type = M_CTL;
		putnext(q, mctlp);

		/*
		 * ACK the ioctl.
		 */
		mioc2ack(mp, NULL, 0, 0);
		qreply(q, mp);

		/*
		 * Record state change.
		 */
		if (onoff)
			tp->state |= REMOTEMODE;
		else
			tp->state &= ~REMOTEMODE;
		return;
	}

	default:
		putnext(q, mp);
		return;
	}
}
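/*
 * Usage sketch: the JWINSIZE/TIOCGWINSZ/TIOCSWINSZ cases above back the
 * familiar window-size ioctls; a size change is what drives the SIGWINCH
 * delivery in the TIOCSWINSZ case.
 */
#include <sys/termios.h>
#include <unistd.h>

static int
copy_winsize(int from_fd, int to_fd)
{
	struct winsize ws;

	if (ioctl(from_fd, TIOCGWINSZ, &ws) < 0)
		return (-1);
	return (ioctl(to_fd, TIOCSWINSZ, &ws));	/* other side sees SIGWINCH */
}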
/* * function to handle dlpi streams message from GLDv3 mac layer */ void oce_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) { struct oce_dev *dev = arg; struct iocblk *iocp; int cmd; uint32_t payload_length; int ret; iocp = (struct iocblk *)voidptr(mp->b_rptr); iocp->ioc_error = 0; cmd = iocp->ioc_cmd; DEV_LOCK(dev); if (dev->suspended) { miocnak(wq, mp, 0, EINVAL); DEV_UNLOCK(dev); return; } DEV_UNLOCK(dev); switch (cmd) { case OCE_ISSUE_MBOX: { ret = oce_issue_mbox(dev, wq, mp, &payload_length); miocack(wq, mp, payload_length, ret); break; } case OCE_QUERY_DRIVER_DATA: { struct oce_driver_query *drv_query = (struct oce_driver_query *)(void *)mp->b_cont->b_rptr; /* if the driver version does not match bail */ if (drv_query->version != OCN_VERSION_SUPPORTED) { oce_log(dev, CE_NOTE, MOD_CONFIG, "%s", "One Connect version mismatch"); miocnak(wq, mp, 0, ENOTSUP); break; } /* fill the return values */ bcopy(OCE_MOD_NAME, drv_query->driver_name, (sizeof (OCE_MOD_NAME) > 32) ? 31 : sizeof (OCE_MOD_NAME)); drv_query->driver_name[31] = '\0'; bcopy(OCE_VERSION, drv_query->driver_version, (sizeof (OCE_VERSION) > 32) ? 31 : sizeof (OCE_VERSION)); drv_query->driver_version[31] = '\0'; if (dev->num_smac == 0) { drv_query->num_smac = 1; bcopy(dev->mac_addr, drv_query->smac_addr[0], ETHERADDRL); } else { drv_query->num_smac = dev->num_smac; bcopy(dev->unicast_addr, drv_query->smac_addr[0], ETHERADDRL); } bcopy(dev->mac_addr, drv_query->pmac_addr, ETHERADDRL); payload_length = sizeof (struct oce_driver_query); miocack(wq, mp, payload_length, 0); break; } default: miocnak(wq, mp, 0, ENOTSUP); break; } } /* oce_m_ioctl */
/*ARGSUSED1*/
static int
zc_close(queue_t *rqp, int flag, cred_t *credp)
{
	queue_t *wqp;
	mblk_t *bp;
	zc_state_t *zcs;
	major_t major;
	minor_t minor;

	zcs = (zc_state_t *)rqp->q_ptr;

	if (rqp == zcs->zc_master_rdq) {
		DBG("Closing master side");

		zcs->zc_master_rdq = NULL;
		zcs->zc_state &= ~ZC_STATE_MOPEN;

		/*
		 * qenable slave side write queue so that it can flush
		 * its messages as master's read queue is going away
		 */
		if (zcs->zc_slave_rdq != NULL) {
			qenable(WR(zcs->zc_slave_rdq));
		}

		qprocsoff(rqp);
		WR(rqp)->q_ptr = rqp->q_ptr = NULL;

	} else if (rqp == zcs->zc_slave_rdq) {
		DBG("Closing slave side");

		zcs->zc_state &= ~ZC_STATE_SOPEN;
		zcs->zc_slave_rdq = NULL;

		wqp = WR(rqp);
		while ((bp = getq(wqp)) != NULL) {
			if (zcs->zc_master_rdq != NULL)
				putnext(zcs->zc_master_rdq, bp);
			else if (bp->b_datap->db_type == M_IOCTL)
				miocnak(wqp, bp, 0, 0);
			else
				freemsg(bp);
		}

		/*
		 * Qenable master side write queue so that it can flush its
		 * messages as slave's read queue is going away.
		 */
		if (zcs->zc_master_rdq != NULL)
			qenable(WR(zcs->zc_master_rdq));

		qprocsoff(rqp);
		WR(rqp)->q_ptr = rqp->q_ptr = NULL;

		/*
		 * Clear the sad configuration so that reopening doesn't fail
		 * to set up sad configuration.
		 */
		major = ddi_driver_major(zcs->zc_devinfo);
		minor = ddi_get_instance(zcs->zc_devinfo) << 1 | ZC_SLAVE_MINOR;
		(void) kstr_autopush(CLR_AUTOPUSH, &major, &minor, NULL,
		    NULL, NULL);
	}

	return (0);
}
/* * telmodwput: * M_DATA is processed and forwarded if we aren't stopped awaiting the daemon * to process something. M_CTL's are data from the daemon bound for the * network. We forward them immediately. There are two classes of ioctl's * we must handle here also. One is ioctl's forwarded by ptem which we * ignore. The other is ioctl's issued by the daemon to control us. * Process them appropriately. M_PROTO's we pass along, figuring they are * are TPI operations for TCP. M_FLUSH requires careful processing, since * telnet cannot tolerate flushing its protocol requests. Also the flushes * can be running either daemon<->TCP or application<->telmod. We must * carefully deal with this. */ static void telmodwput( queue_t *q, /* Pointer to the read queue */ mblk_t *mp) /* Pointer to current message block */ { struct telmod_info *tmip; struct iocblk *ioc; mblk_t *savemp; int rw; int error; tmip = (struct telmod_info *)q->q_ptr; switch (mp->b_datap->db_type) { case M_DATA: if (!canputnext(q) || (tmip->flags & TEL_STOPPED) || (q->q_first)) { noenable(q); (void) putq(q, mp); break; } /* * This routine parses data generating from ptm side. * Insert a null character if carraige return * is not followed by line feed unless we are in binary mode. * Also, duplicate IAC if found in the data. */ (void) snd_parse(q, mp); break; case M_CTL: if (((mp->b_wptr - mp->b_rptr) == 1) && (*(mp->b_rptr) == M_CTL_MAGIC_NUMBER)) { savemp = mp->b_cont; freeb(mp); mp = savemp; } putnext(q, mp); break; case M_IOCTL: ioc = (struct iocblk *)mp->b_rptr; switch (ioc->ioc_cmd) { /* * This ioctl is issued by user level daemon to * request one more message block to process protocol */ case TEL_IOC_GETBLK: if (!(tmip->flags & TEL_STOPPED)) { miocnak(q, mp, 0, EINVAL); break; } tmip->flags |= TEL_GETBLK; qenable(RD(q)); enableok(RD(q)); miocack(q, mp, 0, 0); break; /* * This ioctl is issued by user level daemon to reenable the * read and write queues. This is issued during startup time * after setting up the mux links and also after processing * the protocol. It is also issued after each time an * an unrecognized telnet option is forwarded to the daemon. */ case TEL_IOC_ENABLE: /* * Send negative ack if TEL_STOPPED flag is not set */ if (!(tmip->flags & TEL_STOPPED)) { miocnak(q, mp, 0, EINVAL); break; } tmip->flags &= ~TEL_STOPPED; if (mp->b_cont) { (void) putbq(RD(q), mp->b_cont); mp->b_cont = 0; } qenable(RD(q)); enableok(RD(q)); qenable(q); enableok(q); miocack(q, mp, 0, 0); break; /* * Set binary/normal mode for input and output * according to the instructions from the daemon. */ case TEL_IOC_MODE: error = miocpullup(mp, sizeof (uchar_t)); if (error != 0) { miocnak(q, mp, 0, error); break; } tmip->flags |= *(mp->b_cont->b_rptr) & (TEL_BINARY_IN|TEL_BINARY_OUT); miocack(q, mp, 0, 0); break; #ifdef DEBUG case TCSETAF: case TCSETSF: case TCSETA: case TCSETAW: case TCSETS: case TCSETSW: case TCSBRK: case TIOCSTI: case TIOCSWINSZ: miocnak(q, mp, 0, EINVAL); break; #endif case CRYPTPASSTHRU: error = miocpullup(mp, sizeof (uchar_t)); if (error != 0) { miocnak(q, mp, 0, error); break; } if (*(mp->b_cont->b_rptr) == 0x01) tmip->flags |= TEL_IOCPASSTHRU; else tmip->flags &= ~TEL_IOCPASSTHRU; miocack(q, mp, 0, 0); break; default: if (tmip->flags & TEL_IOCPASSTHRU) { putnext(q, mp); } else { #ifdef DEBUG cmn_err(CE_NOTE, "telmodwput: unexpected ioctl type 0x%x", ioc->ioc_cmd); #endif miocnak(q, mp, 0, EINVAL); } break; } break; case M_FLUSH: /* * Flushing is tricky: We try to flush all we can, but certain * data cannot be flushed. 
Telnet protocol sequences cannot * be flushed. So, TCP's queues cannot be flushed since we * cannot tell what might be telnet protocol data. Then we * must take care to create and forward out-of-band data * indicating the flush to the far side. */ rw = *mp->b_rptr; if (rw & FLUSHR) { /* * We cannot flush our read queue, since there may * be telnet protocol bits in the queue, awaiting * processing. However, once it leaves this module * it's guaranteed that all protocol data is in * M_CTL, so we do flush read data beyond us, expecting * them (actually logindmux) to do FLUSHDATAs also. */ *mp->b_rptr = rw & ~FLUSHW; qreply(q, mp); } else { freemsg(mp); } if (rw & FLUSHW) { /* * Since all telnet protocol data comes from the * daemon, stored as M_CTL messages, flushq will * do exactly what's needed: Flush bytes which do * not have telnet protocol data. */ flushq(q, FLUSHDATA); } break; case M_PCPROTO: putnext(q, mp); break; case M_PROTO: /* We may receive T_DISCON_REQ from the mux */ if (!canputnext(q) || q->q_first != NULL) (void) putq(q, mp); else putnext(q, mp); break; default: #ifdef DEBUG cmn_err(CE_NOTE, "telmodwput: unexpected msg type 0x%x", mp->b_datap->db_type); #endif freemsg(mp); break; } }
/* * dm2s_wput - Streams write side put routine. * * All M_DATA messages are queued so that they are transmitted in * the service procedure. This is done to simplify the streams * synchronization. Other messages are handled appropriately. */ int dm2s_wput(queue_t *wq, mblk_t *mp) { dm2s_t *dm2sp = (dm2s_t *)wq->q_ptr; DPRINTF(DBG_DRV, ("dm2s_wput: called\n")); if (dm2sp == NULL) { return (ENODEV); /* Can't happen. */ } switch (mp->b_datap->db_type) { case (M_DATA): DPRINTF(DBG_DRV, ("dm2s_wput: M_DATA message\n")); while (mp->b_wptr == mp->b_rptr) { mblk_t *mp1; mp1 = unlinkb(mp); freemsg(mp); mp = mp1; if (mp == NULL) { return (0); } } /* * Simply queue the message and handle it in the service * procedure. */ (void) putq(wq, mp); qenable(wq); return (0); case (M_PROTO): DPRINTF(DBG_DRV, ("dm2s_wput: M_PROTO message\n")); /* We don't expect this */ mp->b_datap->db_type = M_ERROR; mp->b_rptr = mp->b_wptr = mp->b_datap->db_base; *mp->b_wptr++ = EPROTO; qreply(wq, mp); return (EINVAL); case (M_IOCTL): DPRINTF(DBG_DRV, ("dm2s_wput: M_IOCTL message\n")); if (MBLKL(mp) < sizeof (struct iocblk)) { freemsg(mp); return (0); } /* * No ioctls required to be supported by this driver, so * return EINVAL for all ioctls. */ miocnak(wq, mp, 0, EINVAL); break; case (M_CTL): DPRINTF(DBG_DRV, ("dm2s_wput: M_CTL message\n")); /* * No M_CTL messages need to supported by this driver, * so simply ignore them. */ freemsg(mp); break; case (M_FLUSH): DPRINTF(DBG_DRV, ( "dm2s_wput: M_FLUSH message 0x%X\n", *mp->b_rptr)); if (*mp->b_rptr & FLUSHW) { /* Flush write-side */ (void) scf_mb_flush(dm2sp->ms_target, dm2sp->ms_key, MB_FLUSH_SEND); flushq(wq, FLUSHDATA); *mp->b_rptr &= ~FLUSHW; } if (*mp->b_rptr & FLUSHR) { (void) scf_mb_flush(dm2sp->ms_target, dm2sp->ms_key, MB_FLUSH_RECEIVE); flushq(RD(wq), FLUSHDATA); qreply(wq, mp); } else { freemsg(mp); } break; default: DPRINTF(DBG_DRV, ("dm2s_wput: UNKNOWN message\n")); freemsg(mp); } return (0); }
/* * Handle write-side M_IOCTL messages. */ static void pfioctl(queue_t *wq, mblk_t *mp) { struct epacketfilt *pfp = (struct epacketfilt *)wq->q_ptr; struct Pf_ext_packetfilt *upfp; struct packetfilt *opfp; ushort_t *fwp; int arg; int maxoff = 0; int maxoffreg = 0; struct iocblk *iocp = (struct iocblk *)mp->b_rptr; int error; switch (iocp->ioc_cmd) { case PFIOCSETF: /* * Verify argument length. Since the size of packet filter * got increased (ENMAXFILTERS was bumped up to 2047), to * maintain backwards binary compatibility, we need to * check for both possible sizes. */ switch (iocp->ioc_count) { case sizeof (struct Pf_ext_packetfilt): error = miocpullup(mp, sizeof (struct Pf_ext_packetfilt)); if (error != 0) { miocnak(wq, mp, 0, error); return; } upfp = (struct Pf_ext_packetfilt *)mp->b_cont->b_rptr; if (upfp->Pf_FilterLen > PF_MAXFILTERS) { miocnak(wq, mp, 0, EINVAL); return; } bcopy(upfp, pfp, sizeof (struct Pf_ext_packetfilt)); pfp->pf_FilterEnd = &pfp->pf_Filter[pfp->pf_FilterLen]; break; case sizeof (struct packetfilt): error = miocpullup(mp, sizeof (struct packetfilt)); if (error != 0) { miocnak(wq, mp, 0, error); return; } opfp = (struct packetfilt *)mp->b_cont->b_rptr; /* this strange comparison keeps gcc from complaining */ if (opfp->Pf_FilterLen - 1 >= ENMAXFILTERS) { miocnak(wq, mp, 0, EINVAL); return; } pfp->pf.Pf_Priority = opfp->Pf_Priority; pfp->pf.Pf_FilterLen = (unsigned int)opfp->Pf_FilterLen; bcopy(opfp->Pf_Filter, pfp->pf.Pf_Filter, sizeof (opfp->Pf_Filter)); pfp->pf_FilterEnd = &pfp->pf_Filter[pfp->pf_FilterLen]; break; default: miocnak(wq, mp, 0, EINVAL); return; } /* * Find and record maximum byte offset that the * filter users. We use this when executing the * filter to determine how much of the packet * body to pull up. This code depends on the * filter encoding. */ for (fwp = pfp->pf_Filter; fwp < pfp->pf_FilterEnd; fwp++) { arg = *fwp & ((1 << ENF_NBPA) - 1); switch (arg) { default: if ((arg -= ENF_PUSHWORD) > maxoff) maxoff = arg; break; case ENF_LOAD_OFFSET: /* Point to the offset */ fwp++; if (*fwp > maxoffreg) maxoffreg = *fwp; break; case ENF_PUSHLIT: case ENF_BRTR: case ENF_BRFL: /* Skip over the literal. */ fwp++; break; case ENF_PUSHZERO: case ENF_PUSHONE: case ENF_PUSHFFFF: case ENF_PUSHFF00: case ENF_PUSH00FF: case ENF_NOPUSH: case ENF_POP: break; } } /* * Convert word offset to length in bytes. */ pfp->pf_PByteLen = (maxoff + maxoffreg + 1) * sizeof (ushort_t); miocack(wq, mp, 0, 0); break; default: putnext(wq, mp); break; } }
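/*
 * Usage sketch (after pfmod(7M)): push pfmod onto a DLPI stream and install
 * a short filter; this one accepts only IP ethertype.  Filter offsets are in
 * 16-bit words from the start of the packet, which is why the handler above
 * converts its computed maximum offset back to bytes.
 */
#include <sys/types.h>
#include <stropts.h>
#include <sys/pfmod.h>
#include <sys/ethernet.h>
#include <netinet/in.h>
#include <unistd.h>

static int
install_ip_filter(int dlpi_fd)
{
	struct packetfilt pf;
	ushort_t *fwp = pf.Pf_Filter;

	if (ioctl(dlpi_fd, I_PUSH, "pfmod") < 0)
		return (-1);

	pf.Pf_Priority = 0;
	*fwp++ = ENF_PUSHWORD + 6;		/* ether_type, in words */
	*fwp++ = ENF_PUSHLIT | ENF_EQ;
	*fwp++ = htons(ETHERTYPE_IP);
	pf.Pf_FilterLen = fwp - &pf.Pf_Filter[0];

	return (ioctl(dlpi_fd, PFIOCSETF, &pf));
}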