/*ARGSUSED1*/
/*
 * STREAMS close routine shared by both sides of the zone-console
 * master/slave pair.  Determines which side is closing by comparing the
 * read queue against the queue pointers cached in the shared zc_state_t,
 * then tears that side down.  Returns 0 in all cases.
 */
static int
zc_close(queue_t *rqp, int flag, cred_t *credp)
{
	queue_t *wqp;
	mblk_t *bp;
	zc_state_t *zcs;

	zcs = (zc_state_t *)rqp->q_ptr;

	if (rqp == zcs->zc_master_rdq) {
		DBG("Closing master side");

		/* Unpublish the master queue and clear its open flag. */
		zcs->zc_master_rdq = NULL;
		zcs->zc_state &= ~ZC_STATE_MOPEN;

		/*
		 * qenable slave side write queue so that it can flush
		 * its messages as master's read queue is going away
		 */
		if (zcs->zc_slave_rdq != NULL) {
			qenable(WR(zcs->zc_slave_rdq));
		}

		qprocsoff(rqp);
		WR(rqp)->q_ptr = rqp->q_ptr = NULL;
	} else if (rqp == zcs->zc_slave_rdq) {
		DBG("Closing slave side");
		zcs->zc_state &= ~ZC_STATE_SOPEN;
		zcs->zc_slave_rdq = NULL;

		/*
		 * Drain the slave's write queue: forward messages to the
		 * master if it is still open, NAK any pending ioctls,
		 * and drop everything else.
		 */
		wqp = WR(rqp);
		while ((bp = getq(wqp)) != NULL) {
			if (zcs->zc_master_rdq != NULL)
				putnext(zcs->zc_master_rdq, bp);
			else if (bp->b_datap->db_type == M_IOCTL)
				miocnak(wqp, bp, 0, 0);
			else
				freemsg(bp);
		}

		/*
		 * Qenable master side write queue so that it can flush its
		 * messages as slaves's read queue is going away.
		 */
		if (zcs->zc_master_rdq != NULL)
			qenable(WR(zcs->zc_master_rdq));

		qprocsoff(rqp);
		WR(rqp)->q_ptr = rqp->q_ptr = NULL;
	}

	return (0);
}
/*ARGSUSED1*/
/*
 * logiclose - STREAMS close routine for the Logitech mouse driver.
 * Quiesces the queue, disables the device's interrupts under the
 * per-unit lock, severs the queue private pointers and marks the unit
 * closed.  Always returns 0.
 */
static int
logiclose(queue_t *q, int flag, cred_t *cred_p)
{
	struct strmseinfo *logiptr;

	/* Stop put/service procedures before touching shared state. */
	qprocsoff(q);
#ifdef LOGI_DEBUG
	if (logi_debug)
		printf("logiclose:entered\n");
#endif
	logiptr = q->q_ptr;
	mutex_enter(&logiptr->lock);
#ifdef LOGI_DEBUG
	if (logi_debug)
		printf("logiclose:Disable interrupts ioaddr %x\n", BASE_IOA);
#endif
	control_port(INTR_DISABLE);	/* Disable interrupts */
	q->q_ptr = NULL;
	WR(q)->q_ptr = NULL;
#ifdef LOGI_DEBUG
	if (logi_debug)
		printf("logiclose:leaving\n");
#endif
	logiptr->state = 0;	/* Not opened */
	mutex_exit(&logiptr->lock);
	return (0);
}
/*
 * zap_qclose - STREAMS close routine for the zap channel driver.
 *
 * BUG FIX: the original dereferenced `sd' (sd->sd_inode, sd->sd_file)
 * without ever fetching it from the queue, and q->q_ptr had already
 * been cleared by the time of the release call.  Capture the private
 * pointer from q->q_ptr BEFORE tearing the queue down.
 *
 * NOTE(review): this assumes q->q_ptr holds the stream head
 * (struct stdata, which carries sd_inode/sd_file in LiS) -- confirm
 * against the matching open routine.
 */
static streamscall int
zap_qclose(queue_t *q, int oflags, cred_t *crp)
{
	struct stdata *sd = (struct stdata *) q->q_ptr;

	qprocsoff(q);
	q->q_ptr = WR(q)->q_ptr = NULL;
	return zt_specchan_release(sd->sd_inode, sd->sd_file);
}
/*
 * mstr_qclose - STREAMS close routine for the mstr driver.
 * Shuts off queue processing, then detaches the private pointer from
 * both halves of the queue pair.  Always succeeds.
 */
STATIC streamscall int
mstr_qclose(queue_t *q, int oflag, cred_t *crp)
{
	qprocsoff(q);
	WR(q)->q_ptr = NULL;
	q->q_ptr = NULL;
	return (0);
}
/* ARGSUSED */
/*
 * wcmclose - close routine for the wcm module: disables queue
 * processing and pops the redirection off the stream head's vnode.
 */
static int
wcmclose(queue_t *q, int flag, cred_t *cred)
{
	vnode_t *vp;

	qprocsoff(q);
	vp = q->q_stream->sd_vnode;
	srpop(vp, B_TRUE);
	return (0);
}
/*ARGSUSED*/
/*
 * parseclose - STREAMS close for the parse module.  Detaches any
 * attached line-monitor stream at raised IPL, winds down the parse
 * I/O machinery and frees the per-stream state.
 */
static int
parseclose(
	queue_t *q,
	int flags
	)
{
	register parsestream_t *psp = (parsestream_t *)q->q_ptr;
	register unsigned long ipl;

	pprintf(DD_CLOSE, "parse: CLOSE\n");
	qprocsoff(q);

	/* Block interrupts while unhooking the line monitor. */
	ipl = splhigh();
	if (psp->parse_dqueue)
		close_linemon(psp->parse_dqueue, q);
	psp->parse_dqueue = (queue_t *)0;
	(void) splx(ipl);

	parse_ioend(&psp->parse_io);
	kmem_free((caddr_t)psp, sizeof(parsestream_t));

	q->q_ptr = (caddr_t)NULL;
	WR(q)->q_ptr = (caddr_t)NULL;
	return 0;
}
/*
 * nca_close - STREAMS close entry; quiesces the queue and severs the
 * private-data links on both the read and write sides.
 */
static int
nca_close(queue_t *q)
{
	qprocsoff(q);
	WR(q)->q_ptr = NULL;
	RD(q)->q_ptr = NULL;
	return (0);
}
/*
 * dlpi_close - close routine for the DLPI stream; shuts the queue
 * down and releases the interface data attached to it.
 */
static int
dlpi_close(queue_t *q, int oflag, cred_t *cred)
{
	struct atif_data *aid;

	aid = (struct atif_data *)q->q_ptr;
	qprocsoff(q);
	if_free(aid);
	return (0);
}
/*
 * ch_qclose - close routine for the ch driver; turns queue processing
 * off and releases the minor device via the mi_close_comm helper,
 * propagating its error code.
 */
static streamscall __unlikely int
ch_qclose(queue_t *q, dev_t *devp, int oflags)
{
	int err;

	qprocsoff(q);
	err = mi_close_comm(&ch_head, q);
	return (err);
}
/*
 * sad_close - close routine for the sad driver; quiesces the queue,
 * unlinks the private structure from both queue halves and returns
 * the slot to the unassigned state.
 */
static streamscall int
sad_close(queue_t *q, int oflag, cred_t *crp)
{
	struct sad *sad;

	sad = q->q_ptr;
	qprocsoff(q);
	WR(q)->q_ptr = NULL;
	q->q_ptr = NULL;
	/* Mark the per-device slot free for reuse. */
	sad->iocstate = 0;
	sad->assigned = 0;
	return (0);
}
/*
 * srvmod_close - close routine for the srvmod module.  Fails with
 * ENXIO if the stream was never set up; otherwise disables queue
 * processing and clears both private pointers.
 */
STATIC streamscall int
srvmod_close(queue_t *q, int oflag, cred_t *crp)
{
	(void) oflag;
	(void) crp;
	if (q->q_ptr == NULL)
		return (ENXIO);
	qprocsoff(q);
	WR(q)->q_ptr = NULL;
	q->q_ptr = NULL;
	return (0);
}
/*
 * ll_qclose - close routine for the ll driver.  A no-op when the
 * stream carries no private data; otherwise quiesces the queue and
 * releases the minor device.
 */
static streamscall int
ll_qclose(queue_t *q, int oflags, cred_t *crp)
{
	struct priv *p;

	if ((p = PRIV(q)) == NULL)
		return (0);
	qprocsoff(q);
	mi_close_comm(&ll_opens, q);
	return (0);
}
/* ARGSUSED */
/*
 * ptemclose - close routine for the PTEM module.  Frees the cached
 * disconnect-ack message and the per-stream ptem structure, then
 * severs the queue's private pointers.
 */
static int
ptemclose(queue_t *q, int flag, cred_t *credp)
{
	struct ptem *ntp;

	qprocsoff(q);
	ntp = (struct ptem *)q->q_ptr;
	freemsg(ntp->dack_ptr);
	kmem_free(ntp, sizeof (*ntp));
	WR(q)->q_ptr = NULL;
	q->q_ptr = NULL;
	return (0);
}
/*
 * cvc_unregister()
 *	called from cvcredir to clear pointers to its queues.
 *	cvcredir no longer wants to send or receive data.
 */
void
cvc_unregister(queue_t *q)
{
	rw_enter(&cvclock, RW_WRITER);
	if (q != cvcoutput_q) {
		/* Not the registered queue -- complain and bail out. */
		rw_exit(&cvclock);
		cmn_err(CE_WARN, "cvc_unregister: q = 0x%p not registered", q);
		return;
	}
	qprocsoff(cvcoutput_q);		/* must be done within cvclock */
	cvcoutput_q = NULL;
	rw_exit(&cvclock);
}
/* ARGSUSED1 */
/*
 * ip_helper_stream_close - tears down an IP helper stream: returns
 * the minor number to its arena, frees the per-minor bookkeeping and
 * clears the queue private pointers on both sides.
 */
static int
ip_helper_stream_close(queue_t *q, int flag)
{
	ip_helper_minfo_t *ip_minfop;

	qprocsoff(q);
	ip_minfop = q->q_ptr;
	inet_minor_free(ip_minfop->ip_minfo_arena, ip_minfop->ip_minfo_dev);
	kmem_free(ip_minfop, sizeof (ip_helper_minfo_t));
	WR(q)->q_ptr = NULL;
	RD(q)->q_ptr = NULL;

	return (0);
}
/*ARGSUSED1*/
/*
 * kb8042_close - close routine for the kb8042 keyboard driver; winds
 * down the kbtrans layer, drops the cached write queue pointer and
 * disables queue processing.
 */
static int
kb8042_close(queue_t *qp, int flag, cred_t *credp)
{
	struct kb8042 *kb8042 = (struct kb8042 *)qp->q_ptr;

	(void) kbtrans_streams_fini(kb8042->hw_kbtrans);
	kb8042->w_qp = NULL;
	qprocsoff(qp);
	return (0);
}
/*
 * pfclose - close routine for the packet filter module; frees the
 * per-stream filter state and unlinks it from the queue pair.
 */
static int
pfclose(queue_t *rq)
{
	struct epacketfilt *pfp = (struct epacketfilt *)rq->q_ptr;

	ASSERT(pfp);
	qprocsoff(rq);
	kmem_free(pfp, sizeof (struct epacketfilt));
	WR(rq)->q_ptr = NULL;
	rq->q_ptr = NULL;
	return (0);
}
/* ARGSUSED */
/*
 * dm2s_close - close routine for the dm2s driver.  Shuts down the
 * mailbox transport first (so no more async callbacks can arrive),
 * poisons the stream head with M_HANGUP, cancels any outstanding
 * bufcall/timeouts, and finally marks the state closed and detaches
 * from the queue pair.  Returns ENODEV on a repeated close.
 */
int
dm2s_close(queue_t *rq, int flag, cred_t *cred)
{
	dm2s_t *dm2sp = (dm2s_t *)rq->q_ptr;

	DPRINTF(DBG_DRV, ("dm2s_close: called\n"));
	if (dm2sp == NULL) {
		/* Already closed once */
		return (ENODEV);
	}

	/* Close the lower layer first */
	mutex_enter(&dm2sp->ms_lock);
	(void) scf_mb_flush(dm2sp->ms_target, dm2sp->ms_key, MB_FLUSH_ALL);
	dm2s_mbox_fini(dm2sp);
	mutex_exit(&dm2sp->ms_lock);

	/*
	 * Now we can assume that no asynchronous callbacks exist.
	 * Poison the stream head so that we can't be pushed again.
	 */
	(void) putnextctl(rq, M_HANGUP);
	qprocsoff(rq);

	/* Cancel a pending bufcall, if one is outstanding. */
	if (dm2sp->ms_rbufcid != 0) {
		qunbufcall(rq, dm2sp->ms_rbufcid);
		dm2sp->ms_rbufcid = 0;
	}
	/* Cancel any pending read/write-side timeouts. */
	if (dm2sp->ms_rq_timeoutid != 0) {
		DTRACE_PROBE1(dm2s_rqtimeout__cancel, dm2s_t, dm2sp);
		(void) quntimeout(dm2sp->ms_rq, dm2sp->ms_rq_timeoutid);
		dm2sp->ms_rq_timeoutid = 0;
	}
	if (dm2sp->ms_wq_timeoutid != 0) {
		DTRACE_PROBE1(dm2s_wqtimeout__cancel, dm2s_t, dm2sp);
		(void) quntimeout(dm2sp->ms_wq, dm2sp->ms_wq_timeoutid);
		dm2sp->ms_wq_timeoutid = 0;
	}

	/*
	 * Now we can really mark it closed.
	 */
	mutex_enter(&dm2sp->ms_lock);
	dm2sp->ms_rq = dm2sp->ms_wq = NULL;
	dm2sp->ms_state &= ~DM2S_OPENED;
	mutex_exit(&dm2sp->ms_lock);

	rq->q_ptr = WR(rq)->q_ptr = NULL;
	(void) qassociate(rq, -1);
	DPRINTF(DBG_DRV, ("dm2s_close: successfully closed\n"));
	return (0);
}
/* ARGSUSED1 */
/*
 * drclose - close routine for the dr driver; dissociates the queue
 * from its device node, quiesces it and releases the per-open state.
 */
static int
drclose(queue_t *q, int cflag, cred_t *crp)
{
	struct drstate *sp = q->q_ptr;

	ASSERT(sp);
	ddi_assoc_queue_with_devi(q, NULL);
	qprocsoff(q);

	mutex_destroy(&sp->dr_lock);
	kmem_free(sp, sizeof (*sp));
	q->q_ptr = NULL;
	return (0);
}
/*
 * ip2xinet_close - STREAMS close routine for the ip2xinet driver.
 * Quiesces the queue, then under the driver lock marks the device
 * closed, flushes anything still pending on the write side and
 * severs the queue private pointers.
 *
 * Cleanup: the original ended with `goto quit; quit: return (0);' --
 * a jump to the immediately following statement; replaced with a
 * plain return.
 */
int streamscall
ip2xinet_close(queue_t *q, int oflag, cred_t *credp)
{
	(void) oflag;
	(void) credp;
	qprocsoff(q);
	spin_lock(&ip2xinet_lock);
	ip2xinet_numopen = 0;
	/* Discard any messages still queued on the write side. */
	flushq(WR(q), FLUSHALL);
	q->q_ptr = NULL;
	WR(q)->q_ptr = NULL;
	spin_unlock(&ip2xinet_lock);
	return (0);
}
/* ARGSUSED */
/*
 * sadclose - close routine for the SAD driver.  Resets the per-open
 * bookkeeping and detaches it from both halves of the queue pair.
 */
static int
sadclose(
	queue_t *qp,	/* pointer to read queue */
	int flag,	/* file open flags */
	cred_t *credp)	/* user credentials */
{
	struct saddev *sadp = (struct saddev *)qp->q_ptr;

	qprocsoff(qp);
	sadp->sa_addr = NULL;
	sadp->sa_qp = NULL;
	WR(qp)->q_ptr = NULL;
	qp->q_ptr = NULL;
	return (0);
}
/**
 * socksys_qclose - SOCKSYS driver STREAMS close routine
 * @q: read queue of closing Stream
 * @oflag: flags to open call
 * @crp: pointer to closer's credentials
 *
 * Turns queue processing off and releases the per-stream private
 * structure.  Cleanup: dropped the original's `goto quit' that
 * jumped to the immediately following return statement.
 */
STATIC streamscall int
socksys_qclose(queue_t *q, int oflag, cred_t *crp)
{
	struct ssys *s = SOCKSYS_PRIV(q);

	(void) oflag;
	(void) crp;
	(void) s;
	_printd(("%s: closing character device %d:%d\n", DRV_NAME, s->dev.cmajor, s->dev.cminor));
	/* make sure procedures are off */
	qprocsoff(q);
	ssys_free_priv(q);	/* free and unlink the structure */
	return (0);
}
/*
 * audio_strclose - STREAMS close for an audio client.  Drains pending
 * output when the closer can take signals (or is a kernel context),
 * deactivates the client so no further upcalls arrive, salvages any
 * buffered record data, then destroys the client and drops the local
 * device hold.
 *
 * BUG FIX: `rv' was returned uninitialized whenever the drain branch
 * was skipped (signal delivery impossible and nonzero pid) -- it now
 * defaults to 0.
 */
static int
audio_strclose(queue_t *rq, int flag, cred_t *credp)
{
	audio_client_t *c;
	audio_dev_t *d;
	int rv = 0;

	_NOTE(ARGUNUSED(flag));
	_NOTE(ARGUNUSED(credp));

	if ((c = rq->q_ptr) == NULL) {
		return (ENXIO);
	}
	if (ddi_can_receive_sig() || (ddi_get_pid() == 0)) {
		rv = auclnt_drain(c);
	}

	/* make sure we won't get any upcalls */
	auimpl_client_deactivate(c);

	/*
	 * Pick up any data sitting around in input buffers.  This
	 * avoids leaving record data stuck in queues.
	 */
	if (c->c_istream.s_engine != NULL)
		audio_engine_produce(c->c_istream.s_engine);

	/* get a local hold on the device */
	d = c->c_dev;
	auimpl_dev_hold(c->c_dev);

	/* Turn off queue processing... */
	qprocsoff(rq);

	/* Call personality specific close handler */
	c->c_close(c);

	auimpl_client_destroy(c);

	/* notify peers that a change has occurred */
	atomic_inc_uint(&d->d_serial);

	/* now we can drop the release we had on the device */
	auimpl_dev_release(d);

	return (rv);
}
/*
 * pckt_qclose - close routine for the pckt module.  Cancels any
 * outstanding bufcall (swapped out atomically for LiS compatibility)
 * and frees the private structure before unlinking it from the queue
 * pair.
 */
static streamscall int
pckt_qclose(queue_t *q, int oflag, cred_t *crp)
{
	struct pckt *p;

	qprocsoff(q);
	p = (struct pckt *) q->q_ptr;
	if (p != NULL) {
		bcid_t bc;

		/* atomic exchange for LiS's stupid sake */
		bc = xchg(&p->bufcall, 0);
		if (bc)
			unbufcall(bc);
		kmem_free(p, sizeof(*p));
	}
	q->q_ptr = WR(q)->q_ptr = NULL;
	return (0);
}
/*ARGSUSED*/
/*
 * connclose - close routine for the connld module; clears the
 * FIFOCONNLD flag on the underlying fifonode and releases the vnode
 * hold obtained from strq2vp().
 */
int
connclose(queue_t *q, int cflag, cred_t *crp)
{
	vnode_t *vp;
	fifonode_t *fnp;

	qprocsoff(q);

	vp = strq2vp(q);
	ASSERT(vp != NULL);
	ASSERT(vp->v_type == VFIFO);

	fnp = VTOF(vp);
	fnp->fn_flag &= ~FIFOCONNLD;
	VN_RELE(vp);
	return (0);
}
/* ARGSUSED */
/*
 * log_close - close routine for the log driver.  Releases the log
 * slot, discards any buffered log data and, for console minors,
 * frees the dynamically allocated log structure.
 */
static int
log_close(queue_t *q, int flag, cred_t *cr)
{
	log_t *lp = (log_t *)q->q_ptr;

	qprocsoff(q);

	lp->log_inuse = 0;
	log_update(lp, NULL, 0, NULL);
	freemsg(lp->log_data);
	lp->log_data = NULL;
	if (lp->log_major == LOG_CONSMIN)
		log_free(lp);

	WR(q)->q_ptr = NULL;
	q->q_ptr = NULL;
	return (0);
}
/*
 * ptem_qclose - close routine for the ptem module.  If the zero-length
 * (hangup) message was never sent downstream, send it now; then
 * quiesce the queue and free the private state.  Returns EIO when the
 * stream carries no private data.
 */
static streamscall int
ptem_qclose(queue_t *q, int oflag, cred_t *crp)
{
	struct ptem *p;
	mblk_t *mp;

	if ((p = PTEM_PRIV(q)) == NULL)
		return (EIO);

	/* didn't hang up, do it now */
	mp = xchg(&p->zero, NULL);
	if (mp != NULL)
		putnext(WR(q), mp);

	qprocsoff(q);
	q->q_ptr = WR(q)->q_ptr = NULL;
	kmem_free(p, sizeof(*p));
	return (0);
}
/*ARGSUSED*/
/*
 * telmodclose - close routine for the telnet module.  Pushes any
 * queued write-side data downstream, poisons the stream head with
 * M_HANGUP so nothing can be pushed again, cancels all outstanding
 * bufcalls/timeouts, and frees the per-stream telmod state.
 */
static int
telmodclose(queue_t *q, int flag, cred_t *credp)
{
	struct telmod_info *tmip = (struct telmod_info *)q->q_ptr;
	mblk_t *mp;

	/*
	 * Flush any write-side data downstream.  Ignoring flow
	 * control at this point is known to be safe because the
	 * M_HANGUP below poisons the stream such that no modules can
	 * be pushed again.
	 */
	while (mp = getq(WR(q)))
		putnext(WR(q), mp);

	/* Poison the stream head so that we can't be pushed again. */
	(void) putnextctl(q, M_HANGUP);
	qprocsoff(q);

	/* Cancel pending write/read-side bufcalls. */
	if (tmip->wbufcid) {
		qunbufcall(q, tmip->wbufcid);
		tmip->wbufcid = 0;
	}
	if (tmip->rbufcid) {
		qunbufcall(q, tmip->rbufcid);
		tmip->rbufcid = 0;
	}
	/* Cancel pending write/read-side timeouts. */
	if (tmip->wtimoutid) {
		(void) quntimeout(q, tmip->wtimoutid);
		tmip->wtimoutid = 0;
	}
	if (tmip->rtimoutid) {
		(void) quntimeout(q, tmip->rtimoutid);
		tmip->rtimoutid = 0;
	}
	/* Release the cached unbind message, if any. */
	if (tmip->unbind_mp != NULL) {
		freemsg(tmip->unbind_mp);
	}

	kmem_free(q->q_ptr, sizeof (struct telmod_info));
	q->q_ptr = WR(q)->q_ptr = NULL;
	return (0);
}
/*
 * rds_close - close routine for an RDS socket stream.  Releases the
 * port accounting if the socket was bound, possibly closes the
 * transport driver, removes the socket from the bind hash and waits
 * for all other references to drain before dropping its own.
 */
static int
rds_close(queue_t *q)
{
	rds_t *rdsp = (rds_t *)q->q_ptr;

	qprocsoff(q);

	/*
	 * NPORT should be decremented only if this socket was previously
	 * bound to an RDS port.
	 */
	if (rdsp->rds_state >= TS_IDLE) {
		RDS_DECR_NPORT();
		RDS_SET_PORT_QUOTA(RDS_CURRENT_PORT_QUOTA());
		rds_transport_ops->
		    rds_transport_resume_port(ntohs(rdsp->rds_port));
	}

	/* close the transport driver if this is the last socket */
	if (RDS_GET_NPORT() == 1) {
		(void) rds_close_transport_driver();
	}

	/*
	 * We set the flags without holding a lock as this is
	 * just a hint for the fanout lookup to skip this rds.
	 * We dont free the struct until it's out of the hash and
	 * the ref count goes down.
	 */
	rdsp->rds_flags |= RDS_CLOSING;
	rds_bind_hash_remove(rdsp, B_FALSE);

	/* Wait until we are the only remaining reference holder. */
	mutex_enter(&rdsp->rds_lock);
	ASSERT(rdsp->rds_refcnt > 0);
	if (rdsp->rds_refcnt != 1) {
		cv_wait(&rdsp->rds_refcv, &rdsp->rds_lock);
	}
	mutex_exit(&rdsp->rds_lock);

	/* Drop our own reference and detach from the queue pair. */
	RDS_DEC_REF_CNT(rdsp);
	RD(q)->q_ptr = NULL;
	WR(q)->q_ptr = NULL;
	return (0);
}
/*
 * spx_close - close routine for the spx driver.  Unlinks the private
 * structure from the global open list under the spx spinlock and
 * clears the private pointers on both halves of the queue pair.
 * A second close on the same stream is a silent no-op.
 */
static streamscall int
spx_close(queue_t *q, int oflag, cred_t *crp)
{
	struct spx *p;

	p = q->q_ptr;
	if (p == NULL)
		return (0);	/* already closed */

	qprocsoff(q);
	spin_lock(&spx_lock);
	/* Unlink p from the doubly linked open list. */
	if ((*(p->prev) = p->next) != NULL)
		p->next->prev = p->prev;
	p->next = NULL;
	p->prev = &p->next;
	p->init = 0;
	p->q = NULL;
	q->q_ptr = OTHERQ(q)->q_ptr = NULL;
	spin_unlock(&spx_lock);
	/* FIXME: we need to do more than this...  If we are welded to another
	   stream head we need to initiate a close on that stream head as well
	   or at least unweld things. */
	return (0);
}