/*
 * Create a minimal queue with just enough fields filled in to support
 * canput(9F), putq(9F), and getq_noenab(9F).  We set QNOENB to ensure
 * that the queue will never be enabled.
 */
static queue_t *
log_makeq(size_t lowat, size_t hiwat, void *ibc)
{
	queue_t *q;

	q = kmem_zalloc(sizeof (queue_t), KM_SLEEP);
	q->q_stream = &log_fakestr;
	q->q_flag = QISDRV | QMTSAFE | QNOENB | QREADR | QUSE;
	q->q_nfsrv = q;
	q->q_lowat = lowat;
	q->q_hiwat = hiwat;
	mutex_init(QLOCK(q), NULL, MUTEX_DRIVER, ibc);

	return (q);
}
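/*
 * A minimal usage sketch (illustrative, not from the source above): a
 * queue built by log_makeq() can act as a free-standing holding queue.
 * Flow control comes from canput(9F) against the q_hiwat set above, and
 * QNOENB guarantees putq(9F) never schedules a service procedure.  The
 * queue pointer and helper names here are assumptions for illustration.
 */
static queue_t *log_holdq;	/* hypothetical: log_holdq = log_makeq(0, 1024, NULL); */

static void
log_holdq_put(mblk_t *mp)
{
	if (!canput(log_holdq)) {	/* above q_hiwat: drop the message */
		freemsg(mp);
		return;
	}
	(void) putq(log_holdq, mp);	/* QNOENB: queue is never enabled */
}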
long
netlogread(Fs *f, void *a, ulong, long n)
{
	int i, d;
	char *p, *rptr;

	qlock(f->alog);
	if(waserror()){
		qunlock(f->alog);
		nexterror();
	}

	for(;;){
		lock(f->alog);
		if(f->alog->len){
			if(n > f->alog->len)
				n = f->alog->len;
			d = 0;
			rptr = f->alog->rptr;
			f->alog->rptr += n;
			if(f->alog->rptr >= f->alog->end){
				d = f->alog->rptr - f->alog->end;
				f->alog->rptr = f->alog->buf + d;
			}
			f->alog->len -= n;
			unlock(f->alog);

			i = n-d;
			p = a;
			memmove(p, rptr, i);
			memmove(p+i, f->alog->buf, d);
			break;
		}
		else
			unlock(f->alog);
		sleep(&f->alog->rendez, netlogready, f);
	}

	qunlock(f->alog);
	poperror();

	return n;
}
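/*
 * A standalone sketch (assumed names; not part of the source above) of
 * the wraparound read that netlogread() performs: the read pointer is
 * advanced by n, and if it runs past the end of the ring, the overshoot
 * d is satisfied from the start of the buffer, so the copy out is at
 * most two memmoves.  Locking and blocking are omitted.
 */
#include <string.h>

typedef struct Ring Ring;
struct Ring {
	char	*buf;		/* start of ring storage */
	char	*end;		/* buf + size */
	char	*rptr;		/* next byte to read */
	long	len;		/* bytes available */
};

long
ringread(Ring *r, char *p, long n)
{
	long d;
	char *rptr;

	if(n > r->len)
		n = r->len;
	d = 0;
	rptr = r->rptr;
	r->rptr += n;
	if(r->rptr >= r->end){		/* wrapped: d bytes live at buf */
		d = r->rptr - r->end;
		r->rptr = r->buf + d;
	}
	r->len -= n;
	memmove(p, rptr, n-d);		/* tail segment, up to end */
	memmove(p+(n-d), r->buf, d);	/* wrapped segment, from buf */
	return n;
}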
/*
 * wrapper for qi_putp entry in module ops vec.
 * implements asynchronous putnext().
 * Note that, unlike putnext(), this routine is NOT optimized for the
 * fastpath.  Calling this routine will grab whatever locks are necessary
 * to protect the stream head, q_next, and syncq's.
 * And since it is in the normal locks path, we do not use putlocks if
 * they exist (though this can be changed by swapping the value of
 * UseFastlocks).
 */
void
put(queue_t *qp, mblk_t *mp)
{
	queue_t		*fqp = qp; /* For strft tracing */
	syncq_t		*sq;
	uint16_t	flags;
	uint16_t	drain_mask;
	struct qinit	*qi;
	int		(*putproc)();
	int		ix;
	boolean_t	queued = B_FALSE;
	kmutex_t	*sqciplock = NULL;
	ushort_t	*sqcipcount = NULL;

	TRACE_2(TR_FAC_STREAMS_FR, TR_PUT_START,
	    "put:(%X, %X)", qp, mp);
	ASSERT(mp->b_datap->db_ref != 0);
	ASSERT(mp->b_next == NULL && mp->b_prev == NULL);

	sq = qp->q_syncq;
	ASSERT(sq != NULL);
	qi = qp->q_qinfo;

	if (UseFastlocks && sq->sq_ciputctrl != NULL) {
		/* fastlock: */
		ASSERT(sq->sq_flags & SQ_CIPUT);
		ix = CPU->cpu_seqid & sq->sq_nciputctrl;
		sqciplock = &sq->sq_ciputctrl[ix].ciputctrl_lock;
		sqcipcount = &sq->sq_ciputctrl[ix].ciputctrl_count;
		mutex_enter(sqciplock);
		if (!((*sqcipcount) & SQ_FASTPUT) ||
		    (sq->sq_flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS))) {
			mutex_exit(sqciplock);
			sqciplock = NULL;
			goto slowlock;
		}
		(*sqcipcount)++;
		ASSERT(*sqcipcount != 0);
		queued = qp->q_sqflags & Q_SQQUEUED;
		mutex_exit(sqciplock);
	} else {
	slowlock:
		ASSERT(sqciplock == NULL);
		mutex_enter(SQLOCK(sq));
		flags = sq->sq_flags;
		/*
		 * We are going to drop SQLOCK, so make a claim to prevent syncq
		 * from closing.
		 */
		sq->sq_count++;
		ASSERT(sq->sq_count != 0);		/* Wraparound */
		/*
		 * If there are writers or exclusive waiters, there is not much
		 * we can do.  Place the message on the syncq and schedule a
		 * background thread to drain it.
		 *
		 * Also if we are approaching end of stack, fill the syncq and
		 * switch processing to a background thread - see comments on
		 * top.
		 */
		if ((flags & (SQ_STAYAWAY|SQ_EXCL|SQ_EVENTS)) ||
		    (sq->sq_needexcl != 0) || PUT_STACK_NOTENOUGH()) {

			TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
			    "putnext_end:(%p, %p, %p) SQ_EXCL fill",
			    qp, mp, sq);

			/*
			 * NOTE: qfill_syncq will need QLOCK. It is safe to drop
			 * SQLOCK because positive sq_count keeps the syncq from
			 * closing.
			 */
			mutex_exit(SQLOCK(sq));
			qfill_syncq(sq, qp, mp);
			/*
			 * NOTE: after the call to qfill_syncq() qp may be
			 * closed, both qp and sq should not be referenced at
			 * this point.
			 *
			 * This ASSERT is located here to prevent stack frame
			 * consumption in the DEBUG code.
			 */
			ASSERT(sqciplock == NULL);
			return;
		}

		queued = qp->q_sqflags & Q_SQQUEUED;
		/*
		 * If not a concurrent perimeter, we need to acquire
		 * it exclusively.  It could not have been previously
		 * set since we held the SQLOCK before testing
		 * SQ_GOAWAY above (which includes SQ_EXCL).
		 * We do this here because we hold the SQLOCK, and need
		 * to make this state change BEFORE dropping it.
		 */
		if (!(flags & SQ_CIPUT)) {
			ASSERT((sq->sq_flags & SQ_EXCL) == 0);
			ASSERT(!(sq->sq_type & SQ_CIPUT));
			sq->sq_flags |= SQ_EXCL;
		}
		mutex_exit(SQLOCK(sq));
	}

	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));

	/*
	 * We now have a claim on the syncq, we are either going to
	 * put the message on the syncq and then drain it, or we are
	 * going to call the putproc().
	 */
	putproc = qi->qi_putp;
	if (!queued) {
		STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
		    mp->b_datap->db_base);
		(*putproc)(qp, mp);
		ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
		ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
	} else {
		mutex_enter(QLOCK(qp));
		/*
		 * If there are no messages in front of us, just call putproc(),
		 * otherwise enqueue the message and drain the queue.
		 */
		if (qp->q_syncqmsgs == 0) {
			mutex_exit(QLOCK(qp));
			STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT, mp->b_rptr -
			    mp->b_datap->db_base);
			(*putproc)(qp, mp);
			ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
		} else {
			/*
			 * We are doing a fill with the intent to
			 * drain (meaning we are filling because
			 * there are messages in front of us and we
			 * need to preserve message ordering).
			 * Therefore, put the message on the queue
			 * and call qdrain_syncq (must be done with
			 * the QLOCK held).
			 */
			STR_FTEVENT_MSG(mp, fqp, FTEV_PUTNEXT,
			    mp->b_rptr - mp->b_datap->db_base);
#ifdef DEBUG
			/*
			 * These two values were in the original code for
			 * all syncq messages.  This is unnecessary in
			 * the current implementation, but was retained
			 * in debug mode as it is useful to know where
			 * problems occur.
			 */
			mp->b_queue = qp;
			mp->b_prev = (mblk_t *)putproc;
#endif
			SQPUT_MP(qp, mp);
			qdrain_syncq(sq, qp);
			ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
		}
	}
	/*
	 * Before we release our claim, we need to see if any
	 * events were posted.  If the syncq is SQ_EXCL && SQ_QUEUED,
	 * we were responsible for going exclusive and, therefore,
	 * are responsible for draining.
	 */
	if (sq->sq_flags & (SQ_EXCL)) {
		drain_mask = 0;
	} else {
		drain_mask = SQ_QUEUED;
	}

	if (sqciplock != NULL) {
		mutex_enter(sqciplock);
		flags = sq->sq_flags;
		ASSERT(flags & SQ_CIPUT);
		/* SQ_EXCL could have been set by qwriter_inner */
		if ((flags & (SQ_EXCL|SQ_TAIL)) || sq->sq_needexcl) {
			/*
			 * we need SQLOCK to handle
			 * wakeups/drains/flags change.  sqciplock
			 * is needed to decrement sqcipcount.
			 * SQLOCK has to be grabbed before sqciplock
			 * for lock ordering purposes.
			 * after sqcipcount is decremented some lock
			 * still needs to be held to make sure
			 * syncq won't get freed on us.
			 *
			 * To prevent deadlocks we try to grab SQLOCK and if it
			 * is held already we drop sqciplock, acquire SQLOCK and
			 * reacquire sqciplock again.
			 */
			if (mutex_tryenter(SQLOCK(sq)) == 0) {
				mutex_exit(sqciplock);
				mutex_enter(SQLOCK(sq));
				mutex_enter(sqciplock);
			}
			flags = sq->sq_flags;
			ASSERT(*sqcipcount != 0);
			(*sqcipcount)--;
			mutex_exit(sqciplock);
		} else {
			ASSERT(*sqcipcount != 0);
			(*sqcipcount)--;
			mutex_exit(sqciplock);
			TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
			    "putnext_end:(%p, %p, %p) done", qp, mp, sq);
			return;
		}
	} else {
		mutex_enter(SQLOCK(sq));
		flags = sq->sq_flags;
		ASSERT(sq->sq_count != 0);
		sq->sq_count--;
	}

	if ((flags & (SQ_TAIL)) || sq->sq_needexcl) {
		putnext_tail(sq, qp, (flags & ~drain_mask));
		/*
		 * The only purpose of this ASSERT is to preserve calling stack
		 * in DEBUG kernel.
		 */
		ASSERT(sq != NULL);
		return;
	}
	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)) || queued);
	ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) || queued);
	/*
	 * Safe to always drop SQ_EXCL:
	 *	Not SQ_CIPUT means we set SQ_EXCL above
	 *	For SQ_CIPUT SQ_EXCL will only be set if the put
	 *	procedure did a qwriter(INNER) in which case
	 *	nobody else is in the inner perimeter and we
	 *	are exiting.
	 *
	 * I would like to make the following assertion:
	 *
	 *	ASSERT((flags & (SQ_EXCL|SQ_CIPUT)) != (SQ_EXCL|SQ_CIPUT) ||
	 *	    sq->sq_count == 0);
	 *
	 * which indicates that if we are both putshared and exclusive,
	 * we became exclusive while executing the putproc, and the only
	 * claim on the syncq was the one we dropped a few lines above.
	 * But other threads that enter putnext while the syncq is exclusive
	 * need to make a claim as they may need to drop SQLOCK in the
	 * has_writers case to avoid deadlocks.  If these threads are
	 * delayed or preempted, it is possible that the writer thread can
	 * find out that there are other claims making the (sq_count == 0)
	 * test invalid.
	 */

	sq->sq_flags = flags & ~SQ_EXCL;
	mutex_exit(SQLOCK(sq));
	TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
	    "putnext_end:(%p, %p, %p) done", qp, mp, sq);
}
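/*
 * A simplified sketch (assumed types and names, not from the source
 * above) of the claim discipline described in the comments in put():
 * a thread bumps a count under the syncq lock so the perimeter cannot
 * be freed while the lock is dropped around the put procedure, and a
 * non-concurrent entry additionally sets an exclusivity flag before
 * releasing the lock.
 */
#define	SK_EXCL	0x0001		/* stands in for SQ_EXCL */

typedef struct sketch_sq {
	kmutex_t	sk_lock;	/* stands in for SQLOCK(sq) */
	uint_t		sk_count;	/* outstanding claims */
	uint16_t	sk_flags;	/* exclusivity bit */
} sketch_sq_t;

static void
sketch_enter(sketch_sq_t *sq, boolean_t concurrent)
{
	mutex_enter(&sq->sk_lock);
	sq->sk_count++;			/* claim: syncq stays alive */
	if (!concurrent)
		sq->sk_flags |= SK_EXCL;	/* exclusive entry */
	mutex_exit(&sq->sk_lock);
	/* now safe to call the put procedure with no locks held */
}

static void
sketch_exit(sketch_sq_t *sq, boolean_t concurrent)
{
	mutex_enter(&sq->sk_lock);
	ASSERT(sq->sk_count != 0);
	sq->sk_count--;			/* release the claim */
	if (!concurrent)
		sq->sk_flags &= ~SK_EXCL;
	mutex_exit(&sq->sk_lock);
}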