Example #1
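/*
 * Drop the per-owner spin lock taken by pmclog_reserve().  Before
 * unlocking, verify the buffer pointer invariants and schedule the
 * current buffer for I/O if it has been filled.
 */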
static void
pmclog_release(struct pmc_owner *po)
{
	KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base,
	    ("[pmclog,%d] buffer invariants po=%p ptr=%p base=%p", __LINE__,
		po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base));
	KASSERT(po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
	    ("[pmclog,%d] buffer invariants po=%p ptr=%p fenc=%p", __LINE__,
		po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_fence));

	/* schedule an I/O if we've filled a buffer */
	if (po->po_curbuf->plb_ptr >= po->po_curbuf->plb_fence)
		pmclog_schedule_io(po);

	mtx_unlock_spin(&po->po_mtx);

	PMCDBG(LOG,REL,1, "po=%p", po);
}
Example #2
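/*
 * Flush the owner's log: return any error already recorded by the
 * logger thread, check that the owner actually has an active log file,
 * and schedule the current buffer for I/O if it contains data
 * (otherwise report ENOBUFS).
 */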
int
pmclog_flush(struct pmc_owner *po)
{
	int error;
	struct pmclog_buffer *lb;

	PMCDBG(LOG,FLS,1, "po=%p", po);

	/*
	 * If there is a pending error recorded by the logger thread,
	 * return that.
	 */
	if (po->po_error)
		return (po->po_error);

	error = 0;

	/*
	 * Check that we do have an active log file.
	 */
	mtx_lock(&pmc_kthread_mtx);
	if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0) {
		error = EINVAL;
		goto error;
	}

	/*
	 * Schedule the current buffer if any and not empty.
	 */
	mtx_lock_spin(&po->po_mtx);
	lb = po->po_curbuf;
	if (lb && lb->plb_ptr != lb->plb_base) {
		pmclog_schedule_io(po);
	} else
		error = ENOBUFS;
	mtx_unlock_spin(&po->po_mtx);

 error:
	mtx_unlock(&pmc_kthread_mtx);

	return (error);
}
Example #3
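/*
 * Reserve 'length' bytes in the owner's current log buffer.  Takes
 * po->po_mtx, allocates or swaps in a fresh buffer as needed, stamps
 * the record with the current time, and returns a pointer to the
 * reserved space with the spin lock still held (NULL on shutdown or
 * when no buffer is available).
 */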
static uint32_t *
pmclog_reserve(struct pmc_owner *po, int length)
{
	uintptr_t newptr, oldptr;
	uint32_t *lh;
	struct timespec ts;

	PMCDBG(LOG,ALL,1, "po=%p len=%d", po, length);

	KASSERT(length % sizeof(uint32_t) == 0,
	    ("[pmclog,%d] length not a multiple of word size", __LINE__));

	mtx_lock_spin(&po->po_mtx);

	/* No more data when shutdown in progress. */
	if (po->po_flags & PMC_PO_SHUTDOWN) {
		mtx_unlock_spin(&po->po_mtx);
		return (NULL);
	}

	if (po->po_curbuf == NULL)
		if (pmclog_get_buffer(po) != 0) {
			mtx_unlock_spin(&po->po_mtx);
			return (NULL);
		}

	KASSERT(po->po_curbuf != NULL,
	    ("[pmclog,%d] po=%p no current buffer", __LINE__, po));

	KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base &&
	    po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
	    ("[pmclog,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p",
		__LINE__, po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base,
		po->po_curbuf->plb_fence));

	oldptr = (uintptr_t) po->po_curbuf->plb_ptr;
	newptr = oldptr + length;

	KASSERT(oldptr != (uintptr_t) NULL,
	    ("[pmclog,%d] po=%p Null log buffer pointer", __LINE__, po));

	/*
	 * If we have space in the current buffer, return a pointer to
	 * available space with the PO structure locked.
	 */
	if (newptr <= (uintptr_t) po->po_curbuf->plb_fence) {
		po->po_curbuf->plb_ptr = (char *) newptr;
		goto done;
	}

	/*
	 * Otherwise, schedule the current buffer for output and get a
	 * fresh buffer.
	 */
	pmclog_schedule_io(po);

	if (pmclog_get_buffer(po) != 0) {
		mtx_unlock_spin(&po->po_mtx);
		return (NULL);
	}

	KASSERT(po->po_curbuf != NULL,
	    ("[pmclog,%d] po=%p no current buffer", __LINE__, po));

	KASSERT(po->po_curbuf->plb_ptr != NULL,
	    ("[pmclog,%d] null return from pmc_get_log_buffer", __LINE__));

	KASSERT(po->po_curbuf->plb_ptr == po->po_curbuf->plb_base &&
	    po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
	    ("[pmclog,%d] po=%p buffer invariants: ptr=%p base=%p fence=%p",
		__LINE__, po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base,
		po->po_curbuf->plb_fence));

	oldptr = (uintptr_t) po->po_curbuf->plb_ptr;

 done:
	lh = (uint32_t *) oldptr;
	lh++;				/* skip header */
	getnanotime(&ts);		/* fill in the timestamp */
	*lh++ = ts.tv_sec & 0xFFFFFFFF;
	*lh++ = ts.tv_nsec & 0xFFFFFFF;
	return ((uint32_t *) oldptr);
}
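Taken together, Examples #1 and #3 form a reserve/release pair: pmclog_reserve() hands back a pointer into the current buffer with po->po_mtx held, and pmclog_release() drops the lock once the record has been filled in. The sketch below shows how a caller might use that pair; the MYLOG_HEADER value and the payload layout beyond the header and timestamp words are assumptions made for illustration, not part of the quoted code (FreeBSD's hwpmc wraps this pattern in macros such as PMCLOG_RESERVE() and PMCLOG_DESPATCH()).

/*
 * Hypothetical caller: emit one fixed-size record.  MYLOG_HEADER and
 * the payload layout are made up for this sketch.
 */
#define	MYLOG_HEADER	0xdead0010	/* hypothetical header word */

static void
mylog_emit(struct pmc_owner *po, uint32_t payload)
{
	uint32_t *le;
	const int len = 4 * sizeof(uint32_t);	/* header + 2 timestamp words + payload */

	/* On success po->po_mtx is held (spin-locked). */
	if ((le = pmclog_reserve(po, len)) == NULL)
		return;			/* shutting down or out of buffers */

	le[0] = MYLOG_HEADER;		/* header word skipped by pmclog_reserve() */
	/* le[1] and le[2] already hold the timestamp written by pmclog_reserve(). */
	le[3] = payload;

	/* Schedule I/O if the buffer filled up, then drop po->po_mtx. */
	pmclog_release(po);
}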