Example no. 1
0
/*
 * Allocate one row from the slice; debug builds add guard-word,
 * write-after-free, free-size and concurrency checks around this path.
 */
void *
slice_alloc(slice_t *slice, sa_size_t alloc_size)
{
	if (slice->sa->flags & SMALL_ALLOC) {
		small_allocatable_row_t *row = slice_small_get_row(slice);
		
		if (row) {
			slice->alloc_count++;
		}
		
		return (void*)(row);
	} else {
		
#ifdef SLICE_CHECK_THREADS
		Boolean res = OSCompareAndSwap64(0, 1, &slice->semaphore);
		if (!res) {
			REPORT0("slice_alloc - thread already present\n");
		}
#endif /* SLICE_CHECK_THREADS */
		
		allocatable_row_t *row = slice_get_row(slice);
		if (row) {
#ifdef SLICE_CHECK_ROW_HEADERS
			if (row->prefix != ROW_HEADER_GUARD ||
				row->suffix != ROW_HEADER_GUARD) {
				REPORT0("slice_alloc - detected corrupted row "
						"header\n");
			}
#endif /* SLICE_CHECK_ROW_HEADERS */
			
#ifdef SLICE_CHECK_FREE_SIZE
			row->allocated_bytes = alloc_size;
#endif /* SLICE_CHECK_FREE_SIZE */
			
#ifdef SLICE_CHECK_WRITE_AFTER_FREE
			if (!slice_row_is_poisoned(slice, row)) {
				REPORT("slice_alloc - write after free detected - sa "
					   "size %llu\n", slice->sa->max_alloc_size);
			}
#endif /* SLICE_CHECK_WRITE_AFTER_FREE */
			
#ifdef SLICE_CHECK_BOUNDS_WRITE
			slice_poison_row(slice, row);
#endif /* SLICE_CHECK_BOUNDS_WRITE */
			
			slice->alloc_count++;
			row++;
		}
		
#ifdef SLICE_CHECK_THREADS
		if (res) {
			OSDecrementAtomic64(&slice->semaphore);
		}
#endif /* SLICE_CHECK_THREADS */
		
		return ((void *)row);
	}
}
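The allocatable_row_t layout is not part of this excerpt. As a purely illustrative sketch, inferred only from the fields the debug checks touch (prefix, suffix, allocated_bytes) and from the fact that slice_alloc returns the memory just past the header (the row++ before returning), the debug-build row header could look roughly like this, assuming the usual kernel integer typedefs; the free-list link is an assumption:

/*
 * Illustrative sketch only - not the original definition. sa_size_t is
 * assumed to be an unsigned 64-bit size type; the "next" link is assumed.
 */
typedef struct allocatable_row {
#ifdef SLICE_CHECK_ROW_HEADERS
	uint64_t prefix;                  /* expected to hold ROW_HEADER_GUARD */
#endif
	struct allocatable_row *next;     /* assumed free-list link */
#ifdef SLICE_CHECK_FREE_SIZE
	sa_size_t allocated_bytes;        /* size recorded by slice_alloc */
#endif
#ifdef SLICE_CHECK_ROW_HEADERS
	uint64_t suffix;                  /* expected to hold ROW_HEADER_GUARD */
#endif
} allocatable_row_t;

With a layout along these lines, the row++ in slice_alloc lands on the first byte after the header, which is the pointer the caller receives.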
Example no. 2
0
void
m_do_tx_compl_callback(struct mbuf *m, struct ifnet *ifp)
{
	int i;

	if (m == NULL)
		return;

	if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0)
		return;

#if (DEBUG || DEVELOPMENT)
	if (mbuf_tx_compl_debug != 0 && ifp != NULL &&
	    (ifp->if_xflags & IFXF_TIMESTAMP_ENABLED) != 0 &&
	    (m->m_pkthdr.pkt_flags & PKTF_DRV_TS_VALID) == 0) {
		struct timespec now;

		nanouptime(&now);
		net_timernsec(&now, &m->m_pkthdr.pkt_timestamp);
	}
#endif /* (DEBUG || DEVELOPMENT) */

	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
		mbuf_tx_compl_func callback;

		if ((m->m_pkthdr.pkt_compl_callbacks & (1 << i)) == 0)
			continue;

		lck_rw_lock_shared(mbuf_tx_compl_tbl_lock);
		callback = mbuf_tx_compl_table[i];
		lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock);

		if (callback != NULL) {
			callback(m->m_pkthdr.pkt_compl_context,
			    ifp, m->m_pkthdr.pkt_timestamp,
			    m->m_pkthdr.drv_tx_compl_arg,
			    m->m_pkthdr.drv_tx_compl_data,
			    m->m_pkthdr.drv_tx_status);
		}
	}
	m->m_pkthdr.pkt_compl_callbacks = 0;

#if (DEBUG || DEVELOPMENT)
	if (mbuf_tx_compl_debug != 0) {
		OSDecrementAtomic64(&mbuf_tx_compl_outstanding);
		if (ifp == NULL)
			atomic_add_64(&mbuf_tx_compl_aborted, 1);
	}
#endif /* (DEBUG || DEVELOPMENT) */
}
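For reference, a completion handler compatible with the invocation above would look roughly like the sketch below. The parameter list is inferred from the arguments passed at the call site (pkt_compl_context, ifp, pkt_timestamp, drv_tx_compl_arg, drv_tx_compl_data, drv_tx_status); the exact mbuf_tx_compl_func typedef and the registration path are not shown in this excerpt, so treat the types as assumptions.

/*
 * Sketch of a handler matching the call in m_do_tx_compl_callback().
 * Argument types are inferred from the call site and may differ from the
 * real mbuf_tx_compl_func typedef.
 */
static void
example_tx_compl(uintptr_t pktid, struct ifnet *ifp, u_int64_t timestamp,
    uintptr_t tx_compl_arg, uintptr_t tx_compl_data, u_int32_t tx_status)
{
	/*
	 * pktid carries pkt_compl_context; timestamp carries pkt_timestamp,
	 * which the DEBUG/DEVELOPMENT block above fills with nanouptime()
	 * when the driver did not supply its own timestamp.
	 */
	(void)tx_compl_arg;	/* driver-specific; unused in this sketch */
	(void)tx_compl_data;

	printf("tx completion: pktid=0x%lx ifp=%p ts=%llu status=%u\n",
	    (unsigned long)pktid, (void *)ifp, (unsigned long long)timestamp,
	    tx_status);
}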
Example no. 3
0
/*
 * Return a row to the slice free list; debug builds verify the guard
 * words, bounds and recorded allocation size, then poison the row.
 */
void
slice_free_row(slice_t *slice, allocatable_row_t *row, sa_size_t alloc_size)
{
#ifdef SLICE_CHECK_THREADS
	Boolean res = OSCompareAndSwap64(0, 1, &slice->semaphore);
	if (!res) {
		REPORT0("slice_free_row - thread already present\n");
	}
#endif /* SLICE_CHECK_THREADS */
	
	slice->alloc_count--;
	
#ifdef SLICE_CHECK_ROW_HEADERS
	if (row->prefix != ROW_HEADER_GUARD ||
	    row->suffix != ROW_HEADER_GUARD) {
		REPORT0("slice_free_row - detected corrupted row header\n");
	}
#endif /* SLICE_CHECK_ROW_HEADERS */
	
#ifdef SLICE_CHECK_BOUNDS_WRITE
	if (!slice_row_is_within_bounds(slice, row)) {
		REPORT("slice_free_row - write outside of allocated memory "
			   "detected alloc_size = %llu\n", row->allocated_bytes);
	}
#endif /* SLICE_CHECK_BOUNDS_WRITE */
	
#ifdef SLICE_CHECK_FREE_SIZE
	if (row->allocated_bytes != alloc_size) {
		REPORT("slice_free_row - free of %llu bytes when allcated %llu",
			   alloc_size, row->allocated_bytes);
	}
	row->allocated_bytes = 0;
#endif /* SLICE_CHECK_FREE_SIZE */
	
#ifdef SLICE_CHECK_WRITE_AFTER_FREE
	slice_poison_row(slice, row);
#endif /* SLICE_CHECK_WRITE_AFTER_FREE */
	
	slice_insert_free_row(slice, row);
	
#ifdef SLICE_CHECK_THREADS
	if (res) {
		OSDecrementAtomic64(&slice->semaphore);
	}
#endif /* SLICE_CHECK_THREADS */
}
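Because slice_alloc returns the address just past the row header (the row++ before returning), a caller-facing free presumably steps back one allocatable_row_t to recover the header before calling slice_free_row. The wrapper below is a guess at that glue and is not taken from the original source; the SMALL_ALLOC path seen in slice_alloc would need its own handling.

/*
 * Hypothetical wrapper, not from the original source: undo the row++
 * performed in slice_alloc so slice_free_row() sees the row header again.
 * Small allocations (SMALL_ALLOC) are returned as-is by slice_alloc and
 * are not covered by this sketch.
 */
static void
slice_free(slice_t *slice, void *buf, sa_size_t alloc_size)
{
	allocatable_row_t *row = ((allocatable_row_t *)buf) - 1;

	slice_free_row(slice, row, alloc_size);
}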