Example 1
/**
 * \brief Return a packet to where it was allocated.
 */
void PacketFreeOrRelease(Packet *p)
{
    if (p->flags & PKT_ALLOC)
        PacketFree(p);
    else
        PacketPoolReturnPacket(p);
}
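A minimal caller-side sketch (illustrative, not from the source tree): packets obtained from the heap via PacketGetFromAlloc() carry the PKT_ALLOC flag, so PacketFreeOrRelease() hands them to PacketFree(); pool-owned packets go back through PacketPoolReturnPacket().

/* Illustrative sketch: assumes PacketGetFromAlloc() sets PKT_ALLOC,
 * as its pairing with PacketFree() in Example 5 suggests. */
Packet *p = PacketGetFromAlloc();
if (p != NULL) {
    /* ... decode or inspect the packet ... */
    PacketFreeOrRelease(p);   /* PKT_ALLOC is set, so this calls PacketFree() */
}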
Example 2
/** \brief Return packet to Packet pool
 *
 */
void PacketPoolReturnPacket(Packet *p)
{
    PktPool *my_pool = GetThreadPacketPool();

    PACKET_RELEASE_REFS(p);

    PktPool *pool = p->pool;
    if (pool == NULL) {
        PacketFree(p);
        return;
    }
#ifdef DEBUG_VALIDATION
    BUG_ON(pool->initialized == 0);
    BUG_ON(pool->destroyed == 1);
    BUG_ON(my_pool->initialized == 0);
    BUG_ON(my_pool->destroyed == 1);
#endif /* DEBUG_VALIDATION */

    if (pool == my_pool) {
        /* Push back onto this thread's own stack, so no locking. */
        p->next = my_pool->head;
        my_pool->head = p;
    } else {
        PktPool *pending_pool = my_pool->pending_pool;
        if (pending_pool == NULL) {
            /* No pending packet, so store the current packet. */
            my_pool->pending_pool = pool;
            my_pool->pending_head = p;
            my_pool->pending_tail = p;
            my_pool->pending_count = 1;
        } else if (pending_pool == pool) {
            /* Another packet for the pending pool list. */
            p->next = my_pool->pending_head;
            my_pool->pending_head = p;
            my_pool->pending_count++;
            if (SC_ATOMIC_GET(pool->return_stack.sync_now) || my_pool->pending_count > MAX_PENDING_RETURN_PACKETS) {
                /* Return the entire list of pending packets. */
                SCMutexLock(&pool->return_stack.mutex);
                my_pool->pending_tail->next = pool->return_stack.head;
                pool->return_stack.head = my_pool->pending_head;
                SC_ATOMIC_RESET(pool->return_stack.sync_now);
                SCMutexUnlock(&pool->return_stack.mutex);
                SCCondSignal(&pool->return_stack.cond);
                /* Clear the list of pending packets to return. */
                my_pool->pending_pool = NULL;
                my_pool->pending_head = NULL;
                my_pool->pending_tail = NULL;
                my_pool->pending_count = 0;
            }
        } else {
            /* Push onto return stack for this pool */
            SCMutexLock(&pool->return_stack.mutex);
            p->next = pool->return_stack.head;
            pool->return_stack.head = p;
            SC_ATOMIC_RESET(pool->return_stack.sync_now);
            SCMutexUnlock(&pool->return_stack.mutex);
            SCCondSignal(&pool->return_stack.cond);
        }
    }
}
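For context, a simplified sketch (not the actual Suricata implementation) of how the pool-owning thread could drain the return_stack that PacketPoolReturnPacket() pushes onto; the structure members match those used above, while the function name and control flow are assumptions.

/* Simplified sketch only: pop from the thread-local stack when possible,
 * otherwise take the whole list that other threads pushed onto return_stack. */
static Packet *PacketPoolGetLocalOrReturned(PktPool *my_pool)
{
    if (my_pool->head != NULL) {                 /* fast path, no locking */
        Packet *p = my_pool->head;
        my_pool->head = p->next;
        return p;
    }

    SCMutexLock(&my_pool->return_stack.mutex);   /* slow path */
    Packet *p = my_pool->return_stack.head;
    my_pool->return_stack.head = NULL;
    SCMutexUnlock(&my_pool->return_stack.mutex);

    if (p == NULL)
        return NULL;
    my_pool->head = p->next;                     /* keep the rest on the local stack */
    p->next = NULL;
    return p;
}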
Example 3
/*
 * Function:    PacketBufferClean
 *
 * Description:
 *              Used to completely clear out the ring-buffer. This is used
 *              just before generating new statistics using the same
 *              stats object.
 *
 * In Args:
 *
 * Out Args:
 *
 * Scope:
 * Returns:
 * Side Effect:
 */
static I2Boolean
PacketBufferClean(
        I2Datum k   __attribute__((unused)),
        I2Datum v,
        void    *app_data
        )
{
    OWPStats   stats = app_data;
    OWPPacket  node = v.dptr;

    PacketFree(stats,node);

    return True;
}
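PacketBufferClean() has the shape of an I2util hash-iteration callback; a usage sketch might look like the following, where I2HashIterate is assumed to be the I2util iteration entry point and the ptable member name is hypothetical.

/* Hypothetical sketch: walk the packet-record table and free every node
 * before the stats object is reused. Returning True keeps the iteration going. */
I2HashIterate(stats->ptable, PacketBufferClean, (void *)stats);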
Example 4
void PacketPoolDestroy(void)
{
    Packet *p = NULL;
    PktPool *my_pool = GetThreadPacketPool();

#ifdef DEBUG_VALIDATION
    BUG_ON(my_pool->destroyed);
#endif /* DEBUG_VALIDATION */

    if (my_pool && my_pool->pending_pool != NULL) {
        p = my_pool->pending_head;
        while (p) {
            Packet *next_p = p->next;
            PacketFree(p);
            p = next_p;
            my_pool->pending_count--;
        }
#ifdef DEBUG_VALIDATION
        BUG_ON(my_pool->pending_count);
#endif /* DEBUG_VALIDATION */
        my_pool->pending_pool = NULL;
        my_pool->pending_head = NULL;
        my_pool->pending_tail = NULL;
    }

    while ((p = PacketPoolGetPacket()) != NULL) {
        PacketFree(p);
    }

    SC_ATOMIC_DESTROY(my_pool->return_stack.sync_now);

#ifdef DEBUG_VALIDATION
    my_pool->initialized = 0;
    my_pool->destroyed = 1;
#endif /* DEBUG_VALIDATION */
}
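PacketPoolDestroy() tears down the calling thread's pool, so it pairs with a per-thread init at thread start; a hedged sketch, assuming an init counterpart named PacketPoolInit() as in Suricata's tmqh-packetpool.c:

/* Sketch of the per-thread pool lifetime; PacketPoolInit() is assumed here. */
static void *WorkerThreadFunc(void *arg)
{
    PacketPoolInit();        /* set up this thread's PktPool */
    /* ... capture/decode loop that returns packets to the pool ... */
    PacketPoolDestroy();     /* free pending and local packets on exit */
    return NULL;
}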
Example 5
int DecoderParseDataFromFile(char *filename, DecoderFunc Decoder) {
    uint8_t buffer[65536];
    int result = 1;

#ifdef AFLFUZZ_PERSISTANT_MODE
    while (__AFL_LOOP(1000)) {
        /* reset state */
        memset(buffer, 0, sizeof(buffer));
#endif /* AFLFUZZ_PERSISTANT_MODE */

        FILE *fp = fopen(filename, "r");
        BUG_ON(fp == NULL);

        ThreadVars tv;
        memset(&tv, 0, sizeof(tv));
        DecodeThreadVars *dtv = DecodeThreadVarsAlloc(&tv);
        DecodeRegisterPerfCounters(dtv, &tv);
        StatsSetupPrivate(&tv);

        while (1) {
            int done = 0;
            size_t result = fread(&buffer, 1, sizeof(buffer), fp);
            if (result < sizeof(buffer))
                 done = 1;

            Packet *p = PacketGetFromAlloc();
            if (p != NULL) {
                (void) Decoder (&tv, dtv, p, buffer, result, NULL);
                PacketFree(p);
            }

            if (done)
                break;
        }
        DecodeThreadVarsFree(&tv, dtv);

        fclose(fp);

#ifdef AFLFUZZ_PERSISTANT_MODE
    }
#endif /* AFLFUZZ_PERSISTANT_MODE */

    result = 0;
    return result;
}
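A call-site sketch: the harness hands the file contents to a concrete decoder matching the DecoderFunc signature; DecodeEthernet is assumed here, as used by Suricata's AFL entry points.

/* Assumed usage; DecodeEthernet must match the DecoderFunc typedef. */
int r = DecoderParseDataFromFile("input.raw", DecodeEthernet);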
Example 6
static OWPBoolean
PacketBeginFlush(
        OWPStats    stats
        )
{
    OWPPacket   node = stats->pbegin;
    OWPBoolean  keep_parsing = True;

    if(!node){
        OWPError(stats->ctx,OWPErrFATAL,EINVAL,
                "PacketBeginFlush: begin node empty?");
        return False;
    }

    /*
     * Move begin skip to next skip if needed (based on node->seq).
     */
    while(stats->skips && (stats->iskip < (long int)stats->hdr->num_skiprecs) &&
            (node->seq > stats->skips[stats->iskip].end)){
        stats->iskip++;
    }

    /*
     * Check if in "skip" range. If so, then skip aggregation information
     * and flush the packet. (iskip has been forwarded to guarantee the
     * first skip range is the only possible match.)
     */
    if(stats->skips && (stats->iskip < (long int)stats->hdr->num_skiprecs) &&
            (node->seq >= stats->skips[stats->iskip].begin)){
        goto flush;
    }

    /*
     * Loss and Dup Stats Happen Here
     */
    if(node->lost){
        /* count lost packets */
        stats->lost++;
    }
    else if(node->seen){
        /* count dups */
        stats->dups += (node->seen - 1);
    }

flush:

    /* Retain the last scheduled timestamp */
    stats->end_time = node->schedtime;

    if(node->next){
        stats->pbegin = node->next;
    }
    else if((node->seq+1) < stats->last){
        stats->pbegin = PacketGet(stats,node->seq+1);
    }
    else{
        keep_parsing = False;
    }

    PacketFree(stats,node);

    return keep_parsing;
}
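A hypothetical driver loop: since PacketBeginFlush() frees the current begin node and advances stats->pbegin, calling it until it returns False flushes every buffered packet record.

/* Hypothetical usage: flush everything still buffered in the stats object. */
while (PacketBeginFlush(stats))
    ;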
Example 7
VOID
LpxTransferDataComplete(
	IN NDIS_HANDLE   ProtocolBindingContext,
	IN PNDIS_PACKET  Packet,
	IN NDIS_STATUS   Status,
	IN UINT          BytesTransfered
	)
{
	PDEVICE_CONTEXT	pDeviceContext;
	PLPX_HEADER	lpxHeader;
	PNDIS_BUFFER	firstBuffer;	
	PUCHAR			packetData;
	UINT			packetDataLength;
	USHORT			lpxHeaderSize;


	UNREFERENCED_PARAMETER( BytesTransfered );

	pDeviceContext = (PDEVICE_CONTEXT)ProtocolBindingContext;

	if (Status != NDIS_STATUS_SUCCESS) {

		ASSERT( FALSE );
		DebugPrint( 1,  ("[LPX] LpxTransferDataComplete error %x\n", Status) );
		PacketFree( pDeviceContext, Packet );
		return;
	}

	if (RESERVED(Packet)->HeaderCopied == FALSE) {
	
		NdisQueryPacket( Packet, NULL, NULL, &firstBuffer, NULL );
		NdisQueryBufferSafe( firstBuffer, &packetData, &packetDataLength, HighPagePriority );

		lpxHeader = (PLPX_HEADER)(packetData + RESERVED(Packet)->PacketRawDataOffset);

		lpxHeaderSize = sizeof(LPX_HEADER);

#if __LPX_OPTION_ADDRESSS__

		if (FlagOn(lpxHeader->Option, LPX_OPTION_SOURCE_ADDRESS)) {

			lpxHeaderSize += ETHERNET_ADDRESS_LENGTH;
		}

		if (FlagOn(lpxHeader->Option, LPX_OPTION_DESTINATION_ADDRESS)) {

			lpxHeaderSize += ETHERNET_ADDRESS_LENGTH;
		}

#endif

		RtlCopyMemory( &RESERVED(Packet)->LpxHeader, lpxHeader, lpxHeaderSize );
		RESERVED(Packet)->HeaderCopied = TRUE;

		RESERVED(Packet)->PacketRawDataLength = RESERVED(Packet)->PacketRawDataOffset + NTOHS(lpxHeader->PacketSize & ~LPX_TYPE_MASK);
		RESERVED(Packet)->PacketRawDataOffset += lpxHeaderSize;
	}
	
	lpxHeaderSize = sizeof(LPX_HEADER);

#if __LPX_OPTION_ADDRESSS__

	if (FlagOn(RESERVED(Packet)->LpxHeader.Option, LPX_OPTION_SOURCE_ADDRESS)) {

		if (!FlagOn(RESERVED(Packet)->LpxHeader.Option, LPX_OPTION_DESTINATION_ADDRESS)) {
		
			RtlCopyMemory( RESERVED(Packet)->OptionSourceAddress,
						   RESERVED(Packet)->OptionDestinationAddress,
						   ETHERNET_ADDRESS_LENGTH );	
		}

		lpxHeaderSize += ETHERNET_ADDRESS_LENGTH;

		ASSERT( RtlEqualMemory(RESERVED(Packet)->EthernetHeader.SourceAddress,
							   RESERVED(Packet)->OptionSourceAddress,
							   ETHERNET_ADDRESS_LENGTH) );
	}

	if (FlagOn(RESERVED(Packet)->LpxHeader.Option, LPX_OPTION_DESTINATION_ADDRESS)) {

		lpxHeaderSize += ETHERNET_ADDRESS_LENGTH;

		ASSERT( RtlEqualMemory(RESERVED(Packet)->EthernetHeader.DestinationAddress,
							   RESERVED(Packet)->OptionDestinationAddress,
							   ETHERNET_ADDRESS_LENGTH) );
	}

#endif

	if (NTOHS(RESERVED(Packet)->LpxHeader.PacketSize & ~LPX_TYPE_MASK) - lpxHeaderSize != 
		RESERVED(Packet)->PacketRawDataLength - RESERVED(Packet)->PacketRawDataOffset) {

		ASSERT( FALSE );
		PacketFree( pDeviceContext, Packet );
		return;
	}		

	ExInterlockedInsertTailList( &pDeviceContext->PacketInProgressList,
								 &(RESERVED(Packet)->ListEntry),
								 &pDeviceContext->PacketInProgressQSpinLock );
	return;
}