/** fmSetProcessEventMask
 * \ingroup api
 *
 * \desc            Set which events are delivered to the current process's
 *                  event handler function (set by ''fmInitialize'' or
 *                  ''fmSetEventHandler'').
 *                                                                      \lb\lb
 *                  Multiple processes may subscribe to the same event. A
 *                  copy of each event will be sent to all processes that
 *                  subscribe to it.
 *
 * \param[in]       mask is a logical OR of ''Event Identifiers''.
 *
 * \return          FM_OK if successful.
 *
 *****************************************************************************/
fm_status fmSetProcessEventMask(fm_uint32 mask)
{
    fm_status         err;
    fm_dlist_node *   node;
    fm_uint           count = 0;
    fm_localDelivery *delivery;
    fm_int            myProcessId;
    fm_uint           expectedCount;

    FM_LOG_ENTRY_API(FM_LOG_CAT_API, "mask=%u\n", mask);

    myProcessId = fmGetCurrentProcessId();

    err = fmCaptureLock(&fmRootApi->localDeliveryLock, FM_WAIT_FOREVER);

    if (err == FM_OK)
    {
        /* Walk the whole delivery list, updating the mask on every entry
         * that belongs to the calling process, and count the entries so
         * we can sanity-check the list against the recorded count. */
        expectedCount = fmRootApi->localDeliveryCount;

        for ( node = FM_DLL_GET_FIRST( (&fmRootApi->localDeliveryThreads), head ) ;
              node != NULL ;
              node = FM_DLL_GET_NEXT(node, next) )
        {
            count++;
            delivery = (fm_localDelivery *) node->data;

            if (delivery->processId == myProcessId)
            {
                delivery->mask = mask;
            }
        }

        err = fmReleaseLock(&fmRootApi->localDeliveryLock);

        if (count != expectedCount)
        {
            /* Bug fix: expectedCount and count are fm_uint, so the
             * conversion specifier must be %u, not %d (a signed/unsigned
             * printf mismatch is undefined behavior). */
            FM_LOG_ERROR(FM_LOG_CAT_EVENT,
                         "Expected %u processes but found %u\n",
                         expectedCount,
                         count);
            err = FM_FAIL;
        }

    }   /* end if (err == FM_OK) */

    FM_LOG_EXIT_API(FM_LOG_CAT_API, err);

}   /* end fmSetProcessEventMask */
/** fmRemoveEventHandler
 * \ingroup intApi
 *
 * \desc            Remove a local event handler from the list of handlers.
 *
 * \param[out]      delivery is a pointer to a pointer to a
 *                  ''fm_localDelivery'' structure. If a delivery structure
 *                  is found for the calling process then a pointer to it
 *                  is returned.
 *
 * \return          FM_OK if successful.
 * \return          FM_ERR_NOT_FOUND if there is no delivery structure for
 *                  the calling process.
 *
 *****************************************************************************/
fm_status fmRemoveEventHandler(fm_localDelivery ** delivery)
{
    fm_status         status;
    fm_dlist_node *   entry;
    fm_localDelivery *match = NULL;
    fm_int            pid;

    FM_LOG_ENTRY_API(FM_LOG_CAT_API, "delivery=%p\n", (void *) delivery);

    pid = fmGetCurrentProcessId();

    status = fmCaptureLock(&fmRootApi->localDeliveryLock, FM_WAIT_FOREVER);

    if (status == FM_OK)
    {
        /* Scan the delivery list for the entry owned by this process. */
        entry = FM_DLL_GET_FIRST( (&fmRootApi->localDeliveryThreads), head );

        while (entry != NULL)
        {
            match = (fm_localDelivery *) entry->data;

            if (match->processId == pid)
            {
                break;
            }

            entry = FM_DLL_GET_NEXT(entry, next);
        }

        if (entry != NULL)
        {
            /* Unlink the node and hand ownership of the delivery
             * structure back to the caller. */
            fmDListRemove(&fmRootApi->localDeliveryThreads, entry);
            *delivery = match;
            fmRootApi->localDeliveryCount--;
        }
        else
        {
            status = FM_ERR_NOT_FOUND;
        }

        (void) fmReleaseLock(&fmRootApi->localDeliveryLock);

    }   /* end if (status == FM_OK) */

    FM_LOG_EXIT_API(FM_LOG_CAT_API, status);

}   /* end fmRemoveEventHandler */
/** fmDListPeekFirst
 * \ingroup intList
 *
 * \desc            Peeks into the first item in the list and returns data
 *                  in the first item. The list is not modified.
 *
 * \param[in]       list is the dlist on which to get the first item's data.
 *
 * \param[out]      dataPtr contains the first item's data; if there are no
 *                  items then dataPtr will be set to NULL.
 *
 * \return          FM_OK if successful.
 * \return          FM_ERR_NO_MORE if the list is empty.
 *
 *****************************************************************************/
fm_status fmDListPeekFirst(fm_dlist *list, void **dataPtr)
{
    fm_dlist_node *nnode;

    nnode = FM_DLL_GET_FIRST(list, head);

    if (!nnode)
    {
        /* Empty list: report FM_ERR_NO_MORE and null out the output so
         * the caller never sees stale data. */
        *dataPtr = NULL;
        return FM_ERR_NO_MORE;
    }

    *dataPtr = nnode->data;
    return FM_OK;

}   /* end fmDListPeekFirst */
/** fmDListRemoveBegin
 * \ingroup intList
 *
 * \desc            Removes the first item from the list and returns its
 *                  data.
 *
 * \param[in]       list is the dlist from which to remove the first item.
 *
 * \param[out]      dataPtr receives the removed item's data. Left
 *                  untouched if the list is empty.
 *
 * \return          FM_OK if successful.
 * \return          FM_ERR_NO_MORE if the list is empty.
 *
 *****************************************************************************/
fm_status fmDListRemoveBegin(fm_dlist *list, void **dataPtr)
{
    fm_dlist_node *first;

    first = FM_DLL_GET_FIRST(list, head);

    if (first == NULL)
    {
        return FM_ERR_NO_MORE;
    }

    /* fmDListRemove unlinks the node and returns the node's data. */
    *dataPtr = fmDListRemove(list, first);

    return FM_OK;

}   /* end fmDListRemoveBegin */
/** fmDistributeEvent
 * \ingroup intSwitch
 *
 * \desc            Distributes events to those processes that have
 *                  registered an interest in the particular event.
 *
 * \param[in]       event points to the event structure.
 *
 * \return          Nothing.
 *
 *****************************************************************************/
void fmDistributeEvent(fm_event *event)
{
    fm_status lockErr;

    /* Robustness fix: the original ignored the capture result and walked
     * the shared delivery list unprotected on failure. With
     * FM_WAIT_FOREVER a failure here indicates a broken lock, so bail. */
    lockErr = fmCaptureLock(&fmRootApi->localDeliveryLock, FM_WAIT_FOREVER);

    if (lockErr != FM_OK)
    {
        FM_LOG_ERROR(FM_LOG_CAT_EVENT,
                     "Unable to capture local delivery lock: %d\n",
                     lockErr);
        return;
    }

    {
        /**************************************************
         * We want to have a consistent snapshot of the
         * local delivery list, but we don't want to hold
         * the lock while we deliver all the events, so we
         * briefly grab the lock and copy the list into a
         * C99 variable-size array.
         **************************************************/

        fm_uint          count = fmRootApi->localDeliveryCount;
        fm_localDelivery delivery[count];
        fm_dlist_node *  node;
        fm_uint          i;
        fm_uint          pktDeliveryCount = 0;
        fm_eventPktRecv *rcvPktEvent = NULL;
        fm_status        status;
        fm_buffer *      buffer;

        node = FM_DLL_GET_FIRST( (&fmRootApi->localDeliveryThreads), head );

        for (i = 0 ; (node != NULL) && (i < count) ; i++)
        {
            delivery[i] = *(fm_localDelivery *) node->data;

            if ( (delivery[i].mask &
                  (FM_EVENT_PKT_RECV | FM_EVENT_SFLOW_PKT_RECV)) & event->type )
            {
                /* Found thread we need to deliver packet to. */
                pktDeliveryCount++;
            }

            node = FM_DLL_GET_NEXT(node, next);
        }

        /**************************************************
         * If the event is packet receive but no one has
         * registered for the event, free the associated
         * packet buffer and return.
         **************************************************/

        if ( ( (event->type == FM_EVENT_PKT_RECV) ||
               (event->type == FM_EVENT_SFLOW_PKT_RECV) ) &&
             (pktDeliveryCount == 0) )
        {
            rcvPktEvent = &event->info.fpPktEvent;

            if (enableFramePriority)
            {
                status = fmFreeBufferQueueNode(event->sw, rcvPktEvent);
                if (status != FM_OK)
                {
                    /* String fix: the original adjacent-literal
                     * concatenation produced "...failedstatus = ...". */
                    FM_LOG_ERROR(FM_LOG_CAT_EVENT_PKT_RX,
                                 "Freeing Buffer queue node from the queue "
                                 "failed, status = %d (%s)\n",
                                 status,
                                 fmErrorMsg(status));
                }
            }

            fmFreeBufferChain(event->sw, rcvPktEvent->pkt);
            fmDbgDiagCountIncr(event->sw, FM_CTR_RX_API_PKT_DROPS, 1);

            fmReleaseLock(&fmRootApi->localDeliveryLock);
            return;
        }

        /* valid actually found */
        count = i;

        fmReleaseLock(&fmRootApi->localDeliveryLock);

        /**************************************************
         * Now we do the actual delivery
         **************************************************/

        for (i = 0 ; i < count ; i++)
        {
            fm_event *localEvent = NULL;
            fm_uint64 nanos      = MIN_WAIT_NANOS;
            fm_status err        = FM_FAIL;
            fm_uint32 numUpdates;

            /* Skip processes not subscribed to this event type. */
            if ( (delivery[i].mask & event->type) == 0 )
            {
                continue;
            }

            /**************************************************
             * Always use high priority for the locally dispatched
             * events, because DistributeEvent is only called from
             * a single thread (the global event handler), and if
             * we allocated both low and high priority events here,
             * we could get priority inversion.
             **************************************************/

            while (localEvent == NULL)
            {
                localEvent = fmAllocateEvent(event->sw,
                                             event->eventID,
                                             event->type,
                                             FM_EVENT_PRIORITY_HIGH);

                if (localEvent == NULL)
                {
                    /* Event pool exhausted: exponential backoff, capped
                     * at MAX_WAIT_NANOS. */
                    DELAY_NANOS(nanos);
                    nanos *= 2;

                    if (nanos > MAX_WAIT_NANOS)
                    {
                        nanos = MAX_WAIT_NANOS;
                        FM_LOG_WARNING(FM_LOG_CAT_EVENT,
                                       "Waiting to allocate event of type %d "
                                       "for switch %d\n",
                                       event->type,
                                       event->sw);
                    }
                }
            }

            if (event->type == FM_EVENT_TABLE_UPDATE)
            {
                /**************************************************
                 * Because the updates field is a pointer to memory
                 * that has been "secretly" allocated after the event,
                 * rather than just being part of the union, we have
                 * to handle it specially.
                 **************************************************/

                numUpdates = event->info.fpUpdateEvent.numUpdates;

                localEvent->info.fpUpdateEvent.numUpdates = numUpdates;

                FM_MEMCPY_S( localEvent->info.fpUpdateEvent.updates,
                             numUpdates * sizeof(fm_eventTableUpdate),
                             event->info.fpUpdateEvent.updates,
                             numUpdates * sizeof(fm_eventTableUpdate) );
            }
            else if (event->type == FM_EVENT_PURGE_SCAN_COMPLETE)
            {
                localEvent->info.purgeScanComplete =
                    event->info.purgeScanComplete;
            }
            else if ( (event->type == FM_EVENT_PKT_RECV) ||
                      (event->type == FM_EVENT_SFLOW_PKT_RECV) )
            {
                rcvPktEvent = &event->info.fpPktEvent;

                /**************************************************
                 * Copy the whole event, including the packet, to
                 * localEvent. If this is not the last registered
                 * client, we will overwrite the packet with a
                 * clone.
                 **************************************************/

                localEvent->info = event->info;

                /**************************************************
                 * If there is more than one remaining process that is
                 * interested in receive packet events, clone the
                 * receive buffer for delivery.
                 **************************************************/

                if (pktDeliveryCount-- > 1)
                {
                    if (enableFramePriority)
                    {
                        /* String fix: the original concatenation lacked
                         * separating spaces ("...for thefirst..."). */
                        FM_LOG_ERROR(FM_LOG_CAT_EVENT,
                                     "Prioritization is supported only for "
                                     "the first registered process. "
                                     "Subsequent processes follow normal "
                                     "buffer allocation without "
                                     "prioritization.\n");
                    }

                    localEvent->info.fpPktEvent.pkt =
                        fmDuplicateBufferChain(event->sw, rcvPktEvent->pkt);

                    if (localEvent->info.fpPktEvent.pkt == NULL)
                    {
                        /**************************************************
                         * Couldn't copy the packet. Free the event so that
                         * it is not lost and continue the loop.
                         **************************************************/
                        fmReleaseEvent(localEvent);
                        fmDbgDiagCountIncr(event->sw,
                                           FM_CTR_RX_API_PKT_DROPS,
                                           1);
                        continue;
                    }
                }

                if (enableFramePriority)
                {
                    /* Link the buffer back to its event so the priority
                     * machinery can find it. */
                    buffer = ((fm_buffer *)(localEvent->info.fpPktEvent.pkt));
                    buffer->recvEvent = localEvent;
                }
            }
            else
            {
                /**************************************************
                 * Otherwise, we can just copy the whole union
                 * without worrying what type it is.
                 **************************************************/
                localEvent->info = event->info;
            }

            /**************************************************
             * Now try to send the event to the local dispatch
             * thread, using exponential backoff if the event
             * queue is full.
             **************************************************/

            nanos = MIN_WAIT_NANOS;

            while (err != FM_OK)
            {
                err = fmSendThreadEvent(delivery[i].thread, localEvent);

                if (err != FM_OK)
                {
                    DELAY_NANOS(nanos);
                    nanos *= 2;

                    if (nanos > MAX_WAIT_NANOS)
                    {
                        nanos = MAX_WAIT_NANOS;
                    }

                }   /* end if (err != FM_OK) */

            }   /* end while (err != FM_OK) */

        }   /* end for (i = 0 ; i < count ; i++) */

    }   /* end (local scope) */

}   /* end fmDistributeEvent */