/*
 * wdbUdpSockCancel - wake up the WDB agent blocked on its UDP socket.
 *
 * Queues a one-byte write() to the cancel socket on tNetTask via
 * netJobAdd(); the pending read on wdbUdpCancelSock then returns,
 * cancelling the blocking communication request.
 *
 * The dummy byte must be static: the write() runs asynchronously on
 * tNetTask after this function has returned, so an automatic variable's
 * address would dangle by the time the job executes.
 *
 * RETURNS: OK if the job was queued, ERROR if the netJob queue is full.
 */
static int wdbUdpSockCancel
    (
    WDB_COMM_ID commId          /* communication channel (unused here) */
    )
    {
    static char dummy;          /* byte written to the cancel socket;
                                 * static so it outlives this frame */

    if (netJobAdd ((FUNCPTR) write, wdbUdpCancelSock, (int) &dummy, 1,
                   0, 0) != OK)
        return (ERROR);

    return (OK);
    }
/*
 * endPollStatsJobQueue - schedule the interface poll-stats routine.
 *
 * Hands endPollStatsPoll() off to tNetTask through netJobAdd().  If the
 * netJob queue is full the add fails; in that case the interface
 * watchdog is re-armed so this routine fires again after
 * ifPollInterval ticks and retries the enqueue.
 */
LOCAL void endPollStatsJobQueue
    (
    END_IFDRVCONF * pDrvConf    /* driver configuration to poll */
    )
    {
    STATUS added;

    added = netJobAdd ((FUNCPTR) endPollStatsPoll, (int) pDrvConf,
                       0, 0, 0, 0);
    if (added == OK)
        return;

    /* Queue full: reload the watchdog and try again on its next expiry. */
    wdStart (pDrvConf->ifWatchdog, pDrvConf->ifPollInterval,
             (FUNCPTR) endPollStatsJobQueue, (int) pDrvConf);
    }
/*
 * Move execution to netJob. returns 0 if successful.
 *
 * Queues FUNCTION on tNetTask via netJobAdd() with two arguments:
 * CONTEXT and a binary semaphore.  The queued function is expected to
 * semGive() that semaphore when it completes (contract of the functions
 * passed in by callers -- TODO confirm against those call sites); this
 * routine blocks on the semaphore until then, making the invocation
 * synchronous from the caller's point of view.
 *
 * Returns 0 on success, 1 if netJobAdd() failed, 2 if the semaphore
 * could not be created.
 */
int ssh_netjob_synchronous_invoke(FUNCPTR function, void *context)
{
  STATUS stat;
  SEMAPHORE *s;

  s = semBCreate(SEM_Q_PRIORITY, SEM_FULL);
  if (!s)
    return 2;

  /* Created full; this take empties it so the post-queue take blocks. */
  semTake(s, WAIT_FOREVER);

  stat = netJobAdd(function, (int)context, (int)s, 0, 0, 0);
  if (stat == OK)
    {
      /* Block until the job, running on tNetTask, gives the semaphore. */
      semTake(s, WAIT_FOREVER);
      semDelete(s);
      return 0;
    }

  /* Job was never queued: restore the semaphore state and clean up. */
  semGive(s);
  semDelete(s);
  return 1;
}
/* VA device send handler.

   The packet is deferred to tNetTask via netJobAdd() for two reasons:
   1) engine code must only execute on tNetTask, and 2) processing it
   inline would recurse into in_arpinput().  Without cutting the call
   stack here, the first IP packet sent to the VA would be buffered
   pending ARP in the OS stack; feeding the spoofed ARP response to the
   stack would make in_arpinput() call the intercepted output function,
   which would trigger a fresh ARP request from the IPsec engine during
   flow creation, another spoofed ARP response, and a duplicate send of
   the still-buffered packet (it is not cleared from the stack before
   the output function is called). */
static STATUS vxworks_va_send(END_OBJ *end, M_BLK_ID m)
{
  VxWorksVa *va = (void *)end;

  SSH_DEBUG(SSH_D_LOWOK, ("send on %s", va->name));

  /* Drop the packet when the backlog of queued sends hits the cap. */
  if (vxworks_va_sends_submitted - vxworks_va_sends_processed >=
      VXWORKS_VA_SENDS_MAX)
    {
      SSH_TRACE(
        SSH_D_ERROR, ("%s: too many netJobs, dropping packet", va->name));
      m_freem(m);
      return OK;
    }

  /* Drop the packet if the job cannot be queued either. */
  if (netJobAdd((FUNCPTR)vxworks_va_send_sub, (int)end, (int)m,
                0, 0, 0) != OK)
    {
      SSH_TRACE(
        SSH_D_ERROR, ("%s: netJobAdd failed, dropping packet", va->name));
      m_freem(m);
      return OK;
    }

  vxworks_va_sends_submitted++;
  return OK;
}
/* Timer handler: defer the actual reclaim work to tNetTask.  The add is
   fire-and-forget; if the netJob queue is full this round is skipped. */
static void osl_cachereclaim_timer(void *data)
{
    (void) netJobAdd((FUNCPTR) osl_cachereclaim, (int) data, 0, 0, 0, 0);
}
/*
 * socend_receive - RX callback delivering received packets to the END
 * driver.
 *
 * Filters on pkt->rx_reason, offers the packet to the optional
 * snoop_ip_rx() hook, then defers delivery to tNetTask by queueing
 * socend_receive_netjob() via netJobAdd().
 *
 * Returns BCM_RX_HANDLED_OWNED / BCM_RX_HANDLED when the packet was
 * queued, BCM_RX_NOT_HANDLED otherwise.
 */
bcm_rx_t socend_receive(int unit, bcm_pkt_t *pkt, void *cookie)
{
    /* NOTE(review): all bits set, so the rx_reason filter below can
     * never reject a packet -- presumably intentional after the mask
     * change documented in the comment; confirm. */
#define OK_REASON ((uint32)(-1))
    /*
     * Was:
     * (DMAS1_OP_IPSW | DMAS1_OP_IPINF | DMAS1_OP_IPMC | \
     * DMAS1_OP_TTL | BCM_PMUX_PR_DEFIP | BCM_PMUX_PR_COSMK | \
     * DMAS1_OP_BCA)
     */
    int pkt_rv = BCM_RX_NOT_HANDLED;
    int rv;
    int enqueue = TRUE;

    if (pkt->rx_reason & ~OK_REASON)
    {
        return BCM_RX_NOT_HANDLED;
    }

    /* Check VLAN for match; Check dest MAC for match; check for bcast */
    if (snoop_ip_rx != NULL)
    {
        pkt_rv = snoop_ip_rx(unit, pkt, &enqueue);
    }

    if (pkt_rv == BCM_RX_NOT_HANDLED && enqueue)
    {
        if (SOC_IS_ESW(unit))
        {
            /* Hand the RX buffer itself to the netJob; OWNED tells the
             * RX layer not to reuse it.  The trailing 4, 5 arguments --
             * presumably ignored by socend_receive_netjob; TODO
             * confirm. */
            rv = netJobAdd((FUNCPTR)socend_receive_netjob, (int)cookie,
                           (int)pkt->pkt_data[0].data, (int)pkt->pkt_len,
                           4, 5);
            pkt_rv = ((rv == OK) ? BCM_RX_HANDLED_OWNED
                                 : BCM_RX_NOT_HANDLED);
        }
        else
        {
            /* Non-ESW: copy the packet into a private buffer so the RX
             * buffer can be recycled immediately. */
            char *packet;
            socend_packet_alloc(unit, SOC_END_PK_SZ, 0, (void *)&packet);
            if (NULL == packet)
            {
                LOG_INFO(BSL_LS_SYS_END,
                         (BSL_META_U(unit,
                                     "failed to allocate buffer with socend_packet_alloc\n")));
                return BCM_RX_NOT_HANDLED;
            }
            sal_memset(packet, 0x0, SOC_END_PK_SZ);
            sal_memcpy(packet, (unsigned char *)pkt->pkt_data[0].data,
                       pkt->pkt_len);
            rv = netJobAdd((FUNCPTR)socend_receive_netjob, (int)cookie,
                           (int)packet, (int)pkt->pkt_len, 4, 5);
            LOG_INFO(BSL_LS_SYS_END,
                     (BSL_META_U(unit, "netJobAdd rv=0x%x OK=%x\n"),
                      rv, OK));
            /* NOTE(review): rv is logged but not acted on -- if
             * netJobAdd failed, `packet` leaks and HANDLED is still
             * reported.  Fixing this needs the matching free routine
             * for socend_packet_alloc, which is not visible here. */
            pkt_rv = BCM_RX_HANDLED;
        }
    }
    return pkt_rv;
}