void
bgp_adj_out_set (struct bgp_node *rn, struct peer *peer, struct prefix *p,
                 struct attr *attr, afi_t afi, safi_t safi,
                 struct bgp_info *binfo)
{
  struct bgp_adj_out *adj = NULL;
  struct bgp_advertise *adv;

#ifdef DISABLE_BGP_ANNOUNCE
  return;
#endif /* DISABLE_BGP_ANNOUNCE */

  /* Look for adjacency information. */
  if (rn)
    {
      for (adj = rn->adj_out; adj; adj = adj->next)
        if (adj->peer == peer)
          break;
    }

  if (! adj)
    {
      adj = XCALLOC (MTYPE_BGP_ADJ_OUT, sizeof (struct bgp_adj_out));
      adj->peer = peer_lock (peer); /* adj_out peer reference */

      if (rn)
        {
          BGP_ADJ_OUT_ADD (rn, adj);
          bgp_lock_node (rn);
        }
    }

  if (adj->adv)
    bgp_advertise_clean (peer, adj, afi, safi);

  adj->adv = bgp_advertise_new ();

  adv = adj->adv;
  adv->rn = rn;

  assert (adv->binfo == NULL);
  adv->binfo = bgp_info_lock (binfo); /* bgp_info adj_out reference */

  if (attr)
    adv->baa = bgp_advertise_intern (peer->hash[afi][safi], attr);
  else
    adv->baa = baa_new ();
  adv->adj = adj;

  /* Add new advertisement to advertisement attribute list. */
  bgp_advertise_add (adv->baa, adv);

  FIFO_ADD (&peer->sync[afi][safi]->update, &adv->fifo);
}
void
bgp_adj_out_unset (struct bgp_node *rn, struct peer *peer, struct prefix *p,
                   afi_t afi, safi_t safi)
{
  struct bgp_adj_out *adj;
  struct bgp_advertise *adv;

#ifdef DISABLE_BGP_ANNOUNCE
  return;
#endif /* DISABLE_BGP_ANNOUNCE */

  /* Lookup existing adjacency; if it is not there, return immediately. */
  for (adj = rn->adj_out; adj; adj = adj->next)
    if (adj->peer == peer)
      break;

  if (! adj)
    return;

  /* Clean up previous advertisement. */
  if (adj->adv)
    bgp_advertise_clean (peer, adj, afi, safi);

  if (adj->attr)
    {
      /* We need advertisement structure. */
      adj->adv = bgp_advertise_new ();
      adv = adj->adv;
      adv->rn = rn;
      adv->adj = adj;

      /* Add to synchronization entry for withdraw announcement. */
      FIFO_ADD (&peer->sync[afi][safi]->withdraw, &adv->fifo);

      /* Schedule packet write. */
      BGP_WRITE_ON (peer->t_write, bgp_write, peer->fd);
    }
  else
    {
      /* Remove myself from adjacency. */
      BGP_ADJ_OUT_DEL (rn, adj);

      /* Free allocated information. */
      bgp_adj_out_free (adj);

      bgp_unlock_node (rn);
    }
}
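/* Both adjacency functions above hand advertisements to per-peer
 * synchronization queues via FIFO_ADD.  The following is a minimal sketch of
 * the intrusive doubly-linked FIFO these macros assume, modeled on Quagga's
 * lib/fifo.h; the exact macro bodies in this code base may differ.  Any
 * structure placed on such a queue (e.g. struct bgp_advertise) embeds a
 * struct fifo as its first member. */
struct fifo
{
  struct fifo *next;
  struct fifo *prev;
};

/* Make the FIFO header point at itself, i.e. an empty queue. */
#define FIFO_INIT(F)                                   \
  do {                                                 \
    struct fifo *Xfifo = (struct fifo *)(F);           \
    Xfifo->next = Xfifo->prev = Xfifo;                 \
  } while (0)

/* Append node N at the tail of FIFO F. */
#define FIFO_ADD(F,N)                                  \
  do {                                                 \
    struct fifo *Xfifo = (struct fifo *)(F);           \
    struct fifo *Xnode = (struct fifo *)(N);           \
    Xnode->next = Xfifo;                               \
    Xnode->prev = Xfifo->prev;                         \
    Xfifo->prev = Xfifo->prev->next = Xnode;           \
  } while (0)

/* Unlink node N from whatever FIFO it is on. */
#define FIFO_DEL(N)                                    \
  do {                                                 \
    struct fifo *Xnode = (struct fifo *)(N);           \
    Xnode->prev->next = Xnode->next;                   \
    Xnode->next->prev = Xnode->prev;                   \
  } while (0)

/* First node of FIFO F, or NULL if the queue is empty. */
#define FIFO_HEAD(F)                                   \
  ((((struct fifo *)(F))->next == (struct fifo *)(F))  \
   ? NULL : (F)->next)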
/* Enqueue message. */
void
zebra_server_enqueue (int sock, u_char *buf, unsigned long length,
                      unsigned long written)
{
  struct zebra_message_queue *queue;

  queue = XCALLOC (MTYPE_TMP, sizeof (struct zebra_message_queue));
  queue->buf = XMALLOC (MTYPE_TMP, length);
  memcpy (queue->buf, buf, length);
  queue->length = length;
  queue->written = written;

  FIFO_ADD (&message_queue, queue);

  THREAD_WRITE_ON (zebrad.master, t_write, zebra_server_dequeue, NULL, sock);
}
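/* zebra_server_enqueue copies the unsent part of a message onto the global
 * message_queue and arms a write thread.  The sketch below shows what the
 * scheduled callback could look like; it is illustrative only, built on the
 * FIFO_HEAD/FIFO_DEL macros sketched earlier and the same zebrad.master and
 * t_write globals, and is not the project's actual implementation. */
int
zebra_server_dequeue (struct thread *t)
{
  int sock;
  int nbytes;
  struct zebra_message_queue *queue;

  sock = THREAD_FD (t);
  t_write = NULL;

  queue = (struct zebra_message_queue *) FIFO_HEAD (&message_queue);
  if (queue == NULL)
    return 0;

  /* Try to push the remaining bytes of the head message to the client. */
  nbytes = write (sock, queue->buf + queue->written,
                  queue->length - queue->written);
  if (nbytes > 0)
    queue->written += nbytes;

  if (queue->written == queue->length)
    {
      /* Message fully sent; drop it from the queue. */
      FIFO_DEL (queue);
      XFREE (MTYPE_TMP, queue->buf);
      XFREE (MTYPE_TMP, queue);
    }

  /* Re-arm the write thread while messages remain queued. */
  if (FIFO_HEAD (&message_queue))
    THREAD_WRITE_ON (zebrad.master, t_write, zebra_server_dequeue, NULL, sock);

  return 0;
}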
/* BGP Peer Incoming Connection Accept thread handler */
s_int32_t
bpn_sock_accept (struct thread *t_accept)
{
  struct bgp_listen_sock_lnode *tmp_lnode;
  struct bgp_peer_inconn_req *peer_icr;
  u_int8_t su_buf [SU_ADDRSTRLEN];
  pal_sock_handle_t accept_sock;
  pal_sock_handle_t bgp_sock;
  struct lib_globals *blg;
  struct bgp_peer *peer;
  union sockunion su;
  struct bgp *bgp;
  s_int32_t ret;

  bgp_sock = THREAD_FD (t_accept);
  blg = THREAD_GLOB (t_accept);
  bgp = THREAD_ARG (t_accept);
  ret = 0;

  /* Sanity check thread variables */
  if (! blg || &BLG != blg)
    {
      ret = -1;
      goto EXIT;
    }

  if (! bgp)
    {
      zlog_err (&BLG, "[NETWORK] Accept Thread: Invalid Vital Vars, "
                "blg(%p) bgp(%p)", blg, bgp);
      ret = -1;
      goto EXIT;
    }

  /* Verify integrity of thread variables */
  for (tmp_lnode = bgp->listen_sock_lnode; tmp_lnode;
       tmp_lnode = tmp_lnode->next)
    {
      if (tmp_lnode->listen_sock == bgp_sock)
        break;
    }

  if (! tmp_lnode)
    {
      zlog_err (&BLG, "[NETWORK] Accept Thread: Mismatch in thread args "
                "blg(%p) bgp(%p)", blg, bgp);
      ret = -1;
      goto EXIT;
    }

  /* Set BGP VR Context */
  BGP_SET_VR_CONTEXT (&BLG, bgp->owning_bvr);

  /* Re-register accept thread */
  t_accept = NULL;
  BGP_READ_ON (&BLG, t_accept, bgp, bpn_sock_accept, bgp_sock);

  /* Update the Accept Thread List Node */
  tmp_lnode->t_accept = t_accept;

  /* Accept Incoming Connection (Blocking) */
  accept_sock = sockunion_accept (&BLG, bgp_sock, &su);
  if (accept_sock < 0)
    {
      zlog_err (&BLG, "[NETWORK] Accept Thread: accept() Failed for Server"
                " Sock %d, Err:%d-%s", bgp_sock, errno, pal_strerror (errno));
      ret = -1;
      goto EXIT;
    }

  if (BGP_DEBUG (events, EVENTS))
    zlog_info (&BLG, "[NETWORK] Accept Thread: Incoming conn from host"
               " %s (FD=%u)", inet_sutop (&su, su_buf), accept_sock);

  /* Search for Configured Peer with same Remote IP address */
  peer = bgp_peer_search (bgp, &su);
  if (! peer)
    {
      if (BGP_DEBUG (events, EVENTS))
        zlog_info (&BLG, "[NETWORK] Accept Thread: %s - No such Peer "
                   "configured", inet_sutop (&su, su_buf));
      SSOCK_FD_CLOSE (&BLG, accept_sock);
      ret = -1;
      goto EXIT;
    }

  /* Prepare an Incoming Connection Req. Info structure */
  peer_icr = XCALLOC (MTYPE_TMP, sizeof (struct bgp_peer_inconn_req));
  if (! peer_icr)
    {
      zlog_err (&BLG, "[NETWORK] Accept Thread:"
                " Cannot allocate memory (%d) @ %s:%d",
                sizeof (struct bgp_peer_inconn_req), __FILE__, __LINE__);
      SSOCK_FD_CLOSE (&BLG, accept_sock);
      ret = -1;
      goto EXIT;
    }

  /* Initialize the FIFO Node */
  FIFO_INIT (&peer_icr->icr_fifo);

  /* Store the ICR Information */
  peer_icr->icr_sock = accept_sock;
  switch (su.sa.sa_family)
    {
    case AF_INET:
      peer_icr->icr_port = su.sin.sin_port;
      break;
#ifdef HAVE_IPV6
    case AF_INET6:
      peer_icr->icr_port = su.sin6.sin6_port;
      break;
#endif /* HAVE_IPV6 */
    }

  /* Enqueue into Peer's 'bicr_fifo' */
  FIFO_ADD (&peer->bicr_fifo, &peer_icr->icr_fifo);

  /* Generate BGP Peer FSM ICR Event */
  BGP_PEER_FSM_EVENT_ADD (&BLG, peer, BPF_EVENT_TCP_CONN_VALID);

EXIT:

  return ret;
}
int camFillColor(CamImage *image, int x, int y, int fillcolor, int tolerance)
{
    int first=0,last=0;
    int i,j,d,xp,yp;
    CAM_PIXEL *ptr,*ptrx;
    const int nx[4]={-1,0,+1,0},ny[4]={0,-1,0,+1};
    CAM_PIXEL pcolor[4],initcolor[4]; // 4 is the maximum number of channels
    CamInternalROIPolicyStruct iROI;
    int acc=1;
    int queuex[FIFO_SIZE];
    int queuey[FIFO_SIZE];

    // ROI (Region Of Interest) management
    CAM_CHECK(camFillColor,camInternalROIPolicy(image, NULL, &iROI, 0));
    CAM_CHECK_ARGS(camFillColor, ((iROI.nChannels==1)||(image->dataOrder==CAM_DATA_ORDER_PIXEL)));

    if ((x>=iROI.srcroi.xOffset)&&(y>=iROI.srcroi.yOffset)&&
        (x<iROI.srcroi.xOffset+iROI.srcroi.width)&&(y<iROI.srcroi.yOffset+iROI.srcroi.height)) {
        for (i=0;i<iROI.nChannels;i++) {
            pcolor[i]=(fillcolor>>(i*8))&0xff;
        }
        ptr=ptrx=(CAM_PIXEL*)(image->imageData+iROI.srcchoffset+y*image->widthStep)+x*iROI.srcinc;
        if (tolerance>=0) {
            for (i=0;i<iROI.nChannels;i++) {
                initcolor[i]=*ptrx++;
            }
            FIFO_ADD(x,y);
            for (ptrx=ptr,i=0;i<iROI.nChannels;i++,ptrx++) {
                *ptrx=pcolor[i];
            }
            while (!FIFO_EMPTY()) {
                x=queuex[first]; y=queuey[first];
                FIFO_NEXT();
                for (j=0;j<4;j++) {
                    xp=x+nx[j]; yp=y+ny[j];
                    if ((xp>=iROI.srcroi.xOffset)&&(yp>=iROI.srcroi.yOffset)&&
                        (xp<iROI.srcroi.xOffset+iROI.srcroi.width)&&(yp<iROI.srcroi.yOffset+iROI.srcroi.height)) {
                        // Get the color at (xp,yp)
                        ptr=ptrx=(CAM_PIXEL*)(image->imageData+iROI.srcchoffset+yp*image->widthStep)+xp*iROI.srcinc;
                        // Is it the same color as the initial color?
                        // Compute distance between colors
                        d=0;
                        for (i=0;i<iROI.nChannels;i++,ptrx++) {
                            if (*ptrx>initcolor[i]) d+=*ptrx-initcolor[i]; else d+=initcolor[i]-*ptrx;
                        }
                        if (d<=tolerance) {
                            // Yes, then this pixel should be repainted and added to the queue
                            FIFO_ADD(xp,yp);
                            for (ptrx=ptr,i=0;i<iROI.nChannels;i++,ptrx++) {
                                *ptrx=pcolor[i];
                            }
                            acc++;
                        }
                    }
                }
            }
        } else {
            FIFO_ADD(x,y);
            for (ptrx=ptr,i=0;i<iROI.nChannels;i++,ptrx++) {
                *ptrx=pcolor[i];
            }
            while (!FIFO_EMPTY()) {
                x=queuex[first]; y=queuey[first];
                FIFO_NEXT();
                for (j=0;j<4;j++) {
                    xp=x+nx[j]; yp=y+ny[j];
                    if ((xp>=iROI.srcroi.xOffset)&&(yp>=iROI.srcroi.yOffset)&&
                        (xp<iROI.srcroi.xOffset+iROI.srcroi.width)&&(yp<iROI.srcroi.yOffset+iROI.srcroi.height)) {
                        // Get the color at (xp,yp)
                        ptr=ptrx=(CAM_PIXEL*)(image->imageData+iROI.srcchoffset+yp*image->widthStep)+xp*iROI.srcinc;
                        for (i=0;i<iROI.nChannels;i++,ptrx++) if (*ptrx!=pcolor[i]) break;
                        // Is it the same color as the fill color?
                        if (i!=iROI.nChannels) {
                            // No, then this pixel should be repainted and added to the queue
                            FIFO_ADD(xp,yp);
                            for (ptrx=ptr,i=0;i<iROI.nChannels;i++,ptrx++) {
                                *ptrx=pcolor[i];
                            }
                            acc++;
                        }
                    }
                }
            }
        }
    }
    return acc;
}
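/* camFillColor drains a breadth-first queue of pixel coordinates through
 * FIFO_ADD/FIFO_NEXT/FIFO_EMPTY.  Those macros (and FIFO_SIZE) are not shown
 * in this excerpt and are unrelated to the routing-library FIFO_ADD above;
 * the definitions below are an assumed reconstruction, consistent with how
 * queuex, queuey, first and last are used in the function (a fixed-size
 * circular buffer), not necessarily the library's own definitions. */
#define FIFO_SIZE 1000000                         /* assumed queue capacity */
#define FIFO_ADD(x,y)  do { queuex[last]=(x); queuey[last]=(y); \
                            last=(last+1)%FIFO_SIZE; } while (0)
#define FIFO_NEXT()    (first=(first+1)%FIFO_SIZE) /* pop the head element */
#define FIFO_EMPTY()   (first==last)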