/* Periodic TCP connect checker: create a socket, start a non-blocking
 * connect toward the real server, and hand the resulting state to the
 * worker thread (tcp_check_thread) for completion handling. */
int tcp_connect_thread(thread_t * thread)
{
	checker_t *checker = THREAD_ARG(thread);
	tcp_checker_t *tcp_check = CHECKER_ARG(checker);
	int fd;
	int status;

	/*
	 * Register a new checker thread & return
	 * if checker is disabled
	 */
	if (!CHECKER_ENABLED(checker)) {
		thread_add_timer(thread->master, tcp_connect_thread, checker,
				 checker->vs->delay_loop);
		return 0;
	}

	if ((fd = socket(tcp_check->dst.ss_family, SOCK_STREAM, IPPROTO_TCP)) == -1) {
		DBG("TCP connect fail to create socket.");
		return 0;
	}

	status = tcp_bind_connect(fd, &tcp_check->dst, &tcp_check->bindto);
	if (status == connect_error) {
		/* FIX: close the socket and stop here.  The original leaked
		 * the fd on connect_error and also fell through to
		 * tcp_connection_state(), double-scheduling the checker. */
		close(fd);
		thread_add_timer(thread->master, tcp_connect_thread, checker,
				 checker->vs->delay_loop);
		return 0;
	}

	/* handle tcp connection status & register check worker thread */
	tcp_connection_state(fd, status, thread, tcp_check_thread,
			     tcp_check->connection_to);
	return 0;
}
/* Opaque-capability ready callback: log the event, then queue an LSA
 * originate (5s), a value update (10s) and a delete (30s). */
static void
ready_callback (u_char lsa_type, u_char opaque_type, struct in_addr addr)
{
  printf ("ready_callback: lsa_type: %d opaque_type: %d addr=%s\n",
          lsa_type, opaque_type, inet_ntoa (addr));

  /* Schedule opaque LSA originate in 5 secs */
  thread_add_timer (master, lsa_inject, oclient, 5);

  /* Schedule opaque LSA update with new value */
  thread_add_timer (master, lsa_inject, oclient, 10);

  /* Schedule delete */
  thread_add_timer (master, lsa_delete, oclient, 30);
}
static int ospf6_interface_run_metricfunction (struct thread *thread) { struct ospf6_interface *oi; struct ospf6_interface_metricfunction *imf; struct listnode *node; struct ospf6_neighbor *on; oi = (struct ospf6_interface *) THREAD_ARG (thread); assert (oi); imf = ospf6_interface_neighbor_metric_data (oi, metricfunction_nbrmetric_id); assert (imf); if (imf->metric_function == NULL) { zlog_err ("%s: attempt made to use NULL metric function", __func__); return 0; } for (ALL_LIST_ELEMENTS_RO (oi->neighbor_list, node, on)) ospf6_neighbor_run_metricfunction (imf, on); if (imf->metric_function_interval) imf->thread_metric_function = thread_add_timer (master, ospf6_interface_run_metricfunction, oi, imf->metric_function_interval); return 0; }
/* Get RIPng peer. At the same time update timeout thread. */ static struct ripng_peer *ripng_peer_get(struct ripng *ripng, struct in6_addr *addr) { struct ripng_peer *peer; peer = ripng_peer_lookup(ripng, addr); if (peer) { if (peer->t_timeout) thread_cancel(peer->t_timeout); } else { peer = ripng_peer_new(); peer->ripng = ripng; peer->addr = *addr; listnode_add_sort(ripng->peer_list, peer); } /* Update timeout thread. */ peer->t_timeout = NULL; thread_add_timer(master, ripng_peer_timeout, peer, RIPNG_PEER_TIMER_DEFAULT, &peer->t_timeout); /* Last update time set. */ time(&peer->uptime); return peer; }
/* global 1second timer used for periodic processing */
static void pim_vxlan_work_timer_setup(bool start)
{
	THREAD_OFF(vxlan_info.work_timer);
	if (!start)
		return;
	thread_add_timer(router->master, pim_vxlan_work_timer_cb, NULL,
			 PIM_VXLAN_WORK_TIME, &vxlan_info.work_timer);
}
/* register checkers to the global I/O scheduler */ void register_checkers_thread(void) { checker_t *checker; element e; long warmup; for (e = LIST_HEAD(checkers_queue); e; ELEMENT_NEXT(e)) { checker = ELEMENT_DATA(e); log_message(LOG_INFO, "Activating healthchecker for service %s" , FMT_CHK(checker)); CHECKER_ENABLE(checker); if (checker->launch) { /* wait for a random timeout to begin checker thread. It helps avoiding multiple simultaneous checks to the same RS. */ warmup = checker->warmup; if (warmup) warmup = warmup * rand() / RAND_MAX; thread_add_timer(master, checker->launch, checker, BOOTSTRAP_DELAY + warmup); } } }
static void zclient_event (enum event event, struct zclient *zclient) { switch (event) { case ZCLIENT_SCHEDULE: if (! zclient->t_connect) zclient->t_connect = thread_add_event (master, zclient_connect, zclient, 0); break; case ZCLIENT_CONNECT: if (zclient->fail >= 10) return; if (zclient_debug) zlog_debug ("zclient connect schedule interval is %d", zclient->fail < 3 ? 10 : 60); if (! zclient->t_connect) zclient->t_connect = thread_add_timer (master, zclient_connect, zclient, zclient->fail < 3 ? 10 : 60); break; case ZCLIENT_READ: zclient->t_read = thread_add_read (master, zclient_read, zclient, zclient->sock); break; } }
static void init_if_linkbeat(void) { interface_t *ifp; element e; int status; for (e = LIST_HEAD(if_queue); e; ELEMENT_NEXT(e)) { ifp = ELEMENT_DATA(e); ifp->lb_type = LB_IOCTL; status = if_mii_probe(ifp->ifname); if (status >= 0) { ifp->lb_type = LB_MII; ifp->linkbeat = (status) ? 1 : 0; } else { status = if_ethtool_probe(ifp->ifname); if (status >= 0) { ifp->lb_type = LB_ETHTOOL; ifp->linkbeat = (status) ? 1 : 0; } } /* Register new monitor thread */ thread_add_timer(master, if_linkbeat_refresh_thread, ifp, POLLING_DELAY); } }
/* Schedule the next BGP dump.  interval > 0 arms a periodic timer
 * (aligned to midnight when a day divides evenly); otherwise a one-off
 * dump fires immediately. */
static int
bgp_dump_interval_add (struct bgp_dump *bgp_dump, int interval)
{
  int secs_into_day;
  time_t t;
  struct tm *tm;

  if (interval <= 0)
    {
      /* One-off dump: execute immediately, don't affect any scheduled dumps */
      bgp_dump->t_interval = thread_add_event (bm->master,
                                               bgp_dump_interval_func,
                                               bgp_dump, 0);
      return 0;
    }

  /* Periodic dump every interval seconds */
  if ((interval < 86400) && ((86400 % interval) == 0))
    {
      /* Dump at predictable times: if a day has a whole number of
       * intervals, dump every interval seconds starting from midnight */
      (void) time (&t);
      tm = localtime (&t);
      secs_into_day = tm->tm_sec + 60 * tm->tm_min + 60 * 60 * tm->tm_hour;
      interval = interval - secs_into_day % interval; /* always > 0 */
    }
  bgp_dump->t_interval = thread_add_timer (bm->master, bgp_dump_interval_func,
                                           bgp_dump, interval);

  return 0;
}
/* Install default signal traps, then the application-supplied handlers;
 * exits the process if any handler cannot be installed.  Optionally
 * arms the fallback signal-polling timer. */
void
signal_init (struct thread_master *m, int sigc,
             struct quagga_signal_t signals[])
{
  struct quagga_signal_t *sig;
  int i;

  /* First establish some default handlers that can be overridden by
     the application. */
  trap_default_signals ();

  for (i = 0; i < sigc; i++)
    {
      sig = &signals[i];
      if (signal_set (sig->signal) < 0)
        {
          zlog_notice ("signal_init exit(-1)\n");
          EXIT (-1);
        }
    }

  sigmaster.sigc = sigc;
  sigmaster.signals = signals;

#ifdef SIGEVENT_SCHEDULE_THREAD
  sigmaster.t =
    thread_add_timer (m, quagga_signal_timer, &sigmaster,
                      QUAGGA_SIGNAL_TIMER_INTERVAL);
#endif /* SIGEVENT_SCHEDULE_THREAD */
}
/* Schedule the next BGP dump.  Positive intervals that divide a day
 * evenly are aligned to midnight boundaries; other positive intervals
 * run as-is; non-positive intervals trigger an immediate one-shot. */
int
bgp_dump_interval_add (struct bgp_dump *bgp_dump, int interval)
{
  int bgp_dump_interval_func (struct thread *);
  int interval2, secs_into_day;
  time_t t;
  struct tm *tm;

  if (interval <= 0)
    {
      bgp_dump->t_interval = thread_add_event (master, bgp_dump_interval_func,
                                               bgp_dump, 0);
      return 0;
    }

  interval2 = interval;
  if ((interval < 86400) && ((86400 % interval) == 0))
    {
      /* Whole number of intervals per day: align to the next boundary. */
      (void) time (&t);
      tm = localtime (&t);
      secs_into_day = tm->tm_sec + 60 * tm->tm_min + 60 * 60 * tm->tm_hour;
      interval2 = interval - secs_into_day % interval;
      if (interval2 == 0)
        interval2 = interval;
    }

  bgp_dump->t_interval = thread_add_timer (master, bgp_dump_interval_func,
                                           bgp_dump, interval2);

  return 0;
}
int ospf6_damp_debug_thread (struct thread *thread) { int i; struct ospf6_damp_info *di; char buf[256]; time_t t_now; struct timeval now; for (i = 0; i < dc->reuse_list_size; i++) { for (di = dc->reuse_list_array[i]; di; di = di->next) { t_now = time (NULL); gettimeofday (&now, NULL); prefix2str (&di->name, buf, sizeof (buf)); zlog_info ("DAMP: %lu.%06lu %c %-32s penalty %7u", now.tv_sec, now.tv_usec, (di->damping == ON ? 'D' : 'A'), buf, (u_int) (di->penalty * ospf6_damp_decay (t_now - di->t_updated))); } } thread_add_timer (master, ospf6_damp_debug_thread, NULL, 1); return 0; }
/* (Re)arm or stop the periodic MSDP SA advertisement timer. */
static void pim_msdp_sa_adv_timer_setup(struct pim_instance *pim, bool start)
{
	THREAD_OFF(pim->msdp.sa_adv_timer);
	if (!start)
		return;
	thread_add_timer(pim->msdp.master, pim_msdp_sa_adv_timer_cb, pim,
			 PIM_MSDP_SA_ADVERTISMENT_TIME,
			 &pim->msdp.sa_adv_timer);
}
/* (Re)arm or stop the hold timer for an MSDP peer session. */
static void pim_msdp_peer_hold_timer_setup(struct pim_msdp_peer *mp, bool start)
{
	struct pim_instance *pim = mp->pim;

	THREAD_OFF(mp->hold_timer);
	if (!start)
		return;
	thread_add_timer(pim->msdp.master, pim_msdp_peer_hold_timer_cb, mp,
			 PIM_MSDP_PEER_HOLD_TIME, &mp->hold_timer);
}
/* Arm the dump timer to fire INTERVAL seconds from now. */
int
bgp_dump_interval_add (struct bgp_dump *bgp_dump, int interval)
{
  int bgp_dump_interval_func (struct thread *);

  bgp_dump->t_interval =
    thread_add_timer (master, bgp_dump_interval_func, bgp_dump, interval);

  return 0;
}
/* Interface State Machine */ int interface_up (struct thread *thread) { struct ospf6_interface *oi; oi = (struct ospf6_interface *) THREAD_ARG (thread); assert (oi && oi->interface); if (IS_OSPF6_DEBUG_INTERFACE) zlog_debug ("Interface Event %s: [InterfaceUp]", oi->interface->name); /* check physical interface is up */ if (! if_is_up (oi->interface)) { if (IS_OSPF6_DEBUG_INTERFACE) zlog_debug ("Interface %s is down, can't execute [InterfaceUp]", oi->interface->name); return 0; } /* if already enabled, do nothing */ if (oi->state > OSPF6_INTERFACE_DOWN) { if (IS_OSPF6_DEBUG_INTERFACE) zlog_debug ("Interface %s already enabled", oi->interface->name); return 0; } /* Join AllSPFRouters */ // send join allspfrouters message to shim thread_add_event (master, rospf6_join_allspfrouters_send, oi, 0); // ospf6_join_allspfrouters (oi->interface->ifindex); /* Update interface route */ ospf6_interface_connected_route_update (oi->interface); zlog_debug("about to send hello message..."); /* Schedule Hello */ if (! CHECK_FLAG (oi->flag, OSPF6_INTERFACE_PASSIVE)) thread_add_event (master, rospf6_hello_send, oi, 0); /* decide next interface state */ if (if_is_pointopoint (oi->interface)) ospf6_interface_state_change (OSPF6_INTERFACE_POINTTOPOINT, oi); else if (oi->priority == 0) ospf6_interface_state_change (OSPF6_INTERFACE_DROTHER, oi); else { ospf6_interface_state_change (OSPF6_INTERFACE_WAITING, oi); thread_add_timer (master, wait_timer, oi, oi->dead_interval); } return 0; }
/* (Re)arm or stop the keepalive timer for an MSDP peer. */
static void pim_msdp_peer_ka_timer_setup(struct pim_msdp_peer *mp, bool start)
{
	THREAD_OFF(mp->ka_timer);
	if (!start)
		return;
	thread_add_timer(mp->pim->msdp.master, pim_msdp_peer_ka_timer_cb, mp,
			 PIM_MSDP_PEER_KA_TIME, &mp->ka_timer);
}
/* (Re)arm or stop the hold/state timer for an MSDP SA entry. */
static void pim_msdp_sa_state_timer_setup(struct pim_msdp_sa *sa, bool start)
{
	THREAD_OFF(sa->sa_state_timer);
	if (!start)
		return;
	thread_add_timer(sa->pim->msdp.master, pim_msdp_sa_state_timer_cb, sa,
			 PIM_MSDP_SA_HOLD_TIME, &sa->sa_state_timer);
}
/* timer thread to check signals. Shouldnt be needed */ int quagga_signal_timer(struct thread *t) { struct quagga_sigevent_master_t *sigm; sigm = THREAD_ARG(t); sigm->t = NULL; thread_add_timer(sigm->t->master, quagga_signal_timer, &sigmaster, QUAGGA_SIGNAL_TIMER_INTERVAL, &sigm->t); return quagga_sigevent_process(); }
/* Schedule the ABR task, unless one is already pending. */
void
ospf_schedule_abr_task (struct ospf *ospf)
{
  if (IS_DEBUG_OSPF_EVENT)
    zlog_info ("Scheduling ABR task");

  if (ospf->t_abr_task != NULL)
    return;

  ospf->t_abr_task = thread_add_timer (master, ospf_abr_task_timer, ospf,
                                       OSPF_ABR_TASK_DELAY);
}
/* Worker thread for the TCP checker: runs when the non-blocking connect
 * started by tcp_connect_thread() has progressed (success, failure or
 * timeout).  Updates the real-server state, sends alerts on state
 * transitions, and re-arms the periodic connect checker. */
int tcp_check_thread(thread_t * thread)
{
	checker_t *checker;
	tcp_checker_t *tcp_check;
	int status;

	checker = THREAD_ARG(thread);
	tcp_check = CHECKER_ARG(checker);

	/* NOTE(review): tcp_socket_state() presumably owns the fd on the
	 * error/timeout paths (closing it) and re-registers this thread
	 * while the connect is still in progress - confirm against its
	 * definition; only the success path closes the fd here. */
	status = tcp_socket_state(thread->u.fd, thread, tcp_check_thread);

	/* If status = connect_success, TCP connection to remote host is established.
	 * Otherwise we have a real connection error or connection timeout.
	 */
	if (status == connect_success) {
		close(thread->u.fd);

		/* Alert/update only on the DOWN -> UP transition. */
		if (!svr_checker_up(UP, checker->id, checker->rs)) {
			log_message(LOG_INFO, "TCP connection to [%s]:%d success."
					    , inet_sockaddrtos(&tcp_check->dst)
					    , ntohs(inet_sockaddrport(&tcp_check->dst)));
			smtp_alert(checker->rs, NULL, NULL,
				   "UP",
				   "=> TCP CHECK succeed on service <=");
			update_svr_checker_state(UP, checker->id
						   , checker->vs
						   , checker->rs);
		}
	} else {
		/* Alert/update only on the UP -> DOWN transition. */
		if (svr_checker_up(DOWN, checker->id, checker->rs)) {
			log_message(LOG_INFO, "TCP connection to [%s]:%d failed !!!"
					    , inet_sockaddrtos(&tcp_check->dst)
					    , ntohs(inet_sockaddrport(&tcp_check->dst)));
			smtp_alert(checker->rs, NULL, NULL,
				   "DOWN",
				   "=> TCP CHECK failed on service <=");
			update_svr_checker_state(DOWN, checker->id
						   , checker->vs
						   , checker->rs);
		}
	}

	/* Register next timer checker - but not while the connect is still
	 * pending, as the in-progress handler will call us back. */
	if (status != connect_in_progress)
		thread_add_timer(thread->master, tcp_connect_thread, checker,
				 checker->vs->delay_loop);
	return 0;
}
/* Handle a salt-driven rolling upgrade request: split the requested
 * real servers into two alternating halves per state (active/inactive),
 * take the first half out of IPVS, mark them as group-1 upgrading, and
 * echo the annotated node list back to the salt agent. */
void handle_salt_upgrade(salt_node_list_t *msg)
{
	int i, ret, len, index;
	int active_xor = 0, inactive_xor = 0;
	salt_ip_node_t *rs;
	salt_real_server_t *g_rs;

	/* Reject requests that name addresses outside the virtual server. */
	if(msg->node_num != 0 && !check_ipaddr_against_vs(msg))
		return;

	upgrade_phase = 1;
	/* Watchdog: keep the address set stable while upgrading. */
	thread_add_timer(master, protect_ipaddr_no_change, NULL, 20 * BOOTSTRAP_DELAY);

	for(i = 0; i < msg->node_num; i++) {
		rs = &msg->entry[i];
		index = check_ipaddr_exist(rs->ipaddr);
		g_rs = &global_vs->entrytable[index];

		/* Active nodes (state 1): every other one goes into the
		 * first upgrade group; the rest are held back. */
		if(g_rs->state == 1 && active_xor == 0){
			active_xor = 1;
			ipvs_salt_del_rs(g_rs, 0);
			set_node_upgrade(index, 1); //group 1 under upgrading
			rs->dispathing_rate = 0xFFF3; //allow upgrade
			continue;
		} else if(g_rs->state == 1 && active_xor == 1){
			active_xor = 0;
			rs->dispathing_rate = 0;//avoid invalid data
			continue;
		}

		/* Same alternation for inactive nodes (state 2). */
		if(g_rs->state == 2 && inactive_xor == 0){
			inactive_xor = 1;
			set_node_upgrade(index, 1); //group 1 under upgrading
			rs->dispathing_rate = 0xFFF3; //allow upgrade
		} else if(g_rs->state == 2 && inactive_xor == 1){
			inactive_xor = 0;
			rs->dispathing_rate = 0;//avoid invalid data
		}
	}

	len = sizeof(salt_node_list_t) + msg->node_num * sizeof(salt_ip_node_t);
	ret = send(salt_sock_fd, msg, len, 0);
	/* NOTE(review): the log arguments look wrong - "org len" is passed
	 * sizeof(salt_node_list_t) (a size_t printed with %d) rather than
	 * len, and "sending len" is passed len rather than ret; "sned" is
	 * also a typo.  Left untouched in this comment-only pass. */
	if (len != ret)
		log_message(LOG_ERR, "sned partial upgrade data to salt, org len=%d, sending len=%d", sizeof(salt_node_list_t), len);
	/* NOTE(review): the function's closing brace appears to be missing
	 * from this chunk - verify against the full file. */
/* Second phase of the rolling upgrade: once at least one group-1 node
 * has come back up (upgrade state 3), take the remaining nodes out of
 * IPVS as group 2 and notify salt; otherwise poll again later. */
int upgrade_phase2(thread_t * thread)
{
	int i, ret, len, index;
	salt_ip_node_t *rs;
	salt_node_list_t *msg = (salt_node_list_t *)msgbuff;
	salt_real_server_t *g_rs;

	/* Phase was reset elsewhere: the upgrade timed out, abort. */
	if(upgrade_phase == 0){
		log_message(LOG_ERR, "upgrade process timeout, cancel this upgrade.");
		return 0;
	}

	//at least one new upgraded node is available, then can continue to upgrade the rest nodes
	for(i = 0; i < global_vs->num_dests; i++) {
		if(check_node_upgrade(i, 3)) {
			upgrade_phase = 3;
			break;
		}
	}
	if(upgrade_phase != 3){
		log_message(LOG_ERR, "no active node so far among upgraded nodes, wait for up");
		/* Re-arm ourselves and poll again after the delay. */
		thread_add_timer(master, upgrade_phase2, NULL, BOOTSTRAP_DELAY);
		return 0;
	}

	for(i = 0; i < msg->node_num; i++) {
		rs = &msg->entry[i];
		index = check_ipaddr_exist(rs->ipaddr);
		g_rs = &global_vs->entrytable[index];
		/* Skip nodes already upgraded (3) or still in group 1 (1). */
		if(check_node_upgrade(index, 3) || check_node_upgrade(index, 1)){
			rs->dispathing_rate = 0; //avoid invalid data
			continue;
		}
		ipvs_salt_del_rs(g_rs, 0);
		set_node_upgrade(index, 2); //group 2 under upgrading
		rs->dispathing_rate = 0xFFF3; //allow upgrade
	}

	len = sizeof(salt_node_list_t) + msg->node_num * sizeof(salt_ip_node_t);
	ret = send(salt_sock_fd, msg, len, 0);
	/* NOTE(review): suspicious log arguments - sizeof() printed with %d,
	 * and len passed where ret seems intended; "sned" is a typo.  Left
	 * untouched in this comment-only pass. */
	if (len != ret)
		log_message(LOG_ERR, "sned partial upgrade data to salt, org len=%d, sending len=%d", sizeof(salt_node_list_t), len);
	return 1;
	/* NOTE(review): the function's closing brace appears to be missing
	 * from this chunk - verify against the full file. */
/* BGP scan thread. This thread check nexthop reachability. */ static int bgp_scan_timer (struct thread *t) { bgp_scan_thread = thread_add_timer (master, bgp_scan_timer, NULL, bgp_scan_interval); if (BGP_DEBUG (events, EVENTS)) zlog_debug ("Performing BGP general scanning"); bgp_scan (AFI_IP, SAFI_UNICAST); #ifdef HAVE_IPV6 bgp_scan (AFI_IP6, SAFI_UNICAST); #endif /* HAVE_IPV6 */ return 0; }
/* Bootstrap the UDP channel to the local salt agent: bind 127.0.0.1:35938,
 * connect to 127.0.0.1:35937, make the socket non-blocking, and register
 * the read handler.  On any failure, retry after BOOTSTRAP_DELAY. */
int start_comm_salt(thread_t * thread)
{
	struct sockaddr_in srcaddr, dstaddr;
	int sock_fd;
	int flags;
	int ret = -1;

	sock_fd = socket(AF_INET, SOCK_DGRAM, 0);
	if(sock_fd == -1) {
		log_message(LOG_ERR, "not able to create socket: %s", ipvs_strerror(errno));
		goto thread_add;
	}

	/* NOTE(review): srcaddr/dstaddr are not zeroed (no memset), so
	 * sin_zero is left uninitialized - usually harmless but untidy. */
	srcaddr.sin_family = AF_INET;
	srcaddr.sin_port = htons(35938);
	/* NOTE(review): INADDR_LOOPBACK is a host-order constant; this is
	 * normally written htonl(INADDR_LOOPBACK).  As written the address
	 * is byte-swapped on little-endian hosts - confirm intent. */
	srcaddr.sin_addr.s_addr = INADDR_LOOPBACK;
	if(bind(sock_fd,(struct sockaddr *)&srcaddr, sizeof(srcaddr)) == -1) {
		close(sock_fd);
		log_message(LOG_ERR, "binding salt error: %s", ipvs_strerror(errno));
		goto thread_add;
	}

	//set nonblock
	flags = fcntl(sock_fd, F_GETFL, 0);
	fcntl(sock_fd, F_SETFL, flags | O_NONBLOCK);

	dstaddr.sin_family = AF_INET;
	dstaddr.sin_port = htons(35937);
	dstaddr.sin_addr.s_addr = INADDR_LOOPBACK;
	ret = connect(sock_fd, (struct sockaddr *)&dstaddr, sizeof(dstaddr));

thread_add:
	if(ret < 0)
		/* Setup failed: retry the whole bootstrap later. */
		thread_add_timer(master, start_comm_salt, NULL, BOOTSTRAP_DELAY);
	else {
		salt_sock_fd = sock_fd;
		thread_add_read(master, lbd_rcvmsg_salt, 0, sock_fd, BOOTSTRAP_DELAY);
	}
	return 0;
	/* NOTE(review): the function's closing brace appears to be missing
	 * from this chunk - verify against the full file. */
/* register checkers to the global I/O scheduler */ void register_checkers_thread(void) { checker_t *checker; element e; for (e = LIST_HEAD(checkers_queue); e; ELEMENT_NEXT(e)) { checker = ELEMENT_DATA(e); log_message(LOG_INFO, "Activating healthchecker for service [%s]:%d" , inet_sockaddrtos(&checker->rs->addr) , ntohs(inet_sockaddrport(&checker->rs->addr))); CHECKER_ENABLE(checker); if (checker->launch) thread_add_timer(master, checker->launch, checker, BOOTSTRAP_DELAY); } }
/* register checkers to the global I/O scheduler */ void register_checkers_thread(void) { checker *checker_obj; element e; for (e = LIST_HEAD(checkers_queue); e; ELEMENT_NEXT(e)) { checker_obj = ELEMENT_DATA(e); log_message(LOG_INFO, "Activating healtchecker for service [%s:%d]", inet_ntop2(CHECKER_RIP(checker_obj)), ntohs(CHECKER_RPORT(checker_obj))); CHECKER_ENABLE(checker_obj); if (checker_obj->launch) thread_add_timer(master, checker_obj->launch, checker_obj, BOOTSTRAP_DELAY); } }
void smux_event (enum smux_event event, int sock) { switch (event) { case SMUX_SCHEDULE: smux_connect_thread = thread_add_event (master, smux_connect, NULL, 0); break; case SMUX_CONNECT: smux_connect_thread = thread_add_timer (master, smux_connect, NULL, 10); break; case SMUX_READ: smux_read_thread = thread_add_read (master, smux_read, NULL, sock); break; default: break; } }
/* timer thread to check signals. Shouldnt be needed */ int quagga_signal_timer (struct thread *t) { struct quagga_sigevent_master_t *sigm; struct quagga_signal_t *sig; int i; sigm = THREAD_ARG (t); if(sigm == NULL) { zlog_warn ("In func %s get THREAD_ARG error\n",__func__); return 0; } sigm->t = thread_add_timer (sigm->t->master, quagga_signal_timer, &sigmaster, QUAGGA_SIGNAL_TIMER_INTERVAL); return quagga_sigevent_process (); }
int hello_received(struct thread * thread) { struct ospf6_neighbor * on; on = THREAD_ARG(thread); assert(on); printf("Hello Received\n"); /* reset Inactivity Timer */ THREAD_OFF (on->inactivity_timer); on->inactivity_timer = thread_add_timer (master, inactivity_timer, on, on->ospf6_if->dead_interval); if(on->state <= OSPF6_NEIGHBOR_DOWN) ospf6_neighbor_state_change(OSPF6_NEIGHBOR_INIT, on); return 0; }