static int sync_thread(void *startup) { DECLARE_WAITQUEUE(wait, current); mm_segment_t oldmm; int state; MOD_INC_USE_COUNT; daemonize(); oldmm = get_fs(); set_fs(KERNEL_DS); if (ip_vs_sync_state == IP_VS_STATE_MASTER) sprintf(current->comm, "ipvs_syncmaster"); else if (ip_vs_sync_state == IP_VS_STATE_BACKUP) sprintf(current->comm, "ipvs_syncbackup"); else IP_VS_BUG(); spin_lock_irq(¤t->sigmask_lock); siginitsetinv(¤t->blocked, 0); recalc_sigpending(current); spin_unlock_irq(¤t->sigmask_lock); /* set up multicast address */ mcast_addr.sin_family = AF_INET; mcast_addr.sin_port = htons(IP_VS_SYNC_PORT); mcast_addr.sin_addr.s_addr = htonl(IP_VS_SYNC_GROUP); add_wait_queue(&sync_wait, &wait); state = ip_vs_sync_state; sync_pid = current->pid; IP_VS_INFO("sync thread started.\n"); complete((struct completion *)startup); /* processing master/backup loop here */ if (state == IP_VS_STATE_MASTER) sync_master_loop(); else if (state == IP_VS_STATE_BACKUP) sync_backup_loop(); else IP_VS_BUG(); remove_wait_queue(&sync_wait, &wait); /* thread exits */ sync_pid = 0; IP_VS_INFO("sync thread stopped!\n"); set_fs(oldmm); MOD_DEC_USE_COUNT; stop_sync = 0; wake_up(&stop_sync_wait); return 0; }
/* * Unregister a scheduler from the scheduler list */ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler) { if (!scheduler) { IP_VS_ERR( "unregister_ip_vs_scheduler(): NULL arg\n"); return -EINVAL; } write_lock_bh(&__ip_vs_sched_lock); if (scheduler->n_list.next == &scheduler->n_list) { write_unlock_bh(&__ip_vs_sched_lock); IP_VS_ERR("unregister_ip_vs_scheduler(): [%s] scheduler " "is not in the list. failed\n", scheduler->name); return -EINVAL; } /* * Remove it from the d-linked scheduler list */ list_del(&scheduler->n_list); write_unlock_bh(&__ip_vs_sched_lock); MOD_DEC_USE_COUNT; IP_VS_INFO("[%s] scheduler unregistered.\n", scheduler->name); return 0; }
/*
 * Register every compiled-in transport protocol handler and log the
 * resulting list.  Always returns 0.
 *
 * Each registration appends ", <name>" to the scratch buffer; the log
 * line starts at &protocols[2] to skip the leading ", ".  Writing
 * '\0' at index 2 as well guarantees that the printed string is a
 * terminated empty string even when no protocol is configured in.
 */
int __init ip_vs_protocol_init(void)
{
	char protocols[64];
#define REGISTER_PROTOCOL(p)						\
	do {								\
		register_ip_vs_protocol(p);				\
		sprintf(protocols + strlen(protocols), ", %s", (p)->name); \
	} while (0)

	protocols[0] = '\0';
	protocols[2] = '\0';
#ifdef CONFIG_IP_VS_PROTO_TCP
	REGISTER_PROTOCOL(&ip_vs_protocol_tcp);
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
	REGISTER_PROTOCOL(&ip_vs_protocol_udp);
#endif
#ifdef CONFIG_IP_VS_PROTO_AH
	REGISTER_PROTOCOL(&ip_vs_protocol_ah);
#endif
#ifdef CONFIG_IP_VS_PROTO_ESP
	REGISTER_PROTOCOL(&ip_vs_protocol_esp);
#endif
	IP_VS_INFO("Registered protocols (%s)\n", &protocols[2]);

	return 0;
}
/*
 * Ask the running sync thread to exit and sleep until it does.
 *
 * Returns 0 on success, or -ESRCH when no sync thread is running
 * (sync_pid == 0).
 *
 * Ordering matters below: we mark ourselves sleeping and join the
 * stop_sync_wait queue *before* raising stop_sync and waking the sync
 * thread, so its wake_up(&stop_sync_wait) cannot be lost.
 */
int stop_sync_thread(void)
{
	DECLARE_WAITQUEUE(wait, current);

	if (!sync_pid)
		return -ESRCH;

	IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, current->pid);
	IP_VS_INFO("stopping sync thread %d ...\n", sync_pid);

	__set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&stop_sync_wait, &wait);
	ip_vs_sync_state = IP_VS_STATE_NONE;
	stop_sync = 1;
	/* kick the sync thread out of its wait so it sees stop_sync */
	wake_up(&sync_wait);
	schedule();
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&stop_sync_wait, &wait);

	/* Note: no need to reap the sync thread, because its parent
	   process is the init process */

	/* the sync thread clears stop_sync on exit; still set is a bug */
	if (stop_sync)
		IP_VS_BUG();

	return 0;
}
/*
 * Module unload hook: tear everything down in the reverse order of
 * ip_vs_init() — netfilter hooks first (stop new traffic entering
 * IPVS), then the application helper, connection table, and control
 * interface.
 */
static void __exit ip_vs_cleanup(void)
{
	nf_unregister_hook(&ip_vs_forward_icmp_ops);
	nf_unregister_hook(&ip_vs_post_routing_ops);
	nf_unregister_hook(&ip_vs_out_ops);
	nf_unregister_hook(&ip_vs_in_ops);
	ip_vs_app_cleanup();
	ip_vs_conn_cleanup();
	ip_vs_control_cleanup();
	IP_VS_INFO("ipvs unloaded.\n");
}
/* * Weighted Round-Robin Scheduling */ static struct ip_vs_dest * ip_vs_wrr_schedule(struct ip_vs_service *svc, struct iphdr *iph) { struct ip_vs_dest *dest; struct ip_vs_wrr_mark *mark = svc->sched_data; IP_VS_DBG(6, "ip_vs_wrr_schedule(): Scheduling...\n"); /* * This loop will always terminate, because 0<mark->cw<max_weight, * and at least one server has its weight equal to max_weight. */ write_lock(&svc->sched_lock); while (1) { if (mark->cl == &svc->destinations) { /* it is at the head of the destination list */ if (mark->cl == mark->cl->next) { /* no dest entry */ write_unlock(&svc->sched_lock); return NULL; } mark->cl = svc->destinations.next; mark->cw -= mark->di; if (mark->cw <= 0) { mark->cw = mark->mw; /* * Still zero, which means no available servers. */ if (mark->cw == 0) { mark->cl = &svc->destinations; write_unlock(&svc->sched_lock); IP_VS_INFO("ip_vs_wrr_schedule(): " "no available servers\n"); return NULL; } } } else mark->cl = mark->cl->next; if (mark->cl != &svc->destinations) { /* not at the head of the list */ dest = list_entry(mark->cl, struct ip_vs_dest, n_list); if (atomic_read(&dest->weight) >= mark->cw) { write_unlock(&svc->sched_lock); break; } } }
/* * Register a scheduler in the scheduler list */ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler) { struct ip_vs_scheduler *sched; if (!scheduler) { IP_VS_ERR("register_ip_vs_scheduler(): NULL arg\n"); return -EINVAL; } if (!scheduler->name) { IP_VS_ERR("register_ip_vs_scheduler(): NULL scheduler_name\n"); return -EINVAL; } MOD_INC_USE_COUNT; /* * Make sure that the scheduler with this name doesn't exist * in the scheduler list. */ sched = ip_vs_sched_getbyname(scheduler->name); if (sched) { ip_vs_scheduler_put(sched); MOD_DEC_USE_COUNT; IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler " "already existed in the system\n", scheduler->name); return -EINVAL; } write_lock_bh(&__ip_vs_sched_lock); if (scheduler->n_list.next != &scheduler->n_list) { write_unlock_bh(&__ip_vs_sched_lock); MOD_DEC_USE_COUNT; IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler " "already linked\n", scheduler->name); return -EINVAL; } /* * Add it into the d-linked scheduler list */ list_add(&scheduler->n_list, &ip_vs_schedulers); write_unlock_bh(&__ip_vs_sched_lock); IP_VS_INFO("[%s] scheduler registered.\n", scheduler->name); return 0; }
/*
 * Initialize IP Virtual Server.
 *
 * Brings subsystems up in dependency order — control interface,
 * protocols, application helper, connection table — and then installs
 * the four netfilter hooks last, so no packet can reach IPVS before
 * its tables exist.  On any failure the goto cascade unwinds exactly
 * the steps already completed, in reverse order.  Returns 0 on
 * success or the first failing step's negative error code.
 */
static int __init ip_vs_init(void)
{
	int ret;

	ret = ip_vs_control_init();
	if (ret < 0) {
		IP_VS_ERR("can't setup control.\n");
		goto cleanup_nothing;
	}

	/* NOTE(review): return value not checked here, unlike the other
	   steps — ip_vs_protocol_init() currently always returns 0 */
	ip_vs_protocol_init();

	ret = ip_vs_app_init();
	if (ret < 0) {
		IP_VS_ERR("can't setup application helper.\n");
		goto cleanup_protocol;
	}

	ret = ip_vs_conn_init();
	if (ret < 0) {
		IP_VS_ERR("can't setup connection table.\n");
		goto cleanup_app;
	}

	/* hooks go in last: from here on packets may enter IPVS */
	ret = nf_register_hook(&ip_vs_in_ops);
	if (ret < 0) {
		IP_VS_ERR("can't register in hook.\n");
		goto cleanup_conn;
	}

	ret = nf_register_hook(&ip_vs_out_ops);
	if (ret < 0) {
		IP_VS_ERR("can't register out hook.\n");
		goto cleanup_inops;
	}

	ret = nf_register_hook(&ip_vs_post_routing_ops);
	if (ret < 0) {
		IP_VS_ERR("can't register post_routing hook.\n");
		goto cleanup_outops;
	}

	ret = nf_register_hook(&ip_vs_forward_icmp_ops);
	if (ret < 0) {
		IP_VS_ERR("can't register forward_icmp hook.\n");
		goto cleanup_postroutingops;
	}

	IP_VS_INFO("ipvs loaded.\n");
	return ret;

	/* error unwind: each label undoes everything done above it */
  cleanup_postroutingops:
	nf_unregister_hook(&ip_vs_post_routing_ops);
  cleanup_outops:
	nf_unregister_hook(&ip_vs_out_ops);
  cleanup_inops:
	nf_unregister_hook(&ip_vs_in_ops);
  cleanup_conn:
	ip_vs_conn_cleanup();
  cleanup_app:
	ip_vs_app_cleanup();
  cleanup_protocol:
	ip_vs_protocol_cleanup();
	ip_vs_control_cleanup();
  cleanup_nothing:
	return ret;
}