Example #1
static void
ipvs_laddr_group_cmd(int cmd, local_addr_group *laddr_group)
{
	local_addr_entry *laddr_entry;
	list l;
	element e;

	if (!laddr_group)
		return;

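	/* Note: laddr_rule is assumed to be a file-scope ipvs_laddr_t request
	 * buffer, declared elsewhere in this module and consumed by ipvs_talk(). */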
	l = laddr_group->addr_ip;
	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		laddr_entry = ELEMENT_DATA(e);
		memset(laddr_rule, 0, sizeof(ipvs_laddr_t));
		laddr_rule->af = laddr_entry->addr.ss_family;
		if (laddr_entry->addr.ss_family == AF_INET6)
			inet_sockaddrip6(&laddr_entry->addr, &laddr_rule->addr.in6);
		else
			laddr_rule->addr.ip = inet_sockaddrip4(&laddr_entry->addr);
		ipvs_talk(cmd);
	}

	l = laddr_group->range;
	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		laddr_entry = ELEMENT_DATA(e);
		ipvs_laddr_range_cmd(cmd, laddr_entry);
	}
}
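All of these examples traverse keepalived's generic doubly linked lists with the same idiom. The sketch below paraphrases the primitives involved, modeled on keepalived's lib/list.h; the macro bodies are illustrative assumptions, not verbatim source. Note that ELEMENT_NEXT(e) assigns e = e->next, which is why it works as the loop's increment expression.

typedef struct _element *element;
typedef struct _list *list;

struct _element {
	struct _element *next;
	struct _element *prev;
	void *data;
};

struct _list {
	struct _element *head;
	struct _element *tail;
	unsigned int count;
};

#define LIST_ISEMPTY(L)	((L) == NULL || ((L)->head == NULL && (L)->tail == NULL))
#define LIST_SIZE(L)	((L)->count)
#define LIST_HEAD(L)	((L) ? (L)->head : NULL)
#define ELEMENT_DATA(E)	((E)->data)
#define ELEMENT_NEXT(E)	((E) = (E)->next)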
Example #2
vrrp_rt *
vrrp_index_lookup(const int vrid, const int fd)
{
	vrrp_rt *vrrp;
	element e;
	list l = &vrrp_data->vrrp_index[vrid];

	/* return if list is empty */
	if (LIST_ISEMPTY(l))
		return NULL;

	/*
	 * If the list size is 1 there is no collision, so
	 * test and return the singleton.
	 */
	if (LIST_SIZE(l) == 1) {
		vrrp = ELEMENT_DATA(LIST_HEAD(l));
		return (vrrp->fd_in == fd) ? vrrp : NULL;
	}

	/*
	 * Collision on the vrid bucket: the same vrid is used
	 * on a different interface, so perform an fd lookup
	 * to resolve the collision.
	 */
	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		vrrp = ELEMENT_DATA(e);
		if (vrrp->fd_in == fd)
			return vrrp;
	}

	/* No match */
	return NULL;
}
Example #3
/* set IPVS group rules */
static int
ipvs_group_cmd(int cmd, list vs_group, real_server * rs, char * vsgname)
{
    virtual_server_group *vsg = ipvs_get_group_by_name(vsgname, vs_group);
    virtual_server_group_entry *vsg_entry;
    list l;
    element e;
    int err = 1;

    /* return if the group lookup fails */
    if (!vsg) return -1;

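    /* Note: urule is assumed to be a file-scope IPVS request structure,
     * declared elsewhere in this module, filled in below and sent to
     * the kernel by ipvs_talk(). */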
    /* visit addr_ip list */
    l = vsg->addr_ip;
    for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
        vsg_entry = ELEMENT_DATA(e);
        urule->vaddr = SVR_IP(vsg_entry);
        urule->vport = SVR_PORT(vsg_entry);

        /* Talk to the IPVS channel */
        if (IPVS_ALIVE(cmd, vsg_entry, rs)) {
            err = ipvs_talk(cmd);
            IPVS_SET_ALIVE(cmd, vsg_entry);
        }
    }

    /* visit vfwmark list */
    l = vsg->vfwmark;
    urule->vaddr = 0;
    urule->vport = 0;
    for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
        vsg_entry = ELEMENT_DATA(e);
        urule->vfwmark = vsg_entry->vfwmark;

        /* Talk to the IPVS channel */
        if (IPVS_ALIVE(cmd, vsg_entry, rs)) {
            err = ipvs_talk(cmd);
            IPVS_SET_ALIVE(cmd, vsg_entry);
        }
    }

    /* visit range list */
    l = vsg->range;
    urule->vfwmark = 0;
    for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
        vsg_entry = ELEMENT_DATA(e);

        /* Talk to the IPVS channel */
        if (IPVS_ALIVE(cmd, vsg_entry, rs)) {
            err = ipvs_group_range_cmd(cmd, vsg_entry);
            IPVS_SET_ALIVE(cmd, vsg_entry);
        }
    }

    return err;
}
Example #4
/* Sync checkers activity with netlink kernel reflection */
void
update_checker_activity(uint32_t address, int enable)
{
	checker *checker_obj;
	element e;

	/* Display netlink operation */
	if (debug & 32)
		log_message(LOG_INFO, "Netlink reflector reports IP %s %s",
		       inet_ntop2(address), (enable) ? "added" : "removed");

	/* Processing Healthcheckers queue */
	if (!LIST_ISEMPTY(checkers_queue))
		for (e = LIST_HEAD(checkers_queue); e; ELEMENT_NEXT(e)) {
			checker_obj = ELEMENT_DATA(e);
			if (CHECKER_VIP(checker_obj) == address && CHECKER_HA_SUSPEND(checker_obj)) {
				if (!CHECKER_ENABLED(checker_obj) && enable)
					log_message(LOG_INFO,
					       "Activating healtchecker for service [%s:%d]",
					       inet_ntop2(CHECKER_RIP(checker_obj)),
					       ntohs(CHECKER_RPORT(checker_obj)));
				if (CHECKER_ENABLED(checker_obj) && !enable)
					log_message(LOG_INFO,
					       "Suspending healtchecker for service [%s:%d]",
					       inet_ntop2(CHECKER_RIP(checker_obj)),
					       ntohs(CHECKER_RPORT(checker_obj)));
				checker_obj->enabled = enable;
			}
		}
}
Example #5
bool
find_rttables_table(const char *name, unsigned int *id)
{
	element e;
	char	*endptr;

	*id = strtoul(name, &endptr, 0);
	if (endptr != name && *endptr == '\0')
		return true;

	if (!rt_list && !read_rttables())
		return false;

	if (LIST_ISEMPTY(rt_list))
		return false;

	for (e = LIST_HEAD(rt_list); e; ELEMENT_NEXT(e)) {
		rt_entry_t *rte = ELEMENT_DATA(e);

		if (!strcmp(rte->name, name)) {
			*id = rte->id;
			return true;
		}
	}
	return false;
}
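find_rttables_table() accepts either a numeric table id or a symbolic name resolved against the parsed rt_tables list. A hypothetical call site (use_table() is a made-up consumer):

	unsigned int table_id;

	/* Numeric form: parsed directly by strtoul() */
	if (find_rttables_table("254", &table_id))
		use_table(table_id);

	/* Symbolic form: looked up in rt_list; "main" conventionally maps to 254 */
	if (find_rttables_table("main", &table_id))
		use_table(table_id);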
Example #6
/* Set a realserver IPVS rules */
static int
init_service_rs(virtual_server_t * vs)
{
	element e;
	real_server_t *rs;

	for (e = LIST_HEAD(vs->rs); e; ELEMENT_NEXT(e)) {
		rs = ELEMENT_DATA(e);
		/* Do not re-add failed RS instantly on reload */
		if (rs->reloaded) {
			/* force re-adding of the rs into vs_group:
			 * we may have new vsg entries */
			if (vs->vsgname)
				UNSET_ALIVE(rs);
			continue;
		}
		/* In alpha mode, be pessimistic (or realistic?) and don't
		 * add real servers into the VS pool. They will get there
		 * later upon healthchecks recovery (if ever).
		 */
		if (vs->alpha) {
			UNSET_ALIVE(rs);
			continue;
		}
		if (!ISALIVE(rs)) {
			if (!ipvs_cmd(LVS_CMD_ADD_DEST, check_data->vs_group, vs, rs))
				return 0;
			SET_ALIVE(rs);
		}
	}

	return 1;
}
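Example #7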
/* Add/Delete a list of IP addresses */
void
netlink_iplist(list ip_list, int cmd)
{
	ip_address_t *ipaddr;
	element e;

	/* No addresses in this list */
	if (LIST_ISEMPTY(ip_list))
		return;

	/*
	 * If "--dont-release-vrrp" is set then try to release addresses
	 * that may be there, even if we didn't set them.
	 */
	for (e = LIST_HEAD(ip_list); e; ELEMENT_NEXT(e)) {
		ipaddr = ELEMENT_DATA(e);
		if ((cmd == IPADDRESS_ADD && !ipaddr->set) ||
		    (cmd == IPADDRESS_DEL &&
		     (ipaddr->set || __test_bit(DONT_RELEASE_VRRP_BIT, &debug)))) {
			if (netlink_ipaddress(ipaddr, cmd) > 0)
				ipaddr->set = !(cmd == IPADDRESS_DEL);
			else
				ipaddr->set = false;
		}
	}
}
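The guard inside the loop compresses a small decision table (set = address currently installed):

	cmd            ipaddr->set  DONT_RELEASE_VRRP_BIT  action
	IPADDRESS_ADD  false        -                      add the address
	IPADDRESS_ADD  true         -                      skip (already installed)
	IPADDRESS_DEL  true         -                      delete the address
	IPADDRESS_DEL  false        set                    delete anyway (forced release)
	IPADDRESS_DEL  false        clear                  skip

On success, ipaddr->set is updated to reflect the new state; on failure it is cleared.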
Example #8
/* register checkers to the global I/O scheduler */
void
register_checkers_thread(void)
{
	checker_t *checker;
	element e;
	long warmup;

	for (e = LIST_HEAD(checkers_queue); e; ELEMENT_NEXT(e)) {
		checker = ELEMENT_DATA(e);
		log_message(LOG_INFO, "Activating healthchecker for service %s"
				    , FMT_CHK(checker));
		CHECKER_ENABLE(checker);
		if (checker->launch)
		{
			/* Wait a random time before starting the checker
			   thread; this helps avoid firing simultaneous
			   checks at the same RS.
			*/
			warmup = checker->warmup;
			if (warmup)
				warmup = warmup * rand() / RAND_MAX;
			thread_add_timer(master, checker->launch, checker,
					 BOOTSTRAP_DELAY + warmup);
		}
	}
}
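One caveat: on platforms where long is 32 bits, the intermediate product warmup * rand() can overflow for large warmup values. A hypothetical overflow-safer variant of the scaling (assuming <stdlib.h>):

/* Hypothetical helper: scale warmup by a random factor in [0,1]
 * using floating point, avoiding the long multiplication overflow. */
static long
scaled_warmup(long warmup)
{
	if (warmup <= 0)
		return 0;
	return (long)((double)rand() / RAND_MAX * (double)warmup);
}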
Example #9
void
netlink_rulelist(list rule_list, int cmd, bool force)
{
	ip_rule_t *iprule;
	element e;

	/* No rules to add */
	if (LIST_ISEMPTY(rule_list))
		return;

	/* If force is set, we try to remove all the rules, but the
	 * rule might not exist. That's not an error, so indicate not
	 * to report such a situation */
	if (force && cmd == IPRULE_DEL)
		netlink_error_ignore = ENOENT;

	for (e = LIST_HEAD(rule_list); e; ELEMENT_NEXT(e)) {
		iprule = ELEMENT_DATA(e);
		if (force ||
		    (cmd && !iprule->set) ||
		    (!cmd && iprule->set)) {
			if (netlink_rule(iprule, cmd) > 0)
				iprule->set = (cmd) ? 1 : 0;
			else
				iprule->set = 0;
		}
	}

	netlink_error_ignore = 0;
}
Example #10
/* Set a realserver IPVS rules */
static int
init_service_rs(virtual_server_t * vs)
{
	element e;
	real_server_t *rs;

	if (LIST_ISEMPTY(vs->rs)) {
		log_message(LOG_WARNING, "VS [%s] has no configured RS! Skipping RS activation."
				       , FMT_VS(vs));
		return 1;
	}

	for (e = LIST_HEAD(vs->rs); e; ELEMENT_NEXT(e)) {
		rs = ELEMENT_DATA(e);

		if (rs->reloaded) {
			if (rs->iweight != rs->pweight)
				update_svr_wgt(rs->iweight, vs, rs, 0);
			/* Do not re-add failed RS instantly on reload */
			continue;
		}
		/* In alpha mode, be pessimistic (or realistic?) and don't
		 * add real servers into the VS pool. They will get there
		 * later upon healthchecks recovery (if ever).
		 */
		if (!vs->alpha && !ISALIVE(rs)) {
			ipvs_cmd(LVS_CMD_ADD_DEST, vs, rs);
			SET_ALIVE(rs);
		}
	}

	return 1;
}
Example #11
static void
init_if_linkbeat(void)
{
	interface_t *ifp;
	element e;
	int status;

	for (e = LIST_HEAD(if_queue); e; ELEMENT_NEXT(e)) {
		ifp = ELEMENT_DATA(e);
		ifp->lb_type = LB_IOCTL;
		status = if_mii_probe(ifp->ifname);
		if (status >= 0) {
			ifp->lb_type = LB_MII;
			ifp->linkbeat = (status) ? 1 : 0;
		} else {
			status = if_ethtool_probe(ifp->ifname);
			if (status >= 0) {
				ifp->lb_type = LB_ETHTOOL;
				ifp->linkbeat = (status) ? 1 : 0;
			}
		}

		/* Register new monitor thread */
		thread_add_timer(master, if_linkbeat_refresh_thread, ifp, POLLING_DELAY);
	}
}
Example #12
static void
sync_service_vsg(virtual_server_t * vs)
{
	virtual_server_group_t *vsg;
	virtual_server_group_entry_t *vsge;
	list *l;
	element e;

	vsg = vs->vsg;
	list ll[] = {
		vsg->addr_ip,
		vsg->vfwmark,
		vsg->range,
		NULL,
	};

	for (l = ll; *l; l++)
		for (e = LIST_HEAD(*l); e; ELEMENT_NEXT(e)) {
			vsge = ELEMENT_DATA(e);
			if (vs->reloaded && !vsge->reloaded) {
				log_message(LOG_INFO, "VS [%s:%d:%u] added into group %s"
						    , inet_sockaddrtopair(&vsge->addr)
						    , vsge->range
						    , vsge->vfwmark
						    , vs->vsgname);
				/* add all reloaded and alive/inhibit-set dests
				 * to the newly created vsg item */
				ipvs_group_sync_entry(vs, vsge);
			}
		}
}
Example #13
/* Set a realserver IPVS rules */
static int
init_service_rs(virtual_server * vs)
{
	element e;
	real_server *rs;

	for (e = LIST_HEAD(vs->rs); e; ELEMENT_NEXT(e)) {
		rs = ELEMENT_DATA(e);
		/* In alpha mode, be pessimistic (or realistic?) and don't
		 * add real servers into the VS pool. They will get there
		 * later upon healthchecks recovery (if ever).
		 */
		if (vs->alpha) {
			UNSET_ALIVE(rs);
			continue;
		}
		if (!ISALIVE(rs)) {
			if (!ipvs_cmd(LVS_CMD_ADD_DEST, check_data->vs_group, vs, rs))
				return 0;
			else
				SET_ALIVE(rs);
		} else if (vs->vsgname) {
			UNSET_ALIVE(rs);
			if (!ipvs_cmd(LVS_CMD_ADD_DEST, check_data->vs_group, vs, rs))
				return 0;
			SET_ALIVE(rs);
		}
	}

	return 1;
}
Example #14
void
vrrp_sync_master(vrrp_rt * vrrp)
{
	vrrp_rt *isync;
	vrrp_sgroup *vgroup = vrrp->sync;
	list l = vgroup->index_list;
	element e;

	if (GROUP_STATE(vgroup) == VRRP_STATE_MAST)
		return;

	log_message(LOG_INFO, "VRRP_Group(%s) Syncing instances to MASTER state",
	       GROUP_NAME(vgroup));

	/* Perform sync index */
	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		isync = ELEMENT_DATA(e);

		/* Send the higher priority advert on all synced instances */
		if (isync != vrrp && isync->state != VRRP_STATE_MAST) {
			isync->wantstate = VRRP_STATE_MAST;
			vrrp_state_goto_master(isync);
			vrrp_init_instance_sands(isync);
		}
	}
	vgroup->state = VRRP_STATE_MAST;
	vrrp_sync_smtp_notifier(vgroup);
	notify_group_exec(vgroup, VRRP_STATE_MAST);
}
Example #15
static void
vrrp_handler(vector_t *strvec)
{
	list l;
	element e;
	vrrp_t *vrrp;
	char *iname;

	if (vector_count(strvec) != 2) {
		log_message(LOG_INFO, "vrrp_instance must have a name");
		skip_block();
		return;
	}

	iname = vector_slot(strvec,1);

	/* Make sure the vrrp instance doesn't already exist */
	if (!LIST_ISEMPTY(vrrp_data->vrrp)) {
		l = vrrp_data->vrrp;
		for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
			vrrp = ELEMENT_DATA(e);
			if (!strcmp(iname,vrrp->iname)) {
				log_message(LOG_INFO, "vrrp instance %s already defined", iname );
				skip_block();
				return;
			}
		}
	}

	alloc_vrrp(iname);
}
Example #16
/* VRRP handlers */
static void
vrrp_sync_group_handler(vector_t *strvec)
{
	list l;
	element e;
	vrrp_sgroup_t *sg;
	char* gname;

	if (vector_count(strvec) != 2) {
		log_message(LOG_INFO, "vrrp_sync_group must have a name - skipping");
		skip_block();
		return;
	}

	gname = vector_slot(strvec, 1);

	/* check group doesn't already exist */
	if (!LIST_ISEMPTY(vrrp_data->vrrp_sync_group)) {
		l = vrrp_data->vrrp_sync_group;
		for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
			sg = ELEMENT_DATA(e);
			if (!strcmp(gname,sg->gname)) {
				log_message(LOG_INFO, "vrrp sync group %s already defined", gname);
				skip_block();
				return;
			}
		}
	}

	alloc_vrrp_sync_group(gname);
}
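Examples #15 and #16 repeat the same duplicate-name scan. A hypothetical helper, assuming <string.h>, <stdbool.h> and the list primitives sketched after Example #1, could factor it out:

/* Hypothetical: true if any element's name (extracted by get_name)
 * matches; both handlers above fit this shape. */
static bool
name_exists(list l, const char *name, const char *(*get_name)(void *))
{
	element e;

	if (LIST_ISEMPTY(l))
		return false;
	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e))
		if (!strcmp(name, get_name(ELEMENT_DATA(e))))
			return true;
	return false;
}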
Example #17
static bool
find_entry(const char *name, unsigned int *id, list *l, const char* file_name, const struct rt_entry* default_list, uint32_t max)
{
	element e;
	char	*endptr;
	unsigned long l_id;

	l_id = strtoul(name, &endptr, 0);
	*id = (unsigned int)l_id;
	if (endptr != name && *endptr == '\0')
		return (*id <= max);

	if (!(*l))
		initialise_list(l, file_name, default_list, max);

	if (LIST_ISEMPTY(*l))
		return false;

	for (e = LIST_HEAD(*l); e; ELEMENT_NEXT(e)) {
		rt_entry_t *rte = ELEMENT_DATA(e);

		if (!strcmp(rte->name, name)) {
			*id = rte->id;
			return true;
		}
	}
	return false;
}
Example #18
static void
add_default(list *l, const struct rt_entry* default_list)
{
	bool found;
	rt_entry_t *rte;
	element e;

	for (;default_list->name; default_list++) {
		for (e = LIST_HEAD(*l), found = false; e; ELEMENT_NEXT(e)) {
			rte = ELEMENT_DATA(e);

			if (rte->id == default_list->id) {
				found = true;
				break;
			}
		}

		if (found)
			continue;

		rte = MALLOC(sizeof(rt_entry_t));
		rte->name = MALLOC(strlen(default_list->name) + 1);
		if (!rte->name) {
			FREE(rte);
			return;
		}

		strcpy(rte->name, default_list->name);
		rte->id = default_list->id;

		list_add(*l, rte);
	}
}
Example #19
static void
add_nexthops(ip_route_t *route, struct nlmsghdr *nlh, struct rtmsg *rtm)
{
	char buf[ENCAP_RTA_SIZE];
	struct rtattr *rta = (void *)buf;
	struct rtnexthop *rtnh;
	nexthop_t *nh;
	element e;

	rta->rta_type = RTA_MULTIPATH;
	rta->rta_len = RTA_LENGTH(0);
	rtnh = RTA_DATA(rta);

	for (e = LIST_HEAD(route->nhs); e; ELEMENT_NEXT(e)) {
		nh = ELEMENT_DATA(e);

		memset(rtnh, 0, sizeof(*rtnh));
		rtnh->rtnh_len = sizeof(*rtnh);
		rta->rta_len += rtnh->rtnh_len;
		add_nexthop(nh, nlh, rtm, rta, sizeof(buf), rtnh);
		rtnh = RTNH_NEXT(rtnh);
	}

	if (rta->rta_len > RTA_LENGTH(0))
		addattr_l(nlh, sizeof(buf), RTA_MULTIPATH, RTA_DATA(rta), RTA_PAYLOAD(rta));
}
Example #20
void
vrrp_sync_master_election(vrrp_rt * vrrp)
{
	vrrp_rt *isync;
	vrrp_sgroup *vgroup = vrrp->sync;
	list l = vgroup->index_list;
	element e;

	if (vrrp->wantstate != VRRP_STATE_GOTO_MASTER)
		return;
	if (GROUP_STATE(vgroup) == VRRP_STATE_FAULT)
		return;

	log_message(LOG_INFO, "VRRP_Group(%s) Transition to MASTER state",
	       GROUP_NAME(vgroup));

	/* Perform sync index */
	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		isync = ELEMENT_DATA(e);
		if (isync != vrrp && isync->wantstate != VRRP_STATE_GOTO_MASTER) {
			/* Force a new protocol master election */
			isync->wantstate = VRRP_STATE_GOTO_MASTER;
			log_message(LOG_INFO,
			       "VRRP_Instance(%s) forcing a new MASTER election",
			       isync->iname);
			vrrp_send_adv(isync, isync->effective_priority);
		}
	}
}
Example #21
/* Update checker's state */
void
update_svr_checker_state(int alive, checker_id_t cid, virtual_server *vs, real_server *rs)
{
	element e;
	list l = rs->failed_checkers;
	checker_id_t *id;

	/* Handle alive state. Depopulate failed_checkers and call
	 * perform_svr_state() independently, letting the latter sort
	 * things out itself.
	 */
	if (alive) {
		/* Remove the succeeded check from failed_checkers list. */
		for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
			id = ELEMENT_DATA(e);
			if (*id == cid) {
				free_list_element(l, e);
				/* If we don't break, the next iteration will trigger
				 * a SIGSEGV.
				 */
				break;
			}
		}
		if (LIST_SIZE(l) == 0)
			perform_svr_state(alive, vs, rs);
	}
	/* Handle not alive state */
	else {
		id = (checker_id_t *) MALLOC(sizeof(checker_id_t));
		*id = cid;
		list_add(l, id);
		if (LIST_SIZE(l) == 1)
			perform_svr_state(alive, vs, rs);
	}
}
Example #22
void
vrrp_sync_backup(vrrp_rt * vrrp)
{
	vrrp_rt *isync;
	vrrp_sgroup *vgroup = vrrp->sync;
	list l = vgroup->index_list;
	element e;

	if (GROUP_STATE(vgroup) == VRRP_STATE_BACK)
		return;

	log_message(LOG_INFO, "VRRP_Group(%s) Syncing instances to BACKUP state",
	       GROUP_NAME(vgroup));

	/* Perform sync index */
	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		isync = ELEMENT_DATA(e);
		if (isync != vrrp && isync->state != VRRP_STATE_BACK) {
			isync->wantstate = VRRP_STATE_BACK;
			vrrp_state_leave_master(isync);
			vrrp_init_instance_sands(isync);
		}
	}
	vgroup->state = VRRP_STATE_BACK;
	vrrp_sync_smtp_notifier(vgroup);
	notify_group_exec(vgroup, VRRP_STATE_BACK);
}
Example #23
void
vrrp_sync_fault(vrrp_rt * vrrp)
{
	vrrp_rt *isync;
	vrrp_sgroup *vgroup = vrrp->sync;
	list l = vgroup->index_list;
	element e;

	if (GROUP_STATE(vgroup) == VRRP_STATE_FAULT)
		return;

	log_message(LOG_INFO, "VRRP_Group(%s) Syncing instances to FAULT state",
	       GROUP_NAME(vgroup));

	/* Perform sync index */
	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		isync = ELEMENT_DATA(e);

		/*
		 * Force the sync instance into backup mode.
		 * This reduces instance takeover to less than ms_down_timer
		 * (3 seconds by default), so takeover completes in under
		 * 3 seconds.
		 */
		if (isync != vrrp && isync->state != VRRP_STATE_FAULT) {
			if (isync->state == VRRP_STATE_MAST)
				isync->wantstate = VRRP_STATE_GOTO_FAULT;
			if (isync->state == VRRP_STATE_BACK)
				isync->state = VRRP_STATE_FAULT;
		}
	}
	vgroup->state = VRRP_STATE_FAULT;
	notify_group_exec(vgroup, VRRP_STATE_FAULT);
}
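Examples #14, #22 and #23 instantiate one template: guard on the group state, walk the group's index list, skip the triggering instance, push lagging peers toward the target state, then commit the group state and notify. A hypothetical skeleton of that shared shape, assuming the keepalived declarations used above (logging and SMTP notification omitted):

static void
vrrp_sync_to_state(vrrp_rt *vrrp, int target_state,
		   void (*push_peer)(vrrp_rt *isync))
{
	vrrp_sgroup *vgroup = vrrp->sync;
	vrrp_rt *isync;
	element e;

	if (GROUP_STATE(vgroup) == target_state)
		return;

	for (e = LIST_HEAD(vgroup->index_list); e; ELEMENT_NEXT(e)) {
		isync = ELEMENT_DATA(e);
		if (isync != vrrp && isync->state != target_state)
			push_peer(isync);	/* state-specific transition */
	}
	vgroup->state = target_state;
	notify_group_exec(vgroup, target_state);
}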
Example #24
/* Update checker's state */
void
update_svr_checker_state(int alive, checker_id_t cid, virtual_server_t *vs, real_server_t *rs)
{
	element e;
	list l = rs->failed_checkers;
	checker_id_t *id;

	/* Handle alive state. Depopulate failed_checkers and call
	 * perform_svr_state() independently, letting the latter sort
	 * things out itself.
	 */
	if (alive) {
		for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
			id = ELEMENT_DATA(e);
			if (*id == cid)
				break;
		}

		/* call the UP handler unless more failed checks remain */
		if (LIST_SIZE(l) == 0 || (LIST_SIZE(l) == 1 && e)) {
			if (perform_svr_state(alive, vs, rs))
				return;
		}

		/* Remove the succeeded check from failed_checkers */
		if (e)
			free_list_element(l, e);
	}
	/* Handle not alive state */
	else {
		if (LIST_SIZE(l) == 0) {
			if (perform_svr_state(alive, vs, rs))
				return;
		} else {
			/* do not add failed check into list twice */
			for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
				id = ELEMENT_DATA(e);
				if (*id == cid)
					return;
			}
		}

		id = (checker_id_t *) MALLOC(sizeof(checker_id_t));
		*id = cid;
		list_add(l, id);
	}
}
Example #25
/* Remove a realserver IPVS rule */
static int
clear_service_rs(virtual_server_t * vs, list l)
{
	element e;
	real_server_t *rs;
	long weight_sum;
	long down_threshold = vs->quorum - vs->hysteresis;

	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		rs = ELEMENT_DATA(e);
		if (ISALIVE(rs)) {
			log_message(LOG_INFO, "Removing service %s from VS %s"
						, FMT_RS(rs)
						, FMT_VS(vs));
			if (!ipvs_cmd(LVS_CMD_DEL_DEST, vs, rs))
				return 0;
			UNSET_ALIVE(rs);
			if (!vs->omega)
				continue;

			/* In Omega mode we call VS and RS down notifiers
			 * all the way down the exit, as necessary.
			 */
			if (rs->notify_down) {
				log_message(LOG_INFO, "Executing [%s] for service %s in VS %s"
						    , rs->notify_down
						    , FMT_RS(rs)
						    , FMT_VS(vs));
				notify_exec(rs->notify_down);
			}
#ifdef _WITH_SNMP_
			check_snmp_rs_trap(rs, vs);
#endif

			/* Sooner or later the VS will lose its quorum (if any).
			 * We deliberately do not bring in a sorry server here;
			 * the regression is intended.
			 */
			weight_sum = weigh_live_realservers(vs);
			if (vs->quorum_state == UP && (
				!weight_sum ||
				weight_sum < down_threshold)
			) {
				vs->quorum_state = DOWN;
				if (vs->quorum_down) {
					log_message(LOG_INFO, "Executing [%s] for VS %s"
							    , vs->quorum_down
							    , FMT_VS(vs));
					notify_exec(vs->quorum_down);
				}
#ifdef _WITH_SNMP_
				check_snmp_quorum_trap(vs);
#endif
			}
		}
	}

	return 1;
}
Example #26
void
vrrp_print_list(FILE *file, list l, void (*fptr)(FILE*, void*))
{
	element e;
	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		(*fptr)(file, ELEMENT_DATA(e));
	}
}
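A hypothetical callback matching the fptr signature, plus a call site (print_vrrp_iname is made up for illustration):

static void
print_vrrp_iname(FILE *file, void *data)
{
	vrrp_t *vrrp = data;
	fprintf(file, " VRRP Instance = %s\n", vrrp->iname);
}

	/* e.g. dump every configured instance */
	vrrp_print_list(stdout, vrrp_data->vrrp, print_vrrp_iname);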
Example #27
/* Remove a realserver IPVS rule */
static int
clear_service_rs(list vs_group, virtual_server * vs, list l)
{
	element e;
	real_server *rs;
	char rsip[16], vsip[16];

	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		rs = ELEMENT_DATA(e);
		if (ISALIVE(rs)) {
			if (!ipvs_cmd(LVS_CMD_DEL_DEST
				      , vs_group
				      , vs
				      , rs))
				return 0;
			UNSET_ALIVE(rs);
			if (!vs->omega)
				continue;

			/* In Omega mode we call VS and RS down notifiers
			 * all the way down the exit, as necessary.
			 */
			if (rs->notify_down) {
				log_message(LOG_INFO, "Executing [%s] for service [%s:%d]"
					    " in VS [%s:%d]"
					    , rs->notify_down
					    , inet_ntoa2(SVR_IP(rs), rsip)
					    , ntohs(SVR_PORT(rs))
					    , (vs->vsgname) ? vs->vsgname : inet_ntoa2(SVR_IP(vs), vsip)
					    , ntohs(SVR_PORT(vs)));
				notify_exec(rs->notify_down);
			}

			/* Sooner or later the VS will lose its quorum (if any).
			 * We deliberately do not bring in a sorry server here;
			 * the regression is intended.
			 */
			if (vs->quorum_state == UP && vs->quorum_down
			  && weigh_live_realservers(vs) < vs->quorum - vs->hysteresis) {
				vs->quorum_state = DOWN;
				log_message(LOG_INFO, "Executing [%s] for VS [%s:%d]"
					    , vs->quorum_down
					    , (vs->vsgname) ? vs->vsgname : inet_ntoa2(SVR_IP(vs), vsip)
					    , ntohs(SVR_PORT(vs)));
				notify_exec(vs->quorum_down);
			}
		}
#ifdef _KRNL_2_2_
		/* If we have a /32 mask, we create one NAT rule per
		 * real server.
		 */
		if (vs->nat_mask == HOST_NETMASK)
			if (!ipfw_cmd(IP_FW_CMD_DEL, vs, rs))
				return 0;
#endif
	}
	return 1;
}
Example #28
static void
vrrp_init_sands(list l)
{
	vrrp_t *vrrp;
	element e;

	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		vrrp = ELEMENT_DATA(e);
		vrrp_init_instance_sands(vrrp);
	}
}
Example #29
void set_vrrp_fd_bucket(int old_fd, vrrp_t *vrrp)
{
	vrrp_t *vrrp_ptr;
	element e;
	element next;
	list l = &vrrp_data->vrrp_index_fd[old_fd%1024 + 1];

	/* Release old stalled entries */
	for (e = LIST_HEAD(l); e; e = next) {
		next = e->next;
		vrrp_ptr = ELEMENT_DATA(e);
		if (vrrp_ptr->fd_in == old_fd) {
			if (e->prev)
				e->prev->next = e->next;
			else
				l->head = e->next;

			if (e->next)
				e->next->prev = e->prev;
			else
				l->tail = e->prev;
			l->count--;
			FREE(e);
		}
	}
	if (LIST_ISEMPTY(l))
		l->head = l->tail = NULL;

	/* Hash refreshed entries */
	l = vrrp_data->vrrp;
	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		vrrp_ptr = ELEMENT_DATA(e);

		if (vrrp_ptr->fd_in == old_fd) {
			/* Update new hash */
			vrrp_ptr->fd_in = vrrp->fd_in;
			vrrp_ptr->fd_out = vrrp->fd_out;
			alloc_vrrp_fd_bucket(vrrp_ptr);
		}
	}
}
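The manual unlink in the first loop is the classic doubly-linked-list splice. A standalone sketch of just that idiom, with minimal stand-in types:

#include <stdlib.h>

struct node { struct node *prev, *next; };
struct dlist { struct node *head, *tail; unsigned count; };

/* Detach e from l, patching head/tail when e sits at either end. */
static void
dlist_unlink(struct dlist *l, struct node *e)
{
	if (e->prev)
		e->prev->next = e->next;
	else
		l->head = e->next;

	if (e->next)
		e->next->prev = e->prev;
	else
		l->tail = e->prev;

	l->count--;
	free(e);
}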
Example #30
/* IPVS cleaner processing */
int
clear_services(void)
{
	element e;
	list l = check_data->vs;
	virtual_server *vs;
	real_server *rs;

	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		vs = ELEMENT_DATA(e);
		rs = ELEMENT_DATA(LIST_HEAD(vs->rs));
		if (!clear_service_vs(check_data->vs_group, vs))
			return 0;
#ifdef _KRNL_2_2_
		if (vs->nat_mask != HOST_NETMASK)
			if (!ipfw_cmd(IP_FW_CMD_DEL, vs, rs))
				return 0;
#endif
	}
	return 1;
}