/*
 * Store new weight in real_server struct and then update kernel.
 * The IPVS destination entry is edited immediately only when the change
 * can actually take effect (see inline comment); the quorum is always
 * re-evaluated afterwards, since a weight change can gain or lose it.
 */
void
update_svr_wgt(int weight, virtual_server * vs, real_server * rs)
{
	char rsip[INET6_ADDRSTRLEN];	/* scratch buffer for the textual RS address */

	/* No-op if the weight is unchanged. */
	if (weight != rs->weight) {
		log_message(LOG_INFO, "Changing weight from %d to %d for %s service [%s]:%d of VS [%s]:%d"
				    , rs->weight
				    , weight
				    , ISALIVE(rs) ? "active" : "inactive"
				    , inet_sockaddrtos2(&rs->addr, rsip)
				    , ntohs(inet_sockaddrport(&rs->addr))
				    , (vs->vsgname) ? vs->vsgname : inet_sockaddrtos(&vs->addr)
				    , ntohs(inet_sockaddrport(&vs->addr)));
		rs->weight = weight;
		/*
		 * Have weight change take effect now only if rs is in
		 * the pool and alive and the quorum is met (or if
		 * there is no sorry server). If not, it will take
		 * effect later when it becomes alive.
		 */
		if (rs->set && ISALIVE(rs) &&
		    (vs->quorum_state == UP || !vs->s_svr || !ISALIVE(vs->s_svr)))
			ipvs_cmd(LVS_CMD_EDIT_DEST, check_data->vs_group, vs, rs);
		/* The live weight sum changed: the quorum may flip either way. */
		update_quorum_state(vs);
	}
}
/*
 * Store new weight in real_server struct and then update kernel.
 * update_quorum selects whether the VS quorum is re-evaluated after the
 * change; callers that batch several updates pass 0 and handle the
 * quorum themselves.
 */
void
update_svr_wgt(int weight, virtual_server_t * vs, real_server_t * rs
			 , int update_quorum)
{
	/* No-op if the weight is unchanged. */
	if (weight != rs->weight) {
		log_message(LOG_INFO, "Changing weight from %d to %d for %s service %s of VS %s"
				    , rs->weight
				    , weight
				    , ISALIVE(rs) ? "active" : "inactive"
				    , FMT_RS(rs)
				    , FMT_VS(vs));
		rs->weight = weight;
		/*
		 * Have weight change take effect now only if rs is in
		 * the pool and alive and the quorum is met (or if
		 * there is no sorry server). If not, it will take
		 * effect later when it becomes alive.
		 */
		if (rs->set && ISALIVE(rs) &&
		    (vs->quorum_state == UP || !vs->s_svr || !ISALIVE(vs->s_svr)))
			ipvs_cmd(LVS_CMD_EDIT_DEST, vs, rs);
		if (update_quorum)
			update_quorum_state(vs);
	}
}
/*
 * Set a virtualserver IPVS rules.
 * Creates the VS root entry in the kernel (re-issued for VS groups,
 * presumably because new group entries may need it — TODO confirm),
 * then initialises the real server queue.  Returns 1 on success,
 * 0 on IPVS command failure.
 */
static int
init_service_vs(virtual_server_t * vs)
{
	/* Init the VS root */
	if (!ISALIVE(vs) || vs->vsgname) {
		if (!ipvs_cmd(LVS_CMD_ADD, vs, NULL))
			return 0;
		else
			SET_ALIVE(vs);
	}

	/* Processing real server queue */
	if (!init_service_rs(vs))
		return 0;

	if (vs->reloaded) {
		if (vs->vsgname)
			/* add reloaded dests into new vsg entries */
			sync_service_vsg(vs);

		/* we may have got/lost quorum due to quorum setting changed */
		update_quorum_state(vs);
	}

	return 1;
}
/*
 * Set a realserver IPVS rules.
 * Installs every real server of the VS into the kernel pool, except in
 * alpha mode where servers start out dead.  Returns 1 on success,
 * 0 on IPVS command failure.
 */
static int
init_service_rs(virtual_server * vs)
{
	element e;
	real_server *rs;

	for (e = LIST_HEAD(vs->rs); e; ELEMENT_NEXT(e)) {
		rs = ELEMENT_DATA(e);

		/* In alpha mode, be pessimistic (or realistic?) and don't
		 * add real servers into the VS pool. They will get there
		 * later upon healthchecks recovery (if ever).
		 */
		if (vs->alpha) {
			UNSET_ALIVE(rs);
			continue;
		}
		if (!ISALIVE(rs)) {
			if (!ipvs_cmd(LVS_CMD_ADD_DEST, check_data->vs_group, vs, rs))
				return 0;
			else
				SET_ALIVE(rs);
		} else if (vs->vsgname) {
			/* Already-alive RS in a VS group: re-add it, presumably so
			 * any new group entries receive the destination too —
			 * TODO confirm against vs_group handling. */
			UNSET_ALIVE(rs);
			if (!ipvs_cmd(LVS_CMD_ADD_DEST, check_data->vs_group, vs, rs))
				return 0;
			SET_ALIVE(rs);
		}
	}
	return 1;
}
/*
 * Set a realserver IPVS rules.
 * Installs the real servers of the VS into the kernel pool, skipping
 * servers carried over from a reload and (in alpha mode) all servers.
 * Returns 1 on success, 0 on IPVS command failure.
 */
static int
init_service_rs(virtual_server_t * vs)
{
	element e;
	real_server_t *rs;

	for (e = LIST_HEAD(vs->rs); e; ELEMENT_NEXT(e)) {
		rs = ELEMENT_DATA(e);
		/* Do not re-add failed RS instantly on reload */
		if (rs->reloaded) {
			/* force re-adding of the rs into vs_group:
			 * we may have new vsg entries */
			if (vs->vsgname)
				UNSET_ALIVE(rs);
			continue;
		}
		/* In alpha mode, be pessimistic (or realistic?) and don't
		 * add real servers into the VS pool. They will get there
		 * later upon healthchecks recovery (if ever).
		 */
		if (vs->alpha) {
			UNSET_ALIVE(rs);
			continue;
		}
		if (!ISALIVE(rs)) {
			if (!ipvs_cmd(LVS_CMD_ADD_DEST, check_data->vs_group, vs, rs))
				return 0;
			SET_ALIVE(rs);
		}
	}
	return 1;
}
/*
 * Set a virtualserver IPVS rules.
 * Creates the VS root entry, pre-seeds the quorum state for fresh
 * alpha-mode services, and initialises the real server queue.
 * Returns 1 on success, 0 on IPVS command failure.
 */
static int
init_service_vs(virtual_server_t * vs)
{
	/* Init the VS root */
	if (!ISALIVE(vs) || vs->vsgname) {
		if (!ipvs_cmd(LVS_CMD_ADD, check_data->vs_group, vs, NULL))
			return 0;
		else
			SET_ALIVE(vs);
	}

	/* Processing real server queue */
	if (!LIST_ISEMPTY(vs->rs)) {
		/* A fresh alpha-mode VS starts with all RS dead, so the
		 * quorum cannot be met yet. */
		if (vs->alpha && !vs->reloaded)
			vs->quorum_state = DOWN;
		if (!init_service_rs(vs))
			return 0;
	}

	/* if the service was reloaded, we may have got/lost quorum due to quorum setting changed */
	if (vs->reloaded)
		update_quorum_state(vs);

	return 1;
}
/*
 * Set a realserver IPVS rules.
 * For servers carried over from a reload, only re-applies a changed
 * initial weight (without touching the quorum); otherwise adds dead
 * servers to the kernel pool unless the VS is in alpha mode.
 * Returns 1 always (an empty RS list is only warned about).
 */
static int
init_service_rs(virtual_server_t * vs)
{
	element e;
	real_server_t *rs;

	if (LIST_ISEMPTY(vs->rs)) {
		log_message(LOG_WARNING, "VS [%s] has no configured RS! Skipping RS activation."
				       , FMT_VS(vs));
		return 1;
	}

	for (e = LIST_HEAD(vs->rs); e; ELEMENT_NEXT(e)) {
		rs = ELEMENT_DATA(e);

		if (rs->reloaded) {
			/* Configured initial weight differs from the previous
			 * weight: apply it, deferring quorum handling
			 * (update_quorum == 0) to the caller. */
			if (rs->iweight != rs->pweight)
				update_svr_wgt(rs->iweight, vs, rs, 0);
			/* Do not re-add failed RS instantly on reload */
			continue;
		}

		/* In alpha mode, be pessimistic (or realistic?) and don't
		 * add real servers into the VS pool. They will get there
		 * later upon healthchecks recovery (if ever).
		 */
		if (!vs->alpha && !ISALIVE(rs)) {
			/* NOTE(review): the ipvs_cmd return value is ignored
			 * here, unlike in older variants — confirm intended. */
			ipvs_cmd(LVS_CMD_ADD_DEST, vs, rs);
			SET_ALIVE(rs);
		}
	}
	return 1;
}
/*
 * Remove a realserver IPVS rule.
 * Deletes every alive RS in list l from the kernel pool.  In Omega mode
 * the RS down notifier is executed for each removal and the VS quorum
 * is re-checked (running quorum_down when it is lost).
 * Returns 1 on success, 0 on IPVS command failure.
 */
static int
clear_service_rs(virtual_server_t * vs, list l)
{
	element e;
	real_server_t *rs;
	long weight_sum;
	/* Losing the quorum requires dropping below quorum minus hysteresis. */
	long down_threshold = vs->quorum - vs->hysteresis;

	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		rs = ELEMENT_DATA(e);
		if (ISALIVE(rs)) {
			log_message(LOG_INFO, "Removing service %s from VS %s"
					    , FMT_RS(rs)
					    , FMT_VS(vs));
			if (!ipvs_cmd(LVS_CMD_DEL_DEST, vs, rs))
				return 0;
			UNSET_ALIVE(rs);
			if (!vs->omega)
				continue;

			/* In Omega mode we call VS and RS down notifiers
			 * all the way down the exit, as necessary.
			 */
			if (rs->notify_down) {
				log_message(LOG_INFO, "Executing [%s] for service %s in VS %s"
						    , rs->notify_down
						    , FMT_RS(rs)
						    , FMT_VS(vs));
				notify_exec(rs->notify_down);
			}
#ifdef _WITH_SNMP_
			check_snmp_rs_trap(rs, vs);
#endif

			/* Sooner or later VS will lose the quorum (if any). However,
			 * we don't push in a sorry server then, hence the regression
			 * is intended.
			 */
			weight_sum = weigh_live_realservers(vs);
			if (vs->quorum_state == UP &&
			    (!weight_sum || weight_sum < down_threshold)) {
				vs->quorum_state = DOWN;
				if (vs->quorum_down) {
					log_message(LOG_INFO, "Executing [%s] for VS %s"
							    , vs->quorum_down
							    , FMT_VS(vs));
					notify_exec(vs->quorum_down);
				}
#ifdef _WITH_SNMP_
				check_snmp_quorum_trap(vs);
#endif
			}
		}
	}
	return 1;
}
/*
 * Remove a realserver IPVS rule (IPv4-only variant).
 * Deletes every alive RS in list l from the kernel pool; in Omega mode
 * runs the RS down notifier and the VS quorum_down notifier when the
 * quorum is lost.  Returns 1 on success, 0 on IPVS/ipfw failure.
 */
static int
clear_service_rs(list vs_group, virtual_server * vs, list l)
{
	element e;
	real_server *rs;
	/* 16 bytes fits a dotted-quad IPv4 address plus NUL. */
	char rsip[16], vsip[16];

	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		rs = ELEMENT_DATA(e);
		if (ISALIVE(rs)) {
			if (!ipvs_cmd(LVS_CMD_DEL_DEST, vs_group, vs, rs))
				return 0;
			UNSET_ALIVE(rs);
			if (!vs->omega)
				continue;

			/* In Omega mode we call VS and RS down notifiers
			 * all the way down the exit, as necessary.
			 */
			if (rs->notify_down) {
				log_message(LOG_INFO, "Executing [%s] for service [%s:%d]"
						      " in VS [%s:%d]"
						    , rs->notify_down
						    , inet_ntoa2(SVR_IP(rs), rsip)
						    , ntohs(SVR_PORT(rs))
						    , (vs->vsgname) ? vs->vsgname : inet_ntoa2(SVR_IP(vs), vsip)
						    , ntohs(SVR_PORT(vs)));
				notify_exec(rs->notify_down);
			}

			/* Sooner or later VS will lose the quorum (if any). However,
			 * we don't push in a sorry server then, hence the regression
			 * is intended.
			 */
			if (vs->quorum_state == UP && vs->quorum_down &&
			    weigh_live_realservers(vs) < vs->quorum - vs->hysteresis) {
				vs->quorum_state = DOWN;
				log_message(LOG_INFO, "Executing [%s] for VS [%s:%d]"
						    , vs->quorum_down
						    , (vs->vsgname) ? vs->vsgname : inet_ntoa2(SVR_IP(vs), vsip)
						    , ntohs(SVR_PORT(vs)));
				notify_exec(vs->quorum_down);
			}
		}
#ifdef _KRNL_2_2_
		/* if we have a /32 mask, we create one nat rules per
		 * realserver.
		 */
		if (vs->nat_mask == HOST_NETMASK)
			if (!ipfw_cmd(IP_FW_CMD_DEL, vs, rs))
				return 0;
#endif
	}
	return 1;
}
/*
 * Retrieve the aggregate CPU time counters (CPUSTATES longs) into
 * cp_time.  A live kernel is queried via sysctl; a crash dump is read
 * either from the cp_time[] symbol or, failing that, by summing the
 * per-CPU counters.  kd == NULL only resets the cached nlist state.
 * Returns 0 on success, -1 on error.
 */
int
kvm_getcptime(kvm_t *kd, long *cp_time)
{
	struct pcpu *pc;
	int i, j, maxcpu;

	/* NULL kd: flush the cached namelist and do nothing else. */
	if (kd == NULL) {
		kvm_cp_time_cached = 0;
		return (0);
	}

	if (ISALIVE(kd))
		return (getsysctl(kd, "kern.cp_time", cp_time,
		    sizeof(long) * CPUSTATES));

	/* Cross-architecture cores cannot be interpreted here. */
	if (!kd->arch->ka_native(kd)) {
		_kvm_err(kd, kd->program,
		    "cannot read cp_time from non-native core");
		return (-1);
	}

	/* Lazily resolve the kernel symbols on first use. */
	if (kvm_cp_time_cached == 0) {
		if (_kvm_cp_time_init(kd) < 0)
			return (-1);
	}

	/* If this kernel has a "cp_time[]" symbol, then just read that. */
	if (kvm_cp_time_nl[NL_CP_TIME].n_value != 0) {
		if (kvm_read(kd, kvm_cp_time_nl[NL_CP_TIME].n_value, cp_time,
		    sizeof(long) * CPUSTATES) != sizeof(long) * CPUSTATES) {
			_kvm_err(kd, kd->program, "cannot read cp_time array");
			return (-1);
		}
		return (0);
	}

	/*
	 * If we don't have that symbol, then we have to simulate
	 * "cp_time[]" by adding up the individual times for each CPU.
	 */
	maxcpu = kvm_getmaxcpu(kd);
	if (maxcpu < 0)
		return (-1);
	for (i = 0; i < CPUSTATES; i++)
		cp_time[i] = 0;
	for (i = 0; i < maxcpu; i++) {
		pc = kvm_getpcpu(kd, i);
		/* NULL marks an absent CPU slot; (void *)-1 a read error. */
		if (pc == NULL)
			continue;
		if (pc == (void *)-1)
			return (-1);
		for (j = 0; j < CPUSTATES; j++)
			cp_time[j] += pc->pc_cp_time[j];
		free(pc);
	}
	return (0);
}
static int _arm_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; arm_pt_entry_t pte; arm_physaddr_t offset, a; kvaddr_t pteindex; off_t ofs; arm_pt_entry_t *ptemap; if (ISALIVE(kd)) { _kvm_err(kd, 0, "_arm_minidump_kvatop called in live kernel!"); return (0); } vm = kd->vmst; ptemap = vm->ptemap; if (va >= vm->hdr.kernbase) { pteindex = (va - vm->hdr.kernbase) >> ARM_PAGE_SHIFT; if (pteindex >= vm->hdr.ptesize / sizeof(*ptemap)) goto invalid; pte = _kvm32toh(kd, ptemap[pteindex]); if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_INV) { _kvm_err(kd, kd->program, "_arm_minidump_kvatop: pte not valid"); goto invalid; } if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) { /* 64K page -> convert to be like 4K page */ offset = va & ARM_L2_S_OFFSET; a = (pte & ARM_L2_L_FRAME) + (va & ARM_L2_L_OFFSET & ARM_L2_S_FRAME); } else { if (kd->vmst->hdr.mmuformat == MINIDUMP_MMU_FORMAT_V4 && (pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_T) { _kvm_err(kd, kd->program, "_arm_minidump_kvatop: pte not supported"); goto invalid; } /* 4K page */ offset = va & ARM_L2_S_OFFSET; a = pte & ARM_L2_S_FRAME; } ofs = _kvm_pt_find(kd, a); if (ofs == -1) { _kvm_err(kd, kd->program, "_arm_minidump_kvatop: " "physical address 0x%jx not in minidump", (uintmax_t)a); goto invalid; } *pa = ofs + offset; return (ARM_PAGE_SIZE - offset); } else
/*
 * Translate a kernel virtual address to a physical address.
 * Handles the MIPS direct-mapped segments (XKPHYS, CKSEG0) inline and
 * walks Sysmap for mapped kernel addresses.  Returns the number of
 * bytes remaining in the page on success, 0 on an invalid address.
 */
int
_kvm_kvatop(kvm_t *kd, u_long va, paddr_t *pa)
{
	struct vmstate *vm;
	pt_entry_t pte;
	u_long idx, addr;
	int offset;

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return((off_t)0);
	}
	vm = kd->vmst;
	offset = (int)va & vm->pagemask;
	/*
	 * If we are initializing (kernel segment table pointer not yet set)
	 * then return pa == va to avoid infinite recursion.
	 */
	if (vm->Sysmap == 0) {
		*pa = va;
		return vm->pagesize - offset;
	}

	/*
	 * Check for direct-mapped segments
	 */
	if (IS_XKPHYS(va)) {
		*pa = XKPHYS_TO_PHYS(va);
		return vm->pagesize - offset;
	}
	if (va >= (vaddr_t)CKSEG0_BASE && va < (vaddr_t)CKSSEG_BASE) {
		*pa = CKSEG0_TO_PHYS(va);
		return vm->pagesize - offset;
	}

	/* Mapped kernel address: index into Sysmap, bounds-checked. */
	if (va < vm->Sysmapbase)
		goto invalid;
	idx = (va - vm->Sysmapbase) >> vm->pageshift;
	if (idx >= vm->Sysmapsize)
		goto invalid;
	addr = (u_long)vm->Sysmap + idx;

	/*
	 * Can't use KREAD to read kernel segment table entries.
	 * Fortunately it is 1-to-1 mapped so we don't have to.
	 */
	if (_kvm_pread(kd, kd->pmfd, (char *)&pte, sizeof(pte),
	    (off_t)addr) < 0)
		goto invalid;
	if (!(pte & PG_V))
		goto invalid;
	*pa = (pte & PG_FRAME) | (paddr_t)offset;
	return vm->pagesize - offset;

invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}
/*
 * Translate a kernel virtual address to a physical address using an
 * identity mapping.  Returns the number of bytes remaining in the
 * page, or 0 when called on a live kernel.
 */
int
_kvm_kvatop(kvm_t *kd, u_long va, paddr_t *pa)
{
	u_long pgoff;

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}

	/* XXX this only really works for the kernel image only */
	*pa = va;
	pgoff = va & (kd->nbpg - 1);
	return (kd->nbpg - pgoff);
}
/*
 * Translate a KVA to a PA.
 * No translation is implemented on this platform: every lookup
 * "misses" and pa is set to an all-ones sentinel.  Returns 0 always.
 */
int
_kvm_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pa)
{
	if (!ISALIVE(kd)) {
		/* No hit -- no translation */
		*pa = (u_long)~0UL;
		return 0;
	}

	_kvm_err(kd, 0, "vatop called in live kernel!");
	return 0;
}
/* Returns the sum of all RS weight in a virtual server. */
long unsigned
weigh_live_realservers(virtual_server * vs)
{
	element entry;
	real_server *real;
	long unsigned total = 0;

	/* Accumulate weights of live servers only. */
	for (entry = LIST_HEAD(vs->rs); entry; ELEMENT_NEXT(entry)) {
		real = ELEMENT_DATA(entry);
		if (!ISALIVE(real))
			continue;
		total += real->weight;
	}

	return total;
}
/*
 * Store new weight in real_server struct and then update kernel
 * (IPv4-only variant).  The IPVS destination is edited immediately
 * only when the RS is alive; otherwise the weight takes effect when
 * the server comes back up.
 */
void
update_svr_wgt(int weight, virtual_server * vs, real_server * rs)
{
	/* 16 bytes fits a dotted-quad IPv4 address plus NUL. */
	char rsip[16], vsip[16];

	if (weight != rs->weight) {
		log_message(LOG_INFO, "Changing weight from %d to %d for %s service [%s:%d]"
				      " of VS [%s:%d]"
				    , rs->weight
				    , weight
				    , ISALIVE(rs) ? "active" : "inactive"
				    , inet_ntoa2(SVR_IP(rs), rsip)
				    , ntohs(SVR_PORT(rs))
				    , (vs->vsgname) ? vs->vsgname : inet_ntoa2(SVR_IP(vs), vsip)
				    , ntohs(SVR_PORT(vs)));
		rs->weight = weight;
		/*
		 * Have weight change take effect now only if rs is alive.
		 * If not, it will take effect later when it becomes alive.
		 */
		if (ISALIVE(rs))
			ipvs_cmd(LVS_CMD_EDIT_DEST, check_data->vs_group, vs, rs);
	}
}
/*
 * Remove a realserver IPVS rule (IPv6-capable variant).
 * Deletes every alive RS in list l from the kernel pool; in Omega mode
 * runs the RS down notifier and the VS quorum_down notifier when the
 * quorum is lost.  Returns 1 on success, 0 on IPVS command failure.
 */
static int
clear_service_rs(list vs_group, virtual_server * vs, list l)
{
	element e;
	real_server *rs;
	char rsip[INET6_ADDRSTRLEN];	/* scratch buffer for the textual RS address */

	for (e = LIST_HEAD(l); e; ELEMENT_NEXT(e)) {
		rs = ELEMENT_DATA(e);
		if (ISALIVE(rs)) {
			if (!ipvs_cmd(LVS_CMD_DEL_DEST, vs_group, vs, rs))
				return 0;
			UNSET_ALIVE(rs);
			if (!vs->omega)
				continue;

			/* In Omega mode we call VS and RS down notifiers
			 * all the way down the exit, as necessary.
			 */
			if (rs->notify_down) {
				log_message(LOG_INFO, "Executing [%s] for service [%s]:%d in VS [%s]:%d"
						    , rs->notify_down
						    , inet_sockaddrtos2(&rs->addr, rsip)
						    , ntohs(inet_sockaddrport(&rs->addr))
						    , (vs->vsgname) ? vs->vsgname : inet_sockaddrtos(&vs->addr)
						    , ntohs(inet_sockaddrport(&vs->addr)));
				notify_exec(rs->notify_down);
			}

			/* Sooner or later VS will lose the quorum (if any). However,
			 * we don't push in a sorry server then, hence the regression
			 * is intended.
			 */
			if (vs->quorum_state == UP && vs->quorum_down &&
			    weigh_live_realservers(vs) < vs->quorum - vs->hysteresis) {
				vs->quorum_state = DOWN;
				log_message(LOG_INFO, "Executing [%s] for VS [%s]:%d"
						    , vs->quorum_down
						    , (vs->vsgname) ? vs->vsgname : inet_sockaddrtos(&vs->addr)
						    , ntohs(inet_sockaddrport(&vs->addr)));
				notify_exec(vs->quorum_down);
			}
		}
	}
	return 1;
}
/* Returns the sum of all RS weight in a virtual server. */
static long
weigh_live_realservers(virtual_server_t * vs)
{
	element entry;
	real_server_t *real;
	long total = 0;

	/* An empty RS list weighs nothing. */
	if (LIST_ISEMPTY(vs->rs))
		return total;

	/* Accumulate weights of live servers only. */
	for (entry = LIST_HEAD(vs->rs); entry; ELEMENT_NEXT(entry)) {
		real = ELEMENT_DATA(entry);
		if (!ISALIVE(real))
			continue;
		total += real->weight;
	}

	return total;
}
/*
 * Translate a kernel virtual address to a physical address by walking
 * the kernel page table directory (PTD).  Returns the number of bytes
 * remaining in the page on success, 0 on error or invalid address.
 */
int
_kvm_kvatop(kvm_t *kd, u_long va, paddr_t *pa)
{
	u_long offset, pte_pa;
	struct vmstate *vm;
	pt_entry_t pte;

	if (!kd->vmst) {
		_kvm_err(kd, 0, "vatop called before initvtop");
		return (0);
	}

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}

	vm = kd->vmst;
	offset = va & PGOFSET;

	/*
	 * If we are initializing (kernel page table descriptor pointer
	 * not yet set) then return pa == va to avoid infinite recursion.
	 */
	if (vm->PTD == NULL) {
		*pa = va;
		return (NBPG - (int)offset);
	}
	/* The page directory entry itself must be valid. */
	if ((vm->PTD[pdei(va)] & PG_V) == 0)
		goto invalid;

	/* Physical address of the second-level PTE for va. */
	pte_pa = (vm->PTD[pdei(va)] & PG_FRAME) +
	    (ptei(va) * sizeof(pt_entry_t));

	/* XXX READ PHYSICAL XXX */
	if (_kvm_pread(kd, kd->pmfd, &pte, sizeof pte,
	    (off_t)_kvm_pa2off(kd, pte_pa)) < 0)
		goto invalid;
	if (!(pte & PG_V))
		goto invalid;
	*pa = (pte & PG_FRAME) | (paddr_t)offset;
	return (NBPG - (int)offset);

invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}
/* Remove a virtualserver IPVS rule */
static int
clear_service_vs(virtual_server_t * vs)
{
	/* Tear down the real server side first, if there is one. */
	if (!LIST_ISEMPTY(vs->rs)) {
		if (vs->s_svr) {
			/* A sorry server stands in for the pool: drop it alone. */
			if (ISALIVE(vs->s_svr))
				ipvs_cmd(LVS_CMD_DEL_DEST, vs, vs->s_svr);
		} else {
			/* The RS teardown handles the Omega case for the VS as well. */
			if (!clear_service_rs(vs, vs->rs))
				return 0;
		}
	}

	/* Finally remove the VS entry itself and mark it dead. */
	ipvs_cmd(LVS_CMD_DEL, vs, NULL);
	UNSET_ALIVE(vs);
	return 1;
}
/*
 * Fetch swap usage information, via sysctl on a live kernel or via
 * kvm reads on a crash dump.  Calling with kd == NULL only clears the
 * cached namelist state and returns 0.
 */
int
kvm_getswapinfo(kvm_t *kd, struct kvm_swap *swap_ary, int swap_max, int flags)
{
	/*
	 * clear cache
	 */
	if (kd == NULL) {
		kvm_swap_nl_cached = 0;
		return (0);
	}

	if (!ISALIVE(kd))
		return kvm_getswapinfo_kvm(kd, swap_ary, swap_max, flags);

	return kvm_getswapinfo_sysctl(kd, swap_ary, swap_max, flags);
}
/*
 * Set a virtualserver IPVS rules.
 * Creates the VS root entry, pre-seeds the quorum state for alpha-mode
 * services, and initialises the real server queue.  Returns 1 on
 * success, 0 on IPVS command failure.
 */
static int
init_service_vs(virtual_server * vs)
{
	/* Init the VS root */
	if (!ISALIVE(vs) || vs->vsgname) {
		if (!ipvs_cmd(LVS_CMD_ADD, check_data->vs_group, vs, NULL))
			return 0;
		else
			SET_ALIVE(vs);
	}

	/* Processing real server queue */
	if (!LIST_ISEMPTY(vs->rs)) {
		/* Alpha mode starts with all RS dead, so the quorum
		 * cannot be met yet. */
		if (vs->alpha)
			vs->quorum_state = DOWN;
		if (!init_service_rs(vs))
			return 0;
	}
	return 1;
}
int _kvm_minidump_kvatop(kvm_t *kd, u_long va, off_t *pa) { struct vmstate *vm; pt_entry_t pte; u_long offset, pteindex, a; off_t ofs; pt_entry_t *ptemap; if (ISALIVE(kd)) { _kvm_err(kd, 0, "kvm_kvatop called in live kernel!"); return (0); } offset = va & PAGE_MASK; /* Operate with page-aligned address */ va &= ~PAGE_MASK; vm = kd->vmst; ptemap = vm->ptemap; #if defined(__mips_n64) if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END) a = (MIPS_XKPHYS_TO_PHYS(va)); else #endif if (va >= (u_long)MIPS_KSEG0_START && va < (u_long)MIPS_KSEG0_END) a = (MIPS_KSEG0_TO_PHYS(va)); else if (va >= (u_long)MIPS_KSEG1_START && va < (u_long)MIPS_KSEG1_END) a = (MIPS_KSEG1_TO_PHYS(va)); else if (va >= vm->hdr.kernbase) { pteindex = (va - vm->hdr.kernbase) >> PAGE_SHIFT; pte = ptemap[pteindex]; if (!pte) { _kvm_err(kd, kd->program, "_kvm_vatop: pte not valid"); goto invalid; } a = TLBLO_PTE_TO_PA(pte); } else {
/*
 * Set a realserver IPVS rules (variant with 2.2-kernel ipfw support).
 * Installs every real server of the VS into the kernel pool, except in
 * alpha mode where servers start out dead.  Returns 1 on success,
 * 0 on IPVS/ipfw command failure.
 */
static int
init_service_rs(virtual_server * vs)
{
	element e;
	real_server *rs;

	for (e = LIST_HEAD(vs->rs); e; ELEMENT_NEXT(e)) {
		rs = ELEMENT_DATA(e);

		/* In alpha mode, be pessimistic (or realistic?) and don't
		 * add real servers into the VS pool. They will get there
		 * later upon healthchecks recovery (if ever).
		 */
		if (vs->alpha) {
			UNSET_ALIVE(rs);
			continue;
		}
		if (!ISALIVE(rs)) {
			if (!ipvs_cmd(LVS_CMD_ADD_DEST, check_data->vs_group, vs, rs))
				return 0;
			else
				SET_ALIVE(rs);
		} else if (vs->vsgname) {
			/* Already-alive RS in a VS group: re-add it, presumably so
			 * any new group entries receive the destination too —
			 * TODO confirm against vs_group handling. */
			UNSET_ALIVE(rs);
			if (!ipvs_cmd(LVS_CMD_ADD_DEST, check_data->vs_group, vs, rs))
				return 0;
			SET_ALIVE(rs);
		}
#ifdef _KRNL_2_2_
		/* if we have a /32 mask, we create one nat rules per
		 * realserver.
		 */
		if (vs->nat_mask == HOST_NETMASK)
			if (!ipfw_cmd(IP_FW_CMD_ADD, vs, rs))
				return 0;
#endif
	}
	return 1;
}
/*
 * Add or remove _alive_ real servers from a virtual server.
 * Used when the quorum flips: all healthy servers are pushed into or
 * pulled out of the kernel pool in one pass.
 */
static void
perform_quorum_state(virtual_server_t *vs, int add)
{
	element e;
	real_server_t *rs;

	if (LIST_ISEMPTY(vs->rs))
		return;

	log_message(LOG_INFO, "%s the pool for VS %s"
			    , add?"Adding alive servers to":"Removing alive servers from"
			    , FMT_VS(vs));
	for (e = LIST_HEAD(vs->rs); e; ELEMENT_NEXT(e)) {
		rs = ELEMENT_DATA(e);
		if (!ISALIVE(rs)) /* We only handle alive servers */
			continue;
		/* Temporarily clear the alive flag before an add —
		 * presumably so ipvs_cmd treats the RS as not yet in the
		 * pool; restored right after.  TODO confirm against
		 * ipvs_cmd semantics. */
		if (add)
			rs->alive = 0;
		ipvs_cmd(add?LVS_CMD_ADD_DEST:LVS_CMD_DEL_DEST, vs, rs);
		rs->alive = 1;
	}
}
/*
 * Add or remove _alive_ real servers from a virtual server.
 * Used when the quorum flips: all healthy servers are pushed into or
 * pulled out of the kernel pool in one pass.
 */
void
perform_quorum_state(virtual_server *vs, int add)
{
	element e;
	real_server *rs;

	if (LIST_ISEMPTY(vs->rs))
		return;

	log_message(LOG_INFO, "%s the pool for VS [%s]:%d"
			    , add?"Adding alive servers to":"Removing alive servers from"
			    , (vs->vsgname) ? vs->vsgname : inet_sockaddrtos(&vs->addr)
			    , ntohs(inet_sockaddrport(&vs->addr)));
	for (e = LIST_HEAD(vs->rs); e; ELEMENT_NEXT(e)) {
		rs = ELEMENT_DATA(e);
		if (!ISALIVE(rs)) /* We only handle alive servers */
			continue;
		/* Temporarily clear the alive flag before an add —
		 * presumably so ipvs_cmd treats the RS as not yet in the
		 * pool; restored right after.  TODO confirm against
		 * ipvs_cmd semantics. */
		if (add)
			rs->alive = 0;
		ipvs_cmd(add?LVS_CMD_ADD_DEST:LVS_CMD_DEL_DEST, check_data->vs_group, vs, rs);
		rs->alive = 1;
	}
}
/*
 * kvm_getloadavg() -- Get system load averages, from live or dead kernels.
 *
 * Put `nelem' samples into `loadavg' array.
 * Return number of samples retrieved, or -1 on error.
 */
int
kvm_getloadavg(kvm_t *kd, double loadavg[], int nelem)
{
	struct loadavg loadinfo;
	struct nlist *p;
	int fscale, i;

	/* Live kernel: let libc do the work. */
	if (ISALIVE(kd))
		return (getloadavg(loadavg, nelem));

	if (kvm_nlist(kd, nl) != 0) {
		/* Skip resolved entries (n_type != 0) to land on the first
		 * unresolved symbol and report its name. */
		for (p = nl; p->n_type != 0; ++p);
		_kvm_err(kd, kd->program, "%s: no such symbol", p->n_name);
		return (-1);
	}

#define KREAD(kd, addr, obj) \
	(kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj))
	if (KREAD(kd, nl[X_AVERUNNABLE].n_value, &loadinfo)) {
		_kvm_err(kd, kd->program, "can't read averunnable");
		return (-1);
	}

	/*
	 * Old kernels have fscale separately; if not found assume
	 * running new format.
	 */
	if (!KREAD(kd, nl[X_FSCALE].n_value, &fscale))
		loadinfo.fscale = fscale;

	/* Never return more samples than the kernel keeps. */
	nelem = MIN(nelem, (int)(sizeof(loadinfo.ldavg) / sizeof(fixpt_t)));
	for (i = 0; i < nelem; i++)
		loadavg[i] = (double) loadinfo.ldavg[i] / loadinfo.fscale;
	return (nelem);
}
/* Set a realserver IPVS rules */
static int
init_service_rs(virtual_server_t * vs)
{
	element entry;
	real_server_t *real;

	for (entry = LIST_HEAD(vs->rs); entry; ELEMENT_NEXT(entry)) {
		real = ELEMENT_DATA(entry);

		/* Do not re-add failed RS instantly on reload */
		if (real->reloaded)
			continue;

		/* In alpha mode, be pessimistic (or realistic?) and don't
		 * add real servers into the VS pool. They will get there
		 * later upon healthchecks recovery (if ever).  Servers
		 * already alive need no action either. */
		if (vs->alpha || ISALIVE(real))
			continue;

		if (!ipvs_cmd(LVS_CMD_ADD_DEST, vs, real))
			return 0;
		SET_ALIVE(real);
	}

	return 1;
}
static int _mips_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa) { struct vmstate *vm; mips_physaddr_t offset, a; kvaddr_t pteindex; u_long valid; off_t ofs; mips32_pte_t pte32; mips64_pte_t pte64; if (ISALIVE(kd)) { _kvm_err(kd, 0, "_mips_minidump_kvatop called in live kernel!"); return (0); } offset = va & MIPS_PAGE_MASK; /* Operate with page-aligned address */ va &= ~MIPS_PAGE_MASK; vm = kd->vmst; if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64) { if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END) { a = va & MIPS_XKPHYS_PHYS_MASK; goto found; } if (va >= MIPS64_KSEG0_START && va < MIPS64_KSEG0_END) { a = va & MIPS_KSEG0_PHYS_MASK; goto found; } if (va >= MIPS64_KSEG1_START && va < MIPS64_KSEG1_END) { a = va & MIPS_KSEG0_PHYS_MASK; goto found; } } else { if (va >= MIPS32_KSEG0_START && va < MIPS32_KSEG0_END) { a = va & MIPS_KSEG0_PHYS_MASK; goto found; } if (va >= MIPS32_KSEG1_START && va < MIPS32_KSEG1_END) { a = va & MIPS_KSEG0_PHYS_MASK; goto found; } } if (va >= vm->hdr.kernbase) { pteindex = (va - vm->hdr.kernbase) >> MIPS_PAGE_SHIFT; if (vm->pte_size == 64) { valid = pteindex < vm->hdr.ptesize / sizeof(pte64); if (pteindex >= vm->hdr.ptesize / sizeof(pte64)) goto invalid; pte64 = _mips64_pte_get(kd, pteindex); valid = pte64 & MIPS_PTE_V; if (valid) a = MIPS64_PTE_TO_PA(pte64); } else { if (pteindex >= vm->hdr.ptesize / sizeof(pte32)) goto invalid; pte32 = _mips32_pte_get(kd, pteindex); valid = pte32 & MIPS_PTE_V; if (valid) a = MIPS32_PTE_TO_PA(pte32); } if (!valid) { _kvm_err(kd, kd->program, "_mips_minidump_kvatop: pte " "not valid"); goto invalid; } } else {
/*
 * Manipulate add/remove rs according to alive state.
 * Applies an RS health transition to the kernel pool and runs the
 * matching up/down notifier; the kernel change is skipped while a
 * sorry server is active without quorum.  Returns 0 on success,
 * -1 on IPVS command failure.
 */
static int
perform_svr_state(int alive, virtual_server_t * vs, real_server_t * rs)
{
	/*
	 * | ISALIVE(rs) | alive | context
	 * | 0           | 0     | first check failed under alpha mode, unreachable here
	 * | 0           | 1     | RS went up, add it to the pool
	 * | 1           | 0     | RS went down, remove it from the pool
	 * | 1           | 1     | first check succeeded w/o alpha mode, unreachable here
	 */
	if (!ISALIVE(rs) && alive) {
		log_message(LOG_INFO, "%s service %s to VS %s"
				    , (rs->inhibit) ? "Enabling" : "Adding"
				    , FMT_RS(rs)
				    , FMT_VS(vs));
		/* Add only if we have quorum or no sorry server */
		if (vs->quorum_state == UP || !vs->s_svr || !ISALIVE(vs->s_svr)) {
			if (ipvs_cmd(LVS_CMD_ADD_DEST, vs, rs))
				return -1;
		}
		rs->alive = alive;
		if (rs->notify_up) {
			log_message(LOG_INFO, "Executing [%s] for service %s in VS %s"
					    , rs->notify_up
					    , FMT_RS(rs)
					    , FMT_VS(vs));
			notify_exec(rs->notify_up);
		}
#ifdef _WITH_SNMP_CHECKER_
		check_snmp_rs_trap(rs, vs);
#endif

		/* We may have gained quorum */
		update_quorum_state(vs);
	}
	if (ISALIVE(rs) && !alive) {
		log_message(LOG_INFO, "%s service %s from VS %s"
				    , (rs->inhibit) ? "Disabling" : "Removing"
				    , FMT_RS(rs)
				    , FMT_VS(vs));

		/* server is down, it is removed from the LVS realserver pool
		 * Remove only if we have quorum or no sorry server
		 */
		if (vs->quorum_state == UP || !vs->s_svr || !ISALIVE(vs->s_svr)) {
			if (ipvs_cmd(LVS_CMD_DEL_DEST, vs, rs))
				return -1;
		}
		rs->alive = alive;
		if (rs->notify_down) {
			log_message(LOG_INFO, "Executing [%s] for service %s in VS %s"
					    , rs->notify_down
					    , FMT_RS(rs)
					    , FMT_VS(vs));
			notify_exec(rs->notify_down);
		}
#ifdef _WITH_SNMP_CHECKER_
		check_snmp_rs_trap(rs, vs);
#endif

		/* We may have lost quorum */
		update_quorum_state(vs);
	}
	return 0;
}