/*
 * Worker-pool task: run a single probe round for one target, publish the
 * result to the backend, then either reschedule the target on the timing
 * binheap or finish its deletion.
 *
 * priv is the struct vbp_target for this probe (task argument).
 *
 * NOTE(review): the storage class / return type is missing here (upstream
 * convention would be "static void"); it looks truncated at the chunk
 * boundary — confirm against the full file.
 */
vbp_task(struct worker *wrk, void *priv)
{
	struct vbp_target *vt;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(vt, priv, VBP_TARGET_MAGIC);

	/* The scheduler must have marked us running and built a request. */
	AN(vt->running);
	AN(vt->req);
	assert(vt->req_len > 0);

	/* One complete poke cycle, then push health state to the backend. */
	vbp_start_poke(vt);
	vbp_poke(vt);
	vbp_has_poked(vt);
	VBP_Update_Backend(vt);

	Lck_Lock(&vbp_mtx);
	if (vt->running < 0) {
		/*
		 * Deletion was requested while we ran; the target was
		 * already taken off the heap, so finish tearing it down.
		 */
		assert(vt->heap_idx == BINHEAP_NOIDX);
		vbp_delete(vt);
	} else {
		vt->running = 0;
		if (vt->heap_idx != BINHEAP_NOIDX) {
			/* Re-arm: set next due time and reorder the heap. */
			vt->due = VTIM_real() + vt->interval;
			binheap_delete(vbp_heap, vt->heap_idx);
			binheap_insert(vbp_heap, vt);
		}
	}
	Lck_Unlock(&vbp_mtx);
}
static void * vbp_wrk_poll_backend(void *priv) { struct vbp_target *vt; THR_SetName("backend poll"); CAST_OBJ_NOTNULL(vt, priv, VBP_TARGET_MAGIC); while (!vt->stop) { AN(vt->req); assert(vt->req_len > 0); if (!vt->disable) { vbp_start_poke(vt); vbp_poke(vt); vbp_has_poked(vt); } if (!vt->stop) VTIM_sleep(vt->probe.interval); } Lck_Delete(&vt->mtx); VTAILQ_REMOVE(&vbp_list, vt, list); VBT_Rel(&vt->tcp_pool); free(vt->req); FREE_OBJ(vt); return (NULL); }
void VBP_Insert(struct backend *b, const struct vrt_backend_probe *p, const char *hosthdr) { struct vbp_target *vt; unsigned u; ASSERT_CLI(); CHECK_OBJ_NOTNULL(b, BACKEND_MAGIC); CHECK_OBJ_NOTNULL(p, VRT_BACKEND_PROBE_MAGIC); AZ(b->probe); ALLOC_OBJ(vt, VBP_TARGET_MAGIC); XXXAN(vt); VTAILQ_INSERT_TAIL(&vbp_list, vt, list); Lck_New(&vt->mtx, lck_backend); vt->disable = -1; vt->tcp_pool = VBT_Ref(b->ipv4, b->ipv6); AN(vt->tcp_pool); vt->probe = *p; vbp_set_defaults(vt); vbp_build_req(vt, hosthdr); for (u = 0; u < vt->probe.initial; u++) { if (u) vbp_has_poked(vt); vbp_start_poke(vt); vt->happy |= 1; vbp_has_poked(vt); } vt->backend = b; b->probe = vt; vbp_has_poked(vt); }
/*
 * Reset a probe target's health history to a freshly-primed state.
 *
 * Clears the running average and rate, zeroes every per-result bitmap
 * (expanded from tbl/backend_poll.h), then fakes vt->initial successful
 * pokes so the backend does not start out unhealthy.
 */
static void
vbp_reset(struct vbp_target *vt)
{
	unsigned u;

	CHECK_OBJ_NOTNULL(vt, VBP_TARGET_MAGIC);
	vt->avg = 0.0;
	vt->rate = 0.0;
	/* Zero every bitmap field listed in the table header. */
#define BITMAP(n, c, t, b) \
	vt->n = 0;
#include "tbl/backend_poll.h"

	/* Prime the history with vt->initial synthetic good pokes. */
	for (u = 0; u < vt->initial; u++) {
		vbp_start_poke(vt);
		vt->happy |= 1;
		vbp_has_poked(vt);
	}
}