/*
 * Allocate and initialize a vdir: a director object plus the bookkeeping
 * (mutex, healthy-bitmap) the directors vmod keeps around it.
 *
 * vdp     - out-parameter, must point to a NULL pointer on entry
 * name    - static director type name (stored by reference, not copied)
 * vcl_name - VCL instance name (copied via REPLACE)
 * healthy/resolve - director method callbacks, stored as-is
 * priv    - opaque pointer handed back to the callbacks
 *
 * Aborts (assert) on allocation failure; never returns with *vdp == NULL.
 */
void
vdir_new(struct vdir **vdp, const char *name, const char *vcl_name,
    vdi_healthy_f *healthy, vdi_resolve_f *resolve, void *priv)
{
	struct vdir *vdir;

	AN(vdp);
	AZ(*vdp);
	AN(name);
	AN(vcl_name);

	ALLOC_OBJ(vdir, VDIR_MAGIC);
	AN(vdir);
	AZ(pthread_mutex_init(&vdir->mtx, NULL));

	ALLOC_OBJ(vdir->dir, DIRECTOR_MAGIC);
	AN(vdir->dir);
	vdir->dir->name = name;
	REPLACE(vdir->dir->vcl_name, vcl_name);
	vdir->dir->priv = priv;
	vdir->dir->healthy = healthy;
	vdir->dir->resolve = resolve;

	/* healthy-backend bitmap, starts at 8 bits */
	vdir->vbm = vbit_init(8);
	AN(vdir->vbm);

	*vdp = vdir;
}
/*
 * Pick a backend for the given shard key.
 *
 * Takes the shard director's read lock, sizes a stack-backed pick bitmap
 * from the (locked) backend count, and delegates the actual choice to
 * sharddir_pick_be_locked().  Returns NULL with an error logged when the
 * director has no backends.
 */
VCL_BACKEND
sharddir_pick_be(VRT_CTX, struct sharddir *shardd, uint32_t key, VCL_INT alt,
    VCL_REAL warmup, VCL_BOOL rampup, enum healthy_e healthy)
{
	VCL_BACKEND backend;
	struct shard_state state[1];
	unsigned sz;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);

	sharddir_rdlock(shardd);
	if (shardd->n_backend == 0) {
		shard_err0(ctx, shardd, "no backends");
		sharddir_unlock(shardd);
		return (NULL);
	}

	/* n_backend is stable under the read lock, so the VLA is safe */
	sz = VBITMAP_SZ(shardd->n_backend);
	char spc[sz];

	memset(state, 0, sizeof(state));
	init_state(state, ctx, shardd, vbit_init(spc, sz));

	backend = sharddir_pick_be_locked(ctx, shardd, key, alt, warmup,
	    rampup, healthy, state);
	sharddir_unlock(shardd);

	vbit_destroy(state->picklist);
	return (backend);
}
/*
 * Allocate and initialize a VSL_data handle.
 *
 * Returns NULL if the object itself cannot be allocated.  The embedded
 * tag bitmaps are asserted non-NULL after creation: the original left
 * vbit_init() unchecked even though the pointers are later dereferenced
 * unconditionally, and the sibling code (vsl_IX_arg) does AN() on the
 * same call -- made consistent here.
 */
struct VSL_data *
VSL_New(void)
{
	struct VSL_data *vsl;

	ALLOC_OBJ(vsl, VSL_MAGIC);
	if (vsl == NULL)
		return (NULL);
	/* 256-bit tag bitmaps; "supress" is the historical field spelling */
	vsl->vbm_select = vbit_init(256);
	AN(vsl->vbm_select);
	vsl->vbm_supress = vbit_init(256);
	AN(vsl->vbm_supress);
	VTAILQ_INIT(&vsl->vslf_select);
	VTAILQ_INIT(&vsl->vslf_suppress);
	return (vsl);
}
/*
 * Parse a -i/-x/-I/-X style filter argument of the form "[taglist:]regex".
 *
 * If a ':' is present, the part before it is parsed as a tag list into a
 * freshly allocated bitmap; the part after it (or the whole argument) is
 * compiled as a regex.  On success a vslf filter record owning both the
 * bitmap and the regex is appended to the select ('I') or suppress ('X')
 * list and 1 is returned.  On any error the bitmap (if any) is freed and
 * the (negative) result of vsl_diag() is returned.
 */
static int
vsl_IX_arg(struct VSL_data *vsl, int opt, const char *arg)
{
	int i, l, off;
	const char *b, *e, *err;
	vre_t *vre;
	struct vslf *vslf;
	struct vbitmap *tags = NULL;

	CHECK_OBJ_NOTNULL(vsl, VSL_MAGIC);
	vsl->flags |= F_SEEN_ixIX;

	b = arg;
	e = strchr(b, ':');
	if (e) {
		/* "taglist:" prefix present: build the tag bitmap */
		tags = vbit_init(SLT__MAX);
		AN(tags);
		l = e - b;
		i = VSL_List2Tags(b, l, vsl_vbm_bitset, tags);
		/* every i < 0 case returns below, so free the bitmap first */
		if (i < 0)
			vbit_destroy(tags);
		if (i == -1)
			return (vsl_diag(vsl,
			    "-%c: \"%*.*s\" matches zero tags",
			    (char)opt, l, l, b));
		else if (i == -2)
			return (vsl_diag(vsl,
			    "-%c: \"%*.*s\" is ambiguous",
			    (char)opt, l, l, b));
		else if (i <= -3)
			return (vsl_diag(vsl,
			    "-%c: Syntax error in \"%*.*s\"",
			    (char)opt, l, l, b));
		b = e + 1;
	}

	vre = VRE_compile(b, vsl->C_opt ? VRE_CASELESS : 0, &err, &off);
	if (vre == NULL) {
		/* regex failed: also drop the tag bitmap, if one was built */
		if (tags)
			vbit_destroy(tags);
		return (vsl_diag(vsl,
		    "-%c: Regex error at position %d (%s)\n",
		    (char)opt, off, err));
	}

	/* success: the filter record takes ownership of tags and vre */
	ALLOC_OBJ(vslf, VSLF_MAGIC);
	AN(vslf);
	vslf->tags = tags;
	vslf->vre = vre;
	if (opt == 'I')
		VTAILQ_INSERT_TAIL(&vsl->vslf_select, vslf, list);
	else {
		assert(opt == 'X');
		VTAILQ_INSERT_TAIL(&vsl->vslf_suppress, vslf, list);
	}
	return (1);
}
/*
 * Allocate and initialize a VSL_data handle (legacy API).
 * Aborts via assert() on allocation failure; never returns NULL.
 */
struct VSL_data *
VSL_New(void)
{
	struct VSL_data *vd;

	/*
	 * Fix: calloc takes (nmemb, size); the original had the arguments
	 * transposed (same allocation, but non-conventional and flagged by
	 * modern compilers, e.g. -Wcalloc-transposed-args).
	 */
	vd = calloc(1, sizeof *vd);
	assert(vd != NULL);
	vd->regflags = 0;	/* redundant after calloc(), kept for clarity */
	vd->magic = VSL_MAGIC;
	vd->fd = -1;
	vd->vbm_client = vbit_init(4096);
	vd->vbm_backend = vbit_init(4096);
	vd->vbm_supress = vbit_init(256);	/* sic: historical field name */
	vd->vbm_select = vbit_init(256);
	vd->rbuflen = SHMLOG_NEXTTAG + 256;
	vd->rbuf = malloc(vd->rbuflen);
	assert(vd->rbuf != NULL);
	return (vd);
}
/*
 * Record whether fd should be inherited by the child process.
 * A non-NULL `what` marks the fd for inheritance, NULL clears the mark.
 * The fd bitmap is created lazily on first use.
 */
void
mgt_child_inherit(int fd, const char *what)
{

	assert(fd >= 0);
	if (fd_map == NULL)
		fd_map = vbit_init(128);
	AN(fd_map);
	if (what == NULL)
		vbit_clr(fd_map, fd);
	else
		vbit_set(fd_map, fd);
}
/*
 * Constructor for the hash director object.
 *
 * NOTE(review): the return type is not visible in this chunk; presumably
 * it is on the preceding line (likely VCL_VOID per vmod constructor
 * convention) -- confirm against the generated vcc interface.
 */
vmod_hash__init(VRT_CTX, struct vmod_directors_hash **rrp, const char *vcl_name) { struct vmod_directors_hash *rr; CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); AN(rrp); AZ(*rrp); ALLOC_OBJ(rr, VMOD_DIRECTORS_HASH_MAGIC); AN(rr); /* per-director bitmap, initial size 8 bits */ rr->vbm = vbit_init(8); AN(rr->vbm); *rrp = rr; /* no healthy/getfd callbacks are installed here (both NULL) */ vdir_new(&rr->vd, vcl_name, NULL, NULL, rr); }
/*
 * Allocate and initialize a vdir: a director object plus the bookkeeping
 * (mutex, backend bitmap) the directors vmod keeps around it.
 *
 * vdp      - out-parameter, must point to a NULL pointer on entry
 * vcl_name - VCL instance name (copied via REPLACE)
 * healthy/getfd - director method callbacks, stored as-is
 * priv     - opaque pointer handed back to the callbacks
 *
 * Aborts (assert) on allocation failure; never returns with *vdp == NULL.
 */
void
vdir_new(struct vdir **vdp, const char *vcl_name, vdi_healthy *healthy,
    vdi_getfd_f *getfd, void *priv)
{
	struct vdir *vdir;

	AN(vdp);
	AZ(*vdp);
	AN(vcl_name);

	ALLOC_OBJ(vdir, VDIR_MAGIC);
	AN(vdir);
	AZ(pthread_mutex_init(&vdir->mtx, NULL));

	ALLOC_OBJ(vdir->dir, DIRECTOR_MAGIC);
	AN(vdir->dir);
	REPLACE(vdir->dir->vcl_name, vcl_name);
	vdir->dir->priv = priv;
	vdir->dir->healthy = healthy;
	vdir->dir->getfd = getfd;

	/* backend bitmap, starts at 8 bits */
	vdir->vbm = vbit_init(8);
	AN(vdir->vbm);

	*vdp = vdir;
}
/*
 * Parse the left-hand side of a VSL query expression:
 *
 *   [ '{' level [+|-] '}' ] tag [',' tag ...] [ ':' prefix ] [ '[' field ']' ]
 *
 * Fills in a freshly allocated vex_lhs (tag bitmap, optional transaction
 * level limit, optional record prefix, optional field index).  On any
 * syntax error a diagnostic is written to vxp->sb via vxp_ErrWhere() and
 * the function returns early, leaving *plhs partially filled.
 */
static void
vxp_expr_lhs(struct vxp *vxp, struct vex_lhs **plhs)
{
	char *p;
	int i;

	AN(plhs);
	AZ(*plhs);
	ALLOC_OBJ(*plhs, VEX_LHS_MAGIC);
	AN(*plhs);
	(*plhs)->tags = vbit_init(SLT__MAX);
	(*plhs)->level = -1;	/* -1 == no level limit given */

	if (vxp->t->tok == '{') {
		/* Transaction level limits */
		vxp_NextToken(vxp);
		if (vxp->t->tok != VAL) {
			VSB_printf(vxp->sb, "Expected integer got '%.*s' ",
			    PF(vxp->t));
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		(*plhs)->level = (int)strtol(vxp->t->dec, &p, 0);
		if ((*plhs)->level < 0) {
			VSB_printf(vxp->sb, "Expected positive integer ");
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		/* optional +/- suffix: match levels above/below the limit */
		if (*p == '-') {
			(*plhs)->level_pm = -1;
			p++;
		} else if (*p == '+') {
			(*plhs)->level_pm = 1;
			p++;
		}
		if (*p) {
			VSB_printf(vxp->sb, "Syntax error in level limit ");
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		vxp_NextToken(vxp);
		ExpectErr(vxp, '}');
		vxp_NextToken(vxp);
	}
	while (1) {
		/* The tags this expression applies to */
		if (vxp->t->tok != VAL) {
			VSB_printf(vxp->sb,
			    "Expected VSL tag name got '%.*s' ",
			    PF(vxp->t));
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		/* globbing may set several bits per name */
		i = VSL_Glob2Tags(vxp->t->dec, -1, vsl_vbm_bitset,
		    (*plhs)->tags);
		if (i == -1) {
			VSB_printf(vxp->sb, "Tag name matches zero tags ");
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		if (i == -2) {
			VSB_printf(vxp->sb, "Tag name is ambiguous ");
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		if (i == -3) {
			VSB_printf(vxp->sb, "Syntax error in tag name ");
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		assert(i > 0);
		vxp_NextToken(vxp);
		if (vxp->t->tok != ',')
			break;
		vxp_NextToken(vxp);
	}
	if (vxp->t->tok == ':') {
		/* Record prefix */
		vxp_NextToken(vxp);
		if (vxp->t->tok != VAL) {
			VSB_printf(vxp->sb, "Expected string got '%.*s' ",
			    PF(vxp->t));
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		AN(vxp->t->dec);
		(*plhs)->prefix = strdup(vxp->t->dec);
		AN((*plhs)->prefix);
		(*plhs)->prefixlen = strlen((*plhs)->prefix);
		vxp_NextToken(vxp);
	}
	if (vxp->t->tok == '[') {
		/* LHS field [] */
		vxp_NextToken(vxp);
		if (vxp->t->tok != VAL) {
			VSB_printf(vxp->sb, "Expected integer got '%.*s' ",
			    PF(vxp->t));
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		/* field index is 1-based, must be a pure positive integer */
		(*plhs)->field = (int)strtol(vxp->t->dec, &p, 0);
		if (*p || (*plhs)->field <= 0) {
			VSB_printf(vxp->sb, "Expected positive integer ");
			vxp_ErrWhere(vxp, vxp->t, -1);
			return;
		}
		vxp_NextToken(vxp);
		ExpectErr(vxp, ']');
		vxp_NextToken(vxp);
	}
}
/*
 * core function for the director backend method
 *
 * while other directors return a reference to their own backend object (on
 * which varnish will call the resolve method to resolve to a non-director
 * backend), this director immediately resolves in the backend method, to make
 * the director choice visible in VCL
 *
 * consequences:
 * - we need no own struct director
 * - we can only respect a busy object when being called on the backend side,
 *   which probably is, for all practical purposes, only relevant when the
 *   saintmode vmod is used
 *
 * if we wanted to offer delayed resolution, we'd need something like
 * per-request per-director state or we'd need to return a dynamically created
 * director object. That should be straight forward once we got director
 * refcounting #2072. Until then, we could create it on the workspace, but then
 * we'd need to keep other directors from storing any references to our dynamic
 * object for longer than the current task
 */
VCL_BACKEND
sharddir_pick_be(VRT_CTX, struct sharddir *shardd, uint32_t key, VCL_INT alt,
    VCL_REAL warmup, VCL_BOOL rampup, enum healthy_e healthy)
{
	VCL_BACKEND be;
	struct shard_state state;
	unsigned picklist_sz;
	VCL_DURATION chosen_r, alt_r;

	CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	AN(ctx->vsl);

	/*
	 * Fix: shardd->n_backend may change under reconfiguration, so it
	 * must only be read under the read lock.  The original sized the
	 * picklist VLA and ran init_state() before locking; take the lock
	 * first, bail out on an empty backend list, then size the bitmap.
	 */
	sharddir_rdlock(shardd);
	if (shardd->n_backend == 0) {
		shard_err0(ctx, shardd, "no backends");
		sharddir_unlock(shardd);
		return (NULL);
	}

	picklist_sz = VBITMAP_SZ(shardd->n_backend);
	char picklist_spc[picklist_sz];

	memset(&state, 0, sizeof(state));
	init_state(&state, ctx, shardd, vbit_init(picklist_spc, picklist_sz));

	assert(shardd->hashcircle);
	validate_alt(ctx, shardd, &alt);

	state.idx = shard_lookup(shardd, key);
	assert(state.idx >= 0);

	SHDBG(SHDBG_LOOKUP, shardd, "lookup key %x idx %d host %u",
	    key, state.idx, shardd->hashcircle[state.idx].host);

	/* skip ahead to the alt'th alternative backend, if requested */
	if (alt > 0) {
		if (shard_next(&state, alt - 1,
		    healthy == ALL ? 1 : 0) == -1) {
			if (state.previous.hostid != -1) {
				be = sharddir_backend(shardd,
				    state.previous.hostid);
				goto ok;
			}
			goto err;
		}
	}

	if (shard_next(&state, 0, healthy == IGNORE ? 0 : 1) == -1) {
		if (state.previous.hostid != -1) {
			be = sharddir_backend(shardd, state.previous.hostid);
			goto ok;
		}
		goto err;
	}

	be = sharddir_backend(shardd, state.last.hostid);

	if (warmup == -1)
		warmup = shardd->warmup;

	/* short path for cases we dont want rampup/warmup or can't */
	if (alt > 0 || healthy == IGNORE ||
	    (! rampup && warmup == 0) ||
	    shard_next(&state, 0, 0) == -1)
		goto ok;

	assert(alt == 0);
	assert(state.previous.hostid >= 0);
	assert(state.last.hostid >= 0);
	assert(state.previous.hostid != state.last.hostid);
	assert(be == sharddir_backend(shardd, state.previous.hostid));

	chosen_r = shardcfg_get_rampup(shardd, state.previous.hostid);
	alt_r = shardcfg_get_rampup(shardd, state.last.hostid);

	SHDBG(SHDBG_RAMPWARM, shardd, "chosen host %d rampup %f changed %f",
	    state.previous.hostid, chosen_r,
	    ctx->now - state.previous.changed);
	SHDBG(SHDBG_RAMPWARM, shardd, "alt host %d rampup %f changed %f",
	    state.last.hostid, alt_r,
	    ctx->now - state.last.changed);

	if (ctx->now - state.previous.changed < chosen_r) {
		/*
		 * chosen host is in rampup
		 * - no change if alternative host is also in rampup or the
		 *   dice has rolled in favour of the chosen host
		 */
		if (! rampup ||
		    ctx->now - state.last.changed < alt_r ||
		    VRND_RandomTestableDouble() * chosen_r <
		    (ctx->now - state.previous.changed))
			goto ok;
	} else {
		/* chosen host not in rampup - warmup ? */
		if (warmup == 0 || VRND_RandomTestableDouble() > warmup)
			goto ok;
	}

	/* rampup/warmup decided in favour of the alternative host */
	be = sharddir_backend(shardd, state.last.hostid);

  ok:
	AN(be);
	sharddir_unlock(shardd);
	vbit_destroy(state.picklist);
	return (be);
  err:
	sharddir_unlock(shardd);
	vbit_destroy(state.picklist);
	return (NULL);
}