static int
vsl_IX_arg(struct VSL_data *vsl, int opt, const char *arg)
{
	int i, l, off;
	const char *b, *e, *err;
	vre_t *vre;
	struct vslf *vslf;
	struct vbitmap *tags = NULL;

	CHECK_OBJ_NOTNULL(vsl, VSL_MAGIC);
	vsl->flags |= F_SEEN_ixIX;

	b = arg;
	e = strchr(b, ':');
	if (e) {
		tags = vbit_init(SLT__MAX);
		AN(tags);
		l = e - b;
		i = VSL_List2Tags(b, l, vsl_vbm_bitset, tags);
		if (i < 0)
			vbit_destroy(tags);
		if (i == -1)
			return (vsl_diag(vsl,
			    "-%c: \"%*.*s\" matches zero tags",
			    (char)opt, l, l, b));
		else if (i == -2)
			return (vsl_diag(vsl,
			    "-%c: \"%*.*s\" is ambiguous",
			    (char)opt, l, l, b));
		else if (i <= -3)
			return (vsl_diag(vsl,
			    "-%c: Syntax error in \"%*.*s\"",
			    (char)opt, l, l, b));
		b = e + 1;
	}

	vre = VRE_compile(b, vsl->C_opt ? VRE_CASELESS : 0, &err, &off);
	if (vre == NULL) {
		if (tags)
			vbit_destroy(tags);
		return (vsl_diag(vsl, "-%c: Regex error at position %d (%s)\n",
		    (char)opt, off, err));
	}

	ALLOC_OBJ(vslf, VSLF_MAGIC);
	AN(vslf);
	vslf->tags = tags;
	vslf->vre = vre;

	if (opt == 'I')
		VTAILQ_INSERT_TAIL(&vsl->vslf_select, vslf, list);
	else {
		assert(opt == 'X');
		VTAILQ_INSERT_TAIL(&vsl->vslf_suppress, vslf, list);
	}

	return (1);
}
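/*
 * Not from the source tree: a minimal sketch of how vsl_IX_arg() is normally
 * reached, namely through the public VSL_Arg() entry point, which dispatches
 * -I/-X arguments of the form "[taglist:]regex" to it. The tag names and the
 * exact include set are illustrative assumptions.
 */
#include <stddef.h>
#include <vapi/vsl.h>

static struct VSL_data *
example_select_suppress(void)
{
	struct VSL_data *vsl;

	vsl = VSL_New();
	if (vsl == NULL)
		return (NULL);
	/* select records whose RespStatus payload matches ^404$ ... */
	if (VSL_Arg(vsl, 'I', "RespStatus:^404$") <= 0 ||
	    /* ... and suppress all Debug records */
	    VSL_Arg(vsl, 'X', "Debug:.") <= 0) {
		VSL_Delete(vsl);	/* frees the vslf lists built so far */
		return (NULL);
	}
	return (vsl);
}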
void
VSL_Delete(struct VSL_data *vsl)
{

	CHECK_OBJ_NOTNULL(vsl, VSL_MAGIC);

	vbit_destroy(vsl->vbm_select);
	vbit_destroy(vsl->vbm_supress);
	vsl_IX_free(&vsl->vslf_select);
	vsl_IX_free(&vsl->vslf_suppress);
	VSL_ResetError(vsl);
	FREE_OBJ(vsl);
}
void
vex_Free(struct vex **pvex)
{

	if ((*pvex)->lhs != NULL) {
		if ((*pvex)->lhs->tags != NULL)
			vbit_destroy((*pvex)->lhs->tags);
		if ((*pvex)->lhs->prefix != NULL)
			free((*pvex)->lhs->prefix);
		FREE_OBJ((*pvex)->lhs);
	}

	if ((*pvex)->rhs != NULL) {
		if ((*pvex)->rhs->val_string)
			free((*pvex)->rhs->val_string);
		if ((*pvex)->rhs->val_regex)
			VRE_free(&(*pvex)->rhs->val_regex);
		FREE_OBJ((*pvex)->rhs);
	}

	if ((*pvex)->a != NULL) {
		vex_Free(&(*pvex)->a);
		AZ((*pvex)->a);
	}
	if ((*pvex)->b != NULL) {
		vex_Free(&(*pvex)->b);
		AZ((*pvex)->b);
	}

	FREE_OBJ(*pvex);
	*pvex = NULL;
}
VCL_BACKEND
sharddir_pick_be(VRT_CTX, struct sharddir *shardd, uint32_t key, VCL_INT alt,
    VCL_REAL warmup, VCL_BOOL rampup, enum healthy_e healthy)
{
	VCL_BACKEND be;
	struct shard_state state[1];
	unsigned picklist_sz;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);

	sharddir_rdlock(shardd);
	if (shardd->n_backend == 0) {
		shard_err0(ctx, shardd, "no backends");
		sharddir_unlock(shardd);
		return (NULL);
	}

	picklist_sz = VBITMAP_SZ(shardd->n_backend);
	char picklist_spc[picklist_sz];

	memset(state, 0, sizeof(state));
	init_state(state, ctx, shardd, vbit_init(picklist_spc, picklist_sz));

	be = sharddir_pick_be_locked(ctx, shardd, key, alt, warmup, rampup,
	    healthy, state);
	sharddir_unlock(shardd);

	vbit_destroy(state->picklist);
	return (be);
}
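/*
 * Not from the source tree: a minimal sketch of the caller-provided-storage
 * bitmap pattern used for the picklist above, assuming the two-argument
 * vbit_init()/VBITMAP_SZ() interface seen in this snippet. The helper name
 * is illustrative.
 */
#include <assert.h>
#include "vbm.h"

static unsigned
count_marked(unsigned n)
{
	struct vbitmap *bm;
	unsigned i, marked = 0;
	unsigned sz = VBITMAP_SZ(n);	/* bytes needed for n bits */
	char spc[sz];			/* stack space, no malloc */

	assert(n > 0);
	bm = vbit_init(spc, sz);
	assert(bm != NULL);
	vbit_set(bm, 0);
	vbit_set(bm, n - 1);
	for (i = 0; i < n; i++)
		if (vbit_test(bm, i))
			marked++;
	vbit_destroy(bm);	/* does not free caller-provided space */
	return (marked);
}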
VCL_VOID
vmod_hash__fini(struct vmod_directors_hash **rrp)
{
	struct vmod_directors_hash *rr;

	rr = *rrp;
	*rrp = NULL;
	CHECK_OBJ_NOTNULL(rr, VMOD_DIRECTORS_HASH_MAGIC);
	vdir_delete(&rr->vd);
	vbit_destroy(rr->vbm);
	FREE_OBJ(rr);
}
void
vdir_delete(struct vdir **vdp)
{
	struct vdir *vd;

	TAKE_OBJ_NOTNULL(vd, vdp, VDIR_MAGIC);

	AZ(vd->dir);
	free(vd->backend);
	free(vd->weight);
	AZ(pthread_rwlock_destroy(&vd->mtx));
	vbit_destroy(vd->healthy);
	FREE_OBJ(vd);
}
static void
vsl_IX_free(vslf_list *filters)
{
	struct vslf *vslf;

	while (!VTAILQ_EMPTY(filters)) {
		vslf = VTAILQ_FIRST(filters);
		CHECK_OBJ_NOTNULL(vslf, VSLF_MAGIC);
		VTAILQ_REMOVE(filters, vslf, list);
		if (vslf->tags)
			vbit_destroy(vslf->tags);
		AN(vslf->vre);
		VRE_free(&vslf->vre);
		AZ(vslf->vre);
		FREE_OBJ(vslf);
	}
}
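/*
 * Not from the source tree: the loop above is the usual VTAILQ drain idiom,
 * repeated here in isolation with a hypothetical node type. Struct and helper
 * names are illustrative; the macros come from the in-tree vqueue.h.
 */
#include <stdlib.h>
#include "vqueue.h"

struct node {
	int			val;
	VTAILQ_ENTRY(node)	list;
};
VTAILQ_HEAD(node_list, node);

static void
node_list_drain(struct node_list *head)
{
	struct node *np;

	while (!VTAILQ_EMPTY(head)) {
		np = VTAILQ_FIRST(head);
		VTAILQ_REMOVE(head, np, list);	/* unlink before freeing */
		free(np);
	}
}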
void
vdir_delete(struct vdir **vdp)
{
	struct vdir *vd;

	AN(vdp);
	vd = *vdp;
	*vdp = NULL;

	CHECK_OBJ_NOTNULL(vd, VDIR_MAGIC);

	free(vd->backend);
	free(vd->weight);
	AZ(pthread_mutex_destroy(&vd->mtx));
	FREE_OBJ(vd->dir);
	vbit_destroy(vd->vbm);
	FREE_OBJ(vd);
}
/*
 * core function for the director backend method
 *
 * while other directors return a reference to their own backend object (on
 * which varnish will call the resolve method to resolve to a non-director
 * backend), this director immediately resolves in the backend method, to make
 * the director choice visible in VCL
 *
 * consequences:
 * - we need no struct director of our own
 * - we can only respect a busy object when being called on the backend side,
 *   which probably is, for all practical purposes, only relevant when the
 *   saintmode vmod is used
 *
 * if we wanted to offer delayed resolution, we'd need something like
 * per-request per-director state or we'd need to return a dynamically created
 * director object. That should be straightforward once we have director
 * refcounting (#2072). Until then, we could create it on the workspace, but
 * then we'd need to keep other directors from storing any references to our
 * dynamic object for longer than the current task.
 */
VCL_BACKEND
sharddir_pick_be(VRT_CTX, struct sharddir *shardd, uint32_t key, VCL_INT alt,
    VCL_REAL warmup, VCL_BOOL rampup, enum healthy_e healthy)
{
	VCL_BACKEND be;
	struct shard_state state;
	unsigned picklist_sz = VBITMAP_SZ(shardd->n_backend);
	char picklist_spc[picklist_sz];
	VCL_DURATION chosen_r, alt_r;

	CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	AN(ctx->vsl);

	memset(&state, 0, sizeof(state));
	init_state(&state, ctx, shardd, vbit_init(picklist_spc, picklist_sz));

	sharddir_rdlock(shardd);
	if (shardd->n_backend == 0) {
		shard_err0(ctx, shardd, "no backends");
		goto err;
	}

	assert(shardd->hashcircle);

	validate_alt(ctx, shardd, &alt);

	state.idx = shard_lookup(shardd, key);
	assert(state.idx >= 0);

	SHDBG(SHDBG_LOOKUP, shardd, "lookup key %x idx %d host %u",
	    key, state.idx, shardd->hashcircle[state.idx].host);

	if (alt > 0) {
		if (shard_next(&state, alt - 1,
		    healthy == ALL ? 1 : 0) == -1) {
			if (state.previous.hostid != -1) {
				be = sharddir_backend(shardd,
				    state.previous.hostid);
				goto ok;
			}
			goto err;
		}
	}

	if (shard_next(&state, 0, healthy == IGNORE ? 0 : 1) == -1) {
		if (state.previous.hostid != -1) {
			be = sharddir_backend(shardd, state.previous.hostid);
			goto ok;
		}
		goto err;
	}

	be = sharddir_backend(shardd, state.last.hostid);

	if (warmup == -1)
		warmup = shardd->warmup;

	/* short path for cases where we don't want rampup/warmup or can't */
	if (alt > 0 || healthy == IGNORE || (! rampup && warmup == 0) ||
	    shard_next(&state, 0, 0) == -1)
		goto ok;

	assert(alt == 0);
	assert(state.previous.hostid >= 0);
	assert(state.last.hostid >= 0);
	assert(state.previous.hostid != state.last.hostid);
	assert(be == sharddir_backend(shardd, state.previous.hostid));

	chosen_r = shardcfg_get_rampup(shardd, state.previous.hostid);
	alt_r = shardcfg_get_rampup(shardd, state.last.hostid);

	SHDBG(SHDBG_RAMPWARM, shardd, "chosen host %d rampup %f changed %f",
	    state.previous.hostid, chosen_r,
	    ctx->now - state.previous.changed);
	SHDBG(SHDBG_RAMPWARM, shardd, "alt host %d rampup %f changed %f",
	    state.last.hostid, alt_r, ctx->now - state.last.changed);

	if (ctx->now - state.previous.changed < chosen_r) {
		/*
		 * chosen host is in rampup
		 * - no change if the alternative host is also in rampup or
		 *   the dice have rolled in favour of the chosen host
		 */
		if (! rampup ||
		    ctx->now - state.last.changed < alt_r ||
		    VRND_RandomTestableDouble() * chosen_r <
		    (ctx->now - state.previous.changed))
			goto ok;
	} else {
		/* chosen host not in rampup - warmup ? */
		if (warmup == 0 || VRND_RandomTestableDouble() > warmup)
			goto ok;
	}

	be = sharddir_backend(shardd, state.last.hostid);

  ok:
	AN(be);
	sharddir_unlock(shardd);
	vbit_destroy(state.picklist);
	return (be);
  err:
	sharddir_unlock(shardd);
	vbit_destroy(state.picklist);
	return (NULL);
}
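/*
 * Not from the source tree: the rampup branch above keeps the chosen host
 * with a probability equal to the fraction of its rampup window that has
 * already elapsed (elapsed / rampup), diverting the remainder to the
 * alternative host. A standalone sketch of that decision; the helper name
 * is illustrative.
 */
#include "vrnd.h"

static int
keep_chosen_during_rampup(double elapsed, double rampup)
{

	if (elapsed >= rampup)
		return (1);	/* rampup window over, keep the chosen host */
	/* keep with probability elapsed / rampup */
	return (VRND_RandomTestableDouble() * rampup < elapsed);
}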