/*
 * (Re)schedule a callout: if it is still pending, pull it out of its
 * current wheel bucket, then hang it <to_ticks> ticks into the future.
 * Returns 1 if a pending callout was rescheduled, 0 otherwise.
 */
int
_callout_reset(struct callout_block *cb, struct callout *c, int to_ticks,
    void (*ftn)(void *), void *arg, const char *d_func, int d_line)
{
	int cancelled = 0;

	if (c->c_flags & CALLOUT_PENDING) {
		if (cb->nextsoftcheck == c)
			cb->nextsoftcheck = VTAILQ_NEXT(c, c_links.tqe);
		VTAILQ_REMOVE(&cb->callwheel[c->c_time & cb->callwheelmask],
		    c, c_links.tqe);
		cancelled = 1;
	}

	/* A delay of zero (or less) still means "one tick from now". */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->magic = CALLOUT_MAGIC;
	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = cb->ticks + to_ticks;
	c->d_func = d_func;
	c->d_line = d_line;
	VTAILQ_INSERT_TAIL(&cb->callwheel[c->c_time & cb->callwheelmask],
	    c, c_links.tqe);
	if (callout_debug)
		fprintf(stdout, "%sscheduled %p func %p arg %p in %d\n",
		    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
	return (cancelled);
}
/*
 * Stop a callout.  Returns 1 if the callout was pending and has been
 * removed from its wheel bucket, 0 if there was nothing to dequeue.
 */
int
_callout_stop_safe(struct callout_block *cb, struct callout *c)
{

	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		if (callout_debug)
			fprintf(stdout, "failed to stop %p func %p arg %p\n",
			    c, c->c_func, c->c_arg);
		return (0);
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
	if (cb->nextsoftcheck == c)
		cb->nextsoftcheck = VTAILQ_NEXT(c, c_links.tqe);
	VTAILQ_REMOVE(&cb->callwheel[c->c_time & cb->callwheelmask], c,
	    c_links.tqe);
	if (callout_debug)
		fprintf(stderr, "cancelled %p func %p arg %p\n",
		    c, c->c_func, c->c_arg);
	return (1);
}
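/*
 * Hedged usage sketch (not part of the original listing): callers are
 * assumed to reach _callout_reset()/_callout_stop_safe() through thin
 * wrapper macros that record the call site, in the style of the FreeBSD
 * callout API this code mirrors.  The wrapper names and the example
 * functions below are illustrative assumptions only.
 */
#define callout_reset(cb, c, ticks, ftn, arg)				\
	_callout_reset((cb), (c), (ticks), (ftn), (arg), __func__, __LINE__)
#define callout_stop(cb, c)						\
	_callout_stop_safe((cb), (c))

static void
example_timeout(void *arg)
{
	(void)arg;		/* one-shot timer body goes here */
}

static void
example_arm_timer(struct callout_block *cb, struct callout *c)
{
	/* (Re)arm a one-shot timer 10 ticks out, then cancel it again. */
	(void)callout_reset(cb, c, 10, example_timeout, NULL);
	(void)callout_stop(cb, c);
}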
static double
ban_lurker_work(struct worker *wrk, struct vsl_log *vsl)
{
	struct ban *b, *bd;
	struct banhead_s obans;
	double d, dt, n;

	dt = 49.62;		// Random, non-magic
	if (cache_param->ban_lurker_sleep == 0)
		return (dt);

	Lck_Lock(&ban_mtx);
	b = ban_start;
	Lck_Unlock(&ban_mtx);

	d = VTIM_real() - cache_param->ban_lurker_age;
	bd = NULL;
	VTAILQ_INIT(&obans);
	for (; b != NULL; b = VTAILQ_NEXT(b, list)) {
		if (bd != NULL)
			ban_lurker_test_ban(wrk, vsl, b, &obans, bd);
		if (b->flags & BANS_FLAG_COMPLETED)
			continue;
		if (b->flags & BANS_FLAG_REQ) {
			/* The lurker cannot evaluate req.* bans */
			bd = VTAILQ_NEXT(b, list);
			continue;
		}
		n = ban_time(b->spec) - d;
		if (n < 0) {
			VTAILQ_INSERT_TAIL(&obans, b, l_list);
			if (bd == NULL)
				bd = b;
		} else if (n < dt) {
			dt = n;
		}
	}

	/* Mark the bans we have fully processed as completed */
	Lck_Lock(&ban_mtx);
	VTAILQ_FOREACH(b, &obans, l_list)
		ban_mark_completed(b);
	Lck_Unlock(&ban_mtx);

	return (dt);
}
/*
 * Cursor-style stevedore iterator: start from the head of the list on
 * the first call, advance on subsequent calls, return 0 when exhausted.
 */
int
STV__iter(struct stevedore ** const pp)
{

	AN(pp);
	CHECK_OBJ_ORNULL(*pp, STEVEDORE_MAGIC);
	if (*pp != NULL)
		*pp = VTAILQ_NEXT(*pp, list);
	else
		*pp = VTAILQ_FIRST(&stevedores);
	return (*pp != NULL);
}
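/*
 * Hedged usage sketch (not in the original listing): STV__iter() is a
 * cursor-style iterator, so a caller typically drives it with a simple
 * loop such as the one below.  The FOREACH-style wrapper macro is an
 * assumption for illustration; only STV__iter() itself appears above.
 */
#define STV_FOREACH_EXAMPLE(arg)					\
	for ((arg) = NULL; STV__iter(&(arg));)

static int
example_count_stevedores(void)
{
	struct stevedore *stv;
	int n = 0;

	STV_FOREACH_EXAMPLE(stv)
		n++;
	return (n);
}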
void
vcc_NextToken(struct tokenlist *tl)
{

	tl->t = VTAILQ_NEXT(tl->t, list);
	if (tl->t == NULL) {
		vsb_printf(tl->sb,
		    "Ran out of input, something is missing or"
		    " maybe unbalanced (...) or {...}\n");
		tl->err = 1;
		return;
	}
}
void
vxp_NextToken(struct vxp *vxp)
{

	AN(vxp->t);
	vxp->t = VTAILQ_NEXT(vxp->t, list);
	if (vxp->t == NULL) {
		VSB_printf(vxp->sb,
		    "Ran out of input, something is missing or"
		    " maybe unbalanced parenthesis\n");
		vxp->err = 1;
	}
}
static void
parse_restart(struct tokenlist *tl)
{
	struct token *t1;

	t1 = VTAILQ_NEXT(tl->t, list);
	if (t1->tok == ID && vcc_IdIs(t1, "rollback")) {
		Fb(tl, 1, "VRT_Rollback(sp);\n");
		vcc_NextToken(tl);
	} else if (t1->tok != ';') {
		vsb_printf(tl->sb, "Expected \"rollback\" or semicolon.\n");
		vcc_ErrWhere(tl, t1);
		ERRCHK(tl);
	}
	Fb(tl, 1, "VRT_done(sp, VCL_RET_RESTART);\n");
	vcc_ProcAction(tl->curproc, VCL_RET_RESTART, tl->t);
	vcc_NextToken(tl);
}
/*
 * Iterate over the storage segments of an object, handing back one
 * (pointer, length) pair per call.  Returns 0 when no segments remain.
 */
int
ObjIter(struct objiter *oi, void **p, ssize_t *l)
{

	CHECK_OBJ_NOTNULL(oi, OBJITER_MAGIC);
	AN(p);
	AN(l);
	if (oi->st == NULL)
		oi->st = VTAILQ_FIRST(&oi->obj->store);
	else
		oi->st = VTAILQ_NEXT(oi->st, list);
	if (oi->st != NULL) {
		*p = oi->st->ptr;
		*l = oi->st->len;
		return (1);
	}
	return (0);
}
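/*
 * Hedged usage sketch (not in the original listing): ObjIter() yields one
 * storage segment per call, so the natural consumer is a while-loop that
 * keeps asking for (pointer, length) pairs until it returns 0.  The
 * byte-counting helper below is purely illustrative.
 */
static ssize_t
example_object_length(struct objiter *oi)
{
	void *p;
	ssize_t l, total = 0;

	while (ObjIter(oi, &p, &l))
		total += l;
	return (total);
}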
int
VDP_bytes(struct req *req, enum vdp_action act, const void *ptr, ssize_t len)
{
	int retval;
	struct vdp_entry *vdp;

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	assert(act == VDP_NULL || act == VDP_FLUSH);
	if (req->vdp_errval)
		return (req->vdp_errval);
	vdp = req->vdp_nxt;
	CHECK_OBJ_NOTNULL(vdp, VDP_ENTRY_MAGIC);
	req->vdp_nxt = VTAILQ_NEXT(vdp, list);

	assert(act > VDP_NULL || len > 0);
	/* Call the present layer, while pointing to the next layer down */
	retval = vdp->func(req, act, &vdp->priv, ptr, len);
	if (retval)
		req->vdp_errval = retval;	/* Latch error value */
	req->vdp_nxt = vdp;
	return (retval);
}
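/*
 * Hedged sketch of a delivery-processor layer (not in the original
 * listing): VDP_bytes() hands data to the topmost remaining layer, and a
 * layer with nothing to transform simply forwards the bytes to the next
 * layer down by calling VDP_bytes() again.  The function below is an
 * illustrative no-op filter matching the vdp->func() signature used above.
 */
static int
example_vdp_passthrough(struct req *req, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
	(void)priv;		/* no per-layer state needed */
	return (VDP_bytes(req, act, ptr, len));
}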
const char *
BAN_Commit(struct ban_proto *bp)
{
	struct ban *b, *bi;
	ssize_t ln;
	double t0;

	CHECK_OBJ_NOTNULL(bp, BAN_PROTO_MAGIC);
	AN(bp->vsb);

	if (ban_shutdown)
		return (ban_error(bp, "Shutting down"));

	AZ(VSB_finish(bp->vsb));
	ln = VSB_len(bp->vsb);
	assert(ln >= 0);

	ALLOC_OBJ(b, BAN_MAGIC);
	if (b == NULL)
		return (ban_error(bp, ban_build_err_no_mem));
	VTAILQ_INIT(&b->objcore);

	b->spec = malloc(ln + BANS_HEAD_LEN);
	if (b->spec == NULL) {
		free(b);
		return (ban_error(bp, ban_build_err_no_mem));
	}

	b->flags = bp->flags;

	memset(b->spec, 0, BANS_HEAD_LEN);
	t0 = VTIM_real();
	memcpy(b->spec + BANS_TIMESTAMP, &t0, sizeof t0);
	b->spec[BANS_FLAGS] = b->flags & 0xff;
	memcpy(b->spec + BANS_HEAD_LEN, VSB_data(bp->vsb), ln);
	ln += BANS_HEAD_LEN;
	vbe32enc(b->spec + BANS_LENGTH, ln);

	Lck_Lock(&ban_mtx);
	if (ban_shutdown) {
		/* We could have raced a shutdown */
		Lck_Unlock(&ban_mtx);
		BAN_Free(b);
		return (ban_error(bp, "Shutting down"));
	}
	bi = VTAILQ_FIRST(&ban_head);
	VTAILQ_INSERT_HEAD(&ban_head, b, list);
	ban_start = b;

	VSC_C_main->bans++;
	VSC_C_main->bans_added++;
	VSC_C_main->bans_persisted_bytes += ln;
	if (b->flags & BANS_FLAG_OBJ)
		VSC_C_main->bans_obj++;
	if (b->flags & BANS_FLAG_REQ)
		VSC_C_main->bans_req++;

	if (bi != NULL)
		ban_info_new(b->spec, ln);	/* Notify stevedores */

	if (cache_param->ban_dups) {
		/* Hunt down duplicates, and mark them as completed */
		for (bi = VTAILQ_NEXT(b, list); bi != NULL;
		    bi = VTAILQ_NEXT(bi, list)) {
			if (!(bi->flags & BANS_FLAG_COMPLETED) &&
			    ban_equal(b->spec, bi->spec)) {
				ban_mark_completed(bi);
				VSC_C_main->bans_dups++;
			}
		}
	}
	if (!(b->flags & BANS_FLAG_REQ))
		ban_kick_lurker();
	Lck_Unlock(&ban_mtx);

	BAN_Abandon(bp);
	return (NULL);
}
/*
 * Advance the soft clock until it catches up with the hard clock, firing
 * every callout whose bucket and expiry time have come due.
 */
void
COT_clock(struct callout_block *cb)
{
	struct callout *c;
	struct callout_tailq *bucket;
	clock_t curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int mtxcalls;
	int gcalls;

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS	100	/* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	mtxcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	while (cb->softticks != cb->ticks) {
		cb->softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cb->softticks;
		bucket = &cb->callwheel[curticks & cb->callwheelmask];
		c = VTAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = VTAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					/* Note the resume point and reset
					 * the step counter. */
					cb->nextsoftcheck = c;
					c = cb->nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;

				cb->nextsoftcheck =
				    VTAILQ_NEXT(c, c_links.tqe);
				VTAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c->c_flags =
				    (c->c_flags & ~CALLOUT_PENDING);
				mpcalls++;
				if (callout_debug)
					fprintf(stdout,
					    "callout mpsafe %p func %p "
					    "arg %p\n", c, c_func, c_arg);
				c_func(c_arg);
				if (callout_debug)
					fprintf(stdout,
					    "callout %p finished\n", c);
				steps = 0;
				c = cb->nextsoftcheck;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_mtxcalls += (mtxcalls * 1000 - avg_mtxcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	cb->nextsoftcheck = NULL;
}
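/*
 * Hedged sketch (not in the original listing): COT_clock() only catches
 * cb->softticks up to cb->ticks, so some periodic driver is assumed to
 * advance the hard clock and then call COT_clock().  The helper below is
 * an illustrative single-threaded driver, not the real clock source.
 */
static void
example_tick(struct callout_block *cb)
{
	cb->ticks++;		/* hard clock advances... */
	COT_clock(cb);		/* ...and the soft clock catches up */
}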