void
VBP_Update_Backend(struct vbp_target *vt)
{
	unsigned i = 0;
	char bits[10];
	const char *logmsg;

	CHECK_OBJ_NOTNULL(vt, VBP_TARGET_MAGIC);

	Lck_Lock(&vbp_mtx);
	if (vt->backend == NULL) {
		Lck_Unlock(&vbp_mtx);
		return;
	}

#define BITMAP(n, c, t, b) \
	bits[i++] = (vt->n & 1) ? c : '-';
#include "tbl/backend_poll.h"
	bits[i] = '\0';
	assert(i < sizeof bits);

	if (vt->backend->director == NULL) {
		Lck_Unlock(&vbp_mtx);
		return;
	}

	if (vt->good >= vt->threshold) {
		if (vt->backend->director->sick) {
			logmsg = "Back healthy";
			VRT_SetHealth(vt->backend->director, 1);
		} else {
			logmsg = "Still healthy";
		}
	} else {
		if (vt->backend->director->sick) {
			logmsg = "Still sick";
		} else {
			logmsg = "Went sick";
			VRT_SetHealth(vt->backend->director, 0);
		}
	}
	VSL(SLT_Backend_health, 0, "%s %s %s %u %u %u %.6f %.6f %s",
	    vt->backend->director->vcl_name, logmsg, bits,
	    vt->good, vt->threshold, vt->window,
	    vt->last, vt->avg, vt->resp_buf);
	VBE_SetHappy(vt->backend, vt->happy);
	Lck_Unlock(&vbp_mtx);
}
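/*
 * Illustrative standalone sketch (not part of the Varnish sources) of
 * the X-macro pattern above: "tbl/backend_poll.h" is a list of
 * BITMAP(...) entries which each including site expands with its own
 * definition of BITMAP.  The table entries and field names below are
 * invented for the example.
 */
#include <stdio.h>

#define POLL_TABLE \
	BITMAP(good_ipv4, '4', "Good IPv4", 0) \
	BITMAP(good_recv, 'r', "Good received", 0) \
	BITMAP(happy,     'H', "Happy", 0)

struct example_target {
	unsigned good_ipv4, good_recv, happy;
};

static void
example_show_bits(const struct example_target *t)
{
	char bits[8];
	unsigned i = 0;

	/* One character per table entry, exactly as the probe code does. */
#define BITMAP(n, c, txt, doc) bits[i++] = (t->n & 1) ? c : '-';
	POLL_TABLE
#undef BITMAP
	bits[i] = '\0';
	printf("%s\n", bits);	/* prints "4-H" for the values below */
}

int
main(void)
{
	struct example_target t = { 1, 0, 1 };

	example_show_bits(&t);
	return (0);
}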
void
SES_Delete(struct sess *sp)
{
	struct acct *b = &sp->acct_ses;
	struct sessmem *sm;
	static char noaddr[] = "-";

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	sm = sp->mem;
	CHECK_OBJ_NOTNULL(sm, SESSMEM_MAGIC);

	AZ(sp->obj);
	AZ(sp->vcl);
	VSC_C_main->n_sess--;			/* XXX: locking ? */
	assert(!isnan(b->first));
	assert(!isnan(sp->t_end));
	if (sp->addr == NULL)
		sp->addr = noaddr;
	if (sp->port == NULL)
		sp->port = noaddr;
	VSL(SLT_StatSess, sp->id, "%s %s %.0f %ju %ju %ju %ju %ju %ju %ju",
	    sp->addr, sp->port, sp->t_end - b->first,
	    b->sess, b->req, b->pipe, b->pass,
	    b->fetch, b->hdrbytes, b->bodybytes);

	if (sm->workspace != params->sess_workspace) {
		Lck_Lock(&stat_mtx);
		VSC_C_main->n_sess_mem--;
		Lck_Unlock(&stat_mtx);
		free(sm);
	} else {
		/* Clean and prepare for reuse */
		ses_setup(sm);
		Lck_Lock(&ses_mem_mtx);
		VTAILQ_INSERT_HEAD(&ses_free_mem[1 - ses_qp], sm, list);
		Lck_Unlock(&ses_mem_mtx);
	}

	/* Try to precreate some ses-mem so the acceptor will not have to */
	if (VSC_C_main->n_sess_mem < VSC_C_main->n_sess + 10) {
		sm = ses_sm_alloc();
		if (sm != NULL) {
			ses_setup(sm);
			Lck_Lock(&ses_mem_mtx);
			VTAILQ_INSERT_HEAD(&ses_free_mem[1 - ses_qp],
			    sm, list);
			Lck_Unlock(&ses_mem_mtx);
		}
	}
}
static void
ban_cleantail(void)
{
	struct ban *b;

	do {
		Lck_Lock(&ban_mtx);
		b = VTAILQ_LAST(&ban_head, banhead_s);
		if (b != VTAILQ_FIRST(&ban_head) && b->refcount == 0) {
			if (b->flags & BANS_FLAG_COMPLETED)
				VSC_C_main->bans_completed--;
			if (b->flags & BANS_FLAG_OBJ)
				VSC_C_main->bans_obj--;
			if (b->flags & BANS_FLAG_REQ)
				VSC_C_main->bans_req--;
			VSC_C_main->bans--;
			VSC_C_main->bans_deleted++;
			VTAILQ_REMOVE(&ban_head, b, list);
			VSC_C_main->bans_persisted_fragmentation +=
			    ban_len(b->spec);
			ban_info(BI_DROP, b->spec, ban_len(b->spec));
		} else {
			b = NULL;
		}
		Lck_Unlock(&ban_mtx);
		if (b != NULL)
			BAN_Free(b);
	} while (b != NULL);
}
static void *
ban_lurker(struct worker *wrk, void *priv)
{
	struct vsl_log vsl;
	volatile double d;
	unsigned gen = ban_generation + 1;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	AZ(priv);

	VSL_Setup(&vsl, NULL, 0);

	while (!ban_shutdown) {
		d = ban_lurker_work(wrk, &vsl);
		ban_cleantail();
		if (DO_DEBUG(DBG_LURKER))
			VSLb(&vsl, SLT_Debug, "lurker: sleep = %lf", d);
		d += VTIM_real();
		Lck_Lock(&ban_mtx);
		if (gen == ban_generation) {
			(void)Lck_CondWait(&ban_lurker_cond, &ban_mtx, d);
			ban_batch = 0;
		}
		gen = ban_generation;
		Lck_Unlock(&ban_mtx);
	}
	pthread_exit(0);
	NEEDLESS_RETURN(NULL);
}
struct storage *
VFP_GetStorage(struct busyobj *bo, ssize_t sz)
{
	ssize_t l;
	struct storage *st;
	struct object *obj;

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	obj = bo->fetch_obj;
	CHECK_OBJ_NOTNULL(obj, OBJECT_MAGIC);
	st = VTAILQ_LAST(&obj->store, storagehead);
	if (st != NULL && st->len < st->space)
		return (st);

	AN(bo->stats);
	l = fetchfrag;
	if (l == 0)
		l = sz;
	if (l == 0)
		l = cache_param->fetch_chunksize;
	st = STV_alloc(bo, l);
	if (st == NULL) {
		(void)VFP_Error(bo, "Could not get storage");
	} else {
		AZ(st->len);
		Lck_Lock(&bo->mtx);
		VTAILQ_INSERT_TAIL(&obj->store, st, list);
		Lck_Unlock(&bo->mtx);
	}
	return (st);
}
void
EXP_Rearm(const struct object *o)
{
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	oc = o->objcore;
	if (oc == NULL)
		return;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	Lck_Lock(&exp_mtx);
	/*
	 * The hang-man might have this object off the binheap while
	 * tending to a timer.  If so, we do not muck with it here.
	 */
	if (oc->timer_idx != BINHEAP_NOIDX && update_object_when(o)) {
		/*
		 * XXX: this could possibly be optimized by shuffling
		 * XXX: up or down, but that leaves some very nasty
		 * XXX: corner cases, such as shuffling all the way
		 * XXX: down the left half, then back up the right half.
		 */
		assert(oc->timer_idx != BINHEAP_NOIDX);
		binheap_delete(exp_heap, oc->timer_idx);
		assert(oc->timer_idx == BINHEAP_NOIDX);
		binheap_insert(exp_heap, oc);
		assert(oc->timer_idx != BINHEAP_NOIDX);
	}
	Lck_Unlock(&exp_mtx);
	if (o->smp_object != NULL)
		SMP_TTLchanged(o);
}
void
MPL_Free(struct mempool *mpl, void *item)
{
	struct memitem *mi;

	CHECK_OBJ_NOTNULL(mpl, MEMPOOL_MAGIC);
	AN(item);

	mi = (void *)((uintptr_t)item - sizeof(*mi));
	CHECK_OBJ_NOTNULL(mi, MEMITEM_MAGIC);
	memset(item, 0, mi->size);

	Lck_Lock(&mpl->mtx);
	mpl->vsc->frees++;
	mpl->vsc->live = --mpl->live;

	if (mi->size < *mpl->cur_size) {
		mpl->vsc->toosmall++;
		VTAILQ_INSERT_HEAD(&mpl->surplus, mi, list);
	} else {
		mpl->vsc->pool = ++mpl->n_pool;
		mi->touched = mpl->t_now;
		VTAILQ_INSERT_HEAD(&mpl->list, mi, list);
	}
	Lck_Unlock(&mpl->mtx);
}
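/*
 * Illustrative standalone sketch (not the Varnish mempool API) of the
 * pointer arithmetic MPL_Free depends on: the allocator stores a
 * header immediately before the address it hands out, so the header
 * can be recovered by stepping back sizeof(header) bytes from the
 * item pointer.  All names below are invented for the example.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct example_hdr {
	size_t size;		/* payload size, needed for the scrub */
};

static void *
example_alloc(size_t size)
{
	struct example_hdr *h;

	h = malloc(sizeof *h + size);
	if (h == NULL)
		return (NULL);
	h->size = size;
	return (h + 1);		/* the caller only ever sees the payload */
}

static void
example_free(void *item)
{
	/* Same recovery step as MPL_Free. */
	struct example_hdr *h = (void *)((uintptr_t)item - sizeof *h);

	memset(item, 0, h->size);	/* scrub, as MPL_Free does */
	free(h);
}

int
main(void)
{
	void *p = example_alloc(64);

	if (p != NULL)
		example_free(p);
	return (0);
}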
void
EXP_Insert(struct object *o)
{
	struct objcore *oc;
	struct objcore_head *lru;

	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	AN(o->objhead);
	AN(ObjIsBusy(o));
	assert(o->cacheable);
	HSH_Ref(o);
	CHECK_OBJ_NOTNULL(o->objcore, OBJCORE_MAGIC);
	oc = o->objcore;

	assert(o->entered != 0 && !isnan(o->entered));
	o->last_lru = o->entered;
	Lck_Lock(&exp_mtx);
	assert(oc->timer_idx == BINHEAP_NOIDX);
	(void)update_object_when(o);
	binheap_insert(exp_heap, oc);
	assert(oc->timer_idx != BINHEAP_NOIDX);
	lru = STV_lru(o->objstore);
	if (lru != NULL) {
		VTAILQ_INSERT_TAIL(lru, oc, lru_list);
		oc->flags |= OC_F_ONLRU;
	}
	Lck_Unlock(&exp_mtx);
	if (o->smp_object != NULL)
		SMP_TTLchanged(o);
}
int
Pool_Task_Arg(struct worker *wrk, task_func_t *func,
    const void *arg, size_t arg_len)
{
	struct pool *pp;
	struct worker *wrk2;
	int retval;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	AN(arg);
	AN(arg_len);
	pp = wrk->pool;
	CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);

	Lck_Lock(&pp->mtx);
	wrk2 = pool_getidleworker(pp);
	if (wrk2 != NULL) {
		VTAILQ_REMOVE(&pp->idle_queue, &wrk2->task, list);
		retval = 1;
	} else {
		wrk2 = wrk;
		retval = 0;
	}
	Lck_Unlock(&pp->mtx);
	AZ(wrk2->task.func);

	assert(arg_len <= WS_Reserve(wrk2->aws, arg_len));
	memcpy(wrk2->aws->f, arg, arg_len);
	wrk2->task.func = func;
	wrk2->task.priv = wrk2->aws->f;
	if (retval)
		AZ(pthread_cond_signal(&wrk2->cond));
	return (retval);
}
static void
vbp_task(struct worker *wrk, void *priv)
{
	struct vbp_target *vt;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(vt, priv, VBP_TARGET_MAGIC);

	AN(vt->running);
	AN(vt->req);
	assert(vt->req_len > 0);

	vbp_start_poke(vt);
	vbp_poke(vt);
	vbp_has_poked(vt);
	VBP_Update_Backend(vt);

	Lck_Lock(&vbp_mtx);
	if (vt->running < 0) {
		assert(vt->heap_idx == BINHEAP_NOIDX);
		vbp_delete(vt);
	} else {
		vt->running = 0;
		if (vt->heap_idx != BINHEAP_NOIDX) {
			vt->due = VTIM_real() + vt->interval;
			binheap_delete(vbp_heap, vt->heap_idx);
			binheap_insert(vbp_heap, vt);
		}
	}
	Lck_Unlock(&vbp_mtx);
}
static void *
vbp_thread(struct worker *wrk, void *priv)
{
	vtim_real now, nxt;
	struct vbp_target *vt;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	AZ(priv);
	Lck_Lock(&vbp_mtx);
	while (1) {
		now = VTIM_real();
		vt = binheap_root(vbp_heap);
		if (vt == NULL) {
			nxt = 8.192 + now;
			(void)Lck_CondWait(&vbp_cond, &vbp_mtx, nxt);
		} else if (vt->due > now) {
			nxt = vt->due;
			vt = NULL;
			(void)Lck_CondWait(&vbp_cond, &vbp_mtx, nxt);
		} else {
			binheap_delete(vbp_heap, vt->heap_idx);
			vt->due = now + vt->interval;
			if (!vt->running) {
				vt->running = 1;
				vt->task.func = vbp_task;
				vt->task.priv = vt;
				if (Pool_Task_Any(&vt->task,
				    TASK_QUEUE_REQ))
					vt->running = 0;
			}
			binheap_insert(vbp_heap, vt);
		}
	}
	NEEDLESS(Lck_Unlock(&vbp_mtx));
	NEEDLESS(return NULL);
}
int
EXP_Touch(const struct object *o)
{
	struct objcore *oc;
	int retval = 0;
	struct objcore_head *lru;

	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	oc = o->objcore;
	if (oc == NULL)
		return (retval);
	lru = STV_lru(o->objstore);
	if (lru == NULL)
		return (retval);
	AN(o->objhead);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	if (Lck_Trylock(&exp_mtx))
		return (retval);
	if (oc->flags & OC_F_ONLRU) {
		VTAILQ_REMOVE(lru, oc, lru_list);
		VTAILQ_INSERT_TAIL(lru, oc, lru_list);
		VSL_stats->n_lru_moved++;
		retval = 1;
	}
	Lck_Unlock(&exp_mtx);
	return (retval);
}
struct vclref *
VRT_ref_vcl(VRT_CTX, const char *desc)
{
	struct vcl *vcl;
	struct vclref *ref;

	ASSERT_CLI();
	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	AN(desc);
	AN(*desc);

	vcl = ctx->vcl;
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
	assert(VCL_WARM(vcl));

	ALLOC_OBJ(ref, VCLREF_MAGIC);
	AN(ref);
	ref->vcl = vcl;
	bprintf(ref->desc, "%s", desc);

	Lck_Lock(&vcl_mtx);
	VTAILQ_INSERT_TAIL(&vcl->ref_list, ref, list);
	vcl->nrefs++;
	Lck_Unlock(&vcl_mtx);

	return (ref);
}
void
VRT_rel_vcl(VRT_CTX, struct vclref **refp)
{
	struct vcl *vcl;
	struct vclref *ref;

	AN(refp);
	ref = *refp;
	*refp = NULL;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(ref, VCLREF_MAGIC);

	vcl = ctx->vcl;
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
	assert(vcl == ref->vcl);

	/*
	 * NB: A VCL may be released by a VMOD at any time, but it must
	 * happen after a warmup and before the end of a cooldown.  The
	 * release may or may not happen while the same thread holds the
	 * temperature lock, so instead we check that all references are
	 * gone in VCL_Nuke.
	 */

	Lck_Lock(&vcl_mtx);
	assert(!VTAILQ_EMPTY(&vcl->ref_list));
	VTAILQ_REMOVE(&vcl->ref_list, ref, list);
	vcl->nrefs--;
	/* No garbage collection here, for the same reasons as in VCL_Rel. */
	Lck_Unlock(&vcl_mtx);

	FREE_OBJ(ref);
}
static void
vbp_has_poked(struct vbp_target *vt)
{
	unsigned i, j;
	uint64_t u;
	const char *logmsg;
	char bits[10];

	CHECK_OBJ_NOTNULL(vt, VBP_TARGET_MAGIC);

	/* Calculate exponential average */
	if (vt->happy & 1) {
		if (vt->rate < AVG_RATE)
			vt->rate += 1.0;
		vt->avg += (vt->last - vt->avg) / vt->rate;
	}

	i = 0;
#define BITMAP(n, c, t, b) \
	bits[i++] = (vt->n & 1) ? c : '-';
#include "tbl/backend_poll.h"
#undef BITMAP
	bits[i] = '\0';

	u = vt->happy;
	for (i = j = 0; i < vt->probe.window; i++) {
		if (u & 1)
			j++;
		u >>= 1;
	}
	vt->good = j;

	Lck_Lock(&vt->mtx);
	if (vt->backend != NULL) {
		if (vt->good >= vt->probe.threshold) {
			if (vt->backend->healthy)
				logmsg = "Still healthy";
			else {
				logmsg = "Back healthy";
				vt->backend->health_changed = VTIM_real();
			}
			vt->backend->healthy = 1;
		} else {
			if (vt->backend->healthy) {
				logmsg = "Went sick";
				vt->backend->health_changed = VTIM_real();
			} else
				logmsg = "Still sick";
			vt->backend->healthy = 0;
		}
		VSL(SLT_Backend_health, 0, "%s %s %s %u %u %u %.6f %.6f %s",
		    vt->backend->display_name, logmsg, bits,
		    vt->good, vt->probe.threshold, vt->probe.window,
		    vt->last, vt->avg, vt->resp_buf);
		if (!vt->disable) {
			AN(vt->backend->vsc);
			vt->backend->vsc->happy = vt->happy;
		}
	}
	Lck_Unlock(&vt->mtx);
}
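/*
 * Standalone sketch of the window arithmetic in vbp_has_poked:
 * vt->happy is a shift register with one bit per probe (newest in
 * bit 0), and "good" counts the 1-bits among the most recent
 * "window" probes.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned
example_count_good(uint64_t happy, unsigned window)
{
	unsigned i, j;

	for (i = j = 0; i < window; i++) {
		if (happy & 1)
			j++;
		happy >>= 1;
	}
	return (j);
}

int
main(void)
{
	/* 0xb == binary 1011: three good probes among the last four. */
	printf("%u\n", example_count_good(0xb, 4));
	return (0);
}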
static void
sma_trim(struct storage *s, size_t size)
{
	struct sma_sc *sma_sc;
	struct sma *sma;
	void *p;
	size_t delta;

	CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
	CAST_OBJ_NOTNULL(sma, s->priv, SMA_MAGIC);
	sma_sc = sma->sc;
	assert(sma->sz == sma->s.space);
	assert(size < sma->sz);
	delta = sma->sz - size;
	if (delta < 256)
		return;
	if ((p = realloc(sma->s.ptr, size)) != NULL) {
		Lck_Lock(&sma_sc->sma_mtx);
		sma_sc->sma_alloc -= delta;
		sma_sc->stats->g_bytes -= delta;
		sma_sc->stats->c_freed += delta;
		if (sma_sc->sma_max != SIZE_MAX)
			sma_sc->stats->g_space += delta;
		sma->sz = size;
		Lck_Unlock(&sma_sc->sma_mtx);
		sma->s.ptr = p;
		s->space = size;
	}
}
void
VRT_DelDirector(VCL_BACKEND *bp)
{
	struct vcl *vcl;
	struct vcldir *vdir;
	VCL_BACKEND d;

	TAKE_OBJ_NOTNULL(d, bp, DIRECTOR_MAGIC);
	vdir = d->vdir;
	CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
	vcl = vdir->vcl;
	CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
	Lck_Lock(&vcl_mtx);
	VTAILQ_REMOVE(&vcl->director_list, vdir, list);
	Lck_Unlock(&vcl_mtx);

	AZ(errno = pthread_rwlock_rdlock(&vcl->temp_rwl));
	if (VCL_WARM(vcl))
		VDI_Event(d, VCL_EVENT_COLD);
	AZ(errno = pthread_rwlock_unlock(&vcl->temp_rwl));

	if (vdir->methods->destroy != NULL)
		vdir->methods->destroy(d);
	free(vdir->cli_name);
	FREE_OBJ(vdir->dir);
	FREE_OBJ(vdir);
}
static struct storage *
smu_alloc(const struct stevedore *st, size_t size)
{
	struct smu *smu;

	Lck_Lock(&smu_mtx);
	VSC_C_main->sma_nreq++;
	if (VSC_C_main->sma_nbytes + size > smu_max)
		size = 0;
	else {
		VSC_C_main->sma_nobj++;
		VSC_C_main->sma_nbytes += size;
		VSC_C_main->sma_balloc += size;
	}
	Lck_Unlock(&smu_mtx);

	if (size == 0)
		return (NULL);

	smu = umem_zalloc(sizeof *smu, UMEM_DEFAULT);
	if (smu == NULL)
		return (NULL);
	smu->sz = size;
	smu->s.priv = smu;
	smu->s.ptr = umem_alloc(size, UMEM_DEFAULT);
	XXXAN(smu->s.ptr);
	smu->s.len = 0;
	smu->s.space = size;
	smu->s.fd = -1;
	smu->s.stevedore = st;
	smu->s.magic = STORAGE_MAGIC;
	return (&smu->s);
}
static void
bes_conn_try(struct busyobj *bo, struct vbc *vc, const struct vdi_simple *vs)
{
	int s;
	struct backend *bp = vs->backend;
	char abuf1[VTCP_ADDRBUFSIZE];
	char pbuf1[VTCP_PORTBUFSIZE];

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(vs, VDI_SIMPLE_MAGIC);

	Lck_Lock(&bp->mtx);
	bp->refcount++;
	bp->n_conn++;			/* It mostly works */
	Lck_Unlock(&bp->mtx);

	s = -1;
	assert(bp->ipv6 != NULL || bp->ipv4 != NULL);

	/* release lock during stuff that can take a long time */

	if (cache_param->prefer_ipv6 && bp->ipv6 != NULL) {
		s = vbe_TryConnect(bo, PF_INET6, bp->ipv6, vs);
		vc->addr = bp->ipv6;
	}
	if (s == -1 && bp->ipv4 != NULL) {
		s = vbe_TryConnect(bo, PF_INET, bp->ipv4, vs);
		vc->addr = bp->ipv4;
	}
	if (s == -1 && !cache_param->prefer_ipv6 && bp->ipv6 != NULL) {
		s = vbe_TryConnect(bo, PF_INET6, bp->ipv6, vs);
		vc->addr = bp->ipv6;
	}

	vc->fd = s;
	if (s < 0) {
		Lck_Lock(&bp->mtx);
		bp->n_conn--;
		bp->refcount--;		/* Only keep ref on success */
		Lck_Unlock(&bp->mtx);
		vc->addr = NULL;
	} else {
		VTCP_myname(s, abuf1, sizeof abuf1, pbuf1, sizeof pbuf1);
		VSLb(bo->vsl, SLT_BackendOpen, "%d %s %s %s ",
		    vc->fd, vs->backend->display_name, abuf1, pbuf1);
	}
}
void
WRK_SumStat(struct worker *w)
{

	Lck_Lock(&wstat_mtx);
	wrk_sumstat(w);
	Lck_Unlock(&wstat_mtx);
}
void
Pool_PurgeStat(unsigned nobj)
{

	Lck_Lock(&wstat_mtx);
	VSC_C_main->n_purges++;
	VSC_C_main->n_obj_purged += nobj;
	Lck_Unlock(&wstat_mtx);
}
struct vbe_conn *
VBE_GetVbe(struct sess *sp, struct backend *bp)
{
	struct vbe_conn *vc;

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);

	/* first look for vbe_conn's we can recycle */
	while (1) {
		Lck_Lock(&bp->mtx);
		vc = VTAILQ_FIRST(&bp->connlist);
		if (vc != NULL) {
			bp->refcount++;
			assert(vc->backend == bp);
			assert(vc->fd >= 0);
			VTAILQ_REMOVE(&bp->connlist, vc, list);
		}
		Lck_Unlock(&bp->mtx);
		if (vc == NULL)
			break;
		if (VBE_CheckFd(vc->fd)) {
			/* XXX locking of stats */
			VSL_stats->backend_reuse += 1;
			VSL_stats->backend_conn++;
			WSP(sp, SLT_Backend, "%d %s %s", vc->fd,
			    sp->director->vcl_name, bp->vcl_name);
			return (vc);
		}
		sp->vbe = vc;
		VBE_ClosedFd(sp);
	}

	if (!bp->healthy) {
		VSL_stats->backend_unhealthy++;
		return (NULL);
	}

	if (bp->max_conn > 0 && bp->n_conn >= bp->max_conn) {
		VSL_stats->backend_busy++;
		return (NULL);
	}

	vc = VBE_NewConn();
	assert(vc->fd == -1);
	AZ(vc->backend);
	vc->fd = bes_conn_try(sp, bp);
	if (vc->fd < 0) {
		VBE_ReleaseConn(vc);
		VSL_stats->backend_fail++;
		return (NULL);
	}
	vc->backend = bp;
	VSL_stats->backend_conn++;
	WSP(sp, SLT_Backend, "%d %s %s", vc->fd,
	    sp->director->vcl_name, bp->vcl_name);
	return (vc);
}
static struct objcore *
ban_lurker_getfirst(struct vsl_log *vsl, struct ban *bt)
{
	struct objhead *oh;
	struct objcore *oc;

	while (1) {
		Lck_Lock(&ban_mtx);
		oc = VTAILQ_FIRST(&bt->objcore);
		CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
		if (oc == &oc_marker) {
			VTAILQ_REMOVE(&bt->objcore, oc, ban_list);
			Lck_Unlock(&ban_mtx);
			return (NULL);
		}
		oh = oc->objhead;
		CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
		if (!Lck_Trylock(&oh->mtx)) {
			if (oc->refcnt == 0) {
				Lck_Unlock(&oh->mtx);
			} else {
				/*
				 * We got the lock, and the oc is not being
				 * dismantled under our feet.  Take it off
				 * the ban and (optimistically) put it on
				 * the destination ban.
				 */
				AZ(oc->flags & OC_F_BUSY);
				oc->refcnt += 1;
				VTAILQ_REMOVE(&bt->objcore, oc, ban_list);
				VTAILQ_INSERT_TAIL(&bt->objcore, oc, ban_list);
				Lck_Unlock(&oh->mtx);
				Lck_Unlock(&ban_mtx);
				break;
			}
		}

		/* Try again, later */
		Lck_Unlock(&ban_mtx);
		VSC_C_main->bans_lurker_contention++;
		VSL_Flush(vsl, 0);
		VTIM_sleep(cache_param->ban_lurker_sleep);
	}
	return (oc);
}
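/*
 * Standalone pthreads sketch (assumed names, not the Varnish Lck API)
 * of the deadlock avoidance above.  The assumption is that code
 * elsewhere takes an objhead lock before the ban lock, so the lurker,
 * which already holds the ban lock, may only *try* to take the
 * objhead lock and must drop everything and retry on contention.
 */
#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t example_ban_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t example_oh_mtx = PTHREAD_MUTEX_INITIALIZER;

static void
example_lurker_step(void)
{
	while (1) {
		pthread_mutex_lock(&example_ban_mtx);
		if (pthread_mutex_trylock(&example_oh_mtx) == 0) {
			/* ... safe to touch the object here ... */
			pthread_mutex_unlock(&example_oh_mtx);
			pthread_mutex_unlock(&example_ban_mtx);
			return;
		}
		/* Contended: release the ban lock, sleep, try again. */
		pthread_mutex_unlock(&example_ban_mtx);
		usleep(1000);
	}
}

int
main(void)
{
	example_lurker_step();
	return (0);
}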
void
Pool_Sumstat(struct worker *wrk)
{

	Lck_Lock(&wstat_mtx);
	pool_sumstat(wrk->stats);
	Lck_Unlock(&wstat_mtx);
	memset(wrk->stats, 0, sizeof *wrk->stats);
}
int
WRK_TrySumStat(struct worker *w)
{

	if (Lck_Trylock(&wstat_mtx))
		return (0);
	wrk_sumstat(w);
	Lck_Unlock(&wstat_mtx);
	return (1);
}
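/*
 * Standalone pthreads sketch (assumed names, not the Varnish Lck API)
 * of the two paths above: WRK_SumStat blocks to flush per-worker
 * counters into the global set, while WRK_TrySumStat uses trylock and
 * simply skips the flush on contention, deferring it to a later call.
 */
#include <pthread.h>

static pthread_mutex_t example_stat_mtx = PTHREAD_MUTEX_INITIALIZER;
static unsigned long example_global;

static void
example_sum_stat(unsigned long *local)
{
	pthread_mutex_lock(&example_stat_mtx);
	example_global += *local;
	*local = 0;
	pthread_mutex_unlock(&example_stat_mtx);
}

static int
example_try_sum_stat(unsigned long *local)
{
	if (pthread_mutex_trylock(&example_stat_mtx))
		return (0);	/* contended: keep accumulating locally */
	example_global += *local;
	*local = 0;
	pthread_mutex_unlock(&example_stat_mtx);
	return (1);
}

int
main(void)
{
	unsigned long local = 42;

	if (!example_try_sum_stat(&local))
		example_sum_stat(&local);
	return (0);
}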
void
VRT_vcl_rel(VRT_CTX, VCL_VCL vcl)
{

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	AN(vcl);
	Lck_Lock(&vcl_mtx);
	vcl->nrefs--;
	Lck_Unlock(&vcl_mtx);
}
void
CLI_AddFuncs(struct cli_proto *p)
{

	AZ(add_check);
	Lck_Lock(&cli_mtx);
	AZ(CLS_AddFunc(cls, 0, p));
	Lck_Unlock(&cli_mtx);
}
static void
cli_cb_after(const struct cli *cli)
{

	ASSERT_CLI();
	Lck_Unlock(&cli_mtx);
	VSL(SLT_CLI, 0, "Wr %03u %u %s",
	    cli->result, vsb_len(cli->sb), vsb_data(cli->sb));
}
void
SES_Ref(struct sess *sp)
{

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	Lck_Lock(&sp->mtx);
	assert(sp->refcnt > 0);
	sp->refcnt++;
	Lck_Unlock(&sp->mtx);
}
static void
vbf_release_req(struct busyobj *bo)
{

	if (bo->req == NULL)
		return;
	Lck_Lock(&bo->mtx);
	bo->req = NULL;
	AZ(pthread_cond_signal(&bo->cond));
	Lck_Unlock(&bo->mtx);
}
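/*
 * Standalone pthreads sketch of the hand-off in vbf_release_req: the
 * field is cleared and the condvar signalled while the mutex is held,
 * so a waiter testing the field under the same mutex cannot miss the
 * wakeup.  All names below are invented for the example.
 */
#include <pthread.h>
#include <stddef.h>

struct example_handoff {
	pthread_mutex_t mtx;
	pthread_cond_t cond;
	void *req;		/* non-NULL while still owned */
};

static void
example_release(struct example_handoff *h)
{
	pthread_mutex_lock(&h->mtx);
	h->req = NULL;
	pthread_cond_signal(&h->cond);
	pthread_mutex_unlock(&h->mtx);
}

static void
example_wait(struct example_handoff *h)
{
	pthread_mutex_lock(&h->mtx);
	while (h->req != NULL)
		pthread_cond_wait(&h->cond, &h->mtx);
	pthread_mutex_unlock(&h->mtx);
}

int
main(void)
{
	struct example_handoff h = {
	    PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, &h };

	example_release(&h);
	example_wait(&h);	/* returns at once: req is already NULL */
	return (0);
}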