Example 1
0
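/*
 * Release any hash-related objects this worker pre-allocated but never
 * used: spare objcore, objhead (and its lock), waiting list, hash
 * private data and busy object.
 */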
void
HSH_Cleanup(struct worker *wrk)
{

	if (wrk->nobjcore != NULL) {
		FREE_OBJ(wrk->nobjcore);
		wrk->stats.n_objectcore--;
		wrk->nobjcore = NULL;
	}
	if (wrk->nobjhead != NULL) {
		Lck_Delete(&wrk->nobjhead->mtx);
		FREE_OBJ(wrk->nobjhead);
		wrk->nobjhead = NULL;
		wrk->stats.n_objecthead--;
	}
	if (wrk->nwaitinglist != NULL) {
		FREE_OBJ(wrk->nwaitinglist);
		wrk->nwaitinglist = NULL;
	}
	if (wrk->nhashpriv != NULL) {
		/* XXX: If needed, add slinger method for this */
		free(wrk->nhashpriv);
		wrk->nhashpriv = NULL;
	}
	if (wrk->nvbo != NULL)
		VBO_Free(&wrk->nvbo);
}
Example 2
0
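/*
 * Drop one reference on a busy object.  On the last reference, log the
 * backend request/response byte accounting, release the fetch objcore,
 * VCL reference and Vary storage, then either park the busyobj on the
 * worker for reuse or free it.
 */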
void
VBO_DerefBusyObj(struct worker *wrk, struct busyobj **pbo)
{
	struct busyobj *bo;
	struct objcore *oc = NULL;
	unsigned r;

	CHECK_OBJ_ORNULL(wrk, WORKER_MAGIC);
	AN(pbo);
	bo = *pbo;
	*pbo = NULL;
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_ORNULL(bo->fetch_objcore, OBJCORE_MAGIC);
	CHECK_OBJ_ORNULL(bo->fetch_obj, OBJECT_MAGIC);
	if (bo->fetch_objcore != NULL) {
		oc = bo->fetch_objcore;
		CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
		CHECK_OBJ_NOTNULL(oc->objhead, OBJHEAD_MAGIC);
		Lck_Lock(&oc->objhead->mtx);
		assert(bo->refcount > 0);
		r = --bo->refcount;
		Lck_Unlock(&oc->objhead->mtx);
	} else {
		Lck_Lock(&bo->mtx);
		assert(bo->refcount > 0);
		r = --bo->refcount;
		Lck_Unlock(&bo->mtx);
	}

	if (r)
		return;

	VSLb(bo->vsl, SLT_BereqAcct, "%ju %ju %ju %ju %ju %ju",
	    (uintmax_t)bo->acct.bereq_hdrbytes,
	    (uintmax_t)bo->acct.bereq_bodybytes,
	    (uintmax_t)(bo->acct.bereq_hdrbytes + bo->acct.bereq_bodybytes),
	    (uintmax_t)bo->acct.beresp_hdrbytes,
	    (uintmax_t)bo->acct.beresp_bodybytes,
	    (uintmax_t)(bo->acct.beresp_hdrbytes + bo->acct.beresp_bodybytes));

	VSL_End(bo->vsl);

	if (bo->fetch_objcore != NULL) {
		AN(wrk);
		(void)HSH_DerefObjCore(&wrk->stats, &bo->fetch_objcore);
	}

	VCL_Rel(&bo->vcl);

	if (bo->vary != NULL)
		free(bo->vary);

	/*
	 * Reset everything from the refcount field to the end of the
	 * struct, leaving the busyobj in a clean state for reuse or freeing.
	 */
	memset(&bo->refcount, 0,
	    sizeof *bo - offsetof(struct busyobj, refcount));

	if (cache_param->bo_cache && wrk != NULL && wrk->nbo == NULL)
		wrk->nbo = bo;
	else
		VBO_Free(&bo);
}
Example 3
0
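/*
 * Worker thread body: set up a worker structure and workspace on this
 * thread's stack, run the pool's work loop, then release whatever the
 * worker still holds before the thread exits.
 */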
static void *
wrk_thread_real(void *priv, unsigned thread_workspace)
{
	struct worker *w, ww;
	unsigned char ws[thread_workspace];

	THR_SetName("cache-worker");
	w = &ww;
	memset(w, 0, sizeof *w);
	w->magic = WORKER_MAGIC;
	w->lastused = NAN;
	AZ(pthread_cond_init(&w->cond, NULL));

	WS_Init(w->aws, "wrk", ws, thread_workspace);

	VSL(SLT_WorkThread, 0, "%p start", w);

	Pool_Work_Thread(priv, w);
	AZ(w->pool);

	VSL(SLT_WorkThread, 0, "%p end", w);
	if (w->vcl != NULL)
		VCL_Rel(&w->vcl);
	AZ(pthread_cond_destroy(&w->cond));
	if (w->nvbo != NULL)
		VBO_Free(&w->nvbo);
	HSH_Cleanup(w);
	WRK_SumStat(w);
	return (NULL);
}
Example 4
0
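// Pop one pending set of chunk vertex buffers and copy it into the matching
// cluster's VBOs, allocating or growing the VBOs as needed.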
void BlockRender_PutBuffersIntoPlace(World* world) {
	if (pendingChunks.length > 0) {
		pendingChunkVtxs vtxData = vec_pop(&pendingChunks);
		Chunk* c = World_GetChunk(world, vtxData.x, vtxData.z);
		if (c != NULL) {
			Cluster* cluster = &c->data[vtxData.y];
			for (int i = 0; i < 2; i++) {
				if (vtxData.buf[i] != NULL) {
					int vboBytesNeeded = sizeof(world_vertex) * vtxData.vtxCount[i];
					if (cluster->vbo[i].memory == NULL || cluster->vbo[i].size == 0) {
						cluster->vbo[i] = VBO_Alloc(vboBytesNeeded + (sizeof(world_vertex) * 24));
					} else if (cluster->vbo[i].size < vboBytesNeeded) {
						// Existing VBO is too small: release it and allocate a larger one.
						VBO_Free(cluster->vbo[i]);
						cluster->vbo[i] = VBO_Alloc(vboBytesNeeded + (sizeof(world_vertex) * 24));
					}
					if (!cluster->vbo[i].memory) {
						// Allocation failed: skip the copy rather than writing through NULL.
						printf("VBO allocation failed\n");
					} else {
						memcpy(cluster->vbo[i].memory, vtxData.buf[i], vboBytesNeeded);
					}
					freeBuffer(vtxData.buf[i]);
				}
			}
			cluster->vertexCount[0] = vtxData.vtxCount[0];
			cluster->vertexCount[1] = vtxData.vtxCount[1];
		}
	}
}
Example 5
0
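/*
 * Set up and run one worker thread: initialize the worker structure and
 * workspace on this thread's stack, record the usable stack range, run
 * the pool's work loop and clean up when it returns.
 */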
void
WRK_Thread(struct pool *qp, size_t stacksize, unsigned thread_workspace)
{
	struct worker *w, ww;
	unsigned char ws[thread_workspace];
	uintptr_t u;

	AN(qp);
	AN(stacksize);
	AN(thread_workspace);

	THR_SetName("cache-worker");
	w = &ww;
	INIT_OBJ(w, WORKER_MAGIC);
	w->lastused = NAN;
	AZ(pthread_cond_init(&w->cond, NULL));

	WS_Init(w->aws, "wrk", ws, thread_workspace);

	u = getpagesize();
	AN(u);
	u -= 1U;
	w->stack_start = (((uintptr_t)&qp) + u) & ~u;

	/* XXX: assuming stack grows down. */
	w->stack_end = w->stack_start - stacksize;

	VSL(SLT_WorkThread, 0, "%p start", w);

	Pool_Work_Thread(qp, w);
	AZ(w->pool);

	VSL(SLT_WorkThread, 0, "%p end", w);
	if (w->vcl != NULL)
		VCL_Rel(&w->vcl);
	AZ(pthread_cond_destroy(&w->cond));
	if (w->nbo != NULL)
		VBO_Free(&w->nbo);
	HSH_Cleanup(w);
	Pool_Sumstat(w);
}