Example #1
File: queue.c  Project: yuxans/haproxy
/* Locks the queue the pendconn element belongs to. This relies on both p->px
 * and p->srv being properly initialized (which is always the case once the
 * element has been added).
 */
static inline void pendconn_queue_lock(struct pendconn *p)
{
	if (p->srv)
		HA_SPIN_LOCK(SERVER_LOCK, &p->srv->lock);
	else
		HA_SPIN_LOCK(PROXY_LOCK, &p->px->lock);
}
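
HAProxy pairs this helper with a symmetric unlock, which is not shown in this listing. Assuming it mirrors the same p->srv test, a minimal sketch would be:

static inline void pendconn_queue_unlock(struct pendconn *p)
{
	/* Must take the same branch as pendconn_queue_lock(), otherwise a
	 * lock would be released on the wrong object. */
	if (p->srv)
		HA_SPIN_UNLOCK(SERVER_LOCK, &p->srv->lock);
	else
		HA_SPIN_UNLOCK(PROXY_LOCK, &p->px->lock);
}
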
Example #2
File: queue.c  Project: yuxans/haproxy
/* Check for pending connections at the backend, and assign some of them to
 * the server coming up. The server's weight is checked before the server is
 * assigned connections it may not be able to handle. The total number of
 * transferred connections is returned.
 */
int pendconn_grab_from_px(struct server *s)
{
	struct pendconn *p;
	int maxconn, xferred = 0;

	if (!srv_currently_usable(s))
		return 0;

	/* if this is a backup server and there are active servers or at
	 * least another backup server was elected, then this one must
	 * not dequeue requests from the proxy.
	 */
	if ((s->flags & SRV_F_BACKUP) &&
	    (s->proxy->srv_act ||
	     ((s != s->proxy->lbprm.fbck) && !(s->proxy->options & PR_O_USE_ALL_BK))))
		return 0;

	HA_SPIN_LOCK(PROXY_LOCK, &s->proxy->lock);
	maxconn = srv_dynamic_maxconn(s);
	while ((p = pendconn_first(&s->proxy->pendconns))) {
		if (s->maxconn && s->served + xferred >= maxconn)
			break;

		__pendconn_unlink(p);
		p->target = s;

		task_wakeup(p->strm->task, TASK_WOKEN_RES);
		xferred++;
	}
	HA_SPIN_UNLOCK(PROXY_LOCK, &s->proxy->lock);
	return xferred;
}
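
The shape of this function (lock the queue, compute a dynamic cap, transfer waiters until the cap is reached, unlock) can be modeled outside HAProxy. A self-contained sketch with POSIX threads; the names (struct waiter, grab_waiters) are illustrative, not HAProxy's API:

#include <pthread.h>

struct waiter {
	struct waiter *next;
};

struct wait_queue {
	pthread_mutex_t lock;
	struct waiter  *head;
};

/* Pop waiters while the caller is below 'cap'; returns the number
 * transferred, mirroring pendconn_grab_from_px()'s return value. */
static int grab_waiters(struct wait_queue *q, int served, int cap,
                        void (*wake)(struct waiter *))
{
	int xferred = 0;

	pthread_mutex_lock(&q->lock);
	while (q->head && served + xferred < cap) {
		struct waiter *w = q->head;

		q->head = w->next; /* unlink under the lock */
		wake(w);           /* woken under the lock, as above */
		xferred++;
	}
	pthread_mutex_unlock(&q->lock);
	return xferred;
}
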
Example #3
File: queue.c  Project: yuxans/haproxy
/* Manages a server's connection queue. This function will try to dequeue as
 * many pending streams as possible, and wake them up.
 */
void process_srv_queue(struct server *s)
{
	struct proxy  *p = s->proxy;
	int maxconn;

	HA_SPIN_LOCK(PROXY_LOCK,  &p->lock);
	HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
	maxconn = srv_dynamic_maxconn(s);
	while (s->served < maxconn) {
		int ret = pendconn_process_next_strm(s, p);
		if (!ret)
			break;
	}
	HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
	HA_SPIN_UNLOCK(PROXY_LOCK,  &p->lock);
}
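
Note the lock ordering: the proxy lock is always taken before the server lock and released in reverse. Keeping one global order across all code paths is what prevents deadlock; a minimal standalone illustration with ordinary pthread mutexes (names are illustrative):

#include <pthread.h>

static pthread_mutex_t proxy_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t server_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every path takes proxy_lock first, then server_lock. If any path took
 * them in the opposite order, two threads could each hold one lock while
 * waiting for the other, and both would block forever. */
static void locked_work(void (*fn)(void))
{
	pthread_mutex_lock(&proxy_lock);
	pthread_mutex_lock(&server_lock);
	fn();
	pthread_mutex_unlock(&server_lock);
	pthread_mutex_unlock(&proxy_lock);
}
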
Example #4
/*
 * Alloc the comp_ctx
 */
static inline int init_comp_ctx(struct comp_ctx **comp_ctx)
{
#ifdef USE_ZLIB
	z_stream *strm;

	if (global.maxzlibmem > 0 && (global.maxzlibmem - zlib_used_memory) < sizeof(struct comp_ctx))
		return -1;
#endif

	if (unlikely(pool_comp_ctx == NULL)) {
		HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
		if (unlikely(pool_comp_ctx == NULL))
			pool_comp_ctx = create_pool("comp_ctx", sizeof(struct comp_ctx), MEM_F_SHARED);
		HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
	}

	*comp_ctx = pool_alloc(pool_comp_ctx);
	if (*comp_ctx == NULL)
		return -1;
#if defined(USE_SLZ)
	(*comp_ctx)->direct_ptr = NULL;
	(*comp_ctx)->direct_len = 0;
	(*comp_ctx)->queued = NULL;
#elif defined(USE_ZLIB)
	HA_ATOMIC_ADD(&zlib_used_memory, sizeof(struct comp_ctx));

	strm = &(*comp_ctx)->strm;
	strm->zalloc = alloc_zlib;
	strm->zfree = free_zlib;
	strm->opaque = *comp_ctx;
#endif
	return 0;
}
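
The lazy pool creation above is classic double-checked locking: an unlocked fast-path test, then a re-check under the lock before creating the pool. A standalone sketch of the same shape, with a pthread mutex and malloc() standing in for create_pool(); note that without atomic accesses this pattern is formally a data race under the C11 memory model, which HAProxy's own primitives are expected to handle:

#include <pthread.h>
#include <stdlib.h>

static void *shared_pool; /* created once, on first use */
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static void *get_pool(size_t objsize)
{
	if (!shared_pool) {                    /* unlocked fast path */
		pthread_mutex_lock(&pool_lock);
		if (!shared_pool)              /* re-check under the lock */
			shared_pool = malloc(objsize);
		pthread_mutex_unlock(&pool_lock);
	}
	return shared_pool;
}
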
Example #5
/*
 * A tricky allocation callback for zlib, based on the fixed allocation
 * order inside deflateInit2().
 */
static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size)
{
	struct comp_ctx *ctx = opaque;
	static THREAD_LOCAL char round = 0; /* order in deflateInit2 */
	void *buf = NULL;
	struct pool_head *pool = NULL;

	if (global.maxzlibmem > 0 && (global.maxzlibmem - zlib_used_memory) < (long)(items * size))
		goto end;

	switch (round) {
		case 0:
			if (zlib_pool_deflate_state == NULL) {
				HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
				if (zlib_pool_deflate_state == NULL)
					zlib_pool_deflate_state = create_pool("zlib_state", size * items, MEM_F_SHARED);
				HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
			}
			pool = zlib_pool_deflate_state;
			ctx->zlib_deflate_state = buf = pool_alloc(pool);
		break;

		case 1:
			if (zlib_pool_window == NULL) {
				HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
				if (zlib_pool_window == NULL)
					zlib_pool_window = create_pool("zlib_window", size * items, MEM_F_SHARED);
				HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
			}
			pool = zlib_pool_window;
			ctx->zlib_window = buf = pool_alloc(pool);
		break;

		case 2:
			if (zlib_pool_prev == NULL) {
				HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
				if (zlib_pool_prev == NULL)
					zlib_pool_prev = create_pool("zlib_prev", size * items, MEM_F_SHARED);
				HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
			}
			pool = zlib_pool_prev;
			ctx->zlib_prev = buf = pool_alloc(pool);
		break;

		case 3:
			if (zlib_pool_head == NULL) {
				HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
				if (zlib_pool_head == NULL)
					zlib_pool_head = create_pool("zlib_head", size * items, MEM_F_SHARED);
				HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
			}
			pool = zlib_pool_head;
			ctx->zlib_head = buf = pool_alloc(pool);
		break;

		case 4:
			if (zlib_pool_pending_buf == NULL) {
				HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
				if (zlib_pool_pending_buf == NULL)
					zlib_pool_pending_buf = create_pool("zlib_pending_buf", size * items, MEM_F_SHARED);
				HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
			}
			pool = zlib_pool_pending_buf;
			ctx->zlib_pending_buf = buf = pool_alloc(pool);
		break;
	}
	if (buf != NULL)
		HA_ATOMIC_ADD(&zlib_used_memory, pool->size);

end:

	/* deflateInit2() first allocates and checks the deflate_state, then,
	 * if that succeeds, it allocates the other four areas at once and
	 * checks them at the end. So we want to correctly count the rounds
	 * depending on when zlib is supposed to abort.
	 */
	if (buf || round)
		round = (round + 1) % 5;
	return buf;
}
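
The if (buf || round) test is the subtle part: if the very first allocation (the deflate_state) fails, deflateInit2() aborts immediately, so the counter must stay at 0 for the next attempt; past round 0, zlib requests all remaining areas regardless of individual failures, so the counter must advance even when an allocation fails. A standalone model of the counter, with illustrative names and plain malloc() standing in for pool_alloc():

#include <stddef.h>
#include <stdlib.h>

/* One enum value per fixed position in deflateInit2()'s allocation
 * sequence; names are illustrative, not HAProxy's. */
enum { R_STATE, R_WINDOW, R_PREV, R_HEAD, R_PENDING, R_COUNT };

static _Thread_local int round_idx; /* order in deflateInit2() */

static void *alloc_round(size_t items, size_t size)
{
	void *buf = malloc(items * size); /* stand-in for pool_alloc() */

	/* Stay at round 0 if the deflate_state allocation fails (zlib will
	 * retry from scratch); otherwise advance, even on failure. */
	if (buf || round_idx)
		round_idx = (round_idx + 1) % R_COUNT;
	return buf;
}
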