Example #1
0
void li_connection_update_io_timeout(liConnection *con) {
	liWorker *wrk = con->wrk;

	/* refresh the timeout entry at most once per second: pushing an already
	 * queued element moves it to the tail with an updated timestamp */
	if (con->io_timeout_elem.queued && (con->io_timeout_elem.ts + 1.0) < li_cur_ts(wrk)) {
		li_waitqueue_push(&wrk->io_timeout_queue, &con->io_timeout_elem);
	}
}
Example #2
0
static void progress_vrclose(liVRequest *vr, liPlugin *p) {
	mod_progress_node *node = (mod_progress_node*) g_ptr_array_index(vr->plugin_ctx, p->id);
	mod_progress_data *pd = p->data;

	if (node) {
		/* connection is being tracked, replace with tombstone */
		node->vr = NULL;
		node->request_size = vr->request.content_length;
		node->response_size = vr->out->bytes_out;
		node->bytes_in = vr->vr_in->bytes_in;
		node->bytes_out = MAX(0, vr->vr_out->bytes_out - vr->coninfo->out_queue_length);
		node->status_code = vr->response.http_status;
		li_waitqueue_push(&pd->worker_data[vr->wrk->ndx].timeout_queue, &(node->timeout_queue_elem));
	}
}
Example #3
0
liHandlerResult li_stat_cache_get_dirlist(liVRequest *vr, GString *path, liStatCacheEntry **result) {
    liStatCache *sc;
    liStatCacheEntry *sce;
    guint i;

    sc = vr->wrk->stat_cache;
    sce = g_hash_table_lookup(sc->dirlists, path);

    if (sce) {
        /* cache hit, check state */
        if (g_atomic_int_get(&sce->state) == STAT_CACHE_ENTRY_WAITING) {
            /* already waiting for it? */
            for (i = 0; i < vr->stat_cache_entries->len; i++) {
                if (g_ptr_array_index(vr->stat_cache_entries, i) == sce)
                    return LI_HANDLER_WAIT_FOR_EVENT;
            }
            li_stat_cache_entry_acquire(vr, sce); /* assign sce to vr */
            return LI_HANDLER_WAIT_FOR_EVENT;
        }

        sc->hits++;
        *result = sce;
        for (i = 0; i < vr->stat_cache_entries->len; i++) {
            if (g_ptr_array_index(vr->stat_cache_entries, i) == sce)
                return LI_HANDLER_GO_ON;
        }
        li_stat_cache_entry_acquire(vr, sce); /* assign sce to vr */
        return LI_HANDLER_GO_ON;
    } else {
        /* cache miss, allocate new entry */
        sce = stat_cache_entry_new(sc, path);
        sce->type = STAT_CACHE_ENTRY_DIR;

        li_stat_cache_entry_acquire(vr, sce); /* assign sce to vr */

        /* uses initial reference of sce */
        li_waitqueue_push(&sc->delete_queue, &sce->queue_elem);
        g_hash_table_insert(sc->dirlists, sce->data.path, sce);

        sce->refcount++;
        li_tasklet_push(vr->wrk->tasklets, stat_cache_run, stat_cache_finished, sce);

        sc->misses++;
        return LI_HANDLER_WAIT_FOR_EVENT;
    }
}
Example #4
0
void li_connection_update_io_wait(liConnection *con) {
	liWorker *wrk = con->wrk;
	gboolean want_timeout = FALSE;
	gboolean stopping = wrk->wait_for_stop_connections.active;

	switch (con->state) {
	case LI_CON_STATE_DEAD:
	case LI_CON_STATE_CLOSE: /* only a temporary state before DEAD */
		want_timeout = FALSE;
		break;
	case LI_CON_STATE_KEEP_ALIVE:
		want_timeout = stopping;
		break;
	case LI_CON_STATE_REQUEST_START:
		want_timeout = TRUE;
		break;
	case LI_CON_STATE_READ_REQUEST_HEADER:
		want_timeout = TRUE;
		break;
	case LI_CON_STATE_HANDLE_MAINVR:
		/* want timeout while we're still reading request body */
		want_timeout = stopping || !con->in.out->is_closed;
		break;
	case LI_CON_STATE_WRITE:
		want_timeout = TRUE;
		break;
	case LI_CON_STATE_UPGRADED:
		want_timeout = stopping;
		break;
	}

	if (want_timeout == con->io_timeout_elem.queued) return;
	if (want_timeout) {
		li_waitqueue_push(&wrk->io_timeout_queue, &con->io_timeout_elem);
	} else {
		li_waitqueue_remove(&wrk->io_timeout_queue, &con->io_timeout_elem);
	}
}
Example #5
0
static liHandlerResult stat_cache_get(liVRequest *vr, GString *path, struct stat *st, int *err, int *fd, gboolean async) {
    liStatCache *sc;
    liStatCacheEntry *sce;
    guint i;

    /* force blocking call if we are not in a vrequest context or stat cache is disabled */
    if (!vr || !(sc = vr->wrk->stat_cache) || !CORE_OPTION(LI_CORE_OPTION_ASYNC_STAT).boolean)
        async = FALSE;

    if (async) {
        sce = g_hash_table_lookup(sc->entries, path);

        if (sce) {
            /* cache hit, check state */
            if (g_atomic_int_get(&sce->state) == STAT_CACHE_ENTRY_WAITING) {
                /* already waiting for it? */
                for (i = 0; i < vr->stat_cache_entries->len; i++) {
                    if (g_ptr_array_index(vr->stat_cache_entries, i) == sce) {
                        return LI_HANDLER_WAIT_FOR_EVENT;
                    }
                }
                li_stat_cache_entry_acquire(vr, sce); /* assign sce to vr */
                return LI_HANDLER_WAIT_FOR_EVENT;
            }

            sc->hits++;
        } else {
            /* cache miss, allocate new entry */
            sce = stat_cache_entry_new(sc, path);
            sce->type = STAT_CACHE_ENTRY_SINGLE;

            li_stat_cache_entry_acquire(vr, sce); /* assign sce to vr */

            /* uses initial reference of sce */
            li_waitqueue_push(&sc->delete_queue, &sce->queue_elem);
            g_hash_table_insert(sc->entries, sce->data.path, sce);

            sce->refcount++;
            li_tasklet_push(vr->wrk->tasklets, stat_cache_run, stat_cache_finished, sce);

            sc->misses++;
            return LI_HANDLER_WAIT_FOR_EVENT;
        }
    }

    if (fd) {
        /* open + fstat */
        while (-1 == (*fd = open(path->str, O_RDONLY))) {
            if (errno == EINTR)
                continue;

            *err = errno;
            return LI_HANDLER_ERROR;
        }
        if (-1 == fstat(*fd, st)) {
            *err = errno;
            close(*fd);
            *fd = -1;
            return LI_HANDLER_ERROR;
        }
    } else {
        /* stat */
        if (-1 == stat(path->str, st)) {
            *err = errno;
            return LI_HANDLER_ERROR;
        }
    }

    return LI_HANDLER_GO_ON;
}
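The open/fstat branch above retries open() while errno is EINTR before falling back to an error return. A minimal standalone sketch of that pattern, factored into one helper (open_and_stat is a hypothetical name, not part of the lighttpd2 API):

#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

/* hypothetical helper: EINTR-safe open() followed by fstat(), mirroring the
 * fd branch of stat_cache_get above. Returns the fd on success; on failure
 * returns -1 and stores the failing errno in *err. */
static int open_and_stat(const char *path, struct stat *st, int *err) {
	int fd;

	do {
		fd = open(path, O_RDONLY);
	} while (-1 == fd && EINTR == errno);

	if (-1 == fd) {
		*err = errno;
		return -1;
	}

	if (-1 == fstat(fd, st)) {
		*err = errno;
		close(fd);
		return -1;
	}

	return fd;
}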
Example #6
0
static liHandlerResult mod_limit_action_handle(liVRequest *vr, gpointer param, gpointer *context) {
	gboolean limit_reached = FALSE;
	mod_limit_context *ctx = (mod_limit_context*) param;
	GPtrArray *arr = g_ptr_array_index(vr->plugin_ctx, ctx->plugin->id);
	gint cons;
	mod_limit_req_ip_data *rid;
	liSocketAddress *remote_addr = &vr->coninfo->remote_addr;
	gpointer addr;
	guint32 bits;

	UNUSED(context);

	if (li_vrequest_is_handled(vr)) {
		VR_DEBUG(vr, "%s", "mod_limit: already have a content handler - ignoring limits. Put limit.* before content handlers such as 'static', 'fastcgi' or 'proxy'");
		return LI_HANDLER_GO_ON;
	}

	/* IPv4 or IPv6? */
	switch (remote_addr->addr->plain.sa_family) {
	case AF_INET:
		addr = &remote_addr->addr->ipv4.sin_addr.s_addr;
		bits = 32;
		break;
	case AF_INET6:
		addr = &remote_addr->addr->ipv6.sin6_addr.s6_addr;
		bits = 128;
		break;
	default:
		if (ctx->type == ML_TYPE_CON_IP || ctx->type == ML_TYPE_REQ_IP) {
			VR_DEBUG(vr, "%s", "mod_limit only supports ipv4 or ipv6 clients");
			return LI_HANDLER_ERROR;
		}
		addr = NULL;
		bits = 0;
	}

	if (!arr) {
		/* request is not in any context yet, create new array */
		arr = g_ptr_array_sized_new(2);
		g_ptr_array_index(vr->plugin_ctx, ctx->plugin->id) = arr;
	}

	switch (ctx->type) {
	case ML_TYPE_CON:
#ifdef GLIB_VERSION_2_30
		/* since 2.30 g_atomic_int_add does the same as g_atomic_int_exchange_and_add,
		 * before it didn't return the old value. this fixes the deprecation warning. */
		if (g_atomic_int_add(&ctx->pool.con, 1) > ctx->limit) {
			g_atomic_int_add(&ctx->pool.con, -1);
			limit_reached = TRUE;
			VR_DEBUG(vr, "limit.con: limit reached (%d active connections)", ctx->limit);
		}
#else
		if (g_atomic_int_exchange_and_add(&ctx->pool.con, 1) > ctx->limit) {
			g_atomic_int_add(&ctx->pool.con, -1);
			limit_reached = TRUE;
			VR_DEBUG(vr, "limit.con: limit reached (%d active connections)", ctx->limit);
		}
#endif
		break;
	case ML_TYPE_CON_IP:
		g_mutex_lock(ctx->mutex);
		cons = GPOINTER_TO_INT(li_radixtree_lookup_exact(ctx->pool.con_ip, addr, bits));
		if (cons < ctx->limit) {
			li_radixtree_insert(ctx->pool.con_ip, addr, bits, GINT_TO_POINTER(cons+1));
		} else {
			limit_reached = TRUE;
			VR_DEBUG(vr, "limit.con_ip: limit reached (%d active connections)", ctx->limit);
		}
		g_mutex_unlock(ctx->mutex);
		break;
	case ML_TYPE_REQ:
		g_mutex_lock(ctx->mutex);
		if (li_cur_ts(vr->wrk) - ctx->pool.req.ts > 1.0) {
			/* reset pool */
			ctx->pool.req.ts = li_cur_ts(vr->wrk);
			ctx->pool.req.num = 1;
		} else {
			ctx->pool.req.num++;
			if (ctx->pool.req.num > ctx->limit) {
				limit_reached = TRUE;
				VR_DEBUG(vr, "limit.req: limit reached (%d req/s)", ctx->limit);
			}
		}
		g_mutex_unlock(ctx->mutex);
		break;
	case ML_TYPE_REQ_IP:
		g_mutex_lock(ctx->mutex);
		rid = li_radixtree_lookup_exact(ctx->pool.req_ip, addr, bits);
		if (!rid) {
			/* IP not known */
			rid = g_slice_new0(mod_limit_req_ip_data);
			rid->requests = 1;
			rid->ip = li_sockaddr_dup(*remote_addr);
			rid->ctx = ctx;
			rid->timeout_elem.data = rid;
			li_radixtree_insert(ctx->pool.req_ip, addr, bits, rid);
			li_waitqueue_push(&(((mod_limit_data*)ctx->plugin->data)->timeout_queues[vr->wrk->ndx]), &rid->timeout_elem);
		} else if (rid->requests < ctx->limit) {
			rid->requests++;
		} else {
			limit_reached = TRUE;
			VR_DEBUG(vr, "limit.req_ip: limit reached (%d req/s)", ctx->limit);
		}
		g_mutex_unlock(ctx->mutex);
		break;
	}

	if (limit_reached) {
		/* limit reached, we either execute the defined action or return a 503 error page */
		if (ctx->action_limit_reached) {
			/* execute action */
			li_action_enter(vr, ctx->action_limit_reached);
		} else {
			/* return 503 error page */
			if (!li_vrequest_handle_direct(vr)) {
				return LI_HANDLER_ERROR;
			}

			vr->response.http_status = 503;
		}
	} else {
		g_ptr_array_add(arr, ctx);
		g_atomic_int_inc(&ctx->refcount);
	}

	return LI_HANDLER_GO_ON;
}
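The ML_TYPE_CON branch above relies on the atomic add returning the previous counter value: it optimistically reserves a slot and rolls the increment back when the old value shows the pool was already full. A minimal sketch of the same reserve-then-roll-back pattern using C11 atomics instead of GLib (try_acquire_slot is a hypothetical name):

#include <stdatomic.h>
#include <stdbool.h>

/* hypothetical sketch: reserve a connection slot by incrementing first and
 * undoing the increment if the previous value shows the limit was reached */
static bool try_acquire_slot(atomic_int *active, int limit) {
	if (atomic_fetch_add(active, 1) >= limit) {
		atomic_fetch_sub(active, 1); /* roll back the reservation */
		return false;                /* over the limit */
	}
	return true;
}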
Example #7
0
guint li_throttle_query(liWorker *wrk, liThrottleState *state, guint interested, liThrottleNotifyCB notify_callback, gpointer data) {
	guint now = msec_timestamp(li_cur_ts(wrk));
	gint fill, pool_fill;
	guint i, len;

	if (NULL == state) return interested;

	state->notify_callback = NULL;
	state->wqueue_elem.data = NULL;

	throttle_debug("li_throttle_query[%u]: interested %i, magazine %i\n", now, interested, state->magazine);

	if (interested > THROTTLE_MAX_STEP) interested = THROTTLE_MAX_STEP;

	if ((gint) interested <= state->magazine + THROTTLE_OVERLOAD) return interested;

	/* also try to balance negative magazine */
	fill = interested - state->magazine;
	if (state->single_rate != 0) {
		if (now - state->single_last_rearm >= LI_THROTTLE_GRANULARITY) {
			guint single_fill = (((guint64) state->single_rate) * (now - state->single_last_rearm)) / 1000u;
			state->single_last_rearm = now;
			if (state->single_burst - state->single_magazine < single_fill) {
				state->single_magazine = state->single_burst;
			} else {
				state->single_magazine += single_fill;
			}
		}
		if (fill > state->single_magazine) fill = state->single_magazine;
		throttle_debug("single_magazine: %i\n", state->single_magazine);
	}

	/* pool_fill <= fill in the loop */
	pool_fill = fill;
	for (i = 0, len = state->pools->len; i < len; ++i) {
		liThrottlePoolState *pstate = g_ptr_array_index(state->pools, i);
		liThrottlePool *pool = pstate->pool;
		liThrottlePoolWorkerState *pwstate = &pool->workers[wrk->ndx];
		if (fill > pstate->magazine) {
			throttle_register(pwstate, pstate);
			throttle_pool_rearm(wrk, pool, now);
			if (fill > pstate->magazine) {
				throttle_register(pwstate, pstate);
				if (pool_fill > pstate->magazine) {
					pool_fill = pstate->magazine;
				}
			}
		}
		throttle_debug("pool %i magazine: %i\n", i, state->single_magazine);
	}

	throttle_debug("query refill: %i\n", pool_fill);

	if (pool_fill > 0) {
		if (state->single_rate != 0) {
			state->single_magazine -= pool_fill;
		}
		for (i = 0, len = state->pools->len; i < len; ++i) {
			liThrottlePoolState *pstate = g_ptr_array_index(state->pools, i);
			pstate->magazine -= pool_fill;
		}
		state->magazine += pool_fill;
	}

	if (state->magazine + THROTTLE_OVERLOAD <= 0) {
		throttle_debug("query queueing\n");
		state->wqueue_elem.data = data;
		state->notify_callback = notify_callback;
		state->interested = interested;
		if (!state->wqueue_elem.queued) {
			li_waitqueue_push(&wrk->throttle_queue, &state->wqueue_elem);
		}
		return 0;
	}

	throttle_debug("query success: %i\n", state->magazine + THROTTLE_OVERLOAD);

	if ((gint) interested <= state->magazine + THROTTLE_OVERLOAD) return interested;
	return state->magazine + THROTTLE_OVERLOAD;
}
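The single_rate branch above is a token-bucket refill: credit accrues as rate * elapsed_ms / 1000 and is capped at single_burst. A standalone sketch of that rearm arithmetic, assuming a simplified bucket_t type that is not part of the lighttpd2 code base:

#include <stdint.h>

/* hypothetical simplified bucket: magazine in bytes, rate in bytes/sec,
 * timestamps in milliseconds (like msec_timestamp() above) */
typedef struct {
	int64_t  magazine;   /* available credit */
	int64_t  burst;      /* cap for the magazine */
	uint64_t rate;       /* refill rate in bytes per second */
	uint64_t last_rearm; /* last refill time, msec */
} bucket_t;

static void bucket_rearm(bucket_t *b, uint64_t now_msec) {
	uint64_t elapsed = now_msec - b->last_rearm;
	int64_t credit;

	if (0 == elapsed) return;

	/* same arithmetic as single_fill above: rate * elapsed_ms / 1000, capped at burst */
	credit = (int64_t) ((b->rate * elapsed) / 1000u);
	b->last_rearm = now_msec;
	b->magazine = (b->magazine + credit > b->burst) ? b->burst : b->magazine + credit;
}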