static int cgi_create_env(server *srv, connection *con, plugin_data *p, buffer *cgi_handler) {
	pid_t pid;

#ifdef HAVE_IPV6
	char b2[INET6_ADDRSTRLEN + 1];
#endif

	int to_cgi_fds[2];
	int from_cgi_fds[2];
	struct stat st;

#ifndef __WIN32

	if (cgi_handler->used > 1) {
		/* stat the exec file */
		if (-1 == (stat(cgi_handler->ptr, &st))) {
			log_error_write(srv, __FILE__, __LINE__, "sbss",
					"stat for cgi-handler", cgi_handler,
					"failed:", strerror(errno));
			return -1;
		}
	}

	if (pipe(to_cgi_fds)) {
		log_error_write(srv, __FILE__, __LINE__, "ss", "pipe failed:", strerror(errno));
		return -1;
	}

	if (pipe(from_cgi_fds)) {
		close(to_cgi_fds[0]); close(to_cgi_fds[1]);
		log_error_write(srv, __FILE__, __LINE__, "ss", "pipe failed:", strerror(errno));
		return -1;
	}

	/* fork, execve */
	switch (pid = fork()) {
	case 0: {
		/* child */
		char **args;
		int argc;
		int i = 0;
		char buf[32];
		size_t n;
		char_array env;
		char *c;
		const char *s;
		server_socket *srv_sock = con->srv_socket;

		/* move stdout to from_cgi_fd[1] */
		close(STDOUT_FILENO);
		dup2(from_cgi_fds[1], STDOUT_FILENO);
		close(from_cgi_fds[1]);
		/* not needed */
		close(from_cgi_fds[0]);

		/* move the stdin to to_cgi_fd[0] */
		close(STDIN_FILENO);
		dup2(to_cgi_fds[0], STDIN_FILENO);
		close(to_cgi_fds[0]);
		/* not needed */
		close(to_cgi_fds[1]);

		/* HACK:
		 * this is not nice, but it works
		 *
		 * we feed the stderr of the CGI to our errorlog, if possible
		 */
		if (srv->errorlog_mode == ERRORLOG_FILE) {
			close(STDERR_FILENO);
			dup2(srv->errorlog_fd, STDERR_FILENO);
		}

		/* create environment */
		env.ptr = NULL;
		env.size = 0;
		env.used = 0;

		cgi_env_add(&env, CONST_STR_LEN("SERVER_SOFTWARE"), CONST_STR_LEN(PACKAGE_NAME"/"PACKAGE_VERSION));

		if (!buffer_is_empty(con->server_name)) {
			cgi_env_add(&env, CONST_STR_LEN("SERVER_NAME"), CONST_BUF_LEN(con->server_name));
		} else {
#ifdef HAVE_IPV6
			s = inet_ntop(srv_sock->addr.plain.sa_family,
				      srv_sock->addr.plain.sa_family == AF_INET6 ?
				      (const void *) &(srv_sock->addr.ipv6.sin6_addr) :
				      (const void *) &(srv_sock->addr.ipv4.sin_addr),
				      b2, sizeof(b2)-1);
#else
			s = inet_ntoa(srv_sock->addr.ipv4.sin_addr);
#endif
			cgi_env_add(&env, CONST_STR_LEN("SERVER_NAME"), s, strlen(s));
		}
		cgi_env_add(&env, CONST_STR_LEN("GATEWAY_INTERFACE"), CONST_STR_LEN("CGI/1.1"));

		s = get_http_version_name(con->request.http_version);

		cgi_env_add(&env, CONST_STR_LEN("SERVER_PROTOCOL"), s, strlen(s));

		ltostr(buf,
#ifdef HAVE_IPV6
			ntohs(srv_sock->addr.plain.sa_family == AF_INET6 ? srv_sock->addr.ipv6.sin6_port : srv_sock->addr.ipv4.sin_port)
#else
			ntohs(srv_sock->addr.ipv4.sin_port)
#endif
			);
		cgi_env_add(&env, CONST_STR_LEN("SERVER_PORT"), buf, strlen(buf));

#ifdef HAVE_IPV6
		s = inet_ntop(srv_sock->addr.plain.sa_family,
			      srv_sock->addr.plain.sa_family == AF_INET6 ?
			      (const void *) &(srv_sock->addr.ipv6.sin6_addr) :
			      (const void *) &(srv_sock->addr.ipv4.sin_addr),
			      b2, sizeof(b2)-1);
#else
		s = inet_ntoa(srv_sock->addr.ipv4.sin_addr);
#endif
		cgi_env_add(&env, CONST_STR_LEN("SERVER_ADDR"), s, strlen(s));

		s = get_http_method_name(con->request.http_method);
		cgi_env_add(&env, CONST_STR_LEN("REQUEST_METHOD"), s, strlen(s));

		if (!buffer_is_empty(con->request.pathinfo)) {
			cgi_env_add(&env, CONST_STR_LEN("PATH_INFO"), CONST_BUF_LEN(con->request.pathinfo));
		}
		cgi_env_add(&env, CONST_STR_LEN("REDIRECT_STATUS"), CONST_STR_LEN("200"));
		if (!buffer_is_empty(con->uri.query)) {
			cgi_env_add(&env, CONST_STR_LEN("QUERY_STRING"), CONST_BUF_LEN(con->uri.query));
		}
		if (!buffer_is_empty(con->request.orig_uri)) {
			cgi_env_add(&env, CONST_STR_LEN("REQUEST_URI"), CONST_BUF_LEN(con->request.orig_uri));
		}


#ifdef HAVE_IPV6
		s = inet_ntop(con->dst_addr.plain.sa_family,
			      con->dst_addr.plain.sa_family == AF_INET6 ?
			      (const void *) &(con->dst_addr.ipv6.sin6_addr) :
			      (const void *) &(con->dst_addr.ipv4.sin_addr),
			      b2, sizeof(b2)-1);
#else
		s = inet_ntoa(con->dst_addr.ipv4.sin_addr);
#endif
		cgi_env_add(&env, CONST_STR_LEN("REMOTE_ADDR"), s, strlen(s));

		ltostr(buf,
#ifdef HAVE_IPV6
			ntohs(con->dst_addr.plain.sa_family == AF_INET6 ? con->dst_addr.ipv6.sin6_port : con->dst_addr.ipv4.sin_port)
#else
			ntohs(con->dst_addr.ipv4.sin_port)
#endif
			);
		cgi_env_add(&env, CONST_STR_LEN("REMOTE_PORT"), buf, strlen(buf));

		if (!buffer_is_empty(con->authed_user)) {
			cgi_env_add(&env, CONST_STR_LEN("REMOTE_USER"),
				    CONST_BUF_LEN(con->authed_user));
		}

		/* request.content_length < SSIZE_MAX, see request.c */
		ltostr(buf, con->request.content_length);
		cgi_env_add(&env, CONST_STR_LEN("CONTENT_LENGTH"), buf, strlen(buf));
		cgi_env_add(&env, CONST_STR_LEN("SCRIPT_FILENAME"), CONST_BUF_LEN(con->physical.path));
		cgi_env_add(&env, CONST_STR_LEN("SCRIPT_NAME"), CONST_BUF_LEN(con->uri.path));
		cgi_env_add(&env, CONST_STR_LEN("DOCUMENT_ROOT"), CONST_BUF_LEN(con->physical.doc_root));

		/* for valgrind */
		if (NULL != (s = getenv("LD_PRELOAD"))) {
			cgi_env_add(&env, CONST_STR_LEN("LD_PRELOAD"), s, strlen(s));
		}

		if (NULL != (s = getenv("LD_LIBRARY_PATH"))) {
			cgi_env_add(&env, CONST_STR_LEN("LD_LIBRARY_PATH"), s, strlen(s));
		}
#ifdef __CYGWIN__
		/* CYGWIN needs SYSTEMROOT */
		if (NULL != (s = getenv("SYSTEMROOT"))) {
			cgi_env_add(&env, CONST_STR_LEN("SYSTEMROOT"), s, strlen(s));
		}
#endif

		for (n = 0; n < con->request.headers->used; n++) {
			data_string *ds;

			ds = (data_string *)con->request.headers->data[n];

			if (ds->value->used && ds->key->used) {
				size_t j;

				buffer_reset(p->tmp_buf);

				if (0 != strcasecmp(ds->key->ptr, "CONTENT-TYPE")) {
					buffer_copy_string(p->tmp_buf, "HTTP_");
					p->tmp_buf->used--; /* strip \0 after HTTP_ */
				}

				buffer_prepare_append(p->tmp_buf, ds->key->used + 2);

				for (j = 0; j < ds->key->used - 1; j++) {
					char cr = '_';
					if (light_isalpha(ds->key->ptr[j])) {
						/* upper-case */
						cr = ds->key->ptr[j] & ~32;
					} else if (light_isdigit(ds->key->ptr[j])) {
						/* copy */
						cr = ds->key->ptr[j];
					}
					p->tmp_buf->ptr[p->tmp_buf->used++] = cr;
				}
				p->tmp_buf->ptr[p->tmp_buf->used++] = '\0';

				cgi_env_add(&env, CONST_BUF_LEN(p->tmp_buf), CONST_BUF_LEN(ds->value));
			}
		}

		for (n = 0; n < con->environment->used; n++) {
			data_string *ds;

			ds = (data_string *)con->environment->data[n];

			if (ds->value->used && ds->key->used) {
				size_t j;

				buffer_reset(p->tmp_buf);

				buffer_prepare_append(p->tmp_buf, ds->key->used + 2);

				for (j = 0; j < ds->key->used - 1; j++) {
					p->tmp_buf->ptr[p->tmp_buf->used++] =
						isalpha((unsigned char)ds->key->ptr[j]) ?
						toupper((unsigned char)ds->key->ptr[j]) : '_';
				}
				p->tmp_buf->ptr[p->tmp_buf->used++] = '\0';

				cgi_env_add(&env, CONST_BUF_LEN(p->tmp_buf), CONST_BUF_LEN(ds->value));
			}
		}

		if (env.size == env.used) {
			env.size += 16;
			env.ptr = realloc(env.ptr, env.size * sizeof(*env.ptr));
		}

		env.ptr[env.used] = NULL;

		/* set up args */
		argc = 3;
		args = malloc(sizeof(*args) * argc);
		i = 0;

		if (cgi_handler->used > 1) {
			args[i++] = cgi_handler->ptr;
		}
		args[i++] = con->physical.path->ptr;
		args[i++] = NULL;

		/* search for the last / */
		if (NULL != (c = strrchr(con->physical.path->ptr, '/'))) {
			*c = '\0';

			/* change to the physical directory */
			if (-1 == chdir(con->physical.path->ptr)) {
				log_error_write(srv, __FILE__, __LINE__, "ssb", "chdir failed:", strerror(errno), con->physical.path);
			}
			*c = '/';
		}

		/* we don't need the client socket */
		for (i = 3; i < 256; i++) {
			if (i != srv->errorlog_fd) close(i);
		}

		/* exec the cgi */
		execve(args[0], args, env.ptr);

		log_error_write(srv, __FILE__, __LINE__, "sss", "CGI failed:", strerror(errno), args[0]);

		/* */
		SEGFAULT();
		break;
	}
	case -1:
		/* error */
		log_error_write(srv, __FILE__, __LINE__, "ss", "fork failed:", strerror(errno));
		close(to_cgi_fds[0]); close(to_cgi_fds[1]);
		close(from_cgi_fds[0]); close(from_cgi_fds[1]);
		return -1;
	default: {
		handler_ctx *hctx;
		/* father */

		close(from_cgi_fds[1]);
		close(to_cgi_fds[0]);

		if (con->request.content_length) {
			chunkqueue *cq = con->request_content_queue;
			chunk *c;

			assert(chunkqueue_length(cq) == (off_t)con->request.content_length);

			/* there is content to send */
			for (c = cq->first; c; c = cq->first) {
				int r = 0;

				/* copy all chunks */
				switch(c->type) {
				case FILE_CHUNK:

					if (c->file.mmap.start == MAP_FAILED) {
						if (-1 == c->file.fd &&  /* open the file if not already open */
						    -1 == (c->file.fd = open(c->file.name->ptr, O_RDONLY))) {
							log_error_write(srv, __FILE__, __LINE__, "ss", "open failed: ", strerror(errno));

							close(from_cgi_fds[0]);
							close(to_cgi_fds[1]);
							return -1;
						}

						c->file.mmap.length = c->file.length;

						if (MAP_FAILED == (c->file.mmap.start = mmap(0,  c->file.mmap.length, PROT_READ, MAP_SHARED, c->file.fd, 0))) {
							log_error_write(srv, __FILE__, __LINE__, "ssbd", "mmap failed: ",
									strerror(errno), c->file.name,  c->file.fd);

							close(from_cgi_fds[0]);
							close(to_cgi_fds[1]);
							return -1;
						}

						close(c->file.fd);
						c->file.fd = -1;

						/* chunk_reset() or chunk_free() will cleanup for us */
					}

					if ((r = write(to_cgi_fds[1], c->file.mmap.start + c->offset, c->file.length - c->offset)) < 0) {
						switch(errno) {
						case ENOSPC:
							con->http_status = 507;

							break;
						default:
							con->http_status = 403;
							break;
						}
					}
					break;
				case MEM_CHUNK:
					if ((r = write(to_cgi_fds[1], c->mem->ptr + c->offset, c->mem->used - c->offset - 1)) < 0) {
						switch(errno) {
						case ENOSPC:
							con->http_status = 507;

							break;
						default:
							con->http_status = 403;
							break;
						}
					}
					break;
				case UNUSED_CHUNK:
					break;
				}

				if (r > 0) {
					c->offset += r;
					cq->bytes_out += r;
				} else {
					break;
				}
				chunkqueue_remove_finished_chunks(cq);
			}
		}

		close(to_cgi_fds[1]);

		/* register the PID and wait for it asynchronously */
		con->mode = p->id;
		buffer_reset(con->physical.path);

		hctx = cgi_handler_ctx_init();

		hctx->remote_conn = con;
		hctx->plugin_data = p;
		hctx->pid = pid;
		hctx->fd = from_cgi_fds[0];
		hctx->fde_ndx = -1;

		con->plugin_ctx[p->id] = hctx;

		fdevent_register(srv->ev, hctx->fd, cgi_handle_fdevent, hctx);
		fdevent_event_add(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_IN);

		if (-1 == fdevent_fcntl_set(srv->ev, hctx->fd)) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "fcntl failed: ", strerror(errno));

			fdevent_event_del(srv->ev, &(hctx->fde_ndx), hctx->fd);
			fdevent_unregister(srv->ev, hctx->fd);

			log_error_write(srv, __FILE__, __LINE__, "sd", "cgi close:", hctx->fd);

			close(hctx->fd);

			cgi_handler_ctx_free(hctx);

			con->plugin_ctx[p->id] = NULL;

			return -1;
		}

		break;
	}
	}

	return 0;
#else
	return -1;
#endif
}
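
/*
 * Standalone sketch (not lighttpd code): the header-name mapping used by
 * cgi_create_env() above, i.e. "X-Forwarded-For" becomes "HTTP_X_FORWARDED_FOR"
 * while "Content-Type" stays "CONTENT_TYPE" (no HTTP_ prefix).  It compiles and
 * runs on its own; the function and variable names are illustrative only.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

static void header_to_cgi_name(const char *key, char *out, size_t out_len) {
	size_t n = 0;

	if (0 != strcasecmp(key, "Content-Type") && n + 5 < out_len) {
		memcpy(out + n, "HTTP_", 5);
		n += 5;
	}

	for (; *key != '\0' && n + 1 < out_len; key++) {
		if (isalpha((unsigned char)*key)) {
			out[n++] = (char)toupper((unsigned char)*key);
		} else if (isdigit((unsigned char)*key)) {
			out[n++] = *key;
		} else {
			out[n++] = '_'; /* '-' and any other character become '_' */
		}
	}
	out[n] = '\0';
}

int main(void) {
	char buf[64];

	header_to_cgi_name("X-Forwarded-For", buf, sizeof(buf));
	puts(buf); /* HTTP_X_FORWARDED_FOR */

	header_to_cgi_name("Content-Type", buf, sizeof(buf));
	puts(buf); /* CONTENT_TYPE */

	return 0;
}
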
static int cgi_create_env(server *srv, connection *con, plugin_data *p, buffer *cgi_handler) {
	pid_t pid;

	int to_cgi_fds[2];
	int from_cgi_fds[2];
	int from_cgi_err_fds[2];
	struct stat st;

#ifndef _WIN32

	if (cgi_handler && cgi_handler->used > 1) {
		/* stat the exec file */
		if (-1 == (stat(cgi_handler->ptr, &st))) {
			log_error_write(srv, __FILE__, __LINE__, "sbss",
					"stat for cgi-handler", cgi_handler,
					"failed:", strerror(errno));
			return -1;
		}
	}

	if (pipe(to_cgi_fds)) {
		log_error_write(srv, __FILE__, __LINE__, "ss", "pipe failed:", strerror(errno));
		return -1;
	}

	if (pipe(from_cgi_fds)) {
		close(to_cgi_fds[0]); close(to_cgi_fds[1]);
		log_error_write(srv, __FILE__, __LINE__, "ss", "pipe failed:", strerror(errno));
		return -1;
	}

	if (pipe(from_cgi_err_fds)) {
		close(to_cgi_fds[0]); close(to_cgi_fds[1]);
		close(from_cgi_fds[0]); close(from_cgi_fds[1]);
		log_error_write(srv, __FILE__, __LINE__, "ss", "pipe failed:", strerror(errno));
		return -1;
	}

	/* fork, execve */
	switch (pid = fork()) {
	case 0: {
		/* child */
		char **args;
		int argc;
		int i = 0;
		char buf[32];
		size_t n;
		char_array env;
		char *c;
		const char *s;
		server_socket *srv_sock = con->srv_socket;

		/* move stdout to from_cgi_fd[1] */
		close(STDOUT_FILENO);
		dup2(from_cgi_fds[1], STDOUT_FILENO);
		close(from_cgi_fds[1]);
		/* not needed */
		close(from_cgi_fds[0]);

		/* move stderr to from_cgi_err_fd[1] */
		close(STDERR_FILENO);
		dup2(from_cgi_err_fds[1], STDERR_FILENO);
		close(from_cgi_err_fds[1]);
		/* not needed */
		close(from_cgi_err_fds[0]);

		/* move the stdin to to_cgi_fd[0] */
		close(STDIN_FILENO);
		dup2(to_cgi_fds[0], STDIN_FILENO);
		close(to_cgi_fds[0]);
		/* not needed */
		close(to_cgi_fds[1]);

		/* create environment */
		env.ptr = NULL;
		env.size = 0;
		env.used = 0;

		cgi_env_add(&env, CONST_STR_LEN("SERVER_SOFTWARE"), CONST_STR_LEN(PACKAGE_NAME"/"PACKAGE_VERSION));

		s = sock_addr_to_p(srv, &srv_sock->addr);
		cgi_env_add(&env, CONST_STR_LEN("SERVER_ADDR"), s, strlen(s));
		/* !!! careful: s may be reused for SERVER_NAME !!! */

		if (!buffer_is_empty(con->server_name)) {
			size_t len = con->server_name->used - 1;
			char *colon = strchr(con->server_name->ptr, ':');
			if (colon) len = colon - con->server_name->ptr;

			cgi_env_add(&env, CONST_STR_LEN("SERVER_NAME"), con->server_name->ptr, len);
		} else {
			/* use SERVER_ADDR */
			cgi_env_add(&env, CONST_STR_LEN("SERVER_NAME"), s, strlen(s));
		}
		cgi_env_add(&env, CONST_STR_LEN("GATEWAY_INTERFACE"), CONST_STR_LEN("CGI/1.1"));

		s = get_http_version_name(con->request.http_version);

		cgi_env_add(&env, CONST_STR_LEN("SERVER_PROTOCOL"), s, strlen(s));

		LI_ltostr(buf, sock_addr_get_port(&srv_sock->addr));
		cgi_env_add(&env, CONST_STR_LEN("SERVER_PORT"), buf, strlen(buf));

		s = get_http_method_name(con->request.http_method);
		cgi_env_add(&env, CONST_STR_LEN("REQUEST_METHOD"), s, strlen(s));

		if (!buffer_is_empty(con->request.pathinfo)) {
			cgi_env_add(&env, CONST_STR_LEN("PATH_INFO"), CONST_BUF_LEN(con->request.pathinfo));
		}
		cgi_env_add(&env, CONST_STR_LEN("REDIRECT_STATUS"), CONST_STR_LEN("200"));
		if (!buffer_is_empty(con->uri.query)) {
			cgi_env_add(&env, CONST_STR_LEN("QUERY_STRING"), CONST_BUF_LEN(con->uri.query));
		} else {
			/* set an empty QUERY_STRING */
			cgi_env_add(&env, CONST_STR_LEN("QUERY_STRING"), CONST_STR_LEN(""));
		}
		if (!buffer_is_empty(con->request.orig_uri)) {
			cgi_env_add(&env, CONST_STR_LEN("REQUEST_URI"), CONST_BUF_LEN(con->request.orig_uri));
		}

		s = sock_addr_to_p(srv, &con->dst_addr);
		cgi_env_add(&env, CONST_STR_LEN("REMOTE_ADDR"), s, strlen(s));

		LI_ltostr(buf, sock_addr_get_port(&con->dst_addr));
		cgi_env_add(&env, CONST_STR_LEN("REMOTE_PORT"), buf, strlen(buf));

		if (!buffer_is_empty(con->authed_user)) {
			cgi_env_add(&env, CONST_STR_LEN("REMOTE_USER"),
				    CONST_BUF_LEN(con->authed_user));
		}

#ifdef USE_OPENSSL
		if (srv_sock->is_ssl) {
			cgi_env_add(&env, CONST_STR_LEN("HTTPS"), CONST_STR_LEN("on"));
		}
#endif

		/* request.content_length < SSIZE_MAX, see request.c */
		if (con->request.content_length > 0) {
			LI_ltostr(buf, con->request.content_length);
			cgi_env_add(&env, CONST_STR_LEN("CONTENT_LENGTH"), buf, strlen(buf));
		}
		cgi_env_add(&env, CONST_STR_LEN("SCRIPT_FILENAME"), CONST_BUF_LEN(con->physical.path));
		cgi_env_add(&env, CONST_STR_LEN("SCRIPT_NAME"), CONST_BUF_LEN(con->uri.path));
		cgi_env_add(&env, CONST_STR_LEN("DOCUMENT_ROOT"), CONST_BUF_LEN(con->physical.doc_root));

		/* for valgrind */
		if (NULL != (s = getenv("LD_PRELOAD"))) {
			cgi_env_add(&env, CONST_STR_LEN("LD_PRELOAD"), s, strlen(s));
		}

		if (NULL != (s = getenv("LD_LIBRARY_PATH"))) {
			cgi_env_add(&env, CONST_STR_LEN("LD_LIBRARY_PATH"), s, strlen(s));
		}
#ifdef __CYGWIN__
		/* CYGWIN needs SYSTEMROOT */
		if (NULL != (s = getenv("SYSTEMROOT"))) {
			cgi_env_add(&env, CONST_STR_LEN("SYSTEMROOT"), s, strlen(s));
		}
#endif

		for (n = 0; n < con->request.headers->used; n++) {
			data_string *ds;

			ds = (data_string *)con->request.headers->data[n];

			if (ds->value->used && ds->key->used) {
				size_t j;

				buffer_reset(p->tmp_buf);

				if (0 != strcasecmp(ds->key->ptr, "CONTENT-TYPE")) {
					buffer_copy_string_len(p->tmp_buf, CONST_STR_LEN("HTTP_"));
					p->tmp_buf->used--; /* strip \0 after HTTP_ */
				}

				buffer_prepare_append(p->tmp_buf, ds->key->used + 2);

				for (j = 0; j < ds->key->used - 1; j++) {
					char cr = '_';
					if (light_isalpha(ds->key->ptr[j])) {
						/* upper-case */
						cr = ds->key->ptr[j] & ~32;
					} else if (light_isdigit(ds->key->ptr[j])) {
						/* copy */
						cr = ds->key->ptr[j];
					}
					p->tmp_buf->ptr[p->tmp_buf->used++] = cr;
				}
				p->tmp_buf->ptr[p->tmp_buf->used++] = '\0';

				cgi_env_add(&env, CONST_BUF_LEN(p->tmp_buf), CONST_BUF_LEN(ds->value));
			}
		}

		for (n = 0; n < con->environment->used; n++) {
			data_string *ds;

			ds = (data_string *)con->environment->data[n];

			if (ds->value->used && ds->key->used) {
				size_t j;

				buffer_reset(p->tmp_buf);

				buffer_prepare_append(p->tmp_buf, ds->key->used + 2);

				for (j = 0; j < ds->key->used - 1; j++) {
					char cr = '_';
					if (light_isalpha(ds->key->ptr[j])) {
						/* upper-case */
						cr = ds->key->ptr[j] & ~32;
					} else if (light_isdigit(ds->key->ptr[j])) {
						/* copy */
						cr = ds->key->ptr[j];
					}
					p->tmp_buf->ptr[p->tmp_buf->used++] = cr;
				}
				p->tmp_buf->ptr[p->tmp_buf->used++] = '\0';

				cgi_env_add(&env, CONST_BUF_LEN(p->tmp_buf), CONST_BUF_LEN(ds->value));
			}
		}

		if (env.size == env.used) {
			env.size += 16;
			env.ptr = realloc(env.ptr, env.size * sizeof(*env.ptr));
		}

		env.ptr[env.used] = NULL;

		/* set up args */
		argc = 3;
		args = malloc(sizeof(*args) * argc);
		i = 0;

		if (cgi_handler && cgi_handler->used > 1) {
			args[i++] = cgi_handler->ptr;
		}
		args[i++] = con->physical.path->ptr;
		args[i++] = NULL;

		/* search for the last / */
		if (NULL != (c = strrchr(con->physical.path->ptr, '/'))) {
			*c = '\0';

			/* change to the physical directory */
			if (-1 == chdir(con->physical.path->ptr)) {
				log_error_write(srv, __FILE__, __LINE__, "ssb", "chdir failed:", strerror(errno), con->physical.path);
			}
			*c = '/';
		}

		/* we don't need the client socket */
		for (i = 3; i < 256; i++) {
			close(i);
		}

		/* exec the cgi */
		execve(args[0], args, env.ptr);

		/* */
		SEGFAULT("execve(%s) failed: %s", args[0], strerror(errno));
		break;
	}
	case -1:
		/* error */
		ERROR("fork() failed: %s", strerror(errno));
		close(to_cgi_fds[0]); close(to_cgi_fds[1]);
		close(from_cgi_fds[0]); close(from_cgi_fds[1]);
		close(from_cgi_err_fds[0]); close(from_cgi_err_fds[1]);
		return -1;
		break;
	default: {
		cgi_session *sess;
		/* father */

		close(from_cgi_fds[1]);
		close(from_cgi_err_fds[1]);
		close(to_cgi_fds[0]);

		/* register the PID and wait for it asynchronously */
		con->mode = p->id;
		buffer_reset(con->physical.path);

		sess = cgi_session_init();

		sess->remote_con = con;
		sess->pid = pid;

		assert(sess->sock);

		sess->sock->fd = from_cgi_fds[0];
		sess->sock->type = IOSOCKET_TYPE_PIPE;
		sess->sock_err->fd = from_cgi_err_fds[0];
		sess->sock_err->type = IOSOCKET_TYPE_PIPE;
		sess->wb_sock->fd = to_cgi_fds[1];
		sess->wb_sock->type = IOSOCKET_TYPE_PIPE;

		if (-1 == fdevent_fcntl_set(srv->ev, sess->sock)) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "fcntl failed: ", strerror(errno));

			cgi_session_free(sess);

			return -1;
		}

		if (-1 == fdevent_fcntl_set(srv->ev, sess->sock_err)) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "fcntl failed: ", strerror(errno));

			cgi_session_free(sess);

			return -1;
		}

		con->plugin_ctx[p->id] = sess;

		fdevent_register(srv->ev, sess->sock, cgi_handle_fdevent, sess);
		fdevent_event_add(srv->ev, sess->sock, FDEVENT_IN);

		fdevent_register(srv->ev, sess->sock_err, cgi_handle_err_fdevent, sess);
		fdevent_event_add(srv->ev, sess->sock_err, FDEVENT_IN);

		sess->state = CGI_STATE_READ_RESPONSE_HEADER;

		break;
	}
	}

	return 0;
#else
	return -1;
#endif
}
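
/*
 * Standalone sketch (not lighttpd code): the pipe()/fork()/dup2() wiring both
 * cgi_create_env() variants above rely on.  The child's stdin/stdout are moved
 * onto the pipe ends before exec, while the parent keeps the opposite ends;
 * "cat" stands in for the CGI program.
 */
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void) {
	int to_child[2], from_child[2];
	pid_t pid;
	char buf[64];
	ssize_t n;

	if (pipe(to_child) < 0 || pipe(from_child) < 0) {
		perror("pipe");
		return 1;
	}

	switch (pid = fork()) {
	case -1:
		perror("fork");
		return 1;
	case 0: /* child: stdin <- to_child[0], stdout -> from_child[1] */
		dup2(to_child[0], STDIN_FILENO);
		dup2(from_child[1], STDOUT_FILENO);
		close(to_child[0]);   close(to_child[1]);
		close(from_child[0]); close(from_child[1]);
		execlp("cat", "cat", (char *)NULL);
		perror("execlp");
		_exit(1);
	default: /* parent: write the "request body", read back the "response" */
		close(to_child[0]);
		close(from_child[1]);
		if (write(to_child[1], "hello cgi\n", 10) < 0) perror("write");
		close(to_child[1]); /* EOF on the child's stdin */
		while ((n = read(from_child[0], buf, sizeof(buf))) > 0) {
			fwrite(buf, 1, (size_t)n, stdout);
		}
		close(from_child[0]);
		waitpid(pid, NULL, 0);
	}
	return 0;
}
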
int main (int argc, char **argv) {
	server *srv = NULL;
	int print_config = 0;
	int test_config = 0;
	int i_am_root;
	int o;
	int num_childs = 0;
	int pid_fd = -1, fd;
	size_t i;
#ifdef HAVE_SIGACTION
	struct sigaction act;
#endif
#ifdef HAVE_GETRLIMIT
	struct rlimit rlim;
#endif

#ifdef USE_ALARM
	struct itimerval interval;

	interval.it_interval.tv_sec = 1;
	interval.it_interval.tv_usec = 0;
	interval.it_value.tv_sec = 1;
	interval.it_value.tv_usec = 0;
#endif


	/* for nice %b handling in strftime() */
	setlocale(LC_TIME, "C");

	if (NULL == (srv = server_init())) {
		fprintf(stderr, "did this really happen?\n");
		return -1;
	}

	/* init structs done */

	srv->srvconf.port = 0;
#ifdef HAVE_GETUID
	i_am_root = (getuid() == 0);
#else
	i_am_root = 0;
#endif
	srv->srvconf.dont_daemonize = 0;

	while(-1 != (o = getopt(argc, argv, "f:m:hvVDpt"))) {
		switch(o) {
		case 'f':
			if (srv->config_storage) {
				log_error_write(srv, __FILE__, __LINE__, "s",
						"Can only read one config file. Use the include command to use multiple config files.");

				server_free(srv);
				return -1;
			}
			if (config_read(srv, optarg)) {
				server_free(srv);
				return -1;
			}
			break;
		case 'm':
			buffer_copy_string(srv->srvconf.modules_dir, optarg);
			break;
		case 'p': print_config = 1; break;
		case 't': test_config = 1; break;
		case 'D': srv->srvconf.dont_daemonize = 1; break;
		case 'v': show_version(); return 0;
		case 'V': show_features(); return 0;
		case 'h': show_help(); return 0;
		default:
			show_help();
			server_free(srv);
			return -1;
		}
	}

	if (!srv->config_storage) {
		log_error_write(srv, __FILE__, __LINE__, "s",
				"No configuration available. Try using -f option.");

		server_free(srv);
		return -1;
	}

	if (print_config) {
		data_unset *dc = srv->config_context->data[0];
		if (dc) {
			dc->print(dc, 0);
			fprintf(stdout, "\n");
		} else {
			/* shouldn't happen */
			fprintf(stderr, "global config not found\n");
		}
	}

	if (test_config) {
		printf("Syntax OK\n");
	}

	if (test_config || print_config) {
		server_free(srv);
		return 0;
	}

	/* close stdin and stdout, as they are not needed */
	openDevNull(STDIN_FILENO);
	openDevNull(STDOUT_FILENO);

	if (0 != config_set_defaults(srv)) {
		log_error_write(srv, __FILE__, __LINE__, "s",
				"setting default values failed");
		server_free(srv);
		return -1;
	}

	/* UID handling */
#ifdef HAVE_GETUID
	if (!i_am_root && issetugid()) {
		/* we are setuid-root */

		log_error_write(srv, __FILE__, __LINE__, "s",
				"Are you nuts ? Don't apply a SUID bit to this binary");

		server_free(srv);
		return -1;
	}
#endif

	/* check document-root */
	if (srv->config_storage[0]->document_root->used <= 1) {
		log_error_write(srv, __FILE__, __LINE__, "s",
				"document-root is not set\n");

		server_free(srv);

		return -1;
	}

	if (plugins_load(srv)) {
		log_error_write(srv, __FILE__, __LINE__, "s",
				"loading plugins finally failed");

		plugins_free(srv);
		server_free(srv);

		return -1;
	}

	/* open pid file BEFORE chroot */
	if (srv->srvconf.pid_file->used) {
		if (-1 == (pid_fd = open(srv->srvconf.pid_file->ptr, O_WRONLY | O_CREAT | O_EXCL | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH))) {
			struct stat st;
			if (errno != EEXIST) {
				log_error_write(srv, __FILE__, __LINE__, "sbs",
					"opening pid-file failed:", srv->srvconf.pid_file, strerror(errno));
				return -1;
			}

			if (0 != stat(srv->srvconf.pid_file->ptr, &st)) {
				log_error_write(srv, __FILE__, __LINE__, "sbs",
						"stating existing pid-file failed:", srv->srvconf.pid_file, strerror(errno));
			}

			if (!S_ISREG(st.st_mode)) {
				log_error_write(srv, __FILE__, __LINE__, "sb",
						"pid-file exists and isn't regular file:", srv->srvconf.pid_file);
				return -1;
			}

			if (-1 == (pid_fd = open(srv->srvconf.pid_file->ptr, O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH))) {
				log_error_write(srv, __FILE__, __LINE__, "sbs",
						"opening pid-file failed:", srv->srvconf.pid_file, strerror(errno));
				return -1;
			}
		}
	}

	if (srv->event_handler == FDEVENT_HANDLER_SELECT) {
		/* select() limits itself to FD_SETSIZE
		 *
		 * as that is a hard limit and exceeding it leads to a segfault,
		 * we add some safety margin
		 */
		srv->max_fds = FD_SETSIZE - 200;
	} else {
		srv->max_fds = 4096;
	}

	if (i_am_root) {
		struct group *grp = NULL;
		struct passwd *pwd = NULL;
		int use_rlimit = 1;

#ifdef HAVE_VALGRIND_VALGRIND_H
		if (RUNNING_ON_VALGRIND) use_rlimit = 0;
#endif

#ifdef HAVE_GETRLIMIT
		if (0 != getrlimit(RLIMIT_NOFILE, &rlim)) {
			log_error_write(srv, __FILE__, __LINE__,
					"ss", "couldn't get 'max filedescriptors'",
					strerror(errno));
			return -1;
		}

		if (use_rlimit && srv->srvconf.max_fds) {
			/* set rlimits */

			rlim.rlim_cur = srv->srvconf.max_fds;
			rlim.rlim_max = srv->srvconf.max_fds;

			if (0 != setrlimit(RLIMIT_NOFILE, &rlim)) {
				log_error_write(srv, __FILE__, __LINE__,
						"ss", "couldn't set 'max filedescriptors'",
						strerror(errno));
				return -1;
			}
		}

		if (srv->event_handler == FDEVENT_HANDLER_SELECT) {
			srv->max_fds = rlim.rlim_cur < ((int)FD_SETSIZE) - 200 ? rlim.rlim_cur : FD_SETSIZE - 200;
		} else {
			srv->max_fds = rlim.rlim_cur;
		}

		/* set core file rlimit, if enable_cores is set */
		if (use_rlimit && srv->srvconf.enable_cores && getrlimit(RLIMIT_CORE, &rlim) == 0) {
			rlim.rlim_cur = rlim.rlim_max;
			setrlimit(RLIMIT_CORE, &rlim);
		}
#endif
		if (srv->event_handler == FDEVENT_HANDLER_SELECT) {
			/* don't raise the limit above FD_SETSIZE */
			if (srv->max_fds > ((int)FD_SETSIZE) - 200) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"can't raise max filedescriptors above",  FD_SETSIZE - 200,
						"if event-handler is 'select'. Use 'poll' or something else or reduce server.max-fds.");
				return -1;
			}
		}


#ifdef HAVE_PWD_H
		/* set user and group */
		if (srv->srvconf.username->used) {
			if (NULL == (pwd = getpwnam(srv->srvconf.username->ptr))) {
				log_error_write(srv, __FILE__, __LINE__, "sb",
						"can't find username", srv->srvconf.username);
				return -1;
			}

			if (pwd->pw_uid == 0) {
				log_error_write(srv, __FILE__, __LINE__, "s",
						"I will not set uid to 0\n");
				return -1;
			}
		}

		if (srv->srvconf.groupname->used) {
			if (NULL == (grp = getgrnam(srv->srvconf.groupname->ptr))) {
				log_error_write(srv, __FILE__, __LINE__, "sb",
					"can't find groupname", srv->srvconf.groupname);
				return -1;
			}
			if (grp->gr_gid == 0) {
				log_error_write(srv, __FILE__, __LINE__, "s",
						"I will not set gid to 0\n");
				return -1;
			}
		}
#endif
		/* we need root-perms for port < 1024 */
		if (0 != network_init(srv)) {
			plugins_free(srv);
			server_free(srv);

			return -1;
		}
#ifdef HAVE_PWD_H
		/* 
		 * Change group before chroot, when we have access
		 * to /etc/group
		 * */
		if (NULL != grp) {
			setgid(grp->gr_gid);
			setgroups(0, NULL);
			if (srv->srvconf.username->used) {
				initgroups(srv->srvconf.username->ptr, grp->gr_gid);
			}
		}
#endif
#ifdef HAVE_CHROOT
		if (srv->srvconf.changeroot->used) {
			tzset();

			if (-1 == chroot(srv->srvconf.changeroot->ptr)) {
				log_error_write(srv, __FILE__, __LINE__, "ss", "chroot failed: ", strerror(errno));
				return -1;
			}
			if (-1 == chdir("/")) {
				log_error_write(srv, __FILE__, __LINE__, "ss", "chdir failed: ", strerror(errno));
				return -1;
			}
		}
#endif
#ifdef HAVE_PWD_H
		/* drop root privs */
		if (NULL != pwd) {
			setuid(pwd->pw_uid);
		}
#endif
#if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_DUMPABLE)
		/**
		 * on IRIX 6.5.30 they have prctl() but no DUMPABLE
		 */
		if (srv->srvconf.enable_cores) {
			prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
		}
#endif
	} else {

#ifdef HAVE_GETRLIMIT
		if (0 != getrlimit(RLIMIT_NOFILE, &rlim)) {
			log_error_write(srv, __FILE__, __LINE__,
					"ss", "couldn't get 'max filedescriptors'",
					strerror(errno));
			return -1;
		}

		/**
		 * we are not root and can't increase the fd-limit, but we can reduce it
		 */
		if (srv->srvconf.max_fds && srv->srvconf.max_fds < rlim.rlim_cur) {
			/* set rlimits */

			rlim.rlim_cur = srv->srvconf.max_fds;

			if (0 != setrlimit(RLIMIT_NOFILE, &rlim)) {
				log_error_write(srv, __FILE__, __LINE__,
						"ss", "couldn't set 'max filedescriptors'",
						strerror(errno));
				return -1;
			}
		}

		if (srv->event_handler == FDEVENT_HANDLER_SELECT) {
			srv->max_fds = rlim.rlim_cur < ((int)FD_SETSIZE) - 200 ? rlim.rlim_cur : FD_SETSIZE - 200;
		} else {
			srv->max_fds = rlim.rlim_cur;
		}

		/* set core file rlimit, if enable_cores is set */
		if (srv->srvconf.enable_cores && getrlimit(RLIMIT_CORE, &rlim) == 0) {
			rlim.rlim_cur = rlim.rlim_max;
			setrlimit(RLIMIT_CORE, &rlim);
		}

#endif
		if (srv->event_handler == FDEVENT_HANDLER_SELECT) {
			/* don't raise the limit above FD_SETSIZE */
			if (srv->max_fds > ((int)FD_SETSIZE) - 200) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"can't raise max filedescriptors above",  FD_SETSIZE - 200,
						"if event-handler is 'select'. Use 'poll' or something else or reduce server.max-fds.");
				return -1;
			}
		}

		if (0 != network_init(srv)) {
			plugins_free(srv);
			server_free(srv);

			return -1;
		}
	}

	/* set max-conns */
	if (srv->srvconf.max_conns > srv->max_fds/2) {
		/* we can't have more connections than max-fds/2 */
		log_error_write(srv, __FILE__, __LINE__, "sdd", "can't have more connections than fds/2: ", srv->srvconf.max_conns, srv->max_fds);
		srv->max_conns = srv->max_fds/2;
	} else if (srv->srvconf.max_conns) {
		/* otherwise respect the wishes of the user */
		srv->max_conns = srv->srvconf.max_conns;
	} else {
		/* or use the default: we really don't want to hit max-fds */
		srv->max_conns = srv->max_fds/3;
	}

	if (HANDLER_GO_ON != plugins_call_init(srv)) {
		log_error_write(srv, __FILE__, __LINE__, "s", "Initialization of plugins failed. Going down.");

		plugins_free(srv);
		network_close(srv);
		server_free(srv);

		return -1;
	}

#ifdef HAVE_FORK
	/* network is up, let's daemonize ourselves */
	if (srv->srvconf.dont_daemonize == 0) daemonize();
#endif

	srv->gid = getgid();
	srv->uid = getuid();

	/* write pid file */
	if (pid_fd != -1) {
		buffer_copy_long(srv->tmp_buf, getpid());
		buffer_append_string_len(srv->tmp_buf, CONST_STR_LEN("\n"));
		write(pid_fd, srv->tmp_buf->ptr, srv->tmp_buf->used - 1);
		close(pid_fd);
		pid_fd = -1;
	}

	/* Close stderr ASAP in the child process to make sure that nothing
	 * is being written to that fd which may not be valid anymore. */
	if (-1 == log_error_open(srv)) {
		log_error_write(srv, __FILE__, __LINE__, "s", "Opening errorlog failed. Going down.");

		plugins_free(srv);
		network_close(srv);
		server_free(srv);
		return -1;
	}

	if (HANDLER_GO_ON != plugins_call_set_defaults(srv)) {
		log_error_write(srv, __FILE__, __LINE__, "s", "Configuration of plugins failed. Going down.");

		plugins_free(srv);
		network_close(srv);
		server_free(srv);

		return -1;
	}

	/* dump unused config-keys */
	for (i = 0; i < srv->config_context->used; i++) {
		array *config = ((data_config *)srv->config_context->data[i])->value;
		size_t j;

		for (j = 0; config && j < config->used; j++) {
			data_unset *du = config->data[j];

			/* anything in var.* is a user-defined variable */
			if (strncmp(du->key->ptr, "var.", sizeof("var.") - 1) == 0) {
				continue;
			}

			if (NULL == array_get_element(srv->config_touched, du->key->ptr)) {
				log_error_write(srv, __FILE__, __LINE__, "sbs",
						"WARNING: unknown config-key:",
						du->key,
						"(ignored)");
			}
		}
	}

	if (srv->config_unsupported) {
		log_error_write(srv, __FILE__, __LINE__, "s",
				"Configuration contains unsupported keys. Going down.");
	}

	if (srv->config_deprecated) {
		log_error_write(srv, __FILE__, __LINE__, "s",
				"Configuration contains deprecated keys. Going down.");
	}

	if (srv->config_unsupported || srv->config_deprecated) {
		plugins_free(srv);
		network_close(srv);
		server_free(srv);

		return -1;
	}




#ifdef HAVE_SIGACTION
	memset(&act, 0, sizeof(act));
	act.sa_handler = SIG_IGN;
	sigaction(SIGPIPE, &act, NULL);
	sigaction(SIGUSR1, &act, NULL);
# if defined(SA_SIGINFO)
	act.sa_sigaction = sigaction_handler;
	sigemptyset(&act.sa_mask);
	act.sa_flags = SA_SIGINFO;
# else
	act.sa_handler = signal_handler;
	sigemptyset(&act.sa_mask);
	act.sa_flags = 0;
# endif
	sigaction(SIGINT,  &act, NULL);
	sigaction(SIGTERM, &act, NULL);
	sigaction(SIGHUP,  &act, NULL);
	sigaction(SIGALRM, &act, NULL);
	sigaction(SIGCHLD, &act, NULL);

#elif defined(HAVE_SIGNAL)
	/* ignore the SIGPIPE from sendfile() */
	signal(SIGPIPE, SIG_IGN);
	signal(SIGUSR1, SIG_IGN);
	signal(SIGALRM, signal_handler);
	signal(SIGTERM, signal_handler);
	signal(SIGHUP,  signal_handler);
	signal(SIGCHLD,  signal_handler);
	signal(SIGINT,  signal_handler);
#endif

#ifdef USE_ALARM
	signal(SIGALRM, signal_handler);

	/* setup periodic timer (1 second) */
	if (setitimer(ITIMER_REAL, &interval, NULL)) {
		log_error_write(srv, __FILE__, __LINE__, "s", "setting timer failed");
		return -1;
	}

	getitimer(ITIMER_REAL, &interval);
#endif

#ifdef HAVE_FORK
	/* start watcher and workers */
	num_childs = srv->srvconf.max_worker;
	if (num_childs > 0) {
		int child = 0;
		while (!child && !srv_shutdown && !graceful_shutdown) {
			if (num_childs > 0) {
				switch (fork()) {
				case -1:
					return -1;
				case 0:
					child = 1;
					break;
				default:
					num_childs--;
					break;
				}
			} else {
				int status;

				if (-1 != wait(&status)) {
					/** 
					 * one of our workers went away 
					 */
					num_childs++;
				} else {
					switch (errno) {
					case EINTR:
						/**
						 * if we receive a SIGHUP we have to cycle our logs ourselves,
						 * as we don't have the main loop to help us here
						 */
						if (handle_sig_hup) {
							handle_sig_hup = 0;

							log_error_cycle(srv);

							/**
							 * forward to all procs in the process-group
							 * 
							 * we also send it to ourselves
							 */
							if (!forwarded_sig_hup) {
								forwarded_sig_hup = 1;
								kill(0, SIGHUP);
							}
						}
						break;
					default:
						break;
					}
				}
			}
		}

		/**
		 * for the parent this is the exit-point 
		 */
		if (!child) {
			/** 
			 * kill all children too 
			 */
			if (graceful_shutdown) {
				kill(0, SIGINT);
			} else if (srv_shutdown) {
				kill(0, SIGTERM);
			}

			log_error_close(srv);
			network_close(srv);
			connections_free(srv);
			plugins_free(srv);
			server_free(srv);
			return 0;
		}
	}
#endif

	if (NULL == (srv->ev = fdevent_init(srv, srv->max_fds + 1, srv->event_handler))) {
		log_error_write(srv, __FILE__, __LINE__,
				"s", "fdevent_init failed");
		return -1;
	}

	/* libev backend overwrites our SIGCHLD handler and calls waitpid on SIGCHLD; we want our own SIGCHLD handling. */
#ifdef HAVE_SIGACTION
	sigaction(SIGCHLD, &act, NULL);
#elif defined(HAVE_SIGNAL)
	signal(SIGCHLD,  signal_handler);
#endif

	/*
	 * kqueue() is called here, select resets its internals,
	 * all server sockets get their handlers
	 *
	 * */
	if (0 != network_register_fdevents(srv)) {
		plugins_free(srv);
		network_close(srv);
		server_free(srv);

		return -1;
	}

	/* might fail if user is using fam (not gamin) and famd isn't running */
	if (NULL == (srv->stat_cache = stat_cache_init())) {
		log_error_write(srv, __FILE__, __LINE__, "s",
			"stat-cache could not be setup, dieing.");
		return -1;
	}

#ifdef HAVE_FAM_H
	/* setup FAM */
	if (srv->srvconf.stat_cache_engine == STAT_CACHE_ENGINE_FAM) {
		if (0 != FAMOpen2(srv->stat_cache->fam, "lighttpd")) {
			log_error_write(srv, __FILE__, __LINE__, "s",
					 "could not open a fam connection, dieing.");
			return -1;
		}
#ifdef HAVE_FAMNOEXISTS
		FAMNoExists(srv->stat_cache->fam);
#endif

		srv->stat_cache->fam_fcce_ndx = -1;
		fdevent_register(srv->ev, FAMCONNECTION_GETFD(srv->stat_cache->fam), stat_cache_handle_fdevent, NULL);
		fdevent_event_set(srv->ev, &(srv->stat_cache->fam_fcce_ndx), FAMCONNECTION_GETFD(srv->stat_cache->fam), FDEVENT_IN);
	}
#endif


	/* get the current number of FDs */
	srv->cur_fds = open("/dev/null", O_RDONLY);
	close(srv->cur_fds);

	for (i = 0; i < srv->srv_sockets.used; i++) {
		server_socket *srv_socket = srv->srv_sockets.ptr[i];
		if (-1 == fdevent_fcntl_set(srv->ev, srv_socket->fd)) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "fcntl failed:", strerror(errno));
			return -1;
		}
	}

	/* main-loop */
	while (!srv_shutdown) {
		int n;
		size_t ndx;
		time_t min_ts;

		if (handle_sig_hup) {
			handler_t r;

			/* reset notification */
			handle_sig_hup = 0;


			/* cycle logfiles */

			switch(r = plugins_call_handle_sighup(srv)) {
			case HANDLER_GO_ON:
				break;
			default:
				log_error_write(srv, __FILE__, __LINE__, "sd", "sighup-handler return with an error", r);
				break;
			}

			if (-1 == log_error_cycle(srv)) {
				log_error_write(srv, __FILE__, __LINE__, "s", "cycling errorlog failed, dying");

				return -1;
			} else {
#ifdef HAVE_SIGACTION
				log_error_write(srv, __FILE__, __LINE__, "sdsd", 
					"logfiles cycled UID =",
					last_sighup_info.si_uid,
					"PID =",
					last_sighup_info.si_pid);
#else
				log_error_write(srv, __FILE__, __LINE__, "s", 
					"logfiles cycled");
#endif
			}
		}

		if (handle_sig_alarm) {
			/* a new second */

#ifdef USE_ALARM
			/* reset notification */
			handle_sig_alarm = 0;
#endif

			/* get current time */
			min_ts = time(NULL);

			if (min_ts != srv->cur_ts) {
				int cs = 0;
				connections *conns = srv->conns;
				handler_t r;

				switch(r = plugins_call_handle_trigger(srv)) {
				case HANDLER_GO_ON:
					break;
				case HANDLER_ERROR:
					log_error_write(srv, __FILE__, __LINE__, "s", "one of the triggers failed");
					break;
				default:
					log_error_write(srv, __FILE__, __LINE__, "d", r);
					break;
				}

				/* trigger waitpid */
				srv->cur_ts = min_ts;

				/* cleanup stat-cache */
				stat_cache_trigger_cleanup(srv);
				/**
				 * check all connections for timeouts
				 *
				 */
				for (ndx = 0; ndx < conns->used; ndx++) {
					int changed = 0;
					connection *con;
					int t_diff;

					con = conns->ptr[ndx];

					if (con->state == CON_STATE_READ ||
					    con->state == CON_STATE_READ_POST) {
						if (con->request_count == 1) {
							if (srv->cur_ts - con->read_idle_ts > con->conf.max_read_idle) {
								/* time - out */
#if 0
								log_error_write(srv, __FILE__, __LINE__, "sd",
										"connection closed - read-timeout:", con->fd);
#endif
								connection_set_state(srv, con, CON_STATE_ERROR);
								changed = 1;
							}
						} else {
							if (srv->cur_ts - con->read_idle_ts > con->keep_alive_idle) {
								/* time - out */
#if 0
								log_error_write(srv, __FILE__, __LINE__, "sd",
										"connection closed - read-timeout:", con->fd);
#endif
								connection_set_state(srv, con, CON_STATE_ERROR);
								changed = 1;
							}
						}
					}

					if ((con->state == CON_STATE_WRITE) &&
					    (con->write_request_ts != 0)) {
#if 0
						if (srv->cur_ts - con->write_request_ts > 60) {
							log_error_write(srv, __FILE__, __LINE__, "sdd",
									"connection closed - pre-write-request-timeout:", con->fd, srv->cur_ts - con->write_request_ts);
						}
#endif

						if (srv->cur_ts - con->write_request_ts > con->conf.max_write_idle) {
							/* time - out */
							if (con->conf.log_timeouts) {
								log_error_write(srv, __FILE__, __LINE__, "sbsosds",
									"NOTE: a request for",
									con->request.uri,
									"timed out after writing",
									con->bytes_written,
									"bytes. We waited",
									(int)con->conf.max_write_idle,
									"seconds. If this a problem increase server.max-write-idle");
							}
							connection_set_state(srv, con, CON_STATE_ERROR);
							changed = 1;
						}
					}

					if (con->state == CON_STATE_CLOSE && (srv->cur_ts - con->close_timeout_ts > HTTP_LINGER_TIMEOUT)) {
						changed = 1;
					}

					/* we don't like div by zero */
					if (0 == (t_diff = srv->cur_ts - con->connection_start)) t_diff = 1;

					if (con->traffic_limit_reached &&
					    (con->conf.kbytes_per_second == 0 ||
					     ((con->bytes_written / t_diff) < con->conf.kbytes_per_second * 1024))) {
						/* enable connection again */
						con->traffic_limit_reached = 0;

						changed = 1;
					}

					if (changed) {
						connection_state_machine(srv, con);
					}
					con->bytes_written_cur_second = 0;
					*(con->conf.global_bytes_per_second_cnt_ptr) = 0;

#if 0
					if (cs == 0) {
						fprintf(stderr, "connection-state: ");
						cs = 1;
					}

					fprintf(stderr, "c[%d,%d]: %s ",
						con->fd,
						con->fcgi.fd,
						connection_get_state(con->state));
#endif
				}

				if (cs == 1) fprintf(stderr, "\n");
			}
		}

		if (srv->sockets_disabled) {
			/* our server sockets are disabled, why ? */

			if ((srv->cur_fds + srv->want_fds < srv->max_fds * 8 / 10) && /* we have enough unused fds */
			    (srv->conns->used <= srv->max_conns * 9 / 10) &&
			    (0 == graceful_shutdown)) {
				for (i = 0; i < srv->srv_sockets.used; i++) {
					server_socket *srv_socket = srv->srv_sockets.ptr[i];
					fdevent_event_set(srv->ev, &(srv_socket->fde_ndx), srv_socket->fd, FDEVENT_IN);
				}

				log_error_write(srv, __FILE__, __LINE__, "s", "[note] sockets enabled again");

				srv->sockets_disabled = 0;
			}
		} else {
			if ((srv->cur_fds + srv->want_fds > srv->max_fds * 9 / 10) || /* out of fds */
			    (srv->conns->used >= srv->max_conns) || /* out of connections */
			    (graceful_shutdown)) { /* graceful_shutdown */

				/* disable server-fds */

				for (i = 0; i < srv->srv_sockets.used; i++) {
					server_socket *srv_socket = srv->srv_sockets.ptr[i];
					fdevent_event_del(srv->ev, &(srv_socket->fde_ndx), srv_socket->fd);

					if (graceful_shutdown) {
						/* we don't want this socket anymore,
						 *
						 * closing it right away will make it possible for
						 * the next lighttpd to take over (graceful restart)
						 *  */

						fdevent_unregister(srv->ev, srv_socket->fd);
						close(srv_socket->fd);
						srv_socket->fd = -1;

						/* network_close() will cleanup after us */

						if (srv->srvconf.pid_file->used &&
						    srv->srvconf.changeroot->used == 0) {
							if (0 != unlink(srv->srvconf.pid_file->ptr)) {
								if (errno != EACCES && errno != EPERM) {
									log_error_write(srv, __FILE__, __LINE__, "sbds",
											"unlink failed for:",
											srv->srvconf.pid_file,
											errno,
											strerror(errno));
								}
							}
						}
					}
				}

				if (graceful_shutdown) {
					log_error_write(srv, __FILE__, __LINE__, "s", "[note] graceful shutdown started");
				} else if (srv->conns->used >= srv->max_conns) {
					log_error_write(srv, __FILE__, __LINE__, "s", "[note] sockets disabled, connection limit reached");
				} else {
					log_error_write(srv, __FILE__, __LINE__, "s", "[note] sockets disabled, out-of-fds");
				}

				srv->sockets_disabled = 1;
			}
		}

		if (graceful_shutdown && srv->conns->used == 0) {
			/* we are in graceful shutdown phase and all connections are closed
			 * we are ready to terminate without harming anyone */
			srv_shutdown = 1;
		}

		/* we still have some fds to share */
		if (srv->want_fds) {
			/* check the fdwaitqueue for waiting fds */
			int free_fds = srv->max_fds - srv->cur_fds - 16;
			connection *con;

			for (; free_fds > 0 && NULL != (con = fdwaitqueue_unshift(srv, srv->fdwaitqueue)); free_fds--) {
				connection_state_machine(srv, con);

				srv->want_fds--;
			}
		}

		if ((n = fdevent_poll(srv->ev, 1000)) > 0) {
			/* n is the number of events */
			int revents;
			int fd_ndx;
#if 0
			if (n > 0) {
				log_error_write(srv, __FILE__, __LINE__, "sd",
						"polls:", n);
			}
#endif
			fd_ndx = -1;
			do {
				fdevent_handler handler;
				void *context;
				handler_t r;

				fd_ndx  = fdevent_event_next_fdndx (srv->ev, fd_ndx);
				if (-1 == fd_ndx) break; /* not all fdevent handlers know how many fds got an event */

				revents = fdevent_event_get_revent (srv->ev, fd_ndx);
				fd      = fdevent_event_get_fd     (srv->ev, fd_ndx);
				handler = fdevent_get_handler(srv->ev, fd);
				context = fdevent_get_context(srv->ev, fd);

				/* connection_handle_fdevent needs a joblist_append */
#if 0
				log_error_write(srv, __FILE__, __LINE__, "sdd",
						"event for", fd, revents);
#endif
				switch (r = (*handler)(srv, context, revents)) {
				case HANDLER_FINISHED:
				case HANDLER_GO_ON:
				case HANDLER_WAIT_FOR_EVENT:
				case HANDLER_WAIT_FOR_FD:
					break;
				case HANDLER_ERROR:
					/* should never happen */
					SEGFAULT();
					break;
				default:
					log_error_write(srv, __FILE__, __LINE__, "d", r);
					break;
				}
			} while (--n > 0);
		} else if (n < 0 && errno != EINTR) {
			log_error_write(srv, __FILE__, __LINE__, "ss",
					"fdevent_poll failed:",
					strerror(errno));
		}

		for (ndx = 0; ndx < srv->joblist->used; ndx++) {
			connection *con = srv->joblist->ptr[ndx];
			handler_t r;

			connection_state_machine(srv, con);

			switch(r = plugins_call_handle_joblist(srv, con)) {
			case HANDLER_FINISHED:
			case HANDLER_GO_ON:
				break;
			default:
				log_error_write(srv, __FILE__, __LINE__, "d", r);
				break;
			}

			con->in_joblist = 0;
		}

		srv->joblist->used = 0;
	}

	if (srv->srvconf.pid_file->used &&
	    srv->srvconf.changeroot->used == 0 &&
	    0 == graceful_shutdown) {
		if (0 != unlink(srv->srvconf.pid_file->ptr)) {
			if (errno != EACCES && errno != EPERM) {
				log_error_write(srv, __FILE__, __LINE__, "sbds",
						"unlink failed for:",
						srv->srvconf.pid_file,
						errno,
						strerror(errno));
			}
		}
	}

#ifdef HAVE_SIGACTION
	log_error_write(srv, __FILE__, __LINE__, "sdsd", 
			"server stopped by UID =",
			last_sigterm_info.si_uid,
			"PID =",
			last_sigterm_info.si_pid);
#else
	log_error_write(srv, __FILE__, __LINE__, "s", 
			"server stopped");
#endif

	/* clean-up */
	log_error_close(srv);
	network_close(srv);
	connections_free(srv);
	plugins_free(srv);
	server_free(srv);

	return 0;
}
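
/*
 * Standalone sketch (not lighttpd code): the one-second ITIMER_REAL heartbeat
 * that main() above installs under USE_ALARM.  The signal handler only sets a
 * flag; the main loop notices it and does the periodic work (timeout checks,
 * stat-cache cleanup).
 */
#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

static volatile sig_atomic_t handle_sig_alarm = 0;

static void signal_handler(int sig) {
	if (sig == SIGALRM) handle_sig_alarm = 1;
}

int main(void) {
	struct itimerval interval;
	int ticks = 0;

	signal(SIGALRM, signal_handler);

	interval.it_interval.tv_sec = 1;
	interval.it_interval.tv_usec = 0;
	interval.it_value.tv_sec = 1;
	interval.it_value.tv_usec = 0;

	if (setitimer(ITIMER_REAL, &interval, NULL)) {
		perror("setitimer");
		return 1;
	}

	while (ticks < 3) {
		pause(); /* stands in for fdevent_poll() */
		if (handle_sig_alarm) {
			handle_sig_alarm = 0;
			printf("tick %d\n", ++ticks);
		}
	}
	return 0;
}
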
connection *connection_accept(server *srv, server_socket *srv_socket) {
	/* accept everything */

	/* search an empty place */
	int cnt;
	sock_addr cnt_addr;
	socklen_t cnt_len;
	/* accept it and register the fd */

	/**
	 * check if we can still open a new connections
	 *
	 * see #1216
	 */

	if (srv->conns->used >= srv->max_conns) {
		return NULL;
	}

	cnt_len = sizeof(cnt_addr);

	if (-1 == (cnt = accept(srv_socket->fd, (struct sockaddr *) &cnt_addr, &cnt_len))) {
		switch (errno) {
		case EAGAIN:
#if EWOULDBLOCK != EAGAIN
		case EWOULDBLOCK:
#endif
		case EINTR:
			/* we were stopped _before_ we had a connection */
		case ECONNABORTED: /* this is a FreeBSD thingy */
			/* we were stopped _after_ we had a connection */
			break;
		case EMFILE:
			/* out of fds */
			break;
		default:
			log_error_write(srv, __FILE__, __LINE__, "ssd", "accept failed:", strerror(errno), errno);
		}
		return NULL;
	} else {
		connection *con;

		srv->cur_fds++;

		/* ok, we have the connection, register it */
#if 0
		log_error_write(srv, __FILE__, __LINE__, "sd",
				"appected()", cnt);
#endif
		srv->con_opened++;

		con = connections_get_new_connection(srv);

		con->fd = cnt;
		con->fde_ndx = -1;
#if 0
		gettimeofday(&(con->start_tv), NULL);
#endif
		fdevent_register(srv->ev, con->fd, connection_handle_fdevent, con);

		connection_set_state(srv, con, CON_STATE_REQUEST_START);

		con->connection_start = srv->cur_ts;
		con->dst_addr = cnt_addr;
		buffer_copy_string(con->dst_addr_buf, inet_ntop_cache_get_ip(srv, &(con->dst_addr)));
		con->srv_socket = srv_socket;

		if (-1 == (fdevent_fcntl_set(srv->ev, con->fd))) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "fcntl failed: ", strerror(errno));
			return NULL;
		}
#ifdef USE_OPENSSL
		/* connect FD to SSL */
		if (srv_socket->is_ssl) {
			if (NULL == (con->ssl = SSL_new(srv_socket->ssl_ctx))) {
				log_error_write(srv, __FILE__, __LINE__, "ss", "SSL:",
						ERR_error_string(ERR_get_error(), NULL));

				return NULL;
			}

			con->renegotiations = 0;
			SSL_set_app_data(con->ssl, con);
			SSL_set_accept_state(con->ssl);

			if (1 != (SSL_set_fd(con->ssl, cnt))) {
				log_error_write(srv, __FILE__, __LINE__, "ss", "SSL:",
						ERR_error_string(ERR_get_error(), NULL));
				return NULL;
			}
		}
#endif
		return con;
	}
}
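
/*
 * Standalone sketch (not lighttpd code): the accept() error triage used by
 * connection_accept() above.  EAGAIN/EWOULDBLOCK/EINTR/ECONNABORTED are normal
 * on a non-blocking listener; EMFILE means we ran out of file descriptors.
 */
#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* returns the new fd, or -1 if the caller should simply try again later */
static int accept_once(int listen_fd) {
	struct sockaddr_storage addr;
	socklen_t addr_len = sizeof(addr);
	int fd = accept(listen_fd, (struct sockaddr *)&addr, &addr_len);

	if (fd >= 0) return fd;

	switch (errno) {
	case EAGAIN:
#if EWOULDBLOCK != EAGAIN
	case EWOULDBLOCK:
#endif
	case EINTR:
	case ECONNABORTED: /* client vanished between SYN and accept() */
		break;
	case EMFILE:       /* out of fds: back off instead of logging every attempt */
		break;
	default:
		perror("accept");
	}
	return -1;
}

int main(void) {
	struct sockaddr_in sa;
	int lfd = socket(AF_INET, SOCK_STREAM, 0);

	memset(&sa, 0, sizeof(sa));
	sa.sin_family = AF_INET;
	sa.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	sa.sin_port = 0; /* any free port */

	if (bind(lfd, (struct sockaddr *)&sa, sizeof(sa)) < 0 || listen(lfd, 16) < 0) {
		perror("bind/listen");
		return 1;
	}
	fcntl(lfd, F_SETFL, O_NONBLOCK);

	/* nobody is connecting, so this should report -1 (EAGAIN) */
	printf("accept_once() -> %d\n", accept_once(lfd));

	close(lfd);
	return 0;
}
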
static handler_t proxy_write_request(server *srv, handler_ctx *hctx) {
	data_proxy *host= hctx->host;
	connection *con   = hctx->remote_conn;

	int ret;

	if (!host ||
	    (!host->host->used || !host->port)) return HANDLER_ERROR;

	switch(hctx->state) {
	case PROXY_STATE_CONNECT:
		/* wait for the connect() to finish */

		/* connect failed ? */
		if (-1 == hctx->fde_ndx) return HANDLER_ERROR;

		/* wait */
		return HANDLER_WAIT_FOR_EVENT;

		break;

	case PROXY_STATE_INIT:
#if defined(HAVE_IPV6) && defined(HAVE_INET_PTON)
		if (strstr(host->host->ptr,":")) {
		    if (-1 == (hctx->fd = socket(AF_INET6, SOCK_STREAM, 0))) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "socket failed: ", strerror(errno));
			return HANDLER_ERROR;
		    }
		} else
#endif
		{
		    if (-1 == (hctx->fd = socket(AF_INET, SOCK_STREAM, 0))) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "socket failed: ", strerror(errno));
			return HANDLER_ERROR;
		    }
		}
		hctx->fde_ndx = -1;

		srv->cur_fds++;

		fdevent_register(srv->ev, hctx->fd, proxy_handle_fdevent, hctx);

		if (-1 == fdevent_fcntl_set(srv->ev, hctx->fd)) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "fcntl failed: ", strerror(errno));

			return HANDLER_ERROR;
		}

		switch (proxy_establish_connection(srv, hctx)) {
		case 1:
			proxy_set_state(srv, hctx, PROXY_STATE_CONNECT);

			/* connection is in progress, wait for an event and call getsockopt() below */

			fdevent_event_set(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_OUT);

			return HANDLER_WAIT_FOR_EVENT;
		case -1:
			/* if ECONNREFUSED choose another connection -> FIXME */
			hctx->fde_ndx = -1;

			return HANDLER_ERROR;
		default:
			/* everything is ok, go on */
			proxy_set_state(srv, hctx, PROXY_STATE_PREPARE_WRITE);
			break;
		}

		/* fall through */

	case PROXY_STATE_PREPARE_WRITE:
		proxy_create_env(srv, hctx);

		proxy_set_state(srv, hctx, PROXY_STATE_WRITE);

		/* fall through */
	case PROXY_STATE_WRITE:;
		ret = srv->network_backend_write(srv, con, hctx->fd, hctx->wb, MAX_WRITE_LIMIT);

		chunkqueue_remove_finished_chunks(hctx->wb);

		if (-1 == ret) { /* error on our side */
			log_error_write(srv, __FILE__, __LINE__, "ssd", "write failed:", strerror(errno), errno);

			return HANDLER_ERROR;
		} else if (-2 == ret) { /* remote close */
			log_error_write(srv, __FILE__, __LINE__, "ssd", "write failed, remote connection close:", strerror(errno), errno);

			return HANDLER_ERROR;
		}

		if (hctx->wb->bytes_out == hctx->wb->bytes_in) {
			proxy_set_state(srv, hctx, PROXY_STATE_READ);

			fdevent_event_del(srv->ev, &(hctx->fde_ndx), hctx->fd);
			fdevent_event_set(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_IN);
		} else {
			fdevent_event_set(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_OUT);

			return HANDLER_WAIT_FOR_EVENT;
		}

		return HANDLER_WAIT_FOR_EVENT;
	case PROXY_STATE_READ:
		/* waiting for a response */
		return HANDLER_WAIT_FOR_EVENT;
	default:
		log_error_write(srv, __FILE__, __LINE__, "s", "(debug) unknown state");
		return HANDLER_ERROR;
	}

	return HANDLER_GO_ON;
}
static handler_t proxy_write_request(server *srv, handler_ctx *hctx) {
	data_proxy *host= hctx->host;
	plugin_data *p    = hctx->plugin_data;
	connection *con   = hctx->remote_conn;

	int ret;

	if (!host ||
	    (!host->host->used || !host->port)) return HANDLER_ERROR;

	switch(hctx->state) {
	case PROXY_STATE_INIT:
		if (-1 == (hctx->fd = socket(AF_INET, SOCK_STREAM, 0))) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "socket failed: ", strerror(errno));
			return HANDLER_ERROR;
		}
		hctx->fde_ndx = -1;

		srv->cur_fds++;

		fdevent_register(srv->ev, hctx->fd, proxy_handle_fdevent, hctx);

		if (-1 == fdevent_fcntl_set(srv->ev, hctx->fd)) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "fcntl failed: ", strerror(errno));

			return HANDLER_ERROR;
		}

		/* fall through */

	case PROXY_STATE_CONNECT:
		/* try to finish the connect() */
		if (hctx->state == PROXY_STATE_INIT) {
			/* first round */
			switch (proxy_establish_connection(srv, hctx)) {
			case 1:
				proxy_set_state(srv, hctx, PROXY_STATE_CONNECT);

				/* connection is in progress, wait for an event and call getsockopt() below */

				fdevent_event_add(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_OUT);

				return HANDLER_WAIT_FOR_EVENT;
			case -1:
				/* if ECONNREFUSED choose another connection -> FIXME */
				hctx->fde_ndx = -1;

				return HANDLER_ERROR;
			default:
				/* everything is ok, go on */
				break;
			}
		} else {
			int socket_error;
			socklen_t socket_error_len = sizeof(socket_error);

			/* we don't need it anymore */
			fdevent_event_del(srv->ev, &(hctx->fde_ndx), hctx->fd);

			/* try to finish the connect() */
			if (0 != getsockopt(hctx->fd, SOL_SOCKET, SO_ERROR, &socket_error, &socket_error_len)) {
				log_error_write(srv, __FILE__, __LINE__, "ss",
						"getsockopt failed:", strerror(errno));

				return HANDLER_ERROR;
			}
			if (socket_error != 0) {
				log_error_write(srv, __FILE__, __LINE__, "ss",
						"establishing connection failed:", strerror(socket_error),
						"port:", hctx->host->port);

				return HANDLER_ERROR;
			}
			if (p->conf.debug) {
				log_error_write(srv, __FILE__, __LINE__,  "s", "proxy - connect - delayed success");
			}
		}

		proxy_set_state(srv, hctx, PROXY_STATE_PREPARE_WRITE);
		/* fall through */
	case PROXY_STATE_PREPARE_WRITE:
		proxy_create_env(srv, hctx);

		proxy_set_state(srv, hctx, PROXY_STATE_WRITE);

		/* fall through */
	case PROXY_STATE_WRITE:;
		ret = srv->network_backend_write(srv, con, hctx->fd, hctx->wb);

		chunkqueue_remove_finished_chunks(hctx->wb);

		if (-1 == ret) {
			if (errno != EAGAIN &&
			    errno != EINTR) {
				log_error_write(srv, __FILE__, __LINE__, "ssd", "write failed:", strerror(errno), errno);

				return HANDLER_ERROR;
			} else {
				fdevent_event_add(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_OUT);

				return HANDLER_WAIT_FOR_EVENT;
			}
		}

		if (hctx->wb->bytes_out == hctx->wb->bytes_in) {
			proxy_set_state(srv, hctx, PROXY_STATE_READ);

			fdevent_event_del(srv->ev, &(hctx->fde_ndx), hctx->fd);
			fdevent_event_add(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_IN);
		} else {
			fdevent_event_add(srv->ev, &(hctx->fde_ndx), hctx->fd, FDEVENT_OUT);

			return HANDLER_WAIT_FOR_EVENT;
		}

		return HANDLER_WAIT_FOR_EVENT;
	case PROXY_STATE_READ:
		/* waiting for a response */
		return HANDLER_WAIT_FOR_EVENT;
	default:
		log_error_write(srv, __FILE__, __LINE__, "s", "(debug) unknown state");
		return HANDLER_ERROR;
	}

	return HANDLER_GO_ON;
}
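
/*
 * Standalone sketch (not lighttpd code): finishing a non-blocking connect()
 * the way PROXY_STATE_CONNECT does above -- wait until the socket becomes
 * writable, then read SO_ERROR to find out whether the connect succeeded.
 * 127.0.0.1:9 is just an example target; usually nothing listens there.
 */
#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void) {
	struct sockaddr_in sa;
	struct pollfd pfd;
	int fd, socket_error = 0;
	socklen_t socket_error_len = sizeof(socket_error);

	fd = socket(AF_INET, SOCK_STREAM, 0);
	fcntl(fd, F_SETFL, O_NONBLOCK);

	memset(&sa, 0, sizeof(sa));
	sa.sin_family = AF_INET;
	sa.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	sa.sin_port = htons(9);

	if (0 == connect(fd, (struct sockaddr *)&sa, sizeof(sa))) {
		puts("connected immediately");
	} else if (errno == EINPROGRESS) {
		pfd.fd = fd;
		pfd.events = POLLOUT; /* stands in for FDEVENT_OUT */
		poll(&pfd, 1, 1000);

		if (0 == getsockopt(fd, SOL_SOCKET, SO_ERROR, &socket_error, &socket_error_len) &&
		    0 == socket_error) {
			puts("delayed connect succeeded");
		} else {
			printf("establishing connection failed: %s\n", strerror(socket_error));
		}
	} else {
		perror("connect");
	}

	close(fd);
	return 0;
}
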
static int fdt_spawn_connection(server *srv,
				 fdt_server *s_server) {

	int status;
	struct timeval tv = { 0, 100 * 1000 };
	struct sockaddr_un fdt_addr_un;
	struct sockaddr *fdt_addr;

	socklen_t servlen;

	if (buffer_is_empty(s_server->unixsocket)) {
		log_error_write(srv, __FILE__, __LINE__, "s",
			"Socket path missing");
		return -1;
	}

	memset(&fdt_addr_un, 0, sizeof(fdt_addr_un));

	fdt_addr_un.sun_family = AF_UNIX;
	strcpy(fdt_addr_un.sun_path, s_server->unixsocket->ptr);

	servlen = s_server->unixsocket->used + sizeof(fdt_addr_un.sun_family);
	fdt_addr = (struct sockaddr *) &fdt_addr_un;

	/** We first try to connect to the backend to see whether it is already
	 * running; if it is, there is no need to fork a new process. */
	if (-1 == (s_server->fdt_sfd = socket(AF_UNIX, SOCK_STREAM, 0))) {
		log_error_write(srv, __FILE__, __LINE__, "ss",
				"failed:", strerror(errno));
		return -1;
	}

	if (-1 == connect(s_server->fdt_sfd, fdt_addr, servlen)) {
		/* server is not up, spawn it  */
		log_error_write(srv, __FILE__, __LINE__, "sb",
			"spawning a new fdt proc, socket:", s_server->unixsocket);

		pid_t child;
		int val;

		if (errno != ENOENT &&
		    !buffer_is_empty(s_server->unixsocket)) {
			unlink(s_server->unixsocket->ptr);
		}

		close(s_server->fdt_sfd);

		/** Create the socket, bind it and put it into the listen state here, in
		 * the parent. Ideally the backend process would start and then listen on
		 * this socket itself, but there is a race: lighttpd may call connect()
		 * before the child has reached listen(), so the second connect attempt
		 * fails as well. The way out is to do the listen() in the parent and then
		 * fork: the listening socket stays open in the child process, while the
		 * parent closes its copy, reopens the socket and connects. */
		/* reopen socket */
		if (-1 == (s_server->fdt_sfd = socket(AF_UNIX, SOCK_STREAM, 0))) {
			log_error_write(srv, __FILE__, __LINE__, "ss",
				"socket failed:", strerror(errno));
			return -1;
		}

		val = 1;
		if (setsockopt(s_server->fdt_sfd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val)) < 0) {
			log_error_write(srv, __FILE__, __LINE__, "ss",
					"socketsockopt failed:", strerror(errno));
			return -1;
		}

		/* bind the socket */
		if (-1 == bind(s_server->fdt_sfd, fdt_addr, servlen)) {
			log_error_write(srv, __FILE__, __LINE__, "sbs",
				"bind failed for:",
				s_server->service_name,
				strerror(errno));
			return -1;
		}

		if (-1 == listen(s_server->fdt_sfd, 1024)) {
			log_error_write(srv, __FILE__, __LINE__, "ss",
				"listen failed:", strerror(errno));
			return -1;
		}

		switch ((child = fork())) {
		case 0: {

			size_t i = 0;
			char *c;
			char_array env;
			char_array arg;

			/* create environment */
			env.ptr = NULL;
			env.size = 0;
			env.used = 0;

			arg.ptr = NULL;
			arg.size = 0;
			arg.used = 0;

/** "build clean environment": selects which environment variables are copied
 * from lighttpd's own environ into the child's environment. We don't need this
 * right now, but we might need it later, so it stays here commented out.
			// build clean environment
			if (s_server->bin_env_copy->used) {
				for (i = 0; i < s_server->bin_env_copy->used; i++) {
					data_string *ds = (data_string *)s_server->bin_env_copy->data[i];
					char *ge;

					if (NULL != (ge = getenv(ds->value->ptr))) {
						env_add(&env, CONST_BUF_LEN(ds->value), ge, strlen(ge));
					}
				}
			} else {
				for (i = 0; environ[i]; i++) {
					char *eq;

					if (NULL != (eq = strchr(environ[i], '='))) {
						env_add(&env, environ[i], eq - environ[i], eq+1, strlen(eq+1));
					}
				}
			}*/

			for (i = 0; environ[i]; i++) {
				char *eq;

				if (NULL != (eq = strchr(environ[i], '='))) {
					env_add(&env, environ[i], eq - environ[i], eq+1, strlen(eq+1));
				}
			}

			/* create environment */
			for (i = 0; i < s_server->bin_env->used; i++) {
				data_string *ds = (data_string *)s_server->bin_env->data[i];

				env_add(&env, CONST_BUF_LEN(ds->key), CONST_BUF_LEN(ds->value));
			}

			/** The listening socket's file descriptor is handed to the child
			 * process through an environment variable. */
			char env_value[16];

			snprintf(env_value, sizeof(env_value), "%d", s_server->fdt_sfd);
			env_add(&env, CONST_STR_LEN("SOCKET_FD"), env_value, strlen(env_value));

/*			log_error_write(srv, __FILE__, __LINE__, "ss",
					"socket fd in child process is", env_value);*/

			env.ptr[env.used] = NULL;

			parse_binpath(&arg, s_server->bin_path);

			/* chdir into the base of the bin-path,
			 * search for the last / */
			if (NULL != (c = strrchr(arg.ptr[0], '/'))) {
				*c = '\0';

				/* change to the physical directory */
				if (-1 == chdir(arg.ptr[0])) {
					*c = '/';
					log_error_write(srv, __FILE__, __LINE__, "sss", "chdir failed:", strerror(errno), arg.ptr[0]);
				}
				*c = '/';
			}

			reset_signals();

			/* exec the fdt process */
			execve(arg.ptr[0], arg.ptr, env.ptr);

			log_error_write(srv, __FILE__, __LINE__, "sbs",
					"execve failed for:", s_server->bin_path, strerror(errno)); 

			exit(errno);

			break;
		}
		case -1:
			/* error */
			break;
		default:
			/* parent */

			/* wait */
			select(0, NULL, NULL, NULL, &tv);

			switch (waitpid(child, &status, WNOHANG)) {
			case 0:
				/* child still running after timeout, good */
				break;
			case -1:
				/* no PID found ? should never happen */
				log_error_write(srv, __FILE__, __LINE__, "ss",
						"pid not found:", strerror(errno));
				return -1;
			default:
				log_error_write(srv, __FILE__, __LINE__, "sbs",
						"the fdt-transfer-backend", s_server->bin_path, "failed to start:");
				/* the child should not terminate at all */
				if (WIFEXITED(status)) {
					log_error_write(srv, __FILE__, __LINE__, "sdb",
							"child exited with status",
							WEXITSTATUS(status), s_server->bin_path);
				} else if (WIFSIGNALED(status)) {
					log_error_write(srv, __FILE__, __LINE__, "sd",
							"terminated by signal:",
							WTERMSIG(status));

					if (WTERMSIG(status) == 11) {
						log_error_write(srv, __FILE__, __LINE__, "s",
								"to be exact: it segfaulted, crashed, died, ... you get the idea." );
					}
				} else {
					log_error_write(srv, __FILE__, __LINE__, "sd",
							"child died somehow:",
							status);
				}
				return -1;
			}

			/* register process */
			s_server->pid = child;

			close(s_server->fdt_sfd);

			/* reopen socket */
			if (-1 == (s_server->fdt_sfd = socket(AF_UNIX, SOCK_STREAM, 0))) {
				log_error_write(srv, __FILE__, __LINE__, "ss",
					"socket failed:", strerror(errno));
				return -1;
			}
	
			if (-1 == connect(s_server->fdt_sfd, fdt_addr, servlen)) {
				log_error_write(srv, __FILE__, __LINE__, "ss",
					"connect failed 2nd time This should never happen:", strerror(errno));
				return -1;
			}

			fdevent_register(srv->ev, s_server->fdt_sfd, fdt_handle_fdevent, s_server);
			if (-1 == fdevent_fcntl_set(srv->ev, s_server->fdt_sfd)) {
				log_error_write(srv, __FILE__, __LINE__, "ss",
						"fcntl failed:", strerror(errno));
	
				return HANDLER_ERROR;
			}

			fdevent_event_set(srv->ev, NULL, s_server->fdt_sfd, FDEVENT_HUP);

			break;
		}
	} else {
		s_server->pid = 0;

		log_error_write(srv, __FILE__, __LINE__, "sb",
				"(debug) socket is already used; won't spawn:",
				s_server->service_name);
	}

	s_server->state = PROC_STATE_RUNNING;

	return 0;
}
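
/*
 * A minimal, hypothetical sketch (not lighttpd code) of the pattern used in
 * fdt_spawn_connection() above: the parent binds and listens on a UNIX socket
 * before forking, then hands the listening descriptor to the exec'ed child
 * through an environment variable, so the child can accept() without racing
 * the parent's connect(). The function name, the "SOCKET_FD" variable and the
 * argv handling are illustrative assumptions only.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

static int spawn_backend(const char *sock_path, char *const argv[]) {
	struct sockaddr_un addr;
	char fdbuf[16];
	int fd;

	if (-1 == (fd = socket(AF_UNIX, SOCK_STREAM, 0))) return -1;

	memset(&addr, 0, sizeof(addr));
	addr.sun_family = AF_UNIX;
	strncpy(addr.sun_path, sock_path, sizeof(addr.sun_path) - 1);

	/* bind + listen in the parent, so a later connect() cannot race the child */
	if (-1 == bind(fd, (struct sockaddr *) &addr, sizeof(addr)) ||
	    -1 == listen(fd, 128)) {
		close(fd);
		return -1;
	}

	switch (fork()) {
	case 0:
		/* child: export the descriptor number and exec the backend binary */
		snprintf(fdbuf, sizeof(fdbuf), "%d", fd);
		setenv("SOCKET_FD", fdbuf, 1);
		execv(argv[0], argv);
		_exit(errno); /* only reached if execv() failed */
	case -1:
		close(fd);
		return -1;
	default:
		/* parent: keep its copy for now; it may close it and connect() later */
		return fd;
	}
}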
/*
 ******************
 * Entry point of the program
 *****************
 */
int main(int argc, char **argv)
{
	server *srv = NULL;
	int print_config = 0;
	int test_config = 0;
	int i_am_root;
	int o;
	int num_childs = 0;
	int pid_fd = -1, fd;
	size_t i;
#ifdef HAVE_SIGACTION
	struct sigaction act;
#endif
#ifdef HAVE_GETRLIMIT
	struct rlimit rlim;
#endif

#ifdef USE_ALARM
	struct itimerval interval;

	interval.it_interval.tv_sec = 1;
	interval.it_interval.tv_usec = 0;
	interval.it_value.tv_sec = 1;
	interval.it_value.tv_usec = 0;
#endif


	/*
	 * for nice %b handling in strftime()
	 */
	setlocale(LC_TIME, "C");

	if (NULL == (srv = server_init()))
	{
		fprintf(stderr, "did this really happen?\n");
		return -1;
	}

	/*
	 * init structs done 
	 */

	srv->srvconf.port = 0;
#ifdef HAVE_GETUID
	i_am_root = (getuid() == 0);
#else
	i_am_root = 0;
#endif

	// by default the program runs as a daemon.
	srv->srvconf.dont_daemonize = 0;

	// parse the command-line arguments.
	while (-1 != (o = getopt(argc, argv, "f:m:hvVDpt")))
	{
		switch (o)
		{
		case 'f':
			if (config_read(srv, optarg))
			{
				server_free(srv);
				return -1;
			}
			break;
		case 'm':
			buffer_copy_string(srv->srvconf.modules_dir, optarg);
			break;
		case 'p':
			print_config = 1;
			break;
		case 't':
			test_config = 1;
			break;
		case 'D':
			srv->srvconf.dont_daemonize = 1;
			break;
		case 'v':
			show_version();
			return 0;
		case 'V':
			show_features();
			return 0;
		case 'h':
			show_help();
			return 0;
		default:
			show_help();
			server_free(srv);
			return -1;
		}
	}

	if (!srv->config_storage)
	{
		log_error_write(srv, __FILE__, __LINE__, "s",
						"No configuration available. Try using -f option.");

		server_free(srv);
		return -1;
	}

	if (print_config)
	{
		data_unset *dc = srv->config_context->data[0];
		if (dc)
		{
			dc->print(dc, 0);
			fprintf(stdout, "\n");
		} else
		{
			/*
			 * shouldn't happen
			 */
			fprintf(stderr, "global config not found\n");
		}
	}

	if (test_config) // no actual syntax test is performed here...
	{
		printf("Syntax OK\n");
	}

	if (test_config || print_config)
	{
		server_free(srv);
		return 0;
	}

	/*
	 * close stdin and stdout, as they are not needed
	 * (they are redirected to /dev/null)
	 */
	openDevNull(STDIN_FILENO);
	openDevNull(STDOUT_FILENO);

	// fill in the default configuration values.
	if (0 != config_set_defaults(srv))
	{
		log_error_write(srv, __FILE__, __LINE__, "s",
						"setting default values failed");
		server_free(srv);
		return -1;
	}

	/*
	 * UID handling 
	 */
#ifdef HAVE_GETUID
	// check whether the effective user ID or effective group ID is 0 (root).
	if (!i_am_root && (geteuid() == 0 || getegid() == 0))
	{
		/*
		 * we are setuid-root 
		 * 程序的实际用户ID不是0,也就是程序不是由超级用户运行的,但是程序的有效用户ID
		 * 或者有效组ID是超级用户(组),因此,程序可以访问任何文件而不受限制!这样很
		 * 不安全。因此程序退出并提示用户。
		 */

		log_error_write(srv, __FILE__, __LINE__, "s",
						"Are you nuts ? Don't apply a SUID bit to this binary");

		server_free(srv);
		return -1;
	}
#endif

	/*
	 * check document-root 
	 */
	if (srv->config_storage[0]->document_root->used <= 1)
	{
		log_error_write(srv, __FILE__, __LINE__, "s",
						"document-root is not set\n");

		server_free(srv);

		return -1;
	}

	
	/*
	 * Load the plugins.
	 *
	 * Plugins are shared libraries. The configuration file states where each
	 * library lives and which symbol is its entry point; this call resolves
	 * those entry addresses.
	 * It is made exactly once, during initialization, so every plugin has to be
	 * configured before the server starts; adding a plugin requires a restart.
	 *
	 * plugins_load also has a sibling for statically linked plugins, but that
	 * variant is essentially unimplemented.
	 */
	if (plugins_load(srv))
	{
		log_error_write(srv, __FILE__, __LINE__, "s",
						"loading plugins finally failed");

		plugins_free(srv);
		server_free(srv);

		return -1;
	}

	/*
	 * open pid file BEFORE chroot 
	 * open the pid file; the process id is written into it later.
	 */
	if (srv->srvconf.pid_file->used)
	{
		if (-1 ==
			(pid_fd =
			 open(srv->srvconf.pid_file->ptr,
				  O_WRONLY | O_CREAT | O_EXCL | O_TRUNC,
				  S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH)))
			/**
			 * O_EXCL combined with O_CREAT tests whether the file already
			 * exists; if it does, open() fails.
			 */
		{
			// opening the pid file failed...
			struct stat st;
			if (errno != EEXIST)  // the error is not "file already exists"
			{
				log_error_write(srv, __FILE__, __LINE__, "sbs",
								"opening pid-file failed:",
								srv->srvconf.pid_file, strerror(errno));
				return -1;
			}

			// the pid file already exists; check what kind of file it is.
			if (0 != stat(srv->srvconf.pid_file->ptr, &st))
			{
				log_error_write(srv, __FILE__, __LINE__, "sbs",
								"stating existing pid-file failed:",
								srv->srvconf.pid_file, strerror(errno));
			}

			if (!S_ISREG(st.st_mode)) // the pid file is not a regular file
			{
				log_error_write(srv, __FILE__, __LINE__, "sb",
								"pid-file exists and isn't regular file:",
								srv->srvconf.pid_file);
				return -1;
			}

			// reopen the pid file, this time without O_EXCL:
			// it already exists and is a regular file, so simply truncate and overwrite it.
			if (-1 ==
				(pid_fd =
				 open(srv->srvconf.pid_file->ptr,
					  O_WRONLY | O_CREAT | O_TRUNC,
					  S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH)))
			{
				log_error_write(srv, __FILE__, __LINE__, "sbs",
								"opening pid-file failed:",
								srv->srvconf.pid_file, strerror(errno));
				return -1;
			}
		}
	}

	if (srv->event_handler == FDEVENT_HANDLER_SELECT)
	{
		/*
		 * select() has a hard limit (FD_SETSIZE); exceeding it leads to a segfault,
		 * so we keep a safety margin of 200 descriptors.
		 */
		srv->max_fds = FD_SETSIZE - 200;
	} 
	else
	{
		srv->max_fds = 4096;
	}

	// the server was started as root.
	if (i_am_root)
	{
		struct group *grp = NULL;
		struct passwd *pwd = NULL;
		int use_rlimit = 1;

#ifdef HAVE_VALGRIND_VALGRIND_H
		if (RUNNING_ON_VALGRIND)
			use_rlimit = 0;
#endif

#ifdef HAVE_GETRLIMIT
		/**
		 * getrlimit() and setrlimit() query and modify the resource limits of a
		 * process:
		 *
		 *   #include <sys/resource.h>
		 *   int getrlimit(int resource, struct rlimit *rlim);
		 *   int setrlimit(int resource, const struct rlimit *rlim);
		 *     return 0 on success, non-zero on error
		 *
		 * Each call names one resource and passes a pointer to:
		 *
		 *   struct rlimit
		 *   {
		 *       rlim_t rlim_cur;   // soft limit: the current limit
		 *       rlim_t rlim_max;   // hard limit: the ceiling for rlim_cur
		 *   };
		 *
		 * The functions are not part of POSIX.1 but are provided by SVR4 and
		 * 4.3+BSD; other systems declare the members as int or long.
		 *
		 * RLIMIT_NOFILE, the resource used here, specifies a value one greater
		 * than the maximum file descriptor number that can be opened by this
		 * process, i.e. the number of descriptors actually usable is one less.
		 *
		 * For details: man getrlimit
		 */
		if (0 != getrlimit(RLIMIT_NOFILE, &rlim)) // get the current open-file limit
		{
			log_error_write(srv, __FILE__, __LINE__,
							"ss", "couldn't get 'max filedescriptors'", strerror(errno));
			return -1;
		}

		if (use_rlimit && srv->srvconf.max_fds)
		{
			/*
			 * set rlimits.
			 */
			rlim.rlim_cur = srv->srvconf.max_fds; // soft limit
			rlim.rlim_max = srv->srvconf.max_fds; // hard limit

			if (0 != setrlimit(RLIMIT_NOFILE, &rlim))
			{
				log_error_write(srv, __FILE__, __LINE__,
								"ss", "couldn't set 'max filedescriptors'", strerror(errno));
				return -1;
			}
		}

		// recompute max_fds from the limit that was actually granted.
		if (srv->event_handler == FDEVENT_HANDLER_SELECT)
		{
			srv->max_fds = rlim.rlim_cur < FD_SETSIZE - 200 ? rlim.rlim_cur : FD_SETSIZE - 200;
		} 
		else
		{
			srv->max_fds = rlim.rlim_cur;
		}

		/*
		 * set core file rlimit, if enable_cores is set
		 */
		if (use_rlimit && srv->srvconf.enable_cores
			&& getrlimit(RLIMIT_CORE, &rlim) == 0)
		{
			rlim.rlim_cur = rlim.rlim_max;
			setrlimit(RLIMIT_CORE, &rlim);
		}
#endif
		if (srv->event_handler == FDEVENT_HANDLER_SELECT)
		{
			/*
			 * don't raise the limit above FD_SETSIZE
			 */
			if (srv->max_fds > FD_SETSIZE - 200)
			{
				log_error_write(srv, __FILE__, __LINE__, "sd",
								"can't raise max filedescriptors above",
								FD_SETSIZE - 200,
								"if event-handler is 'select'. Use 'poll' or something else or reduce server.max-fds.");
				return -1;
			}
		}
#ifdef HAVE_PWD_H
		/*
		 * set user and group.
		 */
		if (srv->srvconf.username->used)
		{
			// look up the passwd entry for the configured username.
			if (NULL == (pwd = getpwnam(srv->srvconf.username->ptr)))
			{
				log_error_write(srv, __FILE__, __LINE__, "sb", "can't find username", srv->srvconf.username);
				return -1;
			}

			if (pwd->pw_uid == 0) 
			{
				log_error_write(srv, __FILE__, __LINE__, "s", "I will not set uid to 0\n");
				return -1;
			}
		}

		if (srv->srvconf.groupname->used)
		{
			// look up the group entry for the configured group name.
			if (NULL == (grp = getgrnam(srv->srvconf.groupname->ptr)))
			{
				log_error_write(srv, __FILE__, __LINE__, "sb", "can't find groupname", srv->srvconf.groupname);
				return -1;
			}
			if (grp->gr_gid == 0)
			{
				log_error_write(srv, __FILE__, __LINE__, "s", "I will not set gid to 0\n");
				return -1;
			}
		}
#endif
		/*
		 * we need root-perms for port < 1024 
		 * Initialize the network while we still have root privileges:
		 * create the listening sockets, bind them, and start listening.
		 */
		if (0 != network_init(srv))
		{
			plugins_free(srv);
			server_free(srv);

			return -1;
		}
#ifdef HAVE_PWD_H
		/*
		 * Change group before chroot, when we have access
		 * to /etc/group
		 * */
		if (srv->srvconf.groupname->used)
		{
			setgid(grp->gr_gid);
			setgroups(0, NULL); // drop all supplementary groups.
			if (srv->srvconf.username->used)
			{
				//Initialize the group access list by reading the group database /etc/group and using all groups of which
				//user is a member.  The additional group group is also added to the list.
				initgroups(srv->srvconf.username->ptr, grp->gr_gid);
			}
		}
#endif
#ifdef HAVE_CHROOT
		if (srv->srvconf.changeroot->used)
		{
			//The tzset() function initializes the tzname variable from the TZ environment variable.  
			//This function is automatically called by the other time conversion functions that depend 
			//on the time zone.  
			//In a SysV-like environment it will also set the variables  time-zone  (seconds  West  of GMT) 
			//and daylight 
			//(0 if this time zone does not have any daylight saving time rules, nonzero if there is a
			//time during the year when daylight saving time applies).
			tzset();

			// change the process's root directory; the setting is inherited by all
			// child processes. From its point of view "/" is no longer the system
			// root but the directory configured here.
			if (-1 == chroot(srv->srvconf.changeroot->ptr))
			{
				log_error_write(srv, __FILE__, __LINE__, "ss", "chroot failed: ", strerror(errno));
				return -1;
			}
			// change the working directory.
			/*
			 * Note:
			 *		the root directory has already been changed above, so chdir("/")
			 *		moves into the directory set by chroot(), not into the system root.
			 */
			if (-1 == chdir("/"))
			{
				log_error_write(srv, __FILE__, __LINE__, "ss", "chdir failed: ", strerror(errno));
				return -1;
			}
		}
#endif
#ifdef HAVE_PWD_H
		/*
		 * drop root privs.
		 */
		if (srv->srvconf.username->used)
		{
			setuid(pwd->pw_uid);
		}
#endif
#if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_DUMPABLE)
		/**
		 * on IRIX 6.5.30 they have prctl() but no DUMPABLE
		 */
		if (srv->srvconf.enable_cores)
		{
			prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
		}
#endif
	} 
	/*
	 * the settings below apply when the server is not running as root.
	 */
	else
	{

#ifdef HAVE_GETRLIMIT
		if (0 != getrlimit(RLIMIT_NOFILE, &rlim))
		{
			log_error_write(srv, __FILE__, __LINE__,
							"ss", "couldn't get 'max filedescriptors'",
							strerror(errno));
			return -1;
		}

		/**
		 * we are not root and can't increase the fd-limit, but we can reduce it.
		 */
		if (srv->srvconf.max_fds && srv->srvconf.max_fds < rlim.rlim_cur)
		{
			/*
			 * set rlimits 
			 */

			rlim.rlim_cur = srv->srvconf.max_fds; // only the soft limit can be lowered.

			if (0 != setrlimit(RLIMIT_NOFILE, &rlim))
			{
				log_error_write(srv, __FILE__, __LINE__,
								"ss", "couldn't set 'max filedescriptors'",
								strerror(errno));
				return -1;
			}
		}

		if (srv->event_handler == FDEVENT_HANDLER_SELECT)
		{
			srv->max_fds = rlim.rlim_cur < FD_SETSIZE - 200 ? rlim.rlim_cur : FD_SETSIZE - 200;
		} 
		else
		{
			srv->max_fds = rlim.rlim_cur;
		}

		/*
		 * set core file rlimit, if enable_cores is set 
		 */
		if (srv->srvconf.enable_cores && getrlimit(RLIMIT_CORE, &rlim) == 0)
		{
			rlim.rlim_cur = rlim.rlim_max;
			setrlimit(RLIMIT_CORE, &rlim);
		}
#endif
		if (srv->event_handler == FDEVENT_HANDLER_SELECT)
		{
			/*
			 * don't raise the limit above FD_SETSIZE
			 */
			if (srv->max_fds > FD_SETSIZE - 200)
			{
				log_error_write(srv, __FILE__, __LINE__, "sd",
								"can't raise max filedescriptors above", FD_SETSIZE - 200,
								"if event-handler is 'select'. Use 'poll' or something else or reduce server.max-fds.");
				return -1;
			}
		}

		if (0 != network_init(srv))
		{
			plugins_free(srv);
			server_free(srv);

			return -1;
		}
	}

	/*
	 * set max-conns.
	 */
	if (srv->srvconf.max_conns > srv->max_fds)
	{
		/*
		 * we can't have more connections than max-fds.
		 */
		srv->max_conns = srv->max_fds;
	}  
	else if (srv->srvconf.max_conns)
	{
		/*
		 * otherwise respect the wishes of the user.
		 */
		srv->max_conns = srv->srvconf.max_conns;
	} 
	else
	{
		/*
		 * or use the default.
		 */
		srv->max_conns = srv->max_fds;
	}

	/*
	 * plugins_load above already loaded every plugin and resolved the addresses
	 * of its functions. Here each plugin is registered: the hooks it implements
	 * are recorded in the table srv->plugins_slot.
	 */
	if (HANDLER_GO_ON != plugins_call_init(srv)) 
	{
		log_error_write(srv, __FILE__, __LINE__, "s",
						"Initialization of plugins failed. Going down.");

		plugins_free(srv);
		network_close(srv);
		server_free(srv);

		return -1;
	}
#ifdef HAVE_FORK
	/*
	 * network is up, let's daemonize ourselves.
	 */
	if (srv->srvconf.dont_daemonize == 0)
		daemonize();
#endif

	srv->gid = getgid();
	srv->uid = getuid();

	/*
	 * write pid file.
	 */
	if (pid_fd != -1)
	{
		buffer_copy_long(srv->tmp_buf, getpid());
		buffer_append_string_len(srv->tmp_buf, CONST_STR_LEN("\n"));
		write(pid_fd, srv->tmp_buf->ptr, srv->tmp_buf->used - 1);
		close(pid_fd);
		pid_fd = -1;
	}

	/*
	 * Close stderr ASAP in the child process to make sure that nothing is
	 * being written to that fd which may not be valid anymore. 
	 * Stop logging to stderr and open the configured error log instead.
	 */
	if (-1 == log_error_open(srv))
	{
		log_error_write(srv, __FILE__, __LINE__, "s",
						"Opening errorlog failed. Going down.");

		plugins_free(srv);
		network_close(srv);
		server_free(srv);
		return -1;
	}
	
	// apply the default configuration to the plugins.
	if (HANDLER_GO_ON != plugins_call_set_defaults(srv))
	{
		log_error_write(srv, __FILE__, __LINE__, "s",
						"Configuration of plugins failed. Going down.");

		plugins_free(srv);
		network_close(srv);
		server_free(srv);

		return -1;
	}

	/*
	 * dump unused config-keys 
	 */
	for (i = 0; i < srv->config_context->used; i++)
	{
		array *config = ((data_config *) srv->config_context->data[i])->value;
		size_t j;

		for (j = 0; config && j < config->used; j++)
		{
			data_unset *du = config->data[j];

			/*
			 * all var.* is known as user defined variable 
			 */
			if (strncmp(du->key->ptr, "var.", sizeof("var.") - 1) == 0)
			{
				continue;
			}

			if (NULL == array_get_element(srv->config_touched, du->key->ptr))
			{
				log_error_write(srv, __FILE__, __LINE__, "sbs",
								"WARNING: unknown config-key:", du->key,
								"(ignored)");
			}
		}
	}

	if (srv->config_unsupported)
	{
		log_error_write(srv, __FILE__, __LINE__, "s",
						"Configuration contains unsupported keys. Going down.");
	}

	if (srv->config_deprecated)
	{
		log_error_write(srv, __FILE__, __LINE__, "s",
						"Configuration contains deprecated keys. Going down.");
	}

	if (srv->config_unsupported || srv->config_deprecated)
	{
		plugins_free(srv);
		network_close(srv);
		server_free(srv);

		return -1;
	}

// install the signal handlers.
// SIGPIPE is raised when writing to a pipe whose reading process has terminated.
#ifdef HAVE_SIGACTION
	memset(&act, 0, sizeof(act));
	act.sa_handler = SIG_IGN;
	sigaction(SIGPIPE, &act, NULL);
	sigaction(SIGUSR1, &act, NULL);
# if defined(SA_SIGINFO)
	act.sa_sigaction = sigaction_handler;
	sigemptyset(&act.sa_mask);
	act.sa_flags = SA_SIGINFO;
# else
	act.sa_handler = signal_handler;
	sigemptyset(&act.sa_mask);
	act.sa_flags = 0;
# endif
	sigaction(SIGINT, &act, NULL);
	sigaction(SIGTERM, &act, NULL);
	sigaction(SIGHUP, &act, NULL);
	sigaction(SIGALRM, &act, NULL);
	sigaction(SIGCHLD, &act, NULL);

#elif defined(HAVE_SIGNAL)
	/*
	 * ignore the SIGPIPE from sendfile() 
	 */
	signal(SIGPIPE, SIG_IGN);
	signal(SIGUSR1, SIG_IGN);
	signal(SIGALRM, signal_handler);
	signal(SIGTERM, signal_handler);
	signal(SIGHUP, signal_handler);
	signal(SIGCHLD, signal_handler);
	signal(SIGINT, signal_handler);
#endif

#ifdef USE_ALARM
	signal(SIGALRM, signal_handler);

	/*
	 * setup periodic timer (1 second) 
	 *  The  system provides each process with three interval timers, each decrementing in a distinct time domain.  
	 *  When any timer expires a signal is sent to the process, and the timer (potentially) restarts.
	 *
	 *     ITIMER_REAL    decrements in real time, and delivers SIGALRM upon expiration.
	 *     ITIMER_VIRTUAL decrements only when the process is executing, and delivers SIGVTALRM upon expiration.
	 *     ITIMER_PROF    decrements both when the process executes and when the system is executing on 
	 * 					  behalf of the process.   
	 *     				  Coupled with ITIMER_VIRTUAL,  this  timer  is usually used to profile the time spent 
	 *     				  by the application in user and kernel space.
	 *  SIGPROF is delivered upon expiration.
	 *  Timer values are defined by the following structures:
	 *  struct itimerval 
	 *  {
	 *    	struct timeval it_interval; //next value
	 *    	struct timeval it_value;    //current value
	 *  };
	 *  struct timeval 
	 *  {
	 *  	long tv_sec;                // seconds 
	 *  	long tv_usec;               //microseconds
	 *  };
	 *  The function getitimer() fills the structure indicated by value with the current setting for the timer 
	 *  indicated by which  (one  of ITIMER_REAL, ITIMER_VIRTUAL, or ITIMER_PROF).  The element it_value is 
	 *  set to the amount of time remaining on the timer, or zero if the timer is disabled.
	 *  Similarly, it_interval is set to the reset value.  The function setitimer() sets the indicated timer to the
	 *  value in value.  If ovalue is nonzero, the old value of the timer is stored there.
	 */
	if (setitimer(ITIMER_REAL, &interval, NULL))
	{
		log_error_write(srv, __FILE__, __LINE__, "s", "setting timer failed");
		return -1;
	}

	getitimer(ITIMER_REAL, &interval);
#endif

#ifdef HAVE_FORK
	/*
	 * *************************
	 * start watcher and workers
	 * *************************
	 *
	 * The code below forks several child processes, the "workers", which accept
	 * and handle client connections. The current process becomes the "watcher":
	 * it monitors the workers and forks a replacement whenever one dies
	 * unexpectedly. On shutdown the watcher stops all workers and cleans up.
	 */
	num_childs = srv->srvconf.max_worker; // maximum number of workers.
	if (num_childs > 0)
	{
		int child = 0;
		while (!child && !srv_shutdown && !graceful_shutdown)
		{
			if (num_childs > 0) // still more workers to fork
			{
				switch (fork())
				{
				case -1:
					return -1;
				case 0:
					child = 1;
					break;
				default:
					num_childs--;
					break;
				}
			} 
			else 		// watcher
			{
				/**
				 * Once enough workers have been forked, the watcher keeps looping
				 * here. As soon as a worker exits (the process dies), a new worker
				 * is forked immediately. If wait() is interrupted by SIGHUP, the
				 * signal is forwarded to every process in the process group
				 * (the parent and its children, including the watcher itself).
				 */
				int status;

				if (-1 != wait(&status))
				{
					/** 
					 * one of our workers went away 
					 */
					num_childs++;
				} 
				else
				{
					switch (errno)
					{
					case EINTR:
						/**
						 * if we receive a SIGHUP we have to close our logs ourself as we don't 
						 * have the mainloop who can help us here
						 */
						if (handle_sig_hup)
						{
							handle_sig_hup = 0;

							log_error_cycle(srv);

							/**
							 * forward to all procs in the process-group
							 * (the parent and all of its children);
							 * we also send it to ourselves
							 */
							if (!forwarded_sig_hup)
							{
								forwarded_sig_hup = 1;
								kill(0, SIGHUP);
							}
						}
						break;
					default:
						break;
					}
				}
			}
		}

		/**
		 * for the parent this is the exit-point 
		 * *****************************************************
		 * The parent process, i.e. the watcher, exits inside this if block;
		 * everything after it is code executed by the workers.
		 * *****************************************************
		 */
		if (!child)
		{
			/** 
			 * kill all children too.
			 */
			if (graceful_shutdown)
			{
				kill(0, SIGINT);
			} 
			else if (srv_shutdown)
			{
				kill(0, SIGTERM);
			}

			log_error_close(srv);
			network_close(srv);
			connections_free(srv);
			plugins_free(srv);
			server_free(srv);
			return 0;
		}
	}
#endif

	/* 
	 * **************************
	 * from here on, this is the code executed by each worker.
	 * **************************
	 */

	
	if (NULL == (srv->ev = fdevent_init(srv->max_fds + 1, srv->event_handler)))
	{
		log_error_write(srv, __FILE__, __LINE__, "s", "fdevent_init failed");
		return -1;
	}
	/*
	 * kqueue() is called here, select resets its internals,
	 * all server sockets get their handlers
	 * Register the listening sockets with the fdevent system. Each socket is
	 * registered together with a handler for its I/O events; for the listening
	 * sockets registered here that handler is network_server_handle_fdevent,
	 * which accepts new connections.
	 * */
	if (0 != network_register_fdevents(srv))
	{
		plugins_free(srv);
		network_close(srv);
		server_free(srv);

		return -1;
	}

	/*
	 * might fail if the user is using fam (not gamin) and famd isn't running.
	 */
	if (NULL == (srv->stat_cache = stat_cache_init()))
	{
		log_error_write(srv, __FILE__, __LINE__, "s",
						"stat-cache could not be setup, dieing.");
		return -1;
	}
#ifdef HAVE_FAM_H
	/*
	 * setup FAM.
	 */
	if (srv->srvconf.stat_cache_engine == STAT_CACHE_ENGINE_FAM)
	{
		if (0 != FAMOpen2(srv->stat_cache->fam, "lighttpd"))
		{
			log_error_write(srv, __FILE__, __LINE__, "s",
							"could not open a fam connection, dieing.");
			return -1;
		}
#ifdef HAVE_FAMNOEXISTS
		FAMNoExists(srv->stat_cache->fam);
#endif

		srv->stat_cache->fam_fcce_ndx = -1;
		fdevent_register(srv->ev,
						 FAMCONNECTION_GETFD(srv->stat_cache->fam),
						 stat_cache_handle_fdevent, NULL);
		fdevent_event_add(srv->ev, &(srv->stat_cache->fam_fcce_ndx),
						  FAMCONNECTION_GETFD(srv->stat_cache->fam),
						  FDEVENT_IN);
	}
#endif


	/*
	 * get the current number of FDs (the lowest currently unused descriptor).
	 */
	srv->cur_fds = open("/dev/null", O_RDONLY);
	close(srv->cur_fds);

	for (i = 0; i < srv->srv_sockets.used; i++)
	{
		server_socket *srv_socket = srv->srv_sockets.ptr[i];
	 	/*
		 * close fd on exec (cgi) 
		 */
		if (-1 == fdevent_fcntl_set(srv->ev, srv_socket->fd))
		{
			log_error_write(srv, __FILE__, __LINE__, "ss", "fcntl failed:", strerror(errno));
			return -1;
		}
	}

	/*
	 * main-loop 
	 * *******************
	 * the worker's main loop.
	 * *******************
	 */
	while (!srv_shutdown)
	{
		int n;
		size_t ndx;
		time_t min_ts;

		/**
		 * A SIGHUP has been received. It is used here to start a new log cycle
		 * and to notify the plugins.
		 */
		if (handle_sig_hup)
		{
			handler_t r;

			/*
			 * reset the notification
			 */
			handle_sig_hup = 0;

			/*
			 * cycle logfiles: start a new log cycle.
			 * First call every plugin's SIGHUP handler (note that a switch is used
			 * here instead of an if); most plugins do not implement this hook.
			 */
			switch (r = plugins_call_handle_sighup(srv))
			{
				case HANDLER_GO_ON:
					break;
				default:
					log_error_write(srv, __FILE__, __LINE__, "sd", "sighup-handler return with an error", r);
					break;
			}

			if (-1 == log_error_cycle(srv))
			{
				log_error_write(srv, __FILE__, __LINE__, "s", "cycling errorlog failed, dying");

				return -1;
			} 
			else
			{
#ifdef HAVE_SIGACTION
				log_error_write(srv, __FILE__, __LINE__, "sdsd", "logfiles cycled UID =",
								last_sighup_info.si_uid, "PID =", last_sighup_info.si_pid);
#else
				log_error_write(srv, __FILE__, __LINE__, "s", "logfiles cycled");
#endif
			}
		}

		/**
		 * SIGALRM from the interval timer: one second has passed.
		 */
		if (handle_sig_alarm)
		{
			/*
			 * a new second has started...
			 */

#ifdef USE_ALARM
			/*
			 * reset the notification
			 */
			handle_sig_alarm = 0;
#endif

			/*
			 * get the current time (one-second resolution)
			 */
			min_ts = time(NULL);

			/**
			 * Compare with the server's recorded current time. If they match, the
			 * server is still within the same second and simply keeps handling
			 * requests. If not, a new one-second period has begun and the periodic
			 * work has to be done: plugin triggers, stat-cache cleanup and, most
			 * importantly, the connection timeout checks.
			 */
			if (min_ts != srv->cur_ts)
			{
				int cs = 0;
				connections *conns = srv->conns;
				handler_t r;

				switch (r = plugins_call_handle_trigger(srv))
				{
					case HANDLER_GO_ON:
						break;
					case HANDLER_ERROR:
						log_error_write(srv, __FILE__, __LINE__, "s", "one of the triggers failed");
						break;
					default:
						log_error_write(srv, __FILE__, __LINE__, "d", r);
						break;
				}

				/*
				 * trigger waitpid
				 */
				srv->cur_ts = min_ts;

				/*
				 * cleanup the stat-cache (once per second)
				 */
				stat_cache_trigger_cleanup(srv);

				/**
				 * check all connections for timeouts.
				 */
				for (ndx = 0; ndx < conns->used; ndx++)
				{
					int changed = 0;
					connection *con;
					int t_diff;

					con = conns->ptr[ndx];

					// the connection is in a read state
					if (con->state == CON_STATE_READ || con->state == CON_STATE_READ_POST)
					{
						if (con->request_count == 1) // first request on this connection
						{
							if (srv->cur_ts - con->read_idle_ts > con->conf.max_read_idle)
							{
								/*
								 * time - out 
								 */
#if 0
								log_error_write(srv, __FILE__, __LINE__, "sd",
												"connection closed - read-timeout:", con->fd);
#endif
								connection_set_state(srv, con, CON_STATE_ERROR);
								changed = 1;
							}
						}  // keep-alive connection: it has already handled more than one request
						else
						{
							if (srv->cur_ts - con->read_idle_ts > con->conf.max_keep_alive_idle)
							{
								/*
								 * time - out 
								 */
#if 0
								log_error_write(srv, __FILE__, __LINE__, "sd",
												"connection closed - read-timeout:", con->fd);
#endif
								connection_set_state(srv, con, CON_STATE_ERROR);
								changed = 1;
							}
						}
					}

					// the connection is in the write state
					if ((con->state == CON_STATE_WRITE) && (con->write_request_ts != 0))
					{
#if 0
						if (srv->cur_ts - con->write_request_ts > 60)
						{
							log_error_write(srv, __FILE__, __LINE__, "sdd",
											"connection closed - pre-write-request-timeout:",
											con->fd, srv->cur_ts - con->write_request_ts);
						}
#endif

						if (srv->cur_ts - con->write_request_ts > con->conf.max_write_idle)
						{
							/*
							 * time - out 
							 */
#if 1
							log_error_write(srv, __FILE__, __LINE__, "sbsosds", "NOTE: a request for",
											con->request.uri, "timed out after writing", con->bytes_written, "bytes. We waited",
											(int) con->conf.max_write_idle,
											"seconds. If this a problem increase server.max-write-idle");
#endif
							connection_set_state(srv, con, CON_STATE_ERROR);
							changed = 1;
						}
					}

					/*
					 * we don't like div by zero...
					 */
					if (0 == (t_diff = srv->cur_ts - con->connection_start))
						t_diff = 1;

					/**
					 * The if below is not a timeout check. lighttpd gives every
					 * connection a kbytes_per_second limit on how much data it may
					 * send per second; a connection that exceeded the limit stopped
					 * sending and was put back on the joblist for a later round.
					 * This presumably balances the traffic between connections.
					 */
					if (con->traffic_limit_reached &&
						(con->conf.kbytes_per_second == 0 
							|| ((con->bytes_written / t_diff) < con->conf.kbytes_per_second * 1024)))
					{
						/*
						 * enable connection again 
						 */
						con->traffic_limit_reached = 0;
						changed = 1;
					}

					if (changed)
					{
						connection_state_machine(srv, con);
					}
					con->bytes_written_cur_second = 0;
					*(con->conf.global_bytes_per_second_cnt_ptr) = 0;

#if 0
					if (cs == 0)
					{
						fprintf(stderr, "connection-state: ");
						cs = 1;
					}
					fprintf(stderr, "c[%d,%d]: %s ", con->fd, con->fcgi.fd, connection_get_state(con->state));
#endif
				}//end of for( ndx = 0; ...

				if (cs == 1)
					fprintf(stderr, "\n");
			}//end of if (min_ts != srv->cur_ts)...
		}//end of if (handle_sig_alarm)...

		if (srv->sockets_disabled)
		{
			/*
			 * our server sockets are disabled (see the overload handling below).
			 * If enough fds and connection slots are free again, re-add the
			 * listening sockets to the fdevent system.
			 */

			if ((srv->cur_fds + srv->want_fds < srv->max_fds * 0.8) &&	/* we have enough unused fds */
				(srv->conns->used < srv->max_conns * 0.9) && (0 == graceful_shutdown))
			{
				for (i = 0; i < srv->srv_sockets.used; i++)
				{
					server_socket *srv_socket = srv->srv_sockets.ptr[i];
					fdevent_event_add(srv->ev, &(srv_socket->fde_ndx), srv_socket->fd, FDEVENT_IN);
				}
				log_error_write(srv, __FILE__, __LINE__, "s", "[note] sockets enabled again");
				srv->sockets_disabled = 0;
			}
		} 
		else
		{
			/*
			 * the code below handles server overload.
			 */
			if ((srv->cur_fds + srv->want_fds > srv->max_fds * 0.9) ||	/* out of fds */
				(srv->conns->used > srv->max_conns) ||	/* out of connections */
				(graceful_shutdown)) /* graceful_shutdown */
			{
				/*
				 * disable server-fds: stop watching all listening sockets
				 */
				for (i = 0; i < srv->srv_sockets.used; i++)
				{
					server_socket *srv_socket = srv->srv_sockets.ptr[i];
					fdevent_event_del(srv->ev, &(srv_socket->fde_ndx), srv_socket->fd);

					if (graceful_shutdown)
					{
						/*
						 * we don't want this socket anymore, closing it right 
						 * away will make it possible for the next lighttpd to
						 * take over (graceful restart) 
						 */

						fdevent_unregister(srv->ev, srv_socket->fd);
						close(srv_socket->fd);
						srv_socket->fd = -1;

						/*
						 * network_close() will cleanup after us 
						 */

						if (srv->srvconf.pid_file->used && srv->srvconf.changeroot->used == 0)
						{
							if (0 != unlink(srv->srvconf.pid_file->ptr))
							{
								if (errno != EACCES && errno != EPERM)
								{
									log_error_write(srv, __FILE__, __LINE__, "sbds", "unlink failed for:",
													srv -> srvconf.pid_file, errno, strerror(errno));
								}
							}
						}
					}
				}//end of for(i = 0; ...

				if (graceful_shutdown)
				{
					log_error_write(srv, __FILE__, __LINE__, "s", "[note] graceful shutdown started");
				} 
				else if (srv->conns->used > srv->max_conns)
				{
					log_error_write(srv, __FILE__, __LINE__, "s","[note] sockets disabled, connection limit reached");
				} 
				else
				{
					log_error_write(srv, __FILE__, __LINE__, "s", "[note] sockets disabled, out-of-fds");
				}

				srv->sockets_disabled = 1; // overloaded: listening sockets disabled.
			}
		}

		if (graceful_shutdown && srv->conns->used == 0)
		{
			/*
			 * we are in graceful shutdown phase and all connections are closed
			 * we are ready to terminate without harming anyone 
			 */
			srv_shutdown = 1;
		}

		/*
		 * we still have some fds to share 
		 */
		if (srv->want_fds)
		{
			/*
			 * check the fdwaitqueue for waiting fds 
			 */
			int free_fds = srv->max_fds - srv->cur_fds - 16;
			connection *con;

			for (; free_fds > 0 && NULL != (con = fdwaitqueue_unshift(srv, srv->fdwaitqueue)); free_fds--)
			{
				connection_state_machine(srv, con);
				srv->want_fds--;
			}
		}

		/**
		 **********************************************************
		 * All the housekeeping above is done. Now the real work:
		 * handling the requests.
		 **********************************************************
		 */

		// run the event poll; I/O multiplexing underneath.
		if ((n = fdevent_poll(srv->ev, 1000)) > 0)
		{
			/*
			 * n is the number of ready events (new requests, readable/writable fds, ...)
			 */
			int revents;
			int fd_ndx;
#if 0
			if (n > 0)
			{
				log_error_write(srv, __FILE__, __LINE__, "sd", "polls:", n);
			}
#endif
			fd_ndx = -1;
			/**
			 * this loop handles the ready events one by one until all of them are done.
			 */
			do
			{
				fdevent_handler handler;
				void *context;
				handler_t r;

				fd_ndx = fdevent_event_next_fdndx(srv->ev, fd_ndx);
				revents = fdevent_event_get_revent(srv->ev, fd_ndx);
				fd = fdevent_event_get_fd(srv->ev, fd_ndx);
				handler = fdevent_get_handler(srv->ev, fd);
				context = fdevent_get_context(srv->ev, fd);

				/*
				 * connection_handle_fdevent needs a joblist_append 
				 */
#if 0
				log_error_write(srv, __FILE__, __LINE__, "sdd",
								"event for", fd, revents);
#endif
				/**
				 * call the handler registered for this event;
				 * this is the heart of the whole loop.
				 */
				switch (r = (*handler) (srv, context, revents))
				{
				case HANDLER_FINISHED:
				case HANDLER_GO_ON:
				case HANDLER_WAIT_FOR_EVENT:
				case HANDLER_WAIT_FOR_FD:
					break;
				case HANDLER_ERROR:
					/*
					 * should never happen 
					 */
					SEGFAULT();
					break;
				default:
					log_error_write(srv, __FILE__, __LINE__, "d", r);
					break;
				}
			}while (--n > 0);

			// all events of this round have now been handled.
		} 
		else if (n < 0 && errno != EINTR)
		{
			log_error_write(srv, __FILE__, __LINE__, "ss", "fdevent_poll failed:", strerror(errno));
		}

		// The loop above only handled descriptors that saw an I/O event, so their
		// state machines are already up to date. Connections on the joblist that
		// did not see an event still need their state machines run; the loop
		// below takes care of them.
		for (ndx = 0; ndx < srv->joblist->used; ndx++)
		{
			connection *con = srv->joblist->ptr[ndx];
			handler_t r;

			connection_state_machine(srv, con);

			switch (r = plugins_call_handle_joblist(srv, con))
			{
			case HANDLER_FINISHED:
			case HANDLER_GO_ON:
				break;
			default:
				log_error_write(srv, __FILE__, __LINE__, "d", r);
				break;
			}

			con->in_joblist = 0;
		}

		srv->joblist->used = 0;
	} /* end of main loop  */
	/*
	 * end of the main loop
	 */

	if (srv->srvconf.pid_file->used && srv->srvconf.changeroot->used == 0 && 0 == graceful_shutdown)
	{
		if (0 != unlink(srv->srvconf.pid_file->ptr))
		{
			if (errno != EACCES && errno != EPERM)
			{
				log_error_write(srv, __FILE__, __LINE__, "sbds", "unlink failed for:",
								srv->srvconf.pid_file, errno, strerror(errno));
			}
		}
	}
#ifdef HAVE_SIGACTION
	log_error_write(srv, __FILE__, __LINE__, "sdsd", "server stopped by UID =",
					last_sigterm_info.si_uid,"PID =", last_sigterm_info.si_pid);
#else
	log_error_write(srv, __FILE__, __LINE__, "s", "server stopped");
#endif

	/*
	 * clean-up 
	 */
	log_error_close(srv);
	network_close(srv);
	connections_free(srv);
	plugins_free(srv);
	server_free(srv);

	return 0;
}
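
/*
 * A minimal, hypothetical sketch (not lighttpd code) of the watcher/worker
 * pattern used in main() above: the parent forks num_childs workers and then
 * blocks in wait(); whenever a worker dies it forks a replacement. The names
 * and the worker body are placeholders; signal handling and graceful shutdown
 * are left out.
 */
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

static void worker_main(void) {
	/* a real worker would run the fdevent main loop here */
	pause();
}

static void watcher(int num_childs) {
	int running = 0;

	for (;;) {
		/* fork until the configured number of workers is reached */
		while (running < num_childs) {
			switch (fork()) {
			case 0:
				worker_main();
				_exit(0);
			case -1:
				exit(1);
			default:
				running++;
			}
		}

		/* block until one worker exits, then loop around and respawn it */
		if (wait(NULL) > 0) running--;
	}
}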
/* Open and return the socket. Cf. proxy_establish_connection() in
   mod_proxy.c. */
static handler_t lisp_connection_open(server *srv, handler_ctx *hctx)
{
  struct sockaddr_in addr;
  int ret, sock;
  plugin_data *p = hctx->plugin;
  connection *con = hctx->connection;

  if (hctx->socket_data) {
    sock = hctx->socket_data->fd;
  } else {
    if (! (hctx->socket_data = mod_lisp_allocate_socket(p))) {
      LOG_ERROR_MAYBE(srv, p, LOGLEVEL_ERR,
                      "Cannot allocate from Lisp socket pool: no free slots");
      return HANDLER_WAIT_FOR_FD;
    }
    LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_DEBUG,
                        "Lisp process at %s:%d for %s: allocated fd=%d,"
                        " fde_ndx=%d, state=%d, total socket slot(s) allocated: %d",
                        SPLICE_HOSTPORT(hctx->socket_data),
                        hctx->socket_data->fd, hctx->socket_data->fde_ndx,
                        hctx->socket_data->state, p->LispSocketPoolUsed);
    if ((sock = hctx->socket_data->fd) >= 0) {
      if (FD_STATE_UNSAFE_EV_COUNT(hctx->socket_data->state) > 0) {
        fdevent_event_del(srv->ev, SPLICE_FDE(hctx->socket_data));
        fdevent_unregister(srv->ev, sock);
        close(sock);
        srv->cur_fds--;
        LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_DEBUG,
                            "Lisp process at %s:%d for %s: close unsafe socket (fd=%d)",
                            SPLICE_HOSTPORT(hctx->socket_data), sock);
        mod_lisp_reset_socket(hctx->socket_data);
        sock = -1;
      } else {
        return HANDLER_GO_ON;
      }
    }
  }

  if (sock <= 0) {
    if (-1 == (sock = socket(AF_INET, SOCK_STREAM, 0))) {
      LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_ERR,
                          "socket() failed (%s)", strerror(errno));
      return HANDLER_ERROR;
    }
    mod_lisp_reset_socket(hctx->socket_data);
    hctx->socket_data->fd = sock;
    srv->cur_fds++;
    fdevent_register(srv->ev, sock, lisp_handle_fdevent, hctx);

    if (-1 == fdevent_fcntl_set(srv->ev, sock)) {
      LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_ERR,
                          "fcntl() failed (%s)", strerror(errno));
      return HANDLER_ERROR;
    }
    
    addr.sin_addr.s_addr = inet_addr(hctx->socket_data->ip->ptr);
    addr.sin_port = htons(hctx->socket_data->port);
    addr.sin_family = AF_INET;
   
    /* Try to connect to Lisp. */
    ret = connect(sock, (struct sockaddr *)&addr, sizeof(struct sockaddr_in));
#ifdef WIN32
    if (ret == SOCKET_ERROR) {
      ret = -1;
      errno = WSAGetLastError()-WSABASEERR;
    }
#endif /* WIN32 */
    if (ret == -1 && (errno == EINTR || errno == EINPROGRESS)) {
      /* As soon as something happens on the socket, this function shall be
         re-entered and follow the getsockopt branch below. */
      fdevent_event_set(srv->ev, SPLICE_FDE(hctx->socket_data), FDEVENT_OUT);
      LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_DEBUG,
                          "connection to Lisp process at %s:%d for %s delayed (%s)",
                          SPLICE_HOSTPORT(hctx->socket_data), strerror(errno));
      return HANDLER_WAIT_FOR_EVENT;
    }
  } else {
    int sockerr;
    socklen_t sockerr_len = sizeof(sockerr);
    fdevent_event_del(srv->ev, SPLICE_FDE(hctx->socket_data));
    /* try to finish the connect() */
    if (0 != getsockopt(sock, SOL_SOCKET, SO_ERROR, &sockerr, &sockerr_len)) {
      LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_ERR,
                          "getsockopt() failed (%s)", strerror(errno));
      return HANDLER_ERROR;
    }
    ret = sockerr ? -1 : 0;
  }

  /* Check if we connected */
  if (ret == -1) {
    LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_ERR,
                        "cannot connect socket to Lisp process at %s:%d for %s (%s)",
                        SPLICE_HOSTPORT(hctx->socket_data), strerror(errno));
    hctx->socket_data->fde_ndx = -1;
    mod_lisp_connection_close(srv, con, p);
    /* reset the environment and restart the sub-request */
    connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);
    con->http_status = 503;
    con->mode = DIRECT;
    joblist_append(srv, con);
    return HANDLER_FINISHED;
  }

  hctx->socket_data->state |= (FD_STATE_READ | FD_STATE_WRITE);
  LOG_ERROR_MAYBE_BUF(srv, p, LOGLEVEL_DEBUG,
                      "opened socket fd=%d to Lisp process at %s:%d for %s",
                      sock, SPLICE_HOSTPORT(hctx->socket_data));
  return HANDLER_GO_ON;
}
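
/*
 * A minimal, hypothetical sketch (not lighttpd code) of the non-blocking
 * connect() pattern used by the proxy and mod_lisp handlers above: connect()
 * on a non-blocking socket usually returns -1 with EINPROGRESS; once the
 * descriptor polls writable, the real result is fetched with
 * getsockopt(SO_ERROR). The function names are illustrative only.
 */
#include <errno.h>
#include <fcntl.h>
#include <sys/socket.h>

/* step 1: start the connect; 0 = connected, 1 = in progress, -1 = error */
static int connect_start(int fd, const struct sockaddr *addr, socklen_t len) {
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);

	if (0 == connect(fd, addr, len)) return 0;

	return (errno == EINPROGRESS || errno == EINTR) ? 1 : -1;
}

/* step 2: after the fd became writable, check whether the connect succeeded */
static int connect_finish(int fd) {
	int sockerr = 0;
	socklen_t sockerr_len = sizeof(sockerr);

	if (0 != getsockopt(fd, SOL_SOCKET, SO_ERROR, &sockerr, &sockerr_len)) return -1;

	return (0 == sockerr) ? 0 : -1;
}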
connection *connection_accept(server *srv, server_socket *srv_socket) {
	/* accept everything */

	/* search an empty place */
	int cnt;
	sock_addr cnt_addr;
#ifndef HAVE_LIBMTCP
	socklen_t cnt_len;
#endif
	/* accept it and register the fd */

	/**
	 * check if we can still open a new connections
	 *
	 * see #1216
	 */

	if (srv->conns->used >= srv->max_conns) {
		return NULL;
	}

#ifdef HAVE_LIBMTCP
	if (-1 == (cnt = mtcp_accept(srv->mctx, srv_socket->fd, NULL, NULL))) {
#else
	cnt_len = sizeof(cnt_addr);
	if (-1 == (cnt = accept(srv_socket->fd, (struct sockaddr *) &cnt_addr, &cnt_len))) {
#endif
		switch (errno) {
		case EAGAIN:
#if EWOULDBLOCK != EAGAIN
		case EWOULDBLOCK:
#endif
		case EINTR:
			/* we were stopped _before_ we had a connection */
		case ECONNABORTED: /* this is a FreeBSD thingy */
			/* we were stopped _after_ we had a connection */
			break;
		case EMFILE:
			/* out of fds */
			break;
		default:
			log_error_write(srv, __FILE__, __LINE__, "ssd", "accept failed:", strerror(errno), errno);
		}
		return NULL;
	} else {
		connection *con;

		srv->cur_fds++;

		/* ok, we have the connection, register it */
#if 0
		log_error_write(srv, __FILE__, __LINE__, "sd",
				"appected()", cnt);
#endif
		srv->con_opened++;

		con = connections_get_new_connection(srv);

		con->fd = cnt;
		con->fde_ndx = -1;
#if 0
		gettimeofday(&(con->start_tv), NULL);
#endif
		fdevent_register(srv->ev, con->fd, connection_handle_fdevent, con);

		connection_set_state(srv, con, CON_STATE_REQUEST_START);

		con->connection_start = srv->cur_ts;
		con->dst_addr = cnt_addr;
		buffer_copy_string(con->dst_addr_buf, inet_ntop_cache_get_ip(srv, &(con->dst_addr)));
		con->srv_socket = srv_socket;

		if (-1 == (fdevent_fcntl_set(srv->ev, con->fd))) {
			log_error_write(srv, __FILE__, __LINE__, "ss", "fcntl failed: ", strerror(errno));
			return NULL;
		}
#ifdef USE_OPENSSL
		/* connect FD to SSL */
		if (srv_socket->is_ssl) {
			if (NULL == (con->ssl = SSL_new(srv_socket->ssl_ctx))) {
				log_error_write(srv, __FILE__, __LINE__, "ss", "SSL:",
						ERR_error_string(ERR_get_error(), NULL));

				return NULL;
			}

			con->renegotiations = 0;
			SSL_set_app_data(con->ssl, con);
			SSL_set_accept_state(con->ssl);
			con->conf.is_ssl=1;

			if (1 != (SSL_set_fd(con->ssl, cnt))) {
				log_error_write(srv, __FILE__, __LINE__, "ss", "SSL:",
						ERR_error_string(ERR_get_error(), NULL));
				return NULL;
			}
		}
#endif
		return con;
	}
}
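
/*
 * A minimal, hypothetical sketch (not lighttpd code) of the accept() error
 * handling in connection_accept() above: transient conditions (no pending
 * connection, interruption, an aborted handshake, running out of fds) mean
 * "try again later", everything else is a genuine error. The return
 * convention is an assumption for illustration.
 */
#include <errno.h>
#include <sys/socket.h>

/* returns the new fd, -1 if the caller should simply retry later,
 * or -2 on a genuine error */
static int accept_once(int listen_fd) {
	int fd = accept(listen_fd, NULL, NULL);

	if (fd >= 0) return fd;

	switch (errno) {
	case EAGAIN:
#if EWOULDBLOCK != EAGAIN
	case EWOULDBLOCK:
#endif
	case EINTR:
	case ECONNABORTED:
	case EMFILE:
		return -1; /* transient: nothing usable to accept right now */
	default:
		return -2; /* unexpected failure */
	}
}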


int connection_state_machine(server *srv, connection *con) {
	int done = 0, r;
#ifdef USE_OPENSSL
	server_socket *srv_sock = con->srv_socket;
#endif

	if (srv->srvconf.log_state_handling) {
		log_error_write(srv, __FILE__, __LINE__, "sds",
				"state at start",
				con->fd,
				connection_get_state(con->state));
	}

	while (done == 0) {
		size_t ostate = con->state;

		switch (con->state) {
		case CON_STATE_REQUEST_START: /* transient */
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd, connection_get_state(con->state));
			}

			con->request_start = srv->cur_ts;
			con->read_idle_ts = srv->cur_ts;

			con->request_count++;
			con->loops_per_request = 0;

			connection_set_state(srv, con, CON_STATE_READ);

			/* patch con->conf.is_ssl if the connection is a ssl-socket already */

#ifdef USE_OPENSSL
			con->conf.is_ssl = srv_sock->is_ssl;
#endif

			break;
		case CON_STATE_REQUEST_END: /* transient */
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd, connection_get_state(con->state));
			}

			buffer_reset(con->uri.authority);
			buffer_reset(con->uri.path);
			buffer_reset(con->uri.query);
			buffer_reset(con->request.orig_uri);

			if (http_request_parse(srv, con)) {
				/* we have to read some data from the POST request */

				connection_set_state(srv, con, CON_STATE_READ_POST);

				break;
			}

			connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);

			break;
		case CON_STATE_HANDLE_REQUEST:
			/*
			 * the request has been parsed;
			 * decide what to do with it
			 */

			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd, connection_get_state(con->state));
			}

			switch (r = http_response_prepare(srv, con)) {
			case HANDLER_FINISHED:
				if (con->mode == DIRECT) {
					if (con->http_status == 404 ||
					    con->http_status == 403) {
						/* 404 error-handler */

						if (con->in_error_handler == 0 &&
						    (!buffer_is_empty(con->conf.error_handler) ||
						     !buffer_is_empty(con->error_handler))) {
							/* call error-handler */

							con->error_handler_saved_status = con->http_status;
							con->http_status = 0;

							if (buffer_is_empty(con->error_handler)) {
								buffer_copy_string_buffer(con->request.uri, con->conf.error_handler);
							} else {
								buffer_copy_string_buffer(con->request.uri, con->error_handler);
							}
							buffer_reset(con->physical.path);

							con->in_error_handler = 1;

							connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);

							done = -1;
							break;
						} else if (con->in_error_handler) {
							/* error-handler is a 404 */

							con->http_status = con->error_handler_saved_status;
						}
					} else if (con->in_error_handler) {
						/* error-handler is back and has generated content */
						/* if Status: was set, take it otherwise use 200 */
					}
				}
				if (con->http_status == 0) con->http_status = 200;

				/* we have something to send, go on */
				connection_set_state(srv, con, CON_STATE_RESPONSE_START);
				break;
			case HANDLER_WAIT_FOR_FD:
				srv->want_fds++;

				fdwaitqueue_append(srv, con);

				connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);

				break;
			case HANDLER_COMEBACK:
				done = -1;
				/* fall through */
			case HANDLER_WAIT_FOR_EVENT:
				/* come back here */
				connection_set_state(srv, con, CON_STATE_HANDLE_REQUEST);

				break;
			case HANDLER_ERROR:
				/* something went wrong */
				connection_set_state(srv, con, CON_STATE_ERROR);
				break;
			default:
				log_error_write(srv, __FILE__, __LINE__, "sdd", "unknown ret-value: ", con->fd, r);
				break;
			}

			break;
		case CON_STATE_RESPONSE_START:
			/*
			 * the decision is done
			 * - create the HTTP-Response-Header
			 *
			 */

			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd, connection_get_state(con->state));
			}

			if (-1 == connection_handle_write_prepare(srv, con)) {
				connection_set_state(srv, con, CON_STATE_ERROR);

				break;
			}

			connection_set_state(srv, con, CON_STATE_WRITE);
			break;
		case CON_STATE_RESPONSE_END: /* transient */
			/* log the request */

			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd, connection_get_state(con->state));
			}

			plugins_call_handle_request_done(srv, con);

			srv->con_written++;

			if (con->keep_alive) {
				connection_set_state(srv, con, CON_STATE_REQUEST_START);

#if 0
				con->request_start = srv->cur_ts;
				con->read_idle_ts = srv->cur_ts;
#endif
			} else {
				switch(r = plugins_call_handle_connection_close(srv, con)) {
				case HANDLER_GO_ON:
				case HANDLER_FINISHED:
					break;
				default:
					log_error_write(srv, __FILE__, __LINE__, "sd", "unhandling return value", r);
					break;
				}

#ifdef USE_OPENSSL
				if (srv_sock->is_ssl) {
					switch (SSL_shutdown(con->ssl)) {
					case 1:
						/* done */
						break;
					case 0:
						/* wait for fd-event
						 *
						 * FIXME: wait for fdevent and call SSL_shutdown again
						 *
						 */

						break;
					default:
						log_error_write(srv, __FILE__, __LINE__, "ss", "SSL:",
								ERR_error_string(ERR_get_error(), NULL));
					}
				}
#endif
				if ((0 == shutdown(con->fd, SHUT_WR))) {
					con->close_timeout_ts = srv->cur_ts;
					connection_set_state(srv, con, CON_STATE_CLOSE);
				} else {
					connection_close(srv, con);
				}

				srv->con_closed++;
			}

			connection_reset(srv, con);

			break;
		case CON_STATE_CONNECT:
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd, connection_get_state(con->state));
			}

			chunkqueue_reset(con->read_queue);

			con->request_count = 0;

			break;
		case CON_STATE_CLOSE:
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd, connection_get_state(con->state));
			}

			/* we have to do the linger_on_close stuff regardless
			 * of con->keep_alive; even non-keepalive sockets may
			 * still have unread data, and closing before reading
			 * it will make the client not see all our output.
			 */
			{
				int len;
				char buf[1024];
#ifdef HAVE_LIBMTCP
				len = mtcp_read(srv->mctx, con->fd, buf, sizeof(buf));
#else
				len = read(con->fd, buf, sizeof(buf));
#endif
				if (len == 0 || (len < 0 && errno != EAGAIN && errno != EINTR) ) {
					con->close_timeout_ts = srv->cur_ts - (HTTP_LINGER_TIMEOUT+1);
				}
			}

			if (srv->cur_ts - con->close_timeout_ts > HTTP_LINGER_TIMEOUT) {
				connection_close(srv, con);

				if (srv->srvconf.log_state_handling) {
					log_error_write(srv, __FILE__, __LINE__, "sd",
							"connection closed for fd", con->fd);
				}
			}

			break;
		case CON_STATE_READ_POST:
		case CON_STATE_READ:
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd, connection_get_state(con->state));
			}

			connection_handle_read_state(srv, con);
			break;
		case CON_STATE_WRITE:
			if (srv->srvconf.log_state_handling) {
				log_error_write(srv, __FILE__, __LINE__, "sds",
						"state for fd", con->fd, connection_get_state(con->state));
			}

			/* only try to write if we have something in the queue */
			if (!chunkqueue_is_empty(con->write_queue)) {
#if 0
				log_error_write(srv, __FILE__, __LINE__, "dsd",
						con->fd,
						"packets to write:",
						con->write_queue->used);
#endif
			}
			if (!chunkqueue_is_empty(con->write_queue) && con->is_writable) {
				if (-1 == connection_handle_write(srv, con)) {
					log_error_write(srv, __FILE__, __LINE__, "ds",
							con->fd,
							"handle write failed.");
					connection_set_state(srv, con, CON_STATE_ERROR);
				}
			}

			break;
		case CON_STATE_ERROR: /* transient */

			/* even if the connection was dropped we still have to write it to the access log */
			if (con->http_status) {
				plugins_call_handle_request_done(srv, con);
			}
#ifdef USE_OPENSSL
			if (srv_sock->is_ssl) {
				int ret, ssl_r;
				unsigned long err;
				ERR_clear_error();
				switch ((ret = SSL_shutdown(con->ssl))) {
				case 1:
					/* ok */
					break;
				case 0:
					ERR_clear_error();
					if (-1 != (ret = SSL_shutdown(con->ssl))) break;

					/* fall through */
				default:

					switch ((ssl_r = SSL_get_error(con->ssl, ret))) {
					case SSL_ERROR_WANT_WRITE:
					case SSL_ERROR_WANT_READ:
						break;
					case SSL_ERROR_SYSCALL:
						/* perhaps we have an error waiting in our error-queue */
						if (0 != (err = ERR_get_error())) {
							do {
								log_error_write(srv, __FILE__, __LINE__, "sdds", "SSL:",
										ssl_r, ret,
										ERR_error_string(err, NULL));
							} while((err = ERR_get_error()));
						} else if (errno != 0) { /* ssl bug (see lighttpd ticket #2213): sometimes errno == 0 */
							switch(errno) {
							case EPIPE:
							case ECONNRESET:
								break;
							default:
								log_error_write(srv, __FILE__, __LINE__, "sddds", "SSL (error):",
									ssl_r, ret, errno,
									strerror(errno));
								break;
							}
						}

						break;
					default:
						while((err = ERR_get_error())) {
							log_error_write(srv, __FILE__, __LINE__, "sdds", "SSL:",
									ssl_r, ret,
									ERR_error_string(err, NULL));
						}

						break;
					}
				}
			}
			ERR_clear_error();
#endif

			switch(con->mode) {
			case DIRECT:
#if 0
				log_error_write(srv, __FILE__, __LINE__, "sd",
						"emergency exit: direct",
						con->fd);
#endif
				break;
			default:
				switch(r = plugins_call_handle_connection_close(srv, con)) {
				case HANDLER_GO_ON:
				case HANDLER_FINISHED:
					break;
				default:
					log_error_write(srv, __FILE__, __LINE__, "sd", "unhandling return value", r);
					break;
				}
				break;
			}

			connection_reset(srv, con);

			/* close the connection */
			if ((0 == shutdown(con->fd, SHUT_WR))) {
				con->close_timeout_ts = srv->cur_ts;
				connection_set_state(srv, con, CON_STATE_CLOSE);

				if (srv->srvconf.log_state_handling) {
					log_error_write(srv, __FILE__, __LINE__, "sd",
							"shutdown for fd", con->fd);
				}
			} else {
				connection_close(srv, con);
			}

			con->keep_alive = 0;

			srv->con_closed++;

			break;
		default:
			log_error_write(srv, __FILE__, __LINE__, "sdd",
					"unknown state:", con->fd, con->state);

			break;
		}

		if (done == -1) {
			done = 0;
		} else if (ostate == con->state) {
			done = 1;
		}
	}

	if (srv->srvconf.log_state_handling) {
		log_error_write(srv, __FILE__, __LINE__, "sds",
				"state at exit:",
				con->fd,
				connection_get_state(con->state));
	}

	switch(con->state) {
	case CON_STATE_READ_POST:
	case CON_STATE_READ:
	case CON_STATE_CLOSE:
		fdevent_event_set(srv->ev, &(con->fde_ndx), con->fd, FDEVENT_IN);
		break;
	case CON_STATE_WRITE:
		/* request write-fdevent only if we really need it
		 * - if we have data to write
		 * - if the socket is not writable yet
		 */
		if (!chunkqueue_is_empty(con->write_queue) &&
		    (con->is_writable == 0) &&
		    (con->traffic_limit_reached == 0)) {
			fdevent_event_set(srv->ev, &(con->fde_ndx), con->fd, FDEVENT_OUT);
		} else {
			fdevent_event_del(srv->ev, &(con->fde_ndx), con->fd);
		}
		break;
	default:
		fdevent_event_del(srv->ev, &(con->fde_ndx), con->fd);
		break;
	}

	return 0;
}
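
/*
 * A minimal, hypothetical sketch (not lighttpd code) of the linger-on-close
 * behaviour of CON_STATE_CLOSE above: half-close the socket with
 * shutdown(SHUT_WR), keep draining whatever the client still sends, and only
 * close() once the peer is done or a timeout has expired. The timeout constant
 * and the return convention are assumptions for illustration.
 */
#include <errno.h>
#include <time.h>
#include <unistd.h>
#include <sys/socket.h>

#define LINGER_TIMEOUT 5 /* seconds, illustrative */

/* returns 1 once the fd has been closed, 0 if it should be polled again */
static int linger_close(int fd, time_t close_started) {
	char buf[1024];
	ssize_t len = read(fd, buf, sizeof(buf));

	/* EOF or a hard error: no need to wait for the timeout any longer */
	if (len == 0 || (len < 0 && errno != EAGAIN && errno != EINTR)) {
		close(fd);
		return 1;
	}

	if (time(NULL) - close_started > LINGER_TIMEOUT) {
		close(fd);
		return 1;
	}

	return 0; /* keep the fd registered for read events and come back later */
}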