Example #1
File: ptl_conn.c  Project: foool/portals4
/**
 * Destroys all connections belonging to an NI
 */
void destroy_conns(ni_t *ni)
{
    if (ni->options & PTL_NI_LOGICAL) {
        if (ni->logical.mapping) {
            int i;
            const int map_size = ni->logical.map_size;

            /* Destroy active connections. */
            for (i = 0; i < map_size; i++) {
                entry_t *entry = &ni->logical.rank_table[i];
                destroy_conn(entry->connect);
                entry->connect = NULL;
            }
        }
    } else {
#ifdef HAVE_TDESTROY
        tdestroy(ni->physical.tree, destroy_conn);
#else
        while (ni->physical.tree != NULL) {
            destroy_conn(*(void **)ni->physical.tree);
            tdelete(*(void **)ni->physical.tree, &ni->physical.tree,
                    compare_conn_id);
        }
#endif
    }
}
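
The #else branch above is the classic POSIX fallback for platforms without the GNU tdestroy() extension: destroy whatever item sits at the root, tdelete() it, and repeat until the tree is empty. A minimal, self-contained sketch of that pattern, assuming only POSIX <search.h> (the integer payload is illustrative):

#include <search.h>
#include <stdlib.h>

static int compare_int(const void *a, const void *b)
{
    return *(const int *)a - *(const int *)b;
}

int main(void)
{
    void *root = NULL;
    int i;

    for (i = 0; i < 4; i++) {
        int *v = malloc(sizeof(*v));
        *v = i;
        tsearch(v, &root, compare_int);
    }

    /* A tsearch() tree node's first member points at the stored item,
     * so *(void **)root is the item currently at the root. */
    while (root != NULL) {
        int *v = *(int **)root;
        tdelete(v, &root, compare_int);  /* unlink the node first...  */
        free(v);                         /* ...then release the item  */
    }
    return 0;
}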
Example #2
static void
accept_cb(struct ev_loop *loop, ev_uinet *w, int revents)
{
	struct passive_context *passive = w->data;
	struct uinet_socket *newso = NULL;
	struct uinet_socket *newpeerso = NULL;
	struct connection_context *conn = NULL;
	struct connection_context *peerconn = NULL;
	int error;
	int batch_limit = 32;
	int processed = 0;

	while ((processed < batch_limit) &&
	       (UINET_EWOULDBLOCK != (error = uinet_soaccept(w->so, NULL, &newso)))) {
		processed++;

		if (0 == error) {
			newpeerso = NULL;
			conn = NULL;
			peerconn = NULL;

			if (passive->verbose)
				printf("accept succeeded\n");

			conn = create_conn(passive, newso, 1);
			if (NULL == conn)
				goto fail;

			newpeerso = uinet_sogetpassivepeer(newso);
			peerconn = create_conn(passive, newpeerso, 0);
			if (NULL == peerconn)
				goto fail;

			conn->peer = peerconn;
			peerconn->peer = conn;
			
			ev_uinet_start(loop, &conn->watcher);

			if (!passive->extract)
				ev_uinet_start(loop, &peerconn->watcher);

			passive->interface->num_sockets += 2;

			continue;
		fail:
			if (conn) destroy_conn(conn);
			if (newso) uinet_soclose(newso);
			if (newpeerso) uinet_soclose(newpeerso);
		}
	}

	if (processed > passive->interface->max_accept_batch)
		passive->interface->max_accept_batch = processed;
}
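
The loop above caps how many pending connections one callback invocation will accept, so a busy listener cannot monopolize the event loop. The same batching idiom with plain BSD sockets looks roughly like this (a sketch; handle_client() is a hypothetical per-connection setup routine, and the listening fd is assumed to be non-blocking):

#include <errno.h>
#include <sys/socket.h>

void handle_client(int fd);   /* hypothetical per-connection setup */

/* Drain a non-blocking listening socket, accepting at most
 * batch_limit connections per wakeup. */
int drain_accepts(int listen_fd, int batch_limit)
{
    int processed = 0;

    while (processed < batch_limit) {
        int fd = accept(listen_fd, NULL, NULL);
        if (fd < 0 && (errno == EWOULDBLOCK || errno == EAGAIN))
            break;            /* backlog is drained */
        processed++;          /* failed accepts count toward the batch too */
        if (fd >= 0)
            handle_client(fd);
    }
    return processed;
}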
Example #3
File: conns.c  Project: slayer/rt-n56u
/* net_closeconn:
 *  Closes a previously opened conn. Returns zero on success.
 */
int net_closeconn (NET_CONN *conn)
{
	struct __conn_list_t *ptr;

	MUTEX_LOCK(__libnet_internal__openconns);

	ptr = __libnet_internal__openconns->next;
	while (ptr && (ptr->conn != conn)) ptr = ptr->next;
	if (!ptr) {
		MUTEX_UNLOCK(__libnet_internal__openconns);
		return -1;
	}

	conn->driver->destroy_conn (conn);

	destroy_conn (ptr);

	MUTEX_UNLOCK(__libnet_internal__openconns);

	return 0;
}
Example #4
File: conns.c  Project: slayer/rt-n56u
/* net_openconn:
 *  Opens a conn over the specified network type `type'.  `addr' defines
 *  whether or not the conn should use a specific local association.
 *  The function returns a pointer to the NET_CONN struct created, or 
 *  NULL on error.
 */
NET_CONN *net_openconn (int type, const char *addr)
{
	struct __conn_list_t *ptr;

	MUTEX_LOCK(__libnet_internal__openconns);

	if ((ptr = create_conn())) {
		NET_CONN *conn = ptr->conn;
		conn->type = type;
		conn->driver = __libnet_internal__get_driver (type);

		if (conn->driver && !conn->driver->init_conn (conn, addr)) {
			MUTEX_UNLOCK(__libnet_internal__openconns);
			return conn;
		}

		destroy_conn (ptr);
	}

	MUTEX_UNLOCK(__libnet_internal__openconns);

	return NULL;
}
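
Examples #3 and #4 form the library's open/close pair: net_openconn() links a new entry into __libnet_internal__openconns, and net_closeconn() unlinks it again. A hypothetical caller might look like this (NET_DRIVER_SOCKETS stands in for whichever driver constant the libnet headers actually define):

NET_CONN *conn = net_openconn(NET_DRIVER_SOCKETS, NULL);  /* NULL: no fixed local address */
if (conn) {
    /* ... exchange data over the connection ... */
    net_closeconn(conn);  /* also removes it from the open-conns list */
}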
Example #5
File: reqs.c  Project: OPSF/uClinux
/*
 * This is the main driver for each connection. As you can tell, for the
 * first few steps we are using a blocking socket. If you remember the
 * older tinyproxy code, this used to be a very confusing state machine.
 * Well, no more! :) The sockets are only switched into nonblocking mode
 * when we start the relay portion. This makes most of the original
 * tinyproxy code, which was confusing, redundant. Hail progress.
 * 	- rjkaes
 */
void
handle_connection(int fd)
{
	struct conn_s *connptr;
	struct request_s *request = NULL;
	hashmap_t hashofheaders = NULL;

	char peer_ipaddr[PEER_IP_LENGTH];
	char peer_string[PEER_STRING_LENGTH];

	getpeer_information(fd, peer_ipaddr, peer_string);

	log_message(LOG_CONN, "Connect (file descriptor %d): %s [%s]",
		    fd, peer_string, peer_ipaddr);

	connptr = initialize_conn(fd, peer_ipaddr, peer_string);
	if (!connptr) {
		close(fd);
		return;
	}

	if (check_acl(fd, peer_ipaddr, peer_string) <= 0) {
		update_stats(STAT_DENIED);
		indicate_http_error(connptr, 403, "Access denied",
				    "detail", "The administrator of this proxy has not configured it to service requests from your host.",
				    NULL);
		send_http_error_message(connptr);
		destroy_conn(connptr);
		return;
	}

	if (read_request_line(connptr) < 0) {
		update_stats(STAT_BADCONN);
		indicate_http_error(connptr, 408, "Timeout",
				    "detail", "Server timeout waiting for the HTTP request from the client.",
				    NULL);
		send_http_error_message(connptr);
		destroy_conn(connptr);
		return;
	}

	/*
	 * The "hashofheaders" store the client's headers.
	 */
	if (!(hashofheaders = hashmap_create(HEADER_BUCKETS))) {
		update_stats(STAT_BADCONN);
		indicate_http_error(connptr, 503, "Internal error",
				    "detail", "An internal server error occurred while processing your request.  Please contact the administrator.",
				    NULL);
		send_http_error_message(connptr);
		destroy_conn(connptr);
		return;
	}

	/*
	 * Get all the headers from the client in a big hash.
	 */
	if (get_all_headers(connptr->client_fd, hashofheaders) < 0) {
		log_message(LOG_WARNING, "Could not retrieve all the headers from the client");
		hashmap_delete(hashofheaders);
		update_stats(STAT_BADCONN);
		destroy_conn(connptr);
		return;
	}

	request = process_request(connptr, hashofheaders);
	if (!request) {
		if (!connptr->error_variables && !connptr->show_stats) {
			update_stats(STAT_BADCONN);
			destroy_conn(connptr);
			hashmap_delete(hashofheaders);
			return;
		}
		goto send_error;
	}

	connptr->upstream_proxy = UPSTREAM_HOST(request->host);
	if (connptr->upstream_proxy != NULL) {
		if (connect_to_upstream(connptr, request) < 0) {
			goto send_error;
		}
	} else {
		connptr->server_fd = opensock(request->host, request->port);
		if (connptr->server_fd < 0) {
			indicate_http_error(connptr, 500, "Unable to connect",
					    "detail", PACKAGE " was unable to connect to the remote web server.",
					    "error", strerror(errno),
					    NULL);
			goto send_error;
		}

		log_message(LOG_CONN,
			    "Established connection to host \"%s\" using file descriptor %d.",
			    request->host, connptr->server_fd);

		if (!connptr->connect_method)
			establish_http_connection(connptr, request);
	}

send_error:
	free_request_struct(request);

	if (process_client_headers(connptr, hashofheaders) < 0) {
		update_stats(STAT_BADCONN);
		if (!connptr->error_variables) {
			hashmap_delete(hashofheaders);
			destroy_conn(connptr);
			return;
		}
	}
	hashmap_delete(hashofheaders);

	if (connptr->error_variables) {
		send_http_error_message(connptr);
		destroy_conn(connptr);
		return;
	} else if (connptr->show_stats) {
		showstats(connptr);
		destroy_conn(connptr);
		return;
	}

	if (!connptr->connect_method || (connptr->upstream_proxy != NULL)) {
		if (process_server_headers(connptr) < 0) {
			if (connptr->error_variables)
				send_http_error_message(connptr);

			update_stats(STAT_BADCONN);
			destroy_conn(connptr);
			return;
		}
	} else {
		if (send_ssl_response(connptr) < 0) {
			log_message(LOG_ERR,
				    "handle_connection: Could not send SSL greeting to client.");
			update_stats(STAT_BADCONN);
			destroy_conn(connptr);
			return;
		}
	}

	relay_connection(connptr);

	log_message(LOG_INFO, "Closed connection between local client (fd:%d) and remote client (fd:%d)",
		    connptr->client_fd, connptr->server_fd);

	/*
	 * All done... close everything and go home... :)
	 */
	destroy_conn(connptr);
	return;
}
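
The header comment's point about switching to non-blocking mode only for the relay phase typically reduces to a single fcntl() toggle applied to both file descriptors before relay_connection() starts multiplexing them. A minimal sketch of such a helper (the name is illustrative):

#include <fcntl.h>

/* Put an fd into non-blocking mode, preserving its other flags.
 * Returns -1 on failure. */
static int socket_nonblocking(int fd)
{
    int flags = fcntl(fd, F_GETFL, 0);

    if (flags < 0)
        return -1;
    return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}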
Example #6
/*
 * The passive http extraction code works by alternately parsing the
 * passively reconstructed request and response streams.  The same callback
 * (below) is used to drive the parsing of each stream.  Parsing begins with
 * the request stream, and once a complete request has been parsed, the
 * parser and read watcher for the request stream are paused and the parser
 * and read watcher for the response stream are activated.  Once an entire
 * response is parsed, the parser and read watcher for the response stream
 * are paused, and the parser and read watcher for the request stream are
 * activated.  Along the way, response bodies that match the supplied list
 * of content types are extracted to files.
 *
 * This is example code whose purpose is to demonstrate upper layer protocol
 * processing using libuinet passive sockets functionality.  Little to no
 * attempt is made to deal with a number of ugly realities involved in
 * robustly parsing http streams in the wild.
 */
static void
passive_extract_cb(struct ev_loop *loop, ev_uinet *w, int revents)
{
	struct connection_context *conn = (struct connection_context *)w->data;
	struct uinet_iovec iov;
	struct uinet_uio uio;
	int max_read;
	int read_size;
	int bytes_read;
	int error;
	int flags;
	size_t nparsed;

	max_read = uinet_soreadable(w->so, 0);
	if (max_read <= 0) {
		/* the watcher should never be invoked if there is no error and there are no bytes to be read */
		assert(max_read != 0);

		/*
		 * There are no more complete requests/responses to be had, shut everything down.
		 */
		if (conn->verbose)
			printf("%s: can't read, closing\n", conn->label);
		goto err;
	} else {
		read_size = imin(max_read, conn->buffer_size - conn->buffer_index);

		uio.uio_iov = &iov;
		iov.iov_base = &conn->buffer[conn->buffer_index];
		iov.iov_len = read_size;
		uio.uio_iovcnt = 1;
		uio.uio_offset = 0;
		uio.uio_resid = read_size;
		flags = UINET_MSG_HOLE_BREAK;

		error = uinet_soreceive(w->so, NULL, &uio, &flags);
		if (0 != error) {
			printf("%s: read error (%d), closing\n", conn->label, error);
			goto err;
		}

		if (flags & UINET_MSG_HOLE_BREAK) {
			printf("%s: hole in data, closing connections\n", conn->label);
			goto err;
		}

		bytes_read = read_size - uio.uio_resid;
		conn->buffer_count += bytes_read;
		conn->bytes_read += bytes_read;
		
		do {
			passive_extract_parse_buffer(conn);

			if (HTTP_PARSER_ERRNO(conn->parser) != HPE_OK) {
				if (HTTP_PARSER_ERRNO(conn->parser) == HPE_PAUSED) {
					if (conn->verbose > 1)
						printf("%s: completed parsing request or response\n", conn->label);
					http_parser_pause(conn->peer->parser, 0);
					passive_extract_parse_buffer(conn->peer);
					if (HTTP_PARSER_ERRNO(conn->peer->parser) == HPE_OK) {
						if (conn->verbose > 1)
							printf("%s: peer needs more data\n", conn->label);
						/* Peer parser needs more data */
						ev_uinet_stop(conn->server->loop, &conn->watcher);
						ev_uinet_start(conn->server->loop, &conn->peer->watcher);
						break;
					} else if (HTTP_PARSER_ERRNO(conn->peer->parser) != HPE_PAUSED) {
						printf("Peer parse failure %s, closing connections\n",
						       http_errno_name(HTTP_PARSER_ERRNO(conn->peer->parser)));
						goto err;
					} else {
						if (conn->verbose > 1)
							printf("%s: peer completed parsing request or response\n", conn->label);
						/*
						 * The other parser has paused, so it's time for us to continue
						 * parsing/receiving.
						 */
						http_parser_pause(conn->parser, 0);
					}
				} else {
					printf("Parse failure %s, closing connections\n",
					       http_errno_name(HTTP_PARSER_ERRNO(conn->parser)));
					goto err;
				}
			}
		} while (conn->buffer_count);
	}

	return;
err:
	/*
	 * Deliver EOS to each parser.  If a parser is paused or otherwise
	 * in an error state, no work will be done.  The main reason for
	 * doing this is to correctly handle the case where response parsing
	 * requires an EOS to complete.  Under such circumstances, one of
	 * the calls below will complete the work.
	 */
	http_parser_execute(conn->parser, conn->parser_settings, NULL, 0);
	http_parser_execute(conn->peer->parser, conn->peer->parser_settings, NULL, 0);

	destroy_conn(conn->peer);
	destroy_conn(conn);
}
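
The pause/resume ping-pong above relies on the http-parser convention that pausing a parser makes http_parser_execute() stop early and report HPE_PAUSED, which this example treats as the "message complete, switch streams" signal. A sketch of the callback side, assuming the joyent/nodejs http-parser API these examples appear to use:

#include "http_parser.h"

/* Registered in http_parser_settings as .on_message_complete. */
static int on_message_complete(http_parser *parser)
{
	/* Pausing here makes the surrounding http_parser_execute() call
	 * return early with HTTP_PARSER_ERRNO(parser) == HPE_PAUSED; the
	 * callback above then resumes the peer via http_parser_pause(..., 0). */
	http_parser_pause(parser, 1);
	return 0;
}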
Example #7
static void
passive_receive_cb(struct ev_loop *loop, ev_uinet *w, int revents)
{
	struct connection_context *conn = (struct connection_context *)w->data;
#define BUFFER_SIZE (64*1024)
	uint8_t buffer[BUFFER_SIZE];
	struct uinet_iovec iov;
	struct uinet_uio uio;
	int max_read;
	int read_size;
	int bytes_read;
	int error;
	int flags;
	int i;
	int print_threshold = 10;
	int printable;
	int skipped;

	max_read = uinet_soreadable(w->so, 0);
	if (max_read <= 0) {
		/* the watcher should never be invoked if there is no error and there are no bytes to be read */
		assert(max_read != 0);
		if (conn->verbose)
			printf("%s: can't read, closing\n", conn->label);
		goto err;
	} else {
		read_size = imin(max_read, BUFFER_SIZE - 1);

		uio.uio_iov = &iov;
		iov.iov_base = buffer;
		iov.iov_len = read_size;
		uio.uio_iovcnt = 1;
		uio.uio_offset = 0;
		uio.uio_resid = read_size;
		flags = UINET_MSG_HOLE_BREAK;

		error = uinet_soreceive(w->so, NULL, &uio, &flags);
		if (0 != error) {
			printf("%s: read error (%d), closing\n", conn->label, error);
			goto err;
		}

		bytes_read = read_size - uio.uio_resid;

		conn->bytes_read += bytes_read;

		if (conn->verbose > 2)
			print_tcp_state(w->so, conn->label);

		if (conn->verbose > 1) {
			printf("========================================================================================\n");
		}

		if (conn->verbose)
			printf("To %s (%u bytes, %llu total, %s)\n", conn->label, bytes_read,
			       (unsigned long long)conn->bytes_read, flags & UINET_MSG_HOLE_BREAK ? "HOLE" : "normal");
		
		if (conn->verbose > 1) {
			buffer[bytes_read] = '\0';
			printf("----------------------------------------------------------------------------------------\n");
			skipped = 0;
			printable = 0;
			for (i = 0; i < bytes_read; i++) {
				if ((buffer[i] >= 0x20 && buffer[i] <= 0x7e) || buffer[i] == 0x0a || buffer[i] == 0x0d || buffer[i] == 0x09) {
					printable++;
				} else {
					/*
					 * Print on printable-to-unprintable
					 * transition if enough consecutive
					 * printable chars were seen.
					 */
					if (printable >= print_threshold) {
						if (skipped) {
							printf("<%u>", skipped);
						}
						buffer[i] = '\0';
						printf("%s", &buffer[i - printable]);
					} else {
						skipped += printable;
					}
					printable = 0;
					skipped++;
				}
			}
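			/* Flush the pending skip count and the trailing printable run. */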
			if (skipped) {
				printf("<%u>", skipped);
			}
			buffer[i] = '\0';
			printf("%s", &buffer[i - printable]);
			printf("\n");
			printf("========================================================================================\n");
		}
	}

	return;

err:
	destroy_conn(conn);
}
Example #8
static void
passive_accept_cb(struct ev_loop *loop, ev_uinet *w, int revents)
{
	struct uinet_demo_passive *passive = w->data;
	struct uinet_socket *newso = NULL;
	struct uinet_socket *newpeerso = NULL;
	struct passive_connection *conn = NULL;
	struct passive_connection *peerconn = NULL;
	int error;
	unsigned int batch_limit = 32;
	unsigned int processed = 0;

	while ((processed < batch_limit) &&
	       (UINET_EWOULDBLOCK != (error = uinet_soaccept(w->so, NULL, &newso)))) {
		processed++;

		if (0 == error) {
			newpeerso = NULL;
			conn = NULL;
			peerconn = NULL;

			if (passive->cfg.verbose)
				printf("%s: Accept succeeded\n", passive->cfg.name);

			conn = create_conn(passive, newso, 1);
			if (NULL == conn) {
				printf("%s: Failed to alloc new connection context\n",
				       passive->cfg.name);
				goto fail;
			}

			newpeerso = uinet_sogetpassivepeer(newso);
			peerconn = create_conn(passive, newpeerso, 0);
			if (NULL == peerconn) {
				printf("%s: Failed to alloc new peer connection context\n",
				       passive->cfg.name);
				goto fail;
			}

			conn->peer = peerconn;
			peerconn->peer = conn;
			
			ev_uinet_start(loop, &conn->watcher);
			ev_uinet_start(loop, &peerconn->watcher);

			if (conn->verbose || (passive->cfg.copy_mode & UINET_IP_COPY_MODE_MAYBE))
				ev_uinet_start(loop, &conn->connected_watcher);

			if (peerconn->verbose || (passive->cfg.copy_mode & UINET_IP_COPY_MODE_MAYBE))
				ev_uinet_start(loop, &peerconn->connected_watcher);

			passive->num_sockets += 2;

			continue;
		fail:
			if (conn) destroy_conn(conn);
			if (newso) uinet_soclose(newso);
			if (newpeerso) uinet_soclose(newpeerso);
		}
	}

	if (processed > passive->max_accept_batch)
		passive->max_accept_batch = processed;
}