Example #1
File: vs_taggroup.c Project: verse/verse
int vs_handle_taggroup_create_ack(struct VS_CTX *vs_ctx,
		struct VSession *vsession,
		struct Generic_Cmd *cmd)
{
	struct VSNode *node;
	struct VSTagGroup *tg;
	struct VSEntityFollower *tg_foll;
	struct TagGroup_Create_Ack_Cmd *cmd_tg_create_ack = (struct TagGroup_Create_Ack_Cmd*)cmd;
	int all_created = 1, ret = 0;

	/* Try to find node */
	if((node = vs_node_find(vs_ctx, cmd_tg_create_ack->node_id)) == NULL) {
		v_print_log(VRS_PRINT_DEBUG_MSG, "%s() node (id: %d) not found\n",
				__func__, cmd_tg_create_ack->node_id);
		return 0;
	}

	pthread_mutex_lock(&node->mutex);

	if( (tg = vs_taggroup_find(node, cmd_tg_create_ack->taggroup_id)) == NULL) {
		v_print_log(VRS_PRINT_DEBUG_MSG,
				"%s() tag_group (id: %d) in node (id: %d) not found\n",
				__func__,
				cmd_tg_create_ack->taggroup_id,
				cmd_tg_create_ack->node_id);
	} else {

		ret = 1;

		for(tg_foll = tg->tg_folls.first;
				tg_foll != NULL;
				tg_foll = tg_foll->next)
		{
			/* Try to find follower of this tag group */
			if(tg_foll->node_sub->session->session_id == vsession->session_id) {
				/* Switch from state CREATING to state CREATED */
				if(tg_foll->state == ENTITY_CREATING) {
					tg_foll->state = ENTITY_CREATED;
				}

				/* If the tag group is in the state DELETING, then it is now
				 * possible to send the tag_group_destroy command to the client,
				 * because the client knows about this tag group now */
				if(tg->state == ENTITY_DELETING) {
					struct Generic_Cmd *taggroup_destroy_cmd = v_taggroup_destroy_create(node->id, tg->id);

					/* Push this command to the outgoing queue */
					if( taggroup_destroy_cmd != NULL &&
							v_out_queue_push_tail(tg_foll->node_sub->session->out_queue,
									0,
									tg_foll->node_sub->prio,
									taggroup_destroy_cmd) == 1) {
						tg_foll->state = ENTITY_DELETING;
					} else {
						v_print_log(VRS_PRINT_DEBUG_MSG,
								"taggroup_destroy (node_id: %d, tg_id: %d) wasn't added to the queue\n",
								node->id, tg->id);
						ret = 0;
					}
				}
			} else {
				if(tg_foll->state != ENTITY_CREATED) {
					all_created = 0;
				}
			}
		}

		if(all_created == 1) {
			tg->state = ENTITY_CREATED;
		}
	}

	pthread_mutex_unlock(&node->mutex);

	return ret;
}
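
Note: the handler above only flips per-follower state flags. As an illustration only, the tiny sketch below summarizes the life cycle it drives; the ENTITY_CREATING, ENTITY_CREATED and ENTITY_DELETING states appear in the snippet, while the enum layout and helper names are assumptions, not the verse definitions.

/* Illustrative sketch of the follower life cycle driven by taggroup
 * create/destroy acknowledgments (assumed enum, not the verse code). */
enum sketch_entity_state {
	SKETCH_ENTITY_CREATING,	/* taggroup_create sent, not yet acknowledged */
	SKETCH_ENTITY_CREATED,	/* client acknowledged taggroup_create */
	SKETCH_ENTITY_DELETING	/* taggroup_destroy queued for the client */
};

/* Transition applied for one follower when a create ack arrives. */
static enum sketch_entity_state sketch_on_create_ack(enum sketch_entity_state state,
		int taggroup_is_being_deleted, int destroy_cmd_queued)
{
	if(state == SKETCH_ENTITY_CREATING) {
		state = SKETCH_ENTITY_CREATED;
	}
	/* Once the client knows about the tag group, the destroy command may follow. */
	if(taggroup_is_being_deleted && destroy_cmd_queued) {
		state = SKETCH_ENTITY_DELETING;
	}
	return state;
}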
Example #2
/**
 * \brief This function sends packets in the OPEN and CLOSEREQ states.
 */
int send_packet_in_OPEN_CLOSEREQ_state(struct vContext *C)
{
	struct VDgramConn *vconn = CTX_current_dgram_conn(C);
	struct IO_CTX *io_ctx = CTX_io_ctx(C);
	struct VSession *vsession = CTX_current_session(C);
	struct VPacket *s_packet = CTX_s_packet(C);
	struct VSent_Packet *sent_packet = NULL;
	unsigned short buffer_pos = 0;
	struct timeval tv;
	int ret, keep_alive_packet = -1, full_packet = 0;
	int error_num;
	uint16 swin, prio_win, sent_size = 0;
	uint32 rwin;
	int cmd_rank = 0;

	/* Verse packet header */
	s_packet->header.version = 1;

	/* Clear header flags */
	s_packet->header.flags = 0;

	/* Check if it is necessary to send payload packet */
	ret = check_pay_flag(C);
	if(ret!=0) {
		s_packet->header.flags |= PAY_FLAG;
		if(ret==2) {
			keep_alive_packet = 1;
		}
	}

	/* When server is in CLOSEREQ state, then FIN flag should be set up */
	if(vconn->host_state == UDP_SERVER_STATE_CLOSEREQ) {
		s_packet->header.flags |= FIN_FLAG;
	}

	/* Check if it is necessary to send acknowledgment of received payload
	 * packet */
	ret = check_ack_nak_flag(C);
	if(ret==1) {
		s_packet->header.flags |= ACK_FLAG;

		/* Update last acknowledged Payload packet */
		vconn->last_acked_pay = vconn->last_r_pay;

		/* Add ACK and NAK commands from the list of ACK and NAK commands to the
		 * packet (only max count of ACK and NAK commands could be added to
		 * the packet) */
		for(cmd_rank = 0;
				cmd_rank < vconn->ack_nak.count &&
				cmd_rank < MAX_SYSTEM_COMMAND_COUNT;
				cmd_rank++)
		{
			s_packet->sys_cmd[cmd_rank].ack_cmd.id = vconn->ack_nak.cmds[cmd_rank].id;
			s_packet->sys_cmd[cmd_rank].ack_cmd.pay_id = vconn->ack_nak.cmds[cmd_rank].pay_id;
		}
		s_packet->sys_cmd[cmd_rank].cmd.id = CMD_RESERVED_ID;

	}

	/* If there is no need to send Payload or AckNak packet, then cancel
	 * sending of packet */
	if(! ((s_packet->header.flags & PAY_FLAG) ||
			(s_packet->header.flags & ACK_FLAG)) ) return SEND_PACKET_CANCELED;

	s_packet->header.flags |= ANK_FLAG;
	s_packet->header.ank_id = vconn->ank_id;

	/* Compute current windows for flow control and congestion control */
	set_host_rwin(C);
	set_host_cwin(C);

	/* Set window of flow control that will be sent to the receiver */
	rwin = vconn->rwin_host >> vconn->rwin_host_scale;
	s_packet->header.window = (unsigned short)((rwin > 0xFFFF) ? 0xFFFF : rwin);
	/*printf("\t---real window: %d---\n", s_packet->header.window);*/

	/* Compute how much data could be sent without congesting the receiver */
	rwin = vconn->rwin_peer - vconn->sent_size;

	/* Select smallest window for sending (congestion control window or flow control window)*/
	swin = (vconn->cwin < rwin) ? vconn->cwin : rwin;

	/* Set up Payload ID, when there is need to send payload packet */
	if(s_packet->header.flags & PAY_FLAG)
		s_packet->header.payload_id = vconn->host_id + vconn->count_s_pay;
	else
		s_packet->header.payload_id  = 0;

	/* Set up AckNak ID, when there are some ACK or NAK commands in the packet */
	if(s_packet->header.flags & ACK_FLAG)
		s_packet->header.ack_nak_id = vconn->count_s_ack;
	else
		s_packet->header.ack_nak_id = 0;

	/* When the negotiated FPS differs from the currently used FPS, then pack
	 * a negotiate command for FPS */
	if(vsession->fps_host != vsession->fps_peer) {
		cmd_rank += v_add_negotiate_cmd(s_packet->sys_cmd, cmd_rank,
				CMD_CHANGE_L_ID, FTR_FPS, &vsession->fps_host, NULL);
	} else {
		if(vsession->tmp_flags & SYS_CMD_NEGOTIATE_FPS) {
			cmd_rank += v_add_negotiate_cmd(s_packet->sys_cmd, cmd_rank,
					CMD_CONFIRM_L_ID, FTR_FPS, &vsession->fps_peer, NULL);
			/* Send confirmation only once for received system command */
			vsession->tmp_flags &= ~SYS_CMD_NEGOTIATE_FPS;
		}
	}

	v_print_send_packet(C);

	/* Fill buffer */
	buffer_pos += v_pack_packet_header(s_packet, &io_ctx->buf[buffer_pos]);
	buffer_pos += v_pack_dgram_system_commands(s_packet, &io_ctx->buf[buffer_pos]);

	/* When this is not pure keep alive packet */
	if(s_packet->header.flags & PAY_FLAG) {

		sent_packet = v_packet_history_add_packet(&vconn->packet_history, s_packet->header.payload_id);

		assert(sent_packet!=NULL);

		if(keep_alive_packet != 1) {
			real32 prio_sum_high, prio_sum_low, r_prio;
			uint32 prio_count;
			int16 prio, max_prio, min_prio;
			uint16 tot_cmd_size;

			/* Print outgoing command with green color */
			if(is_log_level(VRS_PRINT_DEBUG_MSG)) {
				printf("%c[%d;%dm", 27, 1, 32);
			}

			max_prio = v_out_queue_get_max_prio(vsession->out_queue);
			min_prio = v_out_queue_get_min_prio(vsession->out_queue);

			prio_sum_high = v_out_queue_get_prio_sum_high(vsession->out_queue);
			prio_sum_low = v_out_queue_get_prio_sum_low(vsession->out_queue);

			v_print_log(VRS_PRINT_DEBUG_MSG, "Packing prio queues, cmd count: %d\n", v_out_queue_get_count(vsession->out_queue));

			/* Go through all priorities and pick commands from priority queues */
			for(prio = max_prio; prio >= min_prio; prio--)
			{
				/* TODO: Add better check here */
				if(prio <= VRS_DEFAULT_PRIORITY && buffer_pos >= vconn->io_ctx.mtu) {
					break;
				}

				prio_count = v_out_queue_get_count_prio(vsession->out_queue, prio);

				if(prio_count > 0) {
					r_prio = v_out_queue_get_prio(vsession->out_queue, prio);

					/* Compute size of buffer that could be occupied by
					 * commands from this queue */
					if(prio >= VRS_DEFAULT_PRIORITY) {
						prio_win = ((swin - buffer_pos)*r_prio)/prio_sum_high;
					} else {
						prio_win = ((swin - buffer_pos)*r_prio)/prio_sum_low;
					}

					/* Debug print */
					v_print_log(VRS_PRINT_DEBUG_MSG, "Queue: %d, count: %d, r_prio: %6.3f, prio_win: %d\n",
							prio, prio_count, r_prio, prio_win);

					/* Get total size of commands that were stored in queue (sent_size) */
					tot_cmd_size = 0;
					/* Pack commands from queues with high priority to the buffer */
					buffer_pos = pack_prio_queue(C, sent_packet, buffer_pos, prio, prio_win, &tot_cmd_size);
					sent_size += tot_cmd_size;
				}
			}

			/* Use default color for output */
			if(is_log_level(VRS_PRINT_DEBUG_MSG)) {
				printf("%c[%dm", 27, 0);
			}
		} else {
			if(is_log_level(VRS_PRINT_DEBUG_MSG)) {
				printf("%c[%d;%dm", 27, 1, 32);
				v_print_log(VRS_PRINT_DEBUG_MSG, "Keep alive packet\n");
				printf("%c[%dm", 27, 0);
			}
		}
	}

	/* Update sent_size */
	vconn->sent_size += sent_size;

	io_ctx->buf_size = buffer_pos;

	/* Send buffer */
	ret = v_send_packet(io_ctx, &error_num);

	if(ret==SEND_PACKET_SUCCESS) {
		gettimeofday(&tv, NULL);

		/* Update time of sending last payload packet */
		if(s_packet->header.flags & PAY_FLAG) {
			vconn->tv_pay_send.tv_sec = tv.tv_sec;
			vconn->tv_pay_send.tv_usec = tv.tv_usec;
			/* Store time of sending packet in history of sent packets. It is
			 * used for computing RTT and SRTT */
			if(sent_packet != NULL) {
				sent_packet->tv.tv_sec = tv.tv_sec;
				sent_packet->tv.tv_usec = tv.tv_usec;
			}
		}

		/* Update time of sending last acknowledgment packet */
		if(s_packet->header.flags & ACK_FLAG) {
			vconn->tv_ack_send.tv_sec = tv.tv_sec;
			vconn->tv_ack_send.tv_usec = tv.tv_usec;
		}

		/* Update counter of sent packets */
		if(s_packet->header.flags & PAY_FLAG) vconn->count_s_pay++;
		if(s_packet->header.flags & ACK_FLAG) vconn->count_s_ack++;

		/* If the packet was full and there are still some pending data to
		 * send, then modify the returned value */
		if(full_packet == 1) {
			ret = SEND_PACKET_FULL;
		}
	} else {
		/* When packet wasn't sent, then remove this packet from history */
		if(sent_packet != NULL) {
			v_packet_history_rem_packet(C, s_packet->header.payload_id);
		}
	}

	/*v_print_packet_history(&vconn->packet_history);*/

	return ret;
}
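
Note: the window arithmetic above may be easier to follow with concrete numbers: the sender takes the smaller of the congestion window and the peer's remaining flow-control window, and then splits the free space in the buffer between priority queues in proportion to their priorities. The helper name and all numbers below are hypothetical; only the formulas mirror the snippet.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper mirroring send_packet_in_OPEN_CLOSEREQ_state():
 * the share of the remaining send window given to one priority queue. */
static uint16_t prio_window(uint16_t swin, uint16_t buffer_pos,
		float r_prio, float prio_sum)
{
	return (uint16_t)(((swin - buffer_pos) * r_prio) / prio_sum);
}

int main(void)
{
	uint32_t cwin = 4096, rwin_peer = 8192, sent_size = 1024;
	uint32_t rwin = rwin_peer - sent_size;		/* 7168: data the peer can still accept */
	uint16_t swin = (cwin < rwin) ? cwin : rwin;	/* 4096: smaller of the two windows */
	uint16_t buffer_pos = 512;			/* header + system commands already packed */

	/* Two high-priority queues with priorities 128 and 64 -> prio_sum_high = 192 */
	printf("queue A gets %u bytes\n", (unsigned)prio_window(swin, buffer_pos, 128.0f, 192.0f)); /* ~2389 */
	printf("queue B gets %u bytes\n", (unsigned)prio_window(swin, buffer_pos, 64.0f, 192.0f));  /* ~1194 */
	return 0;
}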
Example #3
File: vs_tag.c Project: verse/verse
/**
 * \brief This function creates new Verse Tag
 */
struct VSTag *vs_tag_create(struct VSTagGroup *tg,
                            uint16 tag_id,
                            uint8 data_type,
                            uint8 count,
                            uint16 custom_type)
{
    struct VSTag *tag = NULL;
    struct VBucket *tag_bucket;

    tag = (struct VSTag*)calloc(1, sizeof(struct VSTag));
    if(tag == NULL) {
        v_print_log(VRS_PRINT_DEBUG_MSG, "Out of memory.\n");
        return NULL;
    }

    /* Initialize new tag */
    vs_tag_init(tag);

    if(tag_id == RESERVED_TAG_ID) {
        /* Try to find first free id for tag */
        tag->id = tg->last_tag_id;
        while( v_hash_array_find_item(&tg->tags, tag) != NULL ) {
            tag->id++;

            if(tag->id > LAST_TAG_ID) {
                tag->id = FIRST_TAG_ID;
            }
        }
    } else {
        tag->id = tag_id;
    }
    tg->last_tag_id = tag->id;

    /* Try to add new Tag to the hashed linked list of tags */
    tag_bucket = v_hash_array_add_item(&tg->tags, (void*)tag, sizeof(struct VSTag));

    if(tag_bucket==NULL) {
        v_print_log(VRS_PRINT_DEBUG_MSG,
                    "Tag could not be added to tag group: %d.\n",
                    tg->id);
        free(tag);
        return NULL;
    }

    tag->data_type = data_type;
    tag->count = count;
    tag->custom_type = custom_type;

    /* Allocate memory for value (not for type string8) */
    switch(data_type) {
    case VRS_VALUE_TYPE_UINT8:
        tag->value = (void*)calloc(count, sizeof(uint8));
        break;
    case VRS_VALUE_TYPE_UINT16:
        tag->value = (void*)calloc(count, sizeof(uint16));
        break;
    case VRS_VALUE_TYPE_UINT32:
        tag->value = (void*)calloc(count, sizeof(uint32));
        break;
    case VRS_VALUE_TYPE_UINT64:
        tag->value = (void*)calloc(count, sizeof(uint64));
        break;
    case VRS_VALUE_TYPE_REAL16:
        tag->value = (void*)calloc(count, sizeof(real16));
        break;
    case VRS_VALUE_TYPE_REAL32:
        tag->value = (void*)calloc(count, sizeof(real32));
        break;
    case VRS_VALUE_TYPE_REAL64:
        tag->value = (void*)calloc(count, sizeof(real64));
        break;
    case VRS_VALUE_TYPE_STRING8:
        /* Memory for this type of tag is allocated, when value of
         * tag is set. */
        tag->value = NULL;
        break;
    default:
        break;
    }

    vs_taggroup_inc_version(tg);

    return tag;
}
Example #4
/*
 * \brief Performs simple HTTP handshake. *fd* is the file descriptor of the
 * connection to the client. This function returns 0 if it succeeds,
 * or returns -1.
 */
static int http_handshake(int sockfd)
{
	char header[16384], accept_key[29], res_header[256];
	char *keyhdstart, *keyhdend;
	size_t header_length = 0, res_header_sent = 0, res_header_length;
	ssize_t ret;
	fd_set set;
	struct timeval timeout_tv, start_tv, current_tv;

	/* Get current time */
	gettimeofday(&start_tv, NULL);

	/* Set default timeout */
	timeout_tv.tv_sec = VRS_TIMEOUT;
	timeout_tv.tv_usec = 0;

	/* Try to read whole header without blocking read, use select and
	 * timeout */
	while(1) {

		FD_ZERO(&set);
		FD_SET(sockfd, &set);

		if( (ret = select(sockfd + 1, &set, NULL, NULL, &timeout_tv)) == -1 ) {
			v_print_log(VRS_PRINT_ERROR, "%s:%s():%d select(): %s\n",
					__FILE__, __FUNCTION__,  __LINE__, strerror(errno));
			return -1;
		/* Was event on the listen socket */
		} else if(ret > 0) {
			if(FD_ISSET(sockfd, &set)) {
				ret = read(sockfd,
						header + header_length,
						sizeof(header) - header_length);

				if(ret == -1) {
					v_print_log(VRS_PRINT_ERROR, "read(): %s\n", strerror(errno));
					return -1;
				} else if(ret == 0) {
					v_print_log(VRS_PRINT_ERROR,
							"HTTP Handshake: Got EOF\n");
					return -1;
				} else {
					header_length += ret;
					/* Was end of HTTP header reached? */
					if(header_length >= 4 &&
							memcmp(header + header_length - 4, "\r\n\r\n", 4) == 0)
					{
						break;
					} else if(header_length == sizeof(header)) {
						v_print_log(VRS_PRINT_ERROR,
								"HTTP Handshake: Too large HTTP headers\n");
						return -1;
					}
				}
			}
		}

		gettimeofday(&current_tv, NULL);

		/* Update timeout */
		timeout_tv.tv_sec = VRS_TIMEOUT - (current_tv.tv_sec - start_tv.tv_sec);
		timeout_tv.tv_usec = 0;

		/* When there is no remaining time, then exit the handshake */
		if(timeout_tv.tv_sec <= 0) {
			v_print_log(VRS_PRINT_DEBUG_MSG,
					"HTTP Handshake: Timed out\n");
			return -1;
		}
	}

	header[header_length] = '\0';

	v_print_log(VRS_PRINT_DEBUG_MSG,
			"HTTP Handshake: received request: %s\n",
			header);

	/* Check if required HTTP headers were received in the request */

	/* Header has to contain field "Upgrade: websocket" */
	if(http_header_find_field_value(header, "Upgrade", "websocket") == NULL) {
		v_print_log(VRS_PRINT_ERROR,
				"HTTP Handshake: Missing required header field 'Upgrade: websocket'\n");
		return -1;
	}
	/* Header has to contain field "Connection: Upgrade" */
	if(http_header_find_field_value(header, "Connection", "Upgrade") == NULL) {
		v_print_log(VRS_PRINT_ERROR,
				"HTTP Handshake: Missing required header field 'Connection: Upgrade'\n");
		return -1;
	}
	/* Client has to send field Sec-WebSocket-Key in HTTP header */
	if( (keyhdstart = http_header_find_field_value(header, "Sec-WebSocket-Key",
			NULL)) == NULL)
	{
		v_print_log(VRS_PRINT_ERROR,
				"HTTP Handshake: Missing required header field 'Sec-WebSocket-Key: SOME_SECRET_KEY'\n");
		return -1;
	}
	/* Requested protocol name has to be equal to "v1.verse.tul.cz" */
	if( http_header_find_field_value(header, "Sec-WebSocket-Protocol",
			WEB_SOCKET_PROTO_NAME) == NULL)
	{
		v_print_log(VRS_PRINT_ERROR,
				"HTTP Handshake: Missing required header field 'Sec-WebSocket-Protocol: %s'\n",
				WEB_SOCKET_PROTO_NAME);
		return -1;
	}

	/* Check the length of WebSocket key */
	for(; *keyhdstart == ' '; ++keyhdstart);
	keyhdend = keyhdstart;
	for(; *keyhdend != '\r' && *keyhdend != ' '; ++keyhdend);
	if(keyhdend - keyhdstart != WS_CLIENT_KEY_LEN) {
		v_print_log(VRS_PRINT_ERROR,
				"HTTP Handshake: Invalid value in Sec-WebSocket-Key\n");
		return -1;
	}

	/* Create accepted key */
	create_accept_key(accept_key, keyhdstart);

	/* Create response for client */
	snprintf(res_header, sizeof(res_header),
			"HTTP/1.1 101 Switching Protocols\r\n"
			"Upgrade: websocket\r\n"
			"Connection: Upgrade\r\n"
			"Sec-WebSocket-Accept: %s\r\n"
			"Sec-WebSocket-Protocol: %s\r\n"
			"\r\n",
			accept_key,
			WEB_SOCKET_PROTO_NAME);

	/* Send response to the client */
	res_header_length = strlen(res_header);
	while(res_header_sent < res_header_length) {
		while((ret = write(sockfd, res_header + res_header_sent,
				res_header_length - res_header_sent)) == -1 &&
				errno == EINTR);
		if(ret == -1) {
			v_print_log(VRS_PRINT_ERROR, "write(): %s\n", strerror(errno));
			return -1;
		} else {
			res_header_sent += ret;
		}
	}

	v_print_log(VRS_PRINT_DEBUG_MSG, "HTTP Handshake: sent response\n");

	return 0;
}
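
Note: the create_accept_key() call above is not part of this listing. As a reference, such a function is commonly implemented per RFC 6455 as in the sketch below (assuming OpenSSL is available; the hard-coded 24 is the client key length, matching the WS_CLIENT_KEY_LEN check above, and the output needs 29 bytes, matching accept_key[29]). The real verse implementation may differ.

#include <string.h>
#include <openssl/sha.h>
#include <openssl/evp.h>

/* Sketch only: RFC 6455 accept key = base64( SHA-1( client_key + magic GUID ) ). */
static void sketch_create_accept_key(char *dst, const char *client_key)
{
	static const char magic[] = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
	unsigned char buf[24 + sizeof(magic) - 1];
	unsigned char sha1sum[SHA_DIGEST_LENGTH];

	memcpy(buf, client_key, 24);			/* 24 = base64 of the 16-byte nonce */
	memcpy(buf + 24, magic, sizeof(magic) - 1);	/* append GUID without the NUL */

	SHA1(buf, sizeof(buf), sha1sum);

	/* 20 bytes of SHA-1 encode to 28 base64 characters; EVP_EncodeBlock
	 * NUL-terminates, so dst must have room for 29 bytes. */
	EVP_EncodeBlock((unsigned char*)dst, sha1sum, SHA_DIGEST_LENGTH);
}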
Example #5
/**
 * \brief The function with WebSocket infinite loop
 */
void *vs_websocket_loop(void *arg)
{
	/* The vContext is passed as *user_data* in callback functions. */
	struct vContext *C = (struct vContext*)arg;
	struct VS_CTX *vs_ctx = CTX_server_ctx(C);
	struct IO_CTX *io_ctx = CTX_io_ctx(C);
	struct VStreamConn *stream_conn = CTX_current_stream_conn(C);
	struct VSession *vsession = CTX_current_session(C);
	struct VMessage *r_message=NULL, *s_message=NULL;
	wslay_event_context_ptr wslay_ctx;
	fd_set read_set, write_set;
	struct timeval tv;
	int ret, flags;
	unsigned int int_size;

	struct wslay_event_callbacks callbacks = {
			vs_recv_ws_callback_data,
			vs_send_ws_callback_data,
			NULL,
			NULL,
			NULL,
			NULL,
			vs_ws_recv_msg_callback
	};

	vsession->flags |= VRS_TP_WEBSOCKET;

	/* Set socket non-blocking */
	flags = fcntl(io_ctx->sockfd, F_GETFL, 0);
	if( fcntl(io_ctx->sockfd, F_SETFL, flags | O_NONBLOCK) == -1) {
		v_print_log(VRS_PRINT_ERROR, "fcntl(): %s\n", strerror(errno));
		goto end;
	}

	/* Listen for HTTP request from web client and try to do
	 * WebSocket handshake */
	if(http_handshake(io_ctx->sockfd) != 0 ) {
		goto end;
	}

	/* Try to get size of TCP buffer */
	int_size = sizeof(int_size);
	if( getsockopt(io_ctx->sockfd, SOL_SOCKET, SO_RCVBUF,
			(void *)&stream_conn->socket_buffer_size, &int_size) != 0)
	{
		v_print_log(VRS_PRINT_ERROR,
				"Unable to get TCP buffer size of WebSocket connection.\n");
		goto end;
	}

	r_message = (struct VMessage*)calloc(1, sizeof(struct VMessage));
	s_message = (struct VMessage*)calloc(1, sizeof(struct VMessage));

	if(r_message == NULL || s_message == NULL) {
		v_print_log(VRS_PRINT_ERROR, "Out of memory\n");
		goto end;
	}

	CTX_r_message_set(C, r_message);
	CTX_s_message_set(C, s_message);

	/* Try to initialize WebSocket server context */
	if(wslay_event_context_server_init(&wslay_ctx, &callbacks, C) != 0) {
		v_print_log(VRS_PRINT_ERROR,
				"Unable to initialize WebSocket server context\n");
		goto end;
	}

	/* Set initial state */
	stream_conn->host_state = TCP_SERVER_STATE_RESPOND_METHODS;

	if(is_log_level(VRS_PRINT_DEBUG_MSG)) {
		printf("%c[%d;%dm", 27, 1, 31);
		v_print_log(VRS_PRINT_DEBUG_MSG,
				"Server WebSocket state: RESPOND_methods\n");
		printf("%c[%dm", 27, 0);
	}

	/* "Never ending" loop */
	while(vsession->stream_conn->host_state != TCP_SERVER_STATE_CLOSED &&
			( wslay_event_want_read(wslay_ctx) == 1 ||
					wslay_event_want_write(wslay_ctx) == 1) )
	{

		/* When server is going to stop, then close connection with WebSocket
		 * client */
		if(vs_ctx->state != SERVER_STATE_READY) {

			v_print_log(VRS_PRINT_DEBUG_MSG,
					"Closing WebSocket connection: Server shutdown\n");

			stream_conn->host_state = TCP_SERVER_STATE_CLOSING;

			/* Try to close connection with WebSocket client */
			wslay_event_queue_close(wslay_ctx,
					WSLAY_CODE_GOING_AWAY,
					(uint8_t*)"Server shutdown",	/* Close message */
					15);	/* The length of the close message */
		}

		/* Initialize read set */
		FD_ZERO(&read_set);
		if(wslay_event_want_read(wslay_ctx) == 1) {
			/*
			v_print_log(VRS_PRINT_DEBUG_MSG,
					"Waiting for WebSocket message ...\n");
			*/
			FD_SET(io_ctx->sockfd, &read_set);
		}

		/* Initialize write set */
		FD_ZERO(&write_set);
		if(wslay_event_want_write(wslay_ctx) == 1) {
#if DEBUG_WEB_SOCKET
			v_print_log(VRS_PRINT_DEBUG_MSG,
					"Going to write message to WebSocket ...\n");
#endif
			FD_SET(io_ctx->sockfd, &write_set);
		}

		/* Set timeout for select() */
		if(stream_conn->host_state == TCP_SERVER_STATE_STREAM_OPEN) {
			/* Use negotiated FPS */
			tv.tv_sec = 0;
			tv.tv_usec = 1000000/vsession->fps_host;
		} else {
			/* The user has to send something within VRS_TIMEOUT seconds */
			tv.tv_sec = VRS_TIMEOUT;
			tv.tv_usec = 0;
		}

		if( (ret = select(io_ctx->sockfd + 1,
				&read_set,
				&write_set,
				NULL,			/* Don't care about exception */
				&tv)) == -1) {
			v_print_log(VRS_PRINT_ERROR, "%s:%s():%d select(): %s\n",
					__FILE__, __FUNCTION__,  __LINE__, strerror(errno));
			goto end;
			/* Was event on the listen socket */
		} else if(ret > 0) {
			if(FD_ISSET(io_ctx->sockfd, &read_set)) {
				if( wslay_event_recv(wslay_ctx) != 0 ) {
					goto end;
				}
			}
			if (FD_ISSET(io_ctx->sockfd, &write_set)) {
				if( wslay_event_send(wslay_ctx) != 0 ) {
					goto end;
				}
			}
		} else if(ret == 0 && stream_conn->host_state != TCP_SERVER_STATE_STREAM_OPEN) {
			/* When the handshake is not finished within VRS_TIMEOUT seconds, then
			 * close connection with the WebSocket client. */

			v_print_log(VRS_PRINT_DEBUG_MSG,
					"Closing WebSocket connection: Handshake timed-out\n");

			stream_conn->host_state = TCP_SERVER_STATE_CLOSING;

			wslay_event_queue_close(wslay_ctx,
					WSLAY_CODE_PROTOCOL_ERROR,
					(uint8_t*)"Handshake timed-out",	/* Close message */
					19);	/* The length of close message */
		}

		if(stream_conn->host_state == TCP_SERVER_STATE_STREAM_OPEN) {
			/* Check if there is any command in outgoing queue
			 * and eventually pack these commands to buffer */
			if((ret = v_STREAM_pack_message(C)) == 0 ) {
				goto end;
			}

			/* When at least one command was packed to buffer, then
			 * queue this buffer to WebSocket layer */
			if(ret == 1) {
				struct wslay_event_msg msgarg;
			    msgarg.opcode = WSLAY_BINARY_FRAME;
			    msgarg.msg = (uint8_t*)io_ctx->buf;
			    msgarg.msg_length = io_ctx->buf_size;
			    wslay_event_queue_msg(wslay_ctx, &msgarg);
			}
		}
	}

end:
	if(is_log_level(VRS_PRINT_DEBUG_MSG)) {
		printf("%c[%d;%dm", 27, 1, 31);
		v_print_log(VRS_PRINT_DEBUG_MSG, "Server WebSocket state: CLOSING\n");
		printf("%c[%dm", 27, 0);
	}

	/* Set up TCP CLOSING state (non-blocking) */
	vs_CLOSING(C);

	/* Receive and Send messages are not necessary any more */
	if(r_message != NULL) {
		free(r_message);
		r_message = NULL;
		CTX_r_message_set(C, NULL);
	}
	if(s_message != NULL) {
		free(s_message);
		s_message = NULL;
		CTX_s_message_set(C, NULL);
	}

	/* TCP connection is considered as CLOSED, but it is not possible to use
	 * this connection for other client */
	stream_conn->host_state = TCP_SERVER_STATE_CLOSED;

	/* NULL pointer at stream connection */
	CTX_current_stream_conn_set(C, NULL);

	/* Set TCP connection to CLOSED */
	if(is_log_level(VRS_PRINT_DEBUG_MSG)) {
		printf("%c[%d;%dm", 27, 1, 31);
		v_print_log(VRS_PRINT_DEBUG_MSG, "Server WebSocket state: CLOSED\n");
		printf("%c[%dm", 27, 0);
	}


	pthread_mutex_lock(&vs_ctx->data.mutex);
	/* Unsubscribe this session (this avatar) from all nodes */
	vs_node_free_avatar_reference(vs_ctx, vsession);
	/* Try to destroy avatar node */
	vs_node_destroy_avatar_node(vs_ctx, vsession);
	pthread_mutex_unlock(&vs_ctx->data.mutex);

	/* This session could be used again for authentication */
	stream_conn->host_state = TCP_SERVER_STATE_LISTEN;

	/* Clear session flags */
	vsession->flags = 0;

	if(is_log_level(VRS_PRINT_DEBUG_MSG)) {
		printf("%c[%d;%dm", 27, 1, 31);
		v_print_log(VRS_PRINT_DEBUG_MSG, "Server WebSocket state: LISTEN\n");
		printf("%c[%dm", 27, 0);
	}

	free(C);
	C = NULL;

	pthread_exit(NULL);
	return NULL;
}
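
Note: the vs_recv_ws_callback_data() and vs_send_ws_callback_data() callbacks registered above are not shown in this listing. Purely as an illustration, a send callback for the wslay event API typically looks like the sketch below; struct vContext and CTX_io_ctx() come from the snippets above, while the callback name is hypothetical and the WSLAY_ERR_* handling follows the wslay API as commonly used, not necessarily the verse implementation.

#include <errno.h>
#include <unistd.h>
#include <wslay/wslay.h>

/* Sketch of a wslay send callback: write as much as possible to the
 * non-blocking socket and report would-block or failure back to wslay. */
ssize_t sketch_send_ws_callback_data(wslay_event_context_ptr ctx,
		const uint8_t *data, size_t len, int flags, void *user_data)
{
	struct vContext *C = (struct vContext*)user_data;
	struct IO_CTX *io_ctx = CTX_io_ctx(C);
	ssize_t ret;

	(void)flags;

	/* Restart the write when it is interrupted by a signal */
	while((ret = write(io_ctx->sockfd, data, len)) == -1 && errno == EINTR);

	if(ret == -1) {
		if(errno == EAGAIN || errno == EWOULDBLOCK) {
			wslay_event_set_error(ctx, WSLAY_ERR_WOULDBLOCK);
		} else {
			wslay_event_set_error(ctx, WSLAY_ERR_CALLBACK_FAILURE);
		}
	}

	return ret;
}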
Example #6
/**
 * \brief Initialize IO context of Verse server used for incoming connections (TCP, WebSocket)
 */
static int vs_init_io_ctx(struct IO_CTX *io_ctx,
		unsigned short port,
		int max_sessions)
{
	int flag;

	/* Allocate buffer for incoming packets */
	if ( (io_ctx->buf = (char*)calloc(MAX_PACKET_SIZE, sizeof(char))) == NULL) {
		if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "calloc(): %s\n", strerror(errno));
		return -1;
	}

	/* "Address" of server */
	if(io_ctx->host_addr.ip_ver == IPV4) {		/* IPv4 */

		/* Create socket which server uses for listening for new connections */
		if ( (io_ctx->sockfd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) == -1 ) {
			if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "socket(): %s\n", strerror(errno));
			return -1;
		}

		/* Set socket to reuse address */
		flag = 1;
		if( setsockopt(io_ctx->sockfd, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof(flag)) == -1) {
			if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "setsockopt(): %s\n", strerror(errno));
			return -1;
		}

		io_ctx->host_addr.addr.ipv4.sin_family = AF_INET;
		io_ctx->host_addr.addr.ipv4.sin_addr.s_addr = htonl(INADDR_ANY);
		io_ctx->host_addr.addr.ipv4.sin_port = htons(port);
		io_ctx->host_addr.port = port;

		/* Bind address and socket */
		if( bind(io_ctx->sockfd,
				(struct sockaddr*)&(io_ctx->host_addr.addr.ipv4),
				sizeof(io_ctx->host_addr.addr.ipv4)) == -1)
		{
			if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "bind(): %s\n", strerror(errno));
			return -1;
		}

	}
	else if(io_ctx->host_addr.ip_ver == IPV6) {	/* IPv6 */

		/* Create socket which server uses for listening for new connections */
		if ( (io_ctx->sockfd = socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP)) == -1 ) {
			if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "socket(): %s\n", strerror(errno));
			return -1;
		}

		/* Set socket to reuse address */
		flag = 1;
		if( setsockopt(io_ctx->sockfd, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof(flag)) == -1) {
			if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "setsockopt(): %s\n", strerror(errno));
			return -1;
		}

		io_ctx->host_addr.addr.ipv6.sin6_family = AF_INET6;
		io_ctx->host_addr.addr.ipv6.sin6_addr = in6addr_any;
		io_ctx->host_addr.addr.ipv6.sin6_port = htons(port);
		io_ctx->host_addr.addr.ipv6.sin6_flowinfo = 0; /* Obsolete value */
		io_ctx->host_addr.addr.ipv6.sin6_scope_id = 0;
		io_ctx->host_addr.port = port;

		/* Bind address and socket */
		if( bind(io_ctx->sockfd,
				(struct sockaddr*)&(io_ctx->host_addr.addr.ipv6),
				sizeof(io_ctx->host_addr.addr.ipv6)) == -1)
		{
			if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "bind(): %s\n", strerror(errno));
			return -1;
		}

	}

	/* Create queue for TCP connection attempts, set maximum number of
	 * attempts in listen queue */
	if( listen(io_ctx->sockfd, max_sessions) == -1) {
		if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "listen(): %s\n", strerror(errno));
		return -1;
	}

	/* Set up flag for V_CTX of server */
	io_ctx->flags = 0;

	/* Set all bytes of buffer for incoming packet to zero */
	memset(io_ctx->buf, 0, MAX_PACKET_SIZE);

	return 1;
}
Example #7
File: vs_auth_pam.c Project: verse/verse
/* NULL Verse server conversation function with PAM */
static int vs_pam_null_conv(int num_msg, const struct pam_message **msg,
    struct pam_response **resp, void *data)
{
	v_print_log(VRS_PRINT_DEBUG_MSG, "PAM: %s called with %d messages\n", __func__, num_msg);
	return PAM_CONV_ERR;
}
Example #8
/**
 * \brief This function creates new VSNode at Verse server
 */
struct VSNode *vs_node_create(struct VS_CTX *vs_ctx,
		struct VSNode *parent_node,
		struct VSUser *owner,
		uint32 node_id,
		uint16 custom_type)
{
	struct VSNode *node;
	struct VSLink *link;
	struct VBucket *bucket;

	if(! (v_hash_array_count_items(&vs_ctx->data.nodes) < VRS_MAX_COMMON_NODE_COUNT) ) {
		v_print_log(VRS_PRINT_DEBUG_MSG,
				"max number: %d of nodes reached\n",
				VRS_MAX_COMMON_NODE_COUNT);
		return NULL;
	}

	node = (struct VSNode*)calloc(1, sizeof(struct VSNode));
	if(node == NULL) {
		v_print_log(VRS_PRINT_ERROR, "Out of memory\n");
		return NULL;
	}

	vs_node_init(node);

	if(node_id == VRS_RESERVED_NODE_ID) {
		/* Try to find the first free node_id. This is fast and easy as long as
		 * last_common_node_id has not reached the 0xFFFFFFFF-1 value yet and
		 * unused node IDs are not being reused. */
		node->id = vs_ctx->data.last_common_node_id + 1;
		while( v_hash_array_find_item(&vs_ctx->data.nodes, node) != NULL) {
			node->id++;
			/* Node id 0xFFFFFFFF has special purpose and node IDs in range <0, 65535>
			 * have special purposes too (skip them) */
			if(node->id > VRS_LAST_COMMON_NODE_ID)
				node->id = VRS_FIRST_COMMON_NODE_ID;
			/* TODO: implement faster finding of free node id */
		}
		vs_ctx->data.last_common_node_id = node->id;
	} else {
		node->id = node_id;
	}

	/* Create link to the parent node */
	if(parent_node != NULL) {
		link = vs_link_create(parent_node, node);
		if(link == NULL) {
			v_print_log(VRS_PRINT_DEBUG_MSG,
					"link between nodes %d %d could not be created\n",
					parent_node->id, node->id);
			free(node);
			return NULL;
		}
	} else {
		/* This can happen only for root node */
		assert(node_id == VRS_ROOT_NODE_ID);
		node->parent_link = NULL;
		node->level = 0;
	}

	/* Add node to the hashed array of all verse nodes */
	bucket = v_hash_array_add_item(&vs_ctx->data.nodes, node, sizeof(struct VSNode));
	if(bucket == NULL) {
		v_print_log(VRS_PRINT_DEBUG_MSG,
				"node %d could not be added to the hashed list of nodes\n",
				node->id);
		if(node->parent_link != NULL) {
			v_list_free_item(&parent_node->children_links, node->parent_link);
		}
		free(node);
		return NULL;
	}

	node->owner = owner;
	node->type = custom_type;

	return node;
}
Example #9
/**
 * \brief This function creates new node at verse server, when client sent
 * node_create command.
 */
static struct VSNode *vs_node_new(struct VS_CTX *vs_ctx,
		struct VSession *vsession,
		const uint16 type)
{
	struct VSNode *node = NULL;
	struct VSNode find_node, *avatar_node;
	struct VSUser *owner;
	struct VBucket *bucket;
	struct VSNodePermission *perm;
	struct VSNodeSubscriber *node_subscriber;

	/* Try to find avatar node to be able to create initial link to the
	 * avatar node (initial parent of the newly created node) */
	find_node.id = vsession->avatar_id;
	bucket = v_hash_array_find_item(&vs_ctx->data.nodes, &find_node);
	if(bucket != NULL) {
		avatar_node = (struct VSNode*)bucket->data;
	} else {
		v_print_log(VRS_PRINT_DEBUG_MSG,
				"vsession->avatar_id: %d not found\n", vsession->avatar_id);
		goto end;
	}

	/* Try to find owner of the new node */
	if((owner = vs_user_find(vs_ctx, vsession->user_id)) == NULL) {
		v_print_log(VRS_PRINT_DEBUG_MSG,
				"vsession->user_id: %d not found\n", vsession->user_id);
		goto end;
	}

	/* Try to create new verse node */
	if( (node = vs_node_create(vs_ctx, avatar_node, owner, VRS_RESERVED_NODE_ID, type)) == NULL) {
		goto end;
	}

	/* Set initial state of this node */
	node->state = ENTITY_CREATING;

	/* Find node representing fake user other_users */
	if( vs_ctx->other_users != NULL) {
		/* Set access permissions for other users */
		perm = (struct VSNodePermission *)calloc(1, sizeof(struct VSNodePermission));
		perm->user = vs_ctx->other_users;

		/* TODO: implement default session permissions and use them,
		 * when they are available */

		perm->permissions = vs_ctx->default_perm;
		v_list_add_tail(&node->permissions, perm);
	}

	/* Send node_create to all subscribers of avatar node data */
	node_subscriber = avatar_node->node_subs.first;
	while(node_subscriber) {
		vs_node_send_create(node_subscriber, node, avatar_node);
		node_subscriber = node_subscriber->next;
	}


end:

	return node;
}
Example #10
File: vs_config.c Project: laishi/verse
/**
 * \brief Load configuration from file to the Verse server context.
 *
 * \param	vs_ctx			The Verse server context.
 * \param	ini_file_name	The name of the INI configuration file.
 */
void vs_read_config_file(struct VS_CTX *vs_ctx, const char *ini_file_name)
{
	dictionary *ini_dict;

	ini_dict = iniparser_load((char*) ini_file_name);

	if(ini_dict != NULL) {
		char *user_auth_method;
		char *certificate_file_name;
		char *ca_certificate_file_name;
		char *private_key;
		char *fc_type;
#ifdef WITH_MONGODB
		char *mongodb_server_hostname;
		int mongodb_server_port;
		char *mongodb_server_db_name;
		char *mongodb_user;
		char *mongodb_pass;
#endif
		int fc_win_scale;
		int in_queue_max_size;
		int out_queue_max_size;
		int tcp_port_number;
		int ws_port_number;
		int udp_low_port_number;
		int udp_high_port_number;
		int max_session_count;

		v_print_log(VRS_PRINT_DEBUG_MSG, "Reading config file: %s\n",
				ini_file_name);

#ifdef WITH_OPENSSL
		/* Try to get TLS port number */
		tcp_port_number = iniparser_getint(ini_dict, "Global:TLS_port", -1);
		if(tcp_port_number != -1) {
			if(tcp_port_number >= 1024 && tcp_port_number <= 65535) {
				vs_ctx->tcp_port = tcp_port_number;
			} else {
				v_print_log(VRS_PRINT_WARNING, "TLS port: %d out of range: 1024-65535\n",
						tcp_port_number);
			}
		}
#else
		/* Try to get TCP port number */
		tcp_port_number = iniparser_getint(ini_dict, "Global:TCP_port", -1);
		if(tcp_port_number != -1) {
			if(tcp_port_number >= 1024 && tcp_port_number <= 65535) {
				vs_ctx->tcp_port = tcp_port_number;
			} else {
				v_print_log(VRS_PRINT_WARNING, "TCP port: %d out of range: 1024-65535\n",
						tcp_port_number);
			}
		}
#endif

		/* Try to get WebSocket port number */
		ws_port_number = iniparser_getint(ini_dict, "Global:WS_port", -1);
		if(ws_port_number != -1) {
			if(ws_port_number >= 1024 && ws_port_number <= 65535) {
				vs_ctx->ws_port = ws_port_number;
			} else {
				v_print_log(VRS_PRINT_WARNING, "WebSocket port: %d out of range: 1024-65535\n",
						ws_port_number);
			}
		}

		/* Try to get lowest UDP port */
		udp_low_port_number = iniparser_getint(ini_dict, "Global:UDP_port_low", -1);
		if(udp_low_port_number != -1) {
			if(udp_low_port_number >= 49152 && udp_low_port_number <= 65535) {
				vs_ctx->port_low = udp_low_port_number;
			} else {
				v_print_log(VRS_PRINT_WARNING, "UDP port: %d out of range: 49152-65535\n",
						udp_low_port_number);
			}
		}

		udp_high_port_number = iniparser_getint(ini_dict, "Global:UDP_port_high", -1);
		if(udp_high_port_number != -1) {
			if(udp_high_port_number >= 49152 && udp_high_port_number <= 65535) {
				vs_ctx->port_high = udp_high_port_number;
			} else {
				v_print_log(VRS_PRINT_WARNING, "UDP port: %d out of range: 49152-65535\n",
						udp_high_port_number);
			}
		}

		max_session_count = iniparser_getint(ini_dict, "Global:MaxSessionCount", -1);
		if(max_session_count != -1) {
			vs_ctx->max_sessions = max_session_count;
		}

		/* Try to load section [Users] */
		user_auth_method = iniparser_getstring(ini_dict, "Users:Method", NULL);
		if(user_auth_method != NULL &&
				strcmp(user_auth_method, "file") == 0)
		{
			char *file_type;

			v_print_log(VRS_PRINT_DEBUG_MSG, "user_auth_method: %s\n", user_auth_method);

			file_type = iniparser_getstring(ini_dict, "Users:FileType", NULL);

			if(file_type != NULL &&
					strcmp(file_type, "csv") == 0)
			{
				char *csv_file_name;

				v_print_log(VRS_PRINT_DEBUG_MSG, "file_type: %s\n", file_type);

				csv_file_name = iniparser_getstring(ini_dict, "Users:File", NULL);

				if(csv_file_name !=NULL) {
					vs_ctx->auth_type = AUTH_METHOD_CSV_FILE;
					if(vs_ctx->csv_user_file != NULL) {
						free(vs_ctx->csv_user_file);
					}
					vs_ctx->csv_user_file = strdup(csv_file_name);
					v_print_log(VRS_PRINT_DEBUG_MSG,
							"csv_file_name: %s\n", csv_file_name);
				}
			}
		}

		/* Try to load section [Security] */
		certificate_file_name = iniparser_getstring(ini_dict, "Security:Certificate", NULL);
		if(certificate_file_name != NULL) {
			v_print_log(VRS_PRINT_DEBUG_MSG,
					"certificate_file_name: %s\n", certificate_file_name);
			if(vs_ctx->public_cert_file != NULL) {
				free(vs_ctx->public_cert_file);
			}
			vs_ctx->public_cert_file = strdup(certificate_file_name);
		}

		/* Certificate of certificate authority */
		ca_certificate_file_name = iniparser_getstring(ini_dict, "Security:CACertificate", NULL);
		if(ca_certificate_file_name != NULL) {
			v_print_log(VRS_PRINT_DEBUG_MSG,
					"ca_certificate_file_name: %s\n", ca_certificate_file_name);
			vs_ctx->ca_cert_file = strdup(ca_certificate_file_name);
		}

		/* Server private key */
		private_key = iniparser_getstring(ini_dict, "Security:PrivateKey", NULL);
		if(private_key != NULL) {
			v_print_log(VRS_PRINT_DEBUG_MSG,
					"private_key: %s\n", private_key);
			if(vs_ctx->private_cert_file != NULL) {
				free(vs_ctx->private_cert_file);
			}
			vs_ctx->private_cert_file = strdup(private_key);
		}

		/* Type of Flow Control */
		fc_type = iniparser_getstring(ini_dict, "FlowControl:Type", NULL);
		if(fc_type != NULL) {
			if(strcmp(fc_type, "tcp_like")==0) {
				v_print_log(VRS_PRINT_DEBUG_MSG,
						"flow_control: %s\n", fc_type);
				vs_ctx->fc_meth = FC_TCP_LIKE;
			} else if(strcmp(fc_type, "none")==0) {
				v_print_log(VRS_PRINT_DEBUG_MSG,
						"flow_control type: %s\n", fc_type);
				vs_ctx->fc_meth = FC_NONE;
			}
		}

		/* Scale of Flow Control window */
		fc_win_scale = iniparser_getint(ini_dict, "FlowControl:WinScale", -1);
		if(fc_win_scale != -1) {
			if(fc_win_scale >= 0 && fc_win_scale <= 255) {
				v_print_log(VRS_PRINT_DEBUG_MSG,
						"flow_control scale: %d\n", fc_win_scale);
				vs_ctx->rwin_scale = fc_win_scale;
			}
		}

		/* Maximal size of incoming queue */
		in_queue_max_size = iniparser_getint(ini_dict, "InQueue:MaxSize", -1);
		if(in_queue_max_size != -1) {
			if(in_queue_max_size > 0) {
				v_print_log(VRS_PRINT_DEBUG_MSG,
						"in_queue max size: %d\n", in_queue_max_size);
				vs_ctx->in_queue_max_size = in_queue_max_size;
			}
		}

		/* Maximal size of outgoing queue */
		out_queue_max_size = iniparser_getint(ini_dict, "OutQueue:MaxSize", -1);
		if(out_queue_max_size != -1) {
			if(out_queue_max_size > 0) {
				v_print_log(VRS_PRINT_DEBUG_MSG,
						"out_queue max size: %d\n", out_queue_max_size);
				vs_ctx->out_queue_max_size = out_queue_max_size;
			}
		}
#ifdef WITH_MONGODB
		/* Hostname of MongoDB server */
		mongodb_server_hostname = iniparser_getstring(ini_dict,
				"MongoDB:ServerHostname", NULL);
		if(mongodb_server_hostname != NULL) {
			v_print_log(VRS_PRINT_DEBUG_MSG,
					"mongodb server hostname: %s\n", mongodb_server_hostname);
			vs_ctx->mongodb_server = strdup(mongodb_server_hostname);
		}

		/* Port of MongoDB server */
		mongodb_server_port = iniparser_getint(ini_dict,
				"MongoDB:ServerPort", -1);
		if(mongodb_server_port != -1) {
			v_print_log(VRS_PRINT_DEBUG_MSG,
					"mongodb server port: %d\n", mongodb_server_port);
			vs_ctx->mongodb_port = mongodb_server_port;
		}

		/* MongoDB database name used by Verse server */
		mongodb_server_db_name = iniparser_getstring(ini_dict,
				"MongoDB:DatabaseName", NULL);
		if(mongodb_server_db_name != NULL) {
			v_print_log(VRS_PRINT_DEBUG_MSG,
					"mongodb server database name: %s\n", mongodb_server_db_name);
			vs_ctx->mongodb_db_name = strdup(mongodb_server_db_name);
		}

		/* Username used for authentication at MongoDB */
		mongodb_user = iniparser_getstring(ini_dict,
				"MongoDB:Username", NULL);
		if(mongodb_user != NULL) {
			v_print_log(VRS_PRINT_DEBUG_MSG, "mongodb server username: %s\n",
					mongodb_user);
			vs_ctx->mongodb_user = strdup(mongodb_user);
		}

		/* Password used for authentication at MongoDB */
		mongodb_pass = iniparser_getstring(ini_dict,
				"MongoDB:Password", NULL);
		if(mongodb_pass != NULL) {
			int i;
			/* Do not print the password itself to the log, only asterisks */
			v_print_log(VRS_PRINT_DEBUG_MSG, "mongodb server password: ");
			for(i = 0; i < (int)strlen(mongodb_pass); i++) {
				v_print_log_simple(VRS_PRINT_DEBUG_MSG, "*");
			}
			v_print_log_simple(VRS_PRINT_DEBUG_MSG, "\n");
			vs_ctx->mongodb_pass = strdup(mongodb_pass);
		}
#endif

		iniparser_freedict(ini_dict);
	} else {
		v_print_log(VRS_PRINT_WARNING, "Unable to load config file: %s\n",
				ini_file_name);
	}
}
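
Note: for reference, a configuration file accepted by the parser above could look roughly like the following. All values are made-up examples; only the section and key names are taken from the iniparser calls in the function (when the server is built WITH_OPENSSL, Global:TLS_port is read instead of Global:TCP_port).

# Hypothetical verse server INI file; values are examples only
[Global]
TCP_port = 12344
WS_port = 23456
UDP_port_low = 50000
UDP_port_high = 50009
MaxSessionCount = 10

[Users]
Method = file
FileType = csv
File = /etc/verse/users.csv

[Security]
Certificate = /etc/verse/certificate.pem
CACertificate = /etc/verse/ca_certificate.pem
PrivateKey = /etc/verse/private.key

[FlowControl]
Type = tcp_like
WinScale = 0

[InQueue]
MaxSize = 1048576

[OutQueue]
MaxSize = 1048576

[MongoDB]
ServerHostname = localhost
ServerPort = 27017
DatabaseName = verse
Username = verse
Password = secret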
Example #11
/**
 * \brief This function adds a session (client) to the list of clients that
 * are subscribed to this node.
 */
static int vs_node_subscribe(struct VS_CTX *vs_ctx,
		struct VSession *vsession,
		struct VSNode *node,
		uint32 version)
{
	struct VSNode				*child_node;
	struct VSNodePermission		*perm;
	struct VSNodeSubscriber		*node_subscriber;
	struct VSLink				*link;
	struct VBucket				*bucket;
	struct VSTagGroup			*tg;
	struct VSLayer				*layer;
	int							user_can_read = 0;

	/* Can user subscribe to this node? */
	user_can_read = vs_node_can_read(vs_ctx, vsession, node);

	/* Add current session to the list of node subscribers */
	node_subscriber = (struct VSNodeSubscriber*)calloc(1, sizeof(struct VSNodeSubscriber));
	node_subscriber->session = vsession;
	node_subscriber->prio = VRS_DEFAULT_PRIORITY;
	v_list_add_tail(&node->node_subs, node_subscriber);

	/* TODO: send node_subscribe with version and commands with difference
	 * between this version and current state, when versing will be supported */
	if(version != 0) {
		v_print_log(VRS_PRINT_WARNING,
				"Version: %d != 0, versing is not supported yet\n", version);
	}

	/* Send node_perm commands to the new subscriber */
	perm = node->permissions.first;
	while(perm != NULL) {
		vs_node_send_perm(node_subscriber, node, perm->user, perm->permissions);
		perm = perm->next;
	}

	/* If the node is locked, then send node_lock to the subscriber */
	if(node->lock.session != NULL) {
		vs_node_send_lock(node_subscriber, node->lock.session, node);
	}

	/* If the user doesn't have permission to subscribe to this node, then
	 * send only the node_perm command explaining why the user can't read the
	 * rest of this node */
	if(user_can_read == 0) {
		v_print_log(VRS_PRINT_DEBUG_MSG,
				"Insufficient permission to read content of the node: %d\n",
				node->id);
		return 0;
	}

	/* Send node_create of all child nodes of this node and corresponding
	 * links */
	link = node->children_links.first;
	while(link!=NULL) {
		child_node = link->child;
		vs_node_send_create(node_subscriber, child_node, NULL);
		link = link->next;
	}

	/* Send taggroup_create of all tag_groups in this node */
	bucket = node->tag_groups.lb.first;
	while(bucket != NULL) {
		tg = (struct VSTagGroup*)bucket->data;
		if(tg->state == ENTITY_CREATING || tg->state == ENTITY_CREATED) {
			vs_taggroup_send_create(node_subscriber, node, tg);
		}
		bucket = bucket->next;
	}

	/* Send layer_create for all layers in this node */
	bucket = node->layers.lb.first;
	while(bucket != NULL) {
		layer = (struct VSLayer*)bucket->data;
		if(layer->state == ENTITY_CREATING || layer->state == ENTITY_CREATED) {
			vs_layer_send_create(node_subscriber, node, layer);
		}
		bucket = bucket->next;
	}

	return 1;
}
Example #12
File: vs_main.c Project: ged/verse
/**
 * \brief Verse server main function
 * \param	argc	The number of options and arguments.
 * \param	argv	The array of options and arguments.
 * \return The function returns 0 when it ends successfully and a non-zero
 * value when some error occurs. In normal operation this function never
 * reaches the end, because it is a server.
 */
int main(int argc, char *argv[])
{
	VS_CTX vs_ctx;
	int opt;
	char *config_file=NULL;
	int debug_level_set = 0;
	void *res;

	/* Set up initial state */
	vs_ctx.state = SERVER_STATE_CONF;

	/* Default debug prints of verse server */
	v_init_print_log(VRS_PRINT_WARNING, stdout);

	/* When server received some arguments */
	if(argc>1) {
		while( (opt = getopt(argc, argv, "c:hd:")) != -1) {
			switch(opt) {
			case 'c':
				config_file = strdup(optarg);
				break;
			case 'd':
				debug_level_set = vs_set_debug_level(optarg);
				break;
			case 'h':
				vs_print_help(argv[0]);
				exit(EXIT_SUCCESS);
			case ':':
				exit(EXIT_FAILURE);
			case '?':
				exit(EXIT_FAILURE);
			}
		}
	}

	/* Load Verse server configuration file */
	vs_load_config_file(&vs_ctx, config_file);

	/* When the debug level was specified as a command line option, then use
	 * it; otherwise use the configuration from the file */
	if(debug_level_set == 1) {
		uint8 log_level = v_log_level();
		v_init_print_log(log_level, vs_ctx.log_file);
	} else {
		v_init_print_log(vs_ctx.print_log_level, vs_ctx.log_file);
	}

	/* Load user accounts and save them in the linked list of verse server
	 * context */
	switch (vs_ctx.auth_type) {
		case AUTH_METHOD_CSV_FILE:
			if(vs_load_user_accounts(&vs_ctx) != 1) {
				vs_destroy_ctx(&vs_ctx);
				exit(EXIT_FAILURE);
			}
			break;
		case AUTH_METHOD_PAM:
			/* TODO: read list of supported usernames and their uids somehow */
			exit(EXIT_FAILURE);
		case AUTH_METHOD_LDAP:
			/* TODO: not implemented yet */
			exit(EXIT_FAILURE);
		default:
			/* Not supported method */
			vs_destroy_ctx(&vs_ctx);
			exit(EXIT_FAILURE);
	}

	/* Add superuser account to the list of users */
	vs_add_superuser_account(&vs_ctx);

	/* Add fake account for other users to the list of users*/
	vs_add_other_users_account(&vs_ctx);

	/* Initialize data mutex */
	if( pthread_mutex_init(&vs_ctx.data.mutex, NULL) != 0) {
		exit(EXIT_FAILURE);
	}

	/* Create basic node structure of node tree */
	if(vs_nodes_init(&vs_ctx) == -1) {
		vs_destroy_ctx(&vs_ctx);
		exit(EXIT_FAILURE);
	}

	if(vs_ctx.stream_protocol == TCP) {
		/* Initialize Verse server context */
		if(vs_init_stream_ctx(&vs_ctx) == -1) {
			vs_destroy_ctx(&vs_ctx);
			exit(EXIT_FAILURE);
		}
	} else {
		vs_destroy_ctx(&vs_ctx);
		exit(EXIT_FAILURE);
	}

	if(vs_ctx.flag & SERVER_DEBUG_MODE) {
		/* Set up signal handlers (only for debug mode, real server should ignore most of signals) */
		if(vs_config_signal_handling() == -1 ) {
			vs_destroy_ctx(&vs_ctx);
			exit(EXIT_FAILURE);
		}
	} else {
#if 0
		/* Make from verse server real verse application:
		 * - detach from standard file descriptors, terminals, PPID process etc. */
		if(vs_init_server(&vs_ctx) == -1) {
			vs_destroy_ctx(&vs_ctx);
			exit(EXIT_FAILURE);
		}
#endif
	}

	/* Initialize data semaphore */
	if( sem_init(&vs_ctx.data.sem, 0, 0) != 0) {
		vs_destroy_ctx(&vs_ctx);
		exit(EXIT_FAILURE);
	}

	/* Try to create new data thread */
	if(pthread_create(&vs_ctx.data_thread, NULL, vs_data_loop, (void*)&vs_ctx) != 0) {
		v_print_log(VRS_PRINT_ERROR, "pthread_create(): %s\n", strerror(errno));
		vs_destroy_ctx(&vs_ctx);
		exit(EXIT_FAILURE);
	}

	/* Try to create cli thread */
	if(pthread_create(&vs_ctx.cli_thread, NULL, vs_server_cli, (void*)&vs_ctx) != 0) {
		v_print_log(VRS_PRINT_ERROR, "pthread_create(): %s\n", strerror(errno));
		vs_destroy_ctx(&vs_ctx);
		exit(EXIT_FAILURE);
	}

	/* Set up pointer to local server CTX -> the server could be terminated
	 * with a signal now. */
	local_vs_ctx = &vs_ctx;

	vs_ctx.state = SERVER_STATE_READY;

	if(vs_ctx.stream_protocol == TCP) {
		if(vs_main_listen_loop(&vs_ctx) == -1) {
			vs_destroy_ctx(&vs_ctx);
			exit(EXIT_FAILURE);
		}
	} else {
		vs_destroy_ctx(&vs_ctx);
		exit(EXIT_FAILURE);
	}

	/* Free Verse server context */
	vs_destroy_ctx(&vs_ctx);

	/* Join cli thread */
	if(pthread_join(vs_ctx.cli_thread, &res) != 0) {
		v_print_log(VRS_PRINT_ERROR, "pthread_join(): %s\n", strerror(errno));
		exit(EXIT_FAILURE);
	} else {
		if(res != PTHREAD_CANCELED && res != NULL) free(res);
	}

	/* Join data thread */
	if(pthread_join(vs_ctx.data_thread, &res) != 0) {
		v_print_log(VRS_PRINT_ERROR, "pthread_join(): %s\n", strerror(errno));
		exit(EXIT_FAILURE);
	} else {
		if(res != PTHREAD_CANCELED && res != NULL) free(res);
	}

	return EXIT_SUCCESS;
}
Example #13
File: vs_taggroup.c Project: verse/verse
/**
 * \brief This function handles the taggroup_subscribe command
 */
int vs_handle_taggroup_subscribe(struct VS_CTX *vs_ctx,
		struct VSession *vsession,
		struct Generic_Cmd *taggroup_subscribe)
{
	struct VSNode	*node;
	uint32			node_id = UINT32(taggroup_subscribe->data[0]);
	uint16			taggroup_id = UINT16(taggroup_subscribe->data[UINT32_SIZE]);
	int				ret = 0;

	/* Try to find node */
	if((node = vs_node_find(vs_ctx, node_id)) == NULL) {
		v_print_log(VRS_PRINT_DEBUG_MSG, "%s() node (id: %d) not found\n",
				__func__, node_id);
		return 0;
	}

	pthread_mutex_lock(&node->mutex);

	/* Node has to be created */
	if( vs_node_is_created(node) != 1 ) {
		goto end;
	}

	/* Is user owner of the node or can user read the node? */
	if(vs_node_can_read(vsession, node) == 1) {
		struct VSNodeSubscriber		*node_subscriber;
		struct VSTagGroup			*tg;
		struct VSTag				*tag;
		struct VSEntitySubscriber	*tg_subscriber;
		struct VBucket				*bucket;

		/* Try to find node subscriber */
		node_subscriber = node->node_subs.first;
		while(node_subscriber != NULL) {
			if(node_subscriber->session == vsession) {
				break;
			}
			node_subscriber = node_subscriber->next;
		}

		/* Client has to be subscribed to the node first */
		if(node_subscriber == NULL) {
			v_print_log(VRS_PRINT_DEBUG_MSG,
					"%s(): client has to be subscribed to the node: %d before subscribing to the tag_group: %d\n",
					__func__, node_id, taggroup_id);
			goto end;
		}

		/* Try to find TagGroup */
		if( (tg = vs_taggroup_find(node, taggroup_id)) == NULL) {
			v_print_log(VRS_PRINT_DEBUG_MSG,
					"%s() tag_group (id: %d) in node (id: %d) not found\n",
					__func__, taggroup_id, node_id);
			goto end;
		}

		/* Is Client already subscribed to this tag group? */
		tg_subscriber = tg->tg_subs.first;
		while(tg_subscriber != NULL) {
			if(tg_subscriber->node_sub->session->session_id == vsession->session_id) {
				v_print_log(VRS_PRINT_DEBUG_MSG,
						"%s() client already subscribed to the tag_group (id: %d) in node (id: %d)\n",
						__func__, taggroup_id, node_id);
				goto end;
			}
			tg_subscriber = tg_subscriber->next;
		}

		ret = 1;

		/* Add new subscriber to the list of tag group subscribers */
		tg_subscriber = (struct VSEntitySubscriber*)malloc(sizeof(struct VSEntitySubscriber));
		tg_subscriber->node_sub = node_subscriber;
		v_list_add_tail(&tg->tg_subs, tg_subscriber);

		/* Try to send tag_create for all tags in this tag group */
		bucket = tg->tags.lb.first;
		while(bucket != NULL) {
			tag = (struct VSTag*)bucket->data;
			vs_tag_send_create(tg_subscriber, node, tg, tag);
			/* When TCP/WebSocket is used and tag is initialized, then it
			 * is possible (necessary) to send value too. */
			if( ((vsession->flags & VRS_TP_TCP) ||
					(vsession->flags & VRS_TP_WEBSOCKET)) &&
					(tag->flag & TAG_INITIALIZED))
			{
				vs_tag_send_set(vsession, node_subscriber->prio, node, tg, tag);
			}
			bucket = bucket->next;
		}
	} else {
		v_print_log(VRS_PRINT_DEBUG_MSG,
				"%s(): user: %s doesn't have permissions to subscribe to taggroup: %d in node: %d\n",
				__func__,
				((struct VSUser *)vsession->user)->username,
				taggroup_id, node->id);
	}

end:

	pthread_mutex_unlock(&node->mutex);

	return ret;
}
Example #14
File: vs_taggroup.c Project: verse/verse
/**
 * \brief This function tries to handle the taggroup_create command
 */
int vs_handle_taggroup_create(struct VS_CTX *vs_ctx,
		struct VSession *vsession,
		struct Generic_Cmd *taggroup_create_cmd)
{
	struct VSNode			*node;
	struct VSTagGroup		*tg;
	struct VBucket			*vbucket;
	uint32 					node_id = UINT32(taggroup_create_cmd->data[0]);
	uint16 					taggroup_id = UINT16(taggroup_create_cmd->data[UINT32_SIZE]);
	uint16 					type = UINT16(taggroup_create_cmd->data[UINT32_SIZE+UINT16_SIZE]);
	int						ret = 0;

	/* Client has to send taggroup_create command with taggroup_id equal to
	 * the value 0xFFFF */
	if(taggroup_id != VRS_RESERVED_TAGGROUP_ID) {
		v_print_log(VRS_PRINT_DEBUG_MSG, "%s() taggroup_id: %d is not 0xFFFF\n",
				__func__, taggroup_id);
		return 0;
	}

	/* Try to find node */
	if((node = vs_node_find(vs_ctx, node_id)) == NULL) {
		v_print_log(VRS_PRINT_DEBUG_MSG, "%s() node (id: %d) not found\n",
				__func__, node_id);
		return 0;
	}

	pthread_mutex_lock(&node->mutex);

	/* Node has to be created */
	if( vs_node_is_created(node) != 1 ) {
		goto end;
	}

	vbucket = node->tag_groups.lb.first;
	/* Check if there isn't already a tag group with the same custom type */
	while(vbucket != NULL) {
		tg = vbucket->data;
		if(tg->custom_type == type) {
			v_print_log(VRS_PRINT_DEBUG_MSG,
					"%s() taggroup type: %d is already used in node: %d\n",
					__func__, type, node->id);
			goto end;
		}
		vbucket = vbucket->next;
	}

	/* Is user owner of this node or can user write to this node? */
	if(vs_node_can_write(vsession, node) != 1) {
		v_print_log(VRS_PRINT_DEBUG_MSG,
				"%s(): user: %s can't write to node: %d\n",
				__func__,
				((struct VSUser *)vsession->user)->username,
				node->id);
		goto end;
	}

	/* Try to create new tag group */
	tg = vs_taggroup_create(node, VRS_RESERVED_TAGGROUP_ID, type);
	if(tg == NULL) {
		goto end;
	} else {
		struct VSNodeSubscriber *node_subscriber;

		/* Set state for this entity */
		tg->state = ENTITY_CREATING;

		ret = 1;

		/* Send tag group create command to all subscribers to the node
		 * that can read this node */
		for(node_subscriber = node->node_subs.first;
				node_subscriber != NULL;
				node_subscriber = node_subscriber->next)
		{
			if( vs_node_can_read(node_subscriber->session, node) == 1) {
				if(vs_taggroup_send_create(node_subscriber, node, tg) != 1) {
					ret = 0;
				}
			}
		}
	}

end:

	pthread_mutex_unlock(&node->mutex);

	return ret;
}
Example #15
/**
 * \brief Main Verse server loop. The server waits for connection attempts,
 * responds to them and creates per-connection threads.
 */
int vs_main_listen_loop(VS_CTX *vs_ctx)
{
	struct vContext *C;
	struct timeval start, tv;
	fd_set set;
	int count, tmp, i, ret;
	int sockfd;

	/* Allocate context for server */
	C = (struct vContext*)calloc(1, sizeof(struct vContext));
	/* Set up client context, connection context and IO context */
	CTX_server_ctx_set(C, vs_ctx);
	CTX_client_ctx_set(C, NULL);
	CTX_current_dgram_conn_set(C, NULL);

	/* Get time of the start of the server */
	gettimeofday(&start, NULL);

	/* Seed random number generator */
#ifdef __APPLE__
	sranddev();
	/* Other BSD based systems probably support this or similar function too. */
#else
	/* Other systems have to use this evil trick */
	srand(start.tv_sec - start.tv_usec);
#endif

	/* "Never ending" listen loop */
	while(vs_ctx->state == SERVER_STATE_READY) {
		/* Debug print */
		gettimeofday(&tv, NULL);
		if(is_log_level(VRS_PRINT_DEBUG_MSG)) {
			if(tv.tv_sec==start.tv_sec)
				v_print_log(VRS_PRINT_DEBUG_MSG, "\t+0s");
			else
				v_print_log(VRS_PRINT_DEBUG_MSG, "\t+%lds", (long int)(tv.tv_sec - start.tv_sec));
#ifdef WSLAY
			v_print_log_simple(VRS_PRINT_DEBUG_MSG,
					"\tServer listen on (TCP port: %d, WebSocket port: %d)\n",
					vs_ctx->tcp_io_ctx.host_addr.port,
					vs_ctx->ws_io_ctx.host_addr.port);
#else
			v_print_log_simple(VRS_PRINT_DEBUG_MSG,
					"\tServer listen on TCP port: %d\n",
					vs_ctx->tcp_io_ctx.host_addr.port);
#endif
		}

		/* Set up set of sockets */
		FD_ZERO(&set);
		FD_SET(vs_ctx->tcp_io_ctx.sockfd, &set);
		/* When Verse is compiled with support of WebSocket, then listen on
		 * WebSocket port too */
#ifdef WSLAY
		FD_SET(vs_ctx->ws_io_ctx.sockfd, &set);
		sockfd = (vs_ctx->tcp_io_ctx.sockfd > vs_ctx->ws_io_ctx.sockfd) ?
				vs_ctx->tcp_io_ctx.sockfd : vs_ctx->ws_io_ctx.sockfd;
#else
		sockfd = vs_ctx->tcp_io_ctx.sockfd;
#endif

		/* We will wait one second for a connection attempt, then the debug
		 * print will be printed again */
		tv.tv_sec = 1;
		tv.tv_usec = 0;


		/* Wait for event on listening sockets */
		if( (ret = select(sockfd+1, &set, NULL, NULL, &tv)) == -1 ) {
			int err = errno;
			if(err==EINTR) {
				break;
			} else {
				if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR,
						"%s:%s():%d select(): %s\n",
						__FILE__,
						__FUNCTION__,
						__LINE__,
						strerror(err));
				return -1;
			}
			/* Was event on the listen socket */
		} else if(ret>0) {
			if (FD_ISSET(vs_ctx->tcp_io_ctx.sockfd, &set))
			{
				v_print_log(VRS_PRINT_DEBUG_MSG, "TCP Connection attempt\n");
				CTX_io_ctx_set(C, &vs_ctx->tcp_io_ctx);
				vs_new_stream_conn(C, vs_tcp_conn_loop);
#ifdef WSLAY
			} else if(FD_ISSET(vs_ctx->ws_io_ctx.sockfd, &set)) {
				v_print_log(VRS_PRINT_DEBUG_MSG, "WebSocket Connection attempt\n");
				CTX_io_ctx_set(C, &vs_ctx->ws_io_ctx);
				vs_new_stream_conn(C, vs_websocket_loop);
#endif
			}
		}
	}

	count = 0;
	while( (vs_ctx->state == SERVER_STATE_CLOSING) && (count < VRS_TIMEOUT) ) {
		v_print_log(VRS_PRINT_DEBUG_MSG, "Wait for Server state CLOSED\n");

		/* Check if there are still some pending connections */
		tmp = 0;
		for(i=0; i<vs_ctx->max_sessions; i++) {
			if(vs_ctx->vsessions[i] != NULL) {
				if(vs_ctx->vsessions[i]->stream_conn != NULL &&
					vs_ctx->vsessions[i]->stream_conn->host_state != TCP_SERVER_STATE_LISTEN ) {
					tmp++;
				}
				/* TODO: cancel thread with closed connection to speed up server exit
					pthread_kill(vs_ctx->vsessions[i]->tcp_thread, SIGALRM);
					pthread_join(vs_ctx->vsessions[i]->tcp_thread, NULL);
				*/

			}
		}
		if(tmp==0) {
			vs_ctx->state = SERVER_STATE_CLOSED;
		} else {
			sleep(1);
		}
	}

	free(C);

	return 1;
}
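
A minimal sketch of how this listen loop might be driven from a server entry point. The vs_run() wrapper name is hypothetical; vs_init_ssl() appears in a later example in this listing, and the listening sockets and session table are assumed to be initialized elsewhere.

int vs_run(VS_CTX *vs_ctx)
{
	/* vs_init_ssl() is shown in a later example; sockets and the session
	 * table are assumed to be set up already */
	if(vs_init_ssl(vs_ctx) != 1) {
		return -1;
	}

	vs_ctx->state = SERVER_STATE_READY;

	/* Blocks until the state leaves SERVER_STATE_READY and all pending
	 * connections are closed */
	return vs_main_listen_loop(vs_ctx);
}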
Example #16
0
/**
 * \brief This function recursively unsubscribes a client from a node
 */
static int vs_node_unsubscribe(struct VSNode *node,
		struct VSNodeSubscriber *node_subscriber,
		int level)
{
	struct VSNode *child_node;
	struct VSLink *link;
	struct VBucket *tg_bucket, *layer_bucket;
	struct VSTagGroup *tg;
	struct VSLayer *layer;
	struct VSNodeSubscriber *_node_subscriber, *_next_node_subscriber;
	struct VSEntityFollower *node_follower;
	struct VSEntityFollower	*taggroup_follower;
	struct VSEntityFollower	*layer_follower;

	/* Unsubscribe from all child nodes */
	link = node->children_links.first;
	while(link != NULL) {
		child_node = link->child;

		_node_subscriber = child_node->node_subs.first;
		while(_node_subscriber != NULL) {
			_next_node_subscriber = _node_subscriber->next;
			if(_node_subscriber->session->session_id == node_subscriber->session->session_id) {
				/* Unsubscribe from child node */
				vs_node_unsubscribe(child_node, _node_subscriber, level+1);
				break;
			}
			_node_subscriber = _next_node_subscriber;
		}

		link = link->next;
	}

	/* Unsubscribe client from all tag groups */
	tg_bucket = node->tag_groups.lb.first;
	while(tg_bucket != NULL) {
		tg = (struct VSTagGroup*)tg_bucket->data;

		/* Remove client from the list of TagGroup subscribers */
		vs_taggroup_unsubscribe(tg, node_subscriber->session);

		/* Remove client from the list of TagGroup followers */
		taggroup_follower = tg->tg_folls.first;
		while(taggroup_follower != NULL) {
			if(taggroup_follower->node_sub->session->session_id == node_subscriber->session->session_id) {
				v_list_free_item(&tg->tg_folls, taggroup_follower);
				break;
			}
			taggroup_follower = taggroup_follower->next;
		}

		tg_bucket = tg_bucket->next;
	}

	/* Unsubscribe client from all layers */
	layer_bucket = node->layers.lb.first;
	while(layer_bucket != NULL) {
		layer = (struct VSLayer*)layer_bucket->data;

		/* Remove client from the list of Layer subscribers */
		vs_layer_unsubscribe(layer, node_subscriber->session);

		/* Remove client from the list of Layer followers */
		layer_follower = layer->layer_folls.first;
		while(layer_follower != NULL) {
			if(layer_follower->node_sub->session->session_id == node_subscriber->session->session_id) {
				v_list_free_item(&layer->layer_folls, layer_follower);
				break;
			}
			layer_follower = layer_follower->next;
		}
		layer_bucket = layer_bucket->next;
	}

	if(level > 0) {
		/* Remove this session from list of followers too */
		node_follower = node->node_folls.first;
		while(node_follower != NULL) {
			if(node_follower->node_sub->session->session_id == node_subscriber->session->session_id) {
				/* Remove client from list of clients, that knows about this node */
				v_print_log(VRS_PRINT_DEBUG_MSG,
						"Removing session: %d from the list of node: %d followers\n",
						node_subscriber->session->session_id, node->id);
				v_list_free_item(&node->node_folls, node_follower);
				break;
			}
			node_follower = node_follower->next;
		}
	}

	/* Finally remove this session from list of node subscribers */
	v_list_free_item(&node->node_subs, node_subscriber);

	return 1;
}
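
A minimal sketch of a possible caller, using the hypothetical helper name vs_node_unsubscribe_session(): it looks up the subscriber record of a session in the node and starts the recursion at level 0, so the follower entry of the top-level node itself is kept.

static int vs_node_unsubscribe_session(struct VSNode *node,
		struct VSession *vsession)
{
	struct VSNodeSubscriber *node_subscriber = node->node_subs.first;

	/* Find the subscriber record that belongs to this session */
	while(node_subscriber != NULL) {
		if(node_subscriber->session->session_id == vsession->session_id) {
			/* level 0: only child nodes drop this session from their
			 * follower lists */
			return vs_node_unsubscribe(node, node_subscriber, 0);
		}
		node_subscriber = node_subscriber->next;
	}

	return 0;
}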
Example #17
0
/**
 * \brief Initialize OpenSSL contexts of the Verse server
 */
static int vs_init_ssl(VS_CTX *vs_ctx)
{
	/* Set up the library */
	SSL_library_init();
	ERR_load_BIO_strings();
	SSL_load_error_strings();
	OpenSSL_add_all_algorithms();

	/* Set up SSL context for TLS  */
	if( (vs_ctx->tls_ctx = SSL_CTX_new(TLSv1_server_method())) == NULL ) {
		v_print_log(VRS_PRINT_ERROR, "Setting up SSL_CTX failed.\n");
		ERR_print_errors_fp(v_log_file());
		return -1;
	}

	/* Load certificate chain file from CA */
	if(vs_ctx->ca_cert_file != NULL) {
		if(SSL_CTX_use_certificate_chain_file(vs_ctx->tls_ctx, vs_ctx->ca_cert_file) != 1) {
			v_print_log(VRS_PRINT_ERROR, "TLS: Loading certificate chain file: %s failed.\n",
					vs_ctx->ca_cert_file);
			ERR_print_errors_fp(v_log_file());
			return -1;
		}
	}

	/* Load certificate with public key for TLS */
	if(SSL_CTX_use_certificate_file(vs_ctx->tls_ctx, vs_ctx->public_cert_file, SSL_FILETYPE_PEM) != 1) {
		v_print_log(VRS_PRINT_ERROR, "TLS: Loading certificate file: %s failed.\n",
				vs_ctx->public_cert_file);
		ERR_print_errors_fp(v_log_file());
		return -1;
	}

	/* Load private key for TLS */
	if(SSL_CTX_use_PrivateKey_file(vs_ctx->tls_ctx, vs_ctx->private_cert_file, SSL_FILETYPE_PEM) != 1) {
		v_print_log(VRS_PRINT_ERROR, "TLS: Loading private key file: %s failed.\n",
				vs_ctx->private_cert_file);
		ERR_print_errors_fp(v_log_file());
		return -1;
	}

	/* Check the consistency of a private key with the corresponding
	 * certificate loaded into ssl_ctx */
	if(SSL_CTX_check_private_key(vs_ctx->tls_ctx) != 1) {
		v_print_log(VRS_PRINT_ERROR, "TLS: Private key does not match the certificate public key\n");
		ERR_print_errors_fp(v_log_file());
		return -1;
	}

	/* When CA certificate file was set, then try to load it */
	if(vs_ctx->ca_cert_file != NULL) {
		if(SSL_CTX_load_verify_locations(vs_ctx->tls_ctx, vs_ctx->ca_cert_file, NULL) != 1) {
			v_print_log(VRS_PRINT_ERROR, "TLS: Loading CA certificate file: %s failed.\n",
					vs_ctx->ca_cert_file);
			ERR_print_errors_fp(v_log_file());
			return -1;
		}
	}

#if OPENSSL_VERSION_NUMBER>=0x10000000

	/* Set up SSL context for DTLS  */
	if( (vs_ctx->dtls_ctx = SSL_CTX_new(DTLSv1_server_method())) == NULL ) {
		v_print_log(VRS_PRINT_ERROR, "Setting up SSL_CTX failed.\n");
		ERR_print_errors_fp(v_log_file());
		return -1;
	}

	/* Load certificate chain file from CA */
	if(vs_ctx->ca_cert_file != NULL) {
		if(SSL_CTX_use_certificate_chain_file(vs_ctx->dtls_ctx, vs_ctx->ca_cert_file) != 1) {
			v_print_log(VRS_PRINT_ERROR, "DTLS: Loading certificate chain file: %s failed.\n",
					vs_ctx->ca_cert_file);
			ERR_print_errors_fp(v_log_file());
			return -1;
		}
	}

	/* Load certificate with public key for DTLS */
	if (SSL_CTX_use_certificate_file(vs_ctx->dtls_ctx, vs_ctx->public_cert_file, SSL_FILETYPE_PEM) != 1) {
		v_print_log(VRS_PRINT_ERROR, "DTLS: Loading certificate file: %s failed.\n",
						vs_ctx->public_cert_file);
		ERR_print_errors_fp(v_log_file());
		return -1;
	}

	/* Load private key for DTLS */
	if(SSL_CTX_use_PrivateKey_file(vs_ctx->dtls_ctx, vs_ctx->private_cert_file, SSL_FILETYPE_PEM) != 1) {
		v_print_log(VRS_PRINT_ERROR, "DTLS: Loading private key file: %s failed.\n",
						vs_ctx->private_cert_file);
		ERR_print_errors_fp(v_log_file());
		return -1;
	}

	/* Check the consistency of a private key with the corresponding
	 * certificate loaded into ssl_ctx */
	if(SSL_CTX_check_private_key(vs_ctx->dtls_ctx) != 1) {
		v_print_log(VRS_PRINT_ERROR, "DTLS: Private key does not match the certificate public key\n");
		ERR_print_errors_fp(v_log_file());
		return -1;
	}

	/* When CA certificate file was set, then try to load it */
	if(vs_ctx->ca_cert_file != NULL) {
		if(SSL_CTX_load_verify_locations(vs_ctx->dtls_ctx, vs_ctx->ca_cert_file, NULL) != 1) {
			v_print_log(VRS_PRINT_ERROR, "DTLS: Loading CA certificate file: %s failed.\n",
					vs_ctx->ca_cert_file);
			ERR_print_errors_fp(v_log_file());
			return -1;
		}
	}

	/* Set up callback functions for DTLS cookie */
	SSL_CTX_set_cookie_generate_cb(vs_ctx->dtls_ctx, vs_dtls_generate_cookie);
	SSL_CTX_set_cookie_verify_cb(vs_ctx->dtls_ctx, vs_dtls_verify_cookie);
	/* Accept all ciphers including the NULL cipher (testing only) */
	if( SSL_CTX_set_cipher_list(vs_ctx->dtls_ctx, "ALL:NULL:eNULL:aNULL") == 0) {
		v_print_log(VRS_PRINT_ERROR, "Setting ciphers for DTLS failed.\n");
		ERR_print_errors_fp(v_log_file());
		return 0;
	}
	/* DTLS requires this */
	SSL_CTX_set_read_ahead(vs_ctx->dtls_ctx, 1);
#else
	vs_ctx->dtls_ctx = NULL;
#endif

	return 1;
}
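
A minimal sketch of the matching teardown, under the hypothetical name vs_destroy_ssl(); SSL_CTX_free(), EVP_cleanup() and ERR_free_strings() are standard OpenSSL calls.

static void vs_destroy_ssl(VS_CTX *vs_ctx)
{
	/* Release the TLS context created in vs_init_ssl() */
	if(vs_ctx->tls_ctx != NULL) {
		SSL_CTX_free(vs_ctx->tls_ctx);
		vs_ctx->tls_ctx = NULL;
	}
#if OPENSSL_VERSION_NUMBER>=0x10000000
	/* Release the DTLS context, when it was created */
	if(vs_ctx->dtls_ctx != NULL) {
		SSL_CTX_free(vs_ctx->dtls_ctx);
		vs_ctx->dtls_ctx = NULL;
	}
#endif
	/* Free the algorithm and error-string tables loaded in vs_init_ssl() */
	EVP_cleanup();
	ERR_free_strings();
}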
Example #18
0
/**
 * \brief This function is called when the server receives an ack command for
 * a packet that contained a node_create command sent to the client
 *
 * When this function is called, we can be sure that the client knows about
 * the node. The node can be switched to the CREATED state for the
 * follower of this node
 */
int vs_handle_node_create_ack(struct VS_CTX *vs_ctx,
		struct VSession *vsession,
		struct Generic_Cmd *cmd)
{
	VSNode *node;
	struct VSEntityFollower *node_follower;
	struct Node_Create_Ack_Cmd *node_create_ack = (struct Node_Create_Ack_Cmd*)cmd;
	int all_created = 1;

	/* Try to find node */
	if((node = vs_node_find(vs_ctx, node_create_ack->node_id)) == NULL) {
		v_print_log(VRS_PRINT_DEBUG_MSG, "%s() node (id: %d) not found\n",
				__FUNCTION__, node_create_ack->node_id);
		return 0;
	}

	node_follower = node->node_folls.first;
	while(node_follower != NULL) {
		if(node_follower->node_sub->session->session_id == vsession->session_id) {

			if(node_follower->state == ENTITY_CREATING) {
				node_follower->state = ENTITY_CREATED;

				/* If the node is in the state DELETING, then send the
				 * node_destroy command to the client. The client knows about
				 * this node now and can receive the node_destroy command */
				if(node->state == ENTITY_DELETING) {
					/* Create Destroy_Node command */
					struct Generic_Cmd *node_destroy_cmd = v_node_destroy_create(node->id);

					/* Push this command to the outgoing queue */
					if ( node_destroy_cmd != NULL &&
						v_out_queue_push_tail(node_follower->node_sub->session->out_queue,
								0,
								node_follower->node_sub->prio,
								node_destroy_cmd) == 1)
					{
						node_follower->state = ENTITY_DELETING;
					} else {
						v_print_log(VRS_PRINT_DEBUG_MSG,
								"node_destroy (id: %d) wasn't added to the queue\n",
								node->id);
					}
				}
			} else {
				v_print_log(VRS_PRINT_DEBUG_MSG,
						"node %d isn't in CREATING state\n");
			}
		} else {
			if(node_follower->state != ENTITY_CREATED) {
				all_created = 0;
			}
		}
		node_follower = node_follower->next;
	}

	/* When all followers know about this node, then change state of this node */
	if(all_created == 1) {
		node->state = ENTITY_CREATED;
	}

	return 0;
}
Example #19
0
/**
 * \brief Main function of a new thread. This thread is created for a new
 * connection with a client. The thread tries to authenticate the new user
 * and negotiate a new UDP port. */
void *vs_tcp_conn_loop(void *arg)
{
	struct vContext *C = (struct vContext*)arg;
	struct VS_CTX *vs_ctx = CTX_server_ctx(C);
	struct VSession *vsession = CTX_current_session(C);
	struct IO_CTX *io_ctx = CTX_io_ctx(C);
	struct VStreamConn *stream_conn = CTX_current_stream_conn(C);
	struct VMessage *r_message=NULL, *s_message=NULL;
	struct timeval tv;
	fd_set set;
	int error, ret;
	void *udp_thread_result;
	unsigned int int_size;

	/* Try to get size of TCP buffer */
	int_size = sizeof(int_size);
	getsockopt(io_ctx->sockfd, SOL_SOCKET, SO_RCVBUF,
			(void *)&stream_conn->socket_buffer_size, &int_size);

#ifdef WITH_OPENSSL
	/* Try to do TLS handshake with client */
	if(vs_TLS_handshake(C)!=1) {
		goto end;
	}
#endif

	r_message = (struct VMessage*)calloc(1, sizeof(struct VMessage));
	s_message = (struct VMessage*)calloc(1, sizeof(struct VMessage));
	CTX_r_message_set(C, r_message);
	CTX_s_message_set(C, s_message);

	stream_conn->host_state = TCP_SERVER_STATE_RESPOND_METHODS;

	if(is_log_level(VRS_PRINT_DEBUG_MSG)) {
		printf("%c[%d;%dm", 27, 1, 31);
		v_print_log(VRS_PRINT_DEBUG_MSG, "Server TCP state: RESPOND_methods\n");
		printf("%c[%dm", 27, 0);
	}

	/* "Never ending" loop */
	while(1)
	{
		FD_ZERO(&set);
		FD_SET(io_ctx->sockfd, &set);

		tv.tv_sec = VRS_TIMEOUT;	/* The user has to send something within VRS_TIMEOUT seconds */
		tv.tv_usec = 0;

		if( (ret = select(io_ctx->sockfd+1, &set, NULL, NULL, &tv)) == -1) {
			if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "%s:%s():%d select(): %s\n",
					__FILE__, __FUNCTION__,  __LINE__, strerror(errno));
			goto end;
			/* There was an event on the connection socket */
		} else if(ret>0 && FD_ISSET(io_ctx->sockfd, &set)) {

			/* Try to receive data through TCP connection */
			if( v_tcp_read(io_ctx, &error) <= 0 ) {
				goto end;
			}

			/* Handle verse handshake at TCP connection */
			if( (ret = vs_handle_handshake(C)) == -1) {
				goto end;
			}

			/* When there is something to send, then send it to peer */
			if( ret == 1 ) {
				/* Send response message to the client */
				if( (ret = v_tcp_write(io_ctx, &error)) <= 0) {
					goto end;
				}
			}

		} else {
			if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "No response in %d seconds\n", VRS_TIMEOUT);
			goto end;
		}
	}

end:
	if(is_log_level(VRS_PRINT_DEBUG_MSG)) {
		printf("%c[%d;%dm", 27, 1, 31);
		v_print_log(VRS_PRINT_DEBUG_MSG, "Server TCP state: CLOSING\n");
		printf("%c[%dm", 27, 0);
	}

	/* Set up TCP CLOSING state (non-blocking) */
	vs_CLOSING(C);

	/* Receive and send messages are not necessary any more */
	if(r_message != NULL) {
		free(r_message);
		r_message = NULL;
		CTX_r_message_set(C, NULL);
	}
	if(s_message != NULL) {
		free(s_message);
		s_message = NULL;
		CTX_s_message_set(C, NULL);
	}

	/* The TCP connection is considered CLOSED, but it is not yet possible to
	 * reuse this connection for another client */
	stream_conn->host_state = TCP_SERVER_STATE_CLOSED;

	/* NULL pointer at stream connection */
	CTX_current_stream_conn_set(C, NULL);

	/* Set TCP connection to CLOSED */
	if(is_log_level(VRS_PRINT_DEBUG_MSG)) {
		printf("%c[%d;%dm", 27, 1, 31);
		v_print_log(VRS_PRINT_DEBUG_MSG, "Server TCP state: CLOSED\n");
		printf("%c[%dm", 27, 0);
	}

	/* Was udp thread created? */
	if(vsession->udp_thread != 0 ) {
		/* Wait for UDP thread (this is blocking operation) */
		v_print_log(VRS_PRINT_DEBUG_MSG, "Waiting for join with UDP thread ...\n");
		if(pthread_join(vsession->udp_thread, &udp_thread_result) != 0) {
			v_print_log(VRS_PRINT_DEBUG_MSG, "UDP thread was not joined\n");
		}
	}

	pthread_mutex_lock(&vs_ctx->data.mutex);
	/* Unsubscribe this session (this avatar) from all nodes */
	vs_node_free_avatar_reference(vs_ctx, vsession);
	/* Try to destroy avatar node */
	vs_node_destroy_avatar_node(vs_ctx, vsession);
	pthread_mutex_unlock(&vs_ctx->data.mutex);

	/* This session could be used again for authentication */
	stream_conn->host_state = TCP_SERVER_STATE_LISTEN;

	/* Clear session flags */
	vsession->flags = 0;

	if(vsession->peer_cookie.str != NULL) {
		free(vsession->peer_cookie.str);
		vsession->peer_cookie.str = NULL;
	}
	if(vsession->ded.str != NULL) {
		free(vsession->ded.str);
		vsession->ded.str = NULL;
	}
	if(vsession->client_name != NULL) {
		free(vsession->client_name);
		vsession->client_name = NULL;
	}
	if(vsession->client_version != NULL) {
		free(vsession->client_version);
		vsession->client_version = NULL;
	}

	if(is_log_level(VRS_PRINT_DEBUG_MSG)) {
		printf("%c[%d;%dm", 27, 1, 31);
		v_print_log(VRS_PRINT_DEBUG_MSG, "Server TCP state: LISTEN\n");
		printf("%c[%dm", 27, 0);
	}

	free(C);
	C = NULL;

	pthread_exit(NULL);
	return NULL;
}
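
A minimal sketch of how this thread entry point might be started for an accepted connection. The helper name is hypothetical, new_C is assumed to be a per-connection copy of the context, and real code would likely set thread attributes as well.

static int vs_start_tcp_conn_thread_sketch(struct VSession *current_session,
		struct vContext *new_C)
{
	int ret;

	/* new_C already carries the accepted session and its IO context */
	if( (ret = pthread_create(&current_session->tcp_thread, NULL,
			vs_tcp_conn_loop, (void*)new_C)) != 0) {
		if(is_log_level(VRS_PRINT_ERROR))
			v_print_log(VRS_PRINT_ERROR, "pthread_create(): %s\n",
					strerror(ret));
		return 0;
	}

	return 1;
}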
Example #20
0
File: verse.c Project: laishi/verse
/**
 * \brief This function calls the registered callback function for the
 * appropriate command
 */
static void vc_call_callback_func(const uint8 session_id,
		struct Generic_Cmd *cmd)
{
	switch(cmd->id) {
	case FAKE_CMD_CONNECT_ACCEPT:
		if(vc_ctx->vfs.receive_connect_accept != NULL) {
			vc_ctx->vfs.receive_connect_accept(session_id,
					((struct Connect_Accept_Cmd*)cmd)->user_id,
					((struct Connect_Accept_Cmd*)cmd)->avatar_id);
		}
		break;
	case FAKE_CMD_CONNECT_TERMINATE:
		if(vc_ctx->vfs.receive_connect_terminate != NULL) {
			vc_ctx->vfs.receive_connect_terminate(session_id,
					((struct Connect_Terminate_Cmd*)cmd)->error_num);
		}
		break;
	case FAKE_CMD_USER_AUTHENTICATE:
		if(vc_ctx->vfs.receive_user_authenticate != NULL) {
			vc_ctx->vfs.receive_user_authenticate(session_id,
					((struct User_Authenticate_Cmd*)cmd)->username,
					((struct User_Authenticate_Cmd*)cmd)->auth_meth_count,
					((struct User_Authenticate_Cmd*)cmd)->methods);
		}
		break;
	case CMD_NODE_CREATE:
		if(vc_ctx->vfs.receive_node_create != NULL) {
			vc_ctx->vfs.receive_node_create(session_id,
					UINT32(cmd->data[UINT16_SIZE+UINT32_SIZE]),
					UINT32(cmd->data[UINT16_SIZE]),
					UINT16(cmd->data[0]),
					UINT16(cmd->data[UINT16_SIZE+UINT32_SIZE+UINT32_SIZE]));
		}
		break;
	case CMD_NODE_DESTROY:
		if(vc_ctx->vfs.receive_node_destroy != NULL) {
			vc_ctx->vfs.receive_node_destroy(session_id,
					UINT32(cmd->data[0]));
		}
		break;
	case CMD_NODE_SUBSCRIBE:
		if(vc_ctx->vfs.receive_node_subscribe != NULL) {
			vc_ctx->vfs.receive_node_subscribe(session_id,
					UINT32(cmd->data[0]),
					UINT32(cmd->data[UINT32_SIZE]),
					UINT32(cmd->data[UINT32_SIZE + UINT32_SIZE]));
		}
		break;
	case CMD_NODE_UNSUBSCRIBE:
		if(vc_ctx->vfs.receive_node_unsubscribe != NULL) {
			vc_ctx->vfs.receive_node_unsubscribe(session_id,
					UINT32(cmd->data[0]),
					UINT32(cmd->data[UINT32_SIZE]),
					UINT32(cmd->data[UINT32_SIZE + UINT32_SIZE]));
		}
		break;
	case CMD_NODE_PERMISSION:
		if(vc_ctx->vfs.receive_node_perm != NULL) {
			vc_ctx->vfs.receive_node_perm(session_id,
					UINT32(cmd->data[UINT16_SIZE+UINT8_SIZE]),
					UINT16(cmd->data[0]),
					UINT8(cmd->data[UINT16_SIZE]));
		}
		break;
	case CMD_NODE_OWNER:
		if(vc_ctx->vfs.receive_node_owner != NULL) {
			vc_ctx->vfs.receive_node_owner(session_id,
					UINT32(cmd->data[UINT16_SIZE]),
					UINT16(cmd->data[0]));
		}
		break;
	case CMD_NODE_LOCK:
		if(vc_ctx->vfs.receive_node_lock != NULL) {
			vc_ctx->vfs.receive_node_lock(session_id,
					UINT32(cmd->data[UINT32_SIZE]),
					UINT32(cmd->data[0]));
		}
		break;
	case CMD_NODE_UNLOCK:
		if(vc_ctx->vfs.receive_node_unlock != NULL) {
			vc_ctx->vfs.receive_node_unlock(session_id,
					UINT32(cmd->data[UINT32_SIZE]),
					UINT32(cmd->data[0]));
		}
		break;
	case CMD_NODE_LINK:
		if(vc_ctx->vfs.receive_node_link != NULL) {
			vc_ctx->vfs.receive_node_link(session_id,
					UINT32(cmd->data[0]),
					UINT32(cmd->data[UINT32_SIZE]));
		}
		break;
	case CMD_TAGGROUP_CREATE:
		if(vc_ctx->vfs.receive_taggroup_create != NULL) {
			vc_ctx->vfs.receive_taggroup_create(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT16(cmd->data[UINT32_SIZE+UINT16_SIZE]));
		}
		break;
	case CMD_TAGGROUP_DESTROY:
		if(vc_ctx->vfs.receive_taggroup_destroy) {
			vc_ctx->vfs.receive_taggroup_destroy(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]));
		}
		break;
	case CMD_TAGGROUP_SUBSCRIBE:
		if(vc_ctx->vfs.receive_taggroup_subscribe) {
			vc_ctx->vfs.receive_taggroup_subscribe(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT32(cmd->data[UINT32_SIZE+UINT16_SIZE]),
					UINT32(cmd->data[UINT32_SIZE+UINT16_SIZE+UINT32_SIZE]));
		}
		break;
	case CMD_TAGGROUP_UNSUBSCRIBE:
		if(vc_ctx->vfs.receive_taggroup_unsubscribe) {
			vc_ctx->vfs.receive_taggroup_unsubscribe(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT32(cmd->data[UINT32_SIZE+UINT16_SIZE]),
					UINT32(cmd->data[UINT32_SIZE+UINT16_SIZE+UINT32_SIZE]));
		}
		break;
	case CMD_TAG_CREATE:
		if(vc_ctx->vfs.receive_tag_create != NULL) {
			vc_ctx->vfs.receive_tag_create(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT16(cmd->data[UINT32_SIZE + UINT16_SIZE]),
					UINT8(cmd->data[UINT32_SIZE + UINT16_SIZE + UINT16_SIZE]),
					UINT8(cmd->data[UINT32_SIZE + UINT16_SIZE + UINT16_SIZE + UINT8_SIZE]),
					UINT16(cmd->data[UINT32_SIZE + UINT16_SIZE + UINT16_SIZE + UINT8_SIZE + UINT8_SIZE]));
		}
		break;
	case CMD_TAG_DESTROY:
		if(vc_ctx->vfs.receive_tag_destroy != NULL) {
			vc_ctx->vfs.receive_tag_destroy(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT16(cmd->data[UINT32_SIZE + UINT16_SIZE]));
		}
		break;
	case CMD_TAG_SET_UINT8:
	case CMD_TAG_SET_VEC2_UINT8:
	case CMD_TAG_SET_VEC3_UINT8:
	case CMD_TAG_SET_VEC4_UINT8:
		if(vc_ctx->vfs.receive_tag_set_value != NULL) {
			vc_ctx->vfs.receive_tag_set_value(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT16(cmd->data[UINT32_SIZE + UINT16_SIZE]),
					VRS_VALUE_TYPE_UINT8,
					cmd->id - CMD_TAG_SET_UINT8 + 1,
					&cmd->data[UINT32_SIZE + UINT16_SIZE + UINT16_SIZE]);
		}
		break;
	case CMD_TAG_SET_UINT16:
	case CMD_TAG_SET_VEC2_UINT16:
	case CMD_TAG_SET_VEC3_UINT16:
	case CMD_TAG_SET_VEC4_UINT16:
		if(vc_ctx->vfs.receive_tag_set_value != NULL) {
			vc_ctx->vfs.receive_tag_set_value(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT16(cmd->data[UINT32_SIZE + UINT16_SIZE]),
					VRS_VALUE_TYPE_UINT16,
					cmd->id - CMD_TAG_SET_UINT16 + 1 ,
					&cmd->data[UINT32_SIZE + UINT16_SIZE + UINT16_SIZE]);
		}
		break;
	case CMD_TAG_SET_UINT32:
	case CMD_TAG_SET_VEC2_UINT32:
	case CMD_TAG_SET_VEC3_UINT32:
	case CMD_TAG_SET_VEC4_UINT32:
		if(vc_ctx->vfs.receive_tag_set_value != NULL) {
			vc_ctx->vfs.receive_tag_set_value(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT16(cmd->data[UINT32_SIZE + UINT16_SIZE]),
					VRS_VALUE_TYPE_UINT32,
					cmd->id - CMD_TAG_SET_UINT32 + 1,
					&cmd->data[UINT32_SIZE + UINT16_SIZE + UINT16_SIZE]);
		}
		break;
	case CMD_TAG_SET_UINT64:
	case CMD_TAG_SET_VEC2_UINT64:
	case CMD_TAG_SET_VEC3_UINT64:
	case CMD_TAG_SET_VEC4_UINT64:
		if(vc_ctx->vfs.receive_tag_set_value != NULL) {
			vc_ctx->vfs.receive_tag_set_value(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT16(cmd->data[UINT32_SIZE + UINT16_SIZE]),
					VRS_VALUE_TYPE_UINT64,
					cmd->id - CMD_TAG_SET_UINT64 + 1,
					&cmd->data[UINT32_SIZE + UINT16_SIZE + UINT16_SIZE]);
		}
		break;
	case CMD_TAG_SET_REAL16:
	case CMD_TAG_SET_VEC2_REAL16:
	case CMD_TAG_SET_VEC3_REAL16:
	case CMD_TAG_SET_VEC4_REAL16:
		if(vc_ctx->vfs.receive_tag_set_value != NULL) {
			vc_ctx->vfs.receive_tag_set_value(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT16(cmd->data[UINT32_SIZE + UINT16_SIZE]),
					VRS_VALUE_TYPE_REAL16,
					cmd->id - CMD_TAG_SET_REAL16 + 1,
					&cmd->data[UINT32_SIZE + UINT16_SIZE + UINT16_SIZE]);
		}
		break;
	case CMD_TAG_SET_REAL32:
	case CMD_TAG_SET_VEC2_REAL32:
	case CMD_TAG_SET_VEC3_REAL32:
	case CMD_TAG_SET_VEC4_REAL32:
		if(vc_ctx->vfs.receive_tag_set_value != NULL) {
			vc_ctx->vfs.receive_tag_set_value(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT16(cmd->data[UINT32_SIZE + UINT16_SIZE]),
					VRS_VALUE_TYPE_REAL32,
					cmd->id - CMD_TAG_SET_REAL32 + 1,
					&cmd->data[UINT32_SIZE + UINT16_SIZE + UINT16_SIZE]);
		}
		break;
	case CMD_TAG_SET_REAL64:
	case CMD_TAG_SET_VEC2_REAL64:
	case CMD_TAG_SET_VEC3_REAL64:
	case CMD_TAG_SET_VEC4_REAL64:
		if(vc_ctx->vfs.receive_tag_set_value != NULL) {
			vc_ctx->vfs.receive_tag_set_value(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT16(cmd->data[UINT32_SIZE + UINT16_SIZE]),
					VRS_VALUE_TYPE_REAL64,
					cmd->id - CMD_TAG_SET_REAL64 + 1,
					&cmd->data[UINT32_SIZE + UINT16_SIZE + UINT16_SIZE]);
		}
		break;
	case CMD_TAG_SET_STRING8:
		if(vc_ctx->vfs.receive_tag_set_value != NULL) {
			vc_ctx->vfs.receive_tag_set_value(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT16(cmd->data[UINT32_SIZE + UINT16_SIZE]),
					VRS_VALUE_TYPE_STRING8,
					1,
					PTR(cmd->data[UINT32_SIZE + UINT16_SIZE + UINT16_SIZE]));
		}
		break;
	case CMD_LAYER_CREATE:
		if(vc_ctx->vfs.receive_layer_create != NULL) {
			vc_ctx->vfs.receive_layer_create(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT16(cmd->data[UINT32_SIZE + UINT16_SIZE]),
					UINT8(cmd->data[UINT32_SIZE + UINT16_SIZE + UINT16_SIZE]),
					UINT8(cmd->data[UINT32_SIZE + UINT16_SIZE + UINT16_SIZE + UINT8_SIZE]),
					UINT16(cmd->data[UINT32_SIZE + UINT16_SIZE + UINT16_SIZE + UINT8_SIZE  + UINT8_SIZE]));
		}
		break;
	case CMD_LAYER_DESTROY:
		if(vc_ctx->vfs.receive_layer_destroy != NULL) {
			vc_ctx->vfs.receive_layer_destroy(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]));
		}
		break;
	case CMD_LAYER_SUBSCRIBE:
		if(vc_ctx->vfs.receive_layer_subscribe != NULL) {
			vc_ctx->vfs.receive_layer_subscribe(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT32(cmd->data[UINT32_SIZE+UINT16_SIZE]),
					UINT32(cmd->data[UINT32_SIZE+UINT16_SIZE+UINT32_SIZE]));
		}
		break;
	case CMD_LAYER_UNSUBSCRIBE:
		if(vc_ctx->vfs.receive_layer_unsubscribe != NULL) {
			vc_ctx->vfs.receive_layer_unsubscribe(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT32(cmd->data[UINT32_SIZE+UINT16_SIZE]),
					UINT32(cmd->data[UINT32_SIZE+UINT16_SIZE+UINT32_SIZE]));
		}
		break;
	case CMD_LAYER_UNSET_VALUE:
		if(vc_ctx->vfs.receive_layer_unset_value != NULL) {
			vc_ctx->vfs.receive_layer_unset_value(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT32(cmd->data[UINT32_SIZE + UINT16_SIZE]));
		}
		break;
	case CMD_LAYER_SET_UINT8:
	case CMD_LAYER_SET_VEC2_UINT8:
	case CMD_LAYER_SET_VEC3_UINT8:
	case CMD_LAYER_SET_VEC4_UINT8:
		if(vc_ctx->vfs.receive_layer_set_value != NULL) {
			vc_ctx->vfs.receive_layer_set_value(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT32(cmd->data[UINT32_SIZE + UINT16_SIZE]),
					VRS_VALUE_TYPE_UINT8,
					cmd->id - CMD_LAYER_SET_UINT8 + 1,
					&cmd->data[UINT32_SIZE + UINT16_SIZE + UINT32_SIZE]);
		}
		break;
	case CMD_LAYER_SET_UINT16:
	case CMD_LAYER_SET_VEC2_UINT16:
	case CMD_LAYER_SET_VEC3_UINT16:
	case CMD_LAYER_SET_VEC4_UINT16:
		if(vc_ctx->vfs.receive_layer_set_value != NULL) {
			vc_ctx->vfs.receive_layer_set_value(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT32(cmd->data[UINT32_SIZE + UINT16_SIZE]),
					VRS_VALUE_TYPE_UINT16,
					cmd->id - CMD_LAYER_SET_UINT16 + 1,
					&cmd->data[UINT32_SIZE + UINT16_SIZE + UINT32_SIZE]);
		}
		break;
	case CMD_LAYER_SET_UINT32:
	case CMD_LAYER_SET_VEC2_UINT32:
	case CMD_LAYER_SET_VEC3_UINT32:
	case CMD_LAYER_SET_VEC4_UINT32:
		if(vc_ctx->vfs.receive_layer_set_value != NULL) {
			vc_ctx->vfs.receive_layer_set_value(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT32(cmd->data[UINT32_SIZE + UINT16_SIZE]),
					VRS_VALUE_TYPE_UINT32,
					cmd->id - CMD_LAYER_SET_UINT32 + 1,
					&cmd->data[UINT32_SIZE + UINT16_SIZE + UINT32_SIZE]);
		}
		break;
	case CMD_LAYER_SET_UINT64:
	case CMD_LAYER_SET_VEC2_UINT64:
	case CMD_LAYER_SET_VEC3_UINT64:
	case CMD_LAYER_SET_VEC4_UINT64:
		if(vc_ctx->vfs.receive_layer_set_value != NULL) {
			vc_ctx->vfs.receive_layer_set_value(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT32(cmd->data[UINT32_SIZE + UINT16_SIZE]),
					VRS_VALUE_TYPE_UINT64,
					cmd->id - CMD_LAYER_SET_UINT64 + 1,
					&cmd->data[UINT32_SIZE + UINT16_SIZE + UINT32_SIZE]);
		}
		break;
	case CMD_LAYER_SET_REAL16:
	case CMD_LAYER_SET_VEC2_REAL16:
	case CMD_LAYER_SET_VEC3_REAL16:
	case CMD_LAYER_SET_VEC4_REAL16:
		if(vc_ctx->vfs.receive_layer_set_value != NULL) {
			vc_ctx->vfs.receive_layer_set_value(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT32(cmd->data[UINT32_SIZE + UINT16_SIZE]),
					VRS_VALUE_TYPE_REAL16,
					cmd->id - CMD_LAYER_SET_REAL16 + 1,
					&cmd->data[UINT32_SIZE + UINT16_SIZE + UINT32_SIZE]);
		}
		break;
	case CMD_LAYER_SET_REAL32:
	case CMD_LAYER_SET_VEC2_REAL32:
	case CMD_LAYER_SET_VEC3_REAL32:
	case CMD_LAYER_SET_VEC4_REAL32:
		if(vc_ctx->vfs.receive_layer_set_value != NULL) {
			vc_ctx->vfs.receive_layer_set_value(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT32(cmd->data[UINT32_SIZE + UINT16_SIZE]),
					VRS_VALUE_TYPE_REAL32,
					cmd->id - CMD_LAYER_SET_REAL32 + 1,
					&cmd->data[UINT32_SIZE + UINT16_SIZE + UINT32_SIZE]);
		}
		break;
	case CMD_LAYER_SET_REAL64:
	case CMD_LAYER_SET_VEC2_REAL64:
	case CMD_LAYER_SET_VEC3_REAL64:
	case CMD_LAYER_SET_VEC4_REAL64:
		if(vc_ctx->vfs.receive_layer_set_value != NULL) {
			vc_ctx->vfs.receive_layer_set_value(session_id,
					UINT32(cmd->data[0]),
					UINT16(cmd->data[UINT32_SIZE]),
					UINT32(cmd->data[UINT32_SIZE + UINT16_SIZE]),
					VRS_VALUE_TYPE_REAL64,
					cmd->id - CMD_LAYER_SET_REAL64 + 1,
					&cmd->data[UINT32_SIZE + UINT16_SIZE + UINT32_SIZE]);
		}
		break;
	default:
		v_print_log(VRS_PRINT_ERROR, "This command: %d is not supported yet\n", cmd->id);
		break;
	}
}
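
A minimal client-side sketch of how one of these callbacks is typically wired up before connecting. The callback signature mirrors the CMD_NODE_CREATE dispatch above; the vrs_register_receive_node_create() registration call is assumed from the public Verse client API.

#include <stdio.h>
#include <stdint.h>

static void cb_receive_node_create(const uint8_t session_id,
		const uint32_t node_id,
		const uint32_t parent_id,
		const uint16_t user_id,
		const uint16_t type)
{
	/* Arguments arrive in the same order as in the dispatch above:
	 * node id, parent id, owner (user) id and custom node type */
	printf("session: %d node: %u parent: %u owner: %d type: %d\n",
			session_id, (unsigned int)node_id, (unsigned int)parent_id,
			user_id, type);
}

static void vc_register_callbacks_sketch(void)
{
	/* Registration before vrs_send_connect_request() is called; the
	 * registration function name is assumed from the Verse client API */
	vrs_register_receive_node_create(cb_receive_node_create);
}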
Example #21
0
File: vs_auth_pam.c Project: verse/verse
/* Verse server conversation function with PAM */
int vs_pam_conv(int num_msg,
					const struct pam_message **msg,
					struct pam_response **resp,
					void *appdata_ptr)
{
	struct pam_response *r;
	int i;

	*resp = NULL;

	if( appdata_ptr == NULL )
		return PAM_CONV_ERR;

	v_print_log(VRS_PRINT_DEBUG_MSG, "PAM: %s called with %d messages\n", __func__, num_msg);

	if( (num_msg <= 0) || (num_msg > PAM_MAX_NUM_MSG))
		return PAM_CONV_ERR;

	if( (r=(struct pam_response*)calloc(num_msg, sizeof(struct pam_response))) == NULL)
		return PAM_BUF_ERR;

	for(i=0; i<num_msg; i++) {
		r[i].resp = NULL;
		r[i].resp_retcode = 0;

		switch(msg[i]->msg_style) {
			case PAM_PROMPT_ECHO_ON:
				v_print_log(VRS_PRINT_DEBUG_MSG, "PAM: %s:%d PAM_PROMPT_ECHO_ON %s\n",
						__func__, __LINE__, msg[i]->msg);
				break;
			case PAM_PROMPT_ECHO_OFF:
				v_print_log(VRS_PRINT_DEBUG_MSG, "PAM: %s:%d PAM_PROMPT_ECHO_OFF %s\n",
						__func__, __LINE__, msg[i]->msg);
				if(strncmp("Password:"******"PAM: %s:%d PAM_ERROR_MSG %s\n",
						__func__, __LINE__, msg[i]->msg);
				break;
			case PAM_TEXT_INFO:
				v_print_log(VRS_PRINT_DEBUG_MSG, "PAM: %s:%d PAM_TEXT_INFO %s\n",
						__func__, __LINE__, msg[i]->msg);
				break;
			default:
				goto fail;
		}
	}
	*resp = r;

	return PAM_SUCCESS;
fail:
	v_print_log(VRS_PRINT_DEBUG_MSG, "PAM: fail\n");

	for(i=0; i < num_msg; i++) {
		if (r[i].resp != NULL) {
			memset(r[i].resp, 0, strlen(r[i].resp));
			free(r[i].resp);
			r[i].resp = NULL;
		}
	}

	memset(r, 0, num_msg*sizeof(*r));
	free(r);
	r = NULL;

	*resp = NULL;

	return PAM_CONV_ERR;
}
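
A minimal sketch of how this conversation function could be plugged into PAM, assuming the service name "verse" and that the plain-text password is passed as appdata_ptr (as the PAM_PROMPT_ECHO_OFF branch above expects); pam_start(), pam_authenticate() and pam_end() are standard PAM calls.

#include <security/pam_appl.h>

static int vs_pam_auth_sketch(const char *username, char *password)
{
	pam_handle_t *pamh = NULL;
	struct pam_conv conv = { vs_pam_conv, (void*)password };
	int ret;

	if( (ret = pam_start("verse", username, &conv, &pamh)) != PAM_SUCCESS)
		return 0;

	/* PAM asks the questions; vs_pam_conv() answers them with the password */
	ret = pam_authenticate(pamh, 0);

	pam_end(pamh, ret);

	return (ret == PAM_SUCCESS) ? 1 : 0;
}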
Example #22
0
File: verse.c Project: laishi/verse
int32_t vrs_send_connect_request(const char *hostname,
		const char *service,
		const uint16_t flags,
		uint8_t *session_id)
{
	struct VSession *vsession = NULL;
	uint16 _flags = flags;
	int already_connected = 0, i, ret;

	/* Check if CTX was initialized (initialization is done) */
	if(vc_ctx == NULL) {
		v_print_log(VRS_PRINT_ERROR,
				"Basic callback functions were not set.\n");
		return VRS_NO_CB_FUNC;
	} else {
		/* Check if all needed callback functions were set up */
		if(vc_ctx->vfs.receive_connect_accept == NULL) {
			v_print_log(VRS_PRINT_ERROR,
					"receive_connect_accept() callback functions was not set.\n");
			return VRS_NO_CB_CONN_FUNC;
		}
		if(vc_ctx->vfs.receive_connect_terminate == NULL) {
			v_print_log(VRS_PRINT_ERROR,
					"receive_connect_terminate() callback functions was not set.\n");
			return VRS_NO_CB_TERM_FUNC;
		}
		if(vc_ctx->vfs.receive_user_authenticate == NULL) {
			v_print_log(VRS_PRINT_ERROR,
					"receive_user_authenticat() callback functions was not set.\n");
			return VRS_NO_CB_USER_AUTH;
		}
	}

	/* Set security protocol */
#if OPENSSL_VERSION_NUMBER >= 0x10000000
	/* Check consistency of flags */
	if((_flags & VRS_SEC_DATA_NONE) && (_flags & VRS_SEC_DATA_TLS)) {
		if(is_log_level(VRS_PRINT_ERROR))
			v_print_log(VRS_PRINT_ERROR,
					"VRS_SEC_DATA_NONE or VRS_SEC_DATA_TLS could be set, not both.\n");
		return VRS_FAILURE;
	}
#else
	if (_flags & VRS_SEC_DATA_TLS) {
		v_print_log(VRS_PRINT_WARNING,
				"flag VRS_SEC_DATA_TLS could be set due to low version of OpenSSL (at least 1.0 is required).\n");
		_flags &= ~VRS_SEC_DATA_TLS;
		_flags |= VRS_SEC_DATA_NONE;
	}
#endif

	/* Set transport protocol */
	if((_flags & VRS_TP_UDP) && (_flags & VRS_TP_TCP)) {
		if(is_log_level(VRS_PRINT_ERROR))
			v_print_log(VRS_PRINT_ERROR,
					"VRS_TP_UDP or VRS_TP_TCP could be set, not both.\n");
		return VRS_FAILURE;
	} else if(!(_flags & VRS_TP_UDP) && !(_flags & VRS_TP_TCP)) {
		/* When no transport protocol is selected, then use UDP as default */
		_flags |= VRS_TP_UDP;
	}

	pthread_mutex_lock(&vc_ctx->mutex);

	/* Check if this client isn't already connected to this server or isn't
	 * trying to connect to the server with hostname:service */
	for(i=0; i<vc_ctx->max_sessions; i++) {
		if(vc_ctx->vsessions[i] != NULL) {
			if(strcmp(vc_ctx->vsessions[i]->peer_hostname, hostname) == 0 &&
					strcmp(vc_ctx->vsessions[i]->service, service) == 0) {
				v_print_log(VRS_PRINT_ERROR, "Client already connected to this server.\n");
				already_connected = 1;
				break;
			}
		}
	}

	if(already_connected == 0) {
		/* Try to find free verse session slot */
		for(i=0; i<vc_ctx->max_sessions; i++) {
			/* When free VSession slot is found, then create new session */
			if(vc_ctx->vsessions[i]==NULL) {
				vsession = (struct VSession*)malloc(sizeof(struct VSession));
				v_init_session(vsession);
				vsession->peer_hostname = strdup(hostname);
				vsession->service = strdup(service);
				/* Copy flags */
				vsession->flags = _flags;
				vsession->in_queue = (struct VInQueue*)calloc(1, sizeof(VInQueue));
				v_in_queue_init(vsession->in_queue, IN_QUEUE_DEFAULT_MAX_SIZE);
				vsession->out_queue = (struct VOutQueue*)calloc(1, sizeof(VOutQueue));
				v_out_queue_init(vsession->out_queue, OUT_QUEUE_DEFAULT_MAX_SIZE);

				vc_ctx->vsessions[i] = vsession;
				break;
			}
		}
	}

	pthread_mutex_unlock(&vc_ctx->mutex);

	if(already_connected == 1) {
		return VRS_FAILURE;
	}

	/* Check if we found free slot for new session */
	if(vsession == NULL) {
		v_print_log(VRS_PRINT_DEBUG_MSG,
				"Maximal count of sessions: %d reached.\n",
				vc_ctx->max_sessions);
		return VRS_FAILURE;
	}

	vsession->session_id = vc_ctx->session_counter++;
	*session_id = vsession->session_id;

	/* Try to initialize thread attributes */
	if( (ret = pthread_attr_init(&vsession->tcp_thread_attr)) != 0 ) {
		if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "pthread_attr_init(): %s\n", strerror(ret));
		return VRS_FAILURE;
	}

	/* Try to set thread attributes as detached */
	if( (ret = pthread_attr_setdetachstate(&vsession->tcp_thread_attr, PTHREAD_CREATE_DETACHED)) != 0) {
		if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "pthread_attr_setdetachstate(): %s\n", strerror(ret));
		return VRS_FAILURE;
	}

	/* Create thread for new client session */
	if(pthread_create(&vsession->tcp_thread, &vsession->tcp_thread_attr, vc_new_session_thread, (void*)vsession) != 0) {
		if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "Thread creation failed.\n");
		pthread_mutex_lock(&vc_ctx->mutex);
		v_destroy_session(vsession);
		free(vsession);
		vc_ctx->vsessions[i] = NULL;
		pthread_mutex_unlock(&vc_ctx->mutex);
		return VRS_FAILURE;
	}

	/* Destroy thread attributes */
	pthread_attr_destroy(&vsession->tcp_thread_attr);

	return VRS_SUCCESS;
}
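
A minimal sketch of client usage; the hostname, the port string "12345" and the flag combination are assumptions, and the receive_* callbacks checked at the top of the function must have been registered beforehand.

static int vc_connect_sketch(void)
{
	uint8_t session_id;
	int32_t ret;

	/* The callbacks tested at the beginning of vrs_send_connect_request()
	 * are assumed to be registered already */
	ret = vrs_send_connect_request("localhost", "12345",
			VRS_SEC_DATA_NONE | VRS_TP_TCP, &session_id);

	if(ret != VRS_SUCCESS) {
		/* No free session slot, duplicate connection or missing callbacks */
		return 0;
	}

	return 1;
}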
Example #23
0
/**
 * \brief WebSocket callback function for received message
 */
void vs_ws_recv_msg_callback(wslay_event_context_ptr wslay_ctx,
		const struct wslay_event_on_msg_recv_arg *arg,
		void *user_data)
{
	struct vContext *C = (struct vContext*)user_data;
	struct VS_CTX *vs_ctx = CTX_server_ctx(C);
	struct VSession *session = CTX_current_session(C);
	struct IO_CTX *io_ctx = CTX_io_ctx(C);
	int ret;

	if(!wslay_is_ctrl_frame(arg->opcode)) {

		/* The Verse server uses only binary messages for communication */
		if(arg->opcode == WSLAY_BINARY_FRAME) {
		    struct wslay_event_msg msgarg;

#if DEBUG_WEB_SOCKET
		    unsigned int i;

			v_print_log(VRS_PRINT_DEBUG_MSG,
					"WS Callback received binary message\n");

			v_print_log(VRS_PRINT_DEBUG_MSG,
					"Binary dump\n");

			/* Print dump of received data */
			for(i=0; i<arg->msg_length; i++) {
				v_print_log_simple(VRS_PRINT_DEBUG_MSG, "%d,", arg->msg[i]);
			}
			v_print_log_simple(VRS_PRINT_DEBUG_MSG, "\n");
#endif

			/* Copy received data to IO context */
			memcpy(io_ctx->buf,	arg->msg, arg->msg_length);
			io_ctx->buf_size = arg->msg_length;

			if(session->stream_conn->host_state == TCP_SERVER_STATE_STREAM_OPEN) {
				if(v_STREAM_handle_messages(C) != 0) {
					/* When some payload data were received, then poke data thread */
					sem_post(vs_ctx->data.sem);
				} else {
					/* End connection */
					session->stream_conn->host_state = TCP_SERVER_STATE_CLOSING;

					/* Try to close connection with WebSocket client */
					wslay_event_queue_close(wslay_ctx,
							WSLAY_CODE_PROTOCOL_ERROR,
							(uint8_t*)"Wrong command",	/* Close message */
							13);	/* The length of close message */
					return;
				}
			} else {
				if( vs_handle_handshake(C) == -1 ) {
					/* End connection */
					session->stream_conn->host_state = TCP_SERVER_STATE_CLOSING;

					/* Try to close connection with WebSocket client */
					wslay_event_queue_close(wslay_ctx,
							WSLAY_CODE_PROTOCOL_ERROR,
							(uint8_t*)"Wrong command",	/* Close message */
							13);	/* The length of close message */
					return;
				}

				/* During handshake send response immediately. */

				/* TODO: optionally send message fragmented, when it is needed using:
				 * wslay_event_queue_fragmented_msg() */
				msgarg.opcode = WSLAY_BINARY_FRAME;
				msgarg.msg = (uint8_t*)io_ctx->buf;
				msgarg.msg_length = io_ctx->buf_size;

				/* Queue message for sending */
				if((ret = wslay_event_queue_msg(wslay_ctx, &msgarg)) != 0) {
					v_print_log(VRS_PRINT_ERROR,
							"Unable to queue message to WebSocket: %d\n", ret);
					return;
				} else {
					v_print_log(VRS_PRINT_DEBUG_MSG,
							"WebSocket message successfully queued\n");
				}
			}

		} else if(arg->opcode == WSLAY_TEXT_FRAME) {
			v_print_log(VRS_PRINT_ERROR, "WebSocket text frame is not supported\n");
			return;
		}

	} else {
		/* Print opcode of control message */
		v_print_log(VRS_PRINT_DEBUG_MSG,
				"WS Callback Received Ctrl Message: opcode: %d\n",
				arg->opcode);

		/* Is it closing message? */
		if(arg->opcode & WSLAY_CONNECTION_CLOSE) {

			v_print_log(VRS_PRINT_DEBUG_MSG,
					"Close message with code: %d, message: %s\n",
					arg->status_code,
					arg->msg);

			/* When this control message is received for the second time,
			 * switch to the state CLOSED. Otherwise switch to the state
			 * CLOSING */
			if(session->stream_conn->host_state == TCP_SERVER_STATE_CLOSING) {
				session->stream_conn->host_state = TCP_SERVER_STATE_CLOSED;
			} else {
				session->stream_conn->host_state = TCP_SERVER_STATE_CLOSING;
				/* When server wasn't in the state closing, then send
				 * confirmation to the client, that this connection will be
				 * closed */
				wslay_event_queue_close(wslay_ctx,
						WSLAY_CODE_NORMAL_CLOSURE,
						(uint8_t*)"Closing connection",
						18);	/* The length of the close message */
			}
		}
	}
}
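
A minimal sketch of how this callback might be registered with wslay for an accepted WebSocket connection. The recv/send data callback names are assumptions (they would be implemented elsewhere in the server); the field order follows struct wslay_event_callbacks from the wslay API.

static int vs_ws_init_wslay_sketch(struct vContext *C,
		wslay_event_context_ptr *wslay_ctx)
{
	struct wslay_event_callbacks callbacks = {
		vs_ws_recv_data_callback,	/* recv_callback (assumed name) */
		vs_ws_send_data_callback,	/* send_callback (assumed name) */
		NULL,				/* genmask_callback (server does not mask frames) */
		NULL,				/* on_frame_recv_start_callback */
		NULL,				/* on_frame_recv_chunk_callback */
		NULL,				/* on_frame_recv_end_callback */
		vs_ws_recv_msg_callback		/* on_msg_recv_callback (shown above) */
	};

	/* The vContext C is passed as user_data, exactly as the callback above
	 * expects it */
	return wslay_event_context_server_init(wslay_ctx, &callbacks, C);
}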
Example #24
0
/**
 * \brief This function tries to pack a message that is going to be
 * sent in the STREAM OPEN state
 *
 * \param[in] *C The pointer at the context
 *
 * \return This function returns 1 when there is something to send,
 * -1 when there is nothing to send and 0 when there is some error
 */
int v_STREAM_pack_message(struct vContext *C)
{
	struct VSession *vsession = CTX_current_session(C);
	struct VStreamConn *conn = CTX_current_stream_conn(C);
	struct IO_CTX *io_ctx = CTX_io_ctx(C);
	struct VMessage *s_message = CTX_s_message(C);
	struct Generic_Cmd *cmd;
	int ret = -1, queue_size = 0, buffer_pos = 0, prio_cmd_count, cmd_rank=0;
	int8 cmd_share;
	int16 prio, max_prio, min_prio;
	uint16 cmd_count, cmd_len, prio_win, swin, sent_size, tot_cmd_size;
	real32 prio_sum_high, prio_sum_low, r_prio;

	/* Is there something to send? */
	if((v_out_queue_get_count(vsession->out_queue) > 0) ||
			(vsession->tmp_flags & SYS_CMD_NEGOTIATE_FPS))
	{

		/* Get current size of data in TCP outgoing buffer */
#ifdef __linux__
		if( ioctl(io_ctx->sockfd, SIOCOUTQ, &queue_size) == -1 ) {
			perror("ioctl()");
			return 0;
		}
#endif

		/* Compute how much data could be added to the TCP stack */
		swin = conn->socket_buffer_size - queue_size;

		buffer_pos = VERSE_MESSAGE_HEADER_SIZE;

		s_message->sys_cmd[0].cmd.id = CMD_RESERVED_ID;

		/* When the negotiated and the currently used FPS differ, then pack a
		 * negotiate command for the FPS */
		if(vsession->fps_host != vsession->fps_peer) {
			cmd_rank += v_add_negotiate_cmd(s_message->sys_cmd, cmd_rank,
					CMD_CHANGE_L_ID, FTR_FPS, &vsession->fps_host, NULL);
		} else {
			if(vsession->tmp_flags & SYS_CMD_NEGOTIATE_FPS) {
				cmd_rank += v_add_negotiate_cmd(s_message->sys_cmd, cmd_rank,
						CMD_CONFIRM_L_ID, FTR_FPS, &vsession->fps_peer, NULL);
				/* Send confirmation only once for received system command */
				vsession->tmp_flags &= ~SYS_CMD_NEGOTIATE_FPS;
			}
		}

		buffer_pos += v_pack_stream_system_commands(s_message, &io_ctx->buf[buffer_pos]);

		max_prio = v_out_queue_get_max_prio(vsession->out_queue);
		min_prio = v_out_queue_get_min_prio(vsession->out_queue);

		prio_sum_high = v_out_queue_get_prio_sum_high(vsession->out_queue);
		prio_sum_low = v_out_queue_get_prio_sum_low(vsession->out_queue);

		v_print_log(VRS_PRINT_DEBUG_MSG, "Packing prio queues, cmd count: %d\n",
				v_out_queue_get_count(vsession->out_queue));

		/* Go through all priorities and pick commands from priority queues */
		for(prio = max_prio; prio >= min_prio; prio--)
		{
			prio_cmd_count = v_out_queue_get_count_prio(vsession->out_queue, prio);

			if(prio_cmd_count > 0) {

				r_prio = v_out_queue_get_prio(vsession->out_queue, prio);

				/* Compute size of buffer that could be occupied by
				 * commands from this queue */
				if(prio >= VRS_DEFAULT_PRIORITY) {
					prio_win = ((swin - buffer_pos)*r_prio)/prio_sum_high;
				} else {
					prio_win = ((swin - buffer_pos)*r_prio)/prio_sum_low;
				}

				/* Debug print */
				v_print_log(VRS_PRINT_DEBUG_MSG, "Queue: %d, count: %d, r_prio: %6.3f, prio_win: %d\n",
						prio, prio_cmd_count, r_prio, prio_win);

				/* Get total size of commands that were stored in queue (sent_size) */
				sent_size = 0;
				tot_cmd_size = 0;

				while(prio_cmd_count > 0) {
					cmd_share = 0;
					cmd_count = 0;
					cmd_len = prio_win - sent_size;

					/* Pack commands from queues with high priority to the buffer */
					cmd = v_out_queue_pop(vsession->out_queue, prio, &cmd_count, &cmd_share, &cmd_len);
					if(cmd != NULL) {

						/* Is this command fake command? */
						if(cmd->id < MIN_CMD_ID) {
							if(cmd->id == FAKE_CMD_CONNECT_TERMINATE) {
								/* TODO */
							} else if(cmd->id == FAKE_CMD_FPS) {
								struct Fps_Cmd *fps_cmd = (struct Fps_Cmd*)cmd;
								/* Change value of FPS. It will be sent in negotiate command
								 * until it is confirmed by the peer (server) */
								vsession->fps_host = fps_cmd->fps;
							}
						} else {
							buffer_pos += tot_cmd_size = v_cmd_pack(&io_ctx->buf[buffer_pos], cmd, v_cmd_size(cmd), 0);
							v_cmd_print(VRS_PRINT_DEBUG_MSG, cmd);
							sent_size += tot_cmd_size;
						}

						/* It is not necessary to put the cmd to the history of sent commands,
						 * when TCP is used. */
						v_cmd_destroy(&cmd);
						prio_cmd_count--;
					} else {
						break;
					}
				}
			}
		}

		s_message->header.len = io_ctx->buf_size = buffer_pos;
		s_message->header.version = VRS_VERSION;

		/* Pack header to the beginning of the buffer */
		v_pack_message_header(s_message, io_ctx->buf);

		/* Debug print of command to be send */
		if(is_log_level(VRS_PRINT_DEBUG_MSG)) {
			v_print_send_message(C);
		}

		ret = 1;

	}

	return ret;
}
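
A small worked example of the per-queue window computation above, with illustrative numbers: if swin - buffer_pos is 6000 bytes and two queues with priorities at or above VRS_DEFAULT_PRIORITY have r_prio values of 2.0 and 1.0 (so prio_sum_high is 3.0), then the first queue gets prio_win = 6000*2.0/3.0 = 4000 bytes and the second one 2000 bytes; queues below the default priority are sized the same way, only against prio_sum_low.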
Example #25
0
/**
 * \brief This function packs and compresses commands into the packet from one
 * priority queue.
 *
 * \param[in]	*C	The verse context
 * \param[in]	*sent_packet	The pointer at the structure with the sent packet
 * \param[in]	buffer_pos		The current size of the buffer of the sent packet
 * \param[in]	prio			The priority of the sub queue
 * \param[in]	prio_win		The window size of the current prio queue
 * \param[out]	tot_cmd_size	The total size of commands that were popped from the prio queue
 */
static int pack_prio_queue(struct vContext *C,
		struct VSent_Packet *sent_packet,
		int buffer_pos,
		uint8 prio,
		uint16 prio_win,
		uint16 *tot_cmd_size)
{
	struct VSession *vsession = CTX_current_session(C);
	struct VDgramConn *vconn = CTX_current_dgram_conn(C);
	struct IO_CTX *io_ctx = CTX_io_ctx(C);
	struct Generic_Cmd *cmd;
	int ret, last_cmd_count = 0;
	uint16 cmd_count, cmd_len, cmd_size, sum_len=0;
	int8 cmd_share;
	uint8  last_cmd_id = CMD_RESERVED_ID;

	while( (v_out_queue_get_count_prio(vsession->out_queue, prio) > 0) &&
			(sum_len < prio_win) &&
			(buffer_pos < vconn->io_ctx.mtu))
	{
		cmd_count = 0;
		cmd_share = 0;

		/* Compute how many commands can be compressed into the packet
		 * and the right length of the compressed commands. */
		cmd_len = ((prio_win - sum_len)<(vconn->io_ctx.mtu - buffer_pos)) ?
				(prio_win - sum_len) :
				(vconn->io_ctx.mtu - buffer_pos);

		/* Remove command from queue */
		cmd = v_out_queue_pop(vsession->out_queue, prio, &cmd_count, &cmd_share, &cmd_len);

		/* When it is not possible to pop more commands from queue, then break
		 * while loop */
		if(cmd == NULL) {
			break;
		}

		/* Is this command fake command? */
		if(cmd->id < MIN_CMD_ID) {
			if(cmd->id == FAKE_CMD_CONNECT_TERMINATE) {
				struct VS_CTX *vs_ctx = CTX_server_ctx(C);
				if(vs_ctx != NULL) {
					vconn->host_state = UDP_SERVER_STATE_CLOSEREQ;
				} else {
					vconn->host_state = UDP_CLIENT_STATE_CLOSING;
				}
			} else if(cmd->id == FAKE_CMD_FPS) {
				struct Fps_Cmd *fps_cmd = (struct Fps_Cmd*)cmd;
				/* Change value of FPS. It will be sent in negotiate command
				 * until it is confirmed by the peer (server) */
				vsession->fps_host = fps_cmd->fps;
			}
			v_cmd_destroy(&cmd);
		} else {

			/* What was size of command in queue */
			cmd_size = v_cmd_size(cmd);

			if(!(buffer_pos < (vconn->io_ctx.mtu - cmd_size))) {
				/* When there is not enough space for other command,
				 * then push command back to the beginning of queue. */
				v_out_queue_push_head(vsession->out_queue, prio, cmd);
				break;
			} else {

				/* Update total size of commands that were popped from the queue */
				*tot_cmd_size += cmd_size;

				/* When compression is not allowed, then add this command as is */
				if( vconn->host_cmd_cmpr == CMPR_NONE) {
					cmd_count = 0;
					cmd_len = cmd_size;
					/* Debug print */
					v_print_log(VRS_PRINT_DEBUG_MSG, "Cmd: %d, count: %d, length: %d\n",
							cmd->id, cmd_count, cmd_len);
					/* Add command to the buffer */
					buffer_pos += v_cmd_pack(&io_ctx->buf[buffer_pos], cmd, cmd_len, 0);
				} else {
					/* Command compression is allowed; has the ID of the command changed? */
					if( (cmd->id != last_cmd_id) || (last_cmd_count <= 0) )	{
						/* When this command is alone, then use default command size */
						if(cmd_count == 0) {
							cmd_len = cmd_size;
						} else {
							/* FIXME: do not recompute command length here, but do it right,
							 * when command is added to the queue */
							cmd_len = v_cmds_len(cmd, cmd_count, cmd_share, cmd_len);
						}
						/* Debug print */
						v_print_log(VRS_PRINT_DEBUG_MSG, "Cmd: %d, count: %d, length: %d\n",
								cmd->id, cmd_count, cmd_len);
						/* Add command to the buffer */
						buffer_pos += v_cmd_pack(&io_ctx->buf[buffer_pos], cmd, cmd_len, cmd_share);
						/* Set up current count of commands in the line */
						last_cmd_count = cmd_count;
						/* Update summary of commands length */
						sum_len += cmd_len;
					} else {
						buffer_pos += v_cmd_pack(&io_ctx->buf[buffer_pos], cmd, 0, cmd_share);
					}
				}

				/* Print command */
				v_cmd_print(VRS_PRINT_DEBUG_MSG, cmd);

				/* TODO: remove command alias here (layer value set/unset) */

				/* Add command to the packet history */
				ret = v_packet_history_add_cmd(&vconn->packet_history, sent_packet, cmd, prio);
				assert(ret==1);

				/* Update last command id */
				last_cmd_id = cmd->id;
				/* Decrement counter of commands in queue */
				last_cmd_count--;
			}
		}
	}

	return buffer_pos;
}
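
A minimal sketch of how pack_prio_queue() might be driven over all priority queues when filling one datagram; it mirrors the priority loop of the TCP packing function shown earlier in this listing, and the helper name and parameter set are assumptions.

static int pack_all_prio_queues_sketch(struct vContext *C,
		struct VSent_Packet *sent_packet,
		int buffer_pos,
		uint16 swin,
		int16 max_prio,
		int16 min_prio,
		real32 prio_sum_high,
		real32 prio_sum_low)
{
	struct VSession *vsession = CTX_current_session(C);
	uint16 prio_win, tot_cmd_size;
	real32 r_prio;
	int16 prio;

	for(prio = max_prio; prio >= min_prio; prio--) {
		if(v_out_queue_get_count_prio(vsession->out_queue, prio) > 0) {
			r_prio = v_out_queue_get_prio(vsession->out_queue, prio);

			/* Window for this queue within the free space of the packet */
			if(prio >= VRS_DEFAULT_PRIORITY) {
				prio_win = ((swin - buffer_pos)*r_prio)/prio_sum_high;
			} else {
				prio_win = ((swin - buffer_pos)*r_prio)/prio_sum_low;
			}

			tot_cmd_size = 0;
			buffer_pos = pack_prio_queue(C, sent_packet, buffer_pos,
					prio, prio_win, &tot_cmd_size);
		}
	}

	return buffer_pos;
}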
Example #26
0
File: vs_link.c Project: verse/verse
/**
 * \brief This function handles changing a link between nodes
 */
int vs_handle_link_change(struct VS_CTX *vs_ctx,
		struct VSession *vsession,
		struct Generic_Cmd *node_link)
{
	struct VSUser			*user = (struct VSUser*)vsession->user;
	struct VSNode			*old_parent_node, *parent_node, *child_node;
	struct VSLink			*link;
	struct VSNodeSubscriber	*node_subscriber;
	struct VSEntityFollower *node_follower;
	uint32					parent_node_id = UINT32(node_link->data[0]);
	uint32					child_node_id = UINT32(node_link->data[UINT32_SIZE]);
	int						i;

	/* Try to find child node */
	if((child_node = vs_node_find(vs_ctx, child_node_id)) == NULL) {
		v_print_log(VRS_PRINT_DEBUG_MSG, "%s():%d node (id: %d) not found\n",
				__func__, __LINE__, child_node_id);
		return 0;
	}

	/* Child node has to be created */
	if(! (child_node->state == ENTITY_CREATED || child_node->state == ENTITY_CREATING)) {
		v_print_log(VRS_PRINT_DEBUG_MSG,
				"%s():%d node id: %d is not in NODE_CREATED state: %d\n",
				__func__, __LINE__, child_node->id, child_node->state);
		return 0;
	}

	/* Is user owner of child node or can user write to child node? */
	if(vs_node_can_write(vsession, child_node) != 1) {
		v_print_log(VRS_PRINT_DEBUG_MSG,
				"%s():%d user: %s can't write to child node: %d (owner: %s)\n",
				__func__, __LINE__, user->username, child_node->id,
				child_node->owner->username);
		return 0;
	}

	/* Old link */
	link = child_node->parent_link;

	/* Old parent node */
	old_parent_node = link->parent;

	/* Is user owner of old parent node or can user write to old parent node? */
	if(vs_node_can_write(vsession, old_parent_node) != 1) {
		v_print_log(VRS_PRINT_DEBUG_MSG,
				"%s():%d user: %s can't write to old parent node: %d (owner: %s)\n",
				__func__, __LINE__, user->username, old_parent_node->id,
				old_parent_node->owner->username);
		return 0;
	}

	/* Try to find new parent node */
	if((parent_node = vs_node_find(vs_ctx, parent_node_id)) == NULL) {
		v_print_log(VRS_PRINT_DEBUG_MSG, "%s():%d node (id: %d) not found\n",
				__func__, __LINE__, parent_node_id);
		return 0;
	}

	/* Parent node has to be created */
	if(! (parent_node->state == ENTITY_CREATED || parent_node->state == ENTITY_CREATING)) {
		v_print_log(VRS_PRINT_DEBUG_MSG,
				"%s():%d node (id: %d) is not in NODE_CREATED state: %d\n",
				__func__, __LINE__, parent_node->id, parent_node->state);
		return 0;
	}

	/* Test if client doesn't want to recreate existing link */
	if( parent_node == old_parent_node) {
		v_print_log(VRS_PRINT_DEBUG_MSG,
				"%s():%d link between nodes (parent_id: %d) (child_id: %d) already exists\n",
				__func__, __LINE__, parent_node->id, child_node->id);
		return 0;
	}

	/* Is user owner of parent node or can user write to parent node? */
	if(vs_node_can_write(vsession, parent_node) != 1) {
		v_print_log(VRS_PRINT_DEBUG_MSG,
				"%s():%d user: %s can't write to parent node: %d (owner: %s)\n",
				__func__, __LINE__, user->username, parent_node->id,
				parent_node->owner->username);
		return 0;
	}

	/* Test, if new link could be created */
	if(vs_link_test_nodes(parent_node, child_node) != 1) {
		v_print_log(VRS_PRINT_DEBUG_MSG,
				"%s():%d node: %d can't be child of node: %d\n",
				__func__, __LINE__, child_node->id, parent_node->id);
		return 0;
	}

	/* Remove link from old parent node */
	v_list_rem_item(&old_parent_node->children_links, link);

	/* Add link to new parent node */
	v_list_add_tail(&parent_node->children_links, link);
	link->parent = parent_node;

	/* Update child node internal properties according to the new parent node */
	vs_link_update_child(parent_node, child_node);

	/* Update version in child node, parent node and old parent node */
	vs_node_inc_version(parent_node);
	vs_node_inc_version(child_node);
	vs_node_inc_version(old_parent_node);

	/* Subscribers of old and new parent node will receive information about
	 * changing link between nodes. Prevent double sending command Node_Link,
	 * when clients are subscribed to both nodes. */

	/* Set temporary value in all sessions */
	for(i=0; i<vs_ctx->max_sessions; i++) {
		vs_ctx->vsessions[i]->tmp = 0;
	}

	/* Send Node_Link command to subscribers of old parent node and set
	 * session temporary value */
	node_subscriber = old_parent_node->node_subs.first;
	while(node_subscriber != NULL) {
		if(vs_node_can_read(node_subscriber->session, old_parent_node) == 1) {
			node_subscriber->session->tmp = 1;
			vs_link_change_send(node_subscriber, link);
		}
		node_subscriber = node_subscriber->next;
	}

	/* When the client is subscribed to the new parent node and already aware
	 * of the child node, then send the client only node_link */
	node_follower = child_node->node_folls.first;
	while(node_follower != NULL) {
		if(node_follower->node_sub->session->tmp != 1) {
			vs_link_change_send(node_follower->node_sub, link);
			node_follower->node_sub->session->tmp = 1;
		}
		node_follower = node_follower->next;
	}

	/* Send Node_Create command to subscribers of new parent node, when
	 * subscribers were not subscribed to child node */
	node_subscriber = parent_node->node_subs.first;
	while(node_subscriber != NULL) {
		if(node_subscriber->session->tmp != 1) {
			if(vs_node_can_read(node_subscriber->session, parent_node) == 1) {
				vs_node_send_create(node_subscriber, child_node, NULL);
			}
		}
		node_subscriber = node_subscriber->next;
	}

	return 1;
}
Example #27
0
/**
 * \brief This function is called when an acknowledgment packet was received.
 *
 * This function processes all ACK and NAK commands and adds not-obsolete
 * commands from the history of sent packets to the out queue again. This
 * function also removes positively and negatively acknowledged packets from
 * the history of sent packets. The ANK ID is updated.
 *
 * \param[in] *C	The verse context.
 *
 * \return	This function returns the index of the last ACK command in the
 * sequence of system commands when the ACK and NAK commands are at the
 * beginning of the system commands, otherwise it returns -2. When no ACK or
 * NAK command was found, then -1 is returned.
 */
static int handle_ack_nak_commands(struct vContext *C)
{
	struct VSession *vsession = CTX_current_session(C);
	struct VDgramConn *vconn = CTX_current_dgram_conn(C);
	struct VPacket *r_packet = CTX_r_packet(C);
	struct VSent_Packet *sent_packet;
	struct VSent_Command *sent_cmd, *sent_cmd_prev;
	unsigned long int rtt = ULONG_MAX;
	struct timeval tv;
	uint32 ack_id, nak_id;
	int i, ret=-1;

	gettimeofday(&tv, NULL);

	/* Compute SRTT */
	if(r_packet->sys_cmd[0].cmd.id==CMD_ACK_ID) {
		unsigned long int tmp;
		int i=0;

		/* Try to find the smallest RTT from acknowledged packets */
		for(i=0; r_packet->sys_cmd[i].cmd.id!=CMD_RESERVED_ID; i++) {
			if(r_packet->sys_cmd[i].cmd.id==CMD_ACK_ID) {
				sent_packet = v_packet_history_find_packet(&vconn->packet_history,
					r_packet->sys_cmd[i].ack_cmd.pay_id);
				if(sent_packet!=NULL) {
					tmp = packet_rtt(sent_packet, &tv);
					if(tmp<rtt) rtt=tmp;
				}
			}
		}

		if(rtt<ULONG_MAX) {
			/* Computation of SRTT as described in RFC */
			if(vconn->srtt==0) {
				vconn->srtt = rtt;
			} else {
				vconn->srtt = RTT_ALPHA*vconn->srtt + (1-RTT_ALPHA)*rtt;
			}
			v_print_log(VRS_PRINT_DEBUG_MSG, "RTT: %d [us]\n", rtt);
			v_print_log(VRS_PRINT_DEBUG_MSG, "SRTT: %d [us]\n", vconn->srtt);
		}
	}


	/* Process all ACK and NAK commands. ACK and NAK commands should be first
	 * and there should not be other system commands between ACK and NAK
	 * commands. */
	for(i=0;
			r_packet->sys_cmd[i].cmd.id == CMD_NAK_ID ||
					r_packet->sys_cmd[i].cmd.id == CMD_ACK_ID;
			i++) {

		if(r_packet->sys_cmd[i].cmd.id == CMD_ACK_ID) {
			/* Check if ACK and NAK commands are the first system commands */
			if(ret!=-2 && ret==i-1) {
				ret = i;
			} else {
				ret = -2;
			}
			/* If this is not the last ACK command in the sequence of
			 * ACK/NAK commands, then remove from the history of sent
			 * packets all packets that are covered by the following
			 * sub-sequence of ACK commands */
			if(r_packet->sys_cmd[i+1].cmd.id == CMD_NAK_ID ||
					r_packet->sys_cmd[i+1].cmd.id == CMD_ACK_ID)
			{
				/* Remove all acknowledged payload packets from the history
				 * of sent payload packets */
				for(ack_id = r_packet->sys_cmd[i].ack_cmd.pay_id;
						ack_id < r_packet->sys_cmd[i+1].nak_cmd.pay_id;
						ack_id++)
				{
					v_packet_history_rem_packet(C, ack_id);
				}
			} else {
				/* Remove this acknowledged payload packets from the history
				 * of sent payload packets */
				v_packet_history_rem_packet(C, r_packet->sys_cmd[i].ack_cmd.pay_id);
				/* This is the last ACK command in the sequence of ACK/NAK
				 * commands. Update ANK ID. */
				vconn->ank_id = r_packet->sys_cmd[i].ack_cmd.pay_id;
			}
		} else if(r_packet->sys_cmd[i].cmd.id == CMD_NAK_ID) {
			/* Check if ACK and NAK commands are the first system commands */
			if(ret!=-2 && ret==i-1) {
				ret = i;
			} else {
				ret = -2;
			}
			/* Go through the sub-sequence of NAK commands and try to re-send
			 * not-obsolete data from these packets */
			for(nak_id = r_packet->sys_cmd[i].nak_cmd.pay_id;
					nak_id < r_packet->sys_cmd[i+1].ack_cmd.pay_id;
					nak_id++)
			{
				/* Add not obsolete data of lost packet to the outgoing queue */
				/* Update ANK ID */
				sent_packet = v_packet_history_find_packet(&vconn->packet_history, nak_id);
				if(sent_packet != NULL) {
					v_print_log(VRS_PRINT_DEBUG_MSG, "Try to re-send packet: %d\n", nak_id);
					sent_cmd = sent_packet->cmds.last;

					/* Go through all commands in command list and add not
					 * obsolete commands to the outgoing queue */
					while(sent_cmd != NULL) {
						sent_cmd_prev = sent_cmd->prev;
						if(sent_cmd->vbucket != NULL && sent_cmd->vbucket->data != NULL) {

							/* Try to add command back to the outgoing command queue */
							if(v_out_queue_push_head(vsession->out_queue,
									sent_cmd->prio,
									(struct Generic_Cmd*)sent_cmd->vbucket->data) == 1)
							{
								/* Remove bucket from the history of sent commands too */
								v_hash_array_remove_item(&vconn->packet_history.cmd_hist[sent_cmd->id]->cmds, sent_cmd->vbucket->data);

								/* When command was added back to the queue,
								 * then delete only sent command */
								v_list_free_item(&sent_packet->cmds, sent_cmd);

							}
						}
						sent_cmd = sent_cmd_prev;
					}

					/* When all not obsolete commands are added to outgoing
					 * queue, then this packet could be removed from packet
					 * history*/
					v_packet_history_rem_packet(C, nak_id);
				}
			}
		}
	}

	return ret;
}
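
The SRTT update above is the usual exponential weighted moving average, srtt = RTT_ALPHA * srtt + (1 - RTT_ALPHA) * rtt, seeded with the first sample. A minimal, standalone sketch of that smoothing step; the RTT_ALPHA value of 0.875 is an assumed weight for illustration and is not taken from the Verse headers:

#include <stdio.h>

/* Assumed smoothing weight; the real RTT_ALPHA in Verse may differ. */
#define RTT_ALPHA 0.875

/* Update the smoothed RTT the same way handle_ack_nak_commands() does:
 * the first sample initializes srtt, later samples are blended in. */
static unsigned long update_srtt(unsigned long srtt, unsigned long rtt)
{
	if(srtt == 0)
		return rtt;
	return (unsigned long)(RTT_ALPHA * srtt + (1.0 - RTT_ALPHA) * rtt);
}

int main(void)
{
	unsigned long srtt = 0;
	unsigned long samples[] = {40000, 52000, 38000, 45000}; /* RTTs in us */
	int i;

	for(i = 0; i < 4; i++) {
		srtt = update_srtt(srtt, samples[i]);
		printf("sample %lu us -> srtt %lu us\n", samples[i], srtt);
	}
	return 0;
}
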
Example #28
/**
 * \brief This function tries to handle a new connection attempt. When this
 * attempt is successful, it creates a new thread.
 */
static int vs_new_stream_conn(struct vContext *C, void *(*conn_loop)(void*))
{
	VS_CTX *vs_ctx = CTX_server_ctx(C);
	struct IO_CTX *io_ctx = CTX_io_ctx(C);
	struct VSession *current_session = NULL;
	socklen_t addr_len;
	int ret, i;
	static uint32 last_session_id = 0;

	/* Try to find free session */
	for(i=0; i< vs_ctx->max_sessions; i++) {
		if(vs_ctx->vsessions[i]->stream_conn->host_state == TCP_SERVER_STATE_LISTEN) {
			/* TODO: lock mutex here */
			current_session = vs_ctx->vsessions[i];
			current_session->stream_conn->host_state = TCP_SERVER_STATE_RESPOND_METHODS;
			current_session->session_id = last_session_id++;
			/* TODO: unlock mutex here */
			break;
		}
	}

	if(current_session != NULL) {
		struct vContext *new_C;
		int flag;

		/* Try to accept client connection (do TCP handshake) */
		if(io_ctx->host_addr.ip_ver==IPV4) {
			/* Prepare IPv4 variables for TCP handshake */
			struct sockaddr_in *client_addr4 = &current_session->stream_conn->io_ctx.peer_addr.addr.ipv4;
			current_session->stream_conn->io_ctx.peer_addr.ip_ver = IPV4;
			addr_len = sizeof(current_session->stream_conn->io_ctx.peer_addr.addr.ipv4);

			/* Try to do TCP handshake */
			if( (current_session->stream_conn->io_ctx.sockfd = accept(io_ctx->sockfd, (struct sockaddr*)client_addr4, &addr_len)) == -1) {
				if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "accept(): %s\n", strerror(errno));
				return 0;
			}

			/* Save the IPv4 of the client as string in verse session */
			inet_ntop(AF_INET, &client_addr4->sin_addr, current_session->peer_hostname, INET_ADDRSTRLEN);
		} else if(io_ctx->host_addr.ip_ver==IPV6) {
			/* Prepare IPv6 variables for TCP handshake */
			struct sockaddr_in6 *client_addr6 = &current_session->stream_conn->io_ctx.peer_addr.addr.ipv6;
			current_session->stream_conn->io_ctx.peer_addr.ip_ver = IPV6;
			addr_len = sizeof(current_session->stream_conn->io_ctx.peer_addr.addr.ipv6);

			/* Try to do TCP handshake */
			if( (current_session->stream_conn->io_ctx.sockfd = accept(io_ctx->sockfd, (struct sockaddr*)client_addr6, &addr_len)) == -1) {
				if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "accept(): %s\n", strerror(errno));
				return 0;
			}

			/* Save the IPv6 of the client as string in verse session */
			inet_ntop(AF_INET6, &client_addr6->sin6_addr, current_session->peer_hostname, INET6_ADDRSTRLEN);
		}

		/* Set to this socket flag "no delay" */
		flag = 1;
		if(setsockopt(current_session->stream_conn->io_ctx.sockfd,
				IPPROTO_TCP, TCP_NODELAY, &flag, (socklen_t)sizeof(flag)) == -1)
		{
			if(is_log_level(VRS_PRINT_ERROR)) {
				v_print_log(VRS_PRINT_ERROR,
						"setsockopt: TCP_NODELAY: %s\n",
						strerror(errno));
			}
			return -1;
		}

		CTX_current_session_set(C, current_session);
		CTX_current_stream_conn_set(C, current_session->stream_conn);
		CTX_io_ctx_set(C, &current_session->stream_conn->io_ctx);

		if(is_log_level(VRS_PRINT_DEBUG_MSG)) {
			v_print_log(VRS_PRINT_DEBUG_MSG, "New connection from: ");
			v_print_addr_port(VRS_PRINT_DEBUG_MSG, &(current_session->stream_conn->io_ctx.peer_addr));
			v_print_log_simple(VRS_PRINT_DEBUG_MSG, "\n");
		}

		/* Duplicate verse context for new thread */
		new_C = (struct vContext*)calloc(1, sizeof(struct vContext));
		memcpy(new_C, C, sizeof(struct vContext));

		/* Try to initialize thread attributes */
		if( (ret = pthread_attr_init(&current_session->tcp_thread_attr)) !=0 ) {
			if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "pthread_attr_init(): %s\n", strerror(errno));
			return 0;
		}

		/* Try to set thread attributes as detached */
		if( (ret = pthread_attr_setdetachstate(&current_session->tcp_thread_attr, PTHREAD_CREATE_DETACHED)) != 0) {
			if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "pthread_attr_setdetachstate(): %s\n", strerror(errno));
			return 0;
		}

		/* Try to create new thread */
		if((ret = pthread_create(&current_session->tcp_thread, &current_session->tcp_thread_attr, conn_loop, (void*)new_C)) != 0) {
			if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "pthread_create(): %s\n", strerror(errno));
		}

		/* Destroy thread attributes */
		pthread_attr_destroy(&current_session->tcp_thread_attr);
	} else {
		int tmp_sockfd;
		v_print_log(VRS_PRINT_DEBUG_MSG, "Number of session slot: %d reached\n", vs_ctx->max_sessions);
		/* TODO: return some error. Not only accept and close connection. */
		/* Try to accept client connection (do TCP handshake) */
		if(io_ctx->host_addr.ip_ver == IPV4) {
			/* Prepare IPv4 variables for TCP handshake */
			struct sockaddr_in client_addr4;

			addr_len = sizeof(client_addr4);

			/* Try to do TCP handshake */
			if( (tmp_sockfd = accept(io_ctx->sockfd, (struct sockaddr*)&client_addr4, &addr_len)) == -1) {
				if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "accept(): %s\n", strerror(errno));
				return 0;
			}
			/* Close connection (no more free session slots) */
			close(tmp_sockfd);
		} else if(io_ctx->host_addr.ip_ver == IPV6) {
			/* Prepare IPv6 variables for TCP handshake */
			struct sockaddr_in6 client_addr6;
			addr_len = sizeof(client_addr6);

			/* Try to do TCP handshake */
			if( (tmp_sockfd = accept(io_ctx->sockfd, (struct sockaddr*)&client_addr6, &addr_len)) == -1) {
				if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "accept(): %s\n", strerror(errno));
				return 0;
			}
			/* Close connection (no more free session slots) */
			close(tmp_sockfd);
		}
		/* TODO: Fix this */
		sleep(1);
	}

	return 1;
}
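
Stripped of the Verse-specific context handling, the function above follows a common accept-and-detach pattern: accept() the TCP connection, mark the thread attributes as PTHREAD_CREATE_DETACHED so the thread's resources are reclaimed automatically, and hand per-connection data to the new thread. A minimal sketch of that pattern; client_loop() and accept_detached() are hypothetical names and error handling is trimmed:

#include <pthread.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <unistd.h>

/* Hypothetical per-connection handler; the real server passes a copied
 * vContext to conn_loop() instead of a bare socket descriptor. */
static void *client_loop(void *arg)
{
	int sockfd = *(int*)arg;
	free(arg);
	/* ... talk to the client ... */
	close(sockfd);
	return NULL;
}

/* Accept one client and serve it in a detached thread. */
static int accept_detached(int listen_fd)
{
	struct sockaddr_storage peer;
	socklen_t peer_len = sizeof(peer);
	pthread_attr_t attr;
	pthread_t thread;
	int *client_fd = malloc(sizeof(int));

	if(client_fd == NULL)
		return -1;
	*client_fd = accept(listen_fd, (struct sockaddr*)&peer, &peer_len);
	if(*client_fd == -1) {
		free(client_fd);
		return -1;
	}
	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	if(pthread_create(&thread, &attr, client_loop, client_fd) != 0) {
		close(*client_fd);
		free(client_fd);
	}
	pthread_attr_destroy(&attr);
	return 0;
}
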
Example #29
File: vs_tag.c  Project: verse/verse
/**
 * \brief This function tries to handle the Tag_Create_Ack command.
 *
 * This function changes the state of the follower from CREATING to CREATED
 * and sends the tag value to this client.
 */
int vs_handle_tag_create_ack(struct VS_CTX *vs_ctx,
                             struct VSession *vsession,
                             struct Generic_Cmd *cmd)
{
    struct VSNode				*node;
    struct VSTagGroup			*tg;
    struct VSTag				*tag;
    struct VSEntityFollower		*tag_follower;
    struct Tag_Create_Ack_Cmd	*tag_create_ack = (struct Tag_Create_Ack_Cmd*)cmd;
    uint32						node_id = tag_create_ack->node_id;
    uint16						taggroup_id = tag_create_ack->taggroup_id;
    uint16						tag_id = tag_create_ack->tag_id;
    int							all_created = 1, tag_found = 0, ret = 0;

    /* Try to find node */
    if((node = vs_node_find(vs_ctx, node_id)) == NULL) {
        v_print_log(VRS_PRINT_DEBUG_MSG, "%s() node (id: %d) not found\n",
                    __FUNCTION__, node_id);
        return 0;
    }

    pthread_mutex_lock(&node->mutex);

    /* Try to find TagGroup */
    if( (tg = vs_taggroup_find(node, taggroup_id)) == NULL) {
        v_print_log(VRS_PRINT_DEBUG_MSG, "%s() tag_group (id: %d) in node (id: %d) not found\n",
                    __FUNCTION__, taggroup_id, node_id);
        goto end;
    }

    /* Try to find Tag */
    if ( (tag = vs_tag_find(tg, tag_id)) == NULL) {
        v_print_log(VRS_PRINT_DEBUG_MSG, "%s() tag (id: %d) in tag_group (id: %d), node (id: %d) not found\n",
                    __FUNCTION__, tag_id, taggroup_id, node_id);
        goto end;
    }

    /* Try to find tag follower that generated this fake command */
    tag_follower = tag->tag_folls.first;
    while(tag_follower != NULL) {
        if(tag_follower->node_sub->session->session_id == vsession->session_id) {
            tag_found = 1;
            /* When the tag contains a value, then send this value to the client */
            if( (tag->flag & TAG_INITIALIZED) && (tag_follower->state != ENTITY_CREATED)) {
                ret = vs_tag_send_set(tag_follower->node_sub->session, tag_follower->node_sub->prio, node, tg, tag);
            }
            tag_follower->state = ENTITY_CREATED;

            /* When this tag has been destroyed while the tag_create command
             * was being sent, then send the tag_destroy command now */
            if(tag->state == ENTITY_DELETING) {
                struct Generic_Cmd *tag_destroy_cmd = v_tag_destroy_create(node->id, tg->id, tag->id);

                if( tag_destroy_cmd != NULL &&
                        (v_out_queue_push_tail(tag_follower->node_sub->session->out_queue,
                                               0,
                                               tag_follower->node_sub->prio,
                                               tag_destroy_cmd) == 1))
                {
                    tag_follower->state = ENTITY_DELETING;
                } else {
                    v_print_log(VRS_PRINT_DEBUG_MSG,
                                "Tag_Destroy (node_id: %d, taggroup_id: %d, tag_id: %d) wasn't added to the queue\n",
                                node->id, tg->id, tag->id);
                    ret = 0;
                }
            }
        } else if(tag_follower->state != ENTITY_CREATED) {
            all_created = 0;
        }
        tag_follower = tag_follower->next;
    }

    /* When all clients know about this tag, then switch the tag to state CREATED */
    if(all_created == 1) {
        tag->state = ENTITY_CREATED;
    }

    if(tag_found == 0) {
        v_print_log(VRS_PRINT_DEBUG_MSG,
                    "%s() tag_follower of tag (id: %d) in tag_group (id: %d) in node (id: %d) not found\n",
                    __FUNCTION__, tag_id, tg->id, node->id);
    } else {
        ret = 1;
    }

end:

    pthread_mutex_unlock(&node->mutex);

    return ret;
}
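
Both create-acknowledgment handlers in this listing (for tag groups and for tags) drive the same per-follower state machine: the ack moves the follower from CREATING to CREATED, and an entity already marked DELETING only now gets its destroy command queued to that client. A minimal sketch of that transition logic; the enum values and queue_destroy_cmd() stub are hypothetical stand-ins for the Verse types and for v_out_queue_push_tail():

/* Hypothetical states mirroring ENTITY_CREATING, ENTITY_CREATED and
 * ENTITY_DELETING in the Verse server. */
enum entity_state { STATE_CREATING, STATE_CREATED, STATE_DELETING };

/* Stand-in for pushing a destroy command to the client's outgoing queue;
 * pretend the push always succeeds. */
static int queue_destroy_cmd(void)
{
	return 1;
}

/* Apply one create-acknowledgment from a client to its follower record. */
static void handle_create_ack(enum entity_state *follower_state,
		enum entity_state entity_state)
{
	/* The client now knows about the entity. */
	if(*follower_state == STATE_CREATING)
		*follower_state = STATE_CREATED;

	/* If the entity was deleted while the create command was still in
	 * flight, the destroy command can be sent only now. */
	if(entity_state == STATE_DELETING && queue_destroy_cmd() == 1)
		*follower_state = STATE_DELETING;
}
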
Example #30
/* Generic receiving and handling of packet */
int vc_receive_and_handle_packet(struct vContext *C, int handle_packet(struct vContext *C))
{
	struct VDgramConn *dgram_conn = CTX_current_dgram_conn(C);
	struct IO_CTX *io_ctx = CTX_io_ctx(C);
	struct VPacket *r_packet = CTX_r_packet(C);
	int ret, error_num;
	long int sec, usec;
	fd_set set;
	struct timeval tv;

	/* Initialize set */
	FD_ZERO(&set);
	FD_SET(dgram_conn->io_ctx.sockfd, &set);

	switch(dgram_conn->host_state) {
		case UDP_CLIENT_STATE_REQUEST:
		case UDP_CLIENT_STATE_PARTOPEN:
		case UDP_CLIENT_STATE_CLOSING:
			sec = INIT_TIMEOUT + (int) (v_exponential_backoff(dgram_conn->state[dgram_conn->host_state].attempts) * (rand() / (RAND_MAX + 1.0)));
			usec = 0;
			break;
		case UDP_CLIENT_STATE_OPEN:
			sec = 0;
			usec = 10000;
			break;
		default:
			/* Unexpected state: fall back to a short polling timeout, so
			 * that sec and usec are never used uninitialized */
			sec = 0;
			usec = 10000;
			break;
	}

	/* Initialize time value */
	tv.tv_sec = sec;
	tv.tv_usec = usec;

	/* Wait on response from server */
	if( (ret = select(io_ctx->sockfd+1, &set, NULL, NULL, &tv)) == -1 ) {
		if(is_log_level(VRS_PRINT_ERROR)) v_print_log(VRS_PRINT_ERROR, "%s:%s():%d select(): %s\n", __FILE__, __FUNCTION__,  __LINE__, strerror(errno));
		return RECEIVE_PACKET_ERROR;
	}
	/* Check if the event occurred on sockfd */
	else if(ret>0 && FD_ISSET(io_ctx->sockfd, &set)) {
		/* Try to receive packet from server */
		if(v_receive_packet(io_ctx, &error_num) == -1) {
			switch(error_num) {
				case ECONNREFUSED:	/* A remote host refused this connection */
					return RECEIVE_PACKET_ERROR;
				case EAGAIN:
				case EBADF:
				case EFAULT:
				case EINTR:
				case EINVAL:
				case ENOMEM:
				case ENOTCONN:
				case ENOTSOCK:
					break;
			}
		/* The address of the received packet has to be the same as the
		 * address of the server when the socket is not connected */
		} else if((io_ctx->flags & SOCKET_CONNECTED) ||
				v_compare_addr_and_port(&dgram_conn->peer_address, &dgram_conn->io_ctx.peer_addr))
		{
			/* The size of the buffer must be bigger than zero */
			if(dgram_conn->io_ctx.buf_size>0) {
				int buffer_pos = 0;

				/* Get time of receiving packet */
				gettimeofday(&tv, NULL);

				/* Extract verse header from packet */
				ret = v_unpack_packet_header(io_ctx->buf,
						io_ctx->buf_size,
						r_packet);
				if(ret != VERSE_PACKET_HEADER_SIZE)
					return RECEIVE_PACKET_CORRUPTED;

				/* Check for right packet header version number */
				if(r_packet->header.version != VRS_VERSION) {
					if(is_log_level(VRS_PRINT_WARNING)) v_print_log(VRS_PRINT_WARNING, "Packet with unsupported version: %d was dropped.\n",
							r_packet->header.version);
					return RECEIVE_PACKET_CORRUPTED;
				}

				/* Compute RWIN */
				dgram_conn->rwin_peer = r_packet->header.window << dgram_conn->rwin_peer_scale;

				/* Extract system commands from packet */
				buffer_pos += v_unpack_packet_system_commands(io_ctx->buf,
						io_ctx->buf_size,
						r_packet);

				/* Are there some node commands in the buffer? */
				if(io_ctx->buf_size > buffer_pos) {
					/* Set pointer to the not yet processed part of the buffer */
					r_packet->data = (uint8*)&io_ctx->buf[buffer_pos];
					/* Set size of the not yet processed part of the buffer */
					r_packet->data_size = io_ctx->buf_size - buffer_pos;
				} else {
					r_packet->data = NULL;
					r_packet->data_size = 0;
				}

				/* When important things are done, then print content of the
				 * command (in debug mode) */
				v_print_receive_packet(C);

				/* Handle received packet and its data */
				ret = handle_packet(C);

				if(ret == RECEIVE_PACKET_SUCCESS) {
					/* Update information about the last received payload
					 * packet */
					if(r_packet->header.flags & PAY_FLAG) {
						dgram_conn->last_r_pay = r_packet->header.payload_id;
						/* Update time of last received payload packet */
						dgram_conn->tv_pay_recv.tv_sec = tv.tv_sec;
						dgram_conn->tv_pay_recv.tv_usec = tv.tv_usec;
					}
					/* Update information about the last received
					 * acknowledgment packet */
					if(r_packet->header.flags & ACK_FLAG) {
						dgram_conn->last_r_ack = r_packet->header.ack_nak_id;
					}
				}

				return ret;

			} else {
				if(is_log_level(VRS_PRINT_WARNING)) v_print_log(VRS_PRINT_WARNING, "Packet with zero size was dropped.\n");
				return RECEIVE_PACKET_CORRUPTED;
			}
		} else {
			/* When some 'bad' packet is received on this port, then do not increase counter
			 * of connection attempts (packet was received from address, that does not belong
			 * to the server) */
			if(is_log_level(VRS_PRINT_WARNING))
				v_print_log(VRS_PRINT_WARNING, "Packet from unknown address was dropped.\n");

			return RECEIVE_PACKET_FAKED;	/* This should not happen, because connect() function is used */
		}
	}

	return RECEIVE_PACKET_TIMEOUT;
}
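
The receive path above is organized around select() with a state-dependent timeout: wait until either a datagram arrives or the timer expires, and report data, timeout, and error separately to the caller. A stripped-down sketch of just that wait step, assuming the caller already owns the UDP socket; wait_for_datagram() is a hypothetical helper, not part of the Verse API:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/select.h>

/* Wait up to timeout_us microseconds for a datagram on sockfd.
 * Returns 1 when data is ready, 0 on timeout, -1 on error, mirroring
 * the SUCCESS/TIMEOUT/ERROR split in vc_receive_and_handle_packet(). */
static int wait_for_datagram(int sockfd, long timeout_us)
{
	fd_set set;
	struct timeval tv;
	int ret;

	FD_ZERO(&set);
	FD_SET(sockfd, &set);

	tv.tv_sec = timeout_us / 1000000;
	tv.tv_usec = timeout_us % 1000000;

	ret = select(sockfd + 1, &set, NULL, NULL, &tv);
	if(ret == -1) {
		fprintf(stderr, "select(): %s\n", strerror(errno));
		return -1;
	}
	if(ret > 0 && FD_ISSET(sockfd, &set))
		return 1;	/* a packet can now be read without blocking */
	return 0;		/* timeout expired */
}
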