Example #1
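/* Session-level frame handler: dispatches the BEGIN, ATTACH, DETACH, FLOW,
 * TRANSFER, DISPOSITION and END performatives received for this session to
 * the session state machine and to the link endpoints registered on it. */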
static void on_frame_received(void* context, AMQP_VALUE performative, uint32_t payload_size, const unsigned char* payload_bytes)
{
	SESSION_INSTANCE* session_instance = (SESSION_INSTANCE*)context;
	AMQP_VALUE descriptor = amqpvalue_get_inplace_descriptor(performative);

	if (is_begin_type_by_descriptor(descriptor))
	{
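		/* BEGIN: capture the peer's incoming window and next outgoing id, then
		 * move to MAPPED (answering with our own BEGIN first if the peer opened). */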
		BEGIN_HANDLE begin_handle;

		if (amqpvalue_get_begin(performative, &begin_handle) != 0)
		{
			connection_close(session_instance->connection, "amqp:decode-error", "Cannot decode BEGIN frame");
		}
		else
		{
			if ((begin_get_incoming_window(begin_handle, &session_instance->remote_incoming_window) != 0) ||
				(begin_get_next_outgoing_id(begin_handle, &session_instance->next_incoming_id) != 0))
			{
				/* error */
				begin_destroy(begin_handle);
				session_set_state(session_instance, SESSION_STATE_DISCARDING);
				connection_close(session_instance->connection, "amqp:decode-error", "Cannot get incoming windows and next outgoing id");
			}
			else
			{
				begin_destroy(begin_handle);

				if (session_instance->session_state == SESSION_STATE_BEGIN_SENT)
				{
					session_set_state(session_instance, SESSION_STATE_MAPPED);
				}
				else if (session_instance->session_state == SESSION_STATE_UNMAPPED)
				{
					session_set_state(session_instance, SESSION_STATE_BEGIN_RCVD);
					if (send_begin(session_instance) != 0)
					{
						connection_close(session_instance->connection, "amqp:internal-error", "Failed sending BEGIN frame");
						session_set_state(session_instance, SESSION_STATE_DISCARDING);
					}
					else
					{
						session_set_state(session_instance, SESSION_STATE_MAPPED);
					}
				}
			}
		}
	}
	else if (is_attach_type_by_descriptor(descriptor))
	{
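		/* ATTACH: look up the link endpoint by name; unknown links are offered to
		 * the application via on_link_attached, known ones get the remote handle
		 * recorded and the frame forwarded. */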
		const char* name = NULL;
		ATTACH_HANDLE attach_handle;

		if (amqpvalue_get_attach(performative, &attach_handle) != 0)
		{
			end_session_with_error(session_instance, "amqp:decode-error", "Cannot decode ATTACH frame");
		}
		else
		{
			role role;
			AMQP_VALUE source;
			AMQP_VALUE target;

			if ((attach_get_name(attach_handle, &name) != 0) ||
				(attach_get_role(attach_handle, &role) != 0) ||
				(attach_get_source(attach_handle, &source) != 0) ||
				(attach_get_target(attach_handle, &target) != 0))
			{
				end_session_with_error(session_instance, "amqp:decode-error", "Cannot get link name from ATTACH frame");
			}
			else
			{
				LINK_ENDPOINT_INSTANCE* link_endpoint = find_link_endpoint_by_name(session_instance, name);
				if (link_endpoint == NULL)
				{
					/* new link attach */
					if (session_instance->on_link_attached != NULL)
					{
						LINK_ENDPOINT_HANDLE new_link_endpoint = session_create_link_endpoint(session_instance, name);
						if (new_link_endpoint == NULL)
						{
							end_session_with_error(session_instance, "amqp:internal-error", "Cannot create link endpoint");
						}
						else
						{
							if (!session_instance->on_link_attached(session_instance->on_link_attached_callback_context, new_link_endpoint, name, role, source, target))
							{
								session_destroy_link_endpoint(new_link_endpoint);
								new_link_endpoint = NULL;
							}
							else
							{
								if (new_link_endpoint->frame_received_callback != NULL)
								{
									new_link_endpoint->frame_received_callback(new_link_endpoint->callback_context, performative, payload_size, payload_bytes);
								}
							}
						}
					}
				}
				else
				{
					if (attach_get_handle(attach_handle, &link_endpoint->input_handle) != 0)
					{
						end_session_with_error(session_instance, "amqp:decode-error", "Cannot get input handle from ATTACH frame");
					}
					else
					{
						link_endpoint->frame_received_callback(link_endpoint->callback_context, performative, payload_size, payload_bytes);
					}
				}
			}

			attach_destroy(attach_handle);
		}
	}
	else if (is_detach_type_by_descriptor(descriptor))
	{
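		/* DETACH: resolve the remote handle to a link endpoint and forward the
		 * frame; an unknown handle is a session error. */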
		DETACH_HANDLE detach_handle;

		if (amqpvalue_get_detach(performative, &detach_handle) != 0)
		{
			end_session_with_error(session_instance, "amqp:decode-error", "Cannot decode DETACH frame");
		}
		else
		{
			uint32_t remote_handle;
			if (detach_get_handle(detach_handle, &remote_handle) != 0)
			{
				end_session_with_error(session_instance, "amqp:decode-error", "Cannot get handle from DETACH frame");

				detach_destroy(detach_handle);
			}
			else
			{
				detach_destroy(detach_handle);

				LINK_ENDPOINT_INSTANCE* link_endpoint = find_link_endpoint_by_input_handle(session_instance, remote_handle);
				if (link_endpoint == NULL)
				{
					end_session_with_error(session_instance, "amqp:session:unattached-handle", "");
				}
				else
				{
					link_endpoint->frame_received_callback(link_endpoint->callback_context, performative, payload_size, payload_bytes);
				}
			}
		}
	}
	else if (is_flow_type_by_descriptor(descriptor))
	{
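		/* FLOW: refresh the session-level windows, forward the frame to the
		 * addressed link (if any) and let all links know they may send again. */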
		FLOW_HANDLE flow_handle;

		if (amqpvalue_get_flow(performative, &flow_handle) != 0)
		{
			end_session_with_error(session_instance, "amqp:decode-error", "Cannot decode FLOW frame");
		}
		else
		{
			uint32_t remote_handle;
			transfer_number flow_next_incoming_id;
			uint32_t flow_incoming_window;
			if ((flow_get_next_outgoing_id(flow_handle, &session_instance->next_incoming_id) != 0) ||
				(flow_get_next_incoming_id(flow_handle, &flow_next_incoming_id) != 0) ||
				(flow_get_incoming_window(flow_handle, &flow_incoming_window) != 0))
			{
				flow_destroy(flow_handle);

				end_session_with_error(session_instance, "amqp:decode-error", "Cannot decode FLOW frame");
			}
			else
			{
				LINK_ENDPOINT_INSTANCE* link_endpoint_instance = NULL;

				session_instance->remote_incoming_window = flow_next_incoming_id + flow_incoming_window - session_instance->next_outgoing_id;

				if (flow_get_handle(flow_handle, &remote_handle) == 0)
				{
					link_endpoint_instance = find_link_endpoint_by_input_handle(session_instance, remote_handle);
				}

				flow_destroy(flow_handle);

				if (link_endpoint_instance != NULL)
				{
					link_endpoint_instance->frame_received_callback(link_endpoint_instance->callback_context, performative, payload_size, payload_bytes);
				}

				size_t i = 0;
				while ((session_instance->remote_incoming_window > 0) && (i < session_instance->link_endpoint_count))
				{
					/* notify the caller that it can send here */
					if (session_instance->link_endpoints[i]->on_session_flow_on != NULL)
					{
						session_instance->link_endpoints[i]->on_session_flow_on(session_instance->link_endpoints[i]->callback_context);
					}

					i++;
				}
			}
		}
	}
	else if (is_transfer_type_by_descriptor(descriptor))
	{
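		/* TRANSFER: update the incoming window bookkeeping, hand the payload to
		 * the destination link and replenish the window with a FLOW when it
		 * reaches zero. */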
		TRANSFER_HANDLE transfer_handle;

		if (amqpvalue_get_transfer(performative, &transfer_handle) != 0)
		{
			end_session_with_error(session_instance, "amqp:decode-error", "Cannot decode TRANSFER frame");
		}
		else
		{
			uint32_t remote_handle;
			delivery_number delivery_id;

			transfer_get_delivery_id(transfer_handle, &delivery_id);
			if (transfer_get_handle(transfer_handle, &remote_handle) != 0)
			{
				transfer_destroy(transfer_handle);
				end_session_with_error(session_instance, "amqp:decode-error", "Cannot get handle from TRANSFER frame");
			}
			else
			{
				transfer_destroy(transfer_handle);

				session_instance->next_incoming_id++;
				session_instance->remote_outgoing_window--;
				session_instance->incoming_window--;

				LINK_ENDPOINT_INSTANCE* link_endpoint = find_link_endpoint_by_output_handle(session_instance, remote_handle);
				if (link_endpoint == NULL)
				{
					end_session_with_error(session_instance, "amqp:session:unattached-handle", "");
				}
				else
				{
					link_endpoint->frame_received_callback(link_endpoint->callback_context, performative, payload_size, payload_bytes);
				}

				if (session_instance->incoming_window == 0)
				{
					session_instance->incoming_window = session_instance->desired_incoming_window;
					send_flow(session_instance);
				}
			}
		}
	}
	else if (is_disposition_type_by_descriptor(descriptor))
	{
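		/* DISPOSITION: deliveries belong to the links, so broadcast the frame to
		 * every link endpoint on the session. */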
		uint32_t i;

		for (i = 0; i < session_instance->link_endpoint_count; i++)
		{
			LINK_ENDPOINT_INSTANCE* link_endpoint = session_instance->link_endpoints[i];
			link_endpoint->frame_received_callback(link_endpoint->callback_context, performative, payload_size, payload_bytes);
		}
	}
	else if (is_end_type_by_descriptor(descriptor))
	{
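		/* END: acknowledge the peer's END with our own and discard the session. */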
		END_HANDLE end_handle;

		if (amqpvalue_get_end(performative, &end_handle) != 0)
		{
			end_session_with_error(session_instance, "amqp:decode-error", "Cannot decode END frame");
		}
		else
		{
			if ((session_instance->session_state != SESSION_STATE_END_RCVD) &&
				(session_instance->session_state != SESSION_STATE_DISCARDING))
			{
				session_set_state(session_instance, SESSION_STATE_END_RCVD);
				if (send_end_frame(session_instance, NULL) != 0)
				{
					/* fatal error */
					(void)connection_close(session_instance->connection, "amqp:internal-error", "Cannot send END frame.");
				}

				session_set_state(session_instance, SESSION_STATE_DISCARDING);
			}
		}
	}
}
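
The FLOW branch above recomputes the peer's remaining incoming window as flow.next-incoming-id + flow.incoming-window - session.next-outgoing-id. The short standalone sketch below isolates just that arithmetic (the helper name and sample values are made up for illustration and are not part of the library); it shows why plain unsigned 32-bit math is sufficient, including across transfer-id wraparound.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the window arithmetic in the FLOW branch. */
static uint32_t compute_remote_incoming_window(uint32_t flow_next_incoming_id,
                                               uint32_t flow_incoming_window,
                                               uint32_t next_outgoing_id)
{
	/* Modular uint32_t arithmetic handles transfer-id wraparound for free. */
	return flow_next_incoming_id + flow_incoming_window - next_outgoing_id;
}

int main(void)
{
	/* Peer expects id 10 next and advertises room for 100 transfers;
	 * our next outgoing id is already 15, so 95 slots remain. */
	printf("%" PRIu32 "\n", compute_remote_incoming_window(10, 100, 15));           /* 95 */

	/* Near the 32-bit wrap: peer expects 0xFFFFFFFE, window 50, our next id is 3. */
	printf("%" PRIu32 "\n", compute_remote_incoming_window(UINT32_MAX - 1, 50, 3)); /* 45 */
	return 0;
}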
Example #2
/*
 * Per-packet callback function from libpcap.
 */
static void
  packet_cb(u_char *user_data, const struct pcap_pkthdr* phdr,
	    const u_char *pkt)
{
#ifdef NF9
   int i,count;

#endif
   const struct pfsync_header *ph = (const struct pfsync_header *)pkt;
#if __FreeBSD_version > 900000
   const struct pfsync_subheader *psh = (const struct pfsync_subheader *)(pkt + sizeof(*ph));
#endif
   const struct pfsync_state *st;
   u_int64_t bytes[2], packets[2];

   if (phdr->caplen < PFSYNC_HDRLEN)
     {
	syslog(LOG_WARNING, "Runt pfsync packet header");
	return;
     }
   if (ph->version != _PFSYNC_VER)
     {
	syslog(LOG_WARNING, "Unsupported pfsync version %d, exiting",
	       ph->version);
	exit(1);
     }
#if __FreeBSD_version > 900000
   if (psh->count == 0)
#else
   if (ph->count == 0)
#endif
     {
	syslog(LOG_WARNING, "Empty pfsync packet");
	return;
     }
	/* Skip non-delete messages */
#if __FreeBSD_version > 900000
   if (psh->action != PFSYNC_ACT_DEL)
     return;
   if (sizeof(*ph) + sizeof(*psh) + ((sizeof(*st) * psh->count)) > phdr->caplen)
#else
   if (ph->action != PFSYNC_ACT_DEL)
     return;
   if (sizeof(*ph) + (sizeof(*st) * ph->count) > phdr->caplen)
#endif
     {
	syslog(LOG_WARNING, "Runt pfsync packet");
	return;
     }

#if __FreeBSD_version > 900000
   st = (const struct pfsync_state *)((const u_int8_t *)psh + sizeof(*psh));
#else
   st = (const struct pfsync_state *)((const u_int8_t *)ph + sizeof(*ph));
#endif

#ifdef NF9

       /* The 0.7 code seems to assume ph->count == 1.
	* Instead we go through the list of states and, for states with
	* overflowed 32-bit counters, send multiple flows each.
	* The disadvantage is that we have to send one datagram per flow for
	* overflowed states.
	*/

# ifdef NF9_ULL
        /* Define 64-bit counters for NetFlow v9 so we don't have to waste time
	 * messing with sending multiple flows.
	 */
   if(NF9_VERSION == export_version)
     {
#if __FreeBSD_version > 900000
	send_flow(st, psh->count, &flows_exported);
#else
	send_flow(st, ph->count, &flows_exported);
#endif
     }

   else
# endif
     do
     {
#if __FreeBSD_version > 900000
	for(count=i=0 ; i < psh->count ;i++)
#else
	for(count=i=0 ; i < ph->count ;i++)
#endif
	  {
	     pf_state_counter_ntoh(st[i].packets[0],packets[0]);
	     pf_state_counter_ntoh(st[i].packets[1],packets[1]);
	     pf_state_counter_ntoh(st[i].bytes[0],bytes[0]);
	     pf_state_counter_ntoh(st[i].bytes[1],bytes[1]);

		   /* If the current state has overflowed, send the previous
		    * non-overflowed states and send the current state as
		    * multiple flows.
		    */
	     if(bytes[0] > UINT_MAX || bytes[1] > UINT_MAX ||
		packets[0] >UINT_MAX  || packets[1] > UINT_MAX )
	       {
			/* Send the non-overflowed states */
		  if(0<count)
		    {
		       send_flow(&st[i -count], count, &flows_exported);
		       count =0;
		    }

			/*Send overflowed state*/
		  while (bytes[0] > 0 || bytes[1] > 0 ||
			 packets[0] > 0 || packets[1] > 0)
		    {

		       struct pfsync_state st1;

			     /*Copy the state as we have to modify it*/
		       memcpy(&st1, &st[i], sizeof(st1));

		       st1.bytes[0][0] = 0;
		       st1.bytes[1][0] = 0;
		       st1.packets[0][0] = 0;
		       st1.packets[1][0] = 0;

		       if (bytes[0] > UINT_MAX)
			 {

			    st1.bytes[0][1] = 0xffffffff;
			    bytes[0] -= MIN(bytes[0], 0xffffffff);
			 }
		       else
			 {
			    st1.bytes[0][1] = htonl(bytes[0]);
			    bytes[0] = 0;
			 }

		       if (bytes[1] > UINT_MAX)
			 {
			    st1.bytes[1][1] = 0xffffffff;
			    bytes[1] -= MIN(bytes[1], 0xffffffff);
			 }
		       else
			 {
			    st1.bytes[1][1] = htonl(bytes[1]);
			    bytes[1] = 0;
			 }

		       if (packets[0] > UINT_MAX)
			 {
			    st1.packets[0][1] = 0xffffffff;
			    packets[0] -= MIN(packets[0], 0xffffffff);
			 }
		       else
			 {
			    st1.packets[0][1] = htonl(packets[0]);
			    packets[0] = 0;
			 }

		       if (packets[1] > UINT_MAX)
			 {
			    st1.packets[1][1] = 0xffffffff;
			    packets[1] -= MIN(packets[1], 0xffffffff);
			 }
		       else
			 {
			    st1.packets[1][1] = htonl(packets[1]);
			    packets[1] = 0;
			 }

			     /*Send the modified state*/
		       send_flow(&st1, 1 , &flows_exported);
		    }
	       }
	     else
	       {

		  count++;
	       }
	  }

	if(0<count)
	  {
	     send_flow(&st[i -count], count, &flows_exported);
	  }

     }
   while(0);

#else
	/*
	 * Check whether any members of st->packets or st->bytes overflow
	 * the 32-bit NetFlow counters; if so, create as many flow records
	 * as are needed to clear the counters.
	 */

   pf_state_counter_ntoh(st->packets[0],packets[0]);
   pf_state_counter_ntoh(st->packets[1],packets[1]);
   pf_state_counter_ntoh(st->bytes[0],bytes[0]);
   pf_state_counter_ntoh(st->bytes[1],bytes[1]);

   while (bytes[0] > 0 || bytes[1] > 0 ||
	  packets[0] > 0 || packets[1] > 0)
     {

	struct pfsync_state st1;

	memcpy(&st1, st, sizeof(st1));

	if (bytes[0] > UINT_MAX)
	  {
	     st1.bytes[0][0] = 0xffffffff;
	     bytes[0] -= MIN(bytes[0], 0xffffffff);
	  }
	else
	  {
	     st1.bytes[0][0] = htonl(bytes[0]);
	     bytes[0] = 0;
	  }
	if (bytes[1] > UINT_MAX)
	  {
	     st1.bytes[1][0] = 0xffffffff;
	     bytes[1] -= MIN(bytes[1], 0xffffffff);
	  }
	else
	  {
	     st1.bytes[1][0] = htonl(bytes[1]);
	     bytes[1] = 0;
	  }
	if (packets[0] > UINT_MAX)
	  {
	     st1.packets[0][0] = 0xffffffff;
	     packets[0] -= MIN(packets[0], 0xffffffff);
	  }
	else
	  {
	     st1.packets[0][0] = htonl(packets[0]);
	     packets[0] = 0;
	  }
	if (packets[1] > UINT_MAX)
	  {
	     st1.packets[1][0] = 0xffffffff;
	     packets[1] -= MIN(packets[1], 0xffffffff);
	  }
	else
	  {
	     st1.packets[1][0] = htonl(packets[1]);
	     packets[1] = 0;
	  }

	send_flow(&st1, ph->count, &flows_exported);

	/*
	 * A strange mistake is made in 0.7: only the first pfsync_state is
	 * copied to st1, yet ph->count records are passed to send_flow(), so
	 * the remaining (ph->count - 1) pfsync_states are read from invalid
	 * stack memory. If st1 were not on the stack, this would likely have
	 * caused an access violation.
	 */

     }

#endif /*NF9*/

}
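
The counter-splitting logic above is obscured a little by pfsync's two-word [hi, lo] counter layout. The standalone sketch below (the struct and helper are made up for illustration and are not part of pfflowd) shows just the splitting idea: a 64-bit counter is drained in chunks of at most 0xffffffff so that every emitted record fits the 32-bit NetFlow v5 counter fields.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical record with 32-bit counters, standing in for a NetFlow v5 flow record. */
struct flow_rec32
{
   uint32_t packets;
   uint32_t bytes;
};

/* Drain 64-bit counters into as many 32-bit records as needed. */
static void split_counters(uint64_t packets, uint64_t bytes)
{
   while (packets > 0 || bytes > 0)
     {
	struct flow_rec32 rec;

	rec.packets = (packets > UINT32_MAX) ? UINT32_MAX : (uint32_t)packets;
	rec.bytes = (bytes > UINT32_MAX) ? UINT32_MAX : (uint32_t)bytes;
	packets -= rec.packets;
	bytes -= rec.bytes;

	/* A real exporter would build and send a flow record here. */
	printf("record: %" PRIu32 " packets, %" PRIu32 " bytes\n", rec.packets, rec.bytes);
     }
}

int main(void)
{
   /* 6 GiB of traffic needs two records when byte counters are 32-bit. */
   split_counters(4500000ULL, 6ULL * 1024 * 1024 * 1024);
   return 0;
}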