Example #1
File: hud.c Project: broese/mcbuild
// workaround for bug MC-46345 - renew map ID when changing dimension
void hud_renew(MCPacketQueue *cq) {
    int sid;
    int hud_newid = hud_autoid-1;

    for(sid=0; sid<=45; sid++) {
        if (hud_bogus_map(&gs.inv.slots[sid])) {
            gs.inv.slots[sid].damage = hud_newid;
            NEWPACKET(SP_SetSlot, ss);
            tss->wid = 0;
            tss->sid = sid;
            clone_slot(&gs.inv.slots[sid], &tss->slot);
            queue_packet(ss,cq);
        }
    }
    if (hud_bogus_map(&gs.inv.drag)) {
        gs.inv.drag.damage = hud_newid;
        NEWPACKET(SP_SetSlot, ss);
        tss->wid = 255;
        tss->sid = -1;
        clone_slot(&gs.inv.drag, &tss->slot);
        queue_packet(ss,cq);
    }

    if (hud_id == hud_autoid) hud_id = hud_newid;
    hud_autoid = hud_newid;
}
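
The hud.c snippets in this listing call NEWPACKET(SP_SetSlot, ss) and then immediately write through a pointer named tss that is never declared in the function, so the macro evidently declares both the packet object (ss) and a typed pointer to its payload ("t" + name). The following is a minimal, self-contained sketch of that idiom; the types, field names and packet id are assumptions for illustration, not mcbuild's real definitions.

#include <stdlib.h>

enum { SP_SetSlot };                                            /* assumed packet id */
typedef struct { int item, count, damage; void *nbt; } slot_t;  /* assumed slot layout */
typedef struct { int wid, sid; slot_t slot; } SP_SetSlot_pkt;   /* assumed payload layout */
typedef struct { int pid; union { SP_SetSlot_pkt _SP_SetSlot; } d; } MCPacket;

/* Declares the zero-initialized packet object "name" and a typed payload pointer "t<name>". */
#define NEWPACKET(type, name)                        \
    MCPacket *name = calloc(1, sizeof(MCPacket));    \
    name->pid = type;                                \
    type##_pkt *t##name = &name->d._##type

int main(void)
{
    NEWPACKET(SP_SetSlot, ss);   /* declares both ss and tss */
    tss->wid = 0;
    tss->sid = 36;
    tss->slot.item = 358;
    free(ss);
    return 0;
}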
Example #2
bool XBeeUtil::wait_for_packet_type(XBee* xbee, int timeout, int api_id, bool (*match_packet)(), void (*queue_packet)())
{
	unsigned long end = millis() + timeout;
	while(millis() < end)
	{
		if(xbee->readPacket(end - millis()) && xbee->getResponse().getApiId() == api_id)
		{
			// No matcher supplied, or the matcher accepted the packet: done.
			if(match_packet == NULL || match_packet())
			{
				return true;
			}
		}
		// No packet, wrong API id, or rejected by the matcher: hand it to the queue callback.
		if(queue_packet != NULL)
		{
			queue_packet();
		}
	}
	return false;
}
Example #3
/*
 * Queue a packet for reliable transmission.
 */
void
queue_packet_reliable(PACKET *p, int priority) 
{
	assert(p->len < 510);

	THREAD_DATA::net_t *n = get_thread_data()->net;

	if (priority == SP_DEFERRED) {
		n->queues->d_prio->push_back(p);
	} else if (priority == SP_HIGH || priority == SP_NORMAL) {
		uint32_t ack_id;
		PACKET *p2;

		/* packet with space for reliable header */
		p2 = allocate_packet(p->len + 6);

		ack_id = n->rel_o->next_ack_id++;

		/* add reliable header and free old (headerless) packet */
		build_packet(p2->data, "AACZ", 0x00, 0x03, ack_id, p->data, p->len);
		free_packet(p);

		/* store a copy for retransmission until its ack is received */
		RPACKET *rp = allocate_rpacket(p2->len, get_ticks_ms(), ack_id);
		memcpy(rp->data, p2->data, p2->len);
		n->rel_o->queue->push_back(rp);

		/* send p2 */
		queue_packet(p2, priority);
	} else {
		assert(0);
	}
}
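
The build_packet(p2->data, "AACZ", 0x00, 0x03, ack_id, p->data, p->len) call above prepends a 6-byte reliable header (two type bytes plus a 32-bit ack id) to the original payload, which is why p2 is allocated with p->len + 6 bytes. The sketch below writes the same layout by hand; the meaning of the format characters and the little-endian byte order are inferred from this single call site rather than from build_packet's documentation.

#include <stdint.h>
#include <string.h>

/* Lay out the reliable header and payload exactly as the "AACZ" call appears to. */
void build_reliable_header(uint8_t *dst, uint32_t ack_id,
                           const uint8_t *payload, size_t len)
{
    dst[0] = 0x00;                   /* core-protocol marker */
    dst[1] = 0x03;                   /* reliable packet type */
    dst[2] = (uint8_t)(ack_id);      /* 32-bit ack id, little-endian assumed */
    dst[3] = (uint8_t)(ack_id >> 8);
    dst[4] = (uint8_t)(ack_id >> 16);
    dst[5] = (uint8_t)(ack_id >> 24);
    memcpy(dst + 6, payload, len);   /* original, headerless packet body */
}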
Example #4
File: hud.c Project: broese/mcbuild
// create a new bogus map item for the client
int hud_new(char * reply, MCPacketQueue *cq) {
    hud_unbind(reply, cq);
    reply[0] = 0;

    // find a free slot in user's inventory (including the off-hand slot)
    // search in descending order, so the hotbar gets preference
    int sid=(currentProtocol>=PROTO_1_9)?45:44;
    while(sid>=9) {
        if (gs.inv.slots[sid].item <= 0) break;
        sid--;
    }
    if (sid<9) {
        sprintf(reply, "no free inventory slot to give you a new item");
        return -1;
    }

    NEWPACKET(SP_SetSlot, ss);
    tss->wid = 0;
    tss->sid = sid;
    tss->slot.item = 358;
    tss->slot.count = 1;
    tss->slot.damage = hud_autoid;
    tss->slot.nbt = NULL;

    gs_packet(ss);
    queue_packet(ss,cq);

    return hud_autoid;
}
Example #5
static void delayed_init_logon_handler(struct event_context *event_ctx,
				       struct timed_event *te,
				       struct timeval now,
				       void *private_data)
{
	struct packet_struct *p = (struct packet_struct *)private_data;

	DEBUG(10, ("delayed_init_logon_handler (%lx): re-queuing packet.\n",
		   (unsigned long)te));

	queue_packet(p);

	TALLOC_FREE(te);
}
Example #6
File: hud.c Project: broese/mcbuild
// unbind the HUD
void hud_unbind(char *reply, MCPacketQueue *cq) {
    int sid;
    for(sid=0; sid<=45; sid++) {
        if (hud_bogus_map(&gs.inv.slots[sid])) {
            NEWPACKET(SP_SetSlot, ss);
            tss->wid = 0;
            tss->sid = sid;
            clear_slot(&tss->slot);
            queue_packet(ss,cq);
            clear_slot(&gs.inv.slots[sid]);
        }
    }

    if (hud_bogus_map(&gs.inv.drag)) {
        NEWPACKET(SP_SetSlot, ss);
        tss->wid = 255;
        tss->sid = -1;
        clear_slot(&tss->slot);
        queue_packet(ss,cq);
        clear_slot(&gs.inv.drag);
    }

    hud_id = -1;
}
Example #7
extern PACKET *
do_packets(			/* queue a packet list, return finished */
	register PACKET	*pl
)
{
	register PACKET	*p;
					/* consistency check */
	if (nprocs < 1)
		error(CONSISTENCY, "do_packets called with no active process");
					/* queue each new packet */
	while (pl != NULL) {
		p = pl; pl = p->next; p->next = NULL;
		queue_packet(p);
	}
	return(get_packets(slots_avail()));	/* return processed packets */
}
Example #8
File: hud.c Project: broese/mcbuild
void hud_update(MCPacketQueue *cq) {
    hud_prune();
    if (hud_id < 0 || !hud_inv) return;

    bg_color = 34;
    draw_clear();
    bg_color = 0;

    int updated = 0;

    switch(hud_mode) {
        case HUDMODE_TEST:      updated = huddraw_test();   break;
        case HUDMODE_INFO:      updated = huddraw_info();   break;
        case HUDMODE_TUNNEL:    updated = huddraw_tunnel(); break;
        case HUDMODE_MAP:       updated = huddraw_map();    break;
        case HUDMODE_BUILD:     updated = huddraw_build();  break;
        case HUDMODE_HELP:      updated = huddraw_help();   break;
        default:                break;
    }

    if (updated) {
        NEWPACKET(SP_Map, map);
        tmap->mapid    = hud_id;
        tmap->scale    = 0;
        tmap->trackpos = 0;
        tmap->nicons   = 0;
        tmap->icons    = NULL;
        tmap->ncols    = 128;
        tmap->nrows    = 128;
        tmap->X        = 0;
        tmap->Z        = 0;
        tmap->len      = sizeof(hud_image);
        lh_alloc_num(tmap->data, sizeof(hud_image));
        memmove(tmap->data, hud_image, sizeof(hud_image));

        queue_packet(map, cq);
    }

    hud_inv = HUDINV_NONE;
}
Example #9
static
void
mainloop(THREAD_DATA *td)
{
	THREAD_DATA::net_t *n = td->net;

	ticks_ms_t acc, ticks, lticks;	/* accumulator, current ticks, last iteration ticks */

	int	pktl;			/* packet length */
	uint8_t pkt[MAX_PACKET];	/* buffer space for a packet */

	if (connect_to_server(td) != 0) {
		free_thread_data(td);
		LogFmt(OP_MOD, "Error performing initial connect");
		return;
	}

	acc = 0;
	ticks = get_ticks_ms();
	lticks = ticks;
	ticks_ms_t last_botman_checkin = ticks;
	ticks_ms_t last_botman_stopcheck = ticks;
	ticks_ms_t last_config_mtime_check = ticks;
	while (td->running >= 0) {
		ticks = get_ticks_ms();
		acc += ticks - lticks;
		lticks = ticks;

		/* check in with the bot manager */
		if (ticks - last_botman_checkin >= BOTMAN_CHECKIN_INTERVAL) {
			botman_bot_checkin(td->botman_handle);
			last_botman_checkin = ticks;
		}
		if (ticks - last_botman_stopcheck >= BOTMAN_STOPCHECK_INTERVAL) {
			if (botman_bot_shouldstop(td->botman_handle)) {
				td->running = -1;
			}
			last_botman_stopcheck = ticks;
		}

		/* flush out tick events to bots */
		if (acc >= STEP_INTERVAL) { 
			libman_expire_timers(td);
			while(acc >= STEP_INTERVAL) {
				/* event_tick */
				libman_export_event(td, EVENT_TICK, NULL);
				acc -= STEP_INTERVAL;
			}
		}

		/* if the bot is disconnected, see if it is time to reconnect */
		if (n->state == NS_DISCONNECTED) {
			if (ticks - n->ticks->disconnected > 60000) {
				free_thread_data(td);
				init_thread_data(td);
				connect_to_server(td);
			} else {
				usleep(50000);	/* 50ms */
				continue;
			}
		}

		/* see if the config file has been modified and if so send a reread event */
		if (ticks - last_config_mtime_check >= CONFIG_MTIME_POLL_INTERVAL) {
			struct stat attr;
			memset(&attr, 0, sizeof(struct stat));
			if (stat(td->config->filename, &attr) == 0) {
				if (td->config->last_modified_time != attr.st_mtime) {
					libman_export_event(td, EVENT_CONFIG_CHANGE, NULL);
					td->config->last_modified_time = attr.st_mtime;
				}
			}
			last_config_mtime_check = ticks;
		}

		/* use up to STEP_INTERVAL ms for the db thread */
		ticks_ms_t ticks_taken = get_ticks_ms() - ticks;
		ticks_ms_t db_ticks = ticks_taken > STEP_INTERVAL ? STEP_INTERVAL : STEP_INTERVAL - ticks_taken;
		db_instance_export_events(db_ticks);

		/* read a packet or wait for a timeout */
		ticks_taken = get_ticks_ms() - ticks;
		ticks_ms_t timeout = ticks_taken > STEP_INTERVAL ? 0 : STEP_INTERVAL - ticks_taken;
		while (poll(n->pfd, 1, (int)timeout) > 0) {
			/* process incoming packet, data is waiting */
			pktl = (int)read(n->fd, pkt, MAX_PACKET);
			if (pktl >= 0) {
				++n->stats->packets_read;
				n->ticks->last_pkt_received = get_ticks_ms();

				if (n->encrypt->use_encryption) {
					if (pkt[0] == 0x00) {
						if (pktl >= 2) {
							decrypt_buffer(td, &pkt[2], pktl-2);
						}
					} else {
						decrypt_buffer(td, &pkt[1], pktl-1);
					}
				}

				if (td->debug->spew_packets) {
					spew_packet(pkt, pktl, DIR_INCOMING);
				}

				process_incoming_packet(td, pkt, pktl);
			}

			ticks_taken = get_ticks_ms() - ticks;
			timeout = timeout > ticks_taken ? timeout - ticks_taken : 0;
		}

		/* update the tick count after potential sleeping in poll() */
		ticks = get_ticks_ms();

		/* network state specific actions */
		if (n->state == NS_CONNECTING) {
			/* retransmit connection request if it was lost */
			if (ticks - n->ticks->last_connection_request > 15000) {
				pkt_send_client_key(n->encrypt->client_key);
				n->ticks->last_connection_request = ticks;
			}
		} else if (ticks - n->ticks->last_pkt_received > 30*1000) {
			/* disconnect if no packets have been received for 30 seconds */
			Log(OP_MOD, "No data received for 30 seconds, reconnecting...");
			disconnect_from_server(td);
			continue;
		}

		/* transmit player position update if necessary */
		if (n->state == NS_CONNECTED && td->in_arena) {
			if ((ticks - n->ticks->last_pos_update_sent > 100
			    && td->bot_ship != SHIP_SPECTATOR)
			    || (ticks - n->ticks->last_pos_update_sent > 1000
			    && td->bot_ship == SHIP_SPECTATOR)) {
				pkt_send_position_update(td->bot_pos->x, td->bot_pos->y,
				    td->bot_vel->x, td->bot_vel->y);
				n->ticks->last_pos_update_sent = ticks;
			}
		}

		/* send periodic info/einfo */
		if (n->state == NS_CONNECTED) {
			// subtract 10000 to offset this by 10 seconds from *einfo to avoid filling buffers with commands/responses
			if (td->periodic->info && ticks - (td->periodic->last_info - 10000U) >= td->periodic->info) {
				int nhere = player_get_phere(td);
				PLAYER *parray = player_get_parray(td);
				for (int i = 0; i < nhere; ++i) {
					if (parray[i].here && td->enter->send_info) {
						PrivMessage(&parray[i], "*info");
					}
				}

				td->periodic->last_info = ticks;
			}

			if (td->periodic->einfo && ticks - td->periodic->last_einfo >= td->periodic->einfo) {
				int nhere = player_get_phere(td);
				PLAYER *parray = player_get_parray(td);
				for (int i = 0; i < nhere; ++i) {
					if (parray[i].here && td->enter->send_einfo) {
						PrivMessage(&parray[i], "*einfo");
					}
				}

				td->periodic->last_einfo = ticks;
			}
		}

		/* retransmit reliable packets that have not been acked */
		rpacket_list_t *l = n->rel_o->queue;
		rpacket_list_t::iterator iter = l->begin();
		while (iter != l->end()) {
			RPACKET *rp = *iter;
			if (ticks - rp->ticks > RELIABLE_RETRANSMIT_INTERVAL) {
				PACKET *p = allocate_packet(rp->len);
				memcpy(p->data, rp->data, rp->len);

				/* update the packet's retransmit tick */
				rp->ticks = ticks;

				queue_packet(p, SP_HIGH);
			}
			
			++iter;
		}

		/* free absent players if it's time */
		ticks_ms_t flush_check_interval = 60 * 60 * 1000;
		if (ticks - td->arena->ticks->last_player_flush > flush_check_interval) {
			player_free_absent_players(td, flush_check_interval, true);
			td->arena->ticks->last_player_flush = ticks;
		}

		/* write packets generated during loop iteration */
		send_outgoing_packets(td);
	} /* while td->running >= 0 */
}
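
The acc/ticks/lticks bookkeeping at the top of this loop is a fixed-step accumulator: real elapsed time is accumulated and one EVENT_TICK is exported per STEP_INTERVAL milliseconds, so bots see a steady tick rate even when an iteration runs long. Below is a minimal sketch of just that idiom; the STEP_INTERVAL value and the helper names are stand-ins for the real get_ticks_ms()/libman_export_event() calls, not the project's actual definitions.

#include <stdint.h>

#define STEP_INTERVAL 100            /* assumed step length in ms */

typedef uint32_t ticks_ms_t;

extern ticks_ms_t get_time_ms(void); /* stand-in for get_ticks_ms() */
extern void on_tick(void);           /* stand-in for the EVENT_TICK export */

void run_ticker(volatile int *running)
{
    ticks_ms_t acc = 0;
    ticks_ms_t last = get_time_ms();

    while (*running) {
        ticks_ms_t now = get_time_ms();
        acc += now - last;           /* unsigned math also survives wraparound */
        last = now;

        while (acc >= STEP_INTERVAL) {
            on_tick();               /* exactly one event per elapsed step */
            acc -= STEP_INTERVAL;
        }

        /* ... other per-iteration work: polling, timeouts, sleeping ... */
    }
}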
Example #10
int esp_mainloop(struct openconnect_info *vpninfo, int *timeout)
{
	struct esp *esp = &vpninfo->esp_in[vpninfo->current_esp_in];
	struct esp *old_esp = &vpninfo->esp_in[vpninfo->current_esp_in ^ 1];
	struct pkt *this;
	int work_done = 0;
	int ret;

	if (vpninfo->dtls_state == DTLS_SLEEPING) {
		int when = vpninfo->new_dtls_started + vpninfo->dtls_attempt_period - time(NULL);
		if (when <= 0 || vpninfo->dtls_need_reconnect) {
			vpn_progress(vpninfo, PRG_DEBUG, _("Send ESP probes\n"));
			esp_send_probes(vpninfo);
			when = vpninfo->dtls_attempt_period;
		}
		if (*timeout > when * 1000)
			*timeout = when * 1000;
	}
	if (vpninfo->dtls_fd == -1)
		return 0;

	while (1) {
		int len = vpninfo->ip_info.mtu + vpninfo->pkt_trailer;
		int i;
		struct pkt *pkt;

		if (!vpninfo->dtls_pkt) {
			vpninfo->dtls_pkt = malloc(sizeof(struct pkt) + len);
			if (!vpninfo->dtls_pkt) {
				vpn_progress(vpninfo, PRG_ERR, _("Allocation failed\n"));
				break;
			}
		}
		pkt = vpninfo->dtls_pkt;
		len = recv(vpninfo->dtls_fd, (void *)&pkt->esp, len + sizeof(pkt->esp), 0);
		if (len <= 0)
			break;

		vpn_progress(vpninfo, PRG_TRACE, _("Received ESP packet of %d bytes\n"),
			     len);
		work_done = 1;

		if (len <= sizeof(pkt->esp) + 12)
			continue;

		len -= sizeof(pkt->esp) + 12;
		pkt->len = len;

		if (pkt->esp.spi == esp->spi) {
			if (decrypt_esp_packet(vpninfo, esp, pkt))
				continue;
		} else if (pkt->esp.spi == old_esp->spi &&
			   ntohl(pkt->esp.seq) + esp->seq < vpninfo->old_esp_maxseq) {
			vpn_progress(vpninfo, PRG_TRACE,
				     _("Consider SPI 0x%x, seq %u against outgoing ESP setup\n"),
				     (unsigned)ntohl(old_esp->spi), (unsigned)ntohl(pkt->esp.seq));
			if (decrypt_esp_packet(vpninfo, old_esp, pkt))
				continue;
		} else {
			vpn_progress(vpninfo, PRG_DEBUG,
				     _("Received ESP packet with invalid SPI 0x%08x\n"),
				     (unsigned)ntohl(pkt->esp.spi));
			continue;
		}

		if (pkt->data[len - 1] != 0x04 && pkt->data[len - 1] != 0x29 &&
		    pkt->data[len - 1] != 0x05) {
			vpn_progress(vpninfo, PRG_ERR,
				     _("Received ESP packet with unrecognised payload type %02x\n"),
				     pkt->data[len-1]);
			continue;
		}

		if (len <= 2 + pkt->data[len - 2]) {
			vpn_progress(vpninfo, PRG_ERR,
				     _("Invalid padding length %02x in ESP\n"),
				     pkt->data[len - 2]);
			continue;
		}
		pkt->len = len - 2 - pkt->data[len - 2];
		for (i = 0 ; i < pkt->data[len - 2]; i++) {
			if (pkt->data[pkt->len + i] != i + 1)
				break; /* We can't just 'continue' here because it
					* would only break out of this 'for' loop */
		}
		if (i != pkt->data[len - 2]) {
			vpn_progress(vpninfo, PRG_ERR,
				     _("Invalid padding bytes in ESP\n"));
			continue; /* We can 'continue' here, though */
		}
		vpninfo->dtls_times.last_rx = time(NULL);

		if (pkt->len  == 1 && pkt->data[0] == 0) {
			if (vpninfo->dtls_state == DTLS_SLEEPING) {
				vpn_progress(vpninfo, PRG_INFO,
					     _("ESP session established with server\n"));
				queue_esp_control(vpninfo, 1);
				vpninfo->dtls_state = DTLS_CONNECTING;
			}
			continue;
		}
		if (pkt->data[len - 1] == 0x05) {
			struct pkt *newpkt = malloc(sizeof(*pkt) + vpninfo->ip_info.mtu + vpninfo->pkt_trailer);
			int newlen = vpninfo->ip_info.mtu;
			if (!newpkt) {
				vpn_progress(vpninfo, PRG_ERR,
					     _("Failed to allocate memory to decrypt ESP packet\n"));
				continue;
			}
			if (av_lzo1x_decode(newpkt->data, &newlen,
					    pkt->data, &pkt->len) || pkt->len) {
				vpn_progress(vpninfo, PRG_ERR,
					     _("LZO decompression of ESP packet failed\n"));
				free(newpkt);
				continue;
			}
			newpkt->len = vpninfo->ip_info.mtu - newlen;
			vpn_progress(vpninfo, PRG_TRACE,
				     _("LZO decompressed %d bytes into %d\n"),
				     len - 2 - pkt->data[len-2], newpkt->len);
			queue_packet(&vpninfo->incoming_queue, newpkt);
		} else {
			queue_packet(&vpninfo->incoming_queue, pkt);
			vpninfo->dtls_pkt = NULL;
		}
	}

	if (vpninfo->dtls_state != DTLS_CONNECTED)
		return 0;

	switch (keepalive_action(&vpninfo->dtls_times, timeout)) {
	case KA_REKEY:
		vpn_progress(vpninfo, PRG_ERR, _("Rekey not implemented for ESP\n"));
		break;

	case KA_DPD_DEAD:
		vpn_progress(vpninfo, PRG_ERR, _("ESP detected dead peer\n"));
		queue_esp_control(vpninfo, 0);
		esp_close(vpninfo);
		esp_send_probes(vpninfo);
		return 1;

	case KA_DPD:
		vpn_progress(vpninfo, PRG_DEBUG, _("Send ESP probes for DPD\n"));
		esp_send_probes(vpninfo);
		work_done = 1;
		break;

	case KA_KEEPALIVE:
		vpn_progress(vpninfo, PRG_ERR, _("Keepalive not implemented for ESP\n"));
		break;

	case KA_NONE:
		break;
	}
	unmonitor_write_fd(vpninfo, dtls);
	while ((this = dequeue_packet(&vpninfo->outgoing_queue))) {
		int len;

		len = encrypt_esp_packet(vpninfo, this);
		if (len > 0) {
			ret = send(vpninfo->dtls_fd, (void *)&this->esp, len, 0);
			if (ret < 0) {
				/* Not that this is likely to happen with UDP, but... */
				if (errno == ENOBUFS || errno == EAGAIN || errno == EWOULDBLOCK) {
					monitor_write_fd(vpninfo, dtls);
					/* XXX: Keep the packet somewhere? */
					free(this);
					return work_done;
				} else {
					/* A real error in sending. Fall back to TCP? */
					vpn_progress(vpninfo, PRG_ERR,
						     _("Failed to send ESP packet: %s\n"),
						     strerror(errno));
				}
			} else {
				vpninfo->dtls_times.last_tx = time(NULL);

				vpn_progress(vpninfo, PRG_TRACE, _("Sent ESP packet of %d bytes\n"),
					     len);
			}
		} else {
			/* XXX: Fall back to TCP transport? */
		}
		free(this);
		work_done = 1;
	}

	return work_done;
}
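
The padding checks above implement the standard ESP trailer validation from RFC 4303: the last byte is the payload (next-header) type, the byte before it is the pad length, and the pad bytes themselves must be the sequence 1, 2, 3, ... A standalone sketch of just that check, detached from openconnect's pkt structure, is given below; it returns the payload length on success and -1 on a malformed trailer.

int esp_strip_trailer(const unsigned char *data, int len)
{
    if (len < 2)
        return -1;

    int padlen = data[len - 2];
    if (len <= 2 + padlen)            /* padding longer than the packet */
        return -1;

    for (int i = 0; i < padlen; i++)
        if (data[len - 2 - padlen + i] != i + 1)
            return -1;                /* corrupt padding bytes */

    return len - 2 - padlen;          /* bytes of real payload left */
}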
Example #11
int deoptimize_packet(__u8 queue, struct packet *thispacket) {
    return queue_packet(&workers[queue].deoptimization.queue, thispacket);
}
Example #12
void PacketFilter_MassStorage::filter_packet(Packet* packet) {
	int type = UNKNOWN;
	if ((packet->wLength == 31) &&
		(packet->data[0] == 0x55) &&
		(packet->data[1] == 0x53) &&
		(packet->data[2] == 0x42) &&
		(packet->data[3] == 0x43)) {
			type = COMMAND;
	}
	if ((packet->wLength == 13) &&
		(packet->data[0] == 0x55) &&
		(packet->data[1] == 0x53) &&
		(packet->data[2] == 0x42) &&
		(packet->data[3] == 0x53)) {
			type = STATUS;
	}

	if((type==UNKNOWN) &&
	   (packet->wLength > 64)) {
		// Probably data
		type = state;
	}

	switch(type) {
		case COMMAND:
			switch(packet->data[0x0f]) {
				case 0x28:
					state = READ;
					block_count = (packet->data[0x16]<<8) | packet->data[0x17];
					base_address = packet->data[0x11]<<24 |
								   packet->data[0x12]<<16 |
								   packet->data[0x13]<<8 |
								   packet->data[0x14];
					block_offset = 0;
					fprintf(stderr, "CBW: Read LBA: 0x%08X, %d blocks\n",
							base_address, block_count);
					break;
				case 0x2a:
					state = WRITE;
					if(inband_signalling)
						block_writes = inband_block_writes;
					if(block_writes)
						packet->transmit = false;
					tag[0] = packet->data[0x04];
					tag[1] = packet->data[0x05];
					tag[2] = packet->data[0x06];
					tag[3] = packet->data[0x07];
					fprintf(stderr, "CBW: Write, tag: %02x%02x\n", tag[1], tag[0]);
					block_count = (packet->data[0x16]<<8) | packet->data[0x17];
					base_address = packet->data[0x11]<<24 |
								   packet->data[0x12]<<16 |
								   packet->data[0x13]<<8 |
								   packet->data[0x14];
					block_offset = 0;
					fprintf(stderr, "CBW: Write LBA: 0x%08X, %d blocks\n",
							base_address, block_count);
					break;
				default:
					if(packet->data[0x0f]) // Ignore status ping
						fprintf(stderr, "CBW: (%02x), tag: %02x %02x %02x %02x\n",
								packet->data[0x0f],
								packet->data[0x04],
								packet->data[0x05],
								packet->data[0x06],
								packet->data[0x07]);
					break;
			}
			break;
		
		case WRITE:
			fprintf(stderr, "WRITE: 0x%08X\n", block_offset + base_address);
			if(cache_blocks)
				cache_write(block_offset + base_address, packet->data);
			if(inband_signalling)
				check_for_password(packet->data);
			if(block_writes) {
				packet->transmit = false;
				if(++block_offset == block_count)
					queue_packet();
			}
			break;
		
		case READ:
			fprintf(stderr, "READ: 0x%08X\n", block_offset + base_address);
			if(cache_blocks)
				cache_read(block_offset + base_address, packet->data);
			++block_offset;
			break;
		
		case STATUS:
			// A CSW (Command Status Wrapper)
			switch(packet->data[12]) {
				case 0:
					if(state==WRITE)
						fprintf(stderr, "CSW: Success, tag: %02x%02x\n",
										packet->data[0x05],
										packet->data[0x04]);
					break;
				default:
					fprintf(stderr, "CSW: Error(%d)\n", packet->data[0x0c]);
					break;
			}
			state = IDLE;
			break;
	}
}
Example #13
void rtp_session_rtp_parse(RtpSession *session, mblk_t *mp, uint32_t local_str_ts, struct sockaddr *addr, socklen_t addrlen)
{
	int i;
	int discarded;
	int duplicate;
	rtp_header_t *rtp;
	int msgsize;
	RtpStream *rtpstream=&session->rtp;
	rtp_stats_t *stats=&rtpstream->stats;

	msgsize=(int)(mp->b_wptr-mp->b_rptr);

	if (msgsize<RTP_FIXED_HEADER_SIZE){
		ortp_warning("Packet too small to be a rtp packet (%i)!",msgsize);
		rtpstream->stats.bad++;
		ortp_global_stats.bad++;
		freemsg(mp);
		return;
	}
	rtp=(rtp_header_t*)mp->b_rptr;
	if (rtp->version!=2)
	{
		/* try to see if it is a STUN packet */
		uint16_t stunlen=*((uint16_t*)(mp->b_rptr + sizeof(uint16_t)));
		stunlen = ntohs(stunlen);
		if (stunlen+20==mp->b_wptr-mp->b_rptr){
			/* this looks like a stun packet */
			if (session->eventqs!=NULL){
				OrtpEvent *ev=ortp_event_new(ORTP_EVENT_STUN_PACKET_RECEIVED);
				OrtpEventData *ed=ortp_event_get_data(ev);
				ed->packet=mp;
				memcpy(&ed->source_addr,addr,addrlen);
				ed->source_addrlen=addrlen;
				ed->info.socket_type = OrtpRTPSocket;
				rtp_session_dispatch_event(session,ev);
				return;
			}
		}
		/* discard in two cases: the packet is not STUN, OR nobody is interested in STUN (no eventqs) */
		ortp_debug("Receiving rtp packet with version number !=2...discarded");
		stats->bad++;
		ortp_global_stats.bad++;
		freemsg(mp);
		return;
	}

	/* only count non-stun packets. */
	ortp_global_stats.packet_recv++;
	stats->packet_recv++;
	ortp_global_stats.hw_recv+=msgsize;
	stats->hw_recv+=msgsize;
	session->rtp.hwrcv_since_last_SR++;
	session->rtcp_xr_stats.rcv_since_last_stat_summary++;

	/* convert all header data from network order to host order */
	rtp->seq_number=ntohs(rtp->seq_number);
	rtp->timestamp=ntohl(rtp->timestamp);
	rtp->ssrc=ntohl(rtp->ssrc);
	/* convert csrc if necessary */
	if (rtp->cc*sizeof(uint32_t) > (uint32_t) (msgsize-RTP_FIXED_HEADER_SIZE)){
		ortp_debug("Receiving too short rtp packet.");
		stats->bad++;
		ortp_global_stats.bad++;
		freemsg(mp);
		return;
	}

#ifndef PERF
	/* Write down the last RTP/RTCP packet reception time. */
	ortp_gettimeofday(&session->last_recv_time, NULL);
#endif

	for (i=0;i<rtp->cc;i++)
		rtp->csrc[i]=ntohl(rtp->csrc[i]);
	/*the goal of the following code is to lock on an incoming SSRC to avoid
	receiving "mixed streams"*/
	if (session->ssrc_set){
		/*the ssrc is set, so we must check it */
		if (session->rcv.ssrc!=rtp->ssrc){
			if (session->inc_ssrc_candidate==rtp->ssrc){
				session->inc_same_ssrc_count++;
			}else{
				session->inc_same_ssrc_count=0;
				session->inc_ssrc_candidate=rtp->ssrc;
			}
			if (session->inc_same_ssrc_count>=session->rtp.ssrc_changed_thres){
				/* store the sender rtp address to do symmetric RTP */
				if (!session->use_connect){
					if (session->rtp.gs.socket>0 && session->symmetric_rtp){
						/* store the sender rtp address to do symmetric RTP */
						memcpy(&session->rtp.gs.rem_addr,addr,addrlen);
						session->rtp.gs.rem_addrlen=addrlen;
					}
				}
				session->rtp.rcv_last_ts = rtp->timestamp;
				session->rcv.ssrc=rtp->ssrc;
				rtp_signal_table_emit(&session->on_ssrc_changed);
			}else{
				/*discard the packet*/
				ortp_debug("Receiving packet with unknown ssrc.");
				stats->bad++;
				ortp_global_stats.bad++;
				freemsg(mp);
				return;
			}
		} else{
			/* The SSRC change must not happen if we still receive
			ssrc from the initial source. */
			session->inc_same_ssrc_count=0;
		}
	}else{
		session->ssrc_set=TRUE;
		session->rcv.ssrc=rtp->ssrc;

		if (!session->use_connect){
			if (session->rtp.gs.socket>0 && session->symmetric_rtp){
				/* store the sender rtp address to do symmetric RTP */
				memcpy(&session->rtp.gs.rem_addr,addr,addrlen);
				session->rtp.gs.rem_addrlen=addrlen;
			}
		}
	}

	/* update some statistics */
	{
		poly32_t *extseq=(poly32_t*)&rtpstream->hwrcv_extseq;
		if (rtp->seq_number>extseq->split.lo){
			extseq->split.lo=rtp->seq_number;
		}else if (rtp->seq_number<200 && extseq->split.lo>((1<<16) - 200)){
			/* this is a check for sequence number looping */
			extseq->split.lo=rtp->seq_number;
			extseq->split.hi++;
		}

		/* the first sequence number received should be initialized at the beginning
		or at any resync, so that the first receiver report contains a valid loss rate */
		if (!(session->flags & RTP_SESSION_RECV_SEQ_INIT)){
			rtp_session_set_flag(session, RTP_SESSION_RECV_SEQ_INIT);
			rtpstream->hwrcv_seq_at_last_SR=rtp->seq_number-1;
			session->rtcp_xr_stats.rcv_seq_at_last_stat_summary=rtp->seq_number-1;
		}
		if (stats->packet_recv==1){
			session->rtcp_xr_stats.first_rcv_seq=extseq->one;
		}
		session->rtcp_xr_stats.last_rcv_seq=extseq->one;
	}

	/* check for possible telephone events */
	if (rtp_profile_is_telephone_event(session->snd.profile, rtp->paytype)){
		queue_packet(&session->rtp.tev_rq,session->rtp.max_rq_size,mp,rtp,&discarded,&duplicate);
		stats->discarded+=discarded;
		ortp_global_stats.discarded+=discarded;
		stats->packet_dup_recv+=duplicate;
		ortp_global_stats.packet_dup_recv+=duplicate;
		session->rtcp_xr_stats.discarded_count += discarded;
		session->rtcp_xr_stats.dup_since_last_stat_summary += duplicate;
		return;
	}

	/* check for a possible payload type change, in order to update our clock-rate dependent
	parameters accordingly */
	if (session->hw_recv_pt!=rtp->paytype){
		rtp_session_update_payload_type(session,rtp->paytype);
	}

	/* Drop the packets while the RTP_SESSION_FLUSH flag is set. */
	if (session->flags & RTP_SESSION_FLUSH) {
		freemsg(mp);
		return;
	}

	jitter_control_new_packet(&session->rtp.jittctl,rtp->timestamp,local_str_ts);

	update_rtcp_xr_stat_summary(session, mp, local_str_ts);

	if (session->flags & RTP_SESSION_FIRST_PACKET_DELIVERED) {
		/* detect important timestamp jumps into the future, to work around broken rtp senders */
		if (RTP_TIMESTAMP_IS_NEWER_THAN(rtp->timestamp,session->rtp.rcv_last_ts+session->rtp.ts_jump)){
			ortp_warning("rtp_parse: timestamp jump in the future detected.");
			rtp_signal_table_emit2(&session->on_timestamp_jump,&rtp->timestamp);
		}
		else if (RTP_TIMESTAMP_IS_STRICTLY_NEWER_THAN(session->rtp.rcv_last_ts,rtp->timestamp) 
			|| RTP_SEQ_IS_STRICTLY_GREATER_THAN(session->rtp.rcv_last_seq,rtp->seq_number)){
			/* don't queue packets older than the last returned packet to the application, or whose sequence number
			 is behind the last packet returned to the application*/
			/* Signal a timestamp jump in case of a
			 * large negative ts jump or if ts is set to 0
			 */

			if ( RTP_TIMESTAMP_IS_STRICTLY_NEWER_THAN(session->rtp.rcv_last_ts, rtp->timestamp + session->rtp.ts_jump) ){
				ortp_warning("rtp_parse: negative timestamp jump detected");
				rtp_signal_table_emit2(&session->on_timestamp_jump, &rtp->timestamp);
			}
			ortp_debug("rtp_parse: discarding too old packet (ts=%i)",rtp->timestamp);
			freemsg(mp);
			stats->outoftime++;
			ortp_global_stats.outoftime++;
			session->rtcp_xr_stats.discarded_count++;
			return;
		}
	}

	if (queue_packet(&session->rtp.rq,session->rtp.max_rq_size,mp,rtp,&discarded,&duplicate))
		jitter_control_update_size(&session->rtp.jittctl,&session->rtp.rq);
	stats->discarded+=discarded;
	ortp_global_stats.discarded+=discarded;
	stats->packet_dup_recv+=duplicate;
	ortp_global_stats.packet_dup_recv+=duplicate;
	session->rtcp_xr_stats.discarded_count += discarded;
	session->rtcp_xr_stats.dup_since_last_stat_summary += duplicate;
	if ((discarded == 0) && (duplicate == 0)) {
		session->rtcp_xr_stats.rcv_count++;
	}
}
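
The "update some statistics" block above extends the 16-bit RTP sequence number to 32 bits: the low half tracks the newest sequence number, and the high half is incremented when a small value arrives right after one close to 65535 (the 200-packet window). The sketch below isolates that logic; the union approximates oRTP's poly32_t and assumes a little-endian hi/lo split, so treat it as illustrative only.

#include <stdint.h>

typedef union {
    uint32_t one;                               /* full 32-bit extended sequence number */
    struct { uint16_t lo; uint16_t hi; } split; /* little-endian layout assumed */
} ext_seq_t;

void extend_seq(ext_seq_t *ext, uint16_t seq)
{
    if (seq > ext->split.lo) {
        ext->split.lo = seq;                    /* normal forward progress */
    } else if (seq < 200 && ext->split.lo > ((1 << 16) - 200)) {
        ext->split.lo = seq;                    /* wrapped around 65535 -> 0 */
        ext->split.hi++;
    }
    /* other reorderings leave the extended value untouched, as in the original */
}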
Example #14
/**
 * @function:   arp_encode
 * @param:      uint16_t, Length of the packet.
 * @param:      uint8_t *, Pointer to the first byte of the packet.
 * @return:     uint16_t, Size of the packet to be transmitted.
 * @brief:      Populates an IP packet with ARP address data. If the
 *              node cannot be found in the ARP table, a new ARP request
 *              packet is created instead; if the destination is outside
 *              the local network mask, the packet is forwarded to the
 *              default router.
 */
uint16_t
arp_encode(uint16_t length, uint8_t* packet)
{
    struct arp_entry_t* entry;
    ip_addr_t dest_ip_addr;
    uint8_t i;

    // Create IP header structure
    struct ip_header_t* ip_header = (struct ip_header_t*)(packet);

    // Find the destination IP address in the ARP table and construct
    // the Ethernet header. If the destination IP address isn't on the
    // local network, we use the default router's IP address instead.

    // If no ARP table entry is found, we overwrite the original IP
    // packet with an ARP request for the IP address.

    // First check if destination is a local broadcast.
    if(ip_addr_compare(ip_header->dest_addr, ip_broadcast_addr)) {
        // Set destination mac address
        memcpy(ip_header->mac.dest_addr, mac_broadcast_addr, 6);
    } else {
        // Check if the destination address is on the local network.
        // If the destination address was not on the local network,
        // we need to use the default router's IP address instead of
        // the destination address when determining the MAC address.
        if(!ip_mask_compare(ip_header->dest_addr, ip_get_host_addr(), ip_get_netmask())) {
            memcpy(dest_ip_addr, ip_get_default_router(), 4);
        } else {
            memcpy(dest_ip_addr, ip_header->dest_addr, 4);
        }

        // Lookup destination address
        for(i = 0; i < ARP_TABLE_SIZE; i++) {
            entry = &arp_table[i];

            if(ip_addr_compare(dest_ip_addr, entry->ip_addr)) {
                break;
            }
        }

        // If the destination address was not in our ARP table
        // we queue the packet and overwrite the packet with an
        // ARP request.
        if(i == ARP_TABLE_SIZE) {
            // Queue packet for later transmission
            queue_packet(length, packet);

            // Assign ARP header pointer
            struct arp_header_t* arp_header = ((struct arp_header_t*) &packet[0]);

            // Set Ethernet destination to broadcast, clear the ARP target MAC,
            // and set both source MAC addresses to our own
            memset(arp_header->mac.dest_addr, 0xFF, 6);
            memset(arp_header->mac_dest_addr, 0x00, 6);
            memcpy(arp_header->mac.src_addr, mac_get_host_addr(), 6);
            memcpy(arp_header->mac_src_addr, mac_get_host_addr(), 6);

            // Set destination and source ip address
            memcpy(arp_header->ip_dest_addr, dest_ip_addr, 4);
            memcpy(arp_header->ip_src_addr, ip_get_host_addr(), 4);

            // Set the opcode to request
            arp_header->opcode = htons((uint16_t) ARP_OPCODE_REQUEST);

            // Set hardware type
            arp_header->hardware_type = htons((uint16_t) ARP_HARDWARE_TYPE);
            arp_header->hardware_length = 6;

            // Set packet protocol
            arp_header->protocol_type = htons((uint16_t) MAC_TYPE_IP4);
            arp_header->protocol_length = 4;

            // Set packet type to ARP
            arp_header->mac.type = htons((uint16_t) MAC_TYPE_ARP);

            // Return packet size
            return sizeof(struct arp_header_t);
        }

        // Set destination mac address
        memcpy(ip_header->mac.dest_addr, entry->mac_addr, 6);
    }

    // Set source mac address
    memcpy(ip_header->mac.src_addr, mac_get_host_addr(), 6);

    // Set packet type to IP
    ip_header->mac.type = htons((uint16_t) MAC_TYPE_IP4);

    // Return packet size
    return (length);
}
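
arp_encode() decides whether to resolve the destination itself or the default router by checking whether the destination address lies on the local network. A sketch of what such an ip_mask_compare-style test does follows, assuming ip_addr_t is the 4-byte array implied by the memcpy(..., 4) calls above; the project's real helper may differ.

#include <stdint.h>

typedef uint8_t ip_addr_t[4];   /* assumed representation */

/* Returns 1 when a and b are on the same subnet under mask, 0 otherwise. */
int ip_mask_compare_sketch(const ip_addr_t a, const ip_addr_t b, const ip_addr_t mask)
{
    for (int i = 0; i < 4; i++)
        if ((a[i] ^ b[i]) & mask[i])
            return 0;           /* addresses differ inside the network part */
    return 1;
}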
Example #15
void tx_complete_isr(void)
{
    UWORD8  q_num        = 0;
    UWORD16 i            = 0;
    UWORD16 num_dat_pkts = 0;
    UWORD8  *buff_hdl    = 0;
    UWORD8  *msa         = 0;
    UWORD16 num_tx_dscr  = get_machw_num_tx_frames();
    UWORD32 tx_dscr      = get_machw_tx_frame_pointer();

    /* Process the Tx complete interrupt for special test modes */
    process_txc_burst_tx_mode();

    /* Get the queue number from the descriptor and update test statistics */
    q_num = get_tx_dscr_q_num((UWORD32*)tx_dscr);
    g_test_stats.txd.txc[q_num]++;

    /* The given first transmit frame pointer should be at the head of the   */
    /* transmit queue. If it is not, re-synchronize by deleting all the      */
    /* packets till the given frame.                                         */
    qmu_sync_tx_packet(&g_q_handle.tx_handle, q_num, (UWORD8 *)tx_dscr);

    /* Read out the TSSI value from the first descriptor */
    update_tssi_stats((UWORD32 *)tx_dscr);

    /* Update the MIB statistics */
    update_tx_mib((UWORD8 *)tx_dscr, num_tx_dscr, NULL);

    /* Process and remove the given number of frames in order from the head  */
    /* of the transmit queue.                                                */
    for(i = 0; i < num_tx_dscr; i++)
    {
        UWORD8 dr = 0;

        /* Get the next Tx-dscr */
        tx_dscr = (UWORD32)get_txq_head_pointer(q_num);

        /* Get the transmit data rate and update the Tx data rate statistics */
        dr = get_tx_dscr_data_rate_0((UWORD32*)tx_dscr);
        update_mac_tx_stats(dr);

        msa = (UWORD8 *)get_tx_dscr_mac_hdr_addr((UWORD32 *)tx_dscr);

        if(get_type(msa) == DATA_BASICTYPE)
            num_dat_pkts++;

        /* Free the transmit buffer in frame-injector mode only. In other    */
        /* modes the same transmit buffer is reused.                         */
        if(g_test_config.oper_mode == 1)
        {
            buff_hdl = (UWORD8 *)get_tx_dscr_buffer_addr((UWORD32 *)tx_dscr);
            mem_free(g_shared_pkt_mem_handle, buff_hdl);
        }

        /* Delete the packet from the transmit queue */
        qmu_del_tx_packet(&g_q_handle.tx_handle, q_num);
    }

    g_test_stats.txd.data += num_dat_pkts;
    g_test_stats.txd.oth  += (num_tx_dscr - num_dat_pkts);

    /* Check if configured number of packets have been transmitted. If not   */
    /* add 1 more packet here.                                               */
    if(is_tx_test_complete() == BFALSE)
    {
        UWORD32  num_pkts_q  = 0;
        q_info_t *temp_qinfo = NULL;

        /* The number of frames to queue depends upon whether the test is */
        /* time or number bound.                                          */
        if(g_test_config.tx.test_time == 0)
        {
            num_pkts_q = g_num_frames_txqd - get_hut_num_tx_pkts();
            if(num_pkts_q > num_dat_pkts)
                num_pkts_q = num_dat_pkts;
        }
        else
            num_pkts_q = num_dat_pkts;

        for(i = 0; i < num_pkts_q; i++)
        {
            alloc_set_dscr();
            temp_qinfo = (q_info_t *)get_txc_qinfo_struct(q_num);
            queue_packet(temp_qinfo);
            g_num_frames_txqd++;
            check_and_send_ctrl_frames(temp_qinfo);
        }
    }
    else
    {
        /* The Tx Test has completed. Note down the end time and compute the */
        /* total time taken for transmission.                                */
        if(g_total_tx_time == 0)
        {
            UWORD32 tsf_hi = 0;
            UWORD32 tsf_lo = 0;

            get_machw_tsf_timer(&tsf_hi, &tsf_lo);

            g_total_tx_time = tsf_lo - g_tx_start_time_lo;
        }
    }
}
Example #16
int dtls_mainloop(struct openconnect_info *vpninfo, int *timeout)
{
	int work_done = 0;
	char magic_pkt;

	while (1) {
		int len = vpninfo->ip_info.mtu;
		unsigned char *buf;

		if (!dtls_pkt || len > dtls_pkt_max) {
			realloc_inplace(dtls_pkt, sizeof(struct pkt) + len);
			if (!dtls_pkt) {
				vpn_progress(vpninfo, PRG_ERR, "Allocation failed\n");
				break;
			}
			dtls_pkt_max = len;
		}

		buf = dtls_pkt->data - 1;
		len = DTLS_RECV(vpninfo->dtls_ssl, buf, len + 1);
		if (len <= 0)
			break;

		vpn_progress(vpninfo, PRG_TRACE,
			     _("Received DTLS packet 0x%02x of %d bytes\n"),
			     buf[0], len);

		vpninfo->dtls_times.last_rx = time(NULL);

		switch (buf[0]) {
		case AC_PKT_DATA:
			dtls_pkt->len = len - 1;
			queue_packet(&vpninfo->incoming_queue, dtls_pkt);
			dtls_pkt = NULL;
			work_done = 1;
			break;

		case AC_PKT_DPD_OUT:
			vpn_progress(vpninfo, PRG_TRACE, _("Got DTLS DPD request\n"));

			/* FIXME: What if the packet doesn't get through? */
			magic_pkt = AC_PKT_DPD_RESP;
			if (DTLS_SEND(vpninfo->dtls_ssl, &magic_pkt, 1) != 1)
				vpn_progress(vpninfo, PRG_ERR,
					     _("Failed to send DPD response. Expect disconnect\n"));
			continue;

		case AC_PKT_DPD_RESP:
			vpn_progress(vpninfo, PRG_TRACE, _("Got DTLS DPD response\n"));
			break;

		case AC_PKT_KEEPALIVE:
			vpn_progress(vpninfo, PRG_TRACE, _("Got DTLS Keepalive\n"));
			break;

		default:
			vpn_progress(vpninfo, PRG_ERR,
				     _("Unknown DTLS packet type %02x, len %d\n"),
				     buf[0], len);
			if (1) {
				/* Some versions of OpenSSL have bugs with receiving out-of-order
				 * packets. Not only do they wrongly decide to drop packets if
				 * two packets get swapped in transit, but they also _fail_ to
				 * drop the packet in non-blocking mode; instead they return
				 * the appropriate length of garbage. So don't abort... for now. */
				break;
			} else {
				vpninfo->quit_reason = "Unknown packet received";
				return 1;
			}

		}
	}

	switch (keepalive_action(&vpninfo->dtls_times, timeout)) {
	case KA_REKEY: {
		int ret;

		vpn_progress(vpninfo, PRG_INFO, _("DTLS rekey due\n"));

		/* There ought to be a method of rekeying DTLS without tearing down
		   the CSTP session and restarting, but we don't (yet) know it */
		ret = cstp_reconnect(vpninfo);
		if (ret) {
			vpn_progress(vpninfo, PRG_ERR, _("Reconnect failed\n"));
			vpninfo->quit_reason = "CSTP reconnect failed";
			return ret;
		}

		if (dtls_restart(vpninfo))
			vpn_progress(vpninfo, PRG_ERR, _("DTLS rekey failed\n"));
		return 1;
	}

	case KA_DPD_DEAD:
		vpn_progress(vpninfo, PRG_ERR, _("DTLS Dead Peer Detection detected dead peer!\n"));
		/* Fall back to SSL, and start a new DTLS connection */
		dtls_restart(vpninfo);
		return 1;

	case KA_DPD:
		vpn_progress(vpninfo, PRG_TRACE, _("Send DTLS DPD\n"));

		magic_pkt = AC_PKT_DPD_OUT;
		if (DTLS_SEND(vpninfo->dtls_ssl, &magic_pkt, 1) != 1)
			vpn_progress(vpninfo, PRG_ERR,
				     _("Failed to send DPD request. Expect disconnect\n"));

		/* last_dpd will just have been set */
		vpninfo->dtls_times.last_tx = vpninfo->dtls_times.last_dpd;
		work_done = 1;
		break;

	case KA_KEEPALIVE:
		/* No need to send an explicit keepalive
		   if we have real data to send */
		if (vpninfo->outgoing_queue)
			break;

		vpn_progress(vpninfo, PRG_TRACE, _("Send DTLS Keepalive\n"));

		magic_pkt = AC_PKT_KEEPALIVE;
		if (DTLS_SEND(vpninfo->dtls_ssl, &magic_pkt, 1) != 1)
			vpn_progress(vpninfo, PRG_ERR,
				     _("Failed to send keepalive request. Expect disconnect\n"));
		time(&vpninfo->dtls_times.last_tx);
		work_done = 1;
		break;

	case KA_NONE:
		;
	}

	/* Service outgoing packet queue */
	FD_CLR(vpninfo->dtls_fd, &vpninfo->select_wfds);
	while (vpninfo->outgoing_queue) {
		struct pkt *this = vpninfo->outgoing_queue;
		int ret;

		vpninfo->outgoing_queue = this->next;
		vpninfo->outgoing_qlen--;

		/* One byte of header */
		this->hdr[7] = AC_PKT_DATA;

#if defined(DTLS_OPENSSL)
		ret = SSL_write(vpninfo->dtls_ssl, &this->hdr[7], this->len + 1);
		if (ret <= 0) {
			ret = SSL_get_error(vpninfo->dtls_ssl, ret);

			if (ret == SSL_ERROR_WANT_WRITE) {
				FD_SET(vpninfo->dtls_fd, &vpninfo->select_wfds);
				vpninfo->outgoing_queue = this;
				vpninfo->outgoing_qlen++;

			} else if (ret != SSL_ERROR_WANT_READ) {
				/* If it's a real error, kill the DTLS connection and
				   requeue the packet to be sent over SSL */
				vpn_progress(vpninfo, PRG_ERR,
					     _("DTLS got write error %d. Falling back to SSL\n"),
					     ret);
				openconnect_report_ssl_errors(vpninfo);
				dtls_restart(vpninfo);
				vpninfo->outgoing_queue = this;
				vpninfo->outgoing_qlen++;
				work_done = 1;
			}
			return work_done;
		}
#elif defined(DTLS_GNUTLS)
		ret = gnutls_record_send(vpninfo->dtls_ssl, &this->hdr[7], this->len + 1);
		if (ret <= 0) {
			if (ret != GNUTLS_E_AGAIN) {
				vpn_progress(vpninfo, PRG_ERR,
					     _("DTLS got write error: %s. Falling back to SSL\n"),
					     gnutls_strerror(ret));
				dtls_restart(vpninfo);
				vpninfo->outgoing_queue = this;
				vpninfo->outgoing_qlen++;
				work_done = 1;
			} else if (gnutls_record_get_direction(vpninfo->dtls_ssl)) {
				FD_SET(vpninfo->dtls_fd, &vpninfo->select_wfds);
				vpninfo->outgoing_queue = this;
				vpninfo->outgoing_qlen++;
			}

			return work_done;
		}
#endif
		time(&vpninfo->dtls_times.last_tx);
		vpn_progress(vpninfo, PRG_TRACE,
			     _("Sent DTLS packet of %d bytes; DTLS send returned %d\n"),
			     this->len, ret);
		free(this);
	}

	return work_done;
}
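
Both openconnect loops in this listing (the ESP and DTLS examples) pop packets off a singly linked outgoing queue and, on a transient send failure, push the packet back onto the head so ordering is preserved and it is retried on the next pass. The generic sketch below captures only that requeue pattern; the pkt layout and try_send() are hypothetical stand-ins, not openconnect's real structures.

#include <errno.h>
#include <stdlib.h>

struct pkt {
    struct pkt *next;
    int len;
    unsigned char data[1500];
};

extern int try_send(struct pkt *p);   /* returns -1 and sets errno on failure */

void flush_outgoing(struct pkt **queue, int *qlen)
{
    struct pkt *p;

    while ((p = *queue) != NULL) {
        *queue = p->next;
        (*qlen)--;

        if (try_send(p) < 0 &&
            (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)) {
            p->next = *queue;          /* transient failure: requeue at the head */
            *queue = p;
            (*qlen)++;
            return;                    /* try again on the next pass */
        }

        free(p);                       /* sent, or dropped on a hard error */
    }
}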