Example #1
//-----------------------------------------------------------------------------
int read_data_word ( unsigned int addr, unsigned int *data )
{
    unsigned int ra,rb,rc,rd;

    sprintf((char *)sdata,"R %u 4\r",addr);
    ra=strlen((char *)sdata);
    ser_senddata(sdata,ra);
    if(get_return_code()) return(1);

    ra=wait_for_packet();
    if(ra<3) return(1);


    for(rb=0;rb<ra;rb++) printf("0x%02X ",rdata[rb]); printf("\n");

    rb=uudecode(udata,&rdata[1],ra-2);
    rc=0;
    rd=0;
    for(ra=0;ra<rb;ra++)
    {
        rd<<=8;
        rd|=udata[ra];
        rc+=udata[ra];
        printf("0x%02X %u\n",udata[ra],rc);
    }
    if(get_checksum(&ra)) return(1);
    if(ra!=rc) return(1);
    ser_senddata(okpatt,sizeof(okpatt));
    *data=rd;
    return(0);
}
Example #2
static void
endless_loop(void)
{
    val_context_t *context;

    /*
     * signal handlers to exit gracefully
     */
#ifdef SIGTERM
    signal(SIGTERM, sig_shutdown);
#endif
#ifdef SIGINT
    signal(SIGINT, sig_shutdown);
#endif

    /*
     * open a port and process incoming packets
     */
    port_setup(1153);
    if (VAL_NO_ERROR != val_create_context(NULL, &context)) {
        val_log(NULL, LOG_ERR, "Cannot create validator context. Exiting.");
        return;
    }

    while (!done) {
        wait_for_packet();
        process_packet(context);
    }

    val_free_context(context);

    val_free_validator_state();
}
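Example #2 leaves its while (!done) loop only when a signal arrives; the done flag and the sig_shutdown() handler it installs are defined elsewhere in the original project. A minimal sketch of what they could look like (an assumption, not the project's actual code):

#include <signal.h>

/* Assumed definitions, not taken from the original source. */
static volatile sig_atomic_t done = 0;

static void
sig_shutdown(int signo)
{
    (void) signo;
    done = 1;    /* lets the while (!done) loop in endless_loop() exit */
}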
Example #3
/**
 *	__skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@peeked: returns non-zero if this packet has been seen before
 *	@err: error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX AX.25 and Appletalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so the caller
 *	needs to unlock the socket in that case (usually by calling
 *	skb_free_datagram)
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	*  8) Great win.)
 *	*			                    --ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    int *peeked, int *err)
{
	struct sk_buff *skb;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		unsigned long cpu_flags;

		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb) {
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else {
				if (!skb->next || IS_ERR(skb->next)) {
					printk("[NET] skb->next error in %s\n", __func__);
					error = -EAGAIN;
					spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
					goto no_packet;
				} else {
					__skb_unlink(skb, &sk->sk_receive_queue);
				}
			}
		}
		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);

		if (skb)
			return skb;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
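The kernel-doc above says the caller owns the returned skb and normally releases it with skb_free_datagram(). A hypothetical caller sketch using the four-argument signature from Example #3; the helper name demo_recv_one, the kernel-side buffer, and the use of skb_copy_bits() are illustrative assumptions, not code from any of the projects above:

#include <linux/skbuff.h>
#include <net/sock.h>

static int demo_recv_one(struct sock *sk, void *buf, int len, int flags)
{
	int peeked = 0, err = 0;
	struct sk_buff *skb;

	/* NULL means no packet; *err holds the reason (e.g. -EAGAIN). */
	skb = __skb_recv_datagram(sk, flags, &peeked, &err);
	if (!skb)
		return err;

	if (len > skb->len)
		len = skb->len;
	skb_copy_bits(skb, 0, buf, len);	/* copy payload into a kernel buffer */

	skb_free_datagram(sk, skb);		/* drop our reference to the skb */
	return len;
}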
Example #4
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    int *peeked, int *off, int *err)
{
	struct sk_buff *skb;
	long timeo;
	int error = 0;

	if ((!sk) || (IS_ERR(sk)))
		goto no_packet;

	error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		unsigned long cpu_flags;
		struct sk_buff_head *queue = &sk->sk_receive_queue;

		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb_queue_walk(queue, skb) {
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
#ifdef CONFIG_HTC_NETWORK_MODIFY
				if (*off >= skb->len && skb->len) {
#else
				if (*off >= skb->len && skb->len) {
#endif
					*off -= skb->len;
					continue;
				}
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else
				__skb_unlink(skb, queue);

			spin_unlock_irqrestore(&queue->lock, cpu_flags);
			return skb;
		}
		spin_unlock_irqrestore(&queue->lock, cpu_flags);

		
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
Example #5
/**
 *	skb_recv_datagram - Receive a datagram skbuff
 *	@sk - socket
 *	@flags - MSG_ flags
 *	@noblock - blocking operation?
 *	@err - error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX AX.25 and Appletalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so the caller
 *	needs to unlock the socket in that case (usually by calling
 *	skb_free_datagram)
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	*  8) Great win.)
 *	*			                    --ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
				  int noblock, int *err)
{
	struct sk_buff *skb;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, noblock);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		if (flags & MSG_PEEK) {
			unsigned long cpu_flags;

			spin_lock_irqsave(&sk->sk_receive_queue.lock,
					  cpu_flags);
			skb = skb_peek(&sk->sk_receive_queue);
			if (skb)
				atomic_inc(&skb->users);
			spin_unlock_irqrestore(&sk->sk_receive_queue.lock,
					       cpu_flags);
		} else
			skb = skb_dequeue(&sk->sk_receive_queue);

		if (skb)
			return skb;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo));

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
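The same kernel-doc documents the peek path: with MSG_PEEK the skb's reference count is bumped and the packet stays on sk_receive_queue, so a later plain receive returns the same datagram and dequeues it. A hypothetical illustration of that behaviour (variable names and the noblock value are assumptions):

	int err = 0;
	struct sk_buff *peeked_skb, *skb;

	peeked_skb = skb_recv_datagram(sk, MSG_PEEK, 1 /* noblock */, &err);
	if (peeked_skb) {
		/* inspect peeked_skb->data without removing it from the queue */
		skb_free_datagram(sk, peeked_skb);	/* drop the extra reference */

		skb = skb_recv_datagram(sk, 0, 1, &err);	/* same datagram, now dequeued */
		if (skb)
			skb_free_datagram(sk, skb);
	}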
Example #6
//-----------------------------------------------------------------------------
int write_data_word ( unsigned int addr, unsigned int data )
{
    unsigned int ra,rb,rc,rd;

    ra=0;
    rd=data;

    //sdata[ra++]=(rd>>24)&0xFF; rd<<=8;
    //sdata[ra++]=(rd>>24)&0xFF; rd<<=8;
    //sdata[ra++]=(rd>>24)&0xFF; rd<<=8;
    //sdata[ra++]=(rd>>24)&0xFF; rd<<=8;

    sdata[ra++]=rd&0xFF; rd>>=8;
    sdata[ra++]=rd&0xFF; rd>>=8;
    sdata[ra++]=rd&0xFF; rd>>=8;
    sdata[ra++]=rd&0xFF; rd>>=8;

    rd=0; for(rb=0;rb<ra;rb++) rd+=sdata[rb];
    rb=uuencode(udata,sdata,ra);
    udata[rb++]=0x0D;
    udata[rb++]=0x0A;
    sprintf((char *)sdata,"W %u 4\r\n",addr);
    ra=strlen((char *)sdata);
    ser_senddata(sdata,ra);
    if(get_return_code()) return(1);
    ser_senddata(udata,rb);
    sprintf((char *)sdata,"%u\r\n",rd);
    ra=strlen((char *)sdata);
    ser_senddata(sdata,ra);
    ra=wait_for_packet();
    rb=0;
    if(ra!=4) rb++;
    if(rdata[1]!='O') rb++;
    if(rdata[2]!='K') rb++;
    if(rb)
    {
        for(rc=0;rc<ra;rc++) printf("[0x%02X]",rdata[rc]); printf("\n");
        printf("%s\n",rdata);
        return(1);
    }
    return(0);
}
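Examples #1 and #6 together give word-level read and write access over the same serial protocol. A small hypothetical read-modify-write sketch; the address 0x40000000 and the bit mask are placeholders, and both helpers return 0 on success:

    unsigned int val;

    if(read_data_word(0x40000000,&val)==0)
    {
        val|=1;                             /* set bit 0, as an example */
        if(write_data_word(0x40000000,val)) printf("write_data_word failed\n");
    }
    else printf("read_data_word failed\n");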
Example #7
//-----------------------------------------------------------------------------
unsigned int get_return_code ( void )
{
    unsigned int ra;
    unsigned int rb;
    unsigned int rc;

    rb=wait_for_packet();
    if(rb<3) return(111);
    rb--;
    rc=0;
    for(ra=1;ra<rb;ra++)
    {
        if(rdata[ra]<0x30) return(112);
        if(rdata[ra]>0x39) return(113);
        rc*=10;
        rc+=rdata[ra]&0xF;
    }
    if(rc) printf("returned %u\n",rc);
    else { printf("."); fflush(stdout); }
    return(rc);
}
Example #8
//-----------------------------------------------------------------------------
int get_checksum ( unsigned int *data )
{
    unsigned int ra;
    unsigned int rb;
    unsigned int rc;

    rb=wait_for_packet();
    if(rb<3) return(1);
    rb--;
    rc=0;
    for(ra=1;ra<rb;ra++)
    {
        if(rdata[ra]<0x30) return(1);
        if(rdata[ra]>0x39) return(1);
        rc*=10;
        rc+=rdata[ra]&0xF;
    }
    printf("checksum %u\n",rc);
    *data=rc;
    return(0);
}
Example #9
// Loop forever, converting the incoming GTH data to libpcap format
static void
convert_to_pcap(GTH_api *api,
		int data_socket,
		const char *base_name,
		const int n_sus_per_file,
		const int duration_per_file,
		Channel_t channels[],
		int n_channels,
		const enum PCap_format format)
{
  u16 length;
  GTH_mtp2 signal_unit;
  int su_count;
  int file_number = 1;
  HANDLE_OR_FILEPTR file;
  int write_to_stdout = 0;
  int write_to_pipe;

  write_to_stdout = (strcmp(base_name, "-") == 0);
  write_to_pipe = is_filename_a_pipe(base_name);

  init_timer(duration_per_file);

  while (1) {
    char filename[MAX_FILENAME];

    if (!write_to_stdout && !write_to_pipe)
      {
	snprintf(filename, MAX_FILENAME, "%s.%d",
		 base_name, file_number);
	open_file_for_writing(&file, filename);
	fprintf(stderr, "saving to file %s\n", filename);
      }
    else if (write_to_stdout)
      {
	file = stdout_handle_or_file();
	fprintf(stderr, "saving capture to stdout\n");
      }
    else
      {
	fprintf(stderr, "saving capture to a windows named pipe\n");
	file = open_windows_pipe(base_name);
      }

    write_pcap_global_header(file, format, channels, n_channels);

    file_number++;
    su_count = 0;

    do
      {
	if (wait_for_packet(api, data_socket) != 0)
	  {
	    read_exact(data_socket, (void*)&length, sizeof length);
	    length = ntohs(length);
	    assert(length <= sizeof signal_unit);
	    read_exact(data_socket, (void*)&signal_unit, length);

	    length -= (signal_unit.payload - (char*)&(signal_unit.tag));
	    write_packet(file,
			 ntohs(signal_unit.timestamp_hi),
			 ntohl(signal_unit.timestamp_lo),
			 ntohs(signal_unit.tag),
			 signal_unit.payload,
			 length,
			 format);
	    flush_file(file);
	    su_count++;
	  }
      }
    while ( !is_time_to_rotate(su_count, n_sus_per_file, duration_per_file)
	    || write_to_pipe
	    || write_to_stdout );

    fclose(file);
  }
}
Example #10
static void test(const char *model)
{
	knet_handle_t knet_h;
	int logfds[2];
	int datafd = 0;
	int8_t channel = 0;
	struct knet_handle_stats stats;
	char send_buff[KNET_MAX_PACKET_SIZE];
	char recv_buff[KNET_MAX_PACKET_SIZE];
	ssize_t send_len = 0;
	int recv_len = 0;
	int savederrno;
	struct sockaddr_storage lo;
	struct knet_handle_compress_cfg knet_handle_compress_cfg;

	if (make_local_sockaddr(&lo, 0) < 0) {
		printf("Unable to convert loopback to sockaddr: %s\n", strerror(errno));
		exit(FAIL);
	}

	memset(send_buff, 0, sizeof(send_buff));

	setup_logpipes(logfds);

	knet_h = knet_handle_start(logfds, KNET_LOG_DEBUG);

	flush_logs(logfds[0], stdout);

	printf("Test knet_send with %s and valid data\n", model);

	memset(&knet_handle_compress_cfg, 0, sizeof(struct knet_handle_compress_cfg));
	strncpy(knet_handle_compress_cfg.compress_model, model, sizeof(knet_handle_compress_cfg.compress_model) - 1);
	knet_handle_compress_cfg.compress_level = 4;
	knet_handle_compress_cfg.compress_threshold = 0;

	if (knet_handle_compress(knet_h, &knet_handle_compress_cfg) < 0) {
		printf("knet_handle_compress did not accept zlib compress mode with compress level 1 cfg\n");
		knet_handle_free(knet_h);
		flush_logs(logfds[0], stdout);
		close_logpipes(logfds);
		exit(FAIL);
	}

	if (knet_handle_enable_sock_notify(knet_h, &private_data, sock_notify) < 0) {
		printf("knet_handle_enable_sock_notify failed: %s\n", strerror(errno));
		knet_handle_free(knet_h);
		flush_logs(logfds[0], stdout);
		close_logpipes(logfds);
		exit(FAIL);
	}

	datafd = 0;
	channel = -1;

	if (knet_handle_add_datafd(knet_h, &datafd, &channel) < 0) {
		printf("knet_handle_add_datafd failed: %s\n", strerror(errno));
		knet_handle_free(knet_h);
		flush_logs(logfds[0], stdout);
		close_logpipes(logfds);
		exit(FAIL);
	}

	if (knet_host_add(knet_h, 1) < 0) {
		printf("knet_host_add failed: %s\n", strerror(errno));
		knet_handle_free(knet_h);
		flush_logs(logfds[0], stdout);
		close_logpipes(logfds);
		exit(FAIL);
	}

	if (knet_link_set_config(knet_h, 1, 0, KNET_TRANSPORT_UDP, &lo, &lo, 0) < 0) {
		printf("Unable to configure link: %s\n", strerror(errno));
		knet_host_remove(knet_h, 1);
		knet_handle_free(knet_h);
		flush_logs(logfds[0], stdout);
		close_logpipes(logfds);
		exit(FAIL);
	}

	if (knet_link_set_enable(knet_h, 1, 0, 1) < 0) {
		printf("knet_link_set_enable failed: %s\n", strerror(errno));
		knet_link_clear_config(knet_h, 1, 0);
		knet_host_remove(knet_h, 1);
		knet_handle_free(knet_h);
		flush_logs(logfds[0], stdout);
		close_logpipes(logfds);
		exit(FAIL);
	}

	if (knet_handle_setfwd(knet_h, 1) < 0) {
		printf("knet_handle_setfwd failed: %s\n", strerror(errno));
		knet_link_set_enable(knet_h, 1, 0, 0);
		knet_link_clear_config(knet_h, 1, 0);
		knet_host_remove(knet_h, 1);
		knet_handle_free(knet_h);
		flush_logs(logfds[0], stdout);
		close_logpipes(logfds);
		exit(FAIL);
	}

	if (wait_for_host(knet_h, 1, 10, logfds[0], stdout) < 0) {
		printf("timeout waiting for host to be reachable");
		knet_link_set_enable(knet_h, 1, 0, 0);
		knet_link_clear_config(knet_h, 1, 0);
		knet_host_remove(knet_h, 1);
		knet_handle_free(knet_h);
		flush_logs(logfds[0], stdout);
		close_logpipes(logfds);
		exit(FAIL);
	}

	send_len = knet_send(knet_h, send_buff, KNET_MAX_PACKET_SIZE, channel);
	if (send_len <= 0) {
		printf("knet_send failed: %s\n", strerror(errno));
		knet_link_set_enable(knet_h, 1, 0, 0);
		knet_link_clear_config(knet_h, 1, 0);
		knet_host_remove(knet_h, 1);
		knet_handle_free(knet_h);
		flush_logs(logfds[0], stdout);
		close_logpipes(logfds);
		exit(FAIL);
	}

	if (send_len != sizeof(send_buff)) {
		printf("knet_send sent only %zd bytes: %s\n", send_len, strerror(errno));
		knet_link_set_enable(knet_h, 1, 0, 0);
		knet_link_clear_config(knet_h, 1, 0);
		knet_host_remove(knet_h, 1);
		knet_handle_free(knet_h);
		flush_logs(logfds[0], stdout);
		close_logpipes(logfds);
		exit(FAIL);
	}

	flush_logs(logfds[0], stdout);

	if (wait_for_packet(knet_h, 10, datafd, logfds[0], stdout)) {
		printf("Error waiting for packet: %s\n", strerror(errno));
		knet_link_set_enable(knet_h, 1, 0, 0);
		knet_link_clear_config(knet_h, 1, 0);
		knet_host_remove(knet_h, 1);
		knet_handle_free(knet_h);
		flush_logs(logfds[0], stdout);
		close_logpipes(logfds);
		exit(FAIL);
	}

	recv_len = knet_recv(knet_h, recv_buff, KNET_MAX_PACKET_SIZE, channel);
	savederrno = errno;
	if (recv_len != send_len) {
		printf("knet_recv received only %d bytes: %s (errno: %d)\n", recv_len, strerror(errno), errno);
		knet_link_set_enable(knet_h, 1, 0, 0);
		knet_link_clear_config(knet_h, 1, 0);
		knet_host_remove(knet_h, 1);
		knet_handle_free(knet_h);
		flush_logs(logfds[0], stdout);
		close_logpipes(logfds);
		if ((is_helgrind()) && (recv_len == -1) && (savederrno == EAGAIN)) {
			printf("helgrind exception. this is normal due to possible timeouts\n");
			exit(PASS);
		}
		exit(FAIL);
	}

	if (memcmp(recv_buff, send_buff, KNET_MAX_PACKET_SIZE)) {
		printf("recv and send buffers are different!\n");
		knet_link_set_enable(knet_h, 1, 0, 0);
		knet_link_clear_config(knet_h, 1, 0);
		knet_host_remove(knet_h, 1);
		knet_handle_free(knet_h);
		flush_logs(logfds[0], stdout);
		close_logpipes(logfds);
		exit(FAIL);
	}

	/* A sanity check on the stats */
	if (knet_handle_get_stats(knet_h, &stats, sizeof(stats)) < 0) {
		printf("knet_handle_get_stats failed: %s\n", strerror(errno));
		knet_link_set_enable(knet_h, 1, 0, 0);
		knet_link_clear_config(knet_h, 1, 0);
		knet_host_remove(knet_h, 1);
		knet_handle_free(knet_h);
		flush_logs(logfds[0], stdout);
		close_logpipes(logfds);
		exit(FAIL);
	}

	if (strcmp(model, "none") == 0) {
		if (stats.tx_compressed_packets != 0 ||
		    stats.rx_compressed_packets != 0) {

			printf("stats look wrong: s/b all 0 for model 'none' tx_packets: %" PRIu64 " (%" PRIu64 "/%" PRIu64 " comp/uncomp), rx_packets: %" PRIu64 " (%" PRIu64 "/%" PRIu64 " comp/uncomp)\n",
			       stats.tx_compressed_packets,
			       stats.tx_compressed_size_bytes,
			       stats.tx_compressed_original_bytes,
			       stats.rx_compressed_packets,
			       stats.rx_compressed_size_bytes,
			       stats.rx_compressed_original_bytes);
		}
	} else {
		if (stats.tx_compressed_packets != 1 ||
		    stats.rx_compressed_packets != 1 ||
		    stats.tx_compressed_original_bytes < stats.tx_compressed_size_bytes ||
		    stats.rx_compressed_original_bytes < stats.rx_compressed_size_bytes) {
			printf("stats look wrong: tx_packets: %" PRIu64 " (%" PRIu64 "/%" PRIu64 " comp/uncomp), rx_packets: %" PRIu64 " (%" PRIu64 "/%" PRIu64 " comp/uncomp)\n",
			       stats.tx_compressed_packets,
			       stats.tx_compressed_size_bytes,
			       stats.tx_compressed_original_bytes,
			       stats.rx_compressed_packets,
			       stats.rx_compressed_size_bytes,
			       stats.rx_compressed_original_bytes);

		}
	}
	flush_logs(logfds[0], stdout);

	knet_link_set_enable(knet_h, 1, 0, 0);
	knet_link_clear_config(knet_h, 1, 0);
	knet_host_remove(knet_h, 1);
	knet_handle_free(knet_h);
	flush_logs(logfds[0], stdout);
	close_logpipes(logfds);
}
Example #11
void pktio_test_send_failure(void)
{
	odp_pktio_t pktio_tx, pktio_rx;
	odp_packet_t pkt_tbl[TX_BATCH_LEN];
	uint32_t pkt_seq[TX_BATCH_LEN];
	int ret, mtu, i, alloc_pkts;
	odp_pool_param_t pool_params;
	odp_pool_t pkt_pool;
	int long_pkt_idx = TX_BATCH_LEN / 2;
	pktio_info_t info_rx;

	pktio_tx = create_pktio(0, ODP_PKTIN_MODE_RECV,
				ODP_PKTOUT_MODE_SEND);
	if (pktio_tx == ODP_PKTIO_INVALID) {
		CU_FAIL("failed to open pktio");
		return;
	}

	/* read the MTU from the transmit interface */
	mtu = odp_pktio_mtu(pktio_tx);

	ret = odp_pktio_start(pktio_tx);
	CU_ASSERT_FATAL(ret == 0);

	/* configure the pool so that we can generate test packets larger
	 * than the interface MTU */
	memset(&pool_params, 0, sizeof(pool_params));
	pool_params.pkt.len     = mtu + 32;
	pool_params.pkt.seg_len = pool_params.pkt.len;
	pool_params.pkt.num     = TX_BATCH_LEN + 1;
	pool_params.type        = ODP_POOL_PACKET;
	pkt_pool = odp_pool_create("pkt_pool_oversize", &pool_params);
	CU_ASSERT_FATAL(pkt_pool != ODP_POOL_INVALID);

	if (num_ifaces > 1) {
		pktio_rx = create_pktio(1, ODP_PKTIN_MODE_RECV,
					ODP_PKTOUT_MODE_SEND);
		ret = odp_pktio_start(pktio_rx);
		CU_ASSERT_FATAL(ret == 0);
	} else {
		pktio_rx = pktio_tx;
	}

	/* generate a batch of packets with a single overly long packet
	 * in the middle */
	for (i = 0; i < TX_BATCH_LEN; ++i) {
		uint32_t pkt_len;

		if (i == long_pkt_idx)
			pkt_len = pool_params.pkt.len;
		else
			pkt_len = PKT_LEN_NORMAL;

		pkt_tbl[i] = odp_packet_alloc(pkt_pool, pkt_len);
		if (pkt_tbl[i] == ODP_PACKET_INVALID)
			break;

		pkt_seq[i] = pktio_init_packet(pkt_tbl[i]);

		pktio_pkt_set_macs(pkt_tbl[i], pktio_tx, pktio_rx);
		if (pktio_fixup_checksums(pkt_tbl[i]) != 0) {
			odp_packet_free(pkt_tbl[i]);
			break;
		}

		if (pkt_seq[i] == TEST_SEQ_INVALID) {
			odp_packet_free(pkt_tbl[i]);
			break;
		}
	}
	alloc_pkts = i;

	if (alloc_pkts == TX_BATCH_LEN) {
		/* try to send the batch with the long packet in the middle,
		 * the initial short packets should be sent successfully */
		odp_errno_zero();
		ret = odp_pktio_send(pktio_tx, pkt_tbl, TX_BATCH_LEN);
		CU_ASSERT(ret == long_pkt_idx);
		CU_ASSERT(odp_errno() == 0);

		info_rx.id   = pktio_rx;
		info_rx.outq = ODP_QUEUE_INVALID;
		info_rx.inq  = ODP_QUEUE_INVALID;
		info_rx.in_mode = ODP_PKTIN_MODE_RECV;

		for (i = 0; i < ret; ++i) {
			pkt_tbl[i] = wait_for_packet(&info_rx, pkt_seq[i],
						     ODP_TIME_SEC_IN_NS);
			if (pkt_tbl[i] == ODP_PACKET_INVALID)
				break;
		}

		if (i == ret) {
			/* now try to send starting with the too-long packet
			 * and verify it fails */
			odp_errno_zero();
			ret = odp_pktio_send(pktio_tx,
					     &pkt_tbl[long_pkt_idx],
					     TX_BATCH_LEN - long_pkt_idx);
			CU_ASSERT(ret == -1);
			CU_ASSERT(odp_errno() != 0);
		} else {
			CU_FAIL("failed to receive transmitted packets\n");
		}

		/* now reduce the size of the long packet and attempt to send
		 * again - should work this time */
		i = long_pkt_idx;
		odp_packet_pull_tail(pkt_tbl[i],
				     odp_packet_len(pkt_tbl[i]) -
				     PKT_LEN_NORMAL);
		pkt_seq[i] = pktio_init_packet(pkt_tbl[i]);

		pktio_pkt_set_macs(pkt_tbl[i], pktio_tx, pktio_rx);
		ret = pktio_fixup_checksums(pkt_tbl[i]);
		CU_ASSERT_FATAL(ret == 0);

		CU_ASSERT_FATAL(pkt_seq[i] != TEST_SEQ_INVALID);
		ret = odp_pktio_send(pktio_tx, &pkt_tbl[i], TX_BATCH_LEN - i);
		CU_ASSERT_FATAL(ret == (TX_BATCH_LEN - i));

		for (; i < TX_BATCH_LEN; ++i) {
			pkt_tbl[i] = wait_for_packet(&info_rx,
						     pkt_seq[i],
						     ODP_TIME_SEC_IN_NS);
			if (pkt_tbl[i] == ODP_PACKET_INVALID)
				break;
		}
		CU_ASSERT(i == TX_BATCH_LEN);
	} else {
		CU_FAIL("failed to generate test packets\n");
	}

	for (i = 0; i < alloc_pkts; ++i) {
		if (pkt_tbl[i] != ODP_PACKET_INVALID)
			odp_packet_free(pkt_tbl[i]);
	}

	if (pktio_rx != pktio_tx)
		CU_ASSERT(odp_pktio_close(pktio_rx) == 0);
	CU_ASSERT(odp_pktio_close(pktio_tx) == 0);
	CU_ASSERT(odp_pool_destroy(pkt_pool) == 0);
}
Example #12
// Loop forever, converting the incoming GTH data to libpcap format
static void
convert_to_pcap(GTH_api *api,
int data_socket,
const char *base_name,
const int n_sus_per_file,
const int duration_per_file,
const int stop_after_interval,
const int output_filename_format,
Channel_t channels[],
int n_channels,
const enum PCap_format format)
{
	u16 length;
	GTH_mtp2 signal_unit;
	int su_count;
	int file_number = 1;
	HANDLE_OR_FILEPTR file;
	int write_to_stdout = 0;
	int write_to_pipe;

	write_to_stdout = (strcmp(base_name, "-") == 0);
	write_to_pipe = is_filename_a_pipe(base_name);

	init_timer(duration_per_file);

	int always_true = 1;
	time_t rawtime;
	struct tm * timeinfo = NULL;

	unsigned long long interval_threshold=0;
		
	u32 curr_sec = 0;
	u32 curr_usec = 0;
	unsigned long long curr_ts;

	while (always_true) {
		char filename[MAX_FILENAME];

		if (!write_to_stdout && !write_to_pipe)
		{
			if (output_filename_format > 0)
			{
				if(timeinfo==NULL) //setting interval time
					time(&rawtime);
				else
					rawtime+=duration_per_file; //hard setting next interval (duration_per_file is in seconds)

				if (output_filename_format == 1)
					timeinfo = localtime(&rawtime); // local time
				else
					timeinfo = gmtime(&rawtime); // utc time
				
				interval_threshold=convert_epoch_micro(rawtime+duration_per_file);
				
				fprintf(stderr, "interval started at %04d/%02d/%02d %02d:%02d:%02d\n",
						timeinfo->tm_year + 1900,
						timeinfo->tm_mon + 1,
						timeinfo->tm_mday,
						timeinfo->tm_hour,
						timeinfo->tm_min,
						timeinfo->tm_sec);
						
				fprintf(stderr, "threshold epoch: %llu\n", interval_threshold);	
			}

			if (!stop_after_interval)
			{
				if (output_filename_format > 0)
				{
					snprintf(filename, MAX_FILENAME, "%s_%05d_%04d%02d%02d%02d%02d%02d",
						base_name, file_number,
						timeinfo->tm_year + 1900,
						timeinfo->tm_mon + 1,
						timeinfo->tm_mday,
						timeinfo->tm_hour, timeinfo->tm_min, timeinfo->tm_sec);
				}
				else
				{
					snprintf(filename, MAX_FILENAME, "%s_%05d",
						base_name, file_number);
				}
			}
			else
			{
				if (output_filename_format > 0)
				{
					snprintf(filename, MAX_FILENAME, "%s_%04d%02d%02d%02d%02d%02d",
						base_name,
						timeinfo->tm_year + 1900,
						timeinfo->tm_mon + 1,
						timeinfo->tm_mday,
						timeinfo->tm_hour, timeinfo->tm_min, timeinfo->tm_sec);
				}
				else
				{
					snprintf(filename, MAX_FILENAME, "%s",
						base_name);
				}
			}
			open_file_for_writing(&file, filename);
			fprintf(stderr, "saving to file %s\n", filename);
		}
		else if (write_to_stdout)
		{
			file = stdout_handle_or_file();
			fprintf(stderr, "saving capture to stdout\n");
		}
		else
		{
			fprintf(stderr, "saving capture to a windows named pipe\n");
			file = open_windows_pipe(base_name);
		}

		write_pcap_global_header(file, format, channels, n_channels);

		file_number++;
		su_count = 0;

		int rotation_time_reached = 0;
		do
		{
			if (wait_for_packet(api, data_socket) != 0)
			{
				read_exact(data_socket, (void*)&length, sizeof length);
				length = ntohs(length);
				assert(length <= sizeof signal_unit);
				read_exact(data_socket, (void*)&signal_unit, length);

				curr_sec = ntohs(signal_unit.timestamp_hi);
				curr_usec = ntohl(signal_unit.timestamp_lo);

				length -= (signal_unit.payload - (char*)&(signal_unit.tag));
				write_packet(file,
					curr_sec,
					curr_usec,
					ntohs(signal_unit.tag),
					signal_unit.payload,
					length,
					format);
				flush_file(file);
				su_count++;

				if (!write_to_pipe && !write_to_stdout)
				{
					if (duration_per_file)
					{
						curr_ts = convert_timestamp_micro(curr_sec, curr_usec, format);

						if (curr_ts >= interval_threshold)
						{
							fprintf(stderr, "interval threshold triggered.\n");
							set_timer(duration_per_file);
							break;
						}
					}
				}
			}
		} while (
			!(rotation_time_reached = is_time_to_rotate(su_count, n_sus_per_file, duration_per_file))
			|| write_to_pipe
			|| write_to_stdout
			);

		fclose(file);

		if (!write_to_pipe && !write_to_stdout)
		{
			if (rotation_time_reached && stop_after_interval)
			{
				fprintf(stderr, "stopped capturing when rotation time reached\n");
				always_true = 0;
			}
		}
	}
}
Example #13
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    int *peeked, int *err)
{
	struct sk_buff *skb;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	/* when the socket is blocking, fetch the timeout value */
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		unsigned long cpu_flags;
		/*
		 * Checking the socket for a queued packet must be done under
		 * the queue lock, so that another thread cannot steal the packet.
		 */
		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
		skb = skb_peek(&sk->sk_receive_queue); /* is there a packet in the socket's receive buffer? */
		if (skb) {
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
				/*
				 * MSG_PEEK means the caller only wants to look at the
				 * data, not consume it, so the skb is left on the queue.
				 */
				skb->peeked = 1;
				atomic_inc(&skb->users);
			} else
				__skb_unlink(skb, &sk->sk_receive_queue); /* dequeue the skb: this consumes the data */
		}
		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);

		if (skb) /* a packet is available, return it */
			return skb;

		/*
		 * timeo is 0 in two cases: the socket is non-blocking, or the
		 * blocking wait has already exhausted the timeout; either way,
		 * fall through to no_packet.
		 */
		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_packet(sk, err, &timeo)); /* sleep until a packet arrives */

	return NULL;

no_packet:
	*err = error;
	return NULL;
}
Example #14
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err)
{
	int error;
	struct sk_buff *skb;

	/* Caller is allowed not to check sk->err before skb_recv_datagram() */
	error = sock_error(sk);
	if (error)
		goto no_packet;

restart:
	while(skb_queue_empty(&sk->receive_queue))	/* No data */
	{
		/* Socket errors? */
		error = sock_error(sk);
		if (error)
			goto no_packet;

		/* Socket shut down? */
		if (sk->shutdown & RCV_SHUTDOWN)
			goto no_packet;

		/* Sequenced packets can come disconnected. If so we report the problem */
		error = -ENOTCONN;
		if(connection_based(sk) && sk->state!=TCP_ESTABLISHED)
			goto no_packet;

		/* handle signals */
		error = -ERESTARTSYS;
		if (signal_pending(current))
			goto no_packet;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (noblock)
			goto no_packet;

		wait_for_packet(sk);
	}

	/* Again only user level code calls this function, so nothing interrupt level
	   will suddenly eat the receive_queue */
	if (flags & MSG_PEEK)
	{
		unsigned long cpu_flags;

		/* It is the only POTENTIAL race condition
		   in this function. skb may be stolen by
		   another receiver after peek, but before
		   incrementing use count, provided kernel
		   is reenterable (it is not) or this function
		   is called by interrupts.

		   Protect it with global skb spinlock,
		   though for now even this is overkill.
		                                --ANK (980728)
		 */
		spin_lock_irqsave(&skb_queue_lock, cpu_flags);
		skb = skb_peek(&sk->receive_queue);
		if(skb!=NULL)
			atomic_inc(&skb->users);
		spin_unlock_irqrestore(&skb_queue_lock, cpu_flags);
	} else
		skb = skb_dequeue(&sk->receive_queue);

	if (!skb)	/* Avoid race if someone beats us to the data */
		goto restart;
	return skb;

no_packet:
	*err = error;
	return NULL;
}
Example #15
FXint InputThread::run(){
  Event * event;

  ap_set_thread_name("ap_input");

  for (;;) {
    if (reader && state==StateProcessing)
      event = wait_for_packet();
    else
      event = wait_for_event();

    switch(event->type) {
      case Ctrl_Close     : ctrl_flush(true);
                            ctrl_close_input(true);
                            break;

      case Ctrl_Open_Flush: ctrl_flush();
      case Ctrl_Open      : ctrl_open_input(((ControlEvent*)event)->text);
                            break;

      case Ctrl_Quit      : ctrl_close_input(true);
                            engine->decoder->post(event,EventQueue::Flush);
                            return 0;
                            break;
      case Ctrl_Seek      : ctrl_seek(((CtrlSeekEvent*)event)->pos);
                            break;
      case End            : if (event->stream==streamid) {
                              ctrl_eos();
                              }
                            break;
      case Meta           : engine->decoder->post(event);
                            continue;
                            break;
      case AP_EOS         : GM_DEBUG_PRINT("[input] eos\n");
                            if (state!=StateError) {
                              engine->post(event);
                              continue;
                              }
                            break;
      case Buffer         :
        {
          Packet * packet = dynamic_cast<Packet*>(event);
          FXASSERT(reader);
          FXASSERT(packet);
          packet->stream = streamid;
          FXuint status = reader->process(packet);
          switch(status) {
            case ReadError    : GM_DEBUG_PRINT("[input] error\n");
                                ctrl_close_input();
                                set_state(StateError,true);
                                break;
            case ReadDone     : GM_DEBUG_PRINT("[input] done\n");
                                set_state(StateIdle);
                                break;
            case ReadRedirect : {GM_DEBUG_PRINT("[input] redirect\n");
                                FXStringList list;
                                reader->redirect(list);
                                ctrl_open_inputs(list);
                                }
                                break;
            default         : break;
            }
          continue; /* packet already released */
          break;
        }
      }
    Event::unref(event);
    }
  return 0;
  }
Example #16
static u32 ts_interleave_thread_run(void *param) {
	GF_AbstractTSMuxer * mux = (GF_AbstractTSMuxer *) param;
	AVStream * video_st = mux->video_st;
	AVStream * audio_st = mux->audio_st;
	u64 audio_pts, video_pts;
	u64 audioSize, videoSize, videoKbps, audioKbps;
	u32 pass;
	u32 now, start;
	/* open the output file, if needed */
	if (!(mux->oc->oformat->flags & AVFMT_NOFILE)) {
		if (url_fopen(&mux->oc->pb, mux->destination, URL_WRONLY) < 0) {
			fprintf(stderr, "Could not open '%s'\n", mux->destination);
			return 0;
		}
	}
	/* write the stream header, if any */
	av_write_header(mux->oc);
	audio_pts = video_pts = 0;
	// Buffering...
	gf_sleep(1000);
	now = start = gf_sys_clock();
	audioSize = videoSize = 0;
	audioKbps = videoKbps = 0;
	pass = 0;
	while ( mux->encode) {
		pass++;
		if (0== (pass%16)) {
			now = gf_sys_clock();
			if (now - start > 1000) {
				videoKbps = videoSize * 8000 / (now-start) / 1024;
				audioKbps = audioSize * 8000 / (now-start) / 1024;
				audioSize = videoSize = 0;
				start = now;
				GF_LOG(GF_LOG_DEBUG, GF_LOG_MODULE, ("\rPTS audio="LLU" ("LLU"kbps), video="LLU" ("LLU"kbps)", audio_pts, audioKbps, video_pts, videoKbps));
			}
		}
		/* write interleaved audio and video frames */
		if (!video_st ||
		        (audio_pts == AV_NOPTS_VALUE && has_packet_ready(mux, mux->audioMx, &mux->audioPackets)) ||
		        ((audio_st && audio_pts < video_pts && audio_pts!= AV_NOPTS_VALUE))) {
			AVPacketList * pl = wait_for_packet(mux, mux->audioMx, &mux->audioPackets);
			if (!pl)
				goto exit;
			audio_pts = pl->pkt.pts ;
			audioSize+=pl->pkt.size;
			if (pl->pkt.pts == AV_NOPTS_VALUE) {
				pl->pkt.pts = 0;
			}
			if (av_interleaved_write_frame(mux->oc, &(pl->pkt)) < 0) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_MODULE, ("[AVRedirect] : failed to write audio interleaved frame audio_pts="LLU", video_pts="LLU"\n", audio_pts, video_pts));
			}
			gf_free(pl);
		} else {
			AVPacketList * pl = wait_for_packet(mux, mux->videoMx, &mux->videoPackets);
			if (!pl)
				goto exit;
			video_pts = pl->pkt.pts;
			/* write the compressed frame in the media file */
			if (0 && audio_pts != AV_NOPTS_VALUE && audio_pts > video_pts && pl->next) {
				u32 skipped = 0;
				u64 first = video_pts;
				/* We may be too slow... */
				gf_mx_p(mux->videoMx);
				while (video_pts < audio_pts && pl->next) {
					AVPacketList * old = pl;
					// We skip frames...
					pl = pl->next;
					video_pts = pl->pkt.pts;
					skipped++;
					gf_free(old);
				}
				mux->videoPackets = pl->next;
				gf_mx_v(mux->videoMx);
				if (skipped > 0)
					GF_LOG(GF_LOG_INFO, GF_LOG_MODULE, ("Skipped %u video frames, frame was "LLU", but is now "LLU"\n", skipped, first, video_pts));
			}
			videoSize+=pl->pkt.size;
			video_pts = pl->pkt.pts; // * video_st->time_base.num / video_st->time_base.den;
			assert( video_pts);
			if (av_interleaved_write_frame(mux->oc, &(pl->pkt)) < 0) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_MODULE, ("[AVRedirect] : failed to write video interleaved frame audio_pts="LLU", video_pts="LLU"\n", audio_pts, video_pts));
			}
			gf_free(pl);
		}
		gf_sleep(1);
	}
exit:
	GF_LOG(GF_LOG_INFO, GF_LOG_MODULE, ("[AVRedirect] Ending TS thread...\n"));
	av_write_trailer(mux->oc);
	if (!(mux->oc->oformat->flags & AVFMT_NOFILE)) {
		/* close the output file */
		url_fclose(mux->oc->pb);
	}
	return 0;
}
Example #17
static void pktio_txrx_multi(pktio_info_t *pktio_a, pktio_info_t *pktio_b,
			     int num_pkts)
{
	odp_packet_t tx_pkt[num_pkts];
	odp_event_t tx_ev[num_pkts];
	odp_packet_t rx_pkt;
	uint32_t tx_seq[num_pkts];
	int i, ret;

	/* generate test packets to send */
	for (i = 0; i < num_pkts; ++i) {
		tx_pkt[i] = odp_packet_alloc(default_pkt_pool, packet_len);
		if (tx_pkt[i] == ODP_PACKET_INVALID)
			break;

		tx_seq[i] = pktio_init_packet(tx_pkt[i]);
		if (tx_seq[i] == TEST_SEQ_INVALID) {
			odp_packet_free(tx_pkt[i]);
			break;
		}

		pktio_pkt_set_macs(tx_pkt[i], pktio_a->id, pktio_b->id);
		if (pktio_fixup_checksums(tx_pkt[i]) != 0) {
			odp_packet_free(tx_pkt[i]);
			break;
		}

		tx_ev[i] = odp_packet_to_event(tx_pkt[i]);
	}

	if (i != num_pkts) {
		CU_FAIL("failed to generate test packets");
		return;
	}

	/* send packet(s) out */
	if (num_pkts == 1) {
		ret = odp_queue_enq(pktio_a->outq, tx_ev[0]);
		if (ret != 0) {
			CU_FAIL("failed to enqueue test packet");
			odp_packet_free(tx_pkt[0]);
			return;
		}
	} else {
		ret = odp_queue_enq_multi(pktio_a->outq, tx_ev, num_pkts);
		if (ret != num_pkts) {
			CU_FAIL("failed to enqueue test packets");
			i = ret < 0 ? 0 : ret;
			for ( ; i < num_pkts; i++)
				odp_packet_free(tx_pkt[i]);
			return;
		}
	}

	/* and wait for them to arrive back */
	for (i = 0; i < num_pkts; ++i) {
		rx_pkt = wait_for_packet(pktio_b, tx_seq[i],
					 ODP_TIME_SEC_IN_NS);

		if (rx_pkt == ODP_PACKET_INVALID)
			break;
		CU_ASSERT(odp_packet_input(rx_pkt) == pktio_b->id);
		CU_ASSERT(odp_packet_has_error(rx_pkt) == 0);
		odp_packet_free(rx_pkt);
	}

	CU_ASSERT(i == num_pkts);
}