Example #1
File: buffer.hpp Project: bobvodka/bluetoe
    read_buffer ll_data_pdu_buffer< TransmitSize, ReceiveSize, Radio >::allocate_receive_buffer()
    {
        // The gap must be greater than max_rx_size_, or otherwise we could end up with
        // received_ == received_end_ without the buffer being empty
        if ( received_end_ >= received_ )
        {
            // place a 0 into the length field to denote that the rest until the buffer end is not used
            if ( end_receive_buffer() - received_end_ >= ll_header_size )
            {
                received_end_[ 1 ] = 0;
            }

            // is there still a gap at the end of the buffer?
            if ( end_receive_buffer() - received_end_ > max_rx_size_ )
            {
                return read_buffer{ received_end_, max_rx_size_ };
            }

            // is there a gap at the beginning of the buffer?
            if ( received_ - receive_buffer() > max_rx_size_ )
            {
                return read_buffer{ receive_buffer(), max_rx_size_ };
            }
        }
        // the filled part of the buffer is wrapped around the end; is there a gap in the middle?
        else if ( received_ - received_end_ > max_rx_size_ )
        {
            return read_buffer{ received_end_, max_rx_size_ };
        }

        return read_buffer{ nullptr, 0 };
    }
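Taken together with Example #4 below, this shows the usual receive path: the radio layer asks the buffer for space, copies an incoming PDU into it, and commits it with received(). The following is only a minimal caller sketch, assuming a hypothetical radio callback and an ll_data_pdu_buffer member named buffer_ (plus <algorithm> for std::copy); it is not part of the project.

    void on_pdu_from_radio( const std::uint8_t* pdu, std::size_t size )
    {
        // ask the link-layer buffer for room to store one PDU
        const read_buffer space = buffer_.allocate_receive_buffer();

        // a null buffer means that no gap is currently large enough; drop the PDU
        if ( space.buffer == nullptr || space.size < size )
            return;

        // copy the over-the-air PDU into the slot; received() reads the actual
        // length from the PDU header and advances received_end_ accordingly
        std::copy( pdu, pdu + size, space.buffer );
        buffer_.received( space );
    }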
Example #2
File: buffer.hpp Project: bobvodka/bluetoe
    void ll_data_pdu_buffer< TransmitSize, ReceiveSize, Radio >::reset()
    {
        max_rx_size_    = min_buffer_size;
        received_end_   = receive_buffer();
        received_       = receive_buffer();

        max_tx_size_    = min_buffer_size;
        transmit_       = transmit_buffer();
        transmit_end_   = transmit_buffer();

        sequence_number_ = false;
        next_expected_sequence_number_ = false;
        next_empty_      = false;
    }
Example #3
    PacketInfo Client::receivePacket( bool is_nonblocking )
    {
        if ( udp_socket < 0 )
            openSocket();

        int flags = 0;
        if ( is_nonblocking )
            flags |= MSG_DONTWAIT;

        sockaddr_in          peer_address;
        socklen_t            peer_address_size = sizeof( peer_address );
        std::vector<uint8_t> receive_buffer( UDP_RECEIVE_BUFFER_SIZE );
        int                  recv_size = recvfrom( udp_socket,
                                                   receive_buffer.data(),
                                                   UDP_RECEIVE_BUFFER_SIZE,
                                                   flags,
                                                   reinterpret_cast<sockaddr *>( &peer_address ),
                                                   &peer_address_size );
        if ( recv_size < 0 ) {
            int error_num = errno;
            if ( error_num == EAGAIN ) {
                PacketInfo info;
                return info;
            }
            std::perror( "cannot recv" );
            throw SocketError( get_error_message( "cannot recv packet", error_num ) );
        }

        PacketInfo info;
        info.source_address = convertAddressBinaryToString( peer_address.sin_addr );
        info.source_port    = ntohs( peer_address.sin_port );
        receive_buffer.resize( recv_size );   // keep only the bytes actually received
        info.payload        = receive_buffer;
        return info;
    }
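In non-blocking mode the EAGAIN case above is mapped to a default-constructed PacketInfo, so callers detect "no data yet" by an empty payload. A hedged usage sketch, assuming Client is default-constructible (its constructor is not shown here) and that <iostream> is available:

    Client client;                                       // hypothetical construction
    PacketInfo packet = client.receivePacket( true );    // non-blocking

    if ( packet.payload.empty() ) {
        // nothing was queued on the socket (the EAGAIN path above)
    }
    else {
        std::cout << "received " << packet.payload.size() << " bytes from "
                  << packet.source_address << ":" << packet.source_port << std::endl;
    }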
Example #4
File: buffer.hpp Project: bobvodka/bluetoe
    write_buffer ll_data_pdu_buffer< TransmitSize, ReceiveSize, Radio >::received( read_buffer pdu )
    {
        assert( pdu.buffer );
        assert( pdu.buffer >= receive_buffer() );
        assert( pdu.size >= pdu.buffer[ 1 ] + ll_header_size );
        assert( end_receive_buffer() - pdu.buffer >= pdu.buffer[ 1 ] + ll_header_size );
        assert( check_receive_buffer_consistency( "+received" ) );

        // only handle PDUs with a valid LLID (an LLID of 0 is reserved/invalid)
        if ( ( pdu.buffer[ 0 ] & 0x3 ) != 0 )
        {
            acknowledge( pdu.buffer[ 0 ] & nesn_flag );

            // resent PDU?
            if ( static_cast< bool >( pdu.buffer[ 0 ] & sn_flag ) == next_expected_sequence_number_ )
            {
                next_expected_sequence_number_ = !next_expected_sequence_number_;

                if ( pdu.buffer[ 1 ] != 0 )
                {
                    received_end_ = pdu.buffer + pdu.buffer[ 1 ] + ll_header_size;

                    // we received data, so we cannot end up with the receive buffer being empty
                    assert( received_end_ != received_ );
                }
            }
        }

        assert( check_receive_buffer_consistency( "-received" ) );

        return next_transmit();
    }
Example #5
File: buffer.hpp Project: bobvodka/bluetoe
    void ll_data_pdu_buffer< TransmitSize, ReceiveSize, Radio >::free_received()
    {
        assert( received_ != received_end_ );
        assert( check_receive_buffer_consistency( "+free_received" ) );

        received_ += received_[ 1 ] + ll_header_size;

        // do we have to wrap?
        if ( end_receive_buffer() - received_ < 2 || received_[ 1 ] == 0 )
        {
            if ( received_ == received_end_ )
                received_end_ = receive_buffer();

            received_ = receive_buffer();
        }

        assert( check_receive_buffer_consistency( "-free_received" ) );
    }
Example #6
File: buffer.hpp Project: bobvodka/bluetoe
    bool ll_data_pdu_buffer< TransmitSize, ReceiveSize, Radio >::check_receive_buffer_consistency( const char* label ) const
    {
        if ( received_ == received_end_ )
            return true;

        const std::uint8_t* c = received_;

        if ( received_ < received_end_ )
        {
            // now, by following the length field, we have to reach received_end_ without wrapping
            while ( c < received_end_ )
                c += c[ 1 ] + ll_header_size;

            return c == received_end_;
        }

        // now by following the length field, we have to reach the end of the buffer
        while ( c != receive_buffer() && c < end_receive_buffer() )
        {
            c += c[ 1 ] + ll_header_size;

            if ( c > end_receive_buffer() )
                return false;

            if ( end_receive_buffer() - c < 2 || c[ 1 ] == 0 )
                c = receive_buffer();
        }

        if ( c != receive_buffer() )
            return false;

        // wrap around and have to meet received_end_
        while ( c < received_end_ )
            c += c[ 1 ] + ll_header_size;

        return c == received_end_;
    }
Example #7
static int get_keymap(USBKeyboard *kb, uint8_t mapindex,
                      uint8_t *buffer, uint8_t mapsize)
{
  int ret=receive_buffer(kb,KURQ_GET_KEYMAP,mapindex,0,buffer,mapsize);

  if(ret < 0)
  {
    fprintf(stderr,"Error while reading key map (index %d).\n",mapindex);
    return -1;
  }

  if(ret != mapsize)
  {
    fprintf(stderr,"Received unexpected number of bytes (%d instead of %d)\n",
            ret,mapsize);
    return -1;
  }

  return 0;
}
Example #8
static int download_layout(USBKeyboard *kb, const KBHwinfo *info,
                           const char *outfilename)
{
  uint8_t vector[info->matrix_bvlen];
  int ret=receive_buffer(kb,KURQ_GET_LAYOUT,0,0,vector,info->matrix_bvlen);

  if(ret < 0)
  {
    fprintf(stderr,"Error while reading keyboard layout.\n");
    return -1;
  }

  if(ret != info->matrix_bvlen)
  {
    fprintf(stderr,"Received unexpected number of bytes (%d instead of %d)\n",
            ret,info->matrix_bvlen);
    return -1;
  }

  return write_blob_to_file(outfilename,vector,sizeof(vector));
}
Example #9
	boost::optional<piece_block_progress>
	http_seed_connection::downloading_piece_progress() const
	{
		if (m_requests.empty())
			return boost::optional<piece_block_progress>();

		boost::shared_ptr<torrent> t = associated_torrent().lock();
		TORRENT_ASSERT(t);

		piece_block_progress ret;

		peer_request const& pr = m_requests.front();
		ret.piece_index = pr.piece;
		if (!m_parser.header_finished())
		{
			ret.bytes_downloaded = 0;
		}
		else
		{
			int receive_buffer_size = receive_buffer().left() - m_parser.body_start();
			// TODO: 1 in chunked encoding mode, this assert won't hold.
			// the chunk headers should be subtracted from the receive_buffer_size
			TORRENT_ASSERT(receive_buffer_size <= t->block_size());
			ret.bytes_downloaded = t->block_size() - receive_buffer_size;
		}
		// this is used to make sure that the block_index stays within
		// bounds. If the entire piece is downloaded, the block_index
		// would otherwise point to one past the end
		int correction = ret.bytes_downloaded ? -1 : 0;
		ret.block_index = (pr.start + ret.bytes_downloaded + correction) / t->block_size();
		ret.full_block_bytes = t->block_size();
		const int last_piece = t->torrent_file().num_pieces() - 1;
		if (ret.piece_index == last_piece && ret.block_index
			== t->torrent_file().piece_size(last_piece) / t->block_size())
			ret.full_block_bytes = t->torrent_file().piece_size(last_piece) % t->block_size();
		return ret;
	}
Example #10
	void http_seed_connection::on_receive(error_code const& error
		, std::size_t bytes_transferred)
	{
		INVARIANT_CHECK;

		if (error)
		{
			m_statistics.received_bytes(0, bytes_transferred);
#ifdef TORRENT_VERBOSE_LOGGING
			peer_log("*** http_seed_connection error: %s", error.message().c_str());
#endif
			return;
		}

		boost::shared_ptr<torrent> t = associated_torrent().lock();
		TORRENT_ASSERT(t);

		for (;;)
		{
			buffer::const_interval recv_buffer = receive_buffer();

			if (bytes_transferred == 0) break;
			TORRENT_ASSERT(recv_buffer.left() > 0);

			TORRENT_ASSERT(!m_requests.empty());
			if (m_requests.empty())
			{
				m_statistics.received_bytes(0, bytes_transferred);
				disconnect(errors::http_error, 2);
				return;
			}

			peer_request front_request = m_requests.front();

			bool header_finished = m_parser.header_finished();
			if (!header_finished)
			{
				bool parse_error = false;
				int protocol = 0;
				int payload = 0;
				boost::tie(payload, protocol) = m_parser.incoming(recv_buffer, parse_error);
				m_statistics.received_bytes(0, protocol);
				bytes_transferred -= protocol;
				if (payload > front_request.length) payload = front_request.length;

				if (parse_error)
				{
					m_statistics.received_bytes(0, bytes_transferred);
					disconnect(errors::http_parse_error, 2);
					return;
				}

				TORRENT_ASSERT(recv_buffer.left() == 0 || *recv_buffer.begin == 'H');
			
				TORRENT_ASSERT(recv_buffer.left() <= packet_size());
				
				// this means the entire status line hasn't been received yet
				if (m_parser.status_code() == -1)
				{
					TORRENT_ASSERT(payload == 0);
					TORRENT_ASSERT(bytes_transferred == 0);
					break;
				}

				// if the status code is not one of the accepted ones, abort
				if (!is_ok_status(m_parser.status_code()))
				{
					int retry_time = atoi(m_parser.header("retry-after").c_str());
					if (retry_time <= 0) retry_time = 5 * 60;
					// temporarily unavailable, retry later
					t->retry_web_seed(this, retry_time);

					std::string error_msg = to_string(m_parser.status_code()).elems
						+ (" " + m_parser.message());
					if (m_ses.m_alerts.should_post<url_seed_alert>())
					{
						m_ses.m_alerts.post_alert(url_seed_alert(t->get_handle(), url()
							, error_msg));
					}
					m_statistics.received_bytes(0, bytes_transferred);
					disconnect(error_code(m_parser.status_code(), get_http_category()), 1);
					return;
				}
				if (!m_parser.header_finished())
				{
					TORRENT_ASSERT(payload == 0);
					TORRENT_ASSERT(bytes_transferred == 0);
					break;
				}
			}

			// we just completed reading the header
			if (!header_finished)
			{
				if (is_redirect(m_parser.status_code()))
				{
					// this means we got a redirection request
					// look for the location header
					std::string location = m_parser.header("location");
					m_statistics.received_bytes(0, bytes_transferred);

					if (location.empty())
					{
						// we should not try this server again.
						t->remove_web_seed(this);
						disconnect(errors::missing_location, 2);
						return;
					}
					
					// add the redirected url and remove the current one
					t->add_web_seed(location, web_seed_entry::http_seed);
					t->remove_web_seed(this);
					disconnect(errors::redirecting, 2);
					return;
				}

				std::string const& server_version = m_parser.header("server");
				if (!server_version.empty())
				{
					m_server_string = "URL seed @ ";
					m_server_string += m_host;
					m_server_string += " (";
					m_server_string += server_version;
					m_server_string += ")";
				}

				m_response_left = atol(m_parser.header("content-length").c_str());
				if (m_response_left == -1)
				{
					m_statistics.received_bytes(0, bytes_transferred);
					// we should not try this server again.
					t->remove_web_seed(this);
					disconnect(errors::no_content_length, 2);
					return;
				}
				if (m_response_left != front_request.length)
				{
					m_statistics.received_bytes(0, bytes_transferred);
					// we should not try this server again.
					t->remove_web_seed(this);
					disconnect(errors::invalid_range, 2);
					return;
				}
				m_body_start = m_parser.body_start();
			}

			recv_buffer.begin += m_body_start;

			// =========================
			// === CHUNKED ENCODING  ===
			// =========================
			while (m_parser.chunked_encoding()
				&& m_chunk_pos >= 0
				&& m_chunk_pos < recv_buffer.left())
			{
				int header_size = 0;
				size_type chunk_size = 0;
				buffer::const_interval chunk_start = recv_buffer;
				chunk_start.begin += m_chunk_pos;
				TORRENT_ASSERT(chunk_start.begin[0] == '\r' || is_hex(chunk_start.begin, 1));
				bool ret = m_parser.parse_chunk_header(chunk_start, &chunk_size, &header_size);
				if (!ret)
				{
					TORRENT_ASSERT(bytes_transferred >= size_t(chunk_start.left() - m_partial_chunk_header));
					bytes_transferred -= chunk_start.left() - m_partial_chunk_header;
					m_statistics.received_bytes(0, chunk_start.left() - m_partial_chunk_header);
					m_partial_chunk_header = chunk_start.left();
					if (bytes_transferred == 0) return;
					break;
				}
				else
				{
#ifdef TORRENT_VERBOSE_LOGGING
					peer_log("*** parsed chunk: %d header_size: %d", chunk_size, header_size);
#endif
					TORRENT_ASSERT(bytes_transferred >= size_t(header_size - m_partial_chunk_header));
					bytes_transferred -= header_size - m_partial_chunk_header;

					m_statistics.received_bytes(0, header_size - m_partial_chunk_header);
					m_partial_chunk_header = 0;
					TORRENT_ASSERT(chunk_size != 0 || chunk_start.left() <= header_size || chunk_start.begin[header_size] == 'H');
					// cut out the chunk header from the receive buffer
					TORRENT_ASSERT(m_chunk_pos + m_body_start < INT_MAX);
					cut_receive_buffer(header_size, t->block_size() + 1024, int(m_chunk_pos + m_body_start));
					recv_buffer = receive_buffer();
					recv_buffer.begin += m_body_start;
					m_chunk_pos += chunk_size;
					if (chunk_size == 0)
					{
						TORRENT_ASSERT(receive_buffer().left() < m_chunk_pos + m_body_start + 1
							|| receive_buffer()[int(m_chunk_pos + m_body_start)] == 'H'
							|| (m_parser.chunked_encoding() && receive_buffer()[int(m_chunk_pos + m_body_start)] == '\r'));
						m_chunk_pos = -1;
					}
				}
			}

			int payload = bytes_transferred;
			if (payload > m_response_left) payload = int(m_response_left);
			if (payload > front_request.length) payload = front_request.length;
			m_statistics.received_bytes(payload, 0);
			incoming_piece_fragment(payload);
			m_response_left -= payload;

			if (m_parser.status_code() == 503)
			{
				if (!m_parser.finished()) return;

				int retry_time = atol(std::string(recv_buffer.begin, recv_buffer.end).c_str());
				if (retry_time <= 0) retry_time = 60;
#ifdef TORRENT_VERBOSE_LOGGING
				peer_log("*** retrying in %d seconds", retry_time);
#endif

				m_statistics.received_bytes(0, bytes_transferred);
				// temporarily unavailable, retry later
				t->retry_web_seed(this, retry_time);
				disconnect(error_code(m_parser.status_code(), get_http_category()), 1);
				return;
			}


			// we only received the header, no data
			if (recv_buffer.left() == 0) break;

			if (recv_buffer.left() < front_request.length) break;

			// if the response is chunked, we need to receive the last
			// terminating chunk and the tail headers before we can proceed
			if (m_parser.chunked_encoding() && m_chunk_pos >= 0) break;

			m_requests.pop_front();
			incoming_piece(front_request, recv_buffer.begin);
			if (associated_torrent().expired()) return;

			int size_to_cut = m_body_start + front_request.length;
			TORRENT_ASSERT(receive_buffer().left() < size_to_cut + 1
				|| receive_buffer()[size_to_cut] == 'H'
				|| (m_parser.chunked_encoding() && receive_buffer()[size_to_cut] == '\r'));

			cut_receive_buffer(size_to_cut, t->block_size() + 1024);
			if (m_response_left == 0) m_chunk_pos = 0;
			else m_chunk_pos -= front_request.length;
			bytes_transferred -= payload;
			m_body_start = 0;
			if (m_response_left > 0) continue;
			TORRENT_ASSERT(m_response_left == 0);
			m_parser.reset();
		}
	}
Example #11
// Gets the conn_fd to the server.
// Returns 0 if game ended properly.
// The main logic of the client.
int Play(int conn_fd) {
    game_state = WAITING_FOR_CONNECTION_RESPONSE;

    fd_set read_fds, write_fds;

    int read_buf_current_index = 0;
    char read_buffer[BUF_MAX_SIZE] = { 0 };

    int client_input_buf_current_index = 0;
    char client_input[CLIENT_INPUT_MAX_SIZE] = { 0 };

    int send_buf_current_index = 0;
    char* send_buf = NULL;

    queue send_queue = { 0 };

    printf("nim\n");


    while (!quit) {

        FD_ZERO(&read_fds);
        FD_SET(conn_fd, &read_fds);
        FD_SET(STDIN_FILENO, &read_fds);

        FD_ZERO(&write_fds);
        FD_SET(conn_fd, &write_fds);

        if (select(FD_SETSIZE, &read_fds, &write_fds, NULL, NULL) < 0) {
            clean_resources(conn_fd, &send_queue);
            perror("select");
            exit(EXIT_FAILURE);
        }

        if (FD_ISSET(conn_fd, &read_fds)) {
            int ret = receive_buffer(conn_fd, read_buffer, read_buf_current_index);
            if (-1 == ret) {
                clean_resources(conn_fd, &send_queue);
                exit(EXIT_FAILURE);
            }
            else if(-2 == ret){
                if(game_state == WAITING_FOR_CONNECTION_RESPONSE){
                    printf("Client rejected.\n");
                }
                else {
                    printf("Server disconnected.\n");
                }
                clean_resources(conn_fd, &send_queue);
                exit(EXIT_FAILURE);
            }
            read_buf_current_index += ret;
            // Received a whole message, size equals the size in the message header.
            if (read_buf_current_index >= 2 && read_buf_current_index == read_buffer[0]) {
                if(-1 == handle_received_buf(read_buffer)){
                    clean_resources(conn_fd, &send_queue);
                    exit(EXIT_FAILURE);
                }
                read_buf_current_index = 0;
            }

        }
        if (FD_ISSET(STDIN_FILENO, &read_fds)) {
            if(get_input_from_client(client_input, &client_input_buf_current_index, BUF_MAX_SIZE + 2) == 0){
                // Received a whole line of input, terminated by a newline.
                if(client_input[client_input_buf_current_index - 1] == '\n') {
                    if (-1 == handle_client_input_buffer(&send_queue, client_input, client_input_buf_current_index)) {
                        clean_resources(conn_fd, &send_queue);
                        exit(EXIT_FAILURE);
                    }
                    client_input_buf_current_index = 0;
                }
            }
            else{
                client_input_buf_current_index = 0;
            }
        }
        if (FD_ISSET(conn_fd, &write_fds)) {
            if (send_buf == NULL) {
                send_buf = queue_pop(&send_queue);
                send_buf_current_index = 0;
            }
            if(send_buf != NULL) {
                int ret = send_buffer(conn_fd, send_buf, send_buf_current_index);
                if (-1 == ret) {
                    clean_resources(conn_fd, &send_queue);
                    exit(EXIT_FAILURE);
                }
                send_buf_current_index += ret;
                if (send_buf_current_index == send_buf[0]) {
                    free(send_buf);
                    send_buf = NULL;
                    send_buf_current_index = 0;
                }
            }
        }

        if (quit == true) {
            break;
        }
    }
    clean_queue(&send_queue);
    return 0;
}
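The call sites above imply a contract for receive_buffer(): it appends whatever is currently readable after the bytes already buffered, returning the number of bytes read, -1 on a socket error, and -2 when the peer closed the connection. The sketch below is an assumption based on those call sites, not the project's implementation; the name receive_buffer_sketch and the direct use of recv() are mine.

    static int receive_buffer_sketch(int conn_fd, char *buf, int current_index) {
        /* append to the partial message already accumulated in buf */
        ssize_t n = recv(conn_fd, buf + current_index, BUF_MAX_SIZE - current_index, 0);
        if (n < 0) {
            perror("recv");
            return -1;          /* socket error */
        }
        if (n == 0) {
            return -2;          /* peer closed the connection */
        }
        return (int)n;          /* bytes appended; the caller advances its index */
    }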
Example #12
inline void apply_pattern_CTable(const SimpleCommunicationPattern& pattern, RangeT range)
{
  //	 CFinfo << "applying pattern to CTable<Real>" << CFendl;

  const Uint nb_procs = mpi::PE::instance().size();
  
  Uint total_width = 0;
  BOOST_FOREACH(CTable<Real>& array, range)
  {
    total_width += array.row_size();
  }
  
  std::vector<Real> receive_buffer(total_width * pattern.receive_list.size());
  std::vector<Real> send_buffer;
  send_buffer.reserve(total_width * pattern.receive_list.size());
  
/// @todo transform this non-blocking to mpi collectives
/*
  // track non-blocking requests
  std::vector<boost::mpi::request> reqs;
  reqs.reserve(nb_procs*2);
  
  // Do the buffer initialization and communication
  Uint receive_begin = 0;
  for(Uint proc = 0; proc != nb_procs; ++proc)
  {
    const Uint send_begin = send_buffer.size();
    const Uint proc_begin = pattern.send_dist[proc];
    const Uint proc_end = pattern.send_dist[proc+1];
    Uint receive_size = 0;
    BOOST_FOREACH(CTable<Real>& array, range)
    {
      const Uint nb_cols = array.row_size();
      receive_size += nb_cols * (pattern.receive_dist[proc+1] - pattern.receive_dist[proc]);
      for(Uint i = proc_begin; i != proc_end; ++i)
      {
        cf_assert(pattern.send_list[i] < array.size());
        CTable<Real>::ConstRow row = array[pattern.send_list[i]];
        send_buffer.insert(send_buffer.end(), row.begin(), row.end());
      }
    }
    
    // CFinfo << "proc " << proc << " sending to " << CF::Common::mpi::PE::instance().rank() << CFendl;

    // Schedule send and receive operations
    reqs.push_back(world.isend(proc, 0, &send_buffer[send_begin], send_buffer.size() - send_begin));
    reqs.push_back(world.irecv(proc, 0, &receive_buffer[receive_begin], receive_size));
    receive_begin += receive_size;
  }
  
  // Wait for the comms to be done
  boost::mpi::wait_all(reqs.begin(), reqs.end());
*/
  // Unpack the receive buffer
  Uint buffer_idx = 0;
  for(Uint proc = 0; proc != nb_procs; ++proc)
  {
    const Uint proc_begin = pattern.receive_dist[proc];
    const Uint proc_end = pattern.receive_dist[proc+1];
    BOOST_FOREACH(CTable<Real>& array, range)
    {
      const Uint nb_cols = array.row_size();
      for(Uint i = proc_begin; i != proc_end; ++i)
      {
        cf_assert(pattern.receive_targets[i] < array.size());
        CTable<Real>::Row row = array[pattern.receive_targets[i]];
        std::copy(receive_buffer.begin() + buffer_idx, receive_buffer.begin() + buffer_idx + nb_cols, row.begin());
        buffer_idx += nb_cols;
      }
    }
  }
}
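The @todo above suggests replacing the commented-out isend/irecv pairs with a collective. A rough, untested sketch of that replacement, assuming Real is double, a send_buffer packed per process exactly as in the commented-out loop, and plain MPI calls rather than the project's mpi::PE wrapper:

    std::vector<int> send_counts(nb_procs), send_displs(nb_procs);
    std::vector<int> recv_counts(nb_procs), recv_displs(nb_procs);
    for(Uint proc = 0; proc != nb_procs; ++proc)
    {
      // per-process element counts follow directly from the distribution arrays
      send_counts[proc] = total_width * (pattern.send_dist[proc+1] - pattern.send_dist[proc]);
      recv_counts[proc] = total_width * (pattern.receive_dist[proc+1] - pattern.receive_dist[proc]);
      send_displs[proc] = proc ? send_displs[proc-1] + send_counts[proc-1] : 0;
      recv_displs[proc] = proc ? recv_displs[proc-1] + recv_counts[proc-1] : 0;
    }
    MPI_Alltoallv(&send_buffer[0], &send_counts[0], &send_displs[0], MPI_DOUBLE,
                  &receive_buffer[0], &recv_counts[0], &recv_displs[0], MPI_DOUBLE,
                  MPI_COMM_WORLD);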
Example #13
inline void apply_pattern_clist(const SimpleCommunicationPattern& pattern, RangeT range)
{
  typedef CList<ValueT> CListT;

  const Uint nb_procs = mpi::PE::instance().size();

  Uint total_width = 0;
  BOOST_FOREACH(CList<ValueT>& list, range)
  {
    ++total_width;
  }

  std::vector<ValueT> receive_buffer(total_width * pattern.receive_list.size());
  std::vector<ValueT> send_buffer;
  send_buffer.reserve(total_width * pattern.receive_list.size());
  
  // track non-blocking requests
  std::vector<boost::mpi::request> reqs;
  reqs.reserve(nb_procs*2);
  
  // Do the buffer initialization and communication
  Uint receive_begin = 0;
  for(Uint proc = 0; proc != nb_procs; ++proc)
  {
    const Uint send_begin = send_buffer.size();
    const Uint proc_begin = pattern.send_dist[proc];
    const Uint proc_end = pattern.send_dist[proc+1];
    Uint receive_size = 0;
    BOOST_FOREACH(CListT& array, range)
    {
      const Uint nb_cols = 1;
      receive_size += nb_cols * (pattern.receive_dist[proc+1] - pattern.receive_dist[proc]);
      for(Uint i = proc_begin; i != proc_end; ++i)
      {
        cf_assert(pattern.send_list[i] < array.size());
        ValueT& row = array[pattern.send_list[i]];
        send_buffer.insert(send_buffer.end(), row);
      }
    }
    
    // Schedule send and receive operations
    reqs.push_back(world.isend(proc, 0, &send_buffer[send_begin], send_buffer.size() - send_begin));
    reqs.push_back(world.irecv(proc, 0, &receive_buffer[receive_begin], receive_size));
    receive_begin += receive_size;
  }
  
  // Wait for the comms to be done
  boost::mpi::wait_all(reqs.begin(), reqs.end());
  
  // Unpack the receive buffer
  Uint buffer_idx = 0;
  for(Uint proc = 0; proc != nb_procs; ++proc)
  {
    const Uint proc_begin = pattern.receive_dist[proc];
    const Uint proc_end = pattern.receive_dist[proc+1];
    BOOST_FOREACH(CListT& array, range)
    {
      const Uint nb_cols = 1;
      for(Uint i = proc_begin; i != proc_end; ++i)
      {
        cf_assert(pattern.receive_targets[i] < array.size());
        ValueT& row = array[pattern.receive_targets[i]];
        std::copy(receive_buffer.begin() + buffer_idx, receive_buffer.begin() + buffer_idx + nb_cols, &row);
        buffer_idx += nb_cols;
      }
    }
  }
}
Example #14
File: ntp.cpp Project: BitMoneta/fc
      void read_loop()
      {
        assert(_ntp_thread.is_current());

        uint32_t receive_buffer_size = sizeof(uint64_t) * 1024;
        std::shared_ptr<char> receive_buffer(new char[receive_buffer_size], [](char* p){ delete[] p; });
        uint64_t* recv_buf = (uint64_t*)receive_buffer.get();

        //outer while to restart read-loop if exception is thrown while waiting to receive on socket.
        while( !_read_loop_done.canceled() )
        {
          // if you start the read while loop here, the receive_from call will throw "invalid argument" on win32,
          // so instead we start the loop after making our first request
          try 
          {
            _sock.open();
            request_time_task(); //this will re-send a time request

            while( !_read_loop_done.canceled() )
            {
              fc::ip::endpoint from;
              try
              {
                _sock.receive_from( receive_buffer, receive_buffer_size, from );
                wlog("received ntp reply from ${from}",("from",from) );
              } FC_RETHROW_EXCEPTIONS(error, "Error reading from NTP socket");

              fc::time_point receive_time = fc::time_point::now();
              fc::time_point origin_time = ntp_timestamp_to_fc_time_point(recv_buf[3]);
              fc::time_point server_receive_time = ntp_timestamp_to_fc_time_point(recv_buf[4]);
              fc::time_point server_transmit_time = ntp_timestamp_to_fc_time_point(recv_buf[5]);

              fc::microseconds offset(((server_receive_time - origin_time) +
                                       (server_transmit_time - receive_time)).count() / 2);
              fc::microseconds round_trip_delay((receive_time - origin_time) -
                                                (server_transmit_time - server_receive_time));
              //wlog("origin_time = ${origin_time}, server_receive_time = ${server_receive_time}, server_transmit_time = ${server_transmit_time}, receive_time = ${receive_time}",
              //     ("origin_time", origin_time)("server_receive_time", server_receive_time)("server_transmit_time", server_transmit_time)("receive_time", receive_time));
              wlog("ntp offset: ${offset}, round_trip_delay ${delay}", ("offset", offset)("delay", round_trip_delay));

              // if the reply we just received arrived more than a second after our last time request, treat it as stale
              if( round_trip_delay > fc::seconds(1) )
              {
                wlog("received stale ntp reply requested at ${request_time}, send a new time request", ("request_time", origin_time));
                request_now(); //request another reply and ignore this one
              }
              else //we think we have a timely reply, process it
              {
                if( offset < fc::seconds(60*60*24) && offset > fc::seconds(-60*60*24) )
                {
                  _last_ntp_delta_microseconds = offset.count();
                  _last_ntp_delta_initialized = true;
                  fc::microseconds ntp_delta_time = fc::microseconds(_last_ntp_delta_microseconds);
                  _last_valid_ntp_reply_received_time = receive_time;
                  wlog("ntp_delta_time updated to ${delta_time}", ("delta_time",ntp_delta_time) );
                }
                else
                  elog( "NTP time and local time vary by more than a day! ntp:${ntp_time} local:${local}", 
                       ("ntp_time", receive_time + offset)("local", fc::time_point::now()) );
              }
            }
          } // try
          catch (fc::canceled_exception)
          {
            throw;
          }
          catch (const fc::exception& e)
          {
            //swallow any other exception and restart loop
            elog("exception in read_loop, going to restart it. ${e}",("e",e));
          }
          catch (...)
          {
            //swallow any other exception and restart loop
            elog("unknown exception in read_loop, going to restart it.");
          }
          _sock.close();
          fc::usleep(fc::seconds(_retry_failed_request_interval_sec));
        } //outer while loop
        wlog("exiting ntp read_loop");
      } //end read_loop()
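The body of ntp_timestamp_to_fc_time_point() is not shown above. For reference, NTP timestamps are 32.32 fixed-point seconds counted from 1900-01-01, so a self-contained sketch of the conversion to microseconds since the Unix epoch (independent of the fc types, and assuming the value is already in host byte order) could look like this; the helper name is mine.

    #include <cstdint>

    // upper 32 bits: seconds since 1900-01-01; lower 32 bits: fraction in units of 2^-32 s
    std::int64_t ntp_to_unix_microseconds( std::uint64_t ntp_host_order )
    {
        const std::uint64_t ntp_to_unix_epoch_seconds = 2208988800ULL; // 1900-01-01 -> 1970-01-01
        const std::uint64_t seconds  = ntp_host_order >> 32;
        const std::uint64_t fraction = ntp_host_order & 0xffffffffULL;
        // scale the 2^-32 fraction to microseconds, rounding to the nearest microsecond
        const std::uint64_t micros   = ( fraction * 1000000ULL + ( 1ULL << 31 ) ) >> 32;
        return std::int64_t( seconds - ntp_to_unix_epoch_seconds ) * 1000000 + std::int64_t( micros );
    }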