예제 #1
0
            /**
             * Copy constructor: duplicates the payload of other and then
             * positions the read pointer at the start of the copied data.
             * @param other The other data_buffer.
             */
            data_buffer(const data_buffer & other)
                : m_read_ptr(0)
                , file_offset_(other.file_offset_)
                , file_(other.file_)
            {
                /**
                 * Start from an empty buffer, then append the other
                 * buffer's bytes.
                 */
                clear();

                write_bytes(other.data(), other.size());

                /**
                 * Re-point the read pointer at the freshly copied data
                 * (null when the copy is empty).
                 */
                if (m_data.size() > 0)
                {
                    m_read_ptr = &m_data[0];
                }
                else
                {
                    m_read_ptr = 0;
                }
            }
예제 #2
0
bool db_tx::read(const data_buffer & key, T & value)
{
    if (m_Db == 0)
    {
        return false;
    }
    
    Dbt dbt_key(key.data(), static_cast<std::uint32_t> (key.size()));

    Dbt dbt_value;
    
    dbt_value.set_flags(DB_DBT_MALLOC);
    
    auto ret = m_Db->get(m_DbTxn, &dbt_key, &dbt_value, 0);
    
    std::memset(dbt_key.get_data(), 0, dbt_key.get_size());
    
    if (dbt_value.get_data() == 0)
    {
        return false;
    }
    
    try
    {
        /**
         * Allocate the data_buffer.
         */
        data_buffer buffer(
            static_cast<char *>(dbt_value.get_data()), dbt_value.get_size()
        );
        
        /**
         * Decode the value from the buffer.
         */
        value.decode(buffer);
    }
    catch (std::exception & e)
    {
        log_error("DB TX read failed, what = " << e.what() << ".");
        
        return false;
    }

    std::memset(dbt_value.get_data(), 0, dbt_value.get_size());
    
    free(dbt_value.get_data());
    
    return ret == 0;
}
예제 #3
0
    /**
     * Serializes the attached message into buffer, preceded by a
     * HEADER_SIZE-byte length header.
     * @param buffer Destination; resized to HEADER_SIZE + payload size.
     * @return true on success, false if no message is attached.
     */
    bool packedmessage_scan<MessageType>::pack(data_buffer& buffer)const
    {
        if(!msgs)
            return false;

        unsigned msg_size = msgs->ByteSize();
        buffer.resize(HEADER_SIZE + msg_size);
        //Included header file.
        encode_header(buffer, msg_size);

        LOG(INFO)<<" Pack message, msg_size : "<< 
            msg_size <<", buffer : "<<buffer.size();

        // An empty message has nothing to serialize; avoid forming
        // &buffer[HEADER_SIZE], which would index one past the end when
        // buffer.size() == HEADER_SIZE.
        if(msg_size == 0)
            return true;

        return msgs->SerializeToArray(&buffer[HEADER_SIZE], msg_size);
    }
예제 #4
0
    /**
     * Reads the HEADER_SIZE-byte big-endian length prefix from buffer.
     * Returns 0 when the buffer is too small to contain a header.
     */
    unsigned packedmessage_scan<MessageType>::
    decode_header(const data_buffer& buffer)const
    {
        LOG(INFO)<<" Decode header buffer.";

        if(buffer.size() < HEADER_SIZE) {
            return 0;
        }

        unsigned decoded_size = 0;

        // Fold the header bytes in, most-significant byte first.
        for(unsigned idx = 0; idx < HEADER_SIZE; ++idx) {
            const unsigned byte = static_cast<unsigned>(buffer[idx]) & 0xFF;

            decoded_size = decoded_size * 256 + byte;

            LOG(INFO)<<"Buffer data : " << byte;
        }

        return decoded_size;
    }
예제 #5
0
 /**
  * Copy constructor
  *
  * Duplicates the payload of other via write_bytes(); file_offset_ is
  * reset to 0 rather than copied from other.
  * NOTE(review): m_read_ptr is left null after the copy (it is not
  * re-pointed at the copied bytes) — presumably callers rewind/seek
  * before reading; confirm.
  * @param other The other data_buffer.
  */
 data_buffer(const data_buffer & other)
     : m_read_ptr(0)
     , file_offset_(0)
 {
     write_bytes(other.data(), other.size());
 }
예제 #6
0
    /**
     * De-serializes a batch of parcels from the raw data received by the
     * parcelport and hands each parcel to the parcelport for processing.
     * Also fills in the performance counter sample (parcel count, bytes
     * read, serialization time) and reports it via add_received_data.
     * @param pp The parcelport that received the data.
     * @param parcel_data The raw serialized parcel data.
     * @param inbound_data_size Size of the inbound data (not used here).
     * @param receive_data Performance counter sample to complete.
     */
    void decode_message(parcelport& pp,
        data_buffer const & parcel_data,
        boost::uint64_t inbound_data_size,
        performance_counters::parcels::data_point receive_data)
    {
        unsigned archive_flags = boost::archive::no_header;
        if (!pp.allow_array_optimizations())
            archive_flags |= util::disable_array_optimization;

        // protect from un-handled exceptions bubbling up
        try {
            try {
                // mark start of serialization
                util::high_resolution_timer timer;
                boost::int64_t overall_add_parcel_time = 0;

                {
                    // De-serialize the parcel data
                    util::portable_binary_iarchive archive(parcel_data,
                        parcel_data.size(), archive_flags);

                    std::size_t parcel_count = 0;

                    archive >> parcel_count;
                    for(std::size_t i = 0; i < parcel_count; ++i)
                    {
                        // de-serialize parcel and add it to incoming parcel queue
                        parcel p;
                        archive >> p;

                        // make sure this parcel ended up on the right locality
                        BOOST_ASSERT(p.get_destination_locality() == pp.here());

                        // be sure not to measure add_parcel as serialization time
                        boost::int64_t add_parcel_time = timer.elapsed_nanoseconds();
                        pp.add_received_parcel(p);
                        overall_add_parcel_time += timer.elapsed_nanoseconds() -
                            add_parcel_time;
                    }

                    // complete received data with parcel count
                    receive_data.num_parcels_ = parcel_count;
                    receive_data.raw_bytes_ = archive.bytes_read();
                }

                // store the time required for serialization
                // (total elapsed time minus the time spent inside
                // add_received_parcel, accumulated above)
                receive_data.serialization_time_ = timer.elapsed_nanoseconds() -
                    overall_add_parcel_time;

                pp.add_received_data(receive_data);
            }
            catch (hpx::exception const& e) {
                LPT_(error)
                    << "decode_message: caught hpx::exception: "
                    << e.what();
                hpx::report_error(boost::current_exception());
            }
            catch (boost::system::system_error const& e) {
                LPT_(error)
                    << "decode_message: caught boost::system::error: "
                    << e.what();
                hpx::report_error(boost::current_exception());
            }
            catch (boost::exception const&) {
                LPT_(error)
                    << "decode_message: caught boost::exception.";
                hpx::report_error(boost::current_exception());
            }
            catch (std::exception const& e) {
                // We have to repackage all exceptions thrown by the
                // serialization library as otherwise we will lose the
                // e.what() description of the problem, due to slicing.
                // The repackaged exception is re-thrown here and handled
                // by the enclosing catch (...) clause below.
                boost::throw_exception(boost::enable_error_info(
                    hpx::exception(serialization_error, e.what())));
            }
        }
        catch (...) {
            LPT_(error)
                << "decode_message: caught unknown exception.";
            hpx::report_error(boost::current_exception());
        }
    }
예제 #7
0
    /**
     * Parses the message payload that follows the HEADER_SIZE-byte
     * length header in buffer.
     * @param buffer Header plus serialized payload.
     * @return true if a message was parsed, false if no message is
     *         attached or the buffer is smaller than the header.
     */
    bool packedmessage_scan<MessageType>::unpack(const data_buffer& buffer)
    {
        LOG(INFO)<<" Unpack buffer size " << buffer.size();

        // Guard against a null message (pack() performs the same check)
        // and against buffers shorter than the header: previously
        // buffer.size() - HEADER_SIZE underflowed (unsigned) and
        // &buffer[HEADER_SIZE] indexed out of range.
        if(!msgs || buffer.size() < HEADER_SIZE)
            return false;

        if(buffer.size() == HEADER_SIZE) {
            // Empty payload: parse zero bytes without forming an
            // out-of-range reference into the buffer.
            return msgs->ParseFromArray(0, 0);
        }

        return msgs->ParseFromArray(&buffer[HEADER_SIZE], buffer.size() - HEADER_SIZE);
    }
예제 #8
0
 /**
  * Returns a buffer's storage to the cache for later reuse and leaves
  * the caller's handle empty.
  * @param buffer The buffer to reclaim; emptied on return.
  */
 void reclaim_data_buffer(data_buffer& buffer)
 {
     // Order matters: the cache must record the buffer (keyed by its
     // current size) before the buffer is shrunk and reset.
     cache_.add(buffer.size(), buffer);
     buffer.resize(0);
     buffer.reset();
 }
예제 #9
0
/**
 * Stores an orphan transaction (one whose parent transactions have not
 * been seen yet) in the global orphan maps so it can be revisited when
 * its parents arrive.
 * @param buffer The encoded transaction; a copy is made and retained.
 * @return true if the orphan was stored, false if it was already known
 *         or exceeds the size limit.
 */
bool utility::add_orphan_tx(const data_buffer & buffer)
{
    /**
     * Copy the buffer (the orphan maps keep this copy alive).
     */
    auto buffer_copy = std::make_shared<data_buffer> (
        buffer.data(), buffer.size()
    );
    
    /**
     * Allocate the transaction.
     */
    transaction tx;
    
    /**
     * Decode the transaction from the buffer.
     */
    tx.decode(*buffer_copy);
    
    /**
     * Rewind the buffer so later readers start from the beginning.
     */
    buffer_copy->rewind();
    
    /**
     * Get the hash of the transaction.
     */
    auto hash_tx = tx.get_hash();
    
    /**
     * Already tracked; nothing to do.
     */
    if (globals::instance().orphan_transactions().count(hash_tx) > 0)
    {
        return false;
    }

    /**
     * Ignore big transactions, to avoid a send-big-orphans memory
     * exhaustion attack. If a peer has a legitimate large transaction
     * with a missing parent then we assume it will rebroadcast it later,
     * after the parent transaction(s) have been mined or received.
     * 10,000 orphans, each of which is at most 5,000 bytes big is at most 
     * 50 megabytes of orphans.
     */
    if (buffer_copy->size() > 5000)
    {
        log_debug(
            "Utility, add orphan tx ignoring large orphan tx size = " <<
            buffer_copy->size() << ", hash = " <<
            hash_tx.to_string().substr(0, 10)  << "."
        );

        return false;
    }

    globals::instance().orphan_transactions()[hash_tx] = buffer_copy;
    
    /**
     * Index the orphan by each previous output it spends so it can be
     * located when a parent transaction shows up.
     */
    for (auto & i : tx.transactions_in())
    {
        globals::instance().orphan_transactions_by_previous()[
            i.previous_out().get_hash()].insert(
            std::make_pair(hash_tx, buffer_copy)
        );
    }

    log_debug(
        "Utility, add orphan tx stored orphan tx " <<
        hash_tx.to_string().substr(0, 10) << ", orphans = " <<
        globals::instance().orphan_transactions().size() << "."
    );
    
    return true;
}