json::Node PreparedQueryBase::_refetchStringValue( const std::size_t index ){
    // These types which are of uncertain size we must fetch separately now that we know how much
    // space to allocate.
    MYSQL_BIND& binder = m_results[ index ];
    if( *binder.length == 0 ){
        return json::Node( "", "" );
    }

    // It is not empty, so create a new binder and copy over this one's values.
    MYSQL_BIND rightSizeBinder;
    memcpy( &rightSizeBinder, &binder, sizeof( MYSQL_BIND ) );
    rightSizeBinder.buffer        = new char[ (*binder.length) + 1 ];
    rightSizeBinder.buffer_length = (*binder.length) + 1;

    // Now fetch the column with our right-sized buffer.
    try {
        mysql_stmt_fetch_column( m_statement, &rightSizeBinder, index, 0 );
        LW_MYSQL_STMT_CHECK_FOR_ERRORS( m_statement, "Failed to fetch column" );
    }
    catch( ... ){
        lw::util::safeDeleteArray( (char*&)rightSizeBinder.buffer );
        rethrow_exception( current_exception() );
    }

    // Now convert the value to a string JSON node, clean up, and return.
    json::Node node( "", string( (char*)rightSizeBinder.buffer, *rightSizeBinder.length ) );
    lw::util::safeDeleteArray( (char*&)rightSizeBinder.buffer );
    return node;
}
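// Side note (not part of the snippet above): inside a catch(...) handler, a bare
// `throw;` rethrows the in-flight exception and is equivalent to the
// rethrow_exception( current_exception() ) call used there, without materializing
// an exception_ptr. A minimal self-contained sketch of the same
// cleanup-then-rethrow shape:
#include <exception>
#include <stdexcept>

void cleanupThenRethrow()
{
    char* buffer = new char[ 64 ];
    try {
        throw std::runtime_error( "fetch failed" );  // stand-in for the fallible fetch
    } catch( ... ) {
        delete[] buffer;  // release the temporary buffer before propagating
        throw;            // same effect as rethrow_exception( current_exception() )
    }
}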
wstring ExceptionHelper::GetCurrentExceptionMessage()
{
    wstring exceptionMessage;

    const exception_ptr exceptionPointer = current_exception();
    if(exceptionPointer == nullptr)
    {
        return wstring();
    }

    try
    {
        rethrow_exception(exceptionPointer);
    }
    catch (const SelectedTextTranslateBaseException& exception)
    {
        exceptionMessage = exception.GetFullErrorMessage();
    }
    catch (const SelectedTextTranslateBaseException* exception)
    {
        exceptionMessage = exception->GetFullErrorMessage();
    }
    catch (const exception& exception)
    {
        exceptionMessage = StringUtilities::Format(L"\tException message: '%hs'", exception.what());
    }
    catch (...)
    {
        exceptionMessage = L"\tUnknown exception occurred.";
    }

    return exceptionMessage;
}
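// A hedged, standard-library-only sketch of the same "describe whatever is in
// flight" idiom; the function and message strings here are illustrative, not part
// of the ExceptionHelper API above. Note that current_exception() only returns a
// non-null pointer while an exception is being handled.
#include <exception>
#include <string>

std::string describeCurrentException()
{
    const std::exception_ptr pointer = std::current_exception();
    if (pointer == nullptr)
    {
        return "no exception in flight";
    }

    try
    {
        std::rethrow_exception(pointer);
    }
    catch (const std::exception& exception)
    {
        return exception.what();
    }
    catch (...)
    {
        return "unknown exception occurred";
    }
}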
ThreadState* VMThreadState::state_as_object(STATE) {
    if(raise_reason_ == cNone && current_exception_.get()->nil_p()) return nil<ThreadState>();

    ThreadState* thread_state = ThreadState::create(state);
    thread_state->raise_reason(state, Fixnum::from(raise_reason_));
    thread_state->destination_scope(state, destination_scope());
    thread_state->throw_dest(state, throw_dest());
    thread_state->current_exception(state, current_exception());
    thread_state->raise_value(state, raise_value());
    return thread_state;
}
void trampoline_push( intptr_t vp)
{
    typedef typename Coro::param_type   param_type;

    BOOST_ASSERT( vp);

    setup< Fn > * from( reinterpret_cast< setup< Fn > * >( vp) );
#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES
    Fn fn( forward< Fn >( from->fn) );
#else
    Fn fn( move( from->fn) );
#endif
    coroutine_context caller( * from->caller);
    coroutine_context callee( * from->callee);

    Coro c( & caller, & callee,
            stack_unwind == from->attr.do_unwind,
            fpu_preserved == from->attr.preserve_fpu);
    from = 0;

    {
        param_type * from(
            reinterpret_cast< param_type * >(
                c.callee_->jump(
                    * c.caller_,
                    reinterpret_cast< intptr_t >( & c),
                    c.preserve_fpu() ) ) );
        if ( ! from->do_unwind)
        {
            BOOST_ASSERT( from->data);

            // create push_coroutine
            typename Self::impl_type b( & callee, & caller, false, c.preserve_fpu(), from->data);
            Self yield( & b);
            try
            { fn( yield); }
            catch ( forced_unwind const&)
            {}
            catch (...)
            { c.except_ = current_exception(); }
        }
    }

    c.flags_ |= flag_complete;
    param_type to;
    c.callee_->jump(
        * c.caller_,
        reinterpret_cast< intptr_t >( & to),
        c.preserve_fpu() );
    BOOST_ASSERT_MSG( false, "push_coroutine is complete");
}
void CThreadPoolItemExecutorJoin::Impl::exec()
{
    try
    {
        func();
    }catch(...)
    {
        except=current_exception();
    }
    complete.signal();  //notify CThreadPool::join
}
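// Minimal sketch of the same capture-and-signal pattern with plain std::thread;
// runAndPropagate is an illustrative name, not part of CThreadPool. The worker
// stores the exception_ptr, the join stands in for complete.signal(), and the
// caller resurfaces the failure on its own thread.
#include <exception>
#include <functional>
#include <thread>

void runAndPropagate(const std::function<void()>& func)
{
    std::exception_ptr except;
    std::thread worker([&]
    {
        try { func(); }
        catch (...) { except = std::current_exception(); }  //capture on the worker thread
    });
    worker.join();                                          //the join is the "signal"
    if (except)
        std::rethrow_exception(except);                     //rethrow on the joining thread
}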
template< class E >
inline exception_ptr make_exception_ptr( E const & e )
{
    try
    {
        throw e;
    }
    catch (...)
    {
        return current_exception();
    }
}
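// Usage sketch: C++11's std::make_exception_ptr is specified as-if it performs
// this same throw/catch dance, so an exception_ptr can be minted with no
// exception in flight and rethrown later, possibly on another thread:
#include <exception>
#include <stdexcept>

std::exception_ptr saved = std::make_exception_ptr(std::runtime_error("deferred"));
// ...later: std::rethrow_exception(saved);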
template< class T >
inline exception_ptr copy_exception( T const & e )
{
    try
    {
        throw enable_current_exception(e);
    }
    catch( ... )
    {
        return current_exception();
    }
}
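// Usage note: boost::enable_current_exception (from <boost/throw_exception.hpp>)
// tags the thrown object so that boost::current_exception can later capture it
// faithfully even without compiler support. A typical throw site looks like
// `throw boost::enable_current_exception( my_error() );`, where my_error is a
// placeholder exception type.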
Object* ThreadState::state_as_object(STATE) {
    if(raise_reason_ == cNone && current_exception_.get()->nil_p()) return cNil;

    Exception* exc = Exception::create(state);
    exc->klass(state, G(exc_vm_internal));
    exc->set_ivar(state, state->symbol("reason"), Fixnum::from(raise_reason_));
    exc->set_ivar(state, state->symbol("destination"), destination_scope());
    exc->set_ivar(state, state->symbol("throw_dest"), throw_dest());
    exc->set_ivar(state, state->symbol("exception"), current_exception());
    exc->set_ivar(state, state->symbol("value"), raise_value());
    return exc;
}
void trampoline( typename worker_fiber::coro_t::yield_type & yield)
{
    BOOST_ASSERT( yield);

    void * p( yield.get() );
    BOOST_ASSERT( p);
    setup< Fn > * from( static_cast< setup< Fn > * >( p) );
#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES
    Fn fn_( forward< Fn >( from->fn) );
#else
    Fn fn_( move( from->fn) );
#endif
    worker_fiber f( & yield);
    from->f = & f;
    f.set_running();
    f.suspend();

    try
    {
#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES
        Fn fn( forward< Fn >( fn_) );
#else
        Fn fn( move( fn_) );
#endif
        BOOST_ASSERT( f.is_running() );
        fn();
        BOOST_ASSERT( f.is_running() );
    }
    catch ( coro::detail::forced_unwind const&)
    {
        f.set_terminated();
        f.release();
        throw;
    }
    catch ( fiber_interrupted const&)
    { f.set_exception( current_exception() ); }
    catch (...)
    { std::terminate(); }

    f.set_terminated();
    f.release();
    f.suspend();

    BOOST_ASSERT_MSG( false, "fiber already terminated");
}
void result::update(pool_node_ptr const& node, call_ptr const& _call)
{
    {
        boost::lock_guard<boost::mutex> lock(m_mutex);
        if(m_ready)
            return;

        BOOST_ASSERT(m_id);
        BOOST_ASSERT(!m_exception);
        BOOST_ASSERT(m_pointers.empty());

        if(_call && (_call->id() == m_id))
        {
            boost::swap(m_exception, _call->m_exception);
            if(!m_exception)
            {
                try
                {
                    boost::swap(m_pointers, _call->m_pointers);
                    update_param(*node, _call);
                }
                catch(...)
                {
                    m_exception = current_exception();
                }
            }
        }
        else
        {
            m_exception = boost::make_shared<remote_error>
                (remote_error::invalid_return_type, "invalid return call");
        }

        m_ready = true;
    }
    m_condition.notify_all();
}
void SessionImpl::fetch_body( const size_t length, const shared_ptr< Session > session, const function< void ( const shared_ptr< Session >, const Bytes& ) >& callback ) const
{
    const auto data_ptr = asio::buffer_cast< const Byte* >( session->m_pimpl->m_request->m_pimpl->m_buffer->data( ) );
    const auto data = Bytes( data_ptr, data_ptr + length );
    session->m_pimpl->m_request->m_pimpl->m_buffer->consume( length );

    auto& body = m_request->m_pimpl->m_body;

    if ( body.empty( ) )
    {
        body = data;
    }
    else
    {
        body.insert( body.end( ), data.begin( ), data.end( ) );
    }

    try
    {
        callback( session, data );
    }
    catch ( const int status_code )
    {
        const auto error_handler = session->m_pimpl->get_error_handler( );
        error_handler( status_code, runtime_error( m_settings->get_status_message( status_code ) ), session );
    }
    catch ( const regex_error& re )
    {
        const auto error_handler = session->m_pimpl->get_error_handler( );
        error_handler( 500, re, session );
    }
    catch ( const runtime_error& re )
    {
        const auto error_handler = session->m_pimpl->get_error_handler( );
        error_handler( 400, re, session );
    }
    catch ( const exception& ex )
    {
        const auto error_handler = session->m_pimpl->get_error_handler( );
        error_handler( 500, ex, session );
    }
    catch ( ... )
    {
        auto cex = current_exception( );

        if ( cex not_eq nullptr )
        {
            try
            {
                rethrow_exception( cex );
            }
            catch ( const exception& ex )
            {
                const auto error_handler = session->m_pimpl->get_error_handler( );
                error_handler( 500, ex, session );
            }
            catch ( ... )
            {
                const auto error_handler = session->m_pimpl->get_error_handler( );
                error_handler( 500, runtime_error( "Internal Server Error" ), session );
            }
        }
        else
        {
            const auto error_handler = session->m_pimpl->get_error_handler( );
            error_handler( 500, runtime_error( "Internal Server Error" ), session );
        }
    }
}
inline bool uncaught_exception() noexcept(true)
    { return current_exception() != nullptr; }
nested_exception() : nested(current_exception()) {}
nested_exception::nested_exception() : __ptr_(current_exception()) { }
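// Usage sketch for the nested_exception machinery above: std::throw_with_nested
// wraps the in-flight exception (captured via current_exception() in the
// constructor) inside a new one, and std::rethrow_if_nested walks the chain.
#include <exception>
#include <stdexcept>

void loadConfig()
{
    try {
        throw std::runtime_error("disk read failed");    //low-level failure
    } catch (...) {
        //attaches the current exception as the nested one
        std::throw_with_nested(std::runtime_error("could not load config"));
    }
}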
static exception_ptr make_error_from_current_exception() { return current_exception(); }
void BlockchainScanner::scanBlockData(shared_ptr<BlockDataBatch> batch)
{
   //getBlock lambda
   auto getBlock = [&](unsigned height)->BlockData
   {
      auto iter = batch->blocks_.find(height);
      if (iter == batch->blocks_.end())
      {
         //TODO: encapsulate in try block to catch deser errors and signal pull thread
         //termination before exiting scope. cant have the scan thread hanging if this
         //one fails. Also update batch->end_ if we didn't go as far as that block height

         //grab block file map
         BlockHeader* blockheader = nullptr;
         blockheader = &blockchain_->getHeaderByHeight(height);

         auto filenum = blockheader->getBlockFileNum();
         auto mapIter = batch->fileMaps_.find(filenum);
         if (mapIter == batch->fileMaps_.end())
         {
            //we haven't grabbed that file map yet
            auto insertPair = batch->fileMaps_.insert(
               make_pair(filenum, move(blockDataLoader_.get(filenum, true))));

            mapIter = insertPair.first;
         }

         auto filemap = mapIter->second.get();

         //find block and deserialize it
         try
         {
            BlockData bdata;
            bdata.deserialize(
               filemap->getPtr() + blockheader->getOffset(),
               blockheader->getBlockSize(),
               blockheader, false);

            auto insertPair = batch->blocks_.insert(make_pair(height, move(bdata)));
            iter = insertPair.first;
         }
         catch (...)
         {
            LOGERR << "unknown block deser error during scan at height #" << height;
            batch->exceptionPtr_ = current_exception();
            return BlockData();
         }
      }

      return iter->second;
   };

   //parser lambda
   auto blockDataLoop = [&](function<void(const BlockData&)> callback)
   {
      auto currentBlock = batch->start_;

      while (currentBlock <= batch->end_)
      {
         BlockData&& bdata = getBlock(currentBlock);
         if (!bdata.isInitialized())
            return;

         callback(bdata);

         currentBlock += totalThreadCount_;
      }
   };

   //txout lambda
   auto txoutParser = [&](const BlockData& blockdata)->void
   {
      //TODO: flag isMultisig

      const BlockHeader* header = blockdata.header();

      //update processed height
      auto topHeight = header->getBlockHeight();
      batch->highestProcessedHeight_ = topHeight;

      auto& txns = blockdata.getTxns();
      for (unsigned i = 0; i < txns.size(); i++)
      {
         const BCTX& txn = *(txns[i].get());

         for (unsigned y = 0; y < txn.txouts_.size(); y++)
         {
            auto& txout = txn.txouts_[y];

            BinaryRefReader brr(
               txn.data_ + txout.first, txout.second);
            brr.advance(8);
            unsigned scriptSize = (unsigned)brr.get_var_int();
            auto&& scrAddr = BtcUtils::getTxOutScrAddr(
               brr.get_BinaryDataRef(scriptSize));

            if (!scrAddrFilter_->hasScrAddress(scrAddr))
               continue;

            //if we got this far, this txout is ours
            //get tx hash
            auto& txHash = txn.getHash();

            //construct StoredTxOut
            StoredTxOut stxo;
            stxo.dataCopy_ = BinaryData(
               txn.data_ + txout.first, txout.second);
            stxo.parentHash_ = txHash;
            stxo.blockHeight_ = header->getBlockHeight();
            stxo.duplicateID_ = header->getDuplicateID();
            stxo.txIndex_ = i;
            stxo.txOutIndex_ = y;
            stxo.scrAddr_ = scrAddr;
            stxo.spentness_ = TXOUT_UNSPENT;
            stxo.parentTxOutCount_ = txn.txouts_.size();
            stxo.isCoinbase_ = txn.isCoinbase_;
            auto value = stxo.getValue();

            auto&& hgtx = DBUtils::heightAndDupToHgtx(
               stxo.blockHeight_, stxo.duplicateID_);

            auto&& txioKey = DBUtils::getBlkDataKeyNoPrefix(
               stxo.blockHeight_, stxo.duplicateID_,
               i, y);

            //update utxos_
            auto& stxoHashMap = batch->utxos_[txHash];
            stxoHashMap.insert(make_pair(y, move(stxo)));

            //update ssh_
            auto& ssh = batch->ssh_[scrAddr];
            auto& subssh = ssh.subHistMap_[hgtx];

            //deal with txio count in subssh at serialization
            TxIOPair txio;
            txio.setValue(value);
            txio.setTxOut(txioKey);
            txio.setFromCoinbase(txn.isCoinbase_);
            subssh.txioMap_.insert(make_pair(txioKey, move(txio)));
         }
      }
   };

   //txin lambda
   auto txinParser = [&](const BlockData& blockdata)->void
   {
      const BlockHeader* header = blockdata.header();
      auto& txns = blockdata.getTxns();

      for (unsigned i = 0; i < txns.size(); i++)
      {
         const BCTX& txn = *(txns[i].get());

         for (unsigned y = 0; y < txn.txins_.size(); y++)
         {
            auto& txin = txn.txins_[y];

            BinaryDataRef outHash(
               txn.data_ + txin.first, 32);

            auto utxoIter = utxoMap_.find(outHash);
            if (utxoIter == utxoMap_.end())
               continue;

            unsigned txOutId = READ_UINT32_LE(
               txn.data_ + txin.first + 32);

            auto idIter = utxoIter->second.find(txOutId);
            if (idIter == utxoIter->second.end())
               continue;

            //if we got this far, this txin consumes one of our utxos

            //create spent txout
            auto&& hgtx = DBUtils::getBlkDataKeyNoPrefix(
               header->getBlockHeight(), header->getDuplicateID());

            auto&& txinkey = DBUtils::getBlkDataKeyNoPrefix(
               header->getBlockHeight(), header->getDuplicateID(),
               i, y);

            StoredTxOut stxo = idIter->second;
            stxo.spentness_ = TXOUT_SPENT;
            stxo.spentByTxInKey_ = txinkey;

            //if this tx's hash was never pulled, let's add it to the stxo's
            //parent hash, in order to keep track of this tx in the hint db
            if (txn.txHash_.getSize() == 0)
               stxo.parentHash_ = move(txn.getHash());

            //add to ssh_
            auto& ssh = batch->ssh_[stxo.getScrAddress()];
            auto& subssh = ssh.subHistMap_[hgtx];

            //deal with txio count in subssh at serialization
            TxIOPair txio;
            auto&& txoutkey = stxo.getDBKey(false);
            txio.setTxOut(txoutkey);
            txio.setTxIn(txinkey);
            txio.setValue(stxo.getValue());
            subssh.txioMap_[txoutkey] = move(txio);

            //add to spentTxOuts_
            batch->spentTxOuts_.push_back(move(stxo));
         }
      }
   };

   //txout loop
   blockDataLoop(txoutParser);

   //done with txouts, fill the future flag and wait on the mutex
   //to move to txins processing
   batch->flagUtxoScanDone();
   unique_lock<mutex> txinLock(batch->parseTxinMutex_);

   //txins loop
   blockDataLoop(txinParser);
}
template< class T >
inline exception_ptr copy_exception( T&& e )
{
    try
    {
        throw e;
    }
    catch (...)
    {
        return current_exception();
    }
    return exception_ptr();  // unreachable: the catch clause above always returns
}
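// Usage sketch for the overload above:
//
//     exception_ptr p = copy_exception( std::runtime_error( "boom" ) );
//     // p can later cross a thread boundary and be rethrown via rethrow_exception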
void SessionImpl::parse_request( const asio::error_code& error, const shared_ptr< Session > session, const function< void ( const shared_ptr< Session > ) >& callback )
try
{
    if ( error )
    {
        throw runtime_error( error.message( ) );
    }

    istream stream( m_buffer.get( ) );
    const auto items = parse_request_line( stream );
    const auto uri = Uri::parse( "http://localhost" + items.at( "path" ) );

    m_request = make_shared< Request >( );
    m_request->m_pimpl->m_path = Uri::decode( uri.get_path( ) );
    m_request->m_pimpl->m_method = items.at( "method" );
    m_request->m_pimpl->m_version = stod( items.at( "version" ) );
    m_request->m_pimpl->m_headers = parse_request_headers( stream );
    m_request->m_pimpl->m_query_parameters = uri.get_query_parameters( );

    callback( session );
}
catch ( const int status_code )
{
    runtime_error re( m_settings->get_status_message( status_code ) );
    failure( session, status_code, re );
}
catch ( const regex_error& re )
{
    failure( session, 500, re );
}
catch ( const runtime_error& re )
{
    failure( session, 400, re );
}
catch ( const exception& ex )
{
    failure( session, 500, ex );
}
catch ( ... )
{
    auto cex = current_exception( );

    if ( cex not_eq nullptr )
    {
        try
        {
            rethrow_exception( cex );
        }
        catch ( const exception& ex )
        {
            failure( session, 500, ex );
        }
        catch ( ... )
        {
            runtime_error re( "Internal Server Error" );
            failure( session, 500, re );
        }
    }
    else
    {
        runtime_error re( "Internal Server Error" );
        failure( session, 500, re );
    }
}
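// The handler chain above hangs off a function-try-block: the try keyword sits
// between the signature and the body, so the catch clauses cover the whole body.
// A minimal standalone sketch of the syntax (parseLine is an illustrative name):
#include <iostream>
#include <stdexcept>

void parseLine(const char* input)
try
{
    if (input == nullptr) throw std::runtime_error("null input");
    std::cout << input << std::endl;
}
catch (const std::exception& ex)
{
    std::cerr << "parse failed: " << ex.what() << std::endl;
}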
void BitcoinP2P::connectLoop(void)
{
   size_t waitBeforeReconnect = 0;
   promise<bool> shutdownPromise;
   shutdownFuture_ = shutdownPromise.get_future();

   while (run_.load(memory_order_relaxed))
   {
      //clean up stacks
      dataStack_ = make_shared<BlockingStack<vector<uint8_t>>>();

      verackPromise_ = make_unique<promise<bool>>();
      auto verackFuture = verackPromise_->get_future();

      while (run_.load(memory_order_relaxed))
      {
         if (binSocket_.openSocket(false))
            break;

         if (waitBeforeReconnect < 5000)
            waitBeforeReconnect += RECONNECT_INCREMENT_MS;

         this_thread::sleep_for(chrono::milliseconds(waitBeforeReconnect));
      }

      auto processThread = [this](void)->void
      {
         try
         {
            this->processDataStackThread();
         }
         catch (...)
         {
            this->process_except_ = current_exception();
         }
      };

      pollSocketThread();
      thread processThr(processThread);

      //send version payload
      Payload_Version version;
      auto timestamp = getTimeStamp();

      struct sockaddr clientsocketaddr;

      try
      {
         //send version
         if (binSocket_.getSocketName(clientsocketaddr) != 0)
            throw SocketError("failed to get client sockaddr");

         if (binSocket_.getPeerName(node_addr_) != 0)
            throw SocketError("failed to get peer sockaddr");

         // Services, for future extensibility
         uint32_t services = NODE_WITNESS;

         version.setVersionHeaderIPv4(70012, services, timestamp,
            node_addr_, clientsocketaddr);

         version.userAgent_ = "Armory:0.95";
         version.startHeight_ = -1;

         sendMessage(move(version));

         //wait on verack
         verackFuture.get();
         verackPromise_.reset();
         LOGINFO << "Connected to Bitcoin node";

         //signal calling thread
         connectedPromise_->set_value(true);
         waitBeforeReconnect = 0;
      }
      catch (...)
      {
         waitBeforeReconnect += RECONNECT_INCREMENT_MS;
         this_thread::sleep_for(chrono::milliseconds(waitBeforeReconnect));
      }

      //wait on threads
      if (processThr.joinable())
         processThr.join();

      //close socket to guarantee select returns
      if (binSocket_.isValid())
         binSocket_.closeSocket();

      LOGINFO << "Disconnected from Bitcoin node";
   }

   shutdownPromise.set_value(true);
}
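// Minimal sketch of the promise/future handshake used above for the verack and
// shutdown signals (names illustrative): one thread sets the value, the other
// blocks on get() until it arrives.
#include <future>
#include <thread>

void handshakeDemo()
{
    std::promise<bool> connectedPromise;
    auto connectedFuture = connectedPromise.get_future();

    std::thread worker([&] { connectedPromise.set_value(true); });  //signal
    connectedFuture.get();                                          //block until signaled
    worker.join();
}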
void BlockDataManagerConfig::parseArgs(int argc, char* argv[])
{
   /***
   --testnet: run db against testnet bitcoin network

   --regtest: run db against regression test network

   --rescan: delete all processed history data and rescan blockchain from the
   first block

   --rebuild: delete all DB data and build and scan from scratch

   --rescanSSH: delete balance and txcount data and rescan it. Much faster than
   rescan or rebuild.

   --datadir: path to the operation folder

   --dbdir: path to folder containing the database files. If empty, a new db
   will be created there

   --satoshi-datadir: path to blockchain data folder (blkXXXXX.dat files)

   --ram_usage: defines the ram use during scan operations. 1 level averages
   128MB of ram (without accounting the base amount, ~400MB). Defaults at 4.
   Can't be lower than 1. Can be changed in between processes

   --thread-count: defines how many processing threads can be used during db
   builds and scans. Defaults to maximum available CPU threads. Can't be
   lower than 1. Can be changed in between processes

   --zcthread-count: defines the maximum number of threads the zc parser can
   create for processing incoming transactions from the network node

   --db-type: sets the db type:
      DB_BARE:  tracks wallet history only. Smallest DB.
      DB_FULL:  tracks wallet history and resolves all relevant tx hashes.
                ~750MB DB at the time of 0.95 release. Default DB type.
      DB_SUPER: tracks all blockchain history. XXL DB (100GB+).
                Not implemented yet.

      db type cannot be changed in between processes. Once a db has been
      built with a certain type, it will always function according to that
      type. Specifying another type will do nothing. Build a new db to
      change type.

   --cookie: create a cookie file holding a random authentication key to allow
   local clients to make use of elevated commands, like shutdown.

   --fcgi-port: sets the DB listening port.

   --clear-mempool: delete all zero confirmation transactions from the DB.

   ***/

   try
   {
      //parse cli args
      map<string, string> args;
      for (int i = 1; i < argc; i++)
      {
         //check prefix
         if (strlen(argv[i]) < 2)
            throw DbErrorMsg("invalid CLI arg");

         string prefix(argv[i], 2);
         if (prefix != "--")
            throw DbErrorMsg("invalid CLI arg");

         //strip prefix and tokenize
         string line(argv[i] + 2);

         auto&& argkeyval = getKeyValFromLine(line, '=');
         args.insert(make_pair(
            argkeyval.first, stripQuotes(argkeyval.second)));
      }

      processArgs(args, true);

      //figure out datadir
      auto argIter = args.find("datadir");
      if (argIter != args.end())
      {
         dataDir_ = argIter->second;
         args.erase(argIter);
      }
      else
      {
         if (!testnet_ && !regtest_)
            dataDir_ = defaultDataDir_;
         else if (!regtest_)
            dataDir_ = defaultTestnetDataDir_;
         else
            dataDir_ = defaultRegtestDataDir_;
      }

      expandPath(dataDir_);

      //get datadir
      auto configPath = dataDir_;
      appendPath(configPath, "armorydb.conf");

      if (DBUtils::fileExists(configPath, 2))
      {
         ConfigFile cf(configPath);
         auto mapIter = cf.keyvalMap_.find("datadir");
         if (mapIter != cf.keyvalMap_.end())
            throw DbErrorMsg("datadir is illegal in .conf file");

         processArgs(cf.keyvalMap_, false);
      }

      processArgs(args, false);

      //figure out defaults
      bool autoDbDir = false;
      if (dbDir_.size() == 0)
      {
         dbDir_ = dataDir_;
         appendPath(dbDir_, dbDirExtention_);
         autoDbDir = true;
      }

      if (blkFileLocation_.size() == 0)
      {
         if (!testnet_)
            blkFileLocation_ = defaultBlkFileLocation_;
         else
            blkFileLocation_ = defaultTestnetBlkFileLocation_;
      }

      //expand paths if necessary
      expandPath(dbDir_);
      expandPath(blkFileLocation_);

      if (blkFileLocation_.size() < 6 ||
         blkFileLocation_.substr(blkFileLocation_.length() - 6, 6) != "blocks")
      {
         appendPath(blkFileLocation_, "blocks");
      }

      logFilePath_ = dataDir_;
      appendPath(logFilePath_, "dbLog.txt");

      //test all paths
      auto testPath = [](const string& path, int mode)
      {
         if (!DBUtils::fileExists(path, mode))
         {
            stringstream ss;
            ss << path << " is not a valid path";

            cout << ss.str() << endl;
            throw DbErrorMsg(ss.str());
         }
      };

      testPath(dataDir_, 6);

      //create dbdir if it was set automatically
      if (autoDbDir)
      {
         try
         {
            testPath(dbDir_, 0);
         }
         catch (DbErrorMsg&)
         {
#ifdef _WIN32
            CreateDirectory(dbDir_.c_str(), NULL);
#else
            mkdir(dbDir_.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
#endif
         }
      }

      //now for the regular test, let it throw if it fails
      testPath(dbDir_, 6);

      testPath(blkFileLocation_, 2);

      //fcgi port
      if (useCookie_ && !customFcgiPort_)
      {
         //no custom fcgi port was provided and the db was spawned with a
         //cookie file; the fcgi port will be randomized
         srand(time(0));
         while (1)
         {
            auto port = rand() % 15000 + 49150;
            stringstream portss;
            portss << port;

            if (!testConnection("127.0.0.1", portss.str()))
            {
               fcgiPort_ = portss.str();
               break;
            }
         }
      }
   }
   catch (...)
   {
      exceptionPtr_ = current_exception();
   }
}
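// The exceptionPtr_ member makes parseArgs a "capture now, surface later" API;
// a hedged sketch of a call site (the direct member access is illustrative, the
// real class may expose the stored exception differently):
//
//     config.parseArgs(argc, argv);
//     if (config.exceptionPtr_)
//        std::rethrow_exception(config.exceptionPtr_);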