Пример #1
0
// Adds a transaction with the given lock time to the wallet and returns the
// nTimeSmart timestamp the wallet computed for it.
//
// lockTime  - value stored in the transaction's nLockTime (also makes each
//             call produce a distinct tx hash).
// mockTime  - mock system-clock value installed via SetMockTime() before the
//             transaction is added.
// blockTime - if > 0, a dummy block index with this nTime is created and the
//             transaction is marked as confirmed in that block; otherwise the
//             transaction is added unconfirmed.
static int64_t AddTx(CWallet& wallet, uint32_t lockTime, int64_t mockTime, int64_t blockTime)
{
    CMutableTransaction tx;
    tx.nLockTime = lockTime;
    SetMockTime(mockTime);
    CBlockIndex* block = nullptr;
    if (blockTime > 0) {
        LOCK(cs_main);
        // Register a dummy block index under a random hash. The map node owns
        // the uint256 key, so phashBlock can safely point at it.
        auto inserted = mapBlockIndex.emplace(GetRandHash(), new CBlockIndex);
        assert(inserted.second);
        const uint256& hash = inserted.first->first;
        block = inserted.first->second;
        block->nTime = blockTime;
        block->phashBlock = &hash;
    }

    CWalletTx wtx(&wallet, MakeTransactionRef(tx));
    if (block) {
        // Record the transaction as confirmed at position 0 of the dummy block.
        wtx.SetMerkleBranch(block, 0);
    }
    {
        LOCK(cs_main);
        wallet.AddToWallet(wtx);
    }
    LOCK(wallet.cs_wallet);
    // AddToWallet computed nTimeSmart; read it back from the wallet's copy.
    return wallet.mapWallet.at(wtx.GetHash()).nTimeSmart;
}
Пример #2
0
// Returns the model probability P(label | sample) under the maximum-entropy
// (multinomial logistic) model: exp(w_label . x) / sum_c exp(w_c . x).
//
// sample - feature vector, given as sparse (index, value) pairs.
// label  - class whose probability is requested; must be in
//          [0, weights_.size()).
//
// Bug fix: the original computed the numerator from sample->label and never
// used the 'label' argument beyond the assert, so the requested class was
// ignored. The numerator now uses 'label' as the signature promises.
// The exponentials are also shifted by the maximum linear score (log-sum-exp
// trick) to avoid overflow; the returned ratio is mathematically unchanged.
double	MlMaximumEntropyModel::calcScoreForClass(const MlSample* sample, int label) const
{
	assert( label>=0 && label < static_cast<int>(weights_.size()));

	// Linear score w_c . x for every class (sparse dot product).
	vector<double> wtx(weights_.size(),0.0);
	for (size_t c=0; c<weights_.size(); c++)
		for (size_t i=0; i<sample->pairs.size(); i++)
			wtx[c] += static_cast<double>(weights_[c][sample->pairs[i].index] * sample->pairs[i].value);

	// Shift by the max score before exponentiating so exp() cannot overflow.
	// (wtx is non-empty: the assert guarantees weights_.size() > 0.)
	double maxWtx = wtx[0];
	for (size_t c=1; c<wtx.size(); c++)
		if (wtx[c] > maxWtx)
			maxWtx = wtx[c];

	double numerator = exp(wtx[static_cast<size_t>(label)] - maxWtx);
	double Z=0.0;
	for (size_t c=0; c<wtx.size(); c++)
		Z+=exp(wtx[c] - maxWtx);

	return (numerator/Z);
}
Пример #3
0
// Check that GetImmatureCredit() returns a newly calculated value instead of
// the cached value after a MarkDirty() call.
//
// This is a regression test written to verify a bugfix for the immature credit
// function. Similar tests probably should be written for the other credit and
// debit functions.
BOOST_FIXTURE_TEST_CASE(coin_mark_dirty_immature_credit, TestChain100Setup)
{
    CWallet wallet;
    // Wrap the fixture's most recent coinbase transaction and mark it as
    // confirmed in the current tip so it counts as immature coinbase credit.
    CWalletTx wtx(&wallet, MakeTransactionRef(coinbaseTxns.back()));
    LOCK2(cs_main, wallet.cs_wallet);
    wtx.hashBlock = chainActive.Tip()->GetBlockHash();
    wtx.nIndex = 0;

    // Call GetImmatureCredit() once before adding the key to the wallet to
    // cache the current immature credit amount, which is 0.
    BOOST_CHECK_EQUAL(wtx.GetImmatureCredit(), 0);

    // Invalidate the cached value, add the key, and make sure a new immature
    // credit amount is calculated.
    wtx.MarkDirty();
    wallet.AddKeyPubKey(coinbaseKey, coinbaseKey.GetPubKey());
    BOOST_CHECK_EQUAL(wtx.GetImmatureCredit(), 50*COIN);
}
Пример #4
0
void MlMaximumEntropyModel::calcScores(const MlSample* sample, vector<float>& scores) const
{
	vector<double> wtx(weights_.size(),0.0);
	for (size_t c=0; c<weights_.size(); c++)
		for (size_t i=0; i<sample->pairs.size(); i++)
			wtx[c] += static_cast<double>(weights_[c][sample->pairs[i].index] * sample->pairs[i].value);

	vector<double> e(weights_.size());
	double Z=0.0;
	for (size_t c=0; c<weights_.size(); c++)
	{
		e[c]=exp(wtx[c]);
		Z+=e[c];
	}
	
	scores.resize(weights_.size());
	for (size_t c=0; c<weights_.size(); c++)
		scores[c]=e[c]/Z;
}
Пример #5
0
// Check that GetImmatureCredit() returns a newly calculated value instead of
// the cached value after a MarkDirty() call.
//
// This is a regression test written to verify a bugfix for the immature credit
// function. Similar tests probably should be written for the other credit and
// debit functions.
BOOST_FIXTURE_TEST_CASE(coin_mark_dirty_immature_credit, TestChain100Setup)
{
    auto chain = interfaces::MakeChain();
    CWallet wallet(*chain, WalletLocation(), WalletDatabase::CreateDummy());
    // Wrap the fixture's most recent coinbase transaction and mark it as
    // confirmed in the current tip so it counts as immature coinbase credit.
    CWalletTx wtx(&wallet, m_coinbase_txns.back());
    auto locked_chain = chain->lock();
    LOCK(wallet.cs_wallet);
    wtx.hashBlock = chainActive.Tip()->GetBlockHash();
    wtx.nIndex = 0;

    // Call GetImmatureCredit() once before adding the key to the wallet to
    // cache the current immature credit amount, which is 0.
    BOOST_CHECK_EQUAL(wtx.GetImmatureCredit(*locked_chain), 0);

    // Invalidate the cached value, add the key, and make sure a new immature
    // credit amount is calculated.
    wtx.MarkDirty();
    wallet.AddKeyPubKey(coinbaseKey, coinbaseKey.GetPubKey());
    BOOST_CHECK_EQUAL(wtx.GetImmatureCredit(*locked_chain), 50*COIN);
}
Пример #6
0
// Test helper: appends a spendable COutput worth nValue to the file-level
// vCoins list (backed by a CWalletTx kept alive in wtxn).
//
// nValue    - value of the created output.
// nAge      - confirmation depth reported for the output (default ~1 day).
// fIsFromMe - if true, fake the "from me" state by caching a non-zero debit.
// nInput    - vout index at which the value is placed (earlier outputs are 0).
static void add_coin(const CAmount& nValue, int nAge = 6*24, bool fIsFromMe = false, int nInput=0)
{
    static int nextLockTime = 0;
    CMutableTransaction tx;
    tx.nLockTime = nextLockTime++;        // so all transactions get different hashes
    tx.vout.resize(nInput+1);
    tx.vout[nInput].nValue = nValue;
    if (fIsFromMe) {
        // IsFromMe() returns (GetDebit() > 0), and GetDebit() is 0 if vin.empty(),
        // so stop vin being empty, and cache a non-zero Debit to fake out IsFromMe()
        tx.vin.resize(1);
    }
    std::unique_ptr<CWalletTx> wtx(new CWalletTx(&testWallet, MakeTransactionRef(std::move(tx))));
    if (fIsFromMe)
    {
        // Pre-populate the debit cache so GetDebit() returns 1 without lookups.
        wtx->fDebitCached = true;
        wtx->nDebitCached = 1;
    }
    COutput output(wtx.get(), nInput, nAge, true /* spendable */, true /* solvable */, true /* safe */);
    vCoins.push_back(output);
    // Keep the CWalletTx alive for the lifetime of the test; COutput holds a
    // raw pointer into it.
    wtxn.emplace_back(std::move(wtx));
}
Пример #7
0
// Broadcasts the previously assembled/signed multisig transaction
// (multisigTx). With ENABLE_WALLET the wallet's CommitTransaction() performs
// acceptance and relay; otherwise the transaction is pushed into the mempool
// directly and relayed. On success the commit button is disabled and the txid
// is displayed; any failure is surfaced as text in the status label.
void MultisigDialog::commitMultisigTx()
{
    CMutableTransaction tx(multisigTx);
    try{
#ifdef ENABLE_WALLET
        CWalletTx wtx(pwalletMain, tx);
        CReserveKey keyChange(pwalletMain);
        if (!pwalletMain->CommitTransaction(wtx, keyChange))
            throw runtime_error(string("Transaction rejected - Failed to commit"));
#else
        uint256 hashTx = tx.GetHash();
        CCoinsViewCache& view = *pcoinsTip;
        const CCoins* existingCoins = view.AccessCoins(hashTx);
        bool fOverrideFees = false;
        bool fHaveMempool = mempool.exists(hashTx);
        // nHeight < 1000000000 distinguishes confirmed coins from
        // mempool-height sentinels, i.e. "already in the chain".
        bool fHaveChain = existingCoins && existingCoins->nHeight < 1000000000;

        if (!fHaveMempool && !fHaveChain) {
            // push to local node and sync with wallets
            CValidationState state;
            if (!AcceptToMemoryPool(mempool, state, tx, false, NULL, !fOverrideFees)) {
                if (state.IsInvalid())
                    throw runtime_error(strprintf("Transaction rejected - %i: %s", state.GetRejectCode(), state.GetRejectReason()));
                else
                    throw runtime_error(string("Transaction rejected - ") + state.GetRejectReason());
            }
        } else if (fHaveChain) {
            throw runtime_error("transaction already in block chain");
        }
        RelayTransaction(tx);
#endif
        //disable commit if successfully committed
        ui->commitButton->setEnabled(false);
        ui->signButtonStatus->setText(strprintf("Transaction has been successfully published with transaction ID:\n %s", tx.GetHash().GetHex()).c_str());
    }catch(const runtime_error& e){
        ui->signButtonStatus->setText(e.what());
    }
}
Пример #8
0
// Trains the maximum-entropy model with limited-memory BFGS (the classic
// two-loop recursion over a rolling memory of gradient/weight differences),
// restarting the optimization up to numRestarts times when the line search
// fails. Keeps the best weights seen (by test error when a test set is
// provided, otherwise by training error) and installs them in weights_.
//
// params - training configuration: datasets, sample indices, lambda
//          (L2 regularization), L-BFGS memory size, reporting frequency, etc.
void MlMaximumEntropyModel::learnLMBFGS(MlTrainingContainer* params)
{
	params->initialize();

	const MlDataSet* trainingDataSet = params->getTrainingDataSet();
	const MlDataSet* testDataSet     = params->getTestDataSet();
	const vector<size_t>& trainingIdxs = params->getTrainingIdxs();
	const vector<size_t>& testIdxs	   = params->getTestIdxs();
	const size_t numSamples = trainingIdxs.size();
	const bool performTest = (testDataSet && testIdxs.size()>0);

	if (trainingDataSet->getNumClasess() < 2)
		error("learnLMBFGS accepts only datasets with 2 or more classes, yor data has ",
			trainingDataSet->getNumClasess());

	const double  lambda  = params->getLambda();
	const size_t memorySize = params->getLmBfgsMemorySize(); 
	const size_t reportFrequency = params->getVerboseLevel();
	const double perplexityDelta  = params->getPerplexityDelta();
	const size_t numClasses  = trainingDataSet->getNumClasess();
	const size_t numFeatures = trainingDataSet->getNumBasicFeatures(); // F
	const size_t numTraining = trainingIdxs.size();    // N

	const size_t numRestarts=3;

	// data structures used for training
	vector< vector<double> >& w = weights_; // class X features
	vector< vector<double> > wOld(numClasses, vector<double>(numFeatures,0.0)); // class X features
	vector< vector<double> > wtx(numClasses, vector<double>(numTraining, 0.0));  // class X samples
	vector< vector<double> > qtx(numClasses, vector<double>(numTraining, 0.0));  // class X samples
	vector< vector<double> > q(numClasses, vector<double>(numFeatures,0.0));       // class X features
	vector< vector<double> > g(numClasses, vector<double>(numFeatures,0.0));       // class X features
	vector< vector<double> > gOld(numClasses, vector<double>(numFeatures,0.0));  // class X features

	vector< vector<float> > trainingProbs(numClasses, vector<float>(numTraining));
	// NOTE(review): testProbs is sized by numTraining, not testIdxs.size() —
	// confirm this is intentional (it is not indexed anywhere in this function).
	vector< vector<float> > testProbs(numClasses, vector<float>(numTraining));
	vector< vector<double> > bestW(numClasses, vector<double>(numFeatures));

	// initialize weights
	if (params->getInputPath().length() > 1)
	{
		const string modelFile = params->getInputPath() + "_scr.txt";
		if (readModel(modelFile.c_str()))
			params->setIndClearWeights(false);
	}

	if (params->getIndClearWeights())
		weights_.clear();

	weights_.resize(numClasses, vector<double>(numFeatures,0.0));

	double previousPerplexity = MAX_FLOAT;
	float  bestTestError=1.0;
	size_t bestTestRound=0;
	float  bestTrainingError=1.0;
	size_t bestTrainingRound=0;

	bool terminateTraining = false;
	size_t totalRounds=0;
	size_t megaRound=0;
	for ( megaRound=0; megaRound<numRestarts; megaRound++)
	{
		// first round: take a steepest-descent step along the normalized
		// gradient before the L-BFGS memory has any curvature pairs.
		computeGradient(trainingDataSet, trainingIdxs, w, wtx, lambda, g);

		const double gtg = computeDotProduct(g,g);
		const double denominator = 1.0 / sqrt(gtg);
		for (size_t c=0; c<numClasses; c++)
			for (size_t i=0; i<numFeatures; i++)
				q[c][i]=g[c][i]*denominator;

		// qtx <- qTx
		for (size_t c=0; c<numClasses; c++)
			for (size_t i=0; i<numSamples; i++)
			{
				const MlSample& sample = trainingDataSet->getSample(trainingIdxs[i]);
				qtx[c][i]=computeDotProduct(q[c],sample.pairs);
			}

		// eta <- lineSearch(...)
		double eta = lineSearch(trainingDataSet, trainingIdxs, w, wtx, qtx, g, q, lambda);
		//cout << "eta = " << eta << endl;

		// update wtx <- wtx + eta*qtx
		for (size_t c=0; c<numClasses; c++)
			for (size_t i=0; i<wtx[c].size(); i++)
				wtx[c][i]+=eta*qtx[c][i];

		// update wOld<- w ; w <- w + eta *q ; gOld<-g
		for (size_t c=0; c<numClasses; c++)
		{
			memcpy(&wOld[c][0],&w[c][0],sizeof(double)*w[c].size());
			memcpy(&gOld[c][0],&g[c][0],sizeof(double)*g[c].size());
			for (size_t i=0; i<numFeatures; i++)
				w[c][i]+= eta*q[c][i];
		}


		// initialize memory: ring buffers holding the last memorySize
		// (u = gradient difference, d = weight difference, alpha = u.d) pairs.
		vector< vector< vector<double> > > memoryU(memorySize, vector< vector<double> >(numClasses));
		vector< vector< vector<double> > > memoryD(memorySize, vector< vector<double> >(numClasses));
		vector< double > memoryAlpha(memorySize);
		size_t nextMemPosition=0;
		size_t numMemPushes=0;
		
		// iterate until convergence
		size_t round=1;
		while (round<10000)
		{
			// compute errors and report round results
			{
				double trainingLogLikelihood=0.0, testLogLikelihood=0.0;
				const double trainingError = calcErrorRateWithLogLikelihood(trainingDataSet, trainingIdxs,
																		false, &trainingLogLikelihood);
				double testError=1.0;
				if (performTest)
					testError = calcErrorRateWithLogLikelihood(testDataSet, testIdxs, false, &testLogLikelihood);

				if (reportFrequency>0 && round % reportFrequency == 0)
				{
					cout << round << "\t" << scientific << setprecision(5) << trainingLogLikelihood << "\t" << fixed << setprecision(5) << trainingError;
					if (performTest)
						cout <<"\t" << scientific << testLogLikelihood << "\t" << fixed << setprecision(5)<< testError;
					cout << endl;
				}
				
				if (performTest)
				{
					if (testError<=bestTestError)
					{
						bestTestRound=round;
						bestTestError=testError;
						for (size_t c=0; c<numClasses; c++)
							memcpy(&bestW[c][0],&w[c][0],numFeatures*sizeof(double)); // copy weights
					}
				}
				
				if (trainingError<=bestTrainingError)
				{
					bestTrainingRound=round;
					bestTrainingError=trainingError;
					if (! performTest)
					{
						for (size_t c=0; c<numClasses; c++)
							memcpy(&bestW[c][0],&w[c][0],numFeatures*sizeof(double)); // copy weights
					}
				}		
			}

			// Train new round

			computeGradient(trainingDataSet, trainingIdxs, w, wtx, lambda, g);

			double alpha=0.0;
			double sigma=0.0;
			double utu=0.0;

			// write u=g'-g and d=w'-w onto memory, use them to compute alpha and sigma
			vector< vector<double> >& u = memoryU[nextMemPosition];
			vector< vector<double> >& d = memoryD[nextMemPosition];
			for (size_t c=0; c<numClasses; c++)
			{
				const size_t numFeatures = g[c].size();
				u[c].resize(numFeatures);
				d[c].resize(numFeatures);
				for (size_t i=0; i<numFeatures; i++)
				{
					const double gDiff = g[c][i]-gOld[c][i];
					const double wDiff = w[c][i]-wOld[c][i];
					u[c][i]=gDiff;
					d[c][i]=wDiff;
					alpha += gDiff*wDiff;
					utu += gDiff*gDiff;
				}
			}
			// sigma = (u.d)/(u.u): initial Hessian scaling for the two-loop pass.
			sigma = alpha / utu;
			memoryAlpha[nextMemPosition]=alpha;

			// update memory position (ring buffer wrap-around)
			nextMemPosition++;
			if (nextMemPosition == memorySize)
				nextMemPosition = 0;
			numMemPushes++;

			// q<-g
			for (size_t c=0; c<numClasses; c++)
				memcpy(&q[c][0],&g[c][0],g[c].size()*sizeof(double));
			
			// determine memory evaluation order 1..M (M is the newest)
			vector<size_t> memOrder;
			if (numMemPushes<=memorySize)
			{
				for (size_t i=0; i<numMemPushes; i++)
					memOrder.push_back(i);
			}
			else
			{
				for (size_t i=0; i<memorySize; i++)
					memOrder.push_back((i+nextMemPosition) % memorySize);
			}

			// First loop of the L-BFGS two-loop recursion: newest to oldest.
			vector<double> beta(memOrder.size(),0.0);
			for (int i=memOrder.size()-1; i>=0; i--)
			{
				const size_t m = memOrder[static_cast<size_t>(i)];
				const double alpha = memoryAlpha[m];
				
				const vector< vector<double> >& dM = memoryD[m];
				double& betaM = beta[m];

				// compute beta[m] = (memory_d[m] dot g)/alpha[m]
				for (size_t c=0; c<dM.size(); c++)
					for (size_t i=0; i<dM[c].size(); i++)
						betaM += dM[c][i]*g[c][i];
				betaM/=alpha;
				
				// q <- q - beta[m]*memory_u[m]
				const vector< vector<double> >& uM = memoryU[m]; 
				for (size_t c=0; c<q.size(); c++)
					for (size_t i=0; i<q[c].size(); i++)
						q[c][i] -= betaM * uM[c][i];

			}

			// q <- sigma*q
			for (size_t c=0; c<q.size(); c++)
				for (size_t i=0; i<q[c].size(); i++)
					q[c][i]*=sigma;


			// Second loop of the recursion: oldest to newest.
			for (size_t i=0; i<memOrder.size(); i++)
			{
				const size_t m = memOrder[static_cast<size_t>(i)];
				const vector< vector<double> >& uM = memoryU[m];
				const vector< vector<double> >& dM = memoryD[m]; 
				const double betaM = beta[m];
				const double oneOverAlpha = 1.0 / memoryAlpha[m];
				double umq = computeDotProduct(uM,q);
				for (size_t c=0; c<numClasses; c++)
					for (size_t j=0; j<q[c].size(); j++)
					{
						const double dq = dM[c][j] * (betaM - umq*oneOverAlpha);
						// NOTE(review): umq is updated inside the element loop,
						// so later elements see a running value — confirm this
						// deviation from the textbook recursion is intentional.
						umq += uM[c][j]*dq;
						q[c][j] += dq;
					}
		
			}

			// q<- -q  (turn the ascent direction into a descent direction)
			for (size_t c=0; c<numClasses; c++)
				for (size_t i=0; i<q[c].size(); i++)
					q[c][i]=-q[c][i];
			
			// qtx = q*X
			for (size_t i=0; i<trainingIdxs.size(); i++)
			{
				const MlSample& sample = trainingDataSet->getSample(trainingIdxs[i]);
				for (size_t c=0; c<numClasses; c++)
					qtx[c][i]=computeDotProduct(q[c],sample.pairs);
			}
			

			bool needToRestart=false;
			eta = lineSearch(trainingDataSet, trainingIdxs, w, wtx, qtx, g, q, lambda);
			if (eta<= 0.0)
			{
				// restart ? (line search failed to find an improving step)
				needToRestart = true;
			}

			// update wOld<- w ; w <- w + eta *q ; gOld<- g
			for (size_t c=0; c<numClasses; c++)
			{
				memcpy(&wOld[c][0],&w[c][0],sizeof(double)*w[c].size());
				memcpy(&gOld[c][0],&g[c][0],sizeof(double)*g[c].size());
				for (size_t i=0; i<numFeatures; i++)
					w[c][i]+= eta*q[c][i];
			}

			for (size_t c=0; c<numClasses; c++)
				for (size_t i=0; i<numSamples; i++)
					wtx[c][i]+=eta*qtx[c][i];

			round++;
			totalRounds++;
			if (terminateTraining || needToRestart)
				break;
		}
		
		if (terminateTraining)
			break;
	}

	if (! params->getIndHadInternalError())
	{
		params->setIndNormalTermination(true);
	}
	else
		cout << "Warning: encountered mathemtical error while training!" << endl;

	// Install the best weights observed during training.
	weights_ = bestW;

	cout << "W=" << endl;
	printVector(weights_);
	cout << endl;

	cout << "Terminated after " << totalRounds << " rounds (" << megaRound << " restarts)" << endl;
	cout << "Best training error  " << fixed << setprecision(8) << bestTrainingError << " (round " << bestTrainingRound << ")" << endl;
	if (performTest)
	cout << "Best test error      "  << bestTestError     << " (round " << bestTestRound << ")" << endl;

	indWasInitialized_ = true;

	//this->calcErrorRateWithPerplexity(trainingDataSet, trainingIdxs, true, NULL);
}