Example #1
  void update_delta(const Expression& gradient, int index) {

    if (index >= _delta.size() || index >= _first_moment.size() || index >= _second_moment.size() || index >= _current_iteration.size()) {
      _delta.resize(index + 1);
      _first_moment.resize(index + 1);
      _second_moment.resize(index + 1);
      _current_iteration.resize(index + 1);
    }

    if (!_delta[index] || !_first_moment[index] || !_second_moment[index]) {
      _delta[index] = boost::make_shared<vector_t>(zeros<value_t>(gradient.count()));
      _first_moment[index] = boost::make_shared<vector_t>(zeros<value_t>(gradient.count()));
      _second_moment[index] = boost::make_shared<vector_t>(zeros<value_t>(gradient.count()));
      _current_iteration[index] = 0;
    }

    ++_current_iteration[index];

    // Perform Adam updates (first/second moment estimates with bias correction)
    *_first_moment[index] = get_beta1() * reshape(gradient, _delta[index]->size()) + (1.0 - get_beta1()) * *_first_moment[index];
    *_second_moment[index] = get_beta2() * reshape(gradient, _delta[index]->size()) * reshape(gradient, _delta[index]->size()) + (1.0 - get_beta2()) * *_second_moment[index];

    *_delta[index] = -get_alpha() * *_first_moment[index] / (value_t(1) - ::pow(value_t(1) - get_beta1(), _current_iteration[index])) /
        (sqrt(*_second_moment[index] / (value_t(1) - ::pow(value_t(1) - get_beta2(), _current_iteration[index]))) + get_epsilon());
  }
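Example #1 appears to implement the Adam update. Assuming get_beta1() and get_beta2() here play the role of 1 - \beta_1 and 1 - \beta_2 from the Adam paper, the code corresponds to:

    m_t = (1 - \beta_1) \, g_t + \beta_1 \, m_{t-1}
    v_t = (1 - \beta_2) \, g_t^2 + \beta_2 \, v_{t-1}
    \Delta_t = -\alpha \, \frac{m_t / (1 - \beta_1^t)}{\sqrt{v_t / (1 - \beta_2^t)} + \epsilon}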
Example #2
  void update_delta(const Expression& gradient, int index) {

    if (index >= _delta.size() || index >= _d2.size() || index >= _delta2.size()) {
      _delta.resize(index + 1);
      _delta2.resize(index + 1);
      _d2.resize(index + 1);
    }

    if (!_delta[index] || !_delta2[index] || !_d2[index]) {
      _delta[index] = boost::make_shared<vector_t>(zeros<value_t>(gradient.count()));
      _delta2[index] = boost::make_shared<vector_t>(zeros<value_t>(gradient.count()));
      _d2[index] = boost::make_shared<vector_t>(zeros<value_t>(gradient.count()));
    }

    // Perform adadelta updates
    *_d2[index] = get_decay_rate() * *_d2[index] + (1.0 - get_decay_rate()) * reshape(gradient, _d2[index]->size()) * reshape(gradient, _d2[index]->size());
    *_delta[index] = -sqrt(*_delta2[index] + get_epsilon()) / sqrt(*_d2[index] + get_epsilon()) * reshape(gradient, _d2[index]->size());
    *_delta2[index] = get_decay_rate() * *_delta2[index] + (1.0 - get_decay_rate()) * *_delta[index] * *_delta[index];
  }
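Example #2 follows the Adadelta rule of Zeiler (2012); with \rho = get_decay_rate() and \epsilon = get_epsilon(), the three update lines correspond to:

    E[g^2]_t        = \rho \, E[g^2]_{t-1} + (1 - \rho) \, g_t^2
    \Delta x_t      = -\frac{\sqrt{E[\Delta x^2]_{t-1} + \epsilon}}{\sqrt{E[g^2]_t + \epsilon}} \, g_t
    E[\Delta x^2]_t = \rho \, E[\Delta x^2]_{t-1} + (1 - \rho) \, \Delta x_t^2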
Example #3
  void update_delta(const Expression& gradient, int index) {

    if (index >= _deltas.size()) {
      _deltas.resize(index + 1);
    }

    if (!_deltas[index]) {
      _deltas[index] = boost::make_shared<vector_t>(zeros<value_t>(gradient.count()));
    }
  }
Example #4
  void update_delta(const Expression& gradient, int index) {

    if (index >= _delta.size()) {
      _delta.resize(index + 1);
    }

    if (!_delta[index]) {
      _delta[index] = boost::make_shared<vector_t>(zeros<value_t>(gradient.count()));
    }

    // Block-wise normalize the gradient
    vector_t norm_grad = reshape(gradient, _delta[index]->size()) / (sqrt(dot(reshape(gradient, _delta[index]->size()), reshape(gradient, _delta[index]->size()))) + get_epsilon());

    // Calculate running averages
    mg = get_decay_rate() * mg + (1.0 - get_decay_rate()) * norm_grad;

    gamma_nume_sqr = gamma_nume_sqr * (1 - 1 / taus_x_t) + (norm_grad - old_grad) * (old_grad - mg) * (norm_grad - old_grad) * (old_grad - mg) / taus_x_t;
    gamma_deno_sqr = gamma_deno_sqr * (1 - 1 / taus_x_t) + (mg - norm_grad) * (old_grad - mg) * (mg - norm_grad) * (old_grad - mg) / taus_x_t;
    _gamma = sqrt(gamma_nume_sqr) / (sqrt(gamma_deno_sqr) + get_epsilon());

    // Update delta with momentum and epsilon parameter
    *_delta[index] =  _gamma * mg;
  }
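Example #4 appears to be an adaptive-momentum update over block-normalized gradients (in the spirit of Adasecant-style methods). With \rho = get_decay_rate(), the normalization, running average, and final step read roughly:

    \hat{g}_t = g_t / (\lVert g_t \rVert_2 + \epsilon)
    \bar{g}_t = \rho \, \bar{g}_{t-1} + (1 - \rho) \, \hat{g}_t
    \Delta_t  = \gamma_t \, \bar{g}_t, \quad \gamma_t = \sqrt{\text{gamma\_nume\_sqr}} \,/\, (\sqrt{\text{gamma\_deno\_sqr}} + \epsilon)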
Example #5
int Olap::count(Query* query, WrapDataSource* dataExtractor) // TODO: remove 2nd parameter later
{

	BitStreamInfo* bitstring1;
	BitStreamInfo* bitstring2;
	BitStreamInfo* result;
	Expression* postfixExpression = query->getPostfixExpression();
	stack<Symbol*> operandStack;

	for (int i=0;i<postfixExpression->count();i++)
	{

		Symbol* inputSymbol = postfixExpression->getSymbolAt(i);

		if(inputSymbol->getType()==TYPEOPERAND||inputSymbol->getType()==TYPENOT)
		{
			operandStack.push(inputSymbol);
		}
		else
		{
			if (!operandStack.empty())
			{
				Operand* operand2 = dynamic_cast<Operand*>(operandStack.top());
				operandStack.pop();
				bitstring2 = dataExtractor->getBitString(operand2->getAttributeIndex(),operand2->getbitStringIndex());
				if (!operandStack.empty())
				{
					Symbol* possibleNot = operandStack.top();
					if (possibleNot->getType()==TYPENOT)
					{
						operandStack.pop();
						bitstring2=~(*bitstring2);
					}
				}
				if (!operandStack.empty())
				{
					Operand* operand1 = dynamic_cast<Operand*>(operandStack.top());
					operandStack.pop();
					bitstring1 = dataExtractor->getBitString(operand1->getAttributeIndex(),operand1->getbitStringIndex());
					if (!operandStack.empty())
					{
						Symbol* possibleNot = operandStack.top();
						if (possibleNot->getType()==TYPENOT)
						{
							operandStack.pop();
							bitstring1=~(*bitstring1);
						}
					}
				}
			}
			if (inputSymbol->getType()==TYPEAND)
			{
				result=(*bitstring1)&(*bitstring2);
			}
			else if (inputSymbol->getType()==TYPEOR)
			{
				result=(*bitstring1)|(*bitstring2);
			}
		}
	}
	while(!operandStack.empty())
	{
		Operand* operand = dynamic_cast<Operand*>(operandStack.top());
		operandStack.pop();
		result = dataExtractor->getBitString(operand->getAttributeIndex(),operand->getbitStringIndex());
		if (!operandStack.empty())
		{
			Symbol* possibleNot = operandStack.top();
			if (possibleNot->getType()==TYPENOT)
			{
				operandStack.pop();
				result=~(*result);
			}
		}
	}
	return result->count();
}
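Note that the loop above never pushes the result of an AND/OR back onto the stack, so queries with more than one binary operator will not chain correctly. Below is a minimal sketch of the usual fix: keep a stack of bit-string values and push every operator result back, treating NOT as a postfix unary operator. The types and accessors (Query, Expression, Symbol, Operand, BitStreamInfo, WrapDataSource) are assumed to behave as in the snippet above.

#include <stack>

// Sketch only: evaluates a postfix boolean expression over bitmap indexes by
// keeping a stack of bit-string values, so every operator result can be reused.
int evaluate_count(Query* query, WrapDataSource* dataExtractor)
{
	std::stack<BitStreamInfo*> values;
	Expression* postfix = query->getPostfixExpression();

	for (int i = 0; i < postfix->count(); i++)
	{
		Symbol* symbol = postfix->getSymbolAt(i);

		if (symbol->getType() == TYPEOPERAND)
		{
			// Operand: fetch its bit string and push it as a value
			Operand* operand = dynamic_cast<Operand*>(symbol);
			values.push(dataExtractor->getBitString(operand->getAttributeIndex(), operand->getbitStringIndex()));
		}
		else if (symbol->getType() == TYPENOT)
		{
			// Unary NOT: applies to the value on top of the stack
			BitStreamInfo* top = values.top(); values.pop();
			values.push(~(*top));
		}
		else // TYPEAND or TYPEOR: pop two values, push the combined result
		{
			BitStreamInfo* rhs = values.top(); values.pop();
			BitStreamInfo* lhs = values.top(); values.pop();
			values.push(symbol->getType() == TYPEAND ? (*lhs) & (*rhs) : (*lhs) | (*rhs));
		}
	}
	return values.top()->count(); // final value is the query result
}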
Example #6
  void update_delta(const Expression& gradient, int index) {

    if (index >= _delta.size() || index >= _old_grad.size() || index >= _g.size() || index >= _g2.size() ||
        index >= _h.size() || index >= _h2.size() || index >= _tau.size() || index >= _current_iteration.size())
    {
      _delta.resize(index + 1);
      _old_grad.resize(index + 1);
      _g.resize(index + 1);
      _g2.resize(index + 1);
      _h.resize(index + 1);
      _h2.resize(index + 1);
      _tau.resize(index + 1);
      _current_iteration.resize(index + 1);
    }

    if (!_delta[index] || !_old_grad[index] || !_g[index] || !_g2[index] || !_h[index] || !_h2[index] || !_tau[index]) {
      _delta[index] = boost::make_shared<vector_t>(zeros<value_t>(gradient.count()));
      _old_grad[index] = boost::make_shared<vector_t>(zeros<value_t>(gradient.count()));
      _g[index] = boost::make_shared<vector_t>(zeros<value_t>(gradient.count()));
      _g2[index] = boost::make_shared<vector_t>(zeros<value_t>(gradient.count()));
      _h[index] = boost::make_shared<vector_t>(zeros<value_t>(gradient.count()));
      _h2[index] = boost::make_shared<vector_t>(zeros<value_t>(gradient.count()));
      _tau[index] = boost::make_shared<vector_t>(ones<value_t>(gradient.count()));
      _current_iteration[index] = 0;
    }

    ++_current_iteration[index];

    // Detect outlier
//    if (abs(reshape(gradient, _delta[index]->size()) - *_g[index]) > 2 * sqrt(*_g2[index] - *_g[index] * *_g[index]) ||
//        abs(abs((reshape(gradient, _delta[index]->size()) - *_old_grad[index]) / (*_delta[index] + get_epsilon())) - *_h[index]) > 2 * sqrt(*_h2[index] - *_h[index] * *_h[index]))
//    {
//      *_tau[index] = *_tau[index] + 1.0;
//    }

    *_tau[index] = *_tau[index] + (abs(reshape(gradient, _delta[index]->size()) - *_g[index]) > 2 * sqrt(*_g2[index] - *_g[index] * *_g[index]));

    // Update moving averages
    *_g[index]  = (1.0 - 1.0 / *_tau[index]) * *_g[index]  + 1.0 / *_tau[index] * reshape(gradient, _delta[index]->size());
    *_g2[index] = (1.0 - 1.0 / *_tau[index]) * *_g2[index] + 1.0 / *_tau[index] * reshape(gradient, _delta[index]->size()) * reshape(gradient, _delta[index]->size());

    if (_current_iteration[index] > 1) {
      // Calculate h and do h updates
      // diag(H) = abs((reshape(gradient, _delta[index]->size()) - *_old_grad[index]) / (*_delta[index] + get_epsilon()));

      *_h[index]  = (1.0 - 1.0 / *_tau[index]) * *_h[index]  + 1.0 / *_tau[index] * abs((reshape(gradient, _delta[index]->size()) - *_old_grad[index]) / (*_delta[index] + get_epsilon()));
      *_h2[index] = (1.0 - 1.0 / *_tau[index]) * *_h2[index] + 1.0 / *_tau[index] * ((reshape(gradient, _delta[index]->size()) - *_old_grad[index]) / (*_delta[index] + get_epsilon())) * ((reshape(gradient, _delta[index]->size()) - *_old_grad[index]) / (*_delta[index] + get_epsilon()));

      // Initialization phase -> multiply with C where C = D/10
      if (_current_iteration[index] == 2) {
        *_g2[index] = *_g2[index] * get_c();
        *_h[index] = *_h[index] * get_c();
        *_h2[index] = *_h2[index] * get_c();
      }

      *_delta[index] = -*_h[index] * *_g[index] * *_g[index] / (*_h2[index] * *_g2[index] + get_epsilon()) * reshape(gradient, _delta[index]->size());
    } else {
      *_delta[index] = get_epsilon() * *_g[index];
    }

    *_tau[index] = (1.0 - *_g[index] * *_g[index] / (*_g2[index] + get_epsilon())) * *_tau[index] + 1;

    *_old_grad[index] = reshape(gradient, _delta[index]->size());
  }
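Example #6 resembles the vSGD scheme of Schaul et al. ("No More Pesky Learning Rates"): per-element running averages \bar{g} and \bar{v} = E[g^2], curvature estimates \bar{h} and \bar{h}_2, and an adaptive memory \tau. Up to the outlier handling and the initialization scaling by C, the update implemented above is roughly:

    \bar{g} \leftarrow (1 - 1/\tau)\,\bar{g} + (1/\tau)\,g, \qquad \bar{v} \leftarrow (1 - 1/\tau)\,\bar{v} + (1/\tau)\,g^2
    \Delta = -\,\frac{\bar{h}\,\bar{g}^2}{\bar{h}_2\,\bar{v} + \epsilon}\, g, \qquad \tau \leftarrow \Big(1 - \frac{\bar{g}^2}{\bar{v} + \epsilon}\Big)\,\tau + 1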