// Pre-order traversal that appends each node's value to ans[level],
// so ans[i] ends up holding the values of all nodes at depth i.
void mlr(TreeNode* root, int level) {
    if (!root) return;
    if (level >= (int)ans.size()) {
        ans.push_back(vector<int>());   // open a bucket for this new depth
    }
    ans[level].push_back(root->val);
    mlr(root->left, level + 1);
    mlr(root->right, level + 1);
}
Example #2
bool isValidBST(TreeNode *root) {
    // flag and mlr (see Example #3) are members of the surrounding class:
    // mlr clears flag as soon as it finds a BST violation.
    flag = true;
    int s, b;          // smallest and biggest value in the whole tree
    mlr(root, s, b);
    return flag;
}
Example #3
// Recursively validates the subtree rooted at `node` and reports, through the
// reference parameters, the smallest (s) and biggest (b) value it contains.
// The class member `flag` is set to false as soon as a BST violation is found.
void mlr(TreeNode *node, int &s, int &b)
{
	if (!node || flag == false)
		return;
	if (!node->left && !node->right)
	{
		// Leaf: the subtree's minimum and maximum are the node itself.
		s = b = node->val;
		return;
	}
	else
	{
		int ls, lb;    // min/max of the left subtree
		int rs, rb;    // min/max of the right subtree
		mlr(node->left, ls, lb);
		mlr(node->right, rs, rb);
		if (flag == false)
			return;
		if (node->left && node->right)
		{
			// The node must be greater than everything on its left
			// and smaller than everything on its right.
			if (node->val <= lb || node->val >= rs)
			{
				flag = false;
				return;
			}
			s = ls;
			b = rb;
		}
		else if (node->left)
		{
			if (node->val <= lb)
			{
				flag = false;
				return;
			}
			s = ls;
			b = node->val;
		}
		else
		{
			if (node->val >= rs)
			{
				flag = false;
				return;
			}
			s = node->val;
			b = rb;
		}
	}
}
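Examples #2 and #3 are only fragments: both read and write a member variable flag that is never declared in the snippets, and they assume the usual LeetCode-style TreeNode node type. A minimal sketch of the surrounding declarations (the class name Solution is an assumption, not part of the original examples) could look like this:

struct TreeNode {
    int val;
    TreeNode *left;
    TreeNode *right;
    TreeNode(int x) : val(x), left(nullptr), right(nullptr) {}
};

class Solution {
public:
    bool isValidBST(TreeNode *root);           // Example #2
private:
    void mlr(TreeNode *node, int &s, int &b);  // Example #3
    bool flag;                                 // cleared once a violation is found
};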
void FastOnlineSupervisedMStep<Scalar>::m_step(
    std::shared_ptr<parameters::Parameters> parameters
) {
    // Check whether we should actually perform the m_step
    if (docs_seen_so_far_ < minibatch_size_)
        return;

    docs_seen_so_far_ = 0;

    // Extract the parameters from the struct
    auto model = std::static_pointer_cast<parameters::SupervisedModelParameters<Scalar> >(parameters);
    MatrixX & beta = model->beta;
    MatrixX & eta = model->eta;

    // update the topic distributions
    // TODO: Change the update to something more formal like the online update
    //       of Hoffman et al.
    beta.array() = (
        beta_weight_ * beta.array() +
        (1-beta_weight_) * (b_.array().colwise() / b_.array().rowwise().sum())
    );

    // update the eta
    optimization::MultinomialLogisticRegression<Scalar> mlr(
        expected_z_bar_,
        y_,
        regularization_penalty_
    );
    mlr.gradient(eta, eta_gradient_);
    eta_velocity_ = eta_momentum_ * eta_velocity_ - eta_learning_rate_ * eta_gradient_;
    eta += eta_velocity_;

    this->get_event_dispatcher()->template dispatch<events::MaximizationProgressEvent<Scalar> >(
        -mlr.value(eta)  // the negated minimization objective is the log likelihood
    );
}
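The one non-obvious expression in m_step is the Eigen broadcast used for beta: dividing b_ column-wise by the vector of its row sums turns every row into a probability distribution, and beta is then pulled toward that normalized matrix by a convex combination weighted with beta_weight_. A minimal standalone sketch of the normalization step, with a made-up counts matrix standing in for b_, might look like this:

#include <Eigen/Dense>
#include <iostream>

int main() {
    // Illustrative only: one row per topic, one column per word.
    Eigen::MatrixXd counts(2, 3);
    counts << 1, 2, 1,
              3, 3, 6;

    // Same broadcast as in m_step: divide each row by its sum.
    Eigen::MatrixXd normalized(2, 3);
    normalized.array() = counts.array().colwise() / counts.array().rowwise().sum();

    std::cout << normalized << std::endl;  // each row now sums to 1
    return 0;
}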
vector<vector<int>> levelOrderBottom(TreeNode* root) {
    ans.clear();                      // ans: per-level buckets filled by the first mlr helper above
    mlr(root, 0);
    reverse(ans.begin(), ans.end());  // deepest level first
    return ans;
}
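A quick, hypothetical driver for levelOrderBottom and the first mlr helper above (it assumes the TreeNode sketch shown earlier and that ans, mlr and levelOrderBottom are members of the same Solution class, neither of which appears in the original snippet):

// Build the tree [3, 9, 20, null, null, 15, 7] and collect its levels bottom-up.
TreeNode *root = new TreeNode(3);
root->left = new TreeNode(9);
root->right = new TreeNode(20);
root->right->left = new TreeNode(15);
root->right->right = new TreeNode(7);

Solution sol;
vector<vector<int>> levels = sol.levelOrderBottom(root);
// levels == {{15, 7}, {9, 20}, {3}}  -- deepest level first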