Example #1
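A model-class constructor, apparently for a Kalman-style filter: it defines a scalar AR(1) state-space model with state transition F = 0.8, process-noise variance Qw = 0.1, an identity observation with noise variance Qv = 1, initial state estimate X0 = 10, and zero initial covariance R0.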
AR_Process::AR_Process(void)
{
    // State equation
    F.resize(1,1);
    F(0,0) = 0.8;

    f.resize(1);
    f(0) = 0.;

    G.resize(1,1);
    G.identity();

    // Process-noise covariance
    Qw.resize(1);
    Qw(0,0) = 0.1;

    // Observation equation
    H.resize(1,1);
    H(0,0) = 1.;

    h.resize(1);
    h(0) = 0.;

    // Observation-noise covariance
    Qv.resize(1);
    Qv(0,0) = 1.;

    // Initial state estimate and covariance
    X0.resize(1);
    X0(0) = 10.;

    R0.resize(1);
    R0.zero();
}
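None of the filter machinery is shown here; as a minimal, self-contained sketch of the process this constructor describes (the simulation loop and all names below are illustrative, not part of the original class):

#include <cmath>
#include <cstdio>
#include <random>

int main()
{
    const double F = 0.8, Qw = 0.1, Qv = 1.;   // parameters from the constructor
    double x = 10.;                            // initial state X0

    std::mt19937 gen(42);
    std::normal_distribution<double> w(0., std::sqrt(Qw));  // process noise
    std::normal_distribution<double> v(0., std::sqrt(Qv));  // observation noise

    for (int k = 0; k < 50; k++)
    {
        x = F * x + w(gen);     // state equation:  x(k+1) = F x(k) + w(k)
        double y = x + v(gen);  // observation:     y(k) = x(k) + v(k)
        printf("%d\t%f\t%f\n", k, x, y);
    }
    return 0;
}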
Example #2
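This and examples #5 and #6 below appear to belong to one distributed rankSVM solver in the liblinear style: the data are partitioned across MPI ranks (l = prob->l examples locally, global_l in total), OpenMP parallelizes the local loops, and Qv(x, y) here is a matrix-vector product writing the local rows of Q·x into y, not the noise covariance of the filter examples. grad assembles the gradient from quantities cached by fun (example #6).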
void l2r_rank_fun::grad(double *w, double *g)
{
	int i;
	int l=prob->l;
	double *lg = new double[l];
	double *tmp_vector = new double[l];
	double *gtmp_vector = new double[global_l];

	// Local rows of A^T A Q b - A^T e (both cached by fun())
#pragma omp parallel for default(shared) private(i)
	for (i=0;i<l;i++)
	{
		tmp_vector[i] = ATAQb[i] - ATe[i];
	}

	// Gather to full length, multiply by Q, then gather the product
	MPI_Allgatherv((void*)tmp_vector, l, MPI_DOUBLE, (void*)gtmp_vector, local_l, start_ptr, MPI_DOUBLE, MPI_COMM_WORLD);
	Qv(gtmp_vector, lg);
	MPI_Allgatherv((void*)lg, l, MPI_DOUBLE, (void*)g, local_l, start_ptr, MPI_DOUBLE, MPI_COMM_WORLD);

	// g = Q*w + 2C * Q(A^T A Q b - A^T e); gz = Q*w was gathered in fun()
#pragma omp parallel for default(shared) private(i)
	for(i=0;i<global_l;i++)
	{
		g[i] = gz[i] + 2*C*g[i];
	}

	delete[] tmp_vector;
	delete[] gtmp_vector;
	delete[] lg;
}
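All three collective calls above follow the same slice-owning pattern: each rank computes its contiguous piece of a global vector and MPI_Allgatherv assembles the full vector on every rank. A minimal, self-contained illustration of that pattern (local_l and start_ptr mirror the example; everything else is hypothetical):

#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char **argv)
{
	MPI_Init(&argc, &argv);
	int rank, size;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);

	const int global_l = 10;
	// Split global_l rows as evenly as possible across ranks
	std::vector<int> local_l(size), start_ptr(size);
	for (int r = 0, offset = 0; r < size; r++)
	{
		local_l[r] = global_l / size + (r < global_l % size ? 1 : 0);
		start_ptr[r] = offset;
		offset += local_l[r];
	}

	// Each rank fills only its own slice
	int l = local_l[rank];
	std::vector<double> local(l);
	for (int i = 0; i < l; i++)
		local[i] = 2.0 * (start_ptr[rank] + i);   // some local computation

	// Gather every slice into the full vector on all ranks
	std::vector<double> global(global_l);
	MPI_Allgatherv(local.data(), l, MPI_DOUBLE,
	               global.data(), local_l.data(), start_ptr.data(),
	               MPI_DOUBLE, MPI_COMM_WORLD);

	if (rank == 0)
		for (int i = 0; i < global_l; i++)
			printf("global[%d] = %g\n", i, global[i]);

	MPI_Finalize();
	return 0;
}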
Example #3
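A companion model class to example #1, apparently a Van der Pol oscillator for a nonlinear filter: damping parameter lambda = 3, scalar process- and observation-noise variances Qw = 1 and Qv = 0.1, a two-dimensional initial state, and sample time Ts = 0.1.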
Van_Der_Pol::Van_Der_Pol(void)
{
    lambda = 3.;

    // Process- and observation-noise covariances
    Qw.resize(1);
    Qw(0,0) = 1.;
    Qv.resize(1);
    Qv(0,0) = 0.1;

    // Initial state estimate and covariance
    X0.resize(2);
    X0(0) = 0.5;
    X0(1) = 0.5;
    R0.resize(2);
    R0.zero();
    R0(0,0) = 0.;
    R0(1,1) = .1;

    // Sample time
    Ts = .1;
}
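The dynamics function itself is not part of this snippet; as a sketch under that assumption, the classic Van der Pol equation x'' - lambda*(1 - x^2)*x' + x = 0, written as a first-order system and stepped with forward Euler at the sample time Ts above, would look roughly like this (all names illustrative):

#include <cstdio>

int main()
{
	const double lambda = 3., Ts = .1;
	double x0 = 0.5, x1 = 0.5;   // position and velocity, matching X0

	for (int k = 0; k < 100; k++)
	{
		double dx0 = x1;
		double dx1 = lambda * (1. - x0 * x0) * x1 - x0;
		x0 += Ts * dx0;
		x1 += Ts * dx1;
		printf("%g\t%g\n", x0, x1);
	}
	return 0;
}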
Example #4
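A quantity-of-interest derivative from the GRINS/libMesh finite element framework: on every element of the selected subdomains it assembles, into the subvectors Qu and Qv, the derivative of the vorticity QoI with respect to the local u- and v-velocity degrees of freedom. Note that Qv here is a DOF subvector, unrelated to the covariance Qv of examples #1 and #3.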
  void Vorticity::element_qoi_derivative( libMesh::DiffContext &context, const libMesh::QoISet & )
  {
    libMesh::FEMContext &c = libmesh_cast_ref<libMesh::FEMContext&>(context);

    if( _subdomain_ids.find( (c.elem)->subdomain_id() ) != _subdomain_ids.end() )
      {
	// Element
	libMesh::FEBase* element_fe;
	c.get_element_fe<libMesh::Real>(this->_u_var, element_fe);

	// Jacobian times integration weights
	const std::vector<libMesh::Real> &JxW = element_fe->get_JxW();

	// Grad of basis functions
	const std::vector<std::vector<libMesh::RealGradient> >& du_phi =
	  c.element_fe_var[_u_var]->get_dphi();
	const std::vector<std::vector<libMesh::RealGradient> >& dv_phi =
	  c.element_fe_var[_v_var]->get_dphi();

	// Local DOF count (variable 0; u and v are assumed to share the basis)
	// and quadrature point count
	const unsigned int n_T_dofs = c.dof_indices_var[0].size();
	unsigned int n_qpoints = (c.get_element_qrule())->n_points();

	// Warning: we assume here that vorticity is the only QoI!
	// This should be consistent with the assertion in grins_mesh_adaptive_solver.C
	/*! \todo Need to generalize this to the multiple QoI case */
	libMesh::DenseSubVector<Number> &Qu = *c.elem_qoi_subderivatives[0][0];
	libMesh::DenseSubVector<Number> &Qv = *c.elem_qoi_subderivatives[0][1];

	// Integration loop
	for( unsigned int qp = 0; qp != n_qpoints; qp++ )
	  {
	    for( unsigned int i = 0; i != n_T_dofs; i++ )
	      {
		Qu(i) -= dv_phi[i][qp](1) * JxW[qp];
		Qv(i) += du_phi[i][qp](0) * JxW[qp];
	      }
	  }
      }

    return;
  }
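For reference: with vorticity defined as w = dv/dx - du/dy, the element QoI is the integral of w, so its derivative with respect to a u coefficient is -integral(dphi_i/dy * JxW) and with respect to a v coefficient is +integral(dphi_i/dx * JxW). Those are exactly the two accumulations in the quadrature loop above: component (1) of a basis gradient is the y-derivative, component (0) is the x-derivative, and u and v share the basis phi.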
Example #5
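The matching Hessian-vector product for the rankSVM objective of examples #2 and #6: given a direction s, it computes (in the code's own naming) Hs = Q s + 2C * Q(A^T A Q s) without ever forming the Hessian, reusing the sorted order pi from fun and the same order-statistic trees to avoid an explicit loop over all comparison pairs.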
void l2r_rank_fun::Hv(double *s, double *Hs)
{
	int i,j,k;
	int l=prob->l;
	double *wa = new double[global_l];
	double *lHs = new double[l];
	double *lwa = new double[l];
	selectiontree *T;
	double *tmp_vector = new double[l];
	double *gtmp_vector = new double[global_l];
	int tmp_value;
	double gamma_tmp;

	// wa = Q*s, gathered so every rank holds the full product
	Qv(s, lwa);
	MPI_Allgatherv((void*)lwa, l, MPI_DOUBLE, (void*)wa, local_l, start_ptr, MPI_DOUBLE, MPI_COMM_WORLD);

	// Accumulate the pairwise terms per subset with order-statistic
	// trees instead of an explicit loop over all comparison pairs;
	// tmp_value is per-thread scratch, hence private
#pragma omp parallel for private(i,j,k,T,tmp_value,gamma_tmp)
	for (i=0;i<nr_subset;i++)
	{
		T=new selectiontree(nr_class[i]);
		k=0;
		for (j=0;j<count[i];j++)
		{
			while (k<count[i]&&(1-pi[i][j].value+pi[i][k].value>0))
			{
				T->insert_node(int_y[pi[i][k].id],lwa[pi[i][k].id]);
				k++;
			}
			T->count_smaller(int_y[pi[i][j].id],&tmp_value, &tmp_vector[pi[i][j].id]);
		}
		delete T;

		k=count[i]-1;
		T = new selectiontree(nr_class[i]);
		for (j=count[i]-1;j>=0;j--)
		{
			while (k>=0&&(1+pi[i][j].value-pi[i][k].value>0))
			{
				T->insert_node(int_y[pi[i][k].id],lwa[pi[i][k].id]);
				k--;
			}
			T->count_larger(int_y[pi[i][j].id],&tmp_value, &gamma_tmp);
			tmp_vector[pi[i][j].id] += gamma_tmp;
		}
		delete T;
	}

	// Local rows of A^T A Q s
#pragma omp parallel for default(shared) private(i)
	for (i=0;i<l;i++)
	{
		tmp_vector[i]=wa[i+start_ptr[current_rank]]*((double)l_plus[i]+(double)l_minus[i])-tmp_vector[i];
	}

	// Hs = Q*s + 2C * Q(A^T A Q s)
	MPI_Allgatherv((void*)tmp_vector, l, MPI_DOUBLE, (void*)gtmp_vector, local_l, start_ptr, MPI_DOUBLE, MPI_COMM_WORLD);
	Qv(gtmp_vector, lHs);
	MPI_Allgatherv((void*)lHs, l, MPI_DOUBLE, (void*)Hs, local_l, start_ptr, MPI_DOUBLE, MPI_COMM_WORLD);

#pragma omp parallel for default(shared) private(i)
	for(i=0;i<global_l;i++)
	{
		Hs[i] = wa[i] + 2*C*Hs[i];
	}

	delete[] wa;
	delete[] lwa;
	delete[] tmp_vector;
	delete[] gtmp_vector;
	delete[] lHs;
}
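fun, grad, and Hv are exactly the callbacks a truncated-Newton optimizer needs: with Hessian-vector products available, the Newton direction can be found by conjugate gradient without forming H. A minimal, hypothetical CG loop over such a callback (a sketch, not the solver these examples actually ship with):

#include <cmath>
#include <functional>
#include <vector>

// Solve H d = -g approximately, given only the map v -> H*v
std::vector<double> cg_newton_step(
	const std::vector<double> &g,
	const std::function<void(const double*, double*)> &Hv,
	int max_iter = 50, double tol = 1e-8)
{
	size_t n = g.size();
	std::vector<double> d(n, 0.), r(n), s(n), Hs(n);
	for (size_t i = 0; i < n; i++) r[i] = -g[i];   // residual of d = 0
	s = r;
	double rr = 0.;
	for (size_t i = 0; i < n; i++) rr += r[i]*r[i];

	for (int it = 0; it < max_iter && std::sqrt(rr) > tol; it++)
	{
		Hv(s.data(), Hs.data());
		double sHs = 0.;
		for (size_t i = 0; i < n; i++) sHs += s[i]*Hs[i];
		double alpha = rr / sHs;                   // step along direction s
		for (size_t i = 0; i < n; i++) { d[i] += alpha*s[i]; r[i] -= alpha*Hs[i]; }
		double rr_new = 0.;
		for (size_t i = 0; i < n; i++) rr_new += r[i]*r[i];
		double beta = rr_new / rr;
		rr = rr_new;
		for (size_t i = 0; i < n; i++) s[i] = r[i] + beta*s[i];
	}
	return d;
}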
Example #6
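The objective evaluation for the same solver. It forms z = Q*w, gathers the full vector into gz, sorts each subset by predicted value, and uses order-statistic trees to count, for every example, the margin-violating comparisons (l_plus, l_minus) and their accumulated values (gamma_plus, gamma_minus); ATe and ATAQb are cached here for the later grad call.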
double l2r_rank_fun::fun(double *w) // w has length global_l
{
	int i,j,k;
	double f = 0.0;
	double reg = 0.0;
	int l=prob->l;
	selectiontree *T;
	// z = Q*w locally; gather into gz so every rank holds the full vector
	Qv(w,z);
	MPI_Allgatherv((void*)z, l, MPI_DOUBLE, (void*)gz, local_l, start_ptr, MPI_DOUBLE, MPI_COMM_WORLD);

	// Sort each subset by predicted value, then sweep it twice with
	// order-statistic trees to count margin-violating comparisons
#pragma omp parallel for default(shared) private(i,j,k,T)
	for (i=0;i<nr_subset;i++)
	{
		for (j=0;j<count[i];j++)
		{
			pi[i][j].id= perm[j+start[i]];
			pi[i][j].value = z[perm[j+start[i]]];
		}
		qsort(pi[i], count[i], sizeof(id_and_value), compare_id_and_value);

		T=new selectiontree(nr_class[i]);
		k=0;
		for (j=0;j<count[i];j++)
		{
			while (k<count[i]&&(1-pi[i][j].value+pi[i][k].value>0))
			{
				T->insert_node(int_y[pi[i][k].id],pi[i][k].value);

				k++;
			}
			T->count_smaller(int_y[pi[i][j].id],&l_minus[pi[i][j].id], &gamma_minus[pi[i][j].id]);
		}
		delete T;
		k=count[i]-1;

		T = new selectiontree(nr_class[i]);
		for (j=count[i]-1;j>=0;j--)
		{
			while (k>=0&&(1+pi[i][j].value-pi[i][k].value>0))
			{
				T->insert_node(int_y[pi[i][k].id],pi[i][k].value);
				k--;
			}
			T->count_larger(int_y[pi[i][j].id],&l_plus[pi[i][j].id], &gamma_plus[pi[i][j].id]);
		}
		delete T;
	}

	// f accumulates w^T Q w (halved below); the reduction is left disabled
//#pragma omp parallel for default(shared) private(i) reduction(+:f) schedule(dynamic)
	for(i=0;i<global_l;i++)
	{
		f += w[i]*gz[i];
	}

	// Cache A^T e and the local rows of A^T A Q b for grad()
#pragma omp parallel for default(shared) private(i)
	for (i=0;i<l;i++)
	{
		ATe[i] = l_minus[i] - l_plus[i];
		ATAQb[i] = (l_plus[i]+l_minus[i])*gz[i+start_ptr[current_rank]]-gamma_plus[i]-gamma_minus[i];
	}

	// Local contribution to the loss term (parallel reduction left disabled)
//#pragma omp parallel for default(shared) private(i) reduction(+:reg) schedule(runtime)
	for (int i=0;i<l;i++)
	{
		reg += C*(gz[i+start_ptr[current_rank]]*(ATAQb[i] - 2 * ATe[i]) + l_minus[i]);
	}

	mpi_allreduce(&reg, 1, MPI_DOUBLE, MPI_SUM);
	f /= 2.0;
	f += reg;
	si->obj = f;
	return(f);
}
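Putting the three pieces together: fun returns what the variable names suggest is f(w) = (1/2) w^T Q w + C * (sum of squared pairwise hinge terms), grad returns its gradient, and Hv supplies the products that drive an outer truncated-Newton iteration such as the CG sketch after example #5. The order-statistic trees are what keep each evaluation near O(l log l) per subset rather than quadratic in the number of examples.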