Code example #1
File: apf.c Project: arulk77/gpu.evrc
void    apf(short *in, short *coeff, short *out, long delayi, short alpha,
	    short beta, short u, short agc, short ltgain, short order, short length, short br)
{

	static int FirstTime = 1;

	static short FIRmem[ORDER];	/* FIR filter memory */
	static short IIRmem[ORDER];	/* IIR filter memory */
	static short last;
	static short Residual[ACBMemSize + SubFrameSize];	/* local residual */

	short   wcoef1[ORDER];
	short   wcoef2[ORDER];
	short   scratch[SubFrameSize];
	short   temp[SubFrameSize];
	short   mem[ORDER];
	long	sum1, sum2;
	long    gamma, APFgain;
	short   i, j, n, best;
	short	Stemp, shift1, shift2;
	long	Ltemp;


	/* initialization -- should be done in init routine for implementation */
	if (FirstTime)
	{
		FirstTime = 0;
		for (i = 0; i < ORDER; i++)
			FIRmem[i] = 0;
		for (i = 0; i < ORDER; i++)
			IIRmem[i] = 0;
		for (i = 0; i < ACBMemSize; i++)
			Residual[i] = 0;
		last = 0;
	}

	/* Compute weighted LPC coefficients */
	weight(wcoef1, coeff, alpha, order);
	weight(wcoef2, coeff, beta, order);


	/* Tilt speech  */

	/*...no tilt in non-voiced regions...*/
	for (i = 0, sum2 = 0; i < length - 1; i++)
		sum2 = L_mac(sum2, in[i], in[i + 1]);
	if (sum2 < 0)
		u = 0;		/*...no tilt...*/

	for (i = 0; i < length; i++)
	{
		scratch[i] = msu_r(L_deposit_h(in[i]), u, last);
		last = in[i];
	}

	/* Compute  residual */
	fir(scratch, scratch, wcoef1, FIRmem, order, length);

	for (i = 0; i < SubFrameSize ; i++)
	  Residual[ACBMemSize+i] = scratch[i];

	/* long term filtering */
	/* Find best integer delay around delayi */
	j = extract_h(L_add(delayi, 32768));
	sum1 = 0;
        shift1 = 1;
	best = j;
	for (i = Max(DMIN, j - 3); i <= Min(DMAX, j + 3); i++)
	{
                shift2 = 1;
		for (n = ACBMemSize, sum2 = 0; n < ACBMemSize + length; n++)
		{
			Ltemp = L_mult(Residual[n], Residual[n - i]);
			Ltemp = L_shr(Ltemp, shift2);
			sum2 = L_add(sum2, Ltemp);
			if (sum2 >= 0x40000000)
			{
				sum2 = L_shr(sum2, 1);
				shift2++;
			}
		}

                if( ((shift1 >= shift2) && (L_shr(sum2,sub(shift1,shift2)) > sum1))
                   || ((shift1 < shift2) && (sum2 > L_shr(sum1,sub(shift2,shift1)))))
		{
			sum1 = sum2;
			shift1 = shift2;
			best = i;
		}
	}

	/* Get beta for delayi */
	shift1 = 1;
	for (i = ACBMemSize, sum1 = 0; i < ACBMemSize + length; i++)
	{
		Ltemp = L_mult(Residual[i - best], Residual[i - best]);
		Ltemp = L_shr(Ltemp, shift1);
		sum1 = L_add(sum1, Ltemp);
		if (sum1 >= 0x40000000)
		{
			sum1 = L_shr(sum1, 1);
			shift1++;
		}
	}
	shift2 = 1;
	for (i = ACBMemSize, sum2 = 0; i < ACBMemSize + length; i++)
	{
		Ltemp = L_mult(Residual[i], Residual[i - best]);
		Ltemp = L_shr(Ltemp, shift2);
		sum2 = L_add(sum2, Ltemp);
		if (sum2 >= 0x40000000)
		{
			sum2 = L_shr(sum2, 1);
			shift2++;
		}
	}
	if (shift1 > shift2)
	{
		shift1 = sub(shift1, shift2);
		sum2 = L_shr(sum2, shift1);
	}
	else if (shift1 < shift2)
	{
		shift2 = sub(shift2, shift1);
		sum1 = L_shr(sum1, shift2);
	}

	if ((sum2 == 0) || (sum1 == 0) || (br == 1))
		for (i = 0; i < length; i++)
			temp[i] = Residual[i + ACBMemSize];
	else
	{
		if (sum2 >= sum1)
			gamma = 0x7fffffff;		/* Clip gamma at 1.0 */
		else if (sum2 < 0)
			gamma = 0;
		else
		{
			shift1 = norm_l(sum1);
			sum1 = L_shl(sum1, shift1);
			sum2 = L_shl(sum2, shift1);
			gamma = L_divide(sum2, sum1);
		}

		if (gamma < 0x40000000)
			for (i = 0; i < length; i++)
				temp[i] = Residual[i + ACBMemSize];
		else
		{
			/* Do actual filtering */
			for (i = 0; i < length; i++)
			{
				Ltemp = L_mpy_ls(gamma, ltgain);
				Ltemp = L_mpy_ls(Ltemp, Residual[ACBMemSize + i - best]);
				temp[i] = add(Residual[ACBMemSize + i], round(Ltemp));
			}
		}

	}


	/* iir short term filter - first run */
	for (i = 0; i < length; i++)
		scratch[i] = temp[i];
	for (i = 0; i < order; i++)
		mem[i] = IIRmem[i];
	iir(scratch, scratch, wcoef2, mem, order, length);


	/* Get filter gain */
	shift1 = 1;
	for (i = 0, sum1 = 0; i < length; i++)
	{
		Ltemp = L_mult(in[i], in[i]);
		Ltemp = L_shr(Ltemp, shift1);
		sum1 = L_add(sum1, Ltemp);
		if (sum1 >= 0x40000000)
		{
			sum1 = L_shr(sum1, 1);
			shift1++;
		}
	}
	shift2 = 1;
	for (i = 0, sum2 = 0; i < length; i++)
	{
		Ltemp = L_mult(scratch[i], scratch[i]);
		Ltemp = L_shr(Ltemp, shift2);
		sum2 = L_add(sum2, Ltemp);
		if (sum2 >= 0x40000000)
		{
			sum2 = L_shr(sum2, 1);
			shift2++;
		}
	}
	if (shift1 > shift2)
	{
		shift1 = sub(shift1, shift2);
		sum2 = L_shr(sum2, shift1);
	}
	else if (shift1 < shift2)
	{
		shift2 = sub(shift2, shift1);
		sum1 = L_shr(sum1, shift2);
	}

	if (sum2 != 0)
	{
		shift1 = norm_l(sum2);
		sum2 = L_shl(sum2, shift1);
		shift1 = sub(shift1, 2);	/* For (1. < APFgain < 2.) */
		sum1 = L_shl(sum1, shift1);
		Ltemp = L_divide(sum1, sum2);
		shift1 = norm_l(Ltemp);
		Ltemp = L_shl(Ltemp, shift1);
		Stemp = sqroot(Ltemp);
		if (shift1 & 1)
			APFgain = L_mult(0x5a82, Stemp);
		else
			APFgain = L_deposit_h(Stemp);
		shift1 = shr(shift1, 1);
		APFgain = L_shr(APFgain, shift1);

		/* Re-normalize the speech signal */
		for (i = 0; i < length; i++)
		{
			Ltemp = L_mpy_ls(APFgain, temp[i]);
			Ltemp = L_shl(Ltemp, 1);  /* For (1. < APFgain < 2.) */
			temp[i] = round(Ltemp);
		}
	}
	else
		APFgain = 0x40000000;


	/* iir short term filter - second run */
	iir(out, temp, wcoef2, IIRmem, order, length);

	/* Update residual buffer */
	for (i = 0; i < ACBMemSize; i++)
		Residual[i] = Residual[i + length];
}
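For readability, here is a floating-point sketch of the gain-matching (AGC) step computed above with sum1, sum2 and APFgain. It is an illustrative simplification assuming float arithmetic, not the fixed-point EVRC reference code, and the helper name apf_agc_sketch is hypothetical:

#include <math.h>

/* Illustrative only: scale the post-filtered signal so its energy matches the
 * energy of the input signal, which is the role of APFgain in the code above. */
static void apf_agc_sketch(const float *in, float *out, int length)
{
	double ein = 0.0, eout = 0.0;
	int i;
	for (i = 0; i < length; i++) {
		ein  += (double)in[i] * in[i];    /* input energy  (sum1 above)  */
		eout += (double)out[i] * out[i];  /* output energy (sum2 above)  */
	}
	if (eout > 0.0) {
		double gain = sqrt(ein / eout);   /* corresponds to APFgain      */
		for (i = 0; i < length; i++)
			out[i] = (float)(out[i] * gain);
	}
}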
Code example #2
 void initialize(const uint& nsecurity, const uint& nstrat = 1, const void* user_data = 0)
 {
     _nsecurity = nsecurity;
     weight() = Matrix::Constant(_nsecurity, nstrat, 0);
     signal = Matrix::Constant(_nsecurity, nstrat, 0);
 }
Code example #3
File: FontDescription.cpp Project: Jamesducque/mojo
FontTraits FontDescription::traits() const
{
    return FontTraits(style(), variant(), weight(), stretch());
}
Code example #4
 std::string TakagiSugenoTerm::toString() const {
     std::stringstream ss;
     ss << LinguisticTerm::toString();
     ss << "TakagiSugeno (" << value() << " " << weight() << ")";
     return ss.str();
 }
Code example #5
File: item.cpp Project: RexWolf163ru/Cataclysm-DDA
std::string item::info(bool showtext, std::vector<iteminfo> *dump)
{
 std::stringstream temp1, temp2;

 if( !is_null() )
 {
  dump->push_back(iteminfo("BASE", " Volume: ", "", int(volume()), "", false, true));
  dump->push_back(iteminfo("BASE", "    Weight: ", "", int(weight()), "", true, true));
  dump->push_back(iteminfo("BASE", " Bash: ", "", int(type->melee_dam), "", false));
  dump->push_back(iteminfo("BASE", (has_flag(IF_SPEAR) ? "  Pierce: " : "  Cut: "), "", int(type->melee_cut), "", false));
  dump->push_back(iteminfo("BASE", "  To-hit bonus: ", ((type->m_to_hit > 0) ? "+" : ""), int(type->m_to_hit), ""));
  dump->push_back(iteminfo("BASE", " Moves per attack: ", "", int(attack_time()), "", true, true));

 if (type->techniques != 0)
  for (int i = 1; i < NUM_TECHNIQUES; i++)
   if (type->techniques & mfb(i))
    dump->push_back(iteminfo("TECHNIQUE", " +",default_technique_name( technique_id(i) )));
 }

 if (is_food()) {
  it_comest* food = dynamic_cast<it_comest*>(type);

  dump->push_back(iteminfo("FOOD", " Nutrition: ", "", int(food->nutr)));
  dump->push_back(iteminfo("FOOD", " Quench: ", "", int(food->quench)));
  dump->push_back(iteminfo("FOOD", " Enjoyability: ", "", int(food->fun)));

 } else if (is_food_container()) {
 // added charge display for debugging
  it_comest* food = dynamic_cast<it_comest*>(contents[0].type);

  dump->push_back(iteminfo("FOOD", " Nutrition: ", "", int(food->nutr)));
  dump->push_back(iteminfo("FOOD", " Quench: ", "", int(food->quench)));
  dump->push_back(iteminfo("FOOD", " Enjoyability: ", "", int(food->fun)));
  dump->push_back(iteminfo("FOOD", " Portions: ", "", abs(int(contents[0].charges))));

 } else if (is_ammo()) {
  // added charge display for debugging
  it_ammo* ammo = dynamic_cast<it_ammo*>(type);

  dump->push_back(iteminfo("AMMO", " Type: ", ammo_name(ammo->type)));
  dump->push_back(iteminfo("AMMO", " Damage: ", "", int(ammo->damage)));
  dump->push_back(iteminfo("AMMO", " Armor-pierce: ", "", int(ammo->pierce)));
  dump->push_back(iteminfo("AMMO", " Range: ", "", int(ammo->range)));
  dump->push_back(iteminfo("AMMO", " Accuracy: ", "", int(100 - ammo->accuracy)));
  dump->push_back(iteminfo("AMMO", " Recoil: ", "", int(ammo->recoil), "", true, true));
  dump->push_back(iteminfo("AMMO", " Count: ", "", int(ammo->count)));

 } else if (is_ammo_container()) {
  it_ammo* ammo = dynamic_cast<it_ammo*>(contents[0].type);

  dump->push_back(iteminfo("AMMO", " Type: ", ammo_name(ammo->type)));
  dump->push_back(iteminfo("AMMO", " Damage: ", "", int(ammo->damage)));
  dump->push_back(iteminfo("AMMO", " Armor-pierce: ", "", int(ammo->pierce)));
  dump->push_back(iteminfo("AMMO", " Range: ", "", int(ammo->range)));
  dump->push_back(iteminfo("AMMO", " Accuracy: ", "", int(100 - ammo->accuracy)));
  dump->push_back(iteminfo("AMMO", " Recoil: ", "", int(ammo->recoil), "", true, true));
  dump->push_back(iteminfo("AMMO", " Count: ", "", int(contents[0].charges)));

 } else if (is_gun()) {
  it_gun* gun = dynamic_cast<it_gun*>(type);
  int ammo_dam = 0, ammo_recoil = 0;
  bool has_ammo = (curammo != NULL && charges > 0);
  if (has_ammo) {
   ammo_dam = curammo->damage;
   ammo_recoil = curammo->recoil;
  }

  dump->push_back(iteminfo("GUN", " Skill used: ", gun->skill_used->name()));
  dump->push_back(iteminfo("GUN", " Ammunition: ", "", int(clip_size()), " rounds of " + ammo_name(ammo_type())));

  temp1.str("");
  if (has_ammo)
   temp1 << ammo_dam;

  temp1 << (gun_damage(false) >= 0 ? "+" : "" );

  temp2.str("");
  if (has_ammo)
   temp2 << " = " << gun_damage();

  dump->push_back(iteminfo("GUN", " Damage: ", temp1.str(), int(gun_damage(false)), temp2.str()));
  dump->push_back(iteminfo("GUN", " Accuracy: ", "", int(100 - accuracy())));

  temp1.str("");
  if (has_ammo)
   temp1 << ammo_recoil;

  temp1 << (recoil(false) >= 0 ? "+" : "" );

  temp2.str("");
  if (has_ammo)
   temp2 << " = " << recoil();

  dump->push_back(iteminfo("GUN"," Recoil: ", temp1.str(), int(recoil(false)), temp2.str(), true, true));

  dump->push_back(iteminfo("GUN", " Reload time: ", "", int(gun->reload_time), ((has_flag(IF_RELOAD_ONE)) ? " per round" : ""), true, true));

  if (burst_size() == 0) {
   if (gun->skill_used == Skill::skill("pistol") && has_flag(IF_RELOAD_ONE))
    dump->push_back(iteminfo("GUN", " Revolver."));
   else
    dump->push_back(iteminfo("GUN", " Semi-automatic."));
  } else
   dump->push_back(iteminfo("GUN", " Burst size: ", "", int(burst_size())));

  if (contents.size() > 0)
   dump->push_back(iteminfo("GUN", "\n"));

  temp1.str("");
  for (int i = 0; i < contents.size(); i++)
   temp1 << "\n+" << contents[i].tname();

  dump->push_back(iteminfo("GUN", temp1.str()));

 } else if (is_gunmod()) {
  it_gunmod* mod = dynamic_cast<it_gunmod*>(type);

  if (mod->accuracy != 0)
   dump->push_back(iteminfo("GUNMOD", " Accuracy: ", ((mod->accuracy > 0) ? "+" : ""), int(mod->accuracy)));
  if (mod->damage != 0)
   dump->push_back(iteminfo("GUNMOD", " Damage: ", ((mod->damage > 0) ? "+" : ""), int(mod->damage)));
  if (mod->clip != 0)
   dump->push_back(iteminfo("GUNMOD", " Magazine: ", ((mod->clip > 0) ? "+" : ""), int(mod->clip), "%"));
  if (mod->recoil != 0)
   dump->push_back(iteminfo("GUNMOD", " Recoil: ", ((mod->recoil > 0) ? "+" : ""), int(mod->recoil), "", true, true));
  if (mod->burst != 0)
   dump->push_back(iteminfo("GUNMOD", " Burst: ", (mod->burst > 0 ? "+" : ""), int(mod->burst)));

  if (mod->newtype != AT_NULL)
   dump->push_back(iteminfo("GUNMOD", " " + ammo_name(mod->newtype)));

  temp1.str("");
  temp1 << " Used on: ";
  if (mod->used_on_pistol)
   temp1 << "Pistols.  ";
  if (mod->used_on_shotgun)
   temp1 << "Shotguns.  ";
  if (mod->used_on_smg)
   temp1 << "SMGs.  ";
  if (mod->used_on_rifle)
   temp1 << "Rifles.";

  dump->push_back(iteminfo("GUNMOD", temp1.str()));

 } else if (is_armor()) {
  it_armor* armor = dynamic_cast<it_armor*>(type);
  
  temp1.str("");
  temp1 << " Covers: ";
  if (armor->covers & mfb(bp_head))
   temp1 << "The head. ";
  if (armor->covers & mfb(bp_eyes))
   temp1 << "The eyes. ";
  if (armor->covers & mfb(bp_mouth))
   temp1 << "The mouth. ";
  if (armor->covers & mfb(bp_torso))
   temp1 << "The torso. ";
  if (armor->covers & mfb(bp_arms))
   temp1 << "The arms. ";
  if (armor->covers & mfb(bp_hands))
   temp1 << "The hands. ";
  if (armor->covers & mfb(bp_legs))
   temp1 << "The legs. ";
  if (armor->covers & mfb(bp_feet))
   temp1 << "The feet. ";

  dump->push_back(iteminfo("ARMOR", temp1.str()));

    if (has_flag(IF_FIT))
    {
        dump->push_back(iteminfo("ARMOR", " Encumberment: ", "", int(armor->encumber) - 1, " (fits)", true, true));
    }
    else
    {
        dump->push_back(iteminfo("ARMOR", " Encumberment: ", "", int(armor->encumber), "", true, true));
    }

  dump->push_back(iteminfo("ARMOR", " Bashing protection: ", "", int(armor->dmg_resist)));
  dump->push_back(iteminfo("ARMOR", " Cut protection: ", "", int(armor->cut_resist)));
  dump->push_back(iteminfo("ARMOR", " Environmental protection: ", "", int(armor->env_resist)));
  dump->push_back(iteminfo("ARMOR", " Warmth: ", "", int(armor->warmth)));
  dump->push_back(iteminfo("ARMOR", " Storage: ", "", int(armor->storage)));

} else if (is_book()) {

  it_book* book = dynamic_cast<it_book*>(type);
  if (!book->type)
   dump->push_back(iteminfo("BOOK", " Just for fun."));
  else {
    dump->push_back(iteminfo("BOOK", " Can bring your ", book->type->name() + " skill to ", int(book->level)));

   if (book->req == 0)
    dump->push_back(iteminfo("BOOK", " It can be understood by beginners."));
   else
    dump->push_back(iteminfo("BOOK", " Requires ", book->type->name() + " level ", int(book->req), " to understand.", true, true));
  }

  dump->push_back(iteminfo("BOOK", " Requires intelligence of ", "", int(book->intel), " to easily read.", true, true));
  if (book->fun != 0)
   dump->push_back(iteminfo("BOOK", " Reading this book affects your morale by ", (book->fun > 0 ? "+" : ""), int(book->fun)));

  dump->push_back(iteminfo("BOOK", " This book takes ", "", int(book->time), " minutes to read.", true, true));

 } else if (is_tool()) {
  it_tool* tool = dynamic_cast<it_tool*>(type);

  if ((tool->max_charges)!=0)
   dump->push_back(iteminfo("TOOL", " Maximum ", "", int(tool->max_charges), " charges" + ((tool->ammo == AT_NULL) ? "" : (" of " + ammo_name(tool->ammo))) + "."));

 } else if (is_style()) {
  it_style* style = dynamic_cast<it_style*>(type);

  for (int i = 0; i < style->moves.size(); i++) {
   dump->push_back(iteminfo("STYLE", default_technique_name(style->moves[i].tech), ". Requires Unarmed Skill of ", int(style->moves[i].level)));
  }

 }

 if ( showtext && !is_null() ) {
  dump->push_back(iteminfo("DESCRIPTION", type->description));
    if (is_armor() && has_flag(IF_FIT))
    {
        dump->push_back(iteminfo("DESCRIPTION", "\n\n"));
        dump->push_back(iteminfo("DESCRIPTION", "This piece of clothing fits you perfectly."));
    }  
  if (contents.size() > 0) {
   if (is_gun()) {
    for (int i = 0; i < contents.size(); i++)
     dump->push_back(iteminfo("DESCRIPTION", contents[i].type->description));
   } else
    dump->push_back(iteminfo("DESCRIPTION", contents[0].type->description));
  }
 }

 temp1.str("");
 std::vector<iteminfo>& vecData = *dump; // vector is not copied here
 for (int i = 0; i < vecData.size(); i++) {
  if (vecData[i].sType == "DESCRIPTION")
   temp1 << "\n";

  temp1 << vecData[i].sName;
  temp1 << vecData[i].sPre;

  if (vecData[i].iValue != -999)
   temp1 << vecData[i].iValue;

  temp1 << vecData[i].sPost;
  temp1 << ((vecData[i].bNewLine) ? "\n" : "");
 }

 return temp1.str();
}
Code example #6
bool TQualityMetric::evaluate_with_Hessian_diagonal( 
                                           PatchData& pd,
                                           size_t handle,
                                           double& value,
                                           std::vector<size_t>& indices,
                                           std::vector<Vector3D>& grad,
                                           std::vector<SymMatrix3D>& diagonal,
                                           MsqError& err )
{
  const Sample s = ElemSampleQM::sample( handle );
  const size_t e = ElemSampleQM::  elem( handle );
  MsqMeshEntity& elem = pd.element_by_index( e );
  EntityTopology type = elem.get_element_type();
  unsigned edim = TopologyInfo::dimension( type );
  size_t num_idx = 0;
  const NodeSet bits = pd.non_slave_node_set( e );
  
  bool rval;
  if (edim == 3) { // 3x3 or 3x2 targets ?
    const MappingFunction3D* mf = pd.get_mapping_function_3D( type );
    if (!mf) {
      MSQ_SETERR(err)( "No mapping function for element type", MsqError::UNSUPPORTED_ELEMENT );
      return false;
    }

    MsqMatrix<3,3> A, W, dmdT, d2mdT2[6];
    mf->jacobian( pd, e, bits, s, mIndices, mDerivs3D, num_idx, A, err );
    MSQ_ERRZERO(err);
    targetCalc->get_3D_target( pd, e, s, W, err ); MSQ_ERRZERO(err);
    const MsqMatrix<3,3> Winv = inverse(W);
    const MsqMatrix<3,3> T = A*Winv;
    rval = targetMetric->evaluate_with_hess( T, value, dmdT, d2mdT2, err );
    MSQ_ERRZERO(err);
    gradient<3>( num_idx, mDerivs3D, dmdT * transpose(Winv), grad );
    second_deriv_wrt_product_factor( d2mdT2, Winv );
    
    diagonal.resize( num_idx );
    hessian_diagonal<3>(num_idx, mDerivs3D, d2mdT2, arrptr(diagonal) );
#ifdef PRINT_INFO
    print_info<3>( e, s, A, W, A * inverse(W) );
#endif
  }
  else if (edim == 2) {
#ifdef NUMERICAL_2D_HESSIAN
    // use finite difference approximation for now
    return QualityMetric::evaluate_with_Hessian_diagonal( pd, handle,
                                           value, indices, grad, diagonal,
                                           err );
#else
    MsqMatrix<2,2> W, A, dmdT, d2mdT2[3];
    MsqMatrix<3,2> M;
    rval = evaluate_surface_common( pd, s, e, bits, mIndices, num_idx,
                             mDerivs2D, W, A, M, err ); 
    if (MSQ_CHKERR(err) || !rval)
      return false;
    const MsqMatrix<2,2> Winv = inverse(W);
    const MsqMatrix<2,2> T = A*Winv;
    rval = targetMetric->evaluate_with_hess( T, value, dmdT, d2mdT2, err );
    MSQ_ERRZERO(err);
    gradient<2>( num_idx, mDerivs2D, M * dmdT * transpose(Winv), grad );
    second_deriv_wrt_product_factor( d2mdT2, Winv );

    diagonal.resize( num_idx );
    for (size_t i = 0; i < num_idx; ++i) {
      MsqMatrix<2,2> block2d;
      block2d(0,0) = transpose(mDerivs2D[i]) * d2mdT2[0] * mDerivs2D[i];
      block2d(0,1) = transpose(mDerivs2D[i]) * d2mdT2[1] * mDerivs2D[i];
      block2d(1,0) = block2d(0,1);
      block2d(1,1) = transpose(mDerivs2D[i]) * d2mdT2[2] * mDerivs2D[i];
      MsqMatrix<3,2> p = M * block2d;
      
      SymMatrix3D& H = diagonal[i];
      H[0] = p.row(0) * transpose(M.row(0));
      H[1] = p.row(0) * transpose(M.row(1));
      H[2] = p.row(0) * transpose(M.row(2));
      H[3] = p.row(1) * transpose(M.row(1));
      H[4] = p.row(1) * transpose(M.row(2));
      H[5] = p.row(2) * transpose(M.row(2));
    }
#ifdef PRINT_INFO
    print_info<2>( e, s, J, Wp, A * inverse(W) );
#endif
#endif
  }
  else {
    assert(0);
    return false;
  }
  
    // pass back index list
  indices.resize( num_idx );
  std::copy( mIndices, mIndices+num_idx, indices.begin() );
  
    // apply target weight to value
  if (!num_idx) 
    weight( pd, s, e, num_idx, value, 0, 0, 0, err );
  else
    weight( pd, s, e, num_idx, value, arrptr(grad), arrptr(diagonal), 0, err ); 
  MSQ_ERRZERO(err);
  return rval;
}
Code example #7
///<summary>
///	Uses OpenCV only for image loading and saving; the pixel operations are implemented from scratch
///</summary>
///<see cite="http://opencv.jp/opencv2-x-samples/non-local-means-filter" />
void CNonlocalMeansFilter::ProcessByPartOpenCV(CString filePath)
{
	//load the source image
	std::string path = this->GetMultiBytePath(filePath);
	cv::Mat src = cv::imread(path, 1);
	cv::Mat snoise;
	cv::Mat dest;

	double h = 15.0;
	double sigma = 15.0;

	if (templeteWindowSize > searchWindowSize)
	{
		AfxMessageBox(L"searchWindowSize should be larger than templeteWindowSize");
		return;
	}

	if (dest.empty())
		dest = cv::Mat::zeros(src.size(), src.type());

	const int tr = templeteWindowSize >> 1;
	const int sr = searchWindowSize >> 1;
	const int bb = sr + tr;
	const int D = searchWindowSize*searchWindowSize;
	const int H = D / 2 + 1;
	const double div = 1.0 / (double)D;//search area div    
	const int tD = templeteWindowSize*templeteWindowSize;
	const double tdiv = 1.0 / (double)(tD);//templete square div   
	//create large size image for bounding box; 
	cv::Mat im;
	cv::copyMakeBorder(src, im, bb, bb, bb, bb, cv::BORDER_DEFAULT);

	//weight computation;     
	cv::vector<double> weight(256 * 256 * src.channels());
	double* w = &weight[0];
	const double gauss_sd = (sigma == 0.0) ? h : sigma;
	double gauss_color_coeff = -(1.0 / (double)(src.channels()))*(1.0 / (h*h));
	int emax;

	for (int i = 0; i < 256 * 256 * src.channels(); i++)
	{
		double v = std::exp(std::max(i - 2.0*gauss_sd*gauss_sd, 0.0)*gauss_color_coeff);
		w[i] = v;

		if (v < 0.001)
		{
			emax = i;
			break;
		}
	}

	for (int i = emax; i < 256 * 256 * src.channels(); i++)
	{
		w[i] = 0.0;
	}

	if (src.channels() == 3)
	{
		const int cstep = im.step - templeteWindowSize * 3;
		const int csstep = im.step - searchWindowSize * 3;

		for (int j = 0; j < src.rows; j++)
		{
			uchar* d = dest.ptr(j);
			int* ww = new int[D];
			double* nw = new double[D];
			for (int i = 0; i < src.cols; i++)
			{
				double tweight = 0.0;
				//search loop            
				uchar* tprt = im.data + im.step*(sr + j) + 3 * (sr + i);
				uchar* sptr2 = im.data + im.step*j + 3 * i;
				for (int l = searchWindowSize, count = D - 1; l--;)
				{
					uchar* sptr = sptr2 + im.step*(l);
					for (int k = searchWindowSize; k--;)
					{
						//templete loop                    
						int e = 0;
						uchar* t = tprt;
						uchar* s = sptr + 3 * k;

						for (int n = templeteWindowSize; n--;)
						{
							for (int m = templeteWindowSize; m--;)
							{
								// computing color L2 norm                       
								e += (s[0] - t[0])*(s[0] - t[0]) + (s[1] - t[1])*(s[1] - t[1]) + (s[2] - t[2])*(s[2] - t[2]);//L2 norm    
								s += 3, t += 3;
							}
							t += cstep;
							s += cstep;
						}

						const int ediv = e*tdiv;
						ww[count--] = ediv;
						//get weighted Euclidean distance            
						tweight += w[ediv];
					}
				}

				//weight normalization            
				if (tweight == 0.0)
				{
					for (int z = 0; z < D; z++) nw[z] = 0;
					nw[H] = 1;
				}
				else
				{
					double itweight = 1.0 / (double)tweight;
					for (int z = 0; z < D; z++) nw[z] = w[ww[z]] * itweight;
				}

				double r = 0.0, g = 0.0, b = 0.0;
				uchar* s = im.ptr(j + tr); s += 3 * (tr + i);

				for (int l = searchWindowSize, count = 0; l--;)
				{
					for (int k = searchWindowSize; k--;)
					{
						r += s[0] * nw[count];
						g += s[1] * nw[count];
						b += s[2] * nw[count++];
						s += 3;
					}
					s += csstep;
				}

				d[0] = cv::saturate_cast<uchar>(r);
				d[1] = cv::saturate_cast<uchar>(g);
				d[2] = cv::saturate_cast<uchar>(b);

				d += 3;
			}         
			delete[] ww;
			delete[] nw;
		}
	}
	else if (src.channels() == 1)
	{
		const int cstep = im.step - templeteWindowSize;
		const int csstep = im.step - searchWindowSize;

		for (int j = 0; j < src.rows; j++)
		{
			uchar* d = dest.ptr(j);
			int* ww = new int[D];
			double* nw = new double[D];
			for (int i = 0; i < src.cols; i++)
			{
				double tweight = 0.0;
				//search loop               
				uchar* tprt = im.data + im.step*(sr + j) + (sr + i);
				uchar* sptr2 = im.data + im.step*j + i;
				for (int l = searchWindowSize, count = D - 1; l--;)
				{
					uchar* sptr = sptr2 + im.step*(l);
					for (int k = searchWindowSize; k--;)
					{
						//templete loop                
						int e = 0;
						uchar* t = tprt;
						uchar* s = sptr + k;

						for (int n = templeteWindowSize; n--;)
						{
							for (int m = templeteWindowSize; m--;)
							{
								// computing color L2 norm                      
								e += (*s - *t)*(*s - *t);
								s++, t++;
							}
							t += cstep;
							s += cstep;
						}

						const int ediv = e*tdiv;
						ww[count--] = ediv;
						//get weighted Euclidean distance              
						tweight += w[ediv];
					}
				}

				//weight normalization          
				if (tweight == 0.0)
				{
					for (int z = 0; z < D; z++) nw[z] = 0;
					nw[H] = 1;
				}
				else
				{
					double itweight = 1.0 / (double)tweight;
					for (int z = 0; z < D; z++) nw[z] = w[ww[z]] * itweight;
				}

				double v = 0.0;
				uchar* s = im.ptr(j + tr); s += (tr + i);

				for (int l = searchWindowSize, count = 0; l--;)
				{
					for (int k = searchWindowSize; k--;)
					{
						v += *(s++)*nw[count++];
					}
					s += csstep;
				}

				*(d++) = cv::saturate_cast<uchar>(v);
			}  
			delete[] ww;
			delete[] nw;
		}
	}

	//display the result
	cv::namedWindow("ノンローカルミーンフィルタ画像", cv::WINDOW_AUTOSIZE);
	cv::imshow("ノンローカルミーンフィルタ画像", dest);
}
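The lookup table filled near the top of example #7 implements the usual non-local-means kernel. With d² the mean squared patch difference (e·tdiv in the code), c = src.channels(), and h, σ as in the code (σ is replaced by h when σ = 0), each table entry is, in LaTeX:

w(d^2) = \exp\!\left(-\,\frac{\max(d^2 - 2\sigma^2,\;0)}{c\,h^2}\right)

Entries beyond the first index where w drops below 0.001 are clamped to zero (the emax cutoff in the code).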
Code example #8
File: item.cpp Project: AkrionXxarr/Cataclysm-DDA
bool item::is_two_handed(player *u)
{
  if (is_gun() && (dynamic_cast<it_gun*>(type))->skill_used != Skill::skill("pistol"))
    return true;
  return (weight() > u->str_cur * 4);
}
Code example #9
File: effects_renderer.hpp Project: Greentwip/Windy
			void render(core_window_t * wd, bool forced, const rectangle* update_area = nullptr)
			{
				bool copy_separately = true;
				std::vector<std::pair<rectangle, core_window_t*>>	rd_set;

				if (wd->root_widget->other.attribute.root->effects_edge_nimbus.size())
				{
					auto root_wd = wd->root_widget;

					auto & nimbus = root_wd->other.attribute.root->effects_edge_nimbus;

					auto focused = root_wd->other.attribute.root->focus;

					const unsigned pixels = weight();

					auto graph = root_wd->root_graph;

					nana::rectangle r;
					for(auto & action : nimbus)
					{
						if(_m_edge_nimbus(focused, action.window) && window_layer::read_visual_rectangle(action.window, r))
						{
							if (action.window == wd)
							{
								if (update_area)
									::nana::overlap(*update_area, rectangle(r), r);
								copy_separately = false;
							}

							//Avoiding duplicated rendering. If the window is declared to lazy refresh, it should be rendered.
							if ((forced && (action.window == wd)) || (focused == action.window) || !action.rendered || (action.window->other.upd_state == core_window_t::update_state::refresh))
							{
								rd_set.emplace_back(r, action.window);
								action.rendered = true;
							}
						}
						else if(action.rendered)
						{
							action.rendered = false;

							if (action.window == wd)
								copy_separately = false;

							::nana::rectangle erase_r(
									action.window->pos_root.x - static_cast<int>(pixels),
									action.window->pos_root.y - static_cast<int>(pixels),
									static_cast<unsigned>(action.window->dimension.width + (pixels << 1)),
									static_cast<unsigned>(action.window->dimension.height + (pixels << 1))
								);

							graph->paste(root_wd->root, erase_r, erase_r.x, erase_r.y);
						}
					}
				}

				if (copy_separately)
				{
					rectangle vr;
					if (window_layer::read_visual_rectangle(wd, vr))
					{
						if (update_area)
							::nana::overlap(*update_area, rectangle(vr), vr);
						wd->root_graph->paste(wd->root, vr, vr.x, vr.y);
					}
				}

				//Render
				for (auto & rd : rd_set)
					_m_render_edge_nimbus(rd.second, rd.first);
			}
Code example #10
File: point_set.hpp Project: SimonEbner/fl
 /**
  * Sets a given weight of a point at position i
  *
  * \param i         Index of point
  * \param w         Point weights. The weights determinaing the first two
  *                  moments are the same
  *
  * \throws OutOfBoundsException
  * \throws ZeroDimensionException
  */
 void weight(int i, double w)
 {
     weight(i, Weight{w, w});
 }
Code example #11
File: point_set.hpp Project: SimonEbner/fl
 /**
  * Sets given weights of a point at position i
  *
  * \param i         Index of point
  * \param w_mean    point weight used to compute the first moment
  * \param w_cov     point weight used to compute the second centered moment
  *
  * \throws OutOfBoundsException
  * \throws ZeroDimensionException
  */
 void weight(int i, double w_mean , double w_cov)
 {
     weight(i, Weight{w_mean, w_cov});
 }
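A minimal usage sketch for the two setter overloads above; the point_set object and the index/weight values are hypothetical, only the weight(i, ...) calls mirror the listed API:

 // hypothetical: point_set is assumed to be an fl::PointSet-like object exposing the overloads above
 point_set.weight(0, 0.25);          // one weight used for both the mean and the covariance
 point_set.weight(1, 0.30, 0.20);    // separate weights for the mean and the covariance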
Code example #12
File: HWUeigen.cpp Project: changshuaiwei/gsu
//calculate kinship matrix: weighted IBS(average correlation)
//negative value is possible
void egHWU::weightFromGenomeWIBS()
{
	//initialize tmp weight
	vector<float> tmp2, tmp(_n_sub,0);
	vector< vector<float> > weight(_n_sub,tmp);

	vector<int> mis_idx;
	float tmpgeno=0;
	float tmp_ave=0;
	float tmp_var=0;

	float count_snp=0;

	//cout<<"Relatedness From Whole Genome..\n";
	gfun::printLOG("Relatedness From Whole Genome..\n");

	vector<int> idx;
	double dtotal=_n_snp;

	for(int i=0; i<par::IBS_N; i++){
		int indx=(int)(Stat_fuc::ran1(par::seed)*(dtotal));
		while (indx<0 || indx>(_n_snp-1)) indx=(int)(Stat_fuc::ran1(par::seed)*(dtotal));
		idx.push_back(indx);
	}

	int totalSNP=0;
	if(par::IBS_N>0) totalSNP=par::IBS_N;
	else totalSNP=_n_snp;

	int i=0;
	for(int i_=0; i_<totalSNP; i_++){

		if (par::IBS_N>0) i=idx[i_];
		else i=i_;

		cout<<"include "<<i_+1<<"th SNP..";

		mis_idx.clear();
		tmp_ave=0;
		//tmp_var=0;
		for(int j=0; j<_n_sub; j++){
			tmpgeno=float(_datafile->genotypeToInt(j,i));
			if(tmpgeno<0){
				mis_idx.push_back(j);
			}else{
				tmp[j]=tmpgeno;
				tmp_ave+=tmpgeno;
				//tmp_var+=tmpgeno*tmpgeno;
			}
		}
		tmp_ave/=float(_n_sub-mis_idx.size());
		//tmp_var/=float(_n_sub-mis_idx.size());

		tmp_var=tmp_ave*(2.0-tmp_ave);
		//tmp_var-=tmp_ave*tmp_ave;
		tmp_var=sqrt(tmp_var);

		if(tmp_var==0) {
			cout<<"\r";
			cout.flush();
			continue;
		}

		count_snp+=1.0;

		for(int j=0; j<mis_idx.size(); j++){
			tmp[mis_idx[j]]=tmp_ave;
		}

		for(int j=0; j<_n_sub; j++){
			tmp[j]-=tmp_ave;
			tmp[j]/=tmp_var;
		}

		//now tmp is standardized, we can make outer product and sum to weight

		for(int j=0; j<weight.size(); j++){
			for(int k=j; k<weight[j].size(); k++){
				weight[j][k]+=(tmp[j]*tmp[k]);
				weight[k][j]=weight[j][k];
			}
		}

		cout<<"\r";
		cout.flush();

	}

	//average now
	vector<double> tmpd(_n_sub,0);
	vector< vector<double> > weightd(_n_sub,tmpd);

	for(int i=0; i<_n_sub; i++){
		for(int j=i; j<_n_sub; j++){
			weightd[i][j]=weight[i][j]/count_snp;
			weightd[j][i]=weightd[i][j];
		}
	}

	//trans to correlation (-1,+1), and then distance (0,1)
	vector<double> var_vec;
	for(int i=0; i<_n_sub; i++){
		var_vec.push_back(sqrt(weightd[i][i]));
	}
	for(int i=0; i<_n_sub; i++){
		weightd[i][i]=1.0;
		for(int j=i+1; j<_n_sub; j++){
			double tmpcov=var_vec[i]*var_vec[j];
			if(tmpcov>0){
				weightd[i][j]=(weightd[i][j]/tmpcov)*0.5 +0.5;
			}else{
				weightd[i][j]=0;
				//gfun::error("error in calculating distance matrix\n");
			}
			weightd[j][i]=weightd[i][j];
		}
	}


	//assign to _weight
	_Kappa.resize(_n_sub,_n_sub);
	for(int i=0; i<weightd.size(); i++){
		for(int j=0; j<_n_sub; j++){
			_Kappa(i,j)=weightd[i][j];
		}
	}

	_weight_flag=true;


	//cout<<"\nRelateness Matrix Done.\n\n";
	gfun::printLOG("\nRelateness Matrix Done.\n\n");

}
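In the final normalization of example #12, the averaged cross-products are converted to a correlation and then rescaled to [0, 1]. With c_ij the averaged product for subjects i and j and s_i = sqrt(c_ii), the entries of _Kappa are, in LaTeX:

K_{ij} = \frac{1}{2}\,\frac{c_{ij}}{s_i\,s_j} + \frac{1}{2}, \qquad K_{ii} = 1

(off-diagonal entries fall back to 0 when s_i s_j is not positive, as in the code).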
Code example #13
File: HWUeigen.cpp Project: changshuaiwei/gsu
void egHWU::weightFastGenome()
{
	vector<float> tmp(_n_sub,0);
	vector< vector<float> > weight(_n_sub,tmp);

	vector<int> tmp2(_n_sub,0);
	vector<bool> flag_9(_n_sub,0), flag_first(_n_sub,0), flag_second(_n_sub,0);
	int tmpgeno=0;
	const float one=1;

	//cout<<"Relatedness From Whole Genome..\n";
	gfun::printLOG("Relatedness From Whole Genome..\n");

	vector<int> idx;
	double dtotal=_n_snp;

	for(int i=0; i<par::IBS_N; i++){
		int indx=(int)(Stat_fuc::ran1(par::seed)*(dtotal));
		while (indx<0 || indx>(_n_snp-1)) indx=(int)(Stat_fuc::ran1(par::seed)*(dtotal));
		idx.push_back(indx);
	}

	int totalSNP=0;
	if(par::IBS_N>0) totalSNP=par::IBS_N;
	else totalSNP=_n_snp;

	int i;
	for(int i_=0; i_<totalSNP; i_++){

		if (par::IBS_N>0) i=idx[i_];
		else i=i_;

		cout<<"include "<<i_+1<<"th SNP..";

		
		for(int j=0; j<_n_sub; j++){
			tmp2[j]=_datafile->allel1ToInt(j,i);
		}

		for(int j=0; j<weight.size(); j++){
			for(int k=j; k<weight[j].size(); k++){
				if(tmp2[j]!=tmp2[k]) weight[j][k]+=one;
			}
		}

		for(int j=0; j<_n_sub; j++){
			tmp2[j]=_datafile->allel2ToInt(j,i);
		}

		for(int j=0; j<weight.size(); j++){
			for(int k=j; k<weight[j].size(); k++){
				if(tmp2[j]!=tmp2[k]) weight[j][k]+=one;
			}
		}
		
		/*
		for(int j=0; j<_n_sub; j++){
			tmpgeno=_datafile->genotypeToInt(j,i);
			if(tmpgeno==-9) flag_9[j]=true;
			else{
				flag_9[j]=false;
				if(tmpgeno<2) flag_first[j]=true; else flag_first[j]=false;
				if(tmpgeno>0) flag_second[j]=true; else flag_second[j]=false;
			}
		}

		for(int j=0; j<_n_sub; j++){
			if(!flag_9[j]){
				for(int k=j; k<_n_sub; k++){
					if(!flag_9[k]){
						float tmpadd=short((flag_first[j]!=flag_first[k]))+ short(flag_second[j]!=flag_second[k]);
						weight[j][k]+=tmpadd;
					}
					
				}
			}
			
		}
		*/

		cout<<"\r";
		cout.flush();

	}

	//average now
	vector<double> tmpd(_n_sub,0);
	vector< vector<double> > weightd(_n_sub,tmpd);

	for(int i=0; i<_n_sub; i++){
		for(int j=i; j<_n_sub; j++){
			weightd[i][j]=double(weight[i][j])/double(totalSNP)/2;
			weightd[i][j]=1-weightd[i][j];
			weightd[j][i]=weightd[i][j];
		}
	}




	//assign to _weight
	_Kappa.resize(_n_sub,_n_sub);
	for(int i=0; i<weightd.size(); i++){
		for(int j=0; j<_n_sub; j++){
			_Kappa(i,j)=weightd[i][j];
		}
	}

	_weight_flag=true;

	//cout<<"\nRelateness Matrix Done.\n\n";
	gfun::printLOG("\nRelateness Matrix Done.\n\n");
}
Code example #14
	void baggage_counter ()
	{
		int run = 1;			// flag to check the pass through this function.
//		kill (getpid(), SIGSTOP);
		
		pid_t lug_sec, wg, bd_ps;
		label_run:
		
//			fflush(stdout);
//			printf ("~~~asdf~~~");
		
			if (run == 1)
			{	
				lug_sec = fork();	
		
				if (!lug_sec)
					luggage_security ();		// initiate luggage counter
				else
				{
					usleep (100);
					wg = fork();		
					if (!wg)
						weight (lug_sec);	// initiate weight counter
					else
					{
						usleep (100);
		//				waitid (P_PID, wg, NULL, WSTOPPED); 
						bd_ps = fork();
					        if (!bd_ps)
							boarding_pass (wg);	// initiate the boarding pass.
						else
						{
							run++;			// inc run flag.
							usleep (100);
							kill (getpid(), SIGSTOP);					
					
							FILE *fp = fopen ("pointer.txt", "r");	
							passenger *temp;
							fscanf (fp, "%p", &temp);	// get the address of the current passenger who has completed baggage counter.
							fclose (fp);
							FILE *ptr = fopen ("pointer2.txt","w");
							fprintf (ptr, "%p", temp); // place this address in the pointer2 file to be accessed by the immigration.
							fflush (ptr);
							fclose (ptr);
					
		//					kill (immigration, SIGCONT);
	
	//						usleep (1000);
							kill (getppid(), SIGCONT);
							kill (getpid(), SIGSTOP);
						
							goto label_run;
						}
					}
				}
			}
			else
			{
				usleep (100);
				kill (lug_sec, SIGCONT);
				usleep (100);
				kill (wg, SIGCONT);
				usleep (100);
				kill (bd_ps, SIGCONT);
				
//				usleep (100);
				kill (getpid(), SIGSTOP);
				
				FILE *fp = fopen ("pointer.txt", "r");
				passenger *temp;
				fscanf (fp, "%p", &temp);
				fclose (fp);
				FILE *ptr = fopen ("pointer2.txt","w");
				fprintf (ptr, "%p", temp);
				fflush (ptr);
				fclose (ptr);
//				kill (immigration, SIGCONT);
				
				kill (getppid(), SIGCONT);
				kill (getpid(), SIGSTOP);
				goto label_run;
			}		
	}
Code example #15
bool TQualityMetric::evaluate_with_gradient( PatchData& pd,
                                             size_t handle,
                                             double& value,
                                             std::vector<size_t>& indices,
                                             std::vector<Vector3D>& grad,
                                             MsqError& err )
{
  const Sample s = ElemSampleQM::sample( handle );
  const size_t e = ElemSampleQM::  elem( handle );
  MsqMeshEntity& elem = pd.element_by_index( e );
  EntityTopology type = elem.get_element_type();
  unsigned edim = TopologyInfo::dimension( type );
  size_t num_idx = 0;
  const NodeSet bits = pd.non_slave_node_set( e );
  
  bool rval;
  if (edim == 3) { // 3x3 or 3x2 targets ?
    const MappingFunction3D* mf = pd.get_mapping_function_3D( type );
    if (!mf) {
      MSQ_SETERR(err)( "No mapping function for element type", MsqError::UNSUPPORTED_ELEMENT );
      return false;
    }

    MsqMatrix<3,3> A, W, dmdT;
    mf->jacobian( pd, e, bits, s, mIndices, mDerivs3D, num_idx, A, err );
    MSQ_ERRZERO(err);
    targetCalc->get_3D_target( pd, e, s, W, err ); MSQ_ERRZERO(err);
    const MsqMatrix<3,3> Winv = inverse(W);
    const MsqMatrix<3,3> T = A*Winv;
    rval = targetMetric->evaluate_with_grad( T, value, dmdT, err );
    MSQ_ERRZERO(err);
    gradient<3>( num_idx, mDerivs3D, dmdT * transpose(Winv), grad );
#ifdef PRINT_INFO
    print_info<3>( e, s, A, W, A * inverse(W) );
#endif
  }
  else if (edim == 2) {
    MsqMatrix<2,2> W, A, dmdT;
    MsqMatrix<3,2> S_a_transpose_Theta;
    rval = evaluate_surface_common( pd, s, e, bits, mIndices, num_idx,
                             mDerivs2D, W, A, S_a_transpose_Theta, err ); 
    if (MSQ_CHKERR(err) || !rval)
      return false;
    const MsqMatrix<2,2> Winv = inverse(W);
    const MsqMatrix<2,2> T = A*Winv;
    rval = targetMetric->evaluate_with_grad( T, value, dmdT, err );
    MSQ_ERRZERO(err);
    gradient<2>( num_idx, mDerivs2D, S_a_transpose_Theta*dmdT*transpose(Winv), grad );
#ifdef PRINT_INFO
    print_info<2>( e, s, J, Wp, A * inverse(W) );
#endif
  }
  else {
    assert(false);
    return false;
  }
  
    // pass back index list
  indices.resize( num_idx );
  std::copy( mIndices, mIndices+num_idx, indices.begin() );
  
    // apply target weight to value
  weight( pd, s, e, num_idx, value, grad.empty() ? 0 : arrptr(grad), 0, 0, err ); MSQ_ERRZERO(err);
  return rval;
}
Code example #16
void InterpolateMechanicsSolutionToNewMesh(QuadraticMesh<3>& rCoarseMesh, std::vector<double>& rCoarseSolution,
                                           QuadraticMesh<3>& rFineMesh, std::vector<double>& rFineSolution,
                                           CompressibilityType compressibilityType)
{
    unsigned NUM_UNKNOWNS = (compressibilityType==INCOMPRESSIBLE ? DIM+1 : DIM);

    if(rCoarseSolution.size() != rCoarseMesh.GetNumNodes()*NUM_UNKNOWNS)
    {
        EXCEPTION("rCoarseSolution not correct size");
    }
    if(rFineSolution.size() != rFineMesh.GetNumNodes()*NUM_UNKNOWNS)
    {
        rFineSolution.resize(rFineMesh.GetNumNodes()*NUM_UNKNOWNS);
    }

    c_vector<double, (DIM+1)*(DIM+2)/2> quad_basis;

    for(unsigned i=0; i<rFineMesh.GetNumNodes(); i++)
    {
        // find containing elements and weights in coarse mesh
        ChastePoint<DIM> point = rFineMesh.GetNode(i)->GetPoint();
        unsigned coarse_element_index = rCoarseMesh.GetContainingElementIndex(point,
                                                                              false);
        Element<DIM,DIM>* p_coarse_element = rCoarseMesh.GetElement(coarse_element_index);
        c_vector<double,DIM+1> weight = p_coarse_element->CalculateInterpolationWeights(point);

        c_vector<double,DIM> xi;
        xi(0) = weight(1);
        xi(1) = weight(2);
        if(DIM==3)
        {
            xi(2) = weight(3);
        }

        QuadraticBasisFunction<DIM>::ComputeBasisFunctions(xi, quad_basis);

        // interpolate (u,p) (don't do anything for p if compressible)
        c_vector<double,DIM+1> fine_solution = zero_vector<double>(DIM+1);

        for(unsigned elem_node_index=0; elem_node_index<(DIM+1)*(DIM+2)/2; elem_node_index++)
        {
            unsigned coarse_node = p_coarse_element->GetNodeGlobalIndex(elem_node_index);
            c_vector<double,DIM+1> coarse_solution_at_node;

            for(unsigned j=0; j<DIM; j++)
            {
                coarse_solution_at_node(j) = rCoarseSolution[NUM_UNKNOWNS*coarse_node + j];
            }
            if(compressibilityType==INCOMPRESSIBLE)
            {
                coarse_solution_at_node(DIM) = rCoarseSolution[NUM_UNKNOWNS*coarse_node + DIM];
            }

            fine_solution += coarse_solution_at_node*quad_basis(elem_node_index);
        }

        for(unsigned j=0; j<DIM; j++)
        {
            rFineSolution[NUM_UNKNOWNS*i + j] = fine_solution(j);
        }

        if(compressibilityType==INCOMPRESSIBLE)
        {
            rFineSolution[NUM_UNKNOWNS*i + DIM] = fine_solution(DIM);

            // Whilst the returned p from a solve is defined properly at all nodes, during the solve linear basis functions are
            // used for p and therefore p not computed explicitly at internal nodes, and the solver solves for p=0 at these internal
            // nodes. (After the solve, p is interpolated from vertices to internal nodes)
            if(rFineMesh.GetNode(i)->IsInternal())
            {
                rFineSolution[NUM_UNKNOWNS*i + DIM] = 0.0;
            }
        }
    }
}
Code example #17
bool TQualityMetric::evaluate_with_Hessian( PatchData& pd,
                                            size_t handle,
                                            double& value,
                                            std::vector<size_t>& indices,
                                            std::vector<Vector3D>& grad,
                                            std::vector<Matrix3D>& Hessian,
                                            MsqError& err )
{
  const Sample s = ElemSampleQM::sample( handle );
  const size_t e = ElemSampleQM::  elem( handle );
  MsqMeshEntity& elem = pd.element_by_index( e );
  EntityTopology type = elem.get_element_type();
  unsigned edim = TopologyInfo::dimension( type );
  size_t num_idx = 0;
  const NodeSet bits = pd.non_slave_node_set( e );
  
  bool rval;
  if (edim == 3) { // 3x3 or 3x2 targets ?
    const MappingFunction3D* mf = pd.get_mapping_function_3D( type );
    if (!mf) {
      MSQ_SETERR(err)( "No mapping function for element type", MsqError::UNSUPPORTED_ELEMENT );
      return false;
    }

    MsqMatrix<3,3> A, W, dmdT, d2mdT2[6];
    mf->jacobian( pd, e, bits, s, mIndices, mDerivs3D, num_idx, A, err );
    MSQ_ERRZERO(err);
    targetCalc->get_3D_target( pd, e, s, W, err ); MSQ_ERRZERO(err);
    const MsqMatrix<3,3> Winv = inverse(W);
    const MsqMatrix<3,3> T = A*Winv;
    rval = targetMetric->evaluate_with_hess( T, value, dmdT, d2mdT2, err );
    MSQ_ERRZERO(err);
    gradient<3>( num_idx, mDerivs3D, dmdT*transpose(Winv), grad );
    second_deriv_wrt_product_factor( d2mdT2, Winv );
    Hessian.resize( num_idx*(num_idx+1)/2 );
    if (num_idx)
      hessian<3>( num_idx, mDerivs3D, d2mdT2, arrptr(Hessian) );
    
#ifdef PRINT_INFO
    print_info<3>( e, s, A, W, A * inverse(W) );
#endif
  }
  else if (edim == 2) {
#ifdef NUMERICAL_2D_HESSIAN
    // return finite difference approximation for now

    return QualityMetric::evaluate_with_Hessian( pd, handle,
                                           value, indices, grad, Hessian,
                                           err );
#else
    MsqMatrix<2,2> W, A, dmdT, d2mdT2[3];
    MsqMatrix<3,2> M;
    rval = evaluate_surface_common( pd, s, e, bits, mIndices, num_idx,
                             mDerivs2D, W, A, M, err ); 
    if (MSQ_CHKERR(err) || !rval)
      return false;
    const MsqMatrix<2,2> Winv = inverse(W);
    const MsqMatrix<2,2> T = A*Winv;
    rval = targetMetric->evaluate_with_hess( T, value, dmdT, d2mdT2, err );
    MSQ_ERRZERO(err);
    gradient<2>( num_idx, mDerivs2D, M * dmdT * transpose(Winv), grad );
      // calculate 2D hessian
    second_deriv_wrt_product_factor( d2mdT2, Winv );
    const size_t n = num_idx*(num_idx+1)/2;
    hess2d.resize(n);
    if (n)
      hessian<2>( num_idx, mDerivs2D, d2mdT2, arrptr(hess2d) );
      // calculate surface hessian as transform of 2D hessian
    Hessian.resize(n);
    for (size_t i = 0; i < n; ++i)
      Hessian[i] = Matrix3D( (M * hess2d[i] * transpose(M)).data() );
#ifdef PRINT_INFO
    print_info<2>( e, s, J, Wp, A * inverse(W) );
#endif
#endif
  }
  else {
    assert(0);
    return false;
  }
  
    // pass back index list
  indices.resize( num_idx );
  std::copy( mIndices, mIndices+num_idx, indices.begin() );
  
    // apply target weight to value
  if (!num_idx) 
    weight( pd, s, e, num_idx, value, 0, 0, 0, err );
  else
    weight( pd, s, e, num_idx, value, arrptr(grad), 0, arrptr(Hessian), err ); 
  MSQ_ERRZERO(err);
  return rval;
}
Code example #18
File: HDR.cpp Project: brunopop/hdr
	void HDR::gsolve(std::vector<std::vector<uchar>>& z, std::vector<float>& g, std::vector<float>& lE)
	{
		if (z.size() != P || z[0].size() != N)
		{
			std::stringstream ss;
			ss << "Error in HDR::gsolve(): input matrix must be (NxP).\n\tz is of size " << z.size() << " but P=" << P << "\n\tz[0] is of size " << z[0].size() << " but N=" << N;
			throw std::exception(ss.str().c_str());
		}

		int n = 256;
		//lapack_int nlines = N*P+n+1, ncols = n+N;
		int nlins = N*P+n+1, ncols = n+N;

		double* b = new double[nlins];
		double** A = new double*[nlins];
		for (int k=0; k<nlins; k++)
		{
			b[k] = 0;
			A[k] = new double[ncols];
			for (int l=0; l<ncols; l++)
			{
				A[k][l] = 0;
			}
		}

		// Include the data-fitting equations
		int k = 0;
		// For all pixel locations
		for (int i=0; i<N; i++)
		{
			// For all images
			for (int j=0; j<P; j++)
			{
				int wij = weight(z[j][i] + 1);
				A[k][z[j][i]+1] = wij;
				A[k][n+i] = -wij;
				b[k] = wij * B[j];
				k++;
			}
		}

		// Fix the curve by setting its middle value to 0
		A[k][128] = 1;
		k++;

		// Include the smoothness equations
		for (int i=0; i<n-2; i++)
		{
			A[k][i] = lambda * weight(i+1);
			A[k][i+1] = -2.0*lambda*weight(i+1);
			A[k][i+2] = lambda * weight(i+1);
			k++;
		}

		// Reshape matrix (in row order) so that it can be used with lapack
		double* system = new double[nlins*ncols];
		for (int k=0; k<nlins*ncols; k++)
		{
			int i = k / ncols;
			int j = k % ncols;
			system[k] = A[i][j];
		}

		// Solve the system using SVD
		int sz = std::min(nlins, ncols);
		double* s = new double[sz];
		lapack_int rank;
		LAPACKE_dgelsd(LAPACK_ROW_MAJOR, nlins, ncols, 1, system, ncols, b, 1, s, -1.0, &rank);

		// Log exposure for pixel values 0 through 255
		g.resize(n, 0.0);
		for (int i=0; i<n; i++)
		{
			g[i] = (float) b[i];
		}

		// Log film irradiance for every sample pixel
		lE.resize(nlins-n, 0.0);
#ifdef DEBUG
		std::cout << "N=" << N << "\nP=" << P << "\nsize of lE=" << nlins-n << std::endl;
#endif
		for (int i=0; i<nlins-n; i++)
		{
			lE[i] = (float) b[n+i];
		}
	};
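The rows assembled in gsolve above correspond to the standard least-squares objective for recovering the response curve g: a weighted data-fitting term, a λ-weighted smoothness term on the second difference of g, and one extra row that pins g(128) to 0. In LaTeX:

O = \sum_{i=1}^{N}\sum_{j=1}^{P}\Big[w(Z_{ij})\big(g(Z_{ij}) - \ln E_i - \ln \Delta t_j\big)\Big]^2 + \lambda \sum_{z}\Big[w(z)\big(g(z-1) - 2g(z) + g(z+1)\big)\Big]^2

where B[j] in the code stores ln Δt_j and the ln E_i are the per-pixel log irradiances returned in lE.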
Code example #19
	float operator()(
		const float lateness
	) const {
		return (lateness < 0.0f) ? weight() : -weight();
	}
Code example #20
File: HDR.cpp Project: brunopop/hdr
	void HDR::radianceMap(Image& radianceMap)
	{
		if (!responseFunctionCalculated)
		{
			throw std::exception("Error in HDR::radianceMap(): no response function available to compute the radiance map. Call HDR::responseFunction() first.");
		}

#ifdef DEBUG
		std::cout << "B, vector of log exposure times, has size " << B.size() << std::endl;
#endif

		// Radiance map is floating point values on 3 channels (we use double precision)
		radianceMap = Image(height, width, CV_32FC3);

		// Construct the map from the log irradiance for each channel
		// log(E) = sum( w(z) * ( g(Z) - log(Delta t) ) ) / sum( w(z) )
		// For all pixels
		for (int i=0; i<height; i++)
		{
			// Pointer to the corresponding row of the output image
			float* e = radianceMap.ptr<float>(i);
			for (int j=0; j<width; j++)
			{
				float lnEBlue = 0, lnEGreen = 0, lnERed = 0;
				float sumBlue = 0, sumGreen = 0, sumRed = 0;
				// For all exposures: compute the weighted sum
				for (int k=0; k<P; k++)
				{
					// Pointer to the corresponding row in image k
					uchar* p = (*images)[k].ptr<uchar>(i);
					// Blue
					int ZikBlue = (int) p[3*j + 0];
					lnEBlue += weight(ZikBlue) * (gBlue[ZikBlue] - B[k]);
					sumBlue += weight(ZikBlue);
					// Green
					int ZikGreen = (int) p[3*j + 1];
					lnEGreen += weight(ZikGreen) * (gGreen[ZikGreen] - B[k]);
					sumGreen += weight(ZikGreen);
					// Red
					int ZikRed = (int) p[3*j + 2];
					lnERed += weight(ZikRed) * (gRed[ZikRed] - B[k]);
					sumRed += weight(ZikRed);
				}
				try
				{
					// Blue
					e[3*j + 0] = exp(lnEBlue/sumBlue);
					// Green
					e[3*j + 1] = exp(lnEGreen/sumGreen);
					// Red
					e[3*j + 2] = exp(lnERed/sumRed);
				}
				catch (std::exception& e)
				{
					std::cout << e.what() << std::endl;
					std::cout << "\ti=" << i << " and j=" << j << " but E has " << radianceMap.rows << " rows, " << radianceMap.cols << " cols and " << radianceMap.channels() << " channels." << std::endl;
					std::cout << "\tlnEBlue=" << lnEBlue << " and sumBlue=" << sumBlue << std::endl;
					std::cout << "\tlnEGreen=" << lnEGreen << " and sumGreen=" << sumGreen << std::endl;
					std::cout << "\tlnERed=" << lnERed << " and sumRed=" << sumRed << std::endl;
				}
			}
		}

		// Set sentinel variable to true
		radianceMapCalculated = true;
	};
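The weighted accumulation in example #20 evaluates the per-channel log-irradiance estimate before exponentiating; in LaTeX, with B[k] = ln Δt_k:

\ln E_i = \frac{\sum_{k} w(Z_{ik})\,\big(g(Z_{ik}) - \ln \Delta t_k\big)}{\sum_{k} w(Z_{ik})}, \qquad E_i = \exp(\ln E_i)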
Code example #21
File: hack.mkobj.c Project: jyin0813/OpenBSD-src
struct obj *
mksobj(int otyp)
{
	struct obj *otmp;
	char let = objects[otyp].oc_olet;

	otmp = newobj(0);
	*otmp = zeroobj;
	otmp->age = moves;
	otmp->o_id = flags.ident++;
	otmp->quan = 1;
	otmp->olet = let;
	otmp->otyp = otyp;
	otmp->dknown = strchr("/=!?*", let) ? 0 : 1;
	switch(let) {
	case WEAPON_SYM:
		otmp->quan = (otmp->otyp <= ROCK) ? rn1(6,6) : 1;
		if(!rn2(11)) otmp->spe = rnd(3);
		else if(!rn2(10)) {
			otmp->cursed = 1;
			otmp->spe = -rnd(3);
		}
		break;
	case FOOD_SYM:
		if(otmp->otyp >= CORPSE) break;
#ifdef NOT_YET_IMPLEMENTED
		/* if tins are to be identified, need to adapt doname() etc */
		if(otmp->otyp == TIN)
			otmp->spe = rnd(...);
#endif /* NOT_YET_IMPLEMENTED */
		/* fall into next case */
	case GEM_SYM:
		otmp->quan = rn2(6) ? 1 : 2;
	case TOOL_SYM:
	case CHAIN_SYM:
	case BALL_SYM:
	case ROCK_SYM:
	case POTION_SYM:
	case SCROLL_SYM:
	case AMULET_SYM:
		break;
	case ARMOR_SYM:
		if(!rn2(8)) otmp->cursed = 1;
		if(!rn2(10)) otmp->spe = rnd(3);
		else if(!rn2(9)) {
			otmp->spe = -rnd(3);
			otmp->cursed = 1;
		}
		break;
	case WAND_SYM:
		if(otmp->otyp == WAN_WISHING) otmp->spe = 3; else
		otmp->spe = rn1(5,
			(objects[otmp->otyp].bits & NODIR) ? 11 : 4);
		break;
	case RING_SYM:
		if(objects[otmp->otyp].bits & SPEC) {
			if(!rn2(3)) {
				otmp->cursed = 1;
				otmp->spe = -rnd(2);
			} else otmp->spe = rnd(2);
		} else if(otmp->otyp == RIN_TELEPORTATION ||
			  otmp->otyp == RIN_AGGRAVATE_MONSTER ||
			  otmp->otyp == RIN_HUNGER || !rn2(9))
			otmp->cursed = 1;
		break;
	default:
		panic("impossible mkobj");
	}
	otmp->owt = weight(otmp);
	return(otmp);
}
Code example #22
void FlowGraph::_invalidateEdge(Edge& e) {
    weight(e, INFINITE_WEIGHT);
    releaseAll(e);
    capacity(e, NO_CAPACITY);
}
Code example #23
File: item.cpp Project: RexWolf163ru/Cataclysm-DDA
bool item::is_two_handed(player *u)
{
  return (weight() > u->str_cur * 4);
}
Code example #24
void FlowGraph::_invalidateArc (Arc& a) {
    weight(a, INFINITE_WEIGHT);
    releaseAll(a);
    capacity(a, NO_CAPACITY);
}
Code example #25
File: item.cpp Project: RexWolf163ru/Cataclysm-DDA
int item::attack_time()
{
 int ret = 65 + 4 * volume() + 2 * weight();
 return ret;
}
Code example #26
void FlowGraph::addDemand(Demand& d) {
    if (!facilityNodesGenerated()) {
        throw std::logic_error("Before calling addDemand function, facilities must be generated (generateFacilityNodes must be called)!");
    }//if
    if (d.identifier() == INVALID_ID) {
        throw std::invalid_argument("Demand's id is not valid.");
    }//if
    
    Node src, trg, kariz;
    Edge srcEdge, trgEdge;
    Arc  srcArc1, trgArc1, srcArc2, trgArc2;
    
    src = this->nodeFromId(d.source());
    trg = this->nodeFromId(d.target());
    
    //Find a free kariz node to assign
    kariz = _demandKariz(d, srcEdge, trgEdge, INVALID_ID);
    
    if (kariz == INVALID) {
        //If no free kariz is found, add kariz and arcs
        kariz   = this->addNode();
        isKariz_[kariz] = true;
        // If src and trg are different nodes, we add two edges
        if (src != trg) {
            srcEdge = this->addEdge(src, kariz);
            trgEdge = this->addEdge(trg, kariz);
        }//if
        else {
            //If src and trg are same, we add one edge
            srcEdge = trgEdge = this->addEdge(src, kariz);
        }//else
    }//if
    
    //Arcs:
    srcArc1 = arcFromId(2 * id(srcEdge) + 1);
    srcArc2 = arcFromId(2 * id(srcEdge));
    if (id(source(srcArc1)) > id(target(srcArc1)))
        std::swap(srcArc1, srcArc2);
    
    trgArc1 = arcFromId(2 * id(trgEdge) + 1);
    trgArc2 = arcFromId(2 * id(trgEdge));
    if (id(source(trgArc1)) > id(target(trgArc1)))
        std::swap(trgArc1, trgArc2);
    
    //set demand d's kariz id
    d.kariz(id(kariz));
//    supply_  [kariz] = KARIZ_DEMAND_VAL;
    demandId_[kariz] = d.identifier();
    
    if (src != trg) {
        //The capacity of these arcs are set to a single unit of bandwidth.
        capacity(srcArc1, BANDWIDTH_UNIT);
        capacity(trgArc1, BANDWIDTH_UNIT);
    }//if
    else {
        //The capacity of these arcs are set to two unit of bandwidth.
        capacity(srcArc1, 2 * BANDWIDTH_UNIT);
    }//else
    
    // The capacity of oposite direction is set to zero
    //to make sure no flow initiate from a kariz
    capacity(srcArc2, NO_CAPACITY);
    capacity(trgArc2, NO_CAPACITY);
    
    //The weights of these arcs are set to 0 to add no extra cost
    weight(srcArc1, NO_WEIGHT);
    weight(trgArc1, NO_WEIGHT);
    weight(srcArc2, INFINITE_WEIGHT);
    weight(trgArc2, INFINITE_WEIGHT);
    
    capacity(srcEdge, BANDWIDTH_UNIT);
    capacity(trgEdge, BANDWIDTH_UNIT);
    weight(srcEdge, NO_WEIGHT);
    weight(trgEdge, NO_WEIGHT);
}
Code example #27
File: bisect.c Project: 4rch17/git
/*
 * zero or positive weight is the number of interesting commits it can
 * reach, including itself.  Especially, weight = 0 means it does not
 * reach any tree-changing commits (e.g. just above uninteresting one
 * but traversal is with pathspec).
 *
 * weight = -1 means it has one parent and its distance is yet to
 * be computed.
 *
 * weight = -2 means it has more than one parent and its distance is
 * unknown.  After running count_distance() first, they will get zero
 * or positive distance.
 */
static struct commit_list *do_find_bisection(struct commit_list *list,
					     int nr, int *weights,
					     int find_all)
{
	int n, counted;
	struct commit_list *p;

	counted = 0;

	for (n = 0, p = list; p; p = p->next) {
		struct commit *commit = p->item;
		unsigned flags = commit->object.flags;

		p->item->util = &weights[n++];
		switch (count_interesting_parents(commit)) {
		case 0:
			if (!(flags & TREESAME)) {
				weight_set(p, 1);
				counted++;
				show_list("bisection 2 count one",
					  counted, nr, list);
			}
			/*
			 * otherwise, it is known not to reach any
			 * tree-changing commit and gets weight 0.
			 */
			break;
		case 1:
			weight_set(p, -1);
			break;
		default:
			weight_set(p, -2);
			break;
		}
	}

	show_list("bisection 2 initialize", counted, nr, list);

	/*
	 * If you have only one parent in the resulting set
	 * then you can reach one commit more than that parent
	 * can reach.  So we do not have to run the expensive
	 * count_distance() for single strand of pearls.
	 *
	 * However, if you have more than one parents, you cannot
	 * just add their distance and one for yourself, since
	 * they usually reach the same ancestor and you would
	 * end up counting them twice that way.
	 *
	 * So we will first count distance of merges the usual
	 * way, and then fill the blanks using cheaper algorithm.
	 */
	for (p = list; p; p = p->next) {
		if (p->item->object.flags & UNINTERESTING)
			continue;
		if (weight(p) != -2)
			continue;
		weight_set(p, count_distance(p));
		clear_distance(list);

		/* Does it happen to be at exactly half-way? */
		if (!find_all && halfway(p, nr))
			return p;
		counted++;
	}

	show_list("bisection 2 count_distance", counted, nr, list);

	while (counted < nr) {
		for (p = list; p; p = p->next) {
			struct commit_list *q;
			unsigned flags = p->item->object.flags;

			if (0 <= weight(p))
				continue;
			for (q = p->item->parents; q; q = q->next) {
				if (q->item->object.flags & UNINTERESTING)
					continue;
				if (0 <= weight(q))
					break;
			}
			if (!q)
				continue;

			/*
			 * weight for p is unknown but q is known.
			 * add one for p itself if p is to be counted,
			 * otherwise inherit it from q directly.
			 */
			if (!(flags & TREESAME)) {
				weight_set(p, weight(q)+1);
				counted++;
				show_list("bisection 2 count one",
					  counted, nr, list);
			}
			else
				weight_set(p, weight(q));

			/* Does it happen to be at exactly half-way? */
			if (!find_all && halfway(p, nr))
				return p;
		}
	}

	show_list("bisection 2 counted all", counted, nr, list);

	if (!find_all)
		return best_bisection(list, nr);
	else
		return best_bisection_sorted(list, nr);
}
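The block comment and the loops above split the work in two: merges get their weight from the expensive count_distance(), while single-parent commits (weight -1) inherit weight(parent) + 1, or weight(parent) if they are TREESAME. A minimal sketch of that cheap fill-in pass on a plain array of single-parent commits (a simplified illustration, not git's data structures):

#include <vector>

// Simplified commit: at most one parent, a TREESAME flag, and a weight
// using the encoding described above (-1 = not computed yet).
struct ChainCommit {
    int parent;      // index of the single parent, -1 for the root
    bool treesame;   // true if it does not change the tracked paths
    int weight;      // starts at -1
};

// Fill weights along a "single strand of pearls": each commit reaches one
// interesting commit more than its parent, unless it is TREESAME itself.
static void fillChainWeights(std::vector<ChainCommit>& chain) {
    bool changed = true;
    while (changed) {
        changed = false;
        for (ChainCommit& c : chain) {
            if (c.weight >= 0)
                continue;
            int parentWeight = (c.parent < 0) ? 0 : chain[c.parent].weight;
            if (c.parent >= 0 && parentWeight < 0)
                continue;                      // parent not computed yet
            c.weight = c.treesame ? parentWeight : parentWeight + 1;
            changed = true;
        }
    }
}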
コード例 #28
0
ファイル: PathTracing.cpp プロジェクト: cslroot/nico
    glm::vec3
    PathTracing::Radiance(const Ray& ray, Random *rnd, int depth)
    {
        Intersection isect;
        bool hit = this->scene_->Intersect(ray, &isect);
        if (!hit) {
            return BACKGROUND_COLOR;
        }

        
        const Primitive* p = isect.hitPrimitive_;
        const dvec3 orienting_normal = dot(isect.normal_, ray.direction_) < 0.0 ? isect.normal_ : -isect.normal_;
        
        // russian roulette
        // TODO: make the probability threshold the color's reflectance
        float russian_roulette_prob = glm::max(p->material_.color_.r, p->material_.color_.g, p->material_.color_.b);
        
        if (depth > MAX_DEPTH) {
            russian_roulette_prob *= pow(0.5, depth - MAX_DEPTH);
        }
        
        if (depth > MIN_DEPTH) {
            if (rnd->next() >= russian_roulette_prob) {
                return p->material_.emission_;
            }
        }
        else {
            russian_roulette_prob = 1.0f;    // force execution of at least MIN_DEPTH bounces
        }
        
        
        vec3 incoming_radiance;
        vec3 weight(1.0);
        
        
        switch (p->material_.reftype_) {
            case REFLECTION_TYPE_DIFFUSE:
            {
                // Build an orthonormal basis (w, u, v) around orienting_normal and sample the next ray within the hemisphere it defines.
                glm::dvec3 w, u, v;
                w = orienting_normal;
                if (fabs(w.x) > Constants::EPS) // Build a vector orthogonal to w; the helper vector differs depending on whether w.x is close to zero.
                    u = normalize(cross(dvec3(0.0, 1.0, 0.0), w));
                else
                    u = normalize(cross(dvec3(1.0, 0.0, 0.0), w));
                
                v = cross(w, u);
                
                // Importance sampling weighted by the cosine term
                const double r1 = 2 * M_PI * rnd->next();
                const double r2 = rnd->next(), r2s = sqrt(r2);
                dvec3 dir = normalize((
                                      u * cos(r1) * r2s +
                                      v * sin(r1) * r2s +
                                      w * sqrt(1.0 - r2)));
                
                incoming_radiance = Radiance(Ray(isect.position_, dir), rnd, depth+1);
                // Considering the Monte Carlo estimator of the rendering equation, outgoing_radiance = weight * incoming_radiance.
                // Here weight = (ρ/π) * cosθ / pdf(ω) / R.
                // ρ/π is the BRDF of a perfectly diffuse surface (ρ is the reflectance), cosθ is the cosine term of the rendering equation, and pdf(ω) is the probability density of the sampled direction.
                // R is the Russian roulette probability.
                // Since we sample with a pdf proportional to the cosine term, pdf(ω) = cosθ/π,
                // so weight = ρ / R.
                weight = p->material_.color_ / (float)russian_roulette_prob;
            }
                break;
            case REFLECTION_TYPE_SPECULAR:
            {
                // Perfect mirror, so the reflection direction is deterministic.
                // Dividing by the Russian roulette probability is the same as above.
                incoming_radiance = Radiance(Ray(isect.position_,
                                                 ray.direction_ - isect.normal_ * 2.0 * dot(isect.normal_, ray.direction_)),
                                             rnd, depth+1);
                weight = p->material_.color_ / russian_roulette_prob;
            }
                break;
            case REFLECTION_TYPE_REFRACTION:
            {
                const Ray reflection_ray = Ray(isect.position_, ray.direction_ - isect.normal_ * 2.0 * dot(isect.normal_, ray.direction_));
                const bool into = dot(isect.normal_, orienting_normal) > 0.0; // whether the ray is entering or leaving the object
                
                // Snell's law
                const double nc = 1.0; // refractive index of vacuum
                const double nt = p->material_.Ior_; // refractive index of the object
                const double nnt = into ? nc / nt : nt / nc;
                const double ddn = dot(ray.direction_, orienting_normal);
                const double cos2t = 1.0 - nnt * nnt * (1.0 - ddn * ddn);
                
                if (cos2t < 0.0) { // total internal reflection
                    incoming_radiance = Radiance(reflection_ray, rnd, depth+1);
                    weight = p->material_.color_ / (float)russian_roulette_prob;
                    break;
                }
                // Refraction direction
                const Ray refraction_ray = Ray(isect.position_,
                                               normalize(ray.direction_ * nnt - isect.normal_ * (into ? 1.0 : -1.0) * (ddn * nnt + sqrt(cos2t))));
                
                // Use Schlick's approximation for the Fresnel reflectance
                const double a = nt - nc, b = nt + nc;
                const double R0 = (a * a) / (b * b);
                
                const double c = 1.0 - (into ? -ddn : dot(refraction_ray.direction_, -1.0 * orienting_normal));
                const double Re = R0 + (1.0 - R0) * pow(c, 5.0); // fraction of light arriving along the reflection direction that is carried toward ray.dir; also the fraction of light from the refraction direction carried toward the reflection direction
                const double nnt2 = pow(into ? nc / nt : nt / nc, 2.0); // the radiance carried by a ray changes by the square of the refractive-index ratio when crossing between media
                const double Tr = (1.0 - Re) * nnt2; // fraction of light arriving along the refraction direction that is carried toward ray.dir
                
                // After a certain depth, trace only one of refraction or reflection (otherwise the number of rays grows exponentially).
                // Decide which one with Russian roulette.
                const double probability  = 0.25 + 0.5 * Re;
                if (depth > 2) {
                    if (rnd->next() < probability) { // reflection
                        incoming_radiance = Radiance(reflection_ray, rnd, depth+1) * (float)Re;
                        weight = p->material_.color_ / (float)(probability * russian_roulette_prob);
                    } else { // refraction
                        incoming_radiance = Radiance(refraction_ray, rnd, depth+1) * (float)Tr;
                        weight = p->material_.color_ / (float)((1.0 - probability) * russian_roulette_prob);
                    }
                } else { // trace both refraction and reflection
                    incoming_radiance =
                        Radiance(reflection_ray, rnd, depth+1) * (float)Re +
                        Radiance(refraction_ray, rnd, depth+1) * (float)Tr;
                    weight = p->material_.color_ / (russian_roulette_prob);
                }
            }
                break;
            default:
                break;
        }
        
        return p->material_.emission_ + incoming_radiance * weight;
    }
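Restated as formulas, the comments in the diffuse and refraction branches above amount to the standard estimator (nothing beyond what the code already computes):

\[
L_o = L_e + \text{weight}\cdot L_i,\qquad
\text{weight} = \frac{(\rho/\pi)\cos\theta}{p(\omega)\,R},\qquad
p(\omega) = \frac{\cos\theta}{\pi}\;\Rightarrow\;\text{weight} = \frac{\rho}{R},
\]

together with Schlick's approximation used for the Fresnel terms:

\[
R_0 = \left(\frac{n_t - n_c}{n_t + n_c}\right)^{2},\qquad
R_e = R_0 + (1 - R_0)(1 - \cos\theta)^{5},\qquad
T_r = (1 - R_e)\,\mathrm{nnt}^{2},
\]

where nnt is the ratio of refractive indices across the boundary and R is the Russian roulette probability.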
コード例 #29
0
bool PartawareProvider::computation() {
	if (!this->m_model) {
		_GT_LOGGER_ERR(this->name(), "Subroutine <computation> failed. Missing model.");
		return (false);
	}
	const openmesh::Mesh &mesh(this->m_model->mesh());
	const algo::KdTree &kd_tree(this->m_model->kdTree());

	algo::Randomizer rand((uint32_t)std::time(NULL));

	// Prepares rays for Volumetric Shape Images.
	_GT_LOGGER_MSG(this->name(), "Preparing rays for Volumetric Shape Images.");
	std::vector<openmesh::Normal> vsi_rays(this->m_vsi_rays_count.data());
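	// Draw uniformly distributed unit directions by rejection sampling:
	// take candidates in the cube [-1,1]^3, keep those inside the unit ball, then normalize.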
	for (int count = 0; count < this->m_vsi_rays_count.data(); ) {
		openmesh::Normal direct(
				2.0 * rand.nextReal() - 1.0,
				2.0 * rand.nextReal() - 1.0,
				2.0 * rand.nextReal() - 1.0);
		if (direct.sqrnorm() > 1) continue;
		vsi_rays[count++] = direct.normalize();
	}

	// Computes Volumetric Shape Images.
	_GT_LOGGER_MSG(this->name(), "Computing Volumetric Shape Images.");
	std::vector<eigen::Vector> vsis(mesh.n_faces());
	std::vector<bool> vsi_omit(mesh.n_faces(), false);
	int cmc_ray_max_trial = this->m_cmc_rays_count.data() * 50;
	real_t reject_angle = std::cos(this->m_cmc_rays_theta.data() * GT_RAD_PER_DGR);
	real_t inf_distance = std::max<real_t>(this->m_model->bound().radius() * this->m_model->bound().radius(), 1e8);
	_GT_LOGGER_BEG_PROG;
	for (size_t i = 0; i < mesh.n_faces(); ++i) vsis[i] = eigen::Vector::Zero(this->m_vsi_rays_count.data());
	for (size_t i = 0; i < mesh.n_faces(); ++i) {
		_GT_LOGGER_UPD_PROG(i, mesh.n_faces());

		// Computes Corresponding Medial Center (CMC) points.
		openmesh::Mesh::FaceHandle handle = mesh.face_handle(i);
		const openmesh::Normal &normal(mesh.normal(handle));
		const openmesh::Point &centroid(this->m_centroids[handle.idx()]);
		real_t min_dist = GT_REAL_MAX;
		if (algo::Util::zero(normal.norm())) continue;
		for (int count = 0, trial = 0; count < this->m_cmc_rays_count.data(); ) {
			if (!(trial < cmc_ray_max_trial)) {
				vsi_omit[i] = true;
				break;
			}
			openmesh::Normal direct(
					2.0 * rand.nextReal() - 1.0,
					2.0 * rand.nextReal() - 1.0,
					2.0 * rand.nextReal() - 1.0);
			if (direct.sqrnorm() > 1) continue;
			direct.normalize();
			real_t angle = -(direct | normal);
			if (angle < reject_angle) continue;
			++trial;
			algo::KdTree::Ray ray(centroid + direct * algo::KdTree::epsilon(), direct);
			const algo::KdTree::Triangle *triangle;
			real_t dist = GT_REAL_MAX;
			if (kd_tree.findNearest(ray, &dist, &triangle)) {
				dist /= angle;
				if (dist < min_dist) min_dist = dist;
				++count;
			}
		}
		if (vsi_omit[i]) continue;

		// Computes VSI local reaches.
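		// The ray origin is pushed inside the shape: half of min_dist along the
		// inward normal from the face centroid, toward the corresponding medial center.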
		openmesh::Point origin = centroid - normal * min_dist * 0.5;
		for (int count = 0; count < this->m_vsi_rays_count.data(); ++count) {
			openmesh::Normal direct_forwd( vsi_rays[count]);
			openmesh::Normal direct_bckwd(-vsi_rays[count]);
			algo::KdTree::Ray ray_forwd(origin, direct_forwd);
			algo::KdTree::Ray ray_bckwd(origin, direct_bckwd);
			real_t dist_forwd = inf_distance;
			real_t dist_bckwd = inf_distance;
			const algo::KdTree::Triangle *triangle;
			kd_tree.findNearest(ray_forwd, &dist_forwd, &triangle);
			kd_tree.findNearest(ray_bckwd, &dist_bckwd, &triangle);
			vsis[i][count] = dist_forwd + dist_bckwd;
		}
	}
	_GT_LOGGER_END_PROG;

	// Interpolates Volumetric Shape Images of omitted faces.
	_GT_LOGGER_MSG(this->name(), "Interpolating Volumetric Shape Images of omitted faces.");
	while (true) {
		std::vector<size_t> vsi_adopt;
		for (size_t i = 0; i < vsi_omit.size(); ++i) {
			if (!vsi_omit[i]) continue;
			size_t valid_neib_count = 0;
			eigen::Vector accum_neib_vsi = eigen::Vector::Zero(this->m_vsi_rays_count.data());
			for (openmesh::Mesh::ConstFaceFaceIter cff_iter = mesh.cff_iter(mesh.face_handle(i)); cff_iter; ++cff_iter) {
				size_t index = cff_iter.handle().idx();
				if (!vsi_omit[index]) {
					++valid_neib_count;
					accum_neib_vsi += vsis[index];
				}
			}
			if (valid_neib_count) {
				vsi_adopt.push_back(i);
				vsis[i] = accum_neib_vsi / valid_neib_count;
			}
		}
		if (vsi_adopt.empty()) break;
		for (size_t i = 0; i < vsi_adopt.size(); ++i) vsi_omit[vsi_adopt[i]] = false;
	}

	// Computes Volumetric Shape Images weighted dual graph.
	_GT_LOGGER_MSG(this->name(), "Computing Volumetric Shape Images weighted dual graph.");
	this->m_vsi_graph.resize(mesh.n_edges());
	for (openmesh::Mesh::ConstEdgeIter ce_iter = mesh.edges_begin(); ce_iter != mesh.edges_end(); ++ce_iter) {
		openmesh::Mesh::EdgeHandle handle = ce_iter.handle();
		if (mesh.is_boundary(handle)) {
			this->m_vsi_graph(handle.idx()) = 0;
		} else {
			const eigen::Vector &vsi_a(vsis[mesh.face_handle(mesh.halfedge_handle(handle, 0)).idx()]);
			const eigen::Vector &vsi_b(vsis[mesh.face_handle(mesh.halfedge_handle(handle, 1)).idx()]);
			eigen::Vector diff = (vsi_a - vsi_b).cwiseAbs2();
			real_t miu = diff.mean();
			eigen::Vector diff_miu = diff.array() - miu;
			real_t sigma2 = diff_miu.cwiseAbs2().sum() / diff_miu.rows();
			real_t sigma = std::sqrt(sigma2);
			eigen::Vector weight(diff.rows());
			for (int i = 0; i < diff.rows(); ++i) {
				weight(i) = (diff_miu(i) < (2 * sigma)) ? std::exp(-(diff_miu(i) * diff_miu(i)) / (2 * sigma2)) : 0;
			}
			this->m_vsi_graph(handle.idx()) = diff.cwiseProduct(weight).sum() / weight.sum();
		}
	}
	if (!algo::Util::zero(this->m_vsi_graph.maxCoeff())) this->m_vsi_graph /= this->m_vsi_graph.maxCoeff();

	// Computes overall weighted dual graph.
	_GT_LOGGER_MSG(this->name(), "Computing overall weighted dual graph.");
	this->m_graph = this->m_geodesic_graph * this->m_geodestic_weight.data()
			+ this->m_angular_graph * this->m_angular_weight.data()
			+ this->m_vsi_graph * this->m_vsi_weight.data();

	return (true);
}
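Written out, the per-edge dissimilarity computed in the last loop is a Gaussian-weighted mean of the squared VSI differences (the same quantities as in the code): for adjacent faces a and b with VSI vectors v_a and v_b,

\[
d_i = (v_{a,i} - v_{b,i})^{2},\qquad
\mu = \frac{1}{n}\sum_i d_i,\qquad
\sigma^{2} = \frac{1}{n}\sum_i (d_i - \mu)^{2},
\]
\[
w_i = \begin{cases} \exp\!\left(-\dfrac{(d_i - \mu)^{2}}{2\sigma^{2}}\right) & d_i - \mu < 2\sigma,\\[4pt] 0 & \text{otherwise}, \end{cases}
\qquad
\text{edge}(a,b) = \frac{\sum_i d_i\, w_i}{\sum_i w_i},
\]

with the whole graph normalized by its maximum entry afterwards.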
コード例 #30
0
ファイル: trans-cfg.cpp プロジェクト: AojiaoZero/hhvm
TransCFG::TransCFG(FuncId funcId,
                   const ProfData* profData,
                   const SrcDB& srcDB,
                   const TcaTransIDMap& jmpToTransID) {
  assert(profData);

  // add nodes
  for (auto tid : profData->funcProfTransIDs(funcId)) {
    assert(profData->transRegion(tid) != nullptr);
    // This will skip DV Funclets if they were already
    // retranslated w/ the prologues:
    if (!profData->optimized(profData->transSrcKey(tid))) {
      int64_t counter = profData->transCounter(tid);
      int64_t weight  = RuntimeOption::EvalJitPGOThreshold - counter;
      addNode(tid, weight);
    }
  }

  // add arcs
  for (TransID dstId : nodes()) {
    SrcKey dstSK = profData->transSrcKey(dstId);
    RegionDesc::BlockPtr dstBlock = profData->transRegion(dstId)->blocks[0];
    const SrcRec* dstSR = srcDB.find(dstSK);
    FTRACE(5, "TransCFG: adding incoming arcs in dstId = {}\n", dstId);
    TransIDSet predIDs = findPredTrans(dstSR, jmpToTransID);
    for (auto predId : predIDs) {
      if (hasNode(predId)) {
        auto predPostConds =
          profData->transRegion(predId)->blocks.back()->postConds();
        SrcKey predSK = profData->transSrcKey(predId);
        if (preCondsAreSatisfied(dstBlock, predPostConds) &&
            predSK.resumed() == dstSK.resumed()) {
          FTRACE(5, "TransCFG: adding arc {} -> {} ({} -> {})\n",
                 predId, dstId, showShort(predSK), showShort(dstSK));
          addArc(predId, dstId, TransCFG::Arc::kUnknownWeight);
        }
      }
    }
  }

  // infer arc weights
  bool changed;
  do {
    changed = false;
    for (TransID tid : nodes()) {
      int64_t nodeWeight = weight(tid);
      if (inferredArcWeight(inArcs(tid),  nodeWeight)) changed = true;
      if (inferredArcWeight(outArcs(tid), nodeWeight)) changed = true;
    }
  } while (changed);

  // guess weight for non-inferred arcs
  for (TransID tid : nodes()) {
    for (auto arc : outArcs(tid)) {
      if (arc->weight() == Arc::kUnknownWeight) {
        arc->setGuessed();
        int64_t arcWgt = std::min(weight(arc->src()), weight(arc->dst())) / 2;
        arc->setWeight(arcWgt);
      }
    }
  }
}
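A rough sketch of what the inference loop is after, under the assumption (not verified against HHVM's inferredArcWeight) that an arc's weight can be fixed once it is the only unknown among a node's incoming or outgoing arcs; any arcs still unknown are then guessed as min(weight(src), weight(dst)) / 2 and marked guessed, exactly as in the final loop above:

#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical flattened arc; the real TransCFG::Arc carries more state.
struct SimpleArc {
  int64_t weight = -1;   // -1 stands in for Arc::kUnknownWeight
  bool guessed = false;
};

// If exactly one arc incident to a node has unknown weight, it must account
// for the remainder of the node's weight (assumption about inferredArcWeight).
static bool inferOne(std::vector<SimpleArc*>& arcs, int64_t nodeWeight) {
  int64_t known = 0;
  SimpleArc* unknown = nullptr;
  for (SimpleArc* a : arcs) {
    if (a->weight < 0) {
      if (unknown) return false;   // more than one unknown: cannot infer yet
      unknown = a;
    } else {
      known += a->weight;
    }
  }
  if (!unknown) return false;
  unknown->weight = std::max<int64_t>(nodeWeight - known, 0);
  return true;
}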