Example #1
 inline float gain(float value) const
 {
     if (value < 0.5f)
         return bias(value * 2.0f) * 0.5f;
     else
         return bias(value * 2.0f - 1.0f) * 0.5f + 0.5f;
 }
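Note: gain() above assumes a matching bias() member that is not shown. A minimal sketch of the classic Perlin bias curve it is typically paired with (the member name m_bias is an assumption, not taken from the snippet; assumes <cmath>):

 inline float bias(float value) const
 {
     // Perlin bias: remaps [0,1] so that bias(0.5f) == m_bias;
     // identity when m_bias == 0.5f
     return powf(value, logf(m_bias) / logf(0.5f));
 }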
Example #2
 // apply a gain to x
 float gain(float gain, float x)
 {
     if(x < 0.5f)
         return bias(1.f-gain, 2.f*x)/2.f;
     else
         return 1.f - bias(1.f-gain, 2.f-2.f*x)/2.f;
 }
Example #3
void
Dynamic::AndBias::fromXml( QXmlStreamReader *reader )
{
    while (!reader->atEnd()) {
        reader->readNext();

        if( reader->isStartElement() )
        {
            Dynamic::BiasPtr bias( Dynamic::BiasFactory::fromXml( reader ) );
            if( bias )
            {
                appendBias( bias );
            }
            else
            {
                warning()<<"Unexpected xml start element"<<reader->name()<<"in input";
                reader->skipCurrentElement();
            }
        }
        else if( reader->isEndElement() )
        {
            break;
        }
    }
}
Example #4
void TextureBuilder::createModules() {
    QSharedPointer<ModuleDescriptor> mod;
    foreach (mod,_modDesc) {
        auto modPtr = mod.data();
        if (this->useRandomFactors() &&  modPtr->enableRandom() ) {
            modPtr->setBias(this->applyRandomFactor(modPtr->bias()) );
            modPtr->setDispl(this->applyRandomFactor(modPtr->displ()) );
            modPtr->setExp(this->applyRandomFactor(modPtr->exp()) );
            modPtr->setFreq(this->applyRandomFactor(modPtr->freq()) );
            modPtr->setLac(this->applyRandomFactor(modPtr->lac()) );
            modPtr->setLbound(this->applyRandomFactor(modPtr->lBound()) );
            modPtr->setPers(this->applyRandomFactor(modPtr->pers()) );
            modPtr->setPow(this->applyRandomFactor(modPtr->pow()) );
            modPtr->setRough(this->applyRandomFactor(modPtr->rough()) );
            modPtr->setScale(this->applyRandomFactor(modPtr->scale()) );
            modPtr->setValue(this->applyRandomFactor(modPtr->value()) );
            modPtr->setX(this->applyRandomFactor(modPtr->x()) );
            modPtr->setY(this->applyRandomFactor(modPtr->y()) );
            modPtr->setZ(this->applyRandomFactor(modPtr->z()) );
        }
        modPtr->setModules(_modules);
        auto ptr = modPtr->makeModule();
        qDebug() << "Module " << modPtr->name() << " After module creation...";
        modPtr->dumpModule();
        _modules.insert(modPtr->name(), ptr);
    }
}
Example #5
Dynamic::BiasPtr
Dynamic::AbstractBiasFactory::createFromXml( QXmlStreamReader *reader )
{
    Dynamic::BiasPtr bias( createBias() );
    bias->fromXml( reader );
    return bias;
}
Example #6
// Recompute gradients of hyperparameters and latent coordinates.
void GPCMMLPKernel::recompute(
    const MatrixXd &gK,                     // Gradient of objective with respect to kernel.
    const MatrixXd &gKd,                    // Gradient of objective with respect to diagonal kernel.
    const MatrixXd* const *X,               // Current latent positions.
    MatrixXd **Xgrad                        // Latent position gradient.
    )
{
    assert(X[0] != NULL && X[1] == NULL); // Make sure we have only one element.
    assert(Xgrad[1] == NULL); // Make sure we have only one element.

    // Constants.
    int N = gKd.rows();

    // Compute gradient of variance.
    vargrad(0,0) += kmat.cwiseProduct(gKd).sum();

    // Compute gradient of weight and bias.
    denominatorCubed.noalias() = denominator.array().pow(3).matrix();
    baseCovGrad.noalias() = var(0,0)*gKd.cwiseQuotient((MatrixXd::Ones(arg.rows(),arg.cols()) - arg.cwiseProduct(arg)).cwiseSqrt());
    vec.noalias() = innerProducts.diagonal();
    wtgrad(0,0) += (innerProducts.cwiseQuotient(denominator) -
        0.5*(numerator.cwiseQuotient(denominatorCubed)).cwiseProduct(
            (wt(0,0)*vec + MatrixXd::Constant(vec.rows(),vec.cols(),bias(0,0)+1.0))*vec.transpose() +
            vec*(wt(0,0)*vec + MatrixXd::Constant(vec.rows(),vec.cols(),bias(0,0)+1.0)).transpose())).cwiseProduct(baseCovGrad).sum();
    biasgrad(0,0) += (denominator.array().inverse().matrix() -
        0.5*(numerator.cwiseQuotient(denominatorCubed)).cwiseProduct(
        (wt(0,0)*vec + MatrixXd::Constant(vec.rows(),vec.cols(),2.0*bias(0,0)+2.0)).replicate(1,vec.rows()) +
        (wt(0,0)*vec.transpose()).replicate(vec.rows(),1))).cwiseProduct(baseCovGrad).sum();

    // Compute X gradients.
    if (Xgrad[0])
    {
        for (int d = 0; d < X[0]->cols(); d++)
        {
            MatrixXd b = (X[0]->rowwise().squaredNorm()*wt(0,0) + VectorXd::Constant(N,bias(0,0)+1.0)).replicate(1,N).cwiseProduct(
                     numerator).cwiseProduct((X[0]->col(d)*wt(0,0)*2.0).transpose().replicate(N,1)).cwiseQuotient(denominatorCubed);
            Xgrad[0]->col(d) += ((X[0]->col(d)*(2.0*wt(0,0))).replicate(1,N).cwiseQuotient(denominator) -
                b).cwiseProduct(baseCovGrad).transpose().rowwise().sum();
        }
    }
}
Example #7
PassRefPtr<FilterEffect> SVGFEConvolveMatrixElement::build(SVGFilterBuilder* filterBuilder)
{
    FilterEffect* input1 = filterBuilder->getEffectById(in1());

    if (!input1)
        return 0;

    Vector<float> kernelMatrixValues;
    SVGNumberList* numbers = kernelMatrix();

    ExceptionCode ec = 0;
    int numberOfItems = numbers->numberOfItems();
    for (int i = 0; i < numberOfItems; ++i)
        kernelMatrixValues.append(numbers->getItem(i, ec));

    int orderXValue = orderX();
    int orderYValue = orderY();
    if (!hasAttribute(SVGNames::orderAttr)) {
        orderXValue = 3;
        orderYValue = 3;
    }
    // The spec says this is a requirement, and we should bail out if it fails
    if (orderXValue * orderYValue != numberOfItems)
        return 0;

    int targetXValue = targetX();
    int targetYValue = targetY();
    if (hasAttribute(SVGNames::targetXAttr) && (targetXValue < 0 || targetXValue >= orderXValue))
        return 0;
    // The spec says the default value is: targetX = floor(orderX / 2)
    if (!hasAttribute(SVGNames::targetXAttr))
        targetXValue = static_cast<int>(floorf(orderXValue / 2));
    if (hasAttribute(SVGNames::targetYAttr) && (targetYValue < 0 || targetYValue >= orderYValue))
        return 0;
    // The spec says the default value is: targetY = floor(orderY / 2)
    if (!hasAttribute(SVGNames::targetYAttr))
        targetYValue = static_cast<int>(floorf(orderYValue / 2));

    float divisorValue = divisor();
    if (hasAttribute(SVGNames::divisorAttr) && !divisorValue)
        return 0;
    if (!hasAttribute(SVGNames::divisorAttr)) {
        for (int i = 0; i < numberOfItems; ++i)
            divisorValue += kernelMatrixValues[i];
        if (!divisorValue)
            divisorValue = 1;
    }

    RefPtr<FilterEffect> effect = FEConvolveMatrix::create(
                                      IntSize(orderXValue, orderYValue), divisorValue,
                                      bias(), IntPoint(targetXValue, targetYValue), static_cast<EdgeModeType>(edgeMode()),
                                      FloatPoint(kernelUnitLengthX(), kernelUnitLengthY()), preserveAlpha(), kernelMatrixValues);
    effect->inputEffects().append(input1);
    return effect.release();
}
Example #8
bvt float_utilst::build_constant(const ieee_floatt &src)
{
  unbiased_floatt result;

  result.sign=const_literal(src.get_sign());
  result.NaN=const_literal(src.is_NaN());
  result.infinity=const_literal(src.is_infinity());
  result.exponent=bv_utils.build_constant(src.get_exponent(), spec.e);
  result.fraction=bv_utils.build_constant(src.get_fraction(), spec.f+1);

  return pack(bias(result));
}
Example #9
 inline float apply_float_controls(float value) const
 {
     value = (value - m_contrast_pivot) * m_contrast + m_contrast_pivot;
     if (m_bias != 0.5f)
         value = bias(value);
     if (m_gain != 0.5f)
         value = gain(value);
     value = value * m_output_range + m_output_min;
     if (m_clamp_min)
         value = std::max(value, m_output_min);
     if (m_clamp_max)
         value = std::min(value, m_output_max);
     return value;
 }
Example #10
//----------------------------------------------------------------------------//
void ScrollablePane::updateContainerPosition(void)
{
    // basePos is the position represented by the scrollbars
    // (these are negated so pane is scrolled in the correct directions)
    UVector2 basePos(cegui_absdim(-getHorzScrollbar()->getScrollPosition()),
                     cegui_absdim(-getVertScrollbar()->getScrollPosition()));
    
    // this bias is the absolute position that 0 on the scrollbars represent.
    // Allows the pane to function correctly with negatively positioned content.
    UVector2 bias(cegui_absdim(d_contentRect.d_min.d_x),
                  cegui_absdim(d_contentRect.d_min.d_y));
    
    // set the new container pane position to be what the scrollbars request
    // minus any bias generated by the location of the content.
    getScrolledContainer()->setPosition(basePos - bias);
}
Example #11
void testBivariateStats(){

 
 float x[]={95.0,85.0,80.0,70.0,60.0};
 float y[]={85.0,95.0,70.0,65.0,70.0};
 regressionCoefficients coeffs;
 
 coeffs=regression(x,y,5);
 
 printf("\nBivariate Stats\n"); 
 printf("correlation =%f\n",correlation(x,y,5));
 printf("covariance =%f\n",covariance(x,y,5));
 printf("rmse =%f\n",rmse(x,y,5));
 printf("bias =%f\n",bias(x,y,5));
 printf("m =%f\n",coeffs.m);
 printf("c =%f\n",coeffs.c);
 
}
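For reference, assuming regression() fits ordinary least squares of y on x (an assumption about this library, not shown above), the coefficients printed are

    m = \sum_i (x_i - \bar{x})(y_i - \bar{y}) / \sum_i (x_i - \bar{x})^2,    c = \bar{y} - m\bar{x}

and correlation, covariance, and rmse are the usual bivariate sample statistics over the same five points.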
Example #12
    void ScrollablePane::updateContainerPosition(void)
    {
        assert(d_container != 0);
        assert(d_horzScrollbar != 0);
        assert(d_vertScrollbar != 0);

        // basePos is the position represented by the scrollbars
        // (these are negated so pane is scrolled in the correct directions)
        Point basePos(-d_horzScrollbar->getScrollPosition(), -d_vertScrollbar->getScrollPosition());

        // this bias is the absolute position that 0 on the scrollbars represent.
        // effectively removes un-used empty space from the pane.
        Point bias(d_contentRect.d_left, d_contentRect.d_top);

        // set the new container pane position to be what the scrollbars request
        // minus any bias generated by the location of the content.
        d_container->setPosition(Absolute, basePos - bias);
    }
Example #13
	// ---------------------------------------------------------------------------------------
	//
	void update()
	{
		float time = ofGetElapsedTimef();
		
		ocl.begin();
		
			cl::ImageGL cl_destImage;
			ofxCL::convert( destImage.getTextureReference(), cl_destImage );
				
			ofxCL::Kernel::Ref noiseKernel = noiseProgram.getKernel( methodNames.at(methodIndex) );
			noiseKernel->global( destImage.getWidth(), destImage.getHeight() );

			float t = time * 0.1f;
		
			// ofParameters do not seem to get passed on correctly. Todo: avoid this workaround.
			float tmpLacunarity = Lacunarity;
			float tmpIncrement = Increment;
			float tmpOctaves = Octaves;
			float tmpAmplitude = Amplitude;
		
			ofVec2f bias(0,0);
		
			if( AutoMove )
			{
				bias.x = ofSignedNoise(  t, t * -0.30f ) * AutoMoveMagnitude;
				bias.y = ofSignedNoise( -t, t *  0.33f ) * AutoMoveMagnitude;
			}
				
			float tmpScale = Scale * ScaleMultiplier;
	
			if( methodIndex == 0 )
			{
				noiseKernel->call( cl_destImage, bias, ofVec2f(tmpScale,tmpScale), tmpAmplitude );
			}
			else
			{
				noiseKernel->call( cl_destImage, bias, ofVec2f(tmpScale,tmpScale), tmpLacunarity, tmpIncrement, tmpOctaves, tmpAmplitude );
			}
			
		ocl.end();
		
	}
Example #14
unsigned int Forge::CalculateOddsRange(const SpecialInfo & specialInfo, Weapon weapon, const std::vector<int> & materials)
{
    // Determine the bias
    Bias bias(DetermineWeaponBias(specialInfo.special, weapon));
    for (size_t i(0); i < materials.size(); ++i)
        bias = ResolveBiases(bias, DetermineMaterialBias(specialInfo.special, materials[i]));

    // Factor in the basic range for the special
    int range(specialInfo.baseOdds);
    switch (bias)
    {
        case Prohibited:    return 0;
        case Guaranteed:    return GuaranteedRange;
        case Likely:        range = (range * 2) + 20; break;
        case Unlikely:      range = (range / 2) - 20; break;
        default:            break;
    }

    return static_cast<unsigned int>(UMAX(range, 0));
}
Example #15
bvt float_utilst::rounder(const unbiased_floatt &src)
{
  // incoming: some fraction (with explicit 1),
  //           some exponent without bias
  // outgoing: rounded, with right size, with hidden bit, bias

  bvt aligned_fraction=src.fraction,
      aligned_exponent=src.exponent;

  {
    std::size_t exponent_bits=
      std::max((std::size_t)integer2size_t(address_bits(spec.f)),
               (std::size_t)spec.e)+1;

    // before normalization, make sure exponent is large enough
    if(aligned_exponent.size()<exponent_bits)
    {
      // sign extend
      aligned_exponent=
        bv_utils.sign_extension(aligned_exponent, exponent_bits);
    }
  }

  // align it!
  normalization_shift(aligned_fraction, aligned_exponent);
  denormalization_shift(aligned_fraction, aligned_exponent);

  unbiased_floatt result;
  result.fraction=aligned_fraction;
  result.exponent=aligned_exponent;
  result.sign=src.sign;
  result.NaN=src.NaN;
  result.infinity=src.infinity;

  round_fraction(result);
  round_exponent(result);

  return pack(bias(result));
}
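For reference, pack(bias(result)) presumably converts the unbiased exponent to the stored IEEE-754 form by adding the exponent bias,

    e_{stored} = e + (2^{spec.e - 1} - 1)

so that, e.g., single precision (spec.e = 8) stores exponent 0 as 127.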
Example #16
// Write kernel data to file.
void GPCMMLPKernel::write(
    GPCMMatWriter *writer                   // Writing interface.
    )
{
    GPCMKernel::write(writer); // Let superclass write first.

    // Write parameters.
    writer->writeDouble(wt(0,0),"weightVariance");
    writer->writeDouble(bias(0,0),"biasVariance");
    writer->writeDouble(var(0,0),"variance");

    // Write priors.
    GPCMMatWriter *cellWriter = writer->writeCell("priors",1,3);
    GPCMMatWriter *priorStruct = cellWriter->writeStruct("",1,1);
    priorStruct->writeDouble(1.0,"index");
    wtprior->write(priorStruct);
    priorStruct = cellWriter->closeStruct();
    priorStruct = cellWriter->writeStruct("",1,1);
    priorStruct->writeDouble(2.0,"index");
    biasprior->write(priorStruct);
    priorStruct = cellWriter->closeStruct();
    priorStruct = cellWriter->writeStruct("",1,1);
    priorStruct->writeDouble(3.0,"index");
    varprior->write(priorStruct);
    cellWriter->closeStruct();
    writer->closeCell();

    // Write transforms struct.
    GPCMMatWriter *xformStruct = writer->writeStruct("transforms",1,3);
    xformStruct->writeDouble(1.0,"index");
    xformStruct->writeString("exp","type");
    xformStruct = writer->closeStruct();
    xformStruct->writeDouble(2.0,"index");
    xformStruct->writeString("exp","type");
    xformStruct = writer->closeStruct();
    xformStruct->writeDouble(3.0,"index");
    xformStruct->writeString("exp","type");
    writer->closeStruct();
}
Example #17
VectorXd LayeredFeedForwardNeuralNet::FireSingleLayer(const VectorXd& inputActivations, long layerIndex) const
{
    // get layer input weights (also checks valid layerIndex)
    const MatrixXd& layerInputWeights = GetLayerInputWeights(layerIndex);
    
    if (layerInputWeights.cols() - 1 != inputActivations.size())
    {
        // input is invalid for this neural net topology
        throw NeuralNetTopologyMismatch("activation input must match number of units in neural network layer");
    }
    
    // get the activation function
    auto expressionParser = UnaryExpressionParserFactory::CreateParser();
    UnaryFunction activationFunction = expressionParser->GetFunctionForExpression(m_activationFunction);
    
    // bias activation
    VectorXd bias(1);
    bias << -1.0;
    
    // calculate layer net inputs
    VectorXd inputPlusBias(layerInputWeights.cols());
    inputPlusBias << inputActivations, bias;
    
    //std::cout << "layer " << layerIndex << " input activations +bias : " << std::endl << inputPlusBias << std::endl << std::endl;
    //std::cout << "layer " << layerIndex << " input weights : " << std::endl << layerInputWeights << std::endl << std::endl;
    
    VectorXd layerNetInputs = layerInputWeights * inputPlusBias;
    
    //std::cout << "layer " << layerIndex << " net inputs : " << std::endl << layerNetInputs << std::endl << std::endl;
    
    // calculate layer activations
    VectorXd layerActivations = layerNetInputs.unaryExpr(activationFunction);
    
    //std::cout << "layer " << layerIndex << " output activations : " << layerActivations << std::endl << std::endl;
    
    return layerActivations;
}
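In matrix form, the layer above computes (with f the parsed activation function and W the (units x inputs+1) weight matrix, whose last column holds the weights for the fixed -1 bias input):

    \mathbf{a} = f\!\left( W \begin{bmatrix} \mathbf{x} \\ -1 \end{bmatrix} \right)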
Example #18
int ColormapEdit::qt_metacall(QMetaObject::Call _c, int _id, void **_a)
{
    _id = QDialog::qt_metacall(_c, _id, _a);
    if (_id < 0)
        return _id;
    if (_c == QMetaObject::InvokeMetaMethod) {
        switch (_id) {
        case 0: newColorList((*reinterpret_cast< QList<QColor>(*)>(_a[1]))); break;
        case 1: rotate((*reinterpret_cast< int(*)>(_a[1]))); break;
        case 2: bias((*reinterpret_cast< int(*)>(_a[1]))); break;
        case 3: contrast((*reinterpret_cast< int(*)>(_a[1]))); break;
        case 4: setColor(); break;
        case 5: okPressed(); break;
        case 6: applyPressed(); break;
        case 7: biasReset(); break;
        case 8: rotateReset(); break;
        case 9: contrastRest(); break;
        case 10: updateColormap(); break;
        case 11: resetMap((*reinterpret_cast< int(*)>(_a[1]))); break;
        }
        _id -= 12;
    }
    return _id;
}
Example #19
// Return covariance of a single set of points.
MatrixXd GPCMMLPKernel::covariance(
    const MatrixXd* const *X                // Data matrix.
    )
{
    assert(X[0] != NULL && X[1] == NULL); // Make sure we have only one element.

    innerProducts.noalias() = (*X[0])*X[0]->transpose();
    numerator.noalias() = innerProducts*wt(0,0) + MatrixXd::Constant(X[0]->rows(),X[0]->rows(),bias(0,0));
    vec.noalias() = numerator.diagonal() + VectorXd::Constant(X[0]->rows(),1.0);
    denominator.noalias() = (vec*vec.transpose()).cwiseSqrt();
    arg.noalias() = numerator.cwiseQuotient(denominator);
    kmat.noalias() = arg.array().asin().matrix();

    return var(0,0)*kmat;
}
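Reading covariance() above, the kernel being evaluated appears to be the standard MLP (arcsine) covariance; in the snippet's notation (var = v, wt = w, bias = b):

    k(\mathbf{x}, \mathbf{x}') = v \arcsin\!\left( \frac{w\,\mathbf{x}^{\top}\mathbf{x}' + b}{\sqrt{(w\,\|\mathbf{x}\|^2 + b + 1)(w\,\|\mathbf{x}'\|^2 + b + 1)}} \right)

which is what recompute() in Example #6 differentiates with respect to v, w, b and the latent positions.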
Example #20
void G1BiasedMappedArrayBase::verify_biased_index_inclusive_end(idx_t biased_index) const {
  guarantee(_biased_base != NULL, "Array not initialized");
  guarantee(biased_index >= bias() && biased_index <= (bias() + length()),
            "Biased index out of inclusive bounds, index: " SIZE_FORMAT " bias: " SIZE_FORMAT " length: " SIZE_FORMAT,
            biased_index, bias(), length());
}
Example #21
bvt float_utilst::add_sub(
  const bvt &src1,
  const bvt &src2,
  bool subtract)
{
  unbiased_floatt unpacked1=unpack(src1);
  unbiased_floatt unpacked2=unpack(src2);

  // subtract?
  if(subtract)
    unpacked2.sign=!unpacked2.sign;

  // figure out which operand has the bigger exponent
  const bvt exponent_difference=subtract_exponents(unpacked1, unpacked2);
  literalt src2_bigger=exponent_difference.back();

  const bvt bigger_exponent=
    bv_utils.select(src2_bigger, unpacked2.exponent, unpacked1.exponent);

  // swap fractions as needed
  const bvt new_fraction1=
    bv_utils.select(src2_bigger, unpacked2.fraction, unpacked1.fraction);

  const bvt new_fraction2=
    bv_utils.select(src2_bigger, unpacked1.fraction, unpacked2.fraction);

  // compute distance
  const bvt distance=bv_utils.absolute_value(exponent_difference);

  // limit the distance: shifting more than f+3 bits is unnecessary
  const bvt limited_dist=limit_distance(distance, spec.f+3);

  // pad fractions with 2 zeros from below
  const bvt fraction1_padded=bv_utils.concatenate(bv_utils.zeros(3), new_fraction1);
  const bvt fraction2_padded=bv_utils.concatenate(bv_utils.zeros(3), new_fraction2);

  // shift new_fraction2
  literalt sticky_bit;
  const bvt fraction1_shifted=fraction1_padded;
  const bvt fraction2_shifted=sticky_right_shift(
    fraction2_padded, limited_dist, sticky_bit);

  // sticky bit: or of the bits lost by the right-shift
  bvt fraction2_stickied=fraction2_shifted;
  fraction2_stickied[0]=prop.lor(fraction2_shifted[0], sticky_bit);

  // need to have two extra fraction bits for addition and rounding
  const bvt fraction1_ext=bv_utils.zero_extension(fraction1_shifted, fraction1_shifted.size()+2);
  const bvt fraction2_ext=bv_utils.zero_extension(fraction2_stickied, fraction2_stickied.size()+2);

  unbiased_floatt result;

  // now add/sub them
  literalt subtract_lit=prop.lxor(unpacked1.sign, unpacked2.sign);
  result.fraction=
    bv_utils.add_sub(fraction1_ext, fraction2_ext, subtract_lit);

  // sign of result
  literalt fraction_sign=result.fraction.back();
  result.fraction=bv_utils.absolute_value(result.fraction);

  result.exponent=bigger_exponent;

  // adjust the exponent for the fact that we added two bits to the fraction
  result.exponent=
    bv_utils.add(bv_utils.sign_extension(result.exponent, result.exponent.size()+1),
      bv_utils.build_constant(2, result.exponent.size()+1));

  // NaN?
  result.NaN=prop.lor(
      prop.land(prop.land(unpacked1.infinity, unpacked2.infinity),
                prop.lxor(unpacked1.sign, unpacked2.sign)),
      prop.lor(unpacked1.NaN, unpacked2.NaN));

  // infinity?
  result.infinity=prop.land(
      !result.NaN,
      prop.lor(unpacked1.infinity, unpacked2.infinity));

  // zero?
  // Note that:
  //  1. The zero flag isn't used apart from in divide and
  //     is only set on unpack
  //  2. Subnormals mean that addition or subtraction can't round to 0,
  //     thus we can perform this test now
  //  3. The rules for sign are different for zero
  result.zero = prop.land(
      !prop.lor(result.infinity, result.NaN),
      !prop.lor(result.fraction));


  // sign
  literalt add_sub_sign=
    prop.lxor(prop.lselect(src2_bigger, unpacked2.sign, unpacked1.sign),
              fraction_sign);

  literalt infinity_sign=
    prop.lselect(unpacked1.infinity, unpacked1.sign, unpacked2.sign);

  #if 1
  literalt zero_sign=
    prop.lselect(rounding_mode_bits.round_to_minus_inf,
                 prop.lor(unpacked1.sign, unpacked2.sign),
                 prop.land(unpacked1.sign, unpacked2.sign));

  result.sign=prop.lselect(
    result.infinity,
    infinity_sign,
    prop.lselect(result.zero,
                 zero_sign,
                 add_sub_sign));
  #else
  result.sign=prop.lselect(
    result.infinity,
    infinity_sign,
    add_sub_sign);
  #endif

  #if 0
  result.sign=const_literal(false);
  result.fraction.resize(spec.f+1, const_literal(true));
  result.exponent.resize(spec.e, const_literal(false));
  result.NaN=const_literal(false);
  result.infinity=const_literal(false);
  //for(std::size_t i=0; i<result.fraction.size(); i++)
  //  result.fraction[i]=const_literal(true);

  for(std::size_t i=0; i<result.fraction.size(); i++)
    result.fraction[i]=new_fraction2[i];

  return pack(bias(result));
  #endif

  return rounder(result);
}
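A compact statement of the alignment step above: to add f_1 2^{e_1} and f_2 2^{e_2} with e_1 >= e_2, the smaller fraction is shifted right by e_1 - e_2 and the OR of the shifted-out bits is kept as the sticky bit,

    f_1 2^{e_1} \pm f_2 2^{e_2} = \left( f_1 \pm (f_2 \gg (e_1 - e_2)) \right) 2^{e_1}

so round-to-nearest can still distinguish exact ties from values just above or below them; shifting by more than spec.f + 3 bits cannot change the rounded result, which is why the distance is limited.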
Example #22
/********************************************************
 *
 *
 * Draw Robot
 *
 *
 ********************************************************/
void Virtual3dCharacterViewer::glDrawBody(Character3DBody& body)
{
    glPushMatrix();


    // getHipPos() ; // get Hip's Position from the IK calculator .
    LocateHip (body);
    body.HipPosInCamera = getCurTranslation () ;
    body.HipPosInScreen = getScreenPos(body.HipPosInCamera) ;
    RotateHip(body) ;
    glPushMatrix();     // save the Hip Matrix
    drawHip (body) ;

    glTranslatef( 0.0, body.Model.LenHipToChest, 0.0 ) ;
    RotateChest (body);
    drawChest (body) ;


    glTranslatef(0.0, body.Model.LenChestToNeck, 0.0);

    body.NeckPosInCamera = getCurTranslation () ;
    body.NeckPos = TransformVector( WorldMatInv, body.NeckPosInCamera ) ;
    body.NeckPosInScreen = getScreenPos(body.NeckPosInCamera) ;


    glPushMatrix();     // save the Neck Matrix

    RotateLeftShoulder(body);
    drawLeftShoulder(body);

    glTranslatef(body.Model.LenNeckToShoulder, 0.0, 0.0);
    RotateLeftUpperArm(body);
    drawLeftUpperArm(body);


    glTranslatef(body.Model.LenShoulderToElbow, 0.0, 0.0);
    RotateLeftLowerArm(body);
    drawLeftLowerArm(body);



    glTranslatef(body.Model.LenElbowToWrist, 0.0, 0.0);

    body.LeftWristPosInCamera = getCurTranslation () ;
    Vector3D bias(-1,0,0) ;
    body.LeftWristPos = TransformVector( WorldMatInv, body.LeftWristPosInCamera );
    body.LeftWristPosInScreen = getScreenPos(body.LeftWristPosInCamera) ;

    RotateLeftHand(body);
    drawLeftHand(body);


    glPopMatrix();      // reload the Neck Matrix
    glPushMatrix();     // restore the Neck Matrix again .


    RotateRightShoulder(body);
    drawRightShoulder(body);

    glTranslatef(-body.Model.LenNeckToShoulder, 0.0, 0.0);
    RotateRightUpperArm(body);
    drawRightUpperArm(body);


    glTranslatef(-body.Model.LenShoulderToElbow, 0.0, 0.0);
    RotateRightLowerArm(body);
    drawRightLowerArm(body);

    glTranslatef(-body.Model.LenElbowToWrist, 0.0, 0.0);

    body.RightWristPosInCamera = getCurTranslation () ;
    body.RightWristPos = TransformVector( WorldMatInv, body.RightWristPosInCamera ) ;
    body.RightWristPosInScreen = getScreenPos(body.RightWristPosInCamera) ;

    RotateRightHand(body);
    drawRightHand(body);


    glPopMatrix();      // reload the Neck Matrix again .

    RotateHead(body);
    drawHead (body);
    glTranslatef(0.0, body.Model.LenNeckToHead, 0.0);
    body.HeadPosInCamera = getCurTranslation () ;
    body.HeadPos = TransformVector( WorldMatInv, body.HeadPosInCamera ) ;
    body.HeadPosInScreen = getScreenPos(body.HeadPosInCamera) ;

    glPopMatrix();      // reload the Hip Matrix .
    glPushMatrix();     // restore the Hip Matrix again.

    RotateLeftThighRoot (body);
    drawLeftThighRoot(body);

    glTranslatef(body.Model.LenHipToThigh, 0.0, 0.0);

    RotateLeftThigh (body) ;
    drawLeftThigh(body);

    glTranslatef(0.0, -body.Model.LenThighToKnee, 0.0);

    RotateLeftShank(body);
    drawLeftShank(body);

    glTranslatef(0.0, -body.Model.LenKneeToAnkle, 0.0);
    body.LeftAnklePosInCamera = getCurTranslation();
    body.LeftAnklePos = TransformVector( WorldMatInv, body.LeftAnklePosInCamera ) ;
    body.LeftAnklePosInScreen = getScreenPos(body.LeftAnklePosInCamera) ;

    RotateLeftFoot (body);
    drawLeftFoot(body);


    glPopMatrix();      // reload the Hip Matrix again.


    RotateRightThighRoot (body);
    drawRightThighRoot(body);

    glTranslatef(-body.Model.LenHipToThigh, 0.0, 0.0);

    RotateRightThigh (body) ;
    drawRightThigh(body);

    glTranslatef(0.0, -body.Model.LenThighToKnee, 0.0);

    RotateRightShank(body);
    drawRightShank(body);

    glTranslatef(0.0, -body.Model.LenKneeToAnkle, 0.0);
    body.RightAnklePosInCamera = getCurTranslation();
    body.RightAnklePos = TransformVector( WorldMatInv, body.RightAnklePosInCamera ) ;
    body.RightAnklePosInScreen = getScreenPos(body.RightAnklePosInCamera) ;



    RotateRightFoot (body);
    drawRightFoot(body);


    glPopMatrix();
}
Example #23
void MD5::prepModel(void)
{
    // Prepare Buffer Objects
    int offset = 0;
    size_t vboSize = sizeof(glm::vec2) + sizeof(glm::vec4) + sizeof(glm::mat4);
    glGenBuffers(1, &vbo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, vboSize*numVerts, NULL, GL_STATIC_DRAW);

    // Generate Vertex Buffer Object
    for(int i = 0; i < numMeshes; ++i)
    {
        for(int j = 0; j < meshList[i].getNumVert(); ++j)
        {
            Mesh::Vertex v = meshList[i].getVerts(j);
            // Add UV coords
            glBufferSubData(GL_ARRAY_BUFFER, offset, sizeof(glm::vec2), glm::value_ptr(v.uv));
            offset += sizeof(glm::vec2);

            // Add vertex weights
            glm::vec4 bias(0.0f);
            glm::mat4 pos(0.0f);
            int index = v.weightIndex;
            for(int k = 0; k < v.weightElem; ++k)
            {
                Mesh::Weight w = meshList[i].getWeight(index+k);
                bias[k] = w.value;
                glm::vec4 posi(0, 0, 0, -1);
                posi.x = w.position.x;
                posi.y = w.position.y;
                posi.z = w.position.z;
                posi.w = w.jointIndex;
                pos[k] = posi;
            }

            glBufferSubData(GL_ARRAY_BUFFER, offset, sizeof(glm::vec4), glm::value_ptr(bias));
            offset += sizeof(glm::vec4);
            glBufferSubData(GL_ARRAY_BUFFER, offset, sizeof(glm::mat4), glm::value_ptr(pos));

            offset += sizeof(glm::mat4); // advance to the next interleaved vertex record (stride == vboSize)
        }
    }

    // Generate Index Buffer Object
    glGenBuffers(1, &ibo);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(GLushort)*3*numTris, NULL, GL_STATIC_DRAW);

    int indexBase = 0;  // first vertex of the current mesh within the shared VBO
    int byteOffset = 0; // write position in the IBO, in bytes

    for(int i = 0; i < numMeshes; ++i)
    {
        for(int j = 0; j < meshList[i].getNumTri(); ++j)
        {
            Mesh::Triangle t = meshList[i].getTris(j);
            t.v1 += indexBase;
            t.v2 += indexBase;
            t.v3 += indexBase;
            glBufferSubData(GL_ELEMENT_ARRAY_BUFFER, byteOffset, sizeof(GLushort)*3, (GLvoid*)&t);
            byteOffset += sizeof(GLushort)*3;
        }

        indexBase += meshList[i].getNumVert();
    }

    GLuint uvLoc = 0;
    GLuint wBias = 1;
    GLuint wPos = 2;

    if(GLEW_ARB_uniform_buffer_object)
    {
        glBindAttribLocation(shaderProgram, 0, "uv");
        glBindAttribLocation(shaderProgram, 1, "wBias");
        glBindAttribLocation(shaderProgram, 2, "wPos");
    }
    else
    {
        uvLoc = glGetAttribLocation(shaderProgram, "uv");
        wBias = glGetAttribLocation(shaderProgram, "wBias");
        wPos = glGetAttribLocation(shaderProgram, "wPos");
    }

        // Generate and bind Vertex Array Object
    glGenVertexArrays(1, &vao);
    glBindVertexArray(vao);

    //Enable and link attributes
    glEnableVertexAttribArray(uvLoc);
    glEnableVertexAttribArray(wBias);
    glEnableVertexAttribArray(wPos);
    glEnableVertexAttribArray(wPos+1);
    glEnableVertexAttribArray(wPos+2);
    glEnableVertexAttribArray(wPos+3);

    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glVertexAttribPointer(uvLoc, 2, GL_FLOAT, GL_FALSE, vboSize, 0);
    glVertexAttribPointer(wBias, 4, GL_FLOAT, GL_FALSE, vboSize, (GLvoid*)(sizeof(glm::vec2)));
    for(int i = 0; i < 4; ++i)
    {
        glVertexAttribPointer(wPos+i, 4, GL_FLOAT, GL_FALSE, vboSize, (GLvoid*)(sizeof(glm::vec2)+sizeof(glm::vec4)+(sizeof(glm::vec4)*i)));
    }
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);

    glBindVertexArray(0);
}
Example #24
// Return covariance of two sets of points.
MatrixXd GPCMMLPKernel::covariance(
    const MatrixXd* const *X1,              // First data matrix.
    const MatrixXd* const *X2               // Second data matrix.
    )
{
    assert(X1[0] != NULL && X1[1] == NULL); // Make sure we have only one element.
    assert(X2[0] != NULL && X2[1] == NULL); // Make sure we have only one element.

    return var(0,0)*(((((*X1[0])*X2[0]->transpose())*wt(0,0) + MatrixXd::Constant(X1[0]->rows(),X2[0]->rows(),bias(0,0))).cwiseQuotient(
        ((X1[0]->rowwise().squaredNorm()*wt(0,0) + MatrixXd::Constant(X1[0]->rows(),1,bias(0,0)+1.0))*
         (X2[0]->rowwise().squaredNorm()*wt(0,0) + MatrixXd::Constant(X2[0]->rows(),1,bias(0,0)+1.0)).transpose()).cwiseSqrt())).array().asin().matrix());
}
Example #25
template <bool is_fwd>
void _jit_avx512_core_fp32_wino_conv_4x3_t<is_fwd>::_execute_data_W_SGD(
        float *inp_ptr, float *out_ptr, float *wei_ptr, float *bias_ptr,
        const memory_tracking::grantor_t &scratchpad) const {
    const auto &jcp = kernel_->jcp;
    const auto &p_ops = attr_->post_ops_;

    const int inph = is_fwd ? jcp.ih : jcp.oh;
    const int inpw = is_fwd ? jcp.iw : jcp.ow;
    const int outh = is_fwd ? jcp.oh : jcp.ih;
    const int outw = is_fwd ? jcp.ow : jcp.iw;

    array_offset_calculator<float, 5> input(inp_ptr,
        jcp.mb, jcp.dimK/jcp.dimK_reg_block, inph, inpw, jcp.dimK_reg_block);
    array_offset_calculator<float, 5> output(out_ptr,
        jcp.mb, jcp.dimM/jcp.dimM_simd_block, outh, outw, jcp.dimM_simd_block);
    array_offset_calculator<float, 6> weights(wei_ptr,
        jcp.oc/jcp.oc_simd_block, jcp.ic/jcp.ic_simd_block, jcp.kh, jcp.kw,
        jcp.ic_simd_block, jcp.oc_simd_block);
    array_offset_calculator<float, 2> bias(bias_ptr,
        jcp.oc/jcp.oc_simd_block, jcp.oc_simd_block);

    auto wino_wei = (jcp.prop_kind == prop_kind::forward_inference)
                ? wei_ptr
                : scratchpad.template get<float>(key_wino_U);

    array_offset_calculator<float, 8> U(wino_wei,
            jcp.dimM_nb_block,
            alpha, alpha,
            jcp.dimK_nb_block,
            jcp.dimM_block  * jcp.dimM_reg_block, jcp.dimK_block,
            jcp.dimK_reg_block, jcp.dimM_simd_block);

    array_offset_calculator<float, 8> M(is_fwd
            ? scratchpad.template get<float>(key_wino_M)
            : scratchpad.template get<float>(key_wino_V),
            0, jcp.dimM_nb_block, alpha, alpha,
            jcp.dimN_block, jcp.dimM_block * jcp.dimM_reg_block,
            jcp.dimN_reg_block, jcp.dimM_simd_block);
    array_offset_calculator<float, 8> V(is_fwd
            ? scratchpad.template get<float>(key_wino_V)
            : scratchpad.template get<float>(key_wino_M),
            0, alpha, alpha, jcp.dimN_block,
            jcp.dimK_nb_block, jcp.dimK_block,
            jcp.dimN_reg_block, jcp.dimK_reg_block);

    const bool wants_padded_bias = jcp.with_bias
        && jcp.oc_without_padding != jcp.oc;
    float last_slice_bias[simd_w] = {0};
    if (wants_padded_bias) {
        for (int oc = 0; oc < jcp.oc_without_padding % jcp.oc_simd_block; ++oc)
            last_slice_bias[oc] = bias(jcp.dimM / jcp.dimM_simd_block - 1, oc);
    }

    if (jcp.prop_kind != prop_kind::forward_inference) {

        parallel_nd(jcp.nb_oc, jcp.nb_ic, (jcp.oc_block * jcp.oc_reg_block), (jcp.ic_block * jcp.ic_reg_block),
                    [&](int ofm1, int ifm1, int ofm2, int ifm2) {
            float *U_base_ptr = is_fwd
                              ? &(U(ofm1, 0, 0, ifm1, ofm2, ifm2, 0, 0))
                              : &(U(ifm1, 0, 0, ofm1, ifm2, ofm2, 0, 0));
            weight_transform_data(jcp,
                    &(weights(
                        ofm1 * jcp.oc_block * jcp.oc_reg_block + ofm2,
                        ifm1 * jcp.ic_block * jcp.ic_reg_block + ifm2,
                        0, 0, 0, 0)),
                    U_base_ptr);
        });
    }

PRAGMA_OMP(parallel)
    {

    int ithr = mkldnn_get_thread_num();

PRAGMA_OMP(for schedule(static))
    for (int tile_block = 0; tile_block < jcp.tile_block; tile_block++) {
        for (int K_blk1 = 0; K_blk1 < jcp.dimK_nb_block; K_blk1++) {
            for (int K_blk2 = 0; K_blk2 < jcp.dimK_block; K_blk2++) {

                input_transform_tileblock_data(
                        tile_block, jcp,
                        &(input(0, K_blk1 * jcp.dimK_block + K_blk2, 0, 0, 0)),
                        &(V(ithr, 0, 0, 0, K_blk1, K_blk2, 0, 0)));
            }
        }

        for (int oj = 0; oj < alpha; oj++) {
            for (int oi = 0; oi < alpha; oi++) {
                for (int M_blk1 = 0; M_blk1 < jcp.dimM_nb_block; M_blk1++)
                for (int K_blk1 = 0; K_blk1 < jcp.dimK_nb_block; K_blk1++)
                for (int N_blk = 0; N_blk < jcp.dimN_block; N_blk++)
                    kernel_->gemm_loop_ker(
                            (float *)&(M(ithr, M_blk1, oj, oi,
                                    N_blk, 0, 0, 0)),
                            (const float *)&(U(M_blk1, oj, oi, K_blk1,
                                    0, 0, 0, 0)),
                            (const float *)&(V(ithr, oj, oi,
                                    N_blk, K_blk1, 0, 0, 0)), K_blk1);
            }
        }

        for (int M_blk1 = 0; M_blk1 < jcp.dimM_nb_block; M_blk1++) {
            for (int M_blk2 = 0; M_blk2 < jcp.dimM_block * jcp.dimM_reg_block;
                  M_blk2++) {
                const int M_blk =
                    M_blk1 * jcp.dimM_block  * jcp.dimM_reg_block + M_blk2;

                float *bias_ptr = wants_padded_bias
                    && M_blk == jcp.dimM / jcp.dimM_simd_block - 1
                    ? last_slice_bias : &bias(M_blk, 0);

                output_transform_tileblock_data(tile_block, jcp, p_ops,
                        &(M(ithr, M_blk1, 0, 0, 0, M_blk2, 0, 0)),
                        &(output(0, M_blk, 0, 0, 0)), bias_ptr);
            }
        }
    }
    }
}
Example #26
template <bool is_fwd>
void _jit_avx512_core_fp32_wino_conv_4x3_t<is_fwd>::_execute_data_W_S_G_D(
        float *inp_ptr, float *out_ptr, float *wei_ptr, float *bias_ptr,
        const memory_tracking::grantor_t &scratchpad) const {
    const auto &jcp = kernel_->jcp;
    const auto &p_ops = attr_->post_ops_;

    const int inph = is_fwd ? jcp.ih : jcp.oh;
    const int inpw = is_fwd ? jcp.iw : jcp.ow;
    const int outh = is_fwd ? jcp.oh : jcp.ih;
    const int outw = is_fwd ? jcp.ow : jcp.iw;

    /* Notation:
       FWD: dimM:oc, dimN:ntiles, dimK:ic,
       BWD: dimM:ic, dimN:ntiles, dimK:oc,
       FWD/BWD: V: src/diff_dst transform, U:weight transform,
                M:dst/diff_src transform  */
    array_offset_calculator<float, 5> input(inp_ptr,
            jcp.mb, jcp.dimK/jcp.dimK_reg_block, inph, inpw,
            jcp.dimK_reg_block);
    array_offset_calculator<float, 5> output(out_ptr,
            jcp.mb, jcp.dimM/jcp.dimM_simd_block, outh, outw,
            jcp.dimM_simd_block);
    array_offset_calculator<float, 6> weights(wei_ptr,
            jcp.oc/jcp.oc_simd_block, jcp.ic/jcp.ic_simd_block, jcp.kh, jcp.kw,
            jcp.ic_simd_block, jcp.oc_simd_block);
    array_offset_calculator<float, 2> bias(bias_ptr,
            jcp.dimM/jcp.dimM_simd_block, jcp.dimM_simd_block);

    array_offset_calculator<float, 8> M(is_fwd
            ? scratchpad.template get<float>(key_wino_M)
            : scratchpad.template get<float>(key_wino_V),
            jcp.dimN_nb_block, jcp.dimM_nb_block,
            alpha, alpha,
            jcp.dimN_block, jcp.dimM_block * jcp.dimM_reg_block,
            jcp.dimN_reg_block, jcp.dimM_simd_block);

    auto wino_wei = (jcp.prop_kind == prop_kind::forward_inference)
            ? wei_ptr
            : scratchpad.template get<float>(key_wino_U);

    array_offset_calculator<float, 8> U(wino_wei,
            jcp.dimM_nb_block,
            alpha, alpha,
            jcp.dimK_nb_block,
            jcp.dimM_block * jcp.dimM_reg_block, jcp.dimK_block,
            jcp.dimK_reg_block, jcp.dimM_simd_block);
    array_offset_calculator<float, 8> V(is_fwd
            ? scratchpad.template get<float>(key_wino_V)
            : scratchpad.template get<float>(key_wino_M),
            jcp.dimN_nb_block, alpha, alpha,
            jcp.dimN_block, jcp.dimK_nb_block,
            jcp.dimK_block, jcp.dimN_reg_block, jcp.dimK_reg_block);

    const bool wants_padded_bias = jcp.with_bias
        && jcp.oc_without_padding != jcp.oc;
    float last_slice_bias[simd_w] = {0};
    if (wants_padded_bias) {
        for (int oc = 0; oc < jcp.oc_without_padding % jcp.oc_simd_block; ++oc)
            last_slice_bias[oc] = bias(jcp.dimM / jcp.dimM_simd_block - 1, oc);
    }

PRAGMA_OMP(parallel)
    {

        parallel_nd_in_omp(jcp.mb, jcp.dimK_nb_block, jcp.dimK_block,
                [&](int img, int K_blk1, int K_blk2) {
                input_transform_data(img, jcp,
                    &(input(img, K_blk1 * jcp.dimK_block + K_blk2,
                            0, 0, 0)),
                        &(V(0, 0, 0, 0, K_blk1, K_blk2, 0, 0)));
                });

        if (jcp.prop_kind != prop_kind::forward_inference) {
            parallel_nd_in_omp(jcp.nb_oc, jcp.nb_ic, (jcp.oc_block * jcp.oc_reg_block),
                (jcp.ic_block * jcp.ic_reg_block),
                [&](int ofm1, int ifm1, int ofm2, int ifm2) {
                    float *U_base_ptr = is_fwd
                        ? &(U(ofm1, 0, 0, ifm1, ofm2, ifm2, 0, 0))
                        : &(U(ifm1, 0, 0, ofm1, ifm2, ofm2, 0, 0));
                    weight_transform_data(jcp,
                        &(weights(
                                ofm1 * jcp.oc_block * jcp.oc_reg_block + ofm2,
                                ifm1 * jcp.ic_block * jcp.ic_reg_block + ifm2,
                                0, 0, 0, 0)),
                        U_base_ptr);
            });
        }

PRAGMA_OMP(barrier)

        parallel_nd_in_omp(jcp.dimN_nb_block, alpha, alpha, jcp.dimM_nb_block,
            [&](int N_blk1, int oj, int oi, int M_blk1) {
            for (int K_blk1 = 0; K_blk1 < jcp.dimK_nb_block;
                 K_blk1++)
            for (int N_blk2 = 0; N_blk2 < jcp.dimN_block; N_blk2++)
                kernel_->gemm_loop_ker(
                        (float *)&(M(N_blk1, M_blk1, oj, oi,
                            N_blk2, 0, 0, 0)),
                        (const float *)&(U(M_blk1, oj, oi,
                            K_blk1, 0, 0, 0, 0)),
                        (const float *)&(V(N_blk1, oj, oi,
                            N_blk2, K_blk1, 0, 0, 0)), K_blk1);
        });

PRAGMA_OMP(barrier)

        parallel_nd_in_omp(jcp.mb, jcp.dimM_nb_block, (jcp.dimM_block * jcp.dimM_reg_block),
                    [&](int img, int M_blk1, int M_blk2) {
            const int M_blk =
                M_blk1 * jcp.dimM_block  * jcp.dimM_reg_block + M_blk2;

            float *bias_ptr = wants_padded_bias
                && M_blk == jcp.dimM / jcp.dimM_simd_block - 1
                ? last_slice_bias : &bias(M_blk, 0);
            output_transform_data(img, jcp, p_ops,
                    &(M(0, M_blk1, 0, 0, 0, M_blk2, 0, 0)),
                    &(output(img, M_blk, 0, 0, 0)), bias_ptr);
        });

    }
}
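For orientation: the 4x3 in the class name presumably refers to Winograd F(4x4, 3x3), for which the transform tile size is alpha = m + r - 1 = 4 + 3 - 1 = 6; the alpha x alpha loops over (oj, oi) above then iterate over the 36 pointwise GEMMs of the transformed domain.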
Example #27
void ShadowMap::setShaderArgsRead(UniformTable& args, const String& prefix) const {
    // The notNull macro is set by the depthTexture()
    args.setUniform(prefix + "MVP",    unitLightMVP());
    args.setUniform(prefix + "bias", bias());
    depthTexture()->setShaderArgs(args, prefix, Sampler::shadow());
}
Example #28
void ofxVectorField::bias(float amt) {

    bias(amt, amt);
}
Example #29
/* Just like the halo mass function, we'll set this up such that
 * you just need to interpolate.
 *
 * Now this has been changed to calculate the spatial-scale dependence
 * of the bias factor. If you don't want the scale dep. b, then just
 * input r<0.
 *
 * If the global flag LINEAR_PSP==0, uses the scale dependence calculated
 * for halo bias relative to the non-linear matter \xi_m(r):
 *
 * f^2(r) = (1.0+xi*1.17)^1.49/(1.0+xi*0.69)^2.09  --> b(r) = b0*f(r)
 *
 * For LINEAR_PSP==1, use scale dependence determined for the linear P(k):
 *
 * f(r) = 1 + exp[-(r/A)^0.7] --> where A is a parameter that we're gonna have to 
 *                                determine in more detail, but now A \approx 1
 */
double bias_interp(double m, double r)
{
  static int flag=0,prev_cosmo=0, n;
  static double *x,*y,*y2, pnorm;
  int i;
  double dm,max=16.3,min=9,a,b,m1,m2,dm1,xi,power,rm,sig,b1,b2,mass,rvir,a1;
  double c1,c2,d1,d2;

  if(!flag || RESET_COSMOLOGY!=prev_cosmo)
    {
      n = 100;
      if(!ThisTask && OUTPUT)
	fprintf(stdout,"RESET: resetting bias for %f %f\n",OMEGA_M,SIGMA_8);
      if(!flag)
	{
	  x=dvector(1,n);
	  y=dvector(1,n);
	  y2=dvector(1,n);
	}
      flag=1;
      dm=(double)(max-min)/n;
      for(i=1;i<=n;++i)
	{
	  x[i]=pow(10.0,min+i*dm);
	  y[i]=log(bias(x[i]));
	  //printf("BIAS %e %e\n",x[i], exp(y[i]));
	  if(isinf(y[i])){ n = i-1; break; }
	  if(isnan(y[i])){ n = i-1; break; }
	  x[i] = log(x[i]);
	  continue;

	  // no longer need to do this part, since we're taking into account
	  // halo overdensity in the bias formula.
	  /*
	  if(DELTA_HALO!=200)
	    {
	      x[i]=log(halo_mass_conversion2(x[i],halo_c200(x[i]),200.0,DELTA_HALO));
	    }
	  else
	    {
	      x[i]=log(x[i]);
	    }
	  */
	}
      spline(x,y,n,2.0E+30,2.0E+30,y2);
      prev_cosmo=RESET_COSMOLOGY;
      pnorm=SIGMA_8/sigmac(8.0);

    }


  m=log(m);
  splint(x,y,y2,n,m,&a);
  a = exp(a);

  // if we're using systematic errors in an MCMC, adjust parameter a1 (amplitude)
  if(USE_ERRORS)
    a *= M2N.bias_amp;

  // if no scale-dependence required, return the large-scale value
  if(r<0) return a;


  /* 
   * SCALE DEPENDENT BIAS!!!
   *----------------------------------------------------------------------
   */

  /* FOR M/N analysis, use the Tinker etal 2005 result:
   */  
  xi = xi_interp(r);  
  if(M2N.scalebias_selfcal)
    {
      b = pow(1.0+xi*M2N.bias_amp1,M2N.bias_pow1)*pow(1.0+xi*0.69,-1.045);
    }
  else
    {
	  //Attempting to correct scale-dep bias issue
	  //SCALE-DEP BIAS CORRECTION
	  rvir = pow(3*exp(m)/(4*PI*200*RHO_CRIT*OMEGA_M),THIRD);
//      if(r<2.4*rvir)r=2.4*rvir;
      if(r<2.8*rvir)r=2.8*rvir;
	  	
      //rvir = pow(3*exp(m)/(4*PI*DELTA_HALO*RHO_CRIT*OMEGA_M),THIRD);
      //if(r<2*rvir)r=2*rvir;
	  
      xi = xi_interp(r);
      //parameters for scale-dep bias
      //original values
      //c1 = 1.17;
      //c2 = 0.69;
      //d1 = 1.49;
      //d2 = -2.09;
      c1 = HBIAS_C1;
      c2 = HBIAS_C2;
      d1 = HBIAS_D1;
      d2 = (-1.)*HBIAS_D2;
      /**
      c1 = 1.52;
      c2 = 0.84;
      d1 = 1.49;
      d2 = -2.09;
      **/
      b = pow(1.0+xi*c1,d1/2.)*pow(1.0+xi*c2,d2/2.0);
      
      // if we're using systematic errors in an MCMC, adjust parameter a1 (amplitude)
      if(USE_ERRORS) {
	b = (b-1)*M2N.scalebias_amp + 1;
	if(b<0)b=0;
      }
    }
  return b*a;
  

  /* first, calculate the standard Zheng-like, mass-independent scale bias
   * based on the non-linear correlation function
   * (re-calibrated using the SO catalogs)
   */
  xi = xi_interp(r);  
  b = pow(1.0+xi*0.92,2.08)*pow(1.0+xi*0.74,-2.37);
  if(b<0.6)b=0.6;

  /* Now the mass-dependent term.
   * Where the mass-dependence comes in the form of the large-scale bias itself
   */
  sig = sigmac_radius_interp(r);
  //if(a<0)
  //b *= pow(1 + 0.028*pow(a,1.53)*pow(sig,1.57),2.59)/
  //pow(1 + 0.253*pow(a,1.24)*pow(sig,1.71),0.397);

  // if we're using systematic errors in an MCMC, adjust parameter a1 (amplitude)
  if(USE_ERRORS) {
    b = (b-1)*M2N.scalebias_amp + 1;
    if(b<0)b=0;
  }
  return b*a;

}
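In the code's parameters, the scale-dependent correction applied above is

    b(r) = b_0 \left[ (1 + c_1 \xi(r))^{d_1} (1 + c_2 \xi(r))^{d_2} \right]^{1/2}

with defaults c_1 = 1.17, c_2 = 0.69, d_1 = 1.49, d_2 = -2.09, i.e. the square root of the f^2(r) quoted in the header comment, multiplied by the large-scale bias b_0 from the spline.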
Example #30
void ONNXImporter::populateNet(Net dstNet)
{
    CV_Assert(model_proto.has_graph());
    opencv_onnx::GraphProto graph_proto = model_proto.graph();
    std::map<std::string, Mat> constBlobs = getGraphTensors(graph_proto);
    // List of internal blobs shapes.
    std::map<std::string, MatShape> outShapes;
    // Add all the inputs shapes. It includes as constant blobs as network's inputs shapes.
    for (int i = 0; i < graph_proto.input_size(); ++i)
    {
        opencv_onnx::ValueInfoProto valueInfoProto = graph_proto.input(i);
        CV_Assert(valueInfoProto.has_type());
        opencv_onnx::TypeProto typeProto = valueInfoProto.type();
        CV_Assert(typeProto.has_tensor_type());
        opencv_onnx::TypeProto::Tensor tensor = typeProto.tensor_type();
        CV_Assert(tensor.has_shape());
        opencv_onnx::TensorShapeProto tensorShape = tensor.shape();

        MatShape inpShape(tensorShape.dim_size());
        for (int j = 0; j < inpShape.size(); ++j)
        {
            inpShape[j] = tensorShape.dim(j).dim_value();
        }
        outShapes[valueInfoProto.name()] = inpShape;
    }

    std::string framework_name;
    if (model_proto.has_producer_name()) {
        framework_name = model_proto.producer_name();
    }

    // create map with network inputs (without const blobs)
    std::map<std::string, LayerInfo> layer_id;
    std::map<std::string, LayerInfo>::iterator layerId;
    std::map<std::string, MatShape>::iterator shapeIt;
    // fill map: push layer name, layer id and output id
    std::vector<String> netInputs;
    for (int j = 0; j < graph_proto.input_size(); j++)
    {
        const std::string& name = graph_proto.input(j).name();
        if (constBlobs.find(name) == constBlobs.end()) {
            netInputs.push_back(name);
            layer_id.insert(std::make_pair(name, LayerInfo(0, netInputs.size() - 1)));
        }
    }
    dstNet.setInputsNames(netInputs);

    int layersSize = graph_proto.node_size();
    LayerParams layerParams;
    opencv_onnx::NodeProto node_proto;

    for(int li = 0; li < layersSize; li++)
    {
        node_proto = graph_proto.node(li);
        layerParams = getLayerParams(node_proto);
        CV_Assert(node_proto.output_size() >= 1);
        layerParams.name = node_proto.output(0);

        std::string layer_type = node_proto.op_type();
        layerParams.type = layer_type;


        if (layer_type == "MaxPool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "MAX");
            layerParams.set("ceil_mode", isCeilMode(layerParams));
        }
        else if (layer_type == "AveragePool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "AVE");
            layerParams.set("ceil_mode", isCeilMode(layerParams));
            layerParams.set("ave_pool_padded_area", framework_name == "pytorch");
        }
        else if (layer_type == "GlobalAveragePool")
        {
            layerParams.type = "Pooling";
            layerParams.set("pool", "AVE");
            layerParams.set("global_pooling", true);
        }
        else if (layer_type == "Add" || layer_type == "Sum")
        {
            if (layer_id.find(node_proto.input(1)) == layer_id.end())
            {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                blob = blob.reshape(1, 1);
                if (blob.total() == 1) {
                    layerParams.type = "Power";
                    layerParams.set("shift", blob.at<float>(0));
                }
                else {
                    layerParams.type = "Scale";
                    layerParams.set("bias_term", true);
                    layerParams.blobs.push_back(blob);
                }
            }
            else {
                layerParams.type = "Eltwise";
            }
        }
        else if (layer_type == "Sub")
        {
            Mat blob = getBlob(node_proto, constBlobs, 1);
            if (blob.total() == 1) {
                layerParams.type = "Power";
                layerParams.set("shift", -blob.at<float>(0));
            }
            else {
                layerParams.type = "Scale";
                layerParams.set("has_bias", true);
                layerParams.blobs.push_back(-1.0f * blob.reshape(1, 1));
            }
        }
        else if (layer_type == "Div")
        {
            Mat blob = getBlob(node_proto, constBlobs, 1);
            CV_Assert_N(blob.type() == CV_32F, blob.total());
            if (blob.total() == 1)
            {
                layerParams.set("scale", 1.0f / blob.at<float>(0));
                layerParams.type = "Power";
            }
            else
            {
                layerParams.type = "Scale";
                divide(1.0, blob, blob);
                layerParams.blobs.push_back(blob);
                layerParams.set("bias_term", false);
            }
        }
        else if (layer_type == "Constant")
        {
            CV_Assert(node_proto.input_size() == 0);
            CV_Assert(layerParams.blobs.size() == 1);
            constBlobs.insert(std::make_pair(layerParams.name, layerParams.blobs[0]));
            continue;
        }
        else if (layer_type == "ImageScaler")
        {
            const float scale = layerParams.has("scale") ? layerParams.get<float>("scale") : 1.0f;
            layerParams.erase("scale");

            if (layerParams.has("bias"))
            {
                layerParams.type = "Scale";
                layerParams.blobs.push_back(
                    Mat(Size(1,  layerParams.get("bias").size()), CV_32FC1, scale));

                layerParams.set("bias_term", true);
                Mat bias(1, layerParams.get("bias").size(), CV_32FC1);
                for (int j = 0; j < bias.total(); j++) {
                    bias.at<float>(0, j) = layerParams.get("bias").getRealValue(j);
                }
                layerParams.blobs.push_back(bias);
                layerParams.erase("bias");
            }
            else {
                layerParams.set("scale", scale);
                layerParams.type = "Power";
            }
        }
        else if (layer_type == "LeakyRelu")
        {
            layerParams.type = "ReLU";
            replaceLayerParam(layerParams, "alpha", "negative_slope");
        }
        else if (layer_type == "LRN")
        {
            replaceLayerParam(layerParams, "size", "local_size");
        }
        else if (layer_type == "BatchNormalization")
        {
            if (node_proto.input_size() != 5)
                CV_Error(Error::StsNotImplemented,
                         "Expected input, scale, bias, mean and var");

            layerParams.type = "BatchNorm";
            replaceLayerParam(layerParams, "epsilon", "eps");
            replaceLayerParam(layerParams, "spatial", "use_global_stats");

            Mat meanData = getBlob(node_proto, constBlobs, 3);
            Mat stdData =  getBlob(node_proto, constBlobs, 4);

            layerParams.blobs.push_back(meanData);
            layerParams.blobs.push_back(stdData);

            if (!node_proto.input(1).empty()) {
                layerParams.set("has_weight", true);
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 1));  // weightData
            } else {
                layerParams.set("has_weight", false);
            }

            if (!node_proto.input(2).empty()) {
                layerParams.set("has_bias", true);
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, 2)); // biasData
            } else {
                layerParams.set("has_bias", false);
            }
        }
        else if (layer_type == "Gemm")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "InnerProduct";
            Mat weights = getBlob(node_proto, constBlobs, 1);
            int ind_num_out = 0;
            if (layerParams.has("transB") && !layerParams.get<int>("transB")) {
                transpose(weights, weights);
                ind_num_out = 1;
            }
            layerParams.blobs.push_back(weights);

            if (node_proto.input_size() == 3) {
                Mat bias = getBlob(node_proto, constBlobs, 2);
                layerParams.blobs.push_back(bias);
            }

            layerParams.set("num_output", layerParams.blobs[0].size[ind_num_out]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "MatMul")
        {
            CV_Assert(node_proto.input_size() == 2);
            layerParams.type = "InnerProduct";
            Mat blob = getBlob(node_proto, constBlobs, 1);
            layerParams.blobs.push_back(blob.t());
            layerParams.set("bias_term", false);
            layerParams.set("num_output", layerParams.blobs[0].size[0]);
        }
        else if (layer_type == "Mul")
        {
            CV_Assert(node_proto.input_size() == 2);
            if (layer_id.find(node_proto.input(1)) == layer_id.end()) {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                blob = blob.reshape(1, 1);
                if (blob.total() == 1) {
                    layerParams.set("scale", blob.at<float>(0));
                    layerParams.type = "Power";
                }
                else {
                    layerParams.blobs.push_back(blob);
                    layerParams.type = "Scale";
                }
            }
            else {
                layerParams.type = "Eltwise";
                layerParams.set("operation", "prod");
            }
        }
        else if (layer_type == "Conv")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "Convolution";
            for (int j = 1; j < node_proto.input_size(); j++) {
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
            layerParams.set("num_output", layerParams.blobs[0].size[0]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "ConvTranspose")
        {
            CV_Assert(node_proto.input_size() >= 2);
            layerParams.type = "Deconvolution";
            for (int j = 1; j < node_proto.input_size(); j++) {
                layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
            layerParams.set("num_output", layerParams.blobs[0].size[1]);
            layerParams.set("bias_term", node_proto.input_size() == 3);
        }
        else if (layer_type == "Transpose")
        {
            layerParams.type = "Permute";
            replaceLayerParam(layerParams, "perm", "order");
        }
        else if (layer_type == "Unsqueeze")
        {
            CV_Assert(node_proto.input_size() == 1);
            Mat input = getBlob(node_proto, constBlobs, 0);

            DictValue axes = layerParams.get("axes");
            std::vector<int> dims;
            for (int j = 0; j < input.dims; j++) {
                dims.push_back(input.size[j]);
            }
            CV_Assert(axes.getIntValue(axes.size()-1) <= dims.size());
            for (int j = 0; j < axes.size(); j++) {
                dims.insert(dims.begin() + axes.getIntValue(j), 1);
            }

            Mat out = input.reshape(0, dims);
            constBlobs.insert(std::make_pair(layerParams.name, out));
            continue;
        }
        else if (layer_type == "Reshape")
        {
            CV_Assert(node_proto.input_size() == 2 || layerParams.has("shape"));

            if (node_proto.input_size() == 2) {
                Mat blob = getBlob(node_proto, constBlobs, 1);
                CV_Assert(blob.type() == CV_32SC1);

                if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
                    Mat input = getBlob(node_proto, constBlobs, 0);
                    Mat out = input.reshape(0, static_cast<std::vector<int> >(blob));
                    constBlobs.insert(std::make_pair(layerParams.name, out));
                    continue;
                }
                layerParams.set("dim", DictValue::arrayInt<int*>(
                            blob.ptr<int>(), blob.total() ));
            }
            else {
                DictValue shape = layerParams.get("shape");
                std::vector<int> dim;
                for (int j = 0; j < shape.size(); j++) {
                    dim.push_back(shape.getIntValue(j));
                }

                if (layer_id.find(node_proto.input(0)) == layer_id.end()) {
                    Mat input = getBlob(node_proto, constBlobs, 0);
                    Mat out = input.reshape(0, dim);
                    constBlobs.insert(std::make_pair(layerParams.name, out));
                    continue;
                }
                replaceLayerParam(layerParams, "shape", "dim");
            }
        }
        else if (layer_type == "Pad")
        {
            layerParams.type = "Padding";
        }
        else if (layer_type == "Shape")
        {
            CV_Assert(node_proto.input_size() == 1);
            shapeIt = outShapes.find(node_proto.input(0));
            CV_Assert(shapeIt != outShapes.end());
            MatShape inpShape = shapeIt->second;

            Mat shapeMat(inpShape.size(), 1, CV_32S);
            for (int j = 0; j < inpShape.size(); ++j)
                shapeMat.at<int>(j) = inpShape[j];
            shapeMat.dims = 1;

            constBlobs.insert(std::make_pair(layerParams.name, shapeMat));
            continue;
        }
        else if (layer_type == "Gather")
        {
            CV_Assert(node_proto.input_size() == 2);
            CV_Assert(layerParams.has("axis"));
            Mat input = getBlob(node_proto, constBlobs, 0);
            Mat indexMat = getBlob(node_proto, constBlobs, 1);
            CV_Assert_N(indexMat.type() == CV_32S, indexMat.total() == 1);
            int index = indexMat.at<int>(0);
            int axis = layerParams.get<int>("axis");

            std::vector<cv::Range> ranges(input.dims, Range::all());
            ranges[axis] = Range(index, index + 1);

            Mat out = input(ranges);
            constBlobs.insert(std::make_pair(layerParams.name, out));
            continue;
        }
        else if (layer_type == "Concat")
        {
            bool hasVariableInps = false;
            for (int i = 0; i < node_proto.input_size(); ++i)
            {
                if (layer_id.find(node_proto.input(i)) != layer_id.end())
                {
                    hasVariableInps = true;
                    break;
                }
            }

            if (!hasVariableInps)
            {
                std::vector<Mat> inputs(node_proto.input_size()), concatenated;
                for (size_t i = 0; i < inputs.size(); ++i)
                {
                    inputs[i] = getBlob(node_proto, constBlobs, i);
                }
                Ptr<Layer> concat = ConcatLayer::create(layerParams);
                runLayer(concat, inputs, concatenated);

                CV_Assert(concatenated.size() == 1);
                constBlobs.insert(std::make_pair(layerParams.name, concatenated[0]));
                continue;
            }
        }
        else
        {
            for (int j = 0; j < node_proto.input_size(); j++) {
                if (layer_id.find(node_proto.input(j)) == layer_id.end())
                    layerParams.blobs.push_back(getBlob(node_proto, constBlobs, j));
            }
         }

         int id = dstNet.addLayer(layerParams.name, layerParams.type, layerParams);
         layer_id.insert(std::make_pair(layerParams.name, LayerInfo(id, 0)));


         std::vector<MatShape> layerInpShapes, layerOutShapes, layerInternalShapes;
         for (int j = 0; j < node_proto.input_size(); j++) {
             layerId = layer_id.find(node_proto.input(j));
             if (layerId != layer_id.end()) {
                 dstNet.connect(layerId->second.layerId, layerId->second.outputId, id, j);
                 // Collect input shapes.
                 shapeIt = outShapes.find(node_proto.input(j));
                 CV_Assert(shapeIt != outShapes.end());
                 layerInpShapes.push_back(shapeIt->second);
             }
         }

         // Compute shape of output blob for this layer.
         Ptr<Layer> layer = dstNet.getLayer(id);
         layer->getMemoryShapes(layerInpShapes, 0, layerOutShapes, layerInternalShapes);
         CV_Assert(!layerOutShapes.empty());
         outShapes[layerParams.name] = layerOutShapes[0];
     }
 }
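For context, populateNet() above is the core of OpenCV's ONNX importer; a typical entry point looks like the sketch below (standard OpenCV dnn API; the model and image paths are hypothetical):

#include <opencv2/opencv.hpp>

int main()
{
    // readNetFromONNX drives ONNXImporter::populateNet under the hood
    cv::dnn::Net net = cv::dnn::readNetFromONNX("model.onnx");

    // convert an image to an NCHW blob and run a forward pass
    cv::Mat blob = cv::dnn::blobFromImage(cv::imread("input.png"));
    net.setInput(blob);
    cv::Mat out = net.forward();
    return 0;
}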