Example No. 1
/*function to discriminate null and alternative models
based on less-stringent hypothesis*/
void calc_lessstring_fn(int *nsamp1, int *totmods1, int *models_num, int *hyp)
{
	/*'nsamp1' is the number of samples
	'totmods1' is the total number of rows in 'models_num'
	'models_num' is a matrix (in vector form) containing all unique models
	'hyp' is binary vector of length 'totmods1' recording whether
		a model is a null (0) or alternative (1) model*/
	
	int i,j,nsamp,totmods,inimod,iniind;
	nsamp=*nsamp1;
	totmods=*totmods1;
	
	//classify each model as null (0) or alternative (1)
	for(i=0;i<totmods;i++)
	{
		inimod=models_num[index2(i,0,totmods)];
		iniind=models_num[index2(i,nsamp,totmods)];
		hyp[i]=0;
		j=1;
		while(j<nsamp&&hyp[i]==0)
		{
			if(models_num[index2(i,j,totmods)]>=5) if(models_num[index2(i,j,totmods)]!=inimod||models_num[index2(i,j+nsamp,totmods)]!=iniind) hyp[i]=1;
			j++;
		}
	}
	return;
}
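Note: examples Nos. 1, 4, 9, 21 and 24 on this page address a flat vector as a matrix through index2(i, j, nrow), where the third argument is the number of rows. A minimal sketch of such a helper, assuming column-major storage; the definition is an assumption, and other examples below use unrelated helpers or types that merely share the name index2.

/* Assumed column-major flat-matrix indexing: element (i, j) of a matrix with
   'nrow' rows is stored at offset i + j*nrow.  Hypothetical definition. */
#define index2(i, j, nrow) ((i) + (j) * (nrow))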
Example No. 2
TypeSpec
ASTindex::typecheck (TypeSpec expected)
{
    typecheck_children ();
    const char *indextype = "";
    TypeSpec t = lvalue()->typespec();
    if (t.is_structure()) {
        error ("Cannot use [] indexing on a struct");
        return TypeSpec();
    }
    if (t.is_closure()) {
        error ("Cannot use [] indexing on a closure");
        return TypeSpec();
    }
    if (index3()) {
        if (! t.is_array() && ! t.elementtype().is_matrix())
            error ("[][][] only valid for a matrix array");
        m_typespec = TypeDesc::FLOAT;
    } else if (t.is_array()) {
        indextype = "array";
        m_typespec = t.elementtype();
        if (index2()) {
            if (t.aggregate() == TypeDesc::SCALAR)
                error ("can't use [][] on a simple array");
            m_typespec = TypeDesc::FLOAT;
        }
    } else if (t.aggregate() == TypeDesc::VEC3) {
        indextype = "component";
        TypeDesc tnew = t.simpletype();
        tnew.aggregate = TypeDesc::SCALAR;
        tnew.vecsemantics = TypeDesc::NOXFORM;
        m_typespec = tnew;
        if (index2())
            error ("can't use [][] on a %s", type_c_str(t));
    } else if (t.aggregate() == TypeDesc::MATRIX44) {
        indextype = "component";
        TypeDesc tnew = t.simpletype();
        tnew.aggregate = TypeDesc::SCALAR;
        tnew.vecsemantics = TypeDesc::NOXFORM;
        m_typespec = tnew;
        if (! index2())
            error ("must use [][] on a matrix, not just []");
    } else {
        error ("can only use [] indexing for arrays or multi-component types");
        return TypeSpec();
    }

    // Make sure the indices (children 1+) are integers
    for (size_t c = 1;  c < nchildren();  ++c)
        if (! child(c)->typespec().is_int())
            error ("%s index must be an integer, not a %s", 
                   indextype, type_c_str(index()->typespec()));

    // If the thing we're indexing is an lvalue, so is the indexed element
    m_is_lvalue = lvalue()->is_lvalue();

    return m_typespec;
}
Example No. 3
// make C8 element stiffness matrix
void CHak3DCont_8::makeKe()
{
	int i,j,k,inc; //,err;
	double sum, Jdet;
	Coord3D gp;
	Coord3D np[8];

	if(Ke){delete [] Ke;}
	Ke = new double [300]();

	// read in node coordinates
	getNcrd(np);

	// get material property matrix
    double *Em = Mat->getD();

	double B[144];	// Strain displacement matrix (6 x 24)
	double Bt[144];	// Auxiliary matrix (holds B^T * D, 24 x 6)

	// use 2 point Gauss rule (8 points total)
	for(i=0;i<8;i++)
	{
		gp.x = gauss2 * c8_pos[i].x;
		gp.y = gauss2 * c8_pos[i].y;
		gp.z = gauss2 * c8_pos[i].z;

		// compute isoparametric B matrix (6 x 24)
		Jdet = C8_isoBMat(gp, np, B);

        if(Jdet < 1.0E-12 || Jdet > 1.0E+15)
        {
            std::cout << "\nERROR in makeKe for C8 element, Jdet = " << Jdet;
        }

		// first multiply B^T * D
		cblas_dgemm(CblasRowMajor, CblasTrans, CblasNoTrans, 24, 6, 6, 1.0, B, 24, Em, 6, 0.0, Bt, 6);

		// complete multiplication (for upper triangle only)
		for(j=0;j<24;j++) // rows
		{
			for(k=j;k<24;k++) // columns (upper triangle)
			{
				sum = 0.0;
				for(inc=0;inc<6;inc++)
				{
					sum += Bt[index2(j,inc,6)]*B[index2(inc,k,24)];
				}
				Ke[tri_ind(24,j,k)] += sum*Jdet; //*weight;
			}
		}
	}
}
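Note: in this example index2(r, c, ncol) appears to be a row-major helper (the third argument is the row length, matching the CblasRowMajor call above), and tri_ind packs the upper triangle of the symmetric 24 x 24 stiffness matrix into the 300-entry Ke array. A minimal sketch of both helpers under those assumptions; the exact definitions in the original code are not shown on this page.

// Assumed row-major flat-matrix indexing: element (r, c) of a matrix whose
// rows hold 'ncol' entries.
static inline int index2(int r, int c, int ncol) { return r * ncol + c; }

// Assumed packed upper-triangle indexing for an n x n symmetric matrix:
// entry (j, k) with k >= j, stored row by row.  For n = 24 the indices run
// from 0 to 299, matching the 300-entry allocation of Ke above.
static inline int tri_ind(int n, int j, int k)
{
    return j * n - j * (j - 1) / 2 + (k - j);
}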
Example No. 4
//recursive function to be called from 'genfullmodels'
void genfullmodels_recur(int nhier, int currhier1, int *temp, int *tempind, int q, int nrow, int ncol, int *structure, int *r, int *totmods, int incmods, int **models_num)
{
	/*'nhier' is number of hierarchies
	'currhier1' is current hierarchy
	'temp' and 'tempind' are vectors recording structural information
	'q' is specific row of 'structure' we wish to extract
	'nrow' and 'ncol' are used for indexing
	'structure' is a matrix containing intermediate model structures
	'r' denotes current model in 'models_num'
	'totmods' is maximum number of models in 'models_num'
	'incmods' is arbitrary number of models to increase if reallocation necessary
	'models_num' is a matrix recording final models*/
	
	//declare necessary variables
	int j,k,currhier,nsamp,nmod=10;
	nsamp=ncol;
	currhier=currhier1+1;
	//continue working through hierarchies in order and expand model set
	if(temp[currhier]==1)
	{
		for(j=0;j<nmod;j++)
		{
			(*models_num)[index2(*r,tempind[currhier],*totmods)]=j;
			if(currhier<(nhier-1)) genfullmodels_recur(nhier,currhier,temp,tempind,q,nrow,ncol,structure,r,totmods,incmods,models_num);
			else
			{
				*r=*r+1;
				//perform memory reallocation if required
				if(*r>=*totmods) realloc_maxmod_full(nsamp,totmods,incmods,models_num);
				//initialise new model structure
				for(k=0;k<(2*nsamp);k++) (*models_num)[index2(*r,k,*totmods)]=(*models_num)[index2(*r-1,k,*totmods)];
			}
		}
	}
	else
	{
		for(j=0;j<nmod;j++)
		{
			//now fill in gaps and pass to recursive function if necessary
			for(k=0;k<nsamp;k++)
			{
				if(structure[index2(q,k,nrow)]==tempind[currhier])
				{
					(*models_num)[index2(*r,k,*totmods)]=j;
					(*models_num)[index2(*r,k+nsamp,*totmods)]=tempind[currhier];
				}
			}
			if(currhier<(nhier-1)) genfullmodels_recur(nhier,currhier,temp,tempind,q,nrow,ncol,structure,r,totmods,incmods,models_num);
			else
			{
				*r=*r+1;
				//perform memory reallocation if required
				if(*r>=*totmods) realloc_maxmod_full(nsamp,totmods,incmods,models_num);
				//initialise new model structure
				for(k=0;k<(2*nsamp);k++) (*models_num)[index2(*r,k,*totmods)]=(*models_num)[index2(*r-1,k,*totmods)];
			}
		}
	}
	return;
}
Example No. 5
/***********************************************************************//**
 * @brief Returns model energy flux between [emin, emax] (units: erg/cm2/s)
 *
 * @param[in] emin Minimum photon energy.
 * @param[in] emax Maximum photon energy.
 * @return Energy flux (erg/cm2/s).
 *
 * Computes
 *
 * \f[
 *    \int_{\tt emin}^{\tt emax} S_{\rm E}(E | t) E \, dE
 * \f]
 *
 * where
 * - [@p emin, @p emax] is an energy interval, and
 * - \f$S_{\rm E}(E | t)\f$ is the spectral model (ph/cm2/s/MeV).
 * The integration is done numerically.
 ***************************************************************************/
double GModelSpectralSmoothBrokenPlaw::eflux(const GEnergy& emin,
                                             const GEnergy& emax) const
{
    // Initialise flux
    double eflux = 0.0;
    
    // Compute only if integration range is valid
    if (emin < emax) {
        
        // Initialise function to integrate
        eflux_kern kernel(prefactor(), index1(), pivot(),
                          index2(), breakenergy(), beta());
        
        // Initialise integral class with function
        GIntegral integral(&kernel);
        
        // Set integration precision
        integral.eps(1.0e-8);
        
        // Calculate integral between emin and emax
        eflux = integral.romberg(emin.MeV(), emax.MeV());
        
        // Convert from MeV/cm2/s to erg/cm2/s
        eflux *= gammalib::MeV2erg;
        
    } // endif: integration range was valid
    
    // Return flux
    return eflux;
}
Example No. 6
//recursive function to be called from 'genfullmodels_priors'
void genfullmodels_recur_priors(int nhier, int currhier1, int *temp, int *tempind, int q, int nrow, int ncol, int *structure, int *r, int *models_num, int *nalt_ls, int *nalt_s)
{
	/*'nhier' is number of hierarchies
	'currhier1' is current hierarchy
	'temp' and 'tempind' are vectors recording structural information
	'q' is specific row of 'structure' we wish to extract
	'nrow' and 'ncol' are used for indexing
	'structure' is a matrix containing intermediate model structures
	'r' denotes current model
	'models_num' is a vector recording model structure
	'nalt_ls' records the number of alternative models 
		according to "less stringent" criteria
	'nalt_s' records the number of alternative models 
		according to "stringent" criteria*/
		
	//declare necessary variables
	int j,k,currhier,nsamp,nmod=10;
	nsamp=ncol;
	currhier=currhier1+1;
	//continue working through hierarchies in order and expand model set
	if(temp[currhier]==1)
	{
		for(j=0;j<nmod;j++)
		{
			models_num[tempind[currhier]]=j;
			if(currhier<(nhier-1)) genfullmodels_recur_priors(nhier,currhier,temp,tempind,q,nrow,ncol,structure,r,models_num,nalt_ls,nalt_s);
			else
			{
				*r=*r+1;
				//now calculate prior specification based on model
				*nalt_ls=*nalt_ls+calc_lessstring_single(nsamp,models_num);
				*nalt_s=*nalt_s+calc_string_single(nsamp,models_num);
			}
		}
	}
	else
	{
		for(j=0;j<nmod;j++)
		{
			//now fill in gaps and pass to recursive function if necessary
			for(k=0;k<nsamp;k++)
			{
				if(structure[index2(q,k,nrow)]==tempind[currhier])
				{
					models_num[k]=j;
					models_num[k+nsamp]=tempind[currhier];
				}
			}
			if(currhier<(nhier-1)) genfullmodels_recur_priors(nhier,currhier,temp,tempind,q,nrow,ncol,structure,r,models_num,nalt_ls,nalt_s);
			else
			{
				*r=*r+1;
				//now calculate prior specification based on model
				*nalt_ls=*nalt_ls+calc_lessstring_single(nsamp,models_num);
				*nalt_s=*nalt_s+calc_string_single(nsamp,models_num);
			}
		}
	}
	return;
}
Example No. 7
/*
 * getnum --
 *   Convert one term to a line number.
 */
int getnum(const char line[], int *pi, int *pnum, int *status)
{
     int i, s;
     const char *digits = "0123456789";

     s = OK;
     i = *pi;                  /* initialise local index from the caller's position */
     if (index2(digits, line[i]) >= 0) {
          *pnum = ctoi(line, &i);
          i--;                  /* move back; to be advanced at the end */
     } else if (line[i] == CURLINE) {
          *pnum = curln;
     } else if (line[i] == LASTLINE) {
          *pnum = lastln;
     } else if (line[i] == SCAN || line[i] == BACKSCAN) {
          if (optpat(line, &i) == ERR) { /* build the pattern */
               s = ERR;
          } else if (line[i] == SCAN) {
               s = ptscan(FORWARD, pnum);
          } else {
               s = ptscan(BACKWARD, pnum);
          }
     } else {
          s = EOF;
     }
     if (s == OK)
          i++;
     *pi = i;
     *status = s;
     return s;
}
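Note: getnum (and ctoi in Example No. 14) relies on index2(s, c) returning the position of character c within string s, or a negative value when it is absent. A minimal sketch under that assumption:

/* Assumed behaviour: offset of c in s, or -1 if c does not occur. */
int index2(const char s[], char c)
{
     int i;
     for (i = 0; s[i] != '\0'; i++) {
          if (s[i] == c)
               return i;
     }
     return -1;
}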
Example No. 8
void MainWindow::on_RegisterButton_clicked()
{
    int bytes_available;
    int i;
    for(i = 0; i < FINGERS && positions[i] == false; i++){
        if((i == (FINGERS - 1)) && positions[i] == false){
            QMessageBox::warning(this, tr("Bio-guised"),
                                           tr("Please register at least 1 fingerprint"),
                                           QMessageBox::Ok);
            return;
        }
    }

    MainWindow::hide();
    QSerialPort serial;
    serial.setPortName("COM6");
    serial.open(QIODevice::ReadWrite);
    serial.setBaudRate(QSerialPort::Baud9600);
    serial.setDataBits(QSerialPort::Data8);
    serial.setParity(QSerialPort::NoParity);
    serial.setStopBits(QSerialPort::OneStop);
    serial.setFlowControl(QSerialPort::NoFlowControl);
    qDebug() << "connected";

    while(!serial.isWritable());
    QByteArray index("v");
    serial.write(index);
    do{
        serial.waitForReadyRead(1000);
        bytes_available = serial.bytesAvailable();
    }while(bytes_available <= 0);

    QByteArray byte_array = serial.read(bytes_available);
    qDebug() << byte_array;

    char *rawData = byte_array.data();
    int data = (int)*rawData;

    delay(2000);
    while(!serial.isWritable());
    QByteArray index2("k");
    serial.write(index2);
    serial.waitForReadyRead(2000);
    bytes_available = serial.bytesAvailable();


    QByteArray byte_array2 = serial.read(bytes_available);
    qDebug() << byte_array2;
    std::string key = byte_array2.toStdString();
    biomodel->encryptKey(key);


    if(data > 0 )
        bioGuised->show();

}
Example No. 9
//internal function to check for duplicated combinations
void check_duplicates(int start, int ncomb, int ncol, int *comb, int *ind)
{
	/*'start' corresponds to which element of 'comb' is the current
		comparison combination
	'ncomb' is the length of 'comb' (e.g. the number of rows)
	'ncol' is the number of columns of 'comb'
	'comb' is a matrix where each row corresponds to a unique
		combination of length 'ncol'
	'ind' is binary vector of length 'ncomb' recording unique combinations*/
		
	int i,j,k;
	
	for(i=(start+1);i<ncomb;i++)
	{
		ind[i]=1;
		for(j=0;j<ncol;j++) for(k=0;k<ncol;k++) if(comb[index2(i,j,ncomb)]==comb[index2(start,k,ncomb)]) ind[i]=0;
	}
	return;
}
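Note: a small usage sketch for check_duplicates with hypothetical data, to be compiled together with the function above and its (assumed column-major) index2 helper. Three combinations of length 2 are stored column by column; every row that shares at least one element with row 0 is flagged as a duplicate.

#include <stdio.h>

void check_duplicates(int start, int ncomb, int ncol, int *comb, int *ind);

int main(void)
{
	/* rows (combinations) {0,1}, {0,2}, {1,2} stored column-major:
	   column 0 holds the first elements, column 1 the second elements */
	int comb[6] = {0, 0, 1,  1, 2, 2};
	int ind[3]  = {1, 1, 1};

	check_duplicates(0, 3, 2, comb, ind);
	/* rows 1 and 2 each share an element with row 0, so both are flagged 0 */
	printf("ind[1]=%d ind[2]=%d\n", ind[1], ind[2]);
	return 0;
}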
Example No. 10
/*** Parse a slice/select assign statement ***/
Node* Parser::parseSliceSelectAssign()
{
	if(!hasTokens() || peekToken().getType() != T_IDENTIFIER)
		throw ParserSyntaxException(getToken(), "Expected variable identifier!");

	// Store the variable
	std::auto_ptr<Variable> target(new Variable(getToken()));

	if(!hasTokens() || peekToken().getType() != T_LBRACKET)
		throw ParserSyntaxException(getToken(), "Expected '['!");
	Token tok = getToken();

	std::auto_ptr<Object> index1(parseExpression());

	if(hasTokens() && peekToken().getType() == T_RBRACKET)
	{
		getToken();

		if(!hasTokens() || peekToken().getType() != T_ASSIGN)
			throw ParserSyntaxException(getToken(), "Expected ':='!");
		getToken();

		std::auto_ptr<Object> expr(parseExpression());

		SelectAssign* result = new SelectAssign(target.release(), index1.release(), expr.release());
		result->setLineNumber(tok.getLineNumber());
		result->setColumnNumber(tok.getColumnNumber());
		return result;
	}

	if(!hasTokens() || peekToken().getType() != T_COLON)
		throw ParserSyntaxException(getToken(), "Expected ':' or ']'!");
	getToken();

	std::auto_ptr<Object> index2(parseExpression());

	if(!hasTokens() || peekToken().getType() != T_RBRACKET)
		throw ParserSyntaxException(getToken(), "Expected ']'!");
	getToken();

	if(!hasTokens() || peekToken().getType() != T_ASSIGN)
		throw ParserSyntaxException(getToken(), "Expected ':='!");
	getToken();

	std::auto_ptr<Object> expr(parseExpression());

	SliceAssign* result = new SliceAssign(target.release(), index1.release(), index2.release(), expr.release());
	result->setLineNumber(tok.getLineNumber());
	result->setColumnNumber(tok.getColumnNumber());
	return result;
}
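Note: based on the tokens checked above, the two statement shapes this routine distinguishes are as follows (the surface syntax is inferred, so treat it as an assumption about the language):

// a[i] := expr        -> SelectAssign(target, index1, expr)
// a[lo : hi] := expr  -> SliceAssign(target, index1, index2, expr)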
Example No. 11
/*
 * Performs the calculations for a single particle for grid_mpole
 * Equivalent to the calculations in the grid_mpole method in pmestuff.f
 */
void mpole_math(double* fmp, int* igrid, double* qgrid, double* thetai1, double* thetai2, double* thetai3, int bsorder, int nfft1, int nfft2, int nfft3, int npole, int n, int m){
    int i, j, k;
    int i0, j0, k0;
    int it1, it2, it3;
    int igrd0, jgrd0, kgrd0;
    double v0, u0, t0;
    double v1, u1, t1;
    double v2, u2, t2;
    double term0, term1, term2;
    
    igrd0 = igrid[index2(0, m, 3, maxatm)];
    jgrd0 = igrid[index2(1, m, 3, maxatm)];
    kgrd0 = igrid[index2(2, m, 3, maxatm)];
    k0 = kgrd0;
    
    for (it3 = 0; it3 < bsorder; ++it3){
        k0 = k0 + 1;
        k = k0 + 1 + (nfft3-intsign(nfft3,k0))/2;
        v0 = thetai3[index3(0,it3,m, 4,bsorder,n)];
        v1 = thetai3[index3(1,it3,m, 4,bsorder,n)];
        v2 = thetai3[index3(2,it3,m, 4,bsorder,n)];
        j0 = jgrd0;
        
        for (it2 = 0; it2 < bsorder; ++it2){
            j0 = j0 + 1;
            j = j0 + 1 + (nfft2-intsign(nfft2,j0))/2;
            u0 = thetai2[index3(0,it2,m, 4,bsorder,n)];
            u1 = thetai2[index3(1,it2,m, 4,bsorder,n)];
            u2 = thetai2[index3(2,it2,m, 4,bsorder,n)];
            term0 = fmp[index2(0,m, 10, maxatm)]*u0*v0 + fmp[index2(2,m, 10, maxatm)]*u1*v0 + fmp[index2(3,m, 10, maxatm)]*u0*v1 + fmp[index2(5,m, 10, maxatm)]*u2*v0 + fmp[index2(6,m, 10, maxatm)]*u0*v2 + fmp[index2(9,m, 10, maxatm)]*u1*v1;
            term1 = fmp[index2(1,m, 10, maxatm)]*u0*v0 + fmp[index2(7,m, 10, maxatm)]*u1*v0 + fmp[index2(8,m, 10, maxatm)]*u0*v1;
            term2 = fmp[index2(4,m, 10, maxatm)] * u0 * v0;
            i0 = igrd0;
            
            for (it1 = 0; it1 < bsorder; ++it1){
                i0 = i0 + 1;
                i = i0 + 1 + (nfft1-intsign(nfft1,i0))/2;
                
                t0 = thetai1[index3(0,it1,m, 4,bsorder,n)];
                t1 = thetai1[index3(1,it1,m, 4,bsorder,n)];
                t2 = thetai1[index3(2,it1,m, 4,bsorder,n)];
                
                qgrid[index4(0,i-1,j-1,k-1,2, nfft3,nfft2,nfft1)] = qgrid[index4(0,i-1,j-1,k-1,2, nfft3,nfft2,nfft1)] + term0*t0 + term1*t1 + term2*t2;
            }
        }
    }
    
}
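Note: here index2/index3/index4 flatten Fortran-style arrays (igrid(3,maxatm), thetai(4,bsorder,n), fmp(10,maxatm), ...) into C vectors, and the signatures differ from the two-dimensional helpers in the earlier examples. One plausible, column-major definition for the two- and three-index cases, consistent with the calls above; the actual macros of the original port, and the index4 layout in particular, are not shown on this page and are not reproduced here.

/* Assumed column-major flattening of a d1 x d2 array: element (i, j). */
static inline int index2(int i, int j, int d1, int d2)
{
    (void)d2;                       /* d2 only documents the full shape */
    return i + d1 * j;
}

/* Assumed column-major flattening of a d1 x d2 x d3 array: element (i, j, k). */
static inline int index3(int i, int j, int k, int d1, int d2, int d3)
{
    (void)d3;
    return i + d1 * (j + d2 * k);
}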
Example No. 12
/***********************************************************************//**
 * @brief Update Monte Carlo pre computation cache
 *
 * Updates the precomputation cache for Monte Carlo simulations.
 ***************************************************************************/
void GModelSpectralSmoothBrokenPlaw::update_mc_cache(void) const
{
    // Check if we need to update the cache
    if (prefactor()         != m_mc_prefactor ||
        index1()            != m_mc_index1    ||
        index2()            != m_mc_index2    ||
        pivot().MeV()       != m_mc_pivot     ||
        breakenergy().MeV() != m_mc_breakenergy) {

        // Set parameters for which Monte Carlo cache was computed
        m_mc_prefactor = prefactor();
        m_mc_index1      = index1();
        m_mc_index2      = index2();
        m_mc_pivot       = pivot().MeV();
        m_mc_breakenergy = breakenergy().MeV();

        // Compute prefactor at pivot energy
        double pre = prefactor() *
                     std::pow(breakenergy().MeV()/pivot().MeV(), index1());

        // Find out which index is harder. This is important since the smoothly
        // broken power law follows the hard index below the break energy and
        // the softer index above the break energy.
        double index1 = (m_mc_index1 > m_mc_index2) ? m_mc_index1 : m_mc_index2;
        double index2 = (m_mc_index1 > m_mc_index2) ? m_mc_index2 : m_mc_index1;

        // Set broken power law for Monte Carlo simulations
        m_mc_brokenplaw = GModelSpectralBrokenPlaw(pre,
                                                   index1,
                                                   breakenergy(),
                                                   index2);
        
    } // endif: Update was required
    
    // Return
    return;
}
Example No. 13
index2 m::argMin(matrixn const& a)
{
	index2 argMinV;
	m_real minV=DBL_MAX;
	for(int i=0; i<a.rows(); i++)
	{
		for(int j=0; j<a.cols(); j++)
		{
			if(a(i,j)<minV)
			{
				minV=a(i,j);
				argMinV=index2(i,j);
			}
		}
	}
	return argMinV;
}
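Note: in this example (and in Example No. 18) index2 is not an indexing helper but a small two-component index type: it is default-constructed, assigned from index2(i, j), and read back via argMinV(0)/argMinV(1). A minimal sketch of such a type; the real definition is an assumption:

// Assumed two-component integer index with (row, col) access through operator().
struct index2
{
	int v[2];
	index2() { v[0] = 0; v[1] = 0; }
	index2(int i, int j) { v[0] = i; v[1] = j; }
	int operator()(int k) const { return v[k]; }
};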
Example No. 14
/*
 * ctoi --
 *   Convert string at line[i] to integer.
 */
int ctoi(const char line[], int *pi)
{
     const char *digits = "0123456789";
     int i, d, n;

     i = *pi;
     while (isblank(line[i]))
          i++;
     for (n = 0; line[i] != '\0'; i++) {
          d = index2(digits, line[i]);
          if (d < 0)            /* non-digit */
               break;
          n = n*10 + d;
     }
     *pi = i;
     return n;
}
Example No. 15
  void EpetraOpMultiVec::SetBlock( const MultiVec<double>& A, const std::vector<int>& index ) 
  {
    // this should be revisited to e
    EpetraOpMultiVec temp_vec(Epetra_OP, Epetra_DataAccess::View, *Epetra_MV, index);

    int numvecs = index.size();
    if ( A.GetNumberVecs() != numvecs ) {
      std::vector<int> index2( numvecs );
      for(int i=0; i<numvecs; i++)
        index2[i] = i;
      EpetraOpMultiVec *tmp_vec = dynamic_cast<EpetraOpMultiVec *>(&const_cast<MultiVec<double> &>(A)); 
      TEUCHOS_TEST_FOR_EXCEPTION( tmp_vec==NULL, std::invalid_argument, "Anasazi::EpetraOpMultiVec::SetBlocks() cast of MultiVec<double> to EpetraOpMultiVec failed.");
      EpetraOpMultiVec A_vec(Epetra_OP, Epetra_DataAccess::View, *(tmp_vec->GetEpetraMultiVector()), index2);
      temp_vec.MvAddMv( 1.0, A_vec, 0.0, A_vec );
    }
    else {
      temp_vec.MvAddMv( 1.0, A, 0.0, A );
    }
  }
Example No. 16
// compute Shell4 flutter sensitivity - complex Eigenvalue
void CHak3DShell_4::CEigSens(int numDual, int numEig, dcompLPK **eig_flut, double **eig_vec,
                             dcompLPK *eig, dcompLPK *wgt_case, double *elSens, int eNum)
{
    int i;
    double *sens = new double [numDual](); // sum element contributions from sub elems (for each dual)

    for(i=0; i<4; i++)
    {
        sub_shells[i]->CEigSens(numDual, numEig, eig_flut, eig_vec, eig, wgt_case, sens, 0);
    }

    // for each dual response
    for(i=0; i<numDual; i++)
    {
        // add to overall sensitivity
        elSens[index2(eNum,i,numDual)] += 0.5*sens[i]; // half as each sub-elem only contributes half stiffness and mass
    }

    delete [] sens;
}
Example No. 17
// compute Shell4 element thickness sensitivity
void CHak3DShell_4::elemSens(int numDual, int numCase, double **disp_prim, double **disp_dual,
                             double *wgt_fact, double *wgt_case, double *elSens, int eNum, double *Nf, int mode)
{
    int i;
    double *sens = new double [numDual](); // sum element contributions from sub elems (for each dual)

    for(i=0; i<4; i++)
    {
        sub_shells[i]->elemSens(numDual, numCase, disp_prim, disp_dual, wgt_fact, wgt_case, sens, 0, Nf, mode);
    }

    // for each dual response
    for(i=0; i<numDual; i++)
    {
        // add to overall sensitivity
        elSens[index2(eNum,i,numDual)] += 0.5*sens[i]; // half as each sub-elem only contributes half stiffness and mass
    }

    delete [] sens;
}
Example No. 18
index2 m::argMinRand(matrixn const& a, m_real thr)
{
	index2 argMinV=argMin(a);

	std::vector<index2> indexes;

	m_real minV=a(argMinV(0), argMinV(1))*thr;
	for(int i=0; i<a.rows(); i++)
	{
		for(int j=0; j<a.cols(); j++)
		{
			if(a(i,j)<minV)
			{
				indexes.push_back(index2(i,j));
			}
		}
	}
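	// note: this assumes at least one element satisfies a(i,j) < minV (e.g. thr > 1
	// with strictly positive costs); otherwise 'indexes' stays empty and the
	// modulo in the return statement below is undefined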

	return indexes[rand()%indexes.size()];
}
Example No. 19
int read_vocab(char* vocab_filename, 
	       int verbosity,
	       struct idngram_hash_table* vocabulary,
	       int M
	       )
{
  FILE *vocab_file;
  int vocab_size;
  char temp_word[MAX_WORD_LENGTH];
  char temp_word2[MAX_WORD_LENGTH];

  vocab_size = 0;
  vocab_file = rr_iopen(vocab_filename);

  pc_message(verbosity,2,"Reading vocabulary... \n");

  while (fgets (temp_word, sizeof(temp_word),vocab_file)) {
    if (strncmp(temp_word,"##",2)==0) continue;
    sscanf (temp_word, "%s ",temp_word2);

    /*    printf("hey hey %s %d\n ", temp_word2, idngram_hash(temp_word2,M));*/

    /* Check for repeated words in the vocabulary */    
    if (index2(vocabulary,temp_word2) != 0)
      warn_on_repeated_words(temp_word2);

    warn_on_wrong_vocab_comments(temp_word);
    vocab_size++;
    /*    printf("%s %d\n ", temp_word2, idngram_hash(temp_word2,M));*/

    add_to_idngram_hashtable(vocabulary,idngram_hash(temp_word2,M),temp_word2,vocab_size);
    if(vocab_size == M){
      quit(-1, "Number of entries reached the size of the hash.  Run the program again with a larger hash size -hash \n");
    }
  }

  if (vocab_size > MAX_VOCAB_SIZE)    
    fprintf(stderr,"text2idngram : vocab_size %d is larger than %d\n",vocab_size,MAX_VOCAB_SIZE);

  return 0;
}
Example No. 20
// -----------------------------------------------------------------------------
// PrintTopicCollectionContentL
// Prints the content of the Topic Collection
// Returns: None
// -----------------------------------------------------------------------------
//
void PrintTopicCollectionContentL( const CFileStore* aStore )
{
    RDebug::Print(_L("Current topic collection content: "));
    TStreamId rootStreamId = aStore->Root();
    if ( rootStreamId == 0 )
    {
        RDebug::Print(_L("Store contains no root stream."));
        return;
    }
    RStoreReadStream rootStream;
    rootStream.OpenLC( *aStore, rootStreamId ); // on CS
    TInt identityCount( rootStream.ReadUint16L() );

    for ( TInt index( 0 ); index < identityCount; index++ )
    {
        TCbsDbTopicIdentity id;
        id.iName.Zero();
        TInt length( rootStream.ReadInt16L() );
        for ( TInt index2( 0 ); index2 < length; index2++ )
        {
            TInt character( rootStream.ReadInt16L() );
            id.iName.Append( TChar(character) );
        }
        id.iName.SetLength( length );

        id.iNumber = rootStream.ReadUint16L();

        TBuf<255> buf = _L("Identity name: ");
        buf.Append( id.iName );
        buf.Append( _L(", number: ") );
        buf.AppendNum( static_cast<TInt>(id.iNumber) );
        RDebug::Print( buf );
    }

    CleanupStack::PopAndDestroy();    // root stream
}
Example No. 21
//function to generate all possible models based on an intermediate set of structures
void genfullmodels(int q, int nrow, int ncol, int *structure, int *r, int *totmods, int incmods, int **models_num)
{
	/*'q' is specific row of 'structure' we wish to extract
	'nrow' and 'ncol' are used for indexing
	'structure' is a matrix containing intermediate model structures
	'r' denotes current model in 'models_num'
	'totmods' is maximum number of models in 'models_num'
	'incmods' is arbitrary number of models to increase if reallocation necessary
	'models_num' is a matrix recording final models*/
	
	//declare necessary variables
	int i,j,k,nhier,nsamp,nmod=10;
	nsamp=ncol;
	int * temp = (int *) Calloc(nsamp,int);
	int * tempind = (int *) Calloc(nsamp,int);
	//count up how many hierarchies
	for(i=0;i<nsamp;i++)
	{
		temp[i]=0;
		for(j=0;j<nsamp;j++) temp[i]+=(structure[index2(q,j,nrow)]==i ? 1:0);
	}
	nhier=0;
	for(j=0;j<nsamp;j++) nhier+=(temp[j]>0 ? 1:0);
	//order hierarchies
	bubble_sort_dec_int(temp,tempind,nsamp);
	//now work through hierarchies in order and expand model set
	if(nhier==1)
	{
		for(i=0;i<nmod;i++)
		{
			for(j=0;j<nsamp;j++)
			{
				(*models_num)[index2(*r,j,*totmods)]=i;
				(*models_num)[index2(*r,j+nsamp,*totmods)]=0;
			}
			*r=*r+1;
			//perform memory reallocation if required
			if(*r>=*totmods) realloc_maxmod_full(nsamp,totmods,incmods,models_num);
		}
	}
	else
	{
		//temp[0] must be >1 in this loop
		for(j=0;j<nmod;j++)
		{
			//initialise structures
			for(k=0;k<nsamp;k++) (*models_num)[index2(*r,k+nsamp,*totmods)]=k;
			//now fill in gaps and pass to recursive function
			for(k=0;k<nsamp;k++)
			{
				if(structure[index2(q,k,nrow)]==tempind[0])
				{
					(*models_num)[index2(*r,k,*totmods)]=j;
					(*models_num)[index2(*r,k+nsamp,*totmods)]=tempind[0];
				}
			}
			genfullmodels_recur(nhier,0,temp,tempind,q,nrow,ncol,structure,r,totmods,incmods,models_num);
		}
	}
	//free memory from the heap (automatically sets pointer to NULL)
	Free(temp);Free(tempind);
	return;
}
Example No. 22
int main(int argc, char *argv[]) {

  int i,j;

  char *vocab_filename;
  FILE *tempfile;
  char tempfiles_directory[1000];
  int vocab_size;
  FILE *vocab_file;

  int verbosity;

  int buffer_size;
  int position_in_buffer;
  int number_of_tempfiles;
  int max_files;
  int fof_size;

  unsigned short *buffer;
  unsigned short *placeholder;
  unsigned short *temp_ngram;
  int temp_count;
  
  char temp_word[500];
  char temp_word2[500];

  char *temp_file_root;
  char *temp_file_ext;
  char *host_name;
  int proc_id;
  struct utsname uname_info;

  flag write_ascii;

  /* Vocab hash table things */

  struct hash_table vocabulary;
  unsigned long hash_size;
  unsigned long M;

  tempfile = NULL; /* Just to prevent compilation warnings. */

  report_version(&argc,argv);

  verbosity = pc_intarg(&argc,argv,"-verbosity",DEFAULT_VERBOSITY);

  /* Process command line */
  
  if (pc_flagarg( &argc, argv,"-help") || argc==1) {
    fprintf(stderr,"text2idngram - Convert a text stream to an id n-gram stream.\n");
    fprintf(stderr,"Usage : text2idngram  -vocab .vocab \n");
    fprintf(stderr,"                    [ -buffer 100 ]\n");
    fprintf(stderr,"                    [ -hash %d ]\n",DEFAULT_HASH_SIZE);
    fprintf(stderr,"                    [ -temp %s ]\n",DEFAULT_TEMP);
    fprintf(stderr,"                    [ -files %d ]\n",DEFAULT_MAX_FILES);
    fprintf(stderr,"                    [ -gzip | -compress ]\n");
    fprintf(stderr,"                    [ -verbosity %d ]\n",
	    DEFAULT_VERBOSITY);
    fprintf(stderr,"                    [ -n 3 ]\n");
    fprintf(stderr,"                    [ -write_ascii ]\n");
    fprintf(stderr,"                    [ -fof_size 10 ]\n");
    exit(1);
  }

  pc_message(verbosity,2,"text2idngram\n");

  n = pc_intarg( &argc, argv, "-n",DEFAULT_N);

  placeholder = (unsigned short *) rr_malloc(sizeof(unsigned short)*n);
  temp_ngram = (unsigned short *) rr_malloc(sizeof(unsigned short)*n);
  hash_size = pc_intarg( &argc, argv, "-hash",DEFAULT_HASH_SIZE);
  buffer_size = pc_intarg( &argc, argv, "-buffer",STD_MEM);

  write_ascii = pc_flagarg(&argc,argv,"-write_ascii");

  fof_size = pc_intarg(&argc,argv,"-fof_size",10);

  max_files = pc_intarg( &argc, argv, "-files",DEFAULT_MAX_FILES);

  vocab_filename = salloc(pc_stringarg( &argc, argv, "-vocab", "" ));
  
  if (!strcmp("",vocab_filename)) {
    quit(-1,"text2idngram : Error : Must specify a vocabulary file.\n");
  }
    
  strcpy(tempfiles_directory,pc_stringarg( &argc, argv, "-temp", 
					   DEFAULT_TEMP));

  if (pc_flagarg(&argc,argv,"-compress")) {
    temp_file_ext = salloc(".Z");
  }
  else {
    if (pc_flagarg(&argc,argv,"-gzip")) {
      temp_file_ext = salloc(".gz");
    }
    else {
      temp_file_ext = salloc("");
    }
  }

  uname(&uname_info);

  host_name = salloc(uname_info.nodename);

  proc_id = getpid();

  sprintf(temp_word,"%s%s.%d.",TEMP_FILE_ROOT,host_name,proc_id);

  temp_file_root = salloc(temp_word);

  pc_report_unk_args(&argc,argv,verbosity);
  
  /* If the last character in the directory name isn't a / then add one. */
  
  if (tempfiles_directory[strlen(tempfiles_directory)-1] != '/') {
    strcat(tempfiles_directory,"/");
  }
  
  pc_message(verbosity,2,"Vocab                  : %s\n",vocab_filename);
  pc_message(verbosity,2,"N-gram buffer size     : %d\n",buffer_size);
  pc_message(verbosity,2,"Hash table size        : %d\n",hash_size);
  pc_message(verbosity,2,"Temp directory         : %s\n",tempfiles_directory);
  pc_message(verbosity,2,"Max open files         : %d\n",max_files);
  pc_message(verbosity,2,"FOF size               : %d\n",fof_size);  
  pc_message(verbosity,2,"n                      : %d\n",n);

  buffer_size *= (1000000/(sizeof(unsigned short)*n));

  /* Allocate memory for hash table */

  fprintf(stderr,"Initialising hash table...\n");

  M = nearest_prime(hash_size);

  new_hashtable(&vocabulary,M);

  /* Read in the vocabulary */

  vocab_size = 0;

  vocab_file = rr_iopen(vocab_filename);

  pc_message(verbosity,2,"Reading vocabulary...\n");

  while (fgets (temp_word, sizeof(temp_word),vocab_file)) {
    if (strncmp(temp_word,"##",2)==0) continue;
    sscanf (temp_word, "%s ",temp_word2);

    /* Check for repeated words in the vocabulary */

    if (index2(&vocabulary,temp_word2) != 0) {
      fprintf(stderr,"======================================================\n");
      fprintf(stderr,"WARNING: word %s is repeated in the vocabulary.\n",temp_word);
      fprintf(stderr,"=======================================================\n");
    }
    if (strncmp(temp_word,"#",1)==0) {
      fprintf(stderr,"\n\n===========================================================\n");
      fprintf(stderr,":\nWARNING: line assumed NOT a comment:\n");
      fprintf(stderr,     ">>> %s <<<\n",temp_word);
      fprintf(stderr,     "         '%s' will be included in the vocabulary.\n",temp_word2);
      fprintf(stderr,     "         (comments must start with '##')\n");
      fprintf(stderr,"===========================================================\n\n");
    }
    vocab_size++;
    add_to_hashtable(&vocabulary,hash(temp_word2,M),temp_word2,vocab_size);
  }

  if (vocab_size > MAX_VOCAB_SIZE) {
    quit(-1,"text2idngram : Error : Vocabulary size exceeds maximum.\n");
  }   
  
  pc_message(verbosity,2,"Allocating memory for the n-gram buffer...\n");

  buffer=(unsigned short*) rr_malloc(n*(buffer_size+1)*sizeof(unsigned short));

  number_of_tempfiles = 0;

  /* Read text into buffer */

  /* Read in the first ngram */

  position_in_buffer = 0;

  for (i=0;i<=n-1;i++) {
    get_word(stdin,temp_word);
    add_to_buffer(index2(&vocabulary,temp_word),0,i,buffer);
  }

  while (!rr_feof(stdin)) {

    /* Fill up the buffer */

    pc_message(verbosity,2,"Reading text into the n-gram buffer...\n");
    pc_message(verbosity,2,"20,000 n-grams processed for each \".\", 1,000,000 for each line.\n");
    while ((position_in_buffer<buffer_size) && (!rr_feof(stdin))) {
      position_in_buffer++;
      if (position_in_buffer % 20000 == 0) {
	if (position_in_buffer % 1000000 == 0) {
	  pc_message(verbosity,2,".\n");
	}
	else {
	  pc_message(verbosity,2,".");
	}
      }
      for (i=1;i<=n-1;i++) {
	add_to_buffer(buffer_contents(position_in_buffer-1,i,buffer),
		      position_in_buffer,i-1,buffer);
      }
      if (get_word(stdin,temp_word) == 1) {
	add_to_buffer(index2(&vocabulary,temp_word),position_in_buffer,
		      n-1,buffer);
      }
    }

    for (i=0;i<=n-1;i++) {
      placeholder[i] = buffer_contents(position_in_buffer,i,buffer);
    }

    /* Sort buffer */
    
    pc_message(verbosity,2,"\nSorting n-grams...\n");
    
    qsort((void*) buffer,(size_t) position_in_buffer,
	  n*sizeof(unsigned short),compare_ngrams);

    /* Output the buffer to temporary BINARY file */
    
    number_of_tempfiles++;

    sprintf(temp_word,"%s%s%d%s",tempfiles_directory,temp_file_root,
	    number_of_tempfiles,temp_file_ext);

    pc_message(verbosity,2,"Writing sorted n-grams to temporary file %s\n",
	       temp_word);

    tempfile = rr_oopen(temp_word);

    for (i=0;i<=n-1;i++) {
      temp_ngram[i] = buffer_contents(0,i,buffer);
      if (temp_ngram[i] > MAX_VOCAB_SIZE) {
	quit(-1,"Invalid trigram in buffer.\nAborting");

      }
    }
    temp_count = 1;

    for (i=1;i<=position_in_buffer;i++) {
 
      if (!compare_ngrams(temp_ngram,&buffer[i*n])) {
	temp_count++;
      }
      else {
	for (j=0;j<=n-1;j++) {
	  rr_fwrite(&temp_ngram[j],sizeof(unsigned short),1,
		    tempfile,"temporary n-gram ids");
	  temp_ngram[j] = buffer_contents(i,j,buffer);
	}
	rr_fwrite(&temp_count,sizeof(int),1,tempfile,
		  "temporary n-gram counts");
	temp_count = 1;
      }
    }
    
    rr_oclose(tempfile);

    for (i=0;i<=n-1;i++) {
      add_to_buffer(placeholder[i],0,i,buffer);
    }

    position_in_buffer = 0;

  }

  /* Merge the temporary files, and output the result to standard output */

  pc_message(verbosity,2,"Merging temporary files...\n");
  
  merge_tempfiles(1,
		  number_of_tempfiles,
		  temp_file_root,
		  temp_file_ext,
		  max_files,
		  tempfiles_directory,
		  stdout,
		  write_ascii,
		  fof_size); 

  pc_message(verbosity,0,"text2idngram : Done.\n");

  exit(0);
  
}
Example No. 23
static void pick_minima(const char *logfile, int *ibox, int ndim, int len, real W[])
{
    FILE      *fp;
    int        i, j, k, nmin;
    t_minimum *mm, this_min;
    int       *this_point;
    int        loopmax, loopcounter;

    snew(mm, len);
    nmin = 0;
    fp   = gmx_ffopen(logfile, "w");
    /* Loop over each element in the array of dimension ndim seeking
     * minima with respect to every dimension. Specialized loops for
     * speed with ndim == 2 and ndim == 3. */
    switch (ndim)
    {
        case 0:
            /* This is probably impossible to reach anyway. */
            break;
        case 2:
            for (i = 0; (i < ibox[0]); i++)
            {
                for (j = 0; (j < ibox[1]); j++)
                {
                    /* Get the index of this point in the flat array */
                    this_min.index = index2(ibox, i, j);
                    this_min.ener  = W[this_min.index];
                    if (is_local_minimum_from_below(&this_min, i, 0,         index2(ibox, i-1, j  ), W) &&
                        is_local_minimum_from_above(&this_min, i, ibox[0]-1, index2(ibox, i+1, j  ), W) &&
                        is_local_minimum_from_below(&this_min, j, 0,         index2(ibox, i, j-1), W) &&
                        is_local_minimum_from_above(&this_min, j, ibox[1]-1, index2(ibox, i, j+1), W))
                    {
                        add_minimum(fp, nmin, &this_min, mm);
                        nmin++;
                    }
                }
            }
            break;
        case 3:
            for (i = 0; (i < ibox[0]); i++)
            {
                for (j = 0; (j < ibox[1]); j++)
                {
                    for (k = 0; (k < ibox[2]); k++)
                    {
                        /* Get the index of this point in the flat array */
                        this_min.index = index3(ibox, i, j, k);
                        this_min.ener  = W[this_min.index];
                        if (is_local_minimum_from_below(&this_min, i, 0,         index3(ibox, i-1, j, k  ), W) &&
                            is_local_minimum_from_above(&this_min, i, ibox[0]-1, index3(ibox, i+1, j, k  ), W) &&
                            is_local_minimum_from_below(&this_min, j, 0,         index3(ibox, i, j-1, k  ), W) &&
                            is_local_minimum_from_above(&this_min, j, ibox[1]-1, index3(ibox, i, j+1, k  ), W) &&
                            is_local_minimum_from_below(&this_min, k, 0,         index3(ibox, i, j, k-1), W) &&
                            is_local_minimum_from_above(&this_min, k, ibox[2]-1, index3(ibox, i, j, k+1), W))
                        {
                            add_minimum(fp, nmin, &this_min, mm);
                            nmin++;
                        }
                    }
                }
            }
            break;
        default:
            /* Note this treats ndim == 1 and ndim > 3 */

            /* Set up an ndim-dimensional vector to loop over the points
             * on the grid. (0,0,0, ... 0) is an acceptable place to
             * start. */
            snew(this_point, ndim);

            /* Determine the number of points of the ndim-dimensional
             * grid. */
            loopmax = ibox[0];
            for (i = 1; i < ndim; i++)
            {
                loopmax *= ibox[i];
            }

            loopcounter = 0;
            while (loopmax > loopcounter)
            {
                gmx_bool bMin = TRUE;

                /* Get the index of this_point in the flat array */
                this_min.index = indexn(ndim, ibox, this_point);
                this_min.ener  = W[this_min.index];

                /* Is this_point a minimum from above and below in each
                 * dimension? */
                for (i = 0; bMin && (i < ndim); i++)
                {
                    /* Save the index of this_point within the current
                     * dimension so we can change that index in the
                     * this_point array for use with indexn(). */
                    int index = this_point[i];
                    this_point[i]--;
                    bMin = bMin &&
                        is_local_minimum_from_below(&this_min, index, 0,         indexn(ndim, ibox, this_point), W);
                    this_point[i] += 2;
                    bMin           = bMin &&
                        is_local_minimum_from_above(&this_min, index, ibox[i]-1, indexn(ndim, ibox, this_point), W);
                    this_point[i]--;
                }
                if (bMin)
                {
                    add_minimum(fp, nmin, &this_min, mm);
                    nmin++;
                }

                /* update global loop counter */
                loopcounter++;

                /* Avoid underflow of this_point[i] */
                if (loopmax > loopcounter)
                {
                    /* update this_point non-recursively */
                    i = ndim-1;
                    this_point[i]++;
                    while (ibox[i] == this_point[i])
                    {
                        this_point[i] = 0;
                        i--;
                        /* this_point[i] cannot underflow because
                         * loopmax > loopcounter. */
                        this_point[i]++;
                    }
                }
            }

            sfree(this_point);
            break;
    }
    qsort(mm, nmin, sizeof(mm[0]), comp_minima);
    fprintf(fp, "Minima sorted after energy\n");
    for (i = 0; (i < nmin); i++)
    {
        print_minimum(fp, i, &mm[i]);
    }
    gmx_ffclose(fp);
    sfree(mm);
}
Example No. 24
//function to generate top subset of fully and partially dependent models based on an intermediate set of structures
void genfullmodels_approx_recur(int nhier, int currhier1, int *temp, int *tempind, int *currmods, int q, int nrow, int ncol, int *structure, int *r, int *totmods, int incmods, int nmodcol, double *lPDM_int_mat, int *lPDM_int_ind, int ntotcol, int **models_num, int **hyp, double **lPPA_mat, double logthresh, int *ncomb_sub, double curr_lPPA, double *max_lPPA)
{
	/*'nhier' is number of hierarchies
	'currhier1' is current hierarchy
	'temp' and 'tempind' are vectors recording structural information
	'currmods' is model structure for current model
	'q' is specific row of 'structure' we wish to extract
	'nrow' and 'ncol' are used for indexing (based on 'structure')
	'structure' is a matrix containing intermediate model structures
	'r' denotes current model in 'models_num'
	'totmods' is maximum number of models in 'models_num'
	'incmods' is arbitrary number of models to increase if reallocation necessary
	'nmodcol' is number of columns of 'models_num'
	'lPDM_int_mat' and 'lPDM_int_ind' are intermediate matrices (in vector form)
		containing log[P'(D|M)] information
	'ntotcol' is number of columns of 'lPDM_int_ind' and 'lPDM_int_mat'
	'models_num' is a matrix (in vector form) recording final models
	'hyp' is a binary vector of length 'incmods', with 0=null and 1=alt
	'lPPA_mat' is an output vector of log[P'(M|D)]s corresponding to the set of output models
	'logthresh' is the log-threshold relative to the maximum model
	'ncomb_sub' is intermediate vector used for indexing
	'curr_lPPA' is current log-PDM at previous hierarchy
	'max_lPPA' is current maximum log-PDM*/
	
	//declare necessary variables
	int i,j,k,nsamp,nmod=10,valid;
	nsamp=ncol;
		
	//work out variables relating to maximum independent model
	double temp_lPPA;
	int corrcol;
	int * currmods1 = (int *) Calloc(nmodcol,int);
	int * temp_index = (int *) Calloc(nsamp,int);
	
	int currhier=currhier1+1;
	//if only independent structures left to evaluate
	if(temp[currhier]==1)
	{
		corrcol=tempind[currhier];
		i=0;valid=0;
		while(i<nmod&&valid==0)
		{
			//reset model structure
			for(j=0;j<nmodcol;j++) (*models_num)[index2_col(*r,j,nmodcol)]=currmods[j];
			(*models_num)[index2_col(*r,corrcol,nmodcol)]=lPDM_int_ind[index2(i,corrcol,nmod)];
			//change model
			temp_lPPA=curr_lPPA-lPDM_int_mat[index2(0,corrcol,nmod)]+lPDM_int_mat[index2(i,corrcol,nmod)];
			//if final choice then record model and update system
			if(currhier==(nhier-1))
			{
				//check if new model is selected
				if(((*max_lPPA)-temp_lPPA)<logthresh)
				{
					(*lPPA_mat)[*r]=temp_lPPA;
					*r=*r+1;
					//increase size of output vectors if necessary
					if(*r>(*totmods-1)) realloc_approx(nmodcol,totmods,incmods,models_num,hyp,lPPA_mat);
					/*if new model is maximum model then recalculate log-acceptance threshold then reset maximum model*/
					if(temp_lPPA>(*max_lPPA)) *max_lPPA=temp_lPPA;
				}
				else valid=1;
			}
			else
			{
				for(j=0;j<nmodcol;j++) currmods1[j]=(*models_num)[index2_col(*r,j,nmodcol)];
				//enter further hierarchy
				genfullmodels_approx_recur(nhier,currhier,temp,tempind,currmods1,q,nrow,ncol,structure,r,totmods,incmods,nmodcol,lPDM_int_mat,lPDM_int_ind,ntotcol,models_num,hyp,lPPA_mat,logthresh,ncomb_sub,temp_lPPA,max_lPPA);
			}
			i++;
		}
	}
	else
	{
		//calculate correct column of lPDM_int_mat based on structure of current hierarchy
		corrcol=calc_corr_col(q,nrow,structure,nsamp,ncomb_sub,tempind[currhier],temp[currhier],temp_index);
		//calculate PPAs
		for(i=0;i<nmod;i++)
		{
/*			if(lPDM_int_ind[index2(i,corrcol,nmod)]>0)*/
/*			{*/
				//reset model structure
				for(j=0;j<nmodcol;j++) (*models_num)[index2_col(*r,j,nmodcol)]=currmods[j];
				//now fill in gaps and initialise calculation of changes to lPDM
				temp_lPPA=curr_lPPA;
				for(k=0;k<nsamp;k++)
				{
					if(structure[index2(q,k,nrow)]==tempind[currhier])
					{
						(*models_num)[index2_col(*r,k,nmodcol)]=lPDM_int_ind[index2(i,corrcol,nmod)];
						(*models_num)[index2_col(*r,k+nsamp,nmodcol)]=tempind[currhier];
						temp_lPPA-=lPDM_int_mat[index2(0,k,nmod)];
					}
				}
				//adjust lPDM to correct structure
				temp_lPPA+=lPDM_int_mat[index2(i,corrcol,nmod)];
				//if final choice then record model and update system
				if(currhier==(nhier-1))
				{
					//check if new model is selected
					if(((*max_lPPA)-temp_lPPA)<logthresh)
					{
						(*lPPA_mat)[*r]=temp_lPPA;
						*r=*r+1;
						//increase size of output vectors if necessary
						if(*r>(*totmods-1)) realloc_approx(nmodcol,totmods,incmods,models_num,hyp,lPPA_mat);
						/*if new model is maximum model then recalculate log-acceptance threshold then reset maximum model*/
						if(temp_lPPA>(*max_lPPA)) *max_lPPA=temp_lPPA;
					}
				}
				else
				{
					for(j=0;j<nmodcol;j++) currmods1[j]=(*models_num)[index2_col(*r,j,nmodcol)];
					//enter further hierarchy
					genfullmodels_approx_recur(nhier,currhier,temp,tempind,currmods1,q,nrow,ncol,structure,r,totmods,incmods,nmodcol,lPDM_int_mat,lPDM_int_ind,ntotcol,models_num,hyp,lPPA_mat,logthresh,ncomb_sub,temp_lPPA,max_lPPA);
				}
/*			}*/
		}
	}
	//free memory from the heap (automatically sets pointers to NULL)
	Free(currmods1);Free(temp_index);
	return;
}
Example No. 25
MojErr MojDbIndexTest::defaultValuesTest()
{
	MojDbIndex index(NULL, NULL);
	MojRefCountedPtr<TestIndex> storageIndex(new TestIndex(false));
	MojAllocCheck(storageIndex.get());
	TestIndex& ti = *storageIndex;

	MojObject prop;
	MojErr err = prop.putString(MojDbIndex::NameKey, _T("foo"));
	MojTestErrCheck(err);
	err = prop.putInt(MojDbIndex::DefaultKey, 100);
	MojTestErrCheck(err);
	err = index.addProp(prop);
	MojTestErrCheck(err);
	MojDbReq req;
	err = index.open(storageIndex.get(), (MojInt64) 0, req);
	MojTestErrCheck(err);

	err = put(index, 1, _T("{\"bar\":1}"), NULL);
	MojTestErrCheck(err);
	MojTestAssert(ti.m_putCount == 1 && ti.m_delCount == 0 && ti.m_set.size() == 1);
	err = put(index, 1, _T("{\"foo\":5}"), NULL);
	MojTestErrCheck(err);
	MojTestAssert(ti.m_putCount == 2 && ti.m_delCount == 0 && ti.m_set.size() == 2);
	err = put(index, 1, _T("{\"foo\":{\"bar\":3}}"), NULL);
	MojTestErrCheck(err);
	MojTestAssert(ti.m_putCount == 3 && ti.m_delCount == 0 && ti.m_set.size() == 3);

	err = index.close();
	MojTestErrCheck(err);

	MojDbIndex index2(NULL, NULL);
	MojRefCountedPtr<TestIndex> storageIndex2(new TestIndex(false));
	MojAllocCheck(storageIndex2.get());
	TestIndex& ti2 = *storageIndex2;

	MojObject prop2;
	err = prop2.putString(MojDbIndex::NameKey, _T("bar"));
	MojTestErrCheck(err);
	err = index2.addProp(prop2);
	MojTestErrCheck(err);
	err = index2.open(storageIndex2.get(), (MojInt64) 0, req);
	MojTestErrCheck(err);

	err = put(index2, 1, _T("{\"bar\":1}"), NULL);
	MojTestErrCheck(err);
	MojTestAssert(ti2.m_putCount == 1 && ti2.m_delCount == 0 && ti2.m_set.size() == 1);
	err = put(index2, 1, _T("{\"foo\":5}"), NULL);
	MojTestErrCheck(err);
	MojTestAssert(ti2.m_putCount == 1 && ti2.m_delCount == 0 && ti2.m_set.size() == 1);
	err = put(index2, 1, _T("{\"foo\":{\"bar\":3}}"), NULL);
	MojTestErrCheck(err);
	MojTestAssert(ti2.m_putCount == 1 && ti2.m_delCount == 0 && ti2.m_set.size() == 1);

	err = index2.close();
	MojTestErrCheck(err);

	MojDbIndex index3(NULL, NULL);
	MojRefCountedPtr<TestIndex> storageIndex3(new TestIndex(false));
	MojAllocCheck(storageIndex3.get());
	TestIndex& ti3 = *storageIndex3;

	MojObject prop3;
	err = prop3.putString(MojDbIndex::NameKey, _T("bar"));
	MojTestErrCheck(err);
	err = index3.addProp(prop3);
	MojTestErrCheck(err);
	err = prop3.putString(MojDbIndex::NameKey, _T("foo"));
	MojTestErrCheck(err);
	err = prop3.putInt(MojDbIndex::DefaultKey, 100);
	MojTestErrCheck(err);
	err = index3.addProp(prop3);
	MojTestErrCheck(err);
	err = index3.open(storageIndex3.get(), (MojInt64) 0, req);
	MojTestErrCheck(err);

	err = put(index3, 1, _T("{\"bar\":1}"), NULL);
	MojTestErrCheck(err);
	MojTestAssert(ti3.m_putCount == 1 && ti3.m_delCount == 0 && ti3.m_set.size() == 1);
	err = put(index3, 1, _T("{\"foo\":5}"), NULL);
	MojTestErrCheck(err);
	MojTestAssert(ti3.m_putCount == 1 && ti3.m_delCount == 0 && ti3.m_set.size() == 1);
	err = put(index3, 1, _T("{\"foo\":{\"bar\":3}}"), NULL);
	MojTestErrCheck(err);
	MojTestAssert(ti3.m_putCount == 1 && ti3.m_delCount == 0 && ti3.m_set.size() == 1);
	err = put(index3, 1, _T("{\"foo\":5, \"bar\":5}"), NULL);
	MojTestErrCheck(err);
	MojTestAssert(ti3.m_putCount == 2 && ti3.m_delCount == 0 && ti3.m_set.size() == 2);

	err = index3.close();
	MojTestErrCheck(err);

	return MojErrNone;
}
Example No. 26
int main(int argc, char *argv[]) {

  int verbosity;
  int vocab_size;
  FILE *vocab_file;
  int buffer_size;
  flag write_ascii;
  int max_files;
  int number_of_tempfiles;
  char *vocab_filename;
  char *idngram_filename;
  char temp_word[MAX_WORD_LENGTH];
  char temp_word2[MAX_WORD_LENGTH];
  char temp_word3[MAX_WORD_LENGTH];
  flag contains_unks;
  int position_in_buffer;
  FILE *outfile;
  FILE *tempfile;
  FILE *non_unk_fp;
  ngram_rec *buffer;
  flag same_ngram;
  int i;
  int j;
  int fof_size;
  int size_of_rec;

  char temp_directory[1000];
  char *temp_file_ext;

  /* Vocab hash table things */

  struct idngram_hash_table vocabulary;
  unsigned long hash_size;
  unsigned long M;

  wordid_t *current_ngram;
  int current_count;
  wordid_t *sort_ngram;
  int sort_count;
  
  /* Process command line */

  report_version(&argc,argv);
  
  if (argc == 1 || pc_flagarg(&argc, argv,"-help")) {    
    /* Display help message */    
    help_message();
    exit(1);
  }


  n = pc_intarg( &argc, argv, "-n",DEFAULT_N);
  hash_size = pc_intarg( &argc, argv, "-hash",DEFAULT_HASH_SIZE);
  buffer_size = pc_intarg( &argc, argv, "-buffer",STD_MEM);
  write_ascii = pc_flagarg(&argc,argv,"-write_ascii");
  verbosity = pc_intarg(&argc,argv,"-verbosity",DEFAULT_VERBOSITY);
  max_files = pc_intarg( &argc, argv, "-files",DEFAULT_MAX_FILES);
  fof_size = pc_intarg(&argc,argv,"-fof_size",10);
  vocab_filename = salloc(pc_stringarg( &argc, argv, "-vocab", "" ));
  idngram_filename = salloc(pc_stringarg( &argc, argv, "-idngram", "" ));
  
  if (!strcmp("",vocab_filename)) 
    quit(-1,"Error : Must specify a vocabulary file.\n");

  if (!strcmp("",idngram_filename)) 
    quit(-1,"text2idngram : Error : Must specify idngram file.\n");
    
  if (pc_flagarg(&argc,argv,"-compress")) 
    temp_file_ext = salloc(".Z");
  else {
    if (pc_flagarg(&argc,argv,"-gzip")) 
      temp_file_ext = salloc(".gz");
    else 
      temp_file_ext = salloc("");
  }

  strcpy(temp_directory, "cmuclmtk-XXXXXX");
  if (mkdtemp(temp_directory) == NULL) {
     quit(-1, "Failed to create temporary folder: %s\n", strerror(errno));
  }

  pc_report_unk_args(&argc,argv,verbosity);

  outfile = rr_fopen(idngram_filename,"wb");
  
  pc_message(verbosity,2,"Vocab           : %s\n",vocab_filename);
  pc_message(verbosity,2,"Output idngram  : %s\n",idngram_filename);
  pc_message(verbosity,2,"Buffer size     : %d\n",buffer_size);
  pc_message(verbosity,2,"Hash table size : %d\n",hash_size);
  pc_message(verbosity,2,"Max open files  : %d\n",max_files);
  pc_message(verbosity,2,"n               : %d\n",n);
  pc_message(verbosity,2,"FOF size               : %d\n",fof_size);  

  size_of_rec = (sizeof(wordid_t) * n) + 16 - (( n* sizeof(wordid_t)) % 16);
  buffer_size *= (1000000/((sizeof(ngram_rec) + size_of_rec)));
  fprintf(stderr,"buffer size = %d\n",buffer_size);

  /* Allocate memory for hash table */

  fprintf(stderr,"Initialising hash table...\n");

  M = nearest_prime(hash_size);

  new_idngram_hashtable(&vocabulary,M);

  /* Read in the vocabulary */

  vocab_size = 0;

  vocab_file = rr_iopen(vocab_filename);

  pc_message(verbosity,2,"Reading vocabulary...\n");

  while (fgets (temp_word, sizeof(temp_word),vocab_file)) {
    if (strncmp(temp_word,"##",2)==0) continue;
    sscanf (temp_word, "%s ",temp_word2);

    /* Check for vocabulary order */
    if (vocab_size > 0 && strcmp(temp_word2,temp_word3)<0) 
      quit(-1,"wngram2idngram : Error : Vocabulary is not alphabetically ordered.\n");

    /* Check for repeated words in the vocabulary */

    if (index2(&vocabulary,temp_word2) != 0) 
      warn_on_repeated_words(temp_word);

    warn_on_wrong_vocab_comments(temp_word);

    vocab_size++;
    
    add_to_idngram_hashtable(&vocabulary,idngram_hash(temp_word2,M),temp_word2,vocab_size);
    strcpy(temp_word3,temp_word2);
  }

  if (vocab_size > MAX_VOCAB_SIZE) 
    quit(-1,"Error : Vocabulary size exceeds maximum.\n");
  
  pc_message(verbosity,2,"Allocating memory for the buffer...\n");

  buffer=(ngram_rec *) rr_malloc((buffer_size+1)*sizeof(ngram_rec));
  
  for (i=0;i<=buffer_size;i++) 
    buffer[i].word = (wordid_t *) rr_malloc(n*sizeof(wordid_t));

  /* Open the "non-OOV" tempfile */

  sprintf(temp_word, "%s/1%s", temp_directory, temp_file_ext);
  
  non_unk_fp = rr_fopen(temp_word,"w");

  pc_message(verbosity,2,"Writing non-OOV counts to temporary file %s\n",
	     temp_word);
  number_of_tempfiles = 1;

  current_ngram = (wordid_t *) rr_malloc(n*sizeof(wordid_t));
  sort_ngram = (wordid_t *) rr_malloc(n*sizeof(wordid_t));

  /* Read text into buffer */
  position_in_buffer = 0;

  while (!rr_feof(stdin)) {
    
    for (i=0;i<=n-1;i++) {
      get_word(stdin,temp_word);
      current_ngram[i]=index2(&vocabulary,temp_word);
    }
    if (scanf("%d",&current_count) != 1) 
      if (!rr_feof(stdin)) 
	quit(-1,"Error reading n-gram count from stdin.\n");

    if (!rr_feof(stdin)) {

      contains_unks = 0;
      for (i=0;i<=n-1;i++) {
	if (!current_ngram[i]) 
	  contains_unks = 1;
      }

      if (contains_unks) {
	/* Write to buffer */

	position_in_buffer++;

	if (position_in_buffer >= buffer_size) {

	  /* Sort buffer */
	  pc_message(verbosity,2,
		     "Sorting n-grams which include an OOV word...\n");

	  qsort((void*) buffer,(size_t) position_in_buffer,
		sizeof(ngram_rec),compare_ngrams2);

	  pc_message(verbosity,2,"Done.\n");

	  /* Write buffer to temporary file */

	  number_of_tempfiles++;
	  
	  sprintf(temp_word,"%s/%d%s", temp_directory,
		  number_of_tempfiles,temp_file_ext);
	  
	  pc_message(verbosity,2,
		     "Writing sorted OOV-counts buffer to temporary file %s\n",
		     temp_word);

	  tempfile = rr_fopen(temp_word,"w");
	  
	  for (i=0;i<=n-1;i++) 
	    sort_ngram[i] = buffer[0].word[i];

	  sort_count = buffer[0].count;

	  for (i=0;i<=position_in_buffer-2;i++) {
	    
	    same_ngram = 1;
	    for (j=n-1;j>=0;j--) {
	      if (buffer[i].word[j] != sort_ngram[j]) {
		same_ngram = 0;
		j = -1;
	      }
	    }

	    if (same_ngram) 
	      sort_count += buffer[i].count;
	    else {
	      for (j=0;j<=n-1;j++) {
		rr_fwrite((char*)&sort_ngram[j],sizeof(wordid_t),1,
			  tempfile,"temporary n-gram ids");
		sort_ngram[j] = buffer[i].word[j];
	      }
	      rr_fwrite((char*)&sort_count,sizeof(int),1,tempfile,
			"temporary n-gram counts");
	      sort_count = buffer[i].count;
	    }
	  }	    
	  for (j=0;j<=n-1;j++) 
	    rr_fwrite((char*)&sort_ngram[j],sizeof(wordid_t),1,
		      tempfile,"temporary n-gram ids");

	  rr_fwrite((char*)&sort_count,sizeof(int),1,tempfile,
		    "temporary n-gram counts");
	  rr_oclose(tempfile);
	  position_in_buffer = 1;

	}
	
	for (i=0;i<=n-1;i++) 
	  buffer[position_in_buffer-1].word[i] = current_ngram[i];

	buffer[position_in_buffer-1].count = current_count;

      }else {
	/* Write to temporary file */
	for (i=0;i<=n-1;i++) 
	  rr_fwrite((char*)&current_ngram[i],sizeof(wordid_t),1,
		    non_unk_fp,"temporary n-gram ids");

	rr_fwrite((char*)&current_count,sizeof(int),1,non_unk_fp,
		  "temporary n-gram counts");
      }
    }
  }

  if (position_in_buffer > 0) {

    /* Only do this bit if we have actually seen some OOVs */
    /* Sort final buffer */    
    pc_message(verbosity,2,"Sorting final buffer...\n");

    qsort((void*) buffer,(size_t) position_in_buffer,
	  sizeof(ngram_rec),compare_ngrams2);
    
    /* Write final buffer */
    
    number_of_tempfiles++;
  
    sprintf(temp_word,"%s/%hu%s", temp_directory,
	    number_of_tempfiles,temp_file_ext);
    
    pc_message(verbosity,2,"Writing sorted buffer to temporary file %s\n", temp_word);

    tempfile = rr_fopen(temp_word,"w");
    
    for (i=0;i<=n-1;i++) 
      sort_ngram[i] = buffer[0].word[i];

    sort_count = buffer[0].count;
    
    for (i=1;i<=position_in_buffer-1;i++) {
      
      same_ngram = 1;
      for (j=n-1;j>=0;j--) {
	if (buffer[i].word[j] != sort_ngram[j]) {
	  same_ngram = 0;
	  j = -1;
	}
      }
      
      if (same_ngram) 
	sort_count += buffer[i].count;
      else {
	for (j=0;j<=n-1;j++) {
	  rr_fwrite((char*)&sort_ngram[j],sizeof(wordid_t),1,
		    tempfile,"temporary n-gram ids");
	  sort_ngram[j] = buffer[i].word[j];
	}
	rr_fwrite((char*)&sort_count,sizeof(int),1,tempfile,
		  "temporary n-gram counts");
	sort_count = buffer[i].count;
      }
    }	    
    for (j=0;j<=n-1;j++) 
      rr_fwrite((char*)&sort_ngram[j],sizeof(wordid_t),1,
		tempfile,"temporary n-gram ids");

    rr_fwrite((char*)&sort_count,sizeof(int),1,tempfile,
	      "temporary n-gram counts");
    fclose(tempfile);
    

  }
  

  /* Merge the temporary files, and output the result */
  fclose(non_unk_fp);
  pc_message(verbosity,2,"Merging temporary files...\n");
  merge_idngramfiles(1,
		     number_of_tempfiles,
		     temp_directory,
		     temp_file_ext,
		     max_files,
		     outfile,
		     write_ascii,
		     fof_size,
		     n);

  fclose(outfile);

  rmdir(temp_directory);
  pc_message(verbosity,0,"wngram2idngram : Done.\n");

  return 0;
}
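
A note on the duplicate-collapsing pass above (it appears twice: once per full buffer and once for the final partial buffer): because the buffer has just been sorted, identical n-grams are adjacent, so a single linear scan can sum their counts before the records are written out. Below is a minimal, self-contained sketch of that idea, using simplified stand-in types instead of the toolkit's ngram_rec and rr_fwrite.

// Sketch only: collapse consecutive identical n-grams in an already-sorted buffer.
// NgramRec is a simplified stand-in for the toolkit's ngram_rec.
#include <cstdio>
#include <vector>

struct NgramRec {
    std::vector<unsigned> word;   // word ids, length n
    int count;                    // observed count
};

static std::vector<NgramRec> collapse_sorted(const std::vector<NgramRec>& buf)
{
    std::vector<NgramRec> out;
    for (const NgramRec& r : buf) {
        if (!out.empty() && out.back().word == r.word)
            out.back().count += r.count;   // same n-gram as the previous record: accumulate
        else
            out.push_back(r);              // new n-gram: start a new record
    }
    return out;
}

int main()
{
    std::vector<NgramRec> buf = { {{1, 2, 3}, 4}, {{1, 2, 3}, 1}, {{2, 5, 7}, 2} };
    for (const NgramRec& r : collapse_sorted(buf))
        std::printf("%u %u %u\t%d\n", r.word[0], r.word[1], r.word[2], r.count);
    return 0;   // prints "1 2 3  5" and "2 5 7  2"
}
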
Exemplo n.º 27
0
    std::vector< toppers::xml::container::object* > cfg1_out::xml_parser_init( std::string input_file )
    {
      SAX2XMLReader::ValSchemes    valScheme    = SAX2XMLReader::Val_Auto;
      bool                         doNamespaces = true;
      bool                         doSchema = true;
      bool                         schemaFullChecking = false;
      bool                         identityConstraintChecking = true;
      bool                         namespacePrefixes = true;
      bool                         recognizeNEL = false;

      // Initialize the XML4C2 system
      try
      {
        XMLPlatformUtils::Initialize();
      }
      catch (const XMLException& toCatch)
      {
        std::vector<toppers::xml::container::object*> object_array;
        fatal( _("Error during initialization! Message:\n%" ), toNative(toCatch.getMessage()));
        return object_array;
      }

      //
      //  Create a SAX parser object. Then, according to what we were told on
      //  the command line, set it to validate or not.
      //
      SAX2XMLReader* parser = XMLReaderFactory::createXMLReader();
      parser->setFeature(XMLUni::fgSAX2CoreNameSpaces, doNamespaces);
      parser->setFeature(XMLUni::fgXercesSchema, doSchema);
      parser->setFeature(XMLUni::fgXercesHandleMultipleImports, true);
      parser->setFeature(XMLUni::fgXercesSchemaFullChecking, schemaFullChecking);
      parser->setFeature(XMLUni::fgXercesIdentityConstraintChecking, identityConstraintChecking);
      parser->setFeature(XMLUni::fgSAX2CoreNameSpacePrefixes, namespacePrefixes);

      if (valScheme == SAX2XMLReader::Val_Auto)
      {
        parser->setFeature(XMLUni::fgSAX2CoreValidation, true);
        parser->setFeature(XMLUni::fgXercesDynamic, true);
      }
      if (valScheme == SAX2XMLReader::Val_Never)
      {
        parser->setFeature(XMLUni::fgSAX2CoreValidation, false);
      }
      if (valScheme == SAX2XMLReader::Val_Always)
      {
        parser->setFeature(XMLUni::fgSAX2CoreValidation, true);
        parser->setFeature(XMLUni::fgXercesDynamic, false);
      }

      /* External Schema file */
      // Check the XML file
      namespace fs = boost::filesystem;

      if( !fs::exists( input_file ) )
      {
        fatal( _("'%1%` does not exist."), input_file );
      }
      /* If an ini (configuration) file is given, check the parameter names */
      std::string paraname( get_global_string( "ini-file" ) );
      std::string strAUTOSARVersion;
      std::string strSchema;
      std::string strSchemaLocation;
      std::string strContainerPath;
      std::string strModuleName;
      //std::cout << "AUTOSAR ini-file (ini file name):[" << paraname << "]" << std::endl;
      if( !paraname.empty() )
      {
        strAUTOSARVersion = get_global_string( "XML_AUTOSARVersion" );
        if( strAUTOSARVersion.empty() )
        {
          strAUTOSARVersion = "4";
          warning( _( " \"AUTOSARVersion\" parameter is not found in AUTOSAR ini-file. Use default value." ) );
        }
        strSchema = get_global_string( "XML_Schema" );
        if( strSchema.empty() )
        {
          strSchema = "AUTOSAR_4-0-3_STRICT.xsd";
          warning( _( " \"Schema\" parameter is not found in AUTOSAR ini-file. Use default value." ) );
        }
        strSchemaLocation = get_global_string( "XML_SchemaLocation" );
        if( strSchemaLocation.empty() )
        {
          strSchemaLocation = "http://autosar.org/schema/r4.0";
          warning( _( " \"SchemaLocation\" parameter is not found in AUTOSAR ini-file. Use default value." ) );
        }
        strContainerPath = get_global_string( "XML_ContainerPath" );
        if( strContainerPath.empty() )
        {
          strContainerPath = "/AUTOSAR/EcucDefs";
          warning( _( " \"ContainerPath\" parameter is not found in AUTOSAR ini-file. Use default value." ) );
        }
      }
      toppers::global( "XML_AUTOSARVersion" ) = strAUTOSARVersion;
      toppers::global( "XML_Schema" )         = strSchema;
      toppers::global( "XML_SchemaLocation" ) = strSchemaLocation;
      toppers::global( "XML_ContainerPath" )  = strContainerPath;

      // If the XML file contains an xsi:schemaLocation attribute, extract its value
      std::string sstr( "xsi:schemaLocation" );
      std::string buf;
      toppers::read( input_file, buf );

      std::list<std::string> results;
      string::size_type index( buf.find( sstr ) );
      if( index != string::npos )
      {
        string::size_type index2( buf.substr( index ).find( "\"" ) );
        string::size_type index3( buf.substr( index+index2+1 ).find( "\"" ) );
        sstr = buf.substr( index+index2+1, index3 );
        split( results, sstr, boost::is_space() );
      }

      // Check the schema file
      std::ostringstream ostream;
      if( results.size() == 2 && fs::exists( results.back() ) )
      {
        ostream << sstr;
      }
      else
      {
        ostream << get_global_string( "XML_SchemaLocation" ) << " " << fs::absolute( get_global_string( "cfg-directory" ).c_str() ).string() 
         << "/" << get_global_string( "XML_Schema" );
      }
      XMLCh* str (XMLString::transcode (ostream.str().c_str()));

      parser->setProperty(XMLUni::fgXercesSchemaExternalSchemaLocation, str);

      //
      //  Create our SAX handler object and install it on the parser, as the
      //  document and error handler.
      //
      SAX2Handlers handler;
      parser->setContentHandler(&handler);
      parser->setErrorHandler(&handler);

      //reset error count first
      handler.resetErrors();

      handler.filename = input_file;

      try
      {
        parser->parse(input_file.c_str());
      }
      catch (const OutOfMemoryException&)
      {
        warning("OutOfMemoryException");
      }
      catch (const XMLException& e)
      {
        warning( _("\nError during parsing: '%'\nException message is:  \n%\n"), input_file, toNative(e.getMessage()));
      }
      catch (...)
      {
        warning( _("\nUnexpected exception during parsing: '%'\n"), input_file);
      }

      delete parser;

      XMLPlatformUtils::Terminate();

      return handler.object_array;
    }
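
The xsi:schemaLocation handling above is plain substring arithmetic: find the attribute name, take the quoted value that follows it, and split that value on whitespace into a (namespace URI, schema file) pair. Below is a standalone sketch of the same extraction (a simplified illustration, not the TOPPERS source).

// Sketch only: pull the xsi:schemaLocation value out of raw XML text.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

std::vector<std::string> extract_schema_location(const std::string& xml)
{
    std::vector<std::string> tokens;
    std::string::size_type pos = xml.find("xsi:schemaLocation");
    if (pos == std::string::npos)
        return tokens;                                       // attribute not present
    std::string::size_type open  = xml.find('"', pos);      // opening quote
    std::string::size_type close = xml.find('"', open + 1); // closing quote
    if (open == std::string::npos || close == std::string::npos)
        return tokens;
    std::istringstream iss(xml.substr(open + 1, close - open - 1));
    for (std::string tok; iss >> tok; )
        tokens.push_back(tok);                               // usually: URI, then .xsd path
    return tokens;
}

int main()
{
    const std::string xml =
        "<AUTOSAR xsi:schemaLocation=\"http://autosar.org/schema/r4.0 AUTOSAR_4-0-3_STRICT.xsd\">";
    for (const std::string& t : extract_schema_location(xml))
        std::cout << t << '\n';
    return 0;
}
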
Exemplo n.º 28
0
// compute C8 sensitivities at all gauss points
void CHak3DCont_8::shpSens(int numDual, int numCase, double **disp_prim, double **disp_dual,
                  double *wgt_fact, double *wgt_case, double *gSens, int pNum, double *Nf, double alpha)
{
    int i,j,p,l,d,ind;
    double const_fact, ftemp;
    Coord3D np[8], gp;
	double B[144];	// Strain displacement matrix
    double Ba[54];  // Strain displacement matrix for internal dof
    double ud[24];  // Primary element displacement array
    double dd[24];  // Dual element displacement array
    double stn[6];  // strain tensor
    double strs[6];  // stress tensor
    double sens; // variable to keep running total of a sensitivity value
    double /*ux[8], uy[8],*/ uz[8];
    double /*px[8], py[8],*/ pz[8];
    double sw_fact;

	// get element nodal coords
	getNcrd(np);

    // compute material property matrix
    double *Em = Mat->getD();

    // for each gauss point
    for(p=0;p<8;p++)
    {
        gp.x = gauss2 * c8_pos[p].x;
		gp.y = gauss2 * c8_pos[p].y;
		gp.z = gauss2 * c8_pos[p].z; // non-dim gauss point coords

        // compute isoparametric B matrix (6 x 24) & Ba (6 x 9)
        C8_isoBMat(gp, np, B); // NB: gp should be in non-dim coordinates (i.e. center = 0,0,0)
        C8M_isoBMat(gp, np, Ba);

        // for each load case
        for(l=0;l<numCase;l++)
        {
            // compute the constant sensitivity factor for the element
            const_fact = wgt_fact[l] * Mat->getDens(); // DO NOT multiply by the volume ratio

            // get the primary element displacements
            getDisp(ud, disp_prim[l]);

            for(i=0;i<8;i++)
            {
                j=i*3;
                //ux[i] = ud[j];
                //uy[i] = ud[j+1];
                uz[i] = ud[j+2];
            }

            // compute self weight factor fg * u
            ftemp = Mat->getDens() * Nf[l] * -9.81 * alpha; // density times gravitational acceleration (gravity acts in the -z direction)
            sw_fact = ftemp * C8_interpolate(gp,uz); //( C8_interpolate(gp,ux) + C8_interpolate(gp,uy) + C8_interpolate(gp,uz) );;

            // compute stress tensor Ee(u) = strs
            // multiply: strain = B x u
            cblas_dgemv(CblasRowMajor, CblasNoTrans, 6, 24, 1.0, B, 24, ud, 1, 0.0, stn, 1);

            // Finally multiply: stress = material matrix x strain
            cblas_dgemv(CblasRowMajor, CblasNoTrans, 6, 6, 1.0, Em, 6, stn, 1, 0.0, strs, 1);

            // for each dual response p
            for(d=0;d<numDual;d++)
            {
                // add the 3 components (multiplied by the load case weight)
                // Ee(u)e(p) - fg(u+p) - const_fact

                // get the dual element displacements
                getDisp(dd, disp_dual[index2(l,d,numDual)]);

                // compute dual strain tensor e(p) = stn
                // multiply: strain = B x u
                cblas_dgemv(CblasRowMajor, CblasNoTrans, 6, 24, 1.0, B, 24, dd, 1, 0.0, stn, 1);

                // compute stress(u) x strain(p)
                sens = 0.0; // reset to zero
                for(i=0;i<6;i++)
                {
                    sens += strs[i]*stn[i];
                }
                sens *= alpha ; // need to multiply by the volume ratio (account for modified modulus)

                // interpolate, then multiply
                // need to extract u, v, w disp and forces in separate vectors
                for(i=0;i<8;i++)
                {
                    j=i*3;
                    //px[i] = dd[j];
                    //py[i] = dd[j+1];
                    pz[i] = dd[j+2];
                }

                // now interpolate and add to sens
                sens -= sw_fact;
                sens -= ftemp * C8_interpolate(gp,pz); //( C8_interpolate(gp,px) + C8_interpolate(gp,py) + C8_interpolate(gp,pz) );
                sens -= const_fact;
                sens *= wgt_case[l]; // multiply by load case weight

                // add to overall sensitivity
                ind=pNum+p;
                gSens[index2(ind,d,numDual)] += sens; // add value for this load case
            }
        }
    }
}
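
The two cblas_dgemv calls above compute strain = B·u (6x24 times 24) and stress = D·strain (6x6 times 6); the Gauss-point contribution is then the dot product of the primary stress with the dual strain. Below is a minimal sketch of that chain with plain loops, assuming row-major layouts as in the CblasRowMajor arguments (an illustration only, not the element class itself).

// Sketch only: y = A*x for a row-major (rows x cols) matrix, like cblas_dgemv above.
#include <cstdio>
#include <cstddef>

static void matvec(const double* A, const double* x, double* y,
                   std::size_t rows, std::size_t cols)
{
    for (std::size_t i = 0; i < rows; ++i) {
        double sum = 0.0;
        for (std::size_t j = 0; j < cols; ++j)
            sum += A[i * cols + j] * x[j];
        y[i] = sum;
    }
}

// Energy-like product strain(p)^T * D * strain(u) for one Gauss point.
double gauss_point_sens(const double* B,   // 6 x 24 strain-displacement matrix
                        const double* D,   // 6 x 6 material matrix
                        const double* u,   // 24 primary element displacements
                        const double* p)   // 24 dual element displacements
{
    double stn[6], strs[6], stn_dual[6];
    matvec(B, u, stn, 6, 24);       // primary strain
    matvec(D, stn, strs, 6, 6);     // primary stress
    matvec(B, p, stn_dual, 6, 24);  // dual strain
    double sens = 0.0;
    for (int i = 0; i < 6; ++i)
        sens += strs[i] * stn_dual[i];
    return sens;
}

int main()
{
    double B[6 * 24] = {0.0}, D[6 * 6] = {0.0}, u[24] = {0.0}, p[24] = {0.0};
    B[0] = 1.0; D[0] = 1.0; u[0] = 2.0; p[0] = 3.0;   // trivial data: result is 6
    std::printf("%g\n", gauss_point_sens(B, D, u, p));
    return 0;
}
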
Exemplo n.º 29
0
/*
  @return number_of_tempfiles
 */
int  read_txt2ngram_buffer(FILE* infp, 
			   struct idngram_hash_table *vocabulary, 
			   int32 verbosity,
			   wordid_t *buffer,
			   int buffer_size,
			   unsigned int n,
			   char* temp_file_root,
			   char* temp_file_ext,
			   FILE* temp_file
			   )
{
  /* Read text into buffer */
  char temp_word[MAX_WORD_LENGTH];
  int position_in_buffer;
  int number_of_tempfiles;
  unsigned int i,j;
  wordid_t *placeholder;
  wordid_t *temp_ngram;
  int temp_count;

#if 1
  int tmpval;
#endif

  temp_ngram  = (wordid_t *) rr_malloc(sizeof(wordid_t)*n);
  placeholder = (wordid_t *) rr_malloc(sizeof(wordid_t)*n);

  ng=n;

  position_in_buffer = 0;
  number_of_tempfiles = 0;

  //tk: looks like things may croak if the corpus has fewer than n words
  //not that such a corpus would be useful anyway
  for (i=0;i<=n-1;i++) {
    get_word(infp,temp_word);
    /*
        fprintf(stderr,"%s \n",temp_word);
	fprintf(stderr,"%d \n",index2(vocabulary,temp_word));
        fflush(stderr);
    */
    add_to_buffer(index2(vocabulary,temp_word),0,i,buffer);
  }

  while (!rr_feof(infp)) {
    /* Fill up the buffer */
    pc_message(verbosity,2,"Reading text into the n-gram buffer...\n");
    pc_message(verbosity,2,"20,000 n-grams processed for each \".\", 1,000,000 for each line.\n");

    while ((position_in_buffer<buffer_size) && (!rr_feof(infp))) {
      position_in_buffer++;
      show_idngram_nlines(position_in_buffer,verbosity);

      for (i=1;i<=n-1;i++) 
	add_to_buffer(buffer_contents(position_in_buffer-1,i,buffer),
		      position_in_buffer,i-1,buffer);
      
      if (get_word(infp,temp_word) == 1) {
      /*
	fprintf(stderr,"%s \n",temp_word);
	fprintf(stderr,"%d \n",index2(vocabulary,temp_word));
	fflush(stderr);
      */
	add_to_buffer(index2(vocabulary,temp_word),position_in_buffer,
		      n-1,buffer);
      }
    }

    for (i=0;i<=n-1;i++) 
      placeholder[i] = buffer_contents(position_in_buffer,i,buffer);

    /* Sort buffer */
    
    pc_message(verbosity,2,"\nSorting n-grams...\n");    
    
    qsort((void*) buffer,(size_t) position_in_buffer,n*sizeof(wordid_t),compare_ngrams);

    /* Output the buffer to temporary BINARY file */    
    number_of_tempfiles++;

    sprintf(temp_word,"%s/%d%s",temp_file_root,
	    number_of_tempfiles,temp_file_ext);

    pc_message(verbosity,2,"Writing sorted n-grams to temporary file %s\n",
	       temp_word);

    temp_file = rr_oopen(temp_word);

    for (i=0;i<=n-1;i++) {
      temp_ngram[i] = buffer_contents(0,i,buffer);
#if MAX_VOCAB_SIZE < 65535
      /* This check is well-meaning but completely useless since
	 buffer_contents() can never return something greater than
	 MAX_VOCAB_SIZE (dhuggins@cs, 2006-03) */
      if (temp_ngram[i] > MAX_VOCAB_SIZE)
	quit(-1,"Invalid trigram in buffer.\nAborting");
#endif
    }
    temp_count = 1;

    for (i=1;i<=position_in_buffer;i++) {

      tmpval=compare_ngrams(temp_ngram,&buffer[i*n]);

      /*      for(k=0;k<=n-1;k++){
	fprintf(stderr, "tmpval: %d k %d, temp_ngram %d, &buffer[i*n] %d\n",tmpval, k, temp_ngram[k], (&buffer[i*n])[k]);
	}*/

      if (tmpval == 0)
	temp_count++;
      else {
	/*	printf("Have been here?\n");*/
	for (j=0;j<=n-1;j++) {
	  rr_fwrite((char*) &temp_ngram[j],sizeof(wordid_t),1,
		    temp_file,"temporary n-gram ids");
	  temp_ngram[j] = buffer_contents(i,j,buffer);
	}
	rr_fwrite((char*)&temp_count,sizeof(int),1,temp_file,
		  "temporary n-gram counts");

	/*	for(j=0 ; j<=n-1;j++)
	  fprintf(stderr,"%d ",temp_ngram[j]);
	  fprintf(stderr,"%d\n",temp_count);*/

	temp_count = 1;
      }
    }
    
    rr_oclose(temp_file);

    for (i=0;i<=n-1;i++) 
      add_to_buffer(placeholder[i],0,i,buffer);

    position_in_buffer = 0;

  }

  return number_of_tempfiles;
}
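
compare_ngrams itself is not shown here, but the qsort call above (element size n*sizeof(wordid_t)) implies that each record is n consecutive word ids compared lexicographically from the first position. Below is a guess at its shape, written as a standalone sketch rather than the toolkit's actual comparator (the wordid_t width and the ng global are assumptions).

// Sketch only: qsort comparator for flat records of ng word ids each.
#include <cstdlib>

typedef unsigned short wordid_t;   // assumed word-id width
static unsigned int ng = 3;        // n-gram order, set before sorting (cf. "ng = n" above)

int compare_ngrams_sketch(const void* a, const void* b)
{
    const wordid_t* x = (const wordid_t*) a;
    const wordid_t* y = (const wordid_t*) b;
    for (unsigned int i = 0; i < ng; ++i) {
        if (x[i] < y[i]) return -1;    // first differing position decides the order
        if (x[i] > y[i]) return  1;
    }
    return 0;                          // identical n-grams compare equal
}

int main()
{
    wordid_t buf[3 * 3] = { 2,1,1,  1,9,9,  1,9,8 };          // three 3-grams, flat layout
    std::qsort(buf, 3, ng * sizeof(wordid_t), compare_ngrams_sketch);
    return 0;                          // buf is now {1,9,8, 1,9,9, 2,1,1}
}
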
Exemplo n.º 30
0
// compute C8 sensitivities at all gauss points, for eigenvalue problems
void CHak3DCont_8::shpSens_Eig(int numDual, int numEig, double **disp_prim, double **disp_dual,
                      double *eig, double *wgt_case, double *gSens, int pNum, double alpha)
{
    int i,j,p,l,d,ind;
    double ftemp;
    Coord3D np[8], gp;
	double B[144];	// Strain displacement matrix
    double ud[24];  // Primary element displacement array
    double dd[24];  // Dual element displacement array
    double stn[6];  // strain tensor
    double strs[6];  // stress tensor
    double sens; // variable to keep running total of a sensitivity value
    double ux[8], uy[8], uz[8];
    double px[8], py[8], pz[8];
    double ug[3];

	// get element nodal coords
	getNcrd(np);

    // get material property matrix
    double *Em = Mat->getD();
    double rho = Mat->getDens();

    // for each gauss point
    for(p=0;p<8;p++)
    {
        gp.x = gauss2 * c8_pos[p].x;
		gp.y = gauss2 * c8_pos[p].y;
		gp.z = gauss2 * c8_pos[p].z; // non-dim gauss point coords

        // compute isoparametric B matrix (6 x 24) & Ba (6 x 9)
        C8_isoBMat(gp, np, B); // NB: gp should be in non-dim coordinates (i.e. center = 0,0,0)

        // for each eigenvalue
        for(l=0;l<numEig;l++)
        {
            // get the primary element displacements
            getDisp(ud, disp_prim[l]);

            for(i=0;i<8;i++)
            {
                j=i*3;
                ux[i] = ud[j];
                uy[i] = ud[j+1];
                uz[i] = ud[j+2];
            }
            ug[0] = C8_interpolate(gp,ux); // x disp at Gauss point
            ug[1] = C8_interpolate(gp,uy); // y disp at Gauss point
            ug[2] = C8_interpolate(gp,uz); // z disp at Gauss point

            // compute stress tensor Ee(u) = strs
            // multiply: strain = B x u
            cblas_dgemv(CblasRowMajor, CblasNoTrans, 6, 24, 1.0, B, 24, ud, 1, 0.0, stn, 1);

            // Finally multiply: stress = material matrix x strain
            cblas_dgemv(CblasRowMajor, CblasNoTrans, 6, 6, 1.0, Em, 6, stn, 1, 0.0, strs, 1);

            // for each dual response p
            for(d=0;d<numDual;d++)
            {
                // add the 3 components (multiplied by the load case weight)
                // Ee(u)e(p) - fg(u+p) - const_fact

                // get the dual element displacements
                getDisp(dd, disp_dual[index2(l,d,numDual)]);

                // compute dual strain tensor e(p) = stn
                // multiply: strain = B x u
                cblas_dgemv(CblasRowMajor, CblasNoTrans, 6, 24, 1.0, B, 24, dd, 1, 0.0, stn, 1);

                // compute stress(u) x strain(p)
                sens = 0.0; // reset to zero
                for(i=0;i<6;i++)
                {
                    sens += strs[i]*stn[i];
                }
                sens *= alpha ; // need to multiply by the volume ratio (account for modified modulus)

                // interpolate, then multiply
                // need to extract u, v, w disp and forces in separate vectors
                for(i=0;i<8;i++)
                {
                    j=i*3;
                    px[i] = dd[j];
                    py[i] = dd[j+1];
                    pz[i] = dd[j+2];
                }

                // now interpolate and add to sens
                ftemp =  ug[0]*C8_interpolate(gp,px) + ug[1]*C8_interpolate(gp,py) + ug[2]*C8_interpolate(gp,pz);
                sens -= eig[l]*ftemp*rho*alpha;

                // add to overall sensitivity
                ind=pNum+p;
                gSens[index2(ind,d,numDual)] += sens*wgt_case[l]; // add value for this load case
            }
        }
    }
}
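
Read as a formula (a reading of the loops above, not a statement taken from the source), the value accumulated into gSens for Gauss point p, eigenmode l and dual response d is roughly

\[
\Delta g_{p,d} \;=\; w_l \Big[ \alpha\, \varepsilon(\mathbf{p}_d)^{\mathsf T} E\, \varepsilon(\mathbf{u}_l)
\;-\; \lambda_l\, \rho\, \alpha\, \mathbf{u}_l(\mathbf{x}_p) \cdot \mathbf{p}_d(\mathbf{x}_p) \Big]
\]

i.e. a stiffness-energy term minus the eigenvalue-weighted mass term, both scaled by the volume ratio alpha and by the case weight w_l.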