Example #1
File: vetor.c Project: jefponte/exercicios
/* Copies the elements at even indices (0, 2, 4, ...) of vetA into vetB. */
void createVectorWithIndexPair(Vector *vetA, Vector *vetB) {
    /* Number of even-index positions in the source vector vetA. */
    vetB->numberOfElements = returnNumberColOfIndexPair(vetA);
    int i, col = 0;
    if(allocateVector(vetB)) {
        for(i = 0; i < vetA->numberOfElements; i++) {
            if(i % 2 == 0) {
                *(vetB->ptr + col) = *(vetA->ptr + i);
                col++;
            }
        }
    } else {
        printf("Could not allocate memory for VetorB.\n");
    }
}
Example #2
File: vetor.c Project: jefponte/exercicios
/* Copies into vetB the elements of vetA whose values lie strictly between 10 and 40. */
void createVector(Vector *vetA, Vector *vetB) {
    vetB->numberOfElements = returnNumberOfCol(vetA);
    int i, col = 0;
    if(allocateVector(vetB)) {
        for(i = 0; i < vetA->numberOfElements; i++) {
            if(*(vetA->ptr + i) > 10 && *(vetA->ptr + i) < 40) {
                *(vetB->ptr + col) = *(vetA->ptr + i);
                col++;
            }
        }
    } else {
        printf("Could not allocate memory for VetorB.\n");
    }
}
Example #3
File: vetor.c Project: jefponte/exercicios
/* Builds vetC by merging vetA and vetB: positions with an even index are
   taken from vetB, positions with an odd index from vetA. */
void createVectorStartingTwoVector(Vector *vetA, Vector *vetB, Vector *vetC) {
    /* vetC receives one element per position of vetA, so it must be sized
       and allocated before the loop writes into it. */
    vetC->numberOfElements = vetA->numberOfElements;
    int i;
    if(allocateVector(vetC)) {
        for(i = 0; i < vetA->numberOfElements; i++) {
            if(i % 2 == 0) {
                *(vetC->ptr + i) = *(vetB->ptr + i);
            } else {
                *(vetC->ptr + i) = *(vetA->ptr + i);
            }
        }
    } else {
        printf("Could not allocate VetorC.\n");
    }
}
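All three snippets above rely on a Vector type with ptr and numberOfElements fields and on helpers (allocateVector, returnNumberColOfIndexPair, returnNumberOfCol) defined elsewhere in the jefponte/exercicios project. The following is only a minimal sketch, assuming int elements and the simplest plausible behavior for each helper, written so the examples can be compiled (together with the functions above, in the same file) and tried out; the project's real definitions may differ.

#include <stdio.h>
#include <stdlib.h>

typedef struct {
    int *ptr;              /* element storage (assumed to be int) */
    int numberOfElements;  /* number of valid elements in ptr */
} Vector;

/* Allocate storage for vet->numberOfElements ints; return 1 on success, 0 on failure. */
int allocateVector(Vector *vet) {
    vet->ptr = malloc(vet->numberOfElements * sizeof(int));
    return vet->ptr != NULL;
}

/* Number of even-index positions (0, 2, 4, ...) in vet. */
int returnNumberColOfIndexPair(Vector *vet) {
    return (vet->numberOfElements + 1) / 2;
}

/* Number of elements of vet whose values lie strictly between 10 and 40. */
int returnNumberOfCol(Vector *vet) {
    int i, count = 0;
    for(i = 0; i < vet->numberOfElements; i++)
        if(vet->ptr[i] > 10 && vet->ptr[i] < 40)
            count++;
    return count;
}

/* Prototypes for the example functions shown above. */
void createVectorWithIndexPair(Vector *vetA, Vector *vetB);
void createVector(Vector *vetA, Vector *vetB);

int main(void) {
    int data[] = {5, 12, 25, 33, 41, 18};
    Vector a = {data, 6}, b = {NULL, 0}, c = {NULL, 0};
    createVectorWithIndexPair(&a, &b);  /* b holds 5, 25, 41 */
    createVector(&a, &c);               /* c holds 12, 25, 33, 18 */
    printf("%d %d\n", b.numberOfElements, c.numberOfElements);  /* prints "3 4" */
    free(b.ptr);
    free(c.ptr);
    return 0;
}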
Example #4
File: CRF.cpp Project: Maple-Wang/libLAML
/**
 * Compute the objective function value (the regularized mean negative
 * conditional log-likelihood on the training data for CRFs). The gradient
 * is also calculated if required.
 *
 * @param F d x 1 feature vector for D training data sequences
 *
 * @param Ms transition matrices
 *
 * @param calcGrad whether the gradient is required
 *
 * @param Grad gradient to be assigned in place if required
 *
 * @param W model parameters
 *
 * @return objective function value
 *
 */
double CRF::computeObjectiveFunctionValue(Vector& F, Matrix** Ms, bool calcGrad, Vector& Grad, Vector& W) {

	double fval = 0;

	// int[] Y = null;

	/*
	 * d x 1 array conditional expectation of feature functions
	 * for D training data sequences, i.e.,
	 * EF = sum_k E_{P_{\bf \lambda}(Y|x_k)}[F(Y, x_k)]
	 */
	Vector* EF = null;
	double* EFArr = allocateVector(d);

	/*
	 * Compute global feature vector for all training data sequences,
	 * i.e., sum_k F(y_k, x_k)
	 */
	int n_x = 0;
	Matrix*** Fs_k = null;

	Matrix* f_j_x_i = null;

	for (int k = 0; k < D; k++) {
		Fs_k = Fs[k];
		n_x = lengths[k];
		// Ms = new Matrix[n_x];

		/*
		 * Compute transition matrix set
		 */
		for (int i = 0; i < n_x; i++) {
			// Ms[i] = new DenseMatrix(numStates, numStates, 0);
			Ms[i]->clear();
			for (int j = 0; j < d; j++) {
				f_j_x_i = Fs_k[i][j];
				if (j == 0)
					// Ms[i] = times(W.get(j), f_j_x_i);
					times(*Ms[i], W.get(j), *f_j_x_i);
				else
					// Ms[i] = plus(Ms[i], times(W.get(j), f_j_x_i));
					plusAssign(*Ms[i], W.get(j), *f_j_x_i);
			}
			expAssign(*Ms[i]);
			/*for (int s = 0; s < numStates; s++) {
				if (sum(Ms[i].getRow(s)) == 0) {
					Ms[i].setRow(s, allocateVector(numStates, 1e-10));
				}
			}*/
		}

		/*
		 * Forward recursion with scaling
		 */
		/*Matrix Alpha_hat = new BlockRealMatrix(numStates, n_x);
		Matrix Alpha_hat_0 = new BlockRealMatrix(numStates, 1);
		RealVector e_start = new ArrayRealVector(numStates);
		e_start.setEntry(startIdx, 1);*/

		Vector** Alpha_hat = new Vector*[n_x];
		/*for (int i = 0; i < n_x; i++) {
			Alpha_hat[i] = new DenseVector(numStates);
		}*/
		// Vector* Alpha_hat_0 = null;
		Vector& e_start = *new SparseVector(numStates);
		e_start.set(startIdx, 1);


		double* c = allocateVector(n_x);

		// Alpha_hat_0.setColumnVector(0, e_start);
		Vector& Alpha_hat_0 = e_start;

		for (int i = 0; i < n_x; i++) {
			if (i == 0) {
				// Alpha_hat.setColumnMatrix(i, Ms[i].transpose().multiply(Alpha_hat_0));
				Alpha_hat[i] = &Alpha_hat_0.operate(*Ms[i]);
			} else {
				// Alpha_hat.setColumnMatrix(i, Ms[i].transpose().multiply(Alpha_hat.getColumnMatrix(i - 1)));
				Alpha_hat[i] = &Alpha_hat[i - 1]->operate(*Ms[i]);
			}
			// c[i] = 1.0 / sum(Alpha_hat.getColumnVector(i));
			c[i] = 1.0 / sum(*Alpha_hat[i]);
			/*if (Double.isInfinite(c[i])) {
				int a = 1;
				a = a + 1;
			}*/
			// Alpha_hat.setColumnMatrix(i, times(c[i], Alpha_hat.getColumnMatrix(i)));
			timesAssign(*Alpha_hat[i], c[i]);
		}

		/*
		 * Backward recursion with scaling
		 */
		// Matrix Beta_hat = new BlockRealMatrix(numStates, n_x);
		Vector** Beta_hat = new Vector*[n_x];
		for (int i = n_x - 1; i >= 0; i--) {
			if ( i == n_x - 1) {
				// Beta_hat.setColumnMatrix(i, ones(numStates, 1));
				Beta_hat[i] = new DenseVector(numStates, 1);
			} else {
				// Beta_hat.setColumnMatrix(i, mtimes(Ms[i + 1], Beta_hat.getColumnMatrix(i + 1)));
				Beta_hat[i] = &Ms[i + 1]->operate(*Beta_hat[i + 1]);
			}
			// Beta_hat.setColumnMatrix(i, times(c[i], Beta_hat.getColumnMatrix(i)));
			timesAssign(*Beta_hat[i], c[i]);
		}

		/*
		 * Accumulate the negative conditional log-likelihood on the
		 * D training data sequences
		 */
		for (int i = 0; i < n_x; i++) {
			fval -= log(c[i]);
		}

		/*if (Double.isNaN(fval)) {
			int a = 1;
			a = a + 1;
		}*/

		if (!calcGrad)
			continue;
		/*
		 * Compute E_{P_{\bf \lambda}(Y|x_k)}[F(Y, x_k)]
		 */
		for (int j = 0; j < d; j++) {
			/*
			 * Compute E_{P_{\bf \lambda}(Y|x_k)}[F_{j}(Y, x_k)]
			 */
			for (int i = 0; i < n_x; i++) {
				f_j_x_i = Fs_k[i][j]; // feature matrix f_j^{x,i} at position i
				if (i == 0) {
					// EFArr[j] += Alpha_hat_0.transpose().multiply(times(Ms[i], f_j_x_i)).multiply(Beta_hat.getColumnMatrix(i)).getEntry(0, 0);
					EFArr[j] += innerProduct(Alpha_hat_0, Ms[i]->times(*f_j_x_i).operate(*Beta_hat[i]));
				} else {
					// EFArr[j] += Alpha_hat.getColumnMatrix(i - 1).transpose().multiply(times(Ms[i], f_j_x_i)).multiply(Beta_hat.getColumnMatrix(i)).getEntry(0, 0);
					EFArr[j] += innerProduct(*Alpha_hat[i - 1], Ms[i]->times(*f_j_x_i).operate(*Beta_hat[i]));
				}
			}
		}
	}

	/*
	 * Calculate the eventual negative conditional log-likelihood
	 */
	fval -= innerProduct(W, F);
	fval += sigma * innerProduct(W, W);
	fval /= D;

	if (!calcGrad) {
		return fval;
	}

	/*
	 * Calculate the gradient of negative conditional log-likelihood
	 * w.r.t. W. on the D training data sequences
	 */
	// EF.setColumn(0, EFArr);
	EF = new DenseVector(EFArr, d);
	times(Grad, 1.0 / D, plus(minus(*EF, F), W.times(2 * sigma)));

	return fval;

}
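For reference, the objective and gradient assembled by this routine can be written in closed form. The formulas below are reconstructed from the code above and are not part of the original source; \(\sigma\) is the L2 regularization weight, \(D\) the number of training sequences, \(F = \sum_k F(\mathbf{y}_k, \mathbf{x}_k)\) the global feature vector, and \(\mathrm{EF} = \sum_k \mathbb{E}_{P_W(Y\mid\mathbf{x}_k)}[F(Y, \mathbf{x}_k)]\) the conditional expectation accumulated in EFArr:

\[
\mathrm{fval} = \frac{1}{D}\Big(\sum_{k=1}^{D}\log Z_W(\mathbf{x}_k) \;-\; W^{\top}F \;+\; \sigma\,\|W\|_2^2\Big),
\qquad
\mathrm{Grad} = \frac{1}{D}\big(\mathrm{EF} - F + 2\sigma W\big).
\]

The scaled forward/backward recursions inside the per-sequence loop, written with positions \(i = 1, \dots, n_x\) and \(\hat{\alpha}_0 = e_{\mathrm{start}}\), are

\[
\hat{\alpha}_i = c_i\,M_i^{\top}\hat{\alpha}_{i-1},\qquad
c_i = \frac{1}{\mathbf{1}^{\top}M_i^{\top}\hat{\alpha}_{i-1}},\qquad
\hat{\beta}_{n_x} = c_{n_x}\mathbf{1},\qquad
\hat{\beta}_i = c_i\,M_{i+1}\hat{\beta}_{i+1},
\]

so that \(\log Z_W(\mathbf{x}_k) = -\sum_i \log c_i\), and the conditional expectation of feature \(j\) accumulates the terms \(\hat{\alpha}_{i-1}^{\top}\,(M_i \circ f_j^{\mathbf{x},i})\,\hat{\beta}_i\), where \(\circ\) denotes the element-wise product.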
Example #5
File: CRF.cpp Project: Maple-Wang/libLAML
/**
 * Predict the single best label sequence for an observation sequence,
 * given its features, by the Viterbi algorithm.
 *
 * @param Fs a 2D {@code Matrix} array, where Fs[i][j] is the sparse
 * 			 feature matrix for the j-th feature of the observation sequence
 *	 	 	 at position i, i.e., f_{j}^{{\bf x}, i}
 *
 * @param length the length of the observation sequence
 *
 * @return the single best label sequence for the observation sequence
 *
 */
int* CRF::predict(Matrix*** Fs, int length) {

	Matrix** Ms = computeTransitionMatrix(Fs, length);

	/*
	 * Alternative backward recursion with scaling for the Viterbi
	 * algorithm
	 */
	int n_x = length;
	double* b = allocateVector(n_x);
	// Matrix Beta_tilta = new BlockRealMatrix(numStates, n_x);
	Vector** Beta_tilta = new Vector*[n_x];

	for (int i = n_x - 1; i >= 0; i--) {
		if ( i == n_x - 1) {
			// Beta_tilta.setColumnMatrix(i, ones(numStates, 1));
			Beta_tilta[i] = new DenseVector(numStates, 1);
		} else {
			// Beta_tilta.setColumnMatrix(i, mtimes(Ms[i + 1], Beta_tilta.getColumnMatrix(i + 1)));
			Beta_tilta[i] = &Ms[i + 1]->operate(*Beta_tilta[i + 1]);
		}
		b[i] = 1.0 / sum(*Beta_tilta[i]);
		// Beta_tilta.setColumnMatrix(i, times(b[i], Beta_tilta.getColumnMatrix(i)));
		timesAssign(*Beta_tilta[i], b[i]);
	}

	/*fprintf("Beta:\n");
	display(Beta_tilta);*/

	/*
	 * Gammas[i](y_{i-1}, y_i) is P(y_i|y_{i-1}, Lambda), so each row of
	 * Gammas[i] should sum to one.
	 */

	double** Gamma_i = allocate2DArray(numStates, numStates, 0);
	double** Phi =  allocate2DArray(n_x, numStates, 0);
	double** Psi =  allocate2DArray(n_x, numStates, 0);
	double** M_i = null;
	double* M_i_Row = null;
	double* Gamma_i_Row = null;
	double* Beta_tilta_i = null;
	double* Phi_i = null;
	double* Phi_im1 = null;
	double** maxResult = null;
	for (int i = 0; i < n_x; i++) {
		M_i = ((DenseMatrix*) Ms[i])->getData();
		Beta_tilta_i = ((DenseVector*) Beta_tilta[i])->getPr();
		for (int y_im1 = 0; y_im1 < numStates; y_im1++) {
			M_i_Row = M_i[y_im1];
			Gamma_i_Row = Gamma_i[y_im1];
			assign(Gamma_i_Row, M_i_Row, numStates);
			timesAssign(Gamma_i_Row, Beta_tilta_i, numStates);
			sum2one(Gamma_i_Row, numStates);
		}
		Phi_i = Phi[i];
		if (i == 0) { // Initialization
			log(Phi_i, Gamma_i[startIdx], numStates);
		} else {
			Phi_im1 = Phi[i - 1];
			for (int y_im1 = 0; y_im1 < numStates; y_im1++) {
				Gamma_i_Row = Gamma_i[y_im1];
				logAssign(Gamma_i_Row, numStates);
				plusAssign(Gamma_i_Row, Phi_im1[y_im1], numStates);
			}
			maxResult = max(Gamma_i, numStates, numStates, 1);
			Phi[i] = maxResult[0];
			Psi[i] = maxResult[1];
		}

	}

	/*
	 *  Predict the single best label sequence.
	 */
	// double[] phi_n_x = Phi.getRow(n_x - 1);
	double* phi_n_x = Phi[n_x - 1];
	int* YPred = allocateIntegerVector(n_x);
	for (int i = n_x - 1; i >= 0; i--) {
		if (i == n_x - 1) {
			YPred[i] = argmax(phi_n_x, numStates);
		} else {
			// YPred[i] = (int)Psi.getEntry(i + 1, YPred[i + 1]);
			YPred[i] = (int) Psi[i + 1][YPred[i + 1]];
		}
	}

	/*display(Phi);
	display(Psi);*/

	/*
	 *  Predict the optimal conditional probability: P*(y|x)
	 */
	double p = exp(phi_n_x[YPred[n_x - 1]]);
	fprintf("P*(YPred|x) = %g\n", p);

	return YPred;

}
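In outline, predict() first turns each transition matrix \(M_i\) and scaled backward vector \(\tilde{\beta}_i\) into a row-stochastic matrix \(\Gamma_i\) with \(\Gamma_i(y_{i-1}, y_i) \propto M_i(y_{i-1}, y_i)\,\tilde{\beta}_i(y_i)\), and then runs the Viterbi recursion in log space. The summary below is reconstructed from the code (Phi stores the running max log-probabilities, Psi the backpointers):

\[
\Phi_1(y) = \log\Gamma_1(\mathrm{start}, y),\qquad
\Phi_i(y) = \max_{y'}\big[\Phi_{i-1}(y') + \log\Gamma_i(y', y)\big],\qquad
\Psi_i(y) = \arg\max_{y'}\big[\Phi_{i-1}(y') + \log\Gamma_i(y', y)\big],
\]
\[
y^{*}_{n_x} = \arg\max_{y}\Phi_{n_x}(y),\qquad
y^{*}_i = \Psi_{i+1}(y^{*}_{i+1}),\qquad
P^{*}(\mathbf{y}^{*}\mid\mathbf{x}) = \exp\big(\Phi_{n_x}(y^{*}_{n_x})\big),
\]

which matches the backtrace over YPred and the final probability printed at the end of the function.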