Example #1
	// Linear Prediction in information form as in Ref[2]
	Float predict (Linear_invertable_predict_model& f)
	{
		Linear_predict_byproducts b(f.Fx.size1(),f.q.size());
		return predict (f, b);	
	}
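Example #1's one-liner is the convenience overload: it allocates a fresh Linear_predict_byproducts sized from the model (f.Fx.size1() rows, f.q.size() noise terms) and forwards to the two-argument predict. A hedged sketch of why that second overload exists, reusing one scratch buffer across a prediction loop (the filter type name below is an assumption, not from the library):

	// Hedged sketch: reuse one byproducts buffer to avoid the per-call
	// allocation done by the convenience overload above.
	Float predict_many (Information_form_filter& filter,            // assumed type
	                    Linear_invertable_predict_model& model, int steps)
	{
		Linear_predict_byproducts scratch(model.Fx.size1(), model.q.size());
		Float rcond = 0;
		for (int i = 0; i < steps; ++i)
			rcond = filter.predict(model, scratch);  // two-argument overload
		return rcond;                                // value from the final step
	}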
Example #2
bool KNN::train_(LabelledClassificationData &trainingData,UINT K){

    //Clear any previous models
    clear();

    if( trainingData.getNumSamples() == 0 ){
        errorLog << "train(LabelledClassificationData &trainingData) - Training data has zero samples!" << endl;
        return false;
    }

    //Set the dimensionality of the input data
    this->K = K;
    this->numFeatures = trainingData.getNumDimensions();
    this->numClasses = trainingData.getNumClasses();

    //TODO: In the future need to build a kdtree from the training data to allow better realtime prediction
    this->trainingData = trainingData;

    if( useScaling ){
        ranges = this->trainingData.getRanges();
        this->trainingData.scale(ranges, 0, 1);
    }

    //Set the class labels
    classLabels.resize(numClasses);
    for(UINT k=0; k<numClasses; k++){
        classLabels[k] = trainingData.getClassTracker()[k].classLabel;
    }

    //Flag that the algorithm has been trained so we can compute the rejection thresholds
    trained = true;
    
    //If null rejection is enabled then compute the null rejection thresholds
    if( useNullRejection ){

        //Set the null rejection to false so we can compute the values for it (this will be set back to its current value later)
        bool tempUseNullRejection = useNullRejection;
        useNullRejection = false;
        rejectionThresholds.clear();

        //Compute the rejection thresholds for each of the K classes
        vector< double > counter(numClasses,0);
        trainingMu.resize( numClasses, 0 );
        trainingSigma.resize( numClasses, 0 );
        rejectionThresholds.resize( numClasses, 0 );

        //Compute Mu for each of the classes
        const unsigned int numTrainingExamples = trainingData.getNumSamples();
        vector< IndexedDouble > predictionResults( numTrainingExamples );
        for(UINT i=0; i<numTrainingExamples; i++){
            predict( trainingData[i].getSample(), K);

            UINT classLabelIndex = 0;
            for(UINT k=0; k<numClasses; k++){
                if( predictedClassLabel == classLabels[k] ){
                    classLabelIndex = k;
                    break;
                }
            }

            predictionResults[ i ].index = classLabelIndex;
            predictionResults[ i ].value = classDistances[ classLabelIndex ];

            trainingMu[ classLabelIndex ] += predictionResults[ i ].value;
            counter[ classLabelIndex ]++;
        }

        for(UINT j=0; j<numClasses; j++){
            trainingMu[j] /= counter[j];
        }

        //Compute Sigma for each of the classes
        for(UINT i=0; i<numTrainingExamples; i++){
            trainingSigma[predictionResults[i].index] += SQR(predictionResults[i].value - trainingMu[predictionResults[i].index]);
        }

        for(UINT j=0; j<numClasses; j++){
            double count = counter[j];
            if( count > 1 ){
                trainingSigma[ j ] = sqrt( trainingSigma[j] / (count-1) );
            }else{
                trainingSigma[ j ] = 1.0;
            }
        }

        //Check to see if any of the mu or sigma values are zero or NaN
        bool errorFound = false;
        for(UINT j=0; j<numClasses; j++){
            if( trainingMu[j] == 0 ){
                warningLog << "TrainingMu[ " << j << " ] is zero for a K value of " << K << endl;
            }
            if( trainingSigma[j] == 0 ){
                warningLog << "TrainingSigma[ " << j << " ] is zero for a K value of " << K << endl;
            }
            if( isnan( trainingMu[j] ) ){
                errorLog << "TrainingMu[ " << j << " ] is NAN for a K value of " << K << endl;
                errorFound = true;
            }
            if( isnan( trainingSigma[j] ) ){
                errorLog << "TrainingSigma[ " << j << " ] is NAN for a K value of " << K << endl;
                errorFound = true;
            }
        }

        if( errorFound ){
            trained = false;
            return false;
        }

        //Recompute the rejection thresholds
        recomputeNullRejectionThresholds();

        //Restore the actual state of the null rejection
        useNullRejection = tempUseNullRejection;
        
    }else{
        //Resize the rejection thresholds but set the values to 0
        rejectionThresholds.clear();
        rejectionThresholds.resize( numClasses, 0 );
    }

    return true;
}
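A hedged usage sketch for the class above (GRT-style API assumed; the dataset file name and the public predict overload are assumptions based on the calls the snippet itself makes):

bool trainAndTest(){
    KNN knn;
    LabelledClassificationData data;
    if( !data.loadDatasetFromFile("train.grt") ) return false;  //hypothetical file
    if( !knn.train_(data, 5) ) return false;                    //K = 5 neighbours
    vector< double > sample( data.getNumDimensions(), 0.0 );
    return knn.predict( sample, 5 );        //sets predictedClassLabel internally
}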
Example #3
  void FilterBase::processMeasurement(const Measurement &measurement)
  {
    FB_DEBUG("------ FilterBase::processMeasurement (" << measurement.topicName_ << ") ------\n");

    double delta = 0.0;

    // If we've had a previous reading, then go through the predict/update
    // cycle. Otherwise, set our state and covariance to whatever we get
    // from this measurement.
    if (initialized_)
    {
      // Determine how much time has passed since our last measurement
      delta = measurement.time_ - lastMeasurementTime_;

      FB_DEBUG("Filter is already initialized. Carrying out predict/correct loop...\n"
               "Measurement time is " << std::setprecision(20) << measurement.time_ <<
               ", last measurement time is " << lastMeasurementTime_ << ", delta is " << delta << "\n");

      // Only want to carry out a prediction if it's
      // forward in time. Otherwise, just correct.
      if (delta > 0)
      {
        validateDelta(delta);

        predict(delta);

        // Return this to the user
        predictedState_ = state_;
      }

      correct(measurement);
    }
    else
    {
      FB_DEBUG("First measurement. Initializing filter.\n");

      // Initialize the filter, but only with the values we're using
      size_t measurementLength = measurement.updateVector_.size();
      for (size_t i = 0; i < measurementLength; ++i)
      {
        state_[i] = (measurement.updateVector_[i] ? measurement.measurement_[i] : state_[i]);
      }

      // Same for covariance
      for (size_t i = 0; i < measurementLength; ++i)
      {
        for (size_t j = 0; j < measurementLength; ++j)
        {
          estimateErrorCovariance_(i, j) = (measurement.updateVector_[i] && measurement.updateVector_[j] ?
                                            measurement.covariance_(i, j) :
                                            estimateErrorCovariance_(i, j));
        }
      }

      initialized_ = true;
    }

    if (delta >= 0.0)
    {
      // Update the last measurement and update time.
      // The measurement time is based on the time stamp of the
      // measurement, whereas the update time is based on this
      // node's current ROS time. The update time is used to
      // determine if we have a sensor timeout, whereas the
      // measurement time is used to calculate time deltas for
      // prediction and correction.
      lastMeasurementTime_ = measurement.time_;
    }

    FB_DEBUG("------ /FilterBase::processMeasurement (" << measurement.topicName_ << ") ------\n");
  }
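processMeasurement only sequences the cycle; predict and correct are virtual and supplied by the concrete filter. For orientation, the textbook linear Kalman form of the two steps (hedged: the generic version, not necessarily this library's exact equations) is

$$\hat{x}_{k|k-1} = F\,\hat{x}_{k-1|k-1}, \qquad P_{k|k-1} = F\,P_{k-1|k-1}F^{\top} + Q$$

$$K_k = P_{k|k-1}H^{\top}\bigl(H P_{k|k-1} H^{\top} + R\bigr)^{-1}, \qquad \hat{x}_{k|k} = \hat{x}_{k|k-1} + K_k\bigl(z_k - H\hat{x}_{k|k-1}\bigr), \qquad P_{k|k} = (I - K_k H)\,P_{k|k-1}$$

with the transition built from delta, which is why a prediction is only carried out when delta > 0.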
double RobotTracker::stuck(double time)
{
    Matrix x = predict(time);
    return bound(x.e(6,0), 0, 1);
}
Example #5
 float predict2(float*values, int num_values, bool returnDFval)
 {
     return predict(values, num_values, returnDFval);
 }
Example #6
void CameraRig::predictAndSetRotation(float time) {
    glm::quat headRotation = predict(time);
    setRotation(headRotation);
}
//Return the predicted direction
double RobotTracker::direction(double time)
{
    Matrix x = predict(time);
    return x.e(2,0);
}
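The two RobotTracker accessors above share one pattern: predict(time) rolls the state estimate forward, then a single element of the predicted state vector is read (row 2 for the heading, row 6 for a stuck score clamped to [0,1]). A hedged sketch of the bound() helper stuck() relies on; the project's real version may differ in name or signature:

// Hedged sketch of a clamp helper compatible with bound(x.e(6,0), 0, 1).
static double bound(double v, double lo, double hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}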
int main(int argc, char* argv[]){
    //random time for random values later for weights and biases
	srand(time( NULL));

    //Check the argument count before using argv[1..4]
    if(argc < 5){
        printf("Not enough arguments\n");
        return 1;
    }

    //load the input file
    FILE* trainingData = fopen(argv[1], "r");
    if(trainingData == NULL){
        printf("File not found!\n");
        return 1;
    }

    int INPUTLAYERNODES = atoi(argv[2]);
    int HIDDENLAYERNODES = atoi(argv[3]);
    int OUTPUTLAYERNODES = atoi(argv[4]);

    DATASET trainData[DATATOREAD];

    char line[127];
    int lineCount = 0;
    while (fgets ( line, sizeof line, trainingData )!= NULL && lineCount < DATATOREAD){
        int currentValueCount = 0;
        char currentValue[10];
        for(int i = 0; i < 10; i++){ currentValue[i] = (char) 0; }
        int currentValuePos = 0;
        for(int i = 0; i < (int) sizeof(line) && line[i] != '\0'; i++){ //stop at the end of the string, not the buffer
            if(line[i] != ',' && line[i] != '\n'){
                currentValue[currentValuePos] = line[i];
                currentValuePos++;
            }else{
                trainData[lineCount].value[currentValueCount] = atof(currentValue);
                for(int j = 0; j < 10; j++){
                    currentValue[j] = (char) 0;
                }
                currentValueCount++;
                currentValuePos = 0;
            }
        }
        lineCount++;
    }
	normalize(trainData);
    for(int i = 0; i < DATATOREAD; i++){
        trainData[i].value[7] -= 1;
    }

    for(int i = 0; i < DATATOREAD; i++){
        printf("Data %i | ", i);
        for(int j = 0; j < INPUTLAYERNODES+1; j++){
            printf("%f|",trainData[i].value[j]);
        }
        printf("\n");
    }

	//setting up the network
	float weightsFromInputToHidden[INPUTLAYERNODES * HIDDENLAYERNODES];
	for (int i = 0; i < INPUTLAYERNODES * HIDDENLAYERNODES; i++) {
		weightsFromInputToHidden[i] = (float) rand() / RAND_MAX * 2.0 - 1.0;
	}
	float weightsFromHiddenToOutput[HIDDENLAYERNODES * OUTPUTLAYERNODES];
	for (int i = 0; i < HIDDENLAYERNODES * OUTPUTLAYERNODES; i++) {
		weightsFromHiddenToOutput[i] = (float) rand() / RAND_MAX * 2.0 - 1.0;
	}
	float biasesOfHidden[HIDDENLAYERNODES];
	for (int i = 0; i < HIDDENLAYERNODES; i++) {
		biasesOfHidden[i] = (float) rand() / RAND_MAX * 2.0 - 1.0;
	}
	float biasesOfOutput[OUTPUTLAYERNODES];
	for (int i = 0; i < OUTPUTLAYERNODES; i++) {
		biasesOfOutput[i] = (float) rand() / RAND_MAX * 2.0 - 1.0;
	}
	trainNetwork(trainData, weightsFromInputToHidden, weightsFromHiddenToOutput, biasesOfHidden, biasesOfOutput, INPUTLAYERNODES, HIDDENLAYERNODES, OUTPUTLAYERNODES);
    fclose(trainingData);
	printf("predicting...\n");
	int right = 0;
    for(int i = 0; i < DATATOREAD; i++){
        float data[INPUTLAYERNODES+1];
        data[0] = trainData[i].value[0];
        data[1] = trainData[i].value[1];
		data[2] = trainData[i].value[2];
        data[3] = trainData[i].value[3];
		data[4] = trainData[i].value[4];
		data[5] = trainData[i].value[5];
		data[6] = trainData[i].value[6];
        data[7] = trainData[i].value[7];
        right += predict(data, weightsFromInputToHidden, weightsFromHiddenToOutput, biasesOfHidden, biasesOfOutput, INPUTLAYERNODES, HIDDENLAYERNODES, OUTPUTLAYERNODES);
    }
	printf("I got %i / %i\n", right, DATATOREAD);
    printf("end.\n");
    return 0;
}
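predict() itself lives in another translation unit. A hedged sketch of a compatible implementation, assuming the conventional setup for this kind of toy network: sigmoid activations, data[INPUTLAYERNODES] holding the (already 0-based) class label, and the most active output node taken as the answer. Everything below is an assumption about the missing code, not the original:

#include <cmath>
static float sigmoidf(float x) { return 1.0f / (1.0f + std::exp(-x)); }

int predict(float *data, float *wIH, float *wHO, float *bH, float *bO,
            int INPUT, int HIDDEN, int OUTPUT)
{
    float hidden[HIDDEN], out[OUTPUT];   //VLAs, as in the main above
    for (int h = 0; h < HIDDEN; h++) {
        float sum = bH[h];
        for (int i = 0; i < INPUT; i++)
            sum += data[i] * wIH[h * INPUT + i];
        hidden[h] = sigmoidf(sum);
    }
    int best = 0;
    for (int o = 0; o < OUTPUT; o++) {
        float sum = bO[o];
        for (int h = 0; h < HIDDEN; h++)
            sum += hidden[h] * wHO[o * HIDDEN + h];
        out[o] = sigmoidf(sum);
        if (out[o] > out[best]) best = o;
    }
    return best == (int) data[INPUT];    //1 if the prediction is right
}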
Example #9
static av_always_inline void decode_line(FFV1Context *s, int w,
                                         int16_t *sample[2],
                                         int plane_index, int bits)
{
    PlaneContext *const p = &s->plane[plane_index];
    RangeCoder *const c   = &s->c;
    int x;
    int run_count = 0;
    int run_mode  = 0;
    int run_index = s->run_index;

    if (s->slice_coding_mode == 1) {
        int i;
        for (x = 0; x < w; x++) {
            int v = 0;
            for (i=0; i<bits; i++) {
                uint8_t state = 128;
                v += v + get_rac(c, &state);
            }
            sample[1][x] = v;
        }
        return;
    }

    for (x = 0; x < w; x++) {
        int diff, context, sign;

        context = get_context(p, sample[1] + x, sample[0] + x, sample[1] + x);
        if (context < 0) {
            context = -context;
            sign    = 1;
        } else
            sign = 0;

        av_assert2(context < p->context_count);

        if (s->ac) {
            diff = get_symbol_inline(c, p->state[context], 1);
        } else {
            if (context == 0 && run_mode == 0)
                run_mode = 1;

            if (run_mode) {
                if (run_count == 0 && run_mode == 1) {
                    if (get_bits1(&s->gb)) {
                        run_count = 1 << ff_log2_run[run_index];
                        if (x + run_count <= w)
                            run_index++;
                    } else {
                        if (ff_log2_run[run_index])
                            run_count = get_bits(&s->gb, ff_log2_run[run_index]);
                        else
                            run_count = 0;
                        if (run_index)
                            run_index--;
                        run_mode = 2;
                    }
                }
                run_count--;
                if (run_count < 0) {
                    run_mode  = 0;
                    run_count = 0;
                    diff      = get_vlc_symbol(&s->gb, &p->vlc_state[context],
                                               bits);
                    if (diff >= 0)
                        diff++;
                } else
                    diff = 0;
            } else
                diff = get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);

            av_dlog(s->avctx, "count:%d index:%d, mode:%d, x:%d pos:%d\n",
                    run_count, run_index, run_mode, x, get_bits_count(&s->gb));
        }

        if (sign)
            diff = -diff;

        sample[1][x] = (predict(sample[1] + x, sample[0] + x) + diff) &
                       ((1 << bits) - 1);
    }
    s->run_index = run_index;
}
Example #10
int main()
{
	int i=0;
	init_devices();
	lcd_set_4bit();
	lcd_init();
	color_sensor_scaling();
	
	/*
	//variable 'i' scales at 13,14 for sharp sensor for velocity 240 240
	//u turn 1600ms at 200,200 velocity
	velocity(200,200);
	left();
	_delay_ms(1600);
	stop();
	while(1);
	
	threshold=6000;
	
	right();
	while(ADC_Conversion(11)<65)
	{
		i++;
		lcd_print(1,11,i,3);
	}
	stop();
	lcd_print(2,11,scan(),1);
	stop();
	while(1);
	*/
	setIndicatorAndColor();
	
	threshold=6000;
	ct = 0; adj = 2;
	//lcd("Begin");
	forwardJaa();
	stop();
	servo_1(0);
	servo_2(90);
	servo_3(0);
	while (sorted<total)
	{
		canDrop();
		//buzzer_on();
		//_delay_ms(500);
		//buzzer_off();
		if (visitedCount == 3)
			predict();
		if (sorted == total)
			break;
		pickup();
		traverseToSort(ct, ct % 2 + 4);
		sortCheck();
	}
	for (i = 0; i<4; i++);
	while(1);
		//..printf("%d %d\n", term[i][0], term[i][1]);
	//..printf("Sort 0=%dSort 1=%d\nArm 0=%dArm 1=%d\n", sort[0], sort[1], arm[0], arm[1]);
	//..printf("Cost=%d\nSORTED!!!!!\n", cost + 7);
	//getch();
	return 0;
}
Example #11
void binary_class_predict(FILE *input, FILE *output){
	int    total = 0;
	int    *labels;
	int    max_nr_attr = 64;
	struct feature_node *x = Malloc(struct feature_node, max_nr_attr);
	dvec_t dec_values;
	ivec_t true_labels;
	int n;
	if(model_->bias >= 1)
		n = get_nr_feature(model_) + 1;	
	else
		n = get_nr_feature(model_);


	labels = Malloc(int, get_nr_class(model_));
	get_labels(model_, labels);
	
	max_line_len = 1024;
	line = (char *)malloc(max_line_len*sizeof(char));
	while(readline(input) != NULL)
	{
		int i = 0;
		double target_label, predict_label;
		char *idx, *val, *label, *endptr;
		int inst_max_index = -1; // strtol gives 0 if wrong format, and precomputed kernel has <index> start from 0

		label = strtok(line," \t");
		target_label = strtod(label,&endptr);
		if(endptr == label)
			exit_input_error(total+1);

		while(1)
		{
			if(i>=max_nr_attr - 2)	// need one more for index = -1
			{
				max_nr_attr *= 2;
				x = (struct feature_node *) realloc(x,max_nr_attr*sizeof(struct feature_node));
			}

			idx = strtok(NULL,":");
			val = strtok(NULL," \t");

			if(val == NULL)
				break;
			errno = 0;
			x[i].index = (int) strtol(idx,&endptr,10);
			if(endptr == idx || errno != 0 || *endptr != '\0' || x[i].index <= inst_max_index)
				exit_input_error(total+1);
			else
				inst_max_index = x[i].index;

			errno = 0;
			x[i].value = strtod(val,&endptr);
			if(endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr)))
				exit_input_error(total+1);

			++i;
		}
		
		if(model_->bias >= 0){
			x[i].index = n;
			x[i].value = model_->bias; 
			++i;	
		}
	
		x[i].index = -1;

		predict_label = predict(model_,x);
		fprintf(output,"%g\n",predict_label);


		double dec_value;
		predict_values(model_, x, &dec_value);
		true_labels.push_back((target_label > 0)? 1: -1);
		if(labels[0] <= 0) dec_value *= -1;
		dec_values.push_back(dec_value);
	}	

	validation_function(dec_values, true_labels);

	free(labels);
	free(x);
}
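The read loop expects LIBSVM/LIBLINEAR sparse format: a label, then index:value pairs whose indices must strictly increase (that is what the inst_max_index check enforces). A hypothetical input line such as

+1 3:0.5 7:1.25 12:-0.3

becomes three feature_node entries, optionally a bias node at index n when model_->bias >= 0, and finally the index = -1 sentinel that tells predict(model_, x) where the sparse vector ends.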
Example #12
void do_predict(FILE *input, FILE *output)
{
	int total=0;
	int n;
	int nr_feature=get_nr_feature(model_);
	double *dvec_t;
	double *ivec_t;
	int *query;
	n=nr_feature;

	max_line_len = 1024;
	line = (char *)malloc(max_line_len*sizeof(char));
	while(readline(input) != NULL)
		total++;
	rewind(input);
	dvec_t = new double[total];
	ivec_t = new double[total];
	query = new int[total];
	total = 0;
	while(readline(input) != NULL)
	{
		int i = 0;
		double target_label, predict_label;
		char *idx, *val, *label, *endptr;
		int inst_max_index = 0; // strtol gives 0 if wrong format

		query[total] = 0;
		label = strtok(line," \t\n");
		if(label == NULL) // empty line
			exit_input_error(total+1);

		target_label = strtod(label,&endptr);
		if(endptr == label || *endptr != '\0')
			exit_input_error(total+1);
		ivec_t[total] = target_label;

		while(1)
		{
			if(i>=max_nr_attr-2)	// need one more for index = -1
			{
				max_nr_attr *= 2;
				x = (struct feature_node *) realloc(x,max_nr_attr*sizeof(struct feature_node));
			}

			idx = strtok(NULL,":");
			val = strtok(NULL," \t");

			if(val == NULL)
				break;

			if (strcmp(idx,"qid") == 0)
			{
				errno = 0;
				query[total] = (int) strtol(val,&endptr,10);
				if(endptr == val || errno != 0 || *endptr != '\0')
					exit_input_error(i+1);
				continue;
			}
			errno = 0;
			x[i].index = (int) strtol(idx,&endptr,10);
			if(endptr == idx || errno != 0 || *endptr != '\0' || x[i].index <= inst_max_index)
				exit_input_error(total+1);
			else
				inst_max_index = x[i].index;

			errno = 0;
			x[i].value = strtod(val,&endptr);
			if(endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr)))
				exit_input_error(total+1);

			// feature indices larger than those in training are not used
			if(x[i].index <= nr_feature)
				++i;
		}
		x[i].index = -1;

		predict_label = predict(model_,x);
		fprintf(output,"%.10f\n",predict_label);
		dvec_t[total++] = predict_label;
	}
	double result[3];
	eval_list(ivec_t,dvec_t,query,total,result);
	info("Pairwise Accuracy = %g%%\n",result[0]*100);
	info("MeanNDCG (LETOR) = %g\n",result[1]);
	info("NDCG (YAHOO) = %g\n",result[2]);
}
// Optimise the two lagrange multipliers, if successful then return 1
int SMO::takeStep(int i1, int i2)
{
    // Old values of alpha[i1] and alpha[i2]
    double alpha1, alpha2;
    // New values for alpha[i1] and alpha[i2]
    double a1, a2;
    int y1, y2, s;
    double E1, E2, L, H, k11, k22, k12, eta, Lobj, Hobj, dT;

    if (i1 == i2) return 0;

    alpha1 = alpha[i1];
    y1 = target[i1];
    // Check E1 in error cache, if not present then E1 = SVM output on point[i1] - y1.
    if (alpha1 > 0 && alpha1 < C)
        E1 = errorCache[i1];
    else 
        E1 = predict(points[i1]) - y1;

    alpha2 = alpha[i2];
    y2 = target[i2];
    if (alpha2 > 0 && alpha2 < C)
        E2 = errorCache[i2];
    else 
        E2 = predict(points[i2]) - y2;

    s = y1 * y2;
    // Compute L and H
    if (y1 == y2)
    {
        L = std::max(0.0, alpha1 + alpha2 - C);
        H = std::min(C, alpha1 + alpha2);
    }
    else
    {
        L = std::max(0.0, alpha2 - alpha1);
        H = std::min(C, alpha2 - alpha1 + C);
    }

    if (L == H)
        return 0;

    k11 = kernel(points[i1], points[i1]);
    k12 = kernel(points[i1], points[i2]);
    k22 = kernel(points[i2], points[i2]);

    eta = 2 * k12 - k11 - k22;

    if (eta < 0)
    {
        a2 = alpha2 + y2 * (E2 - E1) / eta;
        if (a2 < L) a2 = L;
        else if (a2 > H) a2 = H;
    }
    else
    {
        double c1 = eta/2;
        double c2 = y2 * (E1-E2)- eta * alpha2;
        Lobj = c1 * L * L + c2 * L;
        Hobj = c1 * H * H + c2 * H;

        if (Lobj > Hobj+epsilon) a2 = L;
        else if (Lobj < Hobj-epsilon) a2 = H;
        else a2 = alpha2;
    }

    if (fabs(a2-alpha2) < epsilon*(a2+alpha2+epsilon))
        return 0;

    a1 = alpha1 - s * (a2 - alpha2);
    if (a1 < 0)
    {
        a2 += s * a1;
        a1 = 0;
    }
    else if (a1 > C)
    {
        double t = a1-C;
        a2 += s * t;
        a1 = C;
    }

    double b1, b2, newThreshold;

    if (a1 > 0 && a1 < C)
        newThreshold = threshold + E1 + y1 * (a1 - alpha1) * k11 + y2 * (a2 - alpha2) * k12;
    else
    {
        if (a2 > 0 && a2 < C)
            newThreshold = threshold + E2 + y1 * (a1 - alpha1) * k12 + y2 * (a2 - alpha2) * k22;
        else 
        {
            b1 = threshold + E1 + y1 * (a1 - alpha1) * k11 + y2 * (a2 - alpha2) * k12;
            b2 = threshold + E2 + y1 * (a1 - alpha1) * k12 + y2 * (a2 - alpha2) * k22;
            newThreshold = (b1 + b2) / 2;
        }
    }
    // Change in threshold value
    dT = newThreshold - threshold;
    // Update threshold to reflect changes in lagrange multipliers.
    threshold = newThreshold;

    double t1 = y1 * (a1-alpha1);
    double t2 = y2 * (a2-alpha2);

    // For linear kernel update weights to reflect changes in a1 and a2.
    for (unsigned int i=0; i < points[0].size(); i++)
        w[i] += points[i1][i] * t1 + points[i2][i] * t2;
    // Update error cache using new lagrange's multipliers.
    for (unsigned int i=0; i<points.size(); i++)
        if (0 < alpha[i] && alpha[i] < C)
            errorCache[i] +=  t1 * kernel(points[i1],points[i]) + t2 * kernel(points[i2],points[i]) - dT;

    errorCache[i1] = 0.0;
    errorCache[i2] = 0.0;

    // Update alpha with a1 and a2
    alpha[i1] = a1;
    alpha[i2] = a2;

    return 1;
}
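With s = y_1 y_2, E_i the cached prediction errors, and \eta = 2k_{12} - k_{11} - k_{22} as computed above (negative in the usual case), takeStep applies the standard Platt SMO update, written here with this code's sign convention:

$$\alpha_2' = \alpha_2 + \frac{y_2\,(E_2 - E_1)}{\eta}, \qquad \alpha_2'' = \min\bigl(H,\ \max(L,\ \alpha_2')\bigr), \qquad \alpha_1' = \alpha_1 - s\,(\alpha_2'' - \alpha_2)$$

When \eta \ge 0 the objective is instead evaluated at the two clip ends L and H and the better end is taken, which is exactly what the Lobj/Hobj branch does.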
int SMO::examineExample(int i1)
{
    double y1, alpha1, E1, r1;
    y1 = target[i1];
    alpha1 = alpha[i1];
    // Check E1 in error cache, if not present then E1 = SVM output on point[i1] - y1.
    if (alpha1 > 0 && alpha1 < C)
        E1 = errorCache[i1];
    else 
        E1 = predict(points[i1]) - y1;

    r1 = y1 * E1;
    // Check if alpha1 violated KKT condition by more than tolerance, if it does then look for alpha2 and optimise them by calling takeStep(i1,i2)
    if ((r1 < -tolerance && alpha1 < C)
        || (r1 > tolerance && alpha1 > 0))
    { 
        unsigned int k;
        int i2;
        double dEmax = 0.0;
        i2 = -1;
        // Try i2 using second choice heuristic as described in section 2.2 by choosing an error to maximize step size.
        // The i2 is taken which maximize dE.
        for (k = 0; k < points.size(); k++)
            if (alpha[k] > 0 && alpha[k] < C)
            {
                double E2, dE;
                E2 = errorCache[k];
                dE = fabs(E1 - E2);
                if (dE > dEmax)
                {
                    dEmax = dE;
                    i2 = k;
                }
            }
        if (i2 >= 0)
        {
            if (takeStep(i1, i2))
                return 1;
        }

        // Loop over all non-zero and non-C alpha, starting at a random point.
        i2 = (int)(rand() % points.size());
        for (k = 0; k < points.size(); k++)
        {
            if (alpha[i2] > 0 && alpha[i2] < C)
            {
                if (takeStep(i1, i2))
                    return 1;
            }
            i2 = (i2 + 1) % points.size();
        }

        // Loop over all possible i2, starting at a random point.
        i2 = (int)(rand() % points.size());
        for (k = 0; k < points.size(); k++)
        {
            if (takeStep(i1, i2))
                return 1;
            i2 = (i2 + 1) % points.size();
        }
    }

    return 0;
}
void Segmentor::train(const string& trainFile, const string& devFile, const string& testFile, const string& modelFile, const string& optionFile,
    const string& wordEmbFile) {
  if (optionFile != "")
    m_options.load(optionFile);

  m_options.showOptions();
  vector<Instance> trainInsts, devInsts, testInsts;
  m_pipe.readInstances(trainFile, trainInsts, m_classifier.MAX_SENTENCE_SIZE - 2, m_options.maxInstance);
  if (devFile != "")
    m_pipe.readInstances(devFile, devInsts, m_classifier.MAX_SENTENCE_SIZE - 2, m_options.maxInstance);
  if (testFile != "")
    m_pipe.readInstances(testFile, testInsts, m_classifier.MAX_SENTENCE_SIZE - 2, m_options.maxInstance);

  vector<vector<Instance> > otherInsts(m_options.testFiles.size());
  for (int idx = 0; idx < m_options.testFiles.size(); idx++) {
    m_pipe.readInstances(m_options.testFiles[idx], otherInsts[idx], m_classifier.MAX_SENTENCE_SIZE - 2, m_options.maxInstance);
  }

  createAlphabet(trainInsts);

  addTestWordAlpha(devInsts);
  addTestWordAlpha(testInsts);
  for (int idx = 0; idx < otherInsts.size(); idx++) {
    addTestWordAlpha(otherInsts[idx]);
  }


  m_classifier.init();
  m_classifier.setDropValue(m_options.dropProb);

  vector<vector<CAction> > trainInstGoldactions;
  getGoldActions(trainInsts, trainInstGoldactions);
  double bestFmeasure = 0;

  int inputSize = trainInsts.size();

  std::vector<int> indexes;
  for (int i = 0; i < inputSize; ++i)
    indexes.push_back(i);

  static Metric eval, metric_dev, metric_test;

  int maxIter = m_options.maxIter * (inputSize / m_options.batchSize + 1);
  int oneIterMaxRound = (inputSize + m_options.batchSize -1) / m_options.batchSize;
  std::cout << "maxIter = " << maxIter << std::endl;
  int devNum = devInsts.size(), testNum = testInsts.size();

  static vector<vector<string> > decodeInstResults;
  static vector<string> curDecodeInst;
  static bool bCurIterBetter;
  static vector<vector<string> > subInstances;
  static vector<vector<CAction> > subInstGoldActions;

  for (int iter = 0; iter < maxIter; ++iter) {
    std::cout << "##### Iteration " << iter << std::endl;
    srand(iter);
    random_shuffle(indexes.begin(), indexes.end());
    std::cout << "random: " << indexes[0] << ", " << indexes[indexes.size() - 1] << std::endl;    
    bool bEvaluate = false;
    if (m_options.batchSize == 1) {
      eval.reset();
      bEvaluate = true;
      for (int idy = 0; idy < inputSize; idy++) {
        subInstances.clear();
        subInstGoldActions.clear();
        subInstances.push_back(trainInsts[indexes[idy]].chars);
        subInstGoldActions.push_back(trainInstGoldactions[indexes[idy]]);

        double cost = m_classifier.train(subInstances, subInstGoldActions);

        eval.overall_label_count += m_classifier._eval.overall_label_count;
        eval.correct_label_count += m_classifier._eval.correct_label_count;

        if ((idy + 1) % (m_options.verboseIter * 10) == 0) {
          std::cout << "current: " << idy + 1 << ", Cost = " << cost << ", Correct(%) = " << eval.getAccuracy() << std::endl;
        }
        m_classifier.updateParams(m_options.regParameter, m_options.adaAlpha, m_options.adaEps);
      }
      std::cout << "current: " << iter + 1 << ", Correct(%) = " << eval.getAccuracy() << std::endl;
    } else {
      if (iter == 0) eval.reset();
      subInstances.clear();
      subInstGoldActions.clear();
      for (int idy = 0; idy < m_options.batchSize; idy++) {
        subInstances.push_back(trainInsts[indexes[idy]].chars);
        subInstGoldActions.push_back(trainInstGoldactions[indexes[idy]]);
      }
      double cost = m_classifier.train(subInstances, subInstGoldActions);

      eval.overall_label_count += m_classifier._eval.overall_label_count;
      eval.correct_label_count += m_classifier._eval.correct_label_count;

      if ((iter + 1) % m_options.verboseIter == 0) {
        std::cout << "current: " << iter + 1 << ", Cost = " << cost << ", Correct(%) = " << eval.getAccuracy() << std::endl;
        eval.reset();
        bEvaluate = true;
      }

      m_classifier.updateParams(m_options.regParameter, m_options.adaAlpha, m_options.adaEps);
    }
    
    if (bEvaluate && devNum > 0) {
      bCurIterBetter = false;
      if (!m_options.outBest.empty())
        decodeInstResults.clear();
      metric_dev.reset();
      for (int idx = 0; idx < devInsts.size(); idx++) {
        predict(devInsts[idx], curDecodeInst);
        devInsts[idx].evaluate(curDecodeInst, metric_dev);
        if (!m_options.outBest.empty()) {
          decodeInstResults.push_back(curDecodeInst);
        }
      }
      std::cout << "dev:" << std::endl;
      metric_dev.print();

      if (!m_options.outBest.empty() && metric_dev.getAccuracy() > bestFmeasure) {
        m_pipe.outputAllInstances(devFile + m_options.outBest, decodeInstResults);
        bCurIterBetter = true;
      }

      if (testNum > 0) {
        if (!m_options.outBest.empty())
          decodeInstResults.clear();
        metric_test.reset();
        for (int idx = 0; idx < testInsts.size(); idx++) {
          predict(testInsts[idx], curDecodeInst);
          testInsts[idx].evaluate(curDecodeInst, metric_test);
          if (bCurIterBetter && !m_options.outBest.empty()) {
            decodeInstResults.push_back(curDecodeInst);
          }
        }
        std::cout << "test:" << std::endl;
        metric_test.print();

        if (!m_options.outBest.empty() && bCurIterBetter) {
          m_pipe.outputAllInstances(testFile + m_options.outBest, decodeInstResults);
        }
      }

      for (int idx = 0; idx < otherInsts.size(); idx++) {
        std::cout << "processing " << m_options.testFiles[idx] << std::endl;
        if (!m_options.outBest.empty())
          decodeInstResults.clear();
        metric_test.reset();
        for (int idy = 0; idy < otherInsts[idx].size(); idy++) {
          predict(otherInsts[idx][idy], curDecodeInst);
          otherInsts[idx][idy].evaluate(curDecodeInst, metric_test);
          if (bCurIterBetter && !m_options.outBest.empty()) {
            decodeInstResults.push_back(curDecodeInst);
          }
        }
        std::cout << "test:" << std::endl;
        metric_test.print();

        if (!m_options.outBest.empty() && bCurIterBetter) {
          m_pipe.outputAllInstances(m_options.testFiles[idx] + m_options.outBest, decodeInstResults);
        }
      }


      if (m_options.saveIntermediate && metric_dev.getAccuracy() > bestFmeasure) {
        std::cout << "Exceeds best previous DIS of " << bestFmeasure << ". Saving model file.." << std::endl;
        bestFmeasure = metric_dev.getAccuracy();
        writeModelFile(modelFile);
      }
    }
  }
}
int main(int argc, char* argv[])
{
	// Read parameters from console.
	CCmdLine cmdLine;

	if(cmdLine.SplitLine(argc, argv) < 5)
	{
		cerr << "Usage: ./bbnet -s score_file -n node -b bkg -f func_depth -o output" << endl;
		cerr << endl << "Additional parameter:" << endl;
		cerr << "-k\tPenalty parameter(logK, Default = 5.0)" << endl;
		cerr << "-c\tnumber of candidate motifs (Default=50)" << endl;
		cerr << "-d\tpositive negative (for prediction using BN)" << endl;
		cerr << "-l\toutput of all training samples' information." << endl;
		cerr << "-t\ttranslational(transcriptional) start sites.(Default = right end)" << endl;
		cerr << "-rb\tbit-string to determine which rules to include.(Default = 111110)" << endl;
		cerr << "-i\tUse mutual information instead of Bayesian score" << endl;
		cerr << endl << "Contact: \"Li Shen\"<*****@*****.**>" << endl;
		return 1;
	}

	string s, n, b, f, o;
	try
	{
		s = cmdLine.GetArgument("-s", 0);	// score file.
		n = cmdLine.GetArgument("-n", 0);	// node gene list file.
		b = cmdLine.GetArgument("-b", 0);	// bkg gene list file.
		f = cmdLine.GetArgument("-f", 0);	// func depth folder.
		o = cmdLine.GetArgument("-o", 0);	// results output file.
	}
	catch(int)
	{
		cerr << "Wrong arguments!" << endl;
		return 1;
	}

	if(cmdLine.HasSwitch("-i"))
		itag = true;

	//itag = true;
	//string s = "../gbnet/data/Beer/scor_test.list";
	//string n = "../gbnet/data/Beer/node.list";
	//string b = "../gbnet/data/Beer/bkg.list";
	//string f = "../gbnet/data/Beer/func";
	//string k = "0.015";
	//logK = atof(k.data());
	//string o = "../gbnet/data/Beer/bb_res_test2.txt";

	string k;	// Penalty parameter; logK value.
	if(!itag)
		k = cmdLine.GetSafeArgument("-k", 0, "5.0");
	else
		k = cmdLine.GetSafeArgument("-k", 0, "0.015");
	logK = atof(k.data());

	string c = cmdLine.GetSafeArgument("-c", 0, "50");	// number of candidate motifs. default = 50.
	motifcand = atoi(c.data());

	// Use prior counts for some motifs if specified.
	string p = cmdLine.GetSafeArgument("-p", 0, "0");	// prior counts.
	pricnt = atoi(p.data());
	if(pricnt > 0)	// read preferred motifs list from file.
	{
		prior = 1;
		string fPrim = cmdLine.GetSafeArgument("-p", 1, "primot.txt");
		vector<string> primv;
		if(get1stcol(fPrim, primv) < 0)
			return 1;
		for(size_t i = 0; i < primv.size(); i++)
			primo.insert(primv[i]);
	}

	// File names for positive, negative and left-out testing lists.
	string pos = cmdLine.GetSafeArgument("-d", 0, "");	// positive testing cases.
	string neg = cmdLine.GetSafeArgument("-d", 1, "");	// negative testing cases.
	string res = cmdLine.GetSafeArgument("-d", 2, "");	// left-out testing cases.
	vector<string> plst, nlst, rlst;	// positive, negative and left-out lists.
	if(pos != "" && neg != "")
	{
		if(get1stcol(pos, plst) < 0)
			return 1;
		if(get1stcol(neg, nlst) < 0)
			return 1;
	}
	if(res != "")
	{
		if(get1stcol(res, rlst) < 0)
			return 1;
	}

	// File for output of all training samples' information.
	string finfo = cmdLine.GetSafeArgument("-l", 0, "");

	// File to store all genes' translational/transcriptional start sites.
	string ftss = cmdLine.GetSafeArgument("-t", 0, "");
	if(ftss != "")
		loadtss(ftss, mtss);

	// A bit-string to determine which rules to include.
	rb = cmdLine.GetSafeArgument("-rb", 0, "111110");

	string bp = cmdLine.GetSafeArgument("-bp", 0, "");	// Output each gene's probability like in Beer's prediction.
	
	// Load motif Bayesian score file.
	if(loadscor(mscor, s) != 0)
	{
		cerr << "Load motif scores eror!" << endl;
		return 1;
	}
	else
	{
#ifdef VERBOSE
		cout << "Display candidate motifs that are loaded:" << endl;
		dispscor(mscor);
#endif
	}
	vector<MotifScore> oscor = mscor;	// Save an original copy of motif scores.

	// Load gene list.
	vector<Case> tlst, blst, genlst;
	set<string> genset;
	if(loadgene(tlst, blst, n, b) != 0)
	{
		cerr << "Load gene lists error!" << endl;
		return 1;
	}
	else
	{
		genlst.insert(genlst.end(), tlst.begin(), tlst.end());
		genlst.insert(genlst.end(), blst.begin(), blst.end());
#ifdef VERBOSE
		cout << "Load gene list completed!" << endl;
#endif
		// All training and testing gene names are put into genmap.
		for(size_t i = 0; i < genlst.size(); i++)
			genset.insert(genlst[i].name);
		for(size_t i = 0; i < plst.size(); i++)
			genset.insert(plst[i]);
		for(size_t i = 0; i < nlst.size(); i++)
			genset.insert(nlst[i]);
		for(size_t i = 0; i < rlst.size(); i++)
			genset.insert(rlst[i]);
	}

	// Load motif binding information of genes in genmap.
	if(loadbind(allbind, mscor, genset, f) != 0)
	{
		cerr << "Load binding information error!" << endl;
		return 1;
	}
	else
	{
#ifdef VERBOSE
		cout << "Load binding information completed!" << endl;
#endif
	}

	// File for output.
	ofstream hOut(o.data());
	if(!hOut)
	{
		cerr << "Can't open " << o << endl;
		return 1;
	}
	hOut << "Number of genes in category 1: " << tlst.size() << endl;
	hOut << "Number of genes in category 0: " << blst.size() << endl << endl;

#ifdef VERBOSE
	cout << endl << "Running on original data." << endl;
#endif
	vector<Constraint> cons;
	vector<CPTRow> cpt;
	clock_t start = clock();
	double scor = bbnet(cons, cpt, genlst);
	clock_t finish = clock();
	if(outbayes(hOut, scor, cons, cpt, oscor, tlst.size(), blst.size()) != 0)
	{
		cerr << "Output Bayesian network results error!" << endl;
		return 1;
	}
	if(finfo != "")
	{
		if(outgene(finfo, tlst, blst, cons) != 0)
			cerr << "Output training samples' information error!" << endl;
		return 1;
	}
	if(pos != "" && neg != "")
	{
		Pred d = predict(cons, cpt, plst, nlst);
		outpred(hOut, d, n, b, pos, neg);
		if(bp != "")	// output each gene's probability being in this cluster if output file is specified.
		{
			ofstream hbp(bp.data());
			if(!hbp)
			{
				cerr << "Can't open " << bp << endl;
				return 1;
			}
			vector<BPred> trnbp = predict(cons, cpt, genlst, 0);	// probabilities for training genes.
			outpred(hbp, trnbp);
			vector<string> tstlst;	// probabilities for testing genes.
			tstlst.insert(tstlst.end(), plst.begin(), plst.end());	// positive testings.
			tstlst.insert(tstlst.end(), nlst.begin(), nlst.end());	// negative testings.
			vector<BPred> tstbp = predict(cons, cpt, tstlst, 1);
			outpred(hbp, tstbp);
			if(res != "")	// probabilities for left-out genes if the left-out file is specified.
			{
				vector<BPred> lefbp = predict(cons, cpt, rlst, -1);
				outpred(hbp, lefbp);
			}
			hbp.close();
		}
	}
	hOut << endl << "Bayesian network occupied CPU " << (double)(finish-start)/CLOCKS_PER_SEC << " seconds." << endl;
	hOut.close();

	return 0;
}
Example #17
void MainWindow::createActions() {

    _openiTunesAction = new QAction(QIcon(":/images/iTunes.png"),
                                    tr("Open i&Tunes Library..."), this);
    _openiTunesAction->setShortcut(tr("Ctrl+T"));
    _openiTunesAction->setStatusTip(tr("Open iTunes Library file"));
    connect(_openiTunesAction, SIGNAL(triggered()), this, SLOT(openiTunesLibrary()));

    _openCollectionAction = new QAction(QIcon(":/images/openCollection.png"),
                                        tr("Open Collection File"), this);
    _openCollectionAction->setShortcut(tr("Ctrl+T"));
    _openCollectionAction->setStatusTip(tr("Open Collection file"));
    connect(_openCollectionAction, SIGNAL(triggered()), this, SLOT(openCollectionFile()));

    _exitAction = new QAction(tr("E&xit"), this);
    _exitAction->setShortcut(tr("Ctrl+Q"));
    connect(_exitAction, SIGNAL(triggered()), this, SLOT(close()));

    _coutAction = new QAction(tr("&Cout Library"), this);
    _coutAction->setShortcut(tr("Ctrl+E"));
    connect(_coutAction, SIGNAL(triggered()), this, SLOT(display()));

    _predictAction = new QAction(QIcon(":/images/predict.png"),
                                 tr("&Predict"), this);
    _predictAction->setShortcut(tr("Ctrl+3"));
    _predictAction->setStatusTip(tr("Predict the placement of the prediction tracks"));
    connect(_predictAction, SIGNAL(triggered()), _display, SLOT(predict()));

    _extractAction = new QAction(QIcon(":/images/extract.png"),
                                 tr("&Extract"), this);
    _extractAction->setShortcut(tr("Ctrl+1"));
    _extractAction->setStatusTip(tr("Extract features from the defined training tracks"));
    connect(_extractAction, SIGNAL(triggered()), _display, SLOT(extract()));

    _trainingAction = new QAction(QIcon(":/images/train.png"),
                                  tr("&Train"), this);
    _trainingAction->setShortcut(tr("Ctrl+2"));
    _trainingAction->setStatusTip(tr("Train the grid using the defined training tracks"));
    connect(_trainingAction, SIGNAL(triggered()), _display, SLOT(train()));

    _initAction = new QAction(QIcon(":/images/init.png"), tr("&Initialize"), this);
    connect(_initAction, SIGNAL(triggered()), _display, SLOT(init()));

    _aboutAction = new QAction(tr("&About"), this);
    connect(_aboutAction, SIGNAL(triggered()), this, SLOT(about()));

    _saveGridAction = new QAction(tr("&Save Grid"), this);
    connect(_saveGridAction, SIGNAL(triggered()), this, SLOT(saveCurrentGrid()) );

    _loadGridAction = new QAction(tr("&Load Saved Grid"),this);
    connect(_loadGridAction, SIGNAL(triggered()), this, SLOT(openSavedGrid()));
    _playModeAction = new QAction(tr("&Continuous"), this);
    connect(_playModeAction, SIGNAL(triggered()), this, SLOT(changedPlayMode()));

    _cancelAction = new QAction(tr("&Cancel Action"), this);
    connect(_cancelAction, SIGNAL(triggered()), this, SLOT(cancelButton()));

    _normHashAction = new QAction(tr("Open Saved Hash"), this);
    connect(_normHashAction, SIGNAL(triggered()), _display, SLOT(hashLoad()));

    _fullScreenAction = new QAction (tr("&Full Screen Mouse Mode"), this);
    connect(_fullScreenAction, SIGNAL(triggered()), _display, SLOT(fullScreenMouse()));

    _colourMapMode = new QAction (tr("&Colour Mapping Mode"),this);
    connect(_colourMapMode, SIGNAL(triggered()), _display, SLOT(colourMapMode()));

    _resetButtonAction = new QAction (tr("&Reset"), this);
    connect (_resetButtonAction, SIGNAL(triggered()), this, SLOT(resetButtonPressed()));

    _optionsDialogAction = new QAction(tr("&Options"), this);
    connect(_optionsDialogAction, SIGNAL(triggered()), this, SLOT(optionsDialogTriggered()));
}
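createActions only constructs the QAction objects and their shortcuts; they still have to be attached to menus or toolbars elsewhere. A hedged sketch of that companion step (createMenus, the menu titles, and the toolbar grouping are assumptions, not code from this project):

void MainWindow::createMenus() {
    QMenu *fileMenu = menuBar()->addMenu(tr("&File"));
    fileMenu->addAction(_openiTunesAction);
    fileMenu->addAction(_openCollectionAction);
    fileMenu->addSeparator();
    fileMenu->addAction(_exitAction);

    QToolBar *toolBar = addToolBar(tr("Workflow"));
    toolBar->addAction(_extractAction);   // Ctrl+1
    toolBar->addAction(_trainingAction);  // Ctrl+2
    toolBar->addAction(_predictAction);   // Ctrl+3
}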
Example #18
bool KNN::train_(const ClassificationData &trainingData,const UINT K){
    
    //Set the dimensionality of the input data
    this->K = K;
    
    //Flag that the algorithm has been trained so we can compute the rejection thresholds
    trained = true;
    
    //If null rejection is enabled then compute the null rejection thresholds
    if( useNullRejection ){
        
        //Set the null rejection to false so we can compute the values for it (this will be set back to its current value later)
        useNullRejection = false;
        nullRejectionThresholds.clear();
        
        //Compute the rejection thresholds for each of the K classes
        VectorFloat counter(numClasses,0);
        trainingMu.resize( numClasses, 0 );
        trainingSigma.resize( numClasses, 0 );
        nullRejectionThresholds.resize( numClasses, 0 );
        
        //Compute Mu for each of the classes
        const unsigned int numTrainingExamples = trainingData.getNumSamples();
        Vector< IndexedDouble > predictionResults( numTrainingExamples );
        for(UINT i=0; i<numTrainingExamples; i++){
            predict( trainingData[i].getSample(), K);
            
            UINT classLabelIndex = 0;
            for(UINT k=0; k<numClasses; k++){
                if( predictedClassLabel == classLabels[k] ){
                    classLabelIndex = k;
                    break;
                }
            }
            
            predictionResults[ i ].index = classLabelIndex;
            predictionResults[ i ].value = classDistances[ classLabelIndex ];
            
            trainingMu[ classLabelIndex ] += predictionResults[ i ].value;
            counter[ classLabelIndex ]++;
        }
        
        for(UINT j=0; j<numClasses; j++){
            trainingMu[j] /= counter[j];
        }
        
        //Compute Sigma for each of the classes
        for(UINT i=0; i<numTrainingExamples; i++){
            trainingSigma[predictionResults[i].index] += SQR(predictionResults[i].value - trainingMu[predictionResults[i].index]);
        }
        
        for(UINT j=0; j<numClasses; j++){
            Float count = counter[j];
            if( count > 1 ){
                trainingSigma[ j ] = sqrt( trainingSigma[j] / (count-1) );
            }else{
                trainingSigma[ j ] = 1.0;
            }
        }
        
        //Check to see if any of the mu or sigma values are zero or NaN
        bool errorFound = false;
        for(UINT j=0; j<numClasses; j++){
            if( trainingMu[j] == 0 ){
                warningLog << "TrainingMu[ " << j << " ] is zero for a K value of " << K << std::endl;
            }
            if( trainingSigma[j] == 0 ){
                warningLog << "TrainingSigma[ " << j << " ] is zero for a K value of " << K << std::endl;
            }
            if( grt_isnan( trainingMu[j] ) ){
                errorLog << "TrainingMu[ " << j << " ] is NAN for a K value of " << K << std::endl;
                errorFound = true;
            }
            if( grt_isnan( trainingSigma[j] ) ){
                errorLog << "TrainingSigma[ " << j << " ] is NAN for a K value of " << K << std::endl;
                errorFound = true;
            }
        }
        
        if( errorFound ){
            trained = false;
            return false;
        }
        
        //Compute the rejection thresholds
        for(unsigned int j=0; j<numClasses; j++){
            nullRejectionThresholds[j] = trainingMu[j] + (trainingSigma[j]*nullRejectionCoeff);
        }
        
        //Restore the actual state of the null rejection
        useNullRejection = true;
        
    }else{
        //Resize the rejection thresholds but set the values to 0
        nullRejectionThresholds.clear();
        nullRejectionThresholds.resize( numClasses, 0 );
    }
    
    return true;
}
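The loop that fills nullRejectionThresholds is a one-sided Gaussian band: with \mu_j and \sigma_j the mean and sample standard deviation of the winning-class distances measured on the training data,

$$\tau_j = \mu_j + \gamma\,\sigma_j$$

where \gamma is nullRejectionCoeff. At prediction time a sample whose distance for its best class j exceeds \tau_j can then be rejected as null; that consuming step happens in predict(), not in this snippet.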
void Labeler::train(const string& trainFile, const string& devFile, const string& testFile, const string& modelFile, const string& optionFile,
                    const string& wordEmbFile, const string& charEmbFile) {
    if (optionFile != "")
        m_options.load(optionFile);

    m_options.showOptions();

    m_linearfeat = 0;

    vector<Instance> trainInsts, devInsts, testInsts;
    static vector<Instance> decodeInstResults;
    static Instance curDecodeInst;
    bool bCurIterBetter = false;

    m_pipe.readInstances(trainFile, trainInsts, m_options.maxInstance);
    if (devFile != "")
        m_pipe.readInstances(devFile, devInsts, m_options.maxInstance);
    if (testFile != "")
        m_pipe.readInstances(testFile, testInsts, m_options.maxInstance);

    //Ensure that each file in m_options.testFiles exists!
    vector<vector<Instance> > otherInsts(m_options.testFiles.size());
    for (int idx = 0; idx < m_options.testFiles.size(); idx++) {
        m_pipe.readInstances(m_options.testFiles[idx], otherInsts[idx], m_options.maxInstance);
    }

    //std::cout << "Training example number: " << trainInsts.size() << std::endl;
    //std::cout << "Dev example number: " << trainInsts.size() << std::endl;
    //std::cout << "Test example number: " << trainInsts.size() << std::endl;

    createAlphabet(trainInsts);

    if (!m_options.wordEmbFineTune) {
        addTestWordAlpha(devInsts);
        addTestWordAlpha(testInsts);
        for (int idx = 0; idx < otherInsts.size(); idx++) {
            addTestWordAlpha(otherInsts[idx]);
        }
        cout << "Remain words num: " << m_wordAlphabet.size() << endl;
    }

    if (!m_options.charEmbFineTune) {
        addTestCharAlpha(devInsts);
        addTestCharAlpha(testInsts);
        for (int idx = 0; idx < otherInsts.size(); idx++) {
            addTestCharAlpha(otherInsts[idx]);
        }
        cout << "Remain char num: " << m_charAlphabet.size() << endl;
    }

    NRMat<dtype> wordEmb;
    if (wordEmbFile != "") {
        readWordEmbeddings(wordEmbFile, wordEmb);
    } else {
        wordEmb.resize(m_wordAlphabet.size(), m_options.wordEmbSize);
        wordEmb.randu(1000);
    }

    NRMat<dtype> charEmb;
    if (charEmbFile != "") {
        readWordEmbeddings(charEmbFile, charEmb);
    } else {
        charEmb.resize(m_charAlphabet.size(), m_options.charEmbSize);
        charEmb.randu(1001);
    }

    m_classifier.init(wordEmb, charEmb, m_options.wordcontext, m_options.charcontext, m_labelAlphabet.size(), m_options.wordHiddenSize, m_options.charHiddenSize,
                      m_options.hiddenSize);
    m_classifier.resetRemove(m_options.removePool);
    m_classifier.setDropValue(m_options.dropProb);
    m_classifier.setWordEmbFinetune(m_options.wordEmbFineTune);

    vector<Example> trainExamples, devExamples, testExamples;
    initialExamples(trainInsts, trainExamples);
    initialExamples(devInsts, devExamples);
    initialExamples(testInsts, testExamples);

    vector<int> otherInstNums(otherInsts.size());
    vector<vector<Example> > otherExamples(otherInsts.size());
    for (int idx = 0; idx < otherInsts.size(); idx++) {
        initialExamples(otherInsts[idx], otherExamples[idx]);
        otherInstNums[idx] = otherExamples[idx].size();
    }

    dtype bestDIS = 0;

    int inputSize = trainExamples.size();

    srand(0);
    std::vector<int> indexes;
    for (int i = 0; i < inputSize; ++i)
        indexes.push_back(i);

    static Metric eval, metric_dev, metric_test;
    static vector<Example> subExamples;
    int devNum = devExamples.size(), testNum = testExamples.size();

    int maxIter = m_options.maxIter;
    if (m_options.batchSize > 1)
        maxIter = m_options.maxIter * (inputSize / m_options.batchSize + 1);

    dtype cost = 0.0;
    std::cout << "maxIter = " << maxIter << std::endl;
    for (int iter = 0; iter < m_options.maxIter; ++iter) {
        std::cout << "##### Iteration " << iter << std::endl;
        eval.reset();
        if (m_options.batchSize == 1) {
            random_shuffle(indexes.begin(), indexes.end());
            for (int updateIter = 0; updateIter < inputSize; updateIter++) {
                subExamples.clear();
                int start_pos = updateIter;
                int end_pos = (updateIter + 1);
                if (end_pos > inputSize)
                    end_pos = inputSize;

                for (int idy = start_pos; idy < end_pos; idy++) {
                    subExamples.push_back(trainExamples[indexes[idy]]);
                }

                int curUpdateIter = iter * inputSize + updateIter;
                cost = m_classifier.process(subExamples, curUpdateIter);

                eval.overall_label_count += m_classifier._eval.overall_label_count;
                eval.correct_label_count += m_classifier._eval.correct_label_count;

                if ((curUpdateIter + 1) % m_options.verboseIter == 0) {
                    //m_classifier.checkgrads(subExamples, curUpdateIter+1);
                    std::cout << "current: " << updateIter + 1 << ", total instances: " << inputSize << std::endl;
                    std::cout << "Cost = " << cost << ", SA Correct(%) = " << eval.getAccuracy() << std::endl;
                }
                m_classifier.updateParams(m_options.regParameter, m_options.adaAlpha, m_options.adaEps);
            }
        } else {
            cost = 0.0;
            for (int updateIter = 0; updateIter < m_options.verboseIter; updateIter++) {
                random_shuffle(indexes.begin(), indexes.end());
                subExamples.clear();
                for (int idy = 0; idy < m_options.batchSize; idy++) {
                    subExamples.push_back(trainExamples[indexes[idy]]);
                }
                int curUpdateIter = iter * m_options.verboseIter + updateIter;
                cost += m_classifier.process(subExamples, curUpdateIter);
                //m_classifier.checkgrads(subExamples, curUpdateIter);
                eval.overall_label_count += m_classifier._eval.overall_label_count;
                eval.correct_label_count += m_classifier._eval.correct_label_count;

                m_classifier.updateParams(m_options.regParameter, m_options.adaAlpha, m_options.adaEps);
            }
            std::cout << "current iter: " << iter + 1 << ", total iter: " << maxIter << std::endl;
            std::cout << "Cost = " << cost << ", SA Correct(%) = " << eval.getAccuracy() << std::endl;
        }

        if (devNum > 0) {
            bCurIterBetter = false;
            if (!m_options.outBest.empty())
                decodeInstResults.clear();
            metric_dev.reset();
            for (int idx = 0; idx < devExamples.size(); idx++) {
                string result_label;
                dtype confidence = predict(devExamples[idx].m_linears, devExamples[idx].m_features, result_label);

                devInsts[idx].Evaluate(result_label, metric_dev);

                if (!m_options.outBest.empty()) {
                    curDecodeInst.copyValuesFrom(devInsts[idx]);
                    curDecodeInst.assignLabel(result_label, confidence);
                    decodeInstResults.push_back(curDecodeInst);
                }
            }
            metric_dev.print();

            if ((!m_options.outBest.empty() && metric_dev.getAccuracy() > bestDIS)) {
                m_pipe.outputAllInstances(devFile + m_options.outBest, decodeInstResults);
                bCurIterBetter = true;
            }

            if (testNum > 0) {
                if (!m_options.outBest.empty())
                    decodeInstResults.clear();
                metric_test.reset();
                for (int idx = 0; idx < testExamples.size(); idx++) {
                    string result_label;
                    dtype confidence = predict(testExamples[idx].m_linears, testExamples[idx].m_features, result_label);
                    testInsts[idx].Evaluate(result_label, metric_test);

                    if (bCurIterBetter && !m_options.outBest.empty()) {
                        curDecodeInst.copyValuesFrom(testInsts[idx]);
                        curDecodeInst.assignLabel(result_label, confidence);
                        decodeInstResults.push_back(curDecodeInst);
                    }
                }
                std::cout << "test:" << std::endl;
                metric_test.print();

                if ((!m_options.outBest.empty() && bCurIterBetter)) {
                    m_pipe.outputAllInstances(testFile + m_options.outBest, decodeInstResults);
                }
            }

            for (int idx = 0; idx < otherExamples.size(); idx++) {
                std::cout << "processing " << m_options.testFiles[idx] << std::endl;
                if (!m_options.outBest.empty())
                    decodeInstResults.clear();
                metric_test.reset();
                for (int idy = 0; idy < otherExamples[idx].size(); idy++) {
                    string result_label;
                    dtype confidence = predict(otherExamples[idx][idy].m_linears, otherExamples[idx][idy].m_features, result_label);

                    otherInsts[idx][idy].Evaluate(result_label, metric_test);

                    if (bCurIterBetter && !m_options.outBest.empty()) {
                        curDecodeInst.copyValuesFrom(otherInsts[idx][idy]);
                        curDecodeInst.assignLabel(result_label, confidence);
                        decodeInstResults.push_back(curDecodeInst);
                    }
                }
                std::cout << "test:" << std::endl;
                metric_test.print();

                if ((!m_options.outBest.empty() && bCurIterBetter)) {
                    m_pipe.outputAllInstances(m_options.testFiles[idx] + m_options.outBest, decodeInstResults);
                }
            }

            if ((m_options.saveIntermediate && metric_dev.getAccuracy() > bestDIS)) {
                if (metric_dev.getAccuracy() > bestDIS) {
                    std::cout << "Exceeds best previous performance of " << bestDIS << ". Saving model file.." << std::endl;
                    bestDIS = metric_dev.getAccuracy();
                }
                writeModelFile(modelFile);
            }

        }
        // Clear gradients
    }

    if (devNum > 0) {
        bCurIterBetter = false;
        if (!m_options.outBest.empty())
            decodeInstResults.clear();
        metric_dev.reset();
        for (int idx = 0; idx < devExamples.size(); idx++) {
            string result_label;
            dtype confidence = predict(devExamples[idx].m_linears, devExamples[idx].m_features, result_label);

            devInsts[idx].Evaluate(result_label, metric_dev);

            if (!m_options.outBest.empty()) {
                curDecodeInst.copyValuesFrom(devInsts[idx]);
                curDecodeInst.assignLabel(result_label, confidence);
                decodeInstResults.push_back(curDecodeInst);
            }
        }
        metric_dev.print();

        if ((!m_options.outBest.empty() && metric_dev.getAccuracy() > bestDIS)) {
            m_pipe.outputAllInstances(devFile + m_options.outBest, decodeInstResults);
            bCurIterBetter = true;
        }

        if (testNum > 0) {
            if (!m_options.outBest.empty())
                decodeInstResults.clear();
            metric_test.reset();
            for (int idx = 0; idx < testExamples.size(); idx++) {
                string result_label;
                dtype confidence = predict(testExamples[idx].m_linears, testExamples[idx].m_features, result_label);
                testInsts[idx].Evaluate(result_label, metric_test);

                if (bCurIterBetter && !m_options.outBest.empty()) {
                    curDecodeInst.copyValuesFrom(testInsts[idx]);
                    curDecodeInst.assignLabel(result_label, confidence);
                    decodeInstResults.push_back(curDecodeInst);
                }
            }
            std::cout << "test:" << std::endl;
            metric_test.print();

            if ((!m_options.outBest.empty() && bCurIterBetter)) {
                m_pipe.outputAllInstances(testFile + m_options.outBest, decodeInstResults);
            }
        }

        for (int idx = 0; idx < otherExamples.size(); idx++) {
            std::cout << "processing " << m_options.testFiles[idx] << std::endl;
            if (!m_options.outBest.empty())
                decodeInstResults.clear();
            metric_test.reset();
            for (int idy = 0; idy < otherExamples[idx].size(); idy++) {
                string result_label;
                dtype confidence = predict(otherExamples[idx][idy].m_linears, otherExamples[idx][idy].m_features, result_label);

                otherInsts[idx][idy].Evaluate(result_label, metric_test);

                if (bCurIterBetter && !m_options.outBest.empty()) {
                    curDecodeInst.copyValuesFrom(otherInsts[idx][idy]);
                    curDecodeInst.assignLabel(result_label, confidence);
                    decodeInstResults.push_back(curDecodeInst);
                }
            }
            std::cout << "test:" << std::endl;
            metric_test.print();

            if ((!m_options.outBest.empty() && bCurIterBetter)) {
                m_pipe.outputAllInstances(m_options.testFiles[idx] + m_options.outBest, decodeInstResults);
            }
        }

        if (m_options.saveIntermediate && metric_dev.getAccuracy() > bestDIS) {
            std::cout << "Exceeds best previous performance of " << bestDIS << ". Saving model file..." << std::endl;
            bestDIS = metric_dev.getAccuracy();
            writeModelFile(modelFile);
        }

    } else {
        writeModelFile(modelFile);
    }
}
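The function above follows a common dev-driven checkpointing pattern: evaluate on the dev set each iteration, report test results only when dev accuracy improves, and save the model at that point. A minimal sketch of the pattern, using illustrative names that are not from the original source:

// Hedged sketch of the evaluate-and-checkpoint loop; all names are illustrative.
double bestDevAccuracy = 0.0;
for (int iter = 0; iter < maxIter; ++iter) {
    trainOneEpoch();                                     // update parameters on the training set
    double devAccuracy = evaluateAccuracy(devExamples);  // dev set decides when to checkpoint
    if (devAccuracy > bestDevAccuracy) {
        bestDevAccuracy = devAccuracy;
        evaluateAccuracy(testExamples);                  // test sets are only reported on improvement
        writeModelFile(modelFile);                       // keep the best model so far
    }
}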
Example No. 20
bool KNN::train_(ClassificationData &trainingData){
    
    //Clear any previous models
    clear();
    
    if( trainingData.getNumSamples() == 0 ){
        errorLog << "train_(ClassificationData &trainingData) - Training data has zero samples!" << std::endl;
        return false;
    }
    
    //Get the ranges of the data
    ranges = trainingData.getRanges();
    if( useScaling ){
        //Scale the training data between 0 and 1
        trainingData.scale(0, 1);
    }
    
    //Store the number of features, classes and the training data
    this->numInputDimensions = trainingData.getNumDimensions();
    this->numClasses = trainingData.getNumClasses();
    
    //TODO: In the future need to build a kdtree from the training data to allow better realtime prediction
    this->trainingData = trainingData;
    
    //Set the class labels
    classLabels.resize( numClasses );
    for(UINT k=0; k<numClasses; k++){
        classLabels[k] = trainingData.getClassTracker()[k].classLabel;
    }
    
    //If we do not need to search for the best K value, then call the sub training function and return the result
    if( !searchForBestKValue ){
        return train_(trainingData,K);
    }
    
    //If we have got this far then we are going to search for the best K value
    UINT index = 0;
    Float bestAccuracy = 0;
    Vector< IndexedDouble > trainingAccuracyLog;
    
    for(UINT k=minKSearchValue; k<=maxKSearchValue; k++){
        //Randomly split the data and use 80% to train the algorithm and 20% to test it
        ClassificationData trainingSet(trainingData);
        ClassificationData testSet = trainingSet.split(80,true);
        
        if( !train_(trainingSet, k) ){
            errorLog << "Failed to train model for a k value of " << k << std::endl;
        }else{
                
            //Compute the classification error
            Float accuracy = 0;
            for(UINT i=0; i<testSet.getNumSamples(); i++){
                
                VectorFloat sample = testSet[i].getSample();
                
                if( !predict( sample, k ) ){
                    errorLog << "Failed to predict label for test sample with a k value of " << k << std::endl;
                    return false;
                }
                
                if( testSet[i].getClassLabel() == predictedClassLabel ){
                    accuracy++;
                }
            }
            
            accuracy = accuracy / Float( testSet.getNumSamples() ) * 100.0;
            trainingAccuracyLog.push_back( IndexedDouble(k,accuracy) );
            
            trainingLog << "K:\t" << k << "\tAccuracy:\t" << accuracy << std::endl;
            
            if( accuracy > bestAccuracy ){
                bestAccuracy = accuracy;
            }
            
            index++;
        }
            
    }
        
    if( bestAccuracy > 0 ){
        //Sort the training log by value
        std::sort(trainingAccuracyLog.begin(),trainingAccuracyLog.end(),IndexedDouble::sortIndexedDoubleByValueDescending);
        
        //Copy the top matching values into a temporary buffer
        Vector< IndexedDouble > tempLog;
        
        //Add the first value
        tempLog.push_back( trainingAccuracyLog[0] );
        
        //Keep adding values until the value changes
        for(UINT i=1; i<trainingAccuracyLog.size(); i++){
            if( trainingAccuracyLog[i].value == tempLog[0].value ){
                tempLog.push_back( trainingAccuracyLog[i] );
            }else break;
        }
        
        //Sort the temp values by index (the index is the K value so we want to get the minimum K value with the maximum accuracy)
        std::sort(tempLog.begin(),tempLog.end(),IndexedDouble::sortIndexedDoubleByIndexAscending);
        
        trainingLog << "Best K Value: " << tempLog[0].index << "\tAccuracy:\t" << tempLog[0].value << std::endl;
        
        //Use the minimum index, this should give us the best accuracy with the minimum K value
        //We now need to train the model again to make sure all the training metrics are computed correctly
        return train_(trainingData,tempLog[0].index);
    }
        
    return false;
}
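A hypothetical call site for the trainer above, assuming GRT-style entry points (exact method names vary between GRT versions):

// Hedged usage sketch; the file name and setup are illustrative.
GRT::ClassificationData data;
if( data.load("training_data.grt") ){        // assumed dataset file
    GRT::KNN knn;
    knn.enableScaling(true);
    if( knn.train(data) ){                   // dispatches to train_() and the best-K search
        GRT::VectorFloat sample = data[0].getSample();
        if( knn.predict(sample) ){
            std::cout << "Predicted label: " << knn.getPredictedClassLabel() << std::endl;
        }
    }
}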
Example No. 21
// Returns the predicted angular velocity
double RobotTracker::angular_velocity(double time)
{
    Matrix x = predict(time);
    return x.e(5,0);
}
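A hypothetical call site; index 5 of the predicted state vector holds the angular velocity:

// Sketch only: 'tracker' and 'now' are illustrative.
double omega = tracker.angular_velocity(now);  // predicted angular velocity at time 'now'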
Example No. 22
void mexFunction( int nlhs, mxArray *plhs[],
		 int nrhs, const mxArray *prhs[] )
{
	int prob_estimate_flag = 0;
	struct svm_model *model;

	if(nrhs > 4 || nrhs < 3)
	{
		exit_with_help();
		fake_answer(plhs);
		return;
	}

	if(!mxIsDouble(prhs[0]) || !mxIsDouble(prhs[1])) {
		mexPrintf("Error: label vector and instance matrix must be double\n");
		fake_answer(plhs);
		return;
	}

	if(mxIsStruct(prhs[2]))
	{
		const char *error_msg;

		/* parse options */
		if(nrhs==4)
		{
			int i, argc = 1;
			char cmd[CMD_LEN], *argv[CMD_LEN/2];

			/* put options in argv[] */
			mxGetString(prhs[3], cmd,  mxGetN(prhs[3]) + 1);
			if((argv[argc] = strtok(cmd, " ")) != NULL)
				while((argv[++argc] = strtok(NULL, " ")) != NULL)
					;

			for(i=1;i<argc;i++)
			{
				if(argv[i][0] != '-') break;
				if(++i>=argc)
				{
					exit_with_help();
					fake_answer(plhs);
					return;
				}
				switch(argv[i-1][1])
				{
					case 'b':
						prob_estimate_flag = atoi(argv[i]);
						break;
					default:
						mexPrintf("Unknown option: -%c\n", argv[i-1][1]);
						exit_with_help();
						fake_answer(plhs);
						return;
				}
			}
		}

		model = matlab_matrix_to_model(prhs[2], &error_msg);
		if (model == NULL)
		{
			mexPrintf("Error: can't read model: %s\n", error_msg);
			fake_answer(plhs);
			return;
		}

		if(prob_estimate_flag)
		{
			if(svm_check_probability_model(model)==0)
			{
				mexPrintf("Model does not support probabiliy estimates\n");
				fake_answer(plhs);
				svm_free_and_destroy_model(&model);
				return;
			}
		}
		else
		{
			if(svm_check_probability_model(model)!=0)
				printf("Model supports probability estimates, but disabled in predicton.\n");
		}

		predict(plhs, prhs, model, prob_estimate_flag);
		/* destroy model */
		svm_free_and_destroy_model(&model);
	}
	else
	{
		mexPrintf("model file should be a struct array\n");
		fake_answer(plhs);
	}

	return;
}
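Judging from the argument parsing above, this is the libsvm svmpredict MEX gateway; from MATLAB it would be invoked as [predicted_label, accuracy, prob_estimates] = svmpredict(label_vector, instance_matrix, model, '-b 1'), where the optional fourth argument is the option string parsed above.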
Example No. 23
void RTFusionKalman4::newIMUData(RTIMU_DATA& data, const RTIMUSettings *settings)
{
    if (m_enableGyro)
        m_gyro = data.gyro;
    else
        m_gyro = RTVector3();
    
    m_accel = data.accel;
    m_compass = data.compass;
    m_compassValid = data.compassValid;

    if (m_firstTime) {
        m_lastFusionTime = data.timestamp;
        calculatePose(m_accel, m_compass, settings->m_compassAdjDeclination);
        m_Fk.fill(0);

        //  init covariance matrix to something

        m_Pkk.fill(0);
        for (int i = 0; i < 4; i++)
            for (int j = 0; j < 4; j++)
                m_Pkk.setVal(i,j, 0.5);

        // initialize the observation model Hk
        // Note: since the model is the state vector, this is an identity matrix so it won't be used

        //  initialize the poses

        m_stateQ.fromEuler(m_measuredPose);
        m_fusionQPose = m_stateQ;
        m_fusionPose = m_measuredPose;
        m_firstTime = false;
    } else {
        m_timeDelta = (RTFLOAT)(data.timestamp - m_lastFusionTime) / (RTFLOAT)1000000;
        m_lastFusionTime = data.timestamp;
        if (m_timeDelta <= 0)
            return;

        if (m_debug) {
            HAL_INFO("\n------\n");
            HAL_INFO1("IMU update delta time: %f\n", m_timeDelta);
        }

        calculatePose(data.accel, data.compass, settings->m_compassAdjDeclination);

        predict();
        update();
        m_stateQ.toEuler(m_fusionPose);
        m_fusionQPose = m_stateQ;

        if (m_debug || settings->m_fusionDebug) {
            HAL_INFO(RTMath::displayRadians("Measured pose", m_measuredPose));
            HAL_INFO(RTMath::displayRadians("Kalman pose", m_fusionPose));
            HAL_INFO(RTMath::display("Measured quat", m_measuredQPose));
            HAL_INFO(RTMath::display("Kalman quat", m_stateQ));
            HAL_INFO(RTMath::display("Error quat", m_stateQError));
         }
    }
    data.fusionPoseValid = true;
    data.fusionQPoseValid = true;
    data.fusionPose = m_fusionPose;
    data.fusionQPose = m_fusionQPose;
}
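A hedged sketch of how this fusion step is typically driven, assuming an RTIMULib-style read loop (in RTIMULib the fusion object normally lives inside the RTIMU instance, which forwards each sample to newIMUData):

// Sketch under assumptions; names follow RTIMULib conventions.
RTIMUSettings *settings = new RTIMUSettings("RTIMULib");
RTIMU *imu = RTIMU::createIMU(settings);
imu->IMUInit();
while (true) {
    usleep(imu->IMUGetPollInterval() * 1000);   // pace reads to the IMU sample rate
    while (imu->IMURead()) {                    // each read runs the predict/update above
        RTIMU_DATA data = imu->getIMUData();
        // data.fusionPose now holds the filtered Euler angles
    }
}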
Example No. 24
void FunctionApproximator::train(const MatrixXd& inputs, const MatrixXd& targets, string save_directory, bool overwrite)
{
  train(inputs,targets);
  
  if (save_directory.empty())
    return;
  
  if (!isTrained())
    return;
  
  if (getExpectedInputDim()<3)
  {
    
    VectorXd min = inputs.colwise().minCoeff();
    VectorXd max = inputs.colwise().maxCoeff();
    
    int n_samples_per_dim = 100;
    if (getExpectedInputDim()==2) n_samples_per_dim = 40;
    VectorXi n_samples_per_dim_vec = VectorXi::Constant(getExpectedInputDim(),n_samples_per_dim);

    MatrixXd inputs_grid;
    FunctionApproximator::generateInputsGrid(min, max, n_samples_per_dim_vec, inputs_grid);
    
    MatrixXd outputs_grid(inputs_grid.rows(),1);
    predict(inputs_grid,outputs_grid);
    
    saveMatrix(save_directory,"n_samples_per_dim.txt",n_samples_per_dim_vec,overwrite);
    saveMatrix(save_directory,"inputs_grid.txt",inputs_grid,overwrite);
    saveMatrix(save_directory,"outputs_grid.txt",outputs_grid,overwrite);


    MatrixXd variances_grid;
    predictVariance(inputs_grid,variances_grid);
    if (variances_grid.size()!=0)
    {
      variances_grid = variances_grid.array().sqrt();
      saveMatrix(save_directory,"variances_grid.txt",variances_grid,overwrite);
    }
    
    model_parameters_->saveGridData(min, max, n_samples_per_dim_vec, save_directory, overwrite);
    
  }

  MatrixXd outputs;
  predict(inputs,outputs);

  saveMatrix(save_directory,"inputs.txt",inputs,overwrite);
  saveMatrix(save_directory,"targets.txt",targets,overwrite);
  saveMatrix(save_directory,"outputs.txt",outputs,overwrite);
  
  
  
  string filename = save_directory+"/plotdata.py";
  ofstream outfile;
  outfile.open(filename.c_str()); 
  if (!outfile.is_open())
  {
    cerr << __FILE__ << ":" << __LINE__ << ":";
    cerr << "Could not open file " << filename << " for writing." << endl;
  } 
  else
  {
    // Python code generation in C++. Rock 'n' roll! ;-)
    if (inputs.cols()==2) {                                                                                           
      outfile << "from mpl_toolkits.mplot3d import Axes3D                                       \n";
    }
    outfile   << "import numpy                                                                  \n";
    outfile   << "import matplotlib.pyplot as plt                                               \n";
    outfile   << "directory = '" << save_directory << "'                                        \n";
    outfile   << "inputs   = numpy.loadtxt(directory+'/inputs.txt')                             \n";
    outfile   << "targets  = numpy.loadtxt(directory+'/targets.txt')                            \n";
    outfile   << "outputs  = numpy.loadtxt(directory+'/outputs.txt')                            \n";
    outfile   << "fig = plt.figure()                                                            \n";
    if (inputs.cols()==2) {                                                                                           
      outfile << "ax = Axes3D(fig)                                                              \n";
      outfile << "ax.plot(inputs[:,0],inputs[:,1],targets, '.', label='targets',color='black')  \n";
      outfile << "ax.plot(inputs[:,0],inputs[:,1],outputs, '.', label='predictions',color='red')\n";
      outfile << "ax.set_xlabel('input_1'); ax.set_ylabel('input_2'); ax.set_zlabel('output')   \n";
      outfile << "ax.legend(loc='lower right')                                                  \n";
    } else {                                                                                           
      outfile << "plt.plot(inputs,targets, '.', label='targets',color='black')                  \n";
      outfile << "plt.plot(inputs,outputs, '.', label='predictions',color='red')                \n";
      outfile << "plt.xlabel('input'); plt.ylabel('output');                                    \n";
      outfile << "plt.legend(loc='lower right')                                                 \n";
    }                                                                                           
    outfile   << "plt.show()                                                                    \n";
    outfile << endl;

    outfile.close();
    //cout << "        ______________________________________________________________" << endl;
    //cout << "        | Plot saved data with:" << " 'python " << filename << "'." << endl;
    //cout << "        |______________________________________________________________" << endl;
  }
  
}
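A hypothetical caller for the training-and-plotting helper above, assuming a concrete FunctionApproximator subclass instance (the plotting path only runs when a save directory is given):

// Hedged sketch; 'fa' is assumed to point at a concrete subclass instance.
Eigen::MatrixXd inputs(50, 1), targets(50, 1);
for (int i = 0; i < 50; ++i) {
    inputs(i, 0)  = 0.1 * i;
    targets(i, 0) = std::sin(inputs(i, 0));   // toy 1-D regression target
}
fa->train(inputs, targets, "/tmp/fa_demo", /*overwrite=*/true);
// Afterwards: 'python /tmp/fa_demo/plotdata.py' plots targets vs. predictions.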
Example No. 25
void Recommender::cache_recommendations(set<int>& users, 
                                        set<int>& articles,
                                        bool startFresh)
{
  

  time_t _start_time = time(NULL);
  int terminator = -10;  // note: negative, so the 'terminator--' guard in the loop condition below never stops early; a positive value would cap the number of users processed
  int num_total_recs  = 0;

  for(set<int>::iterator user = users.begin();
      user != users.end() && terminator--; user++) {
    char buffer[50];
    sprintf(buffer, "recommend_%d", (*user));

    User u;
    u.clear();
    if (!startFresh)
      u.decachify(buffer);

    time_t start_time = time(NULL);
    int n_attempts  = 0, n_attempts_max = 10;

    do {
      int rec_count = 0;
      for(set<int>::iterator art = articles.begin();
          art != articles.end(); art++) {
        if(!this->storeModdedArticles && 
           mods[(*user)].has_key(*art)) continue;
        float q = predict(*user, *art, n_attempts != 0 && art == articles.begin());
        int a = (*art);
        if(q != 0) {
          rec_count++;
          u.add(a, q);
        }
      }
      if(!rec_count && articles.size() > 100) {
        cout << " +++ Resorting to cluster behavior" << endl;
        double fudge0 = fudge;
        fudge = 1;
        for(set<int>::iterator art = articles.begin();
            art != articles.end(); art++) {
          if(mods[(*user)].has_key(*art)) continue;
          float q = predict(*user, *art);
          int a = (*art);
          if(q != 0) {
            rec_count++;
            u.add(a, q);
          }
        }
        fudge = fudge0;
      }

      num_total_recs += rec_count;
      
      if(rec_count > 20) {
        cout << " --> " << (*user) 
             << ": \x1b[32m\x1b[1mGenerated " << rec_count << " additional recommendations\x1b[0m"
             << endl;
        break;
      }
      else {
        cout << " --> \x1b[33m\x1b[1mFAILED TO GENERATE RECOMMENDATIONS (only " << rec_count << ")!!!\x1b[0m" << endl;
        cout << "     Available mods: " << mods[(*user)].size() << endl;
        if(mods[(*user)].size() > 5) {
          cout << " --> \x1b[31m\x1b[1mFAILED TO GENERATE RECOMMENDATIONS (in the bad way)!!!\x1b[0m" << endl;
        }
      }
    } while(++n_attempts < n_attempts_max);

    if(u.size())
      u.memcachify(buffer);

    //      recs[*user].memcachify(buffer);

    cout << "\tFinished user " << (*user) << " (" 
         << difftime(time(NULL), start_time) << " sec, " 
         << articles.size() << " arts)" << endl;
  }
  cout << "Total recommendation time: " << difftime(time(NULL), _start_time)
       << " / " << users.size() << " users * " 
       << articles.size() << " articles \n\t --> " 
       << num_total_recs << " recs (@ " 
       << 1000./float(num_total_recs)*float(difftime(time(NULL), _start_time)) << " ms each)" << endl;

  //  fclose(toFile(fopen("user.data", "wb")));
}
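A hypothetical driver for the caching pass above:

// Hedged sketch; the ids and the loadArticleIds() helper are illustrative (C++11).
std::set<int> users    = {101, 102, 103};
std::set<int> articles = loadArticleIds();   // assumed helper
recommender.cache_recommendations(users, articles, /*startFresh=*/true);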
Example No. 26
void InertiaSensorFilter::update(OrientationData& orientationData,
        const InertiaSensorData& theInertiaSensorData,
        const SensorData& theSensorData,
        const RobotModel& theRobotModel,
        const FrameInfo& theFrameInfo,
        const MotionInfo& theMotionInfo,
        const WalkingEngineOutput& theWalkingEngineOutput)
{
//  MODIFY("module:InertiaSensorFilter:parameters", p);
//
//  DECLARE_PLOT("module:InertiaSensorFilter:expectedGyroX");
//  DECLARE_PLOT("module:InertiaSensorFilter:gyroX");
//  DECLARE_PLOT("module:InertiaSensorFilter:expectedGyroY");
//  DECLARE_PLOT("module:InertiaSensorFilter:gyroY");
//  DECLARE_PLOT("module:InertiaSensorFilter:expectedAccX");
//  DECLARE_PLOT("module:InertiaSensorFilter:accX");
//  DECLARE_PLOT("module:InertiaSensorFilter:expectedAccY");
//  DECLARE_PLOT("module:InertiaSensorFilter:accY");
//  DECLARE_PLOT("module:InertiaSensorFilter:expectedAccZ");
//  DECLARE_PLOT("module:InertiaSensorFilter:accZ");

  // check whether the filter shall be reset
  if(!lastTime || theFrameInfo.time <= lastTime)
  {
    if(theFrameInfo.time == lastTime)
      return; // weird log file replaying?
        x = State<>();
        cov = p.processCov;

        lastLeftFoot = lastRightFoot = Pose3D();
        lastTime = theFrameInfo.time - (unsigned int)(theFrameInfo.cycleTime * 1000.f);
  }

  // get foot positions
  const Pose3D& leftFoot(theRobotModel.limbs[MassCalibration::footLeft]);
  const Pose3D& rightFoot(theRobotModel.limbs[MassCalibration::footRight]);
  const Pose3D leftFootInvert(leftFoot.invert());
  const Pose3D rightFootInvert(rightFoot.invert());

  // calculate rotation and position offset using the robot model (joint data)
  const Pose3D leftOffset(lastLeftFoot.translation.z != 0.f ? Pose3D(lastLeftFoot).conc(leftFootInvert) : Pose3D());
  const Pose3D rightOffset(lastRightFoot.translation.z != 0.f ? Pose3D(lastRightFoot).conc(rightFootInvert) : Pose3D());

  // detect the foot that is on ground
  bool useLeft = true;
  if(theMotionInfo.motion == MotionRequest::walk && theWalkingEngineOutput.speed.translation.x != 0)
    useLeft = theWalkingEngineOutput.speed.translation.x > 0 ?
              (leftOffset.translation.x > rightOffset.translation.x) :
              (leftOffset.translation.x < rightOffset.translation.x);
  else
  {
    Pose3D left(x.rotation);
    Pose3D right(x.rotation);
    left.conc(leftFoot);
    right.conc(rightFoot);
    useLeft = left.translation.z < right.translation.z;
  }

  // calculate velocity
  Vector3<> calcVelocity, lastCalcVelocity;
  float timeScale = 1.f / (float(theFrameInfo.time - lastTime) * 0.001f);
  calcVelocity = useLeft ? leftOffset.translation : rightOffset.translation;
  calcVelocity *= timeScale * 0.001f; // => m/s

  // update the filter
  timeScale = float(theFrameInfo.time - lastTime) * 0.001f;
  predict(theInertiaSensorData.gyro.x != InertiaSensorData::off ?
          RotationMatrix(Vector3<>(theInertiaSensorData.gyro.x * timeScale, theInertiaSensorData.gyro.y * timeScale, 0)) :
          (useLeft ? leftOffset.rotation :  rightOffset.rotation));

  // insert calculated rotation
  if(theInertiaSensorData.acc.x != InertiaSensorData::off)
    safeRawAngle = Vector2<>(theSensorData.data[SensorData::angleX], theSensorData.data[SensorData::angleY]);
  if((theMotionInfo.motion == MotionRequest::walk || theMotionInfo.motion == MotionRequest::stand ||
      (theMotionInfo.motion == MotionRequest::specialAction && theMotionInfo.specialActionRequest.specialAction == SpecialActionRequest::sitDownKeeper)) &&
     abs(safeRawAngle.x) < p.calculatedAccLimit.x && abs(safeRawAngle.y) < p.calculatedAccLimit.y)
  {
    const RotationMatrix& usedRotation(useLeft ? leftFootInvert.rotation : rightFootInvert.rotation);
    RotationMatrix calculatedRotation(Vector3<>(
                                        atan2(usedRotation.c1.z, usedRotation.c2.z),
                                        atan2(-usedRotation.c0.z, usedRotation.c2.z), 0.f));
    Vector3<> accGravOnly(calculatedRotation.c0.z, calculatedRotation.c1.z, calculatedRotation.c2.z);
    accGravOnly *= -9.80665f;
    readingUpdate(accGravOnly);
  }
  else // insert acceleration sensor values
  {
    if(theInertiaSensorData.acc.x != InertiaSensorData::off)
      readingUpdate(theInertiaSensorData.acc);
  }

  // fill the representation
  orientationData.orientation = Vector2<>(
                                  atan2(x.rotation.c1.z, x.rotation.c2.z),
                                  atan2(-x.rotation.c0.z, x.rotation.c2.z));
  // this removes any kind of z-rotation from internal rotation
  if(orientationData.orientation.squareAbs() < 0.04f * 0.04f)
    x.rotation = RotationMatrix(Vector3<>(orientationData.orientation.x, orientationData.orientation.y, 0.f));
  orientationData.velocity = calcVelocity;

  // store some data for the next iteration
  lastLeftFoot = leftFoot;
  lastRightFoot = rightFoot;
  lastTime = theFrameInfo.time;

  // plots
//  PLOT("module:InertiaSensorFilter:orientationX", orientationData.orientation.x);
//  PLOT("module:InertiaSensorFilter:orientationY", orientationData.orientation.y);
//  PLOT("module:InertiaSensorFilter:velocityX", orientationData.velocity.x);
//  PLOT("module:InertiaSensorFilter:velocityY", orientationData.velocity.y);
//  PLOT("module:InertiaSensorFilter:velocityZ", orientationData.velocity.z);
}
Example No. 27
bool KNN::train(LabelledClassificationData &trainingData){

    if( !searchForBestKValue ){
        return train_(trainingData,K);
    }

    UINT bestIndex = 0;
    UINT index = 0;
    double bestAccuracy = 0;
    vector< IndexedDouble > trainingAccuracyLog;

    for(UINT k=minKSearchValue; k<=maxKSearchValue; k++){
        //Randomly split the data and use 80% to train the algorithm and 20% to test it
        LabelledClassificationData trainingSet(trainingData);
        LabelledClassificationData testSet = trainingSet.partition(80,true);

        if( !train_(trainingSet, k) ){
            errorLog << "Failed to train model for a k value of " << k << endl;
        }else{

            //Compute the classification error
            double accuracy = 0;
            for(UINT i=0; i<testSet.getNumSamples(); i++){

                vector< double > sample = testSet[i].getSample();

                if( !predict( sample ) ){
                    errorLog << "Failed to predict label for test sample with a k value of " << k << endl;
                    return false;
                }

                if( testSet[i].getClassLabel() == predictedClassLabel ){
                    accuracy++;
                }
            }

            accuracy = accuracy / double( testSet.getNumSamples() ) * 100.0;
            trainingAccuracyLog.push_back( IndexedDouble(k,accuracy) );

            trainingLog << "K:\t" << k << "\tAccuracy:\t" << accuracy << endl;

            if( accuracy > bestAccuracy ){
                bestIndex = index;
                bestAccuracy = accuracy;
            }

            index++;

        }

    }

    if( bestAccuracy > 0 ){
        //Sort the training log by value, descending, so the highest accuracy comes first
        std::sort(trainingAccuracyLog.begin(),trainingAccuracyLog.end(),IndexedDouble::sortIndexedDoubleByValueDescending);

        //Copy the top matching values into a temporary buffer
        vector< IndexedDouble > tempLog;

        //Add the first value
        tempLog.push_back( trainingAccuracyLog[0] );

        //Keep adding values until the value changes
        for(UINT i=1; i<trainingAccuracyLog.size(); i++){
            if( trainingAccuracyLog[i].value == tempLog[0].value ){
                tempLog.push_back( trainingAccuracyLog[i] );
            }else break;
        }

        //Sort the temp values by index, ascending (the index is the K value, so we get the minimum K with the maximum accuracy)
        std::sort(tempLog.begin(),tempLog.end(),IndexedDouble::sortIndexedDoubleByIndexAscending);

		trainingLog << "Best K Value: " << tempLog[0].index << "\tAccuracy:\t" << tempLog[0].value << endl;

        //Use the minimum index, this should give us the best accuracy with the minimum K value
        return train_(trainingData,tempLog[0].index);
    }

    return false;
}
Example No. 28
	/* Use linear form for r, computes inverse model using inverse_Fx */
	Float predict (Linear_invertable_predict_model& f)
	/* Use linear form for r, and use inv.Fx from invertible model */
	{
		return predict(f, f.inv.Fx, true);
	}
Example No. 29
int main(int argc, char **argv)
{
	FILE *input, *output;
	int i;
	// parse options
	for(i=1;i<argc;i++)
	{
		if(argv[i][0] != '-') break;
		++i;
		switch(argv[i-1][1])
		{
			case 'b':
				predict_probability = atoi(argv[i]);
				break;
			case 'q':
				info = &print_null;
				i--;
				break;
			default:
				fprintf(stderr,"Unknown option: -%c\n", argv[i-1][1]);
				exit_with_help();
		}
	}

	if(i>=argc-2)
		exit_with_help();

	input = fopen(argv[i],"r");
	if(input == NULL)
	{
		fprintf(stderr,"can't open input file %s\n",argv[i]);
		exit(1);
	}

	output = fopen(argv[i+2],"w");
	if(output == NULL)
	{
		fprintf(stderr,"can't open output file %s\n",argv[i+2]);
		exit(1);
	}

	if((model=svm_load_model(argv[i+1]))==0)
	{
		fprintf(stderr,"can't open model file %s\n",argv[i+1]);
		exit(1);
	}

	#ifdef _DENSE_REP
		x.dim = 0;
		x.values = (double*) malloc( max_nr_attr*sizeof(double) );
	#else
		x = (struct svm_node *) malloc(max_nr_attr*sizeof(struct svm_node));
	#endif
	if(predict_probability)
	{
		if(svm_check_probability_model(model)==0)
		{
			fprintf(stderr,"Model does not support probabiliy estimates\n");
			exit(1);
		}
	}
	else
	{
		if(svm_check_probability_model(model)!=0)
			info("Model supports probability estimates, but disabled in prediction.\n");
	}

	predict(input,output);
	svm_free_and_destroy_model(&model);
	#ifdef CL_SVM
		svm_teardown_prediction();
	#endif
	#ifdef _DENSE_REP
		free( x.values );
	#else
		free(x);
	#endif
	free(line);
	fclose(input);
	fclose(output);
	return 0;
}
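From the command line this predictor follows the standard libsvm convention, e.g. svm-predict -b 1 test_file model_file output_file, with -b 1 requesting probability estimates when the model supports them.

Example No. 30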
	void ParticleFilter::filter(double camX, double camY, double camD, double* resX, double* resY, double* resD){
		resample();
		predict();
		weight(camX, camY, camD);
		measure(resX, resY, resD);
	}
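A hypothetical per-frame call for the filter cycle above, passing the camera pose measurement in and receiving the filtered estimate through the out-parameters:

// Sketch only: variable names are illustrative.
double fx, fy, fd;                           // filtered x, y, and heading
pf.filter(camX, camY, camD, &fx, &fy, &fd);  // resample, predict, weight, measure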