Example #1
void PosEst::initPos(float x, float y, float z, float roll, float pitch, float yaw)
{
    pos_init.x = x;
    pos_init.y = y;
    pos_init.z = z;
    pos_init.roll = roll;
    pos_init.pitch = pitch;
    pos_init.yaw = yaw;
    pos_updated = pos_init;

    particles.resize(numParticles);
    initSample();
}
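A minimal usage sketch for the estimator above; the PosEst object, its numParticles member, and initSample() are assumed to be defined as in the class this method belongs to, and the units are an assumption:

// Hypothetical usage: reseed the pose estimator at a known starting pose.
// PosEst, numParticles, and initSample() come from the class shown above;
// metres for x/y/z and radians for roll/pitch/yaw are assumed units.
PosEst estimator;
estimator.initPos(0.0f, 0.0f, 0.0f,    // x, y, z
                  0.0f, 0.0f, 0.0f);   // roll, pitch, yaw
// pos_updated now mirrors pos_init and the particle set has been resized and reseeded.
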
fmiStatus fmiInitialize(fmiComponent c, fmiBoolean toleranceControlled, fmiReal relativeTolerance, fmiEventInfo* eventInfo)
{
  double nextSampleEvent=0;
  ModelInstance* comp = (ModelInstance *)c;
  threadData_t *threadData = comp->threadData;
  threadData->currentErrorStage = ERROR_SIMULATION;

  if (invalidState(comp, "fmiInitialize", modelInstantiated))
    return fmiError;
  if (nullPointer(comp, "fmiInitialize", "eventInfo", eventInfo))
    return fmiError;
  if (comp->loggingOn) comp->functions.logger(c, comp->instanceName, fmiOK, "log",
      "fmiInitialize: toleranceControlled=%d relativeTolerance=%g",
      toleranceControlled, relativeTolerance);

  /* set zero-crossing tolerance */
  setZCtol(relativeTolerance);

  setStartValues(comp);
  copyStartValuestoInitValues(comp->fmuData);

  /* try */
  MMC_TRY_INTERNAL(simulationJumpBuffer)

    if(initialization(comp->fmuData, comp->threadData, "", "", 0.0, 5))
    {
      comp->state = modelError;
      if(comp->loggingOn) comp->functions.logger(c, comp->instanceName, fmiOK, "log",
          "fmiInitialize: failed");
    }
    else
    {
      comp->state = modelInitialized;
      if(comp->loggingOn) comp->functions.logger(c, comp->instanceName, fmiOK, "log",
          "fmiInitialize: succeeded");
    }

    /* TODO: The simulation stop time is needed here to precompute all sample events.
             We shouldn't have to generate them all in advance. */
    initSample(comp->fmuData, comp->threadData, comp->fmuData->localData[0]->timeValue, 100 /*should be stopTime*/);
    initDelay(comp->fmuData, comp->fmuData->localData[0]->timeValue);

    /* overwrite old values in case an event occurred */
    overwriteOldSimulationData(comp->fmuData);

    eventInfo->iterationConverged = fmiTrue;
    eventInfo->stateValueReferencesChanged = fmiFalse;
    eventInfo->stateValuesChanged = fmiTrue;
    eventInfo->terminateSimulation = fmiFalse;

    /* Get next event time (sample calls)*/
    nextSampleEvent = getNextSampleTimeFMU(comp->fmuData);
    if (nextSampleEvent == -1){
      eventInfo->upcomingTimeEvent = fmiFalse;
    }else{
      eventInfo->upcomingTimeEvent = fmiTrue;
      eventInfo->nextEventTime = nextSampleEvent;
      fmiEventUpdate(comp, fmiFalse, eventInfo);
    }

    return fmiOK;

  /* catch */
  MMC_CATCH_INTERNAL(simulationJumpBuffer)

    comp->functions.logger(c, comp->instanceName, fmiError, "error", "fmiInitialize: terminated by an assertion.");
    return fmiError;
}
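For context, a minimal sketch of the importer-side FMI 1.0 model-exchange sequence that reaches fmiInitialize(); the header name, GUID string, and callback wiring are assumptions, and real FMUs usually expose these entry points under a model-specific prefix:

/* Hedged sketch of an FMI 1.0 model-exchange importer calling fmiInitialize(). */
#include "fmiModelFunctions.h"           /* assumed FMI 1.0 ME header */

extern fmiCallbackFunctions callbacks;   /* logger/allocator, assumed set up elsewhere */

int initializeFmu(void)
{
    fmiEventInfo eventInfo;
    fmiComponent c = fmiInstantiateModel("model1", "{assumed-guid}", callbacks, fmiTrue);
    if (c == NULL)
        return -1;

    fmiSetTime(c, 0.0);
    if (fmiInitialize(c, fmiFalse, 0.0, &eventInfo) != fmiOK)
        return -1;

    /* eventInfo.upcomingTimeEvent / nextEventTime now reflect the first
     * sample event scheduled at the end of fmiInitialize() above. */
    return 0;
}
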
Example #3
int Sci1SongIterator::initSong() {
	int last_time;
	uint offset = 0;
	_numChannels = 0;
	_samples.clear();
//	_deviceId = 0x0c;

	if (_data[offset] == 0xf0) {
		priority = _data[offset + 1];

		offset += 8;
	}

	while (_data[offset] != 0xff
	        && _data[offset] != _deviceId) {
		offset++;
		CHECK_FOR_END_ABSOLUTE(offset + 1);
		while (_data[offset] != 0xff) {
			CHECK_FOR_END_ABSOLUTE(offset + 7);
			offset += 6;
		}
		offset++;
	}

	if (_data[offset] == 0xff) {
		warning("[iterator] Song does not support hardware 0x%02x", _deviceId);
		return 1;
	}

	offset++;

	while (_data[offset] != 0xff) { /* End of list? */
		uint track_offset;
		int end;
		offset += 2;

		CHECK_FOR_END_ABSOLUTE(offset + 4);

		track_offset = READ_LE_UINT16(_data.begin() + offset);
		end = READ_LE_UINT16(_data.begin() + offset + 2);

		CHECK_FOR_END_ABSOLUTE(track_offset - 1);

		if (_data[track_offset] == 0xfe) {
			if (initSample(track_offset))
				return 1; /* Error */
		} else {
			/* Regular MIDI channel */
			if (_numChannels >= MIDI_CHANNELS) {
				warning("[iterator] Song has more than %d channels, cutting them off",
				          MIDI_CHANNELS);
				break; /* Scan for remaining samples */
			} else {
				int channel_nr = _data[track_offset] & 0xf;
				SongIteratorChannel &channel = _channels[_numChannels++];

				/*
				if (_data[track_offset] & 0xf0)
					printf("Channel %d has mapping bits %02x\n",
					       channel_nr, _data[track_offset] & 0xf0);
				*/

				// Add 2 to skip over header bytes
				channel.init(channel_nr, track_offset + 2, track_offset + end);
				channel.resetSynthChannels();

				_polyphony[_numChannels - 1] = _data[channel.offset - 1] & 15;
				_importance[_numChannels - 1] = _data[channel.offset - 1] >> 4;

				channel.playmask = ~0; /* Enable all */
				channel_mask |= (1 << channel_nr);

				CHECK_FOR_END_ABSOLUTE(offset + end);
			}
		}
		offset += 4;
		CHECK_FOR_END_ABSOLUTE(offset);
	}

	/* Now ensure that sample deltas are relative to the previous sample */
	last_time = 0;
	_numActiveChannels = _numChannels;
	_numLoopedChannels = 0;

	for (Common::List<Sci1Sample>::iterator seeker = _samples.begin();
			seeker != _samples.end(); ++seeker) {
		int prev_last_time = last_time;
		//printf("[iterator] Detected sample: %d Hz, %d bytes at time %d\n",
		//          seeker->format.rate, seeker->size, seeker->delta);
		last_time = seeker->delta;
		seeker->delta -= prev_last_time;
	}

	return 0; /* Success */
}
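As a reading aid, the byte layout that initSong() appears to walk, reconstructed from the parser above rather than from an authoritative SCI1 format description:

/* Layout implied by the parser (an interpretation of the code above):
 *   optional priority block:  0xf0, priority, ...        (8 bytes total)
 *   per-device sections:      device id, then 6-byte entries, ended by 0xff;
 *                             sections are skipped until _deviceId matches,
 *                             and a bare 0xff means the song has no data for it
 *   matching device section:  repeated 6-byte records of
 *                             2 skipped bytes, uint16 LE track_offset, uint16 LE size,
 *                             terminated by 0xff
 *   at track_offset:          0xfe marks a digital sample (handed to initSample());
 *                             otherwise the low nibble is the MIDI channel number,
 *                             the next byte appears to pack polyphony (low nibble)
 *                             and importance (high nibble), and the channel data
 *                             starts at track_offset + 2. */
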
int main(int argc, char *argv[]) {
    /* START DEFINING */
    /* start iteration parameters; */
    int TCOUNT;
    int NUMRUNS;
    /* done iteration parameters */
    
    /* start final monte carlo sample */
    int **final_mc;
    
    /* start checks */
    double *lambda_backup;
    /* done checks */
    
    /* start expectations */
    double *expec_algor;
	
    double Z_algor_frac_accum_algor_immed;
    /* done expectations */
        
    /* start files */
    char trackFile[300];
    char paramsFile[300];
    char mcFile[300];
	char numrunsString[30];
	char paramsFileTemp[300]; 
    
    /* start counters, miscellaneous */
	/* clock watchers */
    double start_time, learn_time, sample_time;
    int mc, j, i0, i1;
    int numruns, tsample;
    double exit_conditions[4];
    int stop_me = 0;    /* set nonzero by WriteMetrics() when an exit condition is met */
    /* recalculate time 
    int recalc_expec;
	recalc_expec=0;*/
    
    /*some variables for timing the duration of each learning and sampling step*/
    learn_time=0;
    sample_time=0;
    
	/* DONE DEFINING */

    /* START INITIALIZING */
	
	/*****************************************************************************/
    /*************************** START READING INPUTS  ***************************/
	/*****************************************************************************/
	
    Inputs(argv, argc, &NUMVARS, &NUMRUNS, &TCOUNT, &NUMSAMPS, &NUMDATA, trackFile, paramsFile, mcFile, expec_empir, init_lambda, exit_conditions);
	
    /***************************** END READING INPUTS  ***********************/
    
    /*****************************************************************************/
    /*************************** START ALLOCATING SPACE  *************************/
	/*****************************************************************************/
	
    NUMFCNS = NUMVARS + NUMVARS * (NUMVARS - 1) / 2;
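    /* NUMFCNS counts one constraint per variable (first moments) plus one per
     * unordered pair of variables (second moments).  For example, NUMVARS = 5
     * gives 5 + 5*4/2 = 15 functions: indices 0..NUMVARS-1 are the singletons
     * and the remainder the pairs, as laid out in first[]/second[]/combine[][]
     * below. */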
    
    /* start Monte Carlo samples */
    count_monte = (int *) malloc(NUMFCNS * sizeof(int));
    bound_monte = (int *) malloc(NUMFCNS * sizeof(int));
    sInds_monte = (int **) malloc(NUMFCNS * sizeof(int *));
    for(j = 0; j < NUMFCNS; j++) {
        bound_monte[j] = 1;
        sInds_monte[j] = (int *) malloc(bound_monte[j] * sizeof(int));
        assert(sInds_monte[j] != NULL);
    }
    /* lambda */
    lambda = (double *) malloc(NUMFCNS * sizeof(double));
    g_monte_lambda = (double *) malloc(NUMSAMPS * sizeof(double));
    /* eta */
    eta = (double *) malloc(NUMFCNS * sizeof(double));
    g_monte_eta = (double *) malloc(NUMSAMPS * sizeof(double));
    sum_fjexpg_monte_eta = (double *) malloc(NUMFCNS * sizeof(double));
    /* done Monte Carlo samples */
    
    /* start final Monte Carlo sample */
    final_mc = (int **) malloc(NUMVARS * sizeof(int *));
    for(i0 = 0; i0 < NUMVARS; i0++) {
        final_mc[i0] = (int *) malloc(NUMDATA * sizeof(int));
    }
    /* done final Monte Carlo sample */
     
    
    /* start expectations */
    sigma = (int **) malloc(NUMVARS * sizeof(int *));
    for(i0 = 0; i0 < NUMVARS; i0++) {
        sigma[i0] = (int *) malloc(NUMDATA * sizeof(int));
    }
    /* done expectations */

    /* start checks */
    lambda_backup = (double *) malloc(NUMFCNS * sizeof(double));
    expec_algor = (double *) malloc(NUMFCNS * sizeof(double));
    /* done checks */
    
    /* start counters */
    first = (int *) malloc(NUMFCNS * sizeof(int));
    second = (int *) malloc(NUMFCNS * sizeof(int));
    combine = (int **) malloc(NUMVARS * sizeof(int *));
    for(i0 = 0; i0 < NUMVARS; i0++) {
        combine[i0] = (int *) malloc(NUMVARS * sizeof(int));
    }
    /* done counters */
	
    /**********************done allocating space *****************/
    
    /********************** start setting counters **********************/
    
	j = NUMVARS;
    for(i0 = 0; i0 < NUMVARS; i0++) {
        combine[i0][i0] = -1;
        for(i1 = i0 + 1; i1 < NUMVARS; i1++) {
            first[j] = i0;
            second[j] = i1;
            combine[i0][i1] = j;
            combine[i1][i0] = j;
            j++;
        }
    }
    assert(j == NUMFCNS);
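    /* Example with NUMVARS = 3 (NUMFCNS = 6): functions 0..2 are the single
     * variables; combine[0][1] = 3, combine[0][2] = 4, combine[1][2] = 5
     * (symmetrically for the swapped indices), and first[]/second[] hold the
     * pair members, e.g. first[4] = 0, second[4] = 2. */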
    
	/********************** done setting counters **********************/
    /*********************** DONE INITIALIZING ************************/
    
    /*********************************************************************************/
    /***********************START ITERATIVE OPTIMIZATION *****************************/
    /*********************************************************************************/
	
    Z_algor_frac_accum_algor_immed = 1;
    /* set lambdas to specified initial values */
    for(j = 0; j < NUMFCNS; j++) {
        lambda[j] = init_lambda[j];
    }

    /* initialize the sampler */
    initSampling();
    
    /* do Monte Carlo to get expectations with independent parameters */
    initSample(BURNINLENGTH, SEED);
	
    for(mc = 0; mc < NUMDATA; mc++) {
        sample();
    }
    
    /* done Monte Carlo to get expectations */
    
    /* start the clock */
    start_time = (double) clock(); 
    
    /* start iterating over new Monte Carlo runs */
    numruns = 0;
    /* iterate until WriteMetrics() signals an exit condition or NUMRUNS is reached */
    while((stop_me == 0) && (numruns < NUMRUNS)) {
        numruns++;
 
        /* start (re)sampling */
        sample_time = (double) clock(); 
        initSample(BURNINLENGTH, SEED);
        
        for(j = 0; j < NUMFCNS; j++) {
            count_monte[j] = 0;
            bound_monte[j] = 1;
            sum_fjexpg_monte_eta[j] = 0;
            eta[j] = 0;
        }
        
        Z_monte_lambda = 0;
        Z_monte_eta = 0;
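        /* The loop below draws NUMSAMPS states from the sampler.  Each call to
         * sample() returns a value stored as g_monte_lambda[mc] and folded
         * into Z_monte_lambda via exp(); prevSample[] lists the indices of the
         * functions (variables and pairs) active in the state just drawn, so
         * count_monte[j] ends up as the number of samples in which function j
         * fired and sInds_monte[j] records which samples those were.  With eta
         * still zero, every sample adds exp(0) = 1 to Z_monte_eta. */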
 
        for(mc = 0; mc < NUMSAMPS; mc++) {
            g_monte_lambda[mc] = sample();
            Z_monte_lambda += exp(g_monte_lambda[mc]);
            g_monte_eta[mc] = 0;
            Z_monte_eta++;
            
            if(sampleCountFcns > 0) {
                for(j = 0; j < sampleCountFcns; j++) {
                    i0 = prevSample[j];
                    if(count_monte[i0] >= bound_monte[i0]) {
                        bound_monte[i0] *= 2;
                        sInds_monte[i0] = (int *) realloc((void *) (sInds_monte[i0]), bound_monte[i0] * sizeof(int));
                        if(sInds_monte[i0] == NULL) {
                            fprintf(stderr, "Couldn't allocate to sInds_monte[%d], bound=%d, count=%d\n", i0, bound_monte[i0],count_monte[i0]);
                            exit(-1);
                        }
                    }
                    sInds_monte[i0][count_monte[i0]] = mc;
                    count_monte[i0]++;
                    sum_fjexpg_monte_eta[i0]++;
                }
            }
        }
        for(j = 0; j < NUMFCNS; j++) {
            expec_algor[j] = ((double) count_monte[j] / (double) NUMSAMPS);
        }
        sample_time = (double) clock()-sample_time; 
        
        /* start initializing monte checks */
        sum_gexpg_monte_eta = 0;
        sum_energyexpg_monte_eta = 0;
        /* done initializing monte checks */
        
        /* start learning from this Monte Carlo run */
        tsample = 0;
        while(tsample < TCOUNT) {
             
            
            /* start LEARNING ALGORITHM */
            learn_time = (double) clock();
            learn_alg();
            learn_time = (double) clock()-learn_time; 
        
            /* done LEARNING ALGORITHM */
            stop_me=WriteMetrics(trackFile,expec_empir,expec_algor,NUMVARS,numruns,tsample,start_time,sample_time, learn_time,mean_energy_monte_eta,rel_entropy_monte_theta_lambda,exit_conditions);
            sample_time = 0;
            learn_time = 0;
            
            tsample++;
        }
        /* done learning from this Monte Carlo run */
        
        /* start updating the lambdas for next run */
        for(j = 0; j < NUMFCNS; j++) {
            lambda[j] += eta[j];
        }
       
        /* done updating the lambdas for next run */
        
        
        
        Z_algor_frac_accum_algor_immed *= Z_monte_eta / NUMSAMPS;
		sprintf(numrunsString,"%d",numruns);
		strcpy(paramsFileTemp,paramsFile);
		strcat(paramsFileTemp,numrunsString);
		WriteParams(paramsFileTemp,lambda,first,second,NUMVARS,NUMFCNS);
        
        strcpy(paramsFileTemp,trackFile);
        strcat(paramsFileTemp,"Moments");
        strcat(paramsFileTemp,numrunsString);
        WriteParams(paramsFileTemp,expec_algor,first,second,NUMVARS,NUMFCNS);
        
    }
    /* done iterating over new Monte Carlo runs */
    /* DONE WITH ITERATIVE OPTIMIZATION */

    
	fflush(stderr);
    
    /* start comparing expectations */
    /* do Monte Carlo to get expectations with algorithm parameters */
	
    initSample(BURNINLENGTH, SEED);
    
    for(j = 0; j < NUMFCNS; j++) {
        expec_algor[j] = 0;
    }
	
    for(mc = 0; mc < NUMDATA; mc++) {
        for(j = 0; j < NUMVARS; j++) {
            final_mc[j][mc] = 0;
        }
    }
    
    for(mc = 0; mc < NUMDATA; mc++) {
        sample();

        if(sampleCountFcns > 0) {
            for(j = 0; j < sampleCountFcns; j++) {
                expec_algor[prevSample[j]]++;
                if(prevSample[j] < NUMVARS) {
                    final_mc[prevSample[j]][mc] = 1;
                }
            }
        }
    }
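    /* expec_algor[] now holds raw activation counts over the NUMDATA final
     * samples, and final_mc[][] the 0/1 state of each single variable in each
     * sample; the loop below turns the counts into empirical expectations. */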
    
    for(j = 0; j < NUMFCNS; j++) {
        expec_algor[j] /= NUMDATA;
    }
    fflush(stderr);

    /* end Monte Carlo to get expectations */
    /* start parameter output */
	WriteParams(paramsFile,lambda,first,second,NUMVARS,NUMFCNS);
	WriteMC(mcFile,final_mc,NUMVARS,NUMDATA);
	stop_me=WriteMetrics(trackFile,expec_empir,expec_algor,NUMVARS,numruns,tsample,start_time,sample_time, learn_time,mean_energy_monte_eta,rel_entropy_monte_theta_lambda,exit_conditions);
    /* DONE RESULTS */
    /* START FREEING MEMORY */
    fflush(stderr);
    
    endSampling();
	
    free(count_monte);
	/* free(init_lambda); */
    free(bound_monte);
    for(j = 0; j < NUMFCNS; j++) {
        free(sInds_monte[j]);
    }
    free(sInds_monte);
    free(lambda);
    free(g_monte_lambda);
    free(eta);
    free(g_monte_eta);
    free(sum_fjexpg_monte_eta);
    
    for(i0 = 0; i0 < NUMVARS; i0++) {
        free(sigma[i0]);
    }
    free(sigma);
    free(lambda_backup);
    free(expec_algor);
    free(first);
    free(second);
    for(i0 = 0; i0 < NUMVARS; i0++) {
        free(combine[i0]);
    }
    free(combine);
    for(i0 = 0; i0 < NUMVARS; i0++) {
        free(final_mc[i0]);
    }
    free(final_mc);
    /* DONE FREEING MEMORY */
    
    return 0;
}