/*
 * Start transmission...
 *
 */
void nRF24l01plus::startPTX()
{
    if(getCE() == false || isPWRUP() == false)return;
    if(isRX_MODE())return;
    tMsgFrame * packetToSend = getTXpacket();
    if(packetToSend == NULL)return;
    //found a packet in TX that is not an ACK packet
    //load with TX address
    packetToSend->Address = getTXaddress();
    emit sendMsgToET((tMsgFrame*)packetToSend);
    //check if an ACK is expected (NO_ACK flag not set and auto-retransmit enabled)
    if((packetToSend->Packet_Control_Field.NP_ACK == 0) && (getARC()!=0))
    {
        //set up for ACK receipt
        ACK_address = getTXaddress();
        waitingForACK = true;
        clearARC_CNT();
        theTimer->start(getARD()*10);
        TXpacket = packetToSend;
    }
    else
    {//no ACK expected; this packet becomes the last transmitted
        if(lastTransmited != NULL)
        {
            delete lastTransmited;
            lastTransmited = NULL;
        }
        lastTransmited = packetToSend;
        setTX_DS_IRQ();
    }
}
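/*
 * Hypothetical sketch only: the timer started in startPTX() implies a retransmit
 * path when no ACK arrives in time. The names ackTimeout(), getARC_CNT(),
 * incrementARC_CNT() and setMAX_RT_IRQ() are assumptions for illustration and
 * are not taken from the original class.
 */
void nRF24l01plus::ackTimeout()
{
    if(waitingForACK == false)return;
    if(getARC_CNT() < getARC())
    {   //retries left: resend the stored packet and restart the ACK timer
        incrementARC_CNT();
        emit sendMsgToET(TXpacket);
        theTimer->start(getARD()*10);
    }
    else
    {   //retry limit reached: raise MAX_RT and stop waiting for an ACK
        setMAX_RT_IRQ();
        waitingForACK = false;
    }
}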
void nRF24l01plus::reciveMsgFromET(tMsgFrame *theMSG)
{
    if(!coalision)
    {//collision did not happen
        if(isPWRUP() && getCE())
        {//in Standby mode (PWRUP = 1 && CE = 1)
            byte pipe = addressToPype(theMSG->Address);
            if(isRX_MODE())
            {//receiving
             //check if the address matches one of the open pipes
                if(pipe != 0xFF)
                {//pipe is open, ready to receive
                    //fill RX buffer
                    receve_frame(theMSG,pipe);
                }
            }
            else if(waitingForACK)
            {//waiting for ack
                if(pipe == addressToPype(getTXaddress()))
                {//address is the P0 address, this is the ACK packet
                    ackReceved(theMSG,pipe);
                }
            }
        }
    }
    coalision = false;
}
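/*
 * addressToPype() is not part of this excerpt; the logic above only relies on it
 * returning the number of the matching open pipe, or 0xFF when nothing matches.
 * A standalone sketch of that contract is shown below; the function name,
 * parameter types and the list of open-pipe addresses are illustrative
 * assumptions, not taken from the original class.
 */
static byte matchOpenPipe(uint64_t address, const std::vector<uint64_t> &openPipeAddresses)
{
    for(size_t pipe = 0; pipe < openPipeAddresses.size(); pipe++)
    {   //first open pipe whose address matches wins
        if(openPipeAddresses[pipe] == address) return byte(pipe);
    }
    return 0xFF;//sentinel checked in reciveMsgFromET()
}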
void nRF24l01plus::PWRUPset()
{
    if(getCE() == false || isRX_MODE() == true)return;
    startPTX();
}
void nRF24l01plus::TXpacketAdded()
{
    if(getCE() == false || isRX_MODE() == true || isPWRUP() == false) return;
    startPTX();
}
/*
 * If TX mode is set while CE is high and there is a packet waiting, transmit the packet
 *
 */
void nRF24l01plus::TXmodeSet()
{
    if(getCE() == false || isPWRUP() == false) return;
    startPTX();

}
Example #6
void MultilayerPerceptron::run()
{
	vector<vector<double> >
			inputs = ts->getInputs(),
			targets = ts->getTargets();

	StopCondition
			//BP parameters
			sc = (StopCondition)mlpbpts->getStopParameter();

	double
			//SA parameters
			startCondition = 0,
			To = 0,
			minNoise = 0,
			maxNoise = 0,
			tempDecFactor = 0,
			Tmin = 0,

			//BP parameters
			learningRate = mlpbpts->getLearningRate(),
			MSEmin = mlpbpts->getMinMSE(),
			RMSEmin = mlpbpts->getMinRMSE(),
			CEmin = mlpbpts->getMinCE();
	unsigned int
			//SA parameters
			nChanges = 0,

			//BP parameters
			epochs = mlpbpts->getMaxEpochs();

	if(sa){
		startCondition = mlpsats->getLocalMinimumCondition();
		To = mlpsats->getTo();
		minNoise = mlpsats->getMinNoise();
		maxNoise = mlpsats->getMaxNoise();
		tempDecFactor = mlpsats->getTempDecrementFactor();
		Tmin = mlpsats->getMinTemperature();
		nChanges = mlpsats->getChanges();
	}

	size_t
			nPatterns,
			nNeurons,
			nBOutputs,
			nOutputs;

	vector<double>
			yObtained,
			deltaOut(outputWeights.size(), 0);
	//	vector<vector<double> > deltaHidden(layerWeights.size());
	deltaHidden.resize(layerWeights.size());
	for(size_t i = 0; i < deltaHidden.size(); i++){
		size_t sLayer = layerWeights[i].size();
		deltaHidden[i].resize(sLayer, 0);
	}


	nPatterns = inputs.size();
	int nLayers  = int(layerWeights.size());

	double pMSE;
	//	unsigned long epc;

	double sumDeltas;
	nOutputs = getOutputSize();
	//	MultilayerPerceptron::TrainingResult tr;

	tres->time = 0;
	tres->epochs = 0;

	tres->MSEHistory.clear();
	tres->MSE = getMSE(inputs, targets);
	tres->MSEHistory.push_back(tres->MSE);

	tres->RMSEHistory.clear();
	tres->RMSE = getRMSE(inputs, targets);
	tres->RMSEHistory.push_back(tres->RMSE);

	tres->CEHistory.clear();
	tres->CE = getCE(inputs, targets);
	tres->CEHistory.push_back(tres->CE);

	//	tres.layerWeightsHistory.clear();
	//	tres.layerWeightsHistory.push_back(layerWeights);
	//	tres.outputWeightsHistory.clear();
	//	tres.outputWeightsHistory.push_back(outputWeights);
	vector<vector<double> > layerOutputs;

	long double
			T = 0,
			sumDeltaF = 0,
			deltaF = 0,
			Pa = 0,
			avgDeltaF = 0;
	int c = 0;

	training = true;
	clock_t t_ini = clock();
	do{
		//		tr.MSE = 0;
		//				pMSE = 0;
		for(size_t p = 0; p < nPatterns; p++){

			//Get the outputs of every layer
			layerOutputs = getLayerOutputs(inputs[p]);
			yObtained = layerOutputs[layerOutputs.size() - 1];
			for(int layer = nLayers; layer >= 0; layer--){
				nNeurons = (layer == nLayers ? outputWeights.size() : layerWeights[layer].size());
				//				deltaOut = vector<double>(nNeurons, 0);
				for(size_t neuron = 0; neuron < nNeurons; neuron++){

					//Compute all the deltas
					if(layer == nLayers){ //If this is the output layer
						if(neuron < nNeurons){
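							//Output-layer delta: activation derivative times the output error (sigmoid: alfa*y*(1-y)*(t-y); tanh: alfa*(1-y*y)*(t-y))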
							switch(tf){
								case Sigmoid:
									deltaOut[neuron] = alfa * yObtained[neuron] * (1 - yObtained[neuron]) * (targets[p][neuron] - yObtained[neuron]);
									break;
								case Tanh:
									deltaOut[neuron] = alfa * (1 - (yObtained[neuron]*yObtained[neuron])) * (targets[p][neuron] - yObtained[neuron]);
									break;
							}
						}else{
							continue;
						}
					}else{
						size_t nDeltaElements = (layer == nLayers - 1 ? outputWeights.size() : layerWeights[layer + 1].size());
						sumDeltas = 0;
						for(size_t element = 0; element < nDeltaElements; element++){
							if(layer == nLayers - 1){
								sumDeltas += deltaOut[element] * outputWeights[element][neuron];
							}else{
								sumDeltas += deltaHidden[layer+1][element] * layerWeights[layer+1][element][neuron];
							}
						}
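						//Hidden-layer delta: activation derivative times the weight-summed deltas back-propagated from the layer above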

						switch(tf){
							case Sigmoid:
								deltaHidden[layer][neuron] = alfa * layerOutputs[layer][neuron] * (1 - layerOutputs[layer][neuron]) * sumDeltas;
								break;
							case Tanh:
								deltaHidden[layer][neuron] = alfa * (1 - (layerOutputs[layer][neuron]*layerOutputs[layer][neuron])) * sumDeltas;
								break;
						}
					}
				}
			}

			//Update the weights
			for(int layer = nLayers; layer >= 0; layer--){
				nNeurons = (layer == nLayers ? nOutputs : layerWeights[layer].size());
				for(size_t i = 0; i < nNeurons; i++){
					nBOutputs = (layer == 0 ? inputs[p].size() : layerWeights[layer - 1].size());
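					//j == nBOutputs selects the bias weight; its update uses an implicit bias input of -1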
					for(size_t j = 0; j <= nBOutputs; j++){
						if(layer == nLayers){
							outputWeights[i][j] += (j == nBOutputs ? -learningRate*deltaOut[i] : learningRate*deltaOut[i]*layerOutputs[layer-1][j]);
						}else if(layer == 0){
							layerWeights[layer][i][j] += (j == nBOutputs ?
															  -learningRate*deltaHidden[layer][i] :
															  learningRate*deltaHidden[layer][i]*inputs[p][j]);
						}else{
							layerWeights[layer][i][j] += (j == nBOutputs ? -learningRate*deltaHidden[layer][i] : learningRate*deltaHidden[layer][i]*layerOutputs[layer-1][j]);
						}
					}
				}
			}
		}

		pMSE = getMSE(inputs, targets);

		if(sa){//if simulated annealing is activated
			deltaF = pMSE - tres->MSE;
			sumDeltaF += deltaF; // accumulate deltaF to compute its running average
			c++;
			avgDeltaF = sumDeltaF / c;
		}

		tres->MSE = pMSE;
		tres->MSEHistory.push_back(tres->MSE);

		tres->RMSE = getRMSE(inputs, targets);
		tres->RMSEHistory.push_back(tres->RMSE);

		tres->CE = getCE(inputs, targets);
		tres->CEHistory.push_back(tres->CE);
		//		tr.layerWeightsHistory.push_back(layerWeights);
		//		tr.outputWeightsHistory.push_back(outputWeights);
		//		epc++;

		if(sa){
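			//trigger annealing only once at least 1000 epochs have been averaged and the mean MSE change has stalled below startCondition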
			if(fabs(avgDeltaF) < startCondition && c > 999){
				//					double avgDeltaF = sumDeltaF / c;
				//					T = avgDeltaF / log(initialAcceptance);
				//                    T = 1 / log(initialAcceptance) * avgDeltaF;
				//                    T = deltaF / log(Pa);
				//                    T = -deltaF;
				//                    T = To;
				T = To;
				double fNew;
				NewState ns;
				//					int n = 0;
				double fOld = tres->MSE;
				double rnd = 0;
				do{
					for(unsigned int i = 0; i < nChanges; i++){
						ns = addNoise(minNoise, maxNoise);
						fNew = getNewMSE(ns.newWeights, ns.newOutputWeights, inputs, targets);
						deltaF = fNew - fOld;
						Pa = exp(-deltaF/T);
						rnd = randomNumber(0,1);
						if(deltaF < 0
						   || rnd < Pa
						   ){
							layerWeights = ns.newWeights;
							outputWeights = ns.newOutputWeights;
							fOld = getMSE(inputs, targets);
						}
					}
					//						T = T / (1 + n);
					T = tempDecFactor*T;
					//						n++;
				}while(T > Tmin);
				c = 0;
				sumDeltaF = 0;
			}
		}

		tres->epochs++;
	}while(((tres->MSE >= MSEmin && sc == MSE) ||
			(tres->RMSE >= RMSEmin && sc == RMSE) ||
			(tres->CE >= CEmin && sc == CE)) &&
		   tres->epochs < epochs &&
		   training);
	training = false;
	tres->time = double(clock() - t_ini)/CLOCKS_PER_SEC;

}
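/*
 * The simulated-annealing branch above accepts a perturbed weight set whenever it
 * lowers the MSE, or with probability Pa = exp(-deltaF/T) when it raises it, and
 * then cools T geometrically by tempDecFactor. A self-contained toy sketch of that
 * Metropolis acceptance loop is shown below; the cost function and all names are
 * illustrative assumptions, not taken from the class above.
 */
#include <cmath>
#include <cstdlib>
#include <iostream>

static double cost(double x) { return (x - 3.0)*(x - 3.0); }        //toy 1-D "MSE", minimum at x = 3
static double uniform01() { return std::rand()/double(RAND_MAX); }  //stand-in for randomNumber(0,1)

int main()
{
	double x = 10.0;                  //current state (stands in for the weight set)
	double fOld = cost(x);
	double T = 1.0;                   //To
	const double Tmin = 1e-3;         //minimum temperature
	const double tempDecFactor = 0.9;
	const int nChanges = 50;          //perturbations tried per temperature step

	do{
		for(int i = 0; i < nChanges; i++){
			double xNew = x + (uniform01() - 0.5);   //addNoise() stand-in
			double fNew = cost(xNew);
			double deltaF = fNew - fOld;
			double Pa = std::exp(-deltaF/T);
			//Metropolis criterion: always accept improvements, sometimes accept worse states
			if(deltaF < 0 || uniform01() < Pa){
				x = xNew;
				fOld = fNew;
			}
		}
		T = tempDecFactor*T;          //geometric cooling, as in the loop above
	}while(T > Tmin);

	std::cout << "final x = " << x << ", cost = " << fOld << std::endl;
	return 0;
}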