Example #1
//
// Query context operation output sizes.
//
uint32 Context::getOutputSize(uint32 inputSize, bool encrypt /*= true*/)
{
    CSSM_QUERY_SIZE_DATA data;
    data.SizeInputBlock = inputSize;
    getOutputSize(data, 1, encrypt);
    return data.SizeOutputBlock;
}
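A minimal usage sketch (not from the original source; the helper name and the preconfigured `ctx` instance are assumptions): query the output size up front, then allocate a buffer to match.

#include <vector>
#include <cstdint>

// Hypothetical caller: ask the context how large the output of an
// encrypt operation will be, then size the buffer accordingly.
std::vector<uint8_t> makeOutputBuffer(Context &ctx, uint32 plaintextLen)
{
    uint32 needed = ctx.getOutputSize(plaintextLen, true /*encrypt*/);
    return std::vector<uint8_t>(needed);
}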
Example #2
void ConnSW::_transmit(const Layer *from, Layer *to) const
{
	const LayerSW *sw_from = dynamic_cast<const LayerSW *>(from);
	if(sw_from == nullptr)
		throw Exception("input layer is not derived from LayerSW");
	
	LayerSW *sw_to = dynamic_cast<LayerSW *>(to);
	if(sw_to == nullptr)
		throw Exception("output layer is not derived from LayerSW");
	
	const float *input = sw_from->getOutput().getData();
	float *output = sw_to->getInput().getData();
	const int out_size = getOutputSize();
	const int in_size = getInputSize();
	
	const float *weight = _weight.getData();
	const float *bias = _bias.getData();
	// Dense forward pass: output[j] += sum_i input[i]*weight[j][i] + bias[j],
	// with the weight matrix stored row-major (one row per output neuron).
	for(int j = 0; j < out_size; ++j)
	{
		float sum = 0.0f;
		for(int i = 0; i < in_size; ++i)
		{
			sum += input[i]*weight[in_size*j + i];
		}
		output[j] += sum + bias[j];
	}
}
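The loop above is a plain fully connected forward pass, accumulated into the destination layer. A self-contained sketch of the same arithmetic on plain vectors (illustrative only; not part of the library):

#include <vector>

// Dense forward pass over flat buffers, mirroring ConnSW::_transmit:
// out[j] += sum_i in[i]*w[j*inSize + i] + b[j].
void denseForward(const std::vector<float> &in, std::vector<float> &out,
                  const std::vector<float> &w, const std::vector<float> &b)
{
	const int inSize = int(in.size());
	const int outSize = int(out.size());
	for(int j = 0; j < outSize; ++j)
	{
		float sum = 0.0f;
		for(int i = 0; i < inSize; ++i)
			sum += in[i]*w[j*inSize + i];
		out[j] += sum + b[j]; // accumulates, exactly like the original
	}
}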
Example #3
void ConnSW_BP::_backprop(const Layer *to, const Layer_BP *from)
{
	const LayerSW *to_sw = dynamic_cast<const LayerSW *>(to);
	if(to_sw == nullptr)
		throw Exception("output layer is not derived from LayerSW");
	
	const LayerSW_BP *from_sw = dynamic_cast<const LayerSW_BP *>(from);
	if(from_sw == nullptr)
		throw Exception("input layer is not derived from LayerSW_BP");
	
	const float *input_error = from_sw->getInputError().getData();
	int sx = getInputSize(), sy = getOutputSize();
	
	float *weight_grad = getWeightGrad().getData();
	float *bias_grad = getBiasGrad().getData();
	// The bias gradient is just the error arriving from the next layer.
	for(int i = 0; i < sy; ++i)
	{
		bias_grad[i] = input_error[i];
	}
	
	// The weight gradient accumulates the outer product of the layer's forward
	// output and the incoming error: weight_grad[iy][ix] += output[ix]*error[iy].
	const float *output = to_sw->getOutput().getData();
	for(int iy = 0; iy < sy; ++iy)
	{
		for(int ix = 0; ix < sx; ++ix)
		{
			weight_grad[iy*sx + ix] += output[ix]*input_error[iy];
		}
	}
}
Example #4
void ConnSW_BP::_backprop(Layer_BP *to, const Layer_BP *from)
{
	LayerSW_BP *to_sw = dynamic_cast<LayerSW_BP *>(to);
	if(to_sw == nullptr)
		throw Exception("output layer is not derived from LayerSW_BP");
	
	const LayerSW_BP *from_sw = dynamic_cast<const LayerSW_BP *>(from);
	if(from_sw == nullptr)
		throw Exception("input layer is not derived from LayerSW_BP");
	
	float *output_error = to_sw->getOutputError().getData();
	const float *input_error = from_sw->getInputError().getData();
	const float *weight = getWeight().getData();
	int sx = getInputSize(), sy = getOutputSize();
	// Propagate the error backwards through the transposed weight matrix:
	// output_error[ix] += sum_iy weight[iy][ix]*input_error[iy].
	for(int ix = 0; ix < sx; ++ix)
	{
		for(int iy = 0; iy < sy; ++iy)
		{
			// TODO: fence in opencl to optimize cache
			output_error[ix] += weight[iy*sx + ix]*input_error[iy];
		}
	}
	
	// Also accumulate the weight and bias gradients via the const-Layer overload.
	_backprop(static_cast<const Layer *>(to), from);
}
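Examples #3 and #4 together form standard backpropagation for a dense layer: the gradient pass accumulates the outer product of the forward output and the incoming error, and the error pass multiplies the incoming error by the transposed weight matrix. A minimal sketch of the error pass on flat buffers (illustrative only; not the library's API):

#include <vector>

// Backpropagate error through the transpose of a row-major weight matrix:
// outError[i] += sum_j w[j*inSize + i]*inError[j].
void denseBackpropError(std::vector<float> &outError,
                        const std::vector<float> &inError,
                        const std::vector<float> &w)
{
	const int inSize = int(outError.size());
	const int outSize = int(inError.size());
	for(int i = 0; i < inSize; ++i)
		for(int j = 0; j < outSize; ++j)
			outError[i] += w[j*inSize + i]*inError[j];
}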
Example #5
double MultilayerPerceptron::getCE(const vector<vector<double> > &inputs, const vector<vector<double> > &targets)
{
	size_t sInputs = inputs.size();
	int os = getOutputSize();
	int errCount = 0;
	int n = 0;
	vector<int> yObtained;
	for(size_t p = 0; p < sInputs; p++){
		yObtained = getClasifierOutput(inputs[p],
									   (ot == UnipolarClasifier ? 0.5 : 0),
									   ot);
		for(int element = 0; element < os; element++){
			switch(ot){
				case UnipolarClasifier:
					if(toUnipolar(targets[p], 0.5)[element] != yObtained[element]){
						errCount++;
					}
					break;
				case BipolarClasifier:
					if(toBipolar(targets[p], 0)[element] != yObtained[element]){
						errCount++;
					}
					break;
			}
			n++;
		}
	}

	return double(errCount)/n; // cast before dividing: integer division would truncate
}
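A quick worked case (made-up numbers): with 2 patterns of 3 outputs each, n = 6 decisions; 2 disagreements between the thresholded outputs and the converted targets give a classification error of 2.0/6 ≈ 0.33.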
Example #6
void V3Ntk::buildNtkBdd() {
  // TODO: build BDD for ntk here
  // Perform DFS traversal from DFF inputs, inout, and output gates.
  // Collect ordered nets to a V3NetVec
  // Construct BDDs in the DFS order

  _isBddBuilt = true;
  // Build a BDD for the net driving each latch, primary output, and inout.
  for(unsigned i = 0, n = getLatchSize(); i < n; ++i) {
    const V3NetId& nId = getLatch(i);
    buildBdd(nId);
  }

  for(unsigned i = 0, n = getOutputSize(); i < n; ++i) {
    const V3NetId& nId = getOutput(i);
    buildBdd(nId);
  }

  for(unsigned i = 0, n = getInoutSize(); i < n; ++i) {
    const V3NetId& nId = getInout(i);
    buildBdd(nId);
  }
}
Example #7
double MultilayerPerceptron::getRMSE(const vector<vector<double> > &inputs, const vector<vector<double> > &targets)
{
	size_t nPatterns = inputs.size();
	double pMSE = 0;
	vector<double> yObtained;
	size_t nOutputs = getOutputSize();
	for(size_t p = 0; p < nPatterns; p++){
		yObtained = getOutput(inputs[p]);
		for(size_t neuron = 0; neuron < nOutputs; neuron++){
			pMSE += (targets[p][neuron] - yObtained[neuron])*(targets[p][neuron] - yObtained[neuron]);
		}
	}
	// RMSE: average the squared residuals over all N*M terms, then take the root.
	return sqrt(pMSE/(nPatterns*nOutputs));
}
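For reference, RMSE = sqrt(SSE/(N*M)), where SSE is the accumulated squared error over N patterns and M outputs. A tiny worked check: four residuals of magnitude 1 give SSE = 4, a mean of 1, and an RMSE of 1.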
Example #8
double MultilayerPerceptron::getNewMSE(const vector<vector<vector<double> > > &lweights, const vector<vector<double> > &oweights, const vector<vector<double> > &inputs, const vector<vector<double> > &targets)
{
	size_t nPatterns = inputs.size();
	double pMSE = 0;
	vector<double> yObtained;
	size_t nOutputs = getOutputSize();
	for(size_t p = 0; p < nPatterns; p++){
		yObtained = getAuxOutput(lweights, oweights, inputs[p]);
		for(size_t neuron = 0; neuron < nOutputs; neuron++){
			pMSE += (targets[p][neuron] - yObtained[neuron])*(targets[p][neuron] - yObtained[neuron]);
		}
	}
	// Despite the name, this is the half sum of squared errors (the 1/2
	// simplifies the gradient), not a mean over patterns.
	return pMSE / 2;
}
Example #9
vector<int> MultilayerPerceptron::getClasifierOutput(const vector<double> &inputs, double threshold, ClasifierOutputType cot)
{
	vector<double> yObtained = getOutput(inputs);
    vector<int> out(yObtained.size());
	int os = getOutputSize();
	for(int i = 0; i < os; i++){
		switch(cot){
			case UnipolarClasifier:
				out[i] = (yObtained[i] >= threshold ? 1 : 0);
				break;
			case BipolarClasifier:
				out[i] = (yObtained[i] > threshold ? 1 : -1);
				break;
		}
	}
	return out;
}
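A usage sketch (hypothetical; `mlp` and `pattern` are assumed, not from the original source):

// Threshold sigmoid outputs at 0.5 for a unipolar classifier:
// raw outputs such as (0.91, 0.12, 0.64) become labels (1, 0, 1).
std::vector<int> labels = mlp.getClasifierOutput(pattern, 0.5, UnipolarClasifier);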
Example #10
    //-------------------------------------------------------------------------
    std::unique_ptr<char[]> Generator::write(const NodePtr &onlyThisNode, size_t *outLength) const
    {
      size_t totalSize = getOutputSize(onlyThisNode);

      mGeneratorRoot = onlyThisNode;

      NodePtr root = onlyThisNode->getRoot();
      if (root->isDocument()) {
        mCaseSensitive = root->toDocument()->isElementNameIsCaseSensative();
      }

      std::unique_ptr<char[]> buffer(new char[totalSize+1]);
      char *ioPos = buffer.get();
      *ioPos = 0;

      bool objectOpen = objectObjectCheck(onlyThisNode);

      if (GeneratorMode_JSON == mGeneratorMode) {
        if (objectOpen) {
          plusDepth();
          fill(ioPos, mStrs.mChildComplexOpen);
        }
      }

      Generator::writeBuffer(mThis.lock(), onlyThisNode, ioPos);

      if (GeneratorMode_JSON == mGeneratorMode) {
        if (objectOpen) {
          minusDepth();
          fill(ioPos, mStrs.mChildComplexClose);
        }
      }

      *ioPos = 0;
      if (NULL != outLength)
        *outLength = totalSize;

      mGeneratorRoot.reset();

      ZS_THROW_BAD_STATE_IF(0 != mDepth)

      return buffer;
    }
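A usage sketch grounded in the signature above (`generator` and `node` are assumed instances): the returned text is NUL-terminated, and the length written through outLength excludes the terminator.

// Serialize a single node and retrieve the byte length.
size_t length = 0;
std::unique_ptr<char[]> text = generator->write(node, &length);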
Example #11
size_t SoftmaxLayer::getOutputCount() const
{
    return getOutputSize().product();
}
Example #12
ConnSW::ConnSW() : ConnSW(getID(), getInputSize(), getOutputSize()) {}
Example #13
	ConnSW_BP() : ConnSW_BP(getID(), getInputSize(), getOutputSize()) {}
Example #14
void MultilayerPerceptron::run()
{
	vector<vector<double> >
			inputs = ts->getInputs(),
			targets = ts->getTargets();

	StopCondition
			//BP parameters
			sc = (StopCondition)mlpbpts->getStopParameter();

	double
			//SA parameters
			startCondition = 0,
			To = 0,
			minNoise = 0,
			maxNoise = 0,
			tempDecFactor = 0,
			Tmin = 0,

			//BP parameters
			learningRate = mlpbpts->getLearningRate(),
			MSEmin = mlpbpts->getMinMSE(),
			RMSEmin = mlpbpts->getMinRMSE(),
			CEmin = mlpbpts->getMinCE();
	unsigned int
			//SA parameters
			nChanges = 0,

			//BP parameters
			epochs = mlpbpts->getMaxEpochs();

	if(sa){
		startCondition = mlpsats->getLocalMinimumCondition();
		To = mlpsats->getTo();
		minNoise = mlpsats->getMinNoise();
		maxNoise = mlpsats->getMaxNoise();
		tempDecFactor = mlpsats->getTempDecrementFactor();
		Tmin = mlpsats->getMinTemperature();
		nChanges = mlpsats->getChanges();
	}

	size_t
			nPatterns,
			nNeurons,
			nBOutputs,
			nOutputs;

	vector<double>
			yObtained,
			deltaOut(outputWeights.size(), 0);
	//	vector<vector<double> > deltaHidden(layerWeights.size());
	deltaHidden.resize(layerWeights.size());
	for(size_t i = 0; i < deltaHidden.size(); i++){
		size_t sLayer = layerWeights[i].size();
		deltaHidden[i].resize(sLayer, 0);
	}


	nPatterns = inputs.size();
	int nLayers  = int(layerWeights.size());

	double pMSE;
	//	unsigned long epc;

	double sumDeltas;
	nOutputs = getOutputSize();
	//	MultilayerPerceptron::TrainingResult tr;

	tres->time = 0;
	tres->epochs = 0;

	tres->MSEHistory.clear();
	tres->MSE = getMSE(inputs, targets);
	tres->MSEHistory.push_back(tres->MSE);

	tres->RMSEHistory.clear();
	tres->RMSE = getRMSE(inputs, targets);
	tres->RMSEHistory.push_back(tres->RMSE);

	tres->CEHistory.clear();
	tres->CE = getCE(inputs, targets);
	tres->CEHistory.push_back(tres->CE);

	//	tres.layerWeightsHistory.clear();
	//	tres.layerWeightsHistory.push_back(layerWeights);
	//	tres.outputWeightsHistory.clear();
	//	tres.outputWeightsHistory.push_back(outputWeights);
	vector<vector<double> > layerOutputs;

	long double
			T = 0,
			sumDeltaF = 0,
			deltaF = 0,
			Pa = 0,
			avgDeltaF = 0;
	int c = 0;

	training = true;
	clock_t t_ini = clock();
	do{
		//		tr.MSE = 0;
		//				pMSE = 0;
		for(size_t p = 0; p < nPatterns; p++){

			//Get the outputs of each of the layers
			layerOutputs = getLayerOutputs(inputs[p]);
			yObtained = layerOutputs[layerOutputs.size() - 1];
			for(int layer = nLayers; layer >= 0; layer--){
				nNeurons = (layer == nLayers ? outputWeights.size() : layerWeights[layer].size());
				//				deltaOut = vector<double>(nNeurons, 0);
				for(size_t neuron = 0; neuron <= nNeurons; neuron++){

					//Start computing all the deltas
					if(layer == nLayers){ //If this is the output layer
						if(neuron < nNeurons){
							switch(tf){
								case Sigmoid:
									deltaOut[neuron] = alfa * yObtained[neuron] * (1 - yObtained[neuron]) * (targets[p][neuron] - yObtained[neuron]);
									break;
								case Tanh:
									deltaOut[neuron] = alfa * (1 - (yObtained[neuron]*yObtained[neuron])) * (targets[p][neuron] - yObtained[neuron]);
									break;
							}
						}else{
							continue;
						}
					}else{
						size_t nDeltaElements = (layer == nLayers - 1 ? outputWeights.size() : layerWeights[layer + 1].size());
						sumDeltas = 0;
						for(size_t element = 0; element < nDeltaElements; element++){
							if(layer == nLayers - 1){
								sumDeltas += deltaOut[element] * outputWeights[element][neuron];
							}else{
								sumDeltas += deltaHidden[layer+1][element] * layerWeights[layer+1][element][neuron];
							}
						}

						switch(tf){
							case Sigmoid:
								deltaHidden[layer][neuron] = alfa * layerOutputs[layer][neuron] * (1 - layerOutputs[layer][neuron]) * sumDeltas;
								break;
							case Tanh:
								deltaHidden[layer][neuron] = alfa * (1 - (layerOutputs[layer][neuron]*layerOutputs[layer][neuron])) * sumDeltas;
								break;
						}
					}
				}
			}

			//Begin updating the weights
			for(int layer = nLayers; layer >= 0; layer--){
				nNeurons = (layer == nLayers ? nOutputs : layerWeights[layer].size());
				for(size_t i = 0; i < nNeurons; i++){
					nBOutputs = (layer == 0 ? inputs[p].size() : layerWeights[layer - 1].size());
					for(size_t j = 0; j <= nBOutputs; j++){
						if(layer == nLayers){
							outputWeights[i][j] += (j == nBOutputs ? -learningRate*deltaOut[i] : learningRate*deltaOut[i]*layerOutputs[layer-1][j]);
						}else if(layer == 0){
							layerWeights[layer][i][j] += (j == nBOutputs ?
															  -learningRate*deltaHidden[layer][i] :
															  learningRate*deltaHidden[layer][i]*inputs[p][j]);
						}else{
							layerWeights[layer][i][j] += (j == nBOutputs ? -learningRate*deltaHidden[layer][i] : learningRate*deltaHidden[layer][i]*layerOutputs[layer-1][j]);
						}
					}
				}
			}
		}

		pMSE = getMSE(inputs, targets);

		if(sa){ //if simulated annealing is activated
			deltaF = pMSE - tres->MSE;
			sumDeltaF += deltaF; //accumulate deltaF to compute its average
			c++;
			avgDeltaF = sumDeltaF / c;
		}

		tres->MSE = pMSE;
		tres->MSEHistory.push_back(tres->MSE);

		tres->RMSE = getRMSE(inputs, targets);
		tres->RMSEHistory.push_back(tres->RMSE);

		tres->CE = getCE(inputs, targets);
		tres->CEHistory.push_back(tres->CE);
		//		tr.layerWeightsHistory.push_back(layerWeights);
		//		tr.outputWeightsHistory.push_back(outputWeights);
		//		epc++;

		if(sa){
			if(fabs(avgDeltaF) < startCondition && c > 999){
				//					double avgDeltaF = sumDeltaF / c;
				//					T = avgDeltaF / log(initialAcceptance);
				//                    T = 1 / log(initialAcceptance) * avgDeltaF;
				//                    T = deltaF / log(Pa);
				//                    T = -deltaF;
				//                    T = To;
				T = To;
				double fNew;
				NewState ns;
				//					int n = 0;
				double fOld = tres->MSE;
				double rnd = 0;
				do{
					for(unsigned int i = 0; i < nChanges; i++){
						ns = addNoise(minNoise, maxNoise);
						fNew = getNewMSE(ns.newWeights, ns.newOutputWeights, inputs, targets);
						deltaF = fNew - fOld;
						Pa = exp(-deltaF/T);
						rnd = randomNumber(0,1);
						if(deltaF < 0
						   || rnd < Pa
						   ){
							layerWeights = ns.newWeights;
							outputWeights = ns.newOutputWeights;
							fOld = getMSE(inputs, targets);
						}
					}
					//						T = T / (1 + n);
					T = tempDecFactor*T;
					//						n++;
				}while(T > Tmin);
				c = 0;
				sumDeltaF = 0;
			}
		}

		tres->epochs++;
	}while(((tres->MSE >= MSEmin && sc == MSE) ||
			(tres->RMSE >= RMSEmin && sc == RMSE) ||
			(tres->CE >= CEmin && sc == CE)) &&
		   tres->epochs < epochs &&
		   training);
	training = false;
	tres->time = double(clock() - t_ini)/CLOCKS_PER_SEC;

}
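The simulated-annealing escape in the middle of the loop follows the usual Metropolis rule: a perturbed weight set is always accepted if it lowers the error (deltaF < 0), and otherwise with probability Pa = exp(-deltaF/T); the temperature then decays geometrically (T = tempDecFactor*T) until it falls below Tmin. A standalone sketch of the acceptance step (illustrative; std::rand stands in for the class's randomNumber helper):

#include <cmath>
#include <cstdlib>

// Metropolis acceptance: always take improvements, and take
// regressions with probability exp(-deltaF / T).
bool acceptMove(double deltaF, double T)
{
	if(deltaF < 0) return true; // strict improvement
	double rnd = std::rand()/double(RAND_MAX); // uniform in [0, 1]
	return rnd < std::exp(-deltaF/T);
}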
Example #15
void
V3Ntk::replaceOutput(const uint32_t& index, const V3NetId& id) {
   assert (index < getOutputSize());
   assert (validNetId(id));
   _IOList[1][index] = id;
}
Example #16
size_t CTCDecoderLayer::getOutputCount() const
{
    return getOutputSize().product();
}