size_t ConvolutionalNetworkLayer::addNonLinearToCNN(int visualRow, int visualColumn, int num, size_t type)
{
    shared_ptr<NonLinearComponent> nonLinear(new NonLinearComponent(visualRow, visualColumn, num, type));
    shared_ptr<ComponentNode> node(new ComponentNode(currentId, nonLinear));
    idMap.insert(Component_Pair(currentId, node));
    currentId++;
    nodes.push_back(node);
    return node->getId();
}
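A hypothetical call site for the builder above; the dimensions and the numeric type code are illustrative assumptions, not values from the original project:

// Assumed usage sketch: register a non-linear (activation) component and keep its node id.
ConvolutionalNetworkLayer layer;
size_t nonLinearId = layer.addNonLinearToCNN(/*visualRow*/ 28, /*visualColumn*/ 28,
                                             /*num*/ 16, /*type*/ 0);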
void processAudio(AudioInputBuffer &input, AudioOutputBuffer &output)
{
    float drive = 1 + getParameterValue(PARAMETER_A) * 30;  // get input drive value
    float gain  = getParameterValue(PARAMETER_C) / 2.0;     // get output gain value
    int size = input.getSize();
    float* x = input.getSamples();
    float* y = output.getSamples();
    for (int i = 0; i < size; i++)
        y[i] = gain * clip(nonLinear(x[i] * drive));        // process each sample
}
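These patches call nonLinear() and clip(), which are not defined in the snippets shown. A minimal sketch of plausible helpers (a hard limiter to [-1, 1] and a tanh-style soft clipper; both are assumptions, not the patches' actual definitions) might be:

#include <cmath>

// Assumed helpers for illustration only; the real patches may shape the signal differently.
static float clip(float x)
{
    // hard limit to the [-1, 1] audio range
    if (x > 1.0f) return 1.0f;
    if (x < -1.0f) return -1.0f;
    return x;
}

static float nonLinear(float x)
{
    // smooth saturating waveshaper
    return std::tanh(x);
}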
bool Function::operator()(double &x, double &y, double &r, double &g, double &b)
{
    double ox = x;
    double oy = y;
    // affine transform of the point
    x = (_coefs[0] * ox) + (_coefs[1] * oy) + _coefs[2];
    y = (_coefs[3] * ox) + (_coefs[4] * oy) + _coefs[5];
    // blend the current colour towards this function's colour
    r = (r + _red) / 2;
    g = (g + _green) / 2;
    b = (b + _blue) / 2;
    return nonLinear(x, y);   // apply the non-linear variation to the transformed point
}
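A sketch of how such a Function might be driven, chaos-game style, by repeatedly applying a randomly chosen transform to a point; the iterate() and plot() names are assumptions for illustration, not part of the original project:

#include <cstdlib>
#include <vector>

// Hypothetical sink for accumulated points; stands in for whatever the renderer does.
void plot(double x, double y, double r, double g, double b);

// Illustration only: iterate a randomly chosen Function over the current point.
void iterate(std::vector<Function>& functions, int iterations)
{
    double x = 0.0, y = 0.0;
    double r = 0.0, g = 0.0, b = 0.0;
    for (int i = 0; i < iterations; ++i) {
        Function& f = functions[std::rand() % functions.size()];
        if (f(x, y, r, g, b))     // the bool return presumably signals the point is still usable
            plot(x, y, r, g, b);
    }
}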
void processAudio(AudioBuffer &buffer)
{
    float delayTime, feedback, wetDry, drive;
    delayTime = getParameterValue(PARAMETER_A);
    feedback  = getParameterValue(PARAMETER_B);
    drive     = getParameterValue(PARAMETER_C);
    wetDry    = getParameterValue(PARAMETER_D);
    drive += 0.03;
    drive *= 40;

    int newDelay = delayTime * (delayBuffer.getSize() - 1);
    float* x = buffer.getSamples(0);
    float y = 0;
    int size = buffer.getSize();
    for (int n = 0; n < size; n++) {
        // crossfade from the old delay tap to the new one over the block, then add the input
        y = (delayBuffer.read(delay) * (size - 1 - n) + delayBuffer.read(newDelay) * n) / size + x[n];
        y = nonLinear(y * 1.5);                    // saturate the feedback path
        delayBuffer.write(feedback * y);
        y = nonLinear(y * drive) * 0.25;           // drive the output stage
        x[n] = (y * (1 - wetDry)) + (x[n] * wetDry);   // mix processed and dry signal
    }
    delay = newDelay;
}
void processAudio(AudioBuffer &buffer)
{
    float drive  = getParameterValue(PARAMETER_A);  // get input drive value
    float offset = getParameterValue(PARAMETER_B);  // get offset value
    float gain   = getParameterValue(PARAMETER_D);  // get output gain value
    offset /= 10;
    drive += 0.03;
    drive *= 40;
    gain /= 2;
    int size = buffer.getSize();
    for (int ch = 0; ch < buffer.getChannels(); ++ch) {   // for each channel
        float* buf = buffer.getSamples(ch);
        for (int i = 0; i < size; ++i) {                   // process each sample
            buf[i] = gain * nonLinear((buf[i] + offset) * drive);
        }
    }
}
timeVaryingSolidTractionFvPatchVectorField::
timeVaryingSolidTractionFvPatchVectorField
(
    const fvPatch& p,
    const DimensionedField<vector, volMesh>& iF,
    const dictionary& dict
)
:
    solidTractionFvPatchVectorField(p, iF),
    timeSeries_(dict)
{
    fieldName() = dimensionedInternalField().name();
    traction() = vector::zero;
    pressure() = 0.0;
    nonLinear() = nonLinearGeometry::nonLinearNames_.read(dict.lookup("nonLinear"));

    //- the leastSquares scheme has zero non-orthogonal correction on the boundary,
    //- so the gradient scheme should be extendedLeastSquares
    if
    (
        Foam::word
        (
            dimensionedInternalField().mesh().schemesDict().gradScheme
            (
                "grad(" + fieldName() + ")"
            )
        ) != "extendedLeastSquares"
    )
    {
        Warning << "The gradScheme for " << fieldName()
            << " should be \"extendedLeastSquares 0\" for the boundary "
            << "non-orthogonal correction to be right" << endl;
    }
}
void HeuristicInnerApproximation::extractInnerApproximation(Bonmin::OsiTMINLPInterface & nlp,
    OsiSolverInterface &si, const double * x, bool getObj)
{
    printf("************ Start extracting inner approx");
    int n;
    int m;
    int nnz_jac_g;
    int nnz_h_lag;
    Ipopt::TNLP::IndexStyleEnum index_style;
    Bonmin::TMINLP2TNLP * problem = nlp.problem();
    // Get problem information
    problem->get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style);

    Bonmin::vector<int> jRow(nnz_jac_g);
    Bonmin::vector<int> jCol(nnz_jac_g);
    Bonmin::vector<double> jValues(nnz_jac_g);
    problem->eval_jac_g(n, NULL, 0, m, nnz_jac_g, jRow(), jCol(), NULL);
    if (index_style == Ipopt::TNLP::FORTRAN_STYLE) // put C-style
    {
        for (int i = 0; i < nnz_jac_g; i++) {
            jRow[i]--;
            jCol[i]--;
        }
    }

    // get the Jacobian
    problem->eval_jac_g(n, x, 1, m, nnz_jac_g, NULL, NULL, jValues());

    Bonmin::vector<double> g(m);
    problem->eval_g(n, x, 1, m, g());

    Bonmin::vector<int> nonLinear(m);
    // store the non-linear constraints (which are to be removed from the IA)
    int numNonLinear = 0;
    const double * rowLower = nlp.getRowLower();
    const double * rowUpper = nlp.getRowUpper();
    const double * colLower = nlp.getColLower();
    const double * colUpper = nlp.getColUpper();
    assert(m == nlp.getNumRows());
    double infty = si.getInfinity();
    double nlp_infty = nlp.getInfinity();
    Bonmin::vector<Ipopt::TNLP::LinearityType> constTypes(m);
    Bonmin::vector<Ipopt::TNLP::LinearityType> varTypes(n);
    problem->get_constraints_linearity(m, constTypes());
    problem->get_variables_linearity(n, varTypes());
    for (int i = 0; i < m; i++) {
        if (constTypes[i] == Ipopt::TNLP::NON_LINEAR) {
            nonLinear[numNonLinear++] = i;
        }
    }

    Bonmin::vector<double> rowLow(m - numNonLinear);
    Bonmin::vector<double> rowUp(m - numNonLinear);
    int ind = 0;
    for (int i = 0; i < m; i++) {
        if (constTypes[i] != Ipopt::TNLP::NON_LINEAR) {
            if (rowLower[i] > -nlp_infty) {
                // printf("Lower %g ", rowLower[i]);
                rowLow[ind] = rowLower[i];
            }
            else
                rowLow[ind] = -infty;
            if (rowUpper[i] < nlp_infty) {
                // printf("Upper %g ", rowUpper[i]);
                rowUp[ind] = rowUpper[i];
            }
            else
                rowUp[ind] = infty;
            ind++;
        }
    }

    CoinPackedMatrix mat(true, jRow(), jCol(), jValues(), nnz_jac_g);
    mat.setDimensions(m, n); // In case the matrix was empty, this should be enough

    // remove the non-linear constraints
    mat.deleteRows(numNonLinear, nonLinear());

    int numcols = nlp.getNumCols();
    Bonmin::vector<double> obj(numcols);
    for (int i = 0; i < numcols; i++)
        obj[i] = 0.;

    si.loadProblem(mat, nlp.getColLower(), nlp.getColUpper(), obj(), rowLow(), rowUp());
    const Bonmin::TMINLP::VariableType* variableType = problem->var_types();
    for (int i = 0; i < n; i++) {
        if ((variableType[i] == Bonmin::TMINLP::BINARY) || (variableType[i] == Bonmin::TMINLP::INTEGER))
            si.setInteger(i);
    }

    if (getObj) {
        bool addObjVar = false;
        if (problem->hasLinearObjective()) {
            double zero;
            Bonmin::vector<double> x0(n, 0.);
            problem->eval_f(n, x0(), 1, zero);
            si.setDblParam(OsiObjOffset, -zero);
            // Copy the linear objective and don't create a dummy variable.
            problem->eval_grad_f(n, x, 1, obj());
            si.setObjective(obj());
        }
        else {
            addObjVar = true;
        }

        if (addObjVar) {
            nlp.addObjectiveFunction(si, x);
        }
    }

    // Hassan IA initial description
    int InnerDesc = 1;
    if (InnerDesc == 1) {
        OsiCuts cs;

        double * p  = CoinCopyOfArray(colLower, n);
        double * pp = CoinCopyOfArray(colLower, n);
        double * up = CoinCopyOfArray(colUpper, n);

        for (int i = 0; i < n; i++) {
            if (p[i] < -1e3) {
                p[i] = pp[i] = -1e3;
            }
            if (up[i] > 1e2) {
                up[i] = 1e2;
            }
        }

        const int& nbAp = nbAp_;
        printf("Generating approximation with %i points.\n", nbAp);

        std::vector<double> step(n);
        int n_lin = 0;

        for (int i = 0; i < n; i++) {
            // if ((variableType[i] == Bonmin::TMINLP::BINARY) || (variableType[i] == Bonmin::TMINLP::INTEGER)) {
            if (varTypes[i] == Ipopt::TNLP::LINEAR) {
                n_lin++;
                step[i] = 0;
                p[i] = pp[i] = up[i] = 0;
            }
            else {
                step[i] = (up[i] - p[i]) / nbAp;
            }
        }
        printf("Number of linears %i\n", n_lin);

        for (int j = 1; j < nbAp; j++) {
            for (int i = 0; i < n; i++) {
                pp[i] += step[i];
            }
            for (int i = 0; i < m; i++) {
                if (constTypes[i] == Ipopt::TNLP::LINEAR) continue;
                bool status = getMyInnerApproximation(nlp, cs, i, p, pp); // Generate a chord connecting the two points
                if (status == false) {
                    printf("Error in generating inner approximation\n");
                    exit(1);
                }
            }
            std::copy(pp, pp + n, p);
        }

        for (int i = 0; i < m; i++) {
            if (constTypes[i] == Ipopt::TNLP::LINEAR) continue;
            getMyInnerApproximation(nlp, cs, i, p, up); // Generate a chord connecting the two points
        }

        delete [] p;
        delete [] pp;
        delete [] up;
        si.applyCuts(cs);
    }
    printf("************ Done extracting inner approx ********");
}
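getMyInnerApproximation is called above but not shown; according to its comment it generates a chord connecting two points of a nonlinear constraint. A minimal, standalone sketch of that chord idea for a scalar function, independent of the Bonmin/Osi types above (all names here are chosen for illustration), could look like this:

#include <cstdio>

// Illustration only, not Bonmin's actual routine: the chord (secant) of a scalar
// function g between points a and b. For a convex g the chord lies above the graph
// on [a, b], so requiring chord(x) <= ub is tighter than g(x) <= ub there, which is
// what makes a collection of such chords an inner approximation of the feasible set.
struct Chord {
    double slope;
    double offset;   // chord value at x = 0
};

Chord chordOf(double (*g)(double), double a, double b)
{
    Chord c;
    double ga = g(a);
    double gb = g(b);
    c.slope  = (gb - ga) / (b - a);
    c.offset = ga - c.slope * a;
    return c;
}

static double square(double t) { return t * t; }

int main()
{
    Chord c = chordOf(square, 0.0, 2.0);                    // chord of t^2 between 0 and 2
    std::printf("cut: %g * x + %g\n", c.slope, c.offset);   // prints "cut: 2 * x + 0"
    return 0;
}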
void HeuristicInnerApproximation::extractInnerApproximation(OsiTMINLPInterface & nlp,
    OsiSolverInterface &si, const double * x, bool getObj)
{
    int n;
    int m;
    int nnz_jac_g;
    int nnz_h_lag;
    Ipopt::TNLP::IndexStyleEnum index_style;
    TMINLP2TNLP * problem = nlp.problem();
    // Get problem information
    problem->get_nlp_info(n, m, nnz_jac_g, nnz_h_lag, index_style);

    vector<int> jRow(nnz_jac_g);
    vector<int> jCol(nnz_jac_g);
    vector<double> jValues(nnz_jac_g);
    problem->eval_jac_g(n, NULL, 0, m, nnz_jac_g, jRow(), jCol(), NULL);
    if (index_style == Ipopt::TNLP::FORTRAN_STYLE) // put C-style
    {
        for (int i = 0; i < nnz_jac_g; i++) {
            jRow[i]--;
            jCol[i]--;
        }
    }

    // get the Jacobian
    problem->eval_jac_g(n, x, 1, m, nnz_jac_g, NULL, NULL, jValues());

    vector<double> g(m);
    problem->eval_g(n, x, 1, m, g());

    vector<int> nonLinear(m);
    // store the non-linear constraints (which are to be removed from the IA)
    int numNonLinear = 0;
    const double * rowLower = nlp.getRowLower();
    const double * rowUpper = nlp.getRowUpper();
    const double * colLower = nlp.getColLower();
    const double * colUpper = nlp.getColUpper();
    assert(m == nlp.getNumRows());
    double infty = si.getInfinity();
    double nlp_infty = nlp.getInfinity();
    vector<Ipopt::TNLP::LinearityType> constTypes(m);
    problem->get_constraints_linearity(m, constTypes());
    for (int i = 0; i < m; i++) {
        if (constTypes[i] == Ipopt::TNLP::NON_LINEAR) {
            nonLinear[numNonLinear++] = i;
        }
    }

    vector<double> rowLow(m - numNonLinear);
    vector<double> rowUp(m - numNonLinear);
    int ind = 0;
    for (int i = 0; i < m; i++) {
        if (constTypes[i] != Ipopt::TNLP::NON_LINEAR) {
            if (rowLower[i] > -nlp_infty) {
                // printf("Lower %g ", rowLower[i]);
                rowLow[ind] = rowLower[i];
            }
            else
                rowLow[ind] = -infty;
            if (rowUpper[i] < nlp_infty) {
                // printf("Upper %g ", rowUpper[i]);
                rowUp[ind] = rowUpper[i];
            }
            else
                rowUp[ind] = infty;
            ind++;
        }
    }

    CoinPackedMatrix mat(true, jRow(), jCol(), jValues(), nnz_jac_g);
    mat.setDimensions(m, n); // In case the matrix was empty, this should be enough

    // remove the non-linear constraints
    mat.deleteRows(numNonLinear, nonLinear());

    int numcols = nlp.getNumCols();
    vector<double> obj(numcols);
    for (int i = 0; i < numcols; i++)
        obj[i] = 0.;

    si.loadProblem(mat, nlp.getColLower(), nlp.getColUpper(), obj(), rowLow(), rowUp());
    const Bonmin::TMINLP::VariableType* variableType = problem->var_types();
    for (int i = 0; i < n; i++) {
        if ((variableType[i] == TMINLP::BINARY) || (variableType[i] == TMINLP::INTEGER))
            si.setInteger(i);
    }

    if (getObj) {
        bool addObjVar = false;
        if (problem->hasLinearObjective()) {
            double zero;
            vector<double> x0(n, 0.);
            problem->eval_f(n, x0(), 1, zero);
            si.setDblParam(OsiObjOffset, -zero);
            // Copy the linear objective and don't create a dummy variable.
            problem->eval_grad_f(n, x, 1, obj());
            si.setObjective(obj());
        }
        else {
            addObjVar = true;
        }

        if (addObjVar) {
            nlp.addObjectiveFunction(si, x);
        }
    }

    // Hassan IA initial description
    int InnerDesc = 1;
    if (InnerDesc == 1) {
        OsiCuts cs;

        double * p  = CoinCopyOfArray(colLower, n);
        double * pp = CoinCopyOfArray(colLower, n);
        double * up = CoinCopyOfArray(colUpper, n);

        const int& nbAp = nbAp_;
        std::vector<int> nbG(m, 0);   // number of generated points for each nonlinear constraint
        std::vector<double> step(n);

        for (int i = 0; i < n; i++) {
            if (colUpper[i] > 1e08) {
                up[i] = 0;
            }

            if (colUpper[i] > 1e08 || colLower[i] < -1e08
                || (variableType[i] == TMINLP::BINARY) || (variableType[i] == TMINLP::INTEGER)) {
                step[i] = 0;
            }
            else
                step[i] = (up[i] - colLower[i]) / nbAp;

            if (colLower[i] < -1e08) {
                p[i] = 0;
                pp[i] = 0;
            }
        }

        vector<double> g_p(m);
        vector<double> g_pp(m);

        for (int j = 1; j <= nbAp; j++) {
            for (int i = 0; i < n; i++) {
                pp[i] += step[i];
            }

            problem->eval_g(n, p, 1, m, g_p());
            problem->eval_g(n, pp, 1, m, g_pp());
            double diff = 0;
            int varInd = 0;
            for (int i = 0; (i < m && constTypes[i] == Ipopt::TNLP::NON_LINEAR); i++) {
                if (varInd == n - 1)
                    varInd = 0;
                diff = std::abs(g_p[i] - g_pp[i]);
                if (nbG[i] < nbAp - 1) {
                    getMyInnerApproximation(nlp, cs, i, p, pp); // Generate a chord connecting the two points
                    p[varInd] = pp[varInd];
                    nbG[i]++;
                }
                varInd++;
            }
        }

        for (int i = 0; (i < m && constTypes[i] == Ipopt::TNLP::NON_LINEAR); i++) {
            // getConstraintOuterApproximation(cs, i, colUpper, NULL, true); // Generate tangents at the current point
            getMyInnerApproximation(nlp, cs, i, p, up); // Generate a chord connecting the two points
        }

        delete [] p;
        delete [] pp;
        delete [] up;
        si.applyCuts(cs);
    }
}