/** * ANNWrapper::trainArtificialNeuroNets * @brief trains all artificial neuro nets inside this class */ void ANNWrapper::trainArtificialNeuroNets() { log << SLevel(INFO) << "Successfully trained Artificial Neuro Net for Temperature 2m" << endl; vector< vector<double> > inputValues; vector<double> expectedOutputValues; // define training parameter // measurements to use for prediction vector<string> predictorList = {"Lufttemperatur_2m"}; // measurement to predict string predictant = "Lufttemperatur_2m"; // dataSources to use for prediction vector<string> dataSourceList = {"WeatherStation","Forecast"}; // amount of previous values to consider int predictionWindowSize = 3; // yes, generate datasets for training, not for evaluating bool trainingDataSet = true; // generate input and expected output data generateDataSets(&inputValues,&expectedOutputValues,predictorList,predictant,dataSourceList,predictionWindowSize,trainingDataSet); // normalize and scale data-sets inputValues = zTransformVector(inputValues); inputValues = scaleVector(inputValues,SCALING_FACTOR,true); expectedOutputValues = zTransformVector(expectedOutputValues); expectedOutputValues = scaleVector(expectedOutputValues,SCALING_FACTOR,true); // start training ANNTemperature.train(inputValues,expectedOutputValues); /* --- ANN for Pressure --- */ // define training parameter // measurements to use for prediction predictorList = {"Luftdruck_2m"}; // measurement to predict predictant = "Luftdruck_2m"; // dataSources to use for prediction dataSourceList = {"WeatherStation","Forecast"}; // amount of previous values to consider predictionWindowSize = 3; // yes, generate datasets for training, not for evaluating trainingDataSet = true; // generate input and expected output data generateDataSets(&inputValues,&expectedOutputValues,predictorList,predictant,dataSourceList,predictionWindowSize,trainingDataSet); // normalize and scale data-sets inputValues = zTransformVector(inputValues); inputValues = 
scaleVector(inputValues,SCALING_FACTOR,true); expectedOutputValues = zTransformVector(expectedOutputValues); expectedOutputValues = scaleVector(expectedOutputValues,SCALING_FACTOR,true); // start training ANNAirPressure.train(inputValues,expectedOutputValues); }
/**
 * Generate (or regenerate) the cached midamble correlation sequence for a
 * training sequence code.
 *
 * @param gsmPulse         pulse shape used to modulate the full midamble
 * @param samplesPerSymbol oversampling factor
 * @param TSC              training sequence code, must be in [0,7]
 * @return true on success, false on invalid TSC or modulation/correlation failure
 */
bool generateMidamble(signalVector &gsmPulse, int samplesPerSymbol, int TSC)
{
  if ((TSC < 0) || (TSC > 7)) return false;

  if (gMidambles[TSC]) {
    if (gMidambles[TSC]->sequence!=NULL) delete gMidambles[TSC]->sequence;
    if (gMidambles[TSC]->sequenceReversedConjugated!=NULL) delete gMidambles[TSC]->sequenceReversedConjugated;
    // BUGFIX: the CorrelationSequence object itself used to leak here; it was
    // replaced by a fresh `new` below without ever being freed.
    delete gMidambles[TSC];
    gMidambles[TSC] = NULL;
  }

  signalVector emptyPulse(1);
  *(emptyPulse.begin()) = 1.0;

  // only use middle 16 bits of each TSC
  signalVector *middleMidamble = modulateBurst(gTrainingSequence[TSC].segment(5,16),
                                               emptyPulse,
                                               0,
                                               samplesPerSymbol);
  signalVector *midamble = modulateBurst(gTrainingSequence[TSC],
                                         gsmPulse,
                                         0,
                                         samplesPerSymbol);
  if ((midamble == NULL) || (middleMidamble == NULL)) {
    // BUGFIX: free whichever vector was successfully allocated; the old code
    // leaked one of them on these early-return paths. delete NULL is a no-op.
    delete midamble;
    delete middleMidamble;
    return false;
  }

  // NOTE: Because ideal TSC 16-bit midamble is 66 symbols into burst,
  //       the ideal TSC has an + 180 degree phase shift,
  //       due to the pi/2 frequency shift, that
  //       needs to be accounted for.
  //       26-midamble is 61 symbols into burst, has +90 degree phase shift.
  scaleVector(*middleMidamble,complex(-1.0,0.0));
  scaleVector(*midamble,complex(0.0,1.0));

  signalVector *autocorr = correlate(midamble,middleMidamble,NULL,NO_DELAY);
  if (autocorr == NULL) {
    // BUGFIX: both modulated vectors used to leak on this failure path.
    delete midamble;
    delete middleMidamble;
    return false;
  }

  gMidambles[TSC] = new CorrelationSequence;
  gMidambles[TSC]->sequence = middleMidamble;
  gMidambles[TSC]->sequenceReversedConjugated = reverseConjugate(middleMidamble);
  gMidambles[TSC]->gain = peakDetect(*autocorr,&gMidambles[TSC]->TOA,NULL);

  LOG(DEBUG) << "midamble autocorr: " << *autocorr;
  LOG(DEBUG) << "TOA: " << gMidambles[TSC]->TOA;

  //gMidambles[TSC]->TOA -= 5*samplesPerSymbol;
  delete autocorr;
  delete midamble;

  return true;
}
// Midpoint (second-order Runge-Kutta) integration step; advances ps.t by tstep.
// Uses the member scratch vectors temp1..temp3; statement order is significant.
void ParticleSimulation::midpoint()
{
    particleDerivate(temp1);                 // temp1 = f(x0)
    scaleVector(temp1, mp_constant_tstep2);  // presumably tstep/2 — TODO confirm
    particleGetState(temp2);                 // temp2 = x0
    addVectors(temp1, temp2, temp3);         // temp3 = x0 + (h/2)*f(x0)
    particleSetState(temp3);                 // evaluate derivative at the midpoint
    particleDerivate(temp1);                 // temp1 = f(midpoint)
    scaleVector(temp1, tstep);
    addVectors(temp1, temp2, temp2);         // x1 = x0 + h*f(midpoint)
    particleSetState(temp2);
    ps.t += tstep;
}
/**
 * Per-frame scene animation: rocks shape 1, bobs shape 3, and orbits light 0
 * around the origin while keeping it aimed at the origin.
 *
 * @param time animation clock in seconds (scaled radians for the trig terms)
 */
void SceneDelegate::animate(float time)
{
    shape_[1].rotation.z = cos(time);
    // BUGFIX: unqualified abs() can resolve to the integer ::abs overload,
    // truncating the cosine product to 0/1; fabs keeps the double math intact.
    shape_[3].position.z = 3.f + 2.f * fabs(cos(0.2f * time) * cos(time));
    light_[0].position = Float3(25.f * cos(time), 25.f * sin(time), 10.f);
    // unit vector from the light toward the origin
    light_[0].direction = scaleVector(light_[0].position, -1.f / vectorMagnitude(light_[0].position));
}
// Intersect a ray with a sphere. Solves |o + t*d - c|^2 = r^2 for t and
// returns the nearest intersection in front of the ray origin (heap-allocated),
// or NULL when the ray misses or the sphere lies entirely behind the origin.
Point *findIntersectionPoint(Ray ray, Sphere sphere)
{
    Vector centerToOrigin = fromToVector(sphere.center, ray.point);

    // quadratic coefficients a*t^2 + b*t + c = 0
    double a = dotVector(ray.direction, ray.direction);
    double b = dotVector(scaleVector(centerToOrigin, 2.0), ray.direction);
    double c = dotVector(centerToOrigin, centerToOrigin) - sphere.radius * sphere.radius;

    double determinant = b * b - 4 * a * c;
    if (determinant < 0) {
        // no real roots: the ray misses the sphere
        return (Point *) NULL;
    }

    double root1 = (-b + sqrt(determinant)) / (2.0 * a);
    double root2 = (-b - sqrt(determinant)) / (2.0 * a);

    if (root1 >= 0 && root2 >= 0) {
        // both hits ahead of the origin: take the closer one
        return findPointFromRoot(min(root1, root2), ray);
    }
    if (root1 >= 0 || root2 >= 0) {
        // origin inside the sphere: the forward hit is the larger root
        return findPointFromRoot(max(root1, root2), ray);
    }

    // both intersections lie behind the ray origin
    return (Point *) NULL;
}
// Compose the full local transform as scale * rotation * translation
// from the stored scale, quaternion rotation, and position components.
XMMATRIX Transform::GetTransformMatrix()
{
    return XMMatrixScalingFromVector(XMVECTOR(m_Scale.AsXMVECTOR()))
         * XMMatrixRotationQuaternion(XMVECTOR(m_Rotation.AsXMVECTOR()))
         * XMMatrixTranslationFromVector(XMVECTOR(m_Position.AsXMVECTOR()));
}
/*
 * Matrix-free conjugate-gradient solve of A*x = b.
 * A is an operator callback; b holds the right-hand side on entry and is
 * overwritten with the solution. Iterates until the residual norm drops
 * below `tolerance` (absolute) or the dimension count is reached.
 * Prints the number of iterations used.
 */
void cg(eval_t A, Matrix b, double tolerance, void* ctx)
{
  /* scratch storage: residual r, search direction p, buffer = A*p */
  Matrix r = createMatrix(b->rows, b->cols);
  Matrix p = createMatrix(b->rows, b->cols);
  Matrix buffer = createMatrix(b->rows, b->cols);
  double dotp = 1000; /* sentinel so the loop guard passes on entry */
  double rdr = dotp;
  /* r = b; then use b as the solution vector, starting from x = 0 */
  copyVector(r->as_vec,b->as_vec);
  fillVector(b->as_vec, 0.0);
  int i=0;
  while (i < b->as_vec->len && rdr > tolerance) {
    ++i;
    if (i == 1) {
      /* first iteration: p = r */
      copyVector(p->as_vec,r->as_vec);
      dotp = innerproduct(r->as_vec,r->as_vec);
    } else {
      /* p = r + beta*p with beta = (r,r)_new / (r,r)_old */
      double dotp2 = innerproduct(r->as_vec,r->as_vec);
      double beta = dotp2/dotp;
      dotp = dotp2;
      scaleVector(p->as_vec,beta);
      axpy(p->as_vec,r->as_vec,1.0);
    }
    A(buffer,p,ctx);                 /* buffer = A*p */
    double alpha = dotp/innerproduct(p->as_vec,buffer->as_vec);
    axpy(b->as_vec,p->as_vec,alpha);      /* x += alpha*p */
    axpy(r->as_vec,buffer->as_vec,-alpha);/* r -= alpha*A*p */
    rdr = sqrt(innerproduct(r->as_vec,r->as_vec));
  }
  printf("%i iterations\n",i);
  freeMatrix(r);
  freeMatrix(p);
  freeMatrix(buffer);
}
/*
 * Conjugate-gradient solve; b holds the right-hand side on entry and the
 * solution on exit. Convergence test is relative: ||r|| <= tolerance*||b||.
 * NOTE: parameter A is unused here (the operator is applied via the global
 * MxV); it is kept to preserve the caller-facing signature.
 *
 * Returns the number of iterations performed.
 */
int cg(Matrix A, Vector b, double tolerance)
{
  int i=0;  /* unused loop variable j removed */
  double rl;
  /* scratch storage: residual r, search direction p, buffer = A*p */
  Vector r = createVector(b->len);
  Vector p = createVector(b->len);
  Vector buffer = createVector(b->len);
  double dotp = 1000; /* sentinel so the loop guard passes on entry */
  double rdr = dotp;
  /* r = b; then use b as the solution vector, starting from x = 0 */
  copyVector(r,b);
  fillVector(b, 0.0);
  rl = sqrt(dotproduct(r,r)); /* initial residual norm, for relative test */
  while (i < b->len && rdr > tolerance*rl) {
    ++i;
    if (i == 1) {
      /* first iteration: p = r */
      copyVector(p,r);
      dotp = dotproduct(r,r);
    } else {
      /* p = r + beta*p with beta = (r,r)_new / (r,r)_old */
      double dotp2 = dotproduct(r,r);
      double beta = dotp2/dotp;
      dotp = dotp2;
      scaleVector(p,beta);
      axpy(p,r,1.0);
    }
    MxV(buffer, p);                        /* buffer = A*p */
    double alpha = dotp/dotproduct(p,buffer);
    axpy(b,p,alpha);        /* x += alpha*p */
    axpy(r,buffer,-alpha);  /* r -= alpha*A*p */
    rdr = sqrt(dotproduct(r,r));
  }
  freeVector(r);
  freeVector(p);
  freeVector(buffer);
  return i;
}
// Power method: tracks successive powers of A applied to a random start
// vector until the dominant-eigenvalue estimate stabilises (change below
// `precision`), then returns the 1-norm-normalised dominant eigenvector.
vector<double> PageRank::metodoPotencia() {
    int n = A.size();

    // random starting vector
    vector<double> seed(n, 0);
    for (int k = 0; k < n; k++) {
        seed[k] = random_in_range(1, 50);
    }

    // column form of the start vector so it can be multiplied by A
    vector< vector<double> > seedCol = row2Column(seed);

    vector< vector<double> > power = multiply(A, A); // A^2
    vector< vector<double> > prevPower = A;          // A^1

    // eigenvalue estimate: phi(A^{k+1} x) / phi(A^k x)
    double estimate = phi(column2Row(multiply(power, seedCol))) / phi(column2Row(multiply(prevPower, seedCol)));
    double previousEstimate = INFINITY;

    while (fabs(estimate - previousEstimate) > precision) {
        prevPower = power;
        power = multiply(power, A);
        previousEstimate = estimate;
        estimate = phi(column2Row(multiply(power, seedCol))) / phi(column2Row(multiply(prevPower, seedCol)));
    }

    // dominant eigenvector, normalised to unit 1-norm
    vector<double> dominant = column2Row(multiply(power, seedCol));
    return scaleVector(dominant, 1/norma1(dominant));
}
// Fit this oriented bounding sphere/box to an indexed triangle mesh.
// If no basis is supplied, the orientation is derived from the surface
// inertia statistics, iteratively stretching near-equal eigen directions
// to break degeneracy; otherwise the given basis is used as-is.
void dgSphere::SetDimensions(const dgFloat32 vertex[], dgInt32 strideInBytes, const dgInt32 triangles[], dgInt32 indexCount, const dgMatrix *basis)
{
	dgVector eigen;
	dgVector scaleVector(dgFloat32(1.0f), dgFloat32(1.0f), dgFloat32(1.0f), dgFloat32(0.0f));
	// need at least one triangle
	if (indexCount < 3) {
		return;
	}
	dgInt32 stride = dgInt32(strideInBytes / sizeof(dgFloat32));
	if (!basis) {
		// derive orientation from surface statistics
		InternalSphere::Statistics(*this, eigen, scaleVector, vertex, triangles, indexCount, stride);
		dgInt32 k = 0;
		for (dgInt32 i = 0; i < 3; i++) {
			if (k >= 6) {
				break; // cap on re-stretch attempts
			}
			for (dgInt32 j = i + 1; j < 3; j++) {
				dgFloat32 aspect = InternalSphere::AspectRatio(eigen[i], eigen[j]);
				if (aspect > dgFloat32(0.9f)) {
					// near-degenerate eigenvalues: stretch this axis and redo
					// the statistics; restart the scan (i = -1 then i++)
					scaleVector[i] *= dgFloat32(2.0f);
					InternalSphere::Statistics(*this, eigen, scaleVector, vertex, triangles, indexCount, stride);
					k++;
					i = -1;
					break;
				}
			}
		}
	} else {
		*this = *basis;
	}
	// bounding box in the chosen orientation, then centre and half-extents
	dgVector min;
	dgVector max;
	InternalSphere::BoundingBox(*this, vertex, stride, triangles, indexCount, min, max);
	dgVector massCenter(max + min);
	massCenter = massCenter.Scale(dgFloat32(0.5f));
	m_posit = TransformVector(massCenter);
	dgVector dim(max - min);
	dim = dim.Scale(dgFloat32(0.5f));
	SetDimensions(dim.m_x, dim.m_y, dim.m_z);
}
// Classic fourth-order Runge-Kutta step; advances ps.t by tstep.
// temp2 accumulates the weighted final state (x0 + k1/6 + k2/3 + k3/3 + k4/6),
// temp3 holds the intermediate evaluation states. The rk_constant_* members
// are presumably the precomputed h-weighted coefficients — TODO confirm.
// Statement order is significant throughout.
void ParticleSimulation::rungekutta()
{
    particleGetState(temp2);//p0
    //--------------------------------
    particleDerivate(temp1);//k1 f
    scaleVector(temp1, rk_constant_k1_k4);//f * h * 1/6
    addVectors(temp1, temp2, temp2);// p0 + 1/6*k1
    //--------------------------------
    scaleVector(temp1, rk_constant_a);//set f to h*f / 2
    addVectors(temp1, temp2, temp3);
    particleSetState(temp3);
    //--------------------------------k1
    particleDerivate(temp1);
    scaleVector(temp1, rk_constant_k2_k3);
    addVectors(temp1, temp2, temp2);
    //--------------------------------
    scaleVector(temp1, rk_constant_b);
    addVectors(temp1, temp3, temp3);
    particleSetState(temp3);
    //--------------------------------k2
    particleDerivate(temp1);
    scaleVector(temp1, rk_constant_k2_k3);
    addVectors(temp1, temp2, temp2);
    //--------------------------------
    scaleVector(temp1, 3.0);
    addVectors(temp1, temp3, temp3);
    particleSetState(temp3);
    //--------------------------------k3
    particleDerivate(temp1);
    scaleVector(temp1, rk_constant_k1_k4);
    addVectors(temp1, temp2, temp2);
    //--------------------------------k4
    particleSetState(temp2);
    ps.t += tstep;
}
/**
 * ANNWrapper::scaleVector (2-D overload)
 * @brief scales every row of a 2-D vector via the 1-D scaleVector overload
 * @param vectorToScale_ matrix whose rows are scaled independently
 * @param scaleFactor_ scale factor, forwarded to the row-wise overload
 * @param minimize_ forwarded to the row-wise overload
 * @return scaled copy of the input (input is left untouched)
 */
vector<vector<double> > ANNWrapper::scaleVector(const vector<vector<double> > &vectorToScale_, double scaleFactor_, bool minimize_) {
    vector< vector<double> > result = vectorToScale_;
    // size_t counter avoids the signed/unsigned comparison the old int loop had
    for (size_t i = 0; i < vectorToScale_.size(); i++) {
        result[i] = scaleVector(vectorToScale_[i],scaleFactor_,minimize_);
    }
    return result;
}
/* Multiply every non-NULL vector in the array by the factor f, in place. */
void scaleVecArray(VecArray a, double f)
{
	int count, idx;

	g_assert(a);

	count = VecArraySize(a);
	for(idx = 0; idx < count; idx++)
	{
		if(a[idx])
		{
			scaleVector(a[idx], f);
		}
	}
}
// Forward (explicit) Euler integration step; advances ps.t by tstep.
// x1 = x0 + h * f(x0), using the member scratch vectors temp1/temp2.
void ParticleSimulation::euler()
{
    particleDerivate(temp1);          // temp1 = f(x0)
    scaleVector(temp1, tstep);        // temp1 = h * f(x0)
    particleGetState(temp2);          // temp2 = x0
    addVectors(temp1, temp2, temp2);  // temp2 = x0 + h*f(x0)
    particleSetState(temp2);
    ps.t += tstep;
}
/* Arithmetic mean of the points selected by `list` out of `points`.
 * Returns the zero vector for an empty list (the old code divided by zero,
 * producing NaN/Inf components). */
vector_t averageListedPoints(const points_t points, const list_t list)
{
  index_t i;
  vector_t m = initVector(0.0f, 0.0f, 0.0f);
  /* guard: nothing to average */
  if (getLength(list) == 0) {
    return m;
  }
  for (i=0; i<getLength(list); i++) {
    entry_t e=getEntry(list,i);
    m=addVectors(getPoint(points,entry_getIndex(&e)),m);
  }
  m=scaleVector(m,1.0f/((float)getLength(list)));
  return m;
}
// Fit this oriented bounding sphere/box to a raw point cloud (no index list).
// If no basis is supplied, the orientation is derived from the vertex
// statistics, iteratively stretching near-equal eigen directions to break
// degeneracy; otherwise the given basis is used as-is. Final extents are
// padded by SPHERE_TOL.
void dgSphere::SetDimensions ( const dgFloat32 vertex[], dgInt32 strideInBytes, dgInt32 count, const dgMatrix *basis)
{
	dgInt32 i;
	dgInt32 j;
	dgInt32 k;
	dgInt32 stride;
	dgFloat32 aspect;
	dgVector eigen;
	dgVector scaleVector (1.0f, 1.0f, 1.0f, 0.0f);

	stride = dgInt32 (strideInBytes / sizeof (dgFloat32));
	if (!basis)	{
		// derive orientation from point statistics
		InternalSphere::Statistics (*this, eigen, scaleVector, vertex, count, stride);
		k = 0;
		for (i = 0; i < 3; i ++) {
			if (k >= 6) {
				break; // cap on re-stretch attempts
			}
			for (j = i + 1; j < 3; j ++) {
				aspect = InternalSphere::AspectRatio (eigen[i], eigen[j]);
				if (aspect > 0.9) {
					// near-degenerate eigenvalues: stretch this axis, redo the
					// statistics, and restart the scan (i = -1 then i++)
					scaleVector[i] *= 2.0f;
					InternalSphere::Statistics (*this, eigen, scaleVector, vertex, count, stride);
					k ++;
					i = -1;
					break;
				}
			}
		}
	} else {
		*this = *basis;
	}

	// bounding box in the chosen orientation, then centre and half-extents
	dgVector min;
	dgVector max;
	InternalSphere::BoundingBox (*this, vertex, count, stride, min, max);

	dgVector massCenter (max + min);
	massCenter = massCenter.Scale (0.5);
	m_posit = TransformVector (massCenter);
	dgVector dim (max - min);
	dim = dim.Scale (0.5f);
	SetDimensions (dim.m_x + InternalSphere::SPHERE_TOL, dim.m_y + InternalSphere::SPHERE_TOL, dim.m_z + InternalSphere::SPHERE_TOL);
}
/* Driver: solve the 2D Poisson problem with a conjugate-gradient iteration,
 * then report the max-norm error against the exact solution.
 * Usage: prog <N> [L] where N is grid resolution and L the domain length. */
int main(int argc, char** argv)
{
  int rank, size;
  init_app(argc, argv, &rank, &size);

  if (argc < 2) {
    printf("usage: %s <N> [L]\n",argv[0]);
    close_app();
    return 1;
  }

  /* the total number of grid points in each spatial direction is (N+1) */
  /* the total number of degrees-of-freedom in each spatial direction is (N-1) */
  int N  = atoi(argv[1]);
  int M  = N-1;
  double L=1.0;
  if (argc > 2)
    L = atof(argv[2]);

  double h = L/N; /* mesh spacing */

  poisson_info_t ctx;
  ctx.A = createPoisson1D(M);

  /* interior grid coordinates */
  Vector grid = createVector(M);
  for (int i=0;i<M;++i)
    grid->data[i] = (i+1)*h;

  /* right-hand side: h^2 * f evaluated on the mesh */
  Matrix u = createMatrix(M, M);
  evalMesh(u->as_vec, grid, grid, poisson_source);
  scaleVector(u->as_vec, h*h);

  double time = WallTime();
  cg(evaluate, u, 1.e-6, &ctx);

  /* subtract the exact solution to leave the error in u */
  evalMesh2(u->as_vec, grid, grid, exact_solution, -1.0);
  double max = maxNorm(u->as_vec);

  if (rank == 0) {
    printf("elapsed: %f\n", WallTime()-time);
    printf("max: %f\n", max);
  }

  freeMatrix(u);
  freeVector(grid);
  freeMatrix(ctx.A);
  close_app();
  return 0;
}
// Power method on the sparse link matrix with teleportation: repeatedly apply
// the damped matrix to a random start vector, redistributing the 1-norm mass
// lost to damping uniformly, until the eigenvalue estimate stabilises.
// Returns the iterate normalised to unit 1-norm.
vector<double> PageRankEsparso::metodoPotencia() {
    int n = A.size();

    // random starting vector
    vector<double> current(n, 0);
    for (int k = 0; k < n; k++) {
        current[k] = random_in_range(1, 50);
    }

    // uniform teleportation distribution (1/n per entry)
    vector<double> uniform(n, double(1)/double(n));

    vector<double> next = current;
    double estimate = INFINITY;
    double previousEstimate;
    do {
        current = next;
        next = scaleVector(multiplyEsparso(A, current), teletransportacion);
        // mass lost to damping, redistributed via the uniform vector
        double lost = norma1(current) - norma1(next);
        next = sumVector(next, scaleVector(uniform, lost));
        previousEstimate = estimate;
        estimate = phi(next) / phi(current);
    } while (fabs(estimate - previousEstimate) > precision);

    return scaleVector(next, 1/norma1(next));
}
/* Element-wise average of all vectors in the array.
 * Returns a newly allocated Vector, or 0 when the element size is not positive.
 */
Vector VecArrayVectoraverage(const VecArray a)
{
	int vecLen, numVecs, k;
	Vector avg;

	g_assert(a);

	vecLen = VecArrayVectorSize(a);
	if(vecLen <= 0)
	{
		return 0;
	}
	numVecs = VecArraySize(a);

	/* accumulate the sum, then divide by the number of vectors */
	avg = newVector(vecLen);
	zeroVector(avg);
	for(k = 0; k < numVecs; k++)
	{
		addtoVector(avg, a[k]);
	}
	scaleVector(avg, 1.0/numVecs);

	return avg;
}
// helper to correct normals and winding order static void normalizeMesh(bool invertNormals, Mesh* mesh) { // unitize normal vectors for (std::vector<Float3>::iterator it = mesh->normalArray.begin(); it != mesh->normalArray.end(); it++) { const float length = invertNormals ? -1.f : 1.f * vectorMagnitude(*it); *it = scaleVector(*it, 1.f / length); } // change winding order if necessary if (invertNormals) { const unsigned int numberOfTriangles = mesh->indexArray.size() / 3; for (unsigned int i=0; i<numberOfTriangles; ++i) { const unsigned int idx = 3 * i; const unsigned int tmp = mesh->indexArray[idx]; mesh->indexArray[idx] = mesh->indexArray[idx + 1]; mesh->indexArray[idx + 1] = tmp; } } }
// Demodulate a received GMSK burst into soft bits.
// Mutates rxBurst in place (channel compensation and TOA alignment), then
// derotates, optionally decimates to 1 sample/symbol, slices, and copies the
// real part of each sample out as a soft bit. Caller owns the returned vector.
// NOTE(review): the gsmPulse parameter is not used in this body.
SoftVector *demodulateBurst(signalVector &rxBurst, const signalVector &gsmPulse, int samplesPerSymbol, complex channel, float TOA)
{
  // undo the channel gain/phase and time-of-arrival offset
  scaleVector(rxBurst,((complex) 1.0)/channel);
  delayVector(rxBurst,-TOA);

  signalVector *shapedBurst = &rxBurst;

  // shift up by a quarter of a frequency
  // ignore starting phase, since spec allows for discontinuous phase
  GMSKReverseRotate(*shapedBurst);

  // run through slicer
  if (samplesPerSymbol > 1) {
    signalVector *decShapedBurst = decimateVector(*shapedBurst,samplesPerSymbol);
    shapedBurst = decShapedBurst;
  }

  LOG(DEEPDEBUG) << "shapedBurst: " << *shapedBurst;

  vectorSlicer(shapedBurst);

  // copy the sliced real parts into the output soft-bit vector
  SoftVector *burstBits = new SoftVector(shapedBurst->size());

  SoftVector::iterator burstItr = burstBits->begin();
  signalVector::iterator shapedItr = shapedBurst->begin();
  for (; shapedItr < shapedBurst->end(); shapedItr++)
    *burstItr++ = shapedItr->real();

  // the decimated copy was heap-allocated above; the original is caller-owned
  if (samplesPerSymbol > 1) delete shapedBurst;

  return burstBits;
}
// Compute surface-area-weighted second-moment statistics of a scaled triangle
// mesh and store the covariance matrix rows in the sphere's frame vectors,
// then diagonalize it (EigenVectors) to obtain the principal directions.
// eigenValues receives the eigenvalues; `scale` pre-scales every vertex.
static void Statistics (dgObb &sphere, dgVector &eigenValues, const dgVector &scale, const dgFloat32 vertex[], const dgInt32 faceIndex[], dgInt32 indexCount, dgInt32 stride)
{
	dgVector var (dgFloat32 (0.0f));        // accumulated diagonal moments (Ixx,Iyy,Izz)
	dgVector cov (dgFloat32 (0.0f));        // accumulated off-diagonal moments (Ixy,Ixz,Iyz)
	dgVector centre (dgFloat32 (0.0f));
	dgVector massCenter (dgFloat32 (0.0f)); // area-weighted centroid accumulator
	dgVector scaleVector (scale & dgVector::m_triplexMask);
	dgFloat64 totalArea = dgFloat32 (0.0f);
	const dgFloat32* const ptr = vertex;
	for (dgInt32 i = 0; i < indexCount; i += 3) {
		// fetch and scale the three triangle vertices
		dgInt32 index = faceIndex[i] * stride;
		dgVector p0 (&ptr[index]);
		p0 = p0 * scaleVector;
		index = faceIndex[i + 1] * stride;;
		dgVector p1 (&ptr[index]);
		p1 = p1 * scaleVector;
		index = faceIndex[i + 2] * stride;;
		dgVector p2 (&ptr[index]);
		p2 = p2 * scaleVector;
		dgVector normal ((p1 - p0).CrossProduct(p2 - p0));
		dgFloat64 area = dgFloat32 (0.5f) * sqrt (normal.DotProduct3(normal));
		centre = p0 + p1 + p2;
		centre = centre.Scale (dgFloat32  (1.0f / 3.0f));
		// Inertia of each point in the triangle
		dgFloat64 Ixx = p0.m_x * p0.m_x + p1.m_x * p1.m_x + p2.m_x * p2.m_x;
		dgFloat64 Iyy = p0.m_y * p0.m_y + p1.m_y * p1.m_y + p2.m_y * p2.m_y;
		dgFloat64 Izz = p0.m_z * p0.m_z + p1.m_z * p1.m_z + p2.m_z * p2.m_z;
		dgFloat64 Ixy = p0.m_x * p0.m_y + p1.m_x * p1.m_y + p2.m_x * p2.m_y;
		dgFloat64 Iyz = p0.m_y * p0.m_z + p1.m_y * p1.m_z + p2.m_y * p2.m_z;
		dgFloat64 Ixz = p0.m_x * p0.m_z + p1.m_x * p1.m_z + p2.m_x * p2.m_z;
		// skip degenerate (near-zero-area) triangles
		if (area > dgEPSILON * 10.0) {
			dgFloat64 K = area / dgFloat64 (12.0);
			//Coriolis theorem for Inertia of a triangle in an arbitrary orientation
			Ixx = K * (Ixx + 9.0 * centre.m_x * centre.m_x);
			Iyy = K * (Iyy + 9.0 * centre.m_y * centre.m_y);
			Izz = K * (Izz + 9.0 * centre.m_z * centre.m_z);
			Ixy = K * (Ixy + 9.0 * centre.m_x * centre.m_y);
			Ixz = K * (Ixz + 9.0 * centre.m_x * centre.m_z);
			Iyz = K * (Iyz + 9.0 * centre.m_y * centre.m_z);
			centre = centre.Scale ((dgFloat32)area);
		}
		totalArea += area;
		massCenter += centre;
		var += dgVector ((dgFloat32)Ixx, (dgFloat32)Iyy, (dgFloat32)Izz, dgFloat32 (0.0f));
		cov += dgVector ((dgFloat32)Ixy, (dgFloat32)Ixz, (dgFloat32)Iyz, dgFloat32 (0.0f));
	}

	// normalize the accumulated moments by the total surface area
	if (totalArea > dgEPSILON * 10.0) {
		dgFloat64 K = dgFloat64 (1.0) / totalArea;
		var = var.Scale ((dgFloat32)K);
		cov = cov.Scale ((dgFloat32)K);
		massCenter = massCenter.Scale ((dgFloat32)K);
	}

	// central moments: subtract the centroid contribution
	dgFloat64 Ixx = var.m_x - massCenter.m_x * massCenter.m_x;
	dgFloat64 Iyy = var.m_y - massCenter.m_y * massCenter.m_y;
	dgFloat64 Izz = var.m_z - massCenter.m_z * massCenter.m_z;
	dgFloat64 Ixy = cov.m_x - massCenter.m_x * massCenter.m_y;
	dgFloat64 Ixz = cov.m_y - massCenter.m_x * massCenter.m_z;
	dgFloat64 Iyz = cov.m_z - massCenter.m_y * massCenter.m_z;

	// store the symmetric covariance matrix rows and diagonalize
	sphere.m_front = dgVector ((dgFloat32)Ixx, (dgFloat32)Ixy, (dgFloat32)Ixz, dgFloat32 (0.0f));
	sphere.m_up    = dgVector ((dgFloat32)Ixy, (dgFloat32)Iyy, (dgFloat32)Iyz, dgFloat32 (0.0f));
	sphere.m_right = dgVector ((dgFloat32)Ixz, (dgFloat32)Iyz, (dgFloat32)Izz, dgFloat32 (0.0f));
	sphere.EigenVectors(eigenValues);
}
/**
 * ANNWrapper::calculateOutput
 * @brief calculates the output of all artificial neuron nets inside this class and outputs them
 * @return returns DataBuffer which contains all outputs of all neuro nets inside this class
 *
 * For each net: rebuilds the full data set, normalizes it exactly as during
 * training, feeds the most recent input row forward, then reverses the
 * z-transform and scaling to obtain a real-unit prediction.
 */
DataBuffer ANNWrapper::calculateOutput() {
    DataBuffer result;

    /* --- ANN Temperature --- */
    vector< vector<double> > inputValuesALL;
    vector<double> expectedOutputValuesALL;

    // define training parameter
    // measurements to use for prediction
    vector<string> predictorList = {"Lufttemperatur_2m"};
    // measurement to predict
    string predictant = "Lufttemperatur_2m";
    // dataSources to use for prediction
    vector<string> dataSourceList = {"WeatherStation","Forecast"};
    // amount of previous values to consider
    int predictionWindowSize = 3;
    // yes, generate datasets for training/calculating output, not for evaluating
    bool trainingDataSet = true;

    // generate input and expected output data
    generateDataSets(&inputValuesALL,&expectedOutputValuesALL,predictorList,predictant,dataSourceList,predictionWindowSize,trainingDataSet);

    // normalize and scale data-sets
    inputValuesALL = zTransformVector(inputValuesALL);
    inputValuesALL = scaleVector(inputValuesALL,SCALING_FACTOR,true);

    // get last dataset for last calculateable forecast
    // NOTE(review): assumes generateDataSets produced at least one row;
    // an empty data set would make this index out of bounds — verify upstream
    vector<double> inputValues = inputValuesALL[inputValuesALL.size()-1];

    // set path for trained weights
    ANNTemperature.setTrainedWeightsCaffemodelPath(PATH_OF_TRAINED_WEIGHTS);

    // calculate output of net
    double scaledAndTransformedOutput = ANNTemperature.forward(inputValues);

    // redo z-transformation
    double scaledOutput = reZTransformVector({scaledAndTransformedOutput},expectedOutputValuesALL)[0];
    // redo scaling
    double realOutput = scaleVector(scaledOutput,SCALING_FACTOR,false);

    result.data["ANNTemperature"] = realOutput;

    /* --- ANN Air Pressure --- */

    // define training parameter
    // measurements to use for prediction
    predictorList = {"Luftdruck_2m"};
    // measurement to predict
    predictant = "Luftdruck_2m";
    // dataSources to use for prediction
    dataSourceList = {"WeatherStation","Forecast"};
    // amount of previous values to consider
    predictionWindowSize = 3;
    // yes, generate datasets for training/calculating output, not for evaluating
    trainingDataSet = true;

    // generate input and expected output data
    generateDataSets(&inputValuesALL,&expectedOutputValuesALL,predictorList,predictant,dataSourceList,predictionWindowSize,trainingDataSet);

    // normalize and scale data-sets
    inputValuesALL = zTransformVector(inputValuesALL);
    inputValuesALL = scaleVector(inputValuesALL,SCALING_FACTOR,true);

    // get last dataset for last calculateable forecast
    inputValues = inputValuesALL[inputValuesALL.size()-1];

    // set path for trained weights
    ANNAirPressure.setTrainedWeightsCaffemodelPath(PATH_OF_TRAINED_WEIGHTS);

    // calculate output of net
    scaledAndTransformedOutput = ANNAirPressure.forward(inputValues);

    // redo z-transformation
    scaledOutput = reZTransformVector({scaledAndTransformedOutput},expectedOutputValuesALL)[0];
    // redo scaling
    realOutput = scaleVector(scaledOutput,SCALING_FACTOR,false);

    result.data["ANNAirPressure"] = realOutput;

    return result;
}
/* MPI driver: solve a 1D Poisson-like problem with the iterative method
 * selected by `flag` (Gauss-Jacobi, red-black Gauss-Seidel, CG, or
 * preconditioned CG), then report iterations, timing, and max error.
 * Usage: prog <N> <flag> [tolerance]. */
int main(int argc, char** argv)
{
  int i, j, N, flag;
  Matrix A=NULL, Q=NULL;
  Vector b, grid, e, lambda=NULL;
  double time, sum, h, tol=1e-6;
  int rank, size;
  int mpi_top_coords;
  int mpi_top_sizes;
  init_app(argc, argv, &rank, &size);

  if (argc < 3) {
    printf("need two parameters, N and flag [and tolerance]\n");
    printf(" - N is the problem size (in each direction\n");
    printf(" - flag = 1  -> Matrix-free Gauss-Jacobi iterations\n");
    printf(" - flag = 2  -> Matrix-free red-black Gauss-Seidel iterations\n");
    printf(" - flag = 3  -> Matrix-free CG iterations\n");
    printf(" - flag = 4  -> Matrix-free additive schwarz preconditioned+Cholesky CG iterations\n");
    printf(" - flag = 5  -> Matrix-free additive schwarz preconditioned+CG CG iterations\n");
    return 1;
  }
  N=atoi(argv[1]);
  flag=atoi(argv[2]);
  if (argc > 3)
    tol=atof(argv[3]);

  /* argument validation */
  if (N < 0) {
    if (rank == 0)
      printf("invalid problem size given\n");
    close_app();
    return 2;
  }
  if (flag < 0 || flag > 5) {
    if (rank == 0)
      printf("invalid flag given\n");
    close_app();
    return 3;
  }
  if (flag == 2 && (N-1)%2 != 0 && ((N-1)/size) % 2 != 0) {
    if (rank == 0)
      printf("need an even size (per process) for red-black iterations\n");
    close_app();
    return 4;
  }

  // setup topology
  mpi_top_coords = 0;
  mpi_top_sizes = 0;
  MPI_Dims_create(size, 1, &mpi_top_sizes);
  int periodic = 0;
  MPI_Comm comm;
  MPI_Cart_create(MPI_COMM_WORLD, 1, &mpi_top_sizes, &periodic, 0, &comm);
  MPI_Cart_coords(comm, rank, 1, &mpi_top_coords);

  b = createVectorMPI(N+1, &comm, 1, 1);
  e = createVectorMPI(N+1, &comm, 1, 1);
  grid = equidistantMesh(0.0, 1.0, N);
  h = 1.0/N;

  /* right-hand side: h^2 * f + alpha * exact, boundary rows zeroed */
  evalMeshDispl(b, grid, source);
  scaleVector(b, pow(h, 2));
  evalMeshDispl(e, grid, exact);
  axpy(b, e, alpha);
  b->data[0] = b->data[b->len-1] = 0.0;

  if (flag == 4) {
    /* local Cholesky preconditioner block (interior rows only) */
    int size = b->len;
    if (b->comm_rank == 0)
      size--;
    if (b->comm_rank == b->comm_size-1)
      size--;
    A1D = createMatrix(size, size);
    A1Dfactored = 0;
    diag(A1D, -1, -1.0);
    diag(A1D, 0, 2.0+alpha);
    diag(A1D, 1, -1.0);
  }

  int its=-1;
  char method[128];
  time = WallTime();

  /* dispatch on the requested solver */
  if (flag == 1) {
    its=GaussJacobiPoisson1D(b, tol, 1000000);
    sprintf(method,"Gauss-Jacobi");
  }
  if (flag == 2) {
    its=GaussSeidelPoisson1Drb(b, tol, 1000000);
    sprintf(method,"Gauss-Seidel");
  }
  if (flag == 3) {
    its=cgMatrixFree(Poisson1D, b, tol);
    sprintf(method,"CG");
  }
  if (flag == 4 || flag == 5) {
    its=pcgMatrixFree(Poisson1D, Poisson1DPre, b, tol);
    sprintf(method,"PCG");
  }
  if (rank == 0) {
    printf("%s used %i iterations\n", method, its);
    printf("elapsed: %f\n", WallTime()-time);
  }

  /* subtract the exact solution to leave the error in b */
  evalMeshDispl(e, grid, exact);
  axpy(b,e,-1.0);
  b->data[0] = b->data[b->len-1] = 0.0;

  h = maxNorm(b);
  if (rank == 0)
    printf("max error: %e\n", h);

  if (A)
    freeMatrix(A);
  if (Q)
    freeMatrix(Q);
  freeVector(grid);
  freeVector(b);
  freeVector(e);
  if (lambda)
    freeVector(lambda);
  if (A1D)
    freeMatrix(A1D);
  MPI_Comm_free(&comm);
  close_app();
  return 0;
}
// Drain whole INCHUNK-sized chunks from sendBuffer, resample them to the USRP
// rate, scale to full-scale, and write them to the radio; leftover samples are
// kept in a fresh sendBuffer and INHISTORY samples are retained for the next
// resampling pass. Starts the USRP lazily on first data. Lock/timestamp
// ordering around writeSamples is significant.
void RadioInterface::pushBuffer(void)
{
  if (sendBuffer->size() < INCHUNK) {
    // nothing to transmit yet
    return;
  }

  // only send complete chunks
  int numChunks = sendBuffer->size()/INCHUNK;
  signalVector* truncatedBuffer = new signalVector(numChunks*INCHUNK);
  sendBuffer->segmentCopyTo(*truncatedBuffer,0,numChunks*INCHUNK);

  // lazily build the anti-aliasing low-pass filter for the resampler
  if (!sendLPF) {
    int P = OUTRATE; int Q = INRATE;
    float cutoffFreq = (P < Q) ? (1.0/(float) Q) : (1.0/(float) P);
    sendLPF = createLPF(cutoffFreq,651,P);
  }

  // resample data to USRP sample rate, prepending the history samples
  signalVector *inputVector = new signalVector(*sendHistory,*truncatedBuffer);
  signalVector *resampledVector = polyphaseResampleVector(*inputVector,
					    OUTRATE,
					    INRATE,sendLPF);
  delete inputVector;

  // Set transmit gain and power here.
  scaleVector(*resampledVector, usrp->fullScaleInputValue());
  short *resampledVectorShort = USRPifyVector(*resampledVector);

  // start the USRP when we actually have data to send to the USRP.
  if (!started) {
    started = true;
    LOG(INFO) << "Starting USRP";
    usrp->start();
    LOG(DEBUG) << "USRP started";
    usrp->updateAlignment(10000);
    usrp->updateAlignment(10000);
  }

  // send resampleVector
  writingRadioLock.lock();
  // skip the OUTHISTORY warm-up samples (x2: interleaved I/Q shorts)
  int samplesWritten = usrp->writeSamples(resampledVectorShort+OUTHISTORY*2,
				       (resampledVector->size()-OUTHISTORY),
				       &underrun,
				       writeTimestamp);
  //LOG(DEEPDEBUG) << "writeTimestamp: " << writeTimestamp << ", samplesWritten: " << samplesWritten;
  writeTimestamp += (TIMESTAMP) samplesWritten;
  wroteRadioSignal.signal();
  writingRadioLock.unlock();

  LOG(DEEPDEBUG) << "converted " << truncatedBuffer->size()
	    << " transceiver samples into " << samplesWritten
	    << " radio samples ";

  delete resampledVector;
  delete []resampledVectorShort;

  // update the history of sent data
  truncatedBuffer->segmentCopyTo(*sendHistory,truncatedBuffer->size()-INHISTORY,
			 INHISTORY);

  // update the buffer, i.e. keep the samples we didn't send
  signalVector *tmp = sendBuffer;
  sendBuffer = new signalVector(sendBuffer->size()-truncatedBuffer->size());
  tmp->segmentCopyTo(*sendBuffer,truncatedBuffer->size(),
	      sendBuffer->size());
  delete tmp;
  delete truncatedBuffer;
}
// Draw the selection pointer ball at m_position with the active camera.
// Chooses the projection (slice ortho / ortho / perspective) from the scene
// state, builds model/MVP matrices with a fixed 0.05 scale, and renders the
// point object with the point shader.
void SelectionPointer::renderSelectionPointer(GLuint defaultFboHandle, bool useOrtho)
{
    Q_UNUSED(defaultFboHandle)

    glViewport(m_mainViewPort.x(), m_mainViewPort.y(),
               m_mainViewPort.width(), m_mainViewPort.height());

    Q3DCamera *camera = m_cachedScene->activeCamera();

    QMatrix4x4 itModelMatrix;

    // Get view matrix
    QMatrix4x4 viewMatrix;
    QMatrix4x4 projectionMatrix;
    GLfloat viewPortRatio = (GLfloat)m_mainViewPort.width() / (GLfloat)m_mainViewPort.height();
    if (m_cachedIsSlicingActivated) {
        // slice view: fixed ortho camera looking down -z
        GLfloat sliceUnitsScaled = sliceUnits / m_autoScaleAdjustment;
        viewMatrix.lookAt(QVector3D(0.0f, 0.0f, 1.0f), zeroVector, upVector);
        projectionMatrix.ortho(-sliceUnitsScaled * viewPortRatio,
                               sliceUnitsScaled * viewPortRatio,
                               -sliceUnitsScaled, sliceUnitsScaled,
                               -1.0f, 4.0f);
    } else if (useOrtho) {
        viewMatrix = camera->d_ptr->viewMatrix();
        GLfloat orthoRatio = 2.0f;
        projectionMatrix.ortho(-viewPortRatio * orthoRatio, viewPortRatio * orthoRatio,
                               -orthoRatio, orthoRatio,
                               0.0f, 100.0f);
    } else {
        viewMatrix = camera->d_ptr->viewMatrix();
        projectionMatrix.perspective(45.0f, viewPortRatio, 0.1f, 100.0f);
    }

    QMatrix4x4 modelMatrix;
    QMatrix4x4 MVPMatrix;

    // Position the pointer ball
    modelMatrix.translate(m_position);

    if (!m_rotation.isIdentity()) {
        modelMatrix.rotate(m_rotation);
        itModelMatrix.rotate(m_rotation);
    }

    // Scale the point with fixed values (at this point)
    QVector3D scaleVector(0.05f, 0.05f, 0.05f);
    modelMatrix.scale(scaleVector);
    itModelMatrix.scale(scaleVector);

    MVPMatrix = projectionMatrix * viewMatrix * modelMatrix;

    QVector3D lightPos =  m_cachedScene->activeLight()->position();

    //
    // Draw the point
    //

    // Set shader bindings
    m_pointShader->bind();
    m_pointShader->setUniformValue(m_pointShader->lightP(), lightPos);
    m_pointShader->setUniformValue(m_pointShader->view(), viewMatrix);
    m_pointShader->setUniformValue(m_pointShader->model(), modelMatrix);
    // inverse-transpose model matrix for correct normal transformation
    m_pointShader->setUniformValue(m_pointShader->nModel(),
                                   itModelMatrix.inverted().transposed());
    m_pointShader->setUniformValue(m_pointShader->color(), m_highlightColor);
    m_pointShader->setUniformValue(m_pointShader->MVP(), MVPMatrix);
    m_pointShader->setUniformValue(m_pointShader->ambientS(),
                                   m_cachedTheme->ambientLightStrength());
    m_pointShader->setUniformValue(m_pointShader->lightS(),
                                   m_cachedTheme->lightStrength() * 2.0f);
    m_pointShader->setUniformValue(m_pointShader->lightColor(),
                                   Utils::vectorFromColor(m_cachedTheme->lightColor()));

    m_drawer->drawObject(m_pointShader, m_pointObj);
}
// Detect and analyze a traffic burst by correlating the received burst
// against the cached midamble for the given TSC. On success fills in the
// complex amplitude and time-of-arrival; optionally also estimates a 6-tap
// channel response. Returns true when the peak-to-mean ratio exceeds
// detectThreshold. Requires generateMidamble() to have populated
// gMidambles[TSC] first.
bool analyzeTrafficBurst(signalVector &rxBurst,
			 unsigned TSC,
			 float detectThreshold,
			 int samplesPerSymbol,
			 complex *amplitude,
			 float *TOA,
			 unsigned maxTOA,
                         bool requestChannel,
			 signalVector **channelResponse,
			 float *channelResponseOffset)
{
  assert(TSC<8);
  assert(amplitude);
  assert(TOA);
  assert(gMidambles[TSC]);

  // clamp the search span to sane minimums
  if (maxTOA < 3*samplesPerSymbol) maxTOA = 3*samplesPerSymbol;
  unsigned spanTOA = maxTOA;
  if (spanTOA < 5*samplesPerSymbol) spanTOA = 5*samplesPerSymbol;

  // window around the midamble position (66 symbols into the burst)
  unsigned startIx = (66-spanTOA)*samplesPerSymbol;
  unsigned endIx = (66+16+spanTOA)*samplesPerSymbol;
  unsigned windowLen = endIx - startIx;
  unsigned corrLen = 2*maxTOA+1;

  unsigned expectedTOAPeak = (unsigned) round(gMidambles[TSC]->TOA + (gMidambles[TSC]->sequenceReversedConjugated->size()-1)/2);

  signalVector burstSegment(rxBurst.begin(),startIx,windowLen);

  static complex staticData[200];
  signalVector correlatedBurst(staticData,0,corrLen);
  correlate(&burstSegment, gMidambles[TSC]->sequenceReversedConjugated,
					&correlatedBurst, CUSTOM,true,
					expectedTOAPeak-maxTOA,corrLen);

  float meanPower;
  *amplitude = peakDetect(correlatedBurst,TOA,&meanPower);
  float valleyPower = 0.0; //amplitude->norm2();
  complex *peakPtr = correlatedBurst.begin() + (int) rint(*TOA);

  // check for bogus results
  if ((*TOA < 0.0) || (*TOA > correlatedBurst.size())) {
    *amplitude = 0.0;
    return false;
  }

  // estimate the correlation "valley" power a few symbols on each side of
  // the peak, for the peak-to-mean detection metric
  int numRms = 0;
  for (int i = 2*samplesPerSymbol; i <= 5*samplesPerSymbol;i++) {
    if (peakPtr - i >= correlatedBurst.begin()) {
      valleyPower += (peakPtr-i)->norm2();
      numRms++;
    }
    if (peakPtr + i < correlatedBurst.end()) {
      valleyPower += (peakPtr+i)->norm2();
      numRms++;
    }
  }

  if (numRms < 2) {
    // check for bogus results
    *amplitude = 0.0;
    return false;
  }

  float RMS = sqrtf(valleyPower/(float)numRms)+0.00001;
  float peakToMean = (amplitude->abs())/RMS;

  // NOTE: Because ideal TSC is 66 symbols into burst,
  //       the ideal TSC has an +/- 180 degree phase shift,
  //       due to the pi/4 frequency shift, that
  //       needs to be accounted for.

  // normalize amplitude by the midamble gain; make TOA relative to the
  // expected peak position
  *amplitude = (*amplitude)/gMidambles[TSC]->gain;
  *TOA = (*TOA) - (maxTOA);

  LOG(DEBUG) << "TCH peakAmpl=" << amplitude->abs() << " RMS=" << RMS << " peakToMean=" << peakToMean << " TOA=" << *TOA;

  LOG(DEBUG) << "autocorr: " << correlatedBurst;

  if (requestChannel && (peakToMean > detectThreshold)) {
    float TOAoffset = maxTOA; //gMidambles[TSC]->TOA+(66*samplesPerSymbol-startIx);
    delayVector(correlatedBurst,-(*TOA));
    // midamble only allows estimation of a 6-tap channel
    signalVector channelVector(6*samplesPerSymbol);
    float maxEnergy = -1.0;
    int maxI = -1;
    // slide a 6-tap window over candidate offsets and keep the most energetic
    for (int i = 0; i < 7; i++) {
      if (TOAoffset+(i-5)*samplesPerSymbol + channelVector.size() > correlatedBurst.size()) continue;
      if (TOAoffset+(i-5)*samplesPerSymbol < 0) continue;
      correlatedBurst.segmentCopyTo(channelVector,(int) floor(TOAoffset+(i-5)*samplesPerSymbol),channelVector.size());
      float energy = vectorNorm2(channelVector);
      if (energy > 0.95*maxEnergy) {
        maxI = i;
        maxEnergy = energy;
      }
    }

    *channelResponse = new signalVector(channelVector.size());
    correlatedBurst.segmentCopyTo(**channelResponse,(int) floor(TOAoffset+(maxI-5)*samplesPerSymbol),(*channelResponse)->size());
    scaleVector(**channelResponse,complex(1.0,0.0)/gMidambles[TSC]->gain);
    LOG(DEEPDEBUG) << "channelResponse: " << **channelResponse;

    if (channelResponseOffset)
      *channelResponseOffset = 5*samplesPerSymbol-maxI;

  }

  return (peakToMean > detectThreshold);
}
/* Evaluate a ray at parameter `root`: point + root * direction.
 * Returns a heap-allocated Point the caller must free, or NULL when the
 * allocation fails (the old code dereferenced the unchecked malloc result). */
Point *findPointFromRoot(double root, Ray ray)
{
    Point *result = (Point *) malloc(sizeof(Point));
    if (result == NULL) {
        return (Point *) NULL;
    }
    Vector distanceToRoot = scaleVector(ray.direction, root);
    *result = translatePoint(ray.point, distanceToRoot);
    return result;
}
int main(int argc, char** argv) { int i, j, N, flag; Matrix A=NULL, Q=NULL; Vector b, grid, e, lambda=NULL; double time, sum, h, tol=1e-4; if (argc < 3) { printf("need two parameters, N and flag [and tolerance]\n"); printf(" - N is the problem size (in each direction\n"); printf(" - flag = 1 -> Dense LU\n"); printf(" - flag = 2 -> Dense Cholesky\n"); printf(" - flag = 3 -> Full Gauss-Jacobi iterations\n"); printf(" - flag = 4 -> Full Gauss-Jacobi iterations using BLAS\n"); printf(" - flag = 5 -> Full Gauss-Seidel iterations\n"); printf(" - flag = 6 -> Full Gauss-Seidel iterations using BLAS\n"); printf(" - flag = 7 -> Full CG iterations\n"); printf(" - flag = 8 -> Matrix-less Gauss-Jacobi iterations\n"); printf(" - flag = 9 -> Matrix-less Gauss-Seidel iterations\n"); printf(" - flag = 10 -> Matrix-less Red-Black Gauss-Seidel iterations\n"); printf(" - flag = 11 -> Diagonalization\n"); printf(" - flag = 12 -> Diagonalization - FST\n"); printf(" - flag = 13 -> Matrix-less CG iterations\n"); return 1; } N=atoi(argv[1]); flag=atoi(argv[2]); if (argc > 3) tol = atof(argv[3]); if (N < 0) { printf("invalid problem size given\n"); return 2; } if (flag < 0 || flag > 13) { printf("invalid flag given\n"); return 3; } if (flag == 10 && (N-1)%2 != 0) { printf("need an even size for red-black iterations\n"); return 4; } if (flag == 12 && (N & (N-1)) != 0) { printf("need a power-of-two for fst-based diagonalization\n"); return 5; } h = 1.0/N; grid = equidistantMesh(0.0, 1.0, N); b = createVector(N-1); e = createVector(N-1); evalMeshInternal(b, grid, source); evalMeshInternal(e, grid, exact); scaleVector(b, pow(h, 2)); axpy(b, e, alpha); if (flag < 8) { A = createMatrix(N-1,N-1); diag(A, -1, -1.0); diag(A, 0, 2.0+alpha); diag(A, 1, -1.0); } if (flag >= 11 && flag < 13) lambda = generateEigenValuesP1D(N-1); if (flag == 11) Q = generateEigenMatrixP1D(N-1); time = WallTime(); if (flag == 1) { int* ipiv=NULL; lusolve(A, b, &ipiv); free(ipiv); } else if (flag == 2) llsolve(A,b,0); else 
if (flag == 3) printf("Gauss-Jacobi used %i iterations\n", GaussJacobi(A, b, tol, 10000000)); else if (flag == 4) printf("Gauss-Jacobi used %i iterations\n", GaussJacobiBlas(A, b, tol, 10000000)); else if (flag == 5) printf("Gauss-Seidel used %i iterations\n", GaussSeidel(A, b, tol, 10000000)); else if (flag == 6) printf("Gauss-Seidel used %i iterations\n", GaussSeidelBlas(A, b, tol, 10000000)); else if (flag == 7) printf("CG used %i iterations\n", cg(A, b, 1e-8)); else if (flag == 8) printf("Gauss-Jacobi used %i iterations\n", GaussJacobiPoisson1D(b, tol, 10000000)); else if (flag == 9) printf("Gauss-Jacobi used %i iterations\n", GaussSeidelPoisson1D(b, tol, 10000000)); else if (flag == 10) printf("Gauss-Jacobi used %i iterations\n", GaussSeidelPoisson1Drb(b, tol, 10000000)); else if (flag == 11) DiagonalizationPoisson1D(b,lambda,Q); else if (flag == 12) DiagonalizationPoisson1Dfst(b,lambda); else if (flag == 13) printf("CG used %i iterations\n", cgMatrixFree(Poisson1D, b, tol)); printf("elapsed: %f\n", WallTime()-time); evalMeshInternal(e, grid, exact); axpy(b,e,-1.0); printf("max error: %e\n", maxNorm(b)); if (A) freeMatrix(A); if (Q) freeMatrix(Q); freeVector(grid); freeVector(b); freeVector(e); if (lambda) freeVector(lambda); return 0; }
// Assumes symbol-spaced sampling!!!
// Based upon paper by Al-Dhahir and Cioffi
/**
 * Design a decision-feedback equalizer (DFE) from a channel estimate.
 *
 * Computes an Nf-tap feed-forward filter and an nu-tap feedback filter
 * (nu = channel length - 1) via the Al-Dhahir/Cioffi fast computation:
 * a Cholesky-like recursion builds the rows L[i], then back-substitution
 * yields v, from which both filters are derived.
 *
 * @param channelResponse    estimated channel impulse response (symbol-spaced)
 * @param SNRestimate        estimated SNR (linear); must be > 0 (used as 1/sqrt)
 * @param Nf                 number of feed-forward taps
 * @param feedForwardFilter  [out] newly allocated Nf-tap filter (caller owns)
 * @param feedbackFilter     [out] newly allocated nu-tap filter (caller owns)
 * @return always true
 */
bool designDFE(signalVector &channelResponse,
	       float SNRestimate,
	       int Nf,
	       signalVector **feedForwardFilter,
	       signalVector **feedbackFilter)
{
  signalVector G0(Nf);
  signalVector G1(Nf);
  signalVector::iterator G0ptr = G0.begin();
  signalVector::iterator G1ptr = G1.begin();
  signalVector::iterator chanPtr = channelResponse.begin();

  // nu: channel memory (number of taps minus one).
  int nu = channelResponse.size()-1;

  // Initialize generators: G0 starts with the noise term 1/sqrt(SNR),
  // G1 with the conjugated channel taps.
  *G0ptr = 1.0/sqrtf(SNRestimate);
  for(int j = 0; j <= nu; j++) {
    *G1ptr = chanPtr->conj();
    G1ptr++; chanPtr++;
  }

  // NOTE(review): variable-length array of owning raw pointers; each L[i]
  // is new'd here and deleted in the feed-forward loop below. VLAs are a
  // compiler extension in C++ — confirm the build relies on it deliberately.
  signalVector *L[Nf];
  signalVector::iterator Lptr;
  float d;
  // Recursion: each pass produces one row L[i] and rotates the generators.
  for(int i = 0; i < Nf; i++) {
    // d: squared magnitude of the current leading generator entries.
    d = G0.begin()->norm2() + G1.begin()->norm2();
    L[i] = new signalVector(Nf+nu);
    Lptr = L[i]->begin()+i;
    G0ptr = G0.begin(); G1ptr = G1.begin();
    while ((G0ptr < G0.end()) && (Lptr < L[i]->end())) {
      *Lptr = (*G0ptr*(G0.begin()->conj()) + *G1ptr*(G1.begin()->conj()) )/d;
      Lptr++; G0ptr++; G1ptr++;
    }
    // Rotation coefficient that zeroes the leading entry of G1.
    complex k = (*G1.begin())/(*G0.begin());
    if (i != Nf-1) {
      // Apply the (unnormalized) rotation, shift G1 by one sample, then
      // renormalize both generators by 1/sqrt(1+|k|^2).
      signalVector G0new = G1;
      scaleVector(G0new,k.conj());
      addVector(G0new,G0);
      signalVector G1new = G0;
      scaleVector(G1new,k*(-1.0));
      addVector(G1new,G1);
      delayVector(G1new,-1.0);
      scaleVector(G0new,1.0/sqrtf(1.0+k.norm2()));
      scaleVector(G1new,1.0/sqrtf(1.0+k.norm2()));
      G0 = G0new;
      G1 = G1new;
    }
  }

  // Feedback filter: negated, conjugated tail (entries Nf..Nf+nu-1) of the
  // last row. Caller takes ownership.
  *feedbackFilter = new signalVector(nu);
  L[Nf-1]->segmentCopyTo(**feedbackFilter,Nf,nu);
  scaleVector(**feedbackFilter,(complex) -1.0);
  conjugateVector(**feedbackFilter);

  // Back-substitution for v: v[Nf-1] = 1, then work upward using the
  // strictly-upper part of each row L[k].
  signalVector v(Nf);
  signalVector::iterator vStart = v.begin();
  signalVector::iterator vPtr;
  *(vStart+Nf-1) = (complex) 1.0;
  for(int k = Nf-2; k >= 0; k--) {
    Lptr = L[k]->begin()+k+1;
    vPtr = vStart + k+1;
    complex v_k = 0.0;
    for (int j = k+1; j < Nf; j++) {
      v_k -= (*vPtr)*(*Lptr);
      vPtr++; Lptr++;
    }
    *(vStart + k) = v_k;
  }

  // Feed-forward filter: correlate v against the conjugated channel,
  // scaled by d. Note d intentionally retains its value from the LAST
  // recursion pass above. Caller takes ownership.
  *feedForwardFilter = new signalVector(Nf);
  signalVector::iterator w = (*feedForwardFilter)->begin();
  for (int i = 0; i < Nf; i++) {
    // L[i] is no longer needed once we reach tap i.
    delete L[i];
    complex w_i = 0.0;
    // Sum runs over min(nu, Nf-1-i)+1 terms so neither v nor the channel
    // is read out of range.
    int endPt = ( nu < (Nf-1-i) ) ? nu : (Nf-1-i);
    vPtr = vStart+i;
    chanPtr = channelResponse.begin();
    for (int k = 0; k < endPt+1; k++) {
      w_i += (*vPtr)*(chanPtr->conj());
      vPtr++; chanPtr++;
    }
    *w = w_i/d;
    w++;
  }

  return true;
}