// Dynamic-time-warping distance between two examples.
// Normalizes both examples, builds the DTW cost matrix (from the raw
// sequences or from their first derivatives, per dtwDistance), and
// returns the accumulated warp-path distance.
float TExamplesDistance_DTW::operator ()(const TExample &e1, const TExample &e2) const
{
  // Normalize both examples into numeric sequences.
  vector<float> seq1, seq2;
  getNormalized(e1, seq1);
  getNormalized(e2, seq2);

  // Build the cost matrix for the configured distance variant.
  TdtwMatrix mtrx;
  if (dtwDistance == DTW_EUCLIDEAN) {
    initMatrix(seq1, seq2, mtrx);
  }
  else if (dtwDistance == DTW_DERIVATIVE) {
    // Derivative DTW: compare slopes rather than raw values.
    vector<float> der1, der2;
    getDerivatives(seq1, der1);
    getDerivatives(seq2, der2);
    initMatrix(der1, der2, mtrx);
  }

  return calcDistance(mtrx);
}
// create derivaton perceptron NN *NNRegressionFactory::createDerivationPerceptron( NNVariables *vars , NN *p , NNSamples *pSamples , int sensorFrom , int sensorTo , float ( *targetFunction )( NN *p ) ) { int nDerivationSensors = sensorTo - sensorFrom + 1; ASSERT( nDerivationSensors > 0 ); // create perceptron - sensors are the same, targets are required derivatives of target function by given sensor int nSrcSensors = p -> getNSensors(); int nSrcTargets = p -> getNTargets(); int nDstTargets = nDerivationSensors; int hiddenLayerSize = ( nDstTargets + nSrcSensors ) / 2; // variables // sensors are the same ClassList<NNVariable> sensors; for( int k = 0; k < p -> getNSensors(); k++ ) sensors.add( p -> getSensorVariable( k ) ); // targets are derivatives - owned by NN variables ClassList<NNVariable> targets; for( int k1 = sensorFrom; k1 <= sensorTo; k1++ ) for( int k = 0; k < p -> getNSensors(); k++ ) targets.add( vars -> addCommonNumberDerivative() ); NN *pd = createDefaultPerceptron( String( p -> getId() ) + "#D" , sensors , targets , hiddenLayerSize ); // create main strategy String instance; const char *pInstance = p -> getInstance(); if( pInstance != NULL && *pInstance != 0 ) instance = String( pInstance ) + "::derivative"; NNStrategyBackPropagation *psBPd = createDefaultStrategy( instance , pd ); // populate derivation samples - in the same points as training set NNSamples pdSamples( nSrcSensors , nDstTargets ); for( int m = 0; m < pSamples -> count(); m++ ) { NNSample *sample = pSamples -> getByPos( m ); NNSample *sampleD = pdSamples.add(); sampleD -> setSensors( sample -> getSensors() ); getDerivatives( p , sensorFrom , sensorTo , sample , sampleD -> getTargets() , targetFunction ); } // learn derivative perceptron bool res = psBPd -> learn( &pdSamples , NULL , NULL ); ASSERT( res ); return( pd ); }
// Rasterizes one sphere per input point into the scalar voxel buffer.
// For each point we read P/radius/density attributes, compute the voxel-space
// bounds of the sphere, and accumulate the falloff value into every covered
// voxel.
void SpherePrimitive::execute(Geo::Geometry::CPtr geometry, ScalarBuffer::Ptr buffer) const
{
  Log::print("Rasterizing SpherePrimitive instances");

  // NOTE(review): wsDerivs is filled here but never read below — confirm
  // getDerivatives() has no required side effects before removing it.
  SamplingDerivatives wsDerivs;
  getDerivatives(buffer->mapping(), 0, 0, 0, wsDerivs);

  FieldMapping::Ptr mapping = buffer->mapping();
  AttrVisitor visitor(geometry->particles()->pointAttrs(), m_params);
  Attr<V3f>   wsCenter("P");
  Attr<float> radius  ("radius");
  Attr<float> density ("density");

  Timer timer;
  size_t numPoints = 0;  // points processed, for the summary log below

  for (AttrVisitor::const_iterator i = visitor.begin(), end = visitor.end();
       i != end; ++i, ++numPoints) {
    // Check if user terminated
    Sys::Interrupt::throwOnAbort();
    // Update per-point attributes
    i.update(wsCenter);
    i.update(radius);
    i.update(density);
    // Calculate rasterization bounds
    BBox vsBounds = calculateVsBounds(mapping, wsCenter.as<Vector>(), radius);
    DiscreteBBox dvsBounds = Math::discreteBounds(vsBounds);
    // Iterate over voxels.
    // BUG FIX: the inner iterator was also named i/end, shadowing the point
    // iterator above; renamed to v/vEnd.
    for (ScalarBuffer::iterator v = buffer->begin(dvsBounds),
           vEnd = buffer->end(dvsBounds); v != vEnd; ++v) {
      Vector vsP = discToCont(V3i(v.x, v.y, v.z)), wsP;
      mapping->voxelToWorld(vsP, wsP);
      float value = evaluateSphere(wsP, wsCenter.as<Vector>(), radius,
                                   density, 0.9f);
      if (value > 0.0f) {
        *v += value;
      }
    }
  }

  // BUG FIX: the original logged points.size(), but no "points" variable
  // exists in this scope; report the count of points actually visited.
  Log::print("Sphere primitive processed " + str(numPoints) + " input points");
  Log::print("  Time elapsed: " + str(timer.elapsed()));
}
/* * Iterate over every contour point in contours and compute the * priority of path centered at point using grayMat and confidenceMat */ void computePriority(const contours_t& contours, const cv::Mat& grayMat, const cv::Mat& confidenceMat, cv::Mat& priorityMat) { assert(grayMat.type() == CV_32FC1 && priorityMat.type() == CV_32FC1 && confidenceMat.type() == CV_32FC1 ); // define some patches cv::Mat confidencePatch; cv::Mat magnitudePatch; cv::Point2f normal; cv::Point maxPoint; cv::Point2f gradient; double confidence; // get the derivatives and magnitude of the greyscale image cv::Mat dx, dy, magnitude; getDerivatives(grayMat, dx, dy); cv::magnitude(dx, dy, magnitude); // mask the magnitude cv::Mat maskedMagnitude(magnitude.size(), magnitude.type(), cv::Scalar_<float>(0)); magnitude.copyTo(maskedMagnitude, (confidenceMat != 0.0f)); cv::erode(maskedMagnitude, maskedMagnitude, cv::Mat()); assert(maskedMagnitude.type() == CV_32FC1); // for each point in contour cv::Point point; for (int i = 0; i < contours.size(); ++i) { contour_t contour = contours[i]; for (int j = 0; j < contour.size(); ++j) { point = contour[j]; confidencePatch = getPatch(confidenceMat, point); // get confidence of patch confidence = cv::sum(confidencePatch)[0] / (double) confidencePatch.total(); assert(0 <= confidence && confidence <= 1.0f); // get the normal to the border around point normal = getNormal(contour, point); // get the maximum gradient in source around patch magnitudePatch = getPatch(maskedMagnitude, point); cv::minMaxLoc(magnitudePatch, NULL, NULL, NULL, &maxPoint); gradient = cv::Point2f( -getPatch(dy, point).ptr<float>(maxPoint.y)[maxPoint.x], getPatch(dx, point).ptr<float>(maxPoint.y)[maxPoint.x] ); // set the priority in priorityMat priorityMat.ptr<float>(point.y)[point.x] = std::abs((float) confidence * gradient.dot(normal)); assert(priorityMat.ptr<float>(point.y)[point.x] >= 0); } } }