static void launchMaximaWithLogging(){ class LogForwarder : public jalib::JMultiSocketProgram { public: void onConnect ( const jalib::JSocket& /*sock*/, const struct sockaddr* /*remoteAddr*/, socklen_t /*remoteLen*/ ){JASSERT(false);} void onDisconnect ( jalib::JReaderInterface* /*sock*/ ) { exit(0); }; void onData ( jalib::JReaderInterface* sock ){ static int stdinfd = fileno(stdin); static int stdoutfd = fileno(stdout); char buf = *sock->buffer(); log(buf); if(sock->socket().sockfd() == stdinfd) JASSERT(write(maximaFd, &buf, 1)==1); else JASSERT(write(stdoutfd, &buf, 1)==1); } void log(char c){ static FILE* fd = fopen("maxima.log","w"); JASSERT(fwrite(&c, 1,1, fd) == 1); fflush(fd); } int maximaFd; } forwarder; forwarder.maximaFd = forkopen(launchMaxima); jalib::JChunkReader maxima(forwarder.maximaFd, 1); jalib::JChunkReader petabricks(fileno(stdin), 1); forwarder.addDataSocket(&maxima); forwarder.addDataSocket(&petabricks); forwarder.monitorSockets(); JASSERT(false); }
void Chart::scan (std::vector <Task>& tasks) { generateBars (); // Not quantized, so that "while (xxx < now)" is inclusive. Date now; time_t epoch; std::vector <Task>::iterator task; for (task = tasks.begin (); task != tasks.end (); ++task) { // The entry date is when the counting starts. Date from = quantize (Date (task->get_date ("entry"))); epoch = from.toEpoch (); if (bars.find (epoch) != bars.end ()) ++bars[epoch].added; // e--> e--s--> // ppp> pppsss> Task::status status = task->getStatus (); if (status == Task::pending || status == Task::waiting) { if (task->has ("start")) { Date start = quantize (Date (task->get_date ("start"))); while (from < start) { epoch = from.toEpoch (); if (bars.find (epoch) != bars.end ()) ++bars[epoch].pending; from = increment (from); } while (from < now) { epoch = from.toEpoch (); if (bars.find (epoch) != bars.end ()) ++bars[epoch].started; from = increment (from); } } else { while (from < now) { epoch = from.toEpoch (); if (bars.find (epoch) != bars.end ()) ++bars[epoch].pending; from = increment (from); } } } // e--C e--s--C // pppd> pppsssd> else if (status == Task::completed) { // Truncate history so it starts at 'earliest' for completed tasks. Date end = quantize (Date (task->get_date ("end"))); epoch = end.toEpoch (); if (bars.find (epoch) != bars.end ()) ++bars[epoch].removed; // Maintain a running total of 'done' tasks that are off the left of the // chart. 
if (end < earliest) { ++carryover_done; continue; } if (task->has ("start")) { Date start = quantize (Date (task->get_date ("start"))); while (from < start) { epoch = from.toEpoch (); if (bars.find (epoch) != bars.end ()) ++bars[epoch].pending; from = increment (from); } while (from < end) { epoch = from.toEpoch (); if (bars.find (epoch) != bars.end ()) ++bars[epoch].started; from = increment (from); } while (from < now) { epoch = from.toEpoch (); if (bars.find (epoch) != bars.end ()) ++bars[epoch].done; from = increment (from); } } else { Date end = quantize (Date (task->get_date ("end"))); while (from < end) { epoch = from.toEpoch (); if (bars.find (epoch) != bars.end ()) ++bars[epoch].pending; from = increment (from); } while (from < now) { epoch = from.toEpoch (); if (bars.find (epoch) != bars.end ()) ++bars[epoch].done; from = increment (from); } } } // e--D e--s--D // ppp pppsss else if (status == Task::deleted) { // Skip old deleted tasks. Date end = quantize (Date (task->get_date ("end"))); epoch = end.toEpoch (); if (bars.find (epoch) != bars.end ()) ++bars[epoch].removed; if (end < earliest) continue; if (task->has ("start")) { Date start = quantize (Date (task->get_date ("start"))); while (from < start) { epoch = from.toEpoch (); if (bars.find (epoch) != bars.end ()) ++bars[epoch].pending; from = increment (from); } while (from < end) { epoch = from.toEpoch (); if (bars.find (epoch) != bars.end ()) ++bars[epoch].started; from = increment (from); } } else { Date end = quantize (Date (task->get_date ("end"))); while (from < end) { epoch = from.toEpoch (); if (bars.find (epoch) != bars.end ()) ++bars[epoch].pending; from = increment (from); } } } } // Size the data. maxima (); }
/** Canny edge detection operating on precomputed x/y derivative clouds.
 *
 *  Pipeline: 3x3 Gaussian smoothing of both derivative clouds, Sobel
 *  magnitude/direction, direction discretization, non-maximum suppression
 *  (with the low hysteresis threshold), then hysteresis tracing seeded from
 *  pixels above the high threshold.  Traced pixels end up at 255 in
 *  `output.magnitude`, all others at 0.
 */
template <typename PointInT, typename PointOutT> void
pcl_1_8::Edge<PointInT, PointOutT>::canny (
    const pcl::PointCloud<PointInT> &input_x,
    const pcl::PointCloud<PointInT> &input_y,
    pcl::PointCloud<PointOutT> &output)
{
  const float upper_threshold = hysteresis_threshold_high_;
  const float lower_threshold = hysteresis_threshold_low_;
  const int rows = input_x.height;
  const int cols = input_x.width;

  output.resize (rows * cols);
  output.height = rows;
  output.width = cols;

  // Noise reduction: convolve both derivative clouds with a 3x3 Gaussian.
  pcl::PointCloud<pcl::PointXYZI>::Ptr blur_kernel (new pcl::PointCloud<pcl::PointXYZI>);
  kernel_.setKernelSize (3);
  kernel_.setKernelSigma (1.0);
  kernel_.setKernelType (kernel<pcl::PointXYZI>::GAUSSIAN);
  kernel_.fetchKernel (*blur_kernel);
  convolution_.setKernel (*blur_kernel);

  PointCloudIn smoothed_cloud_x;
  convolution_.setInputCloud (input_x.makeShared ());
  convolution_.filter (smoothed_cloud_x);

  PointCloudIn smoothed_cloud_y;
  convolution_.setInputCloud (input_y.makeShared ());
  convolution_.filter (smoothed_cloud_y);

  // Gradient magnitude/direction via Sobel, then snap directions to the
  // discrete set used by non-maximum suppression.
  pcl::PointCloud<PointXYZIEdge>::Ptr gradient (new pcl::PointCloud<PointXYZIEdge>);
  sobelMagnitudeDirection (smoothed_cloud_x, smoothed_cloud_y, *gradient);
  discretizeAngles (*gradient);

  // Non-maximum suppression keeps only ridge pixels above the low threshold.
  pcl::PointCloud<pcl::PointXYZI>::Ptr ridge (new pcl::PointCloud<pcl::PointXYZI>);
  suppressNonMaxima (*gradient, *ridge, lower_threshold);

  // Hysteresis: any untraced pixel at or above the high threshold becomes a
  // seed; tracing floods into its 8-neighbourhood.  float max() marks
  // "accepted edge".
  static const int trace_dir[8][2] = {
    { 1,  0}, {-1,  0}, { 1,  1}, {-1, -1},
    { 0, -1}, { 0,  1}, {-1,  1}, { 1, -1}
  };
  for (int row = 0; row < rows; row++)
  {
    for (int col = 0; col < cols; col++)
    {
      pcl::PointXYZI &px = (*ridge)(col, row);
      if (px.intensity < upper_threshold ||
          px.intensity == std::numeric_limits<float>::max ())
        continue;

      px.intensity = std::numeric_limits<float>::max ();
      for (int d = 0; d < 8; d++)
        cannyTraceEdge (trace_dir[d][0], trace_dir[d][1], row, col, *ridge);
    }
  }

  // Binarize: traced pixels become 255, everything else 0.
  for (int row = 0; row < rows; row++)
    for (int col = 0; col < cols; col++)
      output (col, row).magnitude =
        ((*ridge)(col, row).intensity == std::numeric_limits<float>::max ()) ? 255 : 0;
}
/** Canny edge detection on the stored input cloud.
 *
 *  Pipeline: 3x3 Gaussian smoothing, Sobel gradient, direction
 *  discretization, non-maximum suppression with the low hysteresis
 *  threshold, then hysteresis tracing seeded from pixels above the high
 *  threshold.  Accepted edge pixels get magnitude 255, all others 0.
 *
 *  Note: setInputCloud() is re-pointed at the smoothed cloud for the Sobel
 *  stage, so the final loop over input_->size () iterates the smoothed
 *  cloud (same dimensions as the original input).
 */
template<typename PointInT, typename PointOutT> void
pcl_1_8::Edge<PointInT, PointOutT>::detectEdgeCanny (pcl::PointCloud<PointOutT> &output)
{
  const float upper_threshold = hysteresis_threshold_high_;
  const float lower_threshold = hysteresis_threshold_low_;
  const int rows = input_->height;
  const int cols = input_->width;

  output.resize (rows * cols);
  output.height = rows;
  output.width = cols;

  // Noise reduction: 3x3 Gaussian blur of the input cloud.
  pcl::PointCloud<pcl::PointXYZI>::Ptr blur_kernel (new pcl::PointCloud<pcl::PointXYZI>);
  PointCloudInPtr smoothed (new PointCloudIn);
  kernel_.setKernelSize (3);
  kernel_.setKernelSigma (1.0);
  kernel_.setKernelType (kernel<pcl::PointXYZI>::GAUSSIAN);
  kernel_.fetchKernel (*blur_kernel);
  convolution_.setKernel (*blur_kernel);
  convolution_.setInputCloud (input_);
  convolution_.filter (*smoothed);

  // Gradient via Sobel on the blurred cloud, then snap directions to the
  // discrete set used by non-maximum suppression.
  pcl::PointCloud<PointXYZIEdge>::Ptr gradient (new pcl::PointCloud<PointXYZIEdge>);
  setInputCloud (smoothed);
  detectEdgeSobel (*gradient);
  discretizeAngles (*gradient);

  // Non-maximum suppression keeps only ridge pixels above the low threshold.
  pcl::PointCloud<pcl::PointXYZI>::Ptr ridge (new pcl::PointCloud<pcl::PointXYZI>);
  suppressNonMaxima (*gradient, *ridge, lower_threshold);

  // Hysteresis: any untraced pixel at or above the high threshold becomes a
  // seed; tracing floods into its 8-neighbourhood.  float max() marks
  // "accepted edge".
  static const int trace_dir[8][2] = {
    { 1,  0}, {-1,  0}, { 1,  1}, {-1, -1},
    { 0, -1}, { 0,  1}, {-1,  1}, { 1, -1}
  };
  for (int row = 0; row < rows; row++)
  {
    for (int col = 0; col < cols; col++)
    {
      pcl::PointXYZI &px = (*ridge)(col, row);
      if (px.intensity < upper_threshold ||
          px.intensity == std::numeric_limits<float>::max ())
        continue;

      px.intensity = std::numeric_limits<float>::max ();
      for (int d = 0; d < 8; d++)
        cannyTraceEdge (trace_dir[d][0], trace_dir[d][1], row, col, *ridge);
    }
  }

  // Binarize over the flat index range.
  for (size_t idx = 0; idx < input_->size (); ++idx)
    output[idx].magnitude =
      ((*ridge)[idx].intensity == std::numeric_limits<float>::max ()) ? 255 : 0;
}
int main(){ //////////////////// // 1. Set Up PDFs // //////////////////// // Set up binning AxisCollection axes; axes.AddAxis(PdfAxis("energy", 2, 3, 10, "Energy")); // Only interested in first bit of data ntuple DataRepresentation dataRep(0); // Set up pdf with these bins in this observable BinnedPdf bgPdf(axes); bgPdf.SetDataRep(dataRep); BinnedPdf signalPdf(axes); signalPdf.SetDataRep(dataRep); std::cout << "Initialised Pdfs" << std::endl; ///////////////////////////////////// // 2. Fill with data and normalise // ///////////////////////////////////// ROOTNtuple bgMC("filename.root", "treename"); ROOTNtuple signalMC("filename.root", "treename"); for(size_t i = 0; i < 10; i++){ bgPdf.Fill(bgMC.GetEntry(i)); } for(size_t i = 0; i < 10; i++){ signalPdf.Fill(signalMC.GetEntry(i)); } bgPdf.Normalise(); signalPdf.Normalise(); std::cout << "Filled pdfs " << std::endl; //////////////////////////// // 3. Set Up LH function // //////////////////////////// BinnedNLLH lhFunction; lhFunction.SetDataSet(&signalMC); // initialise withe the data set lhFunction.AddPdf(bgPdf); lhFunction.AddPdf(signalPdf); std::cout << "Built LH function " << std::endl; // Set up the optimisation GridSearch gSearch(&lhFunction); std::vector<double> minima(2, 0); std::vector<double> maxima(2, 100); std::vector<double> stepsizes(2, 1); gSearch.SetMaxima(maxima); gSearch.SetMinima(minima); gSearch.SetStepSizes(stepsizes); //////////// // 4. Fit // //////////// gSearch.Optimise(); std::vector<double> fit = gSearch.GetFitResult().GetBestFit(); std::cout << "Best Fit: " << std::endl; for(size_t i = 0; i < fit.size(); i++) std::cout << fit.at(i) << "\t"; std::cout << std::endl; return 0; }
int ChromaFeat::Chroma(const float* buffer) { // check if the input arguments are legal if ( buffer == NULL || (buffer+length-1) == NULL) { printf("Error: illegal arguments."); return -1; } unsigned long i, j, k; if (FFT_Point < length) { printf("Error: FFT_Point is larger than frame length."); return -1; } // We don't do 'in-place' FFT so we copy it into a new array first // In the meantime, we multiply a hamming window float* X = new float[FFT_Point]; for (i=0; i<length; i++) X[i] = (float) buffer[i] * hammingWin[i]; for (i=length; i<FFT_Point; i++) X[i] = 0; fft->XForm(X); // we will use the formula f = (2^(1/12))^n * f_ref // to transform midi notes into corresponding frequencies // and further, k = f/delta_f to transform into FFT indices float* indexBoundary = new float[NUMNOTES + 1]; float freqResolution = (float)FS / FFT_Point; // transformation from midi note to FFT index for (i=0; i<=NUMNOTES; i++) indexBoundary[i] = (float) (powerOf2ROOT12[i] * FREQREF / freqResolution); // We can safely calculate chroma vector now unsigned long left; unsigned long right; // the 'i' loop covers each note, where '0' indicates C, '6' indicates 'F#', etc. for (i=0; i<NUMCHROMAGRAM; i++) { chromagram[i] = 0; for (j=0; j<NUMNOTES; j=j+NUMCHROMAGRAM){ // so this is how we determine both sides of FFT index left = (unsigned long) ceil(indexBoundary[i + j]); right = (unsigned long) floor(indexBoundary[i + j + 1]); float* FFT_sub = new float[right - left + 1]; for (k=left; k<=right; k++) FFT_sub[k-left] = X[k]; // use tonality feature, by taking the maxima instead of the mean value within one pitch chromagram[i] += maxima(FFT_sub, right - left +1); delete FFT_sub; } } // clean up delete []X; delete []indexBoundary; return 0; }