AlprFullDetails AlprImpl::recognizeFullDetails(cv::Mat img, std::vector<cv::Rect> regionsOfInterest)
{
  timespec startTime;
  getTimeMonotonic(&startTime);

  AlprFullDetails response;

  response.results.epoch_time = getEpochTimeMs();
  response.results.img_width = img.cols;
  response.results.img_height = img.rows;

  for (unsigned int i = 0; i < regionsOfInterest.size(); i++)
  {
    response.results.regionsOfInterest.push_back(AlprRegionOfInterest(regionsOfInterest[i].x, regionsOfInterest[i].y,
            regionsOfInterest[i].width, regionsOfInterest[i].height));
  }

  if (!img.data)
  {
    // Invalid image
    if (this->config->debugGeneral)
      std::cerr << "Invalid image" << std::endl;

    return response;
  }

  // Convert image to grayscale if required
  Mat grayImg = img;
  if (img.channels() > 2)
    cvtColor( img, grayImg, CV_BGR2GRAY );

  // Prewarp the image and ROIs if configured
  std::vector<cv::Rect> warpedRegionsOfInterest = regionsOfInterest;

  // Warp the image if prewarp is provided
  grayImg = prewarp->warpImage(grayImg);
  warpedRegionsOfInterest = prewarp->projectRects(regionsOfInterest, grayImg.cols, grayImg.rows, false);

  vector<PlateRegion> warpedPlateRegions;
  // Find all the candidate regions
  if (config->skipDetection == false)
  {
    warpedPlateRegions = plateDetector->detect(grayImg, warpedRegionsOfInterest);
  }
  else
  {
    // They have elected to skip plate detection.  Instead, return a list of plate regions
    // based on their regions of interest
    for (unsigned int i = 0; i < warpedRegionsOfInterest.size(); i++)
    {
      PlateRegion pr;
      pr.rect = cv::Rect(warpedRegionsOfInterest[i]);
      warpedPlateRegions.push_back(pr);
    }
  }

  queue<PlateRegion> plateQueue;
  for (unsigned int i = 0; i < warpedPlateRegions.size(); i++)
    plateQueue.push(warpedPlateRegions[i]);

  int platecount = 0;
  while (!plateQueue.empty())
  {
    PlateRegion plateRegion = plateQueue.front();
    plateQueue.pop();

    PipelineData pipeline_data(img, grayImg, plateRegion.rect, config);

    timespec platestarttime;
    getTimeMonotonic(&platestarttime);

    LicensePlateCandidate lp(&pipeline_data);

    lp.recognize();

    bool plateDetected = false;
    if (!pipeline_data.disqualified)
    {
      AlprPlateResult plateResult;
      plateResult.region = defaultRegion;
      plateResult.regionConfidence = 0;
      plateResult.plate_index = platecount++;

      // If using prewarp, remap the plate corners to the original image
      vector<Point2f> cornerPoints = pipeline_data.plate_corners;
      cornerPoints = prewarp->projectPoints(cornerPoints, true);

      for (int pointidx = 0; pointidx < 4; pointidx++)
      {
        plateResult.plate_points[pointidx].x = (int) cornerPoints[pointidx].x;
        plateResult.plate_points[pointidx].y = (int) cornerPoints[pointidx].y;
      }

      if (detectRegion)
      {
        stateIdentifier->recognize(&pipeline_data);
        if (pipeline_data.region_confidence > 0)
        {
          plateResult.region = pipeline_data.region_code;
          plateResult.regionConfidence = (int) pipeline_data.region_confidence;
        }
      }

      if (plateResult.region.length() > 0 && ocr->postProcessor.regionIsValid(plateResult.region) == false)
      {
        std::cerr << "Invalid pattern provided: " << plateResult.region << std::endl;
        std::cerr << "Valid patterns are located in the " << config->country << ".patterns file" << std::endl;
      }

      ocr->performOCR(&pipeline_data);
      ocr->postProcessor.analyze(plateResult.region, topN);

      timespec resultsStartTime;
      getTimeMonotonic(&resultsStartTime);

      const vector<PPResult> ppResults = ocr->postProcessor.getResults();

      int bestPlateIndex = 0;

      cv::Mat charTransformMatrix = getCharacterTransformMatrix(&pipeline_data);

      for (unsigned int pp = 0; pp < ppResults.size(); pp++)
      {
        // Set our "best plate" match to either the first entry, or the first entry
        // with a postprocessor template match
        if (bestPlateIndex == 0 && ppResults[pp].matchesTemplate)
          bestPlateIndex = plateResult.topNPlates.size();

        AlprPlate aplate;
        aplate.characters = ppResults[pp].letters;
        aplate.overall_confidence = ppResults[pp].totalscore;
        aplate.matches_template = ppResults[pp].matchesTemplate;

        // Grab detailed results for each character
        for (unsigned int c_idx = 0; c_idx < ppResults[pp].letter_details.size(); c_idx++)
        {
          AlprChar character_details;
          character_details.character = ppResults[pp].letter_details[c_idx].letter;
          character_details.confidence = ppResults[pp].letter_details[c_idx].totalscore;
          cv::Rect char_rect = pipeline_data.charRegions[ppResults[pp].letter_details[c_idx].charposition];

          std::vector<AlprCoordinate> charpoints = getCharacterPoints(char_rect, charTransformMatrix);
          for (int cpt = 0; cpt < 4; cpt++)
            character_details.corners[cpt] = charpoints[cpt];

          aplate.character_details.push_back(character_details);
        }

        plateResult.topNPlates.push_back(aplate);
      }

      if (plateResult.topNPlates.size() > bestPlateIndex)
      {
        AlprPlate bestPlate;
        bestPlate.characters = plateResult.topNPlates[bestPlateIndex].characters;
        bestPlate.matches_template = plateResult.topNPlates[bestPlateIndex].matches_template;
        bestPlate.overall_confidence = plateResult.topNPlates[bestPlateIndex].overall_confidence;
        bestPlate.character_details = plateResult.topNPlates[bestPlateIndex].character_details;

        plateResult.bestPlate = bestPlate;
      }

      timespec plateEndTime;
      getTimeMonotonic(&plateEndTime);
      plateResult.processing_time_ms = diffclock(platestarttime, plateEndTime);
      if (config->debugTiming)
      {
        cout << "Result Generation Time: " << diffclock(resultsStartTime, plateEndTime) << "ms." << endl;
      }

      if (plateResult.topNPlates.size() > 0)
      {
        plateDetected = true;
        response.results.plates.push_back(plateResult);
      }
    }

    if (!plateDetected)
    {
      // Not a valid plate
      // Check if this plate has any children, if so, send them back up for processing
      for (unsigned int childidx = 0; childidx < plateRegion.children.size(); childidx++)
      {
        plateQueue.push(plateRegion.children[childidx]);
      }
    }
  }

  // Unwarp plate regions if necessary
  prewarp->projectPlateRegions(warpedPlateRegions, grayImg.cols, grayImg.rows, true);
  response.plateRegions = warpedPlateRegions;

  timespec endTime;
  getTimeMonotonic(&endTime);
  response.results.total_processing_time_ms = diffclock(startTime, endTime);

  if (config->debugTiming)
  {
    cout << "Total Time to process image: " << diffclock(startTime, endTime) << "ms." << endl;
  }

  if (config->debugGeneral && config->debugShowImages)
  {
    for (unsigned int i = 0; i < regionsOfInterest.size(); i++)
    {
      rectangle(img, regionsOfInterest[i], Scalar(0,255,0), 2);
    }

    for (unsigned int i = 0; i < response.plateRegions.size(); i++)
    {
      rectangle(img, response.plateRegions[i].rect, Scalar(0, 0, 255), 2);
    }

    for (unsigned int i = 0; i < response.results.plates.size(); i++)
    {
      // Draw a box around the license plate
      for (int z = 0; z < 4; z++)
      {
        AlprCoordinate* coords = response.results.plates[i].plate_points;
        Point p1(coords[z].x, coords[z].y);
        Point p2(coords[(z + 1) % 4].x, coords[(z + 1) % 4].y);
        line(img, p1, p2, Scalar(255,0,255), 2);
      }

      // Draw the individual character boxes
      for (int q = 0; q < response.results.plates[i].bestPlate.character_details.size(); q++)
      {
        AlprChar details = response.results.plates[i].bestPlate.character_details[q];
        line(img, Point(details.corners[0].x, details.corners[0].y), Point(details.corners[1].x, details.corners[1].y), Scalar(0,255,0), 1);
        line(img, Point(details.corners[1].x, details.corners[1].y), Point(details.corners[2].x, details.corners[2].y), Scalar(0,255,0), 1);
        line(img, Point(details.corners[2].x, details.corners[2].y), Point(details.corners[3].x, details.corners[3].y), Scalar(0,255,0), 1);
        line(img, Point(details.corners[3].x, details.corners[3].y), Point(details.corners[0].x, details.corners[0].y), Scalar(0,255,0), 1);
      }
    }

    displayImage(config, "Main Image", img);

    // Sleep 1ms
    sleep_ms(1);
  }

  if (config->debugPauseOnFrame)
  {
    // Pause indefinitely until they press a key
    while ((char) cv::waitKey(50) == -1)
    {}
  }

  return response;
}
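// ---------------------------------------------------------------------------
// The OpenALPR snippets above and below all share the same timing pattern:
// capture a start/end timespec with getTimeMonotonic() and report the elapsed
// milliseconds with diffclock(). The helpers below are an illustrative sketch
// only, assuming POSIX clock_gettime with CLOCK_MONOTONIC; they match the
// timespec-based call signatures used here but are not the project's actual
// implementation. Other snippets in this file use clock_t or timeval variants
// of diffclock, which are not covered by this sketch.
#include <time.h>

static void getTimeMonotonic(timespec* t)
{
  // Monotonic clock: unaffected by wall-clock adjustments, suitable for timing.
  clock_gettime(CLOCK_MONOTONIC, t);
}

static double diffclock(timespec time1, timespec time2)
{
  // Elapsed time from time1 to time2, in milliseconds.
  double ms = (time2.tv_sec - time1.tv_sec) * 1000.0;
  ms += (time2.tv_nsec - time1.tv_nsec) / 1000000.0;
  return ms;
}
// ---------------------------------------------------------------------------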
void plateAnalysisThread(void* arg)
{
  PlateDispatcher* dispatcher = (PlateDispatcher*) arg;

  if (dispatcher->config->debugGeneral)
    cout << "Thread: " << tthread::this_thread::get_id() << " Initialized" << endl;

  int loop_count = 0;
  while (true)
  {
    PlateRegion plateRegion;
    if (dispatcher->nextPlate(&plateRegion) == false)
      break;

    if (dispatcher->config->debugGeneral)
      cout << "Thread: " << tthread::this_thread::get_id() << " loop " << ++loop_count << endl;

    Mat img = dispatcher->getImageCopy();

    timespec platestarttime;
    getTime(&platestarttime);

    LicensePlateCandidate lp(img, plateRegion.rect, dispatcher->config);

    lp.recognize();

    if (lp.confidence <= 10)
    {
      // Not a valid plate
      // Check if this plate has any children, if so, send them back up to the dispatcher for processing
      for (int childidx = 0; childidx < plateRegion.children.size(); childidx++)
      {
        dispatcher->appendPlate(plateRegion.children[childidx]);
      }
    }
    else
    {
      AlprResult plateResult;
      plateResult.region = dispatcher->defaultRegion;
      plateResult.regionConfidence = 0;

      for (int pointidx = 0; pointidx < 4; pointidx++)
      {
        plateResult.plate_points[pointidx].x = (int) lp.plateCorners[pointidx].x;
        plateResult.plate_points[pointidx].y = (int) lp.plateCorners[pointidx].y;
      }

      if (dispatcher->detectRegion)
      {
        char statecode[4];
        plateResult.regionConfidence = dispatcher->stateIdentifier->recognize(img, plateRegion.rect, statecode);
        if (plateResult.regionConfidence > 0)
        {
          plateResult.region = statecode;
        }
      }

      // Tesseract OCR does not appear to be threadsafe
      dispatcher->ocrMutex.lock();
      dispatcher->ocr->performOCR(lp.charSegmenter->getThresholds(), lp.charSegmenter->characters);
      dispatcher->ocr->postProcessor->analyze(plateResult.region, dispatcher->topN);
      const vector<PPResult> ppResults = dispatcher->ocr->postProcessor->getResults();
      dispatcher->ocrMutex.unlock();

      int bestPlateIndex = 0;

      for (int pp = 0; pp < ppResults.size(); pp++)
      {
        if (pp >= dispatcher->topN)
          break;

        // Set our "best plate" match to either the first entry, or the first entry
        // with a postprocessor template match
        if (bestPlateIndex == 0 && ppResults[pp].matchesTemplate)
          bestPlateIndex = pp;

        if (ppResults[pp].letters.size() >= dispatcher->config->postProcessMinCharacters &&
            ppResults[pp].letters.size() <= dispatcher->config->postProcessMaxCharacters)
        {
          AlprPlate aplate;
          aplate.characters = ppResults[pp].letters;
          aplate.overall_confidence = ppResults[pp].totalscore;
          aplate.matches_template = ppResults[pp].matchesTemplate;
          plateResult.topNPlates.push_back(aplate);
        }
      }
      plateResult.result_count = plateResult.topNPlates.size();

      if (plateResult.topNPlates.size() > 0)
        plateResult.bestPlate = plateResult.topNPlates[bestPlateIndex];

      timespec plateEndTime;
      getTime(&plateEndTime);
      plateResult.processing_time_ms = diffclock(platestarttime, plateEndTime);

      if (plateResult.result_count > 0)
      {
        // Synchronized section
        dispatcher->addResult(plateResult);
      }
    }

    if (dispatcher->config->debugTiming)
    {
      timespec plateEndTime;
      getTime(&plateEndTime);
      cout << "Thread: " << tthread::this_thread::get_id() << " Finished loop " << loop_count
           << " in " << diffclock(platestarttime, plateEndTime) << "ms." << endl;
    }
  }

  if (dispatcher->config->debugGeneral)
    cout << "Thread: " << tthread::this_thread::get_id() << " Complete" << endl;
}
std::vector<AlprResult> AlprImpl::recognize(cv::Mat img)
{
  timespec startTime;
  getTime(&startTime);

  // Find all the candidate regions
  vector<PlateRegion> plateRegions = plateDetector->detect(img);

  // Get the number of threads specified and make sure the value is sane
  // (cannot be greater than CPU cores or less than 1)
  int numThreads = config->multithreading_cores;
  if (numThreads > tthread::thread::hardware_concurrency())
    numThreads = tthread::thread::hardware_concurrency();
  if (numThreads <= 0)
    numThreads = 1;

  PlateDispatcher dispatcher(plateRegions, &img,
                             config, stateIdentifier, ocr,
                             topN, detectRegion, defaultRegion);

  // Spawn n threads to process all of the candidate regions and recognize
  list<tthread::thread*> threads;
  for (int i = 0; i < numThreads; i++)
  {
    tthread::thread* t = new tthread::thread(plateAnalysisThread, (void*) &dispatcher);
    threads.push_back(t);
  }

  // Wait for all threads to finish
  for (list<tthread::thread*>::iterator i = threads.begin(); i != threads.end(); ++i)
  {
    tthread::thread* t = *i;
    t->join();
    delete t;
  }

  if (config->debugTiming)
  {
    timespec endTime;
    getTime(&endTime);
    cout << "Total Time to process image: " << diffclock(startTime, endTime) << "ms." << endl;
  }

  if (config->debugGeneral && config->debugShowImages)
  {
    for (int i = 0; i < plateRegions.size(); i++)
    {
      rectangle(img, plateRegions[i].rect, Scalar(0, 0, 255), 2);
    }

    for (int i = 0; i < dispatcher.getRecognitionResults().size(); i++)
    {
      for (int z = 0; z < 4; z++)
      {
        AlprCoordinate* coords = dispatcher.getRecognitionResults()[i].plate_points;
        Point p1(coords[z].x, coords[z].y);
        Point p2(coords[(z + 1) % 4].x, coords[(z + 1) % 4].y);
        line(img, p1, p2, Scalar(255,0,255), 2);
      }
    }

    displayImage(config, "Main Image", img);
    cv::waitKey(1);
  }

  if (config->debugPauseOnFrame)
  {
    // Pause indefinitely until they press a key
    while ((char) cv::waitKey(50) == -1)
    {}
  }

  return dispatcher.getRecognitionResults();
}
int main(int argc, char* argv[]) {
  if (argc != 4) {
    printf("Usage: %s configFile outputName randomSeed\n", argv[0]);
    printf("You entered %i arguments.\n", argc-1);
    return 0;
  }
  const char* configFile = argv[1];
  const char* outArg = argv[2];
  const long int randomSeed = atol(argv[3]);

  char outTrajFile[256];
  char outCurrFile[256];
  sprintf(outTrajFile, "%s.pdb", outArg);
  sprintf(outCurrFile, "curr_%s.dat", outArg);

  printf("Brownian Dynamics initiated with command:\n");
  for (int i = 0; i < argc; i++) printf("%s\n", argv[i]);

  // Read the parameter file.
  Reader config(configFile);
  printf("Read config file %s.\n", configFile);
  const int numParams = config.length();
  const int numParts = config.countParameter("particle");
  BrownianParticle* part = new BrownianParticle[numParts];
  String* partGridFile = new String[numParts];

  // Set the defaults.
  String outName("out");
  double timestep = 1e-4;
  long int steps = 100;
  int interparticleForce = 1;
  int fullElect = 1;
  double kT = 1.0;
  double coulombConst = 566.440698/92.0;
  double electricField = 0.0;
  double cutoff = 10.0;
  int outPeriod = 200;

  // Set other parameters.
  const int decompPeriod = 4;
  const double switchLen = 2.0;
  const double switchStart = cutoff-switchLen;
  const double maxInitialPot = 0.5;

  int currPart = -1;
  for (int i = 0; i < numParams; i++) {
    String param = config.getParameter(i);
    String value = config.getValue(i);
    if (param == String("outName")) outName = value;
    else if (param == String("timestep")) timestep = strtod(value.val(), NULL);
    else if (param == String("steps")) steps = atol(value.val());
    else if (param == String("interparticleForce")) interparticleForce = atoi(value.val());
    else if (param == String("fullElect")) fullElect = atoi(value.val());
    else if (param == String("kT")) kT = strtod(value.val(), NULL);
    else if (param == String("coulombConst")) coulombConst = strtod(value.val(), NULL);
    else if (param == String("electricField")) electricField = strtod(value.val(), NULL);
    else if (param == String("cutoff")) cutoff = strtod(value.val(), NULL);
    else if (param == String("outPeriod")) outPeriod = atoi(value.val());
    else if (param == String("particle")) {
      currPart++;
      part[currPart] = BrownianParticle(value);
    }
    else if (param == String("num")) part[currPart].num = atoi(value.val());
    else if (param == String("gridFile")) partGridFile[currPart] = value;
    else if (param == String("diffusion")) part[currPart].diffusion = strtod(value.val(), NULL);
    else if (param == String("charge")) part[currPart].charge = strtod(value.val(), NULL);
    else if (param == String("radius")) part[currPart].radius = strtod(value.val(), NULL);
    else if (param == String("eps")) part[currPart].eps = strtod(value.val(), NULL);
  }

  // Write the parameters.
  printf("\nParameters: \n");
  printf("outName %s\n", outName.val());
  printf("timestep %.10g\n", timestep);
  printf("steps %ld\n", steps);
  printf("interparticleForce %d\n", interparticleForce);
  printf("fullElect %d\n", fullElect);
  printf("kT %.10g\n", kT);
  printf("coulombConst %.10g\n", coulombConst);
  printf("electricField %.10g\n", electricField);
  printf("cutoff %.10g\n", cutoff);
  printf("outPeriod %d\n", outPeriod);

  // Write the particles.
  printf("\nParticles:\n");
  for (int i = 0; i < numParts; i++) {
    printf("particle %s\n", part[i].name.val());
    printf("num %d\n", part[i].num);
    printf("gridFile %s\n", partGridFile[i].val());
    printf("diffusion %.10g\n", part[i].diffusion);
    printf("charge %.10g\n", part[i].charge);
    printf("radius %.10g\n", part[i].radius);
    printf("eps %.10g\n\n", part[i].eps);
  }

  // Load the potential grids.
  printf("Loading the potential grids...\n");
  for (int i = 0; i < numParts; i++) {
    part[i].grid = new Grid(partGridFile[i].val());
    printf("Loaded %s.\n", partGridFile[i].val());
    printf("System size %s.\n", part[i].grid->getExtent().toString().val());
  }

  // Instantiate the Brownian Dynamics object.
  BrownTown brown(kT, timestep, *(part[0].grid));
  brown.setPeriodic(1,1,1);

  // Seed the random number generator.
  long int randomSeed1 = randomSeed;
  for (int i = 0; i < 4; i++) randomSeed1 *= randomSeed1 + 1;
  int seed = (unsigned int)time((time_t *)NULL) + randomSeed1;
  printf("\nRandom number generator seed: %i\n", seed);
  Random randoGen(seed);

  // Get the total number of particles.
  int num = 0;
  for (int i = 0; i < numParts; i++) num += part[i].num;

  // Get the system dimensions.
  Vector3 sysDim = part[0].grid->getExtent();
  Vector3 origin = part[0].grid->getOrigin();
  Vector3 destin = part[0].grid->getDestination();

  // Set initial conditions.
  Vector3* pos = new Vector3[num];
  Vector3* pos1 = new Vector3[num];
  Vector3* posLast = new Vector3[num];
  int* type = new int[num];
  String* name = new String[num];
  int pn = 0;
  int p = 0;
  for (int i = 0; i < num; i++) {
    type[i] = p;
    name[i] = part[p].name;

    // Get the initial positions.
    do {
      pos[i] = brown.wrap(Vector3(sysDim.x*randoGen.uniform(),
                                  sysDim.y*randoGen.uniform(),
                                  sysDim.z*randoGen.uniform()));
    } while (part[type[i]].grid->interpolatePotential(pos[i]) > maxInitialPot);
    //pos[i] = part[0].grid->getCenter();

    pos1[i] = pos[i];
    posLast[i] = pos[i];

    pn++;
    if (pn >= part[p].num) {
      p++;
      pn = 0;
    }
  }

  // Trajectory PDB
  writePdbTraj(outTrajFile, pos, name, num, sysDim, 0.0);
  //FILE* out = fopen(outCurrFile, "w");

  // Prepare the force object.
  ComputeForce internal(num, part, numParts, *(part[0].grid), switchStart, switchLen, coulombConst);
  internal.decompose(pos);
  Vector3* forceInternal = new Vector3[num];
  for (int i = 0; i < num; i++) forceInternal[i] = 0.0;
  Vector3 rando = randoGen.gaussian_vector();

  ////////////////////////////////////////////////////////////////
  // Run the Brownian Dynamics steps.
  clock_t clock0 = clock();
  for (long int s = 0; s < steps; s++) {
    // Compute the internal forces.
    if (interparticleForce) {
      switch (fullElect) {
      case 0:
        // Remake the cell decomposition.
        if (s % decompPeriod == 0) internal.decompose(pos);
        // Compute using the cell decomposition.
        internal.compute(forceInternal, pos, type);
        break;

      case 1:
        // Compute long range electrostatic forces.
        internal.computeFull(forceInternal, pos, type);
        break;

      case 2:
        // Compute only hardcore forces.
        internal.computeHardcoreFull(forceInternal, pos, type);
        break;
      }
    }

    // Loop through the particles.
    for (int i = 0; i < num; i++) {
      // Compute the external forces.
      Vector3 forceExternal = Vector3(0.0, 0.0, part[type[i]].charge*electricField);
      Vector3 forceGrid = part[type[i]].grid->interpolateForce(pos[i]);
      //Vector3 forceGrid = 0.0;

      // Compute the total force.
      Vector3 force = forceInternal[i] + forceExternal + forceGrid;

      // Get the random kick.
      Vector3 rando = randoGen.gaussian_vector();

      // Step
      pos1[i] = brown.stepPeriodic(pos[i], force, rando, part[type[i]].diffusion);
    }

    if (s % outPeriod == 0) {
      appendPdbTraj(outTrajFile, pos1, name, num, sysDim, 0.0);
      //computeCurrent(pos1, posLast);
      for (int i = 0; i < num; i++) posLast[i] = pos1[i];

      if (s % (10*outPeriod) == 0) {
        double percent = (100.0*s)/steps;
        clock_t clock1 = clock();
        double stepTime = diffclock(clock0, clock1)/(10*outPeriod);
        printf("step %ld, time %g, %.2f percent complete, %.1f ms/step\n", s, s*timestep, percent, stepTime);
        clock0 = clock1;
      }
    }

    // Swap the position pointers.
    Vector3* temp = pos;
    pos = pos1;
    pos1 = temp;
  }
  //fclose(out);

  delete[] pos;
  delete[] pos1;
  delete[] posLast;
  delete[] type;
  delete[] name;
  delete[] part;
  delete[] partGridFile;
  delete[] forceInternal;
  return 0;
}
void Audio::render(int screenWidth, int screenHeight) {
    if (_stream) {
        glLineWidth(2.0);
        glBegin(GL_LINES);
        glColor3f(1,1,1);

        int startX = 20.0;
        int currentX = startX;
        int topY = screenHeight - 40;
        int bottomY = screenHeight - 20;
        float frameWidth = 20.0;
        float halfY = topY + ((bottomY - topY) / 2.0);

        // draw the lines for the base of the ring buffer
        glVertex2f(currentX, topY);
        glVertex2f(currentX, bottomY);

        for (int i = 0; i < RING_BUFFER_LENGTH_FRAMES / 2; i++) {
            glVertex2f(currentX, halfY);
            glVertex2f(currentX + frameWidth, halfY);
            currentX += frameWidth;
            glVertex2f(currentX, topY);
            glVertex2f(currentX, bottomY);
        }
        glEnd();

        // Show a bar with the amount of audio remaining in ring buffer beyond current playback
        float remainingBuffer = 0;
        timeval currentTime;
        gettimeofday(&currentTime, NULL);
        float timeLeftInCurrentBuffer = 0;
        if (_lastCallbackTime.tv_usec > 0) {
            timeLeftInCurrentBuffer = AUDIO_CALLBACK_MSECS - diffclock(&_lastCallbackTime, &currentTime);
        }
        if (_ringBuffer.getEndOfLastWrite() != NULL)
            remainingBuffer = _ringBuffer.diffLastWriteNextOutput() / PACKET_LENGTH_SAMPLES * AUDIO_CALLBACK_MSECS;

        if (_wasStarved == 0) {
            glColor3f(0, 1, 0);
        } else {
            glColor3f(0.5 + (_wasStarved / 20.0f), 0, 0);
            _wasStarved--;
        }

        glBegin(GL_QUADS);
        glVertex2f(startX, topY + 2);
        glVertex2f(startX + (remainingBuffer + timeLeftInCurrentBuffer)/AUDIO_CALLBACK_MSECS*frameWidth, topY + 2);
        glVertex2f(startX + (remainingBuffer + timeLeftInCurrentBuffer)/AUDIO_CALLBACK_MSECS*frameWidth, bottomY - 2);
        glVertex2f(startX, bottomY - 2);
        glEnd();

        if (_averagedLatency == 0.0) {
            _averagedLatency = remainingBuffer + timeLeftInCurrentBuffer;
        } else {
            _averagedLatency = 0.99f * _averagedLatency + 0.01f * (remainingBuffer + timeLeftInCurrentBuffer);
        }

        // Show a yellow bar with the averaged msecs latency you are hearing (from time of packet receipt)
        glColor3f(1,1,0);
        glBegin(GL_QUADS);
        glVertex2f(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth - 2, topY - 2);
        glVertex2f(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth + 2, topY - 2);
        glVertex2f(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth + 2, bottomY + 2);
        glVertex2f(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth - 2, bottomY + 2);
        glEnd();

        char out[40];
        sprintf(out, "%3.0f\n", _averagedLatency);
        drawtext(startX + _averagedLatency / AUDIO_CALLBACK_MSECS * frameWidth - 10, topY - 9, 0.10, 0, 1, 0, out, 1,1,0);

        // Show a red bar with the 'start' point of one frame plus the jitter buffer
        glColor3f(1, 0, 0);
        int jitterBufferPels = (1.f + (float)getJitterBufferSamples() / (float)PACKET_LENGTH_SAMPLES_PER_CHANNEL) * frameWidth;
        sprintf(out, "%.0f\n", getJitterBufferSamples() / SAMPLE_RATE * 1000.f);
        drawtext(startX + jitterBufferPels - 5, topY - 9, 0.10, 0, 1, 0, out, 1, 0, 0);
        sprintf(out, "j %.1f\n", _measuredJitter);
        if (Application::getInstance()->shouldDynamicallySetJitterBuffer()) {
            drawtext(startX + jitterBufferPels - 5, bottomY + 12, 0.10, 0, 1, 0, out, 1, 0, 0);
        } else {
            drawtext(startX, bottomY + 12, 0.10, 0, 1, 0, out, 1, 0, 0);
        }

        glBegin(GL_QUADS);
        glVertex2f(startX + jitterBufferPels - 2, topY - 2);
        glVertex2f(startX + jitterBufferPels + 2, topY - 2);
        glVertex2f(startX + jitterBufferPels + 2, bottomY + 2);
        glVertex2f(startX + jitterBufferPels - 2, bottomY + 2);
        glEnd();
    }
}
void CharacterAnalysis::analyze()
{
  timespec startTime;
  getTimeMonotonic(&startTime);

  if (config->always_invert)
    bitwise_not(pipeline_data->crop_gray, pipeline_data->crop_gray);

  pipeline_data->clearThresholds();
  pipeline_data->thresholds = produceThresholds(pipeline_data->crop_gray, config);

  timespec contoursStartTime;
  getTimeMonotonic(&contoursStartTime);

  pipeline_data->textLines.clear();

  for (unsigned int i = 0; i < pipeline_data->thresholds.size(); i++)
  {
    TextContours tc(pipeline_data->thresholds[i]);
    allTextContours.push_back(tc);
  }

  if (config->debugTiming)
  {
    timespec contoursEndTime;
    getTimeMonotonic(&contoursEndTime);
    cout << " -- Character Analysis Find Contours Time: " << diffclock(contoursStartTime, contoursEndTime) << "ms." << endl;
  }

  //Mat img_equalized = equalizeBrightness(img_gray);

  timespec filterStartTime;
  getTimeMonotonic(&filterStartTime);

  for (unsigned int i = 0; i < pipeline_data->thresholds.size(); i++)
  {
    this->filter(pipeline_data->thresholds[i], allTextContours[i]);

    if (config->debugCharAnalysis)
      cout << "Threshold " << i << " had " << allTextContours[i].getGoodIndicesCount() << " good indices." << endl;
  }

  if (config->debugTiming)
  {
    timespec filterEndTime;
    getTimeMonotonic(&filterEndTime);
    cout << " -- Character Analysis Filter Time: " << diffclock(filterStartTime, filterEndTime) << "ms." << endl;
  }

  PlateMask plateMask(pipeline_data);
  plateMask.findOuterBoxMask(allTextContours);

  pipeline_data->hasPlateBorder = plateMask.hasPlateMask;
  pipeline_data->plateBorderMask = plateMask.getMask();

  if (plateMask.hasPlateMask)
  {
    // Filter out bad contours now that we have an outer box mask...
    for (unsigned int i = 0; i < pipeline_data->thresholds.size(); i++)
    {
      filterByOuterMask(allTextContours[i]);
    }
  }

  int bestFitScore = -1;
  int bestFitIndex = -1;
  for (unsigned int i = 0; i < pipeline_data->thresholds.size(); i++)
  {
    int segmentCount = allTextContours[i].getGoodIndicesCount();

    if (segmentCount > bestFitScore)
    {
      bestFitScore = segmentCount;
      bestFitIndex = i;
      bestThreshold = pipeline_data->thresholds[i];
      bestContours = allTextContours[i];
    }
  }

  if (this->config->debugCharAnalysis)
    cout << "Best fit score: " << bestFitScore << " Index: " << bestFitIndex << endl;

  if (bestFitScore <= 1)
  {
    pipeline_data->disqualified = true;
    pipeline_data->disqualify_reason = "Low best fit score in characteranalysis";
    return;
  }

  //getColorMask(img, allContours, allHierarchy, charSegments);

  if (this->config->debugCharAnalysis)
  {
    Mat img_contours = bestContours.drawDebugImage(bestThreshold);
    displayImage(config, "Matching Contours", img_contours);
  }

  LineFinder lf(pipeline_data);
  vector<vector<Point> > linePolygons = lf.findLines(pipeline_data->crop_gray, bestContours);

  vector<TextLine> tempTextLines;
  for (unsigned int i = 0; i < linePolygons.size(); i++)
  {
    vector<Point> linePolygon = linePolygons[i];

    LineSegment topLine = LineSegment(linePolygon[0].x, linePolygon[0].y, linePolygon[1].x, linePolygon[1].y);
    LineSegment bottomLine = LineSegment(linePolygon[3].x, linePolygon[3].y, linePolygon[2].x, linePolygon[2].y);

    vector<Point> textArea = getCharArea(topLine, bottomLine);

    TextLine textLine(textArea, linePolygon, pipeline_data->crop_gray.size());

    tempTextLines.push_back(textLine);
  }

  filterBetweenLines(bestThreshold, bestContours, tempTextLines);

  // Sort the lines from top to bottom.
  std::sort(tempTextLines.begin(), tempTextLines.end(), sort_text_line);

  // Now that we've filtered a few more contours, re-do the text area.
  for (unsigned int i = 0; i < tempTextLines.size(); i++)
  {
    vector<Point> updatedTextArea = getCharArea(tempTextLines[i].topLine, tempTextLines[i].bottomLine);
    vector<Point> linePolygon = tempTextLines[i].linePolygon;
    if (updatedTextArea.size() > 0 && linePolygon.size() > 0)
    {
      pipeline_data->textLines.push_back(TextLine(updatedTextArea, linePolygon, pipeline_data->crop_gray.size()));
    }
  }

  if (config->auto_invert)
    pipeline_data->plate_inverted = isPlateInverted();
  else
    pipeline_data->plate_inverted = config->always_invert;

  if (config->debugGeneral)
    cout << "Plate inverted: " << pipeline_data->plate_inverted << endl;

  if (pipeline_data->textLines.size() > 0)
  {
    int confidenceDrainers = 0;
    int charSegmentCount = this->bestContours.getGoodIndicesCount();
    if (charSegmentCount == 1)
      confidenceDrainers += 91;
    else if (charSegmentCount < 5)
      confidenceDrainers += (5 - charSegmentCount) * 10;

    // Use the angle for the first line -- assume they'll always be parallel for multi-line plates
    int absangle = abs(pipeline_data->textLines[0].topLine.angle);
    if (absangle > config->maxPlateAngleDegrees)
      confidenceDrainers += 91;
    else if (absangle > 1)
      confidenceDrainers += (config->maxPlateAngleDegrees - absangle);

    // If a multiline plate has only one line, disqualify
    if (pipeline_data->isMultiline && pipeline_data->textLines.size() < 2)
    {
      if (config->debugCharAnalysis)
        std::cout << "Did not detect multiple lines on multi-line plate" << std::endl;
      confidenceDrainers += 95;
    }

    if (confidenceDrainers >= 90)
    {
      pipeline_data->disqualified = true;
      pipeline_data->disqualify_reason = "Low confidence in characteranalysis";
    }
    else
    {
      float confidence = 100 - confidenceDrainers;
      pipeline_data->confidence_weights.setScore("CHARACTER_ANALYSIS_SCORE", confidence, 1.0);
    }
  }
  else
  {
    pipeline_data->disqualified = true;
    pipeline_data->disqualify_reason = "No text lines found in characteranalysis";
  }

  if (config->debugTiming)
  {
    timespec endTime;
    getTimeMonotonic(&endTime);
    cout << "Character Analysis Time: " << diffclock(startTime, endTime) << "ms." << endl;
  }

  // Draw debug dashboard
  if (this->pipeline_data->config->debugCharAnalysis && pipeline_data->textLines.size() > 0)
  {
    vector<Mat> tempDash;
    for (unsigned int z = 0; z < pipeline_data->thresholds.size(); z++)
    {
      Mat tmp(pipeline_data->thresholds[z].size(), pipeline_data->thresholds[z].type());
      pipeline_data->thresholds[z].copyTo(tmp);
      cvtColor(tmp, tmp, CV_GRAY2BGR);

      tempDash.push_back(tmp);
    }

    Mat bestVal(this->bestThreshold.size(), this->bestThreshold.type());
    this->bestThreshold.copyTo(bestVal);
    cvtColor(bestVal, bestVal, CV_GRAY2BGR);

    for (unsigned int z = 0; z < this->bestContours.size(); z++)
    {
      Scalar dcolor(255,0,0);
      if (this->bestContours.goodIndices[z])
        dcolor = Scalar(0,255,0);
      drawContours(bestVal, this->bestContours.contours, z, dcolor, 1);
    }
    tempDash.push_back(bestVal);
    displayImage(config, "Character Region Step 1 Thresholds", drawImageDashboard(tempDash, bestVal.type(), 3));
  }
}
void TestDistcalcPerf :: testn (size_t n) {
  clock_t end_clock;
  time_t end_time;

  std::cout << " - running TestDistcalcPerf with " << n << " feature vectors of length "+stringify(FVLEN)+" - \n";
  size_t memreq = n * (sizeof(double) * FVLEN + sizeof(long));
  std::cout << " - estimated memory consumption is " << memreq/1000000 << " MB- \n";

  // create fvs
  std::cout << " - generating feature vectors - \n";
  populate_fvs(n);

  std::cout << "PID = " << getpid() << std::endl;

  // char t;
  // std::cout << "CHECK1 Press a key then press enter: ";
  // std::cin >> t;

  // res mem
  ds = new double[fvs_len-1];
  track_ids = new long[fvs_len-1];

  // std::cout << "CHECK2 Press a key then press enter: ";
  // std::cin >> t;

  // for "benchmark"
  std::cout << " - calculating distances - \n";
  clock_t begin_clock = clock();
  time_t begin_time = time(NULL);

  // at least two vecs
  assert(fvs_len > 2);

  // do it
#pragma omp parallel for
  for (long i = 1; i < fvs_len; i++) {
    if (i == 1)
      std::cout << "Hello from thread " << omp_get_thread_num() << ", nthreads " << omp_get_num_threads() << std::endl;

    // the 0 is a dummy value: all distance calc functions have the same signature!
    double d = SmafeDistancesCalc::getDistance_L1(0, fvs[0]->buffer, fvs[0]->buflen, fvs[i]->buffer, fvs[i]->buflen);
    //std::cout << d << '\n';

    // insert in <dist, id> multimap
    // at each insert, map must be sorted again
    // -> inefficient
    // ds.insert(std::pair<double, size_t>(d, i));

    // insert it in <id, dist> map
    // efficient, since position will always be end of map because track-ids are sorted here
    // ds.insert(ds.end(), std::pair<size_t, double>(i, d));

    // insert this new distance value right after last element
    // ds.insert(std::pair<size_t, double>(i, d));

    // insert this new distance value right after last element
    // -> efficient
    // http://www.cplusplus.com/reference/stl/map/insert/
    ds[i-1] = d;
    track_ids[i-1] = i;
  }
  end_clock = clock();
  end_time = time(NULL);
  sprintf(strings_in_c_suck, "\n==Execution time: %.0f s (wall clock time) / %.0f ms (usr time) .==\n\n", difftime(end_time, begin_time), diffclock(end_clock, begin_clock));
  std::cout << strings_in_c_suck;

  std::cout << " - sorting - \n";

  // sorting variant with multimap
  // MySet s( ds.begin(), ds.end() );
  // MySet::iterator it = s.begin();
  // size_t i = 0;
  // while ( it != s.end() && i < 100 ) {
  //   std::cout << i << ": " << *it++ << '\n';
  // }

  // sorting variant with two arrays (c style)
  qsort_pairs_topk(ds, track_ids, 0, fvs_len - 1 - 1, TOPK); // highest index, not number of elements
  //qsort_pairs(ds, track_ids, 0, fvs_len - 1 - 1); // highest index, not number of elements

  // execution time
  end_clock = clock();
  end_time = time(NULL);
  sprintf(strings_in_c_suck, "\n==Execution time: %.0f s (wall clock time) / %.0f ms (usr time) .==\n\n", difftime(end_time, begin_time), diffclock(end_clock, begin_clock));
  std::cout << strings_in_c_suck;

  // output
  //std::cout.precision(2);

  // output variant multimap <double, long>
  // tDsMultimap::iterator it = ds.begin();
  // for (int i=0; i < 20 && it != ds.end(); i++, it++) {
  //   std::cout << (i+1) << ". Track_id=" << it->second << ", dist=" << it->first << std::endl;
  // }

  // output variant two arrays c style
  for (size_t i=0; i < 20; i++) {
    std::cout << (i+1) << ". Track_id=" << track_ids[i] << ", dist=" << ds[i] << std::endl;
  }

  // std::cout << "CHECK3 Press a key then press enter: ";
  // std::cin >> t;

  // make ready for next test
  tearDownTest();
}
void SerialInterface::readData(float deltaTime) {
#ifdef __APPLE__
    int initialSamples = totalSamples;

    if (USING_INVENSENSE_MPU9150) {
        unsigned char sensorBuffer[36];

        // ask the invensense for raw gyro data
        write(_serialDescriptor, "RD683B0E\n", 9);
        read(_serialDescriptor, sensorBuffer, 36);

        int accelXRate, accelYRate, accelZRate;

        convertHexToInt(sensorBuffer + 6, accelZRate);
        convertHexToInt(sensorBuffer + 10, accelYRate);
        convertHexToInt(sensorBuffer + 14, accelXRate);

        const float LSB_TO_METERS_PER_SECOND2 = 1.f / 16384.f * GRAVITY_EARTH;
            // From MPU-9150 register map, with setting on highest resolution = +/- 2G

        _lastAcceleration = glm::vec3(-accelXRate, -accelYRate, -accelZRate) * LSB_TO_METERS_PER_SECOND2;

        int rollRate, yawRate, pitchRate;

        convertHexToInt(sensorBuffer + 22, rollRate);
        convertHexToInt(sensorBuffer + 26, yawRate);
        convertHexToInt(sensorBuffer + 30, pitchRate);

        // Convert the integer rates to floats
        const float LSB_TO_DEGREES_PER_SECOND = 1.f / 16.4f; // From MPU-9150 register map, 2000 deg/sec.
        glm::vec3 rotationRates;
        rotationRates[0] = ((float) -pitchRate) * LSB_TO_DEGREES_PER_SECOND;
        rotationRates[1] = ((float) -yawRate) * LSB_TO_DEGREES_PER_SECOND;
        rotationRates[2] = ((float) -rollRate) * LSB_TO_DEGREES_PER_SECOND;

        // update and subtract the long term average
        _averageRotationRates = (1.f - 1.f/(float)LONG_TERM_RATE_SAMPLES) * _averageRotationRates +
            1.f/(float)LONG_TERM_RATE_SAMPLES * rotationRates;
        rotationRates -= _averageRotationRates;

        // compute the angular acceleration
        glm::vec3 angularAcceleration = (deltaTime < EPSILON) ? glm::vec3() : (rotationRates - _lastRotationRates) / deltaTime;
        _lastRotationRates = rotationRates;

        // Update raw rotation estimates
        glm::quat estimatedRotation = glm::quat(glm::radians(_estimatedRotation)) *
            glm::quat(glm::radians(deltaTime * _lastRotationRates));

        // Update acceleration estimate: first, subtract gravity as rotated into current frame
        _estimatedAcceleration = (totalSamples < GRAVITY_SAMPLES) ? glm::vec3() :
            _lastAcceleration - glm::inverse(estimatedRotation) * _gravity;

        // update and subtract the long term average
        _averageAcceleration = (1.f - 1.f/(float)LONG_TERM_RATE_SAMPLES) * _averageAcceleration +
            1.f/(float)LONG_TERM_RATE_SAMPLES * _estimatedAcceleration;
        _estimatedAcceleration -= _averageAcceleration;

        // Consider updating our angular velocity/acceleration to linear acceleration mapping
        if (glm::length(_estimatedAcceleration) > EPSILON &&
                (glm::length(_lastRotationRates) > EPSILON || glm::length(angularAcceleration) > EPSILON)) {
            // compute predicted linear acceleration, find error between actual and predicted
            glm::vec3 predictedAcceleration = _angularVelocityToLinearAccel * _lastRotationRates +
                _angularAccelToLinearAccel * angularAcceleration;
            glm::vec3 error = _estimatedAcceleration - predictedAcceleration;

            // the "error" is actually what we want: the linear acceleration minus rotational influences
            _estimatedAcceleration = error;

            // adjust according to error in each dimension, in proportion to input magnitudes
            for (int i = 0; i < 3; i++) {
                if (fabsf(error[i]) < EPSILON) {
                    continue;
                }
                const float LEARNING_RATE = 0.001f;
                float rateSum = fabsf(_lastRotationRates.x) + fabsf(_lastRotationRates.y) + fabsf(_lastRotationRates.z);
                if (rateSum > EPSILON) {
                    for (int j = 0; j < 3; j++) {
                        float proportion = LEARNING_RATE * fabsf(_lastRotationRates[j]) / rateSum;
                        if (proportion > EPSILON) {
                            _angularVelocityToLinearAccel[j][i] += error[i] * proportion / _lastRotationRates[j];
                        }
                    }
                }
                float accelSum = fabsf(angularAcceleration.x) + fabsf(angularAcceleration.y) + fabsf(angularAcceleration.z);
                if (accelSum > EPSILON) {
                    for (int j = 0; j < 3; j++) {
                        float proportion = LEARNING_RATE * fabsf(angularAcceleration[j]) / accelSum;
                        if (proportion > EPSILON) {
                            _angularAccelToLinearAccel[j][i] += error[i] * proportion / angularAcceleration[j];
                        }
                    }
                }
            }
        }

        // rotate estimated acceleration into global rotation frame
        _estimatedAcceleration = estimatedRotation * _estimatedAcceleration;

        // Update estimated position and velocity
        float const DECAY_VELOCITY = 0.975f;
        float const DECAY_POSITION = 0.975f;
        _estimatedVelocity += deltaTime * _estimatedAcceleration;
        _estimatedPosition += deltaTime * _estimatedVelocity;
        _estimatedVelocity *= DECAY_VELOCITY;

        // Attempt to fuse gyro position with webcam position
        Webcam* webcam = Application::getInstance()->getWebcam();
        if (webcam->isActive()) {
            const float WEBCAM_POSITION_FUSION = 0.5f;
            _estimatedPosition = glm::mix(_estimatedPosition, webcam->getEstimatedPosition(), WEBCAM_POSITION_FUSION);
        } else {
            _estimatedPosition *= DECAY_POSITION;
        }

        // Accumulate a set of initial baseline readings for setting gravity
        if (totalSamples == 0) {
            _gravity = _lastAcceleration;
        } else {
            if (totalSamples < GRAVITY_SAMPLES) {
                _gravity = (1.f - 1.f/(float)GRAVITY_SAMPLES) * _gravity +
                    1.f/(float)GRAVITY_SAMPLES * _lastAcceleration;
            } else {
                // Use gravity reading to do sensor fusion on the pitch and roll estimation
                estimatedRotation = safeMix(estimatedRotation,
                    rotationBetween(estimatedRotation * _lastAcceleration, _gravity) * estimatedRotation,
                    1.0f / SENSOR_FUSION_SAMPLES);

                // Without a compass heading, always decay estimated Yaw slightly
                const float YAW_DECAY = 0.999f;
                glm::vec3 forward = estimatedRotation * glm::vec3(0.0f, 0.0f, -1.0f);
                estimatedRotation = safeMix(glm::angleAxis(glm::degrees(atan2f(forward.x, -forward.z)),
                    glm::vec3(0.0f, 1.0f, 0.0f)) * estimatedRotation, estimatedRotation, YAW_DECAY);
            }
        }

        _estimatedRotation = safeEulerAngles(estimatedRotation);

        totalSamples++;
    }

    if (initialSamples == totalSamples) {
        timeval now;
        gettimeofday(&now, NULL);

        if (diffclock(&lastGoodRead, &now) > NO_READ_MAXIMUM_MSECS) {
            printLog("No data - Shutting down SerialInterface.\n");
            resetSerial();
        }
    } else {
        gettimeofday(&lastGoodRead, NULL);
    }
#endif
}
// Must delete this pointer in parent class
void LicensePlateCandidate::recognize()
{
  charSegmenter = NULL;

  pipeline_data->plate_area_confidence = 0;
  pipeline_data->isMultiline = config->multiline;

  Rect expandedRegion = this->pipeline_data->regionOfInterest;

  pipeline_data->crop_gray = Mat(this->pipeline_data->grayImg, expandedRegion);
  resize(pipeline_data->crop_gray, pipeline_data->crop_gray, Size(config->templateWidthPx, config->templateHeightPx));

  CharacterAnalysis textAnalysis(pipeline_data);

  if (textAnalysis.confidence > 10)
  {
    EdgeFinder edgeFinder(pipeline_data);

    pipeline_data->plate_corners = edgeFinder.findEdgeCorners();

    if (edgeFinder.confidence > 0)
    {
      timespec startTime;
      getTime(&startTime);

      Mat originalCrop = pipeline_data->crop_gray;

      Transformation imgTransform(this->pipeline_data->grayImg, pipeline_data->crop_gray, expandedRegion);

      Size cropSize = imgTransform.getCropSize(pipeline_data->plate_corners,
              Size(pipeline_data->config->ocrImageWidthPx, pipeline_data->config->ocrImageHeightPx));
      Mat transmtx = imgTransform.getTransformationMatrix(pipeline_data->plate_corners, cropSize);
      pipeline_data->crop_gray = imgTransform.crop(cropSize, transmtx);

      if (this->config->debugGeneral)
        displayImage(config, "quadrilateral", pipeline_data->crop_gray);

      // Apply a perspective transformation to the TextLine objects
      // to match the newly deskewed license plate crop
      vector<TextLine> newLines;
      for (unsigned int i = 0; i < pipeline_data->textLines.size(); i++)
      {
        vector<Point2f> textArea = imgTransform.transformSmallPointsToBigImage(pipeline_data->textLines[i].textArea);
        vector<Point2f> linePolygon = imgTransform.transformSmallPointsToBigImage(pipeline_data->textLines[i].linePolygon);

        vector<Point2f> textAreaRemapped;
        vector<Point2f> linePolygonRemapped;

        textAreaRemapped = imgTransform.remapSmallPointstoCrop(textArea, transmtx);
        linePolygonRemapped = imgTransform.remapSmallPointstoCrop(linePolygon, transmtx);

        newLines.push_back(TextLine(textAreaRemapped, linePolygonRemapped));
      }

      pipeline_data->textLines.clear();
      for (unsigned int i = 0; i < newLines.size(); i++)
        pipeline_data->textLines.push_back(newLines[i]);

      if (config->debugTiming)
      {
        timespec endTime;
        getTime(&endTime);
        cout << "deskew Time: " << diffclock(startTime, endTime) << "ms." << endl;
      }

      charSegmenter = new CharacterSegmenter(pipeline_data);

      pipeline_data->plate_area_confidence = 100;
    }
  }
}
void* sendthread(void* shit)
{
    bool lastrecdiff = false;
    while (true)
    {
        _sleep(1);

        int recdiff = (int)abs((double)diffclock(last_packet_received, clock()));
        if (recdiff > 20000)
        {
            NET_Reconnect();
        }

        if (bconnectstep)
        {
            if (bconnectstep == 1)
            {
                char challengepkg[100];
                bf_write writechallenge(challengepkg, sizeof(challengepkg));
                writechallenge.WriteLong(-1);
                writechallenge.WriteByte('q');
                writechallenge.WriteLong(ourchallenge);
                writechallenge.WriteString("0000000000");
                net.SendTo(serverip, serverport, challengepkg, writechallenge.GetNumBytesWritten());
                _sleep(500);
            }
            _sleep(500);
        }

        if (!bconnectstep && !netchan->NeedsFragments() && recdiff >= 15 && !lastrecdiff)
        {
            NET_ResetDatagram();
            senddata.WriteOneBit(0);
            senddata.WriteOneBit(0);
            NET_SendDatagram(true);
            lastrecdiff = true;
        }
        else
        {
            lastrecdiff = false;
        }

        if (netchan->m_nInSequenceNr < 130)
        {
            NET_SendDatagram(); // netchan is volatile without this for some reason
            continue;
        }

        static int skipwalks = 0;
        if (skipwalks)
            skipwalks--;

        if (!skipwalks)
        {
            /*
            senddata.WriteUBitLong(9, 6);
            senddata.WriteUBitLong(1, 4);
            senddata.WriteUBitLong(0, 3);
            int curbit = senddata.m_iCurBit;
            senddata.WriteWord(1337);
            int len = 21;
            for (int a = 1; a < 22; a++)
            {
                if (a == 3) // pitch
                {
                    senddata.WriteOneBit(1);
                    static float pitch = 90;
                    static bool bdown = true;
                    if (bdown)
                        pitch -= 2;
                    else
                        pitch += 2;
                    if (pitch < -89)
                        bdown = false;
                    if (pitch > 89)
                        bdown = true;
                    senddata.WriteFloat(pitch);
                    len += 32;
                    continue;
                }
                if (a == 6 && GetAsyncKeyState(VK_UP))
                {
                    senddata.WriteOneBit(1);
                    senddata.WriteFloat(500);
                    len += 32;
                    continue;
                }
                else
                {
                    if (a == 6 && GetAsyncKeyState(VK_DOWN))
                    {
                        senddata.WriteOneBit(1);
                        senddata.WriteFloat(-500);
                        len += 32;
                        continue;
                    }
                }
                if (a == 7 && GetAsyncKeyState(VK_RIGHT))
                {
                    senddata.WriteOneBit(1);
                    senddata.WriteFloat(500);
                    len += 32;
                    continue;
                }
                else
                {
                    if (a == 7 && GetAsyncKeyState(VK_LEFT))
                    {
                        senddata.WriteOneBit(1);
                        senddata.WriteFloat(-500);
                        len += 32;
                        continue;
                    }
                }
                if (a == 8)
                {
                    senddata.WriteOneBit(1);
                    senddata.WriteFloat(500);
                    len += 32;
                    continue;
                }
                senddata.WriteOneBit(0);
            }
            int now = senddata.m_iCurBit;
            senddata.m_iCurBit = curbit;
            senddata.WriteWord(len);
            senddata.m_iCurBit = now;
            */

            senddata.WriteUBitLong(3, 6);
            senddata.WriteLong(net_tick);
            senddata.WriteUBitLong(net_hostframetime, 16);
            senddata.WriteUBitLong(net_hostframedeviation, 16);
            skipwalks = 50; // 12 seems best
        }

        {
            /*
            enum types_t
            {
                TYPE_NONE = 0,
                TYPE_STRING = 1,
                TYPE_INT = 2,
                TYPE_FLOAT = 3,
                TYPE_PTR = 4,
                TYPE_WSTRING = 5,
                TYPE_COLOR = 6,
                TYPE_UINT64 = 7,
                TYPE_NUMTYPES = 8,
            };
            */
            /*
            CUtlBuffer sheet(0, 8);
            sheet.PutUnsignedChar(1);
            sheet.PutString("AchievementEarned");
            sheet.PutUnsignedChar(0);
            int id = rand() % 30;
            sheet.PutUnsignedChar(1);
            sheet.PutString("achievementID");
            sheet.PutUnsignedChar(2);
            sheet.PutInt(id);
            sheet.PutUnsignedChar(8);
            sheet.PutUnsignedChar(8);
            senddata.WriteUBitLong(16, 6);
            senddata.WriteLong(sheet.TellPut());
            senddata.WriteBytes(sheet.Base(), sheet.TellPut());
            */
        }

        if (strlen(runcmd) > 0)
        {
            printf("Sending cmd: %s\n", runcmd);
            senddata.WriteUBitLong(4, 6);
            senddata.WriteString(runcmd);
            memset(runcmd, 0, sizeof(runcmd));
        }

        NET_SendDatagram();
    }
}
void OCR::performOCR(PipelineData* pipeline_data)
{
  const int SPACE_CHAR_CODE = 32;

  timespec startTime;
  getTimeMonotonic(&startTime);

  postProcessor.clear();

  // Don't waste time on OCR processing if it is impossible to get sufficient characters
  int total_char_spaces = 0;
  for (unsigned int i = 0; i < pipeline_data->charRegions.size(); i++)
    total_char_spaces += pipeline_data->charRegions[i].size();
  if (total_char_spaces < config->postProcessMinCharacters)
  {
    pipeline_data->disqualify_reason = "Insufficient character boxes detected. No OCR performed.";
    pipeline_data->disqualified = true;
    return;
  }

  for (unsigned int i = 0; i < pipeline_data->thresholds.size(); i++)
  {
    // Make it black text on white background
    bitwise_not(pipeline_data->thresholds[i], pipeline_data->thresholds[i]);
    tesseract.SetImage((uchar*) pipeline_data->thresholds[i].data,
                       pipeline_data->thresholds[i].size().width, pipeline_data->thresholds[i].size().height,
                       pipeline_data->thresholds[i].channels(), pipeline_data->thresholds[i].step1());

    int absolute_charpos = 0;
    for (unsigned int line_idx = 0; line_idx < pipeline_data->charRegions.size(); line_idx++)
    {
      for (unsigned int j = 0; j < pipeline_data->charRegions[line_idx].size(); j++)
      {
        Rect expandedRegion = expandRect(pipeline_data->charRegions[line_idx][j], 2, 2,
                                         pipeline_data->thresholds[i].cols, pipeline_data->thresholds[i].rows);

        tesseract.SetRectangle(expandedRegion.x, expandedRegion.y, expandedRegion.width, expandedRegion.height);
        tesseract.Recognize(NULL);

        tesseract::ResultIterator* ri = tesseract.GetIterator();
        tesseract::PageIteratorLevel level = tesseract::RIL_SYMBOL;
        do
        {
          const char* symbol = ri->GetUTF8Text(level);
          float conf = ri->Confidence(level);

          bool dontcare;
          int fontindex = 0;
          int pointsize = 0;
          const char* fontName = ri->WordFontAttributes(&dontcare, &dontcare, &dontcare, &dontcare, &dontcare, &dontcare, &pointsize, &fontindex);

          // Ignore NULL pointers, spaces, and characters that are way too small to be valid
          if (symbol != 0 && symbol[0] != SPACE_CHAR_CODE && pointsize >= config->ocrMinFontSize)
          {
            postProcessor.addLetter(string(symbol), line_idx, absolute_charpos, conf);

            if (this->config->debugOcr)
              printf("charpos%d line%d: threshold %d: symbol %s, conf: %f font: %s (index %d) size %dpx",
                     absolute_charpos, line_idx, i, symbol, conf, fontName, fontindex, pointsize);

            bool indent = false;
            tesseract::ChoiceIterator ci(*ri);
            do
            {
              const char* choice = ci.GetUTF8Text();

              postProcessor.addLetter(string(choice), line_idx, absolute_charpos, ci.Confidence());

              if (this->config->debugOcr)
              {
                if (indent) printf("\t\t ");
                printf("\t- ");
                printf("%s conf: %f\n", choice, ci.Confidence());
              }

              indent = true;
            }
            while (ci.Next());
          }

          if (this->config->debugOcr)
            printf("---------------------------------------------\n");

          delete[] symbol;
        }
        while ((ri->Next(level)));

        delete ri;

        absolute_charpos++;
      }
    }
  }

  if (config->debugTiming)
  {
    timespec endTime;
    getTimeMonotonic(&endTime);
    cout << "OCR Time: " << diffclock(startTime, endTime) << "ms." << endl;
  }
}
void CharacterAnalysis::analyze()
{
  thresholds = produceThresholds(img_gray, config);

  /*
  // Morph Close the gray image to make it easier to detect blobs
  int morph_elem = 1;
  int morph_size = 1;
  Mat element = getStructuringElement( morph_elem, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );

  for (int i = 0; i < thresholds.size(); i++)
  {
    //morphologyEx( mask, mask, MORPH_CLOSE, element );
    morphologyEx( thresholds[i], thresholds[i], MORPH_OPEN, element );
    //dilate( thresholds[i], thresholds[i], element );
  }
  */

  timespec startTime;
  getTime(&startTime);

  for (int i = 0; i < thresholds.size(); i++)
  {
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;

    Mat tempThreshold(thresholds[i].size(), CV_8U);
    thresholds[i].copyTo(tempThreshold);
    findContours(tempThreshold,
                 contours,                // a vector of contours
                 hierarchy,
                 CV_RETR_TREE,            // retrieve all contours
                 CV_CHAIN_APPROX_SIMPLE); // all pixels of each contours

    allContours.push_back(contours);
    allHierarchy.push_back(hierarchy);
  }

  if (config->debugTiming)
  {
    timespec endTime;
    getTime(&endTime);
    cout << " -- Character Analysis Find Contours Time: " << diffclock(startTime, endTime) << "ms." << endl;
  }

  //Mat img_equalized = equalizeBrightness(img_gray);

  getTime(&startTime);

  for (int i = 0; i < thresholds.size(); i++)
  {
    vector<bool> goodIndices = this->filter(thresholds[i], allContours[i], allHierarchy[i]);
    charSegments.push_back(goodIndices);

    if (config->debugCharAnalysis)
      cout << "Threshold " << i << " had " << getGoodIndicesCount(goodIndices) << " good indices." << endl;
  }

  if (config->debugTiming)
  {
    timespec endTime;
    getTime(&endTime);
    cout << " -- Character Analysis Filter Time: " << diffclock(startTime, endTime) << "ms." << endl;
  }

  this->plateMask = findOuterBoxMask();

  if (hasPlateMask)
  {
    // Filter out bad contours now that we have an outer box mask...
    for (int i = 0; i < thresholds.size(); i++)
    {
      charSegments[i] = filterByOuterMask(allContours[i], allHierarchy[i], charSegments[i]);
    }
  }

  int bestFitScore = -1;
  int bestFitIndex = -1;
  for (int i = 0; i < thresholds.size(); i++)
  {
    //vector<bool> goodIndices = this->filter(thresholds[i], allContours[i], allHierarchy[i]);
    //charSegments.push_back(goodIndices);

    int segmentCount = getGoodIndicesCount(charSegments[i]);

    if (segmentCount > bestFitScore)
    {
      bestFitScore = segmentCount;
      bestFitIndex = i;
      bestCharSegments = charSegments[i];
      bestThreshold = thresholds[i];
      bestContours = allContours[i];
      bestHierarchy = allHierarchy[i];
      bestCharSegmentsCount = segmentCount;
    }
  }

  if (this->config->debugCharAnalysis)
    cout << "Best fit score: " << bestFitScore << " Index: " << bestFitIndex << endl;

  if (bestFitScore <= 1)
    return;

  //getColorMask(img, allContours, allHierarchy, charSegments);

  if (this->config->debugCharAnalysis)
  {
    Mat img_contours(bestThreshold.size(), CV_8U);
    bestThreshold.copyTo(img_contours);
    cvtColor(img_contours, img_contours, CV_GRAY2RGB);

    vector<vector<Point> > allowedContours;
    for (int i = 0; i < bestContours.size(); i++)
    {
      if (bestCharSegments[i])
        allowedContours.push_back(bestContours[i]);
    }

    drawContours(img_contours, bestContours,
                 -1,                   // draw all contours
                 cv::Scalar(255,0,0),  // in blue
                 1);                   // with a thickness of 1

    drawContours(img_contours, allowedContours,
                 -1,                   // draw all contours
                 cv::Scalar(0,255,0),  // in green
                 1);                   // with a thickness of 1

    displayImage(config, "Matching Contours", img_contours);
  }

  //charsegments = this->getPossibleCharRegions(img_threshold, allContours, allHierarchy, STARTING_MIN_HEIGHT + (bestFitIndex * HEIGHT_STEP), STARTING_MAX_HEIGHT + (bestFitIndex * HEIGHT_STEP));

  this->linePolygon = getBestVotedLines(img_gray, bestContours, bestCharSegments);

  if (this->linePolygon.size() > 0)
  {
    this->topLine = LineSegment(this->linePolygon[0].x, this->linePolygon[0].y, this->linePolygon[1].x, this->linePolygon[1].y);
    this->bottomLine = LineSegment(this->linePolygon[3].x, this->linePolygon[3].y, this->linePolygon[2].x, this->linePolygon[2].y);
    //this->charArea = getCharSegmentsBetweenLines(bestThreshold, bestContours, this->linePolygon);
    filterBetweenLines(bestThreshold, bestContours, bestHierarchy, linePolygon, bestCharSegments);

    this->charArea = getCharArea();

    if (this->charArea.size() > 0)
    {
      this->charBoxTop = LineSegment(this->charArea[0].x, this->charArea[0].y, this->charArea[1].x, this->charArea[1].y);
      this->charBoxBottom = LineSegment(this->charArea[3].x, this->charArea[3].y, this->charArea[2].x, this->charArea[2].y);
      this->charBoxLeft = LineSegment(this->charArea[3].x, this->charArea[3].y, this->charArea[0].x, this->charArea[0].y);
      this->charBoxRight = LineSegment(this->charArea[2].x, this->charArea[2].y, this->charArea[1].x, this->charArea[1].y);
    }
  }

  this->thresholdsInverted = isPlateInverted();
}