void main()
{
    unsigned fg = BG0ROMDrawable::SOLID_FG ^ BG0ROMDrawable::BLUE;
    unsigned bg = BG0ROMDrawable::SOLID_FG ^ BG0ROMDrawable::BLACK;

    vid.initMode(BG0_ROM);
    vid.attach(cube);
    vid.bg0rom.erase(bg);
    vid.bg0rom.fill(vec(0,0), vec(3,3), fg);

    synthInit();
    float hz = 0;

    while (1) {
        // Scale to [-1, 1]
        auto accel = cube.accel() / 128.f;

        // Glide to the target note (half-steps above or below middle C)
        float note = 261.6f * pow(1.05946f, 8 + round(accel.y * 24.f));
        hz += (note - hz) * 0.4f;

        synthesize(hz, accel.x - 0.2f, clamp(accel.x + 0.5f, 0.f, 1.f));

        const Int2 center = LCD_center - vec(24,24)/2;
        vid.bg0rom.setPanning(-(center + accel.xy() * 60.f));

        System::paint();
    }
}
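/*
 * Worked example of the pitch mapping above: 1.05946 is approximately
 * 2^(1/12), the equal-temperament semitone ratio, and 261.6 Hz is middle C.
 * With accel.y = 0 the target note is 261.6 * 2^(8/12), about 415.3 Hz;
 * a full tilt (accel.y = +/-1) shifts the target by +/-24 semitones.
 */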
void main()
{
    // SETUP VIDEO BUFFER
    static VideoBuffer vid;                 // create a video buffer for each cube
    vid.initMode(BG0_SPR_BG1);              // set the video buffer to BG0_SPR_BG1 mode
    vid.attach(0);                          // attach the video buffer to the cube with ID 0

    // BACKGROUND LAYER
    vid.bg0.image(vec(0,0), MyBG0Image);    // draw the BG0 image defined in assets.lua onto the VideoBuffer's BG0 layer

    // SPRITES LAYER
    vid.sprites[0].setImage(MyRedSprite);   // assign our first sprite
    vid.sprites[0].move(15, 15);            // move it to where we want it
    vid.sprites[1].setImage(MyBlueSprite);
    vid.sprites[1].move(93, 60);

    // FOREGROUND LAYER
    vid.bg1.setMask(BG1Mask::filled(vec(4,4), vec(8,8)));   // mask an area matching the location and size of our BG1 image
    vid.bg1.image(vec(4,4), MyBG1Image);                    // place the BG1 image in the same space as the mask

    while (1) {                             // game loop
        System::paint();
    }
}
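/*
 * Note on the mask above: BG1Mask::filled() takes its position and size in
 * tiles (8x8 pixels each), so vec(4,4) / vec(8,8) allocates an 8x8-tile
 * (64x64 pixel) window whose top-left corner is 32 pixels from the screen
 * origin. The BG1 image must be drawn at the same tile coordinates to land
 * inside the masked window.
 */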
void main()
{
    CubeID cube(0);
    VideoBuffer vid;
    vid.initMode(BG0);
    vid.attach(cube);

    for (int x = -1; x < 17; x++) {
        drawColumn(vid, x);
    }

    /*
     * Scroll horizontally through our background based on the accelerometer's X axis.
     *
     * We can scroll with pixel accuracy within a column of tiles via bg0.setPanning().
     * When we get to either edge of the currently plotted tiles, draw a new column
     * of tiles from the source image.
     *
     * Because BG0 is 1 tile wider and taller than the viewport itself, we can pre-load
     * the next column of tiles into the column at the edge before it comes into view.
     */

    float x = 0;
    int prev_xt = 0;

    for (;;) {
        // Scroll based on accelerometer tilt
        Int2 accel = vid.physicalAccel().xy();

        // Floating point pixels
        x += accel.x * (40.0f / 128.0f);

        // Integer pixels
        int xi = x + 0.5f;

        // Integer tiles
        int xt = x / 8;

        while (prev_xt < xt) {
            // Fill in new tiles, just past the right edge of the screen
            drawColumn(vid, prev_xt + 17);
            prev_xt++;
        }

        while (prev_xt > xt) {
            // Fill in new tiles, just past the left edge
            drawColumn(vid, prev_xt - 2);
            prev_xt--;
        }

        // Pixel-level scrolling within the current column
        vid.bg0.setPanning(vec(xi, 0));

        System::paint();
    }
}
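/*
 * Hypothetical sketch of the drawColumn() helper assumed above (the
 * original defines it elsewhere): copy one column of tiles from a source
 * asset into BG0, wrapping both the source column and the destination
 * column within BG0's 18x18-tile plane. The asset name `Background` is an
 * assumption.
 */
static void drawColumn(VideoBuffer &vid, int x)
{
    // Wrap the column index within the source image and within BG0
    unsigned srcCol = umod(x, Background.tileWidth());
    unsigned dstCol = umod(x, vid.bg0.tileWidth());
    vid.bg0.image(vec(dstCol, 0U), vec(1U, vid.bg0.tileHeight()),
                  Background, vec(srcCol, 0U));
}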
void CaptureCallback::ContextData::readImage()
{
    int width = _graphicsContext->getTraits()->width;
    int height = _graphicsContext->getTraits()->height;

    if (width != _width || height != _height) {
        _width = width;
        _height = height;
    }

    if (_delegate) {
        VideoBuffer *buffer = _delegate->getVideoBuffer();

        GLint internalFormat = GL_BGRA; // This is fine for iOS
#ifdef ANDROID
        // Depending on the platform / Android version, the pixel format will change
        glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_FORMAT, &internalFormat);
        if (internalFormat == GL_RGB)
            internalFormat = GL_RGBA;
#endif
        buffer->pixelFormat = internalFormat;

        size_t bpp = buffer->bpp();
        size_t bpr = buffer->bpr();
        GLubyte *pixelBufferData = (GLubyte *)buffer->data();

        glReadPixels(0, 0, width, height, internalFormat, GL_UNSIGNED_BYTE, pixelBufferData);

        if (bpr != width * bpp) {
            // There is some padding expected in the buffer; re-space the rows
            // in place, back to front (row 0 is already in place)
            for (int y = height - 1; y > 0; --y) {
                memmove(pixelBufferData + y * bpr,
                        pixelBufferData + y * width * bpp,
                        width * bpp);
            }
        }

        _delegate->didCaptureImage();
    }
}
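/*
 * Worked example of the padding fix above (numbers are illustrative, not
 * from the original): a 100-pixel-wide BGRA row is 400 bytes, but a buffer
 * allocated with 64-byte row alignment has bpr = 448. glReadPixels writes
 * the 400-byte rows contiguously, so the backwards memmove loop re-spaces
 * each row to start at its aligned offset y * bpr; iterating from the last
 * row down prevents a row from overwriting data that has not moved yet.
 */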
VideoBuffer * SignTool::GetIcon(int toolID, int width, int height)
{
    VideoBuffer * newTexture = new VideoBuffer(width, height);

    for (int y = 0; y < height; y++)
    {
        for (int x = 0; x < width; x++)
        {
            // Grey one-pixel border, black interior
            pixel pc = x==0 || x==width-1 || y==0 || y==height-1 ? PIXPACK(0xA0A0A0) : PIXPACK(0x000000);
            newTexture->SetPixel(x, y, PIXR(pc), PIXG(pc), PIXB(pc), 255);
        }
    }

    newTexture->AddCharacter((width/2)-5, (height/2)-5, 0xE021, 32, 64, 128, 255);
    newTexture->BlendCharacter((width/2)-5, (height/2)-5, 0xE020, 255, 255, 255, 255);

    return newTexture;
}
void VideoHeader::setup(VideoBuffer & buffer){
    //newFrameEvent.init("Playmodes.VideoHeader.newFrame");
    this->buffer = &buffer;
    fps = buffer.getFps();
    position = buffer.size();
    oneFrame = (pmTimeDiff)round(1000000.0 / (float)fps);
    speed = 1;
    prevBufferPos = 0;
    pct = 1;
    pctHasChanged = true;
    in = 0;
    out = 1;
    loopMode = 0;
    delay = 0;
}
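// Worked example for the conversion above: at fps = 25,
// oneFrame = round(1000000.0 / 25) = 40000 microseconds per frame.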
void main()
{
    int buff = 0;
    if (totalTouchCount.readObject(buff, 0) <= 0) {
        // No data has been saved previously
        totalTouchCount.writeObject(0);
        LOG("\ntotalTouchCount has been written to\n");
    } else {
        LOG("\ntotalTouchCount hasn't been written to\n");
    }

    Colormap colourList;
    colourList.setEGA();

    vid.setMode(SOLID_MODE);
    vid.colormap[0].set(colourList[1].get());

    Events::cubeTouch.set(&onTouch);

    while (1) {
        System::paint();

        int temp = 0;
        totalTouchCount.readObject(temp, 0);
        LOG("%d \n", temp);
    }
}
void main()
{
    vid.initMode(BG0_ROM);
    vid.attach(0);

    String<128> text;
    text << "Hello World" << "\n";
    vid.bg0rom.text(vec(0,0), text);

    playSfx(CountSound);
    // AudioTracker::play(Count);

    while (1)
        System::paint();
}
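/*
 * Hedged sketch of the playSfx() helper assumed above (the original
 * defines it elsewhere): play a one-shot asset sample on a dedicated
 * audio channel. The channel number is an arbitrary assumption.
 */
static void playSfx(const AssetAudio &sfx)
{
    static AudioChannel channel(0);
    channel.stop();
    channel.play(sfx);
}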
VideoBuffer * SaveRenderer::Render(unsigned char * saveData, int dataSize, bool decorations, bool fire)
{
    GameSave * tempSave;
    try
    {
        tempSave = new GameSave((char*)saveData, dataSize);
    }
    catch (std::exception & e)
    {
        //Todo: make this look a little less shit
        VideoBuffer * buffer = new VideoBuffer(64, 64);
        buffer->BlendCharacter(32, 32, 'x', 255, 255, 255, 255);
        return buffer;
    }

    VideoBuffer * thumb = Render(tempSave, decorations, fire);
    delete tempSave;
    return thumb;
}
VideoBuffer *DecorationTool::GetIcon(int toolID, int width, int height)
{
    VideoBuffer * newTexture = new VideoBuffer(width, height);

    for (int y = 0; y < height; y++)
    {
        for (int x = 0; x < width; x++)
        {
            //if (toolID == DECO_LIGH)
            //    vid_buf[WINDOWW*(y+j)+(x+i)] = PIXRGB(PIXR(pc)-10*j, PIXG(pc)-10*j, PIXB(pc)-10*j);
            //else if (toolID == DECO_DARK)
            //    vid_buf[WINDOWW*(y+j)+(x+i)] = PIXRGB(PIXR(pc)+10*j, PIXG(pc)+10*j, PIXB(pc)+10*j);
            if (toolID == DECO_SMUDGE)
                newTexture->SetPixel(x, y, 0, 255-5*x, 5*x, 255);
            else if (toolID == DECO_DRAW || toolID == DECO_CLEAR)
                newTexture->SetPixel(x, y, Red, Green, Blue, Alpha);
            else
                newTexture->SetPixel(x, y, 50, 50, 50, 255);
        }
    }

    if (toolID == DECO_CLEAR)
    {
        int reverseRed = (Red+127)%256;
        int reverseGreen = (Green+127)%256;
        int reverseBlue = (Blue+127)%256;
        for (int y = 4; y < 12; y++)
        {
            newTexture->SetPixel(y+5, y-1, reverseRed, reverseGreen, reverseBlue, 255);
            newTexture->SetPixel(y+6, y-1, reverseRed, reverseGreen, reverseBlue, 255);
            newTexture->SetPixel(20-y, y-1, reverseRed, reverseGreen, reverseBlue, 255);
            newTexture->SetPixel(21-y, y-1, reverseRed, reverseGreen, reverseBlue, 255);
        }
    }
    else if (toolID == DECO_ADD)
        newTexture->AddCharacter(11, 4, '+', Red, Green, Blue, 255);
    else if (toolID == DECO_SUBTRACT)
        newTexture->AddCharacter(11, 4, '-', Red, Green, Blue, 255);
    else if (toolID == DECO_MULTIPLY)
        newTexture->AddCharacter(11, 3, 'x', Red, Green, Blue, 255);
    else if (toolID == DECO_DIVIDE)
        newTexture->AddCharacter(11, 4, '/', Red, Green, Blue, 255);

    return newTexture;
}
RequestBroker::ProcessResponse ThumbRenderRequest::Process(RequestBroker & rb)
{
    VideoBuffer * thumbnail = SaveRenderer::Ref().Render(Save, Decorations, Fire);
    delete Save;
    Save = NULL;

    if (thumbnail)
    {
        thumbnail->Resize(Width, Height, true);
        ResultObject = (void*)thumbnail;
        rb.requestComplete((Request*)this);
        return RequestBroker::Finished;
    }

    return RequestBroker::Failed;
}
void writePacket()
{
    /*
     * This is one way to write packets to the UsbPipe; using reserve()
     * and commit(). If you already have a buffer that you want to copy to the
     * UsbPipe, you can use write().
     */

    if (Usb::isConnected() && usbPipe.writeAvailable()) {

        /*
         * Access some buffer space for writing the next packet. This
         * is the zero-copy API for writing packets. Both reading and writing
         * have a traditional (one copy) API and a zero-copy API.
         */
        UsbPacket &packet = usbPipe.sendQueue.reserve();

        /*
         * Fill most of the packet with dummy data
         */

        // 28-bit type code, for our own application's use
        packet.setType(0x5A);

        packet.resize(packet.capacity());
        for (unsigned i = 0; i < packet.capacity(); ++i) {
            packet.bytes()[i] = 'a' + i;
        }

        /*
         * Fill the first 3 bytes with accelerometer data from Cube 0
         */
        Byte3 accel = vid.physicalAccel();
        packet.bytes()[0] = accel.x;
        packet.bytes()[1] = accel.y;
        packet.bytes()[2] = accel.z;

        /*
         * Log the packet for debugging, and commit it to the FIFO.
         * The system will asynchronously send it to our peer.
         */
        LOG("Sending: %d bytes, type=%02x, data=%19h\n",
            packet.size(), packet.type(), packet.bytes());
        usbPipe.sendQueue.commit();

        updatePacketCounts(1, 0);
    }
}
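/*
 * For comparison, a hypothetical sketch of the one-copy path that the
 * comment above refers to: build the packet in our own storage, then copy
 * it into the pipe with write(). Assumes write() accepts a fully built
 * UsbPacket; error handling is elided.
 */
void writePacketOneCopy()
{
    if (!Usb::isConnected() || !usbPipe.writeAvailable())
        return;

    UsbPacket packet;
    packet.setType(0x5A);
    packet.resize(3);

    // Same accelerometer payload as the zero-copy version above
    Byte3 accel = vid.physicalAccel();
    packet.bytes()[0] = accel.x;
    packet.bytes()[1] = accel.y;
    packet.bytes()[2] = accel.z;

    usbPipe.write(packet);
    updatePacketCounts(1, 0);
}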
void main()
{
    /*
     * Display text in BG0_ROM mode on Cube 0
     */

    CubeID cube = 0;
    vid.initMode(BG0_ROM);
    vid.attach(cube);
    vid.bg0rom.text(vec(0,0), " USB Demo ", vid.bg0rom.WHITE_ON_TEAL);

    // Zero out our counters
    usbCounters.reset();

    /*
     * When we transmit packets in this example, we'll fill them with our
     * cube's accelerometer state. When we receive packets, they'll
     * be hex-dumped to the screen. We also keep counters that show how many
     * packets have been processed.
     *
     * If possible, applications are encouraged to use event handlers so that
     * they only try to read packets when packets are available, and they only
     * write packets when buffer space is available. In this example, we always
     * want to read packets when they arrive, so we keep an onRead() handler
     * registered at all times. We also want to write as long as there's buffer
     * space, but only when a peer is connected. So we'll register and unregister
     * our onWrite() handler in onConnect() and onDisconnect(), respectively.
     *
     * Note that attach() will empty our transmit and receive queues. If we want
     * to enqueue write packets in onConnect(), we need to be sure the pipe is
     * attached before we set up onConnect/onDisconnect.
     */

    Events::usbReadAvailable.set(onReadAvailable);
    usbPipe.attach();
    updatePacketCounts(0, 0);

    /*
     * Watch for incoming connections, and display some text on the screen to
     * indicate connection state.
     */

    Events::usbConnect.set(onConnect);
    Events::usbDisconnect.set(onDisconnect);

    if (Usb::isConnected()) {
        onConnect();
    } else {
        onDisconnect();
    }

    /*
     * Everything else happens in event handlers; nothing to do in our main loop.
     */

    while (1) {
        for (unsigned n = 0; n < 60; n++) {
            readPacket();
            writePacket();
            System::paint();
        }

        /*
         * For debugging, periodically log the USB packet counters.
         */
        usbCounters.capture();
        LOG("USB-Counters: rxPackets=%d txPackets=%d rxBytes=%d txBytes=%d rxUserDropped=%d\n",
            usbCounters.receivedPackets(), usbCounters.sentPackets(),
            usbCounters.receivedBytes(), usbCounters.sentBytes(),
            usbCounters.userPacketsDropped());
    }
}
int main( int argc, const char** argv )
{
  std::vector<std::string> filenames;
  std::string configFile = "";
  bool outputJson = false;
  int seektoms = 0;
  bool detectRegion = false;
  std::string country;
  int topn;
  bool debug_mode = false;

  TCLAP::CmdLine cmd("OpenAlpr Command Line Utility", ' ', Alpr::getVersion());

  TCLAP::UnlabeledMultiArg<std::string> fileArg( "image_file", "Image containing license plates", true, "", "image_file_path" );

  TCLAP::ValueArg<std::string> countryCodeArg("c","country","Country code to identify (either us for USA or eu for Europe). Default=us",false, "us" ,"country_code");
  TCLAP::ValueArg<int> seekToMsArg("","seek","Seek to the specified millisecond in a video file. Default=0",false, 0 ,"integer_ms");
  TCLAP::ValueArg<std::string> configFileArg("","config","Path to the openalpr.conf file",false, "" ,"config_file");
  TCLAP::ValueArg<std::string> templatePatternArg("p","pattern","Attempt to match the plate number against a plate pattern (e.g., md for Maryland, ca for California)",false, "" ,"pattern code");
  TCLAP::ValueArg<int> topNArg("n","topn","Max number of possible plate numbers to return. Default=10",false, 10 ,"topN");

  TCLAP::SwitchArg jsonSwitch("j","json","Output recognition results in JSON format. Default=off", cmd, false);
  TCLAP::SwitchArg debugSwitch("","debug","Enable debug output. Default=off", cmd, false);
  TCLAP::SwitchArg detectRegionSwitch("d","detect_region","Attempt to detect the region of the plate image. [Experimental] Default=off", cmd, false);
  TCLAP::SwitchArg clockSwitch("","clock","Measure/print the total time to process image and all plates. Default=off", cmd, false);
  TCLAP::SwitchArg motiondetect("", "motion", "Use motion detection on video file or stream. Default=off", cmd, false);

  try
  {
    cmd.add( templatePatternArg );
    cmd.add( seekToMsArg );
    cmd.add( topNArg );
    cmd.add( configFileArg );
    cmd.add( fileArg );
    cmd.add( countryCodeArg );

    if (cmd.parse( argc, argv ) == false)
    {
      // Error occurred while parsing. Exit now.
      return 1;
    }

    filenames = fileArg.getValue();
    country = countryCodeArg.getValue();
    seektoms = seekToMsArg.getValue();
    outputJson = jsonSwitch.getValue();
    debug_mode = debugSwitch.getValue();
    configFile = configFileArg.getValue();
    detectRegion = detectRegionSwitch.getValue();
    templatePattern = templatePatternArg.getValue();
    topn = topNArg.getValue();
    measureProcessingTime = clockSwitch.getValue();
    do_motiondetection = motiondetect.getValue();
  }
  catch (TCLAP::ArgException &e)   // catch any exceptions
  {
    std::cerr << "error: " << e.error() << " for arg " << e.argId() << std::endl;
    return 1;
  }

  cv::Mat frame;

  Alpr alpr(country, configFile);
  alpr.setTopN(topn);

  if (debug_mode)
  {
    alpr.getConfig()->setDebug(true);
  }

  if (detectRegion)
    alpr.setDetectRegion(detectRegion);

  if (templatePattern.empty() == false)
    alpr.setDefaultRegion(templatePattern);

  if (alpr.isLoaded() == false)
  {
    std::cerr << "Error loading OpenALPR" << std::endl;
    return 1;
  }

  for (unsigned int i = 0; i < filenames.size(); i++)
  {
    std::string filename = filenames[i];

    if (filename == "-")
    {
      std::vector<uchar> data;
      int c;
      while ((c = fgetc(stdin)) != EOF)
      {
        data.push_back((uchar) c);
      }

      frame = cv::imdecode(cv::Mat(data), 1);
      if (!frame.empty())
      {
        detectandshow(&alpr, frame, "", outputJson);
      }
      else
      {
        std::cerr << "Image invalid: " << filename << std::endl;
      }
    }
    else if (filename == "stdin")
    {
      std::string filename;
      while (std::getline(std::cin, filename))
      {
        if (fileExists(filename.c_str()))
        {
          frame = cv::imread(filename);
          detectandshow(&alpr, frame, "", outputJson);
        }
        else
        {
          std::cerr << "Image file not found: " << filename << std::endl;
        }
      }
    }
    else if (filename == "webcam" || startsWith(filename, WEBCAM_PREFIX))
    {
      int webcamnumber = 0;

      // If they supplied "/dev/video[number]" parse the "number" here
      if (startsWith(filename, WEBCAM_PREFIX) && filename.length() > WEBCAM_PREFIX.length())
      {
        webcamnumber = atoi(filename.substr(WEBCAM_PREFIX.length()).c_str());
      }

      int framenum = 0;
      cv::VideoCapture cap(webcamnumber);
      if (!cap.isOpened())
      {
        std::cerr << "Error opening webcam" << std::endl;
        return 1;
      }

      while (cap.read(frame))
      {
        if (framenum == 0)
          motiondetector.ResetMotionDetection(&frame);
        detectandshow(&alpr, frame, "", outputJson);
        sleep_ms(10);
        framenum++;
      }
    }
    else if (startsWith(filename, "http://") || startsWith(filename, "https://"))
    {
      int framenum = 0;

      VideoBuffer videoBuffer;
      videoBuffer.connect(filename, 5);

      cv::Mat latestFrame;

      while (program_active)
      {
        std::vector<cv::Rect> regionsOfInterest;
        int response = videoBuffer.getLatestFrame(&latestFrame, regionsOfInterest);
        if (response != -1)
        {
          if (framenum == 0)
            motiondetector.ResetMotionDetection(&latestFrame);
          detectandshow(&alpr, latestFrame, "", outputJson);
        }

        // Sleep 10ms
        sleep_ms(10);
        framenum++;
      }

      videoBuffer.disconnect();

      std::cout << "Video processing ended" << std::endl;
    }
    else if (hasEndingInsensitive(filename, ".avi") || hasEndingInsensitive(filename, ".mp4") ||
             hasEndingInsensitive(filename, ".webm") || hasEndingInsensitive(filename, ".flv") ||
             hasEndingInsensitive(filename, ".mjpg") || hasEndingInsensitive(filename, ".mjpeg") ||
             hasEndingInsensitive(filename, ".mkv"))
    {
      if (fileExists(filename.c_str()))
      {
        int framenum = 0;

        cv::VideoCapture cap = cv::VideoCapture();
        cap.open(filename);
        cap.set(CV_CAP_PROP_POS_MSEC, seektoms);

        while (cap.read(frame))
        {
          if (SAVE_LAST_VIDEO_STILL)
          {
            cv::imwrite(LAST_VIDEO_STILL_LOCATION, frame);
          }
          if (!outputJson)
            std::cout << "Frame: " << framenum << std::endl;

          if (framenum == 0)
            motiondetector.ResetMotionDetection(&frame);
          detectandshow(&alpr, frame, "", outputJson);

          // create a 1ms delay
          sleep_ms(1);
          framenum++;
        }
      }
      else
      {
        std::cerr << "Video file not found: " << filename << std::endl;
      }
    }
    else if (is_supported_image(filename))
    {
      if (fileExists(filename.c_str()))
      {
        frame = cv::imread(filename);

        bool plate_found = detectandshow(&alpr, frame, "", outputJson);

        if (!plate_found && !outputJson)
          std::cout << "No license plates found." << std::endl;
      }
      else
      {
        std::cerr << "Image file not found: " << filename << std::endl;
      }
    }
    else if (DirectoryExists(filename.c_str()))
    {
      std::vector<std::string> files = getFilesInDir(filename.c_str());

      std::sort(files.begin(), files.end(), stringCompare);

      for (int i = 0; i < files.size(); i++)
      {
        if (is_supported_image(files[i]))
        {
          std::string fullpath = filename + "/" + files[i];

          std::cout << fullpath << std::endl;
          frame = cv::imread(fullpath.c_str());
          if (detectandshow(&alpr, frame, "", outputJson))
          {
            //while ((char) cv::waitKey(50) != 'c') { }
          }
          else
          {
            //cv::waitKey(50);
          }
        }
      }
    }
    else
    {
      std::cerr << "Unknown file type" << std::endl;
      return 1;
    }
  }

  return 0;
}
RequestBroker::ProcessResponse ImageRequest::Process(RequestBroker & rb)
{
    VideoBuffer * image = NULL;

    //Have a look at the thumbnail cache
    for (std::deque<std::pair<std::string, VideoBuffer*> >::iterator iter = rb.imageCache.begin(), end = rb.imageCache.end(); iter != end; ++iter)
    {
        if ((*iter).first == URL)
        {
            image = (*iter).second;
#ifdef DEBUG
            std::cout << typeid(*this).name() << " " << URL << " found in cache" << std::endl;
#endif
        }
    }

    if (!image)
    {
        if (HTTPContext)
        {
            if (http_async_req_status(HTTPContext))
            {
                pixel * imageData;
                char * data;
                int status, data_size, imgw, imgh;
                data = http_async_req_stop(HTTPContext, &status, &data_size);

                if (status == 200 && data)
                {
                    imageData = Graphics::ptif_unpack(data, data_size, &imgw, &imgh);
                    free(data);

                    if (imageData)
                    {
                        //Success!
                        image = new VideoBuffer(imageData, imgw, imgh);
                        free(imageData);
                    }
                    else
                    {
                        //Error thumbnail
                        image = new VideoBuffer(32, 32);
                        image->SetCharacter(14, 14, 'x', 255, 255, 255, 255);
                    }

                    if (rb.imageCache.size() >= THUMB_CACHE_SIZE)
                    {
                        //Evict the oldest entry from the thumbnail cache
                        delete rb.imageCache.front().second;
                        rb.imageCache.pop_front();
                    }
                    rb.imageCache.push_back(std::pair<std::string, VideoBuffer*>(URL, image));
                }
                else
                {
#ifdef DEBUG
                    std::cout << typeid(*this).name() << " Request for " << URL << " failed with status " << status << std::endl;
#endif
                    free(data);
                    return RequestBroker::Failed;
                }
            }
        }
        else
        {
            //Check for ongoing requests
            for (std::vector<Request*>::iterator iter = rb.activeRequests.begin(), end = rb.activeRequests.end(); iter != end; ++iter)
            {
                if ((*iter)->Type != Request::Image)
                    continue;
                ImageRequest * otherReq = (ImageRequest*)(*iter);
                if (otherReq->URL == URL && otherReq != this)
                {
#ifdef DEBUG
                    std::cout << typeid(*this).name() << " Request for " << URL << " found, appending." << std::endl;
#endif
                    //Add the current listener to the item already being requested
                    (*iter)->Children.push_back(this);
                    return RequestBroker::Duplicate;
                }
            }

            //If it's not already being requested, request it
#ifdef DEBUG
            std::cout << typeid(*this).name() << " Creating new request for " << URL << std::endl;
#endif
            HTTPContext = http_async_req_start(NULL, (char *)URL.c_str(), NULL, 0, 0);
            RequestTime = time(NULL);
        }
    }

    if (image)
    {
        //Create a copy, to separate it from the cache
        std::vector<Request *> children(Children.begin(), Children.end());
        Children.clear();

        VideoBuffer * myVB = new VideoBuffer(*image);
        myVB->Resize(Width, Height, true);
        ResultObject = (void*)myVB;
        rb.requestComplete(this);

        for (std::vector<Request*>::iterator childIter = children.begin(), childEnd = children.end(); childIter != childEnd; ++childIter)
        {
            if ((*childIter)->Type == Request::Image)
            {
                ImageRequest * childReq = (ImageRequest*)*childIter;
                VideoBuffer * tempImage = new VideoBuffer(*image);
                tempImage->Resize(childReq->Width, childReq->Height, true);
                childReq->ResultObject = (void*)tempImage;
                rb.requestComplete(*childIter);
            }
        }
        return RequestBroker::Finished;
    }

    return RequestBroker::OK;
}
int main(int argc, char *argv[])
{
    ui::Engine * engine;
    std::string outputPrefix, inputFilename;
    std::vector<char> inputFile;
    std::string ppmFilename, ptiFilename, ptiSmallFilename, pngFilename, pngSmallFilename;
    std::vector<char> ppmFile, ptiFile, ptiSmallFile, pngFile, pngSmallFile;

    inputFilename = std::string(argv[1]);
    outputPrefix = std::string(argv[2]);

    ppmFilename = outputPrefix + ".ppm";
    ptiFilename = outputPrefix + ".pti";
    ptiSmallFilename = outputPrefix + "-small.pti";
    pngFilename = outputPrefix + ".png";
    pngSmallFilename = outputPrefix + "-small.png";

    readFile(inputFilename, inputFile);

    ui::Engine::Ref().g = new Graphics();

    engine = &ui::Engine::Ref();
    engine->Begin(WINDOWW, WINDOWH);

    GameSave * gameSave = NULL;
    try
    {
        gameSave = new GameSave(inputFile);
    }
    catch (ParseException & e)
    {
        //Render the save again later or something? I don't know
        if (std::string(e.what()) == "Save from newer version")
            throw e;
    }

    Simulation * sim = new Simulation();
    Renderer * ren = new Renderer(ui::Engine::Ref().g, sim);

    if (gameSave)
    {
        sim->Load(gameSave);

        //Render save
        ren->decorations_enable = true;
        ren->blackDecorations = true;

        int frame = 15;
        while (frame)
        {
            frame--;
            ren->render_parts();
            ren->render_fire();
            ren->clearScreen(1.0f);
        }
    }
    else
    {
        int w = Graphics::textwidth("Save file invalid") + 16, x = (XRES-w)/2, y = (YRES-24)/2;
        ren->drawrect(x, y, w, 24, 192, 192, 192, 255);
        ren->drawtext(x+8, y+8, "Save file invalid", 192, 192, 240, 255);
    }

    ren->RenderBegin();
    ren->RenderEnd();

    VideoBuffer screenBuffer = ren->DumpFrame();

    //ppmFile = format::VideoBufferToPPM(screenBuffer);
    ptiFile = format::VideoBufferToPTI(screenBuffer);
    pngFile = format::VideoBufferToPNG(screenBuffer);

    screenBuffer.Resize(1.0f/3.0f, true);
    ptiSmallFile = format::VideoBufferToPTI(screenBuffer);
    pngSmallFile = format::VideoBufferToPNG(screenBuffer);

    //writeFile(ppmFilename, ppmFile);
    writeFile(ptiFilename, ptiFile);
    writeFile(ptiSmallFilename, ptiSmallFile);
    writeFile(pngFilename, pngFile);
    writeFile(pngSmallFilename, pngSmallFile);
}
void main()
{
    const CubeID cube(0);
    static VideoBuffer vid;
    vid.attach(cube);

    /*
     * Blank the screen. This also blanks the one-pixel region between
     * the bottom of the fractal and the top of the elapsed time indicator
     * below.
     */

    vid.initMode(SOLID_MODE);
    vid.colormap[0] = RGB565::fromRGB(0xFFFFFF);
    System::paint();

    /*
     * We use STAMP mode in a special way here, to do (slow) true-color
     * rendering: The framebuffer is simply set up as an identity mapping
     * that shows each of the 16 colors in our colormap. Now we can put
     * a row of 16 pixels directly into the colormap, and render the screen
     * using 1024 of these little 16x1 pixel "frames".
     *
     * Clearly this is really slow, and this technique is unlikely to be
     * frequently useful, but it's a fun parlour trick :)
     */

    SystemTime startTime = SystemTime::now();

    vid.initMode(STAMP);
    vid.stamp.disableKey();

    auto &fb = vid.stamp.initFB<16,1>();

    for (unsigned i = 0; i < 16; i++)
        fb.plot(vec(i, 0U), i);

    for (unsigned y = 0; y < LCD_height - 9; y++)
        for (unsigned x = 0; x < LCD_width; x += 16) {

            /*
             * Render 16 pixels at a time, into a buffer in RAM.
             */

            static RGB565 pixels[16];
            for (unsigned i = 0; i < 16; i++)
                pixels[i] = calculateMandelbrot(vec(x+i, y));

            /*
             * Now copy to VRAM and start painting. By waiting until
             * now to call finish(), we're allowing the calculation above
             * to run concurrently with the cube's paint operation.
             *
             * Note that our "frames" are actually just tiny pieces of the
             * screen, so we need to avoid the default frame rate limits
             * in order to render at an at all reasonable rate. This is
             * where paintUnlimited() comes into play.
             */

            System::finish();
            vid.stamp.setBox(vec(x,y), vec(16,1));
            vid.colormap.set(pixels);
            System::paintUnlimited();
        }

    /*
     * Use BG0_ROM mode to draw the elapsed time at the bottom of the screen.
     */

    TimeDelta elapsed = SystemTime::now() - startTime;

    String<16> message;
    message << (elapsed.milliseconds() / 1000) << "."
            << Fixed(elapsed.milliseconds() % 1000, 3) << " sec";
    LOG("Elapsed time: %s\n", message.c_str());

    vid.initMode(BG0_ROM);
    vid.bg0rom.text(vec(1,0), message);
    vid.setWindow(LCD_height - 8, 8);

    // Kill time (efficiently)
    while (1)
        System::paint();
}
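/*
 * Hypothetical sketch of the calculateMandelbrot() helper assumed above
 * (the original defines it elsewhere): map a screen pixel into the complex
 * plane, iterate z = z^2 + c until escape, and grey-scale the iteration
 * count. The mapping constants and iteration limit are assumptions.
 */
static RGB565 calculateMandelbrot(UInt2 pixel)
{
    const unsigned maxIters = 32;

    // Map the 128x128 screen onto roughly [-2, 0.65] x [-1.33, 1.31]
    float cr = (int(pixel.x) - 96) / 48.f;
    float ci = (int(pixel.y) - 64) / 48.f;

    float zr = 0, zi = 0;
    unsigned i = 0;
    while (i < maxIters && zr*zr + zi*zi < 4.f) {
        float t = zr*zr - zi*zi + cr;
        zi = 2.f * zr * zi + ci;
        zr = t;
        i++;
    }

    unsigned v = i * 255 / maxIters;
    return RGB565::fromRGB((v << 16) | (v << 8) | v);
}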
int main( int argc, const char** argv )
{
  std::string filename;
  std::string configFile = "";
  bool outputJson = false;
  int seektoms = 0;
  bool detectRegion = false;
  std::string templateRegion;
  std::string country;
  int topn;

  TCLAP::CmdLine cmd("OpenAlpr Command Line Utility", ' ', Alpr::getVersion());

  TCLAP::UnlabeledValueArg<std::string> fileArg( "image_file", "Image containing license plates", false, "", "image_file_path" );

  TCLAP::ValueArg<std::string> countryCodeArg("c","country","Country code to identify (either us for USA or eu for Europe). Default=us",false, "us" ,"country_code");
  TCLAP::ValueArg<int> seekToMsArg("","seek","Seek to the specified millisecond in a video file. Default=0",false, 0 ,"integer_ms");
  TCLAP::ValueArg<std::string> configFileArg("","config","Path to the openalpr.conf file",false, "" ,"config_file");
  TCLAP::ValueArg<std::string> templateRegionArg("t","template_region","Attempt to match the plate number against a region template (e.g., md for Maryland, ca for California)",false, "" ,"region code");
  TCLAP::ValueArg<int> topNArg("n","topn","Max number of possible plate numbers to return. Default=10",false, 10 ,"topN");

  TCLAP::SwitchArg jsonSwitch("j","json","Output recognition results in JSON format. Default=off", cmd, false);
  TCLAP::SwitchArg detectRegionSwitch("d","detect_region","Attempt to detect the region of the plate image. Default=off", cmd, false);
  TCLAP::SwitchArg clockSwitch("","clock","Measure/print the total time to process image and all plates. Default=off", cmd, false);

  try
  {
    cmd.add( templateRegionArg );
    cmd.add( seekToMsArg );
    cmd.add( topNArg );
    cmd.add( configFileArg );
    cmd.add( fileArg );
    cmd.add( countryCodeArg );

    if (cmd.parse( argc, argv ) == false)
    {
      // Error occurred while parsing. Exit now.
      return 1;
    }

    filename = fileArg.getValue();
    country = countryCodeArg.getValue();
    seektoms = seekToMsArg.getValue();
    outputJson = jsonSwitch.getValue();
    configFile = configFileArg.getValue();
    detectRegion = detectRegionSwitch.getValue();
    templateRegion = templateRegionArg.getValue();
    topn = topNArg.getValue();
    measureProcessingTime = clockSwitch.getValue();
  }
  catch (TCLAP::ArgException &e)   // catch any exceptions
  {
    std::cerr << "error: " << e.error() << " for arg " << e.argId() << std::endl;
    return 1;
  }

  cv::Mat frame;

  Alpr alpr(country, configFile);
  alpr.setTopN(topn);

  if (detectRegion)
    alpr.setDetectRegion(detectRegion);

  if (templateRegion.empty() == false)
    alpr.setDefaultRegion(templateRegion);

  if (alpr.isLoaded() == false)
  {
    std::cerr << "Error loading OpenALPR" << std::endl;
    return 1;
  }

  if (filename.empty())
  {
    std::string filename;
    while (std::getline(std::cin, filename))
    {
      if (fileExists(filename.c_str()))
      {
        frame = cv::imread(filename);
        detectandshow(&alpr, frame, "", outputJson);
      }
      else
      {
        std::cerr << "Image file not found: " << filename << std::endl;
      }
    }
  }
  else if (filename == "webcam")
  {
    int framenum = 0;
    cv::VideoCapture cap(0);
    if (!cap.isOpened())
    {
      std::cout << "Error opening webcam" << std::endl;
      return 1;
    }

    while (cap.read(frame))
    {
      detectandshow(&alpr, frame, "", outputJson);
      usleep(1000);
      framenum++;
    }
  }
  else if (startsWith(filename, "http://") || startsWith(filename, "https://"))
  {
    int framenum = 0;

    VideoBuffer videoBuffer;
    videoBuffer.connect(filename, 5);

    cv::Mat latestFrame;

    while (program_active)
    {
      int response = videoBuffer.getLatestFrame(&latestFrame);
      if (response != -1)
      {
        detectandshow(&alpr, latestFrame, "", outputJson);
      }

      // Sleep 10ms
      usleep(10000);
    }

    videoBuffer.disconnect();

    std::cout << "Video processing ended" << std::endl;
  }
  else if (hasEndingInsensitive(filename, ".avi") || hasEndingInsensitive(filename, ".mp4") ||
           hasEndingInsensitive(filename, ".webm") || hasEndingInsensitive(filename, ".flv") ||
           hasEndingInsensitive(filename, ".mjpg") || hasEndingInsensitive(filename, ".mjpeg"))
  {
    if (fileExists(filename.c_str()))
    {
      int framenum = 0;

      cv::VideoCapture cap = cv::VideoCapture();
      cap.open(filename);
      cap.set(CV_CAP_PROP_POS_MSEC, seektoms);

      while (cap.read(frame))
      {
        if (SAVE_LAST_VIDEO_STILL)
        {
          cv::imwrite(LAST_VIDEO_STILL_LOCATION, frame);
        }
        std::cout << "Frame: " << framenum << std::endl;

        detectandshow(&alpr, frame, "", outputJson);

        // create a 1ms delay
        usleep(1000);
        framenum++;
      }
    }
    else
    {
      std::cerr << "Video file not found: " << filename << std::endl;
    }
  }
  else if (hasEndingInsensitive(filename, ".png") || hasEndingInsensitive(filename, ".jpg") ||
           hasEndingInsensitive(filename, ".jpeg") || hasEndingInsensitive(filename, ".gif"))
  {
    if (fileExists(filename.c_str()))
    {
      frame = cv::imread(filename);
      detectandshow(&alpr, frame, "", outputJson);
    }
    else
    {
      std::cerr << "Image file not found: " << filename << std::endl;
    }
  }
  else if (DirectoryExists(filename.c_str()))
  {
    std::vector<std::string> files = getFilesInDir(filename.c_str());

    std::sort(files.begin(), files.end(), stringCompare);

    for (int i = 0; i < files.size(); i++)
    {
      if (hasEndingInsensitive(files[i], ".jpg") || hasEndingInsensitive(files[i], ".png"))
      {
        std::string fullpath = filename + "/" + files[i];

        std::cout << fullpath << std::endl;
        frame = cv::imread(fullpath.c_str());
        if (detectandshow(&alpr, frame, "", outputJson))
        {
          //while ((char) cv::waitKey(50) != 'c') { }
        }
        else
        {
          //cv::waitKey(50);
        }
      }
    }
  }
  else
  {
    std::cerr << "Unknown file type" << std::endl;
    return 1;
  }

  return 0;
}
int main(int argc, char **argv)
{
    // Set up NetworkTables for this client to talk to the robot
    NetworkTable::SetClientMode();
    NetworkTable::SetIPAddress("10.36.18.2");                        // where is the robot?
    NetworkTable *table = NetworkTable::GetTable("SmartDashboard");  // what table will we interface with?
    cout << "Got through the network tables\n";

    int width = 320;
    int height = 240;

    const float SeekWidth[] = {23.50, 4.0};   // inches
    const float SeekHeight[] = {4.0, 32.0};   // inches
    const float SeekRatio[2] = {SeekWidth[0] / SeekHeight[0],
                                SeekWidth[1] / SeekHeight[1]};   // width:height ratios of the two targets

    float alpha = 1.0;   // contrast -- don't change it

    int c;
    opterr = 0;              // "I'll create all the error messages, not getopt()"
    bool ShowMask = true;    // flag to show the masked image where the 'target' is seen; otherwise display the raw source image
    bool ShowVideo = false;

    while ((c = getopt(argc, argv, "b:mvs")) != -1)
        switch (c) {
        case 'b': {
            char* endptr;
            errno = 0;   /* To distinguish success/failure after call */
            long val = strtol(optarg, &endptr, 0);

            /* Check for various possible errors */
            if ((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN))
                || (errno != 0 && val == 0)
                || (val > 100) || (val < 0)) {
                fprintf(stderr, "Invalid integer for 'b'eta: '%s'\n", optarg);
                exit(EXIT_FAILURE);
            }
            type = val;
            break;
        }
        case 'm':
            ShowMask = true;
            break;
        case 'v':   // VGA resolution (640x480, but is slow)
            width = 640;
            height = 480;
            break;
        case 's':   // Show the video
            ShowVideo = true;
            break;
        case '?':
            if (optopt == 'b')
                fprintf(stderr, "Option -%c requires an argument.\n", optopt);
            else if (isprint(optopt))
                fprintf(stderr, "Unknown option `-%c'.\n", optopt);
            else
                fprintf(stderr, "Unknown option character `\\x%x'.\n", optopt);
            return 1;
        default:
            abort();
        }

    cout << "Attempting to initialize capturing\n";
    RaspiVid v("/dev/video0", width, height);
    cout << "Calling constructor(s)\n";

    if (!v.initialize(RaspiVid::METHOD_MMAP)) {
        cout << "Unable to initialize!\n";
        return -1;
    }
    cout << "Successfully initialized!\n";

    v.setBrightness(50);   // 10 for processing; 50 for a visible image
    v.startCapturing();

    long start[10];

    if (ShowVideo) {
        namedWindow("Vision", 1);
        //createTrackbar("Thresh Type", "Vision", &type, 4, NULL);      // callback not needed
        //createTrackbar("Min Value", "Vision", &defMin, 255, NULL);    // callback not needed
        //createTrackbar("Max Value", "Vision", &defMax, 255, NULL);    // callback not needed
        createTrackbar("Percent Tall", "Vision", &defTall, 20, NULL);   // callback not needed
        createTrackbar("Percent Narr", "Vision", &defNarr, 20, NULL);   // callback not needed
    } else {
        table->PutNumber("Horizontal Percent Error", defTall);
        table->PutNumber("Vertical Percent Error", defNarr);
    }

    for (int i = 0; i < 10; i++)
        start[i] = getmsofday();   // pre-load with 'now'

    for (int i = 0; 1; i++) {
        // Receive key-press updates; this is required if you want to output
        // images, so the task takes a moment to update the display.
        if (waitKey(1) > 0)
            break;

        string fileStream = "Mask";   // default if no table is present
        if (table->IsConnected()) {
            NetworkTable *StreamsTable = table->GetSubTable("File Streams");
            if (StreamsTable && StreamsTable->ContainsKey("selected")) {
                fileStream = StreamsTable->GetString("selected");
            }
        }
        ShowMask = (fileStream == "Mask");

        // Grab a frame from the vision API
        VideoBuffer buffer = v.grabFrame();

        // Put the frame into an OpenCV image matrix with a single color (gray scale)
        Mat image(height, width, CV_8UC1, buffer.data(), false);   // AKA 'Y'
        Mat dst;   // this will be an RGB version of the source image

#if defined(YOU_WANT_RGB_COLOR_INSTEAD_OF_GREYSCALE)
        // There is more data after the gray scale (Y) that contains U&V
        Mat cb(height/2, width/2, CV_8UC1, buffer.data()+(height*width), false);       // 'U'
        Mat cr(height/2, width/2, CV_8UC1, buffer.data()+(height*width)*5/4, false);   // 'V'

        // Size up cb and cr to be the same as y
        Mat CB;
        resize(cb, CB, cvSize(width, height));
        Mat CR;
        resize(cr, CR, cvSize(width, height));

        // Empty image, same as the full (gray scale) image, but 3 channels:
        Mat ycbcr(height, width, CV_8UC3);
        Mat in[] = {image, CB, CR};
        int fromto[] = {0,0, 1,1, 2,2};   // YUV

        // Mash 3 channels from 2 matrices into a single 3-channel matrix:
        mixChannels(in, 3, &ycbcr, 1, fromto, 3);

        // Convert that 3-channel YUV matrix into 3-channel RGB (displayable)
        cvtColor(ycbcr, image, CV_YCrCb2RGB);

        if (ShowMask) {
            dst = image.clone();   // make a copy, as we want dst to have the same RGB version
        }
#else
        // After the calculations, we want to draw 'on' the image, showing our
        // results graphically in some fashion -- that has to happen on RGB
        if (ShowMask) {
            cvtColor(image, dst, CV_GRAY2RGB);   // create a CV_8UC3 version of the same image,
                                                 // which allows us to draw some color on top of the gray
        }
#endif

        int Found = 0;
        if (!ShowMask) {
            // Show the original image with OpenCV on the screen (could be grey or RGB)
            if (ShowVideo) {
                imshow("Vision", image);
            }
            if (fileStream == "Raw") {
                imwrite("/tmp/stream/pic.jpg", image);
            }
        }

        // Alter the brightness to work better with contour finding
        //Mat new_image = image.clone();
        //image.copyTo(new_image);

        // Threshold the source image into a new matrix, applying a min search
        // value and a max search value with a threshold type
        Mat thresh;
        //inRange(image, Scalar(1), Scalar(255), thresh);
        threshold(image, thresh, defMin, defMax, type);

        // Show the thresholded image with OpenCV on the screen
        if (ShowVideo) {
            imshow("Threshold", thresh);
        }
        if (fileStream == "Threshold") {
            imwrite("/tmp/stream/pic.jpg", thresh);
        }

        // Find all the contours in the thresholded image.
        // The original thresholded image will be destroyed while finding contours.
        vector <vector<Point> > contours;

        // CV_RETR_EXTERNAL retrieves only the extreme outer contours.
        // It sets hierarchy[i][2]=hierarchy[i][3]=-1 for all the contours.
        // CV_CHAIN_APPROX_SIMPLE compresses horizontal, vertical, and diagonal segments and leaves only their end points.
        // For example, an up-right rectangular contour is encoded with 4 points.
        findContours(thresh, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

        // Output information
        for (int c = 0; c < contours.size(); c++) {
            vector<Point> hull;
            convexHull(contours[c], hull, true);

            // Examine each contours[c] for width / height, and keep it if
            // close enough to either SeekRatio
            int MinX, MinY, MaxX, MaxY;
            MinX = MaxX = contours[c][0].x;
            MinY = MaxY = contours[c][0].y;
#ifdef DEBUG
            cout << "[" << c << "].size()=" << contours[c].size() << ":";
#endif
            for (int q = 1; q < contours[c].size(); q++) {
#ifdef DEBUG
                cout << contours[c][q] << ",";
#endif
                MaxX = max(MaxX, contours[c][q].x);
                MaxY = max(MaxY, contours[c][q].y);
                MinX = min(MinX, contours[c][q].x);
                MinY = min(MinY, contours[c][q].y);
            }

            // Now the extents of the contour (rectangle?) are [MinX,MinY]==>[MaxX,MaxY]
            int Width = MaxX - MinX;
            int Height = MaxY - MinY;
            if (Height > 10) {   // at least 10 pixels, otherwise it's probably noise
                float ThisRatio = float(Width) / float(Height);
                cout << "W/H=(" << Width << " " << Height << " " << ThisRatio << ") ";

                // defNarr is the wide one
                // defTall is the tall one
                int narrErr = table->GetNumber("Horizontal Percent Error");
                int tallErr = table->GetNumber("Vertical Percent Error");
                if ((ThisRatio >= SeekRatio[0] * (1.0 - (narrErr / 100.0)) &&
                     ThisRatio <= SeekRatio[0] * (1.0 + (narrErr / 100.0))) ||
                    (ThisRatio >= SeekRatio[1] * (1.0 - (tallErr / 100.0)) &&
                     ThisRatio <= SeekRatio[1] * (1.0 + (tallErr / 100.0)))) {
                    // Close enough to say "this one could count"
                    Found++;
                    cout << "F";
                    if (ShowMask) {
                        // Draw this contour on a copy of the image
                        Scalar color(0, 0, 255);
                        drawContours(dst, contours, c, color, CV_FILLED);
                    }
                }
            }
        }

        // Output values that the Driver Station can analyze
        bool hotness = (Found == 2);
        table->PutBoolean("Hotness", hotness);

        if (ShowVideo && ShowMask) {
            imshow("Vision", dst);
        }
        if (ShowMask) {
            imwrite("/tmp/stream/pic.jpg", dst);
        }

        long now = getmsofday();
        cout << "NumRects:" << Found << " " << (10000 / (now - start[i%10])) << " FPS \n";
        cout.flush();
        start[i%10] = now;
    }
    cout << "\n";   // save the last line of text

    v.destroy();
}
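/*
 * Worked example of the aspect-ratio test above: SeekRatio[0] = 23.5/4
 * = 5.875 (the wide target) and SeekRatio[1] = 4/32 = 0.125 (the tall
 * target). With a 10% tolerance, a contour is kept when its width:height
 * ratio falls in [5.2875, 6.4625] or in [0.1125, 0.1375].
 */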