/* Module-init hook: register the imx175 actuator's I2C driver with the
 * I2C core.  Returns 0 on success or a negative errno from
 * i2c_add_driver().
 * NOTE(review): i2c_add_driver() normally takes a pointer to a
 * struct i2c_driver; imx175_act_t.i2c_driver is passed directly here,
 * so that member is presumably already a pointer -- confirm against the
 * actuator control structure definition. */
static int __init imx175_i2c_add_driver( void)
{
	LINFO("%s called\n", __func__);
	return i2c_add_driver(imx175_act_t.i2c_driver);
}
// ######################################################################
//! Return this channel's integer output map, computing and caching it on
//! first request.  The map is the sum of all center-surround submaps
//! (each divided by the submap count), optionally max-normalized into
//! [itsOutputRangeMin .. itsOutputRangeMax].  LFATALs if no input has
//! been set yet; returns an all-zero map if output is not yet available
//! (e.g. temporal channels that need several frames).
Image<int> IntegerSimpleChannel::getOutputInt()
{
GVX_TRACE(__PRETTY_FUNCTION__);
  if (!this->hasInput())
    // if you think this LFATAL() has been triggered incorrectly, then
    // first make sure that somebody has called setInputDims()
    CLFATAL("Oops! can't get output -- I don't even have any input yet");

  if (!this->outputAvailable())
    {
      // it's possible that we have input but don't yet have output in
      // the case of a channel that requires several input frames
      // before it can start generating output (such as a flicker or
      // motion channel); in that case we just return an empty image
      // of the appropriate size
      LERROR("No %s channel yet! -- IGNORING.", this->tagName().c_str());
      return Image<int>(this->getMapDims(), ZEROS);
    }

  // lazily compute and cache the output; subsequent calls reuse the cache
  if (!itsOutputCache.initialized())
    {
      itsOutputCache = Image<int>(getMapDims(), ZEROS);

      // compute max-normalized weighted sum of center-surround at all levels:
      for (uint idx = 0; idx < itsLevelSpec.getVal().maxIndex(); ++idx)
        {
          const Image<int> submap = getSubmapInt(idx); // get the unweighted map

          // add submap to our sum (dividing first keeps the running sum
          // within integer range)
          itsOutputCache += (submap / int(itsLevelSpec.getVal().maxIndex()));

          if (MYLOGVERB >= LOG_DEBUG)
            {
              uint clev = 0, slev = 0;
              itsLevelSpec.getVal().indexToCS(idx, clev, slev);
              LDEBUG("%s(%d,%d): weight %f", tagName().c_str(), clev, slev, 1.0f);
            }
        }

      // apply max-normalization on the output as needed:
      if (itsNormalizeOutput.getVal())
        {
          LDEBUG("%s: Normalizing output: %s(%d .. %d)", tagName().c_str(),
                 maxNormTypeName(itsNormType.getVal()), itsOutputRangeMin.getVal(),
                 itsOutputRangeMax.getVal());
          itsOutputCache = intgMaxNormalize(itsOutputCache, itsOutputRangeMin.getVal(),
                                            itsOutputRangeMax.getVal(), itsNormType.getVal());
        }

      // print some debug info if in debug mode:
      if (MYLOGVERB >= LOG_DEBUG)
        {
          int mi, ma;
          getMinMax(itsOutputCache, mi, ma);
          LDEBUG("%s: final range [%d .. %d]", tagName().c_str(), mi, ma);
        }

      LINFO("Computed %s Conspicuity Map", descriptiveName().c_str());
    }

  return itsOutputCache;
}
void OTBSpectralAngleDistanceImageFilterProcessor::process() { try { //Detect the number of spectral bands the input image has. nbBands = inPort_.getData()->GetNumberOfComponentsPerPixel(); LINFO("Number of Bands detected: " << nbBands); updateBands(nbBands); MultiSpectralImageType::PixelType pixelRef; //Pass the parameters to filter //depending on input image's spectral bands. switch (nbBands) { case 1: { pixelRef.SetSize(1); pixelRef[0] = refPixel0_.get(); break; } case 2: { pixelRef.SetSize(2); pixelRef[0] = refPixel0_.get(); pixelRef[1] = refPixel1_.get(); break; } case 3: { pixelRef.SetSize(3); pixelRef[0] = refPixel0_.get(); pixelRef[1] = refPixel1_.get(); pixelRef[2] = refPixel2_.get(); break; } case 4: { pixelRef.SetSize(4); pixelRef[0] = refPixel0_.get(); pixelRef[1] = refPixel1_.get(); pixelRef[2] = refPixel2_.get(); pixelRef[3] = refPixel3_.get(); break; } case 5: { pixelRef.SetSize(5); pixelRef[0] = refPixel0_.get(); pixelRef[1] = refPixel1_.get(); pixelRef[2] = refPixel2_.get(); pixelRef[3] = refPixel3_.get(); pixelRef[4] = refPixel4_.get(); break; } case 6: { pixelRef.SetSize(6); pixelRef[0] = refPixel0_.get(); pixelRef[1] = refPixel1_.get(); pixelRef[2] = refPixel2_.get(); pixelRef[3] = refPixel3_.get(); pixelRef[4] = refPixel4_.get(); pixelRef[5] = refPixel5_.get(); break; } case 7: { pixelRef.SetSize(7); pixelRef[0] = refPixel0_.get(); pixelRef[1] = refPixel1_.get(); pixelRef[2] = refPixel2_.get(); pixelRef[3] = refPixel3_.get(); pixelRef[4] = refPixel4_.get(); pixelRef[5] = refPixel5_.get(); pixelRef[6] = refPixel6_.get(); break; } case 8: { pixelRef.SetSize(8); pixelRef[0] = refPixel0_.get(); pixelRef[1] = refPixel1_.get(); pixelRef[2] = refPixel2_.get(); pixelRef[3] = refPixel3_.get(); pixelRef[4] = refPixel4_.get(); pixelRef[5] = refPixel5_.get(); pixelRef[6] = refPixel6_.get(); pixelRef[7] = refPixel7_.get(); break; } } filter->SetInput(inPort_.getData()); filter->SetReferencePixel(pixelRef); filter->UpdateLargestPossibleRegion(); 
filter->Update(); outPort_.setData(filter->GetOutput()); LINFO("Spectral Angle Distance Image Filter Connected!"); } catch (int e) { LERROR("Error in Spectral Angle Distance Image Filter"); return; } }
// Pass-through: trace the event, then delegate the runtime status to the
// wrapped forwarder unchanged.
void FilteringForwarder::forward(const RuntimeStatus & s)
{
    LINFO("RC") << "forward status" << LE;
    wrappedForwarder->forward(s);
}
// Pass-through: trace the event, then delegate the notification to the
// wrapped forwarder unchanged.
void FilteringForwarder::forward(const Notification & t)
{
    LINFO("RC") << "forward notification" << LE;
    wrappedForwarder->forward(t);
}
// ##############################################################################################################
//! Configure the camera for the given video mapping: capture size, pixel
//! format and frame rate.  Throws (LFATAL) if the driver does not accept
//! the exact requested width/height/format, since modules rely on getting
//! precisely what they asked for.  Crop-rectangle reset and frame-rate
//! setting are best-effort (failures are swallowed/logged).
void jevois::Camera::setFormat(jevois::VideoMapping const & m)
{
  JEVOIS_TRACE(2);

  // Serialize against other operations on this camera:
  JEVOIS_TIMED_LOCK(itsMtx);

  // Get current format:
  itsFormat.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  XIOCTL(itsFd, VIDIOC_G_FMT, &itsFormat);

  // Set desired format:
  itsFormat.fmt.pix.width = m.cw;
  itsFormat.fmt.pix.height = m.ch;
  itsFormat.fmt.pix.pixelformat = m.cfmt;
  itsFormat.fmt.pix.field = V4L2_FIELD_NONE;
  itsFps = m.cfps;
  LDEBUG("Requesting video format " << itsFormat.fmt.pix.width << 'x' << itsFormat.fmt.pix.height << ' ' <<
         jevois::fccstr(itsFormat.fmt.pix.pixelformat));
  XIOCTL(itsFd, VIDIOC_S_FMT, &itsFormat);

  // Get the format back as the driver may have adjusted some sizes, etc:
  XIOCTL(itsFd, VIDIOC_G_FMT, &itsFormat);

  // The driver returns a different format code, may be the mbus code instead of the v4l2 fcc...
  itsFormat.fmt.pix.pixelformat = v4l2sunxiFix(itsFormat.fmt.pix.pixelformat);

  LINFO("Camera set video format to " << itsFormat.fmt.pix.width << 'x' << itsFormat.fmt.pix.height << ' ' <<
        jevois::fccstr(itsFormat.fmt.pix.pixelformat));

  // Because modules may rely on the exact format that they request, throw if the camera modified it:
  if (itsFormat.fmt.pix.width != m.cw || itsFormat.fmt.pix.height != m.ch || itsFormat.fmt.pix.pixelformat != m.cfmt)
    LFATAL("Camera did not accept the requested video format as specified");

  // Reset cropping parameters. NOTE: just open()'ing the device does not reset it, according to the unix toolchain
  // philosophy. Hence, although here we do not provide support for cropping, we still need to ensure that it is
  // properly reset. Note that some cameras do not support this so here we swallow that exception:
  try
    {
      struct v4l2_cropcap cropcap = { };
      cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
      XIOCTL_QUIET(itsFd, VIDIOC_CROPCAP, &cropcap);

      struct v4l2_crop crop = { };
      crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
      crop.c = cropcap.defrect;
      XIOCTL_QUIET(itsFd, VIDIOC_S_CROP, &crop);

      LDEBUG("Set cropping rectangle to " << cropcap.defrect.width << 'x' << cropcap.defrect.height << " @ (" <<
             cropcap.defrect.left << ", " << cropcap.defrect.top << ')');
    }
  catch (...) { LDEBUG("Querying/setting crop rectangle not supported"); }

  // Set frame rate (best-effort: some drivers ignore or reject VIDIOC_S_PARM):
  try
    {
      struct v4l2_streamparm parms = { };
      parms.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
      parms.parm.capture.timeperframe = jevois::VideoMapping::fpsToV4l2(m.cfps);
      parms.parm.capture.capturemode = 2; // V4L2_MODE_VIDEO not defined in our headers? its value is 2.
      XIOCTL(itsFd, VIDIOC_S_PARM, &parms);

      LDEBUG("Set framerate to " << m.cfps << " fps");
    }
  catch (...) { LERROR("Setting frame rate to " << m.cfps << " fps failed -- IGNORED"); }
}
// ###################################################################### // open a testing file containing images and corresponding ground truth void setupCases(std::string folder, std::string fname, bool equalize) { char comment[200]; FILE *fp; char inLine[100]; // open a file that lists the sample with ground truth std::string name = folder + fname; if((fp = fopen(name.c_str(),"rb")) == NULL) { LINFO("samples file: %s not found", name.c_str()); // input and output vector out.resize(0); in.resize(0); nSamples = 0; return; } LINFO("tName: %s",name.c_str()); // get number of samples if (fgets(inLine, 1000, fp) == NULL) LFATAL("fgets failed"); sscanf(inLine, "%d %s", &nSamples, comment); // the number of categories -> has to agree with the training file uint tNout; if (fgets(inLine, 1000, fp) == NULL) LFATAL("fgets failed"); sscanf(inLine, "%d %s", &tNout, comment); if(tNout != info->nOutput) LFATAL("Num categories differ: %d != %d", tNout, info->nOutput); // get the type of ground truth char gtOpt[100]; int gtType = -1; if (fgets(inLine, 1000, fp) == NULL) LFATAL("fgets failed"); sscanf(inLine, "%s %s", gtOpt, comment); if(strcmp(gtOpt,"ABSOLUTE") == 0) gtType = ABSOLUTE; else if(strcmp(gtOpt,"MIXTURE" ) == 0) gtType = MIXTURE; else LFATAL("unknown ground truth type %s",gtOpt); // set up the size input and output vector out.resize(nSamples); in.resize(nSamples); // skip column headers if (fgets(inLine, 1000, fp) == NULL) LFATAL("fgets failed"); char cName[100]; char sName[100]; char iName[100]; char ext[100]; int cStart, cNum; int gTruth; FILE *ifp; int count = 0; int tSamples = 0; std::vector<uint> nSamples; while(fgets(inLine, 1000, fp) != NULL) { if(gtType == ABSOLUTE) { // get the files in this category and ground truth sscanf(inLine, "%s %d %d %d %s", cName, &cStart, &cNum, &gTruth, ext); sprintf(sName,"%s%s", folder.c_str(), cName); printf(" sName: %s %d %d %d %s\n",sName, cStart, cNum, gTruth, ext); } else if(gtType == MIXTURE) { // get the files in this 
category and ground truth //char tStr[300]; //sscanf(inLine, "%s %d %d %s %s", cName, &cStart, &cNum, tStr, ext); //sprintf(sName,"%s%s", folder, cName); //printf(" sName: %s %d %d %d %s\n",sName, cStart, cNum, gTruth, ext); // change to mixture values LFATAL("MIXTURE ground truth type not yet implemented"); } else LFATAL("unknown ground truth type %s",gtOpt); nSamples.push_back(cNum); // go through every sample for(int j = cStart; j < cStart+cNum; j++) { tSamples++; // get the corresponding vector file (if exist) sprintf(iName,"%s%06d%s", sName,j,ext); // open the file if((ifp = fopen(iName,"rb")) != NULL) { Image<double> tData(1,info->oriFeatSize, NO_INIT); Image<double>::iterator aptr = tData.beginw(); for(int i = 0; i < tData.getSize(); i++) { double val; if (fread(&val, sizeof(double), 1, ifp) != 1) LFATAL("fread failed"); *aptr++ = val; } LINFO("feature file found: %s (%d)",//%7.4f %7.4f %7.4f %7.4f\n", iName,gTruth);//,tData[0], tData[21], tData[42], tData[63]); fclose(ifp); // calculate the reduced features if(info->isPCA) in[count] = matrixMult(pcaIcaMatrix, tData); else in[count] = tData; // load the ground truth if(gtType == ABSOLUTE) { Image<double> res(1,info->nOutput, ZEROS); res.setVal(0, gTruth, 1.0); out[count] = res; } else if(gtType == MIXTURE) { LFATAL("MIXTURE ground truth type not yet implemented"); } else LFATAL("unknown ground truth type %s",gtOpt); // // just to test stuff // for(int k = 0; k < info->oriFeatSize; k++) // printf("ori[%7d]: %f \n", k, tData.getVal(k)); // printf("\n"); // for(int k = 0; k < info->redFeatSize; k++) // printf("red[%7d]: %f \n", k, in[count].getVal(k)); // printf("\n"); // //for(uint k = 0; k < info->nOutput; k++) // // printf("%f \n",out[count].getVal(k)); // Raster::waitForKey(); count++; } else LFATAL("file: %s not found\n",iName); } } // equalize the number of samples if requested if(equalize) { // find the max uint max = 0; // for(uint i = 0; i < nSamples.size(); i++) // if(max < nSamples[i]) max = 
nSamples[i]; max = *max_element(nSamples.begin(),nSamples.end()); LINFO("max element: %d", max); uint offset = 0; for(uint i = 0; i < nSamples.size(); i++) { LINFO("extra samples for class[%3d]: %d - %d -> %d", i, max, nSamples[i], max - nSamples[i]); for(uint j = 0; j < max - nSamples[i]; j++) { // index to be copied uint index = rand()/(RAND_MAX + 1.0) * nSamples[i]; LINFO("[%d] Duplicating class[%3d] sample[%3d]" " -> actual ind: %3d", j, i, index, index + offset); index = index + offset; in.push_back(in[index]); out.push_back(out[index]); } offset += nSamples[i]; } LINFO("Total samples before equalized: %d \n",tSamples); tSamples = in.size(); } LINFO("Actual total samples: %d \n",tSamples); fclose(fp); }
//! Frame-grabber test program: grabs frames, runs two color-segmentation
//! trackers (segmentImageMerge) on each frame, displays the input, the two
//! candidate maps and the HSV level windows, steers the camera toward the
//! first tracker's target, and prints the frame rate every NAVG frames.
int main(const int argc, const char **argv)
{
  // instantiate a model manager:
  ModelManager manager("Frame Grabber Tester");

  // Instantiate our various ModelComponents:
  nub::soft_ref<FrameGrabberConfigurator>
    gbc(new FrameGrabberConfigurator(manager));
  manager.addSubComponent(gbc);

  nub::soft_ref<CameraControl>
    camera(new CameraControl(manager, "Camera Controller", "CameraControl",
                             0, true, 0, 1, 1));
  manager.addSubComponent(camera);

  // Parse command-line:
  if (manager.parseCommandLine(argc, argv, "", 0, 0) == false) return(1);

  // do post-command-line configs:
  nub::soft_ref<FrameIstream> gb = gbc->getFrameGrabber();
  if (gb.isInvalid())
    LFATAL("You need to select a frame grabber type via the "
           "--fg-type=XX command-line option for this program "
           "to be useful");
  int width = gb->getWidth(), height = gb->getHeight();
  float delay = 0;

  // let's get all our ModelComponent instances started:
  manager.start();

  // display windows: full-size input plus quarter-size tracker outputs
  // and two narrow HSV-level monitor windows
  XWindow wini(Dims(width, height), 0, 0, "test-input window");
  XWindow wino1(Dims(width/4, height/4), 0, 0, "test-output window 1");
  XWindow wino2(Dims(width/4, height/4), 0, 0, "test-output window 2");
  XWindow winAux1(Dims(100, 450), 0, 0, "HSV levels 1");
  XWindow winAux2(Dims(100, 450), 0, 0, "HSV levels 2");
  Timer tim;
  Image< PixRGB<byte> > ima;
  Image< PixRGB<float> > fima;
  Image< PixRGB<byte> > display;
  Timer camPause;       // to pause the move command
  camPause.reset();
  uint64 t[NAVG]; int frame = 0;

  // two-channel color segmenter/tracker
  segmentImageMerge segmenter(2);

  // set up tracking parameters (H mean/std, S mean/std, V mean/std,
  // channel, adapt flag, frame skip)
  //segmenter.setTrackColor(10,10,0.15,0.20,150,150,0,true,15);
  segmenter.setTrackColor(13,7,0.17,0.3,156,30,0,true,15);
  //segmenter.setTrackColor(10,10,0.15,0.20,150,150,1,false,15);
  segmenter.setTrackColor(270,10,0.18,0.25,60,60,1,true,15);

  // adaptation bounds for each tracker channel
  segmenter.setAdaptBound(20,5,.30,.15,170,100,0);
  //segmenter.setAdaptBound(15,5,.30,.25,140,100,0);
  segmenter.setAdaptBound(285,265,.25,.15,80,40,1);

  // both trackers work on a quarter-size frame
  segmenter.setFrame(0,0,width/4,height/4,width/4,height/4,0);
  segmenter.setFrame(0,0,width/4,height/4,width/4,height/4,1);

  // overlay colors for the two trackers
  segmenter.setCircleColor(0,255,0,0);
  segmenter.setCircleColor(0,0,255,1);
  segmenter.setBoxColor(255,255,0,0);
  segmenter.setBoxColor(255,0,255,1);
  segmenter.setAdapt(3,true,3,true,3,true,0);
  segmenter.setAdapt(3,true,3,true,3,true,1);

  while(1)
    {
      tim.reset();
      ima = gb->readRGB();
      uint64 t0 = tim.get();  // to measure display time

      Image<PixRGB<byte> > Aux1;
      Image<PixRGB<byte> > Aux2;
      Aux1.resize(100,450,true);
      Aux2.resize(100,450,true);
      Image<byte> outputI1;
      Image<byte> outputI2;

      display = ima;
      // run both trackers on this frame and merge their overlays
      segmenter.trackImage(ima,&display,0,&Aux1);
      segmenter.trackImage(ima,&display,1,&Aux2);
      segmenter.mergeImages(&display);

      // steer the camera toward tracker 0's target, but only after the
      // previous move's delay has elapsed and when not in loss-of-track
      if(camPause.get() > delay)
        {
          int modi,modj;
          segmenter.getImageTrackXY(&modi,&modj,0);
          //segmenter.getImageTrackXYMerge(&modi,&modj);
          // scale quarter-size tracker coords back to 640x480, y flipped
          modi = modi*8; modj = 480-modj*8;
          if(modi > 0 && modi < 640 && modj > 0 && modj < 480)
            {
              if(segmenter.returnLOT(0) == false)
                {
                  camPause.reset();
                  delay = camera->moveCamXYFrame(modi,modj);
                }
            }
        }

      Image<byte> temp1 = segmenter.returnCandidateImage(0);
      Image<byte> temp2 = segmenter.returnCandidateImage(1);
      wini.drawImage(display);
      //wino1.drawImage(outputI1);
      wino1.drawImage(temp1);
      wino2.drawImage(temp2);
      winAux1.drawImage(Aux1);
      winAux2.drawImage(Aux2);

      t[frame % NAVG] = tim.get();
      t0 = t[frame % NAVG] - t0;
      if (t0 > 28) LINFO("Display took %llums", t0);

      // compute and show framerate over the last NAVG frames:
      if (frame % NAVG == 0 && frame > 0)
        {
          uint64 avg = 0;
          for (int i = 0; i < NAVG; i ++) avg += t[i];
          float avg2 = 1000.0 / (float)avg * NAVG;
          printf("Framerate: %.1f fps\n", avg2);
        }
      frame ++;
    }

  manager.stop();
  return 0;
}
//! Render the inport's image to the canvas: either save it to file (when
//! the renderToImage_ flag was set), blit it onto a screen-aligned quad
//! via the copy shader, or -- when the inport is not ready -- draw the
//! error texture instead.
void CanvasRenderer::process() {
    if (!canvas_)
        return;

    canvas_->getGLFocus();
    glViewport(0, 0, canvas_->getSize().x, canvas_->getSize().y);

    if (inport_.isReady()) {
        // render inport to image, if renderToImage flag has been set
        if (renderToImage_) {
            try {
                renderInportToImage(renderToImageFilename_);
                LINFO("Saved rendering with dimensions " << inport_.getSize()
                      << " to file: " << tgt::FileSystem::cleanupPath(renderToImageFilename_));
            }
            catch (std::bad_alloc& /*e*/) {
                LERROR("Exception in CanvasRenderer::renderInportToImage(): bad allocation (" << getID() << ")");
                renderToImageError_ = "Not enough system memory (bad allocation)";
            }
            catch (VoreenException& e) {
                LERROR(e.what());
                renderToImageError_ = std::string(e.what());
            }
            catch (std::exception& e) {
                LERROR("Exception in CanvasRenderer::renderInportToImage(): " << e.what() << " (" << getID() << ")");
                renderToImageError_ = std::string(e.what());
            }
            // one-shot: clear the flag whether or not saving succeeded
            renderToImage_ = false;
        }
        // map texture of input target onto a screen-aligned quad
        else {
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

            // activate shader
            shader_->activate();

            // set common uniforms
            setGlobalShaderParameters(shader_);

            // manually pass the viewport dimensions to the shader,
            // since setGlobalShaderParameters() expects a render outport, which we do not have
            shader_->setIgnoreUniformLocationError(true);
            shader_->setUniform("screenDim_", tgt::vec2(canvas_->getSize()));
            shader_->setUniform("screenDimRCP_", 1.f / tgt::vec2(canvas_->getSize()));
            shader_->setIgnoreUniformLocationError(false);

            // bind input textures
            inport_.bindTextures(GL_TEXTURE0, GL_TEXTURE1);

            // pass texture parameters to the shader
            shader_->setUniform("colorTex_", 0);
            shader_->setUniform("depthTex_", 1);
            inport_.setTextureParameters(shader_, "texParams_");
            LGL_ERROR;

            // execute the shader
            renderQuad();
            shader_->deactivate();
            LGL_ERROR;
        }
    }
    else {
        // render error texture
        if (!errorTex_) {
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
            return;
        }
        glClear(GL_DEPTH_BUFFER_BIT);

        glActiveTexture(GL_TEXTURE0);
        errorTex_->bind();
        errorTex_->enable();
        glColor3f(1.f, 1.f, 1.f);
        renderQuad();
        errorTex_->disable();
    }

    // leave texture unit 0 active for subsequent processors
    glActiveTexture(GL_TEXTURE0);
    LGL_ERROR;
}
// ###################################################################### void XCgrabberFlex::start1() { #ifndef HAVE_XCLIB LFATAL("you must have XC support and the xclib library in order to use XCgrabberFlex"); #else // open the XC cameralink imaging board int i; if(!strcmp(itsFormatFile.getVal().c_str(),"noFile")) { LINFO("use default setup configure format"); char* format =(char*)("default"); i = xclib::xclib_open(&itsXclib, NULL,NULL, format, NULL); } else { LINFO("use input format file as configure file"); char* formatFile = (char*)(itsFormatFile.getVal().c_str()); i = xclib::xclib_open(&itsXclib, NULL, NULL, NULL, formatFile); } if(i != 0) { LINFO("error code %d\n", i); LFATAL("can not open the XC camera"); } switch(itsGrabMode.getVal()) { case VIDFMT_BAYER_GB12: itsBitDepth = 12; break; case VIDFMT_BAYER_GR12: itsBitDepth = 12; break; case VIDFMT_BAYER_RG12: itsBitDepth = 12; break; case VIDFMT_BAYER_BG12: itsBitDepth = 12; break; case VIDFMT_BAYER_GB: itsBitDepth = 8; break; case VIDFMT_BAYER_GR: itsBitDepth = 8; break; case VIDFMT_BAYER_RG: itsBitDepth = 8; break; case VIDFMT_BAYER_BG: itsBitDepth = 8; break; default: LFATAL("ERROR in specify the xc grab mode"); } // list basic camera info struct xclib::pxdevinfo pxinfo; memset(&pxinfo, 0, sizeof(pxinfo)); pxinfo.ddch.len = sizeof(pxinfo); pxinfo.ddch.mos = PXMOS_DEVINFO; itsXclib.pxdev.getDevInfo(&(itsXclib.pxdev), UNITMAP, 0, &pxinfo); LINFO("find %d baords, frame buffer memory = %.4f Kbytes", pxinfo.nunits,(double)pxinfo.memsize/1024); //white balance WhiteBalance(); struct xclib::pxlibservice pxlib = itsXclib.pxlib; struct xclib::xcdevservice xcdev = itsXclib.xcdev; // initialize pxvidstate i = pxlib.allocStateCopy(&pxlib, 0, 0, &itsStatep); LINFO("allocate state copy (video state), result code: %d", i); i = pxlib.initStateCopy(&pxlib, 0, 0, itsStatep, &pxinfo, (char*)("default"), PXMODE_DIGI); LINFO("init state copy (video state), result code: %d",i); pxlib.defineState(&pxlib, 0, itsStateid, 
itsStatep); // itsStatep->vidres->x.vidoffset = 640;//1920/2-itsDims.getVal().w()/2; //itsStatep->vidres->x.vidoffsend = 1920; //1920/2+itsDims.getVal().w()/2; LINFO("pxvidimage dims = %d,%d\n",itsStatep->vidres->x.vidoffset, itsStatep->vidres->x.vidoffsend); //! show some info of pxvidstate structure /* LINFO("the pxvidimage bayerpattern: %d, %d, %d, %d, %d, %d\n", itsStatep->vidimage->bp.order, itsStatep->vidimage->bp.mode, itsStatep->vidimage->bp.arg[0], itsStatep->vidimage->bp.arg[1], itsStatep->vidimage->bp.arg[2], itsStatep->vidimage->bp.arg[3]); LINFO("the pxvidimage colorspace: %d, %d, %d", itsStatep->vidimage->cs.order, itsStatep->vidimage->cs.mode, (int)itsStatep->vidimage->cs.scale); LINFO("the pxvid image whitebalance: %d, %d, %d", itsStatep->vidimage->wb.order, itsStatep->vidimage->wb.mode, (int)itsStatep->vidimage->wb.gamma[0][0]); LINFO("the pxvid image sharp :%d, %d, %d, %d, %d", itsStatep->vidimage->sh.order, itsStatep->vidimage->sh.mode, itsStatep->vidimage->sh.scale, itsStatep->vidimage->sh.into[0], itsStatep->vidimage->sh.from[0]); for(int i=0; i<6; i++) for(int j=0; j<4; j++) { itsStatep->vidimage->wb.gamma[i][j] = 100; itsStatep->vidimage->wb.darkreference[i][j] = 30; itsStatep->vidimage->wb.darktarget[i][j] = 30; itsStatep->vidimage->wb.brightreference[i][j] = 120; itsStatep->vidimage->wb.brighttarget[i][j] = 200; } itsStatep->vidimage->wb.mode = 3; itsStatep->vidimage->wb.order = 1; itsStatep->vidimage->sh.order = 1; itsStatep->vidimage->sh.mode = 3; itsStatep->vidimage->sh.into[0] = 120; itsStatep->vidimage->sh.into[1] = 120; itsStatep->vidimage->sh.into[2] = 120; itsStatep->vidimage->sh.from[0] = 200; itsStatep->vidimage->sh.from[1] = 200; itsStatep->vidimage->sh.from[2] = 200; itsStatep->vidimage->sh.scale = 2; itsStatep->vidimage->sh.arg[0] = 1; itsStatep->vidimage->sh.arg[1] = 1; i = xcdev.setCameraConfig(&xcdev, UNITMAP, 0, 0, itsStatep, NULL); LINFO("set camera config res code: %d", i); LINFO("after wb, gamma is %d", 
(int)itsStatep->vidimage->wb.gamma[0][0]); i = pxlib.exportStateCopy(&pxlib,0, itsStateid, itsStatep, 0,(char*) "trash_xc_format.txt",NULL,NULL,NULL); LINFO("export state res code: %d", i); */ i = xcdev.setVideoConfig(&xcdev,UNITMAP, 0, 0, itsStatep, NULL); LINFO("set video configure code %d", i); i = xcdev.setLiveSeqBuf (&xcdev, UNITMAP, 0, 0, itsStatep, NULL, 1, USEDBUFFER, 1, 0, 1, 0); if(i != 0) { LINFO("start capture error code %d", i); LFATAL("the imaging board can not work on live mode\n"); } // make sure the camera start to work for capture // get the captured buffer ID xclib::pxbuffer_t bufferID = (xclib::pxbuffer_t)xcdev.getLiveStatus(&xcdev, UNITMAP, 0, PXVIST_DONE | PXVIST_BUFFER); if(bufferID ==0) LINFO("Grab not ready..."); while( bufferID == 0 ) { usleep(XCWAIT); bufferID = (xclib::pxbuffer_t)xcdev.getLiveStatus (&xcdev, UNITMAP, 0, PXVIST_DONE | PXVIST_BUFFER); } const unsigned int bufSz = itsDims.getVal().sz() * (int)ceil(itsBitDepth/8); itsImgBuf = (byte*)malloc(bufSz); itsCameraOk = true; #endif // HAVE_XCLIB }
//! Downscale an RGB image to (width x height) by scattering each source
//! pixel into the up-to-four destination cells it overlaps, with weights
//! chosen by 'weighting_slope' (0 = linear, 1 = cosine, 2-4 = tanh of
//! increasing steepness, 5 = hard threshold), then normalizing each
//! destination cell by its accumulated weight.  If 'no_weight_black',
//! pure-black source pixels contribute nothing (their cells keep the
//! surrounding average).  ADD_RGB is a project macro that accumulates a
//! weighted pixel and its weight into the two buffers.
//! (The template<class T> header precedes this chunk; assumes T is a
//! floating-point component type, since fractions of pixels are stored
//! in dx/dy -- TODO confirm at the declaration site.)
Image<PixRGB<T> > downscaleFancy(const Image<PixRGB<T> >& src,
                                 int width, int height, int weighting_slope,
                                 bool no_weight_black)
{
GVX_TRACE(__PRETTY_FUNCTION__);
  PixRGB<T> pix(0);
  Image<PixRGB<T> > buffer;     // weighted color accumulator
  Image<PixRGB<T> > out;
  Image<T> bufferWeight;        // accumulated weights, for normalization
  buffer.resize(width,height,true);
  out.resize(width,height,true);
  bufferWeight.resize(width,height,true);

  // source -> destination coordinate scale factors (< 1 for downscale)
  T scalex = (T)width / (T)src.getWidth();
  T scaley = (T)height / (T)src.getHeight();
  T dx, dy, weight, xweight, yweight, xfrac, yfrac;
  typename Image<PixRGB<T> >::iterator bufelem;
  typename Image<T>::iterator bufw;

  dx = (T)0.0; dy = (T)0.0;
  for (int sy = (int)0; sy < (int)src.getHeight(); sy++, dy += scaley)
    { // outer loop for Y axis
      yfrac = dy - (T)floor(dy);
      // map the fractional position to an interpolation weight:
      switch (weighting_slope)
        {
        case 5: yweight = yfrac < 0.5 ? 0 : 1; break;
        case 4: yweight = (T)(0.5 + 0.5*tanh(15*(yfrac - 0.5))); break;
        case 3: yweight = (T)(0.5 + 0.5*tanh(8*(yfrac - 0.5))); break;
        case 2: yweight = (T)(0.5 + 0.5*tanh(5*(yfrac - 0.5))); break;
        case 1: yweight = (T)(0.5 - 0.5*cos(yfrac*M_PI)); break;
        case 0: yweight = yfrac; break;
        default : LERROR("illegal weighting slope"); yweight = yfrac;
        }

      // inner loop for X axis
      dx = (T)0;
      for (int sx = (int)0; sx < (int)src.getWidth(); sx++, dx += scalex)
        {
          //LINFO("X %d",sx);
          xfrac = dx - (T)floor(dx);
          switch (weighting_slope)
            {
            case 5: xweight = xfrac < 0.5 ? 0 : 1; break;
            case 4: xweight = (T)(0.5 + 0.5*tanh(15*(xfrac - 0.5))); break;
            case 3: xweight = (T)(0.5 + 0.5*tanh(8*(xfrac - 0.5))); break;
            case 2: xweight = (T)(0.5 + 0.5*tanh(5*(xfrac - 0.5))); break;
            case 1: xweight = (T)(0.5 - 0.5*cos(xfrac*M_PI)); /*almost same as tanh(4*x)*/ break;
            case 0: xweight = xfrac; break;
            default : LINFO("illegal weighting slope"); xweight = xfrac;
            }
          //LINFO("XWEIGHT %f",xweight);

          int floordx = (int)floor((T)dx);
          int floordy = (int)floor((T)dy);
          const PixRGB<T> *in_sy_sx = &src.getVal(sx,sy);

          // optionally let pure-black pixels contribute nothing
          if (no_weight_black)
            if (in_sy_sx->red() == 0 && in_sy_sx->green() == 0 && in_sy_sx->blue() == 0)
              continue;

          // scatter into the four neighboring destination cells with
          // bilinear weights (guarding the right/bottom borders):
          bufelem = buffer.beginw() + buffer.getWidth()*floordy + floordx;
          bufw = bufferWeight.beginw() + bufferWeight.getWidth()*floordy + floordx;
          ADD_RGB(bufelem, ((T)1.0-xweight)*((T)1.0-yweight), bufw, in_sy_sx);
          if (dx < width - 1)
            {
              bufelem++; bufw++;
              ADD_RGB(bufelem, xweight*((T)1.0-yweight), bufw, in_sy_sx);
            }
          if (dy < height - 1)
            {
              bufelem = buffer.beginw() + buffer.getWidth()*(floordy+1) + floordx;
              bufw = bufferWeight.beginw() + bufferWeight.getWidth()*(floordy+1) + floordx;
              ADD_RGB(bufelem, ((T)1.0-xweight)*yweight, bufw, in_sy_sx);
              if (dx < width - 1)
                {
                  bufelem++; bufw++;
                  //bufelem = &(buf[BUFIDX(floordx+1,floordy+1)]);
                  ADD_RGB(bufelem, xweight*yweight, bufw, in_sy_sx);
                }
            }
        }

      // when the next source row starts a new destination row, this
      // destination row is complete: normalize it into 'out' and clear
      // the accumulators
      if (floorf(dy + scaley) > floorf(dy))
        { /* line finished -> write to out */
          int dsty = (int)floor(dy);
          for (int dstx = 0; dstx < width; dstx++)
            {
              weight = bufferWeight.getVal(dstx,dsty);
              if(weight != 0.0)
                {
                  pix = buffer.getVal(dstx,dsty);
                  pix /= weight;
                  out.setVal(dstx,dsty,pix);
                }
              PixRGB<T> zero(0);
              buffer.setVal(dstx,dsty,zero);
              bufferWeight.setVal(dstx,dsty,0);
            }
        }
    }
  return out;
}
// ######################################################################
//! Grab one raw frame from the XC board's live-capture ring and wrap it
//! (without copying) in a VideoFrame backed by itsImgBuf.
VideoFrame XCgrabberFlex::grabRaw()
{
#ifndef HAVE_XCLIB
  LFATAL("you must have XC support and the xclib library in order to use XCgrabberFlex");
  return VideoFrame(); /* can't happen */
#else
  ASSERT(itsCameraOk);
  int i = 0;
  struct xclib::xcdevservice xcdev = itsXclib.xcdev;
  struct xclib::pxlibservice pxlib = itsXclib.pxlib;

  // get the captured buffer ID: busy-poll (with short sleeps) until the
  // live capture advances past the buffer we consumed last time
  xclib::pxbuffer_t bufferID = (xclib::pxbuffer_t)xcdev.getLiveStatus
    (&xcdev, UNITMAP, 0, PXVIST_DONE | PXVIST_BUFFER);
  while( bufferID == itsLastBuf)
    {
      bufferID = (xclib::pxbuffer_t)xcdev.getLiveStatus
        (&xcdev, UNITMAP, 0, PXVIST_DONE | PXVIST_BUFFER);
      usleep(100);
    }

  // buffers should arrive in ring order; warn if one was skipped
  if(itsLastBuf != 0 && bufferID != (itsLastBuf)%USEDBUFFER + 1)
    {
      LINFO("last buf id= %4d, curr buf id= %4d",(int)itsLastBuf,(int)bufferID);
      LERROR("buffer error: buffer mis order");
    }

  // record which buffer we consumed (itsLastBuf is shared across threads)
  pthread_mutex_lock(&qmutex_buf);
  itsLastBuf = bufferID;
  pthread_mutex_unlock(&qmutex_buf);

  // is the captured image base on byte or uint16 type
  int dataMode = (itsBitDepth == 8 ? PXDATUINT8:PXDATUINT16);

  // NOTE(review): itsBitDepth/8 is integer division, so for 12-bit modes
  // ceil(12/8) evaluates to 1 (not 2) and bufSz is half of what the
  // PXDATUINT16 read below needs.  The fix ((itsBitDepth + 7) / 8) must
  // be applied TOGETHER with the matching malloc() in start1(), which
  // sized itsImgBuf with this same formula -- changing only this site
  // would overrun that allocation.
  const unsigned int bufSz = itsDims.getVal().sz() * (int)ceil(itsBitDepth/8);
  const unsigned int imgSz = itsDims.getVal().sz();

  //! define the image from frame buffer
  struct xclib::pximage pximg;
  i = pxlib.initPximage(&pxlib, UNITMAP, &pximg, 1, PXHINTBAYER,
                        0, itsStateid, bufferID, 0);
  // center an itsDims-sized capture window inside the 1920x1080 frame
  pximg.wind.nw.x = 1920/2 - itsDims.getVal().w()/2;
  pximg.wind.nw.y = 1080/2 - itsDims.getVal().h()/2;
  pximg.wind.se.x = 1920/2 + itsDims.getVal().w()/2;
  pximg.wind.se.y = 1080/2 + itsDims.getVal().h()/2;
  LINFO("pximgsize %d,%d", pximg.wind.nw.x,pximg.wind.se.x);
  if (i<1)
    LFATAL("error, can not define a pximage, code: %d",i);

  if(pximg.ioset(&pximg, PXRXSCAN | PXIWRAP, dataMode, 0x01) < 0)
    {
      LFATAL("error in ioset, can not set frame buffer read");
      return VideoFrame(); /* can't happen: LFATAL does not return */
    }

  if(imgSz != pximg.ioread(&pximg, PXRXSCAN | PXIWRAP, itsImgBuf,bufSz,0,0))
    {
      LFATAL("error in reading frame buffer(size error),"
             "expected size = %d", imgSz);
      return VideoFrame(); /* can't happen: LFATAL does not return */
    }

  return VideoFrame(itsImgBuf, bufSz, itsDims.getVal(), itsGrabMode.getVal(),
                    itsByteSwap.getVal(), false);
#endif // HAVE_XCLIB
}
// ###################################################################### void GistEstimatorGen::getFeatureVector(rutz::shared_ptr<ChannelMaps> chanMaps) { //! first get the gist feature size and allocate the gist vector size int sz = 0, sz_cs=0, sz_nocs = 0; if(itsUseCS.getVal() == 1 || itsUseCS.getVal() == 2) sz_cs += chanMaps->numSubmaps(); // sz_nocs is the number of how many raw pyramid types if(itsUseCS.getVal() == 0 || itsUseCS.getVal() == 2) for(uint i=0; i < chanMaps->numSubchans(); i++) { rutz::shared_ptr<ChannelMaps> currChan = chanMaps->subChanMaps(i); if(currChan->numSubchans() == 0) sz_nocs++; else sz_nocs += currChan->numSubchans(); } sz_nocs *= PYR_LEVEL; sz = sz_cs + sz_nocs; LINFO("there are in total %4d gist feature chans", sz); itsGistVector.resize(1,NUM_GIST_FEAT * sz, NO_INIT); int count = 0; //! get the center-surround feature values if(itsUseCS.getVal() == 1 || itsUseCS.getVal() == 2) for(int i = 0; i<sz_cs; i++) { inplacePaste(itsGistVector,getSubSumGen(chanMaps->getRawCSmap(i)), Point2D<int>(0, count*NUM_GIST_FEAT)); count++; } //! get the non center-surround feature values if(itsUseCS.getVal() == 0 || itsUseCS.getVal() == 2) for(uint i=0; i<chanMaps->numSubchans(); i++) { rutz::shared_ptr<ChannelMaps> currChan = chanMaps->subChanMaps(i); if(currChan->numSubchans() == 0) { ASSERT(currChan->hasPyramid()); for(uint j=0; j<PYR_LEVEL; j++) { inplacePaste(itsGistVector,getSubSumGen (currChan->getPyramid().getImage(j)), Point2D<int>(0,count*NUM_GIST_FEAT)); count++; } } else { for(uint i=0; i<currChan->numSubchans(); i++) { rutz::shared_ptr<ChannelMaps> currSubChan = currChan->subChanMaps(i); ASSERT(currSubChan->hasPyramid()); for(uint j=0; j<PYR_LEVEL; j++) { inplacePaste(itsGistVector,getSubSumGen (currSubChan->getPyramid().getImage(j)), Point2D<int>(0,count*NUM_GIST_FEAT)); count++; } } } } ASSERT(count == sz); itsGistSize = sz; }
/*
 * Open (or replace) the backing file or block device for a LUN.
 * Opens read-write when possible, falling back to read-only (forced for
 * -EROFS/-EACCES, non-writable file modes, or missing write ops);
 * validates the file type and size, derives the logical block size
 * (2048 for CD-ROM emulation, the device's for block devices, 512
 * otherwise), and on success records everything in *curlun.
 * Returns 0 on success or a negative errno.
 */
static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
{
	int				ro;
	struct file			*filp = NULL;
	int				rc = -EINVAL;
	struct inode			*inode = NULL;
	struct backing_dev_info		*bdi;
	loff_t				size;
	loff_t				num_sectors;
	loff_t				min_sectors;
	unsigned int			blkbits;
	unsigned int			blksize;

	/* R/W if we can, R/O if we must */
	ro = curlun->initially_ro;
	if (!ro) {
		filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
		if (PTR_ERR(filp) == -EROFS || PTR_ERR(filp) == -EACCES)
			ro = 1;
	}
	if (ro)
		filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
	if (IS_ERR(filp)) {
		LINFO(curlun, "unable to open backing file: %s\n", filename);
		return PTR_ERR(filp);
	}

	if (!(filp->f_mode & FMODE_WRITE))
		ro = 1;

	/* only regular files and block devices may back a LUN */
	inode = file_inode(filp);
	if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) {
		LINFO(curlun, "invalid file type: %s\n", filename);
		goto out;
	}

	/*
	 * If we can't read the file, it's no good.
	 * If we can't write the file, use it read-only.
	 */
	if (!(filp->f_op->read || filp->f_op->aio_read)) {
		LINFO(curlun, "file not readable: %s\n", filename);
		goto out;
	}
	if (!(filp->f_op->write || filp->f_op->aio_write))
		ro = 1;

	size = i_size_read(inode->i_mapping->host);
	if (size < 0) {
		LINFO(curlun, "unable to find file size: %s\n", filename);
		rc = (int) size;
		goto out;
	}

	if (curlun->cdrom) {
		/* CD-ROM emulation always uses 2048-byte sectors (2^11) */
		blksize = 2048;
		blkbits = 11;
	} else if (inode->i_bdev) {
		blksize = bdev_logical_block_size(inode->i_bdev);
		blkbits = blksize_bits(blksize);
		bdi = &inode->i_bdev->bd_queue->backing_dev_info;
		if (bdi->capabilities & BDI_CAP_STRICTLIMIT) {
			curlun->max_ratio = bdi->max_ratio;
			curlun->nofua = 1;
			/* best-effort: a failed ratio update is only logged */
			if (bdi_set_max_ratio(bdi, uicc_ums_max_ratio))
				pr_debug("%s, error in setting max_ratio\n",
					__func__);
		}
	} else {
		blksize = 512;
		blkbits = 9;
	}

	num_sectors = size >> blkbits; /* File size in logic-block-size blocks */
	min_sectors = 1;
	if (curlun->cdrom) {
		min_sectors = 300;	/* Smallest track is 300 frames */
		if (num_sectors >= 256*60*75) {
			/* clamp to the CD addressing limit (256 min of audio) */
			num_sectors = 256*60*75 - 1;
			LINFO(curlun, "file too big: %s\n", filename);
			LINFO(curlun, "using only first %d blocks\n",
				(int) num_sectors);
		}
	}
	if (num_sectors < min_sectors) {
		LINFO(curlun, "file too small: %s\n", filename);
		rc = -ETOOSMALL;
		goto out;
	}

	/* replace any previously opened backing file */
	if (fsg_lun_is_open(curlun))
		fsg_lun_close(curlun);

	curlun->blksize = blksize;
	curlun->blkbits = blkbits;
	curlun->ro = ro;
	curlun->filp = filp;
	curlun->file_length = size;
	curlun->num_sectors = num_sectors;
	LDBG(curlun, "open backing file: %s\n", filename);
	return 0;

out:
	fput(filp);
	return rc;
}
/**
 * Load a scene graph from a scene-description file.
 *
 * Resolves the scene directory, reads the module list, loads each module's
 * .mod dictionary, instantiates SceneGraphNodes, wires parent and dependency
 * edges, prunes nodes unreachable from the root, and topologically sorts.
 *
 * \param sceneDescription path (possibly with path tokens) to the scene file
 * \return true on success; false on missing file, bad paths, or sort failure.
 *         Note: returns true early when no modules are listed.
 */
bool SceneGraph::loadFromFile(const std::string& sceneDescription) {
    clear(); // Move this to a later stage to retain a proper scenegraph when the loading fails ---abock

    std::string absSceneFile = absPath(sceneDescription);

    // See if scene file exists
    if (!FileSys.fileExists(absSceneFile, true)) {
        LERROR("Could not load scene file '" << absSceneFile << "'. " <<
            "File not found");
        return false;
    }
    LINFO("Loading SceneGraph from file '" << absSceneFile << "'");

    // Load dictionary; any Lua error simply aborts the load
    ghoul::Dictionary sceneDictionary;
    try {
        ghoul::lua::loadDictionaryFromFile(absSceneFile, sceneDictionary);
    }
    catch (...) {
        return false;
    }

    std::string sceneDescriptionDirectory =
        ghoul::filesystem::File(absSceneFile, true).directoryName();
    std::string sceneDirectory(".");
    sceneDictionary.getValue(KeyPathScene, sceneDirectory);

    // The scene path could either be an absolute or relative path to the description
    // paths directory
    std::string relativeCandidate = sceneDescriptionDirectory +
        ghoul::filesystem::FileSystem::PathSeparator + sceneDirectory;
    std::string absoluteCandidate = absPath(sceneDirectory);

    if (FileSys.directoryExists(relativeCandidate))
        sceneDirectory = relativeCandidate;
    else if (FileSys.directoryExists(absoluteCandidate))
        sceneDirectory = absoluteCandidate;
    else {
        LERROR("The '" << KeyPathScene << "' pointed to a "
            "path '" << sceneDirectory << "' that did not exist");
        return false;
    }

    ghoul::Dictionary moduleDictionary;
    bool success = sceneDictionary.getValue(KeyModules, moduleDictionary);
    if (!success)
        // There are no modules that are loaded
        return true;

    // A dedicated Lua state is used for all module files and destroyed below
    lua_State* state = ghoul::lua::createNewLuaState();
    OsEng.scriptEngine().initializeLuaState(state);

    // Get the common directory
    bool commonFolderSpecified = sceneDictionary.hasKey(KeyCommonFolder);
    bool commonFolderCorrectType =
        sceneDictionary.hasKeyAndValue<std::string>(KeyCommonFolder);

    if (commonFolderSpecified) {
        if (commonFolderCorrectType) {
            std::string commonFolder =
                sceneDictionary.value<std::string>(KeyCommonFolder);
            std::string fullCommonFolder = FileSys.pathByAppendingComponent(
                sceneDirectory,
                commonFolder
            );
            if (!FileSys.directoryExists(fullCommonFolder))
                LERROR("Specified common folder '" << fullCommonFolder <<
                    "' did not exist");
            else {
                if (!commonFolder.empty()) {
                    // Register the token and append 'common' as one more module
                    FileSys.registerPathToken(_commonModuleToken, commonFolder);
                    size_t nKeys = moduleDictionary.size();
                    moduleDictionary.setValue(std::to_string(nKeys + 1), commonFolder);
                }
            }
        }
        else
            LERROR("Specification for 'common' folder has invalid type");
    }

    std::vector<std::string> keys = moduleDictionary.keys();

    // nodeName -> list of names it depends on; nodeName -> parent name
    std::map<std::string, std::vector<std::string>> dependencies;
    std::map<std::string, std::string> parents;

    // Ownership of these raw allocations is taken over by _nodes / the graph
    _rootNode = new SceneGraphNode;
    _rootNode->setName(SceneGraphNode::RootNodeName);

    SceneGraphNodeInternal* internalRoot = new SceneGraphNodeInternal;
    internalRoot->node = _rootNode;
    _nodes.push_back(internalRoot);

    std::sort(keys.begin(), keys.end());
    ghoul::filesystem::Directory oldDirectory = FileSys.currentDirectory();
    for (const std::string& key : keys) {
        std::string moduleName = moduleDictionary.value<std::string>(key);
        std::string modulePath = FileSys.pathByAppendingComponent(sceneDirectory, moduleName);

        if (!FileSys.directoryExists(modulePath)) {
            LERROR("Could not load module '" << moduleName << "'. Directory did not exist");
            continue;
        }

        std::string moduleFile = FileSys.pathByAppendingComponent(
            modulePath,
            moduleName + _moduleExtension
        );
        if (!FileSys.fileExists(moduleFile)) {
            LERROR("Could not load module file '" << moduleFile << "'. File did not exist");
            continue;
        }

        // NOTE: shadows the outer moduleDictionary on purpose
        ghoul::Dictionary moduleDictionary;
        try {
            ghoul::lua::loadDictionaryFromFile(moduleFile, moduleDictionary, state);
        }
        catch (...)
        {
            continue;
        }

        std::vector<std::string> keys = moduleDictionary.keys();
        for (const std::string& key : keys) {
            if (!moduleDictionary.hasValue<ghoul::Dictionary>(key)) {
                LERROR("SceneGraphNode '" << key << "' is not a table in module '"
                    << moduleFile << "'");
                continue;
            }

            ghoul::Dictionary element;
            std::string nodeName;
            std::string parentName;

            moduleDictionary.getValue(key, element);
            element.setValue(KeyPathModule, modulePath);

            element.getValue(SceneGraphNode::KeyName, nodeName);
            element.getValue(SceneGraphNode::KeyParentName, parentName);

            // Node factories may resolve paths relative to the module dir
            FileSys.setCurrentDirectory(modulePath);
            SceneGraphNode* node = SceneGraphNode::createFromDictionary(element);
            if (node == nullptr) {
                LERROR("Error loading SceneGraphNode '" << nodeName <<
                    "' in module '" << moduleName << "'");
                continue;
                //clear();
                //return false;
            }

            // Every node implicitly depends on its parent
            dependencies[nodeName].push_back(parentName);
            parents[nodeName] = parentName;
            // Also include loaded dependencies
            if (element.hasKey(SceneGraphNode::KeyDependencies)) {
                if (element.hasValue<ghoul::Dictionary>(SceneGraphNode::KeyDependencies)) {
                    ghoul::Dictionary nodeDependencies;
                    element.getValue(SceneGraphNode::KeyDependencies, nodeDependencies);

                    std::vector<std::string> keys = nodeDependencies.keys();
                    for (const std::string& key : keys) {
                        std::string value = nodeDependencies.value<std::string>(key);
                        dependencies[nodeName].push_back(value);
                    }
                }
                else {
                    LERROR("Dependencies did not have the corrent type");
                }
            }

            SceneGraphNodeInternal* internalNode = new SceneGraphNodeInternal;
            internalNode->node = node;
            _nodes.push_back(internalNode);
        }
    }
    ghoul::lua::destroyLuaState(state);
    FileSys.setCurrentDirectory(oldDirectory);

    // Resolve parent pointers; a missing parent is logged but setParent is
    // still called (with nullptr in that case)
    for (SceneGraphNodeInternal* node : _nodes) {
        if (node->node == _rootNode)
            continue;
        std::string parent = parents[node->node->name()];
        SceneGraphNode* parentNode = sceneGraphNode(parent);
        if (parentNode == nullptr) {
            LERROR("Could not find parent '" << parent << "' for '" << node->node->name() << "'");
        }

        node->node->setParent(parentNode);
    }

    // Setup dependencies
    for (SceneGraphNodeInternal* node : _nodes) {
        std::vector<std::string> nodeDependencies = dependencies[node->node->name()];

        for (const std::string& dep : nodeDependencies) {
            SceneGraphNodeInternal* n = nodeByName(dep);
            if (n == nullptr) {
                LERROR("Dependent node '" << dep << "' was not loaded for '" <<
                    node->node->name() << "'");
                continue;
            }
            node->outgoingEdges.push_back(n);
            n->incomingEdges.push_back(node);
        }
    }

    // Drop nodes that have no path to the root at all
    std::vector<SceneGraphNodeInternal*> nodesToDelete;
    for (SceneGraphNodeInternal* node : _nodes) {
        if (!nodeIsDependentOnRoot(node)) {
            LERROR("Node '" << node->node->name() << "' has no direct connection to Root.");
            nodesToDelete.push_back(node);
        }
    }

    for (SceneGraphNodeInternal* node : nodesToDelete) {
        _nodes.erase(std::find(_nodes.begin(), _nodes.end(), node));
        delete node;
    }

    bool s = sortTopologically();
    if (!s) {
        LERROR("Topological sort failed");
        return false;
    }

    return true;
}
// ######################################################################
// Write all Surprise Control result images for the current frame to the
// given frame output stream: the final frame, raw and normalized diff
// images, and (depending on option flags) per-channel difference, beta,
// bias and separable-filter part images.
//
// NOTE(review): LFATAL("FIXME") near the top presumably aborts execution,
// which would make everything below unreachable until the commented-out
// SSCprocessFrame call is restored — confirm LFATAL semantics.
void SimulationViewerSurpCont::saveResults(const nub::ref<FrameOstream>& ofs)
{
  // update our internal time:
  double msecs = itsCurrTime.msecs();
  LINFO("Running Surprise Control on Sample Input time %f ms",msecs);
  LFATAL("FIXME");
  //// itsScaleSurpriseControl.SSCprocessFrame(itsBrain);
  LINFO("Saving Surprise Control Output");

  Image<PixRGB<byte> > bimage;

  // final processed frame
  Image<PixRGB<float> > outImage = itsScaleSurpriseControl.SSCgetFrame();
  bimage = outImage;
  ofs->writeRGB(bimage, "SSC", FrameInfo("ScaleSurpriseControl final image",
                                         SRC_POS));

  // raw difference image
  Image<PixRGB<float> > diffImage =
    itsScaleSurpriseControl.SSCgetDiffImage(false);
  bimage = diffImage;
  ofs->writeRGB(bimage, "SSC-diff",
                FrameInfo("ScaleSurpriseControl diff image",SRC_POS));

  // normalized difference image
  diffImage = itsScaleSurpriseControl.SSCgetDiffImage(true);
  bimage = diffImage;
  ofs->writeRGB(bimage, "SSC-diff-norm",
                FrameInfo("ScaleSurpriseControl diff image normalized",SRC_POS));

  // optionally dump per-channel difference parts (H1/H2/S/V, then numbered)
  if(itsDrawDiffParts.getVal())
    {
      std::vector<Image<PixRGB<float> > > diffParts =
        itsScaleSurpriseControl.SSCgetDiffParts();
      std::vector<Image<PixRGB<float> > >::const_iterator diffPartsItr =
        diffParts.begin();
      ushort type = 0;
      while(diffPartsItr != diffParts.end())
        {
          bimage = *diffPartsItr;
          char name[100];
          // first four entries have fixed channel names, the rest are numbered
          if(type == 0)
            sprintf(name,"SSC-diffParts-H1-");
          else if(type == 1)
            sprintf(name,"SSC-diffParts-H2-");
          else if(type == 2)
            sprintf(name,"SSC-diffParts-S-");
          else if(type == 3)
            sprintf(name,"SSC-diffParts-V-");
          else
            sprintf(name,"SSC-diffParts-%d-",type);
          std::string prefix    = name;
          std::string frameInfo = "ScaleSurpriseControl difference ";
          frameInfo             = frameInfo + prefix;
          ofs->writeRGB(bimage, prefix, FrameInfo(frameInfo,SRC_POS));
          ++diffPartsItr; type++;
        }
    }

  // optionally dump beta parts, raw then normalized, named per channel
  if(itsDrawBetaParts.getVal())
    {
      std::vector<Image<float> > betaParts =
        itsScaleSurpriseControl.SSCgetBetaParts(false);
      std::vector<Image<float> >::const_iterator betaPartsItr =
        betaParts.begin();
      ushort type = 0;
      while(betaPartsItr != betaParts.end())
        {
          bimage = *betaPartsItr;
          char name[100];
          sprintf(name,"SSC-betaParts-%s-",sc_channel_name_abv[type].c_str());
          std::string prefix    = name;
          std::string frameInfo = "ScaleSurpriseControl beta ";
          frameInfo             = frameInfo + prefix;
          ofs->writeRGB(bimage, prefix, FrameInfo(frameInfo,SRC_POS));
          ++betaPartsItr; type++;
        }

      // same again, normalized
      betaParts    = itsScaleSurpriseControl.SSCgetBetaParts(true);
      betaPartsItr = betaParts.begin();
      type = 0;
      while(betaPartsItr != betaParts.end())
        {
          bimage = *betaPartsItr;
          char name[100];
          sprintf(name,"SSC-betaParts-norm-%s-",sc_channel_name_abv[type].c_str());
          std::string prefix    = name;
          std::string frameInfo = "ScaleSurpriseControl beta norm";
          frameInfo             = frameInfo + prefix;
          ofs->writeRGB(bimage, prefix, FrameInfo(frameInfo,SRC_POS));
          ++betaPartsItr; type++;
        }
    }

  // optionally dump bias parts for the four channels at each scale;
  // the four vectors are iterated in lock step (assumed equal length)
  if(itsDrawBiasParts.getVal())
    {
      std::vector<Image<PixRGB<float> > > biasH1;
      std::vector<Image<PixRGB<float> > > biasH2;
      std::vector<Image<PixRGB<float> > > biasS;
      std::vector<Image<PixRGB<float> > > biasV;

      itsScaleSurpriseControl.SSCgetBiasParts(biasH1,biasH2,biasS,biasV);

      std::vector<Image<PixRGB<float> > >::const_iterator biasH1Itr =
        biasH1.begin();
      std::vector<Image<PixRGB<float> > >::const_iterator biasH2Itr =
        biasH2.begin();
      std::vector<Image<PixRGB<float> > >::const_iterator biasSItr  =
        biasS.begin();
      std::vector<Image<PixRGB<float> > >::const_iterator biasVItr  =
        biasV.begin();

      ushort scale = 0;
      while(biasH1Itr != biasH1.end())
        {
          char name[100];

          bimage = *biasH1Itr;
          sprintf(name,"SSC-biasParts-H1-%d-",scale);
          std::string prefix    = name;
          std::string frameInfo = "ScaleSurpriseControl biasH1 ";
          frameInfo             = frameInfo + prefix;
          ofs->writeRGB(bimage, prefix, FrameInfo(frameInfo,SRC_POS));

          bimage = *biasH2Itr;
          sprintf(name,"SSC-biasParts-H2-%d-",scale);
          prefix    = name;
          frameInfo = "ScaleSurpriseControl biasH2 ";
          frameInfo = frameInfo + prefix;
          ofs->writeRGB(bimage, prefix, FrameInfo(frameInfo,SRC_POS));

          bimage = *biasSItr;
          sprintf(name,"SSC-biasParts-S-%d-",scale);
          prefix    = name;
          frameInfo = "ScaleSurpriseControl biasS ";
          frameInfo = frameInfo + prefix;
          ofs->writeRGB(bimage, prefix, FrameInfo(frameInfo,SRC_POS));

          bimage = *biasVItr;
          sprintf(name,"SSC-biasParts-V-%d-",scale);
          prefix    = name;
          frameInfo = "ScaleSurpriseControl biasV ";
          frameInfo = frameInfo + prefix;
          ofs->writeRGB(bimage, prefix, FrameInfo(frameInfo,SRC_POS));

          ++biasH1Itr; ++biasH2Itr; ++biasSItr; ++biasVItr; scale++;
        }
    }

  // optionally dump separable-filter Z/Y parts per scale
  if(itsDrawSeperableParts.getVal())
    {
      std::vector<Image<PixRGB<float> > > Zimgs;
      std::vector<Image<PixRGB<float> > > Yimgs;

      itsScaleSurpriseControl.SSCgetSeperableParts(Zimgs,Yimgs,false);

      std::vector<Image<PixRGB<float> > >::const_iterator Zitr = Zimgs.begin();
      std::vector<Image<PixRGB<float> > >::const_iterator Yitr = Yimgs.begin();

      ushort scale = 0;
      while(Zitr != Zimgs.end())
        {
          char name[100];

          bimage = *Zitr;
          sprintf(name,"SSC-seperable-parts-Z-%d-",scale);
          std::string prefix    = name;
          std::string frameInfo = "Seperable Parts Z";
          frameInfo             = frameInfo + prefix;
          ofs->writeRGB(bimage, prefix, FrameInfo(frameInfo,SRC_POS));

          bimage = *Yitr;
          sprintf(name,"SSC-seperable-parts-Y-%d-",scale);
          prefix    = name;
          frameInfo = "Seperable Parts Y";
          frameInfo = frameInfo + prefix;
          ofs->writeRGB(bimage, prefix, FrameInfo(frameInfo,SRC_POS));

          ++Zitr; ++Yitr; scale++;
        }
    }
}
bool LocalErrorHistogramManager::buildHistograms(int numBins) { LINFO("Build histograms with " << numBins << " bins each"); _numBins = numBins; _file = &(_tsp->file()); if (!_file->is_open()) { return false; } _minBin = 0.0; // Should be calculated from tsp file _maxBin = 1.0; // Should be calculated from tsp file as (maxValue - minValue) unsigned int numOtLevels = _tsp->numOTLevels(); unsigned int numOtLeaves = pow(8, numOtLevels - 1); unsigned int numBstLeaves = pow(2, _tsp->numBSTLevels() - 1); _numInnerNodes = _tsp->numTotalNodes() - numOtLeaves * numBstLeaves; _spatialHistograms = std::vector<Histogram>(_numInnerNodes); _temporalHistograms = std::vector<Histogram>(_numInnerNodes); for (unsigned int i = 0; i < _numInnerNodes; i++) { _spatialHistograms[i] = Histogram(_minBin, _maxBin, numBins); _temporalHistograms[i] = Histogram(_minBin, _maxBin, numBins); } // All TSP Leaves int numOtNodes = _tsp->numOTNodes(); int otOffset = (pow(8, numOtLevels - 1) - 1) / 7; int numBstNodes = _tsp->numBSTNodes(); int bstOffset = numBstNodes / 2; int numberOfLeaves = numOtLeaves * numBstLeaves; LINFO("Building spatial histograms"); ProgressBar pb1(numberOfLeaves); int processedLeaves = 0; pb1.print(processedLeaves); bool success = true; for (int bst = bstOffset; bst < numBstNodes; bst++) { for (int ot = otOffset; ot < numOtNodes; ot++) { success &= buildFromOctreeChild(bst, ot); if (!success) LERROR("Failed in buildFromOctreeChild"); if (!success) return false; pb1.print(processedLeaves++); } } //pb1.stop(); LINFO("Building temporal histograms"); ProgressBar pb2(numberOfLeaves); processedLeaves = 0; pb2.print(processedLeaves); for (int ot = otOffset; ot < numOtNodes; ot++) { for (int bst = bstOffset; bst < numBstNodes; bst++) { success &= buildFromBstChild(bst, ot); if (!success) LERROR("Failed in buildFromBstChild"); if (!success) return false; pb2.print(processedLeaves++); } } //pb2.stop(); return success; }
static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) { int ro; struct file *filp = NULL; int rc = -EINVAL; struct inode *inode = NULL; loff_t size; loff_t num_sectors; loff_t min_sectors; unsigned int blkbits; unsigned int blksize; #ifdef CONFIG_MTK_ICUSB_SUPPORT #define ICUSB_STORAGE_LABEL "/dev/block/vold/8:" if(strstr(filename, ICUSB_STORAGE_LABEL)) { printk(KERN_WARNING "filename : %s, set isICUSB to 0\n", filename); curlun->isICUSB = 1; } else { printk(KERN_WARNING "filename : %s, set isICUSB to 1\n", filename); curlun->isICUSB = 0; } #endif /* R/W if we can, R/O if we must */ ro = curlun->initially_ro; if (!ro) { filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0); if (PTR_ERR(filp) == -EROFS || PTR_ERR(filp) == -EACCES) ro = 1; } if (ro) filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0); if (IS_ERR(filp)) { LINFO(curlun, "unable to open backing file: %s\n", filename); return PTR_ERR(filp); } if (!(filp->f_mode & FMODE_WRITE)) ro = 1; inode = file_inode(filp); if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) { LINFO(curlun, "invalid file type: %s\n", filename); goto out; } /* * If we can't read the file, it's no good. * If we can't write the file, use it read-only. 
*/ if (!(filp->f_op->read || filp->f_op->aio_read)) { LINFO(curlun, "file not readable: %s\n", filename); goto out; } if (!(filp->f_op->write || filp->f_op->aio_write)) ro = 1; size = i_size_read(inode->i_mapping->host); if (size < 0) { LINFO(curlun, "unable to find file size: %s\n", filename); rc = (int) size; goto out; } if (curlun->cdrom) { blksize = 2048; blkbits = 11; } else if (inode->i_bdev) { blksize = bdev_logical_block_size(inode->i_bdev); blkbits = blksize_bits(blksize); } else { blksize = 512; blkbits = 9; } num_sectors = size >> blkbits; /* File size in logic-block-size blocks */ min_sectors = 1; if (curlun->cdrom) { min_sectors = 300; /* Smallest track is 300 frames */ if (num_sectors >= 256*60*75) { num_sectors = 256*60*75 - 1; LINFO(curlun, "file too big: %s\n", filename); LINFO(curlun, "using only first %d blocks\n", (int) num_sectors); } } if (num_sectors < min_sectors) { LINFO(curlun, "file too small: %s\n", filename); rc = -ETOOSMALL; goto out; } if (fsg_lun_is_open(curlun)) fsg_lun_close(curlun); curlun->blksize = blksize; curlun->blkbits = blkbits; curlun->ro = ro; curlun->filp = filp; curlun->file_length = size; curlun->num_sectors = num_sectors; LDBG(curlun, "open backing file: %s\n", filename); return 0; out: fput(filp); return rc; }
// ######################################################################
// Rebuild the MST sub-channel bank: removes any existing sub-channels and
// adds one MSTChannel per selected orientation pattern (each pattern is an
// 8-flag mask over the orientation channel).
//
// NOTE(review): the loop condition is `ori < 8`, so `case 8` below is
// unreachable dead code — confirm whether the loop was meant to be
// `ori <= 8` or the case is a leftover.
// NOTE(review): the step `8 / itsNumDirs.getVal()` uses integer division;
// it is 0 (infinite loop) if itsNumDirs > 8 — presumably itsNumDirs is
// always a divisor of 8; verify against the option's allowed values.
// NOTE(review): the LINFO reports itsNumDirs directions, but the patterns
// themselves are hard-coded for 8 directions.
void FoeMSTChannel::buildSubChans()
{
GVX_TRACE(__PRETTY_FUNCTION__);
  // kill any subchans we may have had... this->removeAllSubChans();
  this->removeAllSubChans();

  LINFO("Using %d directions spanning [0..360]deg", itsNumDirs.getVal());
  for (uint ori = 0; ori < 8; )
    {
      switch (ori)
        {
        case 0: //The point in the center POINT 0
          addSubChan(makeSharedComp
                     (new MSTChannel(getManager(), itsOriChan,
                                     visualFeature(),
                                     1,1,1,1,1,1,1,1 )),
                     "", 1.0, /* exportOpts = */ true);
          break;
        case 1: //The point in the center POINT 1
          addSubChan(makeSharedComp
                     (new MSTChannel(getManager(), itsOriChan,
                                     visualFeature(),
                                     1,0,0,0,0,0,0,0 )),
                     "", 1.0, /* exportOpts = */ true);
          break;
        case 2: //The point in the center POINT 2
          addSubChan(makeSharedComp
                     (new MSTChannel(getManager(), itsOriChan,
                                     visualFeature(),
                                     1,1,1,0,0,0,0,0 )),
                     "", 1.0, /* exportOpts = */ true);
          break;
        case 3://The point in the center POINT 3
          addSubChan(makeSharedComp
                     (new MSTChannel(getManager(), itsOriChan,
                                     visualFeature(),
                                     0,0,1,0,0,0,0,0 )),
                     "", 1.0, /* exportOpts = */ true);
          break;
        case 4: //The point in the center POINT 4
          addSubChan(makeSharedComp
                     (new MSTChannel(getManager(), itsOriChan,
                                     visualFeature(),
                                     0,1,1,1,0,0,0,0 )),
                     "", 1.0, /* exportOpts = */ true);
          break;
        case 5: //The point in the center POINT 5
          addSubChan(makeSharedComp
                     (new MSTChannel(getManager(), itsOriChan,
                                     visualFeature(),
                                     0,0,0,1,0,0,0,0 )),
                     "", 1.0, /* exportOpts = */ true);
          break;
        case 6: //The point in the center POINT 6
          addSubChan(makeSharedComp
                     (new MSTChannel(getManager(), itsOriChan,
                                     visualFeature(),
                                     0,0,0,0,1,1,1,0 )),
                     "", 1.0, /* exportOpts = */ true);
          break;
        case 7: //The point in the center POINT 7
          addSubChan(makeSharedComp
                     (new MSTChannel(getManager(), itsOriChan,
                                     visualFeature(),
                                     0,1,0,0,0,0,1,0 )),
                     "", 1.0, /* exportOpts = */ true);
          break;
        case 8: //The point in the center POINT 8 (unreachable, see note above)
          addSubChan(makeSharedComp
                     (new MSTChannel(getManager(), itsOriChan,
                                     visualFeature(),
                                     1,0,0,0,0,0,1,1 )),
                     "", 1.0, /* exportOpts = */ true);
          break;
        default:
          break;
        }
      // advance by the inter-direction stride (integer division, see note)
      ori += 8 / itsNumDirs.getVal();
    }
}
// ###################################################################### Image<float> AttentionGuidanceMapSC::getV() const { LINFO("Superior Colliculus Guidance Map is an ImageSet, so this function is depricated here"); return Image<float>(); }
// ######################################################################
// train or test the network
//
// Runs all loaded samples through the three-layer feed-forward network.
// When isTest == 0: trains (backprop on misclassified samples) for up to
// MAX_EPOCH epochs or until the misclassification count drops below
// nSamples*ERR_THRESHOLD, printing a confusion matrix each epoch.
// When isTest != 0: runs one evaluation pass, printing per-sample outputs.
// Both modes end with a summary confusion matrix plus per-class
// false-positive / total / error-rate rows.
//
// NOTE(review): `int nfcClass[info->nOutput][info->nOutput]` and
// `int order[nSamples]` are variable-length arrays — a GCC extension,
// not standard C++.
// NOTE(review): `if((fc != -1) | 1)` is always true (bitwise-or with 1);
// looks like a debug override left in so every sample is printed — confirm.
void run(int isTest)
{
  LINFO("Run the samples");
  double errSum = double(nSamples);
  double err;
  Image<double> ffnOut;
  int nfc = nSamples;   // number of failed (misclassified) samples
  int fc;               // wrong class returned by diff(), or -1 if correct
  int nfcClass[info->nOutput][info->nOutput];//confusion matrix[target][output]
  int nTrials = 0;
  int target = 0;
  if(nSamples == 0) return;

  // sample presentation order, shuffled each training epoch
  int order[nSamples];
  for(int i = 0; i < nSamples; i++) order[i] = i;

  // ---------- training loop (skipped entirely when testing) ----------
  while(nTrials < MAX_EPOCH && !isTest && nfc > int(nSamples*ERR_THRESHOLD))
    {
      // reinitialize statistic variables
      for(uint i = 0; i < info->nOutput; i++)
        for(uint j = 0; j < info->nOutput; j++)
          nfcClass[i][j] = 0;
      errSum = 0.0; nfc = 0;

      // run the input in random order
      randShuffle(order, nSamples);
      for(int i = 0; i < nSamples; i++)
        {
          // run the input
          ffn->run3L(in[order[i]]);
          ffnOut = ffn->getOutput();

          // get the error
          diff(out[order[i]], ffnOut, err, fc, target);

          // add misclassification count
          if(fc != -1) { nfc++; nfcClass[target][fc]++; }
          else nfcClass[target][target]++;

          // and the numerical deviation
          errSum += err;

          // backpropagate only on misclassified samples
          if(fc != -1)
            {
              //ffn->setLearnRate(learnRate*10);
              ffn->backprop3L(out[order[i]]);
              //ffn->setLearnRate(learnRate);
            }
        }
      nTrials++;

      // periodically report progress (every epoch, since %1)
      if(nTrials %1 == 0)
        {
          printf("Trial_%04d_Err: %f nfc: %5d/%5d -> %f%%\n",
                 nTrials, errSum/nSamples,
                 nfc,nSamples,(double)(nfc)/(0.0 + nSamples)*100.0);
          printf("class |");
          for(uint k = 0; k < info->nOutput; k++) printf(" %4d", k);
          printf("\n");
          for(uint k = 0; k < info->nOutput; k++) printf("------");
          printf("\n");
          for(uint k = 0; k < info->nOutput; k++)
            {
              printf("%6d|",k);
              for(uint j = 0; j < info->nOutput; j++)
                printf(" %4d",nfcClass[k][j]);
              printf("\n");
            }
        }
      printf("\n");
    }

  // ---------- print the results if testing ----------
  if(isTest)
    {
      nfc = 0; errSum = 0.0; err = 0;
      for(uint i = 0; i < info->nOutput; i++)
        for(uint j = 0; j < info->nOutput; j++)
          nfcClass[i][j] = 0;
      for(int i = 0; i < nSamples; i++)
        {
          // run the input
          ffn->run3L(in[i]);

          // get the output
          ffnOut = ffn->getOutput();

          // get the error
          diff(out[i], ffnOut, err, fc, target);

          // add misclassification count
          if(fc != -1) { nfc++; nfcClass[target][fc]++; }
          else nfcClass[target][target]++;

          // and the numerical deviation
          errSum += err;

          // always-true condition: prints target and actual outputs for
          // every sample (see NOTE(review) above)
          if((fc != -1) | 1)
            {
              printf("sample %5d: ",i);
              for(uint j = 0; j < info->nOutput; j++)
                printf("%.3f ",out[i][j]);
              printf(" -:- ");
              for(uint j = 0; j < info->nOutput; j++)
                printf("%.3f ",ffnOut[j]);
            }
          if(fc != -1)
            printf(" WRONG! NO:%d [%d][%d] = %d \n",
                   nfc, target, fc, nfcClass[target][fc]);
          else printf("\n");
        }
    }

  // ---------- final error count ----------
  printf("Final Trial_%04d_Err: %f nfc: %5d/%5d -> %.3f%%\n",
         nTrials,errSum/nSamples,
         nfc,nSamples,(double)(nfc)/(0.0 + nSamples)*100.0);
  printf("class |");
  for(uint k = 0; k < info->nOutput; k++) printf(" %5d",k);
  printf(" Total pct. err \n-------");
  for(uint k = 0; k < info->nOutput; k++) printf("------");
  printf("\n");
  // per-target rows: counts, then errors/total and percent error
  for(uint k = 0; k < info->nOutput; k++)
    {
      int t = 0, e = 0;
      printf("%6d|",k);
      for(uint j = 0; j < info->nOutput; j++)
        {
          printf(" %5d",nfcClass[k][j]);
          if(k == j) t = nfcClass[k][j];
          else e += nfcClass[k][j];
        }
      if(e+t == 0) printf(" %6d/%6d N/A%%\n",0,0);
      else printf(" %6d/%6d %6.2f%%\n",e,e+t, float(e)/float(e+t)*100.0);
    }
  for(uint k = 0; k < info->nOutput; k++) printf("------");
  // false-positive row: samples of other classes reported as class k
  printf("-------\nFalse+|");
  for(uint k = 0; k < info->nOutput; k++)
    {
      int e = 0;
      for(uint j = 0; j < info->nOutput; j++)
        {
          if(k == j) ; //t = nfcClass[j][k];
          else e += nfcClass[j][k];
        }
      printf(" %5d",e);
    }
  // column totals: everything the network reported as class k
  printf("\ntotal |");
  for(uint k = 0; k < info->nOutput; k++)
    {
      int t = 0, e = 0;
      for(uint j = 0; j < info->nOutput; j++)
        {
          if(k == j) t = nfcClass[j][k];
          else e += nfcClass[j][k];
        }
      printf(" %5d",e+t);
    }
  // per-column error percentage
  printf("\nerr: |");
  for(uint k = 0; k < info->nOutput; k++)
    {
      int t = 0, e = 0;
      for(uint j = 0; j < info->nOutput; j++)
        {
          if(k == j) t = nfcClass[j][k];
          else e += nfcClass[j][k];
        }
      if(e+t == 0) printf(" N/A");
      else printf(" %5.2f",float(e)/float(e+t)*100.0);
    }
  printf("\n");
}
static int __init s5k4e5yx_i2c_add_driver( void) { LINFO("%s called\n", __func__); return i2c_add_driver(s5k4e5yx_act_t.i2c_driver); }
void FilteringForwarder::forward(const Task & t){ LINFO("RC") << "forward task" << LE; wrappedForwarder->forward(t); }
int32_t s5k4e5yx_msm_actuator_init_table( struct msm_actuator_ctrl_t *a_ctrl) { int32_t rc = 0; LINFO("%s called\n", __func__); if (a_ctrl->func_tbl.actuator_set_params) a_ctrl->func_tbl.actuator_set_params(a_ctrl); #if 0 if (s5k4e5yx_act_t.step_position_table) { LINFO("%s table inited\n", __func__); return rc; } #endif if (s5k4e5yx_msm_actuator_info->use_rawchip_af && a_ctrl->af_algo == AF_ALGO_RAWCHIP) a_ctrl->set_info.total_steps = S5K4E5YX_TOTAL_STEPS_NEAR_TO_FAR_RAWCHIP_AF; else a_ctrl->set_info.total_steps = S5K4E5YX_TOTAL_STEPS_NEAR_TO_FAR; if (a_ctrl->step_position_table != NULL) { kfree(a_ctrl->step_position_table); a_ctrl->step_position_table = NULL; } a_ctrl->step_position_table = kmalloc(sizeof(uint16_t) * (a_ctrl->set_info.total_steps + 1), GFP_KERNEL); if (a_ctrl->step_position_table != NULL) { uint16_t i = 0; uint16_t s5k4e5yx_nl_region_boundary1 = 2; uint16_t s5k4e5yx_nl_region_code_per_step1 = 32; uint16_t s5k4e5yx_l_region_code_per_step = 16; uint16_t s5k4e5yx_max_value = 1023; a_ctrl->step_position_table[0] = a_ctrl->initial_code; for (i = 1; i <= a_ctrl->set_info.total_steps; i++) { if (s5k4e5yx_msm_actuator_info->use_rawchip_af && a_ctrl->af_algo == AF_ALGO_RAWCHIP) a_ctrl->step_position_table[i] = a_ctrl->step_position_table[i-1] + 4; else { if (i <= s5k4e5yx_nl_region_boundary1) { a_ctrl->step_position_table[i] = a_ctrl->step_position_table[i-1] + s5k4e5yx_nl_region_code_per_step1; } else { a_ctrl->step_position_table[i] = a_ctrl->step_position_table[i-1] + s5k4e5yx_l_region_code_per_step; } if (a_ctrl->step_position_table[i] > s5k4e5yx_max_value) a_ctrl->step_position_table[i] = s5k4e5yx_max_value; } } a_ctrl->curr_step_pos = 0; a_ctrl->curr_region_index = 0; } else { pr_err("%s table init failed\n", __func__); rc = -EFAULT; } return rc; }
int32_t ti201_msm_actuator_init_table( struct msm_actuator_ctrl_t *a_ctrl) { int32_t rc = 0; LINFO("%s called\n", __func__); if (a_ctrl->func_tbl.actuator_set_params) a_ctrl->func_tbl.actuator_set_params(a_ctrl); if (ti201_act_t.step_position_table) { LINFO("%s table inited\n", __func__); return rc; } if (a_ctrl->step_position_table != NULL) { kfree(a_ctrl->step_position_table); a_ctrl->step_position_table = NULL; } a_ctrl->step_position_table = kmalloc(sizeof(uint16_t) * (a_ctrl->set_info.total_steps + 1), GFP_KERNEL); if (a_ctrl->step_position_table != NULL) { uint16_t i = 0; uint16_t ti201_nl_region_boundary1 = 2; uint16_t ti201_nl_region_code_per_step1 = 32; uint16_t ti201_l_region_code_per_step = 16; uint16_t ti201_max_value = 1023; a_ctrl->step_position_table[0] = a_ctrl->initial_code; for (i = 1; i <= a_ctrl->set_info.total_steps; i++) { #ifdef USE_RAWCHIP_AF if (ti201_msm_actuator_info->use_rawchip_af) a_ctrl->step_position_table[i] = a_ctrl->step_position_table[i-1] + 4; else #endif { if (i <= ti201_nl_region_boundary1) { a_ctrl->step_position_table[i] = a_ctrl->step_position_table[i-1] + ti201_nl_region_code_per_step1; } else { a_ctrl->step_position_table[i] = a_ctrl->step_position_table[i-1] + ti201_l_region_code_per_step; } if (a_ctrl->step_position_table[i] > ti201_max_value) a_ctrl->step_position_table[i] = ti201_max_value; } } a_ctrl->curr_step_pos = 0; a_ctrl->curr_region_index = 0; } else { pr_err("%s table init failed\n", __func__); rc = -EFAULT; } return rc; }
/**
 * Read a volume file via ITK and wrap it in a VolumeCollection.
 *
 * Probes the file with a throw-away reader to learn dimensionality, pixel
 * type and component type, then dispatches to the matching typed
 * readScalarVolume<T>() instantiation.  Only 3-D, single-component scalar
 * images are supported.
 *
 * \param url volume URL; only its path component is used
 * \return a newly allocated VolumeCollection containing the volume
 *         (ownership passes to the caller)
 * \throws tgt::CorruptedFileException / tgt::UnsupportedFormatException /
 *         tgt::IOException on probe or format errors
 *
 * NOTE(review): the spacing[] values gathered below are never used in this
 * function — presumably leftover from an earlier version; confirm before
 * removing.
 */
VolumeCollection* ITKVolumeReader::read(const std::string &url)
    throw (tgt::CorruptedFileException, tgt::IOException, std::bad_alloc)
{
    VolumeURL origin(url);
    std::string fileName = origin.getPath();
    LINFO("Reading file " << fileName);

    //Get OutputInformation of an arbitrary reader to find out pixel type etc:
    typedef itk::Image<char,3> TestImageType;  // pixel type doesn't matter for current purpose
    typedef itk::ImageFileReader<TestImageType> TestFileReaderType; // reader for testing a file
    TestFileReaderType::Pointer onefileReader = TestFileReaderType::New();
    onefileReader->SetFileName(fileName.c_str());
    try {
        onefileReader->GenerateOutputInformation();
    }
    catch(itk::ExceptionObject& excp)
    {
        throw tgt::CorruptedFileException("Failed to read OutputInformation! "
                + std::string(excp.GetDescription()), fileName);
    }

    // grab the ImageIO instance for the reader
    itk::ImageIOBase *imageIO = onefileReader->GetImageIO();

    unsigned int NumberOfDimensions = imageIO->GetNumberOfDimensions();
    LINFO("Number of Dimensions: " << NumberOfDimensions);
    if(NumberOfDimensions != 3) {
        throw tgt::UnsupportedFormatException("Unsupported number of dimensions!");
    }

    // PixelType is SCALAR, RGB, RGBA, VECTOR, COVARIANTVECTOR, POINT, INDEX
    itk::ImageIOBase::IOPixelType pixelType = imageIO->GetPixelType();
    LINFO("PixelType: " << imageIO->GetPixelTypeAsString(pixelType));

    // IOComponentType is UCHAR, CHAR, USHORT, SHORT, UINT, INT, ULONG, LONG, FLOAT, DOUBLE
    itk::ImageIOBase::IOComponentType componentType = imageIO->GetComponentType();
    LINFO("ComponentType: " << imageIO->GetComponentTypeAsString(componentType));

    // NumberOfComponents is usually one, but for non-scalar pixel types, it can be anything
    unsigned int NumberOfComponents = imageIO->GetNumberOfComponents();
    LINFO("Number of Components: " << NumberOfComponents);
    if(NumberOfComponents != 1) {
        throw tgt::UnsupportedFormatException("Unsupported number of components!");
    }

    //-------Info we don't need here:---------------
    //unsigned dims[32]; // almost always no more than 4 dims, but ...
    //unsigned origin[32];
    double spacing[32];
    //std::vector<double> directions[32];
    for(unsigned i = 0; i < NumberOfDimensions && i < 32; i++) {
        //dims[i] = imageIO->GetDimensions(i);
        //origin[i] = imageIO->GetOrigin(i);
        spacing[i] = imageIO->GetSpacing(i);
        //directions[i] = imageIO->GetDirection(i);
    }

    // dispatch on component type; every path either assigns dataset or throws
    Volume* dataset;
    switch(pixelType) {
        case itk::ImageIOBase::SCALAR:
            switch(componentType) {
                case itk::ImageIOBase::UCHAR:
                    dataset = readScalarVolume<uint8_t>(fileName);
                    break;
                case itk::ImageIOBase::CHAR:
                    dataset = readScalarVolume<int8_t>(fileName);
                    break;
                case itk::ImageIOBase::USHORT:
                    dataset = readScalarVolume<uint16_t>(fileName);
                    break;
                case itk::ImageIOBase::SHORT:
                    dataset = readScalarVolume<int16_t>(fileName);
                    break;
                case itk::ImageIOBase::UINT:
                    dataset = readScalarVolume<uint32_t>(fileName);
                    break;
                case itk::ImageIOBase::INT:
                    dataset = readScalarVolume<int32_t>(fileName);
                    break;
#ifndef WIN32
                case itk::ImageIOBase::ULONG:
                    dataset = readScalarVolume<uint64_t>(fileName);
                    break;
                case itk::ImageIOBase::LONG:
                    dataset = readScalarVolume<int64_t>(fileName);
                    break;
#endif
                case itk::ImageIOBase::FLOAT:
                    dataset = readScalarVolume<float>(fileName);
                    break;
                case itk::ImageIOBase::DOUBLE:
                    dataset = readScalarVolume<double>(fileName);
                    break;
                default:
                    throw tgt::UnsupportedFormatException("Unsupported component type!");
            }
            break;
        case itk::ImageIOBase::RGB:
        case itk::ImageIOBase::RGBA:
        case itk::ImageIOBase::VECTOR:
        case itk::ImageIOBase::COVARIANTVECTOR:
        case itk::ImageIOBase::POINT:
        default:
            throw tgt::UnsupportedFormatException("Unsupported pixel type!");
            return 0; // unreachable after throw; kept for historic reasons
    }

    VolumeCollection* volumeCollection = new VolumeCollection();
    dataset->setOrigin(fileName);
    volumeCollection->add(dataset);
    return volumeCollection;
}
/**
 * Write a volume as a detached NRRD: a .nhdr text header plus a .raw file
 * with the voxel data.  Supports uint8, uint16 and 4x-uint8 RAM volumes.
 *
 * \param filename     header file name; the raw file gets the same stem + ".raw"
 * \param volumeHandle volume to write (must have a VolumeRAM representation)
 * \throws tgt::IOException if either output file cannot be opened
 *
 * NOTE(review): for an unsupported format only LERROR is emitted and the
 * function continues — the header is still written and rawout.write(0, 0)
 * is called with a null data pointer; confirm this is intended rather than
 * an early return.
 * NOTE(review): the 4xUInt8 branch declares its type as "uint" in the
 * header — verify this is the intended NRRD type string for RGBA data.
 */
void NrrdVolumeWriter::write(const std::string& filename, const VolumeBase* volumeHandle)
    throw (tgt::IOException)
{
    tgtAssert(volumeHandle, "No volume");
    const VolumeRAM* volume = volumeHandle->getRepresentation<VolumeRAM>();
    if (!volume) {
        LWARNING("No volume");
        return;
    }

    // derive the companion raw file name from the header name
    std::string nhdrname = filename;
    std::string rawname = getFileNameWithoutExtension(filename) + ".raw";
    LINFO("saving " << nhdrname << " and " << rawname);

    std::fstream nhdrout(nhdrname.c_str(), std::ios::out);
    std::fstream rawout(rawname.c_str(), std::ios::out | std::ios::binary);

    if (nhdrout.bad() || rawout.bad()) {
        LWARNING("Can't open file");
        throw tgt::IOException();
    }

    // write nrrd header
    std::string type;
    const char* data = 0;
    size_t numbytes = 0;
    // map the concrete RAM representation to an NRRD type string and get
    // a pointer to its voxel bytes
    if (const VolumeRAM_UInt8* vol = dynamic_cast<const VolumeRAM_UInt8*>(volume)) {
        type = "uchar";
        data = reinterpret_cast<const char*>(vol->voxel());
        numbytes = vol->getNumBytes();
    }
    else if (const VolumeRAM_UInt16* vol = dynamic_cast<const VolumeRAM_UInt16*>(volume)) {
        type = "ushort";
        data = reinterpret_cast<const char*>(vol->voxel());
        numbytes = vol->getNumBytes();
    }
    else if (const VolumeRAM_4xUInt8* vol = dynamic_cast<const VolumeRAM_4xUInt8*>(volume)) {
        type = "uint";
        data = reinterpret_cast<const char*>(vol->voxel());
        numbytes = vol->getNumBytes();
    }
    else
        LERROR("Format currently not supported");

    tgt::ivec3 dimensions = volumeHandle->getDimensions();
    tgt::vec3 spacing = volumeHandle->getSpacing();

    nhdrout << "NRRD0001" << std::endl; // magic number
    nhdrout << "content: " << tgt::FileSystem::fileName(filename) << std::endl;
    nhdrout << "dimension: 3" << std::endl;
    nhdrout << "type: " << type << std::endl;
    nhdrout << "sizes: " << dimensions.x << " " << dimensions.y << " " << dimensions.z << std::endl;
    nhdrout << "spacings: " << spacing.x << " " << spacing.y << " " << spacing.z << std::endl;
    nhdrout << "datafile: " << tgt::FileSystem::fileName(rawname) << std::endl;
    nhdrout << "encoding: raw" << std::endl;

    nhdrout.close();

    // write raw file
    rawout.write(data, numbytes);
    rawout.close();
}
void Logger::saveFeatures(int frameNum, MbariVisualEvent::VisualEventSet& eventSet, \ Image< PixRGB<byte> > &in, Image<byte> &prevmmap, HistogramOfGradients &hog3x3, HistogramOfGradients &hog8x8, MbariImage< PixRGB<byte> > &input, MbariImage< PixRGB<byte> > &prevInput, Dims scaledDims) { if (itsSaveEventFeatures.getVal().length() > 0) { std::list<MbariVisualEvent::VisualEvent *> eventFrameList; eventFrameList = eventSet.getEventsForFrame(frameNum - 1); const std::string::size_type hashpos1 = itsOutputFrameSink.getVal().find_first_of(':'); const std::string::size_type hashpos2 = itsOutputFrameSink.getVal().find_last_of('/'); const std::string outputDir = itsOutputFrameSink.getVal().substr(hashpos1+1, hashpos2+1); // for each bit object, extract features and save the output std::list<MbariVisualEvent::VisualEvent *>::iterator event; for (event = eventFrameList.begin(); event != eventFrameList.end(); ++event) { LINFO("Getting features for event %d", (*event)->getEventNum()); Rectangle bbox = (*event)->getToken(frameNum-1).bitObject.getBoundingBox(); std::vector<double> featuresHOG3 = getFeaturesHOG(prevmmap, in, hog3x3, scaledDims, bbox); std::vector<double> featuresHOGMMAP3 = getFeaturesHOGMMAP(prevmmap, in, hog3x3, scaledDims, bbox); std::vector<double> featuresMBH3 = getFeaturesMBH(prevInput, input, hog3x3, scaledDims, bbox); std::vector<double> featuresHOG8 = getFeaturesHOG(prevmmap, in, hog8x8, scaledDims, bbox); std::vector<double> featuresHOGMAP8 = getFeaturesHOGMMAP(prevmmap, in, hog8x8, scaledDims, bbox); std::vector<double> featuresMBH8 = getFeaturesMBH(prevInput, input, hog8x8, scaledDims, bbox); // try to classify using histogram features //double prob = 0.; // int cls = bn.classify(features, &prob); // create the file stem and write out the features std::string evnumHOG3(sformat("%s%s_evt%04d_%06d_HOG_3.dat", outputDir.c_str(), itsSaveEventFeatures.getVal().c_str(), (*event)->getEventNum(), frameNum-1 )); std::string 
evnumHOGMMAP3(sformat("%s%s_evt%04d_%06d_HOGMMAP_3.dat", outputDir.c_str(),itsSaveEventFeatures.getVal().c_str(), (*event)->getEventNum(), frameNum-1 )); std::string evnumMBH3(sformat("%s%s_evt%04d_%06d_MBH_3.dat", outputDir.c_str(),itsSaveEventFeatures.getVal().c_str(), (*event)->getEventNum(), frameNum-1 )); std::string evnumHOG8(sformat("%s%s_evt%04d_%06d_HOG_8.dat", outputDir.c_str(),itsSaveEventFeatures.getVal().c_str(), (*event)->getEventNum(), frameNum-1 )); std::string evnumHOGMMAP8(sformat("%s%s_evt%04d_%06d_HOGMMAP_8.dat", outputDir.c_str(),itsSaveEventFeatures.getVal().c_str(), (*event)->getEventNum(), frameNum-1 )); std::string evnumMBH8(sformat("%s%s_evt%04d_%06d_MBH_8.dat", outputDir.c_str(),itsSaveEventFeatures.getVal().c_str(), (*event)->getEventNum(), frameNum-1)); std::ofstream eofsHOG3(evnumHOG3.c_str()); std::ofstream eofsHOGMMAP3(evnumHOGMMAP3.c_str()); std::ofstream eofsMBH3(evnumMBH3.c_str()); std::ofstream eofsHOG8(evnumHOG8.c_str()); std::ofstream eofsHOGMMAP8(evnumHOGMMAP8.c_str()); std::ofstream eofsMBH8(evnumMBH8.c_str()); eofsHOG3.precision(12); eofsHOGMMAP3.precision(12); eofsMBH3.precision(12); eofsHOG8.precision(12); eofsHOGMMAP8.precision(12); eofsMBH8.precision(12); std::vector<double>::iterator eitrHOG3 = featuresHOG3.begin(), stopHOG3 = featuresHOG3.end(); std::vector<double>::iterator eitrHOGMMAP3 = featuresHOGMMAP3.begin(), stopHOGMMAP3 = featuresHOGMMAP3.end(); std::vector<double>::iterator eitrMBH3 = featuresMBH3.begin(), stopMBH3 = featuresMBH3.end(); std::vector<double>::iterator eitrHOG8 = featuresHOG8.begin(), stopHOG8 = featuresHOG8.end(); std::vector<double>::iterator eitrHOGMMAP8 = featuresHOGMAP8.begin(), stopHOGMMAP8 = featuresHOGMAP8.end(); std::vector<double>::iterator eitrMBH8 = featuresMBH8.begin(), stopMBH8 = featuresMBH8.end(); while(eitrHOG3 != stopHOG3) eofsHOG3 << *eitrHOG3++ << " "; eofsHOG3.close(); while(eitrHOGMMAP3 != stopHOGMMAP3) eofsHOGMMAP3 << *eitrHOGMMAP3++ << " "; eofsHOGMMAP3.close(); 
while(eitrMBH3 != stopMBH3) eofsMBH3 << *eitrMBH3++ << " "; eofsMBH3.close(); while(eitrHOG8 != stopHOG8) eofsHOG8 << *eitrHOG8++ << " "; eofsHOG3.close(); while(eitrHOGMMAP8 != stopHOGMMAP8) eofsHOGMMAP8 << *eitrHOGMMAP8++ << " "; eofsHOGMMAP8.close(); while(eitrMBH8 != stopMBH8) eofsMBH8 << *eitrMBH8++ << " "; eofsMBH8.close(); // if probability small, no matches found, so add a new class by event number //if (prob < 0.1) { //if ((*event)->getEventNum() < maxClasses) { // bn.learn(features, (*event)->getEventNum()); // } } } }
void OTBSpectralAngleDistanceImageFilterProcessor::updateBands(int bands) { //Display the corresponding number of properties //depending on input image's spectral bands. switch (bands) { case 1: { refPixel0_.setVisible(true); refPixel1_.setVisible(false); refPixel2_.setVisible(false); refPixel3_.setVisible(false); refPixel4_.setVisible(false); refPixel5_.setVisible(false); refPixel6_.setVisible(false); refPixel7_.setVisible(false); break; } case 2: { refPixel0_.setVisible(true); refPixel1_.setVisible(true); refPixel2_.setVisible(false); refPixel3_.setVisible(false); refPixel4_.setVisible(false); refPixel5_.setVisible(false); refPixel6_.setVisible(false); refPixel7_.setVisible(false); break; } case 3: { refPixel0_.setVisible(true); refPixel1_.setVisible(true); refPixel2_.setVisible(true); refPixel3_.setVisible(false); refPixel4_.setVisible(false); refPixel5_.setVisible(false); refPixel6_.setVisible(false); refPixel7_.setVisible(false); break; } case 4: { refPixel0_.setVisible(true); refPixel1_.setVisible(true); refPixel2_.setVisible(true); refPixel3_.setVisible(true); refPixel4_.setVisible(false); refPixel5_.setVisible(false); refPixel6_.setVisible(false); refPixel7_.setVisible(false); break; } case 5: { refPixel0_.setVisible(true); refPixel1_.setVisible(true); refPixel2_.setVisible(true); refPixel3_.setVisible(true); refPixel4_.setVisible(true); refPixel5_.setVisible(false); refPixel6_.setVisible(false); refPixel7_.setVisible(false); break; } case 6: { refPixel0_.setVisible(true); refPixel1_.setVisible(true); refPixel2_.setVisible(true); refPixel3_.setVisible(true); refPixel4_.setVisible(true); refPixel5_.setVisible(true); refPixel6_.setVisible(false); refPixel7_.setVisible(false); break; } case 7: { refPixel0_.setVisible(true); refPixel1_.setVisible(true); refPixel2_.setVisible(true); refPixel3_.setVisible(true); refPixel4_.setVisible(true); refPixel5_.setVisible(true); refPixel6_.setVisible(true); refPixel7_.setVisible(false); break; } case 8: { 
refPixel0_.setVisible(true); refPixel1_.setVisible(true); refPixel2_.setVisible(true); refPixel3_.setVisible(true); refPixel4_.setVisible(true); refPixel5_.setVisible(true); refPixel6_.setVisible(true); refPixel7_.setVisible(true); break; } } LINFO("Pixel size has been updated!"); }
/*
 * Move the s5k3h1gx actuator lens by num_steps logical steps in direction
 * dir (MOVE_NEAR moves toward near focus, MOVE_FAR toward far focus),
 * stepping through intermediate DAC positions with a software-damping
 * delay between i2c writes.
 *
 * Returns 0 on success, -EINVAL for an illegal direction, or the negative
 * error code from the actuator i2c write on failure.
 */
int32_t s5k3h1gx_msm_actuator_move_focus(
	struct msm_actuator_ctrl_t *a_ctrl,
	int dir,
	int32_t num_steps)
{
	int32_t rc = 0;
	int8_t sign_dir = 0;             /* +1 = near, -1 = far */
	int16_t dest_step_pos = 0;       /* target index into step_position_table */
	int16_t curr_lens_pos;
	int16_t next_lens_pos;
	int16_t dest_lens_pos;
	int16_t target_dist;             /* absolute DAC distance to travel */
	uint16_t sw_damping_time_wait;   /* ms delay between damping sub-steps */
	int32_t sw_damping_step_dynamic; /* number of damping sub-steps */
	int16_t time_wait_per_step;
	uint32_t time_wait;              /* total move budget in microseconds */
	uint16_t small_step;             /* DAC delta per damping sub-step */
	uint8_t mode_mask;               /* actuator slew-rate mode for the write */

	LINFO("%s called, dir %d, num_steps %d\n",
		__func__, dir, num_steps);

	if (dir == MOVE_NEAR)
		sign_dir = 1;
	else if (dir == MOVE_FAR)
		sign_dir = -1;
	else {
		pr_err("Illegal focus direction\n");
		rc = -EINVAL;
		return rc;
	}

	/* compute the destination step index and clamp it to the table range */
	dest_step_pos = a_ctrl->curr_step_pos +
		(sign_dir * num_steps);
	if (dest_step_pos < 0)
		dest_step_pos = 0;
	else if (dest_step_pos > a_ctrl->set_info.total_steps)
		dest_step_pos = a_ctrl->set_info.total_steps;

	/* nothing to do after clamping */
	if (dest_step_pos == a_ctrl->curr_step_pos)
		return rc;

	curr_lens_pos = a_ctrl->step_position_table[a_ctrl->curr_step_pos];
	dest_lens_pos = a_ctrl->step_position_table[dest_step_pos];
	/* sign_dir folds the direction out, so target_dist is the magnitude.
	 * NOTE(review): the divisions below rely on target_dist being nonzero,
	 * i.e. on step_position_table entries differing between distinct step
	 * indices -- confirm the table is strictly monotonic. */
	target_dist = sign_dir * (dest_lens_pos - curr_lens_pos);

	/* pick damping parameters and the total time budget for this move */
	if (sign_dir < 0 && target_dist >= a_ctrl->step_position_table[5]) {
		/* long far-direction move: many fine sub-steps, short waits */
		sw_damping_step_dynamic = 10;
		sw_damping_time_wait = 1;
		time_wait = 1000000 / 30 - 10000;
	} else {
		if (num_steps > 2) {
			sw_damping_step_dynamic = 4;
			sw_damping_time_wait = 4;
		} else {
			sw_damping_step_dynamic = 2;
			sw_damping_time_wait = 2;
		}
		time_wait = 1000000 / 30;
	}

	/* choose the actuator slew-rate mode from the per-DAC-unit time budget */
	time_wait_per_step = (int16_t)(time_wait / target_dist);
	if (time_wait_per_step >= 800)
		mode_mask = 0x5;
	else if (time_wait_per_step >= 400)
		mode_mask = 0x4;
	else if (time_wait_per_step >= 200)
		mode_mask = 0x3;
	else if (time_wait_per_step >= 100)
		mode_mask = 0x2;
	else if (time_wait_per_step >= 50)
		mode_mask = 0x1;
	else {
		/* very fast per-unit budget: select mode from the total budget */
		if (time_wait >= 17600)
			mode_mask = 0x0D;
		else if (time_wait >= 8800)
			mode_mask = 0x0C;
		else if (time_wait >= 4400)
			mode_mask = 0x0B;
		else if (time_wait >= 2200)
			mode_mask = 0x0A;
		else
			mode_mask = 0x09;
	}

	/* divide the travel into sub-steps, rounding up so the loop below
	 * never overshoots dest_lens_pos */
	small_step = (uint16_t)(target_dist /
		sw_damping_step_dynamic);
	if ((target_dist % sw_damping_step_dynamic) != 0) {
		small_step++;
	}

	/* walk toward the destination one damped sub-step at a time;
	 * multiplying both sides by sign_dir makes the comparison
	 * direction-independent */
	for (next_lens_pos = curr_lens_pos + (sign_dir * small_step);
		(sign_dir * next_lens_pos) <= (sign_dir * dest_lens_pos);
		next_lens_pos += sign_dir * small_step) {
		rc = a_ctrl->func_tbl.actuator_i2c_write(a_ctrl,
			next_lens_pos, &mode_mask);
		if (rc < 0) {
			pr_err("%s: focus move failed\n", __func__);
			return rc;
		}
		mdelay(sw_damping_time_wait);
		curr_lens_pos = next_lens_pos;
	}

	/* final correction write when the sub-steps did not land exactly on
	 * the destination position */
	if (curr_lens_pos != dest_lens_pos) {
		rc = a_ctrl->func_tbl.actuator_i2c_write(a_ctrl,
			dest_lens_pos, &mode_mask);
		if (rc < 0) {
			pr_err("%s: focus move failed\n", __func__);
			return rc;
		}
		mdelay(sw_damping_time_wait);
	}

	a_ctrl->curr_step_pos = dest_step_pos;
	return rc;
}