// Process mouse click
void MyWindow::onButtonPress(XEvent& event) {
    int x = event.xbutton.x;
    int y = event.xbutton.y;
    mouseButton = event.xbutton.button;
    printf("Mouse click: x=%d, y=%d, button=%d\n", x, y, mouseButton);
    lastClick = invMap(I2Point(x, y));
    clicked = true;
    redraw();
}
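// A minimal, self-contained sketch of the kind of inverse mapping assumed above:
// invMap converts integer window (pixel) coordinates back to real-world
// coordinates. Everything here (the structs, member names, and the linear
// formula) is illustrative, not the actual library code.
#include <cstdio>

struct I2PointS { int x, y; };
struct R2PointS { double x, y; };

struct ViewS {
    int    winX, winY, winW, winH;          // pixel rectangle of the window
    double worldX, worldY, worldW, worldH;  // real-world rectangle it shows

    R2PointS invMap(const I2PointS& p) const {
        double tx = double(p.x - winX) / double(winW);
        double ty = double(p.y - winY) / double(winH);
        // screen y grows downward, world y grows upward, hence the flip
        return { worldX + tx * worldW, worldY + worldH - ty * worldH };
    }
};

int main() {
    ViewS v{0, 0, 640, 480, -10.0, -10.0, 20.0, 20.0};
    R2PointS w = v.invMap({320, 240});                   // centre of the window
    std::printf("world point: (%g, %g)\n", w.x, w.y);    // prints (0, 0)
}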
// create a Cartesian image suitable for texture mapping from the raw
// bearing/range measurements; also return a mask of valid image regions
shared_ptr<DidsonCartesian> Didson::getCartesian(int width, int widthTmp) const {
    // generate map for Cartesian image
    vector<int> map;
    int height;
    pair<vector<int>, int> tmp1 = createMapping(width, maxRange(), minRange(),
                                                consts.bearingFov * 0.5,
                                                numBearings(), numRanges());
    map = tmp1.first;
    height = tmp1.second;

    // avoid having to write out the inverse mapping function by creating
    // a map with sufficiently high resolution as a lookup table for the inverse map
    // not ideal, but works...
    vector<int> invMap(consts.numRanges * consts.numBearings);
    vector<int> mapTmp;
    int heightTmp;
    pair<vector<int>, int> tmp2 = createMapping(widthTmp, maxRange(), minRange(),
                                                consts.bearingFov * 0.5,
                                                numBearings(), consts.numRanges);
    mapTmp = tmp2.first;
    heightTmp = tmp2.second;

    int c = 0;
    for (int y = 0; y < heightTmp; y++) {
        for (int x = 0; x < widthTmp; x++) {
            int idx = mapTmp[c];
            if (idx != -1) {
                int icol = x * ((double) width / (double) widthTmp);
                int irow = y * ((double) height / (double) heightTmp);
                int i = irow * width + icol;
                invMap[idx] = i;
            }
            c++;
        }
    }

    shared_ptr<DidsonCartesian> cartesian(new DidsonCartesian(map, invMap));
    cartesian->image = cv::Mat(height, width, CV_8UC1);
    cartesian->mask = cv::Mat(height, width, CV_8UC1);
    for (int i = 0; i < width * height; i++) {
        if (map[i] == -1) {
            cartesian->image.data[i] = 0;
            cartesian->mask.data[i] = 0;
        } else {
            cartesian->image.data[i] = _image.data[map[i]];
            cartesian->mask.data[i] = 255;
        }
    }
    return cartesian;
}
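// The "high-resolution lookup table" trick above can be illustrated in isolation:
// given only a forward map from output pixels to input samples, an approximate
// inverse (input sample -> output pixel) is built by evaluating the forward map
// on a finer grid and scaling each hit back to the coarse grid. This is a
// self-contained sketch with a made-up forward map, not the sonar geometry
// computed by createMapping.
#include <cstdio>
#include <vector>

// forward map: which of numSamples input samples feeds output pixel (x, y)
int forwardMap(int x, int y, int width, int height, int numSamples) {
    long long idx = (long long)(y * width + x) * numSamples / (width * height);
    return (int)idx;   // a real mapping would return -1 for pixels outside the field of view
}

int main() {
    const int numSamples = 100;
    const int width = 32,     height = 24;      // final image resolution
    const int widthTmp = 256, heightTmp = 192;  // oversampled grid used only for the inverse

    std::vector<int> invMap(numSamples, -1);
    for (int y = 0; y < heightTmp; ++y) {
        for (int x = 0; x < widthTmp; ++x) {
            int idx = forwardMap(x, y, widthTmp, heightTmp, numSamples);
            if (idx != -1) {
                int icol = x * ((double)width / (double)widthTmp);
                int irow = y * ((double)height / (double)heightTmp);
                invMap[idx] = irow * width + icol;   // sample idx lands at this coarse pixel
            }
        }
    }
    std::printf("sample 42 maps to pixel index %d\n", invMap[42]);
}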
void GWindow::drawLine(const I2Point& p1, const I2Point& p2) {
    //... drawLine(invMap(p1), invMap(p2));
    if (
        abs(p1.x) < SHRT_MAX && abs(p1.y) < SHRT_MAX &&
        abs(p2.x) < SHRT_MAX && abs(p2.y) < SHRT_MAX
    ) {
        ::XDrawLine(
            m_Display, m_Window, m_GC,
            p1.x, p1.y, p2.x, p2.y
        );
    } else {
        // Endpoints do not fit into Xlib's 16-bit coordinates,
        // so clip the segment against the window rectangle first
        R2Point c1, c2;
        if (
            R2Rectangle(
                m_IWinRect.left(), m_IWinRect.top(),
                m_IWinRect.width(), m_IWinRect.height()
            ).clip(
                R2Point(p1.x, p1.y), R2Point(p2.x, p2.y),
                c1, c2
            )
        ) {
            ::XDrawLine(
                m_Display, m_Window, m_GC,
                (int)(c1.x + 0.5), (int)(c1.y + 0.5),
                (int)(c2.x + 0.5), (int)(c2.y + 0.5)
            );
        }
    }
    moveTo(invMap(p2));
}
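// The else-branch above exists because XDrawLine stores coordinates as 16-bit
// integers, so far-off-screen endpoints must be clipped to the window rectangle
// before drawing. A self-contained Liang-Barsky clip sketch of what such a
// clip() call computes (illustrative only; R2Rectangle::clip may be implemented
// differently):
#include <cstdio>

// Clip segment (x0,y0)-(x1,y1) to [xmin,xmax] x [ymin,ymax]; returns false if
// the segment lies entirely outside the rectangle.
bool clipSegment(double xmin, double ymin, double xmax, double ymax,
                 double& x0, double& y0, double& x1, double& y1) {
    double dx = x1 - x0, dy = y1 - y0, t0 = 0.0, t1 = 1.0;
    double p[4] = { -dx, dx, -dy, dy };
    double q[4] = { x0 - xmin, xmax - x0, y0 - ymin, ymax - y0 };
    for (int i = 0; i < 4; ++i) {
        if (p[i] == 0.0) { if (q[i] < 0.0) return false; continue; }
        double r = q[i] / p[i];
        if (p[i] < 0.0) { if (r > t1) return false; if (r > t0) t0 = r; }
        else            { if (r < t0) return false; if (r < t1) t1 = r; }
    }
    double nx0 = x0 + t0 * dx, ny0 = y0 + t0 * dy;
    double nx1 = x0 + t1 * dx, ny1 = y0 + t1 * dy;
    x0 = nx0; y0 = ny0; x1 = nx1; y1 = ny1;
    return true;
}

int main() {
    double x0 = -100000, y0 = 50, x1 = 100000, y1 = 50;
    if (clipSegment(0, 0, 640, 480, x0, y0, x1, y1))
        std::printf("clipped to (%g,%g)-(%g,%g)\n", x0, y0, x1, y1);  // (0,50)-(640,50)
}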
inline void NestedDissectionRecursion
( const Graph& graph,
  const vector<Int>& perm,
  Separator& sep,
  NodeInfo& node,
  Int off,
  const BisectCtrl& ctrl )
{
    DEBUG_CSE
    const Int numSources = graph.NumSources();
    const Int* offsetBuf = graph.LockedOffsetBuffer();
    const Int* sourceBuf = graph.LockedSourceBuffer();
    const Int* targetBuf = graph.LockedTargetBuffer();
    if( numSources <= ctrl.cutoff )
    {
        // Filter out the graph of the diagonal block
        Int numValidEdges = 0;
        const Int numEdges = graph.NumEdges();
        for( Int e=0; e<numEdges; ++e )
            if( targetBuf[e] < numSources )
                ++numValidEdges;
        vector<Int> subOffsets(numSources+1), subTargets(Max(numValidEdges,1));
        Int sourceOff = 0;
        Int validCounter = 0;
        Int prevSource = -1;
        for( Int e=0; e<numEdges; ++e )
        {
            const Int source = sourceBuf[e];
            const Int target = targetBuf[e];
            while( source != prevSource )
            {
                subOffsets[sourceOff++] = validCounter;
                ++prevSource;
            }
            if( target < numSources )
                subTargets[validCounter++] = target;
        }
        while( sourceOff <= numSources )
        { subOffsets[sourceOff++] = validCounter; }

        // Technically, SuiteSparse expects column-major storage, but since
        // the matrix is structurally symmetric, it's okay to pass in the
        // row-major representation
        vector<Int> amdPerm;
        AMDOrder( subOffsets, subTargets, amdPerm );

        // Compute the symbolic factorization of this leaf node using the
        // reordering just computed
        node.LOffsets.resize( numSources+1 );
        node.LParents.resize( numSources );
        vector<Int> LNnz( numSources ), Flag( numSources ),
                    amdPermInv( numSources );
        suite_sparse::ldl::Symbolic
        ( numSources, subOffsets.data(), subTargets.data(),
          node.LOffsets.data(), node.LParents.data(), LNnz.data(),
          Flag.data(), amdPerm.data(), amdPermInv.data() );

        // Fill in this node of the local separator tree
        sep.off = off;
        sep.inds.resize( numSources );
        for( Int i=0; i<numSources; ++i )
            sep.inds[i] = perm[amdPerm[i]];
        // TODO: Replace with better deletion mechanism
        SwapClear( sep.children );

        // Fill in this node of the local elimination tree
        node.size = numSources;
        node.off = off;
        // TODO: Replace with better deletion mechanism
        SwapClear( node.children );
        set<Int> lowerStruct;
        for( Int s=0; s<node.size; ++s )
        {
            const Int edgeOff = offsetBuf[s];
            const Int numConn = offsetBuf[s+1] - edgeOff;
            for( Int t=0; t<numConn; ++t )
            {
                const Int target = targetBuf[edgeOff+t];
                if( target >= numSources )
                    lowerStruct.insert( off+target );
            }
        }
        CopySTL( lowerStruct, node.origLowerStruct );
    }
    else
    {
        DEBUG_ONLY(
          if( !IsSymmetric(graph) )
          {
              Print( graph, "graph" );
              LogicError("Graph was not symmetric");
          }
        )

        // Partition the graph and construct the inverse map
        Graph leftChild, rightChild;
        vector<Int> map;
        const Int sepSize = Bisect( graph, leftChild, rightChild, map, ctrl );
        vector<Int> invMap( numSources );
        for( Int s=0; s<numSources; ++s )
            invMap[map[s]] = s;

        DEBUG_ONLY(
          if( !IsSymmetric(leftChild) )
          {
              Print( graph, "graph" );
              Print( leftChild, "leftChild" );
              LogicError("Left child was not symmetric");
          }
        )
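// The invMap built above is just the inverse of the permutation returned by
// Bisect: map[s] says where old vertex s ends up, and invMap says which old
// vertex occupies each new position. A tiny self-contained illustration of
// the invMap[map[s]] = s idiom:
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
    // map[s] = new position of old vertex s (a permutation of 0..4)
    std::vector<int> map = {2, 0, 4, 1, 3};
    std::vector<int> invMap(map.size());
    for (std::size_t s = 0; s < map.size(); ++s)
        invMap[map[s]] = s;                  // invMap[new position] = old vertex
    for (std::size_t n = 0; n < invMap.size(); ++n)
        std::printf("new %zu <- old %d\n", n, invMap[n]);
    // prints: new 0 <- old 1, new 1 <- old 3, new 2 <- old 0, ...
}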
word ColorQuantizer::medianCut(word hist[], byte colMap[][3], int maxCubes) {
    byte lr, lg, lb;
    word i, median, color;
    long count;
    int k, level, ncubes, splitpos;
    void *base;
    size_t num, width;
    cube_t cube, cubeA, cubeB;

    // Create initial cube containing every color that occurs in the histogram
    ncubes = 0;
    cube.count = 0;
    for (i=0, color=0; i<=HSIZE-1; i++) {
        if (hist[i] != 0) {
            histPtr[color++] = i;
            cube.count = cube.count + hist[i];
        }
    }
    cube.lower = 0; cube.upper = color-1;
    cube.level = 0;
    shrink(&cube);
    cubeList[ncubes++] = cube;

    // Main loop: repeatedly split cubes until maxCubes is reached
    while (ncubes < maxCubes) {
        // Pick the lowest-level cube that still contains more than one color
        level = 255; splitpos = -1;
        for (k = 0; k <= ncubes-1; k++) {
            if ((cubeList[k].lower != cubeList[k].upper) && cubeList[k].level < level) {
                level = cubeList[k].level;
                splitpos = k;
            }
        }
        if (splitpos == -1) {
            break;
        }

        // Find the longest dimension of this cube
        cube = cubeList[splitpos];
        lr = cube.rmax - cube.rmin;
        lg = cube.gmax - cube.gmin;
        lb = cube.bmax - cube.bmin;
        if (lr >= lg && lr >= lb) longdim = 0;
        if (lg >= lr && lg >= lb) longdim = 1;
        if (lb >= lr && lb >= lg) longdim = 2;

        // Sort the cube's colors along its longest dimension
        base = (void *)&histPtr[cube.lower];
        num = (size_t)(cube.upper - cube.lower + 1);
        width = (size_t)sizeof(histPtr[0]);
        qsort(base, num, width, compare);

        // Find median
        count = 0;
        for (i=cube.lower; i<=cube.upper-1; i++) {
            if (count >= cube.count/2) break;
            color = histPtr[i];
            count = count + hist[color];
        }
        median = i;

        // Split cube at the median
        cubeA = cube; cubeA.upper = median - 1;
        cubeA.count = count;
        cubeA.level = cube.level + 1;
        shrink(&cubeA);
        cubeList[splitpos] = cubeA;

        cubeB = cube; cubeB.lower = median;
        cubeB.count = cube.count - count;
        cubeB.level = cube.level + 1;
        shrink(&cubeB);
        cubeList[ncubes++] = cubeB;

        if ((ncubes % 10) == 0) {
            std::cerr << ".";
        }
    }
    invMap(hist, colMap, ncubes);
    return ((word)ncubes);
}
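// The qsort call above needs a comparator that orders the packed colors in
// histPtr along the cube's longest dimension (longdim). In the classic
// median-cut implementation each histogram index is a 15-bit 5-5-5 RGB value;
// the sketch below assumes that packing, which may differ from this codebase's
// actual HSIZE and compare.
#include <cstdlib>

static int longdimSketch = 0;   // 0 = red, 1 = green, 2 = blue

static int compareSketch(const void* a1, const void* a2) {
    unsigned short c1 = *(const unsigned short*)a1;
    unsigned short c2 = *(const unsigned short*)a2;
    int k1, k2;
    switch (longdimSketch) {
        case 0:  k1 = (c1 >> 10) & 0x1F; k2 = (c2 >> 10) & 0x1F; break;  // red bits
        case 1:  k1 = (c1 >> 5)  & 0x1F; k2 = (c2 >> 5)  & 0x1F; break;  // green bits
        default: k1 = c1 & 0x1F;         k2 = c2 & 0x1F;         break;  // blue bits
    }
    return k1 - k2;
}

int main() {
    unsigned short colors[3] = { (20 << 10) | (3 << 5) | 7,
                                 ( 5 << 10) | (9 << 5) | 1,
                                 (12 << 10) | (1 << 5) | 30 };
    longdimSketch = 0;   // sort by the red component
    qsort(colors, 3, sizeof(colors[0]), compareSketch);
    return 0;
}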
inline void NaturalNestedDissectionRecursion
( Int nx,
  Int ny,
  Int nz,
  const Graph& graph,
  const vector<Int>& perm,
  Separator& sep,
  NodeInfo& node,
  Int off,
  Int cutoff )
{
    EL_DEBUG_CSE
    const Int numSources = graph.NumSources();
    const Int* offsetBuf = graph.LockedOffsetBuffer();
    const Int* sourceBuf = graph.LockedSourceBuffer();
    const Int* targetBuf = graph.LockedTargetBuffer();
    if( numSources <= cutoff )
    {
        // Filter out the graph of the diagonal block
        Int numValidEdges = 0;
        const Int numEdges = graph.NumEdges();
        for( Int e=0; e<numEdges; ++e )
            if( targetBuf[e] < numSources )
                ++numValidEdges;
        vector<Int> subOffsets(numSources+1), subTargets(Max(numValidEdges,1));
        Int sourceOff = 0;
        Int validCounter = 0;
        Int prevSource = -1;
        for( Int e=0; e<numEdges; ++e )
        {
            const Int source = sourceBuf[e];
            const Int target = targetBuf[e];
            while( source != prevSource )
            {
                subOffsets[sourceOff++] = validCounter;
                ++prevSource;
            }
            if( target < numSources )
                subTargets[validCounter++] = target;
        }
        while( sourceOff <= numSources )
        { subOffsets[sourceOff++] = validCounter; }

        // Technically, SuiteSparse expects column-major storage, but since
        // the matrix is structurally symmetric, it's okay to pass in the
        // row-major representation
        vector<Int> amdPerm;
        AMDOrder( subOffsets, subTargets, amdPerm );

        // Compute the symbolic factorization of this leaf node using the
        // reordering just computed
        node.LOffsets.resize( numSources+1 );
        node.LParents.resize( numSources );
        vector<Int> LNnz( numSources ), Flag( numSources ),
                    amdPermInv( numSources );
        suite_sparse::ldl::Symbolic
        ( numSources, subOffsets.data(), subTargets.data(),
          node.LOffsets.data(), node.LParents.data(), LNnz.data(),
          Flag.data(), amdPerm.data(), amdPermInv.data() );

        // Fill in this node of the local separator tree
        sep.off = off;
        sep.inds.resize( numSources );
        for( Int i=0; i<numSources; ++i )
            sep.inds[i] = perm[amdPerm[i]];

        // Fill in this node of the local elimination tree
        node.size = numSources;
        node.off = off;
        set<Int> lowerStruct;
        for( Int s=0; s<node.size; ++s )
        {
            const Int edgeOff = offsetBuf[s];
            const Int numConn = offsetBuf[s+1] - edgeOff;
            for( Int t=0; t<numConn; ++t )
            {
                const Int target = targetBuf[edgeOff+t];
                if( target >= numSources )
                    lowerStruct.insert( off+target );
            }
        }
        CopySTL( lowerStruct, node.origLowerStruct );
    }
    else
    {
        // Partition the graph and construct the inverse map
        Int nxLeft, nyLeft, nzLeft, nxRight, nyRight, nzRight;
        Graph leftChild, rightChild;
        vector<Int> map;
        const Int sepSize = NaturalBisect
        ( nx, ny, nz, graph,
          nxLeft, nyLeft, nzLeft, leftChild,
          nxRight, nyRight, nzRight, rightChild,
          map );
        vector<Int> invMap( numSources );
        for( Int s=0; s<numSources; ++s )
            invMap[map[s]] = s;

        // Mostly compute this node of the local separator tree
        // (we will finish computing the separator indices soon)
        sep.off = off + (numSources-sepSize);
        sep.inds.resize( sepSize );
        for( Int s=0; s<sepSize; ++s )
        {
            const Int mappedSource = s + (numSources-sepSize);
            sep.inds[s] = invMap[mappedSource];
        }

        // Fill in this node in the local elimination tree
        node.size = sepSize;
        node.off = sep.off;
        set<Int> lowerStruct;
        for( Int s=0; s<sepSize; ++s )
        {
            const Int source = sep.inds[s];
            const Int edgeOff = offsetBuf[source];
            const Int numConn = offsetBuf[source+1] - edgeOff;
            for( Int t=0; t<numConn; ++t )
            {
                const Int target = targetBuf[edgeOff+t];
                if( target >= numSources )
                    lowerStruct.insert( off+target );
            }
        }
        CopySTL( lowerStruct, node.origLowerStruct );

        // Finish computing the separator indices
        for( Int s=0; s<sepSize; ++s )
            sep.inds[s] = perm[sep.inds[s]];

        // Construct the inverse maps from the child indices to the original
        // degrees of freedom
        const Int leftChildSize = leftChild.NumSources();
        vector<Int> leftPerm( leftChildSize );
        for( Int s=0; s<leftChildSize; ++s )
            leftPerm[s] = perm[invMap[s]];
        const Int rightChildSize = rightChild.NumSources();
        vector<Int> rightPerm( rightChildSize );
        for( Int s=0; s<rightChildSize; ++s )
            rightPerm[s] = perm[invMap[s+leftChildSize]];

        sep.children.resize( 2 );
        node.children.resize( 2 );
        sep.children[0] = new Separator(&sep);
        sep.children[1] = new Separator(&sep);
        node.children[0] = new NodeInfo(&node);
        node.children[1] = new NodeInfo(&node);
        NaturalNestedDissectionRecursion
        ( nxLeft, nyLeft, nzLeft, leftChild, leftPerm,
          *sep.children[0], *node.children[0], off, cutoff );
        NaturalNestedDissectionRecursion
        ( nxRight, nyRight, nzRight, rightChild, rightPerm,
          *sep.children[1], *node.children[1], off+leftChildSize, cutoff );
    }
}
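// The leftPerm/rightPerm arrays above simply compose two relabelings: invMap
// takes a child-local index back to the parent-local index it came from, and
// perm takes a parent-local index to the original (global) degree of freedom.
// A tiny self-contained illustration of that composition with made-up data:
#include <cstdio>
#include <vector>

int main() {
    // parent node has 6 vertices whose original (global) labels are:
    std::vector<int> perm   = {10, 11, 12, 13, 14, 15};
    // after bisection: new positions 0..2 -> left child, 3..4 -> right child, 5 -> separator
    std::vector<int> invMap = {4, 0, 5, 1, 3, 2};    // invMap[new] = old parent-local index
    int leftChildSize = 3, rightChildSize = 2;

    std::vector<int> leftPerm(leftChildSize), rightPerm(rightChildSize);
    for (int s = 0; s < leftChildSize; ++s)
        leftPerm[s] = perm[invMap[s]];                   // global label of left-child vertex s
    for (int s = 0; s < rightChildSize; ++s)
        rightPerm[s] = perm[invMap[s + leftChildSize]];  // global label of right-child vertex s

    for (int s = 0; s < leftChildSize; ++s)  std::printf("left %d -> global %d\n",  s, leftPerm[s]);
    for (int s = 0; s < rightChildSize; ++s) std::printf("right %d -> global %d\n", s, rightPerm[s]);
}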
void GWindow::moveTo(const I2Point& p) {
    m_ICurPos = p;
    m_RCurPos = invMap(m_ICurPos);
}
void Map::warp(const Mat& img1, Mat& img2) const {
    Ptr<Map> invMap(inverseMap());
    invMap->inverseWarp(img1, img2);
}
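// This looks like the Map base class of OpenCV's image-registration (reg)
// module, where warp() is implemented by applying inverseWarp() of the inverse
// map. A minimal usage sketch, assuming the cv::reg module and its MapShift
// transform are available (both are assumptions not shown in the snippet):
#include <opencv2/core.hpp>
#include <opencv2/reg/mapshift.hpp>

int main() {
    cv::Mat src(100, 100, CV_8UC1, cv::Scalar(0));
    src.at<unsigned char>(50, 50) = 255;             // single bright pixel

    cv::reg::MapShift shift(cv::Vec2d(10.0, 5.0));   // translate by (+10, +5)
    cv::Mat shifted, restored;
    shift.warp(src, shifted);                        // forward warp, routed through the inverse map
    shift.inverseWarp(shifted, restored);            // undo the shift
    return 0;
}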