Example #1
    Shape *MakeLoop(BlockSet &Blocks, BlockSet& Entries, BlockSet &NextEntries) {
      // Find the inner blocks in this loop. Proceed backwards from the entries until
      // you reach a seen block, collecting as you go.
      BlockSet InnerBlocks;
      BlockSet Queue = Entries;
      while (Queue.size() > 0) {
        Block *Curr = *(Queue.begin());
        Queue.erase(Queue.begin());
        if (InnerBlocks.find(Curr) == InnerBlocks.end()) {
          // This element is new, mark it as inner and remove from outer
          InnerBlocks.insert(Curr);
          Blocks.erase(Curr);
          // Add the elements prior to it
          for (BlockBranchMap::iterator iter = Curr->BranchesIn.begin(); iter != Curr->BranchesIn.end(); iter++) {
            Queue.insert(iter->first);
          }
        }
      }
      assert(InnerBlocks.size() > 0);

      for (BlockSet::iterator iter = InnerBlocks.begin(); iter != InnerBlocks.end(); iter++) {
        Block *Curr = *iter;
        for (BlockBranchMap::iterator iter = Curr->BranchesOut.begin(); iter != Curr->BranchesOut.end(); iter++) {
          Block *Possible = iter->first;
          if (InnerBlocks.find(Possible) == InnerBlocks.end() &&
              NextEntries.find(Possible) == NextEntries.end()) {
            NextEntries.insert(Possible);
          }
        }
      }

      PrintDebug("creating loop block:\n");
      DebugDump(InnerBlocks, "  inner blocks:");
      DebugDump(Entries, "  inner entries:");
      DebugDump(Blocks, "  outer blocks:");
      DebugDump(NextEntries, "  outer entries:");

      // TODO: Optionally hoist additional blocks into the loop

      LoopShape *Loop = new LoopShape();
      Notice(Loop);

      // Solipsize the loop, replacing with break/continue and marking branches as Processed (will not affect later calculations)
      // A. Branches to the loop entries become a continue to this shape
      for (BlockSet::iterator iter = Entries.begin(); iter != Entries.end(); iter++) {
        Solipsize(*iter, Branch::Continue, Loop, InnerBlocks);
      }
      // B. Branches to outside the loop (a next entry) become breaks on this shape
      for (BlockSet::iterator iter = NextEntries.begin(); iter != NextEntries.end(); iter++) {
        Solipsize(*iter, Branch::Break, Loop, InnerBlocks);
      }
      // Finish up
      Shape *Inner = Process(InnerBlocks, Entries, NULL);
      Loop->Inner = Inner;
      return Loop;
    }
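The backward flood fill at the top of MakeLoop (walk predecessor edges from the entries, collecting until nothing new is seen) can be exercised in isolation. A minimal sketch, assuming a toy Block that only carries predecessor pointers; the names preds and collectLoopBody are illustrative and not part of the Relooper API:

#include <cassert>
#include <set>
#include <vector>

// Toy stand-in for the Relooper's Block: only predecessor edges matter here.
struct Block {
  std::vector<Block*> preds;
};

// Collect every block that reaches an entry via predecessor edges, stopping at
// blocks that were already collected -- the loop body.
std::set<Block*> collectLoopBody(const std::set<Block*>& entries) {
  std::set<Block*> inner;
  std::vector<Block*> queue(entries.begin(), entries.end());
  while (!queue.empty()) {
    Block* curr = queue.back();
    queue.pop_back();
    if (inner.insert(curr).second) {       // newly seen: it belongs to the loop
      for (Block* pred : curr->preds)      // enqueue everything branching into it
        queue.push_back(pred);
    }
  }
  return inner;
}

int main() {
  // Tiny two-block loop: a -> b -> a, entered at a.
  Block a, b;
  a.preds.push_back(&b);
  b.preds.push_back(&a);
  assert(collectLoopBody({&a}).size() == 2);
  return 0;
}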
/**
 * Generate sidewalks
 */
void VBOPmBlocks::generateSideWalk(VBORenderManager* renderManager, BlockSet& blocks) {
	for (int i = 0; i < blocks.size(); ++i) {
		if (!blocks[i].valid) continue;

		BBox3D bbox;
		blocks[i].sidewalkContour.getBBox3D(bbox.minPt, bbox.maxPt);

		// If the block is too narrow, make it invalid.
		if (blocks[i].sidewalkContour.isTooNarrow(8.0f, 18.0f) || blocks[i].sidewalkContour.isTooNarrow(1.0f, 3.0f)) {
			blocks[i].valid = false;
			continue;
		}

		// If the block is close to or below the water level, make it invalid; if the terrain is too steep, mark the block as a park.
		float min_z = std::numeric_limits<float>::max();
		float max_z = 0.0f;
		for (int pi = 0; pi < blocks[i].sidewalkContour.contour.size(); ++pi) {
			int next_pi = (pi + 1) % blocks[i].sidewalkContour.contour.size();
			for (int k = 0; k <= 20; ++k) {
				QVector3D pt = blocks[i].sidewalkContour.contour[pi] * (float)(20 - k) * 0.05 + blocks[i].sidewalkContour.contour[next_pi] * (float)k * 0.05;
				float z = renderManager->getTerrainHeight(pt.x(), pt.y());
				min_z = std::min(min_z, z);
				max_z = std::max(max_z, z);
			}
			//float z = renderManager->getTerrainHeight(blocks[i].sidewalkContour.contour[pi].x(), blocks[i].sidewalkContour.contour[pi].y());
			//min_z = std::min(min_z, z);
			//max_z = std::max(max_z, z);
		}
		if (min_z < 40.0f) {
			blocks[i].valid = false;
			continue;
		} else if (max_z - min_z > 20.0f) {
			blocks[i].isPark = true;
			continue;
		}
	}

	// Compute the block contour (the outer part becomes sidewalks)
	for (int i = 0; i < blocks.size(); ++i) {
		if (!blocks[i].valid) continue;
		//if (blocks[i].isPark) continue;

		Loop3D blockContourInset;
		float sidewalk_width = G::getFloat("sidewalk_width");
		blocks[i].sidewalkContour.computeInset(sidewalk_width, blockContourInset, false);
		blocks[i].blockContour.contour = blockContourInset;
		//blocks[i].blockContour.getBBox3D(blocks[i].bbox.minPt, blocks[i].bbox.maxPt);
	}
}
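The terrain check above samples 21 evenly spaced points along each contour edge and keeps the minimum and maximum height. A self-contained sketch of that interpolation, with a made-up terrainHeight() standing in for renderManager->getTerrainHeight():

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <limits>
#include <vector>

struct Pt { float x, y; };

// Hypothetical terrain: height grows with distance from the origin.
static float terrainHeight(float x, float y) { return std::sqrt(x * x + y * y); }

// Sample each edge of a closed contour at k/20 (k = 0..20) and return the
// min/max terrain height seen, mirroring the loop in generateSideWalk().
static void contourHeightRange(const std::vector<Pt>& contour, float& minZ, float& maxZ) {
  minZ = std::numeric_limits<float>::max();
  maxZ = -std::numeric_limits<float>::max();
  for (size_t i = 0; i < contour.size(); ++i) {
    const Pt& a = contour[i];
    const Pt& b = contour[(i + 1) % contour.size()];
    for (int k = 0; k <= 20; ++k) {
      float t = k * 0.05f;                     // 0.05 == 1/20
      float x = a.x * (1.0f - t) + b.x * t;    // linear interpolation along the edge
      float y = a.y * (1.0f - t) + b.y * t;
      float z = terrainHeight(x, y);
      minZ = std::min(minZ, z);
      maxZ = std::max(maxZ, z);
    }
  }
}

int main() {
  std::vector<Pt> square = {{0, 0}, {100, 0}, {100, 100}, {0, 100}};
  float minZ, maxZ;
  contourHeightRange(square, minZ, maxZ);
  // Under the thresholds above: minZ < 40 would invalidate the block,
  // maxZ - minZ > 20 would turn it into a park.
  std::printf("min=%.1f max=%.1f\n", minZ, maxZ);
  return 0;
}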
Example #3
Layer Layer::getSubgraphConnectedToTheseOutputs(
	const NeuronSet& outputs) const
{
	typedef std::set<size_t> BlockSet;
	
	BlockSet blocks;

	// TODO: eliminate the redundant inserts
	for(auto& output : outputs)
	{
		size_t block = (output / getOutputBlockingFactor()) % this->blocks();

		blocks.insert(block);
	}
	
	Layer layer(blocks.size(), getInputBlockingFactor(),
		getOutputBlockingFactor(), blockStep());
	
	for(auto& block : blocks)
	{
		size_t blockIndex = block - *blocks.begin();
		
		layer[blockIndex] = (*this)[block];
		layer.at_bias(blockIndex) = at_bias(block);
	}
	
	return layer;
}
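The index arithmetic that picks a block for each output neuron can be tried on its own. A minimal sketch with assumed values for the output blocking factor and the block count; blockForOutput is an illustrative helper, not part of the Layer API:

#include <cstdio>
#include <set>

// Map a flat output index to the block that produces it, mirroring the expression
// (output / getOutputBlockingFactor()) % this->blocks() in the code above.
static size_t blockForOutput(size_t output, size_t outputsPerBlock, size_t blockCount) {
  return (output / outputsPerBlock) % blockCount;
}

int main() {
  const size_t outputsPerBlock = 4;   // assumed blocking factor
  const size_t blockCount = 8;        // assumed number of blocks
  const size_t outputs[] = {0, 3, 5, 17, 18};

  std::set<size_t> neededBlocks;      // the std::set absorbs the redundant inserts
  for (size_t output : outputs)
    neededBlocks.insert(blockForOutput(output, outputsPerBlock, blockCount));

  for (size_t block : neededBlocks)
    std::printf("block %zu\n", block);  // prints: block 0, block 1, block 4
  return 0;
}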
TextAutosizer::Supercluster* TextAutosizer::getSupercluster(const LayoutBlock* block)
{
    Fingerprint fingerprint = m_fingerprintMapper.get(block);
    if (!fingerprint)
        return nullptr;

    BlockSet* roots = m_fingerprintMapper.getTentativeClusterRoots(fingerprint);
    if (!roots || roots->size() < 2 || !roots->contains(block))
        return nullptr;

    SuperclusterMap::AddResult addResult = m_superclusters.add(fingerprint, PassOwnPtr<Supercluster>());
    if (!addResult.isNewEntry)
        return addResult.storedValue->value.get();

    Supercluster* supercluster = new Supercluster(roots);
    addResult.storedValue->value = adoptPtr(supercluster);
    return supercluster;
}
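The add()/isNewEntry sequence is a get-or-create lookup keyed by fingerprint: one map probe decides whether to reuse the cached Supercluster or build a new one. A rough standard-library equivalent of that pattern, with stand-ins for Blink's Fingerprint, Supercluster, SuperclusterMap and PassOwnPtr:

#include <cstdint>
#include <cstdio>
#include <memory>
#include <unordered_map>

using Fingerprint = uint64_t;                               // stand-in type
struct Supercluster { explicit Supercluster(int n) : rootCount(n) {} int rootCount; };

// Get-or-create with a single lookup: try_emplace reports whether the entry is new.
Supercluster* getOrCreate(std::unordered_map<Fingerprint, std::unique_ptr<Supercluster>>& cache,
                          Fingerprint fingerprint, int rootCount) {
  auto result = cache.try_emplace(fingerprint, nullptr);
  if (!result.second)                                       // existing entry: reuse it
    return result.first->second.get();
  result.first->second = std::make_unique<Supercluster>(rootCount);
  return result.first->second.get();
}

int main() {
  std::unordered_map<Fingerprint, std::unique_ptr<Supercluster>> cache;
  Supercluster* first = getOrCreate(cache, 42, 3);
  Supercluster* again = getOrCreate(cache, 42, 99);
  std::printf("same object: %d\n", first == again);         // prints 1
  return 0;
}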
Example #5
void StackAllocationPromoter::promoteAllocationToPhi() {
  DEBUG(llvm::dbgs() << "*** Placing Phis for : " << *ASI);

  // A list of blocks that will require new Phi values.
  BlockSet PhiBlocks;

  // The "piggy-bank" data-structure that we use for processing the dom-tree
  // bottom-up.
  NodePriorityQueue PQ;

  // Collect all of the stores into the AllocStack. We know that at this point
  // we have at most one store per block.
  for (auto UI = ASI->use_begin(), E = ASI->use_end(); UI != E; ++UI) {
    SILInstruction *II = UI->getUser();
    // We need to place Phis for this block.
    if (isa<StoreInst>(II)) {
      // If the block is in the dom tree (dominated by the entry block).
      if (DomTreeNode *Node = DT->getNode(II->getParent()))
        PQ.push(std::make_pair(Node, DomTreeLevels[Node]));
    }
  }

  DEBUG(llvm::dbgs() << "*** Found: " << PQ.size() << " Defs\n");

  // A list of nodes for which we already calculated the dominator frontier.
  llvm::SmallPtrSet<DomTreeNode *, 32> Visited;

  SmallVector<DomTreeNode *, 32> Worklist;

  // Scan all of the definitions in the function bottom-up using the priority
  // queue.
  while (!PQ.empty()) {
    DomTreeNodePair RootPair = PQ.top();
    PQ.pop();
    DomTreeNode *Root = RootPair.first;
    unsigned RootLevel = RootPair.second;

    // Walk all dom tree children of Root, inspecting their successors. Only
    // J-edges whose target level is at most Root's level are added to the
    // dominance frontier.
    Worklist.clear();
    Worklist.push_back(Root);

    while (!Worklist.empty()) {
      DomTreeNode *Node = Worklist.pop_back_val();
      SILBasicBlock *BB = Node->getBlock();

      // For all successors of the node:
      for (auto &Succ : BB->getSuccessors()) {
        DomTreeNode *SuccNode = DT->getNode(Succ);

        // Skip D-edges (edges that are dom-tree edges).
        if (SuccNode->getIDom() == Node)
          continue;

        // Ignore J-edges whose target is at a deeper level than the root.
        unsigned SuccLevel = DomTreeLevels[SuccNode];
        if (SuccLevel > RootLevel)
          continue;

        // Ignore visited nodes.
        if (!Visited.insert(SuccNode).second)
          continue;

        // If the new PHInode is not dominated by the allocation then it's dead.
        if (!DT->dominates(ASI->getParent(), SuccNode->getBlock()))
          continue;

        // If the new PHInode is properly dominated by the deallocation then it
        // is obviously a dead PHInode, so we don't need to insert it.
        if (DSI && DT->properlyDominates(DSI->getParent(),
                                         SuccNode->getBlock()))
          continue;

        // The successor node is a new PHINode. If this is a new PHI node
        // then it may require additional definitions, so add it to the PQ.
        if (PhiBlocks.insert(Succ).second)
          PQ.push(std::make_pair(SuccNode, SuccLevel));
      }

      // Add the children in the dom-tree to the worklist.
      for (auto CI = Node->begin(), CE = Node->end(); CI != CE; ++CI)
        if (!Visited.count(*CI))
          Worklist.push_back(*CI);
    }
  }

  DEBUG(llvm::dbgs() << "*** Found: " << PhiBlocks.size() << " new PHIs\n");
  NumPhiPlaced += PhiBlocks.size();

  // At this point we calculated the locations of all of the new Phi values.
  // Next, add the Phi values and promote all of the loads and stores into the
  // new locations.

  // Replace the dummy values with new block arguments.
  addBlockArguments(PhiBlocks);

  // Hook up the Phi nodes, loads, and debug_value_addr with incoming values.
  fixBranchesAndUses(PhiBlocks);

  DEBUG(llvm::dbgs() << "*** Finished placing Phis ***\n");
}
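The NodePriorityQueue is what makes the walk bottom-up: definitions at deeper dominator-tree levels are popped first. A minimal sketch of that ordering with std::priority_queue, using made-up node ids and levels:

#include <cstdio>
#include <queue>
#include <utility>

int main() {
  // Pair each definition's dom-tree node with its depth (level) and pop the deepest
  // first -- the order promoteAllocationToPhi() relies on when placing new Phi blocks.
  using LevelNode = std::pair<unsigned, int>;     // (dom-tree level, node id)
  std::priority_queue<LevelNode> pq;              // max-heap: deepest level on top

  pq.push({0, 0});   // entry block
  pq.push({3, 7});   // a deeply nested definition
  pq.push({1, 2});

  while (!pq.empty()) {
    LevelNode top = pq.top();
    pq.pop();
    std::printf("processing node %d at level %u\n", top.second, top.first);
  }
  // Output order: node 7 (level 3), node 2 (level 1), node 0 (level 0).
  return 0;
}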
Example #6
    // Main function.
    // Process a set of blocks with specified entries; returns a shape.
    // The Make* functions receive a NextEntries. If they fill it with data, those are the entries for the
    //   ->Next block on them, and the blocks are what remains in Blocks (which Make* modify). In this way
    //   we avoid recursing on Next (imagine a long chain of Simples, if we recursed we could blow the stack).
    Shape *Process(BlockSet &Blocks, BlockSet& InitialEntries, Shape *Prev) {
      PrintDebug("Process() called\n");
      BlockSet *Entries = &InitialEntries;
      BlockSet TempEntries[2];
      int CurrTempIndex = 0;
      BlockSet *NextEntries;
      Shape *Ret = NULL;
      #define Make(call) \
        Shape *Temp = call; \
        if (Prev) Prev->Next = Temp; \
        if (!Ret) Ret = Temp; \
        if (!NextEntries->size()) { PrintDebug("Process() returning\n"); return Ret; } \
        Prev = Temp; \
        Entries = NextEntries; \
        continue;
      while (1) {
        PrintDebug("Process() running\n");
        DebugDump(Blocks, "  blocks : ");
        DebugDump(*Entries, "  entries: ");

        CurrTempIndex = 1-CurrTempIndex;
        NextEntries = &TempEntries[CurrTempIndex];
        NextEntries->clear();

        if (Entries->size() == 0) return Ret;
        if (Entries->size() == 1) {
          Block *Curr = *(Entries->begin());
          if (Curr->BranchesIn.size() == 0) {
            // One entry, no looping ==> Simple
            Make(MakeSimple(Blocks, Curr, *NextEntries));
          }
          // One entry, looping ==> Loop
          Make(MakeLoop(Blocks, *Entries, *NextEntries));
        }
        // More than one entry: try to eliminate entries by splitting off a Multiple of
        // groups of independent blocks reachable from an entry (or entries). It is important
        // to remove entries via Multiples rather than Loops, since the former is more performant.
        BlockBlockSetMap IndependentGroups;
        FindIndependentGroups(Blocks, *Entries, IndependentGroups);

        PrintDebug("Independent groups: %d\n", IndependentGroups.size());

        if (IndependentGroups.size() > 0) {
          // We can handle a group in a multiple if its entry cannot be reached by another group.
          // Note that it might be reachable by itself - a loop. But that is fine, we will create
          // a loop inside the multiple block (which is the performant order to do it).
          for (BlockBlockSetMap::iterator iter = IndependentGroups.begin(); iter != IndependentGroups.end();) {
            Block *Entry = iter->first;
            BlockSet &Group = iter->second;
            BlockBlockSetMap::iterator curr = iter++; // iterate carefully, we may delete
            for (BlockBranchMap::iterator iterBranch = Entry->BranchesIn.begin(); iterBranch != Entry->BranchesIn.end(); iterBranch++) {
              Block *Origin = iterBranch->first;
              if (Group.find(Origin) == Group.end()) {
                // Reached from outside the group, so we cannot handle this
                PrintDebug("Cannot handle group with entry %d because of incoming branch from %d\n", Entry->Id, Origin->Id);
                IndependentGroups.erase(curr);
                break;
              }
            }
          }

          // As an optimization, if we have 2 independent groups, and one is a small dead end, we can handle only that dead end.
          // The other then becomes a Next - without nesting in the code and recursion in the analysis.
          // TODO: if the larger is the only dead end, handle that too
          // TODO: handle >2 groups
          // TODO: handle not just dead ends, but also groups that do not branch to the NextEntries. However, we must be careful
          //       there since we create a Next, and that Next can prevent eliminating a break (since we no longer
          //       naturally reach the same place), which may necessitate a one-time loop, which makes the unnesting
          //       pointless.
          if (IndependentGroups.size() == 2) {
            // Find the smaller one
            BlockBlockSetMap::iterator iter = IndependentGroups.begin();
            Block *SmallEntry = iter->first;
            int SmallSize = iter->second.size();
            iter++;
            Block *LargeEntry = iter->first;
            int LargeSize = iter->second.size();
            if (SmallSize != LargeSize) { // ignore the case where they are identical - keep things symmetrical there
              if (SmallSize > LargeSize) {
                Block *Temp = SmallEntry;
                SmallEntry = LargeEntry;
                LargeEntry = Temp; // Note: we did not flip the Sizes too, they are now invalid. TODO: use the smaller size as a limit?
              }
              // Check if dead end
              bool DeadEnd = true;
              BlockSet &SmallGroup = IndependentGroups[SmallEntry];
              for (BlockSet::iterator iter = SmallGroup.begin(); iter != SmallGroup.end(); iter++) {
                Block *Curr = *iter;
                for (BlockBranchMap::iterator iter = Curr->BranchesOut.begin(); iter != Curr->BranchesOut.end(); iter++) {
                  Block *Target = iter->first;
                  if (SmallGroup.find(Target) == SmallGroup.end()) {
                    DeadEnd = false;
                    break;
                  }
                }
                if (!DeadEnd) break;
              }
              if (DeadEnd) {
                PrintDebug("Removing nesting by not handling large group because small group is dead end\n");
                IndependentGroups.erase(LargeEntry);
              }
            }
          }

          PrintDebug("Handleable independent groups: %d\n", IndependentGroups.size());

          if (IndependentGroups.size() > 0) {
            // Some groups removable ==> Multiple
            Make(MakeMultiple(Blocks, *Entries, IndependentGroups, Prev, *NextEntries));
          }
        }
        // No independent groups, must be loopable ==> Loop
        Make(MakeLoop(Blocks, *Entries, *NextEntries));
      }
    }
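The TempEntries[2] ping-pong buffers are what let Process() consume an arbitrarily long chain of shapes without recursing on Next: the entries for the next iteration are written into whichever buffer is not currently being read. A stripped-down sketch of that pattern, with a dummy countdown standing in for the Make* calls:

#include <cstdio>
#include <set>

int main() {
  std::set<int> tempEntries[2];
  std::set<int>* entries = &tempEntries[0];
  int currTempIndex = 0;

  entries->insert(10);                       // initial entry
  while (!entries->empty()) {
    currTempIndex = 1 - currTempIndex;       // flip to the other buffer
    std::set<int>* nextEntries = &tempEntries[currTempIndex];
    nextEntries->clear();

    int curr = *entries->begin();
    std::printf("processing entry %d\n", curr);
    if (curr > 0)
      nextEntries->insert(curr - 1);         // entries for the ->Next shape, if any

    entries = nextEntries;                   // continue with the freshly filled buffer
  }
  return 0;
}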
Example #7
    Shape *MakeLoop(BlockSet &Blocks, BlockSet& Entries, BlockSet &NextEntries) {
      // Find the inner blocks in this loop. Proceed backwards from the entries until
      // you reach a seen block, collecting as you go.
      BlockSet InnerBlocks;
      BlockSet Queue = Entries;
      while (Queue.size() > 0) {
        Block *Curr = *(Queue.begin());
        Queue.erase(Queue.begin());
        if (!contains(InnerBlocks, Curr)) {
          // This element is new, mark it as inner and remove from outer
          InnerBlocks.insert(Curr);
          Blocks.erase(Curr);
          // Add the elements prior to it
          for (BlockSet::iterator iter = Curr->BranchesIn.begin(); iter != Curr->BranchesIn.end(); iter++) {
            Queue.insert(*iter);
          }
#if 0
          // Add elements it leads to, if they are dead ends. There is no reason not to hoist dead ends
          // into loops, as it can avoid multiple entries after the loop
          for (BlockBranchMap::iterator iter = Curr->BranchesOut.begin(); iter != Curr->BranchesOut.end(); iter++) {
            Block *Target = iter->first;
            if (Target->BranchesIn.size() <= 1 && Target->BranchesOut.size() == 0) {
              Queue.insert(Target);
            }
          }
#endif
        }
      }
      assert(InnerBlocks.size() > 0);

      for (BlockSet::iterator iter = InnerBlocks.begin(); iter != InnerBlocks.end(); iter++) {
        Block *Curr = *iter;
        for (BlockBranchMap::iterator iter = Curr->BranchesOut.begin(); iter != Curr->BranchesOut.end(); iter++) {
          Block *Possible = iter->first;
          if (!contains(InnerBlocks, Possible)) {
            NextEntries.insert(Possible);
          }
        }
      }

#if 0
      // We can avoid multiple next entries by hoisting them into the loop.
      if (NextEntries.size() > 1) {
        BlockBlockSetMap IndependentGroups;
        FindIndependentGroups(NextEntries, IndependentGroups, &InnerBlocks);

        while (IndependentGroups.size() > 0 && NextEntries.size() > 1) {
          Block *Min = NULL;
          int MinSize = 0;
          for (BlockBlockSetMap::iterator iter = IndependentGroups.begin(); iter != IndependentGroups.end(); iter++) {
            Block *Entry = iter->first;
            BlockSet &Blocks = iter->second;
            if (!Min || Blocks.size() < MinSize) { // TODO: code size, not # of blocks
              Min = Entry;
              MinSize = Blocks.size();
            }
          }
          // check how many new entries this would cause
          BlockSet &Hoisted = IndependentGroups[Min];
          bool abort = false;
          for (BlockSet::iterator iter = Hoisted.begin(); iter != Hoisted.end() && !abort; iter++) {
            Block *Curr = *iter;
            for (BlockBranchMap::iterator iter = Curr->BranchesOut.begin(); iter != Curr->BranchesOut.end(); iter++) {
              Block *Target = iter->first;
              if (Hoisted.find(Target) == Hoisted.end() && NextEntries.find(Target) == NextEntries.end()) {
                // abort this hoisting
                abort = true;
                break;
              }
            }
          }
          if (abort) {
            IndependentGroups.erase(Min);
            continue;
          }
          // hoist this entry
          PrintDebug("hoisting %d into loop\n", Min->Id);
          NextEntries.erase(Min);
          for (BlockSet::iterator iter = Hoisted.begin(); iter != Hoisted.end(); iter++) {
            Block *Curr = *iter;
            InnerBlocks.insert(Curr);
            Blocks.erase(Curr);
          }
          IndependentGroups.erase(Min);
        }
      }
#endif

      PrintDebug("creating loop block:\n");
      DebugDump(InnerBlocks, "  inner blocks:");
      DebugDump(Entries, "  inner entries:");
      DebugDump(Blocks, "  outer blocks:");
      DebugDump(NextEntries, "  outer entries:");

      LoopShape *Loop = new LoopShape();
      Notice(Loop);

      // Solipsize the loop, replacing with break/continue and marking branches as Processed (will not affect later calculations)
      // A. Branches to the loop entries become a continue to this shape
      for (BlockSet::iterator iter = Entries.begin(); iter != Entries.end(); iter++) {
        Solipsize(*iter, Branch::Continue, Loop, InnerBlocks);
      }
      // B. Branches to outside the loop (a next entry) become breaks on this shape
      for (BlockSet::iterator iter = NextEntries.begin(); iter != NextEntries.end(); iter++) {
        Solipsize(*iter, Branch::Break, Loop, InnerBlocks);
      }
      // Finish up
      Shape *Inner = Process(InnerBlocks, Entries, NULL);
      Loop->Inner = Inner;
      return Loop;
    }
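This version of MakeLoop uses a contains() helper instead of spelling out find() != end(). A plausible minimal definition, assuming it is just a thin wrapper over find() on an associative container (the real Relooper may define it differently):

#include <cassert>
#include <map>
#include <set>

// Assumed helper: true if the associative container holds the key.
template <typename Container, typename Key>
static bool contains(const Container& container, const Key& key) {
  return container.find(key) != container.end();
}

int main() {
  std::set<int> blocks = {1, 2, 3};
  std::map<int, const char*> branches = {{1, "taken"}};
  assert(contains(blocks, 2));
  assert(!contains(blocks, 7));
  assert(contains(branches, 1));
  return 0;
}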
/**
 * Generate blocks from the road network
 */
bool VBOPmBlocks::generateBlocks(VBORenderManager* renderManager, RoadGraph &roadGraph, BlockSet &blocks) {
	GraphUtil::normalizeLoop(roadGraph);

	roadGraphPtr = &roadGraph;
	blocksPtr = &blocks.blocks;
	blocksPtr->clear();

	bool isPlanar = false;
	bool converges = true;

	//GraphUtil::planarify(roadGraph);
	//GraphUtil::clean(roadGraph);
	
	//Make sure graph is planar
	typedef std::vector< RoadEdgeDesc > tEdgeDescriptorVector;
	std::vector<tEdgeDescriptorVector> embedding(boost::num_vertices(roadGraph.graph));

	int cont=0;

	removeIntersectingEdges(roadGraph);
	/*
	// Test for planarity
	while (cont<2) {
		if (boost::boyer_myrvold_planarity_test(boost::boyer_myrvold_params::graph =roadGraph.graph,
			boost::boyer_myrvold_params::embedding = &embedding[0]) 
			){
				isPlanar = true;
				break;
		} else {
			std::cout << "Input graph is not planar trying removeIntersectingEdges" << std::endl;
			// No planar: Remove intersecting edges and check again
			removeIntersectingEdges(roadGraph);
			cont++;
		}
	}

	if (!isPlanar) {
		std::cout << "ERROR: Graph could not be planarized: (generateBlocks)\n";
		return false;
	}
	*/
	
	// build embedding manually
	//embedding.clear();
	//embedding.resize(boost::num_vertices(roadGraph.graph));
	buildEmbedding(roadGraph, embedding);
	printf("embedding was built.\n");

	//Create edge index property map?	
	typedef std::map<RoadEdgeDesc, size_t> EdgeIndexMap;
	EdgeIndexMap mapEdgeIdx;
	boost::associative_property_map<EdgeIndexMap> pmEdgeIndex(mapEdgeIdx);		
	RoadEdgeIter ei, ei_end;	
	int edge_count = 0;
	for (boost::tie(ei, ei_end) = boost::edges(roadGraph.graph); ei != ei_end; ++ei) {
		mapEdgeIdx.insert(std::make_pair(*ei, edge_count++));	
	}

	//Extract blocks from road graph using boost graph planar_face_traversal
	vertex_output_visitor v_vis;	
	boost::planar_face_traversal(roadGraph.graph, &embedding[0], v_vis, pmEdgeIndex);

	printf("roads graph was traversed. %d blocks were extracted.\n", blocks.size());

	//Misc postprocessing operations on blocks =======
	int maxVtxCount = 0;
	int maxVtxCountIdx = -1;
	std::vector<float> blockAreas;

	Loop3D sidewalkContourInset;
	for (int i = 0; i < blocks.size(); ++i) {
		//Reorient faces
		if (Polygon3D::reorientFace(blocks[i].sidewalkContour.contour)) {
			std::reverse(blocks[i].sidewalkContourRoadsWidths.begin(), blocks[i].sidewalkContourRoadsWidths.end() - 1);
		}

		if( blocks[i].sidewalkContour.contour.size() != blocks[i].sidewalkContourRoadsWidths.size() ){
			std::cout << "Error: contour" << blocks[i].sidewalkContour.contour.size() << " widhts " << blocks[i].sidewalkContourRoadsWidths.size() << "\n";
			blocks[i].sidewalkContour.clear();
			blocks[i].valid = false;
			blockAreas.push_back(0.0f);
			continue;
		}

		if(blocks[i].sidewalkContour.contour.size() < 3){
			std::cout << "Error: Contour <3 " << "\n";
			blocks[i].valid = false;
			blockAreas.push_back(0.0f);
			continue;
		}

		//Compute block offset	
		float insetArea = blocks[i].sidewalkContour.computeInset(blocks[i].sidewalkContourRoadsWidths,sidewalkContourInset);
		
		blocks[i].sidewalkContour.contour = sidewalkContourInset;
		//blocks[i].sidewalkContour.getBBox3D(blocks[i].bbox.minPt, blocks[i].bbox.maxPt);
		
		blockAreas.push_back(insetArea);
	}

	// Remove the largest block
	float maxArea = -FLT_MAX;
	int maxAreaIdx = -1;
	for (int i = 0; i < blocks.size(); ++i) {
		if (blocks[i].sidewalkContour.contour.size() < 3) {
			continue;
		}
		if (blockAreas[i] > maxArea) {
			maxArea = blockAreas[i];
			maxAreaIdx = i;
		}
	}
	if (maxAreaIdx != -1) {
		blocks[maxAreaIdx].valid = false;
		//blocks.blocks.erase(blocks.blocks.begin()+maxAreaIdx);
	}

	// GEN: invalidate blocks that have fewer than 3 edges.
	// This problem is caused by the computeInset() function.
	// TODO: fix the computeInset() function.
	for (int i = 0; i < blocks.size(); ++i) {
		if (blocks[i].sidewalkContour.contour.size() < 3) {
			blocks[i].valid = false;
			//blocks.blocks.erase(blocks.blocks.begin() + i);
		}
	}

	// generate sidewalks and validate the blocks
	generateSideWalk(renderManager, blocks);

	return true;
}
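Polygon3D::reorientFace() above reports whether the contour had to be flipped, which is why the per-edge road widths are reversed alongside it. A sketch of the signed-area (shoelace) test such a reorientation typically performs; signedArea and this reorientFace are illustrative, not the actual Polygon3D implementation:

#include <algorithm>
#include <cassert>
#include <vector>

struct Pt { float x, y; };

// Signed area via the shoelace formula: positive for counter-clockwise contours.
static float signedArea(const std::vector<Pt>& contour) {
  float area = 0.0f;
  for (size_t i = 0; i < contour.size(); ++i) {
    const Pt& a = contour[i];
    const Pt& b = contour[(i + 1) % contour.size()];
    area += a.x * b.y - b.x * a.y;
  }
  return 0.5f * area;
}

// Reverse the contour if it runs clockwise; returns true when it was flipped,
// i.e. when per-edge attribute lists would need reversing as well.
static bool reorientFace(std::vector<Pt>& contour) {
  if (signedArea(contour) < 0.0f) {
    std::reverse(contour.begin(), contour.end());
    return true;
  }
  return false;
}

int main() {
  std::vector<Pt> clockwiseSquare = {{0, 0}, {0, 1}, {1, 1}, {1, 0}};
  assert(signedArea(clockwiseSquare) < 0.0f);
  assert(reorientFace(clockwiseSquare));      // flipped to counter-clockwise
  assert(signedArea(clockwiseSquare) > 0.0f);
  return 0;
}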