// Walk the global block index and thread every block into the chain.
static void linkAllBlocks() {
    for (auto &entry : gBlockMap) {
        linkBlock(entry.second);
    }
}
// Render one (bx, by) chunk column of the world, seen from above, into a
// Block::SIZE x Block::SIZE image.
//
// For every (x, y) voxel column the topmost voxel between minZ and maxZ is
// fetched; its average color is shaded by height (higher = brighter) so the
// relief stays readable. Columns with no voxel in range get a flat 0.5 shade.
//
// map        : block map already loaded with the chunk's blocks
// bx, by     : chunk coordinates (in blocks)
// minZ, maxZ : world-Z range scanned for the top voxel
// returns    : the rendered top-down image
sf::Image Cartographer::renderChunkTopDown(
        BlockMap & map, s32 bx, s32 by, s32 minZ, s32 maxZ)
{
    const s32 offX = bx * Block::SIZE;
    const s32 offY = by * Block::SIZE;

    sf::Image img;
    img.create(Block::SIZE, Block::SIZE);

    // BUG FIX: hoist the height range and guard it -- when maxZ == minZ the
    // original computed (topVoxel.first - minZ) / 0.f, producing inf/NaN
    // shading factors.
    const s32 depth = maxZ - minZ;

    Color color;

    // For each Z-wise voxel line
    for(u8 y = 0; y < Block::SIZE; y++)
    for(u8 x = 0; x < Block::SIZE; x++)
    {
        // Get color of the top voxel
        std::pair<s32, Voxel> topVoxel =
            map.getUpperVoxel(offX + x, offY + y, minZ, maxZ);
        color = topVoxel.second.properties().averageColor;

        // Apply some height gradient (0.1 at minZ up to 1.0 at maxZ);
        // a degenerate single-layer range renders at full brightness.
        float k = 0.5f;
        if(topVoxel.first >= minZ)
        {
            k = (depth > 0)
                ? 0.1f + 0.9f * (float)(topVoxel.first - minZ) / (float)depth
                : 1.0f;
        }
        color.multiplyRGB(k);

        img.setPixel(x, y, color.toSfColor());
    }

    return img;
}
// Resolve the parent pointer of block b by re-reading its on-disk header
// and looking the embedded parent hash up in gBlockMap.
// Fatal (via sysErrFatal) on seek/read failure; logs a warning and leaves
// b->prev untouched when the parent hash is not in the index.
static void findBlockParent( Block *b ) {
    // Seek to this block's header inside its chain file.
    auto where = lseek64(
        b->chunk->getMap()->fd,
        b->chunk->getOffset(),
        SEEK_SET
    );
    if(where!=(signed)b->chunk->getOffset()) {
        sysErrFatal(
            "failed to seek into block chain file %s",
            b->chunk->getMap()->name.c_str()
        );
    }

    // Read the raw header bytes; a short read is treated as fatal too.
    uint8_t buf[gHeaderSize];
    auto nbRead = read(
        b->chunk->getMap()->fd,
        buf,
        gHeaderSize
    );
    if(nbRead<(signed)gHeaderSize) {
        sysErrFatal(
            "failed to read from block chain file %s",
            b->chunk->getMap()->name.c_str()
        );
    }

    // The parent hash is read at offset 4 in the buffer (presumably right
    // after a 4-byte version field -- confirm against the header layout).
    auto i = gBlockMap.find(4 + buf);
    if(unlikely(gBlockMap.end()==i)) {
        // Unknown parent: dump both hashes in hex for diagnostics and bail.
        uint8_t bHash[2*kSHA256ByteSize + 1];
        toHex(bHash, b->hash);

        uint8_t pHash[2*kSHA256ByteSize + 1];
        toHex(pHash, 4 + buf);

        warning(
            "in block %s failed to locate parent block %s",
            bHash,
            pHash
        );
        return;
    }

    b->prev = i->second;
}
// Parse a raw block header at p.
// Outputs:
//   size         - payload size read from the header
//   prev         - parent block if already indexed in gBlockMap, else 0
//   hash         - freshly allocated block hash (coin-specific algorithm,
//                  selected at compile time); set to 0 on bad magic
//   earlyMissCnt - incremented when the parent is not yet indexed
// On a magic mismatch only hash is written (to 0); other outputs are left
// untouched.
static void getBlockHeader(
    size_t        &size,
    Block        *&prev,
    uint8_t      *&hash,
    size_t        &earlyMissCnt,
    const uint8_t *p
) {
    // Sanity check: the record must start with the expected network magic.
    // NOTE(review): LOAD appears to read a value and advance p -- confirm
    // against the macro's definition.
    LOAD(uint32_t, magic, p);
    if(unlikely(gExpectedMagic!=magic)) {
        hash = 0;
        return;
    }

    // Next field: block payload size.
    LOAD(uint32_t, sz, p);
    size = sz;
    prev = 0;
    hash = allocHash256();

    // Hash the header with the algorithm of the coin being built.
    #if defined(DARKCOIN)
        h9(hash, p, gHeaderSize);
    #elif defined(PAYCON)
        h13(hash, p, gHeaderSize);
    #elif defined(MARTEXCOIN)
        h13(hash, p, gHeaderSize);
    #elif defined(CLAM)
        // CLAM: double-SHA256 from header version 7 on, scrypt before that.
        auto pBis = p;
        LOAD(uint32_t, nVersion, pBis);
        if(6<nVersion) {
            sha256Twice(hash, p, gHeaderSize);
        } else {
            scrypt(hash, p, gHeaderSize);
        }
    #elif defined(JUMBUCKS)
        scrypt(hash, p, gHeaderSize);
    #else
        sha256Twice(hash, p, gHeaderSize);
    #endif

    // The parent hash is looked up at offset 4 into the header; if the parent
    // block was already seen we can link immediately, otherwise count an
    // early miss (to be resolved in a later pass).
    auto i = gBlockMap.find(p + 4);
    if(likely(gBlockMap.end()!=i)) {
        prev = i->second;
    } else {
        ++earlyMissCnt;
    }
}
// Link a block into the chain: walk up the parent pointers until an ancestor
// of known height is found, then walk back down assigning consecutive
// heights and tracking the chain tip (gMaxHeight / gMaxBlock).
// Blocks without data get a degenerate zeroed link state.
// Logs a warning and aborts (leaving heights unset) if a parent hash cannot
// be found in gBlockMap.
static void linkBlock( Block *block ) {
    if(unlikely(0==block->data)) {
        block->height = 0;
        block->prev = 0;
        block->next = 0;
        return;
    }

    // Climb towards the genesis block until we hit a block whose height is
    // already known, threading next/prev pointers as we go.
    int depth = 0;
    Block *b = block;
    while(b->height<0) {
        auto i = gBlockMap.find(4 + b->data);
        if(unlikely(gBlockMap.end()==i)) {
            uint8_t buf[2*kSHA256ByteSize + 1];
            toHex(buf, 4 + b->data);
            warning(
                "at depth %d in chain, failed to locate parent block %s",
                depth,
                buf
            );
            return;
        }
        Block *prev = i->second;
        prev->next = b;
        b->prev = prev;
        b = prev;
        ++depth;
    }

    // Walk back down from the known ancestor, assigning heights.
    uint64_t h = b->height;
    while(block!=b) {
        Block *next = b->next;
        b->height = h;
        b->next = 0;
        if(likely(gMaxHeight<h)) {
            gMaxHeight = h;
            gMaxBlock = b;
        }
        b = next;
        ++h;
    }

    // BUG FIX: the loop above stops *before* processing the starting block,
    // so the block being linked never had its height assigned and was never
    // considered as the chain tip -- gMaxBlock could never point at a tip.
    block->height = h;
    if(likely(gMaxHeight<h)) {
        gMaxHeight = h;
        gMaxBlock = block;
    }
}
// Prepare the global TX and block hash tables: configure their empty keys
// and pre-size them from the total chain size, using tx/block densities
// measured on a reference chain, with a 1.5x safety margin.
static void initHashtables() {
    gTXMap.setEmptyKey(empty);
    gBlockMap.setEmptyKey(empty);

    // Sum the sizes of all chain files.
    uint64_t totalSize = 0;
    for(const auto &m : mapVec) {
        totalSize += m.size;
    }

    // Transactions-per-byte ratio from a reference chain snapshot.
    double txPerBytes = 3976774.0 / 1713189944.0;
    size_t nbTxEstimate = 1.5 * txPerBytes * totalSize;
    gTXMap.resize(nbTxEstimate);

    // Blocks-per-byte ratio from the same snapshot.
    double blocksPerBytes = 184284.0 / 1713189944.0;
    size_t nbBlockEstimate = 1.5 * blocksPerBytes * totalSize;
    gBlockMap.resize(nbBlockEstimate);
}
// Prepare the global TXO and block hash tables: configure their empty keys
// and pre-size them from gChainSize using average bytes-per-entry figures,
// padded by 10%. The TXO map is only sized when upstream links are needed.
static void initHashtables() {
    info("initializing hash tables");

    gTXOMap.setEmptyKey(empty);
    gBlockMap.setEmptyKey(empty);

    // ~542 bytes per transaction on average.
    const double kAvgBytesPerTX = 542.0;
    const size_t nbTxEstimate = (size_t)(1.1 * (gChainSize / kAvgBytesPerTX));
    if(gNeedUpstream) {
        gTXOMap.resize(nbTxEstimate);
    }

    // ~140KB per block on average (kept as int: integer division intended).
    const int kAvgBytesPerBlock = 140000;
    const size_t nbBlockEstimate =
        (size_t)(1.1 * (gChainSize / kAvgBytesPerBlock));
    gBlockMap.resize(nbBlockEstimate);

    info("estimated number of blocks = %.2fK", 1e-3*nbBlockEstimate);
    info("estimated number of transactions = %.2fM", 1e-6*nbTxEstimate);
    info("done initializing hash tables - mem = %.3f Gigs", getMem());
}
// Prepare the global TXO and block hash tables: compute the total chain
// size, configure the maps' empty keys, and pre-size both maps using
// entry-per-byte ratios from a reference chain, padded by 10%.
static void initHashtables() {
    info("initializing hash tables");

    gTXOMap.setEmptyKey(empty);
    gBlockMap.setEmptyKey(empty);

    // Total up the chain file sizes.
    gChainSize = 0;
    for(auto it = mapVec.begin(); it != mapVec.end(); ++it) {
        gChainSize += it->size;
    }

    // Transactions-per-byte ratio measured on a reference chain.
    const double txPerBytes = 52149122.0 / 26645195995.0;
    const auto nbTxEstimate = (size_t)(1.1 * txPerBytes * gChainSize);
    gTXOMap.resize(nbTxEstimate);

    // Blocks-per-byte ratio measured on the same chain.
    const double blocksPerBytes = 331284.0 / 26645195995.0;
    const auto nbBlockEstimate = (size_t)(1.1 * blocksPerBytes * gChainSize);
    gBlockMap.resize(nbBlockEstimate);

    info("estimated number of blocks = %.2fK", 1e-3*nbBlockEstimate);
    info("estimated number of transactions = %.2fM", 1e-6*nbTxEstimate);
}
void Cartographer::renderWorldToFile( const std::string worldDir, const std::string filename) { std::cout << "Cartographer: fetching world files..." << std::endl; MapLoader loader(worldDir); std::set<Vector3i> blockPositions; loader.getAllBlockPositions(blockPositions); std::cout << "| Found " << blockPositions.size() << " block files." << std::endl; if(blockPositions.size() == 0) { std::cout << "Cartographer: the world is empty : aborted" << std::endl; return; } std::cout << "Cartographer: Computing world edges... " << std::endl; Vector3i min, max; for(auto & pos : blockPositions) { if(pos.x < min.x) min.x = pos.x; if(pos.x > max.x) max.x = pos.x; if(pos.y < min.y) min.y = pos.y; if(pos.y > max.y) max.y = pos.y; if(pos.z < min.z) min.z = pos.z; if(pos.z > max.z) max.z = pos.z; } std::cout << "| min=" << min << ", max=" << max << std::endl; const Vector3i area = max - min; if( area.x >= MAX_SIZE_BLOCKS || area.y >= MAX_SIZE_BLOCKS || area.z >= MAX_SIZE_BLOCKS) { std::cout << "ERROR: Cartographer: cannot perform a full one-picture " << "rendering, the world is too big." << std::endl; return; } std::cout << "| Done." << std::endl; std::cout << "Cartographer: rendering..." << std::endl; BlockMap map; Vector3i pos; Cartography cartography; for(pos.x = min.x; pos.x <= max.x; pos.x++) { for(pos.y = min.y; pos.y <= max.y; pos.y++) { // Load a Z-wise chunk for(pos.z = min.z; pos.z <= max.z; pos.z++) { if(loader.isBlockOnHardDrive(pos)) { Block * b = loader.loadBlock(pos); if(b != 0) map.setBlock(b); } } // Render the chunk sf::Image pic = renderChunkTopDown( map, pos.x, pos.y, Block::SIZE * min.z, Block::SIZE * (max.z+1)); // Add it to the cartography cartography.setPictureFromImage(Vector2i(pos.x, pos.y), pic); // Clear map map.clear(); } // Progress int p = 100.f * (float)(pos.x - min.x) / (float)(max.x - min.x); std::cout << "| Progress : " << p << "%" << std::endl; } std::cout << "| Done." << std::endl; std::cout << "Cartographer: Saving cartography..." 
<< std::endl; cartography.saveAsBigImage(filename); std::cout << "Cartographer: Finished." << std::endl; }
// Return the Benders block index registered for variable x,
// or -1 when x is not present in blockMap.
IloInt getBlock(IloNumVar x) const {
    BlockMap::const_iterator const pos = blockMap.find(x);
    if (pos == blockMap.end())
        return -1;
    return pos->second;
}
// Build the facility-location model:
//   minimize   fixed opening costs + service costs
//   subject to: every customer served, service only from open factories,
//               and at most NFACTORY - 1 factories open.
// Side effects: fills objMap (objective coefficient per variable), blockMap
// (block index per variable, -1 = not in a customer block) and intersectMap
// (set of rows each variable appears in), and sets nblocks/objSense.
Example(IloEnv env) : nblocks(0), model(env), vars(env), ranges(env) {
    // Model data.
    // fixed[] is the fixed cost for opening a facility,
    // cost[i,j] is the cost for serving customer i from facility j.
    static double const fixed[] = { 2.0, 3.0, 3.0 };
    static double const cost[] = { 2.0, 3.0, 4.0, 5.0, 7.0,
                                   4.0, 3.0, 1.0, 2.0, 6.0,
                                   5.0, 4.0, 2.0, 1.0, 3.0 };
    // Derive dimensions from the data tables above.
#define NFACTORY ((CPXDIM)(sizeof(fixed) / sizeof(fixed[0])))
#define NCUSTOMER ((CPXDIM)((sizeof(cost) / sizeof(cost[0])) / NFACTORY))
    nblocks = NCUSTOMER;

    IloExpr obj(env);

    // Create integer y variables (y[f] = 1 iff factory f is open).
    IloNumVarArray y(env);
    for (IloInt f = 0; f < NFACTORY; ++f) {
        std::stringstream s;
        s << "y" << f;
        IloIntVar v(env, 0, 1, s.str().c_str());
        obj += fixed[f] * v;
        objMap[v] = fixed[f];
        y.add(v);
        // y variables are not assigned to any customer block.
        blockMap.insert(BlockMap::value_type(v, -1));
        intersectMap.insert(IntersectMap::value_type(v, RowSet()));
    }

    // Create continuous x variables (x[f,c] = service of customer c by
    // factory f); each x belongs to its customer's block.
    IloNumVarArray x(env);
    for (IloInt f = 0; f < NFACTORY; ++f) {
        for (IloInt c = 0; c < NCUSTOMER; ++c) {
            std::stringstream s;
            s << "x" << f << "#" << c;
            IloNumVar v(env, 0.0, IloInfinity, s.str().c_str());
            obj += v * cost[f * NCUSTOMER + c];
            objMap[v] = cost[f * NCUSTOMER + c];
            x.add(v);
            blockMap.insert(BlockMap::value_type(v, c));
            intersectMap.insert(IntersectMap::value_type(v, RowSet()));
        }
    }
    vars.add(y);
    vars.add(x);
    model.add(vars);

    // Add objective function.
    model.add(IloMinimize(env, obj, "obj"));
    objSense = IloObjective::Minimize;
    obj.end();

    // Satisfy each customer's demand: sum over f of x[f,c] >= 1.
    for (IloInt c = 0; c < NCUSTOMER; ++c) {
        std::stringstream s;
        s << "c1_" << c;
        IloRange r(env, 1.0, IloInfinity, s.str().c_str());
        IloExpr lhs(env);
        for (IloInt f = 0; f < NFACTORY; ++f) {
            lhs += x[f * NCUSTOMER + c];
            // Record that x[f,c] appears in row r.
            intersectMap[x[f * NCUSTOMER + c]].insert(r);
        }
        r.setExpr(lhs);
        ranges.add(r);
        lhs.end();
    }

    // A factory must be open if we service from it: y[f] - x[f,c] >= 0.
    for (IloInt c = 0; c < NCUSTOMER; ++c) {
        for (IloInt f = 0; f < NFACTORY; ++f) {
            std::stringstream s;
            s << "c2_" << c << "#" << f;
            IloRange r(env, 0.0, IloInfinity, s.str().c_str());
            intersectMap[x[f * NCUSTOMER + c]].insert(r);
            intersectMap[y[f]].insert(r);
            r.setExpr(-x[f * NCUSTOMER + c] + y[f]);
            ranges.add(r);
        }
    }

    // Capacity constraint: at most NFACTORY - 1 factories may be open.
    IloRange r(env, -IloInfinity, NFACTORY - 1, "c3");
    IloExpr lhs(env);
    for (IloInt f = 0; f < NFACTORY; ++f) {
        lhs += y[f];
        intersectMap[y[f]].insert(r);
    }
    r.setExpr(lhs);
    ranges.add(r);
    lhs.end();

    model.add(ranges);

#undef NFACTORY
#undef NCUSTOMER
}
// Merge every exit instruction in the kernel (Exit/Ret, but not Trap) into a
// single new exit block, so the CFG has exactly one exiting basic block.
// With zero or one exit the CFG is left unchanged (the lone exit's opcode is
// normalized to Ret for functions / Exit for kernels).
// Returns true iff the CFG was modified.
bool SimplifyControlFlowGraphPass::_mergeExitBlocks(ir::IRKernel& k) {
    typedef std::unordered_map<ir::ControlFlowGraph::iterator,
        ir::ControlFlowGraph::instruction_iterator> BlockMap;

    report(" Merging exit blocks...");

    BlockMap exitBlocks;

    // Find all blocks with exit instructions
    for(ir::ControlFlowGraph::iterator block = k.cfg()->begin();
        block != k.cfg()->end(); ++block) {
        for(ir::ControlFlowGraph::instruction_iterator
            instruction = block->instructions.begin();
            instruction != block->instructions.end(); ++instruction) {
            ir::PTXInstruction& ptx =
                static_cast<ir::PTXInstruction&>(**instruction);

            if(ptx.isExit() && ptx.opcode != ir::PTXInstruction::Trap) {
                // There should be an edge to the exit block
                assertM(block->find_out_edge(k.cfg()->get_exit_block())
                    != block->out_edges.end(),
                    "No edge from " << block->label() << " to exit node.");

                // Only the first exit per block is recorded; the break keeps
                // one map entry per block.
                exitBlocks.insert(std::make_pair(block, instruction));
                break;
            }
        }
    }

    // If there is only one/zero blocks, then don't change anything
    if(exitBlocks.size() < 2) {
        if(exitBlocks.size() == 1) {
            // Normalize the lone exit opcode: Ret inside a function,
            // Exit inside a kernel.
            ir::PTXInstruction& ptx = static_cast<ir::PTXInstruction&>(
                **exitBlocks.begin()->second);
            if(k.function()) {
                ptx.opcode = ir::PTXInstruction::Ret;
            }
            else {
                ptx.opcode = ir::PTXInstruction::Exit;
            }
        }
        return false;
    }

    // Otherwise...
    // 1) create a new exit block
    ir::ControlFlowGraph::iterator newExit = k.cfg()->insert_block(
        ir::BasicBlock(k.cfg()->newId()));

    // Iterate over a copy of the exit node's in-edges: the loop below
    // removes edges, which would invalidate iteration over the live list.
    ir::BasicBlock::EdgePointerVector deletedEdges =
        k.cfg()->get_exit_block()->in_edges;

    // 1a) Create edges targetting the new block
    for(ir::ControlFlowGraph::edge_pointer_iterator
        edge = deletedEdges.begin(); edge != deletedEdges.end(); ++edge) {
        // Re-point each edge at newExit, preserving its type.
        k.cfg()->insert_edge(ir::Edge((*edge)->head, newExit, (*edge)->type));
        k.cfg()->remove_edge(*edge);
    }

    // The new block falls through into the CFG's canonical exit node.
    k.cfg()->insert_edge(ir::Edge(newExit, k.cfg()->get_exit_block(),
        ir::Edge::FallThrough));

    // 2) Delete the instructions from their blocks
    for(BlockMap::iterator block = exitBlocks.begin();
        block != exitBlocks.end(); ++block) {
        report("  merging block " << block->first->label());

        // 2a) Insert a branch from blocks with branch edges
        ir::ControlFlowGraph::edge_pointer_iterator edge =
            newExit->find_in_edge(block->first);

        if((*edge)->type == ir::Edge::Branch) {
            // Branch edges need an explicit (uniform) branch instruction
            // to reach the new exit block.
            ir::PTXInstruction* newBranch = new ir::PTXInstruction(
                ir::PTXInstruction::Bra, ir::PTXOperand(newExit->label()));
            newBranch->uni = true;
            block->first->instructions.push_back(newBranch);
        }

        // Remove (and free) the old exit instruction.
        delete *block->second;
        block->first->instructions.erase(block->second);
    }

    // 3 Add an appropriate exit instruction to the new exit block
    if(k.function()) {
        newExit->instructions.push_back(
            new ir::PTXInstruction(ir::PTXInstruction::Ret));
    }
    else {
        newExit->instructions.push_back(
            new ir::PTXInstruction(ir::PTXInstruction::Exit));
    }

    return true;
}