/*
 * Emit machine code for `unit' into `mainCode' (hot path) and `stubsCode'
 * (astubs), in the layout produced by layoutBlocks().  Optionally records
 * bytecode mappings into `bcMap' and per-block code ranges into `asmInfo'.
 *
 * Note: this variant takes pre-allocated registers (`regs') and writes
 * directly into the supplied code blocks; it does no relocation.
 */
void genCodeImpl(CodeBlock& mainCode,
                 CodeBlock& stubsCode,
                 IRUnit& unit,
                 std::vector<TransBCMapping>* bcMap,
                 JIT::MCGenerator* mcg,
                 const RegAllocInfo& regs,
                 AsmInfo* asmInfo) {
  LiveRegs live_regs = computeLiveRegs(unit, regs);
  CodegenState state(unit, regs, live_regs, asmInfo);

  // Returns: whether a block has already been emitted.
  DEBUG_ONLY auto isEmitted = [&](Block* block) {
    return state.addresses[block];
  };

  /*
   * Emit the given block on the supplied assembler. The `nextLinear'
   * is the next block that will be emitted on this assembler. If is
   * not the next block in control flow order, then emit a patchable jump
   * to the next flow block.
   */
  auto emitBlock = [&](CodeBlock& cb, Block* block, Block* nextLinear) {
    assert(!isEmitted(block));

    FTRACE(6, "genBlock {} on {}\n", block->id(),
           cb.base() == stubsCode.base() ? "astubs" : "a");

    auto const aStart      = cb.frontier();
    auto const astubsStart = stubsCode.frontier();
    // Fix up any jumps emitted earlier that targeted this block, now that
    // its start address is known.
    mcg->backEnd().patchJumps(cb, state, block);
    state.addresses[block] = aStart;

    // If the block ends with a Jmp and the next block is going to be
    // its target, we don't need to actually emit it.
    IRInstruction* last = &block->back();
    state.noTerminalJmp = last->op() == Jmp && nextLinear == last->taken();

    if (state.asmInfo) {
      state.asmInfo->asmRanges[block] = TcaRange(aStart, cb.frontier());
    }

    genBlock(unit, cb, stubsCode, mcg, state, block, bcMap);
    auto nextFlow = block->next();
    if (nextFlow && nextFlow != nextLinear) {
      // Fall-through successor is not the next block we'll emit here;
      // bridge the gap with an explicit forward jump.
      mcg->backEnd().emitFwdJmp(cb, nextFlow, state);
    }

    if (state.asmInfo) {
      // Re-record now that the block's code has actually been emitted.
      state.asmInfo->asmRanges[block] = TcaRange(aStart, cb.frontier());
      if (cb.base() != stubsCode.base()) {
        // Track any astubs code this main-area block emitted as a side
        // effect.
        state.asmInfo->astubRanges[block] = TcaRange(astubsStart,
                                                     stubsCode.frontier());
      }
    }
  };

  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    mcg->backEnd().emitTraceCall(mainCode, unit.bcOff());
  }

  auto const linfo = layoutBlocks(unit);

  // Emit the main-area blocks, then the astubs blocks, in layout order.
  for (auto it = linfo.blocks.begin(); it != linfo.astubsIt; ++it) {
    Block* nextLinear = boost::next(it) != linfo.astubsIt
      ? *boost::next(it) : nullptr;
    emitBlock(mainCode, *it, nextLinear);
  }
  for (auto it = linfo.astubsIt; it != linfo.blocks.end(); ++it) {
    Block* nextLinear = boost::next(it) != linfo.blocks.end()
      ? *boost::next(it) : nullptr;
    emitBlock(stubsCode, *it, nextLinear);
  }

  if (debug) {
    for (Block* UNUSED block : linfo.blocks) {
      assert(isEmitted(block));
    }
  }
}
void BackEnd::genCodeImpl(IRUnit& unit, AsmInfo* asmInfo) { ctr++; auto regs = allocateRegs(unit); assert(checkRegisters(unit, regs)); // calls checkCfg internally. Timer _t(Timer::codeGen); LiveRegs live_regs = computeLiveRegs(unit, regs); CodegenState state(unit, regs, live_regs, asmInfo); CodeBlock& mainCodeIn = mcg->code.main(); CodeBlock& coldCodeIn = mcg->code.cold(); CodeBlock* frozenCode = &mcg->code.frozen(); CodeBlock mainCode; CodeBlock coldCode; bool relocate = false; if (RuntimeOption::EvalJitRelocationSize && supportsRelocation() && coldCodeIn.canEmit(RuntimeOption::EvalJitRelocationSize * 3)) { /* * This is mainly to exercise the relocator, and ensure that its * not broken by new non-relocatable code. Later, it will be * used to do some peephole optimizations, such as reducing branch * sizes. * Allocate enough space that the relocated cold code doesn't * overlap the emitted cold code. */ static unsigned seed = 42; auto off = rand_r(&seed) & (cacheLineSize() - 1); coldCode.init(coldCodeIn.frontier() + RuntimeOption::EvalJitRelocationSize + off, RuntimeOption::EvalJitRelocationSize - off, "cgRelocCold"); mainCode.init(coldCode.frontier() + RuntimeOption::EvalJitRelocationSize + off, RuntimeOption::EvalJitRelocationSize - off, "cgRelocMain"); relocate = true; } else { /* * Use separate code blocks, so that attempts to use the mcg's * code blocks directly will fail (eg by overwriting the same * memory being written through these locals). 
*/ coldCode.init(coldCodeIn.frontier(), coldCodeIn.available(), coldCodeIn.name().c_str()); mainCode.init(mainCodeIn.frontier(), mainCodeIn.available(), mainCodeIn.name().c_str()); } if (frozenCode == &coldCodeIn) { frozenCode = &coldCode; } auto frozenStart = frozenCode->frontier(); auto coldStart DEBUG_ONLY = coldCodeIn.frontier(); auto mainStart DEBUG_ONLY = mainCodeIn.frontier(); size_t hhir_count{0}; { mcg->code.lock(); mcg->cgFixups().setBlocks(&mainCode, &coldCode, frozenCode); SCOPE_EXIT { mcg->cgFixups().setBlocks(nullptr, nullptr, nullptr); mcg->code.unlock(); }; if (RuntimeOption::EvalHHIRGenerateAsserts) { emitTraceCall(mainCode, unit.bcOff()); } auto const linfo = layoutBlocks(unit); auto main_start = mainCode.frontier(); auto cold_start = coldCode.frontier(); auto frozen_start = frozenCode->frontier(); Vasm vasm(&state.meta); auto& vunit = vasm.unit(); // create the initial set of vasm numbered the same as hhir blocks. for (uint32_t i = 0, n = unit.numBlocks(); i < n; ++i) { state.labels[i] = vunit.makeBlock(AreaIndex::Main); } vunit.roots.push_back(state.labels[unit.entry()]); vasm.main(mainCode); vasm.cold(coldCode); vasm.frozen(*frozenCode); for (auto it = linfo.blocks.begin(); it != linfo.blocks.end(); ++it) { auto block = *it; auto v = block->hint() == Block::Hint::Unlikely ? vasm.cold() : block->hint() == Block::Hint::Unused ? 
vasm.frozen() : vasm.main(); FTRACE(6, "genBlock {} on {}\n", block->id(), area_names[(unsigned)v.area()]); auto b = state.labels[block]; vunit.blocks[b].area = v.area(); v.use(b); hhir_count += genBlock(unit, v, vasm, state, block); assert(v.closed()); assert(vasm.main().empty() || vasm.main().closed()); assert(vasm.cold().empty() || vasm.cold().closed()); assert(vasm.frozen().empty() || vasm.frozen().closed()); } printUnit("after code-gen", vasm.unit()); vasm.finish(vasm_abi); if (state.asmInfo) { auto block = unit.entry(); state.asmInfo->asmRanges[block] = {main_start, mainCode.frontier()}; if (mainCode.base() != coldCode.base() && frozenCode != &coldCode) { state.asmInfo->acoldRanges[block] = {cold_start, coldCode.frontier()}; } if (mainCode.base() != frozenCode->base()) { state.asmInfo->afrozenRanges[block] = {frozen_start, frozenCode->frontier()}; } } } auto bcMap = &mcg->cgFixups().m_bcMap; if (!bcMap->empty()) { TRACE(1, "BCMAPS before relocation\n"); for (UNUSED auto& map : *bcMap) { TRACE(1, "%s %-6d %p %p %p\n", map.md5.toString().c_str(), map.bcStart, map.aStart, map.acoldStart, map.afrozenStart); } } assert(coldCodeIn.frontier() == coldStart); assert(mainCodeIn.frontier() == mainStart); if (relocate) { if (asmInfo) { printUnit(kRelocationLevel, unit, " before relocation ", ®s, asmInfo); } auto& be = mcg->backEnd(); RelocationInfo rel; size_t asm_count{0}; asm_count += be.relocate(rel, mainCodeIn, mainCode.base(), mainCode.frontier(), mcg->cgFixups()); asm_count += be.relocate(rel, coldCodeIn, coldCode.base(), coldCode.frontier(), mcg->cgFixups()); TRACE(1, "hhir-inst-count %ld asm %ld\n", hhir_count, asm_count); if (frozenCode != &coldCode) { rel.recordRange(frozenStart, frozenCode->frontier(), frozenStart, frozenCode->frontier()); } be.adjustForRelocation(rel, mcg->cgFixups()); be.adjustForRelocation(rel, asmInfo, mcg->cgFixups()); if (asmInfo) { static int64_t mainDeltaTot = 0, coldDeltaTot = 0; int64_t mainDelta = (mainCodeIn.frontier() - mainStart) 
- (mainCode.frontier() - mainCode.base()); int64_t coldDelta = (coldCodeIn.frontier() - coldStart) - (coldCode.frontier() - coldCode.base()); mainDeltaTot += mainDelta; HPHP::Trace::traceRelease("main delta after relocation: %" PRId64 " (%" PRId64 ")\n", mainDelta, mainDeltaTot); coldDeltaTot += coldDelta; HPHP::Trace::traceRelease("cold delta after relocation: %" PRId64 " (%" PRId64 ")\n", coldDelta, coldDeltaTot); } #ifndef NDEBUG auto& ip = mcg->cgFixups().m_inProgressTailJumps; for (size_t i = 0; i < ip.size(); ++i) { const auto& ib = ip[i]; assert(!mainCode.contains(ib.toSmash())); assert(!coldCode.contains(ib.toSmash())); } memset(mainCode.base(), 0xcc, mainCode.frontier() - mainCode.base()); memset(coldCode.base(), 0xcc, coldCode.frontier() - coldCode.base()); #endif } else { coldCodeIn.skip(coldCode.frontier() - coldCodeIn.frontier()); mainCodeIn.skip(mainCode.frontier() - mainCodeIn.frontier()); } if (asmInfo) { printUnit(kCodeGenLevel, unit, " after code gen ", ®s, asmInfo); } }
static void genCodeImpl(IRUnit& unit, AsmInfo* asmInfo) { auto regs = allocateRegs(unit); assert(checkRegisters(unit, regs)); // calls checkCfg internally. Timer _t(Timer::codeGen); LiveRegs live_regs = computeLiveRegs(unit, regs); CodegenState state(unit, regs, live_regs, asmInfo); // Returns: whether a block has already been emitted. DEBUG_ONLY auto isEmitted = [&](Block* block) { return state.addresses[block]; }; CodeBlock& mainCodeIn = mcg->code.main(); CodeBlock& coldCodeIn = mcg->code.cold(); CodeBlock* frozenCode = &mcg->code.frozen(); CodeBlock mainCode; CodeBlock coldCode; bool relocate = false; if (RuntimeOption::EvalJitRelocationSize && mcg->backEnd().supportsRelocation() && coldCodeIn.canEmit(RuntimeOption::EvalJitRelocationSize * 3)) { /* * This is mainly to exercise the relocator, and ensure that its * not broken by new non-relocatable code. Later, it will be * used to do some peephole optimizations, such as reducing branch * sizes. * Allocate enough space that the relocated cold code doesn't * overlap the emitted cold code. */ static unsigned seed = 42; auto off = rand_r(&seed) & (mcg->backEnd().cacheLineSize() - 1); coldCode.init(coldCodeIn.frontier() + RuntimeOption::EvalJitRelocationSize + off, RuntimeOption::EvalJitRelocationSize - off, "cgRelocCold"); mainCode.init(coldCode.frontier() + RuntimeOption::EvalJitRelocationSize + off, RuntimeOption::EvalJitRelocationSize - off, "cgRelocMain"); relocate = true; } else { /* * Use separate code blocks, so that attempts to use the mcg's * code blocks directly will fail (eg by overwriting the same * memory being written through these locals). 
*/ coldCode.init(coldCodeIn.frontier(), coldCodeIn.available(), coldCodeIn.name().c_str()); mainCode.init(mainCodeIn.frontier(), mainCodeIn.available(), mainCodeIn.name().c_str()); } if (frozenCode == &coldCodeIn) { frozenCode = &coldCode; } auto frozenStart = frozenCode->frontier(); auto coldStart DEBUG_ONLY = coldCodeIn.frontier(); auto mainStart DEBUG_ONLY = mainCodeIn.frontier(); auto bcMap = &mcg->cgFixups().m_bcMap; { mcg->code.lock(); mcg->cgFixups().setBlocks(&mainCode, &coldCode, frozenCode); SCOPE_EXIT { mcg->cgFixups().setBlocks(nullptr, nullptr, nullptr); mcg->code.unlock(); }; /* * Emit the given block on the supplied assembler. The `nextLinear' * is the next block that will be emitted on this assembler. If is * not the next block in control flow order, then emit a patchable jump * to the next flow block. */ auto emitBlock = [&](CodeBlock& cb, Block* block, Block* nextLinear) { assert(!isEmitted(block)); FTRACE(6, "genBlock {} on {}\n", block->id(), cb.base() == coldCode.base() ? "acold" : "a"); auto const aStart = cb.frontier(); auto const acoldStart = coldCode.frontier(); auto const afrozenStart = frozenCode->frontier(); mcg->backEnd().patchJumps(cb, state, block); state.addresses[block] = aStart; // If the block ends with a Jmp and the next block is going to be // its target, we don't need to actually emit it. 
IRInstruction* last = &block->back(); state.noTerminalJmp = last->op() == Jmp && nextLinear == last->taken(); if (state.asmInfo) { state.asmInfo->asmRanges[block] = TcaRange(aStart, cb.frontier()); } genBlock(unit, cb, coldCode, *frozenCode, state, block, bcMap); auto nextFlow = block->next(); if (nextFlow && nextFlow != nextLinear) { mcg->backEnd().emitFwdJmp(cb, nextFlow, state); } if (state.asmInfo) { state.asmInfo->asmRanges[block] = TcaRange(aStart, cb.frontier()); if (cb.base() != coldCode.base() && frozenCode != &coldCode) { state.asmInfo->acoldRanges[block] = TcaRange(acoldStart, coldCode.frontier()); } if (cb.base() != frozenCode->base()) { state.asmInfo->afrozenRanges[block] = TcaRange(afrozenStart, frozenCode->frontier()); } } }; if (RuntimeOption::EvalHHIRGenerateAsserts) { mcg->backEnd().emitTraceCall(mainCode, unit.bcOff()); } auto const linfo = layoutBlocks(unit); for (auto it = linfo.blocks.begin(); it != linfo.acoldIt; ++it) { Block* nextLinear = boost::next(it) != linfo.acoldIt ? *boost::next(it) : nullptr; emitBlock(mainCode, *it, nextLinear); } for (auto it = linfo.acoldIt; it != linfo.afrozenIt; ++it) { Block* nextLinear = boost::next(it) != linfo.afrozenIt ? *boost::next(it) : nullptr; emitBlock(coldCode, *it, nextLinear); } for (auto it = linfo.afrozenIt; it != linfo.blocks.end(); ++it) { Block* nextLinear = boost::next(it) != linfo.blocks.end() ? 
*boost::next(it) : nullptr; emitBlock(*frozenCode, *it, nextLinear); } if (debug) { for (Block* UNUSED block : linfo.blocks) { assert(isEmitted(block)); } } } assert(coldCodeIn.frontier() == coldStart); assert(mainCodeIn.frontier() == mainStart); if (relocate) { if (asmInfo) { printUnit(kRelocationLevel, unit, " before relocation ", ®s, asmInfo); } auto& be = mcg->backEnd(); RelocationInfo rel; be.relocate(rel, mainCodeIn, mainCode.base(), mainCode.frontier(), mcg->cgFixups()); be.relocate(rel, coldCodeIn, coldCode.base(), coldCode.frontier(), mcg->cgFixups()); if (frozenCode != &coldCode) { rel.recordRange(frozenStart, frozenCode->frontier(), frozenStart, frozenCode->frontier()); } be.adjustForRelocation(rel, mcg->cgFixups()); be.adjustForRelocation(rel, asmInfo, mcg->cgFixups()); if (asmInfo) { static int64_t mainDeltaTot = 0, coldDeltaTot = 0; int64_t mainDelta = (mainCodeIn.frontier() - mainStart) - (mainCode.frontier() - mainCode.base()); int64_t coldDelta = (coldCodeIn.frontier() - coldStart) - (coldCode.frontier() - coldCode.base()); mainDeltaTot += mainDelta; HPHP::Trace::traceRelease("main delta after relocation: %" PRId64 " (%" PRId64 ")\n", mainDelta, mainDeltaTot); coldDeltaTot += coldDelta; HPHP::Trace::traceRelease("cold delta after relocation: %" PRId64 " (%" PRId64 ")\n", coldDelta, coldDeltaTot); } #ifndef NDEBUG auto& ip = mcg->cgFixups().m_inProgressTailJumps; for (size_t i = 0; i < ip.size(); ++i) { const auto& ib = ip[i]; assert(!mainCode.contains(ib.toSmash())); assert(!coldCode.contains(ib.toSmash())); } memset(mainCode.base(), 0xcc, mainCode.frontier() - mainCode.base()); memset(coldCode.base(), 0xcc, coldCode.frontier() - coldCode.base()); #endif } else { coldCodeIn.skip(coldCode.frontier() - coldCodeIn.frontier()); mainCodeIn.skip(mainCode.frontier() - mainCodeIn.frontier()); } if (asmInfo) { printUnit(kCodeGenLevel, unit, " after code gen ", ®s, asmInfo); } }
/*
 * Build the table of block geometry templates.
 *
 * Each genBlock() call defines one block shape via the heights/positions of
 * its four top-face corners, given as four Vector3f values in the order:
 * back-left, back-right, front-left, front-right.  The optional trailing
 * bool flips the diagonal used to triangulate the top face.
 *
 * Index layout (from the per-entry comments below): 0 = full block,
 * 1-8 = half slopes (26 degrees), 9-40 = eighth-step slopes, 41-44 = 45-degree
 * slopes, 45-52 = diagonals, 53-61 = partial blocks, 64-67 = corner slopes.
 */
void Map_Geometry_Manager::Initialize() {
  // NOTE(review): capacity 128 but only indices 0-67 are filled here —
  // presumably head-room for future shapes; confirm Preallocate semantics.
  Blocks.Preallocate(128);

  //0 = none
  genBlock(Blocks[0], Vector3f(0.0f,0.0f,1.0f), Vector3f(1.0f,0.0f,1.0f),
           Vector3f(0.0f,1.0f,1.0f), Vector3f(1.0f,1.0f,1.0f));

  //1 - 2 = up 26 low, high
  genBlock(Blocks[1], Vector3f(0.0f,0.0f,0.5f), Vector3f(1.0f,0.0f,0.5f),
           Vector3f(0.0f,1.0f,0.0f), Vector3f(1.0f,1.0f,0.0f));
  genBlock(Blocks[2], Vector3f(0.0f,0.0f,1.0f), Vector3f(1.0f,0.0f,1.0f),
           Vector3f(0.0f,1.0f,0.5f), Vector3f(1.0f,1.0f,0.5f));

  //3 - 4 = down 26 low, high
  genBlock(Blocks[3], Vector3f(0.0f,0.0f,0.0f), Vector3f(1.0f,0.0f,0.0f),
           Vector3f(0.0f,1.0f,0.5f), Vector3f(1.0f,1.0f,0.5f));
  genBlock(Blocks[4], Vector3f(0.0f,0.0f,0.5f), Vector3f(1.0f,0.0f,0.5f),
           Vector3f(0.0f,1.0f,1.0f), Vector3f(1.0f,1.0f,1.0f));

  //5 - 6 = left 26 low, high
  genBlock(Blocks[5], Vector3f(0.0f,0.0f,0.5f), Vector3f(1.0f,0.0f,0.0f),
           Vector3f(0.0f,1.0f,0.5f), Vector3f(1.0f,1.0f,0.0f));
  genBlock(Blocks[6], Vector3f(0.0f,0.0f,1.0f), Vector3f(1.0f,0.0f,0.5f),
           Vector3f(0.0f,1.0f,1.0f), Vector3f(1.0f,1.0f,0.5f));

  //7 - 8 = right 26 low, high
  genBlock(Blocks[7], Vector3f(0.0f,0.0f,0.0f), Vector3f(1.0f,0.0f,0.5f),
           Vector3f(0.0f,1.0f,0.0f), Vector3f(1.0f,1.0f,0.5f));
  genBlock(Blocks[8], Vector3f(0.0f,0.0f,0.5f), Vector3f(1.0f,0.0f,1.0f),
           Vector3f(0.0f,1.0f,0.5f), Vector3f(1.0f,1.0f,1.0f));

  // The four loops below generate the 7-degree slopes in 1/8 height steps.
  //9 - 16 = up 7 low – high
  for (int div = 0; div < 8; div++)
    genBlock(Blocks[9+div], Vector3f(0.0f,0.0f,0.125f*(div+1)),
             Vector3f(1.0f,0.0f,0.125f*(div+1)),
             Vector3f(0.0f,1.0f,0.125f*(div+0)),
             Vector3f(1.0f,1.0f,0.125f*(div+0)));
  //33 - 40 = right 7 low – high
  for (int div = 0; div < 8; div++)
    genBlock(Blocks[33+div],Vector3f(0.0f,0.0f,0.125f*(div+0)),
             Vector3f(1.0f,0.0f,0.125f*(div+1)),
             Vector3f(0.0f,1.0f,0.125f*(div+0)),
             Vector3f(1.0f,1.0f,0.125f*(div+1)));
  //17 - 24 = down 7 low – high
  for (int div = 0; div < 8; div++)
    genBlock(Blocks[17+div],Vector3f(0.0f,0.0f,0.125f*(div+0)),
             Vector3f(1.0f,0.0f,0.125f*(div+0)),
             Vector3f(0.0f,1.0f,0.125f*(div+1)),
             Vector3f(1.0f,1.0f,0.125f*(div+1)));
  //25 - 32 = left 7 low – high
  for (int div = 0; div < 8; div++)
    genBlock(Blocks[25+div],Vector3f(0.0f,0.0f,0.125f*(div+1)),
             Vector3f(1.0f,0.0f,0.125f*(div+0)),
             Vector3f(0.0f,1.0f,0.125f*(div+1)),
             Vector3f(1.0f,1.0f,0.125f*(div+0)));

  //41 - 44 = 45 up,down,left,right
  genBlock(Blocks[41], Vector3f(0.0f,0.0f,1.0f), Vector3f(1.0f,0.0f,1.0f),
           Vector3f(0.0f,1.0f,0.0f), Vector3f(1.0f,1.0f,0.0f));
  genBlock(Blocks[42], Vector3f(0.0f,0.0f,0.0f), Vector3f(1.0f,0.0f,0.0f),
           Vector3f(0.0f,1.0f,1.0f), Vector3f(1.0f,1.0f,1.0f));
  genBlock(Blocks[43], Vector3f(0.0f,0.0f,1.0f), Vector3f(1.0f,0.0f,0.0f),
           Vector3f(0.0f,1.0f,1.0f), Vector3f(1.0f,1.0f,0.0f));
  genBlock(Blocks[44], Vector3f(0.0f,0.0f,0.0f), Vector3f(1.0f,0.0f,1.0f),
           Vector3f(0.0f,1.0f,0.0f), Vector3f(1.0f,1.0f,1.0f));

  //45 = diagonal, facing up left
  genBlock(Blocks[45], Vector3f(1.0f,0.0f,1.0f), Vector3f(1.0f,0.0f,1.0f),
           Vector3f(0.0f,1.0f,1.0f), Vector3f(1.0f,1.0f,1.0f),true);
  //46 = diagonal, facing up right
  genBlock(Blocks[46], Vector3f(0.0f,0.0f,1.0f), Vector3f(0.0f,0.0f,1.0f),
           Vector3f(0.0f,1.0f,1.0f), Vector3f(1.0f,1.0f,1.0f));
  //47 = diagonal, facing down left
  genBlock(Blocks[47], Vector3f(0.0f,0.0f,1.0f), Vector3f(1.0f,0.0f,1.0f),
           Vector3f(1.0f,1.0f,1.0f), Vector3f(1.0f,1.0f,1.0f));
  //48 = diagonal, facing down right
  genBlock(Blocks[48], Vector3f(0.0f,0.0f,1.0f), Vector3f(1.0f,0.0f,1.0f),
           Vector3f(0.0f,1.0f,1.0f), Vector3f(0.0f,1.0f,1.0f),true);

  //FIXME: texture problem
  //49 = 3 or 4-sided diagonal slope, facing up left
  genBlock(Blocks[49], Vector3f(0.0f,0.0f,0.0f), Vector3f(1.0f,0.0f,1.0f),
           Vector3f(0.0f,1.0f,1.0f), Vector3f(1.0f,1.0f,1.0f),true);
  //50 = 3 or 4-sided diagonal slope, facing up right
  genBlock(Blocks[50], Vector3f(0.0f,0.0f,1.0f), Vector3f(1.0f,0.0f,0.0f),
           Vector3f(0.0f,1.0f,1.0f), Vector3f(1.0f,1.0f,1.0f));
  //51 = 3 or 4-sided diagonal slope, facing down left
  genBlock(Blocks[51], Vector3f(0.0f,0.0f,1.0f), Vector3f(1.0f,0.0f,1.0f),
           Vector3f(0.0f,1.0f,0.0f), Vector3f(1.0f,1.0f,1.0f));
  //52 = 3 or 4-sided diagonal slope, facing down right
  genBlock(Blocks[52], Vector3f(0.0f,0.0f,1.0f), Vector3f(1.0f,0.0f,1.0f),
           Vector3f(0.0f,1.0f,1.0f), Vector3f(1.0f,1.0f,0.0f),true);

  //53 = partial block left
  genBlock(Blocks[53], Vector3f(0.0f,0.0f,1.0f), Vector3f(0.5f,0.0f,1.0f),
           Vector3f(0.0f,1.0f,1.0f), Vector3f(0.5f,1.0f,1.0f));
  //54 = partial block right
  genBlock(Blocks[54], Vector3f(0.5f,0.0f,1.0f), Vector3f(1.0f,0.0f,1.0f),
           Vector3f(0.5f,1.0f,1.0f), Vector3f(1.0f,1.0f,1.0f));
  //55 = partial block top
  genBlock(Blocks[55], Vector3f(0.0f,0.0f,1.0f), Vector3f(1.0f,0.0f,1.0f),
           Vector3f(0.0f,0.5f,1.0f), Vector3f(1.0f,0.5f,1.0f));
  //56 = partial block bottom
  genBlock(Blocks[56], Vector3f(0.0f,0.5f,1.0f), Vector3f(1.0f,0.5f,1.0f),
           Vector3f(0.0f,1.0f,1.0f), Vector3f(1.0f,1.0f,1.0f));
  //57 = partial block top left corner
  genBlock(Blocks[57], Vector3f(0.0f,0.0f,1.0f), Vector3f(0.5f,0.0f,1.0f),
           Vector3f(0.0f,0.5f,1.0f), Vector3f(0.5f,0.5f,1.0f));
  //58 = partial block top right corner
  genBlock(Blocks[58], Vector3f(0.5f,0.0f,1.0f), Vector3f(1.0f,0.0f,1.0f),
           Vector3f(0.5f,0.5f,1.0f), Vector3f(1.0f,0.5f,1.0f));
  //59 = partial block bottom right corner
  genBlock(Blocks[59], Vector3f(0.5f,0.5f,1.0f), Vector3f(1.0f,0.5f,1.0f),
           Vector3f(0.5f,1.0f,1.0f), Vector3f(1.0f,1.0f,1.0f));
  //60 = partial block bottom left corner
  genBlock(Blocks[60], Vector3f(0.0f,0.5f,1.0f), Vector3f(0.5f,0.5f,1.0f),
           Vector3f(0.0f,1.0f,1.0f), Vector3f(0.5f,1.0f,1.0f));
  //61 = partial centre block 16x16
  genBlock(Blocks[61], Vector3f(0.25f,0.25f,1.0f), Vector3f(0.75f,0.25f,1.0f),
           Vector3f(0.25f,0.75f,1.0f), Vector3f(0.75f,0.75f,1.0f));

  //62 = <unused>
  //63 = <indicates slope in block above, same as 0 in OpenGTA2>
  //genBlock(Blocks[63], Vector3f(0.0f,0.0f,1.0f), Vector3f(1.0f,0.0f,1.0f),
  //         Vector3f(0.0f,1.0f,1.0f), Vector3f(1.0f,1.0f,1.0f));

  // 64-67 = corner slopes.
  //Up left
  genBlock(Blocks[64], Vector3f(1.0f,1.0f,1.0f), Vector3f(1.0f,1.0f,1.0f),
           Vector3f(1.0f,0.0f,0.0f), Vector3f(0.0f,1.0f,0.0f));
  //Up right
  genBlock(Blocks[65], Vector3f(0.0f,1.0f,1.0f), Vector3f(0.0f,1.0f,1.0f),
           Vector3f(1.0f,1.0f,0.0f), Vector3f(0.0f,0.0f,0.0f));
  genBlock(Blocks[66], Vector3f(1.0f,0.0f,1.0f), Vector3f(1.0f,0.0f,1.0f),
           Vector3f(0.0f,0.0f,0.0f), Vector3f(1.0f,1.0f,0.0f));
  genBlock(Blocks[67], Vector3f(0.0f,0.0f,1.0f), Vector3f(0.0f,0.0f,1.0f),
           Vector3f(0.0f,1.0f,0.0f), Vector3f(1.0f,0.0f,0.0f));

  // Earlier flat-top placeholders for 64-67, kept for reference:
  /*genBlock(Blocks[64], Vector3f(0.0f,0.0f,1.0f), Vector3f(1.0f,0.0f,1.0f),
           Vector3f(0.0f,1.0f,1.0f), Vector3f(1.0f,1.0f,1.0f));
  //Blocks[64]->Coords[
  genBlock(Blocks[65], Vector3f(0.0f,0.0f,1.0f), Vector3f(1.0f,0.0f,1.0f),
           Vector3f(0.0f,1.0f,1.0f), Vector3f(1.0f,1.0f,1.0f));
  genBlock(Blocks[66], Vector3f(0.0f,0.0f,1.0f), Vector3f(1.0f,0.0f,1.0f),
           Vector3f(0.0f,1.0f,1.0f), Vector3f(1.0f,1.0f,1.0f));
  genBlock(Blocks[67], Vector3f(0.0f,0.0f,1.0f), Vector3f(1.0f,0.0f,1.0f),
           Vector3f(0.0f,1.0f,1.0f), Vector3f(1.0f,1.0f,1.0f));*/
}