PRIVATE inline Receiver::Rcv_state Receiver::vcpu_async_ipc(Sender const *sender) const { if (EXPECT_FALSE(state() & Thread_ipc_mask)) return Rs_not_receiving; Vcpu_state *vcpu = vcpu_state().access(); if (EXPECT_FALSE(!vcpu_irqs_enabled(vcpu))) return Rs_not_receiving; Receiver *self = const_cast<Receiver*>(this); if (this == current()) self->spill_user_state(); if (self->vcpu_enter_kernel_mode(vcpu)) vcpu = vcpu_state().access(); LOG_TRACE("VCPU events", "vcpu", this, Vcpu_log, l->type = 1; l->state = vcpu->_saved_state; l->ip = Mword(sender); l->sp = regs()->sp(); l->space = ~0; //vcpu_user_space() ? static_cast<Task*>(vcpu_user_space())->dbg_id() : ~0; );
size_t Call6(std::shared_ptr<unwindstack::Memory>& process_memory, unwindstack::Maps* maps) { std::unique_ptr<unwindstack::Regs> regs(unwindstack::Regs::CreateFromLocal()); unwindstack::RegsGetLocal(regs.get()); unwindstack::Unwinder unwinder(32, maps, regs.get(), process_memory); unwinder.Unwind(); return unwinder.NumFrames(); }
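// A minimal caller sketch for Call6 above — an assumption about how a harness would wire it up, not taken from the source file. LocalMaps and Memory::CreateProcessMemory are the usual libunwindstack entry points for unwinding the current process.
#include <unistd.h>
#include <unwindstack/Maps.h>
#include <unwindstack/Memory.h>
void CallLocalUnwind() {  // hypothetical helper name
  unwindstack::LocalMaps maps;
  if (!maps.Parse()) return;
  std::shared_ptr<unwindstack::Memory> process_memory =
      unwindstack::Memory::CreateProcessMemory(getpid());
  size_t frames = Call6(process_memory, &maps);  // frame count of this call stack
  (void)frames;
}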
Mat RegionSaliency::GetRCNoColorConversion(const Mat &img3f, double sigmaDist, double segK, int segMinSize, double segSigma) { Mat regIdx1i, colorIdx1i, regSal1v, tmp, _img3f, color3fv; if (Quantize(img3f, colorIdx1i, color3fv, tmp) <= 2) // Color quantization return Mat::zeros(img3f.size(), CV_32F); _img3f = img3f.clone(); // cvtColor(img3f, _img3f, CV_BGR2Lab); // cvtColor(color3fv, color3fv, CV_BGR2Lab); int regNum = SegmentImage(_img3f, regIdx1i, segSigma, segK, segMinSize); vector<Region> regs(regNum); BuildRegions(regIdx1i, regs, colorIdx1i, color3fv.cols); RegionContrast(regs, color3fv, regSal1v, sigmaDist); Mat sal1f = Mat::zeros(img3f.size(), CV_32F); cv::normalize(regSal1v, regSal1v, 0, 1, NORM_MINMAX, CV_32F); float* regSal = (float*)regSal1v.data; for (int r = 0; r < img3f.rows; r++){ const int* regIdx = regIdx1i.ptr<int>(r); float* sal = sal1f.ptr<float>(r); for (int c = 0; c < img3f.cols; c++) sal[c] = regSal[regIdx[c]]; } GaussianBlur(sal1f, sal1f, Size(3, 3), 0); return sal1f; }
bool UnwindStackOffline::Unwind(size_t num_ignore_frames, void* ucontext) { if (ucontext == nullptr) { return false; } unwindstack::ArchEnum arch; switch (arch_) { case ARCH_ARM: arch = unwindstack::ARCH_ARM; break; case ARCH_ARM64: arch = unwindstack::ARCH_ARM64; break; case ARCH_X86: arch = unwindstack::ARCH_X86; break; case ARCH_X86_64: arch = unwindstack::ARCH_X86_64; break; default: return false; } std::unique_ptr<unwindstack::Regs> regs(unwindstack::Regs::CreateFromUcontext(arch, ucontext)); return Backtrace::Unwind(regs.get(), GetMap(), &frames_, num_ignore_frames, nullptr, &error_); }
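// Sketch of how the ucontext-derived Regs above could drive an Unwinder directly, mirroring the local path in Call6 — an illustration, not the Backtrace implementation. 'maps' and 'process_memory' are assumed to describe the target process, and ARCH_ARM64 is an arbitrary example.
std::unique_ptr<unwindstack::Regs> regs(
    unwindstack::Regs::CreateFromUcontext(unwindstack::ARCH_ARM64, ucontext));
unwindstack::Unwinder unwinder(64, maps, regs.get(), process_memory);
unwinder.Unwind();
size_t num_frames = unwinder.NumFrames();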
Mat RegionSaliency::GetRCCB(const Mat &img3f, double sigmaDist, double segK, int segMinSize, double segSigma, double centerBiasWeight, double centerBiasHeightSigma, double centerBiasWidthSigma, const CenterBiasCombinationType_t cbct) { Mat regIdx1i, colorIdx1i, regSal1v, tmp, _img3f, color3fv; if (Quantize(img3f, colorIdx1i, color3fv, tmp) <= 2) // Color quantization return Mat::zeros(img3f.size(), CV_32F); cvtColor(img3f, _img3f, CV_BGR2Lab); cvtColor(color3fv, color3fv, CV_BGR2Lab); int regNum = SegmentImage(_img3f, regIdx1i, segSigma, segK, segMinSize); vector<Region> regs(regNum); BuildRegions(regIdx1i, regs, colorIdx1i, color3fv.cols); RegionContrast(regs, color3fv, regSal1v, sigmaDist); float* regsCenterBias = new float[regNum]; // the center-bias for each region float w0 = (float)centerBiasWidthSigma; // std. dev. of the Gaussian (width) float h0 = (float)centerBiasHeightSigma; // std. dev. of the Gaussian (height) for (int i = 0; i < regNum; i++) { const float x0 = 0.5; const float y0 = 0.5; regsCenterBias[i] = ( exp((-SQR(regs[i].centroid.x-x0))/SQR(w0)) * exp((-SQR(regs[i].centroid.y-y0))/SQR(h0)) ); } Mat sal1f = Mat::zeros(img3f.size(), CV_32F); cv::normalize(regSal1v, regSal1v, 0, 1, NORM_MINMAX, CV_32F); float* regSal = (float*)regSal1v.data; for (int r = 0; r < img3f.rows; r++) { const int* regIdx = regIdx1i.ptr<int>(r); float* sal = sal1f.ptr<float>(r); for (int c = 0; c < img3f.cols; c++) { switch (cbct) { case CB_LINEAR: sal[c] = (1-centerBiasWeight)*regSal[regIdx[c]] + centerBiasWeight*regsCenterBias[regIdx[c]]; break; case CB_PRODUCT: sal[c] = regSal[regIdx[c]] * regsCenterBias[regIdx[c]]; // weighting in this case would have no influence break; case CB_MAX: sal[c] = std::max((1-centerBiasWeight)*regSal[regIdx[c]], centerBiasWeight*regsCenterBias[regIdx[c]]); break; case CB_MIN: sal[c] = std::min((1-centerBiasWeight)*regSal[regIdx[c]], centerBiasWeight*regsCenterBias[regIdx[c]]); break; default: assert(false); exit(-1); } } } GaussianBlur(sal1f, sal1f, Size(3, 3), 0); delete [] regsCenterBias; return sal1f; }
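// The per-region center-bias term above, factored into a helper for clarity (illustrative only, not part of the original source): a separable Gaussian around the normalized image center (0.5, 0.5), with SQR(v) == v*v as in the surrounding code.
static inline float CenterBias(float cx, float cy, float w0, float h0) {
  return exp(-SQR(cx - 0.5f) / SQR(w0)) * exp(-SQR(cy - 0.5f) / SQR(h0));
}
// e.g. regsCenterBias[i] = CenterBias(regs[i].centroid.x, regs[i].centroid.y, w0, h0);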
TEST(RegsInfoTest, single_uint32_t) { RegsImplFake<uint32_t> regs(10); RegsInfo<uint32_t> info(&regs); regs[1] = 0x100; ASSERT_FALSE(info.IsSaved(1)); ASSERT_EQ(0x100U, info.Get(1)); ASSERT_EQ(10, info.Total()); uint32_t* value = info.Save(1); ASSERT_EQ(value, &regs[1]); regs[1] = 0x200; ASSERT_TRUE(info.IsSaved(1)); ASSERT_EQ(0x100U, info.Get(1)); ASSERT_EQ(0x200U, regs[1]); }
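// The contract the test above exercises, in isolation: Save(reg) returns a writable pointer into the live register file, while Get(reg) keeps returning the value captured at Save time. A sketch under exactly those semantics — useful when an evaluator needs a scratch register without losing its original value:
RegsImplFake<uint32_t> scratch_regs(10);
RegsInfo<uint32_t> scratch_info(&scratch_regs);
scratch_regs[2] = 0x42;
*scratch_info.Save(2) = 0xdead;           // clobber the live slot...
uint32_t original = scratch_info.Get(2);  // ...Get() still yields 0x42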
int libxl_test_timedereg(libxl_ctx *ctx, libxl_asyncop_how *ao_how) { int i; AO_CREATE(ctx, 0, ao_how); tao = ao; for (i=0; i<NTIMES; i++) { libxl__ev_time_init(&et[0][i]); libxl__ev_time_init(&et[1][i]); } regs(gc, 0); return AO_INPROGRESS; }
TEST(RegsInfoTest, single_uint64_t) { RegsImplFake<uint64_t> regs(20); RegsInfo<uint64_t> info(&regs); regs[3] = 0x300; ASSERT_FALSE(info.IsSaved(3)); ASSERT_EQ(0x300U, info.Get(3)); ASSERT_EQ(20, info.Total()); uint64_t* value = info.Save(3); ASSERT_EQ(value, &regs[3]); regs[3] = 0x400; ASSERT_TRUE(info.IsSaved(3)); ASSERT_EQ(0x300U, info.Get(3)); ASSERT_EQ(0x400U, regs[3]); }
void InputBase<T>::input(SubProcessor<T>& Proc, const vector<int>& args) { auto& input = Proc.input; for (int i = 0; i < Proc.P.num_players(); i++) input.reset(i); assert(args.size() % 2 == 0); int n_from_me = 0; if (Proc.Proc.opts.interactive and Proc.Proc.thread_num == 0) { for (size_t i = 1; i < args.size(); i += 2) n_from_me += (args[i] == Proc.P.my_num()); if (n_from_me > 0) cout << "Please input " << n_from_me << " numbers:" << endl; } for (size_t i = 0; i < args.size(); i += 2) { int n = args[i + 1]; if (n == Proc.P.my_num()) { long x = Proc.Proc.get_input(n_from_me > 0); input.add_mine(x); } else { input.add_other(n); } } if (n_from_me > 0) cout << "Thank you" << endl; input.send_mine(); vector<vector<int>> regs(Proc.P.num_players()); for (size_t i = 0; i < args.size(); i += 2) { regs[args[i + 1]].push_back(args[i]); } for (int i = 0; i < Proc.P.num_players(); i++) input.stop(i, regs[i]); }
/** Page fault handler. This handler suspends any ongoing IPC, then sets up page-fault IPC. Finally, the ongoing IPC's state (if any) is restored. @param pfa page-fault virtual address @param error_code page-fault error code. */ PRIVATE bool Thread::handle_page_fault_pager(Thread_ptr const &_pager, Address pfa, Mword error_code, L4_msg_tag::Protocol protocol) { #ifndef NDEBUG // do not handle user space page faults from kernel mode if we're // already handling a request if (EXPECT_FALSE(!PF::is_usermode_error(error_code) && thread_lock()->test() == Thread_lock::Locked)) { kdb_ke("Fiasco BUG: page fault, under lock"); panic("page fault in locked operation"); } #endif if (EXPECT_FALSE((state() & Thread_alien))) return false; Lock_guard<Cpu_lock> guard(&cpu_lock); unsigned char rights; Kobject_iface *pager = _pager.ptr(space(), &rights); if (!pager) { WARN("CPU%d: Pager of %lx is invalid (pfa=" L4_PTR_FMT ", errorcode=" L4_PTR_FMT ") to %lx (pc=%lx)\n", current_cpu(), dbg_id(), pfa, error_code, _pager.raw(), regs()->ip()); LOG_TRACE("Page fault invalid pager", "pf", this, __fmt_page_fault_invalid_pager, Log_pf_invalid *l = tbe->payload<Log_pf_invalid>(); l->cap_idx = _pager.raw(); l->err = error_code; l->pfa = pfa); pager = this; // block on ourselves }
void do_it(InlinedScope* s) { GrowableArray<NonTrivialNode*>* tests = s->typeTests(); int len = tests->length(); for (int i = 0; i < len; i++) { NonTrivialNode* n = tests->at(i); assert(n->doesTypeTests(), "shouldn't be in list"); if (n->deleted) continue; if (n->hasUnknownCode()) continue; // can't optimize - expects other klasses, so would get uncommon trap at run-time if (!theLoop->isInLoop(n)) continue; // not in this loop GrowableArray<PReg*> regs(4); GrowableArray<GrowableArray<klassOop>*> klasses(4); n->collectTypeTests(regs, klasses); for (int j = 0; j < regs.length(); j++) { PReg* r = regs.at(j); if (theLoop->defsInLoop(r) == 0) { // this test can be hoisted if (CompilerDebug || PrintLoopOpts) cout(PrintLoopOpts)->print("*moving type test of %s at N%d out of loop\n", r->name(), n->id()); hoistableTests->append(new HoistedTypeTest(n, r, klasses.at(j))); } } } }
TEST(RegsInfoTest, all) { RegsImplFake<uint64_t> regs(64); RegsInfo<uint64_t> info(&regs); for (uint32_t i = 0; i < 64; i++) { regs[i] = i * 0x100; ASSERT_EQ(i * 0x100, info.Get(i)) << "Reg " + std::to_string(i) + " failed."; } for (uint32_t i = 0; i < 64; i++) { ASSERT_FALSE(info.IsSaved(i)) << "Reg " + std::to_string(i) + " failed."; uint64_t* reg = info.Save(i); ASSERT_EQ(reg, &regs[i]) << "Reg " + std::to_string(i) + " failed."; *reg = i * 0x1000 + 0x100; ASSERT_EQ(i * 0x1000 + 0x100, regs[i]) << "Reg " + std::to_string(i) + " failed."; } for (uint32_t i = 0; i < 64; i++) { ASSERT_TRUE(info.IsSaved(i)) << "Reg " + std::to_string(i) + " failed."; ASSERT_EQ(i * 0x100, info.Get(i)) << "Reg " + std::to_string(i) + " failed."; } }
Mat CmSaliencyRC::GetRC(CMat &img3f, CMat &regIdx1i, int regNum, double sigmaDist) { Mat colorIdx1i, regSal1v, tmp, color3fv; int QuantizeNum = Quantize(img3f, colorIdx1i, color3fv, tmp); if (QuantizeNum == 2){ printf("QuantizeNum == 2, %d: %s\n", __LINE__, __FILE__); Mat sal; compare(colorIdx1i, 1, sal, CMP_EQ); sal.convertTo(sal, CV_32F, 1.0/255); return sal; } if (QuantizeNum <= 2) // Color quantization return Mat::zeros(img3f.size(), CV_32F); cvtColor(color3fv, color3fv, CV_BGR2Lab); vector<Region> regs(regNum); BuildRegions(regIdx1i, regs, colorIdx1i, color3fv.cols); RegionContrast(regs, color3fv, regSal1v, sigmaDist); Mat sal1f = Mat::zeros(img3f.size(), CV_32F); cv::normalize(regSal1v, regSal1v, 0, 1, NORM_MINMAX, CV_32F); float* regSal = (float*)regSal1v.data; for (int r = 0; r < img3f.rows; r++){ const int* regIdx = regIdx1i.ptr<int>(r); float* sal = sal1f.ptr<float>(r); for (int c = 0; c < img3f.cols; c++) sal[c] = regSal[regIdx[c]]; } Mat bdReg1u = GetBorderReg(regIdx1i, regNum, 0.02, 0.4); sal1f.setTo(0, bdReg1u); SmoothByHist(img3f, sal1f, 0.1f); SmoothByRegion(sal1f, regIdx1i, regNum); sal1f.setTo(0, bdReg1u); GaussianBlur(sal1f, sal1f, Size(3, 3), 0); return sal1f; }
static void occurs(libxl__egc *egc, libxl__ev_time *ev, const struct timeval *requested_abs) { EGC_GC; int i; int off = ev - &et[0][0]; LOG(DEBUG,"occurs[%d][%d] seq=%d", off/NTIMES, off%NTIMES, seq); switch (seq) { case 0: assert(ev == &et[0][1]); libxl__ev_time_deregister(gc, &et[0][0]); libxl__ev_time_deregister(gc, &et[0][2]); regs(gc, 1); libxl__ev_time_deregister(gc, &et[0][1]); break; case 1: case 2: assert(ev == &et[1][seq-1]); break; case 3: assert(ev == &et[1][2]); for (i=0; i<NTIMES; i++) { assert(!libxl__ev_time_isregistered(&et[0][i])); assert(!libxl__ev_time_isregistered(&et[1][i])); } libxl__ao_complete(egc, tao, 0); return; default: abort(); } seq++; }
/** Thread context switchin. Called on every re-activation of a thread (switch_exec()). This method is public only because it is called from assembly code in switch_cpu(). */ IMPLEMENT void Context::switchin_context(Context *from) { assert_kdb (this == current()); assert_kdb (state() & Thread_ready_mask); // Set kernel-esp in case we want to return to the user. // kmem::kernel_sp() returns a pointer to the kernel SP (in the // TSS) the CPU uses when next switching from user to kernel mode. // regs() + 1 returns a pointer to the end of our kernel stack. Cpu::cpus.cpu(cpu()).kernel_sp() = reinterpret_cast<Address>(regs() + 1); // switch to our page directory if necessary vcpu_aware_space()->switchin_context(from->vcpu_aware_space()); // load new segment selectors load_segments(); // update the global UTCB pointer to make the thread find its UTCB // using fs:[0] Mem_layout::user_utcb_ptr(current_cpu()) = utcb().usr(); }
// set up a register block used as an IPC parameter block for the // page fault IPC Syscall_frame r; Utcb *utcb = this->utcb().access(true); // save the UTCB fields affected by PF IPC Pf_msg_utcb_saver saved_utcb_fields(utcb); utcb->buf_desc = L4_buf_desc(0, 0, 0, L4_buf_desc::Inherit_fpu); utcb->buffers[0] = L4_msg_item::map(0).raw(); utcb->buffers[1] = L4_fpage::all_spaces().raw(); utcb->values[0] = PF::addr_to_msgword0 (pfa, error_code); utcb->values[1] = regs()->ip(); //PF::pc_to_msgword1 (regs()->ip(), error_code)); L4_timeout_pair timeout(L4_timeout::Never, L4_timeout::Never); L4_msg_tag tag(2, 0, 0, protocol); r.timeout(timeout); r.tag(tag); r.from(0); r.ref(L4_obj_ref(_pager.raw() << L4_obj_ref::Cap_shift, L4_obj_ref::Ipc_call_ipc)); pager->invoke(r.ref(), rights, &r, utcb); bool success = true; if (EXPECT_FALSE(r.tag().has_error()))
/// @par /// /// See the #rcConfig documentation for more information on the configuration parameters. /// /// @see rcAllocHeightfieldLayerSet, rcCompactHeightfield, rcHeightfieldLayerSet, rcConfig bool rcBuildHeightfieldLayers(rcContext* ctx, rcCompactHeightfield& chf, const int borderSize, const int walkableHeight, rcHeightfieldLayerSet& lset) { rcAssert(ctx); rcScopedTimer timer(ctx, RC_TIMER_BUILD_LAYERS); const int w = chf.width; const int h = chf.height; rcScopedDelete<unsigned char> srcReg((unsigned char*)rcAlloc(sizeof(unsigned char)*chf.spanCount, RC_ALLOC_TEMP)); if (!srcReg) { ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: Out of memory 'srcReg' (%d).", chf.spanCount); return false; } memset(srcReg,0xff,sizeof(unsigned char)*chf.spanCount); const int nsweeps = chf.width; rcScopedDelete<rcLayerSweepSpan> sweeps((rcLayerSweepSpan*)rcAlloc(sizeof(rcLayerSweepSpan)*nsweeps, RC_ALLOC_TEMP)); if (!sweeps) { ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: Out of memory 'sweeps' (%d).", nsweeps); return false; } // Partition walkable area into monotone regions. int prevCount[256]; unsigned char regId = 0; for (int y = borderSize; y < h-borderSize; ++y) { memset(prevCount,0,sizeof(int)*regId); unsigned char sweepId = 0; for (int x = borderSize; x < w-borderSize; ++x) { const rcCompactCell& c = chf.cells[x+y*w]; for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) { const rcCompactSpan& s = chf.spans[i]; if (chf.areas[i] == RC_NULL_AREA) continue; unsigned char sid = 0xff; // -x if (rcGetCon(s, 0) != RC_NOT_CONNECTED) { const int ax = x + rcGetDirOffsetX(0); const int ay = y + rcGetDirOffsetY(0); const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, 0); if (chf.areas[ai] != RC_NULL_AREA && srcReg[ai] != 0xff) sid = srcReg[ai]; } if (sid == 0xff) { sid = sweepId++; sweeps[sid].nei = 0xff; sweeps[sid].ns = 0; } // -y if (rcGetCon(s,3) != RC_NOT_CONNECTED) { const int ax = x + rcGetDirOffsetX(3); const int ay = y + rcGetDirOffsetY(3); const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, 3); const unsigned char nr = srcReg[ai]; if (nr != 0xff) { // Set neighbour when first valid neighbour is encountered. if (sweeps[sid].ns == 0) sweeps[sid].nei = nr; if (sweeps[sid].nei == nr) { // Update existing neighbour sweeps[sid].ns++; prevCount[nr]++; } else { // This is hit if there is more than one neighbour. // Invalidate the neighbour. sweeps[sid].nei = 0xff; } } } srcReg[i] = sid; } } // Create unique ID. for (int i = 0; i < sweepId; ++i) { // If the neighbour is set and there is only one continuous connection to it, // the sweep will be merged with the previous one, else new region is created. if (sweeps[i].nei != 0xff && prevCount[sweeps[i].nei] == (int)sweeps[i].ns) { sweeps[i].id = sweeps[i].nei; } else { if (regId == 255) { ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: Region ID overflow."); return false; } sweeps[i].id = regId++; } } // Remap local sweep ids to region ids. for (int x = borderSize; x < w-borderSize; ++x) { const rcCompactCell& c = chf.cells[x+y*w]; for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) { if (srcReg[i] != 0xff) srcReg[i] = sweeps[srcReg[i]].id; } } } // Allocate and init layer regions. 
const int nregs = (int)regId; rcScopedDelete<rcLayerRegion> regs((rcLayerRegion*)rcAlloc(sizeof(rcLayerRegion)*nregs, RC_ALLOC_TEMP)); if (!regs) { ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: Out of memory 'regs' (%d).", nregs); return false; } memset(regs, 0, sizeof(rcLayerRegion)*nregs); for (int i = 0; i < nregs; ++i) { regs[i].layerId = 0xff; regs[i].ymin = 0xffff; regs[i].ymax = 0; } // Find region neighbours and overlapping regions. for (int y = 0; y < h; ++y) { for (int x = 0; x < w; ++x) { const rcCompactCell& c = chf.cells[x+y*w]; unsigned char lregs[RC_MAX_LAYERS]; int nlregs = 0; for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) { const rcCompactSpan& s = chf.spans[i]; const unsigned char ri = srcReg[i]; if (ri == 0xff) continue; regs[ri].ymin = rcMin(regs[ri].ymin, s.y); regs[ri].ymax = rcMax(regs[ri].ymax, s.y); // Collect all region layers. if (nlregs < RC_MAX_LAYERS) lregs[nlregs++] = ri; // Update neighbours for (int dir = 0; dir < 4; ++dir) { if (rcGetCon(s, dir) != RC_NOT_CONNECTED) { const int ax = x + rcGetDirOffsetX(dir); const int ay = y + rcGetDirOffsetY(dir); const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, dir); const unsigned char rai = srcReg[ai]; if (rai != 0xff && rai != ri) { // Don't check return value -- if we cannot add the neighbor // it will just cause a few more regions to be created, which // is fine. addUnique(regs[ri].neis, regs[ri].nneis, RC_MAX_NEIS, rai); } } } } // Update overlapping regions. for (int i = 0; i < nlregs-1; ++i) { for (int j = i+1; j < nlregs; ++j) { if (lregs[i] != lregs[j]) { rcLayerRegion& ri = regs[lregs[i]]; rcLayerRegion& rj = regs[lregs[j]]; if (!addUnique(ri.layers, ri.nlayers, RC_MAX_LAYERS, lregs[j]) || !addUnique(rj.layers, rj.nlayers, RC_MAX_LAYERS, lregs[i])) { ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: layer overflow (too many overlapping walkable platforms). Try increasing RC_MAX_LAYERS."); return false; } } } } } } // Create 2D layers from regions. unsigned char layerId = 0; static const int MAX_STACK = 64; unsigned char stack[MAX_STACK]; int nstack = 0; for (int i = 0; i < nregs; ++i) { rcLayerRegion& root = regs[i]; // Skip already visited. if (root.layerId != 0xff) continue; // Start search. root.layerId = layerId; root.base = 1; nstack = 0; stack[nstack++] = (unsigned char)i; while (nstack) { // Pop front rcLayerRegion& reg = regs[stack[0]]; nstack--; for (int j = 0; j < nstack; ++j) stack[j] = stack[j+1]; const int nneis = (int)reg.nneis; for (int j = 0; j < nneis; ++j) { const unsigned char nei = reg.neis[j]; rcLayerRegion& regn = regs[nei]; // Skip already visited. if (regn.layerId != 0xff) continue; // Skip if the neighbour is overlapping root region. if (contains(root.layers, root.nlayers, nei)) continue; // Skip if the height range would become too large. const int ymin = rcMin(root.ymin, regn.ymin); const int ymax = rcMax(root.ymax, regn.ymax); if ((ymax - ymin) >= 255) continue; if (nstack < MAX_STACK) { // Deepen stack[nstack++] = (unsigned char)nei; // Mark layer id regn.layerId = layerId; // Merge current layers to root. for (int k = 0; k < regn.nlayers; ++k) { if (!addUnique(root.layers, root.nlayers, RC_MAX_LAYERS, regn.layers[k])) { ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: layer overflow (too many overlapping walkable platforms). Try increasing RC_MAX_LAYERS."); return false; } } root.ymin = rcMin(root.ymin, regn.ymin); root.ymax = rcMax(root.ymax, regn.ymax); } } } layerId++; } // Merge non-overlapping regions that are close in height. 
const unsigned short mergeHeight = (unsigned short)walkableHeight * 4; for (int i = 0; i < nregs; ++i) { rcLayerRegion& ri = regs[i]; if (!ri.base) continue; unsigned char newId = ri.layerId; for (;;) { unsigned char oldId = 0xff; for (int j = 0; j < nregs; ++j) { if (i == j) continue; rcLayerRegion& rj = regs[j]; if (!rj.base) continue; // Skip if the regions are not close to each other. if (!overlapRange(ri.ymin,ri.ymax+mergeHeight, rj.ymin,rj.ymax+mergeHeight)) continue; // Skip if the height range would become too large. const int ymin = rcMin(ri.ymin, rj.ymin); const int ymax = rcMax(ri.ymax, rj.ymax); if ((ymax - ymin) >= 255) continue; // Make sure that there is no overlap when merging 'ri' and 'rj'. bool overlap = false; // Iterate over all regions which have the same layerId as 'rj' for (int k = 0; k < nregs; ++k) { if (regs[k].layerId != rj.layerId) continue; // Check if region 'k' is overlapping region 'ri' // Index to 'regs' is the same as region id. if (contains(ri.layers,ri.nlayers, (unsigned char)k)) { overlap = true; break; } } // Cannot merge if regions overlap. if (overlap) continue; // Can merge i and j. oldId = rj.layerId; break; } // Could not find anything to merge with, stop. if (oldId == 0xff) break; // Merge for (int j = 0; j < nregs; ++j) { rcLayerRegion& rj = regs[j]; if (rj.layerId == oldId) { rj.base = 0; // Remap layerIds. rj.layerId = newId; // Add overlaid layers from 'rj' to 'ri'. for (int k = 0; k < rj.nlayers; ++k) { if (!addUnique(ri.layers, ri.nlayers, RC_MAX_LAYERS, rj.layers[k])) { ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: layer overflow (too many overlapping walkable platforms). Try increasing RC_MAX_LAYERS."); return false; } } // Update height bounds. ri.ymin = rcMin(ri.ymin, rj.ymin); ri.ymax = rcMax(ri.ymax, rj.ymax); } } } } // Compact layerIds unsigned char remap[256]; memset(remap, 0, 256); // Find number of unique layers. layerId = 0; for (int i = 0; i < nregs; ++i) remap[regs[i].layerId] = 1; for (int i = 0; i < 256; ++i) { if (remap[i]) remap[i] = layerId++; else remap[i] = 0xff; } // Remap ids. for (int i = 0; i < nregs; ++i) regs[i].layerId = remap[regs[i].layerId]; // No layers, return empty. if (layerId == 0) return true; // Create layers. rcAssert(lset.layers == 0); const int lw = w - borderSize*2; const int lh = h - borderSize*2; // Build contracted bbox for layers. float bmin[3], bmax[3]; rcVcopy(bmin, chf.bmin); rcVcopy(bmax, chf.bmax); bmin[0] += borderSize*chf.cs; bmin[2] += borderSize*chf.cs; bmax[0] -= borderSize*chf.cs; bmax[2] -= borderSize*chf.cs; lset.nlayers = (int)layerId; lset.layers = (rcHeightfieldLayer*)rcAlloc(sizeof(rcHeightfieldLayer)*lset.nlayers, RC_ALLOC_PERM); if (!lset.layers) { ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: Out of memory 'layers' (%d).", lset.nlayers); return false; } memset(lset.layers, 0, sizeof(rcHeightfieldLayer)*lset.nlayers); // Store layers. 
for (int i = 0; i < lset.nlayers; ++i) { unsigned char curId = (unsigned char)i; rcHeightfieldLayer* layer = &lset.layers[i]; const int gridSize = sizeof(unsigned char)*lw*lh; layer->heights = (unsigned char*)rcAlloc(gridSize, RC_ALLOC_PERM); if (!layer->heights) { ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: Out of memory 'heights' (%d).", gridSize); return false; } memset(layer->heights, 0xff, gridSize); layer->areas = (unsigned char*)rcAlloc(gridSize, RC_ALLOC_PERM); if (!layer->areas) { ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: Out of memory 'areas' (%d).", gridSize); return false; } memset(layer->areas, 0, gridSize); layer->cons = (unsigned char*)rcAlloc(gridSize, RC_ALLOC_PERM); if (!layer->cons) { ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: Out of memory 'cons' (%d).", gridSize); return false; } memset(layer->cons, 0, gridSize); // Find layer height bounds. int hmin = 0, hmax = 0; for (int j = 0; j < nregs; ++j) { if (regs[j].base && regs[j].layerId == curId) { hmin = (int)regs[j].ymin; hmax = (int)regs[j].ymax; } } layer->width = lw; layer->height = lh; layer->cs = chf.cs; layer->ch = chf.ch; // Adjust the bbox to fit the heightfield. rcVcopy(layer->bmin, bmin); rcVcopy(layer->bmax, bmax); layer->bmin[1] = bmin[1] + hmin*chf.ch; layer->bmax[1] = bmin[1] + hmax*chf.ch; layer->hmin = hmin; layer->hmax = hmax; // Update usable data region. layer->minx = layer->width; layer->maxx = 0; layer->miny = layer->height; layer->maxy = 0; // Copy height and area from compact heightfield. for (int y = 0; y < lh; ++y) { for (int x = 0; x < lw; ++x) { const int cx = borderSize+x; const int cy = borderSize+y; const rcCompactCell& c = chf.cells[cx+cy*w]; for (int j = (int)c.index, nj = (int)(c.index+c.count); j < nj; ++j) { const rcCompactSpan& s = chf.spans[j]; // Skip unassigned regions. if (srcReg[j] == 0xff) continue; // Skip if it does not belong to the current layer. unsigned char lid = regs[srcReg[j]].layerId; if (lid != curId) continue; // Update data bounds. layer->minx = rcMin(layer->minx, x); layer->maxx = rcMax(layer->maxx, x); layer->miny = rcMin(layer->miny, y); layer->maxy = rcMax(layer->maxy, y); // Store height and area type. const int idx = x+y*lw; layer->heights[idx] = (unsigned char)(s.y - hmin); layer->areas[idx] = chf.areas[j]; // Check connection. unsigned char portal = 0; unsigned char con = 0; for (int dir = 0; dir < 4; ++dir) { if (rcGetCon(s, dir) != RC_NOT_CONNECTED) { const int ax = cx + rcGetDirOffsetX(dir); const int ay = cy + rcGetDirOffsetY(dir); const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, dir); unsigned char alid = srcReg[ai] != 0xff ? regs[srcReg[ai]].layerId : 0xff; // Portal mask if (chf.areas[ai] != RC_NULL_AREA && lid != alid) { portal |= (unsigned char)(1<<dir); // Update height so that it matches on both sides of the portal. const rcCompactSpan& as = chf.spans[ai]; if (as.y > hmin) layer->heights[idx] = rcMax(layer->heights[idx], (unsigned char)(as.y - hmin)); } // Valid connection mask if (chf.areas[ai] != RC_NULL_AREA && lid == alid) { const int nx = ax - borderSize; const int ny = ay - borderSize; if (nx >= 0 && ny >= 0 && nx < lw && ny < lh) con |= (unsigned char)(1<<dir); } } } layer->cons[idx] = (portal << 4) | con; } } } if (layer->minx > layer->maxx) layer->minx = layer->maxx = 0; if (layer->miny > layer->maxy) layer->miny = layer->maxy = 0; } return true; }
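// A minimal call sketch for rcBuildHeightfieldLayers, assuming 'chf' was produced by the usual Recast pipeline (rcBuildCompactHeightfield plus area marking); 'cfg' fields are standard rcConfig members, but the wiring is illustrative.
rcHeightfieldLayerSet* lset = rcAllocHeightfieldLayerSet();
if (!lset || !rcBuildHeightfieldLayers(ctx, chf, cfg.borderSize,
                                       cfg.walkableHeight, *lset)) {
  // allocation failure or layer overflow — see the RC_LOG_ERROR paths above
}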
int GGLAssembler::scanline_core(const needs_t& needs, context_t const* c) { int64_t duration = ggl_system_time(); mBlendFactorCached = 0; mBlending = 0; mMasking = 0; mAA = GGL_READ_NEEDS(P_AA, needs.p); mDithering = GGL_READ_NEEDS(P_DITHER, needs.p); mAlphaTest = GGL_READ_NEEDS(P_ALPHA_TEST, needs.p) + GGL_NEVER; mDepthTest = GGL_READ_NEEDS(P_DEPTH_TEST, needs.p) + GGL_NEVER; mFog = GGL_READ_NEEDS(P_FOG, needs.p) != 0; mSmooth = GGL_READ_NEEDS(SHADE, needs.n) != 0; mBuilderContext.needs = needs; mBuilderContext.c = c; mBuilderContext.Rctx = reserveReg(R0); // context always in R0 mCbFormat = c->formats[ GGL_READ_NEEDS(CB_FORMAT, needs.n) ]; // ------------------------------------------------------------------------ decodeLogicOpNeeds(needs); decodeTMUNeeds(needs, c); mBlendSrc = ggl_needs_to_blendfactor(GGL_READ_NEEDS(BLEND_SRC, needs.n)); mBlendDst = ggl_needs_to_blendfactor(GGL_READ_NEEDS(BLEND_DST, needs.n)); mBlendSrcA = ggl_needs_to_blendfactor(GGL_READ_NEEDS(BLEND_SRCA, needs.n)); mBlendDstA = ggl_needs_to_blendfactor(GGL_READ_NEEDS(BLEND_DSTA, needs.n)); if (!mCbFormat.c[GGLFormat::ALPHA].h) { if ((mBlendSrc == GGL_ONE_MINUS_DST_ALPHA) || (mBlendSrc == GGL_DST_ALPHA)) { mBlendSrc = GGL_ONE; } if ((mBlendSrcA == GGL_ONE_MINUS_DST_ALPHA) || (mBlendSrcA == GGL_DST_ALPHA)) { mBlendSrcA = GGL_ONE; } if ((mBlendDst == GGL_ONE_MINUS_DST_ALPHA) || (mBlendDst == GGL_DST_ALPHA)) { mBlendDst = GGL_ONE; } if ((mBlendDstA == GGL_ONE_MINUS_DST_ALPHA) || (mBlendDstA == GGL_DST_ALPHA)) { mBlendDstA = GGL_ONE; } } // if we need the framebuffer, read it now const int blending = blending_codes(mBlendSrc, mBlendDst) | blending_codes(mBlendSrcA, mBlendDstA); // XXX: handle special cases, destination not modified... if ((mBlendSrc==GGL_ZERO) && (mBlendSrcA==GGL_ZERO) && (mBlendDst==GGL_ONE) && (mBlendDstA==GGL_ONE)) { // Destination unmodified (beware of logic ops) } else if ((mBlendSrc==GGL_ZERO) && (mBlendSrcA==GGL_ZERO) && (mBlendDst==GGL_ZERO) && (mBlendDstA==GGL_ZERO)) { // Destination is zero (beware of logic ops) } int fbComponents = 0; const int masking = GGL_READ_NEEDS(MASK_ARGB, needs.n); for (int i=0 ; i<4 ; i++) { const int mask = 1<<i; component_info_t& info = mInfo[i]; int fs = i==GGLFormat::ALPHA ? mBlendSrcA : mBlendSrc; int fd = i==GGLFormat::ALPHA ? mBlendDstA : mBlendDst; if (fs==GGL_SRC_ALPHA_SATURATE && i==GGLFormat::ALPHA) fs = GGL_ONE; info.masked = !!(masking & mask); info.inDest = !info.masked && mCbFormat.c[i].h && ((mLogicOp & LOGIC_OP_SRC) || (!mLogicOp)); if (mCbFormat.components >= GGL_LUMINANCE && (i==GGLFormat::GREEN || i==GGLFormat::BLUE)) { info.inDest = false; } info.needed = (i==GGLFormat::ALPHA) && (isAlphaSourceNeeded() || mAlphaTest != GGL_ALWAYS); info.replaced = !!(mTextureMachine.replaced & mask); info.iterated = (!info.replaced && (info.inDest || info.needed)); info.smooth = mSmooth && info.iterated; info.fog = mFog && info.inDest && (i != GGLFormat::ALPHA); info.blend = (fs != int(GGL_ONE)) || (fd > int(GGL_ZERO)); mBlending |= (info.blend ? mask : 0); mMasking |= (mCbFormat.c[i].h && info.masked) ? mask : 0; fbComponents |= mCbFormat.c[i].h ? 
mask : 0; } mAllMasked = (mMasking == fbComponents); if (mAllMasked) { mDithering = 0; } fragment_parts_t parts; // ------------------------------------------------------------------------ prolog(); // ------------------------------------------------------------------------ build_scanline_prolog(parts, needs); if (registerFile().status()) return registerFile().status(); // ------------------------------------------------------------------------ label("fragment_loop"); // ------------------------------------------------------------------------ { Scratch regs(registerFile()); if (mDithering) { // update the dither index. MOV(AL, 0, parts.count.reg, reg_imm(parts.count.reg, ROR, GGL_DITHER_ORDER_SHIFT)); ADD(AL, 0, parts.count.reg, parts.count.reg, imm( 1 << (32 - GGL_DITHER_ORDER_SHIFT))); MOV(AL, 0, parts.count.reg, reg_imm(parts.count.reg, ROR, 32 - GGL_DITHER_ORDER_SHIFT)); } // XXX: could we do an early alpha-test here in some cases? // It would probably be used only with smooth-alpha and no texture // (or no alpha component in the texture). // Early z-test if (mAlphaTest==GGL_ALWAYS) { build_depth_test(parts, Z_TEST|Z_WRITE); } else { // we cannot do the z-write here, because // it might be killed by the alpha-test later build_depth_test(parts, Z_TEST); } { // texture coordinates Scratch scratches(registerFile()); // texel generation build_textures(parts, regs); if (registerFile().status()) return registerFile().status(); } if ((blending & (FACTOR_DST|BLEND_DST)) || (mMasking && !mAllMasked) || (mLogicOp & LOGIC_OP_DST)) { // blending / logic_op / masking need the framebuffer mDstPixel.setTo(regs.obtain(), &mCbFormat); // load the framebuffer pixel comment("fetch color-buffer"); load(parts.cbPtr, mDstPixel); } if (registerFile().status()) return registerFile().status(); pixel_t pixel; int directTex = mTextureMachine.directTexture; if (directTex | parts.packed) { // note: we can't have both here // iterated color or direct texture pixel = directTex ? parts.texel[directTex-1] : parts.iterated; pixel.flags &= ~CORRUPTIBLE; } else { if (mDithering) { const int ctxtReg = mBuilderContext.Rctx; const int mask = GGL_DITHER_SIZE-1; parts.dither = reg_t(regs.obtain()); AND(AL, 0, parts.dither.reg, parts.count.reg, imm(mask)); ADDR_ADD(AL, 0, parts.dither.reg, ctxtReg, parts.dither.reg); LDRB(AL, parts.dither.reg, parts.dither.reg, immed12_pre(GGL_OFFSETOF(ditherMatrix))); } // allocate a register for the resulting pixel pixel.setTo(regs.obtain(), &mCbFormat, FIRST); build_component(pixel, parts, GGLFormat::ALPHA, regs); if (mAlphaTest!=GGL_ALWAYS) { // only handle the z-write part here. We know z-test // was successful, as well as alpha-test. build_depth_test(parts, Z_WRITE); } build_component(pixel, parts, GGLFormat::RED, regs); build_component(pixel, parts, GGLFormat::GREEN, regs); build_component(pixel, parts, GGLFormat::BLUE, regs); pixel.flags |= CORRUPTIBLE; } if (registerFile().status()) return registerFile().status(); if (pixel.reg == -1) { // be defensive here. if we're here it's probably // that this whole fragment is a no-op. pixel = mDstPixel; } if (!mAllMasked) { // logic operation build_logic_op(pixel, regs); // masking build_masking(pixel, regs); comment("store"); store(parts.cbPtr, pixel, WRITE_BACK); } } if (registerFile().status()) return registerFile().status(); // update the iterated color... 
if (parts.reload != 3) { build_smooth_shade(parts); } // update iterated z build_iterate_z(parts); // update iterated fog build_iterate_f(parts); SUB(AL, S, parts.count.reg, parts.count.reg, imm(1<<16)); B(PL, "fragment_loop"); label("epilog"); epilog(registerFile().touched()); if ((mAlphaTest!=GGL_ALWAYS) || (mDepthTest!=GGL_ALWAYS)) { if (mDepthTest!=GGL_ALWAYS) { label("discard_before_textures"); build_iterate_texture_coordinates(parts); } label("discard_after_textures"); build_smooth_shade(parts); build_iterate_z(parts); build_iterate_f(parts); if (!mAllMasked) { ADDR_ADD(AL, 0, parts.cbPtr.reg, parts.cbPtr.reg, imm(parts.cbPtr.size>>3)); } SUB(AL, S, parts.count.reg, parts.count.reg, imm(1<<16)); B(PL, "fragment_loop"); epilog(registerFile().touched()); } return registerFile().status(); }
bool CDVDStateSerializer::DVDToXMLState( std::string &xmlstate, const dvd_state_t *state ) { char buffer[256]; TiXmlDocument xmlDoc("navstate"); TiXmlElement eRoot("navstate"); eRoot.SetAttribute("version", 1); { TiXmlElement eRegisters("registers"); for( int i = 0; i < 24; i++ ) { if( state->registers.SPRM[i] ) { TiXmlElement eReg("sprm"); eReg.SetAttribute("index", i); { TiXmlElement eValue("value"); sprintf(buffer, "0x%hx", state->registers.SPRM[i]); eValue.InsertEndChild( TiXmlText(buffer) ); eReg.InsertEndChild(eValue); } eRegisters.InsertEndChild(eReg); } } for( int i = 0; i < 16; i++ ) { if( state->registers.GPRM[i] || state->registers.GPRM_mode[i] || state->registers.GPRM_time[i].tv_sec || state->registers.GPRM_time[i].tv_usec ) { TiXmlElement eReg("gprm"); eReg.SetAttribute("index", i); { TiXmlElement eValue("value"); sprintf(buffer, "0x%hx", state->registers.GPRM[i]); eValue.InsertEndChild( TiXmlText(buffer) ); eReg.InsertEndChild(eValue); } { TiXmlElement eMode("mode"); sprintf(buffer, "0x%c", state->registers.GPRM_mode[i]); eMode.InsertEndChild( TiXmlText(buffer) ); eReg.InsertEndChild(eMode); } { TiXmlElement eTime("time"); { TiXmlElement eValue("tv_sec"); sprintf(buffer, "%ld", state->registers.GPRM_time[i].tv_sec); eValue.InsertEndChild( TiXmlText( buffer ) ); eTime.InsertEndChild( eValue ) ; } { TiXmlElement eValue("tv_usec"); sprintf(buffer, "%ld", (long int)state->registers.GPRM_time[i].tv_usec); eValue.InsertEndChild( TiXmlText( buffer ) ); eTime.InsertEndChild( eValue ) ; } eReg.InsertEndChild(eTime); } eRegisters.InsertEndChild(eReg); } } eRoot.InsertEndChild(eRegisters); } { TiXmlElement element("domain"); sprintf(buffer, "%d", state->domain); element.InsertEndChild( TiXmlText( buffer ) ); eRoot.InsertEndChild(element); } { TiXmlElement element("vtsn"); sprintf(buffer, "%d", state->vtsN); element.InsertEndChild( TiXmlText( buffer ) ); eRoot.InsertEndChild(element); } { TiXmlElement element("pgcn"); sprintf(buffer, "%d", state->pgcN); element.InsertEndChild( TiXmlText( buffer ) ); eRoot.InsertEndChild(element); } { TiXmlElement element("pgn"); sprintf(buffer, "%d", state->pgN); element.InsertEndChild( TiXmlText( buffer ) ); eRoot.InsertEndChild(element); } { TiXmlElement element("celln"); sprintf(buffer, "%d", state->cellN); element.InsertEndChild( TiXmlText( buffer ) ); eRoot.InsertEndChild(element); } { TiXmlElement element("cell_restart"); sprintf(buffer, "%d", state->cell_restart); element.InsertEndChild( TiXmlText( buffer ) ); eRoot.InsertEndChild(element); } { TiXmlElement element("blockn"); sprintf(buffer, "%d", state->blockN); element.InsertEndChild( TiXmlText( buffer ) ); eRoot.InsertEndChild(element); } { TiXmlElement rsm("rsm"); { TiXmlElement element("vtsn"); sprintf(buffer, "%d", state->rsm_vtsN); element.InsertEndChild( TiXmlText( buffer ) ); rsm.InsertEndChild(element); } { TiXmlElement element("blockn"); sprintf(buffer, "%d", state->rsm_blockN); element.InsertEndChild( TiXmlText( buffer ) ); rsm.InsertEndChild(element); } { TiXmlElement element("pgcn"); sprintf(buffer, "%d", state->rsm_pgcN); element.InsertEndChild( TiXmlText( buffer ) ); rsm.InsertEndChild(element); } { TiXmlElement element("celln"); sprintf(buffer, "%d", state->rsm_cellN); element.InsertEndChild( TiXmlText( buffer ) ); rsm.InsertEndChild(element); } { TiXmlElement regs("registers"); for( int i = 0; i < 5; i++ ) { TiXmlElement reg("sprm"); reg.SetAttribute("index", i); { TiXmlElement element("value"); sprintf(buffer, "0x%hx", state->rsm_regs[i]); element.InsertEndChild( 
TiXmlText(buffer) ); reg.InsertEndChild(element); } regs.InsertEndChild(reg); } rsm.InsertEndChild(regs); } eRoot.InsertEndChild(rsm); } xmlDoc.InsertEndChild(eRoot); std::stringstream stream; stream << xmlDoc; xmlstate = stream.str(); return true; }
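// Hedged round-trip sketch: Kodi pairs this writer with an XMLToDVDState reader on the same class, but the exact signature used here is an assumption; 'state' is a populated dvd_state_t.
std::string xml;
CDVDStateSerializer serializer;
if (serializer.DVDToXMLState(xml, &state)) {
  dvd_state_t restored;
  serializer.XMLToDVDState(&restored, xml);  // assumed matching reader
}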
TEST(RegsInfoTest, invalid_register) { RegsImplFake<uint64_t> regs(64); RegsInfo<uint64_t> info(&regs); EXPECT_DEATH(info.Save(RegsInfo<uint64_t>::MAX_REGISTERS), ""); }
void LinearScan::allocate_fpu_stack() { // First compute which FPU registers are live at the start of each basic block // (To minimize the amount of work we have to do if we have to merge FPU stacks) if (ComputeExactFPURegisterUsage) { Interval* intervals_in_register, *intervals_in_memory; create_unhandled_lists(&intervals_in_register, &intervals_in_memory, is_in_fpu_register, NULL); // ignore memory intervals by overwriting intervals_in_memory // the dummy interval is needed to enforce the walker to walk until the given id: // without it, the walker stops when the unhandled-list is empty -> live information // beyond this point would be incorrect. Interval* dummy_interval = new Interval(any_reg); dummy_interval->add_range(max_jint - 2, max_jint - 1); dummy_interval->set_next(Interval::end()); intervals_in_memory = dummy_interval; IntervalWalker iw(this, intervals_in_register, intervals_in_memory); const int num_blocks = block_count(); for (int i = 0; i < num_blocks; i++) { BlockBegin* b = block_at(i); // register usage is only needed for merging stacks -> compute only // when more than one predecessor. // the block must not have any spill moves at the beginning (checked by assertions) // spill moves would use intervals that are marked as handled and so the usage bit // would be set incorrectly // NOTE: the check for number_of_preds > 1 is necessary. A block with only one // predecessor may have spill moves at the beginning of the block. // If an interval ends at the current instruction id, it is not possible // to decide if the register is live or not at the beginning of the block -> the // register information would be incorrect. if (b->number_of_preds() > 1) { int id = b->first_lir_instruction_id(); ResourceBitMap regs(FrameMap::nof_fpu_regs); iw.walk_to(id); // walk after the first instruction (always a label) of the block assert(iw.current_position() == id, "did not walk completely to id"); // Only consider FPU values in registers Interval* interval = iw.active_first(fixedKind); while (interval != Interval::end()) { int reg = interval->assigned_reg(); assert(reg >= pd_first_fpu_reg && reg <= pd_last_fpu_reg, "no fpu register"); assert(interval->assigned_regHi() == -1, "must not have hi register (doubles stored in one register)"); assert(interval->from() <= id && id < interval->to(), "interval out of range"); #ifndef PRODUCT if (TraceFPURegisterUsage) { tty->print("fpu reg %d is live because of ", reg - pd_first_fpu_reg); interval->print(); } #endif regs.set_bit(reg - pd_first_fpu_reg); interval = interval->next(); } b->set_fpu_register_usage(regs); #ifndef PRODUCT if (TraceFPURegisterUsage) { tty->print("FPU regs for block %d, LIR instr %d: ", b->block_id(), id); regs.print_on(tty); tty->cr(); } #endif } } } FpuStackAllocator alloc(ir()->compilation(), this); _fpu_stack_allocator = &alloc; alloc.allocate(); _fpu_stack_allocator = NULL; }
Mword Thread::user_ip() const { return exception_triggered() ? _exc_cont.ip() : regs()->ip(); }
IMPLEMENT inline Mword Thread::user_flags() const { return regs()->flags(); }