void audit_opnds(FILE *fp, const char *heading) { int size_reg = 0; int size_var = 0; int size_immed_integer = 0; int size_immed_string = 0; int size_addr_sym = 0; int size_addr_exp = 0; for (Iter<SymbolTableObject*> i = the_local_scope->get_symbol_table_object_iterator(); i.is_valid(); i.next()) { SymbolTableObject *sto = i.current(); if (is_kind_of<IrOpnd>(sto)) switch (static_cast<IrOpnd*>(sto)->get_kind()) { case opnd::NONE: claim(false, "Unexpected operand kind: opnd::NONE"); case opnd::REG_HARD: case opnd::REG_VIRTUAL: size_reg += sizeof(OpndReg); break; case opnd::VAR: size_var += sizeof(OpndVar); break; case opnd::IMMED_INTEGER: size_immed_integer += sizeof(OpndImmedInteger); break; case opnd::IMMED_STRING: size_immed_string += sizeof(OpndImmedString); break; case opnd::ADDR_SYM: size_addr_sym += sizeof(OpndAddrSym); break; case opnd::SYM_DISP: case opnd::INDEX_SYM_DISP: case opnd::BASE_DISP: case opnd::BASE_INDEX: case opnd::BASE_INDEX_DISP: case opnd::INDEX_SCALE_DISP: case opnd::BASE_INDEX_SCALE_DISP: size_addr_sym += sizeof(OpndAddrExp) + sizeof(IrOpnd*) * static_cast<OpndAddrExp*>(sto)->get_src_count(); break; default: claim(false, "Unknown operand kind"); } } fprintf(fp, "%s operand storage (bytes):\n", heading); fprintf(fp, " Register: %8d\n", size_reg); fprintf(fp, " Variable symbol: %8d\n", size_var); fprintf(fp, " String immediate: %8d\n", size_immed_string); fprintf(fp, " Integer immediate: %8d\n", size_immed_integer); fprintf(fp, " Address symbol: %8d\n", size_addr_sym); fprintf(fp, " Address expression:%8d\n", size_addr_exp); fprintf(fp, " %8d (total)\n", size_reg + size_var + size_immed_string + size_immed_integer + size_addr_sym + size_addr_exp); }
// Exercises SkTInternalLList: head insertion, bidirectional iteration,
// removal from the middle/head/tail, and the addBefore/addAfter entry points.
static void TestTInternalLList(skiatest::Reporter* reporter) {
    SkTInternalLList<ListElement> list;
    ListElement elements[4] = {
        ListElement(0),
        ListElement(1),
        ListElement(2),
        ListElement(3),
    };

    // A freshly constructed list must be empty.
    check_list(list, reporter, true, 0, false, false, false, false, elements);

    list.addToHead(&elements[0]);
    check_list(list, reporter, false, 1, true, false, false, false, elements);

    list.addToHead(&elements[1]);
    list.addToHead(&elements[2]);
    list.addToHead(&elements[3]);
    check_list(list, reporter, false, 4, true, true, true, true, elements);

    // Walk the list from both ends; head insertion reversed the IDs, so the
    // head-to-tail order is 3,2,1,0 and the tail-to-head order is 0,1,2,3.
    typedef SkTInternalLList<ListElement>::Iter Iter;
    Iter iter;

    ListElement* node = iter.init(list, Iter::kHead_IterStart);
    int expected = 3;
    while (NULL != node) {
        REPORTER_ASSERT(reporter, node->fID == expected);
        --expected;
        node = iter.next();
    }

    node = iter.init(list, Iter::kTail_IterStart);
    expected = 0;
    while (NULL != node) {
        REPORTER_ASSERT(reporter, node->fID == expected);
        ++expected;
        node = iter.prev();
    }

    // Remove a middle element, then the frontmost, then the backmost.
    list.remove(&elements[1]);
    list.remove(&elements[3]);
    list.remove(&elements[0]);
    check_list(list, reporter, false, 1, false, false, true, false, elements);

    // Removing the final element empties the list again.
    list.remove(&elements[2]);
    check_list(list, reporter, true, 0, false, false, false, false, elements);

    // Exercise the insert-relative-to-an-existing-node entry points.
    list.addAfter(&elements[1], NULL);
    check_list(list, reporter, false, 1, false, true, false, false, elements);

    list.remove(&elements[1]);

    list.addBefore(&elements[1], NULL);
    check_list(list, reporter, false, 1, false, true, false, false, elements);

    list.addBefore(&elements[0], &elements[1]);
    check_list(list, reporter, false, 2, true, true, false, false, elements);

    list.addAfter(&elements[3], &elements[1]);
    check_list(list, reporter, false, 3, true, true, false, true, elements);

    list.addBefore(&elements[2], &elements[3]);
    check_list(list, reporter, false, 4, true, true, true, true, elements);

    // Final head-to-tail order should be 0,1,2,3.
    node = iter.init(list, Iter::kHead_IterStart);
    expected = 0;
    while (NULL != node) {
        REPORTER_ASSERT(reporter, node->fID == expected);
        ++expected;
        node = iter.next();
    }
}
/// Two iterators compare equal when the first components of their
/// underlying base pairs match.
bool equal(const Iter& p1, const Iter& p2) const
{
    const bool same_first = (p1.base().first == p2.base().first);
    return same_first;
}
/// runOnMachineBasicBlock - Fill in delay slots for the given basic block.
/// We assume there is only one delay slot per delayed instruction.
bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
  bool Changed = false;
  const MipsSubtarget &STI = MBB.getParent()->getSubtarget<MipsSubtarget>();
  bool InMicroMipsMode = STI.inMicroMipsMode();
  const MipsInstrInfo *TII = STI.getInstrInfo();

  if (InMicroMipsMode && STI.hasMips32r6()) {
    // This is microMIPS32r6 or microMIPS64r6 processor. Delay slot for
    // branching instructions is not needed.
    return Changed;
  }

  for (Iter I = MBB.begin(); I != MBB.end(); ++I) {
    // Only instructions with an as-yet-unfilled delay slot are interesting.
    if (!hasUnoccupiedSlot(&*I))
      continue;

    ++FilledSlots;
    Changed = true;

    // Delay slot filling is disabled at -O0.
    if (!DisableDelaySlotFiller && (TM.getOptLevel() != CodeGenOpt::None)) {
      bool Filled = false;

      // Try to find a useful filler: first above the delayed instruction,
      // then (for terminators) in successor blocks, otherwise below it.
      if (searchBackward(MBB, I)) {
        Filled = true;
      } else if (I->isTerminator()) {
        if (searchSuccBBs(MBB, I)) {
          Filled = true;
        }
      } else if (searchForward(MBB, I)) {
        Filled = true;
      }

      if (Filled) {
        // Get instruction with delay slot.
        MachineBasicBlock::instr_iterator DSI(I);

        if (InMicroMipsMode && TII->GetInstSizeInBytes(&*std::next(DSI)) == 2 &&
            DSI->isCall()) {
          // If instruction in delay slot is 16b change opcode to
          // corresponding instruction with short delay slot.
          DSI->setDesc(TII->get(getEquivalentCallShort(DSI->getOpcode())));
        }

        continue;
      }
    }

    // No filler was found; before falling back to a NOP, try a compact form.
    //
    // For microMIPS if instruction is BEQ or BNE with one ZERO register, then
    // instead of adding NOP replace this instruction with the corresponding
    // compact branch instruction, i.e. BEQZC or BNEZC. Additionally
    // PseudoReturn and PseudoIndirectBranch are expanded to JR_MM, so they can
    // be replaced with JRC16_MM.
    //
    // For MIPSR6 attempt to produce the corresponding compact (no delay slot)
    // form of the CTI. For indirect jumps this will not require inserting a
    // NOP and for branches will hopefully avoid requiring a NOP.
    if ((InMicroMipsMode || STI.hasMips32r6()) &&
        TII->getEquivalentCompactForm(I)) {
      I = replaceWithCompactBranch(MBB, I, I->getDebugLoc());
      continue;
    }

    // Bundle the NOP to the instruction with the delay slot.
    BuildMI(MBB, std::next(I), I->getDebugLoc(), TII->get(Mips::NOP));
    MIBundleBuilder(MBB, I, std::next(I, 2));
  }

  return Changed;
}
/// runOnMachineBasicBlock - Fill in delay slots for the given basic block.
/// We assume there is only one delay slot per delayed instruction.
bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
  bool Changed = false;
  const MipsSubtarget &STI = MBB.getParent()->getSubtarget<MipsSubtarget>();
  bool InMicroMipsMode = STI.inMicroMipsMode();
  const MipsInstrInfo *TII = STI.getInstrInfo();

  for (Iter I = MBB.begin(); I != MBB.end(); ++I) {
    // Only instructions with an as-yet-unfilled delay slot are interesting.
    if (!hasUnoccupiedSlot(&*I))
      continue;

    ++FilledSlots;
    Changed = true;

    // Delay slot filling is disabled at -O0.
    if (!DisableDelaySlotFiller && (TM.getOptLevel() != CodeGenOpt::None)) {
      bool Filled = false;

      // Try to find a useful filler: first above the delayed instruction,
      // then (for terminators) in successor blocks, otherwise below it.
      if (searchBackward(MBB, I)) {
        Filled = true;
      } else if (I->isTerminator()) {
        if (searchSuccBBs(MBB, I)) {
          Filled = true;
        }
      } else if (searchForward(MBB, I)) {
        Filled = true;
      }

      if (Filled) {
        // Get instruction with delay slot.
        MachineBasicBlock::instr_iterator DSI(I);

        if (InMicroMipsMode && TII->GetInstSizeInBytes(std::next(DSI)) == 2 &&
            DSI->isCall()) {
          // If instruction in delay slot is 16b change opcode to
          // corresponding instruction with short delay slot.
          DSI->setDesc(TII->get(getEquivalentCallShort(DSI->getOpcode())));
        }

        continue;
      }
    }

    // If instruction is BEQ or BNE with one ZERO register, then instead of
    // adding NOP replace this instruction with the corresponding compact
    // branch instruction, i.e. BEQZC or BNEZC.
    unsigned Opcode = I->getOpcode();
    if (InMicroMipsMode) {
      switch (Opcode) {
        case Mips::BEQ:
        case Mips::BNE:
          if (((unsigned) I->getOperand(1).getReg()) == Mips::ZERO) {
            I = replaceWithCompactBranch(MBB, I, I->getDebugLoc());
            continue;
          }
          break;
        case Mips::JR:
        case Mips::PseudoReturn:
        case Mips::PseudoIndirectBranch:
          // For microMIPS the PseudoReturn and PseudoIndirectBranch are always
          // expanded to JR_MM, so they can be replaced with JRC16_MM.
          I = replaceWithCompactJump(MBB, I, I->getDebugLoc());
          continue;
        default:
          break;
      }
    }

    // Bundle the NOP to the instruction with the delay slot.
    BuildMI(MBB, std::next(I), I->getDebugLoc(), TII->get(Mips::NOP));
    MIBundleBuilder(MBB, I, std::next(I, 2));
  }

  return Changed;
}
// Call this function when searching above a dst_type node.  This function
// searches for a public path to (static_ptr, static_type).
// This function is guaranteed not to find a node of type dst_type.
// Theoretically this is a very simple function which just stops if it finds a
// static_type node:  All the hoopla surrounding the search code is doing
// nothing but looking for excuses to stop the search prematurely (break out of
// the for-loop).  That is, the algorithm below is simply an optimization of this:
// void
// __vmi_class_type_info::search_above_dst(__dynamic_cast_info* info,
//                                         const void* dst_ptr,
//                                         const void* current_ptr,
//                                         int path_below) const
// {
//     if (this == info->static_type)
//         process_static_type_above_dst(info, dst_ptr, current_ptr, path_below);
//     else
//     {
//         typedef const __base_class_type_info* Iter;
//         // This is not a static_type and not a dst_type
//         for (Iter p = __base_info, e = __base_info + __base_count; p < e; ++p)
//         {
//             p->search_above_dst(info, dst_ptr, current_ptr, public_path);
//             // break out early here if you can detect it doesn't matter if you do
//         }
//     }
// }
void
__vmi_class_type_info::search_above_dst(__dynamic_cast_info* info,
                                        const void* dst_ptr,
                                        const void* current_ptr,
                                        int path_below,
                                        bool use_strcmp) const
{
    if (is_equal(this, info->static_type, use_strcmp))
        process_static_type_above_dst(info, dst_ptr, current_ptr, path_below);
    else
    {
        typedef const __base_class_type_info* Iter;
        // This is not a static_type and not a dst_type.
        // Save flags so they can be restored when returning to nodes below.
        bool found_our_static_ptr = info->found_our_static_ptr;
        bool found_any_static_type = info->found_any_static_type;
        // We've found a dst_type below with a path to here.  If the path
        //    to here is not public, there may be another path to here that
        //    is public.  So we have to assume that the path to here is public.
        //  We can stop looking above if:
        //    1.  We've found a public path to (static_ptr, static_type).
        //    2.  We've found an ambiguous cast from (static_ptr, static_type)
        //        to a dst_type.  This is detected at the
        //        (static_ptr, static_type).
        //    3.  We can prove that there is no public path to
        //        (static_ptr, static_type) above here.
        const Iter e = __base_info + __base_count;
        Iter p = __base_info;
        // Zero out found flags before searching the first base.
        info->found_our_static_ptr = false;
        info->found_any_static_type = false;
        p->search_above_dst(info, dst_ptr, current_ptr, path_below, use_strcmp);
        // Search the remaining bases, but after each one check whether the
        // flags it set allow the search to be cut short.
        if (++p < e)
        {
            do
            {
                if (info->search_done)
                    break;
                if (info->found_our_static_ptr)
                {
                    // If we found what we're looking for, stop looking above.
                    if (info->path_dst_ptr_to_static_ptr == public_path)
                        break;
                    // We found a private path to (static_ptr, static_type).
                    //   If there is no diamond then there is only one path
                    //   to (static_ptr, static_type) from here and we just
                    //   found it.
                    if (!(__flags & __diamond_shaped_mask))
                        break;
                }
                else if (info->found_any_static_type)
                {
                    // If we found a static_type that isn't the one we're
                    //    looking for, and if there are no repeated types above
                    //    here, then stop looking.
                    if (!(__flags & __non_diamond_repeat_mask))
                        break;
                }
                // Zero out found flags for the next base.
                info->found_our_static_ptr = false;
                info->found_any_static_type = false;
                p->search_above_dst(info, dst_ptr, current_ptr, path_below, use_strcmp);
            } while (++p < e);
        }
        // Restore flags for the caller below us.
        info->found_our_static_ptr = found_our_static_ptr;
        info->found_any_static_type = found_any_static_type;
    }
}
/// Entry point for the bench tool: parses command-line flags, chooses the set
/// of configs to run, then runs every registered Benchmark in every suitable
/// config, timing each one until its per-loop time converges, and reports
/// results through the configured writers (logger and optional JSON file).
int tool_main(int argc, char** argv) {
    SetupCrashHandler();
    SkCommandLineFlags::Parse(argc, argv);
#if SK_ENABLE_INST_COUNT
    if (FLAGS_leaks) {
        gPrintInstCount = true;
    }
#endif
    SkAutoGraphics ag;

    // First, parse some flags.
    BenchLogger logger;
    if (FLAGS_logFile.count()) {
        logger.SetLogFile(FLAGS_logFile[0]);
    }

    LoggerResultsWriter logWriter(logger, FLAGS_timeFormat[0]);
    MultiResultsWriter writer;
    writer.add(&logWriter);

    SkAutoTDelete<JSONResultsWriter> jsonWriter;
    if (FLAGS_outResultsFile.count()) {
        jsonWriter.reset(SkNEW(JSONResultsWriter(FLAGS_outResultsFile[0])));
        writer.add(jsonWriter.get());
    }

    // Instantiate after all the writers have been added to writer so that we
    // call close() before their destructors are called on the way out.
    CallEnd<MultiResultsWriter> ender(writer);

    const uint8_t alpha = FLAGS_forceBlend ? 0x80 : 0xFF;
    SkTriState::State dither = SkTriState::kDefault;
    for (size_t i = 0; i < 3; i++) {
        if (strcmp(SkTriState::Name[i], FLAGS_forceDither[0]) == 0) {
            dither = static_cast<SkTriState::State>(i);
        }
    }

    // Map the --mode flag string onto a BenchMode enumerator.
    BenchMode benchMode = kNormal_BenchMode;
    for (size_t i = 0; i < SK_ARRAY_COUNT(BenchMode_Name); i++) {
        if (strcmp(FLAGS_mode[0], BenchMode_Name[i]) == 0) {
            benchMode = static_cast<BenchMode>(i);
        }
    }

    SkTDArray<int> configs;
    bool runDefaultConfigs = false;
    // Try user-given configs first.
    for (int i = 0; i < FLAGS_config.count(); i++) {
        for (int j = 0; j < static_cast<int>(SK_ARRAY_COUNT(gConfigs)); ++j) {
            if (0 == strcmp(FLAGS_config[i], gConfigs[j].name)) {
                *configs.append() = j;
            } else if (0 == strcmp(FLAGS_config[i], kDefaultsConfigStr)) {
                runDefaultConfigs = true;
            }
        }
    }

    // If there weren't any, fill in with defaults.
    if (runDefaultConfigs) {
        for (int i = 0; i < static_cast<int>(SK_ARRAY_COUNT(gConfigs)); ++i) {
            if (gConfigs[i].runByDefault) {
                *configs.append() = i;
            }
        }
    }

    // Filter out things we can't run.
    if (kNormal_BenchMode != benchMode) {
        // Non-rendering configs only run in normal mode
        for (int i = 0; i < configs.count(); ++i) {
            const Config& config = gConfigs[configs[i]];
            if (Benchmark::kNonRendering_Backend == config.backend) {
                configs.remove(i, 1);
                --i;
            }
        }
    }

#if SK_SUPPORT_GPU
    // Drop GPU configs whose context can't be created or whose sample count
    // isn't supported by the created context.
    for (int i = 0; i < configs.count(); ++i) {
        const Config& config = gConfigs[configs[i]];
        if (Benchmark::kGPU_Backend == config.backend) {
            GrContext* context = gContextFactory.get(config.contextType);
            if (NULL == context) {
                SkDebugf("GrContext could not be created for config %s. Config will be skipped.\n",
                         config.name);
                configs.remove(i);
                --i;
                continue;
            }
            if (config.sampleCount > context->getMaxSampleCount()){
                SkDebugf(
                    "Sample count (%d) for config %s is not supported. Config will be skipped.\n",
                    config.sampleCount, config.name);
                configs.remove(i);
                --i;
                continue;
            }
        }
    }
#endif

    // All flags should be parsed now.  Report our settings.
    if (FLAGS_runOnce) {
        logger.logError("bench was run with --runOnce, so we're going to hide the times."
                        " It's for your own good!\n");
    }
    writer.option("mode", FLAGS_mode[0]);
    writer.option("alpha", SkStringPrintf("0x%02X", alpha).c_str());
    writer.option("antialias", SkStringPrintf("%d", FLAGS_forceAA).c_str());
    writer.option("filter", SkStringPrintf("%d", FLAGS_forceFilter).c_str());
    writer.option("dither", SkTriState::Name[dither]);
    writer.option("rotate", SkStringPrintf("%d", FLAGS_rotate).c_str());
    writer.option("scale", SkStringPrintf("%d", FLAGS_scale).c_str());
    writer.option("clip", SkStringPrintf("%d", FLAGS_clip).c_str());

#if defined(SK_BUILD_FOR_WIN32)
    writer.option("system", "WIN32");
#elif defined(SK_BUILD_FOR_MAC)
    writer.option("system", "MAC");
#elif defined(SK_BUILD_FOR_ANDROID)
    writer.option("system", "ANDROID");
#elif defined(SK_BUILD_FOR_UNIX)
    writer.option("system", "UNIX");
#else
    writer.option("system", "other");
#endif

#if defined(SK_DEBUG)
    writer.option("build", "DEBUG");
#else
    writer.option("build", "RELEASE");
#endif

    // Set texture cache limits if non-default.
    for (size_t i = 0; i < SK_ARRAY_COUNT(gConfigs); ++i) {
#if SK_SUPPORT_GPU
        const Config& config = gConfigs[i];
        if (Benchmark::kGPU_Backend != config.backend) {
            continue;
        }
        GrContext* context = gContextFactory.get(config.contextType);
        if (NULL == context) {
            continue;
        }

        size_t bytes;
        int count;
        context->getResourceCacheLimits(&count, &bytes);
        if (-1 != FLAGS_gpuCacheBytes) {
            bytes = static_cast<size_t>(FLAGS_gpuCacheBytes);
        }
        if (-1 != FLAGS_gpuCacheCount) {
            count = FLAGS_gpuCacheCount;
        }
        context->setResourceCacheLimits(count, bytes);
#endif
    }

    // Run each bench in each configuration it supports and we asked for.
    Iter iter;
    Benchmark* bench;
    while ((bench = iter.next()) != NULL) {
        SkAutoTUnref<Benchmark> benchUnref(bench);
        if (SkCommandLineFlags::ShouldSkip(FLAGS_match, bench->getName())) {
            continue;
        }

        bench->setForceAlpha(alpha);
        bench->setForceAA(FLAGS_forceAA);
        bench->setForceFilter(FLAGS_forceFilter);
        bench->setDither(dither);
        bench->preDraw();

        bool loggedBenchName = false;
        for (int i = 0; i < configs.count(); ++i) {
            const int configIndex = configs[i];
            const Config& config = gConfigs[configIndex];

            if (!bench->isSuitableFor(config.backend)) {
                continue;
            }

            GrContext* context = NULL;
#if SK_SUPPORT_GPU
            SkGLContextHelper* glContext = NULL;
            if (Benchmark::kGPU_Backend == config.backend) {
                context = gContextFactory.get(config.contextType);
                if (NULL == context) {
                    continue;
                }
                glContext = gContextFactory.getGLContext(config.contextType);
            }
#endif

            SkAutoTUnref<SkCanvas> canvas;
            SkAutoTUnref<SkPicture> recordFrom;
            SkPictureRecorder recorderTo;
            const SkIPoint dim = bench->getSize();

            // Rendering configs need a surface and a mode-specific canvas;
            // non-rendering benches run without either.
            SkAutoTUnref<SkSurface> surface;
            if (Benchmark::kNonRendering_Backend != config.backend) {
                surface.reset(make_surface(config.fColorType,
                                           dim,
                                           config.backend,
                                           config.sampleCount,
                                           context));
                if (!surface.get()) {
                    logger.logError(SkStringPrintf(
                        "Device creation failure for config %s. Will skip.\n", config.name));
                    continue;
                }

                switch(benchMode) {
                    case kDeferredSilent_BenchMode:
                    case kDeferred_BenchMode:
                        canvas.reset(SkDeferredCanvas::Create(surface.get()));
                        break;
                    case kRecord_BenchMode:
                        canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
                        break;
                    case kPictureRecord_BenchMode: {
                        // Record the bench once up front; the timed work is
                        // replaying that picture into a second recorder.
                        SkPictureRecorder recorderFrom;
                        bench->draw(1, recorderFrom.beginRecording(dim.fX, dim.fY));
                        recordFrom.reset(recorderFrom.endRecording());
                        canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
                        break;
                    }
                    case kNormal_BenchMode:
                        canvas.reset(SkRef(surface->getCanvas()));
                        break;
                    default:
                        SkASSERT(false);
                }
            }

            if (NULL != canvas) {
                canvas->clear(SK_ColorWHITE);
                if (FLAGS_clip) {
                    perform_clip(canvas, dim.fX, dim.fY);
                }
                if (FLAGS_scale) {
                    perform_scale(canvas, dim.fX, dim.fY);
                }
                if (FLAGS_rotate) {
                    perform_rotate(canvas, dim.fX, dim.fY);
                }
            }

            if (!loggedBenchName) {
                loggedBenchName = true;
                writer.bench(bench->getName(), dim.fX, dim.fY);
            }

#if SK_SUPPORT_GPU
            SkGLContextHelper* contextHelper = NULL;
            if (Benchmark::kGPU_Backend == config.backend) {
                contextHelper = gContextFactory.getGLContext(config.contextType);
            }
            BenchTimer timer(contextHelper);
#else
            BenchTimer timer;
#endif

            double previous = std::numeric_limits<double>::infinity();
            bool converged = false;

            // variables used to compute loopsPerFrame
            double frameIntervalTime = 0.0f;
            int frameIntervalTotalLoops = 0;

            bool frameIntervalComputed = false;
            int loopsPerFrame = 0;
            int loopsPerIter = 0;
            if (FLAGS_verbose) {
                SkDebugf("%s %s: ", bench->getName(), config.name);
            }
            if (!FLAGS_dryRun) {
                do {
                    // Ramp up 1 -> 2 -> 4 -> 8 -> 16 -> ... -> ~1 billion.
                    loopsPerIter = (loopsPerIter == 0) ? 1 : loopsPerIter * 2;
                    if (loopsPerIter >= (1<<30) || timer.fWall > FLAGS_maxMs) {
                        // If you find it takes more than a billion loops to get
                        // up to 20ms of runtime, you've got a computer clocked
                        // at several THz or have a broken benchmark.  ;)
                        //     "1B ought to be enough for anybody."
                        logger.logError(SkStringPrintf(
                            "\nCan't get %s %s to converge in %dms (%d loops)",
                            bench->getName(), config.name, FLAGS_maxMs, loopsPerIter));
                        break;
                    }

                    if ((benchMode == kRecord_BenchMode || benchMode == kPictureRecord_BenchMode)) {
                        // Clear the recorded commands so that they do not accumulate.
                        canvas.reset(SkRef(recorderTo.beginRecording(dim.fX, dim.fY)));
                    }

                    timer.start();
                    // Inner loop that allows us to break the run into smaller
                    // chunks (e.g. frames). This is especially useful for the GPU
                    // as we can flush and/or swap buffers to keep the GPU from
                    // queuing up too much work.
                    for (int loopCount = loopsPerIter; loopCount > 0; ) {
                        // Save and restore around each call to draw() to guarantee a pristine canvas.
                        SkAutoCanvasRestore saveRestore(canvas, true/*also save*/);

                        int loops;
                        if (frameIntervalComputed && loopCount > loopsPerFrame) {
                            loops = loopsPerFrame;
                            loopCount -= loopsPerFrame;
                        } else {
                            loops = loopCount;
                            loopCount = 0;
                        }

                        if (benchMode == kPictureRecord_BenchMode) {
                            recordFrom->draw(canvas);
                        } else {
                            bench->draw(loops, canvas);
                        }

                        if (kDeferredSilent_BenchMode == benchMode) {
                            static_cast<SkDeferredCanvas*>(canvas.get())->silentFlush();
                        } else if (NULL != canvas) {
                            canvas->flush();
                        }

#if SK_SUPPORT_GPU
                        // swap drawing buffers on each frame to prevent the GPU
                        // from queuing up too much work
                        if (NULL != glContext) {
                            glContext->swapBuffers();
                        }
#endif
                    }

                    // Stop truncated timers before GL calls complete, and stop
                    // the full timers after.
                    timer.truncatedEnd();
#if SK_SUPPORT_GPU
                    if (NULL != glContext) {
                        context->flush();
                        SK_GL(*glContext, Finish());
                    }
#endif
                    timer.end();

                    // setup the frame interval for subsequent iterations
                    if (!frameIntervalComputed) {
                        frameIntervalTime += timer.fWall;
                        frameIntervalTotalLoops += loopsPerIter;
                        if (frameIntervalTime >= FLAGS_minMs) {
                            frameIntervalComputed = true;
                            loopsPerFrame =
                                (int)(((double)frameIntervalTotalLoops / frameIntervalTime) * FLAGS_minMs);
                            if (loopsPerFrame < 1) {
                                loopsPerFrame = 1;
                            }
                            // SkDebugf("  %s has %d loops in %f ms (normalized to %d)\n",
                            //          bench->getName(), frameIntervalTotalLoops,
                            //          timer.fWall, loopsPerFrame);
                        }
                    }

                    const double current = timer.fWall / loopsPerIter;
                    if (FLAGS_verbose && current > previous) {
                        SkDebugf("↑");
                    }
                    if (FLAGS_verbose) {
                        SkDebugf("%.3g ", current);
                    }
                    converged = HasConverged(previous, current, timer.fWall);
                    previous = current;
                } while (!FLAGS_runOnce && !converged);
            }
            if (FLAGS_verbose) {
                SkDebugf("\n");
            }

            // Optionally dump the final rendered image to --outDir.
            if (!FLAGS_dryRun && FLAGS_outDir.count() && Benchmark::kNonRendering_Backend != config.backend) {
                SkAutoTUnref<SkImage> image(surface->newImageSnapshot());
                if (image.get()) {
                    saveFile(bench->getName(), config.name, FLAGS_outDir[0],
                             image);
                }
            }

            if (FLAGS_runOnce) {
                // Let's not mislead ourselves by looking at Debug build or
                // single iteration bench times!
                continue;
            }

            // Normalize to ms per 1000 iterations.
            const double normalize = 1000.0 / loopsPerIter;
            const struct { char shortName; const char* longName; double ms; } times[] = {
                {'w', "msecs",  normalize * timer.fWall},
                {'W', "Wmsecs", normalize * timer.fTruncatedWall},
                {'c', "cmsecs", normalize * timer.fCpu},
                {'C', "Cmsecs", normalize * timer.fTruncatedCpu},
                {'g', "gmsecs", normalize * timer.fGpu},
            };

            // Emit only the timers selected by --timers, and only when the
            // measured time is positive.
            writer.config(config.name);
            for (size_t i = 0; i < SK_ARRAY_COUNT(times); i++) {
                if (strchr(FLAGS_timers[0], times[i].shortName) && times[i].ms > 0) {
                    writer.timer(times[i].longName, times[i].ms);
                }
            }
        }
    }
#if SK_SUPPORT_GPU
    gContextFactory.destroyContexts();
#endif
    return 0;
}
/// Scan the whole score matrix and return the highest score, reporting its
/// position through the out-parameters i (row) and j (column).  Ties keep the
/// earliest cell in iteration order (strict scoreGreater comparison).
Score getHighest(Idx& i, Idx& j) // could be const if we had const Iters
{
    const bool trace = false; // flip on for verbose per-cell scan output

    Iter iter = begin();
    // Seed the best candidate with the first cell so the loop below only has
    // to compare.
    Score bestScore = *iter;
    Idx bestRow = iter.row();
    Idx bestCol = iter.col();
    if (trace)
        cout << iter.row() << " " << iter.col() << " " << *iter << endl;
    ++iter;
    for (Iter iterEnd = end(); iter != iterEnd; ++iter)
    {
        if (trace)
            cout << iter.row() << " " << iter.col() << " " << *iter << endl;
        if (scoreGreater(*iter, bestScore))
        // if (ScoreComparer::_scoreGreater(*iter, bestScore, scoreComparer_TOLERANCE))
        {
            // Removed a permanently-dead debug branch that was guarded by
            // `if (false && gTraceInfo.iteration == 2441)` -- unreachable code.
            bestScore = *iter;
            bestRow = iter.row();
            bestCol = iter.col();
        }
    }
    i = bestRow;
    j = bestCol;
    return bestScore;
}
//---------------------------------------------------------------------------
// Incrementally re-parses the lines queued in tasklistprior (priority /
// visible work) and tasklist (background work) until the visible region is
// up to date or the per-call work budget (PARSEINONEGO) is exhausted.
// `paint` controls whether the drawer is repainted when visible lines change.
// Returns true when all pending work finished; false when the parser
// re-scheduled itself via Application->OnIdle to continue later.
bool __fastcall Parser::Execute(bool paint)
{
    //parse lists
    tasklist.sort();
    tasklistprior.sort();
    upperbound = parent->GetActLine() + PARSEINADVANCE;
    bool painted = false;
    int parsed = 0;
    bool endValid = false;
    // Keep going while priority work exists, or while background work is
    // still inside the look-ahead window or belongs to a stale parse pass.
    while(tasklistprior.size() > 0 ||
          (tasklist.size() > 0 &&
           (tasklist.front().linenum < upperbound ||
            tasklist.front().line->parserState.parseid != currentparseid)) )
    {
        // Priority tasks are always serviced before background tasks.
        ParseTask pt;
        if(tasklistprior.size() > 0)
        {
            pt = tasklistprior.front();
        }
        else
        {
            pt = tasklist.front();
        }
        linenum = parent->GetLineNum(pt.line);
        bool first = true;
        // NOTE(review): bitwise | (not ||) -- same value for bools, but both
        // operands are always evaluated; presumably intentional, confirm.
        painted = linenum >= 0 | painted;
        Iter * itr = new Iter(pt.line,
                              linenum >= 0 ? parent->itrLine.linenum+linenum : pt.linenum,
                              0, parent->buffer);
        state = itr->line->parserState;
        // Seed the parse id: line 1 starts a fresh pass; any other line
        // inherits the id from the line above it.
        if(itr->linenum == 1)
            state.parseid = currentparseid;
        else if(itr->line != NULL && itr->line->prevline != NULL)
            state.parseid = itr->line->prevline->parserState.parseid;
        if(linenum >= 0)
        {
            // Line is visible: rebuild markup state used for drawing.
            ReconstructMarkup();
            actIMarkup = itr->ReconstructIMarkFontStyle();
            actMarkupCombined = actMarkup;
            actMarkupCombined += actIMarkup;
            itr->UpdateNextImark();
        }
        if(state.searchStateStack.top == NULL)
            state.searchStateStack.Push(langdef->GetDefSC(0));
        //LanguageDefinition::SearchIter * searchiter = &state.searchStateStack.top->data; //now passed directly - no need to keep it
        actFormat = *state.searchStateStack.top->data.base->format;
        tasklistprior.remove(pt);
        tasklist.remove(pt);
        // Parse consecutive lines until the stored parser state converges
        // with the freshly computed one (nothing below can change any more).
        // NOTE(review): tasklistprior.front() is evaluated here even when
        // tasklistprior may be empty -- verify the list type tolerates that.
        while(itr->word->next &&
              (first || itr->line->parserState != this->state ||
               tasklistprior.front().line == itr->line))
        {
            //take care of record from list
            newline = true;
            first = false;
            //take care of line
            if(itr->line->prevline != NULL) //otherwise sets a format to the first line -> changing langdef will never iterate change in buffer
                itr->line->parserState = this->state;
            tasklistprior.remove(pt);
            tasklist.remove(pt);
            // parent->Log(String("parsing line ")+String(itr->linenum));
            ParseLine(itr, &state.searchStateStack.top->data, linenum >= 0);
            endValid = itr->GoChar();
            if(linenum >= 0)
            {
                //FlushAll();
                painted = true;
                linenum = parent->GetLineNum(itr->line);
            }
            else if(parent->IsLineFirstVisible(itr->line))
            {
                // Just crossed into the visible region: rebuild markup and
                // start emitting from this line on.
                itr->linenum = parent->GetActLine();
                ReconstructMarkup();
                actIMarkup = itr->ReconstructIMarkFontStyle();
                actMarkupCombined = actMarkup;
                actMarkupCombined += actIMarkup;
                itr->UpdateNextImark();
                linenum = 0;
            }
            pt.linenum++;
            pt.line = itr->line;
            parsed++;
            //if(!(linenum >= 0 && linenum <= parent->GetVisLineCount() && itr->line->parserState != this->state) && tasklistprior.size() > 0 )
            // Outside the visible region this run may be abandoned early and
            // the remainder re-queued as a background task.
            if(linenum < 0 &&
               (tasklistprior.size() > 0 || itr->linenum > upperbound || parsed > PARSEINONEGO))
            {
                if(itr->word->next != NULL && itr->line->parserState != this->state)
                {
                    this->tasklist.push_front(ParseTask(itr->line, itr->linenum));
                }
                itr->line->parserState = this->state;
                break;
            }
        }
        if(!itr->word->next && linenum >= 0)
        {
            if(first) //means line has not been either parsed or flushed, because iter was already on nextline (empty line)
                //well I no longer understand purpose of this, but let's consider, that the tail of buffer has to be explicitly flushed to the drawer
                FlushAll();
            SendEof();
        }
        if(itr->word->next == NULL && endValid) //when last line is empty line, then parseline would not get assigned
            itr->line->parserState = this->state; //beware - you do not want to assign state into wrong line (when iter could go no further
        //parent->Log(String("parsed to line ")+String(itr->linenum));
        delete itr;
        // Repaint and/or yield to OnIdle once the work budget is spent.
        if((painted || parsed > PARSEINONEGO) && tasklistprior.size() == 0)
        {
            if(painted && paint)
            {
                drawer->Paint();
            }
            if(parsed > PARSEINONEGO)
            {
                if(!onidleset)
                {
                    oldidle = Application->OnIdle;
                    Application->OnIdle = OnIdle;
                    onidleset = true;
                }
                return false;
            }
            parsed = 0;
        }
    }
    return true;
}
// Log-probability of the emission at the wrapped slot.  A missing
// observation contributes probability 1 (i.e. log(1) == 0.0); otherwise
// the computation is delegated to the underlying function.
double log_probability(Iter const & iter) const {
  const bool missing = iter.is_missing(_slotID);
  return missing ? 0.0 : _func->log_probability(iter);
}
// Log-probability via the wrapped function, with a NaN guard: a NaN result
// is reported as a QHMMException carrying the state/slot/position context
// instead of silently propagating.
double log_probability(Iter const & iter) const {
  const double result = _func->log_probability(iter);
  if (!std::isnan(result))
    return result;
  // TODO: support higher dimensions!
  throw QHMMException("NaN detected", "log_probability", false,
                      _stateID, _slotID,
                      iter.index(), iter.emission(_slotID));
}
// Dismantle a ScopeStatement: detach its body and splice it into the parent
// in the scope statement's place, after migrating every lookup-table entry,
// symbol, and definition-block entry from the scope's own tables into the
// enclosing procedure's tables.  Returns the (re-parented) body statement.
Statement* scope_statement_walker::dismantle_scope_statement(ScopeStatement *the_scope_stat){
    ProcedureDefinition* proc_def = get_procedure_definition(the_scope_stat);
    Statement *body = the_scope_stat->get_body();
    // NOTE(review): when the body is missing we only print the statement and
    // then fall through to body->set_parent(0), which dereferences NULL --
    // presumably this path was never expected to be taken; confirm.
    if(body==NULL){
        the_scope_stat->print_to_default();
    }
    body->set_parent(0);
    the_scope_stat->set_body(0);
    // This is a bug?
    //    remove_suif_object(body);
    SymbolTable * symbol_table = the_scope_stat->get_symbol_table();
    DefinitionBlock * definition_block = the_scope_stat->get_definition_block();
    SymbolTable *new_symbol_table = proc_def->get_symbol_table();
    DefinitionBlock *new_definition_block = proc_def->get_definition_block();
    if (symbol_table != 0) {
        // start by creating a name for the symbol in the new symbol table
        Iter<SymbolTable::lookup_table_pair> piter =
            symbol_table->get_lookup_table_iterator();
        while (piter.is_valid()) {
            indexed_list<LString,SymbolTableObject*>::pair p = piter.current();
            SymbolTableObject * obj = p.second;
            const LString &name = p.first;
            new_symbol_table->add_lookup_table(name,obj);
            piter.next();
        }
        // now move all symbols into the symbol table for the procedure scope
        // at the same time, we delete them from the current symbol table and
        // remove all references to this symbol table from the name list attached
        // to the symbol
        // DLH
        // I modifed this to build a list so we aren't iterating over
        // a changing object (which didn't work. and I don't expect it to)
        list<SymbolTableObject*> l;
        {for (Iter<SymbolTableObject*> iter =
                  symbol_table->get_symbol_table_object_iterator();
              iter.is_valid(); iter.next()) {
            l.push_back(iter.current());
        }}
        for (list<SymbolTableObject*>::iterator iter = l.begin();
             iter != l.end(); iter++) {
            SymbolTableObject *object = *iter;
            symbol_table->remove_symbol_table_object(object);
            // Avoid name clashes in the destination table before adding.
            rename_if_collision(object, new_symbol_table);
            new_symbol_table->add_symbol(object);
            // symbol_table->remove_all_from_lookup_table(object);
            // object->remove_all_from_name(symbol_table);
        }
    }
    if (definition_block != 0) {
        // move all definition block entries
        int i = definition_block->get_variable_definition_count();
        while (i > 0){
            i--;
            VariableDefinition *next =
                definition_block->remove_variable_definition(i);
            new_definition_block->append_variable_definition(next);
        }
        i = definition_block->get_procedure_definition_count();
        while (i > 0){
            i--;
            ProcedureDefinition *next =
                definition_block->remove_procedure_definition(i);
            new_definition_block->append_procedure_definition(next);
        }
    }
    // Splice the body into the parent where the scope statement used to be.
    the_scope_stat->get_parent()->replace(the_scope_stat, body);
    return body;
}
// Rewrite C procedures that return a struct (GroupType) or array by value so
// that they instead return void and take a pointer to the caller-provided
// result area as a new first parameter.  Procedure definitions, return
// statements, call statements, and call expressions across the file set are
// all updated to the new convention.
void DismantleStructuredReturns::do_file_set_block( FileSetBlock* file_set_block ) {
    // For each rewritten procedure type, remember the qualified pointer type
    // that was inserted as its new first argument.
    suif_map<CProcedureType *,QualifiedType *> type_map;
    list<ArrayReferenceExpression*> ref_exprs;  // NOTE(review): never used below
    SuifEnv *env = 0;
    TypeBuilder *tb = 0;
    VoidType *vt = 0;
    // Pass 1: rewrite procedure types and their definitions.
    for (Iter<ProcedureSymbol> iter =
             object_iterator<ProcedureSymbol>(file_set_block);
         iter.is_valid(); iter.next()) {
        ProcedureSymbol *sym = &iter.current();
        Type *type = sym->get_type();
        if (!is_kind_of<CProcedureType>(type))
            continue;
        CProcedureType *cp_type = to<CProcedureType>(type);
        type = cp_type->get_result_type();
        if (!env) {
            // Lazily capture the environment and helpers from the first
            // C procedure encountered.
            env = type->get_suif_env();
            tb = (TypeBuilder*)
                env->get_object_factory(TypeBuilder::get_class_name());
            vt = tb->get_void_type();
        }
        suif_map<CProcedureType *,QualifiedType *>::iterator t_iter =
            type_map.find(cp_type);
        QualifiedType *qtype;
        if (t_iter == type_map.end()) {
            // Only structured (group/array) results need dismantling.
            if (!is_kind_of<GroupType>(type) && !is_kind_of<ArrayType>(type))
                continue;
            qtype = tb->get_qualified_type(
                tb->get_pointer_type(to<DataType>(type)));
            cp_type->set_result_type(vt);
            cp_type->insert_argument(0,qtype);
            type_map.enter_value(cp_type,qtype);
        } else {
            // The shared procedure type was already rewritten.
            qtype = (*t_iter).second;
        }
        ProcedureDefinition *def = sym->get_definition();
        if (!def)
            continue;  // declaration only -- no body to fix up
        // Add the result-pointer as a new first formal parameter.
        ParameterSymbol *par = create_parameter_symbol(env,qtype);
        def->get_symbol_table()->append_symbol_table_object(par);
        def->insert_formal_parameter(0,par);
        // Convert all returns into assigned and returns
        for (Iter<ReturnStatement> ret_iter =
                 object_iterator<ReturnStatement>(def->get_body());
             ret_iter.is_valid(); ret_iter.next()) {
            ReturnStatement *ret = &ret_iter.current();
            Expression *retval = ret->get_return_value();
            ret->set_return_value(0);
            // NOTE(review): assumes every return in such a procedure carries
            // a value; a bare "return;" would make retval NULL here -- confirm.
            retval->set_parent(0);
            insert_statement_before(ret,
                create_store_statement(env,retval,create_var_use(par)));
        }
    }
    // Change all calls to the new form
    // NOTE(review): tb/vt are only assigned in pass 1; if no C procedure
    // symbol was seen there, they are still NULL when used below -- confirm
    // callers guarantee at least one.
    for (Iter<CallStatement> cs_iter =
             object_iterator<CallStatement>(file_set_block);
         cs_iter.is_valid(); cs_iter.next()) {
        CallStatement *call = &cs_iter.current();
        Type *type = call->get_callee_address()->get_result_type();
        // Peel the pointer-to-procedure type off the callee address.
        Type *p_type =
            tb->unqualify_type(to<PointerType>(type)->get_reference_type());
        if (!is_kind_of<PointerType>(p_type))
            continue;
        p_type =
            tb->unqualify_type(to<PointerType>(p_type)->get_reference_type());
        if (!is_kind_of<CProcedureType>(p_type))
            continue;
        CProcedureType *cp_type = to<CProcedureType>(p_type);
        suif_map<CProcedureType *,QualifiedType *>::iterator t_iter =
            type_map.find(cp_type);
        if (t_iter == type_map.end())
            continue;  // callee type was not rewritten
        QualifiedType *qtype = (*t_iter).second;
        // Materialize a local temporary to receive the structured result and
        // pass its address as the new first argument.
        DataType *var_type = to<DataType>(
            tb->unqualify_type(to<PointerType>(qtype->get_base_type())
                ->get_reference_type()));
        VariableSymbol *var =
            new_anonymous_variable(env,call,tb->get_qualified_type(var_type));
        Expression *exp = create_symbol_address_expression(
            env, tb->get_pointer_type(var_type), var);
        call->insert_argument(0,exp);
        call->set_destination(0);
    }
    // Same rewrite for call expressions: the call is hoisted into its own
    // EvalStatement and its former use is replaced by a read of the temporary.
    for (Iter<CallExpression> ce_iter =
             object_iterator<CallExpression>(file_set_block);
         ce_iter.is_valid(); ce_iter.next()) {
        CallExpression *call = &ce_iter.current();
        Type *type = call->get_callee_address()->get_result_type();
        Type *p_type =
            tb->unqualify_type(to<PointerType>(type)->get_reference_type());
        if (!is_kind_of<PointerType>(p_type))
            continue;
        p_type =
            tb->unqualify_type(to<PointerType>(p_type)->get_reference_type());
        if (!is_kind_of<CProcedureType>(p_type))
            continue;
        CProcedureType *cp_type = to<CProcedureType>(p_type);
        ;
        suif_map<CProcedureType *,QualifiedType *>::iterator t_iter =
            type_map.find(cp_type);
        if (t_iter == type_map.end())
            continue;
        QualifiedType *qtype = (*t_iter).second;
        DataType *var_type = to<DataType>(
            tb->unqualify_type(to<PointerType>(qtype->get_base_type())
                ->get_reference_type()));
        VariableSymbol *var =
            new_anonymous_variable(env,call,tb->get_qualified_type(var_type));
        Expression *exp = create_symbol_address_expression(
            env, tb->get_pointer_type(var_type), var);
        call->insert_argument(0,exp);
        // Replace the expression's use with the temporary, then re-emit the
        // call for its side effects only.
        Statement *loc = get_expression_owner(call);
        call->get_parent()->replace(call,create_var_use(var));
        call->set_parent(0);
        suif_assert(vt != 0);
        call->set_result_type(vt);
        EvalStatement *es = create_eval_statement(env);
        insert_statement_before(loc,es);
        // Would be better to turn this into a call statement
        es->append_expression(call);
    }
}
// Find the next real instruction from the current position. static Iter getNextMachineInstr(Iter Position) { Iter I = Position, E = Position->getParent()->end(); I = std::find_if_not(I, E, [](const Iter &Insn) { return Insn->isTransient(); }); assert(I != E); return I; }
// For each innermost CForStatement (one containing no nested CForStatement),
// normalize its body to a StatementList and insert two MarkStatements:
// "end_of_mem_reads" after the leading run of StoreVariableStatements that
// contain a LoadExpression, and "beg_of_mem_writes" before the first
// StoreStatement that follows.  Both marks are recorded on the loop via
// BrickAnnotes so later passes can find them.
Walker::ApplyStatus mdbm_c_for_statement_walker::operator () (SuifObject *x) {
    SuifEnv *env = get_env();
    CForStatement *c_for_stmt = to<CForStatement>(x);
    // if(!is_stmt_within_begin_end_hw_marks(c_for_stmt))
    //     return Walker::Continue;
    Statement *body = c_for_stmt->get_body();
    if (body){
        // Only innermost loops are annotated; skip if a nested loop exists.
        Iter<CForStatement> iter_c_for = object_iterator<CForStatement>(body);
        if(iter_c_for.is_valid())
            return Walker::Continue;
        // Ensure the body is a StatementList so marks can be inserted into it.
        StatementList *stmt_list_body = NULL;
        if(is_a<StatementList>(body))
            stmt_list_body = to<StatementList>(body);
        else{
            stmt_list_body = create_statement_list(env);
            c_for_stmt->set_body(0);
            stmt_list_body->append_statement(body);
            c_for_stmt->set_body(stmt_list_body);
        }
        // Scan past the leading memory-read statements: StoreVariableStatements
        // that contain at least one LoadExpression.
        Iter<Statement*> iter = stmt_list_body->get_statement_iterator();
        for( ; iter.is_valid(); iter.next()){
            Statement *child_stmt = iter.current();
            if(is_a<StoreVariableStatement>(child_stmt)){
                Iter<LoadExpression> iter2 =
                    object_iterator<LoadExpression>(child_stmt);
                if(!iter2.is_valid())
                    break;
            }else break;
        }
        // Drop the "end of memory reads" mark at the current position (or at
        // the head of the body when the scan consumed nothing / everything).
        MarkStatement *end_of_mem_reads_mark = create_mark_statement(env);
        if(iter.is_valid())
            insert_statement_before(iter.current(), end_of_mem_reads_mark);
        else stmt_list_body->insert_statement(0, end_of_mem_reads_mark);
        BrickAnnote *ba = create_brick_annote(env, "end_of_mem_reads");
        ba->append_brick(create_suif_object_brick(env, end_of_mem_reads_mark));
        c_for_stmt->append_annote(ba);
        // Continue scanning until the first memory-write (StoreStatement).
        for( ; iter.is_valid(); iter.next()){
            Statement *child_stmt = iter.current();
            if(is_a<StoreStatement>(child_stmt))
                break;
        }
        // Drop the "beginning of memory writes" mark there (or at the tail of
        // the body when no StoreStatement was found).
        MarkStatement *beg_of_mem_writes_mark = create_mark_statement(env);
        if(iter.is_valid())
            insert_statement_before(iter.current(), beg_of_mem_writes_mark);
        else stmt_list_body->append_statement(beg_of_mem_writes_mark);
        ba = create_brick_annote(env, "beg_of_mem_writes");
        ba->append_brick(create_suif_object_brick(env, beg_of_mem_writes_mark));
        c_for_stmt->append_annote(ba);
    }
    return Walker::Continue;
}
// Swap two elements of a boost::ptr_vector by exchanging the underlying raw
// pointers (via the iterators' base()), so no element object is copied or
// cloned.  The vector argument is unused here; presumably it exists so this
// overload matches a generic swap interface shared with other containers --
// TODO confirm.  NOTE: the template header declaring T, C, A and Iter
// precedes this excerpt.
void swap_elements( Iter first, Iter second, boost::ptr_vector<T,C,A>& vec) {
    std::swap( *first.base(), *second.base());
}
int main() { const char * const JIEBA = "./dict/jieba.dict.utf8"; const char * const HHM = "./dict/hmm_model.utf8"; CppJieba::MixSegment seg(JIEBA, HHM); ifstream pagelib("/home/kevin/SpiderData/dmoz/pagelib.data"); ifstream pageindex("/home/kevin/SpiderData/dmoz/pageindex.index"); ofstream outputpageContent("/home/kevin/SpiderData/dmoz/pageTermContent.data", std::ofstream::app); //读取停用词集 ifstream stopwordsfile("/home/kevin/Github_practice/Week_11/5-18/getHtmlContentfromindex/dict/stop_words.utf8"); set<string> stopwords; string temp; while(stopwordsfile >> temp) { stopwords.insert(temp); } stopwordsfile.close(); //读取索引文件到vector unsigned int docid; unsigned long long int pos; vector<pair<unsigned int, unsigned long long int> > indexvec; while(pageindex >> docid >> pos) { indexvec.push_back(make_pair(docid, pos)); } pageindex.close(); //遍历原始网页库以建立网页文本库 for(unsigned int index = 0; index != indexvec.size() - 1; index++) { //读取原始网页文件 pagelib.seekg(indexvec[index].second); size_t length = indexvec[index + 1].second - indexvec[index].second; char *buf = new char[length]; pagelib.read(buf, length); string result(buf); result = parseString(result, 20);//抽取正文 最后一个参数为抽取阈值,越大抽取越严格! if(result.empty()) { continue; } //这一步之前必须判断字符的编码! if(!isUtf_8(result.c_str(), result.length())) { result = boost::locale::conv::between(result, "UTF-8", "GBK");//正文的编码转换 } std::vector<string> segwords; //分词结果数组 seg.cut(result,segwords); //分词 //将结果写入文件 outputpageContent << indexvec[index].first << endl; //写入文档编号 if(!segwords.empty()) { typedef vector<string>::iterator Iter; for(Iter iter = segwords.begin(); iter != segwords.end(); ++iter) { if(iter->length() >= 2 && iter->length() < 20 && !stopwords.count(*iter)) { //is vital to check the length! outputpageContent << *iter << ' '; } } outputpageContent << endl; } else { outputpageContent << endl; } delete[] buf; } outputpageContent.close(); return 0; }
// Feedback-load elimination for an innermost CForStatement: when a store to
// an array element is read back by a load in a later iteration (a "feedback
// pair"), route the value through a scalar feedback variable instead of
// memory.  The stored value is copied into the feedback variable before the
// "beg_of_mem_writes" mark, the dependent load is replaced by a read of the
// feedback variable after the "end_of_mem_reads" mark, and the original load
// statement is hoisted out of the loop as an initializer.
Walker::ApplyStatus fle_c_for_statement_walker::operator () (SuifObject *x) {
    SuifEnv *env = get_env();
    CForStatement *c_for_stmt = to<CForStatement>(x);
    // if(!is_stmt_within_begin_end_hw_marks(c_for_stmt))
    //     return Walker::Continue;
    Statement *body = c_for_stmt->get_body();
    // Only innermost loops are transformed.
    Iter<CForStatement> iter = object_iterator<CForStatement>(body);
    if(iter.is_valid())
        return Walker::Continue;
    // Loop counter name and step size, as recorded by an earlier pass in the
    // "c_for_info" annote (bricks 1 and 4 respectively).
    BrickAnnote* c_for_info =
        to<BrickAnnote>(c_for_stmt->lookup_annote_by_name("c_for_info"));
    String c_for_loop_counter_name =
        (to<StringBrick>(c_for_info->get_brick(1)))->get_value();
    int c_for_loop_step_size =
        (to<IntegerBrick>(c_for_info->get_brick(4)))->get_value().c_int();
    if(body){
        list<StoreStatement*>* stores_list = collect_objects<StoreStatement>(body);
        if(stores_list->size() <= 0){
            delete stores_list;
            return Walker::Continue;
        }
        ProcedureDefinition* proc_def = get_procedure_definition(c_for_stmt);
        VariableSymbol *c_for_stmt_index_var =
            get_c_for_basic_induction_variable(c_for_stmt);
        Expression *c_for_stmt_lower_bound_expr =
            get_c_for_lower_bound_expr(c_for_stmt);
        // Recover the two mark statements planted by the earlier marking pass.
        BrickAnnote *ba =
            to<BrickAnnote>(c_for_stmt->lookup_annote_by_name("end_of_mem_reads"));
        SuifObjectBrick *sob = to<SuifObjectBrick>(ba->get_brick(0));
        MarkStatement *end_of_mem_reads = to<MarkStatement>(sob->get_object());
        ba = to<BrickAnnote>(c_for_stmt->lookup_annote_by_name("beg_of_mem_writes"));
        sob = to<SuifObjectBrick>(ba->get_brick(0));
        MarkStatement *beg_of_mem_writes = to<MarkStatement>(sob->get_object());
        list<VariableSymbol*>* array_names_in_load_exprs =
            collect_array_name_symbols_used_in_loads(body);
        // Map each array-reference load in the body to its ArrayInfo
        // (attached as an "array_ref_info" annote by an earlier pass).
        suif_map<LoadExpression*, ArrayInfo*>* load_expr_array_info_map =
            new suif_map<LoadExpression*, ArrayInfo*>;
        list<LoadExpression*>* loads_list = collect_objects<LoadExpression>(body);
        for(list<LoadExpression*>::iterator iter2 = loads_list->begin();
            iter2 != loads_list->end(); iter2++) {
            LoadExpression *load_expr = *iter2;
            // NOTE(review): computed before the array-reference check below and
            // unused in this loop; to<> presumably asserts that every load's
            // owner is a StoreVariableStatement -- confirm.
            StoreVariableStatement *load_parent =
                to<StoreVariableStatement>(get_expression_owner(load_expr));
            if(!is_a<ArrayReferenceExpression>(load_expr->get_source_address()))
                continue;
            ArrayReferenceExpression *source_address_expr =
                to<ArrayReferenceExpression>(load_expr->get_source_address());
            BrickAnnote *source_address_info_annote =
                to<BrickAnnote>(source_address_expr->lookup_annote_by_name("array_ref_info"));
            sob = to<SuifObjectBrick>(source_address_info_annote->get_brick(0));
            ArrayInfo *source_address_info = (ArrayInfo*)(sob->get_object());
            load_expr_array_info_map->enter_value(load_expr, source_address_info);
        }
        delete loads_list;
        // Statements to be spliced in around the marks and before the loop.
        StatementList* before_beg_of_mem_writes = create_statement_list(env);
        StatementList* after_end_of_mem_reads = create_statement_list(env);
        StatementList* load_inits = create_statement_list(env);
        list<Statement*>* to_be_removed = new list<Statement*>;
        for(list<StoreStatement*>::iterator iter = stores_list->begin();
            iter != stores_list->end(); iter++) {
            StoreStatement *store_stmt = *iter;
            if(!is_a<ArrayReferenceExpression>(store_stmt->get_destination_address()))
                continue;
            ArrayReferenceExpression *destination_address_expr =
                to<ArrayReferenceExpression>(store_stmt->get_destination_address());
            BrickAnnote *destination_address_info_annote =
                to<BrickAnnote>(destination_address_expr->lookup_annote_by_name("array_ref_info"));
            SuifObjectBrick *sob =
                to<SuifObjectBrick>(destination_address_info_annote->get_brick(0));
            ArrayInfo *destination_address_info = (ArrayInfo*)(sob->get_object());
            VariableSymbol *array_sym =
                get_array_name_symbol(destination_address_expr);
            Type *t = get_base_type(destination_address_expr->get_result_type());
            // One feedback variable per store, created lazily on the first
            // matching load.
            VariableSymbol *feedback_var = NULL;
            // Manual iterator advance: entries are erased during the walk, so
            // the loop header performs no increment of its own.
            for(suif_map<LoadExpression*, ArrayInfo*>::iterator iter2 =
                    load_expr_array_info_map->begin();
                iter2 != load_expr_array_info_map->end(); ) {
                ArrayInfo *source_address_info = (*iter2).second;
                if(destination_address_info->get_array_symbol_name() !=
                   source_address_info->get_array_symbol_name()){
                    iter2++;
                    continue;
                }
                if(is_a_feedback_pair(destination_address_info, source_address_info,
                                      c_for_loop_counter_name, c_for_loop_step_size)){
                    if(!feedback_var){
                        feedback_var = new_anonymous_variable(env,
                            find_scope(proc_def->get_body()),
                            retrieve_qualified_type(to<DataType>(t)));
                        name_variable(feedback_var);
                        // Copy the stored value into the feedback variable just
                        // before the memory writes begin.
                        StoreVariableStatement *feedback_var_set =
                            create_store_variable_statement(env, feedback_var,
                                to<LoadVariableExpression>(deep_suif_clone(store_stmt->get_value())));
                        before_beg_of_mem_writes->append_statement(feedback_var_set);
                    }
                    LoadExpression *load_expr = (*iter2).first;
                    StoreVariableStatement *load_parent =
                        to<StoreVariableStatement>(get_expression_owner(load_expr));
                    // Read the feedback variable where the load's result was used.
                    StoreVariableStatement *feedback_var_get =
                        create_store_variable_statement(env,
                            load_parent->get_destination(),
                            create_load_variable_expression(env, to<DataType>(t), feedback_var));
                    after_end_of_mem_reads->append_statement(feedback_var_get);
                    // Erase the consumed entry, advancing first so the
                    // iterator stays valid.
                    suif_map<LoadExpression*, ArrayInfo*>::iterator iter_temp = iter2;
                    iter2++;
                    load_expr_array_info_map->erase(iter_temp);
                    // The original load statement becomes a pre-loop
                    // initializer of the feedback variable.
                    to_be_removed->push_back(load_parent);
                    load_parent->set_parent(0);
                    load_parent->set_destination(feedback_var);
                    load_inits->append_statement(load_parent);
                }else iter2++;
            }
        }
        // Strip the hoisted load statements out of the loop body.
        int i = 0;
        StatementList* the_list = to<StatementList>(body);
        while(i < the_list->get_statement_count()){
            if(is_in_list(the_list->get_statement(i), to_be_removed))
                the_list->remove_statement(i);
            else i++;
        }
        // Rewrite induction-variable uses inside the initializers to the
        // loop's lower bound.
        fle_load_variable_expression_walker walker(env, c_for_stmt_index_var,
                                                   c_for_stmt_lower_bound_expr);
        load_inits->walk(walker);
        insert_statement_before(beg_of_mem_writes, before_beg_of_mem_writes);
        insert_statement_after(end_of_mem_reads, after_end_of_mem_reads);
        insert_statement_before(c_for_stmt, load_inits);
        delete stores_list;
        delete array_names_in_load_exprs;
        delete load_expr_array_info_map;
        delete to_be_removed;
    }
    return Walker::Continue;
}
// Call this function when searching below a dst_type node. This function searches
// for a path to (static_ptr, static_type) and for paths to one or more dst_type nodes.
// If it finds a static_type node, there is no need to further search base classes
// above.
// If it finds a dst_type node it should search base classes using search_above_dst
// to find out if this dst_type points to (static_ptr, static_type) or not.
// Either way, the dst_type is recorded as one of two "flavors": one that does
// or does not point to (static_ptr, static_type).
// If this is neither a static_type nor a dst_type node, continue searching
// base classes above.
// All the hoopla surrounding the search code is doing nothing but looking for
// excuses to stop the search prematurely (break out of the for-loop). That is,
// the algorithm below is simply an optimization of this:
// void
// __vmi_class_type_info::search_below_dst(__dynamic_cast_info* info,
//                                         const void* current_ptr,
//                                         int path_below) const
// {
//     typedef const __base_class_type_info* Iter;
//     if (this == info->static_type)
//         process_static_type_below_dst(info, current_ptr, path_below);
//     else if (this == info->dst_type)
//     {
//         // Record the most public access path that got us here
//         if (info->path_dynamic_ptr_to_dst_ptr != public_path)
//             info->path_dynamic_ptr_to_dst_ptr = path_below;
//         bool does_dst_type_point_to_our_static_type = false;
//         for (Iter p = __base_info, e= __base_info + __base_count; p < e; ++p)
//         {
//             p->search_above_dst(info, current_ptr, current_ptr, public_path);
//             if (info->found_our_static_ptr)
//                 does_dst_type_point_to_our_static_type = true;
//             // break out early here if you can detect it doesn't matter if you do
//         }
//         if (!does_dst_type_point_to_our_static_type)
//         {
//             // We found a dst_type that doesn't point to (static_ptr, static_type)
//             // So record the address of this dst_ptr and increment the
//             // count of the number of such dst_types found in the tree.
//             info->dst_ptr_not_leading_to_static_ptr = current_ptr;
//             info->number_to_dst_ptr += 1;
//         }
//     }
//     else
//     {
//         // This is not a static_type and not a dst_type.
//         for (Iter p = __base_info, e = __base_info + __base_count; p < e; ++p)
//         {
//             p->search_below_dst(info, current_ptr, public_path);
//             // break out early here if you can detect it doesn't matter if you do
//         }
//     }
// }
void
__vmi_class_type_info::search_below_dst(__dynamic_cast_info* info,
                                        const void* current_ptr,
                                        int path_below,
                                        bool use_strcmp) const
{
    typedef const __base_class_type_info* Iter;
    if (is_equal(this, info->static_type, use_strcmp))
        process_static_type_below_dst(info, current_ptr, path_below);
    else if (is_equal(this, info->dst_type, use_strcmp))
    {
        // We've been here before if we've recorded current_ptr in one of these
        // two places:
        if (current_ptr == info->dst_ptr_leading_to_static_ptr ||
            current_ptr == info->dst_ptr_not_leading_to_static_ptr)
        {
            // We've seen this node before, and therefore have already searched
            // its base classes above.
            // Update path to here that is "most public".
            if (path_below == public_path)
                info->path_dynamic_ptr_to_dst_ptr = public_path;
        }
        else  // We haven't been here before
        {
            // Record the access path that got us here
            // If there is more than one dst_type this path doesn't matter.
            info->path_dynamic_ptr_to_dst_ptr = path_below;
            // Only search above here if dst_type derives from static_type, or
            // if it is unknown if dst_type derives from static_type.
            if (info->is_dst_type_derived_from_static_type != no)
            {
                // Set up flags to record results from all base classes
                bool is_dst_type_derived_from_static_type = false;
                bool does_dst_type_point_to_our_static_type = false;
                // We've found a dst_type with a potentially public path to here.
                // We have to assume the path is public because it may become
                // public later (if we get back to here with a public path).
                // We can stop looking above if:
                // 1. We've found a public path to (static_ptr, static_type).
                // 2. We've found an ambiguous cast from (static_ptr, static_type) to a dst_type.
                //    This is detected at the (static_ptr, static_type).
                // 3. We can prove that there is no public path to (static_ptr,
                //    static_type) above here.
                const Iter e = __base_info + __base_count;
                for (Iter p = __base_info; p < e; ++p)
                {
                    // Zero out found flags
                    info->found_our_static_ptr = false;
                    info->found_any_static_type = false;
                    p->search_above_dst(info, current_ptr, current_ptr, public_path, use_strcmp);
                    if (info->search_done)
                        break;
                    if (info->found_any_static_type)
                    {
                        is_dst_type_derived_from_static_type = true;
                        if (info->found_our_static_ptr)
                        {
                            does_dst_type_point_to_our_static_type = true;
                            // If we found what we're looking for, stop looking above.
                            if (info->path_dst_ptr_to_static_ptr == public_path)
                                break;
                            // We found a private path to (static_ptr, static_type)
                            // If there is no diamond then there is only one path
                            // to (static_ptr, static_type) and we just found it.
                            if (!(__flags & __diamond_shaped_mask))
                                break;
                        }
                        else
                        {
                            // If we found a static_type that isn't the one we're looking
                            // for, and if there are no repeated types above here,
                            // then stop looking.
                            if (!(__flags & __non_diamond_repeat_mask))
                                break;
                        }
                    }
                }
                if (!does_dst_type_point_to_our_static_type)
                {
                    // We found a dst_type that doesn't point to (static_ptr, static_type)
                    // So record the address of this dst_ptr and increment the
                    // count of the number of such dst_types found in the tree.
                    info->dst_ptr_not_leading_to_static_ptr = current_ptr;
                    info->number_to_dst_ptr += 1;
                    // If there exists another dst with a private path to
                    // (static_ptr, static_type), then the cast from
                    // (dynamic_ptr, dynamic_type) to dst_type is now ambiguous,
                    // so stop search.
                    if (info->number_to_static_ptr == 1 &&
                            info->path_dst_ptr_to_static_ptr == not_public_path)
                        info->search_done = true;
                }
                // If we found no static_types then dst_type doesn't derive
                // from static_type, else it does.  Record this result so that
                // next time we hit a dst_type we will know not to search above
                // it if it doesn't derive from static_type.
                if (is_dst_type_derived_from_static_type)
                    info->is_dst_type_derived_from_static_type = yes;
                else
                    info->is_dst_type_derived_from_static_type = no;
            }
        }
    }
    else
    {
        // This is not a static_type and not a dst_type.
        const Iter e = __base_info + __base_count;
        Iter p = __base_info;
        p->search_below_dst(info, current_ptr, path_below, use_strcmp);
        if (++p < e)
        {
            if ((__flags & __diamond_shaped_mask) || info->number_to_static_ptr == 1)
            {
                // If there are multiple paths to a base above from here, or if
                // a dst_type pointing to (static_ptr, static_type) has been found,
                // then there is no way to break out of this loop early unless
                // something below detects the search is done.
                do
                {
                    if (info->search_done)
                        break;
                    p->search_below_dst(info, current_ptr, path_below, use_strcmp);
                } while (++p < e);
            }
            else if (__flags & __non_diamond_repeat_mask)
            {
                // There are not multiple paths to any base class from here and a
                // dst_type pointing to (static_ptr, static_type) has not yet been
                // found.
                do
                {
                    if (info->search_done)
                        break;
                    // If we just found a dst_type with a public path to (static_ptr, static_type),
                    // then the only reason to continue the search is to make sure
                    // no other dst_type points to (static_ptr, static_type).
                    // If !diamond, then we don't need to search here.
                    if (info->number_to_static_ptr == 1 &&
                              info->path_dst_ptr_to_static_ptr == public_path)
                        break;
                    p->search_below_dst(info, current_ptr, path_below, use_strcmp);
                } while (++p < e);
            }
            else
            {
                // There are no repeated types above this node.
                // There are no nodes with multiple parents above this node.
                // no dst_type has been found to (static_ptr, static_type)
                do
                {
                    if (info->search_done)
                        break;
                    // If we just found a dst_type with a public path to (static_ptr, static_type),
                    // then the only reason to continue the search is to make sure
                    // no other dst_type points to (static_ptr, static_type).
                    // If !diamond, then we don't need to search here.
                    // if we just found a dst_type with a private path to (static_ptr, static_type),
                    // then we're only looking for a public path to (static_ptr, static_type)
                    // and to check for other dst_types.
                    // If !diamond & !repeat, then there is not a pointer to (static_ptr, static_type)
                    // and not a dst_type under here.
                    if (info->number_to_static_ptr == 1)
                        break;
                    p->search_below_dst(info, current_ptr, path_below, use_strcmp);
                } while (++p < e);
            }
        }
    }
}
int main (int argc, char * const argv[]) { SkAutoGraphics ag; const char* writePath = NULL; // if non-null, where we write the originals const char* readPath = NULL; // if non-null, were we read from to compare char* const* stop = argv + argc; for (++argv; argv < stop; ++argv) { if (strcmp(*argv, "-w") == 0) { argv++; if (argv < stop && **argv) { writePath = *argv; } } else if (strcmp(*argv, "-r") == 0) { argv++; if (argv < stop && **argv) { readPath = *argv; } } } Iter iter; GM* gm; while ((gm = iter.next()) != NULL) { SkISize size = gm->getISize(); SkDebugf("creating... %s [%d %d]\n", gm->shortName(), size.width(), size.height()); SkBitmap bitmap; for (size_t i = 0; i < SK_ARRAY_COUNT(gRec); i++) { bitmap.setConfig(gRec[i].fConfig, size.width(), size.height()); bitmap.allocPixels(); bitmap.eraseColor(0); SkCanvas canvas(bitmap); gm->draw(&canvas); SkString name = make_name(gm->shortName(), gRec[i].fName); if (writePath) { SkString path = make_filename(writePath, name); bool success = write_bitmap(path, bitmap); if (!success) { fprintf(stderr, "FAILED to write %s\n", path.c_str()); } } else if (readPath) { SkString path = make_filename(readPath, name); SkBitmap orig; bool success = SkImageDecoder::DecodeFile(path.c_str(), &orig, SkBitmap::kARGB_8888_Config, SkImageDecoder::kDecodePixels_Mode, NULL); if (success) { compare(bitmap, orig, name); } else { fprintf(stderr, "FAILED to read %s\n", path.c_str()); } } } SkDELETE(gm); } return 0; }
// Validate the internal consistency of a SymbolTable: every lookup-table
// pair must reference an object that is a member of the table, and every
// named member object must appear in the lookup list.  Each violation is
// reported through add_error(); returns true iff no violation was found.
// Throws a SuifException when symtab is NULL.
bool SuifValidater::is_valid_SymbolTable(SymbolTable* symtab) {
    if (symtab == NULL)
        SUIF_THROW(SuifException(String("Cannot validate a NULL SymbolTable.")));

    bool valid = true;

    // Pass 1: lookup pairs must not dangle.
    for (Iter<SymbolTable::lookup_table_pair> pair_iter =
             symtab->get_lookup_table_iterator();
         pair_iter.is_valid(); pair_iter.next()) {
        if (!symtab->has_symbol_table_object_member(pair_iter.current().second)) {
            valid = false;
            add_error(to_id_string(symtab) + " has a lookup pair <" +
                      pair_iter.current().first + ", " +
                      to_id_string(pair_iter.current().second) +
                      "> with dangling object.");
        }
    }

    // Pass 2: named member objects must be reachable via the lookup list.
    for (Iter<SymbolTableObject*> obj_iter =
             symtab->get_symbol_table_object_iterator();
         obj_iter.is_valid(); obj_iter.next()) {
        SymbolTableObject *member = obj_iter.current();
        if ((member->get_name().length() > 0) &&
            !is_in_lookup_list(member, symtab)) {
            valid = false;
            add_error(to_id_string(symtab) + " has " + to_id_string(member) +
                      " not in lookup list.");
        }
    }
    return valid;
}
// Convert a system/procedure into a module interface: gather the annotated
// input/output scalar ports, pack them into a new struct type (one field per
// port), register the struct at file scope, and rewrite the procedure to take
// and return that struct.  All loads, stores, symbol-address uses, and call
// destinations of the original port variables are redirected to the
// corresponding struct fields.
void TransformSystemsToModules::Transform() {
  assert(procDef != NULL) ;
  // Collect all the input scalars and output scalars
  list<VariableSymbol*> ports ;
  SymbolTable* procSymTab = procDef->get_symbol_table() ;
  bool foundInputs = false ;
  bool foundOutputs = false ;
  for (int i = 0 ; i < procSymTab->get_symbol_table_object_count() ; ++i)
  {
    SymbolTableObject* nextObject = procSymTab->get_symbol_table_object(i) ;
    if (nextObject->lookup_annote_by_name("InputScalar") != NULL)
    {
      VariableSymbol* toConvert = dynamic_cast<VariableSymbol*>(nextObject) ;
      assert(toConvert != NULL) ;
      // Suffix input ports with "_in" so the field names are distinct.
      LString inputName = toConvert->get_name() ;
      inputName = inputName + "_in" ;
      toConvert->set_name(inputName) ;
      ports.push_back(toConvert) ;
      foundInputs = true ;
    }
    if (nextObject->lookup_annote_by_name("OutputVariable") != NULL)
    {
      VariableSymbol* toConvert = dynamic_cast<VariableSymbol*>(nextObject) ;
      assert(toConvert != NULL) ;
      // Suffix output ports with "_out".
      LString outputName = toConvert->get_name() ;
      outputName = outputName + "_out" ;
      toConvert->set_name(outputName) ;
      ports.push_back(toConvert) ;
      foundOutputs = true ;
    }
  }
  assert(foundInputs &&
         "Could not identify inputs. Were they removed via optimizations?") ;
  assert(foundOutputs &&
         "Could not identify outputs. Were they removed via optimizations?") ;
  // Determine the bit size and add everything to a new symbol table
  int bitSize = 0 ;
  GroupSymbolTable* structTable =
    create_group_symbol_table(theEnv, procDef->get_symbol_table()) ;
  // Maps each original port variable to the struct field that replaces it.
  std::map<VariableSymbol*, FieldSymbol*> replacementFields ;
  bool portsRemoved = false ;
  // If this was actually a new style module, we should make sure to
  // put these in the correct order.
  if (isModule(procDef))
  {
    // Go through the original symbol table and remove any parameter
    // symbols that originally existed
    SymbolTable* originalSymTab = procDef->get_symbol_table() ;
    Iter<SymbolTableObject*> originalIter =
      originalSymTab->get_symbol_table_object_iterator() ;
    while (originalIter.is_valid())
    {
      SymbolTableObject* currentObj = originalIter.current() ;
      // Advance before removing so the iterator stays valid.
      originalIter.next() ;
      if (dynamic_cast<ParameterSymbol*>(currentObj) != NULL)
      {
        originalSymTab->remove_symbol_table_object(currentObj) ;
      }
    }
    portsRemoved = true ;
    // Sort the variable symbols in parameter order. This is just an
    // insertion sort, so it could be done faster.
    list<VariableSymbol*> sortedPorts ;
    for (int i = 0 ; i < ports.size() ; ++i)
    {
      // Find the port whose "ParameterOrder" annote records position i.
      list<VariableSymbol*>::iterator portIter = ports.begin() ;
      while (portIter != ports.end())
      {
        BrickAnnote* orderAnnote =
          dynamic_cast<BrickAnnote*>((*portIter)->
                                     lookup_annote_by_name("ParameterOrder")) ;
        if (orderAnnote == NULL)
        {
          ++portIter ;
          continue ;
        }
        IntegerBrick* orderBrick =
          dynamic_cast<IntegerBrick*>(orderAnnote->get_brick(0)) ;
        assert(orderBrick != NULL) ;
        if (orderBrick->get_value().c_int() == i)
        {
          sortedPorts.push_back(*portIter) ;
          break ;
        }
        ++portIter ;
      }
    }
    if (sortedPorts.size() != ports.size())
    {
      OutputWarning("Warning! Analysis detected some input scalars not in"
                    " the parameter list") ;
    }
    // Replace ports with sortedPorts
    ports = sortedPorts ;
  }
  // Build one struct field per port and redirect the port to it.
  list<VariableSymbol*>::iterator portIter = ports.begin() ;
  while (portIter != ports.end())
  {
    // NOTE(review): bitSize is incremented before being used as this field's
    // offset, so each field's offset is the end of the field rather than its
    // start -- confirm this is the intended layout convention.
    bitSize +=
      (*portIter)->get_type()->get_base_type()->get_bit_size().c_int() ;
    LString dupeName = (*portIter)->get_name() ;
    // Create offset expression:
    IntConstant* offset =
      create_int_constant(theEnv,
                          create_data_type(theEnv, IInteger(32), 0),
                          IInteger(bitSize)) ;
    QualifiedType* dupeType = (*portIter)->get_type() ;
    // Deal with the case where reference types were passed in
    ReferenceType* refType =
      dynamic_cast<ReferenceType*>(dupeType->get_base_type()) ;
    while (refType != NULL)
    {
      dupeType = dynamic_cast<QualifiedType*>(refType->get_reference_type()) ;
      assert(dupeType != NULL) ;
      refType = dynamic_cast<ReferenceType*>(dupeType->get_base_type()) ;
    }
    // Create a new variable symbol clone
    FieldSymbol* dupe =
      create_field_symbol(theEnv, dupeType, offset, dupeName) ;
    structTable->append_symbol_table_object(dupe) ;
    // Make the connection with the duplicated symbol
    replacementFields[(*portIter)] = dupe ;
    // Remove the original variable symbol from the procedure definition
    // symbol table.
    if (!portsRemoved)
    {
      procDef->get_symbol_table()->remove_symbol_table_object(*portIter) ;
    }
    ++portIter ;
  }
  assert(bitSize != 0);
  StructType* moduleStruct =
    create_struct_type(theEnv,
                       IInteger(bitSize),
                       0, // bit_alignment
                       TempName(procDef->get_procedure_symbol()->get_name()),
                       0, // is_complete
                       structTable) ;
  // The struct type lives in the file block's symbol table, not the
  // procedure's.
  Iter<FileBlock*> fBlocks =
    theEnv->get_file_set_block()->get_file_block_iterator() ;
  assert(fBlocks.is_valid()) ;
  (fBlocks.current())->get_symbol_table()->
    append_symbol_table_object(moduleStruct) ;
  // This is commented out because it is in the file state block
  //procDef->get_symbol_table()->append_symbol_table_object(moduleStruct) ;
  QualifiedType* qualifiedModuleStruct =
    create_qualified_type(theEnv,
                          moduleStruct,
                          TempName(LString("qualifiedModuleStruct"))) ;
  procDef->get_symbol_table()->
    append_symbol_table_object(qualifiedModuleStruct) ;
  // Create an instance of this type and add it to the symbol table.
  ParameterSymbol* structInstance =
    create_parameter_symbol(theEnv,
                            qualifiedModuleStruct,
                            TempName(LString("structInstance"))) ;
  procDef->get_symbol_table()->append_symbol_table_object(structInstance) ;
  // Now, set up the procedure symbol to take the struct and return the
  // struct.
  assert(procDef != NULL) ;
  ProcedureSymbol* procSym = procDef->get_procedure_symbol() ;
  assert(procSym != NULL) ;
  ProcedureType* procType = procSym->get_type() ;
  assert(procType != NULL) ;
  CProcedureType* cProcType = dynamic_cast<CProcedureType*>(procType) ;
  assert(cProcType != NULL) ;
  // Instead of appending the struct argument, we need to replace all of the
  // arguments with the struct.
  while (cProcType->get_argument_count() > 0)
  {
    cProcType->remove_argument(0) ;
  }
  cProcType->set_result_type(moduleStruct) ;
  cProcType->append_argument(qualifiedModuleStruct) ;
  // Now go through all load variable expressions and replace them all with
  // field symbol values if appropriate
  list<LoadVariableExpression*>* allLoads =
    collect_objects<LoadVariableExpression>(procDef->get_body()) ;
  list<LoadVariableExpression*>::iterator loadIter = allLoads->begin() ;
  while (loadIter != allLoads->end())
  {
    VariableSymbol* currentVariable = (*loadIter)->get_source() ;
    if (replacementFields.find(currentVariable) != replacementFields.end())
    {
      (*loadIter)->set_source(replacementFields[currentVariable]) ;
    }
    ++loadIter ;
  }
  delete allLoads ;
  // Also replace all of the definitions with the field symbol
  list<StoreVariableStatement*>* allStoreVars =
    collect_objects<StoreVariableStatement>(procDef->get_body()) ;
  list<StoreVariableStatement*>::iterator storeVarIter = allStoreVars->begin();
  while (storeVarIter != allStoreVars->end())
  {
    VariableSymbol* currentDest = (*storeVarIter)->get_destination() ;
    if (replacementFields.find(currentDest) != replacementFields.end())
    {
      (*storeVarIter)->set_destination(replacementFields[currentDest]) ;
    }
    ++storeVarIter ;
  }
  delete allStoreVars ;
  // Redirect address-of uses of the ports as well.
  list<SymbolAddressExpression*>* allSymAddr =
    collect_objects<SymbolAddressExpression>(procDef->get_body()) ;
  list<SymbolAddressExpression*>::iterator symAddrIter = allSymAddr->begin() ;
  while (symAddrIter != allSymAddr->end())
  {
    VariableSymbol* currentVar =
      dynamic_cast<VariableSymbol*>((*symAddrIter)->get_addressed_symbol()) ;
    if (currentVar != NULL &&
        replacementFields.find(currentVar) != replacementFields.end())
    {
      (*symAddrIter)->set_addressed_symbol(replacementFields[currentVar]) ;
    }
    ++symAddrIter ;
  }
  delete allSymAddr ;
  // One final for bool selects
  list<CallStatement*>* allCalls =
    collect_objects<CallStatement>(procDef->get_body()) ;
  list<CallStatement*>::iterator callIter = allCalls->begin() ;
  while(callIter != allCalls->end())
  {
    VariableSymbol* currentVar = (*callIter)->get_destination() ;
    if (currentVar != NULL &&
        replacementFields.find(currentVar) != replacementFields.end())
    {
      (*callIter)->set_destination(replacementFields[currentVar]) ;
    }
    ++callIter ;
  }
  delete allCalls ;
}
// Render a generic Statement in the fallback textual form:
//   (<dest vars>) = ?<ClassName>(<source ops>)<child stmts>;
// Destinations and sources are comma-separated; child statements are
// separated by "; ".  Each piece is printed via the module's own
// print_to_string so nested nodes use the normal printing rules.
static String handle_static_statement(CPrintStyleModule *state, const SuifObject *obj) {
    Statement *stmt = to<Statement>(obj);

    String result = "(";

    // Destination variables, comma-separated.
    const char *sep = "";
    for (Iter<VariableSymbol *> dst_iter = stmt->get_destination_var_iterator();
         dst_iter.is_valid(); dst_iter.next()) {
        result += sep;
        result += state->print_to_string(dst_iter.current());
        sep = ",";
    }
    result += ") = ";

    // Unknown statement kinds are flagged with a leading '?' before the class name.
    String opname = stmt->getClassName();
    result += String("?") + opname + "(";

    // Source operand expressions, comma-separated.
    sep = "";
    for (Iter<Expression *> src_iter = stmt->get_source_op_iterator();
         src_iter.is_valid(); src_iter.next()) {
        result += sep;
        result += state->print_to_string(src_iter.current());
        sep = ",";
    }
    result += ")";

    // Child statements, separated by "; ", appended directly after the close paren.
    sep = "";
    for (Iter<Statement *> child_iter = stmt->get_child_statement_iterator();
         child_iter.is_valid(); child_iter.next()) {
        result += sep;
        result += state->print_to_string(child_iter.current());
        sep = "; ";
    }
    result += ";";
    return(result);
}
/*
 * layout_frame -- Assign stack frame locations to each variable that needs one.
 * Store the offsets in frame_map.
 *
 * A local variable needs a memory-stack location unless it is never used or it
 * is a memory-passed parameter.
 */
void
CodeFinIa64::layout_frame()
{
    debug(4, "... determine offsets from $sp for variables");

    // Force max_arg_area to be a 16-byte multiple to be sure that the local-
    // storage area starts on a 16-byte boundary.
    max_arg_area = (max_arg_area + 15) & -16;

    // Unless this is a leaf procedure, reserve a 16-byte scratch area for
    // callees at the young end of the current frame.
    int scratch_area_size = is_leaf ? 0 : 16;

    // Frame_offset is the running offset of the current variable in the
    // local-storage area.  Initialize it to the distance between the young
    // end of the frame and the local-storage area.
    frame_offset = scratch_area_size + max_arg_area;

    // Walk the unit's symbol table, assigning an offset to every automatic
    // variable that actually appears in frame_map.
    SymTable *st = cur_unit->get_symbol_table();
    Iter<SymbolTableObject*> iter = st->get_symbol_table_object_iterator();
    for ( ; iter.is_valid(); iter.next()) {
        SymbolTableObject *sto = iter.current();
        if (is_kind_of<VarSym>(sto)) {
            VarSym *v = (VarSym*)sto;
            if (!is_reg_param(v) && is_a<ParameterSymbol>(v))
                continue;                       // v's in caller's frame
            Map<Sym*,int>::iterator v_handle = frame_map.find(v);
            if (v_handle == frame_map.end())
                continue;                       // v never used

            // Here v is an automatic variable, other than a stack-passed
            // parameter, that is actually used in the program.  First,
            // adjust frame_offset to accommodate v's alignment.  The
            // frame_offset is already a multiple of four bytes.  An
            // alignment value greater than four bytes will itself be a
            // multiple of four.  Round frame_offset up if necessary to
            // satisfy that alignment constraint.
            TypeId v_type = get_type(v);
            int v_align = get_bit_alignment(v_type) >> 3;       // in bytes
            if (v_align > 4) {
                claim(v_align % 4 == 0);
                frame_offset = ((frame_offset + v_align - 1) / v_align) * v_align;
            }
            (*v_handle).second = frame_offset;  // update frame_map

            // Now allocate a multiple of four bytes to v.
            int v_size = get_bit_size(v_type) >> 3;     // v's size in bytes
            frame_offset += (v_size + 3) & -4;
        }
    }

    // Compute number of bytes for registers saved in memory between locals
    // and the frame base.  GRs and BRs take 8 bytes each; FRs take 16.
    save_area_size = saved_reg_set[CLASS_GR].size() * 8
                   + saved_reg_set[CLASS_BR].size() * 8
                   + saved_reg_set[CLASS_FR].size() * 16;

    // The sum of local-area and save-area sizes must be a multiple of 16.
    // Frame_offset is now the local-area size.  Pad it to make the sum a
    // 16-byte multiple.  (Hint: save_area_size is already a multiple of 8.)
    claim((frame_offset & 3) == 0);     // now a 4-byte multiple
    frame_offset = (frame_offset + (save_area_size & 15) + 15) & -16;

    debug(4, "... determine offsets from $sp for memory-passed parameters");

    // Process parameter list in order.  Set running offset param_offset for
    // memory-passed parameters.  Also determine the highest GR arg register
    // used.
    // FIXME: does not allow for aggregates passed partly in regs and partly
    // in memory.
    int param_offset = frame_offset + save_area_size;
    if (param_offset < (scratch_area_size + 16))
        param_offset = (scratch_area_size + 16);
    for (int i = 0; i < get_formal_param_count(cur_unit); i++) {
        VarSym *p = get_formal_param(cur_unit, i);

        // Each parameter consumes a multiple of 8 bytes.
        int p_size = get_bit_size(get_type(p)) >> 3;    // in bytes
        p_size = (p_size + 7) & -8;
        if (is_reg_param(p)) {
            // Register-passed: just track the highest incoming GR used,
            // capping at the last architectural argument register.
            int first_reg = get_param_reg(p);
            if (GR_ARG0 <= first_reg && first_reg <= GR_LAST_ARG) {
                claim(first_reg > max_in_reg);
                max_in_reg = first_reg + (p_size / 8) - 1;
                if (max_in_reg > GR_LAST_ARG)
                    max_in_reg = GR_LAST_ARG;
            }
        } else {
            // Memory-passed: record its offset and advance the running total.
            claim(frame_map.count(p) == 0);
            frame_map[p] = param_offset;
            param_offset += p_size;
        }
    }

    // In a varargs procedure, the GR parameter registers that are not used
    // by named arguments must be spilled to memory adjacent to any
    // parameters passed in memory by the caller.  So in the varargs case,
    // the first thing at the old end of the frame (beginning actually in
    // the caller's scratch area) is a varargs spill area.  Note that we
    // increase max_in_reg to cover any reg-passed, unnamed varargs.
    if (is_varargs) {
        va_area_size = 64 - 8 * (max_in_reg - GR_STACK0 + 1);
        max_in_reg = GR_LAST_ARG;
    } else {
        va_area_size = 0;
    }

    // Frame size is the sum of the sizes of the scratch area, the
    // outgoing-arg area, the local-storage area, the callee-saves-register
    // area, and the register-passed-varargs area.  (The first three have
    // already been combined in frame_offset.)  The sum of frame_offset and
    // save_area_size is already a 16-byte multiple, but va_area_size may
    // not be, so pad to bring frame_size to a multiple of 16 bytes.
    frame_size = frame_offset + save_area_size + ((va_area_size + 15) & -16);

    // For a procedure that invokes va_start, bind the variable whose symbol
    // is stored in va_first_var to the offset from SP of the first unnamed
    // argument, whether passed in memory (va_area_size == 0) or in a register
    // that has been dumped to memory (va_area_size > 0).
    claim((va_first_var == NULL) == (is_varargs == false));
    if (is_varargs)
        frame_map[va_first_var] =
            (va_area_size == 0) ? param_offset : (frame_size - (va_area_size - 16));
}
// Exercises SkTLList (a list that stores its elements by value): construction,
// head/tail insertion, in-place insertion before/after iterators, equality,
// reset, and a randomized insert/remove stress loop that cross-checks the
// element count.  The outer loop repeats the whole test with several
// pre-allocation block sizes (1, 2, 4, 8, 16).
static void TestTLList(skiatest::Reporter* reporter) {
    typedef SkTLList<ListElement> ElList;
    typedef ElList::Iter Iter;
    SkRandom random;
    for (int i = 1; i <= 16; i *= 2) {
        ElList list1(i);
        ElList list2(i);
        Iter iter1;
        Iter iter2;
        Iter iter3;
        Iter iter4;
#if SK_ENABLE_INST_COUNT
        SkASSERT(0 == ListElement::InstanceCount());
#endif
        // A fresh list is empty and iterators initialized on it yield NULL.
        REPORTER_ASSERT(reporter, list1.isEmpty());
        REPORTER_ASSERT(reporter, NULL == iter1.init(list1, Iter::kHead_IterStart));
        REPORTER_ASSERT(reporter, NULL == iter1.init(list1, Iter::kTail_IterStart));
        // Try popping an empty list
        list1.popHead();
        list1.popTail();
        REPORTER_ASSERT(reporter, list1.isEmpty());
        REPORTER_ASSERT(reporter, list1 == list2);
        // Create two identical lists, one by appending to head and the other to the tail.
        list1.addToHead(ListElement(1));
        list2.addToTail(ListElement(1));
#if SK_ENABLE_INST_COUNT
        SkASSERT(2 == ListElement::InstanceCount());
#endif
        // With a single element, head and tail iterators must agree.
        iter1.init(list1, Iter::kHead_IterStart);
        iter2.init(list1, Iter::kTail_IterStart);
        REPORTER_ASSERT(reporter, iter1.get()->fID == iter2.get()->fID);
        iter3.init(list2, Iter::kHead_IterStart);
        iter4.init(list2, Iter::kTail_IterStart);
        REPORTER_ASSERT(reporter, iter3.get()->fID == iter1.get()->fID);
        REPORTER_ASSERT(reporter, iter4.get()->fID == iter1.get()->fID);
        REPORTER_ASSERT(reporter, list1 == list2);
        list2.reset();
        // use both before/after in-place construction on an empty list
        SkNEW_INSERT_IN_LLIST_BEFORE(&list2, list2.headIter(), ListElement, (1));
        REPORTER_ASSERT(reporter, list2 == list1);
        list2.reset();
        SkNEW_INSERT_IN_LLIST_AFTER(&list2, list2.tailIter(), ListElement, (1));
        REPORTER_ASSERT(reporter, list2 == list1);
        // add an element to the second list, check that iters are still valid
        iter3.init(list2, Iter::kHead_IterStart);
        iter4.init(list2, Iter::kTail_IterStart);
        list2.addToHead(ListElement(2));
#if SK_ENABLE_INST_COUNT
        SkASSERT(3 == ListElement::InstanceCount());
#endif
        REPORTER_ASSERT(reporter, iter3.get()->fID == iter1.get()->fID);
        REPORTER_ASSERT(reporter, iter4.get()->fID == iter1.get()->fID);
        REPORTER_ASSERT(reporter, 1 == Iter(list2, Iter::kTail_IterStart).get()->fID);
        REPORTER_ASSERT(reporter, 2 == Iter(list2, Iter::kHead_IterStart).get()->fID);
        REPORTER_ASSERT(reporter, list1 != list2);
        list1.addToHead(ListElement(2));
        REPORTER_ASSERT(reporter, list1 == list2);
#if SK_ENABLE_INST_COUNT
        SkASSERT(4 == ListElement::InstanceCount());
#endif
        REPORTER_ASSERT(reporter, !list1.isEmpty());
        list1.reset();
        list2.reset();
#if SK_ENABLE_INST_COUNT
        SkASSERT(0 == ListElement::InstanceCount());
#endif
        REPORTER_ASSERT(reporter, list1.isEmpty() && list2.isEmpty());
        // randomly perform insertions and deletions on a list and perform tests
        int count = 0;  // expected number of elements, maintained by hand
        for (int j = 0; j < 100; ++j) {
            // Biased 3:1 toward insertion; always insert when the list is empty.
            if (list1.isEmpty() || random.nextBiasedBool(3 * SK_Scalar1 / 4)) {
                int id = j;
                // Choose one of four ways to insert a new element: at the head, at the tail,
                // before a random element, after a random element.  The last two
                // require a non-empty list, hence only 2 valid methods when count == 0.
                int numValidMethods = 0 == count ? 2 : 4;
                int insertionMethod = random.nextULessThan(numValidMethods);
                switch (insertionMethod) {
                    case 0:
                        list1.addToHead(ListElement(id));
                        break;
                    case 1:
                        list1.addToTail(ListElement(id));
                        break;
                    case 2: // fallthru to share code that picks random element.
                    case 3: {
                        // Walk to a random existing element.
                        int n = random.nextULessThan(list1.count());
                        Iter iter = list1.headIter();
                        // remember the elements before/after the insertion point.
                        while (n--) {
                            iter.next();
                        }
                        Iter prev(iter);
                        Iter next(iter);
                        next.next();
                        prev.prev();
                        SkASSERT(NULL != iter.get());
                        // insert either before or after the iterator, then check that the
                        // surrounding sequence is correct.
                        if (2 == insertionMethod) {
                            SkNEW_INSERT_IN_LLIST_BEFORE(&list1, iter, ListElement, (id));
                            Iter newItem(iter);
                            newItem.prev();
                            REPORTER_ASSERT(reporter, newItem.get()->fID == id);
                            if (NULL != next.get()) {
                                REPORTER_ASSERT(reporter, next.prev()->fID == iter.get()->fID);
                            }
                            if (NULL != prev.get()) {
                                REPORTER_ASSERT(reporter, prev.next()->fID == id);
                            }
                        } else {
                            SkNEW_INSERT_IN_LLIST_AFTER(&list1, iter, ListElement, (id));
                            Iter newItem(iter);
                            newItem.next();
                            REPORTER_ASSERT(reporter, newItem.get()->fID == id);
                            if (NULL != next.get()) {
                                REPORTER_ASSERT(reporter, next.prev()->fID == id);
                            }
                            if (NULL != prev.get()) {
                                REPORTER_ASSERT(reporter, prev.next()->fID == iter.get()->fID);
                            }
                        }
                    }
                }
                ++count;
            } else {
                // walk to a random place either forward or backwards and remove.
                int n = random.nextULessThan(list1.count());
                Iter::IterStart start;
                ListElement* (Iter::*incrFunc)();  // pointer-to-member: next or prev
                if (random.nextBool()) {
                    start = Iter::kHead_IterStart;
                    incrFunc = &Iter::next;
                } else {
                    start = Iter::kTail_IterStart;
                    incrFunc = &Iter::prev;
                }
                // find the element
                Iter iter(list1, start);
                while (n--) {
                    REPORTER_ASSERT(reporter, NULL != iter.get());
                    (iter.*incrFunc)();
                }
                REPORTER_ASSERT(reporter, NULL != iter.get());
                // remember the prev and next elements from the element to be removed
                Iter prev = iter;
                Iter next = iter;
                prev.prev();
                next.next();
                list1.remove(iter.get());
                // make sure the remembered next/prev iters still work
                Iter pn = prev;
                pn.next();
                Iter np = next;
                np.prev();
                // pn should match next unless the target node was the head, in which case prev
                // walked off the list.
                REPORTER_ASSERT(reporter, pn.get() == next.get() || NULL == prev.get());
                // Similarly, np should match prev unless next originally walked off the tail.
                REPORTER_ASSERT(reporter, np.get() == prev.get() || NULL == next.get());
                --count;
            }
            REPORTER_ASSERT(reporter, count == list1.count());
#if SK_ENABLE_INST_COUNT
            SkASSERT(count == ListElement::InstanceCount());
#endif
        }
        list1.reset();
#if SK_ENABLE_INST_COUNT
        SkASSERT(0 == ListElement::InstanceCount());
#endif
    }
}
// Perform one worm update step: build a table of candidate moves with their
// weights, draw one at random, execute it, and report which action was taken.
// The candidate set depends on the worm's status:
//   FREE   -> bounce, or hop to a neighbor site;
//   HALTED -> bounce, remove the blocking interaction, or relink it;
//   CRASH  -> remove the worm pair.
// Returns ACT_NOTHING for any other status.
LiveWLdiagram::ActStatus LiveWLdiagram :: act (Worm& worm, Status stat, double Ewalk, const Iter& it_block)
// Member-functions used in this function (like hop, relink_hop,...)
// should not specify the base class (for example: don't use WLdiagram::hop,...),
// so that the higher-level class (InfoWLdiagram) can overload them.
{
    table.clear();
    int site = worm.it()->site();
    int comp = worm.it()->z.comp();

    if (stat == FREE)
    {
        // 1.1 bounce: entry 0 of the table always holds the bounce weight.
        table.add (Ewalk);

        // 1.2 insert interaction: one candidate per neighbor of the current site.
        for(int nbi = 0; nbi < Wld::Diag::_latt(site).nbs(); ++nbi)
        {
            int n = worm.it()->assoc(nbi)->z.nbef(comp);
            bool creat = (worm.up() != worm.it()->z.creat());
            if (check_nlimit (n, creat, nMAX, nMIN))
            // Check whether we cannot create or annihilate a particle anymore
            {
                int dir = _latt(site).nbdir(nbi);
                double weight = BH_t(comp,dir) * (creat ? n+1 : n);
                table.add (weight, nbi);
            }
        }

        // 1.3 choose a move in proportion to its weight.
#ifdef LOCAL_OPTIMAL
        table.locally_optimal();
#endif
        int choice = table.get_choice (rand.uniform());
        if (choice == 0) { bounce (worm); return ACT_BOUNCE; }
        else             { hop (worm, table.nbi(choice)); return ACT_HOP; }
    }
    else if (stat == HALTED)
    {
        Iter it_to = it_block->conj();
        // 'it_to' is the to-node in remove case, or the middle-node in relink case

        // 2.1 bounce
        int dir = _latt.dir (site, it_to->site());
        double weight = BH_t(comp,dir) * (worm.it()->z.creat() ?
                        worm.it()->z.naft(comp) : worm.it()->z.nbef(comp));
        table.add (weight);

        // 2.2 remove interaction: weight from the diagonal-energy shift across it_to.
        double Ebef = diagonal_energy (it_to, it_to->z.nbef(0), it_to->z.nbef(1));
        double Eaft = diagonal_energy (it_to, it_to->z.naft(0), it_to->z.naft(1));
        if (worm.up()) weight = shift_energy (Eaft, Ebef);
        else           weight = shift_energy (Ebef, Eaft);
        table.add (weight);

        // 2.3 relink interaction: one candidate per neighbor of it_to's site.
        for(int nbi = 0; nbi < Wld::Diag::_latt(it_to->site()).nbs(); ++nbi)
        {
            Iter it_nb = it_to->assoc(nbi);
            if (it_nb->site() != worm.it()->site())
            // Check whether relink would go back to the self site
            {
                int n = it_nb->z.nbef(comp);
                bool creat = (worm.up() == worm.it()->z.creat());
                if (check_nlimit (n, creat, nMAX, nMIN))
                // Check whether we cannot create or annihilate a particle anymore
                {
                    int dir = _latt(site).nbdir(nbi);
                    weight = BH_t(comp,dir) * (creat ? n+1 : n);
                    table.add (weight, nbi);
                }
            }
        }

        // 2.4 choose: slot 0 = bounce, slot 1 = remove, others = relink targets.
#ifdef LOCAL_OPTIMAL
        table.locally_optimal();
#endif
        int choice = table.get_choice (rand.uniform());
        if      (choice == 0) { bounce (worm); return ACT_BOUNCE; }
        else if (choice == 1) { delete_hop (worm); return ACT_DELETE_HOP; }
        else                  { relink_hop (worm, table.nbi(choice)); return ACT_RELINK_HOP; }
    }
    else if (stat == CRASH)
    {
        remove (worm);
        return ACT_REMOVE_WORMS;
    }
    return ACT_NOTHING;
}
// Overwrite every remaining element reachable through the iterator with a
// freshly generated random value.
void CommonHandler(Iter<int>& it)
{
    for (; it.Next(); ) {
        it.Get() = Random();
    }
}
// Propagate the worm head for at most dtime along its current direction and
// classify what stopped it.  On return, it_block points at the node (or
// neighbor kink under NN_INTERACTION) that blocks further movement.
// Possible results: FREE (moved the full dtime), FORCED_HALTED (hit a
// scheduled halt time), PASS_NEIGHBOR, CROSS_END, CROSS_NODE, CRASH (met the
// fixed worm end), or HALTED (stopped at an interaction node).
LiveWLdiagram::Status LiveWLdiagram :: walk (double dtime, Worm& worm, Iter& it_block)
{
#ifdef DEBUG_MODE
    check_online (worm);
    check_halt_time (worm);
#endif
    // Get the block and the time-distance to the block: the adjacent node in
    // the direction of travel (next node when moving up, previous when down).
    int site = worm.it()->site();
    it_block = worm.up() ? ++Iter(worm.it()) : --Iter(worm.it());
    // The fixed worm end for this component (comp 0 -> fix_a, else fix_b).
    const Iter& fix = (worm.it()->z.comp() == 0 ? fix_a : fix_b);

#ifdef NN_INTERACTION
    // Check the neighbor kinks: a kink on an associated neighbor line that is
    // closer in time than it_block becomes the blocking element instead.
    int block_nbi = -1;
    if (worm.up())
        for(int nbi = 0; nbi < _latt(site).nbs(); ++nbi)
        {
            Iter it_ass = worm.it()->assoc(nbi);
            if (it_ass->time() < it_block->time())
            {
                it_block = it_ass;
                block_nbi = nbi;
            }
        }
    else
        for(int nbi = 0; nbi < _latt(site).nbs(); ++nbi)
        {
            Iter it_ass = --Iter(worm.it()->assoc(nbi));
            if (it_ass->time() > it_block->time())
            {
                it_block = it_ass;
                block_nbi = nbi;
            }
        }
#endif
    double dtime_block = fabs (it_block->time() - worm.it()->time());

    // Check force-halt: distance to the next scheduled halt time for this worm.
    double dtime_halt = fabs (halt_times(worm.ti) - worm.it()->time());
    double dtime_stop = (dtime_halt < dtime_block ? dtime_halt : dtime_block);
    bool force_halt   = (dtime_halt < dtime_block ? true : false);

    // Free: nothing blocks within dtime, so move the full distance.
    if (dtime < dtime_stop)
    {
        Wld::walk (worm, dtime);
        return FREE;
    }
    // Force halt: stop exactly at the halt time and advance the halt index.
    else if (force_halt)
    {
        Wld::walk (worm, dtime_stop);
        if (worm.up()) worm.ti++;
        else           worm.ti--;
        return FORCED_HALTED;
    }
#ifdef NN_INTERACTION
    // Pass neighbor: the blocking element was a neighbor kink.
    else if (block_nbi != -1)
    {
        Wld::pass_neighbor (worm.up(), worm.it(), it_block, block_nbi);
        return PASS_NEIGHBOR;
    }
#endif
    // Cross end: the blocking node is a line end.
    else if (it_block->end())
    {
        this->cross_end (worm);
        return CROSS_END;
    }
    // Cross node: different component, or same creation/annihilation character.
    else if (it_block->z.comp() != worm.it()->z.comp() ||
             it_block->z.creat() == worm.it()->z.creat())
    {
        Wld::cross_node (worm);
        return CROSS_NODE;
    }
    // Meet fix-worm: collided with the fixed worm end.
    else if (it_block == fix)
    {
        return CRASH;
    }
    // Halted: stopped at an interaction node; caller decides the next action.
    else
    {
        Wld::halt (worm);
        return HALTED;
    }
}
// Advance both members of the pair exposed through the iterator's base().
void increment(Iter& i)
{
    // The two updates touch independent members, so their order is irrelevant.
    ++i.base().second;
    ++i.base().first;
}
// Rewrite a MultiDimArrayExpression into a chain of ordinary
// ArrayReferenceExpressions, one per dimension, using the already-converted
// (nested ArrayType) replacement type found in type_map.  Nested
// MultiDimArrayExpressions inside the index operands are dismantled
// recursively.  The original expression is replaced in its parent by the
// rebuilt reference chain.
static void
dismantle_multi_dim_array_expression(
        SuifEnv *env,
        MultiDimArrayExpression *exp,
        TypeBuilder *type_builder,
        suif_hash_map<MultiDimArrayType *,Type *> &type_map)
{
    Expression *ref_exp = exp->get_array_address();
    Type *typ = ref_exp->get_result_type();
    // Strip pointer/reference/qualification wrappers to reach the array type.
    if (is_kind_of<PointerType>(typ))
        typ = to<PointerType>(typ)->get_reference_type();
    if (is_kind_of<ReferenceType>(typ))
        typ = to<ReferenceType>(typ)->get_reference_type();
    if (is_kind_of<QualifiedType>(typ))
        typ = to<QualifiedType>(typ)->get_base_type();
    // NOTE(review): `lows` is filled below but never read afterwards --
    // apparently vestigial; confirm before removing.
    simple_stack<Expression *> lows;
    int dims;
    Type *rep_type;
    if (is_kind_of<MultiDimArrayType>(typ)) {
        MultiDimArrayType *mdatyp= to<MultiDimArrayType>(typ);
        // Look up the nested-array replacement type produced by the earlier
        // type-conversion pass; it must already exist.
        suif_hash_map<MultiDimArrayType *,Type *>::iterator iter =
            type_map.find(mdatyp);
        kernel_assert_message(iter != type_map.end(),
                              ("Error - type not converted"));
        rep_type = (*iter).second;
        dims = exp->get_index_count();
        for (int i = dims - 1;i >=0 ; i--) {
            lows.push(mdatyp->get_lower_bound(i));
        }
    }
    else {
        // this arm should never be taken, so assert
        kernel_assert_message(false,("This code should not have been accessed"));
        rep_type = typ;
        dims = 0;
        while (is_kind_of<ArrayType>(typ)) {
            ArrayType *atype = to<ArrayType>(typ);
            dims ++;
            lows.push(atype->get_lower_bound());
            typ = to<QualifiedType>(atype->get_element_type())->get_base_type();
        }
    }
    // Detach the base-address expression from exp so it can be re-parented
    // under the new reference chain.
    exp->replace(ref_exp,0);
    ref_exp->set_parent(0);
    int index_count = exp->get_index_count();
    // Build one ArrayReferenceExpression per dimension, innermost last.
    for (int i = 0;i < index_count;i ++) {
        // Walk i+1 levels into the replacement type to find this level's
        // element type, then take a pointer to it for the reference result.
        Type *ref_type = rep_type;
        for (int j = 0;j <= i;j ++) {
            ref_type = type_builder->unqualify_type(ref_type);
            ref_type = to<ArrayType>(ref_type)->get_element_type();
        }
        ref_type = type_builder->unqualify_type(ref_type);
        ref_type = type_builder->get_pointer_type(ref_type);
        // Indices are consumed from last to first (row-major order).
        Expression *index = exp->get_index(index_count - i - 1);
        // process nested multi dim array expressions
        for (Iter<MultiDimArrayExpression> iter =
                 object_iterator<MultiDimArrayExpression>(index);
             iter.is_valid(); iter.next()) {
            MultiDimArrayExpression *mexpr = &iter.current();
            dismantle_multi_dim_array_expression(env,mexpr,type_builder,type_map);
        }
        // Detach the index expression and wrap the running reference chain.
        exp->replace(index,0);
        index->set_parent(0);
        ref_exp = create_array_reference_expression(
            env,to<DataType>(ref_type),ref_exp, index);
    }
    // Splice the finished chain into exp's parent, discarding exp.
    exp->get_parent()->replace(exp,ref_exp);
}