// Dump one line of basic info for |obj|: its address, whether it is a
// native or a host object, and (for natives) its JSClass name.
static void PrintObjectBasics(JSObject* obj)
{
    if (!JS_IsNative(obj)) {
        DebugDump("%p 'host'", (void *)obj);
        return;
    }
    DebugDump("%p 'native' <%s>", (void *)obj, js::GetObjectClass(obj)->name);
}
// Create a Loop shape from |Entries|. Walking backwards from the entries,
// every block that can reach them becomes part of the loop body (and is
// removed from |Blocks|); branch targets outside the body become the
// entries of the following shape, returned through |NextEntries|.
Shape *MakeLoop(BlockSet &Blocks, BlockSet& Entries, BlockSet &NextEntries) {
  // Find the inner blocks in this loop. Proceed backwards from the entries until
  // you reach a seen block, collecting as you go.
  BlockSet InnerBlocks;
  BlockSet Queue = Entries;
  while (Queue.size() > 0) {
    Block *Curr = *(Queue.begin());
    Queue.erase(Queue.begin());
    if (InnerBlocks.find(Curr) == InnerBlocks.end()) {
      // This element is new, mark it as inner and remove from outer
      InnerBlocks.insert(Curr);
      Blocks.erase(Curr);
      // Add the elements prior to it
      for (BlockBranchMap::iterator iter = Curr->BranchesIn.begin(); iter != Curr->BranchesIn.end(); iter++) {
        Queue.insert(iter->first);
      }
    }
  }
  assert(InnerBlocks.size() > 0);

  // Collect the entries of the shape that will follow this loop: branch
  // targets that are outside the loop body.
  for (BlockSet::iterator iter = InnerBlocks.begin(); iter != InnerBlocks.end(); iter++) {
    Block *Curr = *iter;
    for (BlockBranchMap::iterator iter = Curr->BranchesOut.begin(); iter != Curr->BranchesOut.end(); iter++) {
      Block *Possible = iter->first;
      // BUGFIX: the original tested |NextEntries.find(Possible) ==
      // NextEntries.find(Possible)|, a tautology that is always true; the
      // intent is clearly to add only targets not already recorded.
      if (InnerBlocks.find(Possible) == InnerBlocks.end() &&
          NextEntries.find(Possible) == NextEntries.end()) {
        NextEntries.insert(Possible);
      }
    }
  }

  PrintDebug("creating loop block:\n");
  DebugDump(InnerBlocks, " inner blocks:");
  DebugDump(Entries, " inner entries:");
  DebugDump(Blocks, " outer blocks:");
  DebugDump(NextEntries, " outer entries:");

  // TODO: Optionally hoist additional blocks into the loop

  LoopShape *Loop = new LoopShape();
  Notice(Loop);

  // Solipsize the loop, replacing with break/continue and marking branches as Processed (will not affect later calculations)
  // A. Branches to the loop entries become a continue to this shape
  for (BlockSet::iterator iter = Entries.begin(); iter != Entries.end(); iter++) {
    Solipsize(*iter, Branch::Continue, Loop, InnerBlocks);
  }
  // B. Branches to outside the loop (a next entry) become breaks on this shape
  for (BlockSet::iterator iter = NextEntries.begin(); iter != NextEntries.end(); iter++) {
    Solipsize(*iter, Branch::Break, Loop, InnerBlocks);
  }
  // Finish up
  Shape *Inner = Process(InnerBlocks, Entries, NULL);
  Loop->Inner = Inner;
  return Loop;
}
// Trap handler invoked when script hits the JS "debugger" keyword: prints
// a separator, a notice, and the current JS call stack to the debug output,
// then lets execution continue normally.
JSTrapStatus xpc_DebuggerKeywordHandler(JSContext *cx, JSScript *script, jsbytecode *pc,
                                        jsval *rval, void *closure)
{
    static const char line[] =
        "------------------------------------------------------------------------\n";
    static const char notice[] =
        "Hit JavaScript \"debugger\" keyword. JS call stack...\n";

    DebugDump("%s", line);
    DebugDump("%s", notice);
    xpc_DumpJSStack(cx, true, true, false);
    DebugDump("%s", line);

    return JSTRAP_CONTINUE;
}
// Evaluate |text| in JS stack frame number |frameno| of |cx|'s current
// stack and dump the stringified result to the debug output.
// Returns false only for bad arguments or a bad frame number; an eval
// failure is reported to the debug output but still returns true.
JSBool
xpc_DumpEvalInJSStackFrame(JSContext* cx, uint32_t frameno, const char* text)
{
    if (!cx || !text) {
        DebugDump("%s", "invalid params passed to xpc_DumpEvalInJSStackFrame!\n");
        return false;
    }

    DebugDump("js[%d]> %s\n", frameno, text);

    // Walk the broken-frame iterator until we reach frame index |frameno|.
    uint32_t num = 0;
    JSAbstractFramePtr frame = JSNullFramePtr();
    JSBrokenFrameIterator iter(cx);
    while (!iter.done()) {
        if (num == frameno) {
            frame = iter.abstractFramePtr();
            break;
        }
        ++iter;
        num++;
    }

    if (!frame) {
        DebugDump("%s", "invalid frame number!\n");
        return false;
    }

    JSAutoRequest ar(cx);

    // Preserve any pending exception and divert error reports to our own
    // reporter while evaluating; both are restored before returning.
    JSExceptionState* exceptionState = JS_SaveExceptionState(cx);
    JSErrorReporter older = JS_SetErrorReporter(cx, xpcDumpEvalErrorReporter);

    jsval rval;
    JSString* str;
    JSAutoByteString bytes;
    // Evaluate, convert the result to a string, then encode it to bytes;
    // any step failing takes the "eval failed!" path.
    if (frame.evaluateInStackFrame(cx, text, strlen(text), "eval", 1, &rval) &&
        nullptr != (str = JS_ValueToString(cx, rval)) &&
        bytes.encode(cx, str)) {
        DebugDump("%s\n", bytes.ptr());
    } else
        DebugDump("%s", "eval failed!\n");

    JS_SetErrorReporter(cx, older);
    JS_RestoreExceptionState(cx, exceptionState);
    return true;
}
// Create a Multiple shape: each independent group becomes one inner shape,
// keyed by its entry block. Blocks handled here are removed from |Blocks|;
// branch targets outside a group, and entries without an independent group,
// are deferred into |NextEntries|. If |Prev| is a Simple shape the Multiple
// is "fused" with it and the per-entry checks can be skipped.
Shape *MakeMultiple(BlockSet &Blocks, BlockSet& Entries, BlockBlockSetMap& IndependentGroups, Shape *Prev, BlockSet &NextEntries) {
  PrintDebug("creating multiple block with %d inner groups\n", IndependentGroups.size());
  bool Fused = !!(Shape::IsSimple(Prev));
  MultipleShape *Multiple = new MultipleShape();
  Notice(Multiple);
  BlockSet CurrEntries;
  for (BlockBranchMap::iterator iter = IndependentGroups.begin(); iter != IndependentGroups.end(); iter++) {
    Block *CurrEntry = iter->first;
    BlockSet &CurrBlocks = iter->second;
    PrintDebug(" multiple group with entry %d:\n", CurrEntry->Id);
    DebugDump(CurrBlocks, " ");
    // Create inner block
    CurrEntries.clear();
    CurrEntries.insert(CurrEntry);
    for (BlockSet::iterator iter = CurrBlocks.begin(); iter != CurrBlocks.end(); iter++) {
      Block *CurrInner = *iter;
      // Remove the block from the remaining blocks
      Blocks.erase(CurrInner);
      // Find new next entries and fix branches to them
      for (BlockBranchMap::iterator iter = CurrInner->BranchesOut.begin(); iter != CurrInner->BranchesOut.end();) {
        Block *CurrTarget = iter->first;
        // Snapshot the successor iterator first: Solipsize below may erase
        // the current entry from BranchesOut and invalidate |iter|.
        BlockBranchMap::iterator Next = iter;
        Next++;
        if (CurrBlocks.find(CurrTarget) == CurrBlocks.end()) {
          NextEntries.insert(CurrTarget);
          Solipsize(CurrTarget, Branch::Break, Multiple, CurrBlocks);
        }
        iter = Next; // increment carefully because Solipsize can remove us
      }
    }
    Multiple->InnerMap[CurrEntry] = Process(CurrBlocks, CurrEntries, NULL);
    // If we are not fused, then our entries will actually be checked
    if (!Fused) {
      CurrEntry->IsCheckedMultipleEntry = true;
    }
  }
  DebugDump(Blocks, " remaining blocks after multiple:");
  // Add entries not handled as next entries, they are deferred
  for (BlockSet::iterator iter = Entries.begin(); iter != Entries.end(); iter++) {
    Block *Entry = *iter;
    if (IndependentGroups.find(Entry) == IndependentGroups.end()) {
      NextEntries.insert(Entry);
    }
  }
  return Multiple;
}
// Recursively dump |obj| followed by its parent and proto chains,
// indenting one level per recursion step. |pile| tracks objects already
// printed so cycles and overly large graphs are cut off.
static void PrintObject(JSObject* obj, int depth, ObjectPile* pile)
{
    PrintObjectBasics(obj);

    switch (pile->Visit(obj)) {
        case ObjectPile::primary:
            DebugDump("%s", "\n");
            break;
        case ObjectPile::seen:
            DebugDump("%s", " (SEE ABOVE)\n");
            return;
        case ObjectPile::overflow:
            DebugDump("%s", " (TOO MANY OBJECTS)\n");
            return;
    }

    // Only native objects expose parent/proto through these accessors.
    if (!JS_IsNative(obj))
        return;

    JSObject* parent = js::GetObjectParent(obj);
    JSObject* proto = js::GetObjectProto(obj);

    DebugDump("%*sparent: ", INDENT(depth+1));
    if (!parent)
        DebugDump("%s", "null\n");
    else
        PrintObject(parent, depth+1, pile);

    DebugDump("%*sproto: ", INDENT(depth+1));
    if (!proto)
        DebugDump("%s", "null\n");
    else
        PrintObject(proto, depth+1, pile);
}
// Format the current JS stack into a heap-allocated string; the caller
// owns the result and frees it with JS_smprintf_free. Any pending
// exception is saved before formatting and restored afterwards.
// Returns nullptr when formatting fails.
char* xpc_PrintJSStack(JSContext* cx, JSBool showArgs, JSBool showLocals,
                       JSBool showThisProps)
{
    JSExceptionState *state = JS_SaveExceptionState(cx);
    if (!state)
        DebugDump("%s", "Call to a debug function modifying state!\n");

    JS_ClearPendingException(cx);

    char* buf = JS::FormatStackDump(cx, nullptr, showArgs, showLocals, showThisProps);
    if (!buf)
        DebugDump("%s", "Failed to format JavaScript stack for dump\n");

    JS_RestoreExceptionState(cx, state);
    return buf;
}
// Print the current JS stack to the debug output and release the buffer.
// Always reports success, even when no stack could be formatted.
JSBool xpc_DumpJSStack(JSContext* cx, JSBool showArgs, JSBool showLocals,
                       JSBool showThisProps)
{
    char* stack = xpc_PrintJSStack(cx, showArgs, showLocals, showThisProps);
    if (stack) {
        DebugDump("%s\n", stack);
        JS_smprintf_free(stack);
    }
    return true;
}
// Dump a memory region to every enabled log sink: the screen (stdout)
// and/or the log file, which is opened lazily on first use.
void Log::Dump (const void *Data, unsigned Size, const void *Base)
{
    if (LoggingFlags & LOG_SCREEN)
        DebugDump (stdout, Data, Size, Base);

    if (!(LoggingFlags & LOG_FILE))
        return;

    // Open the log file on demand; warn and bail out if that fails.
    if (LogFile == NULL) {
        LogFile = fopen (LogFileName, "a");
        if (LogFile == NULL) {
            CONSOLE.Out ("\aPU\axcWARNING: Cannot open log file `%s'\aPO\n", LogFileName);
            return;
        }
    }
    DebugDump (LogFile, Data, Size, Base);
}
// Entry point for formatted debug dumping: when dumping is globally
// enabled, wrap the formatter in a dump context and delegate to
// DebugDump() for up to |depth| levels.
void CDebugDumpable::DebugDumpFormat(CDebugDumpFormatter& ddf,
                                     const string& bundle,
                                     unsigned int depth) const
{
    if ( !sm_DumpEnabled )
        return;

    CDebugDumpContext ddc(ddf, bundle);
    DebugDump(ddc, depth);
}
/**
 * Called by the parser to add a processing-instruction node to the
 * current container in the content model. Logs the node in verbose
 * builds and forwards the call to the proxied sink when one is set.
 *
 * @param aNode the PI node delivered by the parser
 * @return result of the proxied sink, or NS_OK when no sink is attached
 */
NS_IMETHODIMP
nsLoggingSink::AddProcessingInstruction(const nsIParserNode& aNode){

#ifdef VERBOSE_DEBUG
  DebugDump("<",aNode.GetText(),(mNodeStackPos)*2);
#endif

  // Proxy the call to the real sink if you have one.
  nsresult theResult = mSink ? mSink->AddProcessingInstruction(aNode) : NS_OK;
  return theResult;
}
// Converts/processes all branchings to a specific target void Solipsize(Block *Target, Branch::FlowType Type, Shape *Ancestor, BlockSet &From) { PrintDebug("Solipsizing branches into %d\n", Target->Id); DebugDump(From, " relevant to solipsize: "); for (BlockSet::iterator iter = Target->BranchesIn.begin(); iter != Target->BranchesIn.end();) { Block *Prior = *iter; if (!contains(From, Prior)) { iter++; continue; } Branch *PriorOut = Prior->BranchesOut[Target]; PriorOut->Ancestor = Ancestor; PriorOut->Type = Type; if (MultipleShape *Multiple = Shape::IsMultiple(Ancestor)) { Multiple->NeedLoop++; // We are breaking out of this Multiple, so need a loop } iter++; // carefully increment iter before erasing Target->BranchesIn.erase(Prior); Target->ProcessedBranchesIn.insert(Prior); Prior->BranchesOut.erase(Target); Prior->ProcessedBranchesOut[Target] = PriorOut; PrintDebug(" eliminated branch from %d\n", Prior->Id); } }
// Dump an object's parent/proto tree to the debug output, preceded by a
// few reminders about the engine's internal slot layout. Tolerates a null
// object and always returns true.
JSBool xpc_DumpJSObject(JSObject* obj)
{
    ObjectPile pile;

    DebugDump("%s", "Debugging reminders...\n");
    DebugDump("%s", " class: (JSClass*)(obj->fslots[2]-1)\n");
    DebugDump("%s", " parent: (JSObject*)(obj->fslots[1])\n");
    DebugDump("%s", " proto: (JSObject*)(obj->fslots[0])\n");
    DebugDump("%s", "\n");

    if (!obj) {
        DebugDump("%s", "xpc_DumpJSObject passed null!\n");
        return true;
    }

    PrintObject(obj, 0, &pile);
    return true;
}
// Converts/processes all branchings to a specific target void Solipsize(Block *Target, Branch::FlowType Type, Shape *Ancestor, BlockSet &From) { PrintDebug("Solipsizing branches into %d\n", Target->Id); DebugDump(From, " relevant to solipsize: "); for (BlockBranchMap::iterator iter = Target->BranchesIn.begin(); iter != Target->BranchesIn.end();) { Block *Prior = iter->first; if (From.find(Prior) == From.end()) { iter++; continue; } Branch *TargetIn = iter->second; Branch *PriorOut = Prior->BranchesOut[Target]; PriorOut->Ancestor = Ancestor; // Do we need this info PriorOut->Type = Type; // on TargetIn too? if (MultipleShape *Multiple = Shape::IsMultiple(Ancestor)) { Multiple->NeedLoop++; // We are breaking out of this Multiple, so need a loop } iter++; // carefully increment iter before erasing Target->BranchesIn.erase(Prior); Target->ProcessedBranchesIn[Prior] = TargetIn; Prior->BranchesOut.erase(Target); Prior->ProcessedBranchesOut[Target] = PriorOut; PrintDebug(" eliminated branch from %d\n", Prior->Id); } }
PROOT_DCB
MsCreateRootDcb (
    IN PVCB Vcb
    )

/*++

Routine Description:

    This routine allocates, initializes, and inserts a new root DCB record
    into the in memory data structure.

Arguments:

    Vcb - Supplies the Vcb to associate the new DCB under

Return Value:

    PROOT_DCB - returns pointer to the newly allocated root DCB.

--*/

{
    PROOT_DCB rootDcb;

    PAGED_CODE();

    DebugTrace(+1, Dbg, "MsCreateRootDcb, Vcb = %08lx\n", (ULONG)Vcb);

    //
    // Make sure we don't already have a root dcb for this vcb
    //

    rootDcb = Vcb->RootDcb;
    if (rootDcb != NULL) {
        DebugDump("Error trying to create multiple root dcbs\n", 0, Vcb);
        KeBugCheck( MAILSLOT_FILE_SYSTEM );
    }

    //
    // Allocate a new DCB and zero its fields.
    //

    rootDcb = FsRtlAllocatePool( NonPagedPool, sizeof(DCB) );
    RtlZeroMemory( rootDcb, sizeof(DCB));

    //
    // Set the proper node type code, node byte size, and reference count.
    //

    rootDcb->Header.NodeTypeCode = MSFS_NTC_ROOT_DCB;
    rootDcb->Header.NodeByteSize = sizeof(ROOT_DCB);
    rootDcb->Header.ReferenceCount = 1;
    rootDcb->Header.NodeState = NodeStateActive;

    //
    // The root Dcb has an empty parent dcb links field
    //

    InitializeListHead( &rootDcb->ParentDcbLinks );

    //
    // Set the Vcb and give it a pointer to the new root DCB.
    //

    rootDcb->Vcb = Vcb;
    Vcb->RootDcb = rootDcb;

    //
    // Initialize the notify queues, and the parent dcb queue.
    //

    InitializeListHead( &rootDcb->Specific.Dcb.NotifyFullQueue );
    InitializeListHead( &rootDcb->Specific.Dcb.NotifyPartialQueue );
    InitializeListHead( &rootDcb->Specific.Dcb.ParentDcbQueue );

    //
    // Set the full file name.  The root is always "\" (a single
    // backslash), which both full and last file names share.
    //

    {
        PWCH Name;

        Name = FsRtlAllocatePool(PagedPool, 2 * sizeof(WCHAR));
        Name[0] = L'\\';
        Name[1] = L'\0';

        RtlInitUnicodeString( &rootDcb->FullFileName, Name );
        RtlInitUnicodeString( &rootDcb->LastFileName, Name );
    }

    //
    // Insert this DCB into the prefix table.
    //

    MsAcquirePrefixTableLock();
    if (!RtlInsertUnicodePrefix( &Vcb->PrefixTable,
                                 &rootDcb->FullFileName,
                                 &rootDcb->PrefixTableEntry )) {
        DebugDump("Error trying to insert root dcb into prefix table\n", 0, Vcb);
        KeBugCheck( MAILSLOT_FILE_SYSTEM );
    }
    MsReleasePrefixTableLock();

    //
    // Initialize the resource variable.
    //

    ExInitializeResource( &(rootDcb->Resource) );

    //
    // Return to the caller.
    //

    DebugTrace(-1, Dbg, "MsCreateRootDcb -> %8lx\n", (ULONG)rootDcb);

    return rootDcb;
}
// Main function.
// Process a set of blocks with specified entries, returns a shape
// The Make* functions receive a NextEntries. If they fill it with data, those are the entries for the
// ->Next block on them, and the blocks are what remains in Blocks (which Make* modify). In this way
// we avoid recursing on Next (imagine a long chain of Simples, if we recursed we could blow the stack).
Shape *Process(BlockSet &Blocks, BlockSet& InitialEntries, Shape *Prev) {
  PrintDebug("Process() called\n");
  BlockSet *Entries = &InitialEntries;
  // Double-buffered entry sets: each iteration reads *Entries while the
  // Make* call fills *NextEntries for the following iteration.
  BlockSet TempEntries[2];
  int CurrTempIndex = 0;
  BlockSet *NextEntries;
  Shape *Ret = NULL;
  // Chain the freshly made shape onto Prev (and remember the first one as
  // the return value); either finish, or loop again on the next entries.
#define Make(call) \
  Shape *Temp = call; \
  if (Prev) Prev->Next = Temp; \
  if (!Ret) Ret = Temp; \
  if (!NextEntries->size()) { PrintDebug("Process() returning\n"); return Ret; } \
  Prev = Temp; \
  Entries = NextEntries; \
  continue;
  while (1) {
    PrintDebug("Process() running\n");
    DebugDump(Blocks, " blocks : ");
    DebugDump(*Entries, " entries: ");

    // Swap to the other temp entry set for this iteration's output.
    CurrTempIndex = 1-CurrTempIndex;
    NextEntries = &TempEntries[CurrTempIndex];
    NextEntries->clear();

    if (Entries->size() == 0) return Ret;
    if (Entries->size() == 1) {
      Block *Curr = *(Entries->begin());
      if (Curr->BranchesIn.size() == 0) {
        // One entry, no looping ==> Simple
        Make(MakeSimple(Blocks, Curr, *NextEntries));
      }
      // One entry, looping ==> Loop
      Make(MakeLoop(Blocks, *Entries, *NextEntries));
    }
    // More than one entry, try to eliminate through a Multiple groups of
    // independent blocks from an entry/ies. It is important to remove through
    // multiples as opposed to looping since the former is more performant.
    BlockBlockSetMap IndependentGroups;
    FindIndependentGroups(Blocks, *Entries, IndependentGroups);

    PrintDebug("Independent groups: %d\n", IndependentGroups.size());

    if (IndependentGroups.size() > 0) {
      // We can handle a group in a multiple if its entry cannot be reached by another group.
      // Note that it might be reachable by itself - a loop. But that is fine, we will create
      // a loop inside the multiple block (which is the performant order to do it).
      for (BlockBlockSetMap::iterator iter = IndependentGroups.begin(); iter != IndependentGroups.end();) {
        Block *Entry = iter->first;
        BlockSet &Group = iter->second;
        BlockBlockSetMap::iterator curr = iter++; // iterate carefully, we may delete
        for (BlockBranchMap::iterator iterBranch = Entry->BranchesIn.begin(); iterBranch != Entry->BranchesIn.end(); iterBranch++) {
          Block *Origin = iterBranch->first;
          if (Group.find(Origin) == Group.end()) {
            // Reached from outside the group, so we cannot handle this
            PrintDebug("Cannot handle group with entry %d because of incoming branch from %d\n", Entry->Id, Origin->Id);
            IndependentGroups.erase(curr);
            break;
          }
        }
      }

      // As an optimization, if we have 2 independent groups, and one is a small dead end, we can handle only that dead end.
      // The other then becomes a Next - without nesting in the code and recursion in the analysis.
      // TODO: if the larger is the only dead end, handle that too
      // TODO: handle >2 groups
      // TODO: handle not just dead ends, but also that do not branch to the NextEntries. However, must be careful
      //       there since we create a Next, and that Next can prevent eliminating a break (since we no longer
      //       naturally reach the same place), which may necessitate a one-time loop, which makes the unnesting
      //       pointless.
      if (IndependentGroups.size() == 2) {
        // Find the smaller one
        BlockBlockSetMap::iterator iter = IndependentGroups.begin();
        Block *SmallEntry = iter->first;
        int SmallSize = iter->second.size();
        iter++;
        Block *LargeEntry = iter->first;
        int LargeSize = iter->second.size();
        if (SmallSize != LargeSize) { // ignore the case where they are identical - keep things symmetrical there
          if (SmallSize > LargeSize) {
            Block *Temp = SmallEntry;
            SmallEntry = LargeEntry;
            LargeEntry = Temp; // Note: we did not flip the Sizes too, they are now invalid. TODO: use the smaller size as a limit?
          }
          // Check if dead end
          bool DeadEnd = true;
          BlockSet &SmallGroup = IndependentGroups[SmallEntry];
          for (BlockSet::iterator iter = SmallGroup.begin(); iter != SmallGroup.end(); iter++) {
            Block *Curr = *iter;
            for (BlockBranchMap::iterator iter = Curr->BranchesOut.begin(); iter != Curr->BranchesOut.end(); iter++) {
              Block *Target = iter->first;
              if (SmallGroup.find(Target) == SmallGroup.end()) {
                DeadEnd = false;
                break;
              }
            }
            if (!DeadEnd) break;
          }
          if (DeadEnd) {
            PrintDebug("Removing nesting by not handling large group because small group is dead end\n");
            IndependentGroups.erase(LargeEntry);
          }
        }
      }

      PrintDebug("Handleable independent groups: %d\n", IndependentGroups.size());

      if (IndependentGroups.size() > 0) {
        // Some groups removable ==> Multiple
        Make(MakeMultiple(Blocks, *Entries, IndependentGroups, Prev, *NextEntries));
      }
    }
    // No independent groups, must be loopable ==> Loop
    Make(MakeLoop(Blocks, *Entries, *NextEntries));
  }
}
// For each entry, find the independent group reachable by it. The independent group is
// the entry itself, plus all the blocks it can reach that cannot be directly reached by another entry. Note that we
// ignore directly reaching the entry itself by another entry.
void FindIndependentGroups(BlockSet &Blocks, BlockSet &Entries, BlockBlockSetMap& IndependentGroups) {
  typedef std::map<Block*, Block*> BlockBlockMap;

  struct HelperClass {
    BlockBlockSetMap& IndependentGroups;
    BlockBlockMap Ownership; // For each block, which entry it belongs to. We have reached it from there.

    HelperClass(BlockBlockSetMap& IndependentGroupsInit) : IndependentGroups(IndependentGroupsInit) {}

    // Remove a block, and everything reachable from it that we had already
    // claimed, from whichever group currently owns it.
    void InvalidateWithChildren(Block *New) { // TODO: rename New
      BlockList ToInvalidate; // Being in the list means you need to be invalidated
      ToInvalidate.push_back(New);
      while (ToInvalidate.size() > 0) {
        Block *Invalidatee = ToInvalidate.front();
        ToInvalidate.pop_front();
        Block *Owner = Ownership[Invalidatee];
        if (IndependentGroups.find(Owner) != IndependentGroups.end()) { // Owner may have been invalidated, do not add to IndependentGroups!
          IndependentGroups[Owner].erase(Invalidatee);
        }
        if (Ownership[Invalidatee]) { // may have been seen before and invalidated already
          Ownership[Invalidatee] = NULL;
          for (BlockBranchMap::iterator iter = Invalidatee->BranchesOut.begin(); iter != Invalidatee->BranchesOut.end(); iter++) {
            Block *Target = iter->first;
            BlockBlockMap::iterator Known = Ownership.find(Target);
            if (Known != Ownership.end()) {
              Block *TargetOwner = Known->second;
              if (TargetOwner) {
                ToInvalidate.push_back(Target);
              }
            }
          }
        }
      }
    }
  };
  HelperClass Helper(IndependentGroups);

  // We flow out from each of the entries, simultaneously.
  // When we reach a new block, we add it as belonging to the one we got to it from.
  // If we reach a new block that is already marked as belonging to someone, it is reachable by
  // two entries and is not valid for any of them. Remove it and all it can reach that have been
  // visited.

  BlockList Queue; // Being in the queue means we just added this item, and we need to add its children
  for (BlockSet::iterator iter = Entries.begin(); iter != Entries.end(); iter++) {
    Block *Entry = *iter;
    Helper.Ownership[Entry] = Entry;
    IndependentGroups[Entry].insert(Entry);
    Queue.push_back(Entry);
  }
  while (Queue.size() > 0) {
    Block *Curr = Queue.front();
    Queue.pop_front();
    Block *Owner = Helper.Ownership[Curr]; // Curr must be in the ownership map if we are in the queue
    if (!Owner) continue; // we have been invalidated meanwhile after being reached from two entries
    // Add all children
    for (BlockBranchMap::iterator iter = Curr->BranchesOut.begin(); iter != Curr->BranchesOut.end(); iter++) {
      Block *New = iter->first;
      BlockBlockMap::iterator Known = Helper.Ownership.find(New);
      if (Known == Helper.Ownership.end()) {
        // New node. Add it, and put it in the queue
        Helper.Ownership[New] = Owner;
        IndependentGroups[Owner].insert(New);
        Queue.push_back(New);
        continue;
      }
      Block *NewOwner = Known->second;
      if (!NewOwner) continue; // We reached an invalidated node
      if (NewOwner != Owner) {
        // Invalidate this and all reachable that we have seen - we reached this from two locations
        Helper.InvalidateWithChildren(New);
      }
      // otherwise, we have the same owner, so do nothing
    }
  }

  // Having processed all the interesting blocks, we remain with just one potential issue:
  // If a->b, and a was invalidated, but then b was later reached by someone else, we must
  // invalidate b. To check for this, we go over all elements in the independent groups,
  // if an element has a parent which does *not* have the same owner, we must remove it
  // and all its children.

  for (BlockSet::iterator iter = Entries.begin(); iter != Entries.end(); iter++) {
    BlockSet &CurrGroup = IndependentGroups[*iter];
    BlockList ToInvalidate;
    for (BlockSet::iterator iter = CurrGroup.begin(); iter != CurrGroup.end(); iter++) {
      Block *Child = *iter;
      for (BlockBranchMap::iterator iter = Child->BranchesIn.begin(); iter != Child->BranchesIn.end(); iter++) {
        Block *Parent = iter->first;
        if (Helper.Ownership[Parent] != Helper.Ownership[Child]) {
          ToInvalidate.push_back(Child);
        }
      }
    }
    while (ToInvalidate.size() > 0) {
      Block *Invalidatee = ToInvalidate.front();
      ToInvalidate.pop_front();
      Helper.InvalidateWithChildren(Invalidatee);
    }
  }

  // Remove empty groups
  for (BlockSet::iterator iter = Entries.begin(); iter != Entries.end(); iter++) {
    if (IndependentGroups[*iter].size() == 0) {
      IndependentGroups.erase(*iter);
    }
  }

#if DEBUG
  PrintDebug("Investigated independent groups:\n");
  for (BlockBlockSetMap::iterator iter = IndependentGroups.begin(); iter != IndependentGroups.end(); iter++) {
    DebugDump(iter->second, " group: ");
  }
#endif
}
// Error reporter installed while evaluating debugger input; it simply
// echoes the error message to the debug output.
static void xpcDumpEvalErrorReporter(JSContext *cx, const char *message,
                                     JSErrorReport *report)
{
    DebugDump("Error: %s\n", message);
}
// Run one tick of the Tetris game: lazily initialize on the first call,
// then read a key, translate it into an action, and apply it to the
// current block (dropping/merging when a down-move is blocked).
// |param| is currently unused. Always returns 0.
int TetrisPlay(int param)
{
    static int flag = 0;
    if (!flag) {
        // One-time initialization on the very first call.
        flag = 1;
        InitialMatrix();
        CreateBlock(&curBlock);
        // Create next block
        CreateBlock(&nextBlock);
        GetCurrentLine(curBlock.y);
        DisplayScoreLevel();
    }
    //while(1)
    {
        int key;
        // BUGFIX: |action| was uninitialized; KEY_PAUSE set no value, so the
        // CheckBlock/MoveBlock calls below read indeterminate data. Default
        // to TA_None so pausing performs no movement.
        TetrisAction action = TA_None;
        DebugDump();
        // Check valid
        if (!CheckBlock(&curBlock, TA_None)) {
            // Game over
            printf("Game over!\n");
        }
        key = GetKey();
        switch (key) {
        case KEY_LEFT:
            action = TA_Left;
            score++;
            break;
        case KEY_RIGHT:
            action = TA_Right;
            score += 10;
            break;
        case KEY_UP:
            action = TA_Rotate;
            score += 100;
            break;
        case KEY_DOWN:
            action = TA_Down;
            score += 1000;
            break;
        case KEY_PAUSE:
            break;
        default:
            action = TA_Down;
            break;
        }
        if (CheckBlock(&curBlock, action)) {
            MoveBlock(&curBlock, action);
        } else if (action == TA_Down) {
            // Block landed: score the drop, promote the next block, and
            // generate a fresh one.
            ScoreUp(DropBlock(&curBlock));
            CopyBlock(&curBlock, &nextBlock);
            CreateBlock(&nextBlock);
            GetCurrentLine(curBlock.y);
        }
    }
    return 0;
}
// Create a Loop shape from |Entries|: blocks that can reach the entries
// form the loop body (removed from |Blocks|), and branch targets outside
// the body become the entries of the following shape via |NextEntries|.
// Contains disabled (#if 0) experiments for hoisting dead ends and extra
// next-entries into the loop.
Shape *MakeLoop(BlockSet &Blocks, BlockSet& Entries, BlockSet &NextEntries) {
  // Find the inner blocks in this loop. Proceed backwards from the entries until
  // you reach a seen block, collecting as you go.
  BlockSet InnerBlocks;
  BlockSet Queue = Entries;
  while (Queue.size() > 0) {
    Block *Curr = *(Queue.begin());
    Queue.erase(Queue.begin());
    if (!contains(InnerBlocks, Curr)) {
      // This element is new, mark it as inner and remove from outer
      InnerBlocks.insert(Curr);
      Blocks.erase(Curr);
      // Add the elements prior to it
      for (BlockSet::iterator iter = Curr->BranchesIn.begin(); iter != Curr->BranchesIn.end(); iter++) {
        Queue.insert(*iter);
      }
#if 0
      // Add elements it leads to, if they are dead ends. There is no reason not to hoist dead ends
      // into loops, as it can avoid multiple entries after the loop
      for (BlockBranchMap::iterator iter = Curr->BranchesOut.begin(); iter != Curr->BranchesOut.end(); iter++) {
        Block *Target = iter->first;
        if (Target->BranchesIn.size() <= 1 && Target->BranchesOut.size() == 0) {
          Queue.insert(Target);
        }
      }
#endif
    }
  }
  assert(InnerBlocks.size() > 0);

  // Collect the entries of the shape that will follow this loop.
  for (BlockSet::iterator iter = InnerBlocks.begin(); iter != InnerBlocks.end(); iter++) {
    Block *Curr = *iter;
    for (BlockBranchMap::iterator iter = Curr->BranchesOut.begin(); iter != Curr->BranchesOut.end(); iter++) {
      Block *Possible = iter->first;
      if (!contains(InnerBlocks, Possible)) {
        NextEntries.insert(Possible);
      }
    }
  }

#if 0
  // We can avoid multiple next entries by hoisting them into the loop.
  if (NextEntries.size() > 1) {
    BlockBlockSetMap IndependentGroups;
    FindIndependentGroups(NextEntries, IndependentGroups, &InnerBlocks);

    while (IndependentGroups.size() > 0 && NextEntries.size() > 1) {
      Block *Min = NULL;
      int MinSize = 0;
      for (BlockBlockSetMap::iterator iter = IndependentGroups.begin(); iter != IndependentGroups.end(); iter++) {
        Block *Entry = iter->first;
        BlockSet &Blocks = iter->second;
        if (!Min || Blocks.size() < MinSize) { // TODO: code size, not # of blocks
          Min = Entry;
          MinSize = Blocks.size();
        }
      }
      // check how many new entries this would cause
      BlockSet &Hoisted = IndependentGroups[Min];
      bool abort = false;
      for (BlockSet::iterator iter = Hoisted.begin(); iter != Hoisted.end() && !abort; iter++) {
        Block *Curr = *iter;
        for (BlockBranchMap::iterator iter = Curr->BranchesOut.begin(); iter != Curr->BranchesOut.end(); iter++) {
          Block *Target = iter->first;
          if (Hoisted.find(Target) == Hoisted.end() && NextEntries.find(Target) == NextEntries.end()) {
            // abort this hoisting
            abort = true;
            break;
          }
        }
      }
      if (abort) {
        IndependentGroups.erase(Min);
        continue;
      }
      // hoist this entry
      PrintDebug("hoisting %d into loop\n", Min->Id);
      NextEntries.erase(Min);
      for (BlockSet::iterator iter = Hoisted.begin(); iter != Hoisted.end(); iter++) {
        Block *Curr = *iter;
        InnerBlocks.insert(Curr);
        Blocks.erase(Curr);
      }
      IndependentGroups.erase(Min);
    }
  }
#endif

  PrintDebug("creating loop block:\n");
  DebugDump(InnerBlocks, " inner blocks:");
  DebugDump(Entries, " inner entries:");
  DebugDump(Blocks, " outer blocks:");
  DebugDump(NextEntries, " outer entries:");

  LoopShape *Loop = new LoopShape();
  Notice(Loop);

  // Solipsize the loop, replacing with break/continue and marking branches as Processed (will not affect later calculations)
  // A. Branches to the loop entries become a continue to this shape
  for (BlockSet::iterator iter = Entries.begin(); iter != Entries.end(); iter++) {
    Solipsize(*iter, Branch::Continue, Loop, InnerBlocks);
  }
  // B. Branches to outside the loop (a next entry) become breaks on this shape
  for (BlockSet::iterator iter = NextEntries.begin(); iter != NextEntries.end(); iter++) {
    Solipsize(*iter, Branch::Break, Loop, InnerBlocks);
  }
  // Finish up
  Shape *Inner = Process(InnerBlocks, Entries, NULL);
  Loop->Inner = Inner;
  return Loop;
}
VOID
MsDeleteRootDcb (
    IN PROOT_DCB RootDcb
    )

/*++

Routine Description:

    This routine deallocates and removes the ROOT DCB record
    from our in-memory data structures.  It also will remove all
    associated underlings (i.e., Notify queues and child FCB records).

Arguments:

    RootDcb - Supplies the ROOT DCB to be removed

Return Value:

    None

--*/

{
    PLIST_ENTRY links;
    PIRP irp;

    PAGED_CODE();

    DebugTrace(+1, Dbg, "MsDeleteRootDcb, RootDcb = %08lx\n", (ULONG)RootDcb);

    //
    // We can only delete this record if the reference count is zero.
    //

    if (RootDcb->Header.ReferenceCount != 0) {
        DebugDump("Error deleting RootDcb, Still Open\n", 0, RootDcb);
        KeBugCheck( MAILSLOT_FILE_SYSTEM );
    }

    //
    // Remove every notify IRP from the two notify queues.
    // Each pending IRP is completed with a forced-closed status.
    //

    while (!IsListEmpty(&RootDcb->Specific.Dcb.NotifyFullQueue)) {
        links = RemoveHeadList( &RootDcb->Specific.Dcb.NotifyFullQueue );
        irp = CONTAINING_RECORD( links, IRP, Tail.Overlay.ListEntry );
        MsCompleteRequest( irp, STATUS_FILE_FORCED_CLOSED );
    }

    while (!IsListEmpty(&RootDcb->Specific.Dcb.NotifyPartialQueue)) {
        links = RemoveHeadList( &RootDcb->Specific.Dcb.NotifyPartialQueue );
        irp = CONTAINING_RECORD( links, IRP, Tail.Overlay.ListEntry );
        MsCompleteRequest( irp, STATUS_FILE_FORCED_CLOSED );
    }

    //
    // We can only be removed if the no other FCB have us referenced
    // as a their parent DCB.
    //

    if (!IsListEmpty(&RootDcb->Specific.Dcb.ParentDcbQueue)) {
        DebugDump("Error deleting RootDcb\n", 0, RootDcb);
        KeBugCheck( MAILSLOT_FILE_SYSTEM );
    }

    //
    // Remove the entry from the prefix table, and then remove the full
    // file name.
    //

    MsAcquirePrefixTableLock();
    RtlRemoveUnicodePrefix( &RootDcb->Vcb->PrefixTable, &RootDcb->PrefixTableEntry );
    MsReleasePrefixTableLock();

    ExFreePool( RootDcb->FullFileName.Buffer );

    //
    // Free up the resource variable.
    //

    ExDeleteResource( &(RootDcb->Resource) );

    //
    // Finally deallocate the DCB record.
    //

    ExFreePool( RootDcb );

    //
    // Return to the caller.
    //

    DebugTrace(-1, Dbg, "MsDeleteRootDcb -> VOID\n", 0);

    return;
}
PFCB
MsCreateFcb (
    IN PVCB Vcb,
    IN PDCB ParentDcb,
    IN PUNICODE_STRING FileName,
    IN PEPROCESS CreatorProcess,
    IN ULONG MailslotQuota,
    IN ULONG MaximumMessageSize
    )

/*++

Routine Description:

    This routine allocates, initializes, and inserts a new Fcb record into
    the in memory data structures.

Arguments:

    Vcb - Supplies the Vcb to associate the new FCB under.

    ParentDcb - Supplies the parent dcb that the new FCB is under.

    FileName - Supplies the file name of the file relative to the directory
        it's in (e.g., the file \config.sys is called "CONFIG.SYS" without
        the preceding backslash).

    CreatorProcess - Supplies a pointer to our creator process

    MailslotQuota - Supplies the initial quota

    MaximumMessageSize - Supplies the size of the largest message that
        can be written to the mailslot

Return Value:

    PFCB - Returns a pointer to the newly allocated FCB

--*/

{
    PFCB fcb;

    PAGED_CODE();

    DebugTrace(+1, Dbg, "MsCreateFcb\n", 0);

    //
    // Allocate a new FCB record, and zero its fields.
    //

    fcb = FsRtlAllocatePool( NonPagedPool, sizeof(FCB) );
    RtlZeroMemory( fcb, sizeof(FCB) );

    //
    // Set the proper node type code, node byte size, and reference count.
    //

    fcb->Header.NodeTypeCode = MSFS_NTC_FCB;
    fcb->Header.NodeByteSize = sizeof(FCB);
    fcb->Header.ReferenceCount = 1;
    fcb->Header.NodeState = NodeStateActive;

    //
    // Insert this FCB into our parent DCB's queue.
    //

    InsertTailList( &ParentDcb->Specific.Dcb.ParentDcbQueue,
                    &fcb->ParentDcbLinks );

    //
    // Initialize other FCB fields.
    //

    fcb->ParentDcb = ParentDcb;
    fcb->Vcb = Vcb;

    MsAcquireGlobalLock();
    MsReferenceNode ( &Vcb->Header );

    if (Vcb->Header.ReferenceCount == 2) {

        //
        // Set the driver paging back to normal
        //

        MmResetDriverPaging(MsCreateFcb);
    }
    MsReleaseGlobalLock();

    fcb->CreatorProcess = CreatorProcess;
    ExInitializeResource( &(fcb->Resource) );

    //
    // Initialize the CCB queue.
    //

    InitializeListHead( &fcb->Specific.Fcb.CcbQueue );

    //
    // Set the file name.  The buffer holds the full name plus a NUL
    // terminator; LastFileName aliases the portion after the leading
    // backslash within the same allocation.
    //

    {
        PWCH Name;
        ULONG Length;

        Length = FileName->Length;

        Name = FsRtlAllocatePool( PagedPool, Length + 2 );

        RtlMoveMemory( Name, FileName->Buffer, Length );
        *(PWCH)( (PCH)Name + Length ) = L'\0';

        RtlInitUnicodeString( &fcb->FullFileName, Name );
        RtlInitUnicodeString( &fcb->LastFileName, &Name[1] );
    }

    //
    // Insert this FCB into the prefix table.
    //

    MsAcquirePrefixTableLock();
    if (!RtlInsertUnicodePrefix( &Vcb->PrefixTable,
                                 &fcb->FullFileName,
                                 &fcb->PrefixTableEntry )) {
        DebugDump("Error trying to name into prefix table\n", 0, fcb);
        KeBugCheck( MAILSLOT_FILE_SYSTEM );
    }
    MsReleasePrefixTableLock();

    //
    // Initialize the data queue.
    //

    MsInitializeDataQueue( &fcb->DataQueue,
                           CreatorProcess,
                           MailslotQuota,
                           MaximumMessageSize);

    //
    // Return to the caller.
    //

    DebugTrace(-1, Dbg, "MsCreateFcb -> %08lx\n", (ULONG)fcb);

    return fcb;
}
ULONG
FatExceptionFilter (
    IN PIRP_CONTEXT IrpContext,
    IN PEXCEPTION_POINTERS ExceptionPointer
    )

/*++

Routine Description:

    This routine is used to decide if we should or should not handle
    an exception status that is being raised.  It inserts the status
    into the IrpContext and either indicates that we should handle
    the exception or bug check the system.

Arguments:

    ExceptionPointers - The result of GetExceptionInformation() in the
        context of the exception.

Return Value:

    ULONG - returns EXCEPTION_EXECUTE_HANDLER or bugchecks

--*/

{
    NTSTATUS ExceptionCode;

    ExceptionCode = ExceptionPointer->ExceptionRecord->ExceptionCode;
    DebugTrace(0, DEBUG_TRACE_UNWIND, "FatExceptionFilter %X\n", ExceptionCode);
    DebugDump("FatExceptionFilter\n", Dbg, NULL );

    //
    // If the exception is STATUS_IN_PAGE_ERROR, get the I/O error code
    // from the exception record.
    //

    if (ExceptionCode == STATUS_IN_PAGE_ERROR) {
        if (ExceptionPointer->ExceptionRecord->NumberParameters >= 3) {
            ExceptionCode = (NTSTATUS)ExceptionPointer->ExceptionRecord->ExceptionInformation[2];
        }
    }

    //
    // If there is not an irp context, we must have had insufficient resources.
    // An unexpected status with no context is fatal.
    //

    if ( !ARGUMENT_PRESENT( IrpContext ) ) {

        if (!FsRtlIsNtstatusExpected( ExceptionCode )) {

            FatBugCheck( (ULONG_PTR)ExceptionPointer->ExceptionRecord,
                         (ULONG_PTR)ExceptionPointer->ContextRecord,
                         (ULONG_PTR)ExceptionPointer->ExceptionRecord->ExceptionAddress );
        }

        return EXCEPTION_EXECUTE_HANDLER;
    }

    //
    // For the purposes of processing this exception, let's mark this
    // request as being able to wait and disable write through if we
    // aren't posting it.
    //

    SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);

    if ( (ExceptionCode != STATUS_CANT_WAIT) &&
         (ExceptionCode != STATUS_VERIFY_REQUIRED) ) {

        SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_DISABLE_WRITE_THROUGH);
    }

    if ( IrpContext->ExceptionStatus == 0 ) {

        // First exception seen for this request: record it if it is an
        // expected status; otherwise bug check with the full context.

        if (FsRtlIsNtstatusExpected( ExceptionCode )) {

            IrpContext->ExceptionStatus = ExceptionCode;
            return EXCEPTION_EXECUTE_HANDLER;

        } else {

            FatBugCheck( (ULONG_PTR)ExceptionPointer->ExceptionRecord,
                         (ULONG_PTR)ExceptionPointer->ContextRecord,
                         (ULONG_PTR)ExceptionPointer->ExceptionRecord->ExceptionAddress );
        }

    } else {

        //
        // We raised this code explicitly ourselves, so it had better be
        // expected.
        //

        ASSERT( IrpContext->ExceptionStatus == ExceptionCode );
        ASSERT( FsRtlIsNtstatusExpected( ExceptionCode ) );
    }

    return EXCEPTION_EXECUTE_HANDLER;
}