void Dbtup::prepareCopyProcedure(Uint32 numAttrs, Uint16 tableBits)
{
  /* Set length of copy procedure section to the
   * number of attributes supplied.
   * Optionally appends extra pseudo-column reads (GCI64 / row author)
   * depending on the table bits, then trims the section's size and
   * last-segment pointer to match the new length.
   */
  ndbassert(numAttrs <= MAX_ATTRIBUTES_IN_TABLE);
  ndbassert(cCopyProcedure != RNIL);
  ndbassert(cCopyLastSeg == RNIL);       // no prepare currently outstanding
  ndbassert(cCopyOverwrite == 0);
  ndbassert(cCopyOverwriteLen == 0);
  Ptr<SectionSegment> first;
  g_sectionSegmentPool.getPtr(first, cCopyProcedure);

  /* Record original 'last segment' of section so that
   * releaseCopyProcedure() can restore it.
   */
  cCopyLastSeg= first.p->m_lastSegment;

  /* Check table bits to see if we need to do extra reads */
  Uint32 extraAttrIds[EXTRA_COPY_PROC_WORDS];
  Uint32 extraReads = 0;

  if (tableBits & Tablerec::TR_ExtraRowGCIBits)
  {
    /* Append a read of the 64-bit row GCI pseudo-column */
    AttributeHeader ah(AttributeHeader::ROW_GCI64,0);
    extraAttrIds[extraReads++] = ah.m_value;
  }
  if (tableBits & Tablerec::TR_ExtraRowAuthorBits)
  {
    /* Append a read of the row-author pseudo-column */
    AttributeHeader ah(AttributeHeader::ROW_AUTHOR,0);
    extraAttrIds[extraReads++] = ah.m_value;
  }

  /* Modify section to represent relevant prefix
   * of code by modifying size and lastSegment
   */
  Uint32 newSize = numAttrs + extraReads;
  first.p->m_sz= newSize;

  if (extraReads)
  {
    /* Remember where and how many words we overwrite so that
     * releaseCopyProcedure() can rewrite the original attribute ids.
     */
    cCopyOverwrite= numAttrs;
    cCopyOverwriteLen = extraReads;
    ndbrequire(writeToSection(first.i, numAttrs, extraAttrIds, extraReads));
  }

  /* Trim section size and lastSegment: walk the segment chain
   * until we reach the segment containing the final word.
   */
  Ptr<SectionSegment> curr= first;
  while(newSize > SectionSegment::DataLength)
  {
    g_sectionSegmentPool.getPtr(curr, curr.p->m_nextSegment);
    newSize-= SectionSegment::DataLength;
  }
  first.p->m_lastSegment= curr.i;
}
void Dbtup::releaseCopyProcedure()
{
  /* Return Copy Procedure section to original length,
   * undoing the trimming and word-overwrites performed by
   * prepareCopyProcedure().
   */
  ndbassert(cCopyProcedure != RNIL);
  ndbassert(cCopyLastSeg != RNIL);   // prepareCopyProcedure() must have run

  Ptr<SectionSegment> first;
  g_sectionSegmentPool.getPtr(first, cCopyProcedure);

  ndbassert(first.p->m_sz <= MAX_COPY_PROC_LEN);
  /* Restore the full section size and the original last segment */
  first.p->m_sz= MAX_COPY_PROC_LEN;
  first.p->m_lastSegment= cCopyLastSeg;

  if (cCopyOverwriteLen)
  {
    /* prepare overwrote cCopyOverwriteLen words (starting at attribute
     * index cCopyOverwrite) with pseudo-column reads; rewrite the plain
     * sequential attribute headers in their place.
     */
    ndbassert(cCopyOverwriteLen <= EXTRA_COPY_PROC_WORDS);
    Uint32 attrids[EXTRA_COPY_PROC_WORDS];
    for (Uint32 i=0; i < cCopyOverwriteLen; i++)
    {
      AttributeHeader ah(cCopyOverwrite + i, 0);
      attrids[i] = ah.m_value;
    }
    ndbrequire(writeToSection(first.i, cCopyOverwrite, attrids, cCopyOverwriteLen));
    cCopyOverwriteLen= 0;
    cCopyOverwrite= 0;
  }

  /* Mark that no prepare is outstanding */
  cCopyLastSeg= RNIL;
}
void run() { Client::WriteContext ctx(ns()); for (int i = 0; i < 50; ++i) { insert(BSON("foo" << 8 << "bar" << 20)); } addIndex(BSON("foo" << 1)); addIndex(BSON("bar" << 1)); WorkingSet ws; scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL)); // Foo == 7. Should be EOF. IndexScanParams params; params.descriptor = getIndex(BSON("foo" << 1)); params.bounds.isSimpleRange = true; params.bounds.startKey = BSON("" << 7); params.bounds.endKey = BSON("" << 7); params.bounds.endKeyInclusive = true; params.direction = 1; ah->addChild(new IndexScan(params, &ws, NULL)); // Bar == 20, not EOF. params.descriptor = getIndex(BSON("bar" << 1)); params.bounds.startKey = BSON("" << 20); params.bounds.endKey = BSON("" << 20); params.bounds.endKeyInclusive = true; params.direction = 1; ah->addChild(new IndexScan(params, &ws, NULL)); ASSERT_EQUALS(0, countResults(ah.get())); }
void run() { Client::WriteContext ctx(ns()); for (int i = 0; i < 50; ++i) { insert(BSON("foo" << 1 << "bar" << 1)); } addIndex(BSON("foo" << 1)); addIndex(BSON("bar" << 1)); WorkingSet ws; BSONObj filterObj = BSON("foo" << BSON("$ne" << 1)); StatusWithMatchExpression swme = MatchExpressionParser::parse(filterObj); verify(swme.isOK()); auto_ptr<MatchExpression> filterExpr(swme.getValue()); scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, filterExpr.get())); // Scan over foo == 1 IndexScanParams params; params.descriptor = getIndex(BSON("foo" << 1)); params.bounds.isSimpleRange = true; params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); params.bounds.endKeyInclusive = true; params.direction = 1; ah->addChild(new IndexScan(params, &ws, NULL)); // bar == 1 params.descriptor = getIndex(BSON("bar" << 1)); ah->addChild(new IndexScan(params, &ws, NULL)); // Filter drops everything. ASSERT_EQUALS(0, countResults(ah.get())); }
void run() { Client::WriteContext ctx(ns()); for (int i = 0; i < 50; ++i) { insert(BSON("foo" << i << "bar" << 20)); } addIndex(BSON("foo" << 1)); addIndex(BSON("bar" << 1)); WorkingSet ws; scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL)); // Foo <= 20 IndexScanParams params; params.descriptor = getIndex(BSON("foo" << 1)); params.startKey = BSON("" << 20); params.endKey = BSONObj(); params.endKeyInclusive = true; params.direction = -1; ah->addChild(new IndexScan(params, &ws, NULL)); // Bar == 5. Index scan should be eof. params.descriptor = getIndex(BSON("bar" << 1)); params.startKey = BSON("" << 5); params.endKey = BSON("" << 5); params.endKeyInclusive = true; params.direction = 1; ah->addChild(new IndexScan(params, &ws, NULL)); ASSERT_EQUALS(0, countResults(ah.get())); }
GfVec4d GfHomogeneousCross(const GfVec4d &a, const GfVec4d &b)
{
    // Project both inputs back to w == 1 before taking the cross product.
    const GfVec4d na = GfGetHomogenized(a);
    const GfVec4d nb = GfGetHomogenized(b);

    const GfVec3d cross = GfCross(GfVec3d(na[0], na[1], na[2]),
                                  GfVec3d(nb[0], nb[1], nb[2]));

    // Return the result as a homogeneous point with unit weight.
    return GfVec4d(cross[0], cross[1], cross[2], 1);
}
GfVec4f GfHomogeneousCross(const GfVec4f &a, const GfVec4f &b)
{
    // Project both inputs back to w == 1 before taking the cross product.
    const GfVec4f na = GfGetHomogenized(a);
    const GfVec4f nb = GfGetHomogenized(b);

    const GfVec3f cross = GfCross(GfVec3f(na[0], na[1], na[2]),
                                  GfVec3f(nb[0], nb[1], nb[2]));

    // Return the result as a homogeneous point with unit weight.
    return GfVec4f(cross[0], cross[1], cross[2], 1);
}
void run() {
    Client::WriteContext ctx(ns());
    Database* db = ctx.ctx().db();
    Collection* coll = db->getCollection(ns());
    if (!coll) {
        coll = db->createCollection(ns());
    }

    // 50 docs: foo == 1 on all of them, bar == 0..49.
    for (int i = 0; i < 50; ++i) {
        insert(BSON("foo" << 1 << "bar" << i));
    }

    addIndex(BSON("foo" << 1));
    addIndex(BSON("bar" << 1));

    WorkingSet ws;
    scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL));

    // Scan over foo == 1
    IndexScanParams params;
    params.descriptor = getIndex(BSON("foo" << 1), coll);
    params.bounds.isSimpleRange = true;
    params.bounds.startKey = BSON("" << 1);
    params.bounds.endKey = BSON("" << 1);
    params.bounds.endKeyInclusive = true;
    params.direction = 1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // Intersect with 7 <= bar < 10000
    // NOTE(review): endKeyInclusive stays true from the first scan (it is not
    // reassigned here), so the upper bound is actually inclusive — harmless
    // since no doc has bar == 10000.
    params.descriptor = getIndex(BSON("bar" << 1), coll);
    params.bounds.startKey = BSON("" << 7);
    params.bounds.endKey = BSON("" << 10000);
    ah->addChild(new IndexScan(params, &ws, NULL));

    WorkingSetID lastId = WorkingSet::INVALID_ID;

    // Drain the stage; results must come back in ascending 'bar' order,
    // starting at bar == 7.
    int count = 0;
    while (!ah->isEOF()) {
        WorkingSetID id;
        PlanStage::StageState status = ah->work(&id);
        if (PlanStage::ADVANCED != status) { continue; }

        BSONObj thisObj = ws.get(id)->loc.obj();
        ASSERT_EQUALS(7 + count, thisObj["bar"].numberInt());
        ++count;

        // Each result must compare strictly greater than its predecessor.
        if (WorkingSet::INVALID_ID != lastId) {
            BSONObj lastObj = ws.get(lastId)->loc.obj();
            ASSERT_LESS_THAN(lastObj["bar"].woCompare(thisObj["bar"]), 0);
        }
        lastId = id;
    }

    // bar in [7, 49] -> 43 documents.
    ASSERT_EQUALS(count, 43);
}
void run() {
    Client::WriteContext ctx(ns());
    Database* db = ctx.ctx().db();
    Collection* coll = db->getCollection(ns());
    if (!coll) {
        coll = db->createCollection(ns());
    }

    // foo == 0..49; bar == 20 on every document.
    for (int i = 0; i < 50; ++i) {
        insert(BSON("foo" << i << "bar" << 20));
    }

    addIndex(BSON("foo" << 1));
    addIndex(BSON("bar" << 1));

    WorkingSet ws;
    scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL));

    // Foo <= 20
    IndexScanParams params;
    params.descriptor = getIndex(BSON("foo" << 1), coll);
    params.bounds.isSimpleRange = true;
    params.bounds.startKey = BSON("" << 20);
    params.bounds.endKey = BSONObj();
    params.bounds.endKeyInclusive = true;
    params.direction = -1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // Bar == 5. Index scan should be eof.
    params.descriptor = getIndex(BSON("bar" << 1), coll);
    params.bounds.startKey = BSON("" << 5);
    params.bounds.endKey = BSON("" << 5);
    params.bounds.endKeyInclusive = true;
    params.direction = 1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // Count ADVANCED results and the number of work() calls needed.
    int count = 0;
    int works = 0;
    while (!ah->isEOF()) {
        WorkingSetID id;
        ++works;
        PlanStage::StageState status = ah->work(&id);
        if (PlanStage::ADVANCED != status) { continue; }
        ++count;
    }

    // Nothing matches both predicates.
    ASSERT_EQUALS(0, count);

    // We check the "look ahead for EOF" here by examining the number of works required to
    // hit EOF.  Our first call to work will pick up that bar==5 is EOF and the AND will EOF
    // immediately.
    ASSERT_EQUALS(works, 1);
}
/// <summary> /// Create new thread and execute code in it. Wait until execution ends /// </summary> /// <param name="pCode">Code to execute</param> /// <param name="size">Code size</param> /// <param name="callResult">Code return value</param> /// <returns>Status</returns> NTSTATUS RemoteExec::ExecInNewThread( PVOID pCode, size_t size, uint64_t& callResult ) { AsmJit::Assembler a; AsmJitHelper ah( a ); NTSTATUS dwResult = STATUS_SUCCESS; // Write code dwResult = CopyCode( pCode, size ); if (dwResult != STATUS_SUCCESS) return dwResult; bool switchMode = (_proc.core().native()->GetWow64Barrier().type == wow_64_32); auto pExitThread = _mods.GetExport( _mods.GetModule( L"ntdll.dll" ), "NtTerminateThread" ).procAddress; if (pExitThread == 0) return LastNtStatus( STATUS_NOT_FOUND ); ah.GenPrologue( switchMode ); // Prepare thread to run in x64 mode if(switchMode) { // Allocate new x64 activation stack auto createActStack = _mods.GetExport( _mods.GetModule( L"ntdll.dll", LdrList, mt_mod64 ), "RtlAllocateActivationContextStack" ).procAddress; if (createActStack) { ah.GenCall( static_cast<size_t>(createActStack), { _userData.ptr<size_t>() + 0x3100 } ); a.mov( AsmJit::nax, _userData.ptr<size_t>( ) + 0x3100 ); a.mov( AsmJit::nax, AsmJit::sysint_ptr( AsmJit::nax ) ); ah.SetTebPtr(); a.mov( AsmJit::sysint_ptr( AsmJit::ndx, 0x2c8 ), AsmJit::nax ); } } ah.GenCall( _userCode.ptr<size_t>(), { } ); ah.ExitThreadWithStatus( pExitThread, _userData.ptr<size_t>( ) + INTRET_OFFSET ); // Execute code in newly created thread if (_userCode.Write( size, a.getCodeSize(), a.make() ) == STATUS_SUCCESS) { auto thread = _threads.CreateNew( _userCode.ptr<ptr_t>() + size, _userData.ptr<ptr_t>() ); dwResult = thread.Join(); callResult = _userData.Read<uint64_t>( INTRET_OFFSET, 0 ); } else dwResult = LastNtStatus(); return dwResult; }
bool bt_rpa_irk_matches(const u8_t irk[16], const bt_addr_t *addr)
{
	u8_t calc[3];

	BT_DBG("IRK %s bdaddr %s", bt_hex(irk, 16), bt_addr_str(addr));

	/* Recompute the 24-bit hash from the IRK and the prand portion
	 * (upper three bytes) of the address; a failure means no match.
	 */
	if (ah(irk, addr->val + 3, calc)) {
		return false;
	}

	/* The address resolves iff its stored hash (low three bytes)
	 * equals the recomputed one.
	 */
	return memcmp(addr->val, calc, 3) == 0;
}
void run() {
    Client::WriteContext ctx(ns());
    Database* db = ctx.ctx().db();
    Collection* coll = db->getCollection(ns());
    if (!coll) {
        coll = db->createCollection(ns());
    }

    // Every doc has foo == bar == baz == i for i in [0, 50).
    for (int i = 0; i < 50; ++i) {
        insert(BSON("foo" << i << "bar" << i << "baz" << i));
    }

    addIndex(BSON("foo" << 1));
    addIndex(BSON("bar" << 1));
    addIndex(BSON("baz" << 1));

    WorkingSet ws;
    scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL));

    // Foo <= 20 (backward scan from 20, unbounded end)
    IndexScanParams params;
    params.descriptor = getIndex(BSON("foo" << 1), coll);
    params.bounds.isSimpleRange = true;
    params.bounds.startKey = BSON("" << 20);
    params.bounds.endKey = BSONObj();
    params.bounds.endKeyInclusive = true;
    params.direction = -1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // Bar >= 10
    params.descriptor = getIndex(BSON("bar" << 1), coll);
    params.bounds.startKey = BSON("" << 10);
    params.bounds.endKey = BSONObj();
    params.bounds.endKeyInclusive = true;
    params.direction = 1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // 5 <= baz <= 15
    params.descriptor = getIndex(BSON("baz" << 1), coll);
    params.bounds.startKey = BSON("" << 5);
    params.bounds.endKey = BSON("" << 15);
    params.bounds.endKeyInclusive = true;
    params.direction = 1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // foo == bar == baz, and foo<=20, bar>=10, 5<=baz<=15, so our values are:
    // foo == 10, 11, 12, 13, 14, 15.
    ASSERT_EQUALS(6, countResults(ah.get()));
}
bool im_address_resolve(ble_gap_addr_t const * p_addr, ble_gap_irk_t const * p_irk)
{
    /* Only resolvable private addresses can be resolved. */
    if (p_addr->addr_type != BLE_GAP_ADDR_TYPE_RANDOM_PRIVATE_RESOLVABLE)
    {
        return false;
    }

    uint8_t expected_hash[IM_ADDR_CIPHERTEXT_LENGTH];
    uint8_t computed_hash[IM_ADDR_CIPHERTEXT_LENGTH];
    uint8_t prand[IM_ADDR_CLEARTEXT_LENGTH];

    /* Split the address into its hash (low bytes) and prand (high bytes). */
    memcpy(expected_hash, p_addr->addr, IM_ADDR_CIPHERTEXT_LENGTH);
    memcpy(prand, &p_addr->addr[IM_ADDR_CIPHERTEXT_LENGTH], IM_ADDR_CLEARTEXT_LENGTH);

    /* Recompute the hash with the candidate IRK and compare. */
    ah(p_irk->irk, prand, computed_hash);

    return memcmp(expected_hash, computed_hash, IM_ADDR_CIPHERTEXT_LENGTH) == 0;
}
void run() {
    Client::WriteContext ctx(ns());
    Database* db = ctx.ctx().db();
    Collection* coll = db->getCollection(ns());
    if (!coll) {
        coll = db->createCollection(ns());
    }

    // Insert a bunch of data
    for (int i = 0; i < 50; ++i) {
        // Some data that'll show up but not be in all.
        insert(BSON("foo" << 1 << "baz" << 1));
        insert(BSON("foo" << 1 << "bar" << 1));
        // The needle in the haystack.  Only these should be returned by the AND.
        insert(BSON("foo" << 1 << "bar" << 1 << "baz" << 1));
        // Documents matching only one of the three predicates.
        insert(BSON("foo" << 1));
        insert(BSON("bar" << 1));
        insert(BSON("baz" << 1));
    }

    addIndex(BSON("foo" << 1));
    addIndex(BSON("bar" << 1));
    addIndex(BSON("baz" << 1));

    WorkingSet ws;
    scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL));

    // Scan over foo == 1
    IndexScanParams params;
    params.descriptor = getIndex(BSON("foo" << 1), coll);
    params.bounds.isSimpleRange = true;
    params.bounds.startKey = BSON("" << 1);
    params.bounds.endKey = BSON("" << 1);
    params.bounds.endKeyInclusive = true;
    params.direction = 1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // bar == 1 (same bounds, only the index descriptor changes)
    params.descriptor = getIndex(BSON("bar" << 1), coll);
    ah->addChild(new IndexScan(params, &ws, NULL));

    // baz == 1
    params.descriptor = getIndex(BSON("baz" << 1), coll);
    ah->addChild(new IndexScan(params, &ws, NULL));

    // One "needle" document per loop iteration matches all three scans.
    ASSERT_EQUALS(50, countResults(ah.get()));
}
void btle_generateResolvableAddress(const ble_gap_irk_t &irk, ble_gap_addr_t &address) { /* Set type to resolvable */ address.addr_type = BLE_GAP_ADDR_TYPE_RANDOM_PRIVATE_RESOLVABLE; /* * Assign a random number to the most significant 3 bytes * of the address. */ address.addr[BLE_GAP_ADDR_LEN - 3] = 0x8E; address.addr[BLE_GAP_ADDR_LEN - 2] = 0x4F; address.addr[BLE_GAP_ADDR_LEN - 1] = 0x7C; /* Calculate the hash and store it in the top half of the address */ ah(irk.irk, &address.addr[BLE_GAP_ADDR_LEN - 3], address.addr); }
/// <summary> /// Set custom exception handler to bypass SafeSEH under DEP /// </summary> /// <param name="pImage">image data</param> /// <returns>true on success</returns> bool MMap::EnableExceptions( ImageContext* pImage ) { #ifdef _M_AMD64 size_t size = pImage->PEImage.DirectorySize( IMAGE_DIRECTORY_ENTRY_EXCEPTION ); IMAGE_RUNTIME_FUNCTION_ENTRY *pExpTable = reinterpret_cast<decltype(pExpTable)>(pImage->PEImage.DirectoryAddress( IMAGE_DIRECTORY_ENTRY_EXCEPTION )); // Invoke RtlAddFunctionTable if(pExpTable) { AsmJit::Assembler a; AsmJitHelper ah(a); uint64_t result = 0; pImage->pExpTableAddr = REBASE( pExpTable, pImage->FileImage.base(), pImage->imgMem.ptr<ptr_t>() ); auto pAddTable = _process.modules().GetExport( _process.modules().GetModule( L"ntdll.dll", LdrList, pImage->PEImage.mType() ), "RtlAddFunctionTable" ); ah.GenPrologue(); ah.GenCall( static_cast<size_t>(pAddTable.procAddress), { pImage->pExpTableAddr, size / sizeof(IMAGE_RUNTIME_FUNCTION_ENTRY), pImage->imgMem.ptr<size_t>() } ); _process.remote().AddReturnWithEvent( ah ); ah.GenEpilogue(); if (_process.remote().ExecInWorkerThread( a.make(), a.getCodeSize(), result ) != STATUS_SUCCESS) return false; if (pImage->flags & CreateLdrRef) return true; else return (MExcept::CreateVEH( pImage->imgMem.ptr<size_t>(), pImage->PEImage.imageSize() ) == STATUS_SUCCESS); } else return false; #else _process.nativeLdr().InsertInvertedFunctionTable( pImage->imgMem.ptr<void*>(), pImage->PEImage.imageSize() ); if (pImage->flags & PartialExcept) return true; else return (MExcept::CreateVEH( pImage->imgMem.ptr<size_t>(), pImage->PEImage.imageSize() ) == STATUS_SUCCESS); #endif }
void run() {
    Client::WriteContext ctx(ns());
    Database* db = ctx.ctx().db();
    Collection* coll = db->getCollection(ns());
    if (!coll) {
        coll = db->createCollection(ns());
    }

    // foo == 0..49 and bar == 100 - foo (so bar runs 51..100).
    for (int i = 0; i < 50; ++i) {
        insert(BSON("foo" << i << "bar" << (100 - i)));
    }
    addIndex(BSON("foo" << 1));
    addIndex(BSON("bar" << 1));

    WorkingSet ws;

    // Filter that keeps only bar == 97 (the i == 3 document).
    BSONObj filter = BSON("bar" << 97);
    StatusWithMatchExpression swme = MatchExpressionParser::parse(filter);
    verify(swme.isOK());
    auto_ptr<MatchExpression> filterExpr(swme.getValue());
    scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, filterExpr.get()));

    // Foo <= 20
    IndexScanParams params;
    params.descriptor = getIndex(BSON("foo" << 1), coll);
    params.bounds.isSimpleRange = true;
    params.bounds.startKey = BSON("" << 20);
    params.bounds.endKey = BSONObj();
    params.bounds.endKeyInclusive = true;
    params.direction = -1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // Bar >= 10
    // NOTE(review): the comment here previously said "Bar >= 95" but the
    // startKey is 10; the assertion below is unaffected either way, since
    // the bar == 97 filter selects exactly one document in both cases.
    params.descriptor = getIndex(BSON("bar" << 1), coll);
    params.bounds.startKey = BSON("" << 10);
    params.bounds.endKey = BSONObj();
    params.bounds.endKeyInclusive = true;
    params.direction = 1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // Bar == 97
    ASSERT_EQUALS(1, countResults(ah.get()));
}
void run() {
    Client::WriteContext ctx(ns());
    Database* db = ctx.ctx().db();
    Collection* coll = db->getCollection(ns());
    if (!coll) {
        coll = db->createCollection(ns());
    }

    // Two disjoint populations: docs with only 'foo' (100..109) and docs
    // with only 'bar' (0..9).  No document has both fields.
    for (int i = 0; i < 10; ++i) {
        insert(BSON("foo" << (100 + i)));
        insert(BSON("bar" << i));
    }

    addIndex(BSON("foo" << 1));
    addIndex(BSON("bar" << 1));

    WorkingSet ws;
    scoped_ptr<AndHashStage> ah(new AndHashStage(&ws, NULL));

    // Foo >= 100
    IndexScanParams params;
    params.descriptor = getIndex(BSON("foo" << 1), coll);
    params.bounds.isSimpleRange = true;
    params.bounds.startKey = BSON("" << 100);
    params.bounds.endKey = BSONObj();
    params.bounds.endKeyInclusive = true;
    params.direction = 1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // Bar <= 100
    params.descriptor = getIndex(BSON("bar" << 1), coll);
    params.bounds.startKey = BSON("" << 100);
    // This is subtle and confusing.  We couldn't extract any keys from the elements with
    // 'foo' in them so we would normally index them with the "nothing found" key.  We don't
    // want to include that in our scan.
    params.bounds.endKey = BSON("" << "");
    params.bounds.endKeyInclusive = false;
    params.direction = -1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // No document appears in both child scans.
    ASSERT_EQUALS(0, countResults(ah.get()));
}
void run() {
    Client::WriteContext ctx(ns());
    Database* db = ctx.ctx().db();
    Collection* coll = db->getCollection(ns());
    if (!coll) {
        coll = db->createCollection(ns());
    }

    for (int i = 0; i < 50; ++i) {
        // Insert data with foo=7, bar==20, but nothing with both.
        insert(BSON("foo" << 8 << "bar" << 20));
        insert(BSON("foo" << 7 << "bar" << 21));
        insert(BSON("foo" << 7));
        insert(BSON("bar" << 20));
    }

    addIndex(BSON("foo" << 1));
    addIndex(BSON("bar" << 1));

    WorkingSet ws;
    scoped_ptr<AndSortedStage> ah(new AndSortedStage(&ws, NULL));

    // foo == 7.
    IndexScanParams params;
    params.descriptor = getIndex(BSON("foo" << 1), coll);
    params.bounds.isSimpleRange = true;
    params.bounds.startKey = BSON("" << 7);
    params.bounds.endKey = BSON("" << 7);
    params.bounds.endKeyInclusive = true;
    params.direction = 1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // bar == 20.
    params.descriptor = getIndex(BSON("bar" << 1), coll);
    params.bounds.startKey = BSON("" << 20);
    params.bounds.endKey = BSON("" << 20);
    params.bounds.endKeyInclusive = true;
    params.direction = 1;
    ah->addChild(new IndexScan(params, &ws, NULL));

    // No document satisfies both predicates, so the intersection is empty.
    ASSERT_EQUALS(0, countResults(ah.get()));
}
void Ndbinfo::Row::write_uint32(Uint32 value)
{
  // Header describing the next column as a 32-bit number.
  AttributeHeader ah(col_counter++, sizeof(Uint32));
  check_attribute_type(ah, Ndbinfo::Number);

  // Bail out silently if the row buffer cannot hold header + data.
  if (!check_buffer_space(ah))
    return;

  // Emit the header, then the raw value, advancing the write cursor.
  ah.insertHeader(curr);
  curr += ah.getHeaderSize();

  memcpy(curr, &value, sizeof(Uint32));
  curr += ah.getDataSize();

  assert(curr <= end);
}
int bt_rpa_create(const u8_t irk[16], bt_addr_t *rpa)
{
	/* Fill the top three bytes of the address with fresh random prand. */
	int err = bt_rand(rpa->val + 3, 3);

	if (err) {
		return err;
	}

	/* Stamp the address with the RPA bit pattern. */
	BT_ADDR_SET_RPA(rpa);

	/* Derive the 24-bit hash from IRK and prand; it becomes the
	 * low three bytes of the address.
	 */
	err = ah(irk, rpa->val + 3, rpa->val);
	if (err) {
		return err;
	}

	BT_DBG("Created RPA %s", bt_addr_str((bt_addr_t *)rpa->val));

	return 0;
}
/**
 * Call the apply handler for each element in a linked list
 *
 * @param list Linked list
 * @param fwd  true to traverse from head to tail, false for reverse
 * @param ah   Apply handler
 * @param arg  Handler argument
 *
 * @return Current list element if handler returned true
 */
struct le *list_apply(const struct list *list, bool fwd,
		      list_apply_h *ah, void *arg)
{
	struct le *el;

	if (!list || !ah)
		return NULL;

	/* Capture the successor before invoking the handler, so the
	 * traversal survives handlers that unlink the current element.
	 */
	for (el = fwd ? list->head : list->tail; el; ) {
		struct le *next = fwd ? el->next : el->prev;

		if (ah(el, arg))
			return el;

		el = next;
	}

	return NULL;
}
void Ndbinfo::Row::write_string(const char* str) { const size_t clen = strlen(str) + 1; // Create AttributeHeader AttributeHeader ah(col_counter++, (Uint32)clen); check_attribute_type(ah, Ndbinfo::String); if (!check_buffer_space(ah)) return; // Write AttributeHeader to buffer ah.insertHeader(curr); curr += ah.getHeaderSize(); // Write data to buffer memcpy(curr, str, clen); curr += ah.getDataSize(); assert(curr <= end); return; }
/// <summary> /// Remove custom exception handler /// </summary> /// <param name="pImage">image data</param> /// <returns>true on success</returns> bool MMap::DisableExceptions( ImageContext* pImage ) { #ifdef _M_AMD64 if(pImage->pExpTableAddr) { AsmJit::Assembler a; AsmJitHelper ah( a ); size_t result = 0; ah.GenPrologue(); auto pRmoveTable = _process.modules().GetExport( _process.modules().GetModule( L"ntdll.dll", LdrList, pImage->PEImage.mType() ), "RtlDeleteFunctionTable" ); // RtlDeleteFunctionTable(pExpTable); ah.GenCall( static_cast<size_t>(pRmoveTable.procAddress), { pImage->pExpTableAddr } ); _process.remote().AddReturnWithEvent( ah ); ah.GenEpilogue(); if (_process.remote().ExecInWorkerThread( a.make(), a.getCodeSize(), result ) != STATUS_SUCCESS) return false; if (pImage->flags & CreateLdrRef) return true; else return (MExcept::RemoveVEH() == STATUS_SUCCESS); } else return false; #else if (pImage->flags & (PartialExcept | NoExceptions)) return true; else return (MExcept::RemoveVEH() == STATUS_SUCCESS); #endif }
QuatT CombineHVComponents3D(const QuatT& cmpH, const QuatT& cmpV)
{
	ANIMCHAR_PROFILE_DETAILED;

	CRY_ASSERT(cmpH.IsValid());
	CRY_ASSERT(cmpV.IsValid());

	// Translation: x/y from the horizontal component, z from the vertical one.
	QuatT out;
	out.t.x = cmpH.t.x;
	out.t.y = cmpH.t.y;
	out.t.z = cmpV.t.z;

	// Rotation: Euler x/y angles from the vertical component, z from the
	// horizontal one.  TODO: This should be optimized!
	const Ang3 angH(cmpH.q);
	const Ang3 angV(cmpV.q);
	out.q.SetRotationXYZ(Ang3(angV.x, angV.y, angH.z));

	CRY_ASSERT(out.IsValid());
	return out;
}
/** obtain compiletime provided root hints */
static struct delegpt*
compile_time_root_prime(int do_ip4, int do_ip6)
{
	/* from:
	 ;       This file is made available by InterNIC
	 ;       under anonymous FTP as
	 ;           file                /domain/named.cache
	 ;           on server           FTP.INTERNIC.NET
	 ;       -OR-                    RS.INTERNIC.NET
	 ;
	 ;       related version of root zone:   changes-on-20120103
	 */
	/* Root delegation point: owner name is the root label */
	struct delegpt* dp = delegpt_create_mlc((uint8_t*)"\000");
	if(!dp)
		return NULL;
	dp->has_parent_side_NS = 1;
	/* ah() adds one nameserver name+address pair to dp; any failure
	 * (presumably allocation or parse) aborts the whole prime. */
	if(do_ip4) {
		if(!ah(dp, "A.ROOT-SERVERS.NET.", "198.41.0.4")) goto failed;
		if(!ah(dp, "B.ROOT-SERVERS.NET.", "192.228.79.201")) goto failed;
		if(!ah(dp, "C.ROOT-SERVERS.NET.", "192.33.4.12")) goto failed;
		if(!ah(dp, "D.ROOT-SERVERS.NET.", "199.7.91.13")) goto failed;
		if(!ah(dp, "E.ROOT-SERVERS.NET.", "192.203.230.10")) goto failed;
		if(!ah(dp, "F.ROOT-SERVERS.NET.", "192.5.5.241")) goto failed;
		if(!ah(dp, "G.ROOT-SERVERS.NET.", "192.112.36.4")) goto failed;
		if(!ah(dp, "H.ROOT-SERVERS.NET.", "198.97.190.53")) goto failed;
		if(!ah(dp, "I.ROOT-SERVERS.NET.", "192.36.148.17")) goto failed;
		if(!ah(dp, "J.ROOT-SERVERS.NET.", "192.58.128.30")) goto failed;
		if(!ah(dp, "K.ROOT-SERVERS.NET.", "193.0.14.129")) goto failed;
		if(!ah(dp, "L.ROOT-SERVERS.NET.", "199.7.83.42")) goto failed;
		if(!ah(dp, "M.ROOT-SERVERS.NET.", "202.12.27.33")) goto failed;
	}
	/* NOTE(review): no IPv6 entry for G — presumably it had no IPv6
	 * address as of the zone snapshot above; verify before adding. */
	if(do_ip6) {
		if(!ah(dp, "A.ROOT-SERVERS.NET.", "2001:503:ba3e::2:30")) goto failed;
		if(!ah(dp, "B.ROOT-SERVERS.NET.", "2001:500:84::b")) goto failed;
		if(!ah(dp, "C.ROOT-SERVERS.NET.", "2001:500:2::c")) goto failed;
		if(!ah(dp, "D.ROOT-SERVERS.NET.", "2001:500:2d::d")) goto failed;
		if(!ah(dp, "E.ROOT-SERVERS.NET.", "2001:500:a8::e")) goto failed;
		if(!ah(dp, "F.ROOT-SERVERS.NET.", "2001:500:2f::f")) goto failed;
		if(!ah(dp, "H.ROOT-SERVERS.NET.", "2001:500:1::53")) goto failed;
		if(!ah(dp, "I.ROOT-SERVERS.NET.", "2001:7fe::53")) goto failed;
		if(!ah(dp, "J.ROOT-SERVERS.NET.", "2001:503:c27::2:30")) goto failed;
		if(!ah(dp, "K.ROOT-SERVERS.NET.", "2001:7fd::1")) goto failed;
		if(!ah(dp, "L.ROOT-SERVERS.NET.", "2001:500:9f::42")) goto failed;
		if(!ah(dp, "M.ROOT-SERVERS.NET.", "2001:dc3::35")) goto failed;
	}
	return dp;
failed:
	delegpt_free_mlc(dp);
	return 0;
}
int NdbIndexStat::records_in_range(const NdbDictionary::Index* index, NdbIndexScanOperation* op, Uint64 table_rows, Uint64* count, int flags) { DBUG_ENTER("NdbIndexStat::records_in_range"); Uint64 rows; Uint32 key1[1000], keylen1; Uint32 key2[1000], keylen2; if (m_cache == NULL) flags |= RR_UseDb | RR_NoUpdate; else if (m_area[0].m_entries == 0 || m_area[1].m_entries == 0) flags |= RR_UseDb; if ((flags & (RR_UseDb | RR_NoUpdate)) != RR_UseDb | RR_NoUpdate) { // get start and end key - assume bound is ordered, wellformed Uint32 bound[1000]; Uint32 boundlen = op->getKeyFromSCANTABREQ(bound, 1000); keylen1 = keylen2 = 0; Uint32 n = 0; while (n < boundlen) { Uint32 t = bound[n]; AttributeHeader ah(bound[n + 1]); Uint32 sz = 2 + ah.getDataSize(); t &= 0xFFFF; // may contain length assert(t <= 4); bound[n] = t; if (t == 0 || t == 1 || t == 4) { memcpy(&key1[keylen1], &bound[n], sz << 2); keylen1 += sz; } if (t == 2 || t == 3 || t == 4) { memcpy(&key2[keylen2], &bound[n], sz << 2); keylen2 += sz; } n += sz; } } if (flags & RR_UseDb) { Uint32 out[4] = { 0, 0, 0, 0 }; // rows, in, before, after float tot[4] = { 0, 0, 0, 0 }; // totals of above int cnt, ret; bool forceSend = true; NdbTransaction* trans = op->m_transConnection; if (op->interpret_exit_last_row() == -1 || op->getValue(NdbDictionary::Column::RECORDS_IN_RANGE, (char*)out) == 0) { m_error = op->getNdbError(); DBUG_PRINT("error", ("op:%d", op->getNdbError().code)); DBUG_RETURN(-1); } if (trans->execute(NdbTransaction::NoCommit, NdbOperation::AbortOnError, forceSend) == -1) { m_error = trans->getNdbError(); DBUG_PRINT("error", ("trans:%d op:%d", trans->getNdbError().code, op->getNdbError().code)); DBUG_RETURN(-1); } cnt = 0; while ((ret = op->nextResult(true, forceSend)) == 0) { DBUG_PRINT("info", ("frag rows=%u in=%u before=%u after=%u [error=%d]", out[0], out[1], out[2], out[3], (int)(out[1] + out[2] + out[3]) - (int)out[0])); unsigned i; for (i = 0; i < 4; i++) tot[i] += (float)out[i]; cnt++; } if (ret == -1) { 
m_error = op->getNdbError(); DBUG_PRINT("error", ("trans:%d op:%d", trans->getNdbError().code, op->getNdbError().code)); DBUG_RETURN(-1); } op->close(forceSend); rows = (Uint64)tot[1]; if (cnt != 0 && ! (flags & RR_NoUpdate)) { float pct[2]; pct[0] = 100 * tot[2] / tot[0]; pct[1] = 100 * tot[3] / tot[0]; DBUG_PRINT("info", ("update stat pct" " before=%.2f after=%.2f", pct[0], pct[1])); stat_update(key1, keylen1, key2, keylen2, pct); } } else { float pct[2]; stat_select(key1, keylen1, key2, keylen2, pct); float diff = 100.0 - (pct[0] + pct[1]); float trows = (float)table_rows; DBUG_PRINT("info", ("select stat pct" " before=%.2f after=%.2f in=%.2f table_rows=%.2f", pct[0], pct[1], diff, trows)); rows = 0; if (diff >= 0) rows = (Uint64)(diff * trows / 100); if (rows == 0) rows = 1; } *count = rows; DBUG_PRINT("value", ("rows=%llu flags=%o", rows, flags)); DBUG_RETURN(0); }
int main(int argc, char* argv[]) { if (argc < 2) { printf("Usage: %s <program name>\n", argv[0]); exit(0); } filename = argv[1]; FILE* fp = fopen(filename, "rb"); if (fp == 0) error("opening"); ram = (Byte*)malloc(0x10000); memset(ram, 0, 0x10000); if (ram == 0) { fprintf(stderr, "Out of memory\n"); exit(1); } if (fseek(fp, 0, SEEK_END) != 0) error("seeking"); length = ftell(fp); if (length == -1) error("telling"); if (fseek(fp, 0, SEEK_SET) != 0) error("seeking"); if (length > 0x10000 - 0x100) { fprintf(stderr, "%s is too long to be a .com file\n", filename); exit(1); } if (fread(&ram[0x100], length, 1, fp) != 1) error("reading"); fclose(fp); Word segment = 0x1000; setAX(0x0000); setCX(0x00FF); setDX(segment); registers[3] = 0x0000; setSP(0xFFFE); registers[5] = 0x091C; setSI(0x0100); setDI(0xFFFE); for (int i = 0; i < 4; ++i) registers[8 + i] = segment; Byte* byteData = (Byte*)®isters[0]; int bigEndian = (byteData[2] == 0 ? 1 : 0); int byteNumbers[8] = {0, 2, 4, 6, 1, 3, 5, 7}; for (int i = 0 ; i < 8; ++i) byteRegisters[i] = &byteData[byteNumbers[i] ^ bigEndian]; bool prefix = false; for (int i = 0; i < 1000000000; ++i) { if (!repeating) { if (!prefix) { segmentOverride = -1; rep = 0; } prefix = false; opcode = fetchByte(); } wordSize = ((opcode & 1) != 0); bool sourceIsRM = ((opcode & 2) != 0); int operation = (opcode >> 3) & 7; bool jump; switch (opcode) { case 0x00: case 0x01: case 0x02: case 0x03: case 0x08: case 0x09: case 0x0a: case 0x0b: case 0x10: case 0x11: case 0x12: case 0x13: case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x20: case 0x21: case 0x22: case 0x23: case 0x28: case 0x29: case 0x2a: case 0x2b: case 0x30: case 0x31: case 0x32: case 0x33: case 0x38: case 0x39: case 0x3a: case 0x3b: // alu rmv,rmv data = readEA(); if (!sourceIsRM) { destination = data; source = getReg(); } else { destination = getReg(); source = data; } aluOperation = operation; doALUOperation(); if (aluOperation != 7) { if (!sourceIsRM) finishWriteEA(data); else 
setReg(data); } break; case 0x04: case 0x05: case 0x0c: case 0x0d: case 0x14: case 0x15: case 0x1c: case 0x1d: case 0x24: case 0x25: case 0x2c: case 0x2d: case 0x34: case 0x35: case 0x3c: case 0x3d: // alu accum,i destination = getAccum(); source = !wordSize ? fetchByte() : fetchWord(); aluOperation = operation; doALUOperation(); if (aluOperation != 7) setAccum(); break; case 0x06: case 0x0e: case 0x16: case 0x1e: // PUSH segreg push(registers[operation + 8]); break; case 0x07: case 0x17: case 0x1f: // POP segreg registers[operation + 8] = pop(); break; case 0x26: case 0x2e: case 0x36: case 0x3e: // segment override segmentOverride = operation; prefix = true; break; case 0x27: case 0x2f: // DA if (af() || (al() & 0x0f) > 9) { data = al() + (opcode == 0x27 ? 6 : -6); setAL(data); setAF(true); if ((data & 0x100) != 0) setCF(true); } setCF(cf() || al() > 0x9f); if (cf()) setAL(al() + (opcode == 0x27 ? 0x60 : -0x60)); wordSize = false; data = al(); setPZS(); break; case 0x37: case 0x3f: // AA if (af() || (al() & 0xf) > 9) { setAL(al() + (opcode == 0x37 ? 6 : -6)); setAH(ah() + (opcode == 0x37 ? 
1 : -1)); setCA(); } else clearCA(); setAL(al() & 0x0f); break; case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47: case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f: // incdec rw destination = rw(); wordSize = true; setRW(incdec((opcode & 8) != 0)); break; case 0x50: case 0x51: case 0x52: case 0x53: case 0x54: case 0x55: case 0x56: case 0x57: // PUSH rw push(rw()); break; case 0x58: case 0x59: case 0x5a: case 0x5b: case 0x5c: case 0x5d: case 0x5e: case 0x5f: // POP rw setRW(pop()); break; case 0x60: case 0x61: case 0x62: case 0x63: case 0x64: case 0x65: case 0x66: case 0x67: case 0x68: case 0x69: case 0x6a: case 0x6b: case 0x6c: case 0x6d: case 0x6e: case 0x6f: case 0xc0: case 0xc1: case 0xc8: case 0xc9: // invalid case 0xcc: case 0xf0: case 0xf1: case 0xf4: // INT 3, LOCK, HLT case 0x9b: case 0xce: case 0x0f: // WAIT, INTO, POP CS case 0xd8: case 0xd9: case 0xda: case 0xdb: case 0xdc: case 0xdd: case 0xde: case 0xdf: // escape case 0xe4: case 0xe5: case 0xe6: case 0xe7: case 0xec: case 0xed: case 0xee: case 0xef: // IN, OUT fprintf(stderr, "Invalid opcode %02x", opcode); runtimeError(""); break; case 0x70: case 0x71: case 0x72: case 0x73: case 0x74: case 0x75: case 0x76: case 0x77: case 0x78: case 0x79: case 0x7a: case 0x7b: case 0x7c: case 0x7d: case 0x7e: case 0x7f: // Jcond cb switch (opcode & 0x0e) { case 0x00: jump = of(); break; case 0x02: jump = cf(); break; case 0x04: jump = zf(); break; case 0x06: jump = cf() || zf(); break; case 0x08: jump = sf(); break; case 0x0a: jump = pf(); break; case 0x0c: jump = sf() != of(); break; default: jump = sf() != of() || zf(); break; } jumpShort(fetchByte(), jump == ((opcode & 1) == 0)); break; case 0x80: case 0x81: case 0x82: case 0x83: // alu rmv,iv destination = readEA(); data = fetch(opcode == 0x81); if (opcode != 0x83) source = data; else source = signExtend(data); aluOperation = modRMReg(); doALUOperation(); if (aluOperation != 7) 
finishWriteEA(data); break; case 0x84: case 0x85: // TEST rmv,rv data = readEA(); test(data, getReg()); break; case 0x86: case 0x87: // XCHG rmv,rv data = readEA(); finishWriteEA(getReg()); setReg(data); break; case 0x88: case 0x89: // MOV rmv,rv ea(); finishWriteEA(getReg()); break; case 0x8a: case 0x8b: // MOV rv,rmv setReg(readEA()); break; case 0x8c: // MOV rmw,segreg ea(); wordSize = 1; finishWriteEA(registers[modRMReg() + 8]); break; case 0x8d: // LEA address = ea(); if (!useMemory) runtimeError("LEA needs a memory address"); setReg(address); break; case 0x8e: // MOV segreg,rmw wordSize = 1; data = readEA(); registers[modRMReg() + 8] = data; break; case 0x8f: // POP rmw writeEA(pop()); break; case 0x90: case 0x91: case 0x92: case 0x93: case 0x94: case 0x95: case 0x96: case 0x97: // XCHG AX,rw data = ax(); setAX(rw()); setRW(data); break; case 0x98: // CBW setAX(signExtend(al())); break; case 0x99: // CWD setDX((ax() & 0x8000) == 0 ? 0x0000 : 0xffff); break; case 0x9a: // CALL cp savedIP = fetchWord(); savedCS = fetchWord(); farCall(); break; case 0x9c: // PUSHF push((flags & 0x0fd7) | 0xf000); break; case 0x9d: // POPF flags = pop() | 2; break; case 0x9e: // SAHF flags = (flags & 0xff02) | ah(); break; case 0x9f: // LAHF setAH(flags & 0xd7); break; case 0xa0: case 0xa1: // MOV accum,xv data = read(fetchWord()); setAccum(); break; case 0xa2: case 0xa3: // MOV xv,accum write(getAccum(), fetchWord()); break; case 0xa4: case 0xa5: // MOVSv stoS(lodS()); doRep(); break; case 0xa6: case 0xa7: // CMPSv lodDIS(); source = data; sub(); doRep(); break; case 0xa8: case 0xa9: // TEST accum,iv data = fetch(wordSize); test(getAccum(), data); break; case 0xaa: case 0xab: // STOSv stoS(getAccum()); doRep(); break; case 0xac: case 0xad: // LODSv data = lodS(); setAccum(); doRep(); break; case 0xae: case 0xaf: // SCASv lodDIS(); destination = getAccum(); source = data; sub(); doRep(); break; case 0xb0: case 0xb1: case 0xb2: case 0xb3: case 0xb4: case 0xb5: case 0xb6: case 
0xb7: setRB(fetchByte()); break; case 0xb8: case 0xb9: case 0xba: case 0xbb: case 0xbc: case 0xbd: case 0xbe: case 0xbf: // MOV rv,iv setRW(fetchWord()); break; case 0xc2: case 0xc3: case 0xca: case 0xcb: // RET savedIP = pop(); savedCS = (opcode & 8) == 0 ? cs() : pop(); if (!wordSize) setSP(sp() + fetchWord()); farJump(); break; case 0xc4: case 0xc5: // LES/LDS ea(); farLoad(); *modRMRW() = savedIP; registers[8 + (!wordSize ? 0 : 3)] = savedCS; break; case 0xc6: case 0xc7: // MOV rmv,iv ea(); finishWriteEA(fetch(wordSize)); break; case 0xcd: data = fetchByte(); if (data != 0x21) { fprintf(stderr, "Unknown interrupt 0x%02x", data); runtimeError(""); } switch (ah()) { case 2: printf("%c", dl()); break; case 0x4c: printf("*** Bytes: %i\n", length); printf("*** Cycles: %i\n", ios); printf("*** EXIT code %i\n", al()); exit(0); break; default: fprintf(stderr, "Unknown DOS call 0x%02x", data); runtimeError(""); } break; case 0xcf: ip = pop(); setCS(pop()); flags = pop() | 2; break; case 0xd0: case 0xd1: case 0xd2: case 0xd3: // rot rmv,n data = readEA(); if ((opcode & 2) == 0) source = 1; else source = cl(); while (source != 0) { destination = data; switch (modRMReg()) { case 0: // ROL data <<= 1; doCF(); data |= (cf() ? 1 : 0); setOFRotate(); break; case 1: // ROR setCF((data & 1) != 0); data >>= 1; if (cf()) data |= (!wordSize ? 0x80 : 0x8000); setOFRotate(); break; case 2: // RCL data = (data << 1) | (cf() ? 1 : 0); doCF(); setOFRotate(); break; case 3: // RCR data >>= 1; if (cf()) data |= (!wordSize ? 
0x80 : 0x8000); setCF((destination & 1) != 0); setOFRotate(); break; case 4: // SHL case 6: data <<= 1; doCF(); setOFRotate(); setPZS(); break; case 5: // SHR setCF((data & 1) != 0); data >>= 1; setOFRotate(); setAF(true); setPZS(); break; case 7: // SAR setCF((data & 1) != 0); data >>= 1; if (!wordSize) data |= (destination & 0x80); else data |= (destination & 0x8000); setOFRotate(); setAF(true); setPZS(); break; } --source; } finishWriteEA(data); break; case 0xd4: // AAM data = fetchByte(); if (data == 0) divideOverflow(); setAH(al() / data); setAL(al() % data); wordSize = true; setPZS(); break; case 0xd5: // AAD data = fetchByte(); setAL(al() + ah()*data); setAH(0); setPZS(); break; case 0xd6: // SALC setAL(cf() ? 0xff : 0x00); break; case 0xd7: // XLATB setAL(readByte(bx() + al())); break; case 0xe0: case 0xe1: case 0xe2: // LOOPc cb setCX(cx() - 1); jump = (cx() != 0); switch (opcode) { case 0xe0: if (zf()) jump = false; break; case 0xe1: if (!zf()) jump = false; break; } jumpShort(fetchByte(), jump); break; case 0xe3: // JCXZ cb jumpShort(fetchByte(), cx() == 0); break; case 0xe8: // CALL cw call(ip + fetchWord()); break; case 0xe9: // JMP cw ip += fetchWord(); break; case 0xea: // JMP cp savedIP = fetchWord(); savedCS = fetchWord(); farJump(); break; case 0xeb: // JMP cb jumpShort(fetchByte(), true); break; case 0xf2: case 0xf3: // REP rep = opcode == 0xf2 ? 
1 : 2; prefix = true; break; case 0xf5: // CMC flags ^= 1; break; case 0xf6: case 0xf7: // math rmv data = readEA(); switch (modRMReg()) { case 0: case 1: // TEST rmv,iv test(data, fetch(wordSize)); break; case 2: // NOT iv finishWriteEA(~data); break; case 3: // NEG iv source = data; destination = 0; sub(); finishWriteEA(data); break; case 4: case 5: // MUL rmv, IMUL rmv source = data; destination = getAccum(); data = destination; setSF(); setPF(); data *= source; setAX(data); if (!wordSize) { if (modRMReg() == 4) setCF(ah() != 0); else { if ((source & 0x80) != 0) setAH(ah() - destination); if ((destination & 0x80) != 0) setAH(ah() - source); setCF(ah() == ((al() & 0x80) == 0 ? 0 : 0xff)); } } else { setDX(data >> 16); if (modRMReg() == 4) { data |= dx(); setCF(dx() != 0); } else { if ((source & 0x8000) != 0) setDX(dx() - destination); if ((destination & 0x8000) != 0) setDX(dx() - source); data |= dx(); setCF(dx() == ((ax() & 0x8000) == 0 ? 0 : 0xffff)); } } setZF(); setOF(cf()); break; case 6: case 7: // DIV rmv, IDIV rmv source = data; if (source == 0) divideOverflow(); if (!wordSize) { destination = ax(); if (modRMReg() == 6) { div(); if (data > 0xff) divideOverflow(); } else { destination = ax(); if ((destination & 0x8000) != 0) destination |= 0xffff0000; source = signExtend(source); div(); if (data > 0x7f && data < 0xffffff80) divideOverflow(); } setAH(remainder); setAL(data); } else { destination = (dx() << 16) + ax(); div(); if (modRMReg() == 6) { if (data > 0xffff) divideOverflow(); } else { if (data > 0x7fff && data < 0xffff8000) divideOverflow(); } setDX(remainder); setAX(data); } break; } break; case 0xf8: case 0xf9: // STC/CLC setCF(wordSize); break; case 0xfa: case 0xfb: // STI/CLI setIF(wordSize); break; case 0xfc: case 0xfd: // STD/CLD setDF(wordSize); break; case 0xfe: case 0xff: // misc ea(); if ((!wordSize && modRMReg() >= 2 && modRMReg() <= 6) || modRMReg() == 7) { fprintf(stderr, "Invalid instruction %02x %02x", opcode, modRM); 
runtimeError(""); } switch (modRMReg()) { case 0: case 1: // incdec rmv destination = readEA2(); finishWriteEA(incdec(modRMReg() != 0)); break; case 2: // CALL rmv call(readEA2()); break; case 3: // CALL mp farLoad(); farCall(); break; case 4: // JMP rmw ip = readEA2(); break; case 5: // JMP mp farLoad(); farJump(); break; case 6: // PUSH rmw push(readEA2()); break; } break; } } runtimeError("Timed out"); }
/*
 * Search key vs node prefix or entry.
 *
 * The comparison starts at given attribute position. The position is
 * updated by number of equal initial attributes found. The entry data
 * may be partial in which case CmpUnknown may be returned.
 *
 * The attributes are normalized and have variable size given in words.
 *
 * Returns <0, 0, >0 for less/equal/greater, or NdbSqlUtil::CmpUnknown
 * when the available entry data (maxlen words) runs out before a
 * decision can be made.
 */
int
Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, ConstData searchKey, ConstData entryData, unsigned maxlen)
{
  const unsigned numAttrs = frag.m_numAttrs;
  const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
  // skip to right position in search key only
  // (entryData is assumed to already be positioned at attribute `start`)
  for (unsigned i = 0; i < start; i++) {
    jam();
    searchKey += AttributeHeaderSize + ah(searchKey).getDataSize();
  }
  // number of words of entry data left
  unsigned len2 = maxlen;
  int ret = 0;
  while (start < numAttrs) {
    // need at least a full attribute header to continue
    if (len2 <= AttributeHeaderSize) {
      jam();
      ret = NdbSqlUtil::CmpUnknown;
      break;
    }
    len2 -= AttributeHeaderSize;
    if (! ah(searchKey).isNULL()) {
      if (! ah(entryData).isNULL()) {
        jam();
        // verify attribute id
        const DescAttr& descAttr = descEnt.m_descAttr[start];
        ndbrequire(ah(searchKey).getAttributeId() == descAttr.m_primaryAttrId);
        ndbrequire(ah(entryData).getAttributeId() == descAttr.m_primaryAttrId);
        // sizes (entry side clamped to remaining data — may be partial)
        const unsigned size1 = ah(searchKey).getDataSize();
        const unsigned size2 = min(ah(entryData).getDataSize(), len2);
        len2 -= size2;
        // compare using the per-attribute comparison function;
        // sizes are passed in bytes (words << 2), `full` tells the
        // comparator whether the entry data is complete
        NdbSqlUtil::Cmp* const cmp = c_sqlCmp[start];
        const Uint32* const p1 = &searchKey[AttributeHeaderSize];
        const Uint32* const p2 = &entryData[AttributeHeaderSize];
        const bool full = (maxlen == MaxAttrDataSize);
        ret = (*cmp)(0, p1, size1 << 2, p2, size2 << 2, full);
        if (ret != 0) {
          jam();
          break;
        }
      } else {
        jam();
        // not NULL > NULL
        ret = +1;
        break;
      }
    } else {
      if (! ah(entryData).isNULL()) {
        jam();
        // NULL < not NULL
        ret = -1;
        break;
      }
      // both NULL: attributes are equal, fall through to the next one
    }
    // advance both sides past this (equal) attribute;
    // `start` is a reference, so the caller sees how many matched
    searchKey += AttributeHeaderSize + ah(searchKey).getDataSize();
    entryData += AttributeHeaderSize + ah(entryData).getDataSize();
    start++;
  }
  return ret;
}
/* * Scan bound vs node prefix or entry. * * Compare lower or upper bound and index entry data. The entry data * may be partial in which case CmpUnknown may be returned. Otherwise * returns -1 if the bound is to the left of the entry and +1 if the * bound is to the right of the entry. * * The routine is similar to cmpSearchKey, but 0 is never returned. * Suppose all attributes compare equal. Recall that all bounds except * possibly the last one are non-strict. Use the given bound direction * (0-lower 1-upper) and strictness of last bound to return -1 or +1. * * Following example illustrates this. We are at (a=2, b=3). * * idir bounds strict return * 0 a >= 2 and b >= 3 no -1 * 0 a >= 2 and b > 3 yes +1 * 1 a <= 2 and b <= 3 no +1 * 1 a <= 2 and b < 3 yes -1 * * The attributes are normalized and have variable size given in words. */ int Dbtux::cmpScanBound(const Frag& frag, unsigned idir, ConstData boundInfo, unsigned boundCount, ConstData entryData, unsigned maxlen) { const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff); // direction 0-lower 1-upper ndbrequire(idir <= 1); // number of words of data left unsigned len2 = maxlen; // in case of no bounds, init last type to something non-strict unsigned type = 4; while (boundCount != 0) { if (len2 <= AttributeHeaderSize) { jam(); return NdbSqlUtil::CmpUnknown; } len2 -= AttributeHeaderSize; // get and skip bound type (it is used after the loop) type = boundInfo[0]; boundInfo += 1; if (! ah(boundInfo).isNULL()) { if (! 
ah(entryData).isNULL()) { jam(); // verify attribute id const Uint32 index = ah(boundInfo).getAttributeId(); ndbrequire(index < frag.m_numAttrs); const DescAttr& descAttr = descEnt.m_descAttr[index]; ndbrequire(ah(entryData).getAttributeId() == descAttr.m_primaryAttrId); // sizes const unsigned size1 = ah(boundInfo).getDataSize(); const unsigned size2 = min(ah(entryData).getDataSize(), len2); len2 -= size2; // compare NdbSqlUtil::Cmp* const cmp = c_sqlCmp[index]; const Uint32* const p1 = &boundInfo[AttributeHeaderSize]; const Uint32* const p2 = &entryData[AttributeHeaderSize]; const bool full = (maxlen == MaxAttrDataSize); int ret = (*cmp)(0, p1, size1 << 2, p2, size2 << 2, full); if (ret != 0) { jam(); return ret; } } else { jam(); // not NULL > NULL return +1; } } else { jam(); if (! ah(entryData).isNULL()) { jam(); // NULL < not NULL return -1; } } boundInfo += AttributeHeaderSize + ah(boundInfo).getDataSize(); entryData += AttributeHeaderSize + ah(entryData).getDataSize(); boundCount -= 1; } // all attributes were equal const int strict = (type & 0x1); return (idir == 0 ? (strict == 0 ? -1 : +1) : (strict == 0 ? +1 : -1)); }