transform_iterator operator++(int) { transform_iterator result (*this); increment(); return result; }
tree_iterator operator--(int) { tree_iterator result (*this); members_.nodeptr_ = node_algorithms::prev_node(members_.nodeptr_); return result; }
void CUser::ZoneChange(uint16 sNewZone, float x, float z) { C3DMap * pMap = g_pMain->GetZoneByID(sNewZone); if (pMap == nullptr) return; ZoneChangeError errorReason; if (!CanChangeZone(pMap, errorReason)) { Packet result; switch (errorReason) { case ZoneChangeErrorWrongLevel: /* this will depend on the zone */ break; case ZoneChangeErrorWarActive: result.Initialize(WIZ_WARP_LIST); result << uint8(2) << uint8(4); Send(&result); break; case ZoneChangeErrorNeedLoyalty: /* does this have an error? */ break; } return; } m_bWarp = true; m_bZoneChangeFlag = true; UserInOut(INOUT_OUT); if (sNewZone == ZONE_SNOW_BATTLE) SetMaxHp(1); if (GetZoneID() != sNewZone) { SetZoneAbilityChange(); // Reset the user's anger gauge when leaving the zone // Unknown if this is official behaviour, but it's logical. if (GetAngerGauge() > 0) UpdateAngerGauge(0); /* Here we also send a clan packet with subopcode 0x16 (with a byte flag of 2) if war zone/Moradon or subopcode 0x17 (with nWarEnemyID) for all else */ #if 0 if (isInClan()) { CKnights * pKnights = g_pMain->GetClanPtr(GetClanID()); if (pKnights != nullptr && pKnights->bKnightsWarStarted) { Packet clanPacket(WIZ_KNIGHTS_PROCESS); if (pMap->isWarZone() || byNewZone == ZONE_MORADON) clanPacket << uint8(0x17) << uint8(2); else clanPacket << uint16(0x16) << uint16(0 /*nWarEnemyID*/); Send(&clanPacket); } } #endif if (sNewZone == ZONE_SNOW_BATTLE) SetMaxHp(); if (isInParty()) PartyRemove(GetSocketID()); ResetWindows(); } m_bZone = (uint8) sNewZone; // this is 2 bytes to support the warp data loaded from SMDs. It should not go above a byte, however. SetPosition(x, 0.0f, z); m_pMap = pMap; if (g_pMain->m_nServerNo != pMap->m_nServerNo) { _ZONE_SERVERINFO *pInfo = g_pMain->m_ServerArray.GetData(pMap->m_nServerNo); if (pInfo == nullptr) return; UserDataSaveToAgent(); m_bLogout = 2; // server change flag SendServerChange(pInfo->strServerIP, 2); return; } SetRegion(GetNewRegionX(), GetNewRegionZ()); Packet result(WIZ_ZONE_CHANGE, uint8(ZoneChangeTeleport)); result << uint16(GetZoneID()) << GetSPosX() << GetSPosZ() << GetSPosY() << g_pMain->m_byOldVictory; Send(&result); if (!m_bZoneChangeSameZone) { m_sWhoKilledMe = -1; m_iLostExp = 0; m_bRegeneType = 0; m_tLastRegeneTime = 0; m_sBind = -1; InitType3(); InitType4(); CMagicProcess::CheckExpiredType9Skills(this, true); SetUserAbility(); } result.Initialize(AG_ZONE_CHANGE); result << GetSocketID() << GetZoneID(); Send_AIServer(&result); m_bZoneChangeSameZone = false; m_bZoneChangeFlag = false; }
constant_iterator operator++(int) { constant_iterator result (*this); increment(); return result; }
int main() { llvm::LLVMContext &context = llvm::getGlobalContext(); llvm::Module* module; llvm::IRBuilder<> builder(context); module = new llvm::Module("intermediary code", context); /* function Main */ llvm::Type* int64Type = llvm::Type::getInt64Ty(context); llvm::FunctionType* typeOfMain = llvm::FunctionType::get(int64Type, false); llvm::Function* mainFunction = llvm::Function::Create(typeOfMain, llvm::Function::ExternalLinkage, "main", module); llvm::BasicBlock* mainBB = llvm::BasicBlock::Create(context, "mainBB", mainFunction); builder.SetInsertPoint(mainBB); /* function Teste */ llvm::Type* boolType = llvm::Type::getInt1Ty(context); std::vector<llvm::Type*> testeParams(3, boolType); llvm::FunctionType* typeOfTeste = llvm::FunctionType::get(boolType, testeParams, false); llvm::Function* testeFunction = llvm::Function::Create(typeOfTeste, llvm::Function::ExternalLinkage, "teste", module); llvm::Function::arg_iterator params = testeFunction->arg_begin(); params->setName("x"); params++; params->setName("y"); params++; params->setName("z"); llvm::BasicBlock* testeBB = llvm::BasicBlock::Create(context, "testeBB", testeFunction); builder.SetInsertPoint(testeBB); llvm::Function::arg_iterator declParams = testeFunction->arg_begin(); auto x = declParams; auto y = ++declParams; auto z = ++declParams; auto xy = builder.CreateAnd(x, y, "andxy"); auto xz = builder.CreateAnd(x, z, "andxz"); auto nz = builder.CreateNot(z, "nz"); auto ny = builder.CreateNot(y, "ny"); auto nzny = builder.CreateAnd(nz, ny, "nzny"); auto res = builder.CreateOr(xy, xz, "res"); res = builder.CreateOr(res, nzny, "res"); builder.CreateRet(res); llvm::verifyFunction(*testeFunction); /* function Exemplo */ llvm::Type* realType = llvm::Type::getDoubleTy(context); std::vector<llvm::Type*> exemploParams(2, realType); llvm::FunctionType* typeOfExemplo = llvm::FunctionType::get(realType, exemploParams, false); llvm::Function* exemploFunction = llvm::Function::Create(typeOfExemplo, llvm::Function::ExternalLinkage, "exemplo", module); params = exemploFunction->arg_begin(); params->setName("a"); params++; params->setName("b"); llvm::BasicBlock* exemploBB = llvm::BasicBlock::Create(context, "exemploBB", exemploFunction); builder.SetInsertPoint(exemploBB); auto a = exemploFunction->arg_begin(); auto b = ++(exemploFunction->arg_begin()); auto multab = builder.CreateFMul(a, b, "multab"); auto const20 = llvm::ConstantFP::get(context, llvm::APFloat(2.0)); auto modab = builder.CreateFRem(a, b, "modab");//??? 
auto ret = builder.CreateFMul(const20, modab, "ret"); ret = builder.CreateFSub(multab, ret, "ret"); builder.CreateRet(ret); llvm::verifyFunction(*exemploFunction); /* Calling the functions */ builder.SetInsertPoint(mainBB); std::vector<llvm::Value*> args; args.push_back(llvm::ConstantInt::get(context, llvm::APInt(1, 0))); args.push_back(llvm::ConstantInt::get(context, llvm::APInt(1, 0))); args.push_back(llvm::ConstantInt::get(context, llvm::APInt(1, 1))); auto testeRet = builder.CreateCall(testeFunction, args, "callteste"); std::vector<llvm::Value*> args2; args2.push_back(llvm::ConstantFP::get(context, llvm::APFloat(10.0))); args2.push_back(llvm::ConstantFP::get(context, llvm::APFloat(5.0))); auto exemploRet = builder.CreateCall(exemploFunction, args2, "callexemplo"); /* main was declared to return i64, so zero-extend the i1 result of teste before returning it */ builder.CreateRet(builder.CreateZExt(testeRet, int64Type, "mainret")); module->dump(); llvm::ExecutionEngine* execEngine; std::string err; LLVMInitializeNativeTarget(); execEngine = llvm::EngineBuilder(module).setErrorStr(&err).create(); if(!execEngine) { fprintf(stderr, "Could not create execEngine: %s\n", err.c_str()); exit(1); } void* mainPtr = execEngine->getPointerToFunction(mainFunction); int(*result)() = (int (*)())(intptr_t)mainPtr; std::cout << "Result of main: " << result() << std::endl; }
value_init_construct_iterator operator++(int) { value_init_construct_iterator result (*this); increment(); return result; }
this_type operator++(int) { this_type result (*this); increment(); return result; }
String operator+(const String &s1, const String &s2) { String result(s1); result.append(s2); return result; }
String operator+(const String &s1, const char *s2) { String result(s1); result += s2; return result; }
const QImage ImageLoaderFreeImage::imageAsRGB(const QSize &size) const { const QSize resultSize = size.isValid() ? size : sizePixels(); const bool isRGB24 = colorDataType() == Types::ColorTypeRGB && bitsPerPixel() == 24; const bool isARGB32 = colorDataType() == Types::ColorTypeRGBA && bitsPerPixel() == 32; QImage result(resultSize, isARGB32 ? QImage::Format_ARGB32 : QImage::Format_RGB32); const int width = resultSize.width(); const int height = resultSize.height(); const QSize sizePixels = this->sizePixels(); FIBITMAP* originalImage = m_bitmap; FIBITMAP* temp24BPPImage = NULL; FIBITMAP* scaledImage = NULL; if (!(isRGB24 || isARGB32)) { if (colorDataType() == Types::ColorTypeCMYK) { const bool isCmykJpeg = isJpeg(); // Value range inverted temp24BPPImage = FreeImage_Allocate(sizePixels.width(), sizePixels.height(), 24); const unsigned int columnsCount = sizePixels.width(); const unsigned int scanlinesCount = sizePixels.height(); for (unsigned int scanline = 0; scanline < scanlinesCount; scanline++) { const BYTE* const cmykBits = FreeImage_GetScanLine(m_bitmap, scanline); tagRGBTRIPLE* const rgbBits = (tagRGBTRIPLE *)FreeImage_GetScanLine(temp24BPPImage, scanline); for (unsigned int column = 0; column < columnsCount; column++) { const unsigned int cmykColumn = column * 4; const QColor rgbColor = isCmykJpeg ? QColor::fromCmyk(255 - cmykBits[cmykColumn], 255 - cmykBits[cmykColumn + 1], 255 - cmykBits[cmykColumn + 2], 255 - cmykBits[cmykColumn + 3]) : QColor::fromCmyk(cmykBits[cmykColumn], cmykBits[cmykColumn + 1], cmykBits[cmykColumn + 2], cmykBits[cmykColumn + 3]); rgbBits[column].rgbtRed = (BYTE)rgbColor.red(); rgbBits[column].rgbtGreen = (BYTE)rgbColor.green(); rgbBits[column].rgbtBlue = (BYTE)rgbColor.blue(); } } } else { temp24BPPImage = FreeImage_ConvertTo24Bits(originalImage); } originalImage = temp24BPPImage; } if (resultSize != sizePixels) { scaledImage = FreeImage_Rescale(originalImage, width, height, FILTER_BOX); originalImage = scaledImage; } for (int scanline = 0; scanline < height; scanline++) { QRgb *targetData = (QRgb*)result.scanLine(scanline); if (isARGB32) { const tagRGBQUAD *sourceRgba = (tagRGBQUAD*)FreeImage_GetScanLine(originalImage, height - scanline - 1); for (int column = 0; column < width; column++) { *targetData++ = qRgba(sourceRgba->rgbRed, sourceRgba->rgbGreen, sourceRgba->rgbBlue, sourceRgba->rgbReserved); sourceRgba++; } } else { const tagRGBTRIPLE *sourceRgb = (tagRGBTRIPLE*)FreeImage_GetScanLine(originalImage, height - scanline - 1); for (int column = 0; column < width; column++) { *targetData++ = qRgb(sourceRgb->rgbtRed, sourceRgb->rgbtGreen, sourceRgb->rgbtBlue); sourceRgb++; } } } if (temp24BPPImage) FreeImage_Unload(temp24BPPImage); if (scaledImage) FreeImage_Unload(scaledImage); return result; }
String operator+(const String &s1, const char c2) { String result(s1); result.add(c2); return result; }
IddObjectType CurveExponentialSkewNormal::iddObjectType() { IddObjectType result(IddObjectType::OS_Curve_ExponentialSkewNormal); return result; }
Matrix Matrix::operator~() const { Matrix result(*this); result.inverse(); return result; }
IddObjectType CoilCoolingWater::iddObjectType() { IddObjectType result(IddObjectType::OS_Coil_Cooling_Water); return result; }
UniValue estimaterawfee(const JSONRPCRequest& request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) throw std::runtime_error( "estimaterawfee conf_target (threshold)\n" "\nWARNING: This interface is unstable and may disappear or change!\n" "\nWARNING: This is an advanced API call that is tightly coupled to the specific\n" " implementation of fee estimation. The parameters it can be called with\n" " and the results it returns will change if the internal implementation changes.\n" "\nEstimates the approximate fee per kilobyte needed for a transaction to begin\n" "confirmation within conf_target blocks if possible. Uses virtual transaction size as\n" "defined in BIP 141 (witness data is discounted).\n" "\nArguments:\n" "1. conf_target (numeric) Confirmation target in blocks (1 - 1008)\n" "2. threshold (numeric, optional) The proportion of transactions in a given feerate range that must have been\n" " confirmed within conf_target in order to consider those feerates as high enough and proceed to check\n" " lower buckets. Default: 0.95\n" "\nResult:\n" "{\n" " \"short\" : { (json object, optional) estimate for short time horizon\n" " \"feerate\" : x.x, (numeric, optional) estimate fee-per-kilobyte (in BTC)\n" " \"decay\" : x.x, (numeric) exponential decay (per block) for historical moving average of confirmation data\n" " \"scale\" : x, (numeric) The resolution of confirmation targets at this time horizon\n" " \"pass\" : { (json object, optional) information about the lowest range of feerates to succeed in meeting the threshold\n" " \"startrange\" : x.x, (numeric) start of feerate range\n" " \"endrange\" : x.x, (numeric) end of feerate range\n" " \"withintarget\" : x.x, (numeric) number of txs over history horizon in the feerate range that were confirmed within target\n" " \"totalconfirmed\" : x.x, (numeric) number of txs over history horizon in the feerate range that were confirmed at any point\n" " \"inmempool\" : x.x, (numeric) current number of txs in mempool in the feerate range unconfirmed for at least target blocks\n" " \"leftmempool\" : x.x, (numeric) number of txs over history horizon in the feerate range that left mempool unconfirmed after target\n" " },\n" " \"fail\" : { ... }, (json object, optional) information about the highest range of feerates to fail to meet the threshold\n" " \"errors\": [ str... ] (json array of strings, optional) Errors encountered during processing\n" " },\n" " \"medium\" : { ... }, (json object, optional) estimate for medium time horizon\n" " \"long\" : { ... 
} (json object) estimate for long time horizon\n" "}\n" "\n" "Results are returned for any horizon which tracks blocks up to the confirmation target.\n" "\nExample:\n" + HelpExampleCli("estimaterawfee", "6 0.9") ); RPCTypeCheck(request.params, {UniValue::VNUM, UniValue::VNUM}, true); RPCTypeCheckArgument(request.params[0], UniValue::VNUM); unsigned int conf_target = ParseConfirmTarget(request.params[0]); double threshold = 0.95; if (!request.params[1].isNull()) { threshold = request.params[1].get_real(); } if (threshold < 0 || threshold > 1) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid threshold"); } UniValue result(UniValue::VOBJ); for (FeeEstimateHorizon horizon : {FeeEstimateHorizon::SHORT_HALFLIFE, FeeEstimateHorizon::MED_HALFLIFE, FeeEstimateHorizon::LONG_HALFLIFE}) { CFeeRate feeRate; EstimationResult buckets; // Only output results for horizons which track the target if (conf_target > ::feeEstimator.HighestTargetTracked(horizon)) continue; feeRate = ::feeEstimator.estimateRawFee(conf_target, threshold, horizon, &buckets); UniValue horizon_result(UniValue::VOBJ); UniValue errors(UniValue::VARR); UniValue passbucket(UniValue::VOBJ); passbucket.push_back(Pair("startrange", round(buckets.pass.start))); passbucket.push_back(Pair("endrange", round(buckets.pass.end))); passbucket.push_back(Pair("withintarget", round(buckets.pass.withinTarget * 100.0) / 100.0)); passbucket.push_back(Pair("totalconfirmed", round(buckets.pass.totalConfirmed * 100.0) / 100.0)); passbucket.push_back(Pair("inmempool", round(buckets.pass.inMempool * 100.0) / 100.0)); passbucket.push_back(Pair("leftmempool", round(buckets.pass.leftMempool * 100.0) / 100.0)); UniValue failbucket(UniValue::VOBJ); failbucket.push_back(Pair("startrange", round(buckets.fail.start))); failbucket.push_back(Pair("endrange", round(buckets.fail.end))); failbucket.push_back(Pair("withintarget", round(buckets.fail.withinTarget * 100.0) / 100.0)); failbucket.push_back(Pair("totalconfirmed", round(buckets.fail.totalConfirmed * 100.0) / 100.0)); failbucket.push_back(Pair("inmempool", round(buckets.fail.inMempool * 100.0) / 100.0)); failbucket.push_back(Pair("leftmempool", round(buckets.fail.leftMempool * 100.0) / 100.0)); // CFeeRate(0) is used to indicate error as a return value from estimateRawFee if (feeRate != CFeeRate(0)) { horizon_result.push_back(Pair("feerate", ValueFromAmount(feeRate.GetFeePerK()))); horizon_result.push_back(Pair("decay", buckets.decay)); horizon_result.push_back(Pair("scale", (int)buckets.scale)); horizon_result.push_back(Pair("pass", passbucket)); // buckets.fail.start == -1 indicates that all buckets passed, there is no fail bucket to output if (buckets.fail.start != -1) horizon_result.push_back(Pair("fail", failbucket)); } else { // Output only information that is still meaningful in the event of error horizon_result.push_back(Pair("decay", buckets.decay)); horizon_result.push_back(Pair("scale", (int)buckets.scale)); horizon_result.push_back(Pair("fail", failbucket)); errors.push_back("Insufficient data or no feerate found which meets threshold"); horizon_result.push_back(Pair("errors",errors)); } result.push_back(Pair(StringForFeeEstimateHorizon(horizon), horizon_result)); } return result; }
String operator+(const char c1, const String &s2) { String result(1, c1); result += s2; return result; }
IddObjectType SpaceInfiltrationEffectiveLeakageArea::iddObjectType() { IddObjectType result(IddObjectType::OS_SpaceInfiltration_EffectiveLeakageArea); return result; }
Spectrum eval(const BSDFQueryRecord &bRec, EMeasure measure) const { Spectrum sigmaA = m_sigmaA->getValue(bRec.its), sigmaS = m_sigmaS->getValue(bRec.its), sigmaT = sigmaA + sigmaS, tauD = sigmaT * m_thickness, result(0.0f); if (measure == EDiscrete) { /* Figure out if the specular transmission is specifically requested */ bool hasSpecularTransmission = (bRec.typeMask & EDeltaTransmission) && (bRec.component == -1 || bRec.component == 2); /* Return the attenuated light if requested */ if (hasSpecularTransmission && std::abs(1+dot(bRec.wi, bRec.wo)) < Epsilon) result = (-tauD/std::abs(Frame::cosTheta(bRec.wi))).exp(); } else if (measure == ESolidAngle) { /* Sample single scattering events */ bool hasGlossyReflection = (bRec.typeMask & EGlossyReflection) && (bRec.component == -1 || bRec.component == 0); bool hasGlossyTransmission = (bRec.typeMask & EGlossyTransmission) && (bRec.component == -1 || bRec.component == 1); Spectrum albedo; for (int i = 0; i < SPECTRUM_SAMPLES; i++) albedo[i] = sigmaT[i] > 0 ? (sigmaS[i]/sigmaT[i]) : (Float) 0; const Float cosThetaI = Frame::cosTheta(bRec.wi), cosThetaO = Frame::cosTheta(bRec.wo), dp = cosThetaI*cosThetaO; bool reflection = dp > 0, transmission = dp < 0; /* ==================================================================== */ /* Reflection component */ /* ==================================================================== */ if (hasGlossyReflection && reflection) { MediumSamplingRecord dummy; PhaseFunctionQueryRecord pRec(dummy,bRec.wi,bRec.wo); const Float phaseVal = m_phase->eval(pRec); result = albedo * (phaseVal*cosThetaI/(cosThetaI+cosThetaO)) * (Spectrum(1.0f)-((-1.0f/std::abs(cosThetaI)-1.0f/std::abs(cosThetaO)) * tauD).exp()); } /* ==================================================================== */ /* Transmission component */ /* ==================================================================== */ if (hasGlossyTransmission && transmission && m_thickness < std::numeric_limits<Float>::infinity()) { MediumSamplingRecord dummy; PhaseFunctionQueryRecord pRec(dummy,bRec.wi,bRec.wo); const Float phaseVal = m_phase->eval(pRec); /* Hanrahan etal 93 Single Scattering transmission term */ if (std::abs(cosThetaI + cosThetaO) < Epsilon) { /* avoid division by zero */ result += albedo * phaseVal*tauD/std::abs(cosThetaO) * ((-tauD/std::abs(cosThetaO)).exp()); } else { /* Guaranteed to be positive even if |cosThetaO| > |cosThetaI| */ result += albedo * phaseVal*std::abs(cosThetaI)/(std::abs(cosThetaI)-std::abs(cosThetaO)) * ((-tauD/std::abs(cosThetaI)).exp() - (-tauD/std::abs(cosThetaO)).exp()); } } return result * std::abs(cosThetaO); } return result; }
default_init_construct_iterator operator--(int) { default_init_construct_iterator result (*this); decrement(); return result; }
Test::Result PK_Encryption_Decryption_Test::run_one_test(const std::string& pad_hdr, const VarMap& vars) {
  const std::vector<uint8_t> plaintext = get_req_bin(vars, "Msg");
  const std::vector<uint8_t> ciphertext = get_req_bin(vars, "Ciphertext");
  const std::string padding = choose_padding(vars, pad_hdr);
  Test::Result result(algo_name() + (padding.empty() ? padding : "/" + padding) + " decryption");
  std::unique_ptr<Botan::Private_Key> privkey = load_private_key(vars);
  // instead slice the private key to work around elgamal test inputs
  //std::unique_ptr<Botan::Public_Key> pubkey(Botan::X509::load_key(Botan::X509::BER_encode(*privkey)));
  Botan::Public_Key* pubkey = privkey.get();
  std::vector<std::unique_ptr<Botan::PK_Decryptor>> decryptors;
  for(auto const& dec_provider : possible_providers(algo_name())) {
    std::unique_ptr<Botan::PK_Decryptor> decryptor;
    try {
      decryptor.reset(new Botan::PK_Decryptor_EME(*privkey, Test::rng(), padding, dec_provider));
    } catch(Botan::Lookup_Error&) {
      continue;
    }
    Botan::secure_vector<uint8_t> decrypted;
    try {
      decrypted = decryptor->decrypt(ciphertext);
    } catch(Botan::Exception& e) {
      result.test_failure("Failed to decrypt KAT ciphertext", e.what());
    }
    result.test_eq(dec_provider, "decryption of KAT", decrypted, plaintext);
    check_invalid_ciphertexts(result, *decryptor, plaintext, ciphertext);
    // Keep the decryptor around; without this the cross-check loop over
    // 'decryptors' below never runs because the vector stays empty.
    decryptors.push_back(std::move(decryptor));
  }
  for(auto const& enc_provider : possible_providers(algo_name())) {
    std::unique_ptr<Botan::PK_Encryptor> encryptor;
    try {
      encryptor.reset(new Botan::PK_Encryptor_EME(*pubkey, Test::rng(), padding, enc_provider));
    } catch(Botan::Lookup_Error&) {
      continue;
    }
    std::unique_ptr<Botan::RandomNumberGenerator> kat_rng;
    if(vars.count("Nonce")) {
      kat_rng.reset(test_rng(get_req_bin(vars, "Nonce")));
    }
    if(padding == "Raw") {
      /* Hack for RSA with no padding since sometimes one more bit will fit in
         but maximum_input_size rounds down to nearest byte */
      result.test_lte("Input within accepted bounds", plaintext.size(), encryptor->maximum_input_size() + 1);
    } else {
      result.test_lte("Input within accepted bounds", plaintext.size(), encryptor->maximum_input_size());
    }
    const std::vector<uint8_t> generated_ciphertext = encryptor->encrypt(plaintext, kat_rng ? *kat_rng : Test::rng());
    if(enc_provider == "base") {
      result.test_eq(enc_provider, "generated ciphertext matches KAT", generated_ciphertext, ciphertext);
    } else if(generated_ciphertext != ciphertext) {
      for(std::unique_ptr<Botan::PK_Decryptor>& dec : decryptors) {
        result.test_eq("decryption of generated ciphertext", dec->decrypt(generated_ciphertext), plaintext);
      }
    }
  }
  return result;
}
this_type operator--(int) { this_type result (*this); decrement(); return result; }
std::vector<Test::Result> PK_Key_Generation_Test::run() { std::vector<Test::Result> results; for(auto const& param : keygen_params()) { const std::string report_name = algo_name() + (param.empty() ? param : " " + param); Test::Result result(report_name + " keygen"); const std::vector<std::string> providers = possible_providers(algo_name()); if(providers.empty()) { result.note_missing("provider key generation " + algo_name()); } result.start_timer(); for(auto&& prov : providers) { std::unique_ptr<Botan::Private_Key> key_p = Botan::create_private_key(algo_name(), Test::rng(), param, prov); const Botan::Private_Key& key = *key_p; try { result.confirm("Key passes self tests", key.check_key(Test::rng(), true)); } catch(Botan::Lookup_Error&) {} result.test_gte("Key has reasonable estimated strength (lower)", key.estimated_strength(), 64); result.test_lt("Key has reasonable estimated strength (upper)", key.estimated_strength(), 512); // Test PEM public key round trips OK try { Botan::DataSource_Memory data_src(Botan::X509::PEM_encode(key)); std::unique_ptr<Botan::Public_Key> loaded(Botan::X509::load_key(data_src)); result.confirm("recovered public key from private", loaded.get() != nullptr); result.test_eq("public key has same type", loaded->algo_name(), key.algo_name()); try { result.test_eq("public key passes checks", loaded->check_key(Test::rng(), false), true); } catch(Botan::Lookup_Error&) {} } catch(std::exception& e) { result.test_failure("roundtrip PEM public key", e.what()); } // Test DER public key round trips OK try { Botan::DataSource_Memory data_src(Botan::X509::BER_encode(key)); std::unique_ptr<Botan::Public_Key> loaded(Botan::X509::load_key(data_src)); result.confirm("recovered public key from private", loaded.get() != nullptr); result.test_eq("public key has same type", loaded->algo_name(), key.algo_name()); try { result.confirm("public key passes self tests", loaded->check_key(Test::rng(), true)); } catch(Botan::Lookup_Error&) {} } catch(std::exception& e) { result.test_failure("roundtrip BER public key", e.what()); } // Test PEM private key round trips OK try { Botan::DataSource_Memory data_src(Botan::PKCS8::PEM_encode(key)); std::unique_ptr<Botan::Private_Key> loaded( Botan::PKCS8::load_key(data_src, Test::rng())); result.confirm("recovered private key from PEM blob", loaded.get() != nullptr); result.test_eq("reloaded key has same type", loaded->algo_name(), key.algo_name()); try { result.confirm("private key passes self tests", loaded->check_key(Test::rng(), true)); } catch(Botan::Lookup_Error&) {} } catch(std::exception& e) { result.test_failure("roundtrip PEM private key", e.what()); } try { Botan::DataSource_Memory data_src(Botan::PKCS8::BER_encode(key)); std::unique_ptr<Botan::Public_Key> loaded(Botan::PKCS8::load_key(data_src, Test::rng())); result.confirm("recovered public key from private", loaded.get() != nullptr); result.test_eq("public key has same type", loaded->algo_name(), key.algo_name()); try { result.confirm("private key passes self tests", loaded->check_key(Test::rng(), true)); } catch(Botan::Lookup_Error&) {} } catch(std::exception& e) { result.test_failure("roundtrip BER private key", e.what()); } #if defined(BOTAN_HAS_PKCS5_PBE2) && defined(BOTAN_HAS_AES) && defined(BOTAN_HAS_SHA2_32) const std::string pbe_algo = "PBE-PKCS5v20(AES-128,SHA-256)"; const std::string passphrase = Test::random_password(); try { Botan::DataSource_Memory data_src( Botan::PKCS8::PEM_encode(key, Test::rng(), passphrase, std::chrono::milliseconds(10), pbe_algo)); 
std::unique_ptr<Botan::Private_Key> loaded( Botan::PKCS8::load_key(data_src, Test::rng(), passphrase)); result.confirm("recovered private key from encrypted blob", loaded.get() != nullptr); result.test_eq("reloaded key has same type", loaded->algo_name(), key.algo_name()); try { result.confirm("private key passes self tests", loaded->check_key(Test::rng(), true)); } catch(Botan::Lookup_Error&) {} } catch(std::exception& e) { result.test_failure("roundtrip encrypted PEM private key", e.what()); } try { Botan::DataSource_Memory data_src( Botan::PKCS8::BER_encode(key, Test::rng(), passphrase, std::chrono::milliseconds(10), pbe_algo)); std::unique_ptr<Botan::Private_Key> loaded( Botan::PKCS8::load_key(data_src, Test::rng(), passphrase)); result.confirm("recovered private key from BER blob", loaded.get() != nullptr); result.test_eq("reloaded key has same type", loaded->algo_name(), key.algo_name()); try { result.confirm("private key passes self tests", loaded->check_key(Test::rng(), true)); } catch(Botan::Lookup_Error&) {} } catch(std::exception& e) { result.test_failure("roundtrip encrypted BER private key", e.what()); } #endif } result.end_timer(); results.push_back(result); } return results; }
constant_iterator operator--(int) { constant_iterator result (*this); decrement(); return result; }
Test::Result PK_Signature_Generation_Test::run_one_test(const std::string& pad_hdr, const VarMap& vars) {
  const std::vector<uint8_t> message = get_req_bin(vars, "Msg");
  const std::vector<uint8_t> signature = get_req_bin(vars, "Signature");
  const std::string padding = choose_padding(vars, pad_hdr);
  Test::Result result(algo_name() + "/" + padding + " signature generation");
  std::unique_ptr<Botan::Private_Key> privkey;
  try {
    privkey = load_private_key(vars);
  } catch(Botan::Lookup_Error& e) {
    result.note_missing(e.what());
    return result;
  }
  std::unique_ptr<Botan::Public_Key> pubkey(Botan::X509::load_key(Botan::X509::BER_encode(*privkey)));
  std::vector<std::unique_ptr<Botan::PK_Verifier>> verifiers;
  for(auto const& verify_provider : possible_providers(algo_name())) {
    std::unique_ptr<Botan::PK_Verifier> verifier;
    try {
      verifier.reset(new Botan::PK_Verifier(*pubkey, padding, Botan::IEEE_1363, verify_provider));
    } catch(Botan::Lookup_Error&) {
      //result.test_note("Skipping verifying with " + verify_provider);
      continue;
    }
    result.test_eq("KAT signature valid", verifier->verify_message(message, signature), true);
    check_invalid_signatures(result, *verifier, message, signature);
    verifiers.push_back(std::move(verifier));
  }
  for(auto const& sign_provider : possible_providers(algo_name())) {
    std::unique_ptr<Botan::RandomNumberGenerator> rng;
    if(vars.count("Nonce")) {
      rng.reset(test_rng(get_req_bin(vars, "Nonce")));
    }
    std::unique_ptr<Botan::PK_Signer> signer;
    std::vector<uint8_t> generated_signature;
    try {
      signer.reset(new Botan::PK_Signer(*privkey, Test::rng(), padding, Botan::IEEE_1363, sign_provider));
      generated_signature = signer->sign_message(message, rng ? *rng : Test::rng());
    } catch(Botan::Lookup_Error&) {
      //result.test_note("Skipping signing with " + sign_provider);
      continue;
    }
    if(sign_provider == "base") {
      result.test_eq("generated signature matches KAT", generated_signature, signature);
    } else if(generated_signature != signature) {
      for(std::unique_ptr<Botan::PK_Verifier>& verifier : verifiers) {
        if(!result.test_eq("generated signature valid", verifier->verify_message(message, generated_signature), true)) {
          result.test_failure("generated signature", generated_signature);
        }
      }
    }
  }
  return result;
}
// create a new bitmap source with the same data but rotated counter-clockwise
Ref<MonochromeBitmapSource> GrayBytesMonochromeBitmapSource::rotateCounterClockwise() {
  Ref<MonochromeBitmapSource> self(this);
  Ref<MonochromeBitmapSource> result(new TMBS90(self, 1.0));
  return result;
}
cv::Point2d SpaceOrientation::computePointInRobotFrameGivenPointOnImage(cv::Point2d pointOnImage) {
  // How do we compute a point in space?
  // 1. We should have a reference in the image - that is, NAO's position;
  // 2. We know the size of the image;
  // 3. We know the coordinates in pixels of that point on image;
  // 4. We know what is height of the camera and its angle =>
  // 5. We can compute the max. forward distance in the image;
  // 6. We know the lateral angle of view of the camera, thus we can compute the max. lateral distance;
  // 7. Knowing max. forward and lateral distances, we can compute the lateral and forward distance of the point on image;
  // 8. We add those two to the reference in the image - but only taking into account the orientation;
  cv::Point2d naoOnImage = getNaoPositionOnImage();
  float centerYPercentage = (naoOnImage.y - pointOnImage.y) / NAO_POSITION_ON_IMAGE_AT_START_Y;
  float centerXPercentage = (pointOnImage.x - IMAGE_WIDTH/2 + naoOnImage.x) / IMAGE_WIDTH;
  // float angleAtZeroHeight = 26.48;
  // all angles are in degrees
  std::vector<float> cameraPos = getBottomCameraPosition();
  float currentCameraYAngle = cameraPos[4] * 180 / M_PI;
  float cameraViewHeightAngle = 47.64;
  float cameraViewWidthAngle = 60.97;
  float angleAtZeroWidth = cameraViewWidthAngle / 2;
  // float alpha = 21.0; // head pitch degree
  float objectHeightAngle = 90 - currentCameraYAngle - cameraViewHeightAngle/2 + //angleAtZeroHeight - alpha +
                            centerYPercentage*cameraViewHeightAngle; //beta
  float objectHeightAngleRads = objectHeightAngle * M_PI / 180;
  // std::cout << "Camera height view angle: " << objectHeightAngleRads << std::endl;
  float cameraHeight = cameraPos[2]; // 0.45959; // in meters; should be 452 mm
  // float cameraNeckDistance = 0.05071; // should be 5 mm
  // fabs(tan...)
  //b:
  float forwardDistance = cameraHeight * tan(objectHeightAngleRads) + cameraPos[0] - 0.0537006; //correction
  // forwardDistance += 0.0077;
  // !!! Whoa, forward distance is recomputed below. Do we need to adjust it???
  float cameraProjectionDistance = sqrt(forwardDistance*forwardDistance + cameraHeight*cameraHeight); //a
  float objectWidthAngle = -angleAtZeroWidth - 0 + centerXPercentage*cameraViewWidthAngle; //alpha
  float objectWidthAngleRads = objectWidthAngle * M_PI / 180;
  // std::cout << "Camera width view angle: " << objectWidthAngleRads << std::endl;
  //!!! Work here!
  float lateralDistance = cameraProjectionDistance * tan(objectWidthAngleRads) - cameraPos[1]; //forwardDistance * tan(-angleOfTurnRad);
  // std::cout << "Distances: " << lateralDistance << " " << forwardDistance << std::endl;
  cv::Point2d distanceInWorldSpace = pointFromRobotFrameToWorldFrame(cv::Point2d(lateralDistance, forwardDistance));
  //// Corrections for StandZero:
  // distanceInWorldSpace.y += 0.0077;
  // distanceInWorldSpace.x += -0.0016;
  // Corrections for Stand:
  distanceInWorldSpace.y += 0.0059;
  distanceInWorldSpace.x += 0.0113;
  //// Corrections for StandInit:
  // distanceInWorldSpace.y -= 0.0035;
  // distanceInWorldSpace.x += 0.0036;
  std::cout << "Distances in world space: " << distanceInWorldSpace.x << " " << distanceInWorldSpace.y << std::endl;
  std::vector<float> currentPosition = getNaoPositionInRobotFrame();
  cv::Point2d naoCurrentPosition(currentPosition[1], currentPosition[0]);
  cv::Point2d result(-distanceInWorldSpace.x + naoCurrentPosition.x, distanceInWorldSpace.y + naoCurrentPosition.y);
  return result;
}
// Describes how to download the results of the computation (more importantly: which buffer)
static std::vector<T> DownloadResult(const Arguments<T> &args, Buffers<T> &buffers, Queue &queue) {
  std::vector<T> result(args.a_size, static_cast<T>(0));
  buffers.a_mat.Read(queue, args.a_size, result);
  return result;
}
UniValue getblocktemplate(const JSONRPCRequest& request) { if (request.fHelp || request.params.size() > 1) throw std::runtime_error( "getblocktemplate ( TemplateRequest )\n" "\nIf the request parameters include a 'mode' key, that is used to explicitly select between the default 'template' request or a 'proposal'.\n" "It returns data needed to construct a block to work on.\n" "For full specification, see BIPs 22, 23, 9, and 145:\n" " https://github.com/bitcoin/bips/blob/master/bip-0022.mediawiki\n" " https://github.com/bitcoin/bips/blob/master/bip-0023.mediawiki\n" " https://github.com/bitcoin/bips/blob/master/bip-0009.mediawiki#getblocktemplate_changes\n" " https://github.com/bitcoin/bips/blob/master/bip-0145.mediawiki\n" "\nArguments:\n" "1. template_request (json object, optional) A json object in the following spec\n" " {\n" " \"mode\":\"template\" (string, optional) This must be set to \"template\", \"proposal\" (see BIP 23), or omitted\n" " \"capabilities\":[ (array, optional) A list of strings\n" " \"support\" (string) client side supported feature, 'longpoll', 'coinbasetxn', 'coinbasevalue', 'proposal', 'serverlist', 'workid'\n" " ,...\n" " ],\n" " \"rules\":[ (array, optional) A list of strings\n" " \"support\" (string) client side supported softfork deployment\n" " ,...\n" " ]\n" " }\n" "\n" "\nResult:\n" "{\n" " \"version\" : n, (numeric) The preferred block version\n" " \"rules\" : [ \"rulename\", ... ], (array of strings) specific block rules that are to be enforced\n" " \"vbavailable\" : { (json object) set of pending, supported versionbit (BIP 9) softfork deployments\n" " \"rulename\" : bitnumber (numeric) identifies the bit number as indicating acceptance and readiness for the named softfork rule\n" " ,...\n" " },\n" " \"vbrequired\" : n, (numeric) bit mask of versionbits the server requires set in submissions\n" " \"previousblockhash\" : \"xxxx\", (string) The hash of current highest block\n" " \"transactions\" : [ (array) contents of non-coinbase transactions that should be included in the next block\n" " {\n" " \"data\" : \"xxxx\", (string) transaction data encoded in hexadecimal (byte-for-byte)\n" " \"txid\" : \"xxxx\", (string) transaction id encoded in little-endian hexadecimal\n" " \"hash\" : \"xxxx\", (string) hash encoded in little-endian hexadecimal (including witness data)\n" " \"depends\" : [ (array) array of numbers \n" " n (numeric) transactions before this one (by 1-based index in 'transactions' list) that must be present in the final block if this one is\n" " ,...\n" " ],\n" " \"fee\": n, (numeric) difference in value between transaction inputs and outputs (in Satoshis); for coinbase transactions, this is a negative Number of the total collected block fees (ie, not including the block subsidy); if key is not present, fee is unknown and clients MUST NOT assume there isn't one\n" " \"sigops\" : n, (numeric) total SigOps cost, as counted for purposes of block limits; if key is not present, sigop cost is unknown and clients MUST NOT assume it is zero\n" " \"weight\" : n, (numeric) total transaction weight, as counted for purposes of block limits\n" " \"required\" : true|false (boolean) if provided and true, this transaction must be in the final block\n" " }\n" " ,...\n" " ],\n" " \"coinbaseaux\" : { (json object) data that should be included in the coinbase's scriptSig content\n" " \"flags\" : \"xx\" (string) key name is to be ignored, and value included in scriptSig\n" " },\n" " \"coinbasevalue\" : n, (numeric) maximum allowable input to coinbase transaction, 
including the generation award and transaction fees (in Satoshis)\n" " \"coinbasetxn\" : { ... }, (json object) information for coinbase transaction\n" " \"target\" : \"xxxx\", (string) The hash target\n" " \"mintime\" : xxx, (numeric) The minimum timestamp appropriate for next block time in seconds since epoch (Jan 1 1970 GMT)\n" " \"mutable\" : [ (array of string) list of ways the block template may be changed \n" " \"value\" (string) A way the block template may be changed, e.g. 'time', 'transactions', 'prevblock'\n" " ,...\n" " ],\n" " \"noncerange\" : \"00000000ffffffff\",(string) A range of valid nonces\n" " \"sigoplimit\" : n, (numeric) limit of sigops in blocks\n" " \"sizelimit\" : n, (numeric) limit of block size\n" " \"weightlimit\" : n, (numeric) limit of block weight\n" " \"curtime\" : ttt, (numeric) current timestamp in seconds since epoch (Jan 1 1970 GMT)\n" " \"bits\" : \"xxxxxxxx\", (string) compressed target of next block\n" " \"height\" : n (numeric) The height of the next block\n" "}\n" "\nExamples:\n" + HelpExampleCli("getblocktemplate", "") + HelpExampleRpc("getblocktemplate", "") ); LOCK(cs_main); std::string strMode = "template"; UniValue lpval = NullUniValue; std::set<std::string> setClientRules; int64_t nMaxVersionPreVB = -1; if (!request.params[0].isNull()) { const UniValue& oparam = request.params[0].get_obj(); const UniValue& modeval = find_value(oparam, "mode"); if (modeval.isStr()) strMode = modeval.get_str(); else if (modeval.isNull()) { /* Do nothing */ } else throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode"); lpval = find_value(oparam, "longpollid"); if (strMode == "proposal") { const UniValue& dataval = find_value(oparam, "data"); if (!dataval.isStr()) throw JSONRPCError(RPC_TYPE_ERROR, "Missing data String key for proposal"); CBlock block; if (!DecodeHexBlk(block, dataval.get_str())) throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block decode failed"); uint256 hash = block.GetHash(); BlockMap::iterator mi = mapBlockIndex.find(hash); if (mi != mapBlockIndex.end()) { CBlockIndex *pindex = mi->second; if (pindex->IsValid(BLOCK_VALID_SCRIPTS)) return "duplicate"; if (pindex->nStatus & BLOCK_FAILED_MASK) return "duplicate-invalid"; return "duplicate-inconclusive"; } CBlockIndex* const pindexPrev = chainActive.Tip(); // TestBlockValidity only supports blocks built on the current Tip if (block.hashPrevBlock != pindexPrev->GetBlockHash()) return "inconclusive-not-best-prevblk"; CValidationState state; TestBlockValidity(state, Params(), block, pindexPrev, false, true); return BIP22ValidationResult(state); } const UniValue& aClientRules = find_value(oparam, "rules"); if (aClientRules.isArray()) { for (unsigned int i = 0; i < aClientRules.size(); ++i) { const UniValue& v = aClientRules[i]; setClientRules.insert(v.get_str()); } } else { // NOTE: It is important that this NOT be read if versionbits is supported const UniValue& uvMaxVersion = find_value(oparam, "maxversion"); if (uvMaxVersion.isNum()) { nMaxVersionPreVB = uvMaxVersion.get_int64(); } } } if (strMode != "template") throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode"); if(!g_connman) throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); if (g_connman->GetNodeCount(CConnman::CONNECTIONS_ALL) == 0) throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Litecoin is not connected!"); if (IsInitialBlockDownload()) throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, "Litecoin is downloading blocks..."); static unsigned int nTransactionsUpdatedLast; if 
(!lpval.isNull()) { // Wait to respond until either the best block changes, OR a minute has passed and there are more transactions uint256 hashWatchedChain; boost::system_time checktxtime; unsigned int nTransactionsUpdatedLastLP; if (lpval.isStr()) { // Format: <hashBestChain><nTransactionsUpdatedLast> std::string lpstr = lpval.get_str(); hashWatchedChain.SetHex(lpstr.substr(0, 64)); nTransactionsUpdatedLastLP = atoi64(lpstr.substr(64)); } else { // NOTE: Spec does not specify behaviour for non-string longpollid, but this makes testing easier hashWatchedChain = chainActive.Tip()->GetBlockHash(); nTransactionsUpdatedLastLP = nTransactionsUpdatedLast; } // Release the wallet and main lock while waiting LEAVE_CRITICAL_SECTION(cs_main); { checktxtime = boost::get_system_time() + boost::posix_time::minutes(1); boost::unique_lock<boost::mutex> lock(csBestBlock); while (chainActive.Tip()->GetBlockHash() == hashWatchedChain && IsRPCRunning()) { if (!cvBlockChange.timed_wait(lock, checktxtime)) { // Timeout: Check transactions for update if (mempool.GetTransactionsUpdated() != nTransactionsUpdatedLastLP) break; checktxtime += boost::posix_time::seconds(10); } } } ENTER_CRITICAL_SECTION(cs_main); if (!IsRPCRunning()) throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Shutting down"); // TODO: Maybe recheck connections/IBD and (if something wrong) send an expires-immediately template to stop miners? } const struct VBDeploymentInfo& segwit_info = VersionBitsDeploymentInfo[Consensus::DEPLOYMENT_SEGWIT]; // If the caller is indicating segwit support, then allow CreateNewBlock() // to select witness transactions, after segwit activates (otherwise // don't). bool fSupportsSegwit = setClientRules.find(segwit_info.name) != setClientRules.end(); // Update block static CBlockIndex* pindexPrev; static int64_t nStart; static std::unique_ptr<CBlockTemplate> pblocktemplate; // Cache whether the last invocation was with segwit support, to avoid returning // a segwit-block to a non-segwit caller. 
static bool fLastTemplateSupportsSegwit = true; if (pindexPrev != chainActive.Tip() || (mempool.GetTransactionsUpdated() != nTransactionsUpdatedLast && GetTime() - nStart > 5) || fLastTemplateSupportsSegwit != fSupportsSegwit) { // Clear pindexPrev so future calls make a new block, despite any failures from here on pindexPrev = nullptr; // Store the pindexBest used before CreateNewBlock, to avoid races nTransactionsUpdatedLast = mempool.GetTransactionsUpdated(); CBlockIndex* pindexPrevNew = chainActive.Tip(); nStart = GetTime(); fLastTemplateSupportsSegwit = fSupportsSegwit; // Create new block CScript scriptDummy = CScript() << OP_TRUE; pblocktemplate = BlockAssembler(Params()).CreateNewBlock(scriptDummy, fSupportsSegwit); if (!pblocktemplate) throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory"); // Need to update only after we know CreateNewBlock succeeded pindexPrev = pindexPrevNew; } CBlock* pblock = &pblocktemplate->block; // pointer for convenience const Consensus::Params& consensusParams = Params().GetConsensus(); // Update nTime UpdateTime(pblock, consensusParams, pindexPrev); pblock->nNonce = 0; // NOTE: If at some point we support pre-segwit miners post-segwit-activation, this needs to take segwit support into consideration const bool fPreSegWit = (THRESHOLD_ACTIVE != VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT, versionbitscache)); UniValue aCaps(UniValue::VARR); aCaps.push_back("proposal"); UniValue transactions(UniValue::VARR); std::map<uint256, int64_t> setTxIndex; int i = 0; for (const auto& it : pblock->vtx) { const CTransaction& tx = *it; uint256 txHash = tx.GetHash(); setTxIndex[txHash] = i++; if (tx.IsCoinBase()) continue; UniValue entry(UniValue::VOBJ); entry.push_back(Pair("data", EncodeHexTx(tx))); entry.push_back(Pair("txid", txHash.GetHex())); entry.push_back(Pair("hash", tx.GetWitnessHash().GetHex())); UniValue deps(UniValue::VARR); for (const CTxIn &in : tx.vin) { if (setTxIndex.count(in.prevout.hash)) deps.push_back(setTxIndex[in.prevout.hash]); } entry.push_back(Pair("depends", deps)); int index_in_template = i - 1; entry.push_back(Pair("fee", pblocktemplate->vTxFees[index_in_template])); int64_t nTxSigOps = pblocktemplate->vTxSigOpsCost[index_in_template]; if (fPreSegWit) { assert(nTxSigOps % WITNESS_SCALE_FACTOR == 0); nTxSigOps /= WITNESS_SCALE_FACTOR; } entry.push_back(Pair("sigops", nTxSigOps)); entry.push_back(Pair("weight", GetTransactionWeight(tx))); transactions.push_back(entry); } UniValue aux(UniValue::VOBJ); aux.push_back(Pair("flags", HexStr(COINBASE_FLAGS.begin(), COINBASE_FLAGS.end()))); arith_uint256 hashTarget = arith_uint256().SetCompact(pblock->nBits); UniValue aMutable(UniValue::VARR); aMutable.push_back("time"); aMutable.push_back("transactions"); aMutable.push_back("prevblock"); UniValue result(UniValue::VOBJ); result.push_back(Pair("capabilities", aCaps)); UniValue aRules(UniValue::VARR); UniValue vbavailable(UniValue::VOBJ); for (int j = 0; j < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++j) { Consensus::DeploymentPos pos = Consensus::DeploymentPos(j); ThresholdState state = VersionBitsState(pindexPrev, consensusParams, pos, versionbitscache); switch (state) { case THRESHOLD_DEFINED: case THRESHOLD_FAILED: // Not exposed to GBT at all break; case THRESHOLD_LOCKED_IN: // Ensure bit is set in block version pblock->nVersion |= VersionBitsMask(consensusParams, pos); // FALL THROUGH to get vbavailable set... 
case THRESHOLD_STARTED: { const struct VBDeploymentInfo& vbinfo = VersionBitsDeploymentInfo[pos]; vbavailable.push_back(Pair(gbt_vb_name(pos), consensusParams.vDeployments[pos].bit)); if (setClientRules.find(vbinfo.name) == setClientRules.end()) { if (!vbinfo.gbt_force) { // If the client doesn't support this, don't indicate it in the [default] version pblock->nVersion &= ~VersionBitsMask(consensusParams, pos); } } break; } case THRESHOLD_ACTIVE: { // Add to rules only const struct VBDeploymentInfo& vbinfo = VersionBitsDeploymentInfo[pos]; aRules.push_back(gbt_vb_name(pos)); if (setClientRules.find(vbinfo.name) == setClientRules.end()) { // Not supported by the client; make sure it's safe to proceed if (!vbinfo.gbt_force) { // If we do anything other than throw an exception here, be sure version/force isn't sent to old clients throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Support for '%s' rule requires explicit client support", vbinfo.name)); } } break; } } } result.push_back(Pair("version", pblock->nVersion)); result.push_back(Pair("rules", aRules)); result.push_back(Pair("vbavailable", vbavailable)); result.push_back(Pair("vbrequired", int(0))); if (nMaxVersionPreVB >= 2) { // If VB is supported by the client, nMaxVersionPreVB is -1, so we won't get here // Because BIP 34 changed how the generation transaction is serialized, we can only use version/force back to v2 blocks // This is safe to do [otherwise-]unconditionally only because we are throwing an exception above if a non-force deployment gets activated // Note that this can probably also be removed entirely after the first BIP9 non-force deployment (ie, probably segwit) gets activated aMutable.push_back("version/force"); } result.push_back(Pair("previousblockhash", pblock->hashPrevBlock.GetHex())); result.push_back(Pair("transactions", transactions)); result.push_back(Pair("coinbaseaux", aux)); result.push_back(Pair("coinbasevalue", (int64_t)pblock->vtx[0]->vout[0].nValue)); result.push_back(Pair("longpollid", chainActive.Tip()->GetBlockHash().GetHex() + i64tostr(nTransactionsUpdatedLast))); result.push_back(Pair("target", hashTarget.GetHex())); result.push_back(Pair("mintime", (int64_t)pindexPrev->GetMedianTimePast()+1)); result.push_back(Pair("mutable", aMutable)); result.push_back(Pair("noncerange", "00000000ffffffff")); int64_t nSigOpLimit = MAX_BLOCK_SIGOPS_COST; int64_t nSizeLimit = MAX_BLOCK_SERIALIZED_SIZE; if (fPreSegWit) { assert(nSigOpLimit % WITNESS_SCALE_FACTOR == 0); nSigOpLimit /= WITNESS_SCALE_FACTOR; assert(nSizeLimit % WITNESS_SCALE_FACTOR == 0); nSizeLimit /= WITNESS_SCALE_FACTOR; } result.push_back(Pair("sigoplimit", nSigOpLimit)); result.push_back(Pair("sizelimit", nSizeLimit)); if (!fPreSegWit) { result.push_back(Pair("weightlimit", (int64_t)MAX_BLOCK_WEIGHT)); } result.push_back(Pair("curtime", pblock->GetBlockTime())); result.push_back(Pair("bits", strprintf("%08x", pblock->nBits))); result.push_back(Pair("height", (int64_t)(pindexPrev->nHeight+1))); if (!pblocktemplate->vchCoinbaseCommitment.empty() && fSupportsSegwit) { result.push_back(Pair("default_witness_commitment", HexStr(pblocktemplate->vchCoinbaseCommitment.begin(), pblocktemplate->vchCoinbaseCommitment.end()))); } return result; }
IddObjectType OutsideSurfaceConvectionAlgorithm::iddObjectType() { IddObjectType result(IddObjectType::OS_SurfaceConvectionAlgorithm_Outside); return result; }
void AsyncResultsMerger::handleBatchResponse( const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData, size_t remoteIndex) { stdx::lock_guard<stdx::mutex> lk(_mutex); auto& remote = _remotes[remoteIndex]; // Clear the callback handle. This indicates that we are no longer waiting on a response from // 'remote'. remote.cbHandle = executor::TaskExecutor::CallbackHandle(); // If we're in the process of shutting down then there's no need to process the batch. if (_lifecycleState != kAlive) { invariant(_lifecycleState == kKillStarted); // Make sure to wake up anyone waiting on '_currentEvent' if we're shutting down. signalCurrentEventIfReady_inlock(); // Make a best effort to parse the response and retrieve the cursor id. We need the cursor // id in order to issue a killCursors command against it. if (cbData.response.isOK()) { auto cursorResponse = parseCursorResponse(cbData.response.data, remote); if (cursorResponse.isOK()) { remote.cursorId = cursorResponse.getValue().getCursorId(); } } // If we're killed and we're not waiting on any more batches to come back, then we are ready // to kill the cursors on the remote hosts and clean up this cursor. Schedule the // killCursors command and signal that this cursor is safe now safe to destroy. We have to // promise not to touch any members of this class because 'this' could become invalid as // soon as we signal the event. if (!haveOutstandingBatchRequests_inlock()) { // If the event handle is invalid, then the executor is in the middle of shutting down, // and we can't schedule any more work for it to complete. if (_killCursorsScheduledEvent.isValid()) { scheduleKillCursors_inlock(); _executor->signalEvent(_killCursorsScheduledEvent); } _lifecycleState = kKillComplete; } return; } // Early return from this point on signal anyone waiting on an event, if ready() is true. ScopeGuard signaller = MakeGuard(&AsyncResultsMerger::signalCurrentEventIfReady_inlock, this); StatusWith<CursorResponse> cursorResponseStatus( cbData.response.isOK() ? parseCursorResponse(cbData.response.data, remote) : cbData.response.status); if (!cursorResponseStatus.isOK()) { // In the case a read is performed against a view, the shard primary can return an error // indicating that the underlying collection may be sharded. When this occurs the return // message will include an expanded view definition and collection namespace which we need // to store. This allows for a second attempt at the read directly against the underlying // collection. 
if (cursorResponseStatus.getStatus() == ErrorCodes::CommandOnShardedViewNotSupportedOnMongod) { auto& responseObj = cbData.response.data; if (!responseObj.hasField("resolvedView")) { remote.status = Status(ErrorCodes::InternalError, str::stream() << "Missing field 'resolvedView' in document: " << responseObj); return; } auto resolvedViewObj = responseObj.getObjectField("resolvedView"); if (resolvedViewObj.isEmpty()) { remote.status = Status(ErrorCodes::InternalError, str::stream() << "Field 'resolvedView' must be an object: " << responseObj); return; } ClusterQueryResult result; result.setViewDefinition(resolvedViewObj.getOwned()); remote.docBuffer.push(result); remote.cursorId = 0; remote.status = Status::OK(); return; } auto shard = remote.getShard(); if (!shard) { remote.status = Status(cursorResponseStatus.getStatus().code(), str::stream() << "Could not find shard " << *remote.shardId << " containing host " << remote.getTargetHost().toString()); } else { shard->updateReplSetMonitor(remote.getTargetHost(), cursorResponseStatus.getStatus()); // Retry initial cursor establishment if possible. Never retry getMores to avoid // accidentally skipping results. if (!remote.cursorId && remote.retryCount < kMaxNumFailedHostRetryAttempts && shard->isRetriableError(cursorResponseStatus.getStatus().code(), Shard::RetryPolicy::kIdempotent)) { invariant(remote.shardId); LOG(1) << "Initial cursor establishment failed with retriable error and will be " "retried" << causedBy(redact(cursorResponseStatus.getStatus())); ++remote.retryCount; // Since we potentially updated the targeter that the last host it chose might be // faulty, the call below may end up getting a different host. remote.status = askForNextBatch_inlock(remoteIndex); if (remote.status.isOK()) { return; } // If we end up here, it means we failed to schedule the retry request, which is a // more // severe error that should not be retried. Just pass through to the error handling // logic below. } else { remote.status = cursorResponseStatus.getStatus(); } } // Unreachable host errors are swallowed if the 'allowPartialResults' option is set. We // remove the unreachable host entirely from consideration by marking it as exhausted. if (_params.isAllowPartialResults) { remote.status = Status::OK(); // Clear the results buffer and cursor id. std::queue<ClusterQueryResult> emptyBuffer; std::swap(remote.docBuffer, emptyBuffer); remote.cursorId = 0; } return; } // Cursor id successfully established. auto cursorResponse = std::move(cursorResponseStatus.getValue()); remote.cursorId = cursorResponse.getCursorId(); remote.initialCmdObj = boost::none; for (const auto& obj : cursorResponse.getBatch()) { // If there's a sort, we're expecting the remote node to give us back a sort key. if (!_params.sort.isEmpty() && obj[ClusterClientCursorParams::kSortKeyField].type() != BSONType::Object) { remote.status = Status(ErrorCodes::InternalError, str::stream() << "Missing field '" << ClusterClientCursorParams::kSortKeyField << "' in document: " << obj); return; } ClusterQueryResult result(obj); remote.docBuffer.push(result); ++remote.fetchedCount; } // If we're doing a sorted merge, then we have to make sure to put this remote onto the // merge queue. if (!_params.sort.isEmpty() && !cursorResponse.getBatch().empty()) { _mergeQueue.push(remoteIndex); } // If the cursor is tailable and we just received an empty batch, the next return value should // be boost::none in order to indicate the end of the batch. 
if (_params.isTailable && !remote.hasNext()) { _eofNext = true; } // If even after receiving this batch we still don't have anything buffered (i.e. the batchSize // was zero), then can schedule work to retrieve the next batch right away. // // We do not ask for the next batch if the cursor is tailable, as batches received from remote // tailable cursors should be passed through to the client without asking for more batches. if (!_params.isTailable && !remote.hasNext() && !remote.exhausted()) { remote.status = askForNextBatch_inlock(remoteIndex); if (!remote.status.isOK()) { return; } } // ScopeGuard requires dismiss on success, but we want waiter to be signalled on success as // well as failure. signaller.Dismiss(); signalCurrentEventIfReady_inlock(); }