bool Base58::decode (const char* psz, Blob& vchRet, Alphabet const& alphabet) { CAutoBN_CTX pctx; vchRet.clear (); CBigNum bn58 = 58; CBigNum bn = 0; CBigNum bnChar; while (isspace (*psz)) psz++; // Convert big endian string to bignum for (const char* p = psz; *p; p++) { // VFALCO TODO Make this use the inverse table! // Or better yet ditch this and call raw_decode // const char* p1 = strchr (alphabet.chars(), *p); if (p1 == nullptr) { while (isspace (*p)) p++; if (*p != '\0') return false; break; } bnChar.setuint (p1 - alphabet.chars()); if (!BN_mul (&bn, &bn, &bn58, pctx)) throw bignum_error ("DecodeBase58 : BN_mul failed"); bn += bnChar; } // Get bignum as little endian data Blob vchTmp = bn.getvch (); // Trim off sign byte if present if (vchTmp.size () >= 2 && vchTmp.end ()[-1] == 0 && vchTmp.end ()[-2] >= 0x80) vchTmp.erase (vchTmp.end () - 1); // Restore leading zeros int nLeadingZeros = 0; for (const char* p = psz; *p == alphabet.chars()[0]; p++) nLeadingZeros++; vchRet.assign (nLeadingZeros + vchTmp.size (), 0); // Convert little endian data to big endian std::reverse_copy (vchTmp.begin (), vchTmp.end (), vchRet.end () - vchTmp.size ()); return true; }
// { // start: <index> // } Json::Value doTxHistory (RPC::Context& context) { context.loadType = Resource::feeMediumBurdenRPC; if (!context.params.isMember (jss::start)) return rpcError (rpcINVALID_PARAMS); unsigned int startIndex = context.params[jss::start].asUInt (); if ((startIndex > 10000) && (! isUnlimited (context.role))) return rpcError (rpcNO_PERMISSION); Json::Value obj; Json::Value txs; obj[jss::index] = startIndex; std::string sql = boost::str (boost::format ( "SELECT LedgerSeq, Status, RawTxn " "FROM Transactions ORDER BY LedgerSeq desc LIMIT %u,20;") % startIndex); { auto db = context.app.getTxnDB ().checkoutDb (); boost::optional<std::uint64_t> ledgerSeq; boost::optional<std::string> status; soci::blob sociRawTxnBlob (*db); soci::indicator rti; Blob rawTxn; soci::statement st = (db->prepare << sql, soci::into (ledgerSeq), soci::into (status), soci::into (sociRawTxnBlob, rti)); st.execute (); while (st.fetch ()) { if (soci::i_ok == rti) convert(sociRawTxnBlob, rawTxn); else rawTxn.clear (); if (auto trans = Transaction::transactionFromSQL ( ledgerSeq, status, rawTxn, context.app)) txs.append (trans->getJson (0)); } } obj[jss::txs] = txs; return obj; }
bool Base58::decodeWithCheck (const char* psz, Blob& vchRet, Alphabet const& alphabet) { if (!decode (psz, vchRet, alphabet)) return false; if (vchRet.size () < 4) { vchRet.clear (); return false; } uint256 hash = SHA256Hash (vchRet.begin (), vchRet.end () - 4); if (memcmp (&hash, &vchRet.end ()[-4], 4) != 0) { vchRet.clear (); return false; } vchRet.resize (vchRet.size () - 4); return true; }
// Encode ORF metadata.  Not implemented yet: always clears the output blob
// and reports that an intrusive (full rewrite) write method would be needed.
// Parameter names are commented out (matching Cr2Parser::encode) to avoid
// unused-parameter warnings until the Todo below is implemented.
WriteMethod OrfParser::encode(
          Blob&     blob,
    const byte*     /*pData*/,
          uint32_t  /*size*/,
    const ExifData& /*exifData*/,
    const IptcData& /*iptcData*/,
    const XmpData&  /*xmpData*/
)
{
    /* Todo: Implement me!
       return TiffParserWorker::encode(blob, pData, size, exifData, iptcData, xmpData, TiffCreator::create, TiffMapping::findEncoder);
    */
    blob.clear();
    return wmIntrusive;
}
// Encode CR2 metadata.  Encoding is not implemented for this format:
// the output blob is emptied and the caller is told a full (intrusive)
// write would be required.
WriteMethod Cr2Parser::encode(
          Blob&     blob,
    const byte*     /*pData*/,
          uint32_t  /*size*/,
    const ExifData& /*exifData*/,
    const IptcData& /*iptcData*/,
    const XmpData&  /*xmpData*/
)
{
    /* Todo: Implement me!
       TiffParserWorker::encode(blob, pData, size, exifData, iptcData, xmpData, TiffCreator::create, TiffMapping::findEncoder);
    */
    blob.clear();
    return wmIntrusive;
}
// Page through the transactions affecting a single account, invoking
// onTransaction for each row.  Results are capped per call; when more rows
// remain, `token` is set to a {ledger, seq} marker the caller can pass back
// to resume.  `token` is used for both input (resume marker) and output.
void accountTxPage (
    DatabaseCon& connection,
    AccountIDCache const& idCache,
    std::function<void (std::uint32_t)> const& onUnsavedLedger,
    std::function<void (std::uint32_t, std::string const&,
                        Blob const&, Blob const&)> const& onTransaction,
    AccountID const& account,
    std::int32_t minLedger,
    std::int32_t maxLedger,
    bool forward,
    Json::Value& token,
    int limit,
    bool bAdmin,
    std::uint32_t page_length)
{
    // A marker object in `token` means we are resuming a prior query.
    bool lookingForMarker = token.isObject();

    std::uint32_t numberOfResults;

    // Non-admins are clamped to page_length; limit <= 0 means "default".
    if (limit <= 0 || (limit > page_length && !bAdmin))
        numberOfResults = page_length;
    else
        numberOfResults = limit;

    // As an account can have many thousands of transactions, there is a limit
    // placed on the amount of transactions returned. If the limit is reached
    // before the result set has been exhausted (we always query for one more
    // than the limit), then we return an opaque marker that can be supplied in
    // a subsequent query.
    std::uint32_t queryLimit = numberOfResults + 1;
    std::uint32_t findLedger = 0, findSeq = 0;

    if (lookingForMarker)
    {
        try
        {
            // A malformed marker aborts the query entirely (empty result).
            if (!token.isMember(jss::ledger) || !token.isMember(jss::seq))
                return;
            findLedger = token[jss::ledger].asInt();
            findSeq = token[jss::seq].asInt();
        }
        catch (std::exception const&)
        {
            return;
        }
    }

    // We're using the token reference both for passing inputs and outputs, so
    // we need to clear it in between.
    token = Json::nullValue;

    static std::string const prefix (
        R"(SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq, Status,RawTxn,TxnMeta FROM AccountTransactions INNER JOIN Transactions ON Transactions.TransID = AccountTransactions.TransID AND AccountTransactions.Account = '%s' WHERE )");

    std::string sql;

    // SQL's BETWEEN uses a closed interval ([a,b]).
    // Four variants: {forward, backward} x {fresh query, resume-from-marker}.
    if (forward && (findLedger == 0))
    {
        // Oldest-first, no marker: plain range scan.
        sql = boost::str (boost::format(
            prefix +
            (R"(AccountTransactions.LedgerSeq BETWEEN '%u' AND '%u' ORDER BY AccountTransactions.LedgerSeq ASC, AccountTransactions.TxnSeq ASC LIMIT %u;)"))
            % idCache.toBase58(account)
            % minLedger
            % maxLedger
            % queryLimit);
    }
    else if (forward && (findLedger != 0))
    {
        // Oldest-first, resuming: everything after the marker's ledger, plus
        // the marker ledger itself from the marker's TxnSeq onward.
        auto b58acct = idCache.toBase58(account);
        sql = boost::str (boost::format(
            (R"(SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq, Status,RawTxn,TxnMeta FROM AccountTransactions, Transactions WHERE (AccountTransactions.TransID = Transactions.TransID AND AccountTransactions.Account = '%s' AND AccountTransactions.LedgerSeq BETWEEN '%u' AND '%u') OR (AccountTransactions.TransID = Transactions.TransID AND AccountTransactions.Account = '%s' AND AccountTransactions.LedgerSeq = '%u' AND AccountTransactions.TxnSeq >= '%u') ORDER BY AccountTransactions.LedgerSeq ASC, AccountTransactions.TxnSeq ASC LIMIT %u; )"))
            % b58acct
            % (findLedger + 1)
            % maxLedger
            % b58acct
            % findLedger
            % findSeq
            % queryLimit);
    }
    else if (!forward && (findLedger == 0))
    {
        // Newest-first, no marker: plain range scan descending.
        sql = boost::str (boost::format(
            prefix +
            (R"(AccountTransactions.LedgerSeq BETWEEN '%u' AND '%u' ORDER BY AccountTransactions.LedgerSeq DESC, AccountTransactions.TxnSeq DESC LIMIT %u;)"))
            % idCache.toBase58(account)
            % minLedger
            % maxLedger
            % queryLimit);
    }
    else if (!forward && (findLedger != 0))
    {
        // Newest-first, resuming: everything before the marker's ledger, plus
        // the marker ledger itself up to the marker's TxnSeq.
        auto b58acct = idCache.toBase58(account);
        sql = boost::str (boost::format(
            (R"(SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq, Status,RawTxn,TxnMeta FROM AccountTransactions, Transactions WHERE (AccountTransactions.TransID =
Transactions.TransID AND AccountTransactions.Account = '%s' AND AccountTransactions.LedgerSeq BETWEEN '%u' AND '%u') OR (AccountTransactions.TransID = Transactions.TransID AND AccountTransactions.Account = '%s' AND AccountTransactions.LedgerSeq = '%u' AND AccountTransactions.TxnSeq <= '%u') ORDER BY AccountTransactions.LedgerSeq DESC, AccountTransactions.TxnSeq DESC LIMIT %u; )"))
            % b58acct
            % minLedger
            % (findLedger - 1)
            % b58acct
            % findLedger
            % findSeq
            % queryLimit);
    }
    else
    {
        assert (false);
        // sql is empty
        return;
    }

    {
        auto db (connection.checkoutDb());

        Blob rawData;
        Blob rawMeta;

        boost::optional<std::uint64_t> ledgerSeq;
        boost::optional<std::uint32_t> txnSeq;
        boost::optional<std::string> status;
        soci::blob txnData (*db);
        soci::blob txnMeta (*db);
        soci::indicator dataPresent, metaPresent;

        // NOTE: the into() order must match the SELECT column order.
        soci::statement st = (db->prepare << sql,
            soci::into (ledgerSeq),
            soci::into (txnSeq),
            soci::into (status),
            soci::into (txnData, dataPresent),
            soci::into (txnMeta, metaPresent));

        st.execute ();

        while (st.fetch ())
        {
            if (lookingForMarker)
            {
                // Skip rows until the marker row itself is seen; emission
                // starts with the marker row.
                if (findLedger == ledgerSeq.value_or (0) &&
                    findSeq == txnSeq.value_or (0))
                {
                    lookingForMarker = false;
                }
            }
            else if (numberOfResults == 0)
            {
                // Budget exhausted but another row exists (we queried for
                // limit+1): emit a resume marker instead of the row.
                token = Json::objectValue;
                token[jss::ledger] =
                    rangeCheckedCast<std::uint32_t>(ledgerSeq.value_or (0));
                token[jss::seq] = txnSeq.value_or (0);
                break;
            }

            if (!lookingForMarker)
            {
                // Null columns are delivered as empty blobs.
                if (dataPresent == soci::i_ok)
                    convert (txnData, rawData);
                else
                    rawData.clear ();

                if (metaPresent == soci::i_ok)
                    convert (txnMeta, rawMeta);
                else
                    rawMeta.clear ();

                // Work around a bug that could leave the metadata missing
                if (rawMeta.size() == 0)
                    onUnsavedLedger(ledgerSeq.value_or (0));

                onTransaction(
                    rangeCheckedCast<std::uint32_t>(ledgerSeq.value_or (0)),
                    *status, rawData, rawMeta);
                --numberOfResults;
            }
        }
    }

    return;
}
// Encode Exif metadata for writing into a JPEG APP1 segment.  Tags and IFDs
// that do not belong in JPEG files are removed first; if the encoded result
// exceeds the APP1 size limit, progressively larger payloads (previews,
// thumbnails, oversized unknown tags) are dropped and encoding is retried.
WriteMethod ExifParser::encode(
          Blob&     blob,
    const byte*     pData,
          uint32_t  size,
          ByteOrder byteOrder,
    const ExifData& exifData
)
{
    // Work on a copy so the caller's ExifData is never mutated.
    ExifData ed = exifData;

    // Delete IFD0 tags that are "not recorded" in compressed images
    // Reference: Exif 2.2 specs, 4.6.8 Tag Support Levels, section A
    static const char* filteredIfd0Tags[] = {
        "Exif.Image.PhotometricInterpretation",
        "Exif.Image.StripOffsets",
        "Exif.Image.RowsPerStrip",
        "Exif.Image.StripByteCounts",
        "Exif.Image.JPEGInterchangeFormat",
        "Exif.Image.JPEGInterchangeFormatLength",
        "Exif.Image.SubIFDs"
    };
    for (unsigned int i = 0; i < EXV_COUNTOF(filteredIfd0Tags); ++i) {
        ExifData::iterator pos = ed.findKey(ExifKey(filteredIfd0Tags[i]));
        if (pos != ed.end()) {
#ifdef DEBUG
            std::cerr << "Warning: Exif tag " << pos->key() << " not encoded\n";
#endif
            ed.erase(pos);
        }
    }

    // Delete IFDs which do not occur in JPEGs
    static const IfdId filteredIfds[] = {
        subImage1Id,
        subImage2Id,
        subImage3Id,
        subImage4Id,
        panaRawIfdId,
        ifd2Id
    };
    for (unsigned int i = 0; i < EXV_COUNTOF(filteredIfds); ++i) {
#ifdef DEBUG
        std::cerr << "Warning: Exif IFD " << filteredIfds[i] << " not encoded\n";
#endif
        eraseIfd(ed, filteredIfds[i]);
    }

    // IPTC and XMP are stored elsewhere, not in the Exif APP1 segment.
    const IptcData emptyIptc;
    const XmpData  emptyXmp;

    // Encode and check if the result fits into a JPEG Exif APP1 segment
    std::auto_ptr<TiffHeaderBase> header(new TiffHeader(byteOrder));
    WriteMethod wm = TiffParserWorker::encode(blob,
                                              pData,
                                              size,
                                              ed,
                                              emptyIptc,
                                              emptyXmp,
                                              Tag::root,
                                              TiffMapping::findEncoder,
                                              header.get());
    // 65527 = 65535 (max segment size) minus APP1 header overhead
    // — NOTE(review): presumed rationale for the constant; verify.
    if (blob.size() <= 65527) return wm;

    // If it doesn't fit, remove additional tags
    blob.clear();

    // Delete preview tags if the preview is larger than 32kB.
    // Todo: Enhance preview classes to be able to write and delete previews
    //       and use that instead.
    // Table must be sorted by preview, the first tag in each group is the size:
    // a pttLen entry sets delTags for the pttTag/pttIfd entries that follow it.
    static const PreviewTags filteredPvTags[] = {
        { pttLen, "Exif.Minolta.ThumbnailLength"                  },
        { pttTag, "Exif.Minolta.ThumbnailOffset"                  },
        { pttLen, "Exif.Minolta.Thumbnail"                        },
        { pttLen, "Exif.NikonPreview.JPEGInterchangeFormatLength" },
        { pttIfd, "NikonPreview"                                  },
        { pttLen, "Exif.Olympus.ThumbnailLength"                  },
        { pttTag, "Exif.Olympus.ThumbnailOffset"                  },
        { pttLen, "Exif.Olympus.ThumbnailImage"                   },
        { pttLen, "Exif.Olympus.Thumbnail"                        },
        { pttLen, "Exif.Olympus2.ThumbnailLength"                 },
        { pttTag, "Exif.Olympus2.ThumbnailOffset"                 },
        { pttLen, "Exif.Olympus2.ThumbnailImage"                  },
        { pttLen, "Exif.Olympus2.Thumbnail"                       },
        { pttLen, "Exif.OlympusCs.PreviewImageLength"             },
        { pttTag, "Exif.OlympusCs.PreviewImageStart"              },
        { pttTag, "Exif.OlympusCs.PreviewImageValid"              },
        { pttLen, "Exif.Pentax.PreviewLength"                     },
        { pttTag, "Exif.Pentax.PreviewOffset"                     },
        { pttTag, "Exif.Pentax.PreviewResolution"                 },
        { pttLen, "Exif.Thumbnail.StripByteCounts"                },
        { pttIfd, "Thumbnail"                                     },
        { pttLen, "Exif.Thumbnail.JPEGInterchangeFormatLength"    },
        { pttIfd, "Thumbnail"                                     }
    };
    bool delTags = false;
    ExifData::iterator pos;
    for (unsigned int i = 0; i < EXV_COUNTOF(filteredPvTags); ++i) {
        switch (filteredPvTags[i].ptt_) {
        case pttLen:
            // Size tag: delete it (and arm delTags) only if the preview
            // it describes is larger than 32kB.
            delTags = false;
            pos = ed.findKey(ExifKey(filteredPvTags[i].key_));
            if (pos != ed.end() && sumToLong(*pos) > 32768) {
                delTags = true;
#ifndef SUPPRESS_WARNINGS
                std::cerr << "Warning: Exif tag " << pos->key()
                          << " not encoded\n";
#endif
                ed.erase(pos);
            }
            break;
        case pttTag:
            // Companion tag of the preview group: deleted only if the
            // preceding size tag armed delTags.
            if (delTags) {
                pos = ed.findKey(ExifKey(filteredPvTags[i].key_));
                if (pos != ed.end()) {
#ifndef SUPPRESS_WARNINGS
                    std::cerr << "Warning: Exif tag " << pos->key()
                              << " not encoded\n";
#endif
                    ed.erase(pos);
                }
            }
            break;
        case pttIfd:
            // Whole preview IFD: likewise only removed when armed.
            if (delTags) {
#ifndef SUPPRESS_WARNINGS
                std::cerr << "Warning: Exif IFD " << filteredPvTags[i].key_
                          << " not encoded\n";
#endif
                eraseIfd(ed, ExifTags::ifdIdByIfdItem(filteredPvTags[i].key_));
            }
            break;
        }
    }

    // Delete unknown tags larger than 4kB.
    for (ExifData::iterator pos = ed.begin(); pos != ed.end(); ) {
        // Unknown tags render with a hex name ("0x...."); drop big ones.
        if (   pos->size() > 4096
            && pos->tagName().substr(0, 2) == "0x") {
#ifndef SUPPRESS_WARNINGS
            std::cerr << "Warning: Exif tag " << pos->key() << " not encoded\n";
#endif
            pos = ed.erase(pos);
        }
        else {
            ++pos;
        }
    }

    // Encode the remaining Exif tags again, don't care if it fits this time
    wm = TiffParserWorker::encode(blob,
                                  pData,
                                  size,
                                  ed,
                                  emptyIptc,
                                  emptyXmp,
                                  Tag::root,
                                  TiffMapping::findEncoder,
                                  header.get());

#ifdef DEBUG
    if (wm == wmIntrusive) {
        std::cerr << "SIZE OF EXIF DATA IS " << std::dec << blob.size()
                  << " BYTES\n";
    }
    else {
        std::cerr << "SIZE DOESN'T MATTER, NON-INTRUSIVE WRITING USED\n";
    }
#endif
    return wm;
} // ExifParser::encode
// Encode metadata into a TIFF structure, preferring an in-place update of
// the parsed input image ("non-intrusive") and falling back to writing a
// brand-new TIFF structure ("intrusive") when that is not possible.
WriteMethod TiffParserWorker::encode(
          Blob&              blob,
    const byte*              pData,
          uint32_t           size,
    const ExifData&          exifData,
    const IptcData&          iptcData,
    const XmpData&           xmpData,
          TiffCompFactoryFct createFct,
          FindEncoderFct     findEncoderFct,
          TiffHeaderBase*    pHeader
)
{
    /*
       1) parse the binary image, if one is provided, and
       2) attempt updating the parsed tree in-place ("non-intrusive writing")
       3) else, create a new tree and write a new TIFF structure ("intrusive
          writing"). If there is a parsed tree, it is only used to access the
          image data in this case.
     */
    assert(pHeader);
    assert(pHeader->byteOrder() != invalidByteOrder);
    blob.clear();
    // Default to intrusive; downgraded below only if the in-place update
    // succeeds without dirtying the encoder.
    WriteMethod writeMethod = wmIntrusive;
    TiffComponent::AutoPtr createdTree;
    TiffComponent::AutoPtr parsedTree = parse(pData, size, createFct, pHeader);
    if (0 != parsedTree.get()) {
        // Attempt to update existing TIFF components based on metadata entries
        TiffEncoder encoder(exifData,
                            iptcData,
                            xmpData,
                            parsedTree.get(),
                            pHeader->byteOrder(),
                            findEncoderFct);
        parsedTree->accept(encoder);
        // If every entry fit in place, no rewrite is needed.
        if (!encoder.dirty()) writeMethod = wmNonIntrusive;
    }
    if (writeMethod == wmIntrusive) {
        // Build a fresh component tree and serialize it from scratch.
        createdTree = createFct(Tag::root, Group::none);
        TiffEncoder encoder(exifData,
                            iptcData,
                            xmpData,
                            createdTree.get(),
                            pHeader->byteOrder(),
                            findEncoderFct);
        // Add entries from metadata to composite
        // (parsedTree, if any, is only consulted for existing image data).
        encoder.add(createdTree.get(), parsedTree.get(), createFct);
        // Write binary representation from the composite tree
        uint32_t offset = pHeader->write(blob);
        uint32_t imageIdx(uint32_t(-1));
        uint32_t len = createdTree->write(blob,
                                          pHeader->byteOrder(),
                                          offset,
                                          uint32_t(-1),
                                          uint32_t(-1),
                                          imageIdx);
        // Avoid writing just the header if there is no IFD data
        if (len == 0) blob.clear();
#ifdef DEBUG
        std::cerr << "Intrusive writing\n";
#endif
    }
#ifdef DEBUG
    else {
        std::cerr << "Non-intrusive writing\n";
    }
#endif
    return writeMethod;
} // TiffParserWorker::encode