// Verifies that `parameters` values can be taken before `position`,
// honouring group boundaries; recursively unwinds closed groups when the
// plain tail between the last group end and `position` is not large enough.
void CheckAvailableParameters(std::size_t parameters, std::size_t position)
{
  if (!parameters)
  {
    return;
  }
  // lower bound for the search: start of the innermost open group, or 0
  const std::size_t start = GroupStarts.empty() ? 0 : GroupStarts.top();
  if (Groups.empty() || Groups.top().End < start)
  {
    // no closed group lies inside [start, position) - simple capacity check
    Require(parameters + start <= position);
    return;
  }
  const Group top = Groups.top();
  // values available after the most recently closed group
  const std::size_t nonGrouped = position - top.End;
  if (nonGrouped < parameters)
  {
    if (nonGrouped)
    {
      // the tail supplies some values; the rest must fit before the group end
      CheckAvailableParameters(parameters - nonGrouped, top.End);
    }
    else
    {
      // position coincides with the group end: the group itself counts as a
      // single parameter, which is only valid for one-element groups
      Require(top.Size() == 1);
      Groups.pop();
      CheckAvailableParameters(parameters - 1, top.Begin);
      Groups.push(top); // restore the stack - this function must not mutate it
    }
  }
}
// Parses the positions table: a byte stream terminated by 0xff.
// 0xfe marks the loop point, values >= 0x60 set the transposition for
// subsequent entries, other values (multiples of 3) select a pattern.
void ParsePositions(Builder& builder) const
{
  std::vector<PositionEntry> positions;
  uint_t loop = 0;
  PositionEntry entry;
  for (std::size_t posCursor = fromLE(Source.PositionsOffset); ; ++posCursor)
  {
    Require(positions.size() <= MAX_POSITIONS_COUNT);
    const uint_t val = PeekByte(posCursor);
    if (val == 0xff)
    {
      break; // end-of-table marker
    }
    else if (val == 0xfe)
    {
      loop = positions.size(); // loop to the next position to be stored
    }
    else if (val >= 0x60)
    {
      entry.Transposition = val - 0x60; // applies to following entries
    }
    else
    {
      Require(0 == val % 3); // pattern references are stored as index * 3
      entry.PatternIndex = val / 3;
      positions.push_back(entry);
    }
  }
  Require(!positions.empty());
  builder.SetPositions(positions, loop);
  Dbg("Positions: %1% entries, loop to %2%", positions.size(), loop);
}
// Walks the track list and reports every sector carrying data to the
// visitor. Cylinder/head come from the track record, the sector number
// from the sector record itself.
void ParseSectors(SourceStream& stream, ImageVisitor& visitor)
{
  for (;;)
  {
    const RawTrack& track = stream.Get<RawTrack>();
    if (track.IsLast())
    {
      break;
    }
    Require(Math::InRange<uint_t>(track.Cylinder, 0, MAX_CYLINDERS_COUNT));
    for (uint_t sect = 0; sect != track.Sectors; ++sect)
    {
      const RawSector& sector = stream.Get<RawSector>();
      if (sector.NoData())
      {
        continue;
      }
      // encoded size: 128 << n with n in [0..6], i.e. up to 8192 bytes
      Require(Math::InRange<uint_t>(sector.Size, 0, 6));
      const std::size_t sectorSize = std::size_t(128) << sector.Size;
      const RawData& srcDataDesc = stream.Get<RawData>();
      Require(Math::InRange<uint_t>(srcDataDesc.Method, RAW_SECTOR, RLE_SECTOR));
      // stored size is one larger than the payload - presumably it counts
      // the method byte; TODO confirm against the format specification
      const std::size_t dataSize = fromLE(srcDataDesc.Size) - 1;
      const uint8_t* const rawData = stream.GetData(dataSize);
      //use track parameters for layout
      if (!sector.NoId())
      {
        const Formats::CHS loc(sector.Cylinder, track.Head, sector.Number);
        visitor.OnSector(loc, rawData, dataSize, static_cast<SectorDataType>(srcDataDesc.Method), sectorSize);
      }
    }
  }
}
// Iterates over the subchunk list in `data`, forwarding each parsed
// subchunk to the target. Parsing is best-effort: the first structural
// error stops iteration but already-delivered subchunks are kept.
static void ParseSubchunks(const Binary::Data& data, Builder& target)
{
  try
  {
    Binary::TypedContainer typed(data);
    for (std::size_t pos = 0; pos < typed.GetSize(); )
    {
      const SubChunkHeader* const hdr = typed.GetField<SubChunkHeader>(pos);
      Require(hdr != nullptr);
      if (hdr->ID == 0 && 0 != (pos % 4))
      {
        //in despite of official format description, subchunks can be not aligned by 4 byte boundary
        ++pos; // resynchronize byte by byte
      }
      else
      {
        Dbg("ParseSubchunk id=%u, type=%u, size=%u", uint_t(hdr->ID), uint_t(hdr->Type), fromLE(hdr->DataSize));
        pos += sizeof(*hdr) + hdr->GetDataSize();
        // the whole subchunk must fit inside the container
        Require(pos <= typed.GetSize());
        ParseSubchunk(*hdr, target);
      }
    }
  }
  catch (const std::exception&)
  {
    //ignore - deliberately best-effort
  }
}
// Unpacks an RLE-compressed sector image.
// Each chunk is two header bytes (half-length, count) followed by data:
//  - len != 0: read a len-byte block, then repeat it count times in total;
//  - len == 0: copy count bytes verbatim.
void DecodeRLE(const uint8_t* data, std::size_t size, Dump& result)
{
  Dump tmp;
  tmp.reserve(MAX_SECTOR_SIZE);
  ByteStream stream(data, size);
  while (!stream.Eof())
  {
    // the stored value is half of the real block length
    const uint_t len = 2 * stream.GetByte();
    Require(!stream.Eof());
    const uint_t count = stream.GetByte();
    Require(count != 0);
    const bool isRLE = len != 0;
    // size of the literal run to read from the stream
    const uint_t blockSize = isRLE ? len : count;
    Require(stream.GetRestBytes() >= blockSize);
    for (uint_t idx = 0; idx != blockSize; ++idx)
    {
      tmp.push_back(stream.GetByte());
    }
    if (isRLE)
    {
      // replicate the just-read block count - 1 more times
      Require(CopyFromBack(len, tmp, len * (count - 1)));
    }
  }
  result.swap(tmp);
}
// Closes the innermost open group: records it as [start, Position)
// and forwards the event to the wrapped builder. Empty groups are invalid.
void GroupEnd() override
{
  Require(!GroupStarts.empty());
  const auto groupBegin = GroupStarts.top();
  Require(groupBegin != Position);
  Groups.push(Group(groupBegin, Position));
  GroupStarts.pop();
  Delegate.GroupEnd();
}
// Parses a disk image (Teledisk-style: old/new ID, optional comment,
// then the sector stream, LHA-compressed for the "new" ID).
// Returns the number of consumed bytes, or 0 on any error.
std::size_t Parse(const Binary::Container& rawData, ImageVisitor& visitor)
{
  SourceStream stream(rawData);
  try
  {
    const RawHeader& header = stream.Get<RawHeader>();
    const uint_t id = fromLE(header.ID);
    Require(id == ID_OLD || id == ID_NEW);
    Require(header.Sequence == 0);
    Require(Math::InRange<uint_t>(header.Sides, MIN_SIDES_COUNT, MAX_SIDES_COUNT));
    if (header.HasComment())
    {
      const RawComment& comment = stream.Get<RawComment>();
      if (const std::size_t size = fromLE(comment.Size))
      {
        stream.GetData(size); // skip the comment body
      }
    }
    const bool compressedData = id == ID_NEW;
    const bool newCompression = header.Version > 20;
    if (compressedData)
    {
      if (!newCompression)
      {
        Dbg("Old compression is not supported.");
        return 0;
      }
      const std::size_t packedSize = rawData.Size() - sizeof(header);
      const Binary::Container::Ptr packed = rawData.GetSubcontainer(sizeof(header), packedSize);
      // first pass: decode generously so that all sectors can be parsed
      if (const Formats::Packed::Container::Ptr fullDecoded = Formats::Packed::Lha::DecodeRawDataAtLeast(*packed, COMPRESSION_ALGORITHM, MAX_IMAGE_SIZE))
      {
        SourceStream subStream(*fullDecoded);
        ParseSectors(subStream, visitor);
        const std::size_t usedInPacked = subStream.GetOffset();
        Dbg("Used %1% bytes in packed stream", usedInPacked);
        // second pass: decode exactly the used amount to learn how much
        // of the packed stream was actually consumed
        if (const Formats::Packed::Container::Ptr decoded = Formats::Packed::Lha::DecodeRawDataAtLeast(*packed, COMPRESSION_ALGORITHM, usedInPacked))
        {
          const std::size_t usedSize = decoded->PackedSize();
          return sizeof(header) + usedSize;
        }
      }
      Dbg("Failed to decode lha stream");
      return 0;
    }
    else
    {
      ParseSectors(stream, visitor);
    }
    return stream.GetOffset();
  }
  catch (const std::exception&)
  {
    return 0;
  }
}
// Returns a pointer to the next `size` bytes and advances the read
// offset past them. Both ends of the range are validated so the caller
// may safely read the whole span.
const uint8_t* GetData(std::size_t size)
{
  Require(size != 0);
  const uint8_t* const begin = Data.GetField<uint8_t>(Offset);
  const uint8_t* const end = Data.GetField<uint8_t>(Offset + size - 1);
  Require(begin != nullptr);
  Require(end != nullptr);
  Offset += size;
  return begin;
}
// LZ77-style decompressor producing `count` output bytes through a ring
// buffer of bufSize bytes. The buffer is flushed to the target every time
// the write cursor wraps; the partial tail is flushed at the end.
void ParseBuffer(uint_t count, Stream& source, Builder& target)
{
  const std::size_t bufSize = source.GetBufferSize();
  Dump buf(bufSize);
  std::size_t cursor = 0;
  uint_t flag = 0x40; //dX_flag
  while (count)
  {
    //dX_next
    // control bits are consumed MSB-first; bit 8 set selects a back
    // reference, clear selects a literal byte
    flag <<= 1;
    if ((flag & 0xff) == 0)
    {
      // low byte exhausted - refill from the stream
      // (the |1 apparently serves as a stop bit - TODO confirm)
      flag = source.ReadByte();
      flag = (flag << 1) | 1;
    }
    if ((flag & 0x100) != 0)
    {
      flag &= 0xff;
      uint_t counter = source.ReadCounter();
      std::size_t srcPtr = source.ReadBackRef();
      Require(count >= counter);
      Require(srcPtr < bufSize);
      count -= counter;
      // copy from an earlier position; both positions wrap inside the ring
      while (counter--)
      {
        buf[cursor++] = buf[srcPtr++];
        if (cursor >= bufSize)
        {
          target.AddValues(buf);
          cursor -= bufSize;
        }
        if (srcPtr >= bufSize)
        {
          srcPtr -= bufSize;
        }
      }
    }
    else
    {
      //dX_chr - single literal byte
      --count;
      buf[cursor++] = source.ReadByte();
      if (cursor >= bufSize)
      {
        target.AddValues(buf);
        cursor -= bufSize;
      }
    }
  }
  // flush the partially filled tail
  if (cursor)
  {
    buf.resize(cursor);
    target.AddValues(buf);
  }
}
// Self-test for Format(): checks plain and formatted output, and
// documents that the returned pointer refers to a shared buffer that is
// overwritten by subsequent calls.
void FormatTest()
{
  Require(strcmp(Format("foo"), "foo") == 0);
  Require(strcmp(Format("bar%d",1), "bar1") == 0);
  const char* aaa = Format("aaa");
  Require(strcmp(aaa, "aaa") == 0);
  // a new call reuses the buffer...
  Format("bbb");
  // ...so the previously returned pointer no longer holds the old text
  Require(strcmp(aaa, "aaa") != 0);
}
// Parses a non-empty string of decimal digits into an unsigned value.
// Throws (via Require) on empty input or any non-digit character.
// NOTE: no overflow check - the caller must ensure the value fits uint_t.
inline uint_t ParseDecimalValue(const std::string& num)
{
  Require(!num.empty());
  uint_t res = 0;
  for (RangeIterator<std::string::const_iterator> it(num.begin(), num.end()); it; ++it)
  {
    // cast to unsigned char: passing a negative plain char to std::isdigit
    // is undefined behavior
    Require(0 != std::isdigit(static_cast<unsigned char>(*it)));
    res = res * 10 + (*it - '0');
  }
  return res;
}
// Validates the sector payload size against its declared encoding:
// raw data must match the sector size exactly, R2P data must be a whole
// number of entries. Other encodings are not size-checked here.
void OnSector(const Formats::CHS& /*loc*/, const uint8_t* /*rawData*/, std::size_t rawSize, SectorDataType type, std::size_t targetSize) override
{
  if (type == RAW_SECTOR)
  {
    Require(rawSize == targetSize);
  }
  else if (type == R2P_SECTOR)
  {
    Require(rawSize % sizeof(R2PEntry) == 0);
  }
}
// Unpacks an R2P-encoded sector: a sequence of entries, each expanding
// to `Count` repetitions of its two data bytes.
void DecodeR2P(const uint8_t* data, std::size_t size, Dump& result)
{
  Require(size % sizeof(R2PEntry) == 0);
  Dump tmp;
  tmp.reserve(MAX_SECTOR_SIZE);
  for (const R2PEntry* it = safe_ptr_cast<const R2PEntry*>(data), *lim = it + size / sizeof(*it); it != lim; ++it)
  {
    const uint_t count = fromLE(it->Count);
    Require(count != 0);
    // emit the pair once, then replicate it count - 1 more times
    tmp.push_back(it->Data[0]);
    tmp.push_back(it->Data[1]);
    Require(CopyFromBack(sizeof(it->Data), tmp, sizeof(it->Data) * (count - 1)));
  }
  result.swap(tmp);
}
// Parses a register-dump stream: signature, three NUL-terminated strings
// (title/author/comment), then a byte-oriented command stream terminated
// by FINISH. Returns a CRC-calculating container over the used data, or
// a null pointer on failure.
Formats::Chiptune::Container::Ptr Parse(const Binary::Container& data, Builder& target)
{
  if (!FastCheck(data))
  {
    return Formats::Chiptune::Container::Ptr();
  }
  try
  {
    Binary::InputStream stream(data);
    stream.ReadField<SignatureType>(); // skip the signature
    target.SetTitle(DecodeString(stream.ReadCString(MAX_STRING_SIZE)));
    target.SetAuthor(DecodeString(stream.ReadCString(MAX_STRING_SIZE)));
    target.SetComment(DecodeString(stream.ReadCString(MAX_COMMENT_SIZE)));
    const std::size_t fixedOffset = stream.GetPosition(); // start of frame data
    std::size_t totalFrames = 0;
    for (;;)
    {
      const uint8_t val = stream.ReadField<uint8_t>();
      if (val == FINISH)
      {
        break;
      }
      switch (val)
      {
      case BEGIN_FRAME:
        ++totalFrames;
        target.BeginFrames(1);
        break;
      case SKIP_FRAMES:
        {
          // stored count is biased by 3 (presumably smaller skips use
          // dedicated opcodes - confirm against the format spec)
          const uint_t frames = 3 + stream.ReadField<uint8_t>();
          totalFrames += frames;
          target.BeginFrames(frames);
        }
        break;
      case SELECT_SECOND_CHIP:
        target.SelectChip(1);
        break;
      case SELECT_FIRST_CHIP:
        target.SelectChip(0);
        break;
      case LOOP_MARKER:
        target.SetLoop();
        break;
      default:
        // anything else is a register index followed by its value
        target.SetRegister(val, stream.ReadField<uint8_t>());
        break;
      }
    }
    Require(totalFrames >= MIN_FRAMES);
    const std::size_t usedSize = stream.GetPosition();
    const auto subData = stream.GetReadData();
    return CreateCalculatingCrcContainer(subData, fixedOffset, usedSize - fixedOffset);
  }
  catch (const std::exception&)
  {
    return Formats::Chiptune::Container::Ptr();
  }
}
// Converts a single hexadecimal digit character to its 4-bit value.
// Throws (via Require) when the character is not a hex digit.
uint8_t Hex2Bin(char val)
{
  // cast to unsigned char: passing a negative plain char to std::isxdigit /
  // std::isdigit / std::toupper is undefined behavior
  const unsigned char sym = static_cast<unsigned char>(val);
  Require(std::isxdigit(sym));
  return std::isdigit(sym) ? sym - '0' : std::toupper(sym) - 'A' + 10;
}
// Selects machine-specific trait initialization from the version 3.x
// hardware mode field; unknown modes fail the Require check.
void FillVer3Traits(uint_t hwMode)
{
  const bool is48k = hwMode == Version3_0::Ver_48k
                  || hwMode == Version3_0::Ver_48k_iface1
                  || hwMode == Version3_0::Ver_48k_mgt;
  const bool is128k = hwMode == Version3_0::Ver_128k
                   || hwMode == Version3_0::Ver_128k_iface1
                   || hwMode == Version3_0::Ver_128k_mgt
                   || hwMode == Version3_0::Ver_Pentagon;
  if (is48k)
  {
    Fill48kTraits();
  }
  else if (hwMode == Version3_0::Ver_SamRam)
  {
    FillSamRamTraits();
  }
  else if (is128k)
  {
    Fill128kTraits();
  }
  else if (hwMode == Version3_0::Ver_Scorpion)
  {
    Fill256kTraits();
  }
  else
  {
    Require(false); // unsupported hardware mode
  }
}
// Reads the next structure of type T from the stream and advances the
// offset past it. Throws (via Require) when out of bounds.
const T& Get()
{
  const T* const field = Data.GetField<T>(Offset);
  Require(field != nullptr);
  Offset += sizeof(T);
  return *field;
}
// Unpacks a Z80-snapshot style RLE block: the sequence ED ED nn dd
// expands to nn copies of dd; any other byte is copied verbatim.
// The destination is zero-filled first; decoding stops when either the
// input or the output space is exhausted.
// Returns the number of source bytes consumed.
std::size_t DecodeBlock(const uint8_t* src, std::size_t srcSize, uint8_t* dst, std::size_t dstSize)
{
  const uint8_t PREFIX = 0xed;
  std::memset(dst, 0, dstSize);
  std::size_t in = 0;
  std::size_t out = 0;
  while (in < srcSize && out < dstSize)
  {
    const std::size_t restIn = srcSize - in;
    if (restIn >= 4 && src[in] == PREFIX && src[in + 1] == PREFIX)
    {
      const std::size_t count = src[in + 2];
      const uint8_t filler = src[in + 3];
      Require(count <= dstSize - out);
      std::memset(dst + out, filler, count);
      in += 4;
      out += count;
    }
    else
    {
      dst[out++] = src[in++];
    }
  }
  return in;
}
// Copies lDataLength bytes from the head of the buffer queue into pbData,
// releasing fully-consumed items along the way. Returns E_FAIL when the
// queue runs dry before the request is satisfied (bytes already copied
// are not rolled back).
HRESULT CTSBuffer::DequeFromBuffer(BYTE *pbData, long lDataLength)
{
  Mediaportal::CEnterCriticalSection lock(m_BufferLock);
  HRESULT hr = Require(lDataLength); // ensure enough data is buffered
  if (FAILED(hr))
    return hr;
  long bytesWritten = 0;
  while (bytesWritten < lDataLength)
  {
    // was: !m_Array.size() || m_Array.size() <= 0 - redundant double check
    if (m_Array.empty())
      return E_FAIL;
    BYTE *item = m_Array.at(0);
    long copyLength = min(m_lTSBufferItemSize - m_lItemOffset, lDataLength - bytesWritten);
    memcpy(pbData + bytesWritten, item + m_lItemOffset, copyLength);
    bytesWritten += copyLength;
    m_lItemOffset += copyLength;
    if (m_lItemOffset >= m_lTSBufferItemSize)
    {
      // current item fully consumed - drop and free it
      m_Array.erase(m_Array.begin());
      delete[] item;
      m_lItemOffset -= m_lTSBufferItemSize; //should result in zero
    }
  }
  return S_OK;
}
// Default implementation: email-token support is not provided by this
// backend. Logs the missing-feature diagnostic and returns a benign value.
User::EmailTokenRole AbstractUserDatabase::emailTokenRole(const User& user) const
{
  // Require(...) builds the "not implemented" error object; only its
  // message is logged here, nothing is thrown
  LOG_ERROR(Require("emailTokenRole()", EMAIL_VERIFICATION).what());
  return User::VerifyEmail;
}
// Per-component posterior log-probabilities of each alphabet token at
// `node`, computed with the contribution of `exclude` (one child or the
// parent) left out. Gapped positions are rejected; wildcard positions
// start with a flat profile. Optionally normalizes across all entries.
vguard<vguard<LogProb> > SumProduct::logNodeExcludedPostProb (TreeNodeIndex node, TreeNodeIndex exclude, bool normalize) const {
  Require (!isGap(node), "Attempt to find posterior probability of sequence at gapped position");
  // observed token, or -1 for a wildcard
  const UnvalidatedAlphTok tok = isWild(node) ? -1 : model.tokenize(gappedCol[node]);
  // wildcard: log 1 for every token; otherwise all mass on the observed one
  vguard<LogProb> lppInit (model.alphabetSize(), isWild(node) ? 0 : -numeric_limits<double>::infinity());
  if (!isWild(node))
    lppInit[tok] = 0;
  vguard<vguard<LogProb> > v (model.components(), lppInit);
  LogProb norm = -numeric_limits<double>::infinity();
  for (int cpt = 0; cpt < components(); ++cpt) {
    vguard<LogProb>& lpp = v[cpt];
    // mixture-component weight
    for (auto& lp: lpp)
      lp += logCptWeight[cpt];
    // evidence from children, skipping the excluded one
    for (size_t nc = 0; nc < tree.nChildren(node); ++nc) {
      const TreeNodeIndex child = tree.getChild(node,nc);
      if (child != exclude)
        for (AlphTok i = 0; i < model.alphabetSize(); ++i)
          lpp[i] += log (E[cpt][child][i]) + logE[cpt][child];
    }
    // evidence from the parent, unless the parent is the excluded node
    const TreeNodeIndex parent = tree.parentNode (node);
    for (AlphTok i = 0; i < model.alphabetSize(); ++i) {
      lpp[i] += parent == exclude
        ? 0 // to add a prior for orphaned nodes, this should be log(insProb[i]), but that complicates MCMC etc
        : (log(G[cpt][node][i]) + logG[cpt][node]);
      // accumulate the normalization constant over all components/tokens
      log_accum_exp (norm, lpp[i]);
    }
  }
  if (normalize)
    for (auto& lpp: v)
      for (auto& lp: lpp)
        lp -= norm;
  return v;
}
// Serializes the collected register dump into FYM layout: header,
// NUL-terminated title/author, then register-major frame data; the whole
// image is finally zlib-compressed into `data`.
void GetResult(Dump& data) const override
{
  Dump rawDump;
  Delegate->GetResult(rawDump);
  // the raw dump must be a whole number of full register frames
  Require(0 == rawDump.size() % Registers::TOTAL);
  const uint32_t framesCount = rawDump.size() / Registers::TOTAL;
  const uint_t storedRegisters = Registers::TOTAL;
  const String& title = Params->Title();
  const String author = Params->Author();
  // +1 per string for the terminating NUL
  const uint32_t headerSize = sizeof(FYMHeader) + (title.size() + 1) + (author.size() + 1);
  const std::size_t contentSize = framesCount * storedRegisters;
  Binary::DataBuilder builder(headerSize + contentSize);
  FYMHeader& header = builder.Add<FYMHeader>();
  header.HeaderSize = fromLE(headerSize);
  header.FramesCount = fromLE(framesCount);
  header.LoopFrame = fromLE(static_cast<uint32_t>(Params->LoopFrame()));
  header.PSGFreq = fromLE(static_cast<uint32_t>(Params->ClockFreq()));
  header.IntFreq = fromLE(static_cast<uint32_t>(Time::GetFrequencyForPeriod(Params->FrameDuration())));
  builder.AddCString(title);
  builder.AddCString(author);
  // transpose frame-major input to register-major output: all values of
  // register 0 first, then register 1, and so on
  for (uint_t reg = 0; reg < storedRegisters; ++reg)
  {
    uint8_t* const result = static_cast<uint8_t*>(builder.Allocate(framesCount));
    for (uint_t frm = 0, inOffset = reg; frm < framesCount; ++frm, inOffset += Registers::TOTAL)
    {
      result[frm] = rawDump[inOffset];
    }
  }
  Dump result;
  builder.CaptureResult(result);
  Binary::Compression::Zlib::Compress(result, data);
}
/* ParseFor - parse the 'FOR' statement
   Syntax: FOR <var> = <start> TO <limit> [STEP <expr>] <EOL>
   Emits: termination test, branch-to-end, the per-iteration increment
   section (addressed via the block's nxt field), and a back branch to the
   test; the loop body follows at the fixup point. */
static void ParseFor(ParseContext *c)
{
    ParseTreeNode *var, *step;
    int test, body, inst;
    Token tkn;
    PVAL pv;

    /* open a FOR block so NEXT/EXIT can find the loop addresses */
    PushBlock(c);
    c->bptr->type = BLOCK_FOR;

    /* get the control variable */
    FRequire(c, T_IDENTIFIER);
    var = GetSymbolRef(c, c->token);
    code_lvalue(c, var, &pv);
    FRequire(c, '=');

    /* parse the starting value expression */
    ParseRValue(c);

    /* parse the TO expression and generate the loop termination test */
    test = codeaddr(c);
    (*pv.fcn)(c, PV_STORE, &pv);  /* store the current value into the variable */
    (*pv.fcn)(c, PV_LOAD, &pv);   /* reload it for the comparison */
    FRequire(c, T_TO);
    ParseRValue(c);
    putcbyte(c, OP_LE);
    putcbyte(c, OP_BRT);
    body = putcword(c, 0);        /* forward branch into the loop body */

    /* branch to the end if the termination test fails */
    putcbyte(c, OP_BR);
    c->bptr->u.ForBlock.end = putcword(c, 0);

    /* update the for variable after an iteration of the loop */
    c->bptr->u.ForBlock.nxt = codeaddr(c);
    (*pv.fcn)(c, PV_LOAD, &pv);

    /* get the STEP expression */
    if ((tkn = GetToken(c)) == T_STEP) {
        step = ParseExpr(c);
        code_rvalue(c, step);
        tkn = GetToken(c);
    }

    /* no step so default to one */
    else {
        putcbyte(c, OP_LIT);
        putcword(c, 1);
    }

    /* generate the increment code */
    putcbyte(c, OP_ADD);
    inst = putcbyte(c, OP_BR);
    putcword(c, test - inst - 1 - sizeof(VMUVALUE)); /* relative branch back to the test */

    /* branch to the loop body */
    fixupbranch(c, body, codeaddr(c));

    /* the FOR statement must end at the end of line */
    Require(c, tkn, T_EOL);
}
// Returns a typed view at the given offset, tracking the [Min, Max)
// range of bytes actually accessed. Throws (via Require) when the field
// does not fit into the container.
const T& Get(std::size_t offset) const
{
  const T* const ptr = Delegate.GetField<T>(offset);
  Require(ptr != nullptr); // was `ptr != 0`; file convention uses nullptr
  Min = std::min(Min, offset);
  Max = std::max(Max, offset + sizeof(T));
  return *ptr;
}
// Builds an identifier from its scheme/path/subpath components.
// Scheme and path are mandatory; subpath may be empty.
// FullValue caches the serialized textual form and is initialized last
// because Serialize() reads the members above.
RemoteIdentifier(const String& scheme, const String& path, const String& subpath)
  : SchemeValue(scheme)
  , PathValue(path)
  , SubpathValue(subpath)
  , FullValue(Serialize())
{
  Require(!SchemeValue.empty() && !PathValue.empty());
}
// Drains the pending operations stack. Every remaining entry must be a
// real operation - an unmatched group marker on top is an error.
void Flush()
{
  for (;;)
  {
    if (Ops.empty())
    {
      break;
    }
    Require(Ops.top().IsOperation());
    FlushOperations();
  }
}
// Default implementation: auth-token support is not provided by this
// backend. Logs the missing-feature diagnostic and reports failure.
// Returns -1 to signal "not supported".
int AbstractUserDatabase::updateAuthToken(const User& user, const std::string& hash, const std::string& newHash)
{
  // Require(...) builds the "not implemented" error object; only its
  // message is logged here, nothing is thrown
  LOG_WARN(Require("updateAuthToken()", AUTH_TOKEN).what());
  return -1;
}
// Closes the current group: flushes everything collected inside it,
// pops the matching GROUP_START marker and forwards the event.
void GroupEnd() override
{
  // materialize the operations gathered since the group started
  FlushOperations();
  // the matching GROUP_START marker must now be on top of the stack
  Require(!Ops.empty() && Ops.top().Value() == GROUP_START);
  Ops.pop();
  Delegate.GroupEnd();
  LastIsMatch = false;
}
// Builds an identifier from its scheme/path/subpath components.
// Parameters are taken by value and moved in (sink parameters).
// Scheme and path are mandatory; subpath may be empty.
// FullValue caches the serialized textual form and is initialized last
// because Serialize() reads the members above.
RemoteIdentifier(String scheme, String path, String subpath)
  : SchemeValue(std::move(scheme))
  , PathValue(std::move(path))
  , SubpathValue(std::move(subpath))
  , FullValue(Serialize())
{
  Require(!SchemeValue.empty() && !PathValue.empty());
}
// Decodes a version 1.45 image: either a stored 48k memory dump or an
// RLE-compressed block terminated by the 00 ED ED 00 footer.
// Throws (via Require) on malformed input.
Formats::Packed::Container::Ptr Version1_45::Decode(Binary::InputStream& stream)
{
  const Version1_45::Header hdr = stream.ReadField<Version1_45::Header>();
  const std::size_t restSize = stream.GetRestSize();
  const std::size_t TARGET_SIZE = 49152; // full 48k memory image
  const uint32_t FOOTER = 0x00eded00;
  if (0 == (hdr.Flag1 & hdr.COMPRESSED))
  {
    // stored: payload is the plain memory dump
    Require(restSize >= TARGET_SIZE);
    const Binary::Container::Ptr rest = stream.ReadRestData();
    return CreatePackedContainer(rest->GetSubcontainer(0, TARGET_SIZE), sizeof(hdr) + TARGET_SIZE);
  }
  Require(restSize > sizeof(FOOTER));
  // std::auto_ptr was deprecated in C++11 and removed in C++17; use
  // unique_ptr with explicit ownership transfer instead
  std::unique_ptr<Dump> res(new Dump(TARGET_SIZE));
  DecodeBlock(stream, restSize - sizeof(FOOTER), *res);
  const uint32_t footer = fromLE(stream.ReadField<uint32_t>());
  Require(footer == FOOTER);
  return CreatePackedContainer(std::move(res), stream.GetPosition());
}