Example #1
0
/*
 *   Parses autopilot heading message (page 1)
 */
bool tCanKingProtocol::AutopilotHeading( const quint8* pData, quint16& actualHeading )
{
    if( PageNumber( pData ) == 1 )
    {
        actualHeading = qFromBigEndian< quint16 >( pData+4 );
        return true;
    }
    return false;
}
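For reference, a minimal standalone sketch of the byte handling this parser relies on: qFromBigEndian<quint16>(pData + 4) reads a two-byte big-endian value at payload offset 4. The layout below (page number assumed in byte 0, heading in bytes 4..5) and the plain-C++ helper are illustration-only assumptions, since PageNumber() and the actual frame format are not shown here.

#include <cstdint>
#include <cstdio>

/* Plain-C++ stand-in for qFromBigEndian<quint16>(pData + 4): the heading is
 * assumed to occupy two big-endian bytes starting at payload offset 4. */
static uint16_t headingFromPayload(const uint8_t* pData)
{
    return static_cast<uint16_t>((pData[4] << 8) | pData[5]);
}

int main()
{
    /* Hypothetical 8-byte payload: byte 0 is assumed to carry the page number
     * (1), bytes 4..5 the heading 0x0168 = 360 (units depend on the protocol). */
    const uint8_t payload[8] = { 0x01, 0x00, 0x00, 0x00, 0x01, 0x68, 0x00, 0x00 };
    std::printf("heading = %u\n", static_cast<unsigned>(headingFromPayload(payload)));
    return 0;
}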
Example #2
/*
 *   Returns an array with the page number currently held by each frame of the pool
 */
PageNumber *getFrameContents (BM_BufferPool *const bm){
    /* One PageNumber per frame; the caller owns (and frees) the array */
    PageNumber *arr = calloc(bm->numPages, sizeof(PageNumber));
    if (arr == NULL)
        return NULL;

    for (int i = g_queue->queueSize - 1; i >= 0; i--)
    {
        arr[i] = bufferInfo[i]->PageNum;
    }
    return arr;
}
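A caller-side sketch of how the function above might be used, assuming the usual buffer-manager shape where BM_BufferPool exposes numPages. The typedefs below are hedged stand-ins for the real header (not shown), and the pool's internal state (g_queue, bufferInfo) is assumed to have been set up by the buffer manager itself.

#include <stdio.h>
#include <stdlib.h>

/* Assumed shapes, for illustration only: the real typedefs live in the
 * buffer manager's own header, which is not part of this example. */
typedef int PageNumber;
typedef struct BM_BufferPool {
    int numPages;
    /* ... other fields omitted ... */
} BM_BufferPool;

PageNumber *getFrameContents(BM_BufferPool *const bm);

/* Print which page each frame currently holds, then release the array,
 * since getFrameContents heap-allocates it. */
void printFrameContents(BM_BufferPool *const bm)
{
    PageNumber *frames = getFrameContents(bm);
    if (frames == NULL)
        return;

    for (int i = 0; i < bm->numPages; i++)
        printf("frame %d -> page %d\n", i, frames[i]);

    free(frames);
}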
Example #3
0
bool
MappableSeekableZStream::ensure(const void *addr)
{
    DEBUG_LOG("ensure @%p", addr);
    const void *addrPage = PageAlignedPtr(addr);
    /* Find the mapping corresponding to the given page */
    std::vector<LazyMap>::iterator map;
    for (map = lazyMaps.begin(); map < lazyMaps.end(); ++map) {
        if (map->Contains(addrPage))
            break;
    }
    if (map == lazyMaps.end())
        return false;

    /* Find corresponding chunk */
    off_t mapOffset = map->offsetOf(addrPage);
    off_t chunk = mapOffset / zStream.GetChunkSize();

    /* In the typical case, we just need to decompress the chunk entirely. But
     * when the current mapping ends in the middle of the chunk, we want to
     * stop at the end of the corresponding page.
     * However, if another mapping needs the last part of the chunk, we still
     * need to continue. As mappings are ordered by offset and length, we don't
     * need to scan the entire list of mappings.
     * It is safe to run through lazyMaps here because the linker is never
     * going to call mmap (which adds lazyMaps) while this function is
     * called. */
    size_t length = zStream.GetChunkSize(chunk);
    off_t chunkStart = chunk * zStream.GetChunkSize();
    off_t chunkEnd = chunkStart + length;
    std::vector<LazyMap>::iterator it;
    for (it = map; it < lazyMaps.end(); ++it) {
        if (chunkEnd <= it->endOffset())
            break;
    }
    if ((it == lazyMaps.end()) || (chunkEnd > it->endOffset())) {
        /* The mapping "it" now points at is past the interesting one */
        --it;
        length = it->endOffset() - chunkStart;
    }

    length = PageAlignedSize(length);

    /* The following lock can be re-acquired by the thread holding it.
     * If this happens, it means the following code is interrupted somehow by
     * some signal, and ends up retriggering a chunk decompression for the
     * same MappableSeekableZStream.
     * If the chunk to decompress is different the second time, then everything
     * is safe as the only common data touched below is chunkAvailNum, and it is
     * atomically updated (leaving out any chance of an interruption while it is
     * updated affecting the result). If the chunk to decompress is the same, the
     * worst thing that can happen is chunkAvailNum being incremented one too
     * many times, which doesn't affect functionality. The chances of this
     * happening are pretty slim and the effect is harmless, so we can just
     * ignore the issue. Other than that, we'd just be wasting time decompressing
     * the same chunk twice. */
    AutoLock lock(&mutex);

    /* The very first page is mapped and accessed separately from the rest, and
     * as such, only the first page of the first chunk is decompressed this way.
     * When we fault in the remaining pages of that chunk, we want to decompress
     * the complete chunk again. Short of doing that, we would end up with
     * no data between PageSize() and chunkSize, which would effectively corrupt
     * symbol resolution in the underlying library. */
    if (chunkAvail[chunk] < PageNumber(length)) {
        if (!zStream.DecompressChunk(*buffer + chunkStart, chunk, length))
            return false;

#if defined(ANDROID) && defined(__arm__)
        if (map->prot & PROT_EXEC) {
            /* We just extracted data that may be executed in the future.
             * We thus need to ensure Instruction and Data cache coherency. */
            DEBUG_LOG("cacheflush(%p, %p)", *buffer + chunkStart, *buffer + (chunkStart + length));
            cacheflush(reinterpret_cast<uintptr_t>(*buffer + chunkStart),
                       reinterpret_cast<uintptr_t>(*buffer + (chunkStart + length)), 0);
        }
#endif
        /* Only count if we haven't already decompressed parts of the chunk */
        if (chunkAvail[chunk] == 0)
            chunkAvailNum++;

        chunkAvail[chunk] = PageNumber(length);
    }

    /* Flip the chunk mapping protection to the recorded flags. We could
     * also flip the protection for other mappings of the same chunk,
     * but it's easier to skip that and let further segfaults call
     * ensure again. */
    const void *chunkAddr = reinterpret_cast<const void *>
                            (reinterpret_cast<uintptr_t>(addrPage)
                             - mapOffset % zStream.GetChunkSize());
    const void *chunkEndAddr = reinterpret_cast<const void *>
                               (reinterpret_cast<uintptr_t>(chunkAddr) + length);

    const void *start = std::max(map->addr, chunkAddr);
    const void *end = std::min(map->end(), chunkEndAddr);
    length = reinterpret_cast<uintptr_t>(end)
             - reinterpret_cast<uintptr_t>(start);

    DEBUG_LOG("mprotect @%p, 0x%" PRIxSize ", 0x%x", start, length, map->prot);
    if (mprotect(const_cast<void *>(start), length, map->prot) == 0)
        return true;

    LOG("mprotect failed");
    return false;
}
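To make the offset arithmetic in ensure() easier to follow, here is a minimal standalone sketch of the same page/chunk bookkeeping. PageAlignedPtr, PageAlignedSize and PageNumber below are stand-ins modeled on how ensure() uses them, and the 4 KiB page / 16 KiB chunk sizes are assumptions for illustration only; the real values come from the platform and from zStream.GetChunkSize().

#include <cstdint>
#include <cstdio>

/* Illustrative constants; assumed values, not the real ones. */
static const uintptr_t kPageSize  = 4096;   /* assumed page size */
static const size_t    kChunkSize = 16384;  /* assumed zStream.GetChunkSize() */

/* Round an address down to the start of its page (cf. PageAlignedPtr). */
static const void *PageAlignedPtr(const void *addr)
{
    return reinterpret_cast<const void *>(
        reinterpret_cast<uintptr_t>(addr) & ~(kPageSize - 1));
}

/* Round a length up to a whole number of pages (cf. PageAlignedSize). */
static size_t PageAlignedSize(size_t length)
{
    return (length + kPageSize - 1) & ~(kPageSize - 1);
}

/* Number of pages needed to cover a length (cf. PageNumber in ensure()). */
static size_t PageNumber(size_t length)
{
    return (length + kPageSize - 1) / kPageSize;
}

int main()
{
    /* Hypothetical fault address, rounded down to its containing page. */
    const uintptr_t faultAddr = 0x12345678;
    const void *addrPage = PageAlignedPtr(reinterpret_cast<const void *>(faultAddr));

    /* Suppose that page sits 37000 bytes into the lazy mapping: the chunk
     * index and its byte offset in the stream follow directly. */
    const size_t mapOffset  = 37000;
    const size_t chunk      = mapOffset / kChunkSize;       /* -> chunk 2 */
    const size_t chunkStart = chunk * kChunkSize;            /* -> offset 32768 */
    const size_t length     = PageAlignedSize(kChunkSize);   /* whole chunk */

    std::printf("page @%p, chunk %zu at offset %zu, %zu pages (%zu bytes)\n",
                addrPage, chunk, chunkStart, PageNumber(length), length);
    return 0;
}

Running this prints chunk 2 at offset 32768 covering 4 pages (16384 bytes), which is the shape of the values ensure() passes on to DecompressChunk and mprotect.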