Example No. 1
// static
Shmem::SharedMemory*
Shmem::Alloc(IHadBetterBeIPDLCodeCallingThis_OtherwiseIAmADoodyhead,
             size_t aNBytes, 
             SharedMemoryType aType,
             bool /*unused*/)
{
  SharedMemory *segment = nsnull;

  if (aType == SharedMemory::TYPE_BASIC)
    segment = CreateSegment(PageAlignedSize(aNBytes + sizeof(size_t)),
                            SharedMemoryBasic::NULLHandle());
#ifdef MOZ_HAVE_SHAREDMEMORYSYSV
  else if (aType == SharedMemory::TYPE_SYSV)
    segment = CreateSegment(PageAlignedSize(aNBytes + sizeof(size_t)),
                            SharedMemorySysV::NULLHandle());
#endif
  else
    // unknown shmem type
    NS_ABORT();

  if (!segment)
    return 0;

  *PtrToSize(segment) = aNBytes;

  return segment;
}
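
All five examples round their allocation sizes with PageAlignedSize(). A minimal sketch of what that helper presumably computes, assuming it simply rounds a byte count up to the next multiple of the system page size (illustration only, not the tree's actual definition):

// Sketch: round |aNBytes| up to a whole number of pages.
static size_t
PageAlignedSize(size_t aNBytes)
{
  size_t pageSize = SharedMemory::SystemPageSize();
  size_t nPages = (aNBytes + pageSize - 1) / pageSize;  // ceiling division
  return nPages * pageSize;
}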
Example No. 2
// static
Shmem::SharedMemory*
Shmem::OpenExisting(IHadBetterBeIPDLCodeCallingThis_OtherwiseIAmADoodyhead,
                    const IPC::Message& aDescriptor,
                    id_t* aId,
                    bool /*unused*/)
{
  if (SHMEM_CREATED_MESSAGE_TYPE != aDescriptor.type())
    NS_RUNTIMEABORT("expected 'shmem created' message");

  SharedMemory::SharedMemoryType type;
  void* iter = 0;
  size_t size;
  if (!ShmemCreated::ReadInfo(&aDescriptor, &iter, aId, &size, &type))
    return 0;

  SharedMemory* segment = 0;
  size_t segmentSize = PageAlignedSize(size + sizeof(size_t));

  if (SharedMemory::TYPE_BASIC == type) {
    SharedMemoryBasic::Handle handle;
    if (!ShmemCreated::ReadHandle(&aDescriptor, &iter, &handle))
      return 0;

    if (!SharedMemoryBasic::IsHandleValid(handle))
      NS_RUNTIMEABORT("trying to open invalid handle");

    segment = CreateSegment(segmentSize, handle);
  }
#ifdef MOZ_HAVE_SHAREDMEMORYSYSV
  else if (SharedMemory::TYPE_SYSV == type) {
    SharedMemorySysV::Handle handle;
    if (!ShmemCreated::ReadHandle(&aDescriptor, &iter, &handle))
      return 0;

    if (!SharedMemorySysV::IsHandleValid(handle))
      NS_RUNTIMEABORT("trying to open invalid handle");
    segment = CreateSegment(segmentSize, handle);
  }
#endif
  else {
    NS_RUNTIMEABORT("unknown shmem type");
  }

  if (!segment)
    return 0;

  // this is the only validity check done in OPT builds
  if (size != *PtrToSize(segment))
    NS_RUNTIMEABORT("Alloc() segment size disagrees with OpenExisting()'s");

  return segment;
}
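
Example No. 1 stashes the caller-visible size with *PtrToSize(segment); this example re-reads it to cross-check the two sides. A plausible sketch of that helper, assuming the extra sizeof(size_t) allocated in Alloc() sits at the very end of the mapped segment (the layout is inferred, not confirmed):

// Hypothetical: the bookkeeping size_t occupies the last word of the segment.
static size_t*
PtrToSize(Shmem::SharedMemory* aSegment)
{
  char* endOfSegment =
    reinterpret_cast<char*>(aSegment->memory()) + aSegment->Size();
  return reinterpret_cast<size_t*>(endOfSegment - sizeof(size_t));
}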
Example No. 3
// static
Shmem::SharedMemory*
Shmem::OpenExisting(IHadBetterBeIPDLCodeCallingThis_OtherwiseIAmADoodyhead,
                    const IPC::Message& aDescriptor,
                    id_t* aId,
                    bool aProtect)
{
  if (SHMEM_CREATED_MESSAGE_TYPE != aDescriptor.type())
    NS_RUNTIMEABORT("expected 'shmem created' message");

  void* iter = 0;
  SharedMemory::SharedMemoryType type;
  size_t size;
  if (!ShmemCreated::ReadInfo(&aDescriptor, &iter, aId, &size, &type))
    return 0;

  SharedMemory* segment = 0;
  size_t pageSize = SharedMemory::SystemPageSize();
  // |2*pageSize| is for the front and back sentinels
  size_t segmentSize = PageAlignedSize(size + 2*pageSize);

  if (SharedMemory::TYPE_BASIC == type) {
    SharedMemoryBasic::Handle handle;
    if (!ShmemCreated::ReadHandle(&aDescriptor, &iter, &handle))
      return 0;

    if (!SharedMemoryBasic::IsHandleValid(handle))
      NS_RUNTIMEABORT("trying to open invalid handle");
    segment = CreateSegment(segmentSize, handle);
  }
#ifdef MOZ_HAVE_SHAREDMEMORYSYSV
  else if (SharedMemory::TYPE_SYSV == type) {
    SharedMemorySysV::Handle handle;
    if (!ShmemCreated::ReadHandle(&aDescriptor, &iter, &handle))
      return 0;

    if (!SharedMemorySysV::IsHandleValid(handle))
      NS_RUNTIMEABORT("trying to open invalid handle");
    segment = CreateSegment(segmentSize, handle);
  }
#endif
  else {
    NS_RUNTIMEABORT("unknown shmem type");
  }

  if (!segment)
    return 0;

  if (aProtect)
    Protect(segment);

  return segment;
}
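
Unlike Example No. 2, this variant reserves a page at each end of the segment and then calls Protect() when asked to. A hedged sketch of what Protect() plausibly does, assuming it revokes all access rights on the mapping so that any touch before an explicit unprotect faults immediately (the rights constant and the whole-segment granularity are assumptions):

// Assumed behavior: make the whole segment inaccessible; the sentinel
// pages additionally catch out-of-bounds access once the data pages
// are re-enabled by the eventual owner.
static void
Protect(Shmem::SharedMemory* aSegment)
{
  char* p = reinterpret_cast<char*>(aSegment->memory());
  aSegment->Protect(p, aSegment->Size(), RightsNone);
}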
Example No. 4
// static
Shmem::SharedMemory*
Shmem::Alloc(IHadBetterBeIPDLCodeCallingThis_OtherwiseIAmADoodyhead,
             size_t aNBytes,
             SharedMemoryType aType,
             bool aProtect)
{
  size_t pageSize = SharedMemory::SystemPageSize();
  SharedMemory* segment = nsnull;
  // |2*pageSize| is for the front and back sentinels
  size_t segmentSize = PageAlignedSize(aNBytes + 2*pageSize);

  if (aType == SharedMemory::TYPE_BASIC)
    segment = CreateSegment(segmentSize, SharedMemoryBasic::NULLHandle());
#ifdef MOZ_HAVE_SHAREDMEMORYSYSV
  else if (aType == SharedMemory::TYPE_SYSV)
    segment = CreateSegment(segmentSize, SharedMemorySysV::NULLHandle());
#endif
  else
    NS_RUNTIMEABORT("unknown shmem type");

  if (!segment)
    return 0;

  char *frontSentinel;
  char *data;
  char *backSentinel;
  GetSections(segment, &frontSentinel, &data, &backSentinel);

  // initialize the segment with Shmem-internal information
  Header* header = reinterpret_cast<Header*>(frontSentinel);
  memcpy(header->mMagic, sMagic, sizeof(sMagic));
  header->mSize = aNBytes;

  if (aProtect)
    Protect(segment);

  return segment;
}
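
Here the front sentinel page doubles as bookkeeping space: a Header carrying a magic cookie and the logical size is written into it before the segment is handed out. A sketch of the layout GetSections() presumably decodes; the struct shape is inferred from the fields touched above:

// Assumed layout: [front sentinel page | data pages | back sentinel page],
// with the Header at the start of the front sentinel.
struct Header
{
  char mMagic[sizeof(sMagic)];  // tags the segment as Shmem-managed
  size_t mSize;                 // caller-visible size, i.e. aNBytes
};

static void
GetSections(Shmem::SharedMemory* aSegment,
            char** aFrontSentinel, char** aData, char** aBackSentinel)
{
  size_t pageSize = Shmem::SharedMemory::SystemPageSize();
  *aFrontSentinel = reinterpret_cast<char*>(aSegment->memory());
  *aData = *aFrontSentinel + pageSize;
  *aBackSentinel = *aFrontSentinel + aSegment->Size() - pageSize;
}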
Example No. 5
bool
MappableSeekableZStream::ensure(const void *addr)
{
    DEBUG_LOG("ensure @%p", addr);
    const void *addrPage = PageAlignedPtr(addr);
    /* Find the mapping corresponding to the given page */
    std::vector<LazyMap>::iterator map;
    for (map = lazyMaps.begin(); map < lazyMaps.end(); ++map) {
        if (map->Contains(addrPage))
            break;
    }
    if (map == lazyMaps.end())
        return false;

    /* Find corresponding chunk */
    off_t mapOffset = map->offsetOf(addrPage);
    off_t chunk = mapOffset / zStream.GetChunkSize();

    /* In the typical case, we just need to decompress the chunk entirely. But
     * when the current mapping ends in the middle of the chunk, we want to
     * stop at the end of the corresponding page.
     * However, if another mapping needs the last part of the chunk, we still
     * need to continue. As mappings are ordered by offset and length, we don't
     * need to scan the entire list of mappings.
     * It is safe to run through lazyMaps here because the linker is never
     * going to call mmap (which adds lazyMaps) while this function is
     * called. */
    size_t length = zStream.GetChunkSize(chunk);
    off_t chunkStart = chunk * zStream.GetChunkSize();
    off_t chunkEnd = chunkStart + length;
    std::vector<LazyMap>::iterator it;
    for (it = map; it < lazyMaps.end(); ++it) {
        if (chunkEnd <= it->endOffset())
            break;
    }
    if ((it == lazyMaps.end()) || (chunkEnd > it->endOffset())) {
        /* The mapping "it" points at now is past the interesting one */
        --it;
        length = it->endOffset() - chunkStart;
    }

    length = PageAlignedSize(length);

    /* The following lock can be re-acquired by the thread holding it.
     * If this happens, it means the following code is interrupted somehow by
     * some signal, and ends up retriggering a chunk decompression for the
     * same MappableSeekableZStream.
     * If the chunk to decompress is different the second time, then everything
     * is safe as the only common data touched below is chunkAvailNum, and it is
     * atomically updated (leaving out any chance of an interruption while it is
     * updated affecting the result). If the chunk to decompress is the same, the
     * worst thing that can happen is chunkAvailNum being incremented one too
     * many times, which doesn't affect functionality. Since the chances of
     * that happening are slim and the effect is harmless, we can just ignore
     * the issue. Other than that, we'd just be wasting time decompressing
     * the same chunk twice. */
    AutoLock lock(&mutex);

    /* The very first page is mapped and accessed separately from the rest, and
     * as such, only the first page of the first chunk is decompressed this way.
     * When we fault in the remaining pages of that chunk, we want to decompress
     * the complete chunk again. Short of doing that, we would end up with
     * no data between PageSize() and chunkSize, which would effectively corrupt
     * symbol resolution in the underlying library. */
    if (chunkAvail[chunk] < PageNumber(length)) {
        if (!zStream.DecompressChunk(*buffer + chunkStart, chunk, length))
            return false;

#if defined(ANDROID) && defined(__arm__)
        if (map->prot & PROT_EXEC) {
            /* We just extracted data that may be executed in the future.
             * We thus need to ensure Instruction and Data cache coherency. */
            DEBUG_LOG("cacheflush(%p, %p)", *buffer + chunkStart, *buffer + (chunkStart + length));
            cacheflush(reinterpret_cast<uintptr_t>(*buffer + chunkStart),
                       reinterpret_cast<uintptr_t>(*buffer + (chunkStart + length)), 0);
        }
#endif
        /* Only count if we haven't already decompressed parts of the chunk */
        if (chunkAvail[chunk] == 0)
            chunkAvailNum++;

        chunkAvail[chunk] = PageNumber(length);
    }

    /* Flip the chunk mapping protection to the recorded flags. We could
     * also flip the protection for other mappings of the same chunk,
     * but it's easier to skip that and let further segfaults call
     * ensure again. */
    const void *chunkAddr = reinterpret_cast<const void *>
                            (reinterpret_cast<uintptr_t>(addrPage)
                             - mapOffset % zStream.GetChunkSize());
    const void *chunkEndAddr = reinterpret_cast<const void *>
                               (reinterpret_cast<uintptr_t>(chunkAddr) + length);

    const void *start = std::max(map->addr, chunkAddr);
    const void *end = std::min(map->end(), chunkEndAddr);
    length = reinterpret_cast<uintptr_t>(end)
             - reinterpret_cast<uintptr_t>(start);

    DEBUG_LOG("mprotect @%p, 0x%" PRIxSize ", 0x%x", start, length, map->prot);
    if (mprotect(const_cast<void *>(start), length, map->prot) == 0)
        return true;

    LOG("mprotect failed");
    return false;
}
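
ensure() leans on three page-arithmetic helpers: PageAlignedPtr() aligns the faulting address down to its page, PageAlignedSize() rounds the decompressed length up, and PageNumber() converts a byte count into the page count recorded in chunkAvail. Plausible sketches, assuming PageSize() returns a power-of-two system page size (illustrations only, not the linker's actual definitions):

/* Align an address down to the start of its containing page. */
static inline const void *
PageAlignedPtr(const void *addr)
{
    return reinterpret_cast<const void *>(
        reinterpret_cast<uintptr_t>(addr)
        & ~(static_cast<uintptr_t>(PageSize()) - 1));
}

/* Number of whole pages needed to cover |size| bytes. */
static inline size_t
PageNumber(size_t size)
{
    return (size + PageSize() - 1) / PageSize();
}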