Example 1
void
gum_code_allocator_free_slice (GumCodeAllocator * self,
                               GumCodeSlice * slice)
{
  GumCodePage * cp;
  guint slice_idx;
  gboolean is_empty;

  /* Round the slice pointer down to its page boundary to find the owning page. */
  cp = (GumCodePage *) (GPOINTER_TO_SIZE (slice) & ~(self->page_size - 1));

  /* Without RWX support the page is mapped RX, so make it writable before
   * touching the slice metadata stored inside it. */
  if (!gum_query_is_rwx_supported ())
    gum_mprotect (cp, self->page_size, GUM_PAGE_RW);

  gum_code_slice_mark_free (slice);

  is_empty = TRUE;
  for (slice_idx = 0; slice_idx != self->slices_per_page; slice_idx++)
  {
    if (!gum_code_slice_is_free (&cp->slice[slice_idx]))
    {
      is_empty = FALSE;
      break;
    }
  }

  if (is_empty)
  {
    self->pages = gum_list_remove (self->pages, cp);
    gum_code_page_free (cp);
  }
  else if (!gum_query_is_rwx_supported ())
  {
    gum_mprotect (cp, self->page_size, GUM_PAGE_RX);
  }
}
Example 2
GumScriptBackend *
gum_script_backend_obtain_v8 (void)
{
  static volatile gsize gonce_value;

  if (g_once_init_enter (&gonce_value))
  {
    GumScriptBackend * backend = NULL;

    if (gum_query_is_rwx_supported ())
    {
#ifdef HAVE_V8
      backend = GUM_SCRIPT_BACKEND (
          g_object_new (GUM_V8_TYPE_SCRIPT_BACKEND, NULL));
#endif

      if (backend != NULL)
        _gum_register_early_destructor (gum_script_backend_deinit_v8);
    }

    /* Store pointer + 1 so the once-value is non-zero even when backend is NULL. */
    g_once_init_leave (&gonce_value, GPOINTER_TO_SIZE (backend) + 1);
  }

  /* Undo the +1 offset; a NULL result means V8 support is unavailable. */
  return GUM_SCRIPT_BACKEND (GSIZE_TO_POINTER (gonce_value - 1));
}
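
The +1 stored into the once-value deserves a note: g_once_init_leave() requires a non-zero value, and the offset guarantees this even when no V8 backend could be created, so a failed initialization is still remembered. The same pattern works for any lazily created singleton that may legitimately end up NULL. A minimal sketch, assuming a hypothetical Thing type and create_thing() factory (neither is part of Gum):

static Thing *
thing_obtain (void)
{
  static volatile gsize cached = 0;

  if (g_once_init_enter (&cached))
  {
    Thing * thing = create_thing ();  /* may legitimately return NULL */

    /* Store pointer + 1 so the once-value is non-zero even on failure. */
    g_once_init_leave (&cached, GPOINTER_TO_SIZE (thing) + 1);
  }

  /* Undo the offset; a failed creation comes back as NULL. */
  return GSIZE_TO_POINTER (cached - 1);
}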
Example 3
void
gum_code_slice_free (GumCodeSlice * slice)
{
  GumCodeSliceElement * element;
  GumCodePages * pages;

  if (slice == NULL)
    return;

  element = GUM_CODE_SLICE_ELEMENT_FROM_SLICE (slice);
  pages = element->parent.data;

  if (gum_query_is_rwx_supported ())
  {
    GumCodeAllocator * allocator = pages->allocator;
    GList * link = &element->parent;

    if (allocator->free_slices != NULL)
      allocator->free_slices->prev = link;
    link->next = allocator->free_slices;
    allocator->free_slices = link;
  }
  else
  {
    gum_code_pages_unref (pages);
  }
}
Example 4
void
gum_code_allocator_init (GumCodeAllocator * allocator,
                         guint slice_size)
{
  allocator->pages = NULL;
  allocator->page_size = gum_query_page_size ();

  allocator->slice_size = slice_size;

  if (gum_query_is_rwx_supported ())
  {
    allocator->header_size = 0;
    do
    {
      allocator->header_size += 16;
      allocator->slices_per_page =
          (allocator->page_size - allocator->header_size)
          / allocator->slice_size;
    }
    while (allocator->header_size <
        allocator->slices_per_page * sizeof (GumCodeSlice));
  }
  else
  {
    /*
     * We choose to waste some memory instead of risking stepping on existing
     * slices whenever a new one is to be initialized.
     */
    allocator->header_size = 16;
    allocator->slices_per_page = 1;
  }
}
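
The do/while loop grows the page header in 16-byte steps until it is large enough to hold one GumCodeSlice descriptor for every slice that still fits in the remainder of the page. A standalone sketch of the same computation, assuming a 4096-byte page, 128-byte slices and a 16-byte descriptor (all three sizes are assumptions for illustration, not values taken from Gum):

#include <stdio.h>

int
main (void)
{
  unsigned int page_size = 4096, slice_size = 128, descriptor_size = 16;
  unsigned int header_size = 0, slices_per_page;

  do
  {
    header_size += 16;
    slices_per_page = (page_size - header_size) / slice_size;
  }
  while (header_size < slices_per_page * descriptor_size);

  /* With the assumed sizes this settles at header_size == 448 and
   * slices_per_page == 28: 448 bytes of descriptors followed by
   * 28 * 128 = 3584 bytes of code fit within the 4096-byte page. */
  printf ("header=%u slices=%u\n", header_size, slices_per_page);
  return 0;
}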
Example 5
static GumCodePage *
gum_code_allocator_new_page_near (GumCodeAllocator * self,
                                  gpointer address)
{
  GumPageProtection prot;
  GumAddressSpec spec;
  GumCodePage * cp;
  guint slice_idx;

  prot = gum_query_is_rwx_supported () ? GUM_PAGE_RWX : GUM_PAGE_RW;

  spec.near_address = address;
  spec.max_distance = GUM_CODE_ALLOCATOR_MAX_DISTANCE;

  cp = (GumCodePage *) gum_alloc_n_pages_near (1, prot, &spec);

  for (slice_idx = 0; slice_idx != self->slices_per_page; slice_idx++)
  {
    GumCodeSlice * slice = &cp->slice[slice_idx];

    slice->data =
        (guint8 *) cp + self->header_size + (slice_idx * self->slice_size);
    slice->size = self->slice_size;
    gum_code_slice_mark_free (slice);
  }

  return cp;
}
Example 6
void
gum_code_allocator_commit (GumCodeAllocator * self)
{
  gboolean rwx_supported;
  GSList * cur;
  GumCodePages * pages;
  GHashTableIter iter;

  rwx_supported = gum_query_is_rwx_supported ();

  for (cur = self->uncommitted_pages; cur != NULL; cur = cur->next)
  {
    GumCodeSegment * segment;

    pages = cur->data;
    segment = pages->segment;

    if (segment != NULL)
    {
      gum_code_segment_realize (segment);
      gum_code_segment_map (segment, 0,
          gum_code_segment_get_virtual_size (segment),
          gum_code_segment_get_address (segment));
    }
    else
    {
      gum_mprotect (pages->data, pages->size, GUM_PAGE_RX);
    }
  }
  g_slist_free (self->uncommitted_pages);
  self->uncommitted_pages = NULL;

  /* Flush the instruction cache for every page that received freshly written code. */
  g_hash_table_iter_init (&iter, self->dirty_pages);
  while (g_hash_table_iter_next (&iter, (gpointer *) &pages, NULL))
  {
    gum_clear_cache (pages->data, pages->size);
  }
  g_hash_table_remove_all (self->dirty_pages);

  if (!rwx_supported)
  {
    g_list_foreach (self->free_slices, (GFunc) gum_code_pages_unref, NULL);
    self->free_slices = NULL;
  }
}
Example 7
GumCodeSlice *
gum_code_allocator_new_slice_near (GumCodeAllocator * self,
                                   gpointer address)
{
  GumList * walk;
  GumCodePage * cp;
  GumCodeSlice * slice;

  for (walk = self->pages; walk != NULL; walk = walk->next)
  {
    GumCodePage * page = (GumCodePage *) walk->data;

    if (gum_code_page_is_near (page, address))
    {
      guint slice_idx;

      for (slice_idx = 0; slice_idx != self->slices_per_page; slice_idx++)
      {
        slice = &page->slice[slice_idx];

        if (gum_code_slice_is_free (slice))
        {
          if (!gum_query_is_rwx_supported ())
            gum_mprotect (page, self->page_size, GUM_PAGE_RW);
          gum_code_slice_mark_taken (slice);
          return slice;
        }
      }
    }
  }

  cp = gum_code_allocator_new_page_near (self, address);
  self->pages = gum_list_prepend (self->pages, cp);

  slice = &cp->slice[0];
  gum_code_slice_mark_taken (slice);
  return slice;
}
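
Examples 1, 4 and 7 belong to the same page-based allocator API, so a caller would combine them roughly as sketched below; the 64-byte slice size and the write_trampoline_code() helper are purely illustrative and not part of Gum:

static void
emit_code_near (gpointer target)
{
  GumCodeAllocator allocator;
  GumCodeSlice * slice;

  gum_code_allocator_init (&allocator, 64);   /* assumed slice size */

  /* Grab a slice close to the target address and fill it with code. */
  slice = gum_code_allocator_new_slice_near (&allocator, target);
  write_trampoline_code (slice->data, slice->size, target);

  /* ... later, once the generated code is no longer needed ... */
  gum_code_allocator_free_slice (&allocator, slice);
}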
Example 8
static GumCodeSlice *
gum_code_allocator_try_alloc_batch_near (GumCodeAllocator * self,
                                         const GumAddressSpec * spec)
{
  GumCodeSlice * result = NULL;
  gboolean rwx_supported, code_segment_supported;
  gsize page_size, size_in_pages, size_in_bytes;
  GumCodeSegment * segment;
  gpointer data;
  GumCodePages * pages;
  guint i;

  rwx_supported = gum_query_is_rwx_supported ();
  code_segment_supported = gum_code_segment_is_supported ();

  page_size = gum_query_page_size ();
  size_in_pages = self->pages_per_batch;
  size_in_bytes = size_in_pages * page_size;

  if (rwx_supported || !code_segment_supported)
  {
    GumPageProtection protection;
    GumMemoryRange range;

    protection = rwx_supported ? GUM_PAGE_RWX : GUM_PAGE_RW;

    segment = NULL;
    if (spec != NULL)
    {
      data = gum_try_alloc_n_pages_near (size_in_pages, protection, spec);
      if (data == NULL)
        return NULL;
    }
    else
    {
      data = gum_alloc_n_pages (size_in_pages, protection);
    }

    gum_query_page_allocation_range (data, size_in_bytes, &range);
    gum_cloak_add_range (&range);
  }
  else
  {
    segment = gum_code_segment_new (size_in_bytes, spec);
    if (segment == NULL)
      return NULL;
    data = gum_code_segment_get_address (segment);
  }

  pages = g_slice_alloc (self->pages_metadata_size);
  pages->ref_count = self->slices_per_batch;

  pages->segment = segment;
  pages->data = data;
  pages->size = size_in_bytes;

  pages->allocator = self;

  /* Walk the slices from last to first: slice 0 is handed back to the caller,
   * the rest are pushed onto the allocator's free list. */
  for (i = self->slices_per_batch; i != 0; i--)
  {
    guint slice_index = i - 1;
    GumCodeSliceElement * element = &pages->elements[slice_index];
    GList * link;
    GumCodeSlice * slice;

    slice = &element->slice;
    slice->data = (guint8 *) data + (slice_index * self->slice_size);
    slice->size = self->slice_size;

    link = &element->parent;
    link->data = pages;
    link->prev = NULL;
    if (slice_index == 0)
    {
      link->next = NULL;
      result = slice;
    }
    else
    {
      if (self->free_slices != NULL)
        self->free_slices->prev = link;
      link->next = self->free_slices;
      self->free_slices = link;
    }
  }

  if (!rwx_supported)
    self->uncommitted_pages = g_slist_prepend (self->uncommitted_pages, pages);

  g_hash_table_add (self->dirty_pages, pages);

  return result;
}
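
Example 8 threads the spare slices onto an intrusive free list built from the GList node embedded in each GumCodeSliceElement, and Example 3 pushes a freed slice back onto that list. The matching pop operation is not shown in these examples; a generic sketch of how such an intrusive list would typically be consumed (hypothetical code, assuming the embedded GList node is the element's first member):

static GumCodeSlice *
pop_free_slice (GumCodeAllocator * self)
{
  GList * link = self->free_slices;
  GumCodeSliceElement * element;

  if (link == NULL)
    return NULL;

  /* Unlink the head node; the node itself lives inside the element. */
  self->free_slices = link->next;
  if (self->free_slices != NULL)
    self->free_slices->prev = NULL;
  link->next = NULL;

  element = (GumCodeSliceElement *) link;  /* assumes parent is the first member */
  return &element->slice;
}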