Example #1
void
_gum_jsc_process_init (GumJscProcess * self,
                       GumJscCore * core,
                       JSObjectRef scope)
{
  JSContextRef ctx = core->ctx;
  JSClassDefinition def;
  JSClassRef klass;
  JSObjectRef process;

  self->core = core;

  def = kJSClassDefinitionEmpty;
  def.className = "Process";
  def.staticFunctions = gumjs_process_functions;
  klass = JSClassCreate (&def);
  process = JSObjectMake (ctx, klass, self);
  JSClassRelease (klass);

  _gumjs_object_set_string (ctx, process, "arch", GUM_SCRIPT_ARCH);
  _gumjs_object_set_string (ctx, process, "platform", GUM_SCRIPT_PLATFORM);
  _gumjs_object_set_uint (ctx, process, "pageSize", gum_query_page_size ());
  _gumjs_object_set_uint (ctx, process, "pointerSize", GLIB_SIZEOF_VOID_P);

  _gumjs_object_set (ctx, scope, def.className, process);
}
Example #2
void
_gum_duk_process_init (GumDukProcess * self,
                       GumDukCore * core)
{
  GumDukScope scope = GUM_DUK_SCOPE_INIT (core);
  duk_context * ctx = scope.ctx;

  self->core = core;

  duk_push_c_function (ctx, gumjs_process_construct, 0);
  duk_push_object (ctx);
  duk_put_function_list (ctx, -1, gumjs_process_functions);
  duk_push_string (ctx, GUM_SCRIPT_ARCH);
  duk_put_prop_string (ctx, -2, "arch");
  duk_push_string (ctx, GUM_SCRIPT_PLATFORM);
  duk_put_prop_string (ctx, -2, "platform");
  duk_push_uint (ctx, gum_query_page_size ());
  duk_put_prop_string (ctx, -2, "pageSize");
  duk_push_uint (ctx, GLIB_SIZEOF_VOID_P);
  duk_put_prop_string (ctx, -2, "pointerSize");
  duk_put_prop_string (ctx, -2, "prototype");
  duk_new (ctx, 0);
  _gum_duk_put_data (ctx, -1, self);
  duk_put_global_string (ctx, "Process");
}
Example #3
static guint
gum_round_up_to_page_size (guint size)
{
  guint page_mask = gum_query_page_size () - 1;

  return (size + page_mask) & ~page_mask;
}
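Since page sizes are powers of two, page_mask has all of the low bits set, so adding it and then clearing those bits rounds any size up to the next page boundary without a division. A minimal standalone check of the same arithmetic, assuming a 4096-byte page (the helper below is a local stand-in for illustration, not part of Gum):

#include <assert.h>

static unsigned int
round_up_to_page_size (unsigned int size, unsigned int page_size)
{
  unsigned int page_mask = page_size - 1;

  return (size + page_mask) & ~page_mask;
}

int
main (void)
{
  assert (round_up_to_page_size (1, 4096) == 4096);
  assert (round_up_to_page_size (4096, 4096) == 4096);
  assert (round_up_to_page_size (4097, 4096) == 8192);

  return 0;
}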
Example #4
static gpointer
do_init (gpointer data)
{
  cs_opt_mem gum_cs_mem_callbacks = {
    gum_capstone_malloc,
    gum_capstone_calloc,
    gum_capstone_realloc,
    gum_capstone_free,
    gum_vsnprintf
  };

  page_size = gum_query_page_size ();

  gum_memory_init ();

#if GLIB_CHECK_VERSION (2, 46, 0)
  glib_init ();
  gio_init ();
#endif

  cs_option (0, CS_OPT_MEM, GPOINTER_TO_SIZE (&gum_cs_mem_callbacks));

  _gum_interceptor_init ();

  return NULL;
}
Example #5
static gpointer
do_init (gpointer data)
{
  GumFeatureFlags features = (GumFeatureFlags) GPOINTER_TO_INT (data);
  cs_opt_mem gum_cs_mem_callbacks = {
    gum_capstone_malloc,
    gum_capstone_calloc,
    gum_capstone_realloc,
    gum_capstone_free,
    gum_vsnprintf
  };

  (void) features;

  page_size = gum_query_page_size ();

  gum_memory_init ();

#if GLIB_CHECK_VERSION (2, 42, 0)
  glib_init ();
  gio_init ();
#endif

  cs_option (0, CS_OPT_MEM, GPOINTER_TO_SIZE (&gum_cs_mem_callbacks));

#ifdef HAVE_SYMBOL_BACKEND
  if ((features & GUM_FEATURE_SYMBOL_LOOKUP) != 0)
    _gum_symbol_util_init ();
#endif

  _gum_interceptor_init ();

  return NULL;
}
Example #6
void
gum_code_allocator_init (GumCodeAllocator * allocator,
                         guint slice_size)
{
  allocator->pages = NULL;
  allocator->page_size = gum_query_page_size ();

  allocator->slice_size = slice_size;

  if (gum_query_is_rwx_supported ())
  {
    allocator->header_size = 0;
    do
    {
      allocator->header_size += 16;
      allocator->slices_per_page =
          (allocator->page_size - allocator->header_size)
          / allocator->slice_size;
    }
    while (allocator->header_size <
        allocator->slices_per_page * sizeof (GumCodeSlice));
  }
  else
  {
    /*
     * We choose to waste some memory instead of risking stepping on existing
     * slices whenever a new one is to be initialized.
     */
    allocator->header_size = 16;
    allocator->slices_per_page = 1;
  }
}
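In the RWX case above, the do/while loop grows the page header in 16-byte steps until it is big enough to hold one bookkeeping record per slice that still fits in the remainder of the page. A standalone sketch of that search with assumed numbers (4096-byte page, 128-byte slices, 16-byte records; the real sizeof (GumCodeSlice) may differ):

#include <stdio.h>

int
main (void)
{
  unsigned int page_size = 4096, slice_size = 128, record_size = 16;
  unsigned int header_size = 0, slices_per_page;

  do
  {
    header_size += 16;
    slices_per_page = (page_size - header_size) / slice_size;
  }
  while (header_size < slices_per_page * record_size);

  /* With these numbers the loop settles on a 448-byte header
   * and 28 slices per page. */
  printf ("header_size=%u slices_per_page=%u\n", header_size, slices_per_page);

  return 0;
}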
Example #7
void
gum_darwin_enumerate_modules (mach_port_t task,
                              GumFoundModuleFunc func,
                              gpointer user_data)
{
  GumEnumerateModulesContext ctx;
  guint i;

  ctx.task = task;
  ctx.func = func;
  ctx.user_data = user_data;

  ctx.ranges = g_array_sized_new (FALSE, FALSE, sizeof (GumMemoryRange), 64);
  ctx.page_size = gum_query_page_size ();

  gum_darwin_enumerate_ranges (task, GUM_PAGE_RX,
      gum_store_range_of_potential_modules, &ctx);

  for (i = 0; i != ctx.ranges->len; i++)
  {
    GumMemoryRange * r = &g_array_index (ctx.ranges, GumMemoryRange, i);
    if (!gum_emit_modules_in_range (r, &ctx))
      break;
  }

  g_array_unref (ctx.ranges);
}
Example #8
static void
test_memory_access_monitor_fixture_setup (TestMAMonitorFixture * fixture,
                                          gconstpointer data)
{
  fixture->range.base_address = GUM_ADDRESS (gum_alloc_n_pages (2, GUM_PAGE_RWX));
  fixture->range.size = 2 * gum_query_page_size ();
  fixture->offset_in_first_page = gum_query_page_size () / 2;
  fixture->offset_in_second_page =
      fixture->offset_in_first_page + gum_query_page_size ();
  *((guint8 *) fixture->range.base_address) = 0xc3; /* ret instruction */
  fixture->nop_function_in_first_page =
      GUM_POINTER_TO_FUNCPTR (GCallback, fixture->range.base_address);

  fixture->number_of_notifies = 0;

  fixture->monitor = NULL;
}
Example #9
static void
gum_memory_access_monitor_init (GumMemoryAccessMonitor * self)
{
  self->priv = G_TYPE_INSTANCE_GET_PRIVATE (self,
      GUM_TYPE_MEMORY_ACCESS_MONITOR, GumMemoryAccessMonitorPrivate);

  self->priv->page_size = gum_query_page_size ();
}
Example #10
void
gum_metal_array_init (GumMetalArray * array,
                      guint element_size)
{
  array->data = gum_alloc_n_pages (1, GUM_PAGE_RW);
  array->length = 0;
  array->capacity = gum_query_page_size () / element_size;

  array->element_size = element_size;
}
Example #11
static gboolean
gum_memory_get_protection (GumAddress address,
                           gsize len,
                           GumPageProtection * prot)
{
  gboolean success = FALSE;
  MEMORY_BASIC_INFORMATION mbi;

  if (prot == NULL)
  {
    GumPageProtection ignored_prot;

    return gum_memory_get_protection (address, len, &ignored_prot);
  }

  *prot = GUM_PAGE_NO_ACCESS;

  if (len > 1)
  {
    GumAddress page_size, start_page, end_page, cur_page;

    page_size = gum_query_page_size ();

    start_page = address & ~(page_size - 1);
    end_page = (address + len - 1) & ~(page_size - 1);

    success = gum_memory_get_protection (start_page, 1, prot);

    for (cur_page = start_page + page_size;
        cur_page != end_page + page_size;
        cur_page += page_size)
    {
      GumPageProtection cur_prot;

      if (gum_memory_get_protection (cur_page, 1, &cur_prot))
      {
        success = TRUE;
        *prot &= cur_prot;
      }
      else
      {
        *prot = GUM_PAGE_NO_ACCESS;
        break;
      }
    }

    return success;
  }

  success = VirtualQuery (GSIZE_TO_POINTER (address), &mbi, sizeof (mbi)) != 0;
  if (success)
    *prot = gum_page_protection_from_windows (mbi.Protect);

  return success;
}
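For ranges longer than one page, the function queries each page separately and ANDs the results together, so the protection reported for the range is only what every page allows at once. A tiny standalone illustration of that intersection, using hypothetical flag values in place of GumPageProtection:

#include <assert.h>

enum { PAGE_READ = 1 << 0, PAGE_WRITE = 1 << 1, PAGE_EXECUTE = 1 << 2 };

int
main (void)
{
  /* Three consecutive pages: RW, RW, and read-only. */
  int pages[] = { PAGE_READ | PAGE_WRITE, PAGE_READ | PAGE_WRITE, PAGE_READ };
  int prot = pages[0];
  unsigned int i;

  for (i = 1; i != sizeof (pages) / sizeof (pages[0]); i++)
    prot &= pages[i];

  /* The single read-only page downgrades the whole range to read-only. */
  assert (prot == PAGE_READ);

  return 0;
}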
Example #12
static guint8 *
test_stalker_fixture_dup_code (TestStalkerFixture * fixture,
                               const guint8 * tpl_code,
                               guint tpl_size)
{
  if (fixture->code != NULL)
    gum_free_pages (fixture->code);
  fixture->code = (guint8 *) gum_alloc_n_pages (
      (tpl_size / gum_query_page_size ()) + 1, GUM_PAGE_RWX);
  memcpy (fixture->code, tpl_code, tpl_size);
  return fixture->code;
}
Example #13
GumAddress
gum_darwin_find_entrypoint (mach_port_t task)
{
  GumFindEntrypointContext ctx;

  ctx.result = 0;
  ctx.task = task;
  ctx.page_size = gum_query_page_size ();

  gum_darwin_enumerate_ranges (task, GUM_PAGE_RX,
      gum_probe_range_for_entrypoint, &ctx);

  return ctx.result;
}
Example #14
static void
gum_page_pool_init (GumPagePool * self)
{
  GumPagePoolPrivate * priv;

  self->priv = G_TYPE_INSTANCE_GET_PRIVATE (self, GUM_TYPE_PAGE_POOL,
      GumPagePoolPrivate);

  priv = GUM_PAGE_POOL_GET_PRIVATE (self);

  priv->page_size = gum_query_page_size ();
  priv->protect_mode = DEFAULT_PROTECT_MODE;
  priv->size = DEFAULT_POOL_SIZE;
  priv->front_alignment = DEFAULT_FRONT_ALIGNMENT;
}
Example #15
gpointer
gum_alloc_n_pages (guint n_pages,
                   GumPageProtection page_prot)
{
  guint size;
  DWORD win_page_prot;
  gpointer result;

  size = n_pages * gum_query_page_size ();
  win_page_prot = gum_page_protection_to_windows (page_prot);
  result = VirtualAlloc (NULL, size, MEM_COMMIT | MEM_RESERVE, win_page_prot);
  g_assert (result != NULL);

  return result;
}
Example #16
void
gum_code_allocator_init (GumCodeAllocator * allocator,
                         gsize slice_size)
{
  allocator->slice_size = slice_size;
  allocator->pages_per_batch = 7;
  allocator->slices_per_batch =
      (allocator->pages_per_batch * gum_query_page_size ()) / slice_size;
  allocator->pages_metadata_size = sizeof (GumCodePages) +
      ((allocator->slices_per_batch - 1) * sizeof (GumCodeSliceElement));

  allocator->uncommitted_pages = NULL;
  allocator->dirty_pages = g_hash_table_new (NULL, NULL);
  allocator->free_slices = NULL;

  allocator->dispatchers = NULL;
}
Example #17
static void
test_relocator_fixture_setup (TestRelocatorFixture * fixture,
                              gconstpointer data)
{
  guint page_size;
  guint8 stack_data[1] = { 42 };
  GumAddressSpec as;

  page_size = gum_query_page_size ();

  as.near_address = (gpointer) stack_data;
  as.max_distance = G_MAXINT32 - page_size;

  fixture->output = (guint8 *) gum_alloc_n_pages_near (1, GUM_PAGE_RWX, &as);
  memset (fixture->output, 0, page_size);

  gum_x86_writer_init (&fixture->cw, fixture->output);
}
Example #18
gpointer
gum_alloc_n_pages_near (guint n_pages,
                        GumPageProtection page_prot,
                        GumAddressSpec * address_spec)
{
  gpointer result = NULL;
  gsize page_size, size;
  DWORD win_page_prot;
  guint8 * low_address, * high_address;

  page_size = gum_query_page_size ();
  size = n_pages * page_size;
  win_page_prot = gum_page_protection_to_windows (page_prot);

  low_address = (guint8 *)
      (GPOINTER_TO_SIZE (address_spec->near_address) & ~(page_size - 1));
  high_address = low_address;

  do
  {
    gsize cur_distance;

    low_address -= page_size;
    high_address += page_size;
    cur_distance = (gsize) high_address - (gsize) address_spec->near_address;
    if (cur_distance > address_spec->max_distance)
      break;

    result = VirtualAlloc (low_address, size, MEM_COMMIT | MEM_RESERVE,
        win_page_prot);
    if (result == NULL)
    {
      result = VirtualAlloc (high_address, size, MEM_COMMIT | MEM_RESERVE,
          win_page_prot);
    }
  }
  while (result == NULL);

  g_assert (result != NULL);

  return result;
}
Example #19
void
gum_metal_array_ensure_capacity (GumMetalArray * self,
                                 guint capacity)
{
  guint size_in_bytes, page_size, size_in_pages;
  gpointer new_data;

  if (self->capacity >= capacity)
    return;

  size_in_bytes = capacity * self->element_size;
  page_size = gum_query_page_size ();
  size_in_pages = size_in_bytes / page_size;
  if (size_in_bytes % page_size != 0)
    size_in_pages++;

  new_data = gum_alloc_n_pages (size_in_pages, GUM_PAGE_RW);
  gum_memcpy (new_data, self->data, self->length * self->element_size);

  gum_free_pages (self->data);
  self->data = new_data;
  self->capacity = (size_in_pages * page_size) / self->element_size;
}
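Because the backing store grows in whole pages, the new capacity is recomputed from the rounded-up allocation and can end up larger than what was requested. A worked example of that arithmetic, assuming a 4096-byte page and 24-byte elements:

#include <assert.h>

int
main (void)
{
  unsigned int element_size = 24, capacity = 200, page_size = 4096;
  unsigned int size_in_bytes = capacity * element_size; /* 4800 */
  unsigned int size_in_pages = size_in_bytes / page_size;

  if (size_in_bytes % page_size != 0)
    size_in_pages++; /* 4800 bytes need two whole pages */

  assert (size_in_pages == 2);
  /* Room for 341 elements, not just the 200 that were asked for. */
  assert ((size_in_pages * page_size) / element_size == 341);

  return 0;
}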
Example #20
static GumCodeDeflectorDispatcher *
gum_code_deflector_dispatcher_new (const GumAddressSpec * caller,
                                   gpointer return_address,
                                   gpointer dedicated_target)
{
#if defined (HAVE_DARWIN) || (defined (HAVE_LINUX) && GLIB_SIZEOF_VOID_P == 4)
  GumCodeDeflectorDispatcher * dispatcher;
  GumProbeRangeForCodeCaveContext probe_ctx;
  GumInsertDeflectorContext insert_ctx;

  probe_ctx.caller = caller;

  probe_ctx.cave.base_address = 0;
  probe_ctx.cave.size = 0;

  gum_process_enumerate_modules (gum_probe_module_for_code_cave, &probe_ctx);

  if (probe_ctx.cave.base_address == 0)
    return NULL;

  dispatcher = g_slice_new0 (GumCodeDeflectorDispatcher);

  dispatcher->address = GSIZE_TO_POINTER (probe_ctx.cave.base_address);

  dispatcher->original_data = g_memdup (dispatcher->address,
      probe_ctx.cave.size);
  dispatcher->original_size = probe_ctx.cave.size;

  if (dedicated_target == NULL)
  {
    gsize thunk_size;
    GumMemoryRange range;

    thunk_size = gum_query_page_size ();

    dispatcher->thunk =
        gum_memory_allocate (NULL, thunk_size, thunk_size, GUM_PAGE_RW);
    dispatcher->thunk_size = thunk_size;

    gum_memory_patch_code (dispatcher->thunk, GUM_MAX_CODE_DEFLECTOR_THUNK_SIZE,
        (GumMemoryPatchApplyFunc) gum_write_thunk, dispatcher);

    range.base_address = GUM_ADDRESS (dispatcher->thunk);
    range.size = thunk_size;
    gum_cloak_add_range (&range);
  }

  insert_ctx.pc = GUM_ADDRESS (dispatcher->address);
  insert_ctx.max_size = dispatcher->original_size;
  insert_ctx.return_address = return_address;
  insert_ctx.dedicated_target = dedicated_target;

  insert_ctx.dispatcher = dispatcher;

  gum_memory_patch_code (dispatcher->address, dispatcher->original_size,
      (GumMemoryPatchApplyFunc) gum_insert_deflector, &insert_ctx);

  return dispatcher;
#else
  (void) gum_insert_deflector;
  (void) gum_write_thunk;
  (void) gum_probe_module_for_code_cave;

  return NULL;
#endif
}
Example #21
static GumCodeSlice *
gum_code_allocator_try_alloc_batch_near (GumCodeAllocator * self,
                                         const GumAddressSpec * spec)
{
  GumCodeSlice * result = NULL;
  gboolean rwx_supported, code_segment_supported;
  gsize page_size, size_in_pages, size_in_bytes;
  GumCodeSegment * segment;
  gpointer data;
  GumCodePages * pages;
  guint i;

  rwx_supported = gum_query_is_rwx_supported ();
  code_segment_supported = gum_code_segment_is_supported ();

  page_size = gum_query_page_size ();
  size_in_pages = self->pages_per_batch;
  size_in_bytes = size_in_pages * page_size;

  if (rwx_supported || !code_segment_supported)
  {
    GumPageProtection protection;
    GumMemoryRange range;

    protection = rwx_supported ? GUM_PAGE_RWX : GUM_PAGE_RW;

    segment = NULL;
    if (spec != NULL)
    {
      data = gum_try_alloc_n_pages_near (size_in_pages, protection, spec);
      if (data == NULL)
        return NULL;
    }
    else
    {
      data = gum_alloc_n_pages (size_in_pages, protection);
    }

    gum_query_page_allocation_range (data, size_in_bytes, &range);
    gum_cloak_add_range (&range);
  }
  else
  {
    segment = gum_code_segment_new (size_in_bytes, spec);
    if (segment == NULL)
      return NULL;
    data = gum_code_segment_get_address (segment);
  }

  pages = g_slice_alloc (self->pages_metadata_size);
  pages->ref_count = self->slices_per_batch;

  pages->segment = segment;
  pages->data = data;
  pages->size = size_in_bytes;

  pages->allocator = self;

  for (i = self->slices_per_batch; i != 0; i--)
  {
    guint slice_index = i - 1;
    GumCodeSliceElement * element = &pages->elements[slice_index];
    GList * link;
    GumCodeSlice * slice;

    slice = &element->slice;
    slice->data = (guint8 *) data + (slice_index * self->slice_size);
    slice->size = self->slice_size;

    link = &element->parent;
    link->data = pages;
    link->prev = NULL;
    if (slice_index == 0)
    {
      link->next = NULL;
      result = slice;
    }
    else
    {
      if (self->free_slices != NULL)
        self->free_slices->prev = link;
      link->next = self->free_slices;
      self->free_slices = link;
    }
  }

  if (!rwx_supported)
    self->uncommitted_pages = g_slist_prepend (self->uncommitted_pages, pages);

  g_hash_table_add (self->dirty_pages, pages);

  return result;
}
Example #22
static gpointer
gum_capstone_malloc (gsize size)
{
  guint page_size;

  page_size = gum_query_page_size ();

  do
  {
    GumPool * head, * pool;
    GumBlock * block, * next_block;
    gsize aligned_block_size, pool_size, pages;
    gpointer pool_start, pool_end;

    head = pools;
    pool = NULL;
    for (pool = pools; pool != NULL; pool = pool->next)
    {
      if (pool->block_size == size)
      {
        do
        {
          block = pool->free;
          if (block == NULL)
            break;
        }
        while (!g_atomic_pointer_compare_and_exchange (&pool->free, block,
            block->next));

        if (block != NULL)
          return GUM_BLOCK_TO_DATA_POINTER (block);
      }
    }

    aligned_block_size = GUM_BLOCK_HEADER_SIZE + GUM_ALIGNED_SIZE (size);
    pool_size = GUM_POOL_HEADER_SIZE + (100 * aligned_block_size);
    pages = pool_size / page_size;
    if (pool_size % page_size != 0)
      pages++;

    pool_start = gum_alloc_n_pages (pages, GUM_PAGE_RW);
    pool_end = (guint8 *) pool_start + pool_size;
    pool = (GumPool *) pool_start;
    pool->block_size = size;
    block = (GumBlock *) ((guint8 *) pool_start + GUM_POOL_HEADER_SIZE);
    pool->free = block;
    do
    {
      next_block = (GumBlock *) ((guint8 *) block + aligned_block_size);
      if (next_block == pool_end)
        next_block = NULL;
      block->pool = pool;
      block->next = next_block;
      block = next_block;
    }
    while (next_block != NULL);
    pool->next = head;
    if (!g_atomic_pointer_compare_and_exchange (&pools, head, pool))
      gum_free_pages (pool);
  }
  while (TRUE);
}
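The fast path pops a block off the pool's freelist with a compare-and-exchange instead of a lock. The matching deallocation side is not shown above; the following is only a plausible sketch of how a block could be pushed back, reusing the GumPool/GumBlock layout from the allocator and assuming a GUM_DATA_POINTER_TO_BLOCK macro (hypothetical here) that is the inverse of GUM_BLOCK_TO_DATA_POINTER:

static void
gum_capstone_free_sketch (gpointer mem)
{
  GumBlock * block, * cur_head;

  if (mem == NULL)
    return;

  /* Hypothetical inverse of GUM_BLOCK_TO_DATA_POINTER. */
  block = GUM_DATA_POINTER_TO_BLOCK (mem);

  /* Push the block back onto its pool's lock-free freelist. */
  do
  {
    cur_head = block->pool->free;
    block->next = cur_head;
  }
  while (!g_atomic_pointer_compare_and_exchange (&block->pool->free, cur_head,
      block));
}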