/*
 * Initialize a spinlock whose acquire path is JIT-compiled into a private
 * RWX page.  The generated routine spins with LOCK CMPXCHG + PAUSE until it
 * swaps the lock word from 0 to 1.
 *
 * NOTE(review): the emitted CMPXCHG dereferences RCX, i.e. it assumes the
 * spinlock pointer arrives in RCX (Win64 first-argument register) — confirm
 * this matches the calling convention this code targets.
 */
void
gum_spinlock_init (GumSpinlock * spinlock)
{
  GumSpinlockImpl * self = (GumSpinlockImpl *) spinlock;
  GumX86Writer cw;
  gpointer try_again_label = "gum_spinlock_try_again";
  gpointer beach_label = "gum_spinlock_beach";

  self->is_held = FALSE;

  /* One executable page backs the generated acquire routine. */
  self->code = gum_alloc_n_pages (1, GUM_PAGE_RWX);

  gum_x86_writer_init (&cw, self->code);

  /* acquire_impl points at the first emitted instruction. */
  self->acquire_impl = GUM_POINTER_TO_FUNCPTR (GumSpinlockAcquireFunc,
      gum_x86_writer_cur (&cw));

  /* edx = desired value (1 = held). */
  gum_x86_writer_put_mov_reg_u32 (&cw, GUM_REG_EDX, 1);
  gum_x86_writer_put_label (&cw, try_again_label);
  /* eax = expected value (0 = free); CMPXCHG compares against EAX. */
  gum_x86_writer_put_mov_reg_u32 (&cw, GUM_REG_EAX, 0);
  /* Atomically: if (*rcx == eax) *rcx = edx; ZF set on success. */
  gum_x86_writer_put_lock_cmpxchg_reg_ptr_reg (&cw, GUM_REG_RCX, GUM_REG_EDX);
  gum_x86_writer_put_jcc_short_label (&cw, GUM_X86_JZ, beach_label,
      GUM_NO_HINT);
  /* PAUSE reduces contention while spinning, then retry. */
  gum_x86_writer_put_pause (&cw);
  gum_x86_writer_put_jmp_short_label (&cw, try_again_label);
  gum_x86_writer_put_label (&cw, beach_label);
  gum_x86_writer_put_ret (&cw);

  gum_x86_writer_free (&cw);
}
/* Prepare a Thumb relocator test fixture: back the Thumb writer with one
 * freshly allocated read/write page. */
static void
test_thumb_relocator_fixture_setup (TestThumbRelocatorFixture * fixture,
                                    gconstpointer data)
{
  guint8 * page;

  page = (guint8 *) gum_alloc_n_pages (1, GUM_PAGE_RW);
  fixture->output = page;

  gum_thumb_writer_init (&fixture->tw, page);
}
/*
 * Lock-free fixed-size-block allocator used as Capstone's malloc.
 *
 * Strategy: scan the global pool list for a pool whose block size matches
 * `size` and try to pop a free block from it with CAS.  If no block is
 * available, build a fresh pool of 100 blocks, publish it with CAS, and
 * retry from the top.  The outer do/while(TRUE) loop makes the whole
 * operation restartable under contention.
 */
static gpointer
gum_capstone_malloc (gsize size)
{
  do
  {
    GumPool * head, * pool;
    GumBlock * block, * next_block;
    gsize aligned_block_size, pool_size, pages;
    gpointer pool_start, pool_end;

    /* Snapshot the list head; used later as the CAS comparand when
     * publishing a new pool. */
    head = pools;
    pool = NULL;

    /* Fast path: pop a free block from an existing matching pool. */
    for (pool = pools; pool != NULL; pool = pool->next)
    {
      if (pool->block_size == size)
      {
        /* CAS-pop from the pool's free list; retry on contention,
         * bail out if the list drained. */
        do
        {
          block = pool->free;
          if (block == NULL)
            break;
        }
        while (!g_atomic_pointer_compare_and_exchange (&pool->free, block,
            block->next));

        if (block != NULL)
          return GUM_BLOCK_TO_DATA_POINTER (block);
      }
    }

    /* Slow path: build a new pool holding 100 blocks of this size,
     * rounded up to whole pages. */
    aligned_block_size = GUM_BLOCK_HEADER_SIZE + GUM_ALIGNED_SIZE (size);
    pool_size = GUM_POOL_HEADER_SIZE + (100 * aligned_block_size);
    pages = pool_size / page_size;
    if (pool_size % page_size != 0)
      pages++;

    pool_start = gum_alloc_n_pages (pages, GUM_PAGE_RW);
    pool_end = (guint8 *) pool_start + pool_size;
    pool = (GumPool *) pool_start;
    pool->block_size = size;

    /* Thread every block into the pool's free list.  `pool_end` marks
     * one past the 100th block, terminating the chain. */
    block = (GumBlock *) ((guint8 *) pool_start + GUM_POOL_HEADER_SIZE);
    pool->free = block;
    do
    {
      next_block = (GumBlock *) ((guint8 *) block + aligned_block_size);
      if (next_block == pool_end)
        next_block = NULL;
      block->pool = pool;
      block->next = next_block;
      block = next_block;
    }
    while (next_block != NULL);

    /* Publish the pool at the head of the global list.  If another
     * thread won the race, discard ours and retry from the top; on
     * success the next iteration pops a block from it. */
    pool->next = head;
    if (!g_atomic_pointer_compare_and_exchange (&pools, head, pool))
      gum_free_pages (pool);
  }
  while (TRUE);
}
/* Prepare an ARM64 relocator test fixture: allocate one writable page for
 * output and pin the writer's PC so tests see deterministic addresses. */
static void
test_arm64_relocator_fixture_setup (TestArm64RelocatorFixture * fixture,
                                    gconstpointer data)
{
  guint8 * page;

  page = (guint8 *) gum_alloc_n_pages (1, GUM_PAGE_RW);
  fixture->output = page;

  gum_arm64_writer_init (&fixture->aw, page);
  fixture->aw.pc = 1024;
}
/* Initialize a metal array with one page of backing storage; capacity is
 * however many elements of `element_size` fit in that page. */
void
gum_metal_array_init (GumMetalArray * array, guint element_size)
{
  array->element_size = element_size;
  array->length = 0;
  array->capacity = gum_query_page_size () / element_size;
  array->data = gum_alloc_n_pages (1, GUM_PAGE_RW);
}
/* GObject constructed hook: reserve the pool's address range with no access
 * and allocate zeroed per-block bookkeeping. */
static void
gum_page_pool_constructed (GObject * object)
{
  GumPagePool * self = GUM_PAGE_POOL (object);
  GumPagePoolPrivate * priv = GUM_PAGE_POOL_GET_PRIVATE (self);

  /* Zeroed bookkeeping entry per page. */
  priv->block_details = gum_malloc0 (priv->size * sizeof (GumBlockDetails));

  /* Every page starts out unclaimed. */
  priv->available = priv->size;

  /* Reserve the whole range inaccessible; pages are opened up as blocks
   * get handed out. */
  priv->pool = gum_alloc_n_pages (priv->size, GUM_PAGE_NO_ACCESS);
  priv->pool_end = priv->pool + (priv->size * priv->page_size);
}
/* Copy a code template into freshly allocated executable pages, releasing
 * any previously duplicated code first.  Returns the executable copy. */
static guint8 *
test_stalker_fixture_dup_code (TestStalkerFixture * fixture,
                               const guint8 * tpl_code,
                               guint tpl_size)
{
  guint n_pages;

  if (fixture->code != NULL)
    gum_free_pages (fixture->code);

  /* Round up generously: one page more than the quotient always holds
   * tpl_size bytes. */
  n_pages = (tpl_size / gum_query_page_size ()) + 1;
  fixture->code = (guint8 *) gum_alloc_n_pages (n_pages, GUM_PAGE_RWX);
  memcpy (fixture->code, tpl_code, tpl_size);

  return fixture->code;
}
/* Prepare the memory-access-monitor fixture: two RWX pages to watch, with a
 * callable no-op function planted at the start of the first page. */
static void
test_memory_access_monitor_fixture_setup (TestMAMonitorFixture * fixture,
                                          gconstpointer data)
{
  guint page_size = gum_query_page_size ();
  gpointer pages = gum_alloc_n_pages (2, GUM_PAGE_RWX);

  fixture->range.base_address = GUM_ADDRESS (pages);
  fixture->range.size = 2 * page_size;

  /* Probe offsets: middle of page one, and the same spot one page later. */
  fixture->offset_in_first_page = page_size / 2;
  fixture->offset_in_second_page = fixture->offset_in_first_page + page_size;

  /* Plant a lone `ret` (0xc3) so the first page is executable as a no-op. */
  *((guint8 *) fixture->range.base_address) = 0xc3; /* ret instruction */
  fixture->nop_function_in_first_page =
      GUM_POINTER_TO_FUNCPTR (GCallback, fixture->range.base_address);

  fixture->number_of_notifies = 0;
  fixture->monitor = NULL;
}
void lowlevel_helpers_init (void) { GumX86Writer cw; g_assert (clobber_test_function == NULL); clobber_test_function = GUM_POINTER_TO_FUNCPTR (ClobberTestFunc, gum_alloc_n_pages (1, GUM_PAGE_RWX)); gum_x86_writer_init (&cw, (gpointer) (gsize) clobber_test_function); gum_x86_writer_put_nop (&cw); gum_x86_writer_put_nop (&cw); gum_x86_writer_put_nop (&cw); gum_x86_writer_put_nop (&cw); gum_x86_writer_put_nop (&cw); gum_x86_writer_put_ret (&cw); gum_x86_writer_free (&cw); }
/* Grow the array's backing store, if needed, so it can hold at least
 * `capacity` elements.  Storage grows in whole pages; existing elements are
 * copied over and the old pages are released.  The resulting capacity is
 * whatever the allocated pages can actually hold (>= requested). */
void
gum_metal_array_ensure_capacity (GumMetalArray * self, guint capacity)
{
  guint bytes_needed, page_size, n_pages;
  gpointer storage;

  if (self->capacity >= capacity)
    return;

  /* Round the requested byte count up to whole pages. */
  page_size = gum_query_page_size ();
  bytes_needed = capacity * self->element_size;
  n_pages = bytes_needed / page_size;
  if (bytes_needed % page_size != 0)
    n_pages++;

  /* Move the live elements into the new storage, then swap it in. */
  storage = gum_alloc_n_pages (n_pages, GUM_PAGE_RW);
  gum_memcpy (storage, self->data, self->length * self->element_size);
  gum_free_pages (self->data);

  self->data = storage;
  self->capacity = (n_pages * page_size) / self->element_size;
}
/*
 * Allocate one batch of code pages near `spec` (or anywhere when spec is
 * NULL), carve it into slices, push all but one slice onto the allocator's
 * free list, and return the remaining slice.
 *
 * Two strategies:
 *   - RWX supported (or no code-segment API): plain page allocation, with
 *     the range cloaked so Gum's own pages stay invisible to the target.
 *   - Otherwise: a GumCodeSegment, which may fail near a spec -> NULL.
 *
 * Returns NULL when memory near the spec cannot be obtained.
 */
static GumCodeSlice *
gum_code_allocator_try_alloc_batch_near (GumCodeAllocator * self,
                                         const GumAddressSpec * spec)
{
  GumCodeSlice * result = NULL;
  gboolean rwx_supported, code_segment_supported;
  gsize page_size, size_in_pages, size_in_bytes;
  GumCodeSegment * segment;
  gpointer data;
  GumCodePages * pages;
  guint i;

  rwx_supported = gum_query_is_rwx_supported ();
  code_segment_supported = gum_code_segment_is_supported ();

  page_size = gum_query_page_size ();
  size_in_pages = self->pages_per_batch;
  size_in_bytes = size_in_pages * page_size;

  if (rwx_supported || !code_segment_supported)
  {
    GumPageProtection protection;
    GumMemoryRange range;

    /* Without RWX support the pages start RW and are made executable on
     * commit (see the uncommitted_pages bookkeeping below). */
    protection = rwx_supported ? GUM_PAGE_RWX : GUM_PAGE_RW;

    segment = NULL;
    if (spec != NULL)
    {
      /* Near-allocation can fail; propagate that to the caller. */
      data = gum_try_alloc_n_pages_near (size_in_pages, protection, spec);
      if (data == NULL)
        return NULL;
    }
    else
    {
      data = gum_alloc_n_pages (size_in_pages, protection);
    }

    /* Hide the batch from memory enumeration by the instrumented process. */
    gum_query_page_allocation_range (data, size_in_bytes, &range);
    gum_cloak_add_range (&range);
  }
  else
  {
    segment = gum_code_segment_new (size_in_bytes, spec);
    if (segment == NULL)
      return NULL;
    data = gum_code_segment_get_address (segment);
  }

  /* Batch metadata; ref_count is one per slice so the batch is released
   * when its last slice is freed. */
  pages = g_slice_alloc (self->pages_metadata_size);
  pages->ref_count = self->slices_per_batch;
  pages->segment = segment;
  pages->data = data;
  pages->size = size_in_bytes;
  pages->allocator = self;

  /* Walk the slices back-to-front, threading each element's embedded GList
   * node onto the free list -- except slice 0, which becomes the result. */
  for (i = self->slices_per_batch; i != 0; i--)
  {
    guint slice_index = i - 1;
    GumCodeSliceElement * element = &pages->elements[slice_index];
    GList * link;
    GumCodeSlice * slice;

    slice = &element->slice;
    slice->data = (guint8 *) data + (slice_index * self->slice_size);
    slice->size = self->slice_size;

    /* The element's intrusive list node carries the owning batch. */
    link = &element->parent;
    link->data = pages;
    link->prev = NULL;
    if (slice_index == 0)
    {
      link->next = NULL;
      result = slice;
    }
    else
    {
      if (self->free_slices != NULL)
        self->free_slices->prev = link;
      link->next = self->free_slices;
      self->free_slices = link;
    }
  }

  /* RW batches still need a later protection flip before execution. */
  if (!rwx_supported)
    self->uncommitted_pages = g_slist_prepend (self->uncommitted_pages,
        pages);

  g_hash_table_add (self->dirty_pages, pages);

  return result;
}