Example #1
void*
public_mEMALIGn(size_t alignment, size_t bytes)
{
  struct malloc_arena* ar_ptr;
  void *p;

  void * (*hook) (size_t, size_t, const void *) = __memalign_hook;
  if (hook != NULL)
    return (*hook)(alignment, bytes, RETURN_ADDRESS (0));

  /* If the caller needs less alignment than we provide anyway, just relay to malloc.  */
  if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size */
  if (alignment <  MIN_CHUNK_SIZE)
    alignment = MIN_CHUNK_SIZE;

  arena_get(ar_ptr,
	    bytes + FOOTER_OVERHEAD + alignment + MIN_CHUNK_SIZE);
  if(!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  p = mspace_memalign(arena_to_mspace(ar_ptr), alignment, bytes);

  if (p && ar_ptr != &main_arena)
    set_non_main_arena(p, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!p || is_mmapped(mem2chunk(p)) ||
	 ar_ptr == arena_for_chunk(mem2chunk(p)));
  return p;
}
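
Every example on this page hinges on a RETURN_ADDRESS macro that captures the address the current function will return to. For reference, a minimal sketch of the glibc-style definition on top of GCC builtins; other projects below (mono, xmon, sampgdk, MME) define their own variants, including zero-argument ones:

/* Sketch of the glibc-style definition, assuming GCC builtins. */
#define RETURN_ADDRESS(nr) \
  __builtin_extract_return_address (__builtin_return_address (nr))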
Example #2
int
internal_function
_dl_make_stack_executable (void **stack_endp)
{
  /* This gives us the highest/lowest page that needs to be changed.  */
  uintptr_t page = ((uintptr_t) *stack_endp
		    & -(intptr_t) GLRO(dl_pagesize));
  int result = 0;

  /* Challenge the caller.  */
  if (__builtin_expect (__check_caller (RETURN_ADDRESS (0),
					allow_ldso|allow_libpthread) != 0, 0)
      || __builtin_expect (*stack_endp != __libc_stack_end, 0))
    return EPERM;

  if (__builtin_expect (__mprotect ((void *) page, GLRO(dl_pagesize),
				    __stack_prot) == 0, 1))
    goto return_success;
  result = errno;
  goto out;

 return_success:
  /* Clear the address.  */
  *stack_endp = NULL;

  /* Remember that we changed the permission.  */
  GL(dl_stack_flags) |= PF_X;

 out:
#ifdef check_consistency
  check_consistency ();
#endif

  return result;
}
Example #3
void
mono_trace_enter_method (MonoMethod *method, char *ebp)
{
	int i, j;
	MonoClass *klass;
	MonoObject *o;
	MonoJitArgumentInfo *arg_info;
	MonoMethodSignature *sig;
	char *fname;
	MonoGenericSharingContext *gsctx = NULL;

	if (!trace_spec.enabled)
		return;

	while (output_lock != 0 || InterlockedCompareExchange (&output_lock, 1, 0) != 0)
		mono_thread_info_yield ();

	fname = mono_method_full_name (method, TRUE);
	indent (1);
	printf ("ENTER: %s(", fname);
	g_free (fname);

	if (!ebp) {
		printf (") ip: %p\n", RETURN_ADDRESS_N (1));
		goto unlock;
	}

	sig = mono_method_signature (method);

	arg_info = alloca (sizeof (MonoJitArgumentInfo) * (sig->param_count + 1));

	if (method->is_inflated) {
		/* FIXME: Might be better to pass the ji itself */
		MonoJitInfo *ji = mini_jit_info_table_find (mono_domain_get (), RETURN_ADDRESS (), NULL);
		if (ji) {
			gsctx = mono_jit_info_get_generic_sharing_context (ji);
			if (gsctx && gsctx->is_gsharedvt) {
				/* Needs a ctx to get precise method */
				printf (") <gsharedvt>\n");
				goto unlock;
			}
		}
	}

	mono_arch_get_argument_info (sig, sig->param_count, arg_info);

	if (MONO_TYPE_ISSTRUCT (mono_method_signature (method)->ret)) {
		g_assert (!mono_method_signature (method)->ret->byref);

		printf ("VALUERET:%p, ", *((gpointer *)(ebp + 8)));
	}

	if (mono_method_signature (method)->hasthis) {
		gpointer *this = (gpointer *)(ebp + arg_info [0].offset);
		if (method->klass->valuetype) {
			printf ("value:%p, ", *arg_in_stack_slot(this, gpointer *));
		} else {
Example #4
/* Function: starter_main
 * Description: Called by start() in starter.S; jumps to the xmon loader.
 *              This function never returns.
 * Input: Registers pushed right to left:
 *        eip0 - return address on stack,
 *        pushal - eax, ecx, edx, ebx, esp, ebp, esi, edi
 *        pushfl - flags
 */
void starter_main(uint32_t eflags,
		  uint32_t edi,
		  uint32_t esi,
		  uint32_t ebp,
		  uint32_t esp,
		  uint32_t ebx,
		  uint32_t edx,
		  uint32_t ecx,
		  uint32_t eax,
		  uint32_t eip0)
{
	uint32_t eip1;
	xmon_desc_t *td;
	mon_guest_cpu_startup_state_t *s;

	/* Locate the xmon descriptor from our own return address: clear the
	   low 8 bits and step back 0x400 bytes.  */
	eip1 = (uint32_t)RETURN_ADDRESS();
	td = (xmon_desc_t *)((eip1 & 0xffffff00) - 0x400);

	mon_memset((void *)GUEST1_BASE(td),
		0, XMON_LOADER_BASE(td) - GUEST1_BASE(td)
		);

	s = (mon_guest_cpu_startup_state_t *)GUEST1_BASE(td);
	s->gp.reg[IA32_REG_RIP] = eip0;
	s->gp.reg[IA32_REG_RFLAGS] = eflags;
	s->gp.reg[IA32_REG_RAX] = eax;
	s->gp.reg[IA32_REG_RCX] = ecx;
	s->gp.reg[IA32_REG_RDX] = edx;
	s->gp.reg[IA32_REG_RBX] = ebx;
	s->gp.reg[IA32_REG_RSP] = esp + 4;
	s->gp.reg[IA32_REG_RBP] = ebp;
	s->gp.reg[IA32_REG_RSI] = esi;
	s->gp.reg[IA32_REG_RDI] = edi;


	save_cpu_state(s);

	if (check_vmx_support() != 0) {
		goto error;
	}

	run_xmon_loader(td);

error:

	/* clean memory */

	mon_memset((void *)((uint32_t)td + td->xmon_loader_start * 512),
		0, XMON_LOADER_HEAP_BASE(td) + XMON_LOADER_HEAP_SIZE -
		(td->xmon_loader_start) * 512);

	while (1) {
	}
}
Example #5
void*
public_rEALLOc(void* oldmem, size_t bytes)
{
  struct malloc_arena* ar_ptr;

  mchunkptr oldp;             /* chunk corresponding to oldmem */

  void* newp;             /* chunk to return */

  void * (*hook) (void *, size_t, const void *) = __realloc_hook;
  if (hook != NULL)
    return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));

#if REALLOC_ZERO_BYTES_FREES
  if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
#endif

  /* realloc of null is supposed to be same as malloc */
  if (oldmem == 0)
    return public_mALLOc(bytes);

  oldp    = mem2chunk(oldmem);
  if (is_mmapped(oldp))
    ar_ptr = arena_for_mmap_chunk(oldp); /* FIXME: use mmap_resize */
  else
    ar_ptr = arena_for_chunk(oldp);
#if THREAD_STATS
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif

#ifndef NO_THREADS
  /* As in malloc(), remember this arena for the next allocation. */
  tsd_setspecific(arena_key, (void *)ar_ptr);
#endif

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  newp = mspace_realloc(arena_to_mspace(ar_ptr), oldmem, bytes);

  if (newp && ar_ptr != &main_arena)
    set_non_main_arena(newp, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!newp || is_mmapped(mem2chunk(newp)) ||
	 ar_ptr == arena_for_chunk(mem2chunk(newp)));
  return newp;
}
Example #6
void *
__dlvsym (void *handle, const char *name, const char *version_str)
{
  struct dlvsym_args args;

  args.handle = handle;
  args.name = name;
  args.who = RETURN_ADDRESS (0);
  args.version = version_str;

  return (_dlerror_run (dlvsym_doit, &args) ? NULL : args.sym);
}
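
__dlvsym records its caller with RETURN_ADDRESS (0) so that the lookup can be resolved relative to the calling object. A hedged usage sketch of the public dlvsym entry point (a GNU extension); the library name, symbol, and version tag are placeholders:

#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>

int main (void)
{
  void *handle = dlopen ("libfoo.so", RTLD_LAZY);   /* placeholder library */
  if (handle != NULL)
    {
      /* Ask for the VERS_1.1 version of foo rather than the default one. */
      int (*fn) (void) = (int (*) (void)) dlvsym (handle, "foo", "VERS_1.1");
      if (fn != NULL)
        printf ("foo@VERS_1.1 -> %d\n", fn ());
      dlclose (handle);
    }
  return 0;
}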
Example #7
/**
  Frees pool.

  @param  Buffer                 The allocated pool entry to free

  @retval EFI_INVALID_PARAMETER  Buffer is not a valid value.
  @retval EFI_SUCCESS            Pool successfully freed.

**/
EFI_STATUS
EFIAPI
CoreFreePool (
  IN VOID  *Buffer
  )
{
  EFI_STATUS  Status;

  Status = CoreInternalFreePool (Buffer);
  if (!EFI_ERROR (Status)) {
    CoreUpdateProfile ((EFI_PHYSICAL_ADDRESS) (UINTN) RETURN_ADDRESS (0), MemoryProfileActionFreePool, 0, 0, Buffer);
  }
  return Status;
}
Example #8
void*
public_cALLOc(size_t n_elements, size_t elem_size)
{
  struct malloc_arena* ar_ptr;
  size_t bytes, sz;
  void* mem;
  void * (*hook) (size_t, const void *) = __malloc_hook;

  /* size_t is unsigned so the behavior on overflow is defined.  */
  bytes = n_elements * elem_size;
#define HALF_INTERNAL_SIZE_T \
  (((size_t) 1) << (8 * sizeof (size_t) / 2))
  if (__builtin_expect ((n_elements | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
    if (elem_size != 0 && bytes / elem_size != n_elements) {
      /*MALLOC_FAILURE_ACTION;*/
      return 0;
    }
  }

  if (hook != NULL) {
    sz = bytes;
    mem = (*hook)(sz, RETURN_ADDRESS (0));
    if(mem == 0)
      return 0;
#ifdef HAVE_MEMCPY
    return memset(mem, 0, sz);
#else
    while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
    return mem;
#endif
  }

  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD);
  if(!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  mem = mspace_calloc(arena_to_mspace(ar_ptr), bytes, 1);

  if (mem && ar_ptr != &main_arena)
    set_non_main_arena(mem, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);
  
  assert(!mem || is_mmapped(mem2chunk(mem)) ||
	 ar_ptr == arena_for_chunk(mem2chunk(mem)));

  return mem;
}
Example #9
/**
  Frees previously allocated pages.

  @param  Memory                 Base address of memory being freed.
  @param  NumberOfPages          The number of pages to free.

  @retval EFI_NOT_FOUND          Could not find the entry that covers the range.
  @retval EFI_INVALID_PARAMETER  Address not aligned.
  @retval EFI_SUCCESS            Pages successfully freed.

**/
EFI_STATUS
EFIAPI
SmmFreePages (
  IN EFI_PHYSICAL_ADDRESS  Memory,
  IN UINTN                 NumberOfPages
  )
{
  EFI_STATUS  Status;

  Status = SmmInternalFreePages (Memory, NumberOfPages);
  if (!EFI_ERROR (Status)) {
    SmmCoreUpdateProfile ((EFI_PHYSICAL_ADDRESS) (UINTN) RETURN_ADDRESS (0), MemoryProfileActionFreePages, 0, EFI_PAGES_TO_SIZE (NumberOfPages), (VOID *) (UINTN) Memory);
  }
  return Status;
}
Example #10
/**
  Allocate pool of a particular type.

  @param  PoolType               Type of pool to allocate
  @param  Size                   The amount of pool to allocate
  @param  Buffer                 The address to return a pointer to the allocated
                                 pool

  @retval EFI_INVALID_PARAMETER  PoolType not valid or Buffer is NULL.
  @retval EFI_OUT_OF_RESOURCES   Size exceeds max pool size or allocation failed.
  @retval EFI_SUCCESS            Pool successfully allocated.

**/
EFI_STATUS
EFIAPI
CoreAllocatePool (
  IN EFI_MEMORY_TYPE  PoolType,
  IN UINTN            Size,
  OUT VOID            **Buffer
  )
{
  EFI_STATUS  Status;

  Status = CoreInternalAllocatePool (PoolType, Size, Buffer);
  if (!EFI_ERROR (Status)) {
    CoreUpdateProfile ((EFI_PHYSICAL_ADDRESS) (UINTN) RETURN_ADDRESS (0), MemoryProfileActionAllocatePool, PoolType, Size, *Buffer);
  }
  return Status;
}
Example #11
/**
  Allocates pages from the memory map.

  @param  Type                   The type of allocation to perform.
  @param  MemoryType             The type of memory to turn the allocated pages
                                 into.
  @param  NumberOfPages          The number of pages to allocate.
  @param  Memory                 A pointer to receive the base allocated memory
                                 address.

  @retval EFI_INVALID_PARAMETER  Parameters violate checking rules defined in spec.
  @retval EFI_NOT_FOUND          Could not allocate pages matching the requirement.
  @retval EFI_OUT_OF_RESOURCES   Not enough pages to allocate.
  @retval EFI_SUCCESS            Pages successfully allocated.

**/
EFI_STATUS
EFIAPI
SmmAllocatePages (
  IN  EFI_ALLOCATE_TYPE     Type,
  IN  EFI_MEMORY_TYPE       MemoryType,
  IN  UINTN                 NumberOfPages,
  OUT EFI_PHYSICAL_ADDRESS  *Memory
  )
{
  EFI_STATUS  Status;

  Status = SmmInternalAllocatePages (Type, MemoryType, NumberOfPages, Memory);
  if (!EFI_ERROR (Status)) {
    SmmCoreUpdateProfile ((EFI_PHYSICAL_ADDRESS) (UINTN) RETURN_ADDRESS (0), MemoryProfileActionAllocatePages, MemoryType, EFI_PAGES_TO_SIZE (NumberOfPages), (VOID *) (UINTN) *Memory);
  }
  return Status;
}
Example #12
void free (void *ptr)
{
  void *caller;
  if (no_hook) {
    _do_free(ptr, freep);
    goto _return;
  }
  no_hook = 1;
  caller = RETURN_ADDRESS(0);
  if (g_tasklet_info) MEM_ALLOC_PRINTF("%p-%p free(%p", caller, g_tasklet_info, ptr);
  _do_free(ptr, freep);
  if (g_tasklet_info) MEM_ALLOC_PRINTF(") -> \n");
  no_hook = 0;

_return:
  return;
  caller = caller;  /* never executed; keeps 'caller' referenced so it is not flagged as unused */
}
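
Example #13 below follows the same interposition pattern: a no_hook flag suppresses recursive tracing while the real allocator runs. For comparison, a minimal sketch of the same pattern applied to malloc, assuming mallocp was resolved earlier (e.g. with dlsym (RTLD_NEXT, "malloc")) and that no_hook, g_tasklet_info, and the MEM_ALLOC_PRINTF macro come from the surrounding project:

#include <stddef.h>

extern int no_hook;                    /* project-provided re-entry guard */
extern void *g_tasklet_info;           /* project-provided tasklet context */
extern void *(*mallocp) (size_t);      /* real malloc, resolved beforehand */

void *malloc (size_t size)
{
  void *ret;
  void *caller;

  if (no_hook)
    return (*mallocp) (size);

  no_hook = 1;                         /* block recursive tracing */
  caller = RETURN_ADDRESS (0);
  if (g_tasklet_info) MEM_ALLOC_PRINTF ("%p-%p malloc(%zu", caller, g_tasklet_info, size);
  ret = (*mallocp) (size);
  if (g_tasklet_info) MEM_ALLOC_PRINTF (") -> %p\n", ret);
  no_hook = 0;

  return ret;
}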
Example #13
void *memalign(size_t len, size_t size)
{
  void *ret;
  void *caller;
  if (no_hook) {
    ret = (*memalignp)(len, size);
    goto _return;
  }
  no_hook = 1;
  caller = RETURN_ADDRESS(0);
  if (g_tasklet_info) MEM_ALLOC_PRINTF("%p-%p memalign(%zu, %zu", caller, g_tasklet_info, len, size);
  ret = (*memalignp)(len, size);
  if (g_tasklet_info) MEM_ALLOC_PRINTF(") -> %p\n", ret);
  no_hook = 0;

_return:
  return ret;
  caller = caller;  /* never executed; keeps 'caller' referenced so it is not flagged as unused */
}
Example #14
VOID *
EFIAPI
AllocateRuntimeZeroPool (
  IN UINTN  AllocationSize
  )
{
  VOID  *Buffer;

  Buffer = InternalAllocateZeroPool (EfiRuntimeServicesData, AllocationSize);
  if (Buffer != NULL) {
    MemoryProfileLibRecord (
      (PHYSICAL_ADDRESS) (UINTN) RETURN_ADDRESS(0),
      MEMORY_PROFILE_ACTION_LIB_ALLOCATE_RUNTIME_ZERO_POOL,
      EfiRuntimeServicesData,
      Buffer,
      AllocationSize,
      NULL
      );
  }
  return Buffer;
}
Example #15
VOID *
EFIAPI
AllocateReservedZeroPool (
  IN UINTN  AllocationSize
  )
{
  VOID  *Buffer;

  Buffer = InternalAllocateZeroPool (EfiReservedMemoryType, AllocationSize);
  if (Buffer != NULL) {
    MemoryProfileLibRecord (
      (PHYSICAL_ADDRESS) (UINTN) RETURN_ADDRESS(0),
      MEMORY_PROFILE_ACTION_LIB_ALLOCATE_RESERVED_ZERO_POOL,
      EfiReservedMemoryType,
      Buffer,
      AllocationSize,
      NULL
      );
  }
  return Buffer;
}
Example #16
VOID *
EFIAPI
AllocatePages (
  IN UINTN  Pages
  )
{
  VOID  *Buffer;

  Buffer = InternalAllocatePages (EfiBootServicesData, Pages);
  if (Buffer != NULL) {
    MemoryProfileLibRecord (
      (PHYSICAL_ADDRESS) (UINTN) RETURN_ADDRESS(0),
      MEMORY_PROFILE_ACTION_LIB_ALLOCATE_PAGES,
      EfiBootServicesData,
      Buffer,
      EFI_PAGES_TO_SIZE (Pages),
      NULL
      );
  }
  return Buffer;
}
Example #17
VOID *
EFIAPI
AllocateReservedCopyPool (
  IN UINTN       AllocationSize,
  IN CONST VOID  *Buffer
  )
{
  VOID  *NewBuffer;

  NewBuffer = InternalAllocateCopyPool (EfiReservedMemoryType, AllocationSize, Buffer);
  if (NewBuffer != NULL) {
    MemoryProfileLibRecord (
      (PHYSICAL_ADDRESS) (UINTN) RETURN_ADDRESS(0),
      MEMORY_PROFILE_ACTION_LIB_ALLOCATE_RESERVED_COPY_POOL,
      EfiReservedMemoryType,
      NewBuffer,
      AllocationSize,
      NULL
      );
  }
  return NewBuffer;
}
Example #18
VOID *
EFIAPI
AllocateAlignedReservedPages (
  IN UINTN  Pages,
  IN UINTN  Alignment
  )
{
  VOID  *Buffer;

  Buffer = InternalAllocateAlignedPages (EfiReservedMemoryType, Pages, Alignment);
  if (Buffer != NULL) {
    MemoryProfileLibRecord (
      (PHYSICAL_ADDRESS) (UINTN) RETURN_ADDRESS(0),
      MEMORY_PROFILE_ACTION_LIB_ALLOCATE_ALIGNED_RESERVED_PAGES,
      EfiReservedMemoryType,
      Buffer,
      EFI_PAGES_TO_SIZE (Pages),
      NULL
      );
  }
  return Buffer;
}
Example #19
void*
public_mALLOc(size_t bytes)
{
  struct malloc_arena* ar_ptr;
  void *victim;

  void * (*hook) (size_t, const void *) = __malloc_hook;
  if (hook != NULL)
    return (*hook)(bytes, RETURN_ADDRESS (0));

  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD);
  if (!ar_ptr)
    return 0;
  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  victim = mspace_malloc(arena_to_mspace(ar_ptr), bytes);
  if (victim && ar_ptr != &main_arena)
    set_non_main_arena(victim, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);
  assert(!victim || is_mmapped(mem2chunk(victim)) ||
	 ar_ptr == arena_for_chunk(mem2chunk(victim)));
  return victim;
}
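
The hook indirection at the top of public_mALLOc is the same mechanism glibc exposed as __malloc_hook, with RETURN_ADDRESS (0) supplying the caller argument. A sketch of a logging hook in the style of the old glibc manual example; these hooks are deprecated and have been removed from modern glibc, so this only illustrates how the caller address is consumed:

#include <malloc.h>
#include <stdio.h>

static void *(*old_malloc_hook) (size_t, const void *);

static void *
my_malloc_hook (size_t size, const void *caller)
{
  void *result;

  __malloc_hook = old_malloc_hook;     /* uninstall ourselves to avoid recursion */
  result = malloc (size);
  old_malloc_hook = __malloc_hook;     /* the inner call may have changed the hook */
  fprintf (stderr, "malloc (%zu) from %p returns %p\n", size, caller, result);
  __malloc_hook = my_malloc_hook;      /* re-install */
  return result;
}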
Example #20
void
public_fREe(void* mem)
{
  struct malloc_arena* ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  void (*hook) (void *, const void *) = __free_hook;
  if (hook != NULL) {
    (*hook)(mem, RETURN_ADDRESS (0));
    return;
  }

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);

  if (is_mmapped(p)) {                      /* release mmapped memory. */
    ar_ptr = arena_for_mmap_chunk(p);
    munmap_chunk(arena_to_mspace(ar_ptr), p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
#if THREAD_STATS
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif
  mspace_free(arena_to_mspace(ar_ptr), mem);
  (void)mutex_unlock(&ar_ptr->mutex);
}
Example #21
int
internal_function
_dl_make_stack_executable (void **stack_endp)
{
  /* This gives us the highest/lowest page that needs to be changed.  */
  uintptr_t page = ((uintptr_t) *stack_endp
		    & -(intptr_t) GLRO(dl_pagesize));
  int result = 0;

  /* Challenge the caller.  */
  if (__builtin_expect (__check_caller (RETURN_ADDRESS (0),
					allow_ldso|allow_libpthread) != 0, 0)
      || __builtin_expect (*stack_endp != __libc_stack_end, 0))
    return EPERM;

  /* Newer Linux kernels support a flag to make our job easy.  */
#if defined  PROT_GROWSDOWN || defined PROT_GROWSUP
# if __ASSUME_PROT_GROWSUPDOWN == 0
  static bool no_growsupdown;
  if (! no_growsupdown)
# endif
    {
      if (__builtin_expect (__mprotect ((void *) page, GLRO(dl_pagesize),
					__stack_prot) == 0, 1))
	goto return_success;
# if __ASSUME_PROT_GROWSUPDOWN == 0
      if (errno == EINVAL)
	no_growsupdown = true;
      else
# endif
	{
	  result = errno;
	  goto out;
	}
    }
#endif

  /* There is always a hole in the address space below the bottom of the
     stack.  So when we make an mprotect call that starts below the bottom
     of the stack, it will include the hole and fail with ENOMEM.

     We start with a random guess at how deep the stack might have gotten
     so as to have extended the GROWSDOWN mapping to lower pages.  */

#if __ASSUME_PROT_GROWSUPDOWN == 0
  size_t size = GLRO(dl_pagesize) * 8;

# if _STACK_GROWS_DOWN
  page = page + GLRO(dl_pagesize) - size;
  while (1)
    {
      if (__mprotect ((void *) page, size,
		      __stack_prot & ~PROT_GROWSDOWN) == 0)
	/* We got this chunk changed; loop to do another chunk below.  */
	page -= size;
      else
	{
	  if (errno != ENOMEM)	/* Unexpected failure mode.  */
	    {
	      result = errno;
	      goto out;
	    }

	  if (size == GLRO(dl_pagesize))
	    /* We just tried to mprotect the top hole page and failed.
	       We are done.  */
	    break;

	  /* Our mprotect call failed because it started below the lowest
	     stack page.  Try again on just the top half of that region.  */
	  size /= 2;
	  page += size;
	}
    }

# elif _STACK_GROWS_UP
  while (1)
    {
      if (__mprotect ((void *) page, size, __stack_prot & ~PROT_GROWSUP) == 0)
	/* We got this chunk changed; loop to do another chunk below.  */
	page += size;
      else
	{
	  if (errno != ENOMEM)	/* Unexpected failure mode.  */
	    {
	      result = errno;
	      goto out;
	    }

	  if (size == GLRO(dl_pagesize))
	    /* We just tried to mprotect the lowest hole page and failed.
	       We are done.  */
	    break;

	  /* Our mprotect call failed because it extended past the highest
	     stack page.  Try again on just the bottom half of that region.  */
	  size /= 2;
	}
    }

# else
#  error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
# endif
#endif

 return_success:
  /* Clear the address.  */
  *stack_endp = NULL;

  /* Remember that we changed the permission.  */
  GL(dl_stack_flags) |= PF_X;

 out:
#ifdef check_consistency
  check_consistency ();
#endif

  return result;
}
Example #22
SAMPGDK_API(bool, sampgdk_Load(void **ppData)) {
  void *plugin = sampgdk_plugin_get_handle(RETURN_ADDRESS());
  return init_plugin(plugin, ppData) >= 0;
}
Example #23
SAMPGDK_API(void, sampgdk_Unload(void)) {
  void *plugin = sampgdk_plugin_get_handle(RETURN_ADDRESS());
  cleanup_plugin(plugin);
}
Example #24
SAMPGDK_API(void, sampgdk_ProcessTick(void)) {
  void *plugin = sampgdk_plugin_get_handle(RETURN_ADDRESS());
  sampgdk_timer_process_timers(plugin);
}
Example #25
void *
dlsym (void *handle, const char *name)
{
  return __dlsym (handle, name, RETURN_ADDRESS (0));
}
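
The caller address that dlsym forwards to __dlsym is what makes RTLD_NEXT work: the "next" object is computed relative to the object containing the call site. A short sketch of the classic interposition use, assuming the GNU dlfcn API:

#define _GNU_SOURCE
#include <dlfcn.h>

static void *(*real_malloc) (size_t);

static void
resolve_real_malloc (void)
{
  /* Finds the next definition of malloc after the object this code lives in. */
  real_malloc = (void *(*) (size_t)) dlsym (RTLD_NEXT, "malloc");
}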
Example #26
void
_dl_mcount_wrapper_check (void *selfpc)
{
  if (GL(dl_profile_map) != NULL)
    GLRO(dl_mcount) ((ElfW(Addr)) RETURN_ADDRESS (0), (ElfW(Addr)) selfpc);
}
Example #27
void
_dl_mcount_wrapper (void *selfpc)
{
  GLRO(dl_mcount) ((ElfW(Addr)) RETURN_ADDRESS (0), (ElfW(Addr)) selfpc);
}
Example #28
void
_dl_mcount_wrapper_check (void *selfpc)
{
  if (_dl_profile_map != NULL)
    _dl_mcount ((ElfW(Addr)) RETURN_ADDRESS (0), (ElfW(Addr)) selfpc);
}
Example #29
void *
weak_function
dlvsym (void *handle, const char *name, const char *version_str)
{
  return __dlvsym (handle, name, version_str, RETURN_ADDRESS (0));
}
Example #30
/* MME_AllocDataBuffer()
 * Allocate a data buffer that is optimal for the transformer instantiation
 * to pass between a host and companion
 */
MME_ERROR MME_AllocDataBuffer (MME_TransformerHandle_t handle, MME_UINT size,
			       MME_AllocationFlags_t flags, MME_DataBuffer_t **dataBufferp) 
{
  MME_ERROR     res;
  ICS_MEM_FLAGS mflags;
  
  mme_transformer_t *transformer;
  mme_buffer_t      *buf;
  
  MME_AllocationFlags_t illegalFlags = ~(MME_ALLOCATION_PHYSICAL | MME_ALLOCATION_CACHED | MME_ALLOCATION_UNCACHED);
  
  if (!mme_state)
    return MME_DRIVER_NOT_INITIALIZED;

  MME_PRINTF(MME_DBG_BUFFER, "handle 0x%x size %d flags 0x%x dataBufferp %p\n",
	     handle, size, flags, dataBufferp);

  /* Validate parameters */
  if (size == 0 || dataBufferp == NULL || (flags & illegalFlags))
  {
    return MME_INVALID_ARGUMENT;
  }

  /* Disallow very large allocations (the size would turn negative as an int) */
  if (0 > (int) size)
  {
    return MME_NOMEM;
  }

  /* Validate transformer handle */
  if (handle == 0)
  {
    return MME_INVALID_HANDLE;
  }

  /* Lookup the transformer instance (takes lock on success) */
  transformer = mme_transformer_instance(handle);
  if (transformer == NULL)
  {
    return MME_INVALID_HANDLE;
  }

  /* We make no further use of the transformer, so drop the lock */
  _ICS_OS_MUTEX_RELEASE(&transformer->tlock);

  /* Allocate local buffer descriptor */
  _ICS_OS_ZALLOC(buf, sizeof(*buf));

  /* Fill out MME_DataBuffer_t struct */
  buf->buffer.StructSize           = sizeof(MME_DataBuffer_t);
  buf->buffer.NumberOfScatterPages = 1;
  buf->buffer.ScatterPages_p       = buf->pages;
  buf->buffer.TotalSize            = size;

  buf->flags                       = flags;
  buf->pages[0].Size               = size;

  /* DEBUG: Stash owner of buffer */
  buf->owner                       = RETURN_ADDRESS(0);

  /* Translate the MME buffer allocation flags */
  if (flags & MME_ALLOCATION_CACHED)
    mflags = ICS_CACHED;
  else if (flags & MME_ALLOCATION_UNCACHED)
    mflags = ICS_UNCACHED;
  else
    /* Set default/affinity memory allocation flags if none supplied */
    mflags = _MME_BUF_CACHE_FLAGS;
  
  MME_ASSERT(mflags);

  /* Now allocate the Companion mapped memory from the ICS heap */
  buf->pages[0].Page_p = ics_heap_alloc(mme_state->heap, size, mflags);
  if (buf->pages[0].Page_p == NULL)
  {
    MME_EPRINTF(MME_DBG_BUFFER, "Failed to allocate %d bytes mflags 0x%x from ICS heap %p\n",
		size, mflags, mme_state->heap);

    res = MME_NOMEM;
    goto error_free;
  }
  
  MME_PRINTF(MME_DBG_BUFFER, "Successfully allocated buf %p size %d Page_p %p (%s)\n",
	     buf, size, buf->pages[0].Page_p,
	     (mflags & ICS_CACHED) ? "CACHED" : "UNCACHED");
    
  /* Return MME_DataBuffer_t pointer to caller */
  *dataBufferp = &buf->buffer;

  return MME_SUCCESS;

error_free:
  _ICS_OS_FREE(buf);
  
  return res;
}
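
A buffer obtained this way is normally released through the matching MME call; a hedged usage sketch, where handle is a transformer handle obtained elsewhere and the buffer size is illustrative:

/* Hedged sketch; assumes MME_FreeDataBuffer as the counterpart API. */
static void demo_alloc_free (MME_TransformerHandle_t handle)
{
  MME_DataBuffer_t *buf = NULL;

  if (MME_AllocDataBuffer (handle, 4096, MME_ALLOCATION_CACHED, &buf) == MME_SUCCESS)
    {
      /* ... fill and use buf->ScatterPages_p[0].Page_p ... */
      MME_FreeDataBuffer (buf);
    }
}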