void*
public_mEMALIGn(size_t alignment, size_t bytes)
{
  struct malloc_arena* ar_ptr;
  void *p;

  void * (*hook) (size_t, size_t, const void *) = __memalign_hook;
  if (hook != NULL)
    return (*hook)(alignment, bytes, RETURN_ADDRESS (0));

  /* If need less alignment than we give anyway, just relay to malloc */
  if (alignment <= MALLOC_ALIGNMENT)
    return public_mALLOc(bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size */
  if (alignment < MIN_CHUNK_SIZE)
    alignment = MIN_CHUNK_SIZE;

  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD + alignment + MIN_CHUNK_SIZE);
  if (!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;

  p = mspace_memalign(arena_to_mspace(ar_ptr), alignment, bytes);

  if (p && ar_ptr != &main_arena)
    set_non_main_arena(p, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!p || is_mmapped(mem2chunk(p)) ||
         ar_ptr == arena_for_chunk(mem2chunk(p)));
  return p;
}
void* extmem_malloc_page_align(size_t bytes)
{
    void *mem = NULL;

    extmem_init();
    mem = mspace_memalign(extmem_mspace, 1 << PAGE_SHIFT, bytes);
    extmem_printk("[EXT_MEM] %s mem:%p, size: 0x%zx\n", __FUNCTION__, mem, bytes);
    return mem;
}
void * NONNULL(1) MALLOC
mm_shared_space_aligned_alloc(struct mm_shared_space *space, size_t align,
                              size_t size)
{
	mm_common_lock(&space->lock);
	void *ptr = mspace_memalign(space->space.opaque, align, size);
	mm_common_unlock(&space->lock);
	return ptr;
}
void* palHeapAllocator::Allocate(uint64_t size, uint32_t alignment)
{
    void* ptr = mspace_memalign(internal_, alignment, (size_t)size);
    if (ptr) {
        uint32_t reported_size = mspace_usable_size(ptr);
        ReportMemoryAllocation(ptr, reported_size);
    }
    return ptr;
}
void* __gm_memalign(size_t blocksize, size_t bytes)
{
    assert(GM);
    assert(GM->mspace_ptr);

    futex_lock(&GM->lock);
    void* ptr = mspace_memalign(GM->mspace_ptr, blocksize, bytes);
    futex_unlock(&GM->lock);

    if (!ptr)
        panic("gm_memalign(): Out of global heap memory, use a larger GM segment");
    return ptr;
}
void *
Impl_malloc_zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
{
    OSMemoryZone* osm = (OSMemoryZone*)zone;
    if (osm->global_zone) {
        return memalign(alignment, size);
    } else {
        return mspace_memalign(osm->memory_space, alignment, size);
    }
}
/**
 * allocate memory of SIZE bytes, aligning to ALIGNMENT
 */
void *
shmemi_mem_align (size_t alignment, size_t size)
{
    void *aligned_addr = mspace_memalign (myspace, alignment, size);

#ifdef HAVE_FEATURE_DEBUG
    debug_alloc_add (aligned_addr, size);
#endif /* HAVE_FEATURE_DEBUG */

    return aligned_addr;
}
static void* memalign_starter(size_t align, size_t sz, const void *caller)
{
  void* victim;

  /*ptmalloc_init_minimal();*/
  victim = mspace_memalign(arena_to_mspace(&main_arena), align, sz);
  THREAD_STAT(++main_arena.stat_starter);

  return victim;
}
void* texAlloc(size_t size, size_t alignment)
{
    void *p;

    p = mspace_memalign(tex_msp, alignment, size);
    if (p == 0) {
        printf("out of texture memory %zu\n", size);
        /* Wait for other threads to free texture memory, then retry.
           The original loop slept without retrying the allocation, so it
           could never make progress. */
        while (p == 0) {
            svcSleepThread(20000);
            p = mspace_memalign(tex_msp, alignment, size);
        }
    }
    return p;
}
int hbw_posix_memalign(void **memptr, size_t alignment, size_t size)
{
    if (myhbwmalloc_mspace == NULL) {
        if (!myhbwmalloc_hardfail) {
            fprintf(stderr, "hbwmalloc: mspace invalid - allocating from default heap\n");
            return posix_memalign(memptr, alignment, size);
        } else {
            fprintf(stderr, "hbwmalloc: mspace invalid - cannot allocate from hbw heap\n");
            abort();
        }
    }
    *memptr = mspace_memalign(myhbwmalloc_mspace, alignment, size);
    /* posix_memalign() reports allocation failure as ENOMEM, not -1 */
    return (*memptr == NULL) ? ENOMEM : 0;
}
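/* A minimal usage sketch for the hbw_posix_memalign() wrapper above: the
   return value is checked exactly as for posix_memalign(). The function
   name alloc_hbw_buffer and the size/alignment values are illustration
   only, not part of the original listing. */
static int alloc_hbw_buffer(void** out)
{
    void* p = NULL;
    int rc = hbw_posix_memalign(&p, 64, 4096);  /* 64-byte-aligned 4 KiB */
    if (rc != 0)
        return rc;  /* ENOMEM (or the fallback path's error) on failure */
    *out = p;
    return 0;
}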
void* memory_memalign(size_t alignment, size_t bytes)
{
    void* mem = NULL;
    int i = 0;

    if (!use_allocator) {
        memory_check_limits(bytes, 0);
        MALLOC_ALIGN(mem, bytes, alignment);
        return mem;
    }

    while (mem == NULL) {
        memory_t* memptr = &memory_table[i];

        /* First try to allocate in one of the existing chunks */
        if (memptr->in_use == 1)
            if ((mem = mspace_memalign(memptr->shm_mspace, alignment, bytes)) != NULL)
                break;

        /* Create a new chunk if already past the last valid chunk */
        if (i > memory_table_last) {
            memptr = memory_expand(bytes);
            if ((mem = mspace_memalign(memptr->shm_mspace, alignment, bytes)) != NULL)
                break;
            else
                ERROR("EPLIB memalign failed to allocate %zu bytes\n", bytes);
        }
        i++;
    }

    DEBUG_ASSERT(mem);
    MAKE_BOUNDS(mem, bytes);
    return mem;
}
void* OOBase::ArenaAllocator::reallocate(void* ptr, size_t bytes, size_t align)
{
	if (align <= 8)
		return mspace_realloc(m_mspace,ptr,bytes);

	if (mspace_realloc_in_place(m_mspace,ptr,bytes))
		return ptr;

	void* new_ptr = mspace_memalign(m_mspace,align,bytes);
	if (new_ptr && ptr)
	{
		/* Copy no more than the new block can hold: when shrinking, the
		   old block's usable size may exceed the requested size, and the
		   original unconditional copy could overrun the new block. */
		size_t copy = mspace_usable_size(ptr);
		if (copy > bytes)
			copy = bytes;
		memcpy(new_ptr,ptr,copy);
		mspace_free(m_mspace,ptr);
	}
	return new_ptr;
}
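/* A minimal usage sketch for the aligned reallocate above, assuming an
   OOBase::ArenaAllocator instance and the allocate() member shown later in
   this listing; the function name, sizes, and 64-byte alignment are
   illustration values only. */
static void grow_aligned_example(OOBase::ArenaAllocator& arena)
{
	void* buf = arena.allocate(128, 64);        /* 64-byte-aligned block */
	if (buf)
		buf = arena.reallocate(buf, 256, 64);   /* resize; tries in place,
		                                           else memalign + copy */
	/* ... use buf ... */
}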
void* public_vALLOc(size_t bytes)
{
  struct malloc_arena* ar_ptr;
  void *p;

  if(__malloc_initialized < 0)
    ptmalloc_init ();

  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD + MIN_CHUNK_SIZE);
  if (!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;

  p = mspace_memalign(arena_to_mspace(ar_ptr), 4096, bytes);

  if (p && ar_ptr != &main_arena)
    set_non_main_arena(p, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);
  return p;
}
void * NONNULL(1) MALLOC
mm_private_space_aligned_alloc(struct mm_private_space *space, size_t align,
                               size_t size)
{
	return mspace_memalign(space->space.opaque, align, size);
}
void* extmem_malloc_page_align(size_t bytes)
{
    extmem_init();
    extmem_printk("[EXT_MEM] %s size: 0x%zx\n", __FUNCTION__, bytes);
    return mspace_memalign(extmem_mspace, 1 << PAGE_SHIFT, bytes);
}
void* memalign(size_t alignment, size_t bytes)
{
	if (unlikely(sm_mspace == NULL))
		__sm_init();
	return mspace_memalign(sm_mspace, alignment, bytes);
}
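/* For context, a minimal sketch of the lazy initialization assumed by the
   memalign() wrapper above. The body of __sm_init() is not part of this
   listing, so this version is an assumption built on the stock dlmalloc
   mspace API: create_mspace() carves out a growable space, and
   mspace_memalign()/mspace_free() then serve allocations from it. */
static mspace sm_mspace = NULL;

static void __sm_init(void)
{
	/* 0 = default initial capacity (grows on demand), 1 = use locking */
	sm_mspace = create_mspace(0, 1);
}

static void __sm_destroy(void)
{
	if (sm_mspace != NULL) {
		destroy_mspace(sm_mspace);  /* releases all memory in the space */
		sm_mspace = NULL;
	}
}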
s32 wad_install(FIL *fil)
{
	u16 i;
	u32 br;
	struct wadheader hdr __attribute__((aligned(32)));

	f_lseek(fil, 0);
	ASSERT(!f_read(fil, &hdr, sizeof(hdr), &br));
	ASSERT(br == sizeof(hdr));

	u32 offset = 0;
	static u8 key[16] __attribute__((aligned(64))) = {0,};
	static u8 iv[16] __attribute__((aligned(64))) = {0,};

	ASSERT(hdr.hdr_size == 0x20);
	offset += ALIGN(hdr.hdr_size, 0x40);
	offset += ALIGN(hdr.certs_size, 0x40);

	struct tik *tik = memalign(32, hdr.tik_size);
	ASSERT(tik);
	f_lseek(fil, offset);
	ASSERT(!f_read(fil, tik, hdr.tik_size, &br));
	ASSERT(br == hdr.tik_size);
	offset += ALIGN(hdr.tik_size, 0x40);

	struct tmd *tmd = memalign(32, hdr.tmd_size);
	ASSERT(tmd);
	f_lseek(fil, offset);
	ASSERT(!f_read(fil, tmd, hdr.tmd_size, &br));
	ASSERT(br == hdr.tmd_size);
	offset += ALIGN(hdr.tmd_size, 0x40);

	hexdump(tik, hdr.tik_size);
	hexdump(tmd, hdr.tmd_size);

	otp_init();

	// Get the title key
	aes_reset();
	aes_set_key(otp.common_key);
	memcpy(iv, &tik->title_id, 8);
	aes_set_iv(iv);
	printf("common:\n");
	hexdump(otp.common_key, 16);
	printf("iv:\n");
	hexdump(iv, 16);
	printf("ctk:\n");
	hexdump(tik->cipher_title_key, 16);
	memcpy(iv, tik->cipher_title_key, 16);
	aes_decrypt(iv, key, 1, 0);
	memset(iv, 0, 16);
	printf("title key:\n");
	hexdump(key, 16);

	printf("es_addtitle: %d\n", es_addtitle(tmd, tik));
	printf("num contents: %d\n", tmd->num_contents);
	printf("%08x %08x\n", tmd, tik);

	for (i = 0; i < tmd->num_contents; i++) {
#ifdef MSPACES
		u8 *content = mspace_memalign(mem2space, 32, ALIGN(tmd->contents[i].size, 0x40));
#else
		u8 *content = (void *)0x91000000; //memalign(32, ALIGN(tmd->contents[i].size, 0x40));
#endif
		ASSERT(content);
		f_lseek(fil, offset);
		printf("read content --> %08x size %08x", content, (u32)tmd->contents[i].size);
		ASSERT(!f_read(fil, content, ALIGN(tmd->contents[i].size, 0x40), &br));
		printf("done\n");
		ASSERT(br == ALIGN(tmd->contents[i].size, 0x40));

		memcpy(iv, &tmd->contents[i].index, 2);
		aes_reset();
		aes_set_key(key);
		aes_set_iv(iv);
		printf("an IV:\n");
		hexdump(iv, 16);
		aes_decrypt(content, content, ALIGN(tmd->contents[i].size, 0x40) / 16, 0);
		printf("decrypted ");

		ASSERT(!es_addtitlecontent(tmd, tmd->contents[i].index, content, tmd->contents[i].size));
		offset += ALIGN(tmd->contents[i].size, 0x40);

#ifdef MSPACES
		mspace_free(mem2space, content);
#else
		//free(content);
#endif
	}

	free(tmd);
	free(tik);
	return 0;
}
void* OOBase::ArenaAllocator::allocate(size_t bytes, size_t align)
{
	return mspace_memalign(m_mspace,align,bytes);
}
void* PoolAllocator::Allocate(size_t size, size_t alignment)
{
	void* p = mspace_memalign(m_mspace, alignment, size);
	SI_ASSERT(p != nullptr);
	return p;
}