/** Return a pointer to a chunk of memory in <b>area</b> of at least <b>sz</b>
 * bytes.  <b>sz</b> should be significantly smaller than the area's chunk
 * size, though we can deal if it isn't. */
void *
memarea_alloc(memarea_t *area, size_t sz)
{
  memarea_chunk_t *chunk = area->first;
  char *result;
  tor_assert(chunk);
  if (sz == 0)
    sz = 1;
  if (chunk->next_mem+sz > chunk->u.mem+chunk->mem_size) {
    if (sz+CHUNK_HEADER_SIZE >= CHUNK_SIZE) {
      /* This allocation is too big.  Stick it in a special chunk, and put
       * that chunk second in the list. */
      memarea_chunk_t *new_chunk = alloc_chunk(sz+CHUNK_HEADER_SIZE, 0);
      new_chunk->next_chunk = chunk->next_chunk;
      chunk->next_chunk = new_chunk;
      chunk = new_chunk;
    } else {
      memarea_chunk_t *new_chunk = alloc_chunk(CHUNK_SIZE, 1);
      new_chunk->next_chunk = chunk;
      area->first = chunk = new_chunk;
    }
    tor_assert(chunk->mem_size >= sz);
  }
  result = chunk->next_mem;
  chunk->next_mem = chunk->next_mem + sz;
  // XXXX021 remove these once bug 930 is solved.
  tor_assert(chunk->next_mem >= chunk->u.mem);
  tor_assert(chunk->next_mem <= chunk->u.mem+chunk->mem_size);
  chunk->next_mem = realign_pointer(chunk->next_mem);
  return result;
}
static void *
alloc_do_malloc (struct Alloc *alloc, uint32_t size)
{
  if (size < (alloc->default_mmap_size - chunk_overhead ()))
    {
      uint8_t bucket = size_to_bucket (size);
      if (alloc->buckets[bucket] != 0)
        {
          // fast path.
          struct AllocAvailable *avail = alloc->buckets[bucket];
          MARK_DEFINED (avail, sizeof (void *));
          struct AllocAvailable *next = avail->next;
          MARK_UNDEFINED (avail, sizeof (void *));
          alloc->buckets[bucket] = next;
          REPORT_MALLOC (avail, size);
          return (uint8_t *) avail;
        }
      // slow path
      struct AllocAvailable *avail =
        (struct AllocAvailable *) alloc_brk (alloc, bucket_to_size (bucket));
      REPORT_MALLOC (avail, size);
      avail->next = 0;
      return (uint8_t *) avail;
    }
  else
    {
      alloc_chunk (alloc, size + chunk_overhead ());
      uint8_t *buffer = alloc_brk (alloc, size);
      REPORT_MALLOC (buffer, size);
      return buffer;
    }
}
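/* A minimal sketch of the size-class mapping alloc_do_malloc relies on,
 * assuming power-of-two buckets with an 8-byte minimum class.  This is an
 * assumption for illustration only; the allocator's real size_to_bucket /
 * bucket_to_size may use a different progression. */
static uint8_t
size_to_bucket (uint32_t size)
{
  uint8_t bucket = 0;
  uint32_t cap = 8;             /* smallest size class */
  while (cap < size)
    {
      cap <<= 1;
      bucket++;
    }
  return bucket;
}

static uint32_t
bucket_to_size (uint8_t bucket)
{
  return 8u << bucket;          /* inverse of the loop above */
}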
void *kmalloc(uint32_t len)
{
    len += sizeof(kheader_t);
    kheader_t *cur_header = kheap_first, *prev_header = 0;
    while (cur_header) {
        if (cur_header->allocated == 0 && cur_header->length >= len) {
            split_chunk(cur_header, len);
            cur_header->allocated = 1;
            return (void *)((uint32_t)cur_header + sizeof(kheader_t));
        }
        prev_header = cur_header;
        cur_header = cur_header->next;
    }
    uint32_t chunk_start;
    if (prev_header)
        chunk_start = (uint32_t)prev_header + prev_header->length;
    else {
        chunk_start = KHEAP_START;
        kheap_first = (kheader_t *)chunk_start;
    }
    alloc_chunk(chunk_start, len);
    cur_header = (kheader_t *)chunk_start;
    cur_header->prev = prev_header;
    cur_header->next = 0;
    cur_header->allocated = 1;
    cur_header->length = len;
    /* Guard the back-link: prev_header is 0 on the very first allocation. */
    if (prev_header)
        prev_header->next = cur_header;
    return (void *)(chunk_start + sizeof(kheader_t));
}
/** Allocate and return new memarea. */
memarea_t *
memarea_new(void)
{
  memarea_t *head = tor_malloc(sizeof(memarea_t));
  head->first = alloc_chunk(CHUNK_SIZE, 1);
  return head;
}
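/* Usage sketch for the memarea API above: allocate an area, carve
 * allocations out of it, then release everything at once.  Assumes Tor's
 * memarea_drop_all() as the matching release call; treat this as an
 * illustrative sketch, not the library's own example. */
static void
memarea_usage_example(void)
{
  memarea_t *area = memarea_new();
  char *buf = memarea_alloc(area, 128);   /* lives until the area dies */
  memset(buf, 0, 128);
  memarea_drop_all(area);                 /* frees every chunk at once */
}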
int git_revwalk_new(git_revwalk **revwalk_out, git_repository *repo)
{
    git_revwalk *walk;

    walk = git__malloc(sizeof(git_revwalk));
    if (walk == NULL)
        return GIT_ENOMEM;

    memset(walk, 0x0, sizeof(git_revwalk));

    walk->commits = git_hashtable_alloc(64,
            object_table_hash,
            (git_hash_keyeq_ptr)git_oid_cmp);

    if (walk->commits == NULL) {
        free(walk);
        return GIT_ENOMEM;
    }

    git_pqueue_init(&walk->iterator_time, 8, commit_time_cmp);
    git_vector_init(&walk->memory_alloc, 8, NULL);
    alloc_chunk(walk);

    walk->get_next = &revwalk_next_unsorted;
    walk->enqueue = &revwalk_enqueue_unsorted;

    walk->repo = repo;

    *revwalk_out = walk;
    return GIT_SUCCESS;
}
void *al_malloc(size_t bytes)
{
    /*
     * The allocator must have been initialized,
     * and bytes must be > 0.
     */
    void *ret;

    assert(bytes > 0);
    if (bytes > MAX_BYTES) {
        ret = malloc(bytes + PREFIX_SIZE);
        *(size_t *)ret = NFREELISTS;
        ret = (byte_t *)ret + PREFIX_SIZE;
    } else {
        struct allocator_t *self = &_s_allocator;
        size_t index = freelist_index(bytes);

        spinlock_lock(&self->spinlock);
        if (NULL == self->free_list[index])
            alloc_chunk(self, index);
        ret = (byte_t *)self->free_list[index] + PREFIX_SIZE;
        self->free_list[index] = self->free_list[index]->next;
        spinlock_unlock(&self->spinlock);
    }
    return ret;
}
address OsMemory_allocate_chunk(size_t initial_size,
                                size_t max_size, size_t alignment)
{
  // make it page aligned
  max_size = page_align_up(max_size);

  address chunk = anon_mmap(NULL, max_size);
  if (chunk == MAP_FAILED) {
    return NULL;
  }

  GUARANTEE((juint)chunk % alignment == 0, "must be aligned");
  GUARANTEE((juint)chunk % SysPageSize == 0, "must be page aligned");

  size_t aligned_size = page_align_up(initial_size);
  alloc_chunk(chunk, aligned_size, max_size);
  if (max_size > aligned_size) {
    protect_area(chunk + aligned_size, max_size - aligned_size);
  }
  return chunk;
}
SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
{
    sljit_uw *ptr = (sljit_uw *)alloc_chunk(size + sizeof(sljit_uw));
    if (!ptr)
        return NULL;
    /* Stash the chunk size just before the returned region. */
    *ptr = size;
    return (void *)(ptr + 1);
}
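/* A matching release routine for the simple sljit_malloc_exec above: step
 * back over the stored size word and hand the whole chunk back.  Sketch
 * only -- it assumes a free_chunk(start, size) counterpart to alloc_chunk();
 * the real sljit_free_exec may differ. */
SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void *ptr)
{
    sljit_uw *start_ptr = ((sljit_uw *)ptr) - 1;
    free_chunk(start_ptr, *start_ptr + sizeof(sljit_uw));
}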
static void monte_run(void)
{
    print_chunk("Seed", seed, 0);
    chunk_t MDi_3 = alloc_chunk(seed.len, "MDi_3");
    chunk_t MDi_2 = alloc_chunk(seed.len, "MDi_2");
    chunk_t MDi_1 = alloc_chunk(seed.len, "MDi_1");
    chunk_t Mi = alloc_chunk(3 * seed.len, "Mi");
    for (int j = 0; j < 100; j++) {
        // MD[0] = MD[1] = MD[2] = Seed
        memcpy(MDi_3.ptr, seed.ptr, seed.len);
        memcpy(MDi_2.ptr, seed.ptr, seed.len);
        memcpy(MDi_1.ptr, seed.ptr, seed.len);
        for (int i = 3; i < 1003; i++) {
            // shuffle
            chunk_t tmp = MDi_3;
            MDi_3 = MDi_2;
            MDi_2 = MDi_1;
            MDi_1 = seed;
            seed = tmp;
            // M[i] = MD[i-3] || MD[i-2] || MD[i-1];
            memcpy(Mi.ptr + seed.len * 0, MDi_3.ptr, seed.len);
            memcpy(Mi.ptr + seed.len * 1, MDi_2.ptr, seed.len);
            memcpy(Mi.ptr + seed.len * 2, MDi_1.ptr, seed.len);
            // MDi = SHA(Mi);
            struct hash_context *hash =
                hash_alg->hash_ops->init(hash_alg, "sha", DBG_CRYPT);
            hash_alg->hash_ops->digest_bytes(hash, "msg", Mi.ptr, Mi.len);
            hash_alg->hash_ops->final_bytes(&hash, seed.ptr, seed.len);
            // printf("%d ", i);
            // print_chunk("MDi", seed, 0);
        }
        print_line("");
        print_number("COUNT", j);
        // MDj = Seed = MD1002;
        // OUTPUT: MDj; (aka seed)
        print_chunk("MD", seed, 0);
    }
    freeanychunk(MDi_3);
    freeanychunk(MDi_2);
    freeanychunk(MDi_1);
    freeanychunk(Mi);
    print_line("");
    exit(0);
}
/* Worth understanding what the heap actually is inside the kernel. */
void *kmalloc(uint32 len)
{
    /* Every request carries a header in front, used to manage the block. */
    len += sizeof(heap_t);
    heap_t *cur_head = heap_first;
    heap_t *prev_head = 0;
    /* heap_first is 0 on the very first call.  On later calls, first scan
     * the blocks already created: if a free one is big enough, reuse it
     * instead of mapping new memory. */
    while (cur_head) {
        if (cur_head->allocate == 0 && cur_head->length >= len) {
            /* Carve len bytes out of this block. */
            split_chunk(cur_head, len);
            /* Mark the block's header as in use. */
            cur_head->allocate = 1;
            /* Not fully understood yet. */
            return (void *)((uint32)cur_head + sizeof(heap_t));
        }
        prev_head = cur_head;
        cur_head = cur_head->next;
    }
    uint32 chunk_start;
    /* The first call runs the else branch and initializes heap_first;
     * afterwards a new block starts right after the previous one. */
    if (prev_head) {
        chunk_start = (uint32)prev_head + prev_head->length;
    } else {
        chunk_start = HEAP_START;
        heap_first = (heap_t *)chunk_start;
    }
    /* Map the linear address range onto physical memory. */
    alloc_chunk(chunk_start, len);
    /* Fill in the header describing this block. */
    cur_head = (heap_t *)chunk_start;
    /* Link back to the previous block's header. */
    cur_head->prev = prev_head;
    cur_head->next = 0;
    cur_head->allocate = 1;
    cur_head->length = len;
    /* Chain the two blocks together. */
    if (prev_head) {
        prev_head->next = cur_head;
    }
    /* The heap_t structure at the head of the block only describes it, so
     * the returned address must skip past it (the heap grows upward). */
    return (void *)(chunk_start + sizeof(heap_t));
}
static commit_object *alloc_commit(git_revwalk *walk)
{
    unsigned char *chunk;

    if (walk->chunk_size == COMMITS_PER_CHUNK)
        alloc_chunk(walk);

    chunk = git_vector_get(&walk->memory_alloc, walk->memory_alloc.length - 1);
    chunk += (walk->chunk_size * CHUNK_STEP);
    walk->chunk_size++;

    return (commit_object *)chunk;
}
static struct page_info * __init alloc_chunk(
    struct domain *d, unsigned long max_pages)
{
    static unsigned int __initdata last_order = MAX_ORDER;
    static unsigned int __initdata memflags = MEMF_no_dma;
    struct page_info *page;
    unsigned int order = get_order_from_pages(max_pages), free_order;

    if ( order > last_order )
        order = last_order;
    else if ( max_pages & (max_pages - 1) )
        --order;
    while ( (page = alloc_domheap_pages(d, order, memflags)) == NULL )
        if ( order-- == 0 )
            break;
    if ( page )
        last_order = order;
    else if ( memflags )
    {
        /*
         * Allocate up to 2MB at a time: It prevents allocating very large
         * chunks from DMA pools before the >4GB pool is fully depleted.
         */
        last_order = 21 - PAGE_SHIFT;
        memflags = 0;
        return alloc_chunk(d, max_pages);
    }

    /*
     * Make a reasonable attempt at finding a smaller chunk at a higher
     * address, to avoid allocating from low memory as much as possible.
     */
    for ( free_order = order; !memflags && page && order--; )
    {
        struct page_info *pg2;

        if ( d->tot_pages + (1 << order) > d->max_pages )
            continue;
        pg2 = alloc_domheap_pages(d, order, 0);
        if ( pg2 > page )
        {
            free_domheap_pages(page, free_order);
            page = pg2;
            free_order = order;
        }
        else if ( pg2 )
            free_domheap_pages(pg2, order);
    }

    return page;
}
void *kmalloc(uint32_t len)
{
    /* Add the management header to every requested length, since both
     * allocation and freeing are handled through that structure. */
    len += sizeof(header_t);

    header_t *cur_header = heap_first;
    header_t *prev_header = 0;

    while (cur_header) {
        /* If the current block is free and large enough for the request,
         * split it at the requested length. */
        if (cur_header->allocated == 0 && cur_header->length >= len) {
            split_chunk(cur_header, len);
            cur_header->allocated = 1;
            /* Move the returned pointer past the management structure. */
            return (void *)((uint32_t)cur_header + sizeof(header_t));
        }
        /* Otherwise advance to the next block. */
        prev_header = cur_header;
        cur_header = cur_header->next;
    }

    uint32_t chunk_start;

    /* On the first call, initialize the start of the heap; afterwards a
     * new block starts right after the previous one. */
    if (prev_header) {
        chunk_start = (uint32_t)prev_header + prev_header->length;
    } else {
        chunk_start = HEAP_START;
        heap_first = (header_t *)chunk_start;
    }

    /* Map in memory pages if needed. */
    alloc_chunk(chunk_start, len);

    cur_header = (header_t *)chunk_start;
    cur_header->prev = prev_header;
    cur_header->next = 0;
    cur_header->allocated = 1;
    cur_header->length = len;

    if (prev_header) {
        prev_header->next = cur_header;
    }

    return (void *)(chunk_start + sizeof(header_t));
}
static uint8_t *
alloc_brk (struct Alloc *alloc, uint32_t needed)
{
  struct AllocMmapChunk *tmp;
  do
    {
      for (tmp = alloc->chunks; tmp != 0; tmp = tmp->next)
        {
          if (tmp->size - tmp->brk >= needed)
            {
              uint8_t *buffer = tmp->buffer + tmp->brk;
              tmp->brk += needed;
              return buffer;
            }
        }
    }
  while (alloc_chunk (alloc, alloc->default_mmap_size));
  return 0;
}
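/* Sketch of the backing-store growth that alloc_brk() retries on.  It
 * assumes struct AllocMmapChunk has { buffer, size, brk, next } fields, that
 * the header lives at the front of each mapping (hence brk starting at
 * chunk_overhead ()), and that a plain anonymous mmap backs it; the
 * allocator's real alloc_chunk() may differ. */
static int
alloc_chunk (struct Alloc *alloc, uint32_t size)
{
  uint8_t *buffer = mmap (0, size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (buffer == MAP_FAILED)
    return 0;                        /* alloc_brk's do/while gives up */
  struct AllocMmapChunk *chunk = (struct AllocMmapChunk *) buffer;
  chunk->buffer = buffer;
  chunk->size = size;
  chunk->brk = chunk_overhead ();    /* the header occupies the front */
  chunk->next = alloc->chunks;       /* push onto the chunk list */
  alloc->chunks = chunk;
  return 1;
}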
static void msg_run(void)
{
    print_number("Len", len);
    /* byte aligned */
    passert(len == (len & -4));
    /* when len==0, msg may contain one byte :-/ */
    passert((len == 0 && msg.len <= 1) || (len == msg.len * BITS_PER_BYTE));
    print_chunk("Msg", msg, 0);
    struct hash_context *hash =
        hash_alg->hash_ops->init(hash_alg, "sha", DBG_CRYPT);
    /* See above, use LEN, not MSG.LEN */
    hash_alg->hash_ops->digest_bytes(hash, "msg", msg.ptr, len / BITS_PER_BYTE);
    chunk_t bytes = alloc_chunk(l, "bytes");
    hash_alg->hash_ops->final_bytes(&hash, bytes.ptr, bytes.len);
    print_chunk("MD", bytes, 0);
    freeanychunk(bytes);
}
void *kmalloc(uint32_t len)
{
    heap_header_t *curr, *prev;
    uint32_t chunk_start;

    len += sizeof(heap_header_t); // include sizeof(heap_header_t)

    curr = heap_first;
    prev = 0;
    while (curr) {
        if (curr->allocated == 0 && curr->length >= len) {
            split_chunk(curr, len);
            curr->allocated = 1;
            return (void *)((uint32_t)curr + sizeof(heap_header_t));
        }
        prev = curr;
        curr = curr->next;
    }

    if (prev) {
        chunk_start = (uint32_t)prev + prev->length;
    } else {
        chunk_start = HEAP_START_ADDR;
        heap_first = (heap_header_t *)chunk_start;
    }

    alloc_chunk(chunk_start, len);
    curr = (heap_header_t *)chunk_start;
    curr->prev = prev;
    curr->next = 0;
    curr->allocated = 1;
    curr->length = len;
    if (prev) {
        prev->next = curr;
    }

    return (void *)(chunk_start + sizeof(heap_header_t));
}
void *kmalloc(uint32_t len)
{
    len += sizeof(header_t);
    header_t *cur_header = heap_first;
    header_t *prev_header = 0;
    while (cur_header) {
        if (cur_header->allocated == 0 && cur_header->length >= len) {
            split_chunk(cur_header, len);
            cur_header->allocated = 1;
            return (void *)((uint32_t)cur_header + sizeof(header_t));
        }
        prev_header = cur_header;
        cur_header = cur_header->next;
    }
    uint32_t chunk_start;
    if (prev_header) {
        chunk_start = (uint32_t)prev_header + prev_header->length;
    } else {
        chunk_start = HEAP_START;
        heap_first = (header_t *)chunk_start;
    }
    // check whether more RAM needs to be mapped
    alloc_chunk(chunk_start, len);
    cur_header = (header_t *)chunk_start;
    cur_header->prev = prev_header;
    cur_header->next = 0;
    cur_header->allocated = 1;
    cur_header->length = len;
    if (prev_header)
        prev_header->next = cur_header;
    return (void *)(chunk_start + sizeof(header_t));
}
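/* Sketch of the split_chunk() that every kmalloc variant above calls: carve
 * len bytes off the front of a free block and turn the remainder into a new
 * free block, if the remainder can still hold a header.  Assumes the
 * header_t layout used directly above (length includes the header); the
 * tutorial kernels differ in details. */
static void split_chunk(header_t *chunk, uint32_t len)
{
    /* Only split when the leftover can hold a header plus at least a byte. */
    if (chunk->length - len > sizeof(header_t)) {
        header_t *rest = (header_t *)((uint32_t)chunk + len);
        rest->prev = chunk;
        rest->next = chunk->next;
        rest->allocated = 0;
        rest->length = chunk->length - len;
        if (chunk->next)
            chunk->next->prev = rest;
        chunk->next = rest;
        chunk->length = len;
    }
}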
struct mcache *
mc_create(struct mcache *parent, const char *name, size_t size,
          void (*destroy_cb)(struct mcache *, void *), void *destroy_arg)
{
    struct mcache *cache;
    struct mc_chunk *chunk;
    struct mc_obj *obj;

    if ((chunk = alloc_chunk(size)) == NULL)
        return NULL;

    obj = split_obj(chunk, TAILQ_FIRST(&chunk->objs),
                    ALIGNOF(sizeof(*cache)));
    cache = (struct mcache *)(obj->body);
    cache->name = name;
    cache->parent = NULL;
    chunk->cache = cache;
#ifdef ENABLE_MC_DEBUG
    obj->file = name;
    obj->func = __func__;
    obj->line = 0;
#endif
    TAILQ_INIT(&cache->chunks);
    TAILQ_INSERT_HEAD(&cache->chunks, chunk, node);
    TAILQ_INIT(&cache->childs);
    cache->chunk_num = 1;
    cache->child_num = 0;
    if ((cache->destroy_cb = destroy_cb) == NULL)
        destroy_arg = NULL;
    cache->destroy_arg = destroy_arg;
    link_cache(parent, cache);

    DBGTRACE("%s(%d): %s parent:%p size:%lu cache:%p\n",
             __func__, __LINE__, name, parent, size, cache);
    return cache;
}
SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
{
    struct block_header *header;
    struct block_header *next_header;
    struct free_block *free_block;
    sljit_uw chunk_size;

    allocator_grab_lock();
    if (size < (64 - sizeof(struct block_header)))
        size = (64 - sizeof(struct block_header));
    size = ALIGN_SIZE(size);

    free_block = free_blocks;
    while (free_block) {
        if (free_block->size >= size) {
            chunk_size = free_block->size;
            if (chunk_size > size + 64) {
                /* We just cut a block from the end of the free block. */
                chunk_size -= size;
                free_block->size = chunk_size;
                header = AS_BLOCK_HEADER(free_block, chunk_size);
                header->prev_size = chunk_size;
                AS_BLOCK_HEADER(header, size)->prev_size = size;
            }
            else {
                sljit_remove_free_block(free_block);
                header = (struct block_header*)free_block;
                size = chunk_size;
            }
            allocated_size += size;
            header->size = size;
            allocator_release_lock();
            return MEM_START(header);
        }
        free_block = free_block->next;
    }

    chunk_size = (size + sizeof(struct block_header) + CHUNK_SIZE - 1) & CHUNK_MASK;
    header = (struct block_header*)alloc_chunk(chunk_size);
    if (!header) {
        allocator_release_lock();
        return NULL;
    }

    chunk_size -= sizeof(struct block_header);
    total_size += chunk_size;

    header->prev_size = 0;
    if (chunk_size > size + 64) {
        /* Cut the allocated space into a free and a used block. */
        allocated_size += size;
        header->size = size;
        chunk_size -= size;

        free_block = AS_FREE_BLOCK(header, size);
        free_block->header.prev_size = size;
        sljit_insert_free_block(free_block, chunk_size);
        next_header = AS_BLOCK_HEADER(free_block, chunk_size);
    }
    else {
        /* All space belongs to this allocation. */
        allocated_size += chunk_size;
        header->size = chunk_size;
        next_header = AS_BLOCK_HEADER(header, chunk_size);
    }
    next_header->size = 1;
    next_header->prev_size = chunk_size;
    allocator_release_lock();
    return MEM_START(header);
}
static int find_cache_item(unsigned rec_num, int assign_uncompressed)
{
    int i = cache_head, d = get_current_db_idx();
    int idle_item = (cache_head + 1) & cache_length_mask;

    if (assign_uncompressed)
        uncompressed = 0;
    /* Don't pick the entry holding the saved uncompressed record as the
     * eviction candidate. */
    if (saved_uncompressed && cache[idle_item].chunk.d >= 0 &&
        saved_uncompressed == cache[idle_item].content)
        idle_item = (idle_item + 1) & cache_length_mask;
    if (facunde) {
        char s[0x33];
        StrCopy(s, "cache find ");
        StrCat(s, "(");
        StrIToA(s + StrLen(s), d); StrCat(s, ":");
        StrIToA(s + StrLen(s), rec_num); StrCat(s, ")");
        draw_chars(s, 0, 0);
    }
    /* Scan the cache for a hit, remembering the last free slot seen. */
    do {
        if (facunde) {
            char s[0x33];
            StrCopy(s, "cache ");
            StrIToA(s + StrLen(s), i); StrCat(s, ":(");
            StrIToA(s + StrLen(s), cache[i].db_idx); StrCat(s, ":");
            StrIToA(s + StrLen(s), cache[i].rec_num); StrCat(s, ");");
            StrIToA(s + StrLen(s), cache[i].chunk.d); StrCat(s, " ");
            draw_chars(s, 0, 0);
        }
        if (cache[i].chunk.d < 0) {
            idle_item = i;
            goto next_i;
        }
        if (d == cache[i].db_idx && rec_num == cache[i].rec_num) {
            if (facunde) {
                char s[0x33];
                StrCopy(s, "found ");
                StrCat(s, " ");
                StrIToA(s + StrLen(s), i); StrCat(s, ":(");
                StrIToA(s + StrLen(s), d); StrCat(s, ":");
                StrIToA(s + StrLen(s), rec_num); StrCat(s, ")");
                StrIToH(s + StrLen(s), (UInt32)(cache[i].content)); StrCat(s, " ");
                draw_chars(s, 0, 0);
            }
            if (assign_uncompressed)
                uncompressed = cache[i].content;
            return i;
        }
next_i:
        i = (i - 1) & cache_length_mask;
    } while (i != cache_head);
    if (facunde) {
        char s[0x33];
        StrCopy(s, "cache loop done ");
        StrIToA(s + StrLen(s), idle_item); StrCat(s, ":(");
        StrIToA(s + StrLen(s), cache[idle_item].db_idx); StrCat(s, ":");
        StrIToA(s + StrLen(s), cache[idle_item].rec_num); StrCat(s, ");");
        StrIToA(s + StrLen(s), cache[idle_item].chunk.d); StrCat(s, " ");
        draw_chars(s, 0, 0);
    }
    /* Miss: free the victim slot's chunk and allocate a fresh one,
     * evicting further entries if the allocation keeps failing. */
    if (cache[idle_item].chunk.d >= 0) {
        free_chunk(cache[idle_item].chunk);
        cache[idle_item].chunk.d = invalid_chunk_descriptor;
    }
    i = (idle_item + 1) & cache_length_mask;
    while (i != idle_item) {
        cache[idle_item].chunk = alloc_chunk(dh.record_size);
        if (cache[idle_item].chunk.d >= 0) {
            cache_head = idle_item;
            break;
        }
        while (i != idle_item) {
            int i_prev = i;
            i = (i + 1) & cache_length_mask;
            if (cache[i_prev].chunk.d >= 0 &&
                (!saved_uncompressed || saved_uncompressed != cache[i_prev].content)) {
                free_chunk(cache[i_prev].chunk);
                cache[i_prev].chunk.d = invalid_chunk_descriptor;
                break;
            }
        }
    }
    if (facunde) {
        char s[0x33];
        StrCopy(s, "idle alloc ");
        StrIToA(s + StrLen(s), idle_item); StrCat(s, ":(");
        StrIToA(s + StrLen(s), d); StrCat(s, ":");
        StrIToA(s + StrLen(s), rec_num); StrCat(s, ");");
        StrIToA(s + StrLen(s), cache[idle_item].chunk.d); StrCat(s, " ");
        draw_chars(s, 0, 0);
    }
    /* Decompress the record into the new chunk and publish the entry. */
    if (cache[idle_item].chunk.d >= 0) {
        if (inflate_into_chunk(cache[idle_item].chunk)) {
            free_chunk(cache[idle_item].chunk);
            cache[idle_item].chunk.d = invalid_chunk_descriptor;
        } else {
            const char *uc = lock_chunk(cache[idle_item].chunk);
            cache[idle_item].rec_num = rec_num;
            cache[idle_item].db_idx = d;
            cache[idle_item].content = uc;
            if (assign_uncompressed)
                uncompressed = uc;
            return idle_item;
        }
    }
    return -1;
}
/** Return a pointer to a chunk of memory in <b>area</b> of at least <b>sz</b>
 * bytes.  <b>sz</b> should be significantly smaller than the area's chunk
 * size, though we can deal if it isn't. */
void *
memarea_alloc(memarea_t *area, size_t sz)
{
  memarea_chunk_t *chunk = area->first;
  char *result;
  tor_assert(chunk);
  CHECK_SENTINEL(chunk);
  tor_assert(sz < SIZE_T_CEILING);
  if (sz == 0)
    sz = 1;
  if (chunk->next_mem+sz > chunk->U_MEM+chunk->mem_size) {
    if (sz+CHUNK_HEADER_SIZE >= CHUNK_SIZE) {
      /* This allocation is too big.  Stick it in a special chunk, and put
       * that chunk second in the list. */
      memarea_chunk_t *new_chunk = alloc_chunk(sz+CHUNK_HEADER_SIZE, 0);
      new_chunk->next_chunk = chunk->next_chunk;
      chunk->next_chunk = new_chunk;
      chunk = new_chunk;
    } else {
      memarea_chunk_t *new_chunk = alloc_chunk(CHUNK_SIZE, 1);
      new_chunk->next_chunk = chunk;
      area->first = chunk = new_chunk;
    }
    tor_assert(chunk->mem_size >= sz);
  }
  result = chunk->next_mem;
  chunk->next_mem = chunk->next_mem + sz;
  /* Reinstate these if bug 930 ever comes back:
  tor_assert(chunk->next_mem >= chunk->U_MEM);
  tor_assert(chunk->next_mem <= chunk->U_MEM+chunk->mem_size);
  */
  chunk->next_mem = realign_pointer(chunk->next_mem);
  return result;
}