void * memalign(size_t align, size_t size)
{
    heap_mem_header_t new_mem;
    heap_mem_header_t *cur_mem;
    heap_mem_header_t *old_mem;
    void *ptr = NULL;

    if (align <= DEFAULT_ALIGNMENT)
        return malloc(size);

    /* Allocate with extra alignment bytes just in case it isn't
       aligned properly by malloc. */
    if ((ptr = malloc(size + align)) == NULL)
        return ptr; /* NULL */

    /* If malloc returned it aligned for us we're fine. */
    if (((u32)ptr & (align - 1)) == 0)
        return ptr;

    alloc_lock();

    cur_mem = (heap_mem_header_t *)((u32)ptr - sizeof(heap_mem_header_t));
    cur_mem->size -= align;

    /* Otherwise, align the pointer and fix up our header accordingly. */
    ptr = (void *)ALIGN((u32)ptr, align);

    old_mem = cur_mem;

    /* Copy the heap_mem_header_t locally, before repositioning (to make
       sure we don't overwrite ourselves). */
    memcpy(&new_mem, cur_mem, sizeof(heap_mem_header_t));
    cur_mem = (heap_mem_header_t *)((u32)ptr - sizeof(heap_mem_header_t));
    memcpy(cur_mem, &new_mem, sizeof(heap_mem_header_t));

    if (cur_mem->prev)
        cur_mem->prev->next = cur_mem;
    if (cur_mem->next)
        cur_mem->next->prev = cur_mem;

    if (__alloc_heap_head == old_mem)
        __alloc_heap_head = cur_mem;
    if (__alloc_heap_tail == old_mem)
        __alloc_heap_tail = cur_mem;

    cur_mem->ptr = ptr;

    alloc_unlock();

    return ptr;
}
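/*
 * heap_mem_header_t is used throughout this allocator but is not defined in
 * this excerpt.  A plausible reconstruction from its usage (a ptr/size pair
 * plus doubly-linked prev/next, with the header stored immediately before
 * the payload it describes) would be the following; treat the exact field
 * order as an assumption.
 */
typedef struct _heap_mem_header {
    void   *ptr;                    /* payload address returned to the caller */
    size_t  size;                   /* payload size, excluding this header */
    struct _heap_mem_header *prev;  /* previous block in address order */
    struct _heap_mem_header *next;  /* next block in address order */
} heap_mem_header_t;
/*
 * Each allocation is laid out as [header][payload].  This layout is why
 * memalign() above can shift the header: it copies the header to
 * (aligned ptr - sizeof(heap_mem_header_t)) and then relinks prev/next and
 * the head/tail pointers to the header's new address.
 */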
EXTERN void neko_global_init() {
#   ifdef NEKO_DIRECT_THREADED
    op_last = neko_get_ttable()[Last];
#   endif
    empty_array.ptr = val_null;
    neko_gc_init();
    neko_vm_context = alloc_local();
    neko_fields_lock = alloc_lock();
    neko_fields = (objtable*)alloc_root((NEKO_FIELDS_MASK+1) * sizeof(struct _objtable) / sizeof(value));
    {
        int i;
        for(i=0;i<=NEKO_FIELDS_MASK;i++)
            otable_init(&neko_fields[i]);
    }
    neko_init_builtins();
    kind_names = (kind_list**)alloc_root(1);
    *kind_names = NULL;
    id_loader = val_id("loader");
    id_exports = val_id("exports");
    id_cache = val_id("cache");
    id_path = val_id("path");
    id_loader_libs = val_id("__libs");
    neko_id_module = val_id("__module");
    INIT_ID(compare);
    INIT_ID(string);
    INIT_ID(add);
    INIT_ID(radd);
    INIT_ID(sub);
    INIT_ID(rsub);
    INIT_ID(mult);
    INIT_ID(rmult);
    INIT_ID(div);
    INIT_ID(rdiv);
    INIT_ID(mod);
    INIT_ID(rmod);
    INIT_ID(get);
    INIT_ID(set);
    apply_string = alloc_root(1);
    *apply_string = alloc_string("apply");
    neko_init_jit();
}
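/*
 * A hedged embedding sketch showing where neko_global_init() sits in a host
 * program.  neko_vm_alloc/neko_vm_select/neko_global_free are the usual
 * libneko companions, but treat the exact call sequence as an assumption
 * about the embedding API rather than a documented contract.
 */
#include <neko_vm.h>

int host_main(void) {
    neko_global_init();             /* one-time, process-wide setup (above) */
    neko_vm *vm = neko_vm_alloc(NULL);
    neko_vm_select(vm);             /* bind the VM to the current thread */
    /* ... load and run Neko modules here ... */
    neko_global_free();             /* tear down globals on shutdown */
    return 0;
}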
/**
 * __get_lock - find or create a lock instance
 * @lock: pointer to a pthread lock function
 *
 * Try to find an existing lock in the rbtree using the provided pointer. If
 * one wasn't found - create it.
 */
static struct lock_lookup *__get_lock(void *lock)
{
    struct rb_node **node, *parent;
    struct lock_lookup *l;

    ll_pthread_rwlock_rdlock(&locks_rwlock);
    node = __get_lock_node(lock, &parent);
    ll_pthread_rwlock_unlock(&locks_rwlock);
    if (*node) {
        return rb_entry(*node, struct lock_lookup, node);
    }

    /* We didn't find the lock, let's create it */
    l = alloc_lock();
    if (l == NULL)
        return NULL;

    l->orig = lock;
    /*
     * Currently the name of the lock is the ptr value of the pthread lock,
     * while not optimal, it makes debugging a bit easier.
     *
     * TODO: Get the real name of the lock using libdwarf
     */
    sprintf(l->name, "%p", lock);
    lockdep_init_map(&l->dep_map, l->name, &l->key, 0);

    ll_pthread_rwlock_wrlock(&locks_rwlock);
    /* This might have changed since the last time we fetched it */
    node = __get_lock_node(lock, &parent);
    rb_link_node(&l->node, parent, node);
    rb_insert_color(&l->node, &locks);
    ll_pthread_rwlock_unlock(&locks_rwlock);

    return l;
}
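/*
 * A sketch (not verbatim liblockdep source) of how a preloaded pthread
 * wrapper would consume __get_lock(): resolve the lockdep mapping for the
 * caller's mutex, report the acquire to lockdep, then forward to the real,
 * dlsym-resolved function.  The lock_acquire/lock_release argument values
 * follow the kernel-era signatures this file pairs with and should be
 * treated as assumptions.
 */
int pthread_mutex_lock(pthread_mutex_t *mutex)
{
    int r;

    /* Find (or lazily create) the lockdep state for this mutex. */
    lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL,
                 (unsigned long)_RET_IP_);
    r = ll_pthread_mutex_lock(mutex);   /* the real pthread function */
    if (r)
        lock_release(&__get_lock(mutex)->dep_map, 0,
                     (unsigned long)_RET_IP_);

    return r;
}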
static int lock_it(struct file *filp, struct file_lock *caller, unsigned int fd)
{
    struct file_lock *fl;
    struct file_lock *left = 0;
    struct file_lock *right = 0;
    struct file_lock **before;
    int added = 0;

    /*
     * Find the first old lock with the same owner as the new lock.
     */
    before = &filp->f_inode->i_flock;
    while ((fl = *before) &&
           (caller->fl_owner != fl->fl_owner ||
            caller->fl_fd != fl->fl_fd))
        before = &fl->fl_next;

    /*
     * Look up all locks of this owner.
     */
    while ((fl = *before) &&
           caller->fl_owner == fl->fl_owner &&
           caller->fl_fd == fl->fl_fd) {
        /*
         * Detect adjacent or overlapping regions (if same lock type)
         */
        if (caller->fl_type == fl->fl_type) {
            if (fl->fl_end < caller->fl_start - 1)
                goto next_lock;
            /*
             * If the next lock in the list has entirely bigger
             * addresses than the new one, insert the lock here.
             */
            if (fl->fl_start > caller->fl_end + 1)
                break;

            /*
             * If we come here, the new and old lock are of the
             * same type and adjacent or overlapping. Make one
             * lock yielding from the lower start address of both
             * locks to the higher end address.
             */
            if (fl->fl_start > caller->fl_start)
                fl->fl_start = caller->fl_start;
            else
                caller->fl_start = fl->fl_start;
            if (fl->fl_end < caller->fl_end)
                fl->fl_end = caller->fl_end;
            else
                caller->fl_end = fl->fl_end;
            if (added) {
                free_lock(before);
                continue;
            }
            caller = fl;
            added = 1;
            goto next_lock;
        }
        /*
         * Processing for different lock types is a bit more complex.
         */
        if (fl->fl_end < caller->fl_start)
            goto next_lock;
        if (fl->fl_start > caller->fl_end)
            break;
        if (caller->fl_type == F_UNLCK)
            added = 1;
        if (fl->fl_start < caller->fl_start)
            left = fl;
        /*
         * If the next lock in the list has a higher end address than
         * the new one, insert the new one here.
         */
        if (fl->fl_end > caller->fl_end) {
            right = fl;
            break;
        }
        if (fl->fl_start >= caller->fl_start) {
            /*
             * The new lock completely replaces an old one (this may
             * happen several times).
             */
            if (added) {
                free_lock(before);
                continue;
            }
            /*
             * Replace the old lock with the new one. Wake up
             * anybody waiting for the old one, as the change in
             * lock type might satisfy his needs.
             */
            wake_up(&fl->fl_wait);
            fl->fl_start = caller->fl_start;
            fl->fl_end = caller->fl_end;
            fl->fl_type = caller->fl_type;
            caller = fl;
            added = 1;
        }
        /*
         * Go on to next lock.
         */
    next_lock:
        before = &(*before)->fl_next;
    }

    if (!added) {
        if (caller->fl_type == F_UNLCK) {
            /*
             * XXX - under iBCS-2, attempting to unlock a not-locked region
             * is not considered an error condition, although I'm not sure
             * if this should be a default behavior (it makes porting to
             * native Linux easy) or a personality option.
             *
             * Does Xopen/1170 say anything about this?
             * - [email protected]
             */
#if 0
            return -EINVAL;
#else
            return 0;
#endif
        }
        if (!(caller = alloc_lock(before, caller, fd)))
            return -ENOLCK;
    }
    if (right) {
        if (left == right) {
            /*
             * The new lock breaks the old one in two pieces, so we
             * have to allocate one more lock (in this case, even
             * F_UNLCK may fail!).
             */
            if (!(left = alloc_lock(before, right, fd))) {
                if (!added)
                    free_lock(before);
                return -ENOLCK;
            }
        }
        right->fl_start = caller->fl_end + 1;
    }
    if (left)
        left->fl_end = caller->fl_start - 1;
    return 0;
}
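/*
 * What lock_it() implements, seen from user space: two adjacent write-lock
 * regions taken through fcntl() coalesce into one, and unlocking the middle
 * of a region splits it in two (the left/right bookkeeping above).  A small
 * stand-alone sketch; the file path and region sizes are illustrative.
 */
#include <fcntl.h>
#include <unistd.h>

int demo_record_locks(void)
{
    int fd = open("/tmp/lockdemo", O_RDWR | O_CREAT, 0644);
    struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };

    if (fd < 0)
        return -1;

    fl.l_start = 0;   fl.l_len = 100;   /* lock [0,99] */
    fcntl(fd, F_SETLK, &fl);
    fl.l_start = 100; fl.l_len = 100;   /* lock [100,199]: adjacent, merges
                                           with the first into [0,199] */
    fcntl(fd, F_SETLK, &fl);

    fl.l_type = F_UNLCK;
    fl.l_start = 50;  fl.l_len = 100;   /* unlock [50,149]: splits the region,
                                           leaving [0,49] and [150,199] locked */
    fcntl(fd, F_SETLK, &fl);

    close(fd);
    return 0;
}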
void free(void *ptr)
{
    heap_mem_header_t *cur;
    void *heap_top;
    size_t size;

    if (!ptr)
        return;

    alloc_lock();

    if (!__alloc_heap_head) {
        alloc_unlock();
        return;
    }

    /* Freeing the head pointer is a special case. */
    if (ptr == __alloc_heap_head->ptr) {
        size = __alloc_heap_head->size +
               (size_t)(__alloc_heap_head->ptr - (void *)__alloc_heap_head);

        __alloc_heap_head = __alloc_heap_head->next;

        if (__alloc_heap_head != NULL) {
            __alloc_heap_head->prev = NULL;
        } else {
            __alloc_heap_tail = NULL;
            alloc_sbrk(-size);
        }

        alloc_unlock();
        return;
    }

    cur = __alloc_heap_head;
    while (ptr != cur->ptr) {
        /* ptr isn't in our list */
        if (cur->next == NULL) {
            alloc_unlock();
            return;
        }

        cur = cur->next;
    }

    /* Deallocate the block. */
    if (cur->next != NULL) {
        cur->next->prev = cur->prev;
    } else {
        /* If this block was the last one in the list, shrink the heap. */
        __alloc_heap_tail = cur->prev;

        /* We need to free (heap top) - (prev->ptr + prev->size), or else
           we'll end up with an unallocatable block of heap. */
        heap_top = alloc_sbrk(0);
        size = (u32)heap_top - (u32)(cur->prev->ptr + cur->prev->size);
        alloc_sbrk(-size);
    }

    cur->prev->next = cur->next;

    alloc_unlock();
}
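/*
 * A small exercise of the special cases in free() above: freeing the head
 * block (head pointer moves forward), freeing the tail block (the heap is
 * shrunk with a negative alloc_sbrk()), and emptying the list.  Purely
 * illustrative; the sizes are arbitrary.
 */
int demo_free_paths(void)
{
    void *a = malloc(128);  /* becomes the head block */
    void *b = malloc(128);
    void *c = malloc(128);  /* becomes the tail block */

    if (!a || !b || !c)
        return -1;

    free(a);    /* head case: __alloc_heap_head moves to b's header */
    free(c);    /* tail case: heap top is lowered back to the end of b */
    free(b);    /* list becomes empty; the remaining heap is released */
    return 0;
}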
void * realloc(void *ptr, size_t size)
{
    heap_mem_header_t *prev_mem;
    void *new_ptr = NULL;

    if (!size && ptr != NULL) {
        free(ptr);
        return new_ptr;
    }

    if (ptr == NULL)
        return malloc(size);

    if ((size & (DEFAULT_ALIGNMENT - 1)) != 0)
        size = ALIGN(size, DEFAULT_ALIGNMENT);

    alloc_lock();

    prev_mem = (heap_mem_header_t *)((u32)ptr - sizeof(heap_mem_header_t));

    /* Don't do anything if asked for a same-sized block. */
    /* If the new size is shorter, let's just shorten the block. */
    if (prev_mem->size >= size) {
        /* However, if this is the last block, we have to shrink the heap. */
        if (!prev_mem->next)
            alloc_sbrk(ptr + size - alloc_sbrk(0));
        prev_mem->size = size;
        alloc_unlock();
        return ptr;
    }

    /* We are asked for a larger block of memory. */

    /* Are we the last memory block? */
    if (!prev_mem->next) {
        /* Yes, let's just extend the heap then. */
        if (alloc_sbrk(size - prev_mem->size) == (void *)-1) {
            /* Don't leak the lock on failure. */
            alloc_unlock();
            return NULL;
        }
        prev_mem->size = size;
        alloc_unlock();
        return ptr;
    }

    /* Is the next block far enough away that we can extend the current block? */
    if ((prev_mem->next->ptr - ptr) > size) {
        prev_mem->size = size;
        alloc_unlock();
        return ptr;
    }

    alloc_unlock();

    /* We're out of luck; let's allocate a new block of memory. */
    if ((new_ptr = malloc(size)) == NULL)
        return new_ptr;

    /* The new block is larger, so we only copy the old data. */
    memcpy(new_ptr, ptr, prev_mem->size);
    free(ptr);

    return new_ptr;
}
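/*
 * A minimal exercise of the realloc() paths above: shrink in place, then
 * grow (extended in place when the block is last on the heap, otherwise
 * moved via malloc+memcpy+free).  Sizes are illustrative; the temporary q
 * avoids losing the original pointer if the grow fails.
 */
#include <string.h>

int demo_realloc(void)
{
    char *p = malloc(256);
    char *q;

    if (p == NULL)
        return -1;
    strcpy(p, "hello");

    p = realloc(p, 64);         /* shorter: the block is shrunk in place */

    q = realloc(p, 4096);       /* longer: extended or moved; data preserved */
    if (q == NULL) {
        free(p);                /* p is still valid when realloc fails */
        return -1;
    }
    p = q;

    free(p);
    return 0;
}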
void * malloc(size_t size)
{
    void *ptr = NULL, *mem_ptr;
    heap_mem_header_t *new_mem, *prev_mem;
    size_t mem_sz, heap_align_bytes;

    mem_sz = size + sizeof(heap_mem_header_t);

    if ((mem_sz & (DEFAULT_ALIGNMENT - 1)) != 0)
        mem_sz = ALIGN(mem_sz, DEFAULT_ALIGNMENT);

    alloc_lock();

    /* If we don't have any allocated blocks, reserve the first block from
       the OS and initialize __alloc_heap_tail. */
    if (__alloc_heap_head == NULL) {
        /* Align the bottom of the heap to our default alignment. */
        if (__alloc_heap_base == NULL) {
            heap_align_bytes = (u32)alloc_sbrk(0) & (DEFAULT_ALIGNMENT - 1);
            alloc_sbrk(heap_align_bytes);
            __alloc_heap_base = alloc_sbrk(0);
        }

        /* Allocate the physical heap and set up the head block. */
        if ((mem_ptr = alloc_sbrk(mem_sz)) == (void *)-1) {
            /* Don't leak the lock on failure. */
            alloc_unlock();
            return ptr; /* NULL */
        }

        ptr = (void *)((u32)mem_ptr + sizeof(heap_mem_header_t));

        __alloc_heap_head       = (heap_mem_header_t *)mem_ptr;
        __alloc_heap_head->ptr  = ptr;
        __alloc_heap_head->size = mem_sz - sizeof(heap_mem_header_t);
        __alloc_heap_head->prev = NULL;
        __alloc_heap_head->next = NULL;

        __alloc_heap_tail = __alloc_heap_head;

        alloc_unlock();
        return ptr;
    }

    /* Check to see if there's free space at the bottom of the heap. */
    if ((__alloc_heap_base + mem_sz) < (void *)__alloc_heap_head) {
        new_mem = (heap_mem_header_t *)__alloc_heap_base;
        ptr     = (void *)((u32)new_mem + sizeof(heap_mem_header_t));

        new_mem->ptr  = ptr;
        new_mem->size = mem_sz - sizeof(heap_mem_header_t);
        new_mem->prev = NULL;
        new_mem->next = __alloc_heap_head;
        new_mem->next->prev = new_mem;
        __alloc_heap_head = new_mem;

        alloc_unlock();
        return ptr;
    }

    /* See if we can allocate the block without extending the heap. */
    prev_mem = _heap_mem_fit(__alloc_heap_head, mem_sz);
    if (prev_mem != NULL) {
        new_mem = (heap_mem_header_t *)((u32)prev_mem->ptr + prev_mem->size);
        ptr     = (void *)((u32)new_mem + sizeof(heap_mem_header_t));

        new_mem->ptr  = ptr;
        new_mem->size = mem_sz - sizeof(heap_mem_header_t);
        new_mem->prev = prev_mem;
        new_mem->next = prev_mem->next;
        new_mem->next->prev = new_mem;
        prev_mem->next = new_mem;

        alloc_unlock();
        return ptr;
    }

    /* Extend the heap, but make certain the block is inserted in order. */
    if ((mem_ptr = alloc_sbrk(mem_sz)) == (void *)-1) {
        alloc_unlock();
        return ptr; /* NULL */
    }

    ptr = (void *)((u32)mem_ptr + sizeof(heap_mem_header_t));

    new_mem = (heap_mem_header_t *)mem_ptr;
    new_mem->ptr  = ptr;
    new_mem->size = mem_sz - sizeof(heap_mem_header_t);
    new_mem->prev = __alloc_heap_tail;
    new_mem->next = NULL;
    __alloc_heap_tail->next = new_mem;
    __alloc_heap_tail = new_mem;

    alloc_unlock();
    return ptr;
}
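/*
 * _heap_mem_fit() is called above but not defined in this excerpt.  A hedged
 * first-fit sketch consistent with the call site: walk the address-ordered
 * block list and return the block whose gap to its successor's header can
 * hold a new [header][payload] of size bytes, or NULL if no gap fits.  The
 * exact upstream implementation may differ.
 */
static heap_mem_header_t * _heap_mem_fit(heap_mem_header_t *head, size_t size)
{
    heap_mem_header_t *prev_mem = head;
    u32 prev_top, gap;

    while (prev_mem != NULL) {
        if (prev_mem->next != NULL) {
            /* Top of this block's payload, and the hole up to the
               next block's header. */
            prev_top = (u32)prev_mem->ptr + prev_mem->size;
            gap      = (u32)prev_mem->next - prev_top;
            if (gap >= size)
                return prev_mem;
        }

        prev_mem = prev_mem->next;
    }

    return NULL;
}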
static int lock_it(struct file *filp, struct file_lock *caller, unsigned int fd)
{
    struct file_lock *fl;
    struct file_lock *left = 0;
    struct file_lock *right = 0;
    struct file_lock **before;
    int added = 0;

    /*
     * Find the first old lock with the same owner as the new lock.
     */
    before = &filp->f_inode->i_flock;
    while ((fl = *before) &&
           (caller->fl_owner != fl->fl_owner ||
            caller->fl_fd != fl->fl_fd))
        before = &fl->fl_next;

    /*
     * Look up all locks of this owner.
     */
    while ((fl = *before) &&
           caller->fl_owner == fl->fl_owner &&
           caller->fl_fd == fl->fl_fd) {
        /*
         * Detect adjacent or overlapping regions (if same lock type)
         */
        if (caller->fl_type == fl->fl_type) {
            if (fl->fl_end < caller->fl_start - 1)
                goto next_lock;
            /*
             * If the next lock in the list has entirely bigger
             * addresses than the new one, insert the lock here.
             */
            if (fl->fl_start > caller->fl_end + 1)
                break;

            /*
             * If we come here, the new and old lock are of the
             * same type and adjacent or overlapping. Make one
             * lock yielding from the lower start address of both
             * locks to the higher end address.
             */
            if (fl->fl_start > caller->fl_start)
                fl->fl_start = caller->fl_start;
            else
                caller->fl_start = fl->fl_start;
            if (fl->fl_end < caller->fl_end)
                fl->fl_end = caller->fl_end;
            else
                caller->fl_end = fl->fl_end;
            if (added) {
                free_lock(before);
                continue;
            }
            caller = fl;
            added = 1;
            goto next_lock;
        }
        /*
         * Processing for different lock types is a bit more complex.
         */
        if (fl->fl_end < caller->fl_start)
            goto next_lock;
        if (fl->fl_start > caller->fl_end)
            break;
        if (caller->fl_type == F_UNLCK)
            added = 1;
        if (fl->fl_start < caller->fl_start)
            left = fl;
        /*
         * If the next lock in the list has a higher end address than
         * the new one, insert the new one here.
         */
        if (fl->fl_end > caller->fl_end) {
            right = fl;
            break;
        }
        if (fl->fl_start >= caller->fl_start) {
            /*
             * The new lock completely replaces an old one (this may
             * happen several times).
             */
            if (added) {
                free_lock(before);
                continue;
            }
            /*
             * Replace the old lock with the new one. Wake up
             * anybody waiting for the old one, as the change in
             * lock type might satisfy his needs.
             */
            wake_up(&fl->fl_wait);
            fl->fl_start = caller->fl_start;
            fl->fl_end = caller->fl_end;
            fl->fl_type = caller->fl_type;
            caller = fl;
            added = 1;
        }
        /*
         * Go on to next lock.
         */
    next_lock:
        before = &(*before)->fl_next;
    }

    if (!added) {
        if (caller->fl_type == F_UNLCK)
            return -EINVAL;
        if (!(caller = alloc_lock(before, caller, fd)))
            return -ENOLCK;
    }
    if (right) {
        if (left == right) {
            /*
             * The new lock breaks the old one in two pieces, so we
             * have to allocate one more lock (in this case, even
             * F_UNLCK may fail!).
             */
            if (!(left = alloc_lock(before, right, fd))) {
                if (!added)
                    free_lock(before);
                return -ENOLCK;
            }
        }
        right->fl_start = caller->fl_end + 1;
    }
    if (left)
        left->fl_end = caller->fl_start - 1;
    return 0;
}