/* Create a sub-binary term referencing bytes [pos, pos+size) of an existing
 * binary term.  No data is copied: the result shares the original binary's
 * storage.  The caller must guarantee pos/size lie within bin_term; this is
 * checked only in DEBUG builds. */
Eterm enif_make_sub_binary(ErlNifEnv* env, ERL_NIF_TERM bin_term,
                           size_t pos, size_t size)
{
    ErlSubBin* sb;
    Eterm orig;
    Uint offset, bit_offset, bit_size;
#ifdef DEBUG
    unsigned src_size;

    ASSERT(is_binary(bin_term));
    src_size = binary_size(bin_term);
    ASSERT(pos <= src_size);
    ASSERT(size <= src_size);
    ASSERT(pos + size <= src_size);
#endif
    sb = (ErlSubBin*) alloc_heap(env, ERL_SUB_BIN_SIZE);
    /* Resolve through any existing sub-binary to the underlying real binary,
     * so the new sub-binary never chains through another sub-binary. */
    ERTS_GET_REAL_BIN(bin_term, orig, offset, bit_offset, bit_size);
    sb->thing_word = HEADER_SUB_BIN;
    sb->size = size;
    sb->offs = offset + pos;   /* byte offset relative to the real binary */
    sb->orig = orig;
    sb->bitoffs = bit_offset;
    sb->bitsize = 0;           /* result is always a whole-byte binary */
    sb->is_writable = 0;
    return make_binary(sb);
}
int main(int argc, char **argv) { Obj *root = NULL; printf("sizeof(Obj): %d MEMORY_SIZE: %d\n", sizeof(Obj), HEAP_SIZE); memory.len = 0; memory.capa = MAX_HEAPS_SIZE; memory.heaps = malloc(sizeof(Obj*) * MAX_HEAPS_SIZE); free_list = alloc_heap(); if (DEBUG_GC) printf("MEMORY: %p + %x\n", memory, HEAP_SIZE); Nil = make_spe(TNIL); Dot = make_spe(TDOT); Cparen = make_spe(TCPAREN); True = make_spe(TTRUE); Env *env = malloc(sizeof(Env)); env->vars = Nil; env->next = NULL; define_consts(env, root); define_primitives(env, root); if (argc < 2) { do_repl(env, root); } else { eval_file(env, root, argv[1]); } return 0; }
/* Build an Erlang string (a list of character integers) from a Latin-1
 * byte buffer of the given length.  Only ERL_NIF_LATIN1 is supported
 * (DEBUG-only assertion). */
ERL_NIF_TERM enif_make_string_len(ErlNifEnv* env, const char* string,
                                  size_t len, ErlNifCharEncoding encoding)
{
    Eterm* hp = alloc_heap(env,len*2);  /* one 2-word cons cell per character */
    ASSERT(encoding == ERL_NIF_LATIN1);
    return erts_bld_string_n(&hp,NULL,string,len);
}
/* Create a term handle for a NIF resource object.  The resource lives in a
 * magic binary; the returned term is a ProcBin linked into the process
 * off-heap list so the resource stays alive while the term is reachable. */
ERL_NIF_TERM enif_make_resource(ErlNifEnv* env, void* obj)
{
    ErlNifResource* resource = DATA_TO_RESOURCE(obj);
    ErtsBinary* bin = ERTS_MAGIC_BIN_FROM_DATA(resource);
    Eterm* hp = alloc_heap(env,PROC_BIN_SIZE);
    return erts_mk_magic_binary_term(&hp, &MSO(env->proc), &bin->binary);
}
/* Convert an unsigned 64-bit value into an Erlang integer term
 * (an immediate small or a heap-allocated bignum, as appropriate). */
ERL_NIF_TERM enif_make_uint64(ErlNifEnv* env, ErlNifUInt64 i)
{
    Uint words_needed = 0;
    Uint* heap;

    /* First pass: ask the builder how many heap words are required. */
    erts_bld_uint64(NULL, &words_needed, i);
    heap = alloc_heap(env, words_needed);
    /* Second pass: build the term on the reserved heap space. */
    return erts_bld_uint64(&heap, NULL, i);
}
/* Deep-copy a term into dst_env's heap so it remains valid independently
 * of the environment the source term was created in. */
ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)
{
    Uint sz;
    Eterm* hp;

    sz = size_object(src_term);   /* heap words needed for a full copy */
    hp = alloc_heap(dst_env, sz);
    return copy_struct(src_term, sz, &hp, &MSO(dst_env->proc));
}
/* Box a C double as an Erlang float term on the environment's heap. */
ERL_NIF_TERM enif_make_double(ErlNifEnv* env, double d)
{
    FloatDef fval;
    Eterm* heap = alloc_heap(env, FLOAT_SIZE_OBJECT);

    fval.fd = d;
    PUT_DOUBLE(fval, heap);
    return make_float(heap);
}
/* Convert an unsigned int to an Erlang integer term.
 * When int is as wide as an Eterm, large values need a 2-word bignum
 * (header + one digit); when long is Eterm-sized, every unsigned int
 * fits in an immediate small. */
ERL_NIF_TERM enif_make_uint(ErlNifEnv* env, unsigned i)
{
#if SIZEOF_INT == ERTS_SIZEOF_ETERM
    return IS_USMALL(0,i) ? make_small(i) : uint_to_big(i,alloc_heap(env,2));
#elif SIZEOF_LONG == ERTS_SIZEOF_ETERM
    return make_small(i);
#endif
}
/* Convert a signed int to an Erlang integer term.
 * When int is as wide as an Eterm, out-of-small-range values need a
 * 2-word bignum; when long is Eterm-sized, every int fits in a small. */
ERL_NIF_TERM enif_make_int(ErlNifEnv* env, int i)
{
#if SIZEOF_INT == ERTS_SIZEOF_ETERM
    return IS_SSMALL(i) ? make_small(i) : small_to_big(i,alloc_heap(env,2));
#elif SIZEOF_LONG == ERTS_SIZEOF_ETERM
    return make_small(i);
#endif
}
/* Construct a single cons cell [car | cdr] on the environment's heap. */
ERL_NIF_TERM enif_make_list_cell(ErlNifEnv* env, Eterm car, Eterm cdr)
{
    Eterm* cell = alloc_heap(env, 2);

    CAR(cell) = car;
    CDR(cell) = cdr;
    return make_list(cell);
}
/* Convert an unsigned long to an Erlang integer term.
 * Values that fit in an immediate small stay on the stack; larger values
 * become a heap bignum, sized depending on the platform word widths. */
ERL_NIF_TERM enif_make_ulong(ErlNifEnv* env, unsigned long i)
{
    if (IS_USMALL(0,i)) {
        return make_small(i);
    }
#if SIZEOF_LONG == ERTS_SIZEOF_ETERM
    return uint_to_big(i,alloc_heap(env,2));  /* 2 words: header + one digit */
#elif SIZEOF_LONG == 8
    /* long is wider than an Eterm: build via the 64-bit path (up to 3 words) */
    ensure_heap(env,3);
    return erts_uint64_to_big(i, &env->hp);
#endif
}
/* Build a tuple term from an array of cnt element terms. */
ERL_NIF_TERM enif_make_tuple_from_array(ErlNifEnv* env, const ERL_NIF_TERM arr[], unsigned cnt)
{
    unsigned ix;
    Eterm* heap = alloc_heap(env, cnt + 1);  /* arity word + cnt elements */
    Eterm tuple = make_tuple(heap);

    *heap++ = make_arityval(cnt);
    for (ix = 0; ix < cnt; ix++) {
        *heap++ = arr[ix];
    }
    return tuple;
}
VM* init_vm(int stack_size, size_t heap_size, int max_threads // not implemented yet ) { VM* vm = malloc(sizeof(VM)); STATS_INIT_STATS(vm->stats) STATS_ENTER_INIT(vm->stats) VAL* valstack = malloc(stack_size * sizeof(VAL)); vm->active = 1; vm->valstack = valstack; vm->valstack_top = valstack; vm->valstack_base = valstack; vm->stack_max = valstack + stack_size; alloc_heap(&(vm->heap), heap_size, heap_size, NULL); c_heap_init(&vm->c_heap); vm->ret = NULL; vm->reg1 = NULL; #ifdef HAS_PTHREAD vm->inbox = malloc(1024*sizeof(VAL)); memset(vm->inbox, 0, 1024*sizeof(VAL)); vm->inbox_end = vm->inbox + 1024; vm->inbox_write = vm->inbox; vm->inbox_nextid = 1; // The allocation lock must be reentrant. The lock exists to ensure that // no memory is allocated during the message sending process, but we also // check the lock in calls to allocate. // The problem comes when we use requireAlloc to guarantee a chunk of memory // first: this sets the lock, and since it is not reentrant, we get a deadlock. pthread_mutexattr_t rec_attr; pthread_mutexattr_init(&rec_attr); pthread_mutexattr_settype(&rec_attr, PTHREAD_MUTEX_RECURSIVE); pthread_mutex_init(&(vm->inbox_lock), NULL); pthread_mutex_init(&(vm->inbox_block), NULL); pthread_mutex_init(&(vm->alloc_lock), &rec_attr); pthread_cond_init(&(vm->inbox_waiting), NULL); vm->max_threads = max_threads; vm->processes = 0; #else global_vm = vm; #endif STATS_LEAVE_INIT(vm->stats) return vm; }
/* Convert a signed long to an Erlang integer term.
 * Values that fit in an immediate small stay immediate; larger values
 * become a heap bignum, sized depending on the platform word widths. */
ERL_NIF_TERM enif_make_long(ErlNifEnv* env, long i)
{
    if (IS_SSMALL(i)) {
        return make_small(i);
    }
#if SIZEOF_LONG == ERTS_SIZEOF_ETERM
    return small_to_big(i, alloc_heap(env,2));  /* 2 words: header + digit */
#elif SIZEOF_LONG_LONG == ERTS_SIZEOF_ETERM
    return make_small(i);
#elif SIZEOF_LONG == 8
    /* long is wider than an Eterm: build via the 64-bit path (up to 3 words) */
    ensure_heap(env,3);
    return erts_sint64_to_big(i, &env->hp);
#endif
}
/* Return a fresh cell of the requested type from the free list, running
 * the garbage collector and, failing that, growing the heap.  Aborts via
 * error() when memory is truly exhausted. */
Obj *alloc(Env *env, Obj *root, int type) {
    if (!free_list)
        gc(env, root);
    if (!free_list) {
        /* GC reclaimed nothing: try to add a whole new heap. */
        free_list = alloc_heap();
        if (!free_list)
            error("memory exhausted");
    }
    Obj *cell = free_list;
    free_list = cell->next;
    cell->type = type;
    return cell;
}
/* Build a tuple of cnt terms passed as varargs (each an Eterm). */
ERL_NIF_TERM enif_make_tuple(ErlNifEnv* env, unsigned cnt, ...)
{
    Eterm* hp = alloc_heap(env,cnt+1);  /* arity word + cnt elements */
    Eterm ret = make_tuple(hp);
    va_list ap;

    *hp++ = make_arityval(cnt);
    va_start(ap,cnt);
    while (cnt--) {
        *hp++ = va_arg(ap,Eterm);
    }
    va_end(ap);
    return ret;
}
/* Build a proper list from an array of cnt terms.
 * Cons cells are laid out contiguously as [car0, cdr0, car1, cdr1, ...];
 * `last` always points at the cdr slot (initially the result variable)
 * that must be patched to point at the next cell.  cnt == 0 yields NIL. */
ERL_NIF_TERM enif_make_list_from_array(ErlNifEnv* env, const ERL_NIF_TERM arr[], unsigned cnt)
{
    Eterm* hp = alloc_heap(env,cnt*2);
    Eterm ret = make_list(hp);
    Eterm* last = &ret;
    const Eterm* src = arr;

    while (cnt--) {
        *last = make_list(hp);  /* link previous cdr (or ret) to this cell */
        *hp = *src++;           /* car */
        last = ++hp;            /* remember this cell's cdr slot */
        ++hp;                   /* advance to the next cell */
    }
    *last = NIL;                /* terminate the list */
    return ret;
}
/* Allocate and initialize a new VM: value stack, heap, registers, message
 * inbox and its locks, and the saved program arguments.
 * NOTE(review): malloc results are unchecked, and alloc_lock is created
 * non-recursive here (a sibling revision requires it recursive) — confirm
 * against the allocator's locking discipline.
 * max_threads is stored but not yet enforced. */
VM* init_vm(int stack_size, size_t heap_size,
            int max_threads, // not implemented yet
            int argc, char* argv[]) {
    VM* vm = malloc(sizeof(VM));
    STATS_INIT_STATS(vm->stats)
    STATS_ENTER_INIT(vm->stats)
    VAL* valstack = malloc(stack_size * sizeof(VAL));

    vm->valstack = valstack;
    vm->valstack_top = valstack;
    vm->valstack_base = valstack;
    vm->stack_max = valstack + stack_size;

    alloc_heap(&(vm->heap), heap_size);

    vm->ret = NULL;
    vm->reg1 = NULL;

    /* Fixed-capacity inbox of 1024 message slots, zero-initialized. */
    vm->inbox = malloc(1024*sizeof(VAL));
    memset(vm->inbox, 0, 1024*sizeof(VAL));
    vm->inbox_end = vm->inbox + 1024;
    vm->inbox_ptr = vm->inbox;
    vm->inbox_write = vm->inbox;

    pthread_mutex_init(&(vm->inbox_lock), NULL);
    pthread_mutex_init(&(vm->inbox_block), NULL);
    pthread_mutex_init(&(vm->alloc_lock), NULL);
    pthread_cond_init(&(vm->inbox_waiting), NULL);

    vm->max_threads = max_threads;
    vm->processes = 0;
    vm->argv = argv;  /* borrowed, not copied: must outlive the VM */
    vm->argc = argc;
    STATS_LEAVE_INIT(vm->stats)
    return vm;
}
/* Build the reverse of a proper list into *list.
 * Returns 1 on success (NIL reverses to itself), 0 when term is not a
 * proper list, in which case *list is left untouched. */
int enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list) {
    Eterm acc = NIL;

    if (is_nil(term)) {
        *list = term;
        return 1;
    }
    while (is_not_nil(term)) {
        Eterm* cell;
        Eterm* pair;

        if (is_not_list(term)) {
            return 0;  /* improper list: bail without touching *list */
        }
        cell = alloc_heap(env, 2);
        pair = list_val(term);
        acc = CONS(cell, CAR(pair), acc);  /* prepend head onto accumulator */
        term = CDR(pair);
    }
    *list = acc;
    return 1;
}
/* Build a proper list from cnt varargs terms; returns NIL for cnt == 0.
 * Same contiguous cons-cell layout as enif_make_list_from_array: `last`
 * points at the cdr slot that must be patched to the next cell. */
ERL_NIF_TERM enif_make_list(ErlNifEnv* env, unsigned cnt, ...)
{
    if (cnt == 0) {
        return NIL;
    } else {
        Eterm* hp = alloc_heap(env,cnt*2);
        Eterm ret = make_list(hp);
        Eterm* last = &ret;
        va_list ap;

        va_start(ap,cnt);
        while (cnt--) {
            *last = make_list(hp);   /* link previous cdr to this cell */
            *hp = va_arg(ap,Eterm);  /* car */
            last = ++hp;             /* remember this cell's cdr slot */
            ++hp;
        }
        va_end(ap);
        *last = NIL;                 /* terminate the list */
        return ret;
    }
}
/* Turn an ErlNifBinary into a binary term.
 * Three cases: (1) a term was already made for this binary — reuse it;
 * (2) the binary has an off-heap refc binary — wrap it in a ProcBin
 * linked into the process off-heap list, transferring ownership when we
 * hold the only reference; (3) plain data — copy it into a fresh binary. */
Eterm enif_make_binary(ErlNifEnv* env, ErlNifBinary* bin)
{
    if (bin->bin_term != THE_NON_VALUE) {
        /* Already converted once: return the cached term. */
        return bin->bin_term;
    }
    else if (bin->ref_bin != NULL) {
        Binary* bptr = bin->ref_bin;
        ProcBin* pb;
        Eterm bin_term;

        /* !! Copy-paste from new_binary() !! */
        pb = (ProcBin *) alloc_heap(env, PROC_BIN_SIZE);
        pb->thing_word = HEADER_PROC_BIN;
        pb->size = bptr->orig_size;
        pb->next = MSO(env->proc).first;
        MSO(env->proc).first = (struct erl_off_heap_header*) pb;
        pb->val = bptr;
        pb->bytes = (byte*) bptr->orig_bytes;
        pb->flags = 0;

        OH_OVERHEAD(&(MSO(env->proc)), pb->size / sizeof(Eterm));
        bin_term = make_binary(pb);
        if (erts_refc_read(&bptr->refc, 1) == 1) {
            /* Total ownership transfer: the term now owns the refc binary,
             * so the ErlNifBinary must not release it again. */
            bin->ref_bin = NULL;
            bin->bin_term = bin_term;
        }
        return bin_term;
    }
    else {
        /* No backing refc binary: copy the bytes into a new binary.
         * flush/cache keep env's heap pointers consistent across the call. */
        flush_env(env);
        bin->bin_term = new_binary(env->proc, bin->data, bin->size);
        cache_env(env);
        return bin->bin_term;
    }
}
// Load the kernel image from the boot device into physical memory and map
// it into the kernel page table.  Locates the active partition via the MBR
// (when booting from harddisk), validates the DFS superblock, walks the
// kernel inode's block directory (direct or single-indirect), then reads
// the PE headers to set the entry point.  Panics on any error.
// Side effects: sets globals bootpart and krnlentry, writes pdir[].
void load_kernel(int bootdrv) {
  struct master_boot_record *mbr;
  struct superblock *sb;
  struct groupdesc *group;
  struct inodedesc *inode;
  int blocksize;
  int blks_per_sect;
  int kernelsize;
  int kernelpages;
  char *kerneladdr;
  struct dos_header *doshdr;
  struct image_header *imghdr;
  char *addr;
  blkno_t blkno;
  int i;
  int j;
  pte_t *pt;
  int imgpages;
  int start;
  char *label;
  struct boot_sector *bootsect;

  //kprintf("Loading kernel");

  // Determine active boot partition if booting from harddisk
  if (bootdrv & 0x80 && (bootdrv & 0xF0) != 0xF0) {
    mbr = (struct master_boot_record *) bsect;
    if (boot_read(mbr, SECTORSIZE, 0) != SECTORSIZE) {
      panic("unable to read master boot record");
    }
    if (mbr->signature != MBR_SIGNATURE) panic("invalid boot signature");

    bootsect = (struct boot_sector *) bsect;
    label = bootsect->label;
    if (label[0] == 'S' && label[1] == 'A' && label[2] == 'N' && label[3] == 'O' && label[4] == 'S') {
      // Disk does not have a partition table
      start = 0;
      bootpart = -1;
    } else {
      // Find active partition
      bootpart = -1;
      for (i = 0; i < 4; i++) {
        if (mbr->parttab[i].bootid == 0x80) {
          bootpart = i;
          start = mbr->parttab[i].relsect;
        }
      }
      if (bootpart == -1) panic("no bootable partition on boot drive");
    }
  } else {
    // Floppy or ramdisk boot: whole device, no partition table.
    start = 0;
    bootpart = 0;
  }

  // Read super block from boot device
  sb = (struct superblock *) ssect;
  if (boot_read(sb, SECTORSIZE, 1 + start) != SECTORSIZE) {
    panic("unable to read super block from boot device");
  }

  // Check signature and version
  if (sb->signature != DFS_SIGNATURE) panic("invalid DFS signature");
  if (sb->version != DFS_VERSION) panic("invalid DFS version");
  blocksize = 1 << sb->log_block_size;
  blks_per_sect = blocksize / SECTORSIZE;

  // Read first group descriptor
  group = (struct groupdesc *) gsect;
  if (boot_read(group, SECTORSIZE, sb->groupdesc_table_block * blks_per_sect + start) != SECTORSIZE) {
    panic("unable to read group descriptor from boot device");
  }

  // Read inode for kernel
  inode = (struct inodedesc *) isect;
  if (boot_read(isect, SECTORSIZE, group->inode_table_block * blks_per_sect + start) != SECTORSIZE) {
    panic("unable to read kernel inode from boot device");
  }
  inode += DFS_INODE_KRNL;  // the kernel inode has a fixed index in the table

  // Calculate kernel size
  kernelsize = (int) inode->size;
  kernelpages = PAGES(kernelsize);
  //kprintf("Kernel size %d KB\n", kernelsize / 1024);

  // Allocate page table for kernel
  if (kernelpages > PTES_PER_PAGE) panic("kernel too big");
  pt = (pte_t *) alloc_heap(1);
  pdir[PDEIDX(OSBASE)] = (unsigned long) pt | PT_PRESENT | PT_WRITABLE;

  // Allocate pages for kernel
  kerneladdr = alloc_heap(kernelpages);

  // Read kernel from boot device
  if (inode->depth == 0) {
    // Direct blocks: block numbers are stored in the inode itself.
    addr = kerneladdr;
    for (i = 0; i < (int) inode->blocks; i++) {
      if (boot_read(addr, blocksize, inode->blockdir[i] * blks_per_sect + start) != blocksize) {
        panic("error reading kernel from boot device");
      }
      addr += blocksize;
    }
  } else if (inode->depth == 1) {
    // Single indirection: each inode entry names a block full of block
    // numbers; stop once inode->blocks data blocks have been read.
    addr = kerneladdr;
    blkno = 0;
    for (i = 0; i < DFS_TOPBLOCKDIR_SIZE; i++) {
      if (boot_read(blockdir, blocksize, inode->blockdir[i] * blks_per_sect + start) != blocksize) {
        panic("error reading kernel inode dir from boot device");
      }
      for (j = 0; j < (int) (blocksize / sizeof(blkno_t)); j++) {
        if (boot_read(addr, blocksize, blockdir[j] * blks_per_sect + start) != blocksize) {
          panic("error reading kernel inode dir from boot device");
        }
        addr += blocksize;
        blkno++;
        if (blkno == inode->blocks) break;
      }
      if (blkno == inode->blocks) break;
    }
  } else {
    panic("unsupported inode depth");
  }

  // Determine entry point for kernel
  doshdr = (struct dos_header *) kerneladdr;
  imghdr = (struct image_header *) (kerneladdr + doshdr->e_lfanew);
  krnlentry = imghdr->optional.address_of_entry_point + OSBASE;

  // Allocate pages for .data section (image may be larger than the file)
  imgpages = PAGES(imghdr->optional.size_of_image);
  alloc_heap(imgpages - kernelpages);

  // Relocate resource data and clear uninitialized data
  if (imghdr->header.number_of_sections == 4) {
    struct image_section_header *data = &imghdr->sections[2];
    struct image_section_header *rsrc = &imghdr->sections[3];

    memcpy(kerneladdr + rsrc->virtual_address, kerneladdr + rsrc->pointer_to_raw_data, rsrc->size_of_raw_data);
    memset(kerneladdr + data->virtual_address + data->size_of_raw_data, 0, data->virtual_size - data->size_of_raw_data);
  }

  // Map kernel into virtual address space
  for (i = 0; i < imgpages; i++) pt[i] = (unsigned long) (kerneladdr + i * PAGESIZE) | PT_PRESENT | PT_WRITABLE;
}
/* Create a fresh unique reference term on the environment's heap. */
ERL_NIF_TERM enif_make_ref(ErlNifEnv* env)
{
    Eterm* hp = alloc_heap(env, REF_THING_SIZE);
    return erts_make_ref_in_buffer(hp);
}