Val* heapio__read_externs_table (Inbuf *bp) {
    //
    // Read the externs table from a heap image: a count header followed
    // by that many NUL-terminated symbol names.  Each name is resolved
    // to its address in this run-time via find_cfun; we die() if any
    // name is unknown.  Caller owns the returned MALLOC_VEC'd vector.

    // Read the header:
    //
    Externs_Header hdr;
    heapio__read_block( bp, &hdr, sizeof( hdr ) );

    Val* result = MALLOC_VEC( Val, hdr.externs_count );

    // Slurp in the packed, NUL-separated symbol names:
    //
    Unt8* names = MALLOC_VEC( Unt8, hdr.externs_bytesize );
    heapio__read_block( bp, names, hdr.externs_bytesize );

    // Resolve each exported symbol name to its
    // address in the run-time system:
    //
    Unt8* name = names;
    int   slot = 0;
    while (slot < hdr.externs_count) {
        //
        Val addr = find_cfun ((char*) name);

        if (addr == HEAP_VOID)   die ("Run-time system does not provide \"%s\"", name);

        result[ slot++ ] = addr;

        name += strlen((char*)name) + 1;    // Step past this name and its terminating NUL.
    }

    FREE( names );

    return result;
}
static void read_heap (
    // =========
    //
    // Reconstruct the exported heap described by 'header' from the
    // bytestream 'bp', installing it into task->heap, then relocate
    // every heap pointer (via repair_heap / repair_word) from the
    // exporting process' address space into ours.  'externs' is the
    // vector produced by heapio__read_externs_table, mapping extern
    // indices back to addresses in this run-time.
    //
    Inbuf* bp,
    Heap_Header* header,
    Task* task,
    Val* externs
){
    Heap* heap = task->heap;
    Sib_Header* sib_headers;
    Sib_Header* p;                              // Cursor over sib_headers, advanced once per sib over all agegroups.
    Sib_Header* q;                              // Second cursor: pre-scans the plain sibs of the current agegroup.
    int sib_headers_bytesize;
    int i, j, k;
    long prevSzB[MAX_PLAIN_SIBS], size;         // Per-ilk bytesize of the previous (younger) agegroup's sib.
    Sibid* oldBOOK2SIBID;                       // Book->sibid map describing the EXPORTING process' address space.
    Punt addrOffset[MAX_AGEGROUPS][MAX_PLAIN_SIBS];     // Relocation delta: new tospace address minus old base address.
    Hugechunk_Quire_Relocation_Info* boRelocInfo;
    Addresstable* boRegionTable;                // Maps old hugechunk-quire base addresses to their relocation info.

    // Allocate a book_to_sibid__global for the imported
    // heap image's address space:
    //
    #ifdef TWO_LEVEL_MAP
        #error two level map not supported
    #else
        oldBOOK2SIBID = MALLOC_VEC (Sibid, BOOK2SIBID_TABLE_SIZE_IN_SLOTS);
    #endif

    // Read in the hugechunk region descriptors
    // for the old address space:
    //
    {   int size;                               // Shadows the outer 'size' — only used in this scope.
        Hugechunk_Quire_Header* boRgnHdr;

        boRegionTable = make_address_hashtable(LOG2_BOOK_BYTESIZE+1, header->hugechunk_quire_count);

        size = header->hugechunk_quire_count * sizeof(Hugechunk_Quire_Header);

        boRgnHdr = (Hugechunk_Quire_Header*) MALLOC (size);

        heapio__read_block( bp, boRgnHdr, size );

        boRelocInfo = MALLOC_VEC(Hugechunk_Quire_Relocation_Info, header->hugechunk_quire_count);

        for (i = 0; i < header->hugechunk_quire_count; i++) {
            //
            // Mark the old books spanned by this quire as hugechunk data.
            // NOTE(review): agegroup argument is hardwired to 1 here —
            // presumably only the data-vs-record distinction matters in
            // oldBOOK2SIBID; confirm against repair_heap.
            //
            set_book2sibid_entries_for_range(oldBOOK2SIBID,
                (Val*)(boRgnHdr[i].base_address),
                BOOKROUNDED_BYTESIZE(boRgnHdr[i].bytesize),
                HUGECHUNK_DATA_SIBID(1)
            );

            // The first book of the quire holds its record:
            //
            oldBOOK2SIBID[GET_BOOK_CONTAINING_POINTEE(boRgnHdr[i].base_address)]
                = HUGECHUNK_RECORD_SIBID(MAX_AGEGROUPS);

            boRelocInfo[i].first_ram_quantum = boRgnHdr[i].first_ram_quantum;

            // Number of hugechunk ram quanta between first_ram_quantum
            // and the end of the quire:
            //
            boRelocInfo[i].page_count
                = (boRgnHdr[i].bytesize - (boRgnHdr[i].first_ram_quantum - boRgnHdr[i].base_address))
                >> LOG2_HUGECHUNK_RAM_QUANTUM_IN_BYTES;

            boRelocInfo[i].hugechunk_page_to_hugechunk
                = MALLOC_VEC(Hugechunk_Relocation_Info*, boRelocInfo[i].page_count);

            for (j = 0; j < boRelocInfo[i].page_count; j++) {
                //
                boRelocInfo[i].hugechunk_page_to_hugechunk[j] = NULL;   // Filled in later as chunks are allocated.
            }

            addresstable_insert (boRegionTable, boRgnHdr[i].base_address, &(boRelocInfo[i]));
        }

        FREE (boRgnHdr);
    }

    // Read the sib headers:
    //
    sib_headers_bytesize = header->active_agegroups * TOTAL_SIBS * sizeof( Sib_Header );
    //
    sib_headers = (Sib_Header*) MALLOC( sib_headers_bytesize );
    //
    heapio__read_block( bp, sib_headers, sib_headers_bytesize );

    // Seed prevSzB with the agegroup0 buffer size — the "previous
    // agegroup" of agegroup 1 is the agegroup0 allocation buffer:
    //
    for (i = 0; i < MAX_PLAIN_SIBS; i++) {
        //
        prevSzB[i] = task->heap_allocation_buffer_bytesize;
    }

    // Allocate the sib buffers and read in the heap image:
    //
    for (p = sib_headers, i = 0; i < header->active_agegroups; i++) {
        //
        Agegroup* age = heap->agegroup[ i ];

        // Compute the space required for this agegroup,
        // and mark the oldBOOK2SIBID to reflect the old address space:
        //
        for (q = p, j = 0; j < MAX_PLAIN_SIBS; j++) {

            set_book2sibid_entries_for_range (
                //
                oldBOOK2SIBID,
                (Val*) q->info.o.base_address,
                BOOKROUNDED_BYTESIZE( q->info.o.bytesize ),
                age->sib[ j ]->id
            );

            // Size the new tospace to hold this sib's image plus the
            // younger agegroup's worth of potential promotions:
            //
            size = q->info.o.bytesize + prevSzB[j];

            if (j == RO_CONSCELL_SIB
            &&  size > 0
            ){
                size += 2*WORD_BYTESIZE;        // Extra pair slot — presumably the conscell sib's alignment pad; confirm.
            }

            age->sib[ j ]->tospace.bytesize
                = BOOKROUNDED_BYTESIZE( size );

            prevSzB[ j ] = q->info.o.bytesize;

            q++;
        }

        if (set_up_tospace_sib_buffers_for_agegroup(age) == FALSE) {
            // NOTE(review): "allocated" should read "allocate" — runtime
            // string left untouched here.
            die ("unable to allocated space for agegroup %d\n", i+1);
        }

        if (sib_is_active( age->sib[ RW_POINTERS_SIB ] )) {             // sib_is_active   def in   src/c/h/heap.h
            //
            make_new_coarse_inter_agegroup_pointers_map_for_agegroup (age);
        }

        // Read in the sib buffers for this agegroup
        // and initialize the address offset table:
        //
        for (int j = 0;   j < MAX_PLAIN_SIBS;   j++) {
            //
            Sib* ap = age->sib[ j ];

            if (p->info.o.bytesize > 0) {

                addrOffset[i][j] = (Punt)(ap->tospace.start) - (Punt)(p->info.o.base_address);

                heapio__seek( bp, (long) p->offset );

                heapio__read_block( bp, (ap->tospace.start), p->info.o.bytesize );

                ap->tospace.used_end = (Val *)((Punt)(ap->tospace.start) + p->info.o.bytesize);

                ap->fromspace.seniorchunks_end =  ap->tospace.start;

            } else if (sib_is_active(ap)) {
                //
                ap->fromspace.seniorchunks_end = ap->tospace.start;
            }

            if (verbosity__global > 0)   say(".");

            p++;
        }

        // Read in the hugechunk sib buffers (currently just codechunks):
        //
        for (int ilk = 0;   ilk < MAX_HUGE_SIBS;   ilk++) {             // MAX_HUGE_SIBS         def in    src/c/h/sibid.h
            //
            Punt totSizeB;
            Hugechunk* free_chunk;
            Hugechunk* bdp = NULL;              // Without this initialization, gcc -Wall gives a 'possible uninitialized use' warning.
            Hugechunk_Quire* free_quire;
            Hugechunk_Header* boHdrs;
            int boHdrSizeB;
            int index;
            Hugechunk_Quire_Relocation_Info* region;

            if (p->info.bo.hugechunk_quanta_count > 0) {
                //
                totSizeB = p->info.bo.hugechunk_quanta_count << LOG2_HUGECHUNK_RAM_QUANTUM_IN_BYTES;

                free_chunk = allocate_hugechunk_quire( heap, totSizeB );

                free_quire = free_chunk->hugechunk_quire;

                free_quire->age_of_youngest_live_chunk_in_quire = i;

                // Mark the new quire in the (current) book-to-sibid map:
                //
                set_book2sibid_entries_for_range (
                    //
                    book_to_sibid__global,
                    (Val*) free_quire,
                    BYTESIZE_OF_QUIRE( free_quire->quire ),
                    HUGECHUNK_DATA_SIBID( i )
                );

                book_to_sibid__global[ GET_BOOK_CONTAINING_POINTEE( free_quire ) ]
                    = HUGECHUNK_RECORD_SIBID( i );

                // Read in the hugechunk headers:
                //
                boHdrSizeB = p->info.bo.hugechunk_count * sizeof(Hugechunk_Header);
                //
                boHdrs = (Hugechunk_Header*) MALLOC (boHdrSizeB);
                //
                heapio__read_block (bp, boHdrs, boHdrSizeB);

                // Read in the hugechunks:
                //
                heapio__read_block( bp, (void *)(free_chunk->chunk), totSizeB );
                //
                if (ilk == CODE__HUGE_SIB) {                            // ilk = 0 == CODE__HUGE_SIB   def in   src/c/h/sibid.h
                    //
                    flush_instruction_cache ((void *)(free_chunk->chunk), totSizeB);
                }

                // Set up the hugechunk descriptors
                // and per-chunk relocation info:
                //
                for (k = 0;   k < p->info.bo.hugechunk_count;   k++) {
                    //
                    // Find the region relocation info for the
                    // chunk's region in the exported heap —
                    // scan backward from the chunk's book to
                    // the book holding its quire record:
                    //
                    for (index = GET_BOOK_CONTAINING_POINTEE(boHdrs[k].base_address);
                        !SIBID_ID_IS_BIGCHUNK_RECORD(oldBOOK2SIBID[index]);
                        index--
                    )   continue;

                    region = LOOK_UP_HUGECHUNK_REGION (boRegionTable, index);

                    // Allocate the hugechunk record for
                    // the chunk and link it into the list
                    // of hugechunks for its agegroup.
                    //
                    bdp = allocate_a_hugechunk( free_chunk, &(boHdrs[k]), region );

                    bdp->next = age->hugechunks[ ilk ];
                    age->hugechunks[ ilk ] = bdp;

                    ASSERT( bdp->gen == i+1 );

                    if (codechunk_comment_display_is_enabled__global
                    &&  ilk == CODE__HUGE_SIB
                    ){
                        // Dump the comment string of the code chunk.
                        //
                        Unt8* namestring;
                        //
                        if ((namestring = get_codechunk_comment_string_else_null( bdp ))) {
                            debug_say ("[%6d bytes] %s\n", bdp->bytesize, (char*)namestring);
                        }
                    }
                }

                if (free_chunk != bdp) {        // if p->info.bo.hugechunk_count can be zero, 'bdp' value here may be bogus.  XXX BUGGO FIXME.
                    //
                    // There was some extra space left in the region:
                    //
                    insert_hugechunk_in_doubly_linked_list( heap->hugechunk_freelist, free_chunk);   // insert_hugechunk_in_doubly_linked_list   def in    src/c/h/heap.h
                }

                FREE (boHdrs);
            }

            if (verbosity__global > 0)   say(".");

            p++;
        }
    }

    // Relocate all heap pointers in the freshly read image:
    //
    repair_heap (heap, oldBOOK2SIBID, addrOffset, boRegionTable, externs);

    // Adjust the run-time globals
    // that point into the heap:
    //
    *PTR_CAST( Val*, PERVASIVE_PACKAGE_PICKLE_LIST_REFCELL__GLOBAL )
        = repair_word( *PTR_CAST( Val*, PERVASIVE_PACKAGE_PICKLE_LIST_REFCELL__GLOBAL ),
                       oldBOOK2SIBID, addrOffset, boRegionTable, externs );

    runtime_package__global = repair_word( runtime_package__global, oldBOOK2SIBID, addrOffset, boRegionTable, externs );

    #ifdef ASM_MATH
        mathvec__global = repair_word (mathvec__global, oldBOOK2SIBID, addrOffset, boRegionTable, externs);
    #endif

    // Adjust the Mythryl registers
    // to the new address space:
    //
    ASSIGN( POSIX_INTERPROCESS_SIGNAL_HANDLER_REFCELL__GLOBAL,
        //
        repair_word (
            //
            DEREF( POSIX_INTERPROCESS_SIGNAL_HANDLER_REFCELL__GLOBAL ),
            oldBOOK2SIBID,
            addrOffset,
            boRegionTable,
            externs
        )
    );

    task->argument              = repair_word( task->argument,              oldBOOK2SIBID, addrOffset, boRegionTable, externs );
    task->fate                  = repair_word( task->fate,                  oldBOOK2SIBID, addrOffset, boRegionTable, externs );
    task->current_closure       = repair_word( task->current_closure,       oldBOOK2SIBID, addrOffset, boRegionTable, externs );
    task->program_counter       = repair_word( task->program_counter,       oldBOOK2SIBID, addrOffset, boRegionTable, externs );
    task->link_register         = repair_word (task->link_register,         oldBOOK2SIBID, addrOffset, boRegionTable, externs );
    task->exception_fate        = repair_word( task->exception_fate,        oldBOOK2SIBID, addrOffset, boRegionTable, externs );
    task->current_thread        = repair_word( task->current_thread,        oldBOOK2SIBID, addrOffset, boRegionTable, externs );

    task->callee_saved_registers[0]
        = repair_word( task->callee_saved_registers[0], oldBOOK2SIBID, addrOffset, boRegionTable, externs );

    task->callee_saved_registers[1]
        = repair_word( task->callee_saved_registers[1], oldBOOK2SIBID, addrOffset, boRegionTable, externs );

    task->callee_saved_registers[2]
        = repair_word( task->callee_saved_registers[2], oldBOOK2SIBID, addrOffset, boRegionTable, externs );

    // Release storage:
    //
    for (i = 0; i < header->hugechunk_quire_count; i++) {
        //
        Hugechunk_Relocation_Info* p;           // Shadows the outer Sib_Header* p in this scope.

        // Free each distinct relocation record exactly once; consecutive
        // pages sharing a record are skipped via the 'p' memo.  (Records
        // shared non-consecutively would double-free — the page table is
        // presumably filled so equal entries are adjacent; confirm.)
        //
        for (p = NULL, j = 0;   j < boRelocInfo[i].page_count;   j++) {
            //
            if ((boRelocInfo[i].hugechunk_page_to_hugechunk[j] != NULL)
            &&  (boRelocInfo[i].hugechunk_page_to_hugechunk[j] != p)
            ){
                FREE (boRelocInfo[i].hugechunk_page_to_hugechunk[j]);
                p =   boRelocInfo[i].hugechunk_page_to_hugechunk[j];
            }
        }
    }

    free_address_table( boRegionTable, FALSE );

    FREE( boRelocInfo  );
    FREE( sib_headers  );
    FREE( oldBOOK2SIBID );

    // Reset the tospace.swept_end pointers:
    //
    for (int i = 0;  i < heap->active_agegroups;  i++) {
        //
        Agegroup* age =  heap->agegroup[i];
        //
        for (int j = 0;  j < MAX_PLAIN_SIBS;  j++) {
            //
            Sib* ap =  age->sib[ j ];
            //
            if (sib_is_active(ap)) {                                    // sib_is_active   def in   src/c/h/heap.h
                //
                ap->tospace.swept_end
                    =
                    ap->tospace.used_end;
            }
        }
    }
}                                               // fun read_heap
static Status   read_image   (Task* task,  Inbuf* bp,  Val* chunk_ref)   {
    // ==========
    //
    // Read a pickled datastructure image from 'bp' into task's heap.
    //
    // NOTE(review): the text visible here appears TRUNCATED — it ends
    // after the agegroup-1 free-space check without storing through
    // 'chunk_ref', without returning a Status, and without the
    // function's closing brace.  The remainder of the function
    // (presumably: reading the heap proper and returning TRUE) is
    // missing from this view.
    //
    Pickle_Header pickle_header;
    Val* externs;
    Sib_Header* sib_headers[ TOTAL_SIBS ];      // Indexed by chunk ilk; NULL for ilks absent from this pickle.
    Sib_Header* sib_headers_buffer;
    int sib_headers_size;

    Agegroup*   age1 =  task->heap->agegroup[ 0 ];

    // Validate the pickle header — bail on short read
    // or implausible sib counts:
    //
    if (heapio__read_block( bp, &pickle_header, sizeof(pickle_header) ) == FALSE
    ||  pickle_header.smallchunk_sibs_count >  MAX_PLAIN_SIBS                           // MAX_PLAIN_SIBS	def in    src/c/h/sibid.h
    ||  pickle_header.hugechunk_sibs_count  >  MAX_HUGE_SIBS                            // MAX_HUGE_SIBS	def in    src/c/h/sibid.h
    ){
        return FALSE;                                                                   // XXX BUGGO FIXME we gotta do better than this.
    }

    // Read the externals table:
    //
    externs = heapio__read_externs_table( bp );

    // Read the sib headers:
    //
    sib_headers_size = (pickle_header.smallchunk_sibs_count + pickle_header.hugechunk_sibs_count) * sizeof( Sib_Header );
    //
    sib_headers_buffer = (Sib_Header*) MALLOC (sib_headers_size);
    //
    if (heapio__read_block( bp, sib_headers_buffer, sib_headers_size ) == FALSE) {
        //
        // NOTE(review): 'externs' (allocated just above) is not freed
        // on this error path — leak; confirm whether intentional.
        //
        FREE( sib_headers_buffer );
        return FALSE;
    }
    //
    for (int ilk = 0;  ilk < TOTAL_SIBS;  ilk++) {
        //
        sib_headers[ ilk ] = NULL;
    }
    //
    // Index the small-chunk headers by their ilk:
    //
    for (int sib = 0;  sib < pickle_header.smallchunk_sibs_count;  sib++) {
        //
        Sib_Header* p = &sib_headers_buffer[ sib ];
        //
        sib_headers[ p->chunk_ilk ] = p;
    }

    // DO BIG CHUNK HEADERS TOO

    // Check the heap to see if there is
    // enough free space in agegroup 1:
    //
    {   Punt agegroup0_buffer_bytesize = agegroup0_buffer_size_in_bytes( task );
        //
        Bool needs_cleaning = FALSE;

        for (int ilk = 0;  ilk < MAX_PLAIN_SIBS;  ilk++) {
            //
            Sib* sib = age1->sib[ ilk ];

            // A sib needs a heapclean if it is inactive or cannot hold
            // the incoming image plus a full agegroup0 buffer:
            //
            if (sib_headers[ilk] != NULL
            && (!sib_is_active(sib)                                                     // sib_is_active		def in    src/c/h/heap.h
               || sib_freespace_in_bytes(sib)
                  < sib_headers[ ilk ]->info.o.bytesize                                 // sib_freespace_in_bytes	def in    src/c/h/heap.h
                    + agegroup0_buffer_bytesize
               )
            ){
                needs_cleaning = TRUE;
                sib->requested_extra_free_bytes = sib_headers[ ilk ]->info.o.bytesize;
            }
        }

        if (needs_cleaning) {
            //
            if (bp->nbytes <= 0) {
                //
                // No more input buffered, so the buffer cannot move:
                //
                call_heapcleaner( task, 1 );                                            // call_heapcleaner	def in    /src/c/heapcleaner/call-heapcleaner.c

            } else {
                //
                // The cleaning may move the buffer, so:
                //
                Val buffer = PTR_CAST( Val, bp->base );

                {   Roots extra_roots = { &buffer, NULL };
                    //
                    call_heapcleaner_with_extra_roots (task, 1, &extra_roots );
                }

                if (buffer != PTR_CAST( Val, bp->base )) {
                    //
                    // The buffer moved, so adjust the buffer pointers:
                    //
                    Unt8* new_base = PTR_CAST( Unt8*, buffer );

                    bp->buf = new_base + (bp->buf - bp->base);
                    bp->base = new_base;
                }
            }
        }
    }
Val   unpickle_datastructure   (Task* task,  Unt8* buf,  long len,  Bool* seen_error)   {
    //======================
    //
    // Build a heap chunk from a sequence of bytes.
    // 'buf'/'len' hold the pickled bytes; on any failure we set
    // *seen_error = TRUE and return HEAP_VOID, otherwise *seen_error
    // is left untouched and the unpickled root chunk is returned.
    //
    // This fn gets exported to the Mythryl level as 'unpickle_datastructure' via
    //
    //     src/c/lib/heap/datastructure-unpickler.c
    // and then
    //     src/lib/std/src/unsafe/unsafe.pkg

    Heapfile_Header header;
    Val             root = HEAP_VOID;

    // Set up an in-memory input buffer over the pickled bytes:
    //
    Inbuf in;
    in.needs_to_be_byteswapped = FALSE;
    in.file                    = NULL;
    in.base                    = buf;
    in.buf                     = buf;
    in.nbytes                  = len;

    // Read and sanity-check the chunk header:
    //
    if (heapio__read_block( &in, &header, sizeof(header) ) == FALSE)   goto bad_pickle; // heapio__read_block	def in   src/c/heapcleaner/import-heap-stuff.c

    if (header.byte_order != ORDER) {
        //
        // Maybe the pickle was written on a machine
        // of the opposite endianity:
        //
        if (BIGENDIAN_TO_HOST(header.byte_order) != ORDER)   goto bad_pickle;

        header.magic = BIGENDIAN_TO_HOST( header.magic );
        header.kind  = BIGENDIAN_TO_HOST( header.kind  );

        in.needs_to_be_byteswapped = TRUE;
    }

    if (header.magic != PICKLE_MAGIC)   goto bad_pickle;

    switch (header.kind) {
        //
    case NORMAL_DATASTRUCTURE_PICKLE:
        //
        if (read_image( task, &in, &root ) == FALSE)   goto bad_pickle;
        return root;

    case UNBOXED_PICKLE:
        //
        // An unboxed value is carried directly in the pickle header:
        //
        {   Pickle_Header pickle_header;
            //
            if (heapio__read_block( &in, &pickle_header, sizeof(pickle_header) ) == FALSE)   goto bad_pickle;
            return pickle_header.root_chunk;
        }

    default:
        goto bad_pickle;
    }

bad_pickle:
    *seen_error = TRUE;
    return HEAP_VOID;
}                                               // fun unpickle_datastructure