Example #1
static void   check_ro_ptrpair_sib   (Sib* ap) {
    //        ====================
    //
    Val* p;
    Val* stop;
    Val	 w;

    int gen =  GET_AGE_FROM_SIBID(ap->id);

    if (! sib_is_active(ap))   return;							// sib_is_active	def in    src/c/h/heap.h

    debug_say ("  pairs [%d]: [%#x..%#x:%#x)\n",
	gen, ap->tospace.start, ap->tospace.first_free, ap->tospace.limit);

    p = ap->tospace.start + 2;
    stop = ap->tospace.first_free;
    while (p < stop) {
	w = *p++;
	if (IS_TAGWORD(w)) {
	    ERROR;
	    debug_say (
		"** @%#x: unexpected tagword %#x in pair sib\n",
		p-1, w);
	    return;
	}
	else if (IS_POINTER(w)) {
	    check_pointer(p, w, gen, RO_CONSCELL_KIND, CHUNKC_any);
	}
    }
}
Example #2
/* _lib7_win32_IO_read_vec : (one_word_unt * int) -> word8vector.Vector
 *                          handle   nbytes
 *
 * Read the specified number of bytes from the specified handle,
 * returning them in a vector.
 *
 * Note: Read operations on console devices do not trap ctrl-C.
 *       ctrl-Cs are placed in the input buffer.
 */
Val _lib7_win32_IO_read_vec(Task *task, Val arg)
{
    HANDLE h = (HANDLE) WORD_LIB7toC(GET_TUPLE_SLOT_AS_VAL(arg, 0));
    DWORD nbytes = (DWORD) GET_TUPLE_SLOT_AS_INT(arg, 1);
    DWORD n;

    // Allocate the vector.
    // Note that this might cause a GC:
    //
    Val vec = allocate_nonempty_int1_vector( task, BYTES_TO_WORDS (nbytes) );

    if (ReadFile( h, PTR_CAST(void*, vec), nbytes, &n, NULL)) {

        if (n == 0) {
#ifdef WIN32_DEBUG
            debug_say("_lib7_win32_IO_read_vec: eof on device\n");
#endif
            return ZERO_LENGTH_STRING__GLOBAL;
        }

        if (n < nbytes) {
	    //
            shrink_fresh_int1_vector( task, vec, BYTES_TO_WORDS(n) );
        }

        /* Allocate header: */
        {   Val result;
            SEQHDR_ALLOC (task, result, STRING_TAGWORD, vec, n);
            return result;
        }

    } else {
        //
        // The original snippet is truncated here; presumably the
        // error path mirrors that of _lib7_win32_IO_close below:
        //
#ifdef WIN32_DEBUG
        debug_say("_lib7_win32_IO_read_vec: failing\n");
#endif
        return RAISE_SYSERR(task,-1);
    }
}
Example #3
void   set_signal_state   (Pthread* pthread,  int signal_number,  int signal_state) {
    // ================
    //
    #ifdef WIN32_DEBUG
	debug_say("win32:set_signal_state: not setting state for signal %d\n", signal_number);
    #endif
}
Example #4
Val   list_signals__may_heapclean  (Task* task, Roots* extra_roots)   {
    //===========================
    #ifdef WIN32_DEBUG
	debug_say("win32:list_signals: returning dummy signal list\n");
    #endif
    return dump_table_as_system_constants_list__may_heapclean (task, &SigTable, extra_roots);
} 
Example #5
void   load_task_from_posthandler_resumption_fate   (Task* task) {						// Called exactly once, from   src/c/main/run-mythryl-code-and-runtime-eventloop.c
    // ==========================================
    // 
    // Load the Mythryl state with the state preserved in
    // the resumption fate made by make_posthandler_resumption_fate_from_task.
    //
    Val* current_closure;

    #ifdef SIGNAL_DEBUG
        debug_say ("load_task_from_posthandler_resumption_fate:\n");
    #endif

    current_closure = PTR_CAST(Val*, task->current_closure);

    task->argument		= current_closure[1];
    task->fate			= current_closure[2];
    task->current_closure	= current_closure[3];
    task->link_register		= current_closure[4];
    task->program_counter	= current_closure[5];
    task->exception_fate	= current_closure[6];

    // John (Reppy) says current_thread
    // should not be included here...
    //    task->current_thread	= current_closure[7];

    task->callee_saved_registers[0]	= current_closure[7];
    task->callee_saved_registers[1]	= current_closure[8];
    task->callee_saved_registers[2]	= current_closure[9];
}
Example #6
void   pause_until_signal   (Pthread* pthread) {
    // ==================
    // Suspend the given Pthread until a signal is received.
    //
    #ifdef WIN32_DEBUG
	debug_say("win32:pause_until_signal: returning without pause\n");
    #endif
} 
Example #7
int   get_signal_state   (Pthread* pthread, int signal_number) {
    //================

    #ifdef WIN32_DEBUG
	debug_say("win32:get_signal_state: returning state for signal %d as LIB7_SIG_DEFAULT\n", signal_number);
    #endif

    return LIB7_SIG_DEFAULT;
}  
Example #8
    static void   print_region_map   (Hugechunk_Quire_Relocation_Info* r)   {
        //
	Hugechunk_Relocation_Info* dp;
	Hugechunk_Relocation_Info* dq;

	debug_say ("region @%#x: |", r->first_ram_quantum);

	dq = r->chunkmap[0];			// Must be set before the loop: it cannot share the for-init clause with 'int i', which would redeclare dq as an int.
	for (int i = 0;  i < r->page_count;  i++) {
	    //
	    dp = r->chunkmap[i];

	    if (dp != dq) {
		debug_say ("|");
		dq = dp;
	    }

	    if (dp == NULL)	debug_say ("_");
	    else		debug_say ("X");
	}
	debug_say ("|\n");
    }
Example #9
/* _lib7_win32_IO_get_std_handle: one_word_unt -> one_word_unt
 * interface to win32 GetStdHandle
 */
Val _lib7_win32_IO_get_std_handle(Task *task, Val arg)
{
  Val_Sized_Unt w = WORD_LIB7toC(arg);
  HANDLE h = GetStdHandle(w);
  Val res;

#ifdef WIN32_DEBUG
  debug_say("getting std handle for %x as %x\n", w, (unsigned int) h);
#endif
  WORD_ALLOC(task, res, (Val_Sized_Unt)h);
  return res;
}
Example #10
Val   make_mythryl_signal_handler_arg   (			// Called only from handle-interprocess-signal code in   src/c/main/run-mythryl-code-and-runtime-eventloop.c
    //=============================== 
    //
    Task* task,
    Val*  resume_after_handling_signal
){
    // We're handling an interprocess signal for
    //
    //     src/c/main/run-mythryl-code-and-runtime-eventloop.c
    //
    // Depending on platform,    resume_after_handling_signal
    // is from one of
    //     src/c/machine-dependent/prim.intel32.asm
    //     src/c/machine-dependent/prim.intel32.masm
    //     src/c/machine-dependent/prim.sun.asm
    //     src/c/machine-dependent/prim.pwrpc32.asm
    //
    // Our job is to build the Mythryl argument record for
    // the Mythryl signal handler.  The handler has type
    //
    //   posix_interprocess_signal_handler : (Int, Int, Fate(Void)) -> X
    //
    // where
    //     The first  argument is  the signal id 		// For example SIGALRM,
    //     the second argument is  the signal count		// I.e., number of times signal has been received since last handled.
    //     the third  argument is  the resumption fate.
    //
    // The return type is X because the Mythryl
    // signal handler should never return.
    //
    // NOTE: Maybe this should be combined with choose_signal???	XXX BUGGO FIXME


    Hostthread* hostthread = task->hostthread;

    Val run_fate =  make_posthandler_resumption_fate_from_task( task,  resume_after_handling_signal );

    // Allocate the Mythryl signal handler's argument record:
    //
    Val arg = make_three_slot_record( task, 
	//
	TAGGED_INT_FROM_C_INT( hostthread->next_posix_signal_id	   ),
        TAGGED_INT_FROM_C_INT( hostthread->next_posix_signal_count ),
	run_fate
    );
    if (hostthread->next_posix_signal_id == 2 /*SIGINT*/)   ramlog_printf("#%d make_mythryl_signal_handler_arg: hostthread->next_posix_signal_id==SIGINT\n", syscalls_seen );

    #ifdef SIGNAL_DEBUG
	debug_say( "make_mythryl_signal_handler_arg: resumeC = %#x, arg = %#x\n", run_fate, arg );
    #endif

    return arg;
}
Example #11
void   check_heap   (Heap* heap,  int max_swept_agegroup)   {
    //
    // Check the heap for consistency after a cleaning (or datastructure pickling).

    ErrCount = 0;

    debug_say ("Checking heap (%d agegroups) ...\n", max_swept_agegroup);

    for (int i = 0;  i < max_swept_agegroup; i++) {
        //
	Agegroup*	g =  heap->agegroup[i];
        //
	check_ro_pointer_sib (g->sib[ RO_POINTERS_SIB ]);
	check_ro_ptrpair_sib (g->sib[ RO_CONSCELL_SIB ]);
	check_nonpointer_sib (g->sib[ NONPTR_DATA_SIB ]);
	check_rw_pointer_sib (g->sib[ RW_POINTERS_SIB ], g->dirty);
    }
    debug_say ("... done\n");

    if (ErrCount > 0)	die ("check_heap --- inconsistent heap\n");
}									// fun check_heap
void    pth__finish_heapcleaning   (Task*  task)   {
    //  ========================
    //
    // This fn is called only from
    //
    //     src/c/heapcleaner/call-heapcleaner.c
    //
    // 

    // This works, but partition_agegroup0_buffer_between_pthreads is overkill:		XXX BUGGO FIXME
    //
    partition_agegroup0_buffer_between_pthreads( pthread_table__global );

    PTH__MUTEX_LOCK( &pth__heapcleaner_mutex__global );

    #ifdef NEED_PTHREAD_SUPPORT_DEBUG
	debug_say ("%d entering barrier\n", task->pthread->pid );
    #endif

    {   Bool result;
	char* err = pth__barrier_wait( &pth__heapcleaner_barrier__global, &result );		// We're the designated heapcleaner; by calling this, we release all the other pthreads to resume execution of user code.
	    //											// They should all be already waiting on this barrier, so we should never block at this point.
	    // 'result' will be TRUE for one pthread waiting on barrier, FALSE for the rest;
	    // We do not take advantage of that here.
	    //
	    // 'err' will be NULL normally, non-NULL only on an error;
	    // for the moment we just die(). XXX SUCKO FIXME.
	if (err) die(err);
    }

    pth__barrier_destroy( &pth__heapcleaner_barrier__global );					// "destroy" is poor nomenclature; all it does is undo what pth__barrier_init() did.

    pthreads_ready_to_clean__local = 0;

    #ifdef NEED_PTHREAD_SUPPORT_DEBUG
	debug_say ("%d left barrier\n", task->pthread->pid);
    #endif

    PTH__MUTEX_UNLOCK( &pth__heapcleaner_mutex__global );
}
Example #13
Val   get_signal_mask__may_heapclean   (Task* task, Val arg, Roots* extra_roots)   {
    //==============================
    //
    // Return the current signal mask (only those signals supported by Lib7); like
    // set_signal_mask, the result has the following semantics:
    //	NULL	-- the empty mask
    //	THE[]	-- mask all signals
    //	THE l	-- the signals in l are the mask
    //
#ifdef WIN32_DEBUG
    debug_say("win32:get_signal_mask__may_heapclean: returning mask as NULL\n");
#endif
    return OPTION_NULL;
}
Example #14
/* _lib7_win32_IO_close: one_word_unt -> Void
 * close a handle
 */
Val _lib7_win32_IO_close(Task *task, Val arg)
{
    HANDLE h = (HANDLE) WORD_LIB7toC(arg);

    if (CloseHandle(h)) {
      return HEAP_VOID;
    }

#ifdef WIN32_DEBUG
    debug_say("_lib7_win32_IO_close: failing\n");
#endif

    return RAISE_SYSERR(task,-1);
}
Example #15
void   set_signal_mask   (Task* task, Val sigList)   {
    // =============
    //
    // Set the signal mask to the given list of signals.  The sigList has the
    // type: "sysconst list option", with the following semantics -- see
    //
    //     src/lib/std/src/nj/runtime-signals.pkg
    //
    //	NULL	-- the empty mask
    //	THE[]	-- mask all signals
    //	THE l	-- the signals in l are the mask
    //
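    // For example, under these semantics  THE [SIGINT, SIGALRM]  would
    // mask exactly those two signals,  THE []  would mask every
    // supported signal, and  NULL  would mask none.
    //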
#ifdef WIN32_DEBUG
    debug_say("win32:SetSigMask: not setting mask\n");
#endif
}
    static void   arithmetic_fault_handler   (int signal,  siginfo_t* si,  void* c)   {
        //        ========================
	//
	ucontext_t* scp = (ucontext_t*) c;

	Task*  task =   SELF_HOSTTHREAD->task;
									    ENTER_MYTHRYL_CALLABLE_C_FN(__func__);

	extern Vunt   request_fault[]; 

	int code =  GET_SIGNAL_CODE( si, scp );

	#ifdef SIGNAL_DEBUG
	    debug_say ("Fault handler: signal = %d, inLib7 = %d\n", signal, SELF_HOSTTHREAD->executing_mythryl_code);
	#endif

	if (! SELF_HOSTTHREAD->executing_mythryl_code) {
  	    //
	    fprintf(stderr,"\n=================================================================================\n");
	    fprintf(stderr, "error: Uncaught signal while running in Mythryl C layer: signal = %d, signal code = %#x, pc = %#x. -- %d:%s\n", signal, GET_SIGNAL_CODE(si, scp), GET_SIGNAL_PROGRAM_COUNTER(scp), __LINE__, __FILE__);
	    fflush( stderr);
	    //
	    enter_debug_loop();
	}

	// Map the signal to the appropriate Mythryl exception:
        //
	if (INT_OVFLW(signal, code)) {								// INT_OVFLW	is from   src/c/h/system-dependent-signal-get-set-etc.h 
	    //
	    task->fault_exception = OVERFLOW_EXCEPTION__GLOBAL;					// OVERFLOW_EXCEPTION__GLOBAL	is from   src/c/h/runtime-globals.h
	    task->faulting_program_counter = (Vunt)GET_SIGNAL_PROGRAM_COUNTER(scp);
	    //
	} else if (INT_DIVZERO(signal, code)) {
	    //
	    task->fault_exception = DIVIDE_EXCEPTION__GLOBAL;
	    task->faulting_program_counter = (Vunt)GET_SIGNAL_PROGRAM_COUNTER(scp);

	} else {
	    //
	    die ("unexpected fault, signal = %d, signal code = %#x", signal, code);
	}

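	// Redirect the resumption program counter to the 'request_fault'
	// assembly stub (declared extern above): when this handler returns,
	// execution resumes in the runtime, which can then raise the
	// recorded fault_exception at the Mythryl level.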
	SET_SIGNAL_PROGRAM_COUNTER( scp, request_fault );

	RESET_FLOATING_POINT_EXCEPTION_HANDLING( scp );
									    EXIT_MYTHRYL_CALLABLE_C_FN(__func__);
    }
Example #17
static void   read_heap   (
    //        =========
    //
    Inbuf*       bp,
    Heap_Header* header,
    Task*        task,
    Val*         externs
){
    Heap*		heap =  task->heap;

    Sib_Header*	sib_headers;
    Sib_Header*	p;
    Sib_Header*	q;

    int			sib_headers_bytesize;
    int			i, j, k;

    long		prevSzB[MAX_PLAIN_SIBS], size;
    Sibid*		oldBOOK2SIBID;
    Punt		addrOffset[MAX_AGEGROUPS][MAX_PLAIN_SIBS];

    Hugechunk_Quire_Relocation_Info*	boRelocInfo;

    Addresstable*	boRegionTable;

    // Allocate a book_to_sibid__global for the imported
    // heap image's address space:
    //
    #ifdef TWO_LEVEL_MAP
        #error two level map not supported
    #else
	oldBOOK2SIBID = MALLOC_VEC (Sibid, BOOK2SIBID_TABLE_SIZE_IN_SLOTS);
    #endif

    // Read in the hugechunk region descriptors
    // for the old address space:
    //
    {
	int		  size;
	Hugechunk_Quire_Header* boRgnHdr;

	boRegionTable = make_address_hashtable(LOG2_BOOK_BYTESIZE+1, header->hugechunk_quire_count);

	size = header->hugechunk_quire_count * sizeof(Hugechunk_Quire_Header);

	boRgnHdr = (Hugechunk_Quire_Header*) MALLOC (size);

	heapio__read_block( bp, boRgnHdr, size );

	boRelocInfo = MALLOC_VEC(Hugechunk_Quire_Relocation_Info, header->hugechunk_quire_count);

	for (i = 0;  i < header->hugechunk_quire_count;  i++) {

	    set_book2sibid_entries_for_range(oldBOOK2SIBID,
		(Val*)(boRgnHdr[i].base_address),
		BOOKROUNDED_BYTESIZE(boRgnHdr[i].bytesize),
		HUGECHUNK_DATA_SIBID(1)
            );

	    oldBOOK2SIBID[GET_BOOK_CONTAINING_POINTEE(boRgnHdr[i].base_address)] = HUGECHUNK_RECORD_SIBID(MAX_AGEGROUPS);

	    boRelocInfo[i].first_ram_quantum = boRgnHdr[i].first_ram_quantum;

	    boRelocInfo[i].page_count
                =
                (boRgnHdr[i].bytesize - (boRgnHdr[i].first_ram_quantum - boRgnHdr[i].base_address))
                >>
                LOG2_HUGECHUNK_RAM_QUANTUM_IN_BYTES;

	    boRelocInfo[i].hugechunk_page_to_hugechunk = MALLOC_VEC(Hugechunk_Relocation_Info*, boRelocInfo[i].page_count);

	    for (j = 0;  j < boRelocInfo[i].page_count;  j++) {
	        //
		boRelocInfo[i].hugechunk_page_to_hugechunk[j] = NULL;
            } 
	    addresstable_insert (boRegionTable, boRgnHdr[i].base_address, &(boRelocInfo[i]));
	}
	FREE (boRgnHdr);
    }

    // Read the sib headers:
    //
    sib_headers_bytesize = header->active_agegroups * TOTAL_SIBS * sizeof( Sib_Header );
    //
    sib_headers = (Sib_Header*) MALLOC( sib_headers_bytesize );
    //
    heapio__read_block( bp, sib_headers, sib_headers_bytesize );

    for (i = 0;  i < MAX_PLAIN_SIBS;  i++) {
        //
	prevSzB[i] = task->heap_allocation_buffer_bytesize;
    }

    // Allocate the sib buffers and read in the heap image:
    //
    for (p = sib_headers, i = 0;  i < header->active_agegroups;  i++) {
        //
	Agegroup*  age =  heap->agegroup[ i ];

	// Compute the space required for this agegroup,
	// and mark the oldBOOK2SIBID to reflect the old address space:
	//
	for (q = p, j = 0;  j < MAX_PLAIN_SIBS;  j++) {

	    set_book2sibid_entries_for_range (
		//
		oldBOOK2SIBID,

		(Val*) q->info.o.base_address,

		BOOKROUNDED_BYTESIZE( q->info.o.bytesize ),

		age->sib[ j ]->id
	    );

	    size = q->info.o.bytesize + prevSzB[j];

	    if (j == RO_CONSCELL_SIB
            &&  size > 0
            ){
		size += 2*WORD_BYTESIZE;
	    }

	    age->sib[ j ]->tospace.bytesize
		=
		BOOKROUNDED_BYTESIZE( size );

	    prevSzB[ j ] =  q->info.o.bytesize;

	    q++;
	}

	if (set_up_tospace_sib_buffers_for_agegroup(age) == FALSE) {
	    die ("unable to allocated space for agegroup %d\n", i+1);
        } 
	if (sib_is_active( age->sib[ RW_POINTERS_SIB ] )) {							// sib_is_active	def in    src/c/h/heap.h
	    //
	    make_new_coarse_inter_agegroup_pointers_map_for_agegroup (age);
        }

	// Read in the sib buffers for this agegroup
	// and initialize the address offset table:
	//
	for (int j = 0;  j < MAX_PLAIN_SIBS;  j++) {
	    //
	    Sib* ap = age->sib[ j ];

	    if (p->info.o.bytesize > 0) {

		addrOffset[i][j] = (Punt)(ap->tospace.start) - (Punt)(p->info.o.base_address);

		heapio__seek( bp, (long) p->offset );

		heapio__read_block( bp, (ap->tospace.start), p->info.o.bytesize );

		ap->tospace.used_end  = (Val *)((Punt)(ap->tospace.start) + p->info.o.bytesize);

		ap->fromspace.seniorchunks_end =  ap->tospace.start;

	    } else if (sib_is_active(ap)) {

		ap->fromspace.seniorchunks_end =  ap->tospace.start;
	    }

	    if (verbosity__global > 0)   say(".");

	    p++;
	}

        // Read in the hugechunk sib buffers (currently just codechunks):
        //
	for (int ilk = 0;  ilk < MAX_HUGE_SIBS;  ilk++) {			// MAX_HUGE_SIBS		def in    src/c/h/sibid.h
	    //	
	    Punt	 totSizeB;

	    Hugechunk* free_chunk;
	    Hugechunk* bdp = NULL;		// Without this initialization, gcc -Wall gives a 'possible uninitialized use' warning.

	    Hugechunk_Quire*	 free_quire;
	    Hugechunk_Header*	 boHdrs;

	    int			 boHdrSizeB;
	    int			 index;

	    Hugechunk_Quire_Relocation_Info*  region;

	    if (p->info.bo.hugechunk_quanta_count > 0) {
		//
		totSizeB = p->info.bo.hugechunk_quanta_count << LOG2_HUGECHUNK_RAM_QUANTUM_IN_BYTES;

		free_chunk = allocate_hugechunk_quire( heap, totSizeB );

		free_quire = free_chunk->hugechunk_quire;

		free_quire->age_of_youngest_live_chunk_in_quire
		    =
                    i;

		set_book2sibid_entries_for_range (
		    //
		    book_to_sibid__global,
                    (Val*) free_quire,
		    BYTESIZE_OF_QUIRE( free_quire->quire ),
		    HUGECHUNK_DATA_SIBID( i )
		);

		book_to_sibid__global[ GET_BOOK_CONTAINING_POINTEE( free_quire ) ]
		    =
		    HUGECHUNK_RECORD_SIBID( i );

	        // Read in the hugechunk headers:
                //
		boHdrSizeB = p->info.bo.hugechunk_count * sizeof(Hugechunk_Header);
		//
		boHdrs = (Hugechunk_Header*) MALLOC (boHdrSizeB);
		//
		heapio__read_block (bp, boHdrs, boHdrSizeB);

	        // Read in the hugechunks:
                //
		heapio__read_block( bp, (void *)(free_chunk->chunk), totSizeB );
		//
		if (ilk == CODE__HUGE_SIB) {					// ilk = 0 == CODE__HUGE_SIB	def in    src/c/h/sibid.h
		    //
		    flush_instruction_cache ((void *)(free_chunk->chunk), totSizeB);
		}

	        // Set up the hugechunk descriptors 
                // and per-chunk relocation info:
                //
		for (k = 0;  k < p->info.bo.hugechunk_count;  k++) {
		    //
		    // Find the region relocation info for the
		    // chunk's region in the exported heap:
		    //
		    for (index = GET_BOOK_CONTAINING_POINTEE(boHdrs[k].base_address);
			!SIBID_ID_IS_BIGCHUNK_RECORD(oldBOOK2SIBID[index]);
			index--)
			continue;

		    region = LOOK_UP_HUGECHUNK_REGION (boRegionTable, index);

		    // Allocate the hugechunk record for
		    // the chunk and link it into the list
                    // of hugechunks for its agegroup.
		    //
		    bdp = allocate_a_hugechunk( free_chunk, &(boHdrs[k]), region );

		    bdp->next = age->hugechunks[ ilk ];

		    age->hugechunks[ ilk ] = bdp;

		    ASSERT( bdp->gen == i+1 );

		    if (codechunk_comment_display_is_enabled__global
                    &&  ilk == CODE__HUGE_SIB
                    ){
		        // Dump the comment string of the code chunk.

			Unt8* namestring;
			//
			if ((namestring = get_codechunk_comment_string_else_null( bdp ))) {
			    debug_say ("[%6d bytes] %s\n", bdp->bytesize, (char*)namestring);
                        }
		    }
		}

		if (free_chunk != bdp) {					// if p->info.bo.hugechunk_count can be zero, 'bdp' value here may be bogus. XXX BUGGO FIXME.
		    //
		    // There was some extra space left in the region:
		    //
		    insert_hugechunk_in_doubly_linked_list( heap->hugechunk_freelist, free_chunk);						// insert_hugechunk_in_doubly_linked_list	def in   src/c/h/heap.h
		}

		FREE (boHdrs);
	    }

	    if (verbosity__global > 0)   say(".");

	    p++;
	}
    }

    repair_heap (heap, oldBOOK2SIBID, addrOffset, boRegionTable, externs);

    // Adjust the run-time globals
    // that point into the heap:
    //
    *PTR_CAST( Val*, PERVASIVE_PACKAGE_PICKLE_LIST_REFCELL__GLOBAL )
        =
        repair_word(
            *PTR_CAST( Val*, PERVASIVE_PACKAGE_PICKLE_LIST_REFCELL__GLOBAL ),
	    oldBOOK2SIBID,
            addrOffset,
            boRegionTable,
            externs
        );

    runtime_package__global = repair_word( runtime_package__global, oldBOOK2SIBID, addrOffset, boRegionTable, externs );

#ifdef ASM_MATH
    mathvec__global = repair_word (mathvec__global, oldBOOK2SIBID, addrOffset, boRegionTable, externs);
#endif

    // Adjust the Mythryl registers
    // to the new address space:
    //
    ASSIGN(
        POSIX_INTERPROCESS_SIGNAL_HANDLER_REFCELL__GLOBAL,
	//
        repair_word (
	    //
	    DEREF( POSIX_INTERPROCESS_SIGNAL_HANDLER_REFCELL__GLOBAL ),
	    oldBOOK2SIBID,
	    addrOffset,
	    boRegionTable,
            externs
	)
    );

    task->argument
	=
	repair_word( task->argument, oldBOOK2SIBID, addrOffset, boRegionTable, externs );

    task->fate
	=
	repair_word( task->fate, oldBOOK2SIBID, addrOffset, boRegionTable, externs );

    task->current_closure
	=
	repair_word( task->current_closure, oldBOOK2SIBID, addrOffset, boRegionTable, externs );

    task->program_counter
	=
	repair_word(  task->program_counter, oldBOOK2SIBID, addrOffset, boRegionTable, externs );

    task->link_register
	=
	repair_word (task->link_register, oldBOOK2SIBID, addrOffset, boRegionTable, externs );

    task->exception_fate
	=
	repair_word( task->exception_fate, oldBOOK2SIBID, addrOffset, boRegionTable, externs );

    task->current_thread
	=
	repair_word( task->current_thread, oldBOOK2SIBID, addrOffset, boRegionTable, externs );

    task->callee_saved_registers[0]
	=
	repair_word( task->callee_saved_registers[0], oldBOOK2SIBID, addrOffset, boRegionTable, externs );

    task->callee_saved_registers[1]
	=
	repair_word( task->callee_saved_registers[1], oldBOOK2SIBID, addrOffset, boRegionTable, externs );

    task->callee_saved_registers[2]
	=
	repair_word( task->callee_saved_registers[2], oldBOOK2SIBID, addrOffset, boRegionTable, externs );

    // Release storage:
    //
    for (i = 0; i < header->hugechunk_quire_count;  i++) {
      //
	Hugechunk_Relocation_Info*	p;
	for (p = NULL, j = 0;  j < boRelocInfo[i].page_count;  j++) {
	    if ((boRelocInfo[i].hugechunk_page_to_hugechunk[j] != NULL)
	    && (boRelocInfo[i].hugechunk_page_to_hugechunk[j] != p)) {
		FREE (boRelocInfo[i].hugechunk_page_to_hugechunk[j]);
		p = boRelocInfo[i].hugechunk_page_to_hugechunk[j];
	    }
	}
    }

    free_address_table( boRegionTable, FALSE );

    FREE( boRelocInfo    );
    FREE( sib_headers  );
    FREE( oldBOOK2SIBID       );

    // Reset the tospace.swept_end pointers:
    //
    for (int i = 0;  i < heap->active_agegroups;  i++) {
        //
	Agegroup*	age =  heap->agegroup[i];
        //
	for (int j = 0;  j < MAX_PLAIN_SIBS;  j++) {
	    //
	    Sib* ap =  age->sib[ j ];
	    //
	    if (sib_is_active(ap)) {							// sib_is_active	def in    src/c/h/heap.h
		//
		ap->tospace.swept_end
		    =
		    ap->tospace.used_end;
	    }
	}
    }
}                                                       // fun read_heap
int   pth__call_heapcleaner_with_extra_roots   (Task *task, va_list ap) {
    //=====================================
    //
    // This fn is called (only) from:
    //
    //     src/c/heapcleaner/call-heapcleaner.c
    //
    // As above, but we collect extra roots into pth__extra_heapcleaner_roots__global.

    int active_pthread_count;
    Val* p;

    Pthread* pthread =  task->pthread;

    PTH__MUTEX_LOCK( &pth__heapcleaner_mutex__global );
	//
	if (pthreads_ready_to_clean__local++ == 0) {
	    //
	    extra_cleaner_roots__local = pth__extra_heapcleaner_roots__global;

	    // Signal other pthreads to enter heapcleaning mode:
	    //
	    #if NEED_PTHREAD_SUPPORT_FOR_SOFTWARE_GENERATED_PERIODIC_EVENTS
		//
		ASSIGN( SOFTWARE_GENERATED_PERIODIC_EVENTS_SWITCH_REFCELL__GLOBAL, HEAP_TRUE);
		//	
		#ifdef NEED_PTHREAD_SUPPORT_DEBUG
		    debug_say ("%d: set poll event\n", pthread->pid);
		#endif
	    #endif

	    // We're the first one in, we'll do the collect:
	    //
	    cleaning_pthread__local = pthread->pid;

	    barrier_needs_to_be_initialized__local =  TRUE;

	    #ifdef NEED_PTHREAD_SUPPORT_DEBUG
		debug_say ("cleaning_pthread__local is %d\n",cleaning_pthread__local);
	    #endif
	}

	while ((p = va_arg(ap, Val *)) != NULL) {
	    //
	    *extra_cleaner_roots__local++ = p;
	}
	*extra_cleaner_roots__local = p;			// NULL

    PTH__MUTEX_UNLOCK( &pth__heapcleaner_mutex__global );



    //////////////////////////////////////////////////////////
    // Whether or not we're the first pthread to enter
    // heapcleaning mode, we now wait until all the
    // other active pthreads have also entered
    // heapcleaning mode.
    //
    // Note that we cannot use a barrier wait here because
    // we do not know how many pthreads will wind up entering
    // heapcleaner mode -- one or more pthreads might be starting
    // up additional pthreads.
    {
        // Spin until all active pthreads have entered this loop:
        //
	int n = 0;
        //
	while (pthreads_ready_to_clean__local !=  (active_pthread_count = pth__get_active_pthread_count())) {

	    // SPIN

	    if (n != 1000) {
		for (int i = 10000; i --> 0; );
		n++;
	    } else {
		n = 0;
		#ifdef NEED_PTHREAD_SUPPORT_DEBUG
		    debug_say ("%d spinning %d <> %d <alloc=0x%x, limit=0x%x>\n", 
			pthread->pid, pthreads_ready_to_clean__local, active_pthread_count, task->heap_allocation_pointer,
			task->heap_allocation_limit);
		#endif
	    }
	}

	// As soon as all active pthreads have entered the above
        // loop, they all fall out and arrive here.  The first to
	// do so needs to initialize the barrier, so that everyone
	// can wait at it:
	//
        PTH__MUTEX_LOCK( &pth__heapcleaner_mutex__global );					// Use mutex to avoid a race condition -- otherwise multiple pthreads might think they were the designated heapcleaner.
	    //
	    if (barrier_needs_to_be_initialized__local) {
		barrier_needs_to_be_initialized__local = FALSE;					// We're the first pthread to exit the spinloop.
		//
		pth__barrier_init( &pth__heapcleaner_barrier__global, active_pthread_count );	// Set up barrier to wait on proper number of threads.
	    }
	    //
	PTH__MUTEX_UNLOCK( &pth__heapcleaner_mutex__global );
    }

    // All Pthreads now ready to clean:
    //
    #if NEED_PTHREAD_SUPPORT_FOR_SOFTWARE_GENERATED_PERIODIC_EVENTS
	//
	ASSIGN(  SOFTWARE_GENERATED_PERIODIC_EVENTS_SWITCH_REFCELL__GLOBAL,  HEAP_FALSE  );
	//
	#ifdef NEED_PTHREAD_SUPPORT_DEBUG
	    debug_say ("%d: cleared poll event\n", task->pid);
	#endif
    #endif

    #ifdef NEED_PTHREAD_SUPPORT_DEBUG
	debug_say ("(%d) all %d/%d procs in\n", task->pthread->pid, pthreads_ready_to_clean__local, pth__get_active_pthread_count());
    #endif

    if (cleaning_pthread__local == pthread->pid) {
	//
        return TRUE;			// We're the designated heapcleaner.
    }

    #ifdef NEED_PTHREAD_SUPPORT_DEBUG
	debug_say ("%d entering barrier %d\n", pthread->pid, pthread_count);
    #endif

    {   Bool result;
        char* err = pth__barrier_wait( &pth__heapcleaner_barrier__global, &result );			// We're not the designated heapcleaner;  wait for the designated heapcleaner to finish heapcleaning.
	    //
	    // 'result' will be TRUE for one pthread waiting on barrier, FALSE for the rest;
	    // We do not take advantage of that here.
	    //
	    // 'err' will be NULL normally, non-NULL only on an error;
	    // for the moment we hope for the best. XXX SUCKO FIXME.
	if (err) die(err);
    }

    #ifdef NEED_PTHREAD_SUPPORT_DEBUG
	debug_say ("%d left barrier\n", pthread->pid);
    #endif

    return 0;
}												// fun pth__call_heapcleaner_with_extra_roots
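The designated-heapcleaner election above cannot use a plain fixed-count barrier because the number of active pthreads may change while threads are checking in; with a fixed worker count, though, the same mutex-election-plus-barrier idea reduces to the following standalone sketch (plain POSIX threads, not Mythryl runtime code; all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

#define WORKER_COUNT 4						// Stands in for pth__get_active_pthread_count().

static pthread_mutex_t   lock  =  PTHREAD_MUTEX_INITIALIZER;
static pthread_barrier_t barrier;
static int               ready_count = 0;			// Stands in for pthreads_ready_to_clean__local.
static pthread_t         cleaner;				// Stands in for cleaning_pthread__local.

static void   enter_heapcleaning_mode   (void)   {
    //
    pthread_mutex_lock( &lock );
	if (ready_count++ == 0)   cleaner = pthread_self();	// First thread in becomes the designated cleaner.
    pthread_mutex_unlock( &lock );

    if (pthread_equal( pthread_self(), cleaner )) {
	//
	puts( "cleaning the heap" );				// The designated cleaner does the work...
	ready_count = 0;
	pthread_barrier_wait( &barrier );			// ...then releases everyone else.
    } else {
	pthread_barrier_wait( &barrier );			// Everyone else waits for the cleaner to finish.
    }
}

static void*   worker   (void* arg)   {
    //
    enter_heapcleaning_mode();
    return NULL;
}

int   main   (void)   {
    //
    pthread_t tid[ WORKER_COUNT ];

    pthread_barrier_init( &barrier, NULL, WORKER_COUNT );

    for (int i = 0;  i < WORKER_COUNT;  i++)   pthread_create( &tid[i], NULL, worker, NULL );
    for (int i = 0;  i < WORKER_COUNT;  i++)   pthread_join(   tid[i], NULL );

    pthread_barrier_destroy( &barrier );
    return 0;
}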
void   partition_agegroup0_buffer_between_pthreads   (Pthread *pthread_table[]) {	// pthread_table is always   pthread_table__global
    // ===========================================
    //
    // Outside of this file, this fn is called (only) from
    //
    //     make_task   in   src/c/main/runtime-state.c
    //
    // Divide the agegroup0 buffer into smaller disjoint
    // buffers for use by the parallel pthreads.
    //
    // Typically,
    //
    //     task0->heap->agegroup0_buffer_bytesize
    //
    // will at this point have been set to
    //
    //	   DEFAULT_AGEGROUP0_BUFFER_BYTESIZE  				// DEFAULT_AGEGROUP0_BUFFER_BYTESIZE is defined at 256K in   src/c/h/runtime-configuration.h
    //     *
    //     MAX_PTHREADS							// MAX_PTHREADS is defined as something like 8 or 16    in   src/c/mythryl-config.h
    //
    // by the logic in
    //
    //     src/c/heapcleaner/heapcleaner-initialization.c
    //     
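    // For example, with the default 256K agegroup0 buffer and
    // MAX_PTHREADS == 8, the shared buffer is 8 * 256K == 2M,
    // and each pthread gets a disjoint 256K slice of it below.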


    int poll_freq
	=
	TAGGED_INT_TO_C_INT(
	    DEREF(
		SOFTWARE_GENERATED_PERIODIC_EVENT_INTERVAL_REFCELL__GLOBAL
	    )
	);

    Task* task;
    Task* task0 =  pthread_table[ 0 ]->task;

    int per_thread_agegroup0_buffer_bytesize
	=
	task0->heap->agegroup0_buffer_bytesize
        /
        MAX_PTHREADS;

    Val* start_of_agegroup0_buffer_for_next_pthread
	=
	task0->heap->agegroup0_buffer;

    for (int pthread = 0;   pthread < MAX_PTHREADS;   pthread++) {
        //
	task =  pthread_table[ pthread ]->task;

	#ifdef NEED_PTHREAD_SUPPORT_DEBUG
	    debug_say ("pthread_table[%d]->task-> (heap_allocation_pointer %x/heap_allocation_limit %x) changed to ", pthread, task->heap_allocation_pointer, task->heap_allocation_limit);
	#endif

	task->heap                       =  task0->heap;
	task->heap_allocation_pointer    =  start_of_agegroup0_buffer_for_next_pthread;
	task->real_heap_allocation_limit =  HEAP_ALLOCATION_LIMIT_SIZE( start_of_agegroup0_buffer_for_next_pthread, per_thread_agegroup0_buffer_bytesize );

	#if !NEED_PTHREAD_SUPPORT_FOR_SOFTWARE_GENERATED_PERIODIC_EVENTS
	    //
	    task->heap_allocation_limit
		=
		HEAP_ALLOCATION_LIMIT_SIZE(					// HEAP_ALLOCATION_LIMIT_SIZE	def in   src/c/h/heap.h
		    //								// This macro basically just subtracts a MIN_FREE_BYTES_IN_AGEGROUP0_BUFFER safety margin from the actual buffer limit.
		    start_of_agegroup0_buffer_for_next_pthread,
		    per_thread_agegroup0_buffer_bytesize
		);
	#else
	    if (poll_freq <= 0) {
		//
		task->heap_allocation_limit = task->real_heap_allocation_limit;
		//
	    } else {
		//
		// In order to generate software events at (approximately)
		// the desired frequency, we (may) here artificially decrease
		// the heaplimit pointer to trigger an early heapcleaner call,
		// at which point our logic will regain control.
		//
		#ifdef NEED_PTHREAD_SUPPORT_DEBUG
		    debug_say ("(with poll_freq=%d) ", poll_freq);
		#endif

		task->heap_allocation_limit
		    =
		    start_of_agegroup0_buffer_for_next_pthread
		    +
		    poll_freq * PERIODIC_EVENT_TIME_GRANULARITY_IN_NEXTCODE_INSTRUCTIONS;

		task->heap_allocation_limit
		    =
		    (task->heap_allocation_limit > task->real_heap_allocation_limit)
			? task->real_heap_allocation_limit
			: task->heap_allocation_limit;

	    }
	#endif

	#ifdef NEED_PTHREAD_SUPPORT_DEBUG
	    debug_say ("%x/%x\n",task->heap_allocation_pointer, task->heap_allocation_limit);
	#endif

	// Step over this pthread's buffer to
	// get start of next pthread's buffer:
	//
	start_of_agegroup0_buffer_for_next_pthread
	    =
	    (Val*) ( ((Punt) start_of_agegroup0_buffer_for_next_pthread)
                     +
                     per_thread_agegroup0_buffer_bytesize
                   );
    }										// for (int pthread = 0;   pthread < MAX_PTHREADS;   pthread++)
}										// fun partition_agegroup0_buffer_between_pthreads
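
The HEAP_ALLOCATION_LIMIT_SIZE macro used above is documented in the loop comments as just backing the usable limit off from the true end of the buffer by a safety margin; a plausible sketch of its shape (hypothetical -- the authoritative definition lives in src/c/h/heap.h) is:

// Hypothetical sketch only; see src/c/h/heap.h for the real macro:
#define HEAP_ALLOCATION_LIMIT_SIZE(base, bytesize) \
    ((Val*) ((Punt)(base) + (bytesize) - MIN_FREE_BYTES_IN_AGEGROUP0_BUFFER))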
Val   make_package_literals_via_bytecode_interpreter   (Task* task,   Unt8* bytecode_vector,   int bytecode_vector_length_in_bytes)   {
    //==============
    //
    // NOTE: We allocate all of the chunks in agegroup 1,
    // but allocate the vector of literals in agegroup0.
    //
    // This fn gets exported to the Mythryl level as
    //
    //     make_package_literals_via_bytecode_interpreter
    // in
    //     src/lib/compiler/execution/code-segments/code-segment.pkg
    // via
    //     src/c/lib/heap/make-package-literals-via-bytecode-interpreter.c
    //
    // Our ultimate invocation is in
    //
    //     src/lib/compiler/execution/main/execute.pkg


    int pc = 0;

    // Check that sufficient space is available for the
    // literal chunk that we are about to allocate.
    // Note that the cons cell has already been accounted
    // for in space_available (but not in space_needed).
    //
    #define GC_CHECK										\
	do {											\
	    if (space_needed > space_available							\
            &&  need_to_call_heapcleaner( task, space_needed + LIST_CONS_CELL_BYTESIZE)		\
            ){											\
		call_heapcleaner_with_extra_roots (task, 0, (Val *)&bytecode_vector, &stk, NULL);	\
		space_available = 0;								\
												\
	    } else {										\
												\
		space_available -= space_needed;						\
	    }											\
	} while (0)

    #ifdef DEBUG_LITERALS
	debug_say("make_package_literals_via_bytecode_interpreter: bytecode_vector = %#x, bytecode_vector_length_in_bytes = %d\n", bytecode_vector, bytecode_vector_length_in_bytes);
    #endif

    if (bytecode_vector_length_in_bytes <= 8)   return HEAP_NIL;

    Val_Sized_Unt  magic
	=
	GET32(bytecode_vector);   pc += 4;

    Val_Sized_Unt  max_depth							/* This variable is currently unused, so suppress 'unused var' compiler warning: */   __attribute__((unused))
	=
	GET32(bytecode_vector);   pc += 4;

    if (magic != V1_MAGIC) {
	die("bogus literal magic number %#x", magic);
    }

    Val	stk = HEAP_NIL;

    int space_available = 0;

    for (;;) {
	//
	ASSERT(pc < bytecode_vector_length_in_bytes);

	space_available -= LIST_CONS_CELL_BYTESIZE;	// Space for stack cons cell.

	if (space_available < ONE_K_BINARY) {
	    //
	    if (need_to_call_heapcleaner(task, 64*ONE_K_BINARY)) {
		//
		call_heapcleaner_with_extra_roots (task, 0, (Val *)&bytecode_vector, &stk, NULL);
            }
	    space_available = 64*ONE_K_BINARY;
	}


	switch (bytecode_vector[ pc++ ]) {
	    //
	case I_INT:
	    {	int i = GET32(bytecode_vector);	pc += 4;

		#ifdef DEBUG_LITERALS
		    debug_say("[%2d]: INT(%d)\n", pc-5, i);
		#endif

		LIST_CONS(task, stk, TAGGED_INT_FROM_C_INT(i), stk);
	    }
	    break;

	case I_RAW32:
	    {
		int i = GET32(bytecode_vector);	pc += 4;

		#ifdef DEBUG_LITERALS
		    debug_say("[%2d]: RAW32[%d]\n", pc-5, i);
		#endif

		Val               result;
		INT1_ALLOC(task, result, i);

		LIST_CONS(task, stk, result, stk);
		space_available -= 2*WORD_BYTESIZE;
	    }
	    break;

	case I_RAW32L:
	    {
		int n = GET32(bytecode_vector);	pc += 4;

		#ifdef DEBUG_LITERALS
		debug_say("[%2d]: RAW32L(%d) [...]\n", pc-5, n);
		#endif

		ASSERT(n > 0);

		int space_needed = 4*(n+1);
		GC_CHECK;

		LIB7_AllocWrite (task, 0, MAKE_TAGWORD(n, FOUR_BYTE_ALIGNED_NONPOINTER_DATA_BTAG));

		for (int j = 1;  j <= n;  j++) {
		    //
		    int i = GET32(bytecode_vector);	pc += 4;

		    LIB7_AllocWrite (task, j, (Val)i);
		}

		Val result =  LIB7_Alloc(task, n );

		LIST_CONS(task, stk, result, stk);
	    }
	    break;

	case I_RAW64:
	    {
		double d = get_double(&(bytecode_vector[pc]));	pc += 8;

		Val	           result;
		REAL64_ALLOC(task, result, d);

		#ifdef DEBUG_LITERALS
		    debug_say("[%2d]: RAW64[%f] @ %#x\n", pc-5, d, result);
		#endif

		LIST_CONS(task, stk, result, stk);

		space_available -= 4*WORD_BYTESIZE;		// Extra 4 bytes for alignment padding.
	    }
	    break;

	case I_RAW64L:
	    {
		int n = GET32(bytecode_vector);	pc += 4;

		#ifdef DEBUG_LITERALS
		    debug_say("[%2d]: RAW64L(%d) [...]\n", pc-5, n);
		#endif

		ASSERT(n > 0);

		int space_needed = 8*(n+1);
		GC_CHECK;

		#ifdef ALIGN_FLOAT64S
		    // Force FLOAT64_BYTESIZE alignment (descriptor is off by one word)
		    //
		    task->heap_allocation_pointer = (Val*)((Punt)(task->heap_allocation_pointer) | WORD_BYTESIZE);
		#endif

		int j = 2*n;							// Number of words.

		LIB7_AllocWrite (task, 0, MAKE_TAGWORD(j, EIGHT_BYTE_ALIGNED_NONPOINTER_DATA_BTAG));

		Val result =  LIB7_Alloc(task, j );

		for (int j = 0;  j < n;  j++) {
		    //
		    PTR_CAST(double*, result)[j] = get_double(&(bytecode_vector[pc]));	pc += 8;
		}
		LIST_CONS(task, stk, result, stk);
	    }
	    break;

	case I_STR:
	    {
		int n = GET32(bytecode_vector);		pc += 4;

		#ifdef DEBUG_LITERALS
		    debug_say("[%2d]: STR(%d) [...]", pc-5, n);
		#endif

		if (n == 0) {
		    #ifdef DEBUG_LITERALS
			debug_say("\n");
		    #endif

		    LIST_CONS(task, stk, ZERO_LENGTH_STRING__GLOBAL, stk);

		    break;
		}

		int j = BYTES_TO_WORDS(n+1);								// '+1' to include space for '\0'.

		// The space request includes space for the data-chunk header word and
		// the sequence header chunk.
		//
		int space_needed = WORD_BYTESIZE*(j+1+3);
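		// Worked example, assuming 4-byte words: for n == 5 we get
		// j = BYTES_TO_WORDS(6) == 2, so space_needed is
		// 4*(2+1+3) == 24 bytes -- one data tagword, two data words,
		// and a three-word sequence header.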
		GC_CHECK;

		// Allocate the data chunk:
		//
		LIB7_AllocWrite(task, 0, MAKE_TAGWORD(j, FOUR_BYTE_ALIGNED_NONPOINTER_DATA_BTAG));
		LIB7_AllocWrite (task, j, 0);								// So word-by-word string equality works.

		Val result = LIB7_Alloc (task, j);

		#ifdef DEBUG_LITERALS
		    debug_say(" @ %#x (%d words)\n", result, j);
		#endif
		memcpy (PTR_CAST(void*, result), &bytecode_vector[pc], n);		pc += n;

		// Allocate the header chunk:
		//
		SEQHDR_ALLOC(task, result, STRING_TAGWORD, result, n);

		// Push on stack:
		//
		LIST_CONS(task, stk, result, stk);
	    }
	    break;

	case I_LIT:
	    {
		int n = GET32(bytecode_vector);	pc += 4;

		Val result = stk;

		for (int j = 0;  j < n;  j++) {
		    //
		    result = LIST_TAIL(result);
		}

		#ifdef DEBUG_LITERALS
		    debug_say("[%2d]: LIT(%d) = %#x\n", pc-5, n, LIST_HEAD(result));
		#endif

		LIST_CONS(task, stk, LIST_HEAD(result), stk);
	    }
	    break;

	  case I_VECTOR:
	    {
		int n = GET32(bytecode_vector);	pc += 4;

		#ifdef DEBUG_LITERALS
		    debug_say("[%2d]: VECTOR(%d) [", pc-5, n);
		#endif

		if (n == 0) {
		    #ifdef DEBUG_LITERALS
			debug_say("]\n");
		    #endif
		    LIST_CONS(task, stk, ZERO_LENGTH_VECTOR__GLOBAL, stk);
		    break;
		}

		// The space request includes space
		// for the data-chunk header word and
		// the sequence header chunk.
		//
		int space_needed = WORD_BYTESIZE*(n+1+3);
		GC_CHECK;

		// Allocate the data chunk:
		//
		LIB7_AllocWrite(task, 0, MAKE_TAGWORD(n, RO_VECTOR_DATA_BTAG));

		// Top of stack is last element in vector:
		//
		for (int j = n;  j > 0;  j--) {
		    //
		    LIB7_AllocWrite(task, j, LIST_HEAD(stk));

		    stk = LIST_TAIL(stk);
		}

		Val result =  LIB7_Alloc(task, n );

		// Allocate the header chunk:
		//
		SEQHDR_ALLOC(task, result, TYPEAGNOSTIC_RO_VECTOR_TAGWORD, result, n);

		#ifdef DEBUG_LITERALS
		    debug_say("...] @ %#x\n", result);
		#endif

		LIST_CONS(task, stk, result, stk);
	    }
	    break;

	case I_RECORD:
	    {
		int n = GET32(bytecode_vector);	pc += 4;

		#ifdef DEBUG_LITERALS
		    debug_say("[%2d]: RECORD(%d) [", pc-5, n);
		#endif

		if (n == 0) {
		    #ifdef DEBUG_LITERALS
			debug_say("]\n");
		    #endif

		    LIST_CONS(task, stk, HEAP_VOID, stk);
		    break;

		} else {

		    int space_needed = 4*(n+1);
		    GC_CHECK;

		    LIB7_AllocWrite(task, 0, MAKE_TAGWORD(n, PAIRS_AND_RECORDS_BTAG));
		}

		// Top of stack is last element in record:
		//
		for (int j = n;  j > 0;  j--) {
		    //
		    LIB7_AllocWrite(task, j, LIST_HEAD(stk));

		    stk = LIST_TAIL(stk);
		}

		Val result = LIB7_Alloc(task, n );

		#ifdef DEBUG_LITERALS
		    debug_say("...] @ %#x\n", result);
		#endif

		LIST_CONS(task, stk, result, stk);
	    }
	    break;

	case I_RETURN:
	    ASSERT(pc == bytecode_vector_length_in_bytes);

	    #ifdef DEBUG_LITERALS
	        debug_say("[%2d]: RETURN(%#x)\n", pc-5, LIST_HEAD(stk));
	    #endif

	    return  LIST_HEAD( stk );
	    break;

	default:
	    die ("bogus literal opcode #%x @ %d", bytecode_vector[pc-1], pc-1);
	}								// switch
    }									// for (;;)
}									// fun make_package_literals_via_bytecode_interpreter
Example #21
void   choose_signal   (Pthread* pthread)   {
    // =============
    //
    // Caller guarantees that at least one Unix signal has been
    // seen at the C level but not yet handled at the Mythryl
    // level.  Our job is to find and return the number of
    // that signal plus the number of times it has fired at
    // the C level since last being handled at the Mythryl level.
    //
    // Choose which signal to pass to the Mythryl-level handler
    // and set up the Mythryl state vector accordingly.
    //
    // This function gets called (only) from
    //
    //     src/c/main/run-mythryl-code-and-runtime-eventloop.c
    //
    // WARNING: This should be called with signals masked
    // to avoid race conditions.

    int i, j, delta;

    // Scan the signal counts looking for
    // a signal that needs to be handled.
    //
    // The 'seen_count' field for a signal gets
    // incremented once for each incoming signal
    // in   c_signal_handler()   in
    //
    //     src/c/machine-dependent/posix-signal.c
    //
    // Here we increment the matching 'done_count' field
    // each time we invoke appropriate handling for that
    // signal;  thus, the difference between the two
    // gives the number of pending instances of that signal
    // currently needing to be handled.
    //
    // For fairness we scan for signals round-robin style, using
    //
    //     pthread->posix_signal_rotor
    //
    // to remember where we left off scanning, so we can pick
    // up from there next time:
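    //
    // For example, if SIGALRM has seen_count == 5 but done_count == 3,
    // its delta is 2: both pending instances are reported to the
    // Mythryl handler in one call, and done_count is bumped to 5 below.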

    i = pthread->posix_signal_rotor;
    j = 0;
    do {
        ASSERT (j++ < NUM_SIGS);

        i++;

        // Wrap circularly around the signal vector:
        //
        if (i == MAX_POSIX_SIGNALS)
            i = MIN_SYSTEM_SIG;

        // Does this signal have pending work? (Nonzero == "yes"):
        //
        delta = pthread->posix_signal_counts[i].seen_count - pthread->posix_signal_counts[i].done_count;

    } while (delta == 0);

    pthread->posix_signal_rotor = i;		// Next signal to scan on next call to this fn.

    // Record the signal to process
    // and how many times it has fired
    // since last being handled at the
    // Mythryl level:
    //
    pthread->next_posix_signal_id  = i;
    pthread->next_posix_signal_count = delta;

    // Mark this signal as 'done':
    //
    pthread->posix_signal_counts[i].done_count  += delta;
    pthread->all_posix_signals.done_count += delta;

#ifdef SIGNAL_DEBUG
    debug_say ("choose_signal: sig = %d, count = %d\n", pthread->next_posix_signal_id, pthread->next_posix_signal_count);
#endif
}
Example #22
static void   c_signal_handler   (int sig,  siginfo_t* si,  void* c)   {
    //        ================
    //
    // This is the C signal handler for
    // signals that are to be passed to
    // the Mythryl level via signal_handler in
    //
    //     src/lib/std/src/nj/runtime-signals-guts.pkg
    //

    ucontext_t* scp		/* This variable is unused on some platforms, so suppress 'unused var' compiler warning: */   __attribute__((unused))
        =
        (ucontext_t*) c;

    Pthread* pthread = SELF_PTHREAD;


    // Sanity check:  We compile in a MAX_POSIX_SIGNALS value but
    // have no way to ensure that we don't wind up getting run
    // on some custom kernel supporting more than MAX_POSIX_SIGNALS,
    // so we check here to be safe:
    //
    if (sig >= MAX_POSIX_SIGNALS)    die ("posix-signal.c: c_signal_handler: sig d=%d >= MAX_POSIX_SIGNALS %d\n", sig, MAX_POSIX_SIGNALS ); 


    // Remember that we have seen signal number 'sig'.
    //
    // This will eventually get noticed by  choose_signal()  in
    //
    //     src/c/machine-dependent/signal-stuff.c
    //
    pthread->posix_signal_counts[sig].seen_count++;
    pthread->all_posix_signals.seen_count++;

    log_if(
        "posix-signal.c/c_signal_handler: signal d=%d  seen_count d=%d  done_count d=%d   diff d=%d",
        sig,
        pthread->posix_signal_counts[sig].seen_count,
        pthread->posix_signal_counts[sig].done_count,
        pthread->posix_signal_counts[sig].seen_count - pthread->posix_signal_counts[sig].done_count
    );

    #ifdef SIGNAL_DEBUG
    debug_say ("c_signal_handler: sig = %d, pending = %d, inHandler = %d\n", sig, pthread->posix_signal_pending, pthread->mythryl_handler_for_posix_signal_is_running);
    #endif

    // The following line is needed only when
    // currently executing "pure" C code, but
    // doing it anyway in all other cases will
    // not hurt:
    //
    pthread->ccall_limit_pointer_mask = 0;

    if (  pthread->executing_mythryl_code
    &&  ! pthread->posix_signal_pending
    &&  ! pthread->mythryl_handler_for_posix_signal_is_running
    ){
	pthread->posix_signal_pending = TRUE;

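	// Zeroing the heap-allocation limit (below) forces the next
	// heap-limit check in compiled Mythryl code to fail, dropping
	// execution into the runtime, which will notice the pending
	// signal and dispatch the Mythryl-level handler.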
	#ifdef USE_ZERO_LIMIT_PTR_FN
	    //
	    SIG_SavePC( pthread->task, scp );
	    SET_SIGNAL_PROGRAM_COUNTER( scp, Zero_Heap_Allocation_Limit );
	#else
	    SIG_Zero_Heap_Allocation_Limit( scp );			// OK to adjust the heap limit directly.
	#endif
    }
}
void   set_up_heap   (			// Create and initialize the heap.
    // ===========
    //
    Task*              task,
    Bool               is_boot,
    Heapcleaner_Args*  params
) {
    int		ratio;
    int		max_size = 0;		// Initialized only to suppress a gcc -Wall warning.

    Heap*	heap;
    Agegroup*	ag;

    Multipage_Ram_Region*  multipage_ram_region;

    Val* agegroup0_buffer;

    // Default any parameters unspecified by user:
    //
    if (params->agegroup0_buffer_bytesize == 0)  params->agegroup0_buffer_bytesize  = DEFAULT_AGEGROUP0_BUFFER_BYTESIZE;		// From   src/c/h/runtime-configuration.h
    if (params->active_agegroups           < 0)  params->active_agegroups           = DEFAULT_ACTIVE_AGEGROUPS;				// From   src/c/h/runtime-configuration.h

    if (params->oldest_agegroup_keeping_idle_fromspace_buffers < 0) {
        params->oldest_agegroup_keeping_idle_fromspace_buffers =  DEFAULT_OLDEST_AGEGROUP_KEEPING_IDLE_FROMSPACE_BUFFERS;		// From   src/c/h/runtime-configuration.h
    }

    // First we initialize the underlying memory system:
    //
    set_up_multipage_ram_region_os_interface ();				// set_up_multipage_ram_region_os_interface	def in   src/c/ram/get-multipage-ram-region-from-mach.c
										// set_up_multipage_ram_region_os_interface	def in   src/c/ram/get-multipage-ram-region-from-mmap.c
										// set_up_multipage_ram_region_os_interface	def in   src/c/ram/get-multipage-ram-region-from-win32.c

    // Allocate a ram region to hold
    // the book_to_sibid__global and agegroup0 buffer:
    //
    {   long	book2sibid_bytesize;

	#ifdef TWO_LEVEL_MAP
	    #error two level map not supported
	#else
		book2sibid_bytesize = BOOK2SIBID_TABLE_SIZE_IN_SLOTS * sizeof( Sibid );
	#endif

	multipage_ram_region
	    =
            obtain_multipage_ram_region_from_os(
		//
		MAX_PTHREADS * params->agegroup0_buffer_bytesize
                +
                book2sibid_bytesize
           );

	if (multipage_ram_region == NULL) 	   die ("Unable to allocate ram region for book_to_sibid__global");

	book_to_sibid__global = (Sibid*) BASE_ADDRESS_OF_MULTIPAGE_RAM_REGION( multipage_ram_region );

	agegroup0_buffer = (Val*) (((Punt)book_to_sibid__global) + book2sibid_bytesize);
    }

    // Initialize the book_to_sibid__global:
    //
    #ifdef TWO_LEVEL_MAP
        #error two level map not supported
    #else
	for (int i = 0;  i < BOOK2SIBID_TABLE_SIZE_IN_SLOTS;  i++) {
	    //
	    book_to_sibid__global[ i ] = UNMAPPED_BOOK_SIBID;
	}
    #endif

    // Initialize heap descriptor:
    //
    heap = MALLOC_CHUNK(Heap);
    //
    memset ((char*)heap, 0, sizeof(Heap));
    //
    for (int age = 0;  age < MAX_AGEGROUPS;  age++) {
	//
	ratio = DfltRatios[age];

	if (age == 0) {   max_size = MAX_SZ1( params->agegroup0_buffer_bytesize * MAX_PTHREADS );
	} else {          max_size = (5 * max_size)/2;
	    //
	    if (max_size > 64 * ONE_MEG_BINARY)  {				// WTF? This silliness probably needs to Just Die.  XXX BUGGO FIXME. -- 2011-11-01 CrT
                max_size = 64 * ONE_MEG_BINARY;
	    }
	}
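	// E.g., if agegroup 1's soft max works out to 2M, the progression
	// for successively older agegroups is 2M, 5M, 12.5M, 31.25M, and
	// then 64M (capped) from there on.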

	ag		      =
	heap->agegroup[age]   =  MALLOC_CHUNK( Agegroup );

	ag->heap	= heap;
	ag->age	        = age+1;
	ag->cleanings	= 0;
	ag->ratio	= ratio;
	//
	ag->last_cleaning_count_of_younger_agegroup
	    =
	    0;
	//
	ag->tospace_ram_region			= NULL;
	ag->fromspace_ram_region		= NULL;
	ag->saved_fromspace_ram_region		= NULL;
	ag->coarse_inter_agegroup_pointers_map	= NULL;

	for (int ilk = 0;  ilk < MAX_PLAIN_ILKS;  ilk++) {			// MAX_PLAIN_ILKS		def in    src/c/h/sibid.h
	    //
	    ag->sib[ ilk ] = MALLOC_CHUNK( Sib );
	    //
	    ag->sib[ ilk ]->tospace_bytesize              = 0;
	    ag->sib[ ilk ]->requested_sib_buffer_bytesize = 0;
	    ag->sib[ ilk ]->soft_max_bytesize             = max_size;
	    //
	    ag->sib[ ilk ]->id =   MAKE_SIBID( age+1, ilk+1, 0);
	}
	for (int ilk = 0;  ilk < MAX_HUGE_ILKS;  ilk++) {			// MAX_HUGE_ILKS		def in    src/c/h/sibid.h
	    //
	    ag->hugechunks[ ilk ] = NULL;					// ilk = 0 == CODE__HUGE_ILK	def in    src/c/h/sibid.h
	}
    }

    for (int age = 0;   age < params->active_agegroups;   age++) {
	//
	int k = (age == params->active_agegroups -1)
                     ?  age
                     :  age+1;

	for (int ilk = 0;  ilk < MAX_PLAIN_ILKS;  ilk++) {
	    //
	    heap->agegroup[ age ]->sib[ ilk ]->sib_for_promoted_chunks
                =
                heap->agegroup[ k ]->sib[ ilk ];
	}
    }

    heap->oldest_agegroup_keeping_idle_fromspace_buffers
	=
	params->oldest_agegroup_keeping_idle_fromspace_buffers;

    heap->active_agegroups                    = params->active_agegroups;
    //
    heap->agegroup0_cleanings_done            = 0;
    heap->hugechunk_ramregion_count	      = 0;
    heap->hugechunk_ramregions		      = NULL;
    //
    heap->hugechunk_freelist		      = MALLOC_CHUNK( Hugechunk );
    heap->hugechunk_freelist->chunk	      = (Punt)0;
    //
    heap->hugechunk_freelist->bytesize   = 0;
    heap->hugechunk_freelist->hugechunk_state = FREE_HUGECHUNK;
    heap->hugechunk_freelist->prev	      = heap->hugechunk_freelist;
    //
    heap->hugechunk_freelist->next	      = heap->hugechunk_freelist;
    heap->weak_pointers_forwarded_during_cleaning		    = NULL;

    // Initialize new space:
    //
    heap->multipage_ram_region       =  multipage_ram_region;
    //
    heap->agegroup0_buffer           =  agegroup0_buffer;
    //
    heap->agegroup0_buffer_bytesize  =  params->agegroup0_buffer_bytesize
				      * MAX_PTHREADS;				// "* MAX_PTHREADS" because it gets partitioned into MAX_PTHREADS buffers by
										// partition_agegroup0_buffer_between_pthreads() in   src/c/heapcleaner/pthread-heapcleaner-stuff.c
    //
    set_book2sibid_entries_for_range (
	//
	book_to_sibid__global,
	(Val*) book_to_sibid__global,
	BYTESIZE_OF_MULTIPAGE_RAM_REGION( heap->multipage_ram_region ),
	NEWSPACE_SIBID
    );

    #ifdef VERBOSE
	debug_say ("NewSpace = [%#x, %#x:%#x), %d bytes\n",
	    heap->agegroup0_buffer, HEAP_ALLOCATION_LIMIT( heap ),
	    (Val_Sized_Unt)(heap->agegroup0_buffer)+params->agegroup0_buffer_bytesize, params->agegroup0_buffer_bytesize);
    #endif

    clear_cleaner_statistics( heap );										// clear_cleaner_statistics		def in   src/c/heapcleaner/heapcleaner-initialization.c


    //
    if (heapcleaner_statistics_fd__global > 0) {
	//	
      Cleaner_Statistics_Header   header;									// Cleaner_Statistics_Header		is from   src/c/h/heapcleaner-statistics-2.h
	//
	ZERO_BIGCOUNTER( &heap->total_bytes_allocated );
	//
	header.mask = STATMASK_ALLOC
		    | STATMASK_NGENS
		    | STATMASK_START
		    | STATMASK_STOP;

	header.is_new_runtime = 1;
	//
	header.agegroup0_buffer_bytesize = params->agegroup0_buffer_bytesize;
	header.active_agegroups        = params->active_agegroups;
	//
	{   struct timeval tv;
	    //
	    gettimeofday ( &tv, NULL);
	    //
	    header.start_time.seconds  =  tv.tv_sec;	
	    header.start_time.uSeconds =  tv.tv_usec;	
	};
	//
	write( heapcleaner_statistics_fd__global, (char*)&header, sizeof( Cleaner_Statistics_Header ) );
    }


    if (is_boot) {
	//
	// Create agegroup 1's to-space:
	//
        for (int ilk = 0;  ilk < MAX_PLAIN_ILKS;  ilk++) {
	    //
	    heap->agegroup[ 0 ]->sib[ ilk ]->tospace_bytesize
                =
                BOOKROUNDED_BYTESIZE( 2 * heap->agegroup0_buffer_bytesize );
	}

	if (allocate_and_partition_an_agegroup( heap->agegroup[0] ) == FAILURE)	    die ("unable to allocate initial agegroup 1 buffer\n");

	for (int ilk = 0;  ilk < MAX_PLAIN_ILKS;  ilk++) {
	    //
	    heap->agegroup[ 0 ]->sib[ ilk ]->end_of_fromspace_oldstuff
		=
		heap->agegroup[ 0 ]->sib[ ilk ]->tospace;
	}
    }

    // Initialize the cleaner-related
    // parts of the Mythryl state:
    //
    task->heap	                  =  heap;
    task->heap_allocation_pointer =  (Val*) task->heap->agegroup0_buffer;

    #if NEED_SOFTWARE_GENERATED_PERIODIC_EVENTS
	//
	reset_heap_allocation_limit_for_software_generated_periodic_events( task );
    #else
	task->heap_allocation_limit = HEAP_ALLOCATION_LIMIT( heap );
    #endif
}										// fun set_up_heap
Example #24
static void   check_ro_pointer_sib   (Sib* ap) {
    //        ====================
    Val* p;
    Val* stop;
    Val  tagword;
    Val  w;
    int	 i;
    int	 len;

    int gen =  GET_AGE_FROM_SIBID( ap->id );

    if (! sib_is_active(ap))   return;							// sib_is_active	def in    src/c/h/heap.h

    debug_say ("  records [%d]: [%#x..%#x:%#x)\n",
	//
        gen,
        ap->tospace.start,
	ap->tospace.first_free,
	ap->tospace.limit
    );

    p = ap->tospace.start;
    stop = ap->tospace.first_free;

    while (p < stop) {
	//
	tagword = *p++;

	if (! IS_TAGWORD(tagword)) {
	    ERROR;
	    debug_say (
		"** @%#x: expected tagword, but found %#x in record sib\n",
		p-1, tagword);
	    return;
	}

	switch (GET_BTAG_FROM_TAGWORD( tagword )) {
	    //
	case PAIRS_AND_RECORDS_BTAG:
	    //
	    len =  GET_LENGTH_IN_WORDS_FROM_TAGWORD( tagword );			// Length excludes tagword.
	    //
	    for (i = 0;  i < len;  i++, p++) {
		w = *p;
		if (IS_TAGWORD(w)) {
		    ERROR;
		    debug_say (
			"** @%#x: unexpected tagword %#x in slot %d of %d\n",
			p, w, i, GET_LENGTH_IN_WORDS_FROM_TAGWORD(tagword));
		    return;
		}
		else if (IS_POINTER(w)) {
		    check_pointer(p, w, gen, RO_POINTERS_KIND, CHUNKC_any);
		}
	    }
	    break;

	case RW_VECTOR_HEADER_BTAG:
	case RO_VECTOR_HEADER_BTAG:
	    //
	    switch (GET_LENGTH_IN_WORDS_FROM_TAGWORD(tagword)) {
		//
	    case TYPEAGNOSTIC_VECTOR_CTAG:
		if (GET_BTAG_FROM_TAGWORD(tagword) == RW_VECTOR_HEADER_BTAG)	check_pointer (p, *p, gen, RO_POINTERS_KIND, CHUNKC__IS_RW_POINTERS);
		else					    			check_pointer (p, *p, gen, RO_POINTERS_KIND, CHUNKC__IS_RO_POINTERS|CHUNKC__IS_RO_CONSCELL);
		break;

	    case VECTOR_OF_ONE_BYTE_UNTS_CTAG:
	    case UNT16_VECTOR_CTAG:
	    case TAGGED_INT_VECTOR_CTAG:
	    case INT1_VECTOR_CTAG:
	    case VECTOR_OF_FOUR_BYTE_FLOATS_CTAG:
	    case VECTOR_OF_EIGHT_BYTE_FLOATS_CTAG:
		check_pointer (p, *p, gen, RO_POINTERS_KIND, CHUNKC__IS_NONPTR_DATA);
		break;

	    default:
		ERROR;
		debug_say ("** @%#x: strange sequence kind %d in record sib\n",
		    p-1, GET_LENGTH_IN_WORDS_FROM_TAGWORD(tagword));
		return;
	    }

	    if (! IS_TAGGED_INT(p[1])) {
		ERROR;
		debug_say ("** @%#x: sequence header length field not an in (%#x)\n",
		    p+1, p[1]);
	    }
	    p += 2;
	    break;

	default:
	    ERROR;
	    debug_say ("** @%#x: strange tag (%#x) in record sib\n",
		p-1, GET_BTAG_FROM_TAGWORD(tagword));
	    return;
	}
    }
}											// fun check_ro_pointer_sib
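
The checks above rely on a tagword's length and btag fields surviving the round trip through the constructor macro; a minimal sanity check, using only macros that already appear in these examples, might read:

// Minimal sketch: values stored by MAKE_TAGWORD must decode back out
// via the corresponding GET_*_FROM_TAGWORD accessors.
Val tw =  MAKE_TAGWORD( 3, PAIRS_AND_RECORDS_BTAG );
ASSERT( GET_LENGTH_IN_WORDS_FROM_TAGWORD( tw ) == 3                      );
ASSERT( GET_BTAG_FROM_TAGWORD( tw )            == PAIRS_AND_RECORDS_BTAG );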
int   pth__start_heapcleaning   (Task *task) {
    //======================
    //
    // This fn is called only from
    //
    //     src/c/heapcleaner/call-heapcleaner.c
    //
    // Here we handle the start-of-heapcleaning work
    // specific to our multicore implementation.
    // Specifically, we need to
    //
    //   o Elect one pthread to do the actual heapcleaning work.
    //
    //   o Ensure that all pthreads cease running user code before
    //     heapcleaning begins.
    //
    //   o Ensure that all pthreads resume running user code after
    //     heapcleaning completes.
    //
    //
    // In more detail:
    //
    //   o The first pthread to check in becomes the
    //     designated heapcleaner, which we remember by saving its pid
    //     in cleaning_pthread__local.
    //
    //   o The designated heapcleaner returns to the invoking
    //     call-heapcleaner fn and does the heapcleaning work
    //     while the other pthreads wait at a barrier.
    //
    //   o When done heapcleaning, the designated heapcleaner
    //     thread checks into the barrier, releasing the remaining
    //     pthreads to resume execution of user code.
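    //
    // (A free-standing sketch of this elect-and-barrier protocol
    // appears just after this function, below.)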

    int	     active_pthread_count;
    Pthread* pthread = task->pthread;

    // If we're the first pthread to start heapcleaning,
    // remember that and signal the remaining pthreads
    // to join in.
    //
    PTH__MUTEX_LOCK( &pth__heapcleaner_mutex__global );					// Use mutex to avoid a race condition -- otherwise multiple pthreads might think they were the designated heapcleaner.
    //
    if (pthreads_ready_to_clean__local++ == 0) {
        //
        // We're the first pthread starting heapcleaning,
	// so we'll assume the mantle of designated-heapcleaner,
	// as well as signalling the other threads to enter
	// heapcleaning mode. 
        //
        pth__extra_heapcleaner_roots__global[0] =  NULL;

        extra_cleaner_roots__local =  pth__extra_heapcleaner_roots__global;

	// I'm guessing the point of this is to get the other
	// pthreads to enter heapcleaning mode pronto:			-- 2011-11-02 CrT
	//
	#if NEED_PTHREAD_SUPPORT_FOR_SOFTWARE_GENERATED_PERIODIC_EVENTS
	    //
	    ASSIGN( SOFTWARE_GENERATED_PERIODIC_EVENTS_SWITCH_REFCELL__GLOBAL, HEAP_TRUE );	// This refcell appears to be read only by   need_to_call_heapcleaner   in   src/c/heapcleaner/call-heapcleaner.c
	    //											// although it is also exported to the Mythryl level -- see   src/lib/std/src/unsafe/software-generated-periodic-events.api
	    #ifdef NEED_PTHREAD_SUPPORT_DEBUG
		debug_say ("%d: set poll event\n", task->pid);
	    #endif
	#endif

	cleaning_pthread__local =  pthread->pid;							// Assume the awesome responsibility of being the designated heapcleaner thread.

	barrier_needs_to_be_initialized__local =  TRUE;

	#ifdef NEED_PTHREAD_SUPPORT_DEBUG
	    debug_say ("cleaning_pthread__local is %d\n", cleaning_pthread__local);
	#endif
    }
    PTH__MUTEX_UNLOCK( &pth__heapcleaner_mutex__global );


    //////////////////////////////////////////////////////////
    // Whether or not we're the first pthread to enter
    // heapcleaning mode, we now wait until all the
    // other active pthreads have also entered
    // heapcleaning mode.
    //
    // Note that we cannot use a barrier wait here because
    // we do not know how many pthreads will wind up entering
    // heapcleaner mode -- one or more pthreads might be starting
    // up additional pthreads.
    {
        // Spin until all active pthreads have entered this loop:
        //
	int n = 0;
        //
	while (pthreads_ready_to_clean__local !=  (active_pthread_count = pth__get_active_pthread_count())) {	// pth__get_active_pthread_count	def in   src/c/pthread/pthread-on-posix-threads.c
	    // 
	    // Spinwait.  This is bad;
	    // to avoid being hideously bad we avoid
	    // constantly pounding the mutex (and thus
	    // the shared memory bus) by counting to 10,000
	    // on our fingers between mutex ops:
	    //
	    if (n != 1000) {
		//
		for (int i = 10000; i --> 0; );
		//
		n++;
		//
	    } else {
		//
		n = 0;
		//
		#ifdef NEED_PTHREAD_SUPPORT_DEBUG
		    //
		    debug_say ("%d spinning %d <> %d <alloc=0x%x, limit=0x%x>\n", 
		task->pid, pthreads_ready_to_clean__local, active_pthread_count, task->heap_allocation_pointer,
			task->heap_allocation_limit);
		#endif
	    }
	}

	// As soon as all active pthreads have entered the above
        // loop, they all fall out and arrive here.  The first to
	// do so needs to initialize the barrier, so that everyone
	// can wait at it:
	//
        PTH__MUTEX_LOCK( &pth__heapcleaner_mutex__global );					// Use mutex to avoid a race condition -- otherwise multiple pthreads might think they were the designated heapcleaner.
	    //
	    if (barrier_needs_to_be_initialized__local) {
		barrier_needs_to_be_initialized__local = FALSE;					// We're the first pthread to exit the spinloop.
		//
		pth__barrier_init( &pth__heapcleaner_barrier__global, active_pthread_count );	// Set up barrier to wait on proper number of threads.
	    }
	    //
	PTH__MUTEX_UNLOCK( &pth__heapcleaner_mutex__global );

    }

    // All Pthreads are now ready to clean.

    #if NEED_PTHREAD_SUPPORT_FOR_SOFTWARE_GENERATED_PERIODIC_EVENTS
	//
	ASSIGN(  SOFTWARE_GENERATED_PERIODIC_EVENTS_SWITCH_REFCELL__GLOBAL,  HEAP_FALSE  );
	//
	#ifdef NEED_PTHREAD_SUPPORT_DEBUG
	    debug_say ("%d: cleared poll event\n", task->pid);
	#endif
    #endif

    #ifdef NEED_PTHREAD_SUPPORT_DEBUG
	debug_say ("(%d) all %d/%d procs in\n", task->pid, pthreads_ready_to_clean__local, pth__get_active_pthread_count());
    #endif


    ////////////////////////////////////////////////////////////////// 
    // If we're the designated heapcleaner thread
    // we now return to caller to take up our
    // heapcleaning responsibilities:
    //
    if (pthread->pid == cleaning_pthread__local) {
        //
        return TRUE;										// We're the designated heapcleaner.
    }


    ////////////////////////////////////////////////////////////////// 
    // We're not the designated heapcleaner thread,
    // so we take a break until that thread has
    // finished heapcleaning:
    //
    #ifdef NEED_PTHREAD_SUPPORT_DEBUG
	debug_say ("%d entering barrier %d\n",pthread->pid,active_pthread_count);
    #endif

    {   Bool result;
        char* err = pth__barrier_wait( &pth__heapcleaner_barrier__global, &result );			// We're not the designated heapcleaner;  wait for the designated heapcleaner to finish heapcleaning.
	    //
	    // 'result' will be TRUE for one pthread waiting on barrier, FALSE for the rest;
	    // We do not take advantage of that here.
	    //
	    // 'err' will be NULL normally, non-NULL only on an error;
	    // for the moment we hope for the best. XXX SUCKO FIXME.
	if (err) die(err);
    }

    #ifdef NEED_PTHREAD_SUPPORT_DEBUG
	debug_say ("%d left barrier\n", pthread->pid);
    #endif

    // We return FALSE to tell caller that we're
    // not the designated heapcleaner, so we shouldn't
    // do any heapcleaning work upon our return:
    //
    return FALSE;
}							// fun pth__start_heapcleaning
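The elect-one-cleaner protocol implemented above can be reduced to a free-standing POSIX-threads sketch. This is an illustration only, not the runtime's code: it assumes a fixed thread count (NTHREADS), which is why it can rendezvous on a pthread_barrier_t where the real code must spinwait on pth__get_active_pthread_count().

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

#define NTHREADS 4

static pthread_mutex_t   mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_barrier_t barrier;

static int       ready_count = 0;
static pthread_t cleaner;

static bool start_cleaning (void) {				// Returns true in exactly one thread -- the designated cleaner.
    //
    pthread_mutex_lock( &mutex );
	if (ready_count++ == 0)   cleaner = pthread_self();	// First thread in assumes the mantle of designated cleaner.
    pthread_mutex_unlock( &mutex );

    pthread_barrier_wait( &barrier );				// Rendezvous: nobody cleans until everyone has stopped running user code.

    if (pthread_equal( pthread_self(), cleaner ))   return true;

    pthread_barrier_wait( &barrier );				// Non-cleaners park here until the cleaner checks in again.
    return false;
}

static void finish_cleaning (void) {
    //
    pthread_mutex_lock( &mutex );
	ready_count = 0;					// Reset for the next collection cycle.
    pthread_mutex_unlock( &mutex );

    pthread_barrier_wait( &barrier );				// Release the parked threads.
}

static void* worker (void* arg) {
    //
    (void) arg;
    if (start_cleaning()) {
	printf( "designated cleaner: collecting\n" );
	finish_cleaning();
    }
    return NULL;
}

int main (void) {
    //
    pthread_t threads[ NTHREADS ];

    pthread_barrier_init( &barrier, NULL, NTHREADS );

    for (int i = 0;  i < NTHREADS;  i++)   pthread_create( &threads[i], NULL, worker, NULL );
    for (int i = 0;  i < NTHREADS;  i++)   pthread_join( threads[i], NULL );

    pthread_barrier_destroy( &barrier );
    return 0;
}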
Example #26
static int   check_pointer   (Val* p,  Val w,  int src_age,  int srcKind,  int dstKind)   {
    //       =============
    //
    Sibid sibid  = SIBID_FOR_POINTER( book_to_sibid__global, w);
    int	  dstGen = GET_AGE_FROM_SIBID(sibid);
    int	  chunkc = GET_KIND_FROM_SIBID(sibid);

    switch (chunkc) {
        //
    case RO_POINTERS_KIND:
    case RO_CONSCELL_KIND:
    case NONPTR_DATA_KIND:
    case RW_POINTERS_KIND:
	if (!(dstKind & (1 << chunkc))) {
	    ERROR;
	    debug_say (
		"** @%#x: sequence data kind mismatch (expected %d, found %d)\n",
		p, dstKind, chunkc);
	}

	if (dstGen < src_age) {
	    if (srcKind != RW_POINTERS_KIND) {
		ERROR;
	        debug_say (
		    "** @%#x: reference to younger chunk @%#x (gen = %d)\n",
		    p, w, dstGen);
	    }
	}

	if ((chunkc != RO_CONSCELL_KIND) && (!IS_TAGWORD(((Val *)w)[-1]))) {
	    ERROR;
	    debug_say ("** @%#x: reference into chunk middle @%#x\n", p, w);
	}
	break;

    case CODE_KIND:
	break;

    case NEW_KIND:
	ERROR;
	debug_say ("** @%#x: unexpected new-space reference\n", p);
	dstGen = MAX_AGEGROUPS;
	break;

    default:
	if (sibid != UNMAPPED_BOOK_SIBID) {
	    die("bogus chunk ilk in book_to_sibid__global\n");
	} else {
	    if (name_of_cfun(w) == NULL) {					// name_of_cfun	def in   src/c/heapcleaner/mythryl-callable-cfun-hashtable.c
		ERROR;
		debug_say (
		    "** @%#x: reference to unregistered external address %#x\n",
		    p, w);
	    }
	    dstGen = MAX_AGEGROUPS;
	}
	break;
    }

    return dstGen;
}								// fun check_pointer
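The dstKind test above relies on each chunk kind being a small integer and each CHUNKC__IS_* constant being the matching one-bit mask, so that CHUNKC_any accepts everything. A hedged reconstruction of that convention follows; the numeric values are assumptions, not copied from the runtime's headers:

// Assumed encodings -- illustration only:
//
enum {
    RO_POINTERS_KIND  = 1,					// Records.
    RO_CONSCELL_KIND  = 2,					// Pairs.
    NONPTR_DATA_KIND  = 3,					// Strings, float vectors, ...
    RW_POINTERS_KIND  = 4					// Rw_vectors.
};

#define CHUNKC__IS_RO_POINTERS	(1 << RO_POINTERS_KIND)
#define CHUNKC__IS_RO_CONSCELL	(1 << RO_CONSCELL_KIND)
#define CHUNKC__IS_NONPTR_DATA	(1 << NONPTR_DATA_KIND)
#define CHUNKC__IS_RW_POINTERS	(1 << RW_POINTERS_KIND)

#define CHUNKC_any	( CHUNKC__IS_RO_POINTERS | CHUNKC__IS_RO_CONSCELL \
			| CHUNKC__IS_NONPTR_DATA | CHUNKC__IS_RW_POINTERS )

// With these definitions "dstKind & (1 << chunkc)" is nonzero exactly
// when the kind recorded in the sibid is one of the acceptable kinds.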
Example #27
static void   check_nonpointer_sib   (Sib* ap)   {
    //        ================
    // 
    // Check a string sib for consistency.

    Val* p;
    Val* stop;
    Val* prevTagword;
    Val  tagword;
    Val	 next;

    int  len;
    int  gen =  GET_AGE_FROM_SIBID( ap->id );

    if (!sib_is_active(ap))   return;							// sib_is_active	def in    src/c/h/heap.h

    debug_say ("  strings [%d]: [%#x..%#x:%#x)\n",
	//
	gen,
	ap->tospace,
	ap->tospace.first_free,
	ap->tospace.limit
    );

    p = ap->tospace;
    stop = ap->tospace.first_free;
    prevTagword = NULL;
    while (p < stop) {
	tagword = *p++;
	if (IS_TAGWORD(tagword)) {
	    //
	    switch (GET_BTAG_FROM_TAGWORD(tagword)) {
	        //
	    case FOUR_BYTE_ALIGNED_NONPOINTER_DATA_BTAG:
	    case EIGHT_BYTE_ALIGNED_NONPOINTER_DATA_BTAG:
		len = GET_LENGTH_IN_WORDS_FROM_TAGWORD(tagword);
		break;

	    default:
		ERROR;
		debug_say ("** @%#x: strange tag (%#x) in string sib\n",
		    p-1, GET_BTAG_FROM_TAGWORD(tagword));
		if (prevTagword != NULL)
		    debug_say ("   previous string started @ %#x\n", prevTagword);
		return;
	    }
	    prevTagword = p-1;
	    p += len;
	}
#ifdef ALIGN_FLOAT64S
	else if ((tagword == 0) && (((Vunt)p & WORD_BYTESIZE) != 0))
	    continue;	    // Assume this is alignment padding.
#endif
	else {
	    ERROR;

	    debug_say ("** @%#x: expected tagword, but found %#x in string sib\n", p-1, tagword);

	    if (prevTagword != NULL)   debug_say ("   previous string started @ %#x\n", prevTagword);

	    return;
	}
    }

}								// fun check_nonpointer_sib
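The ALIGN_FLOAT64S case above treats a zero word at a suitably odd word address as alignment padding. A hedged sketch of the allocation-side convention this implies (assumed, not taken from the allocator): on a 32-bit system a float64 payload must start on an 8-byte boundary, so one zero pad word is emitted whenever the tagword would otherwise land on an 8-byte boundary, which would leave the payload mis-aligned.

#include <stdint.h>

typedef uintptr_t Vunt;						// Stand-in for the runtime's Vunt.
#define WORD_BYTESIZE 4						// 32-bit words assumed.

// 'tagword_addr' is where the tagword would go without padding;
// the payload starts one word after the tagword:
//
static int float64_pad_needed (Vunt tagword_addr) {
    return  ((tagword_addr + WORD_BYTESIZE) & 7) != 0;
}

This matches the scanner's test: after "tagword = *p++" skips the zero pad word, "((Vunt)p & WORD_BYTESIZE) != 0" holds exactly when the following real tagword sits at an odd word address, i.e. when its float payload is 8-byte aligned.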
Example #28
static void   check_rw_pointer_sib   (Sib* ap,  Coarse_Inter_Agegroup_Pointers_Map* map)   {		// 'map' is nowhere used in the code?! Should be deleted or used.  XXX BUGGO FIXME
    //        ====================
    //
    Val* p;
    Val* stop;
    Val  tagword;
    Val  w;

    int  i, j;
    int  len;

    int  gen =  GET_AGE_FROM_SIBID(ap->id);

    if (!sib_is_active(ap))   return;							// sib_is_active	def in    src/c/h/heap.h

    debug_say ("  arrays [%d]: [%#x..%#x:%#x)\n",
	//
	gen,
	ap->tospace,
	ap->tospace.first_free,
	ap->tospace.limit
    );

    p = ap->tospace;
    stop = ap->tospace.first_free;

    while (p < stop) {
	tagword = *p++;
	if (!IS_TAGWORD(tagword)) {
	    ERROR;
	    debug_say (
		"** @%#x: expected tagword, but found %#x in vector sib\n",
		p-1, tagword);
	    return;
	}

	switch (GET_BTAG_FROM_TAGWORD(tagword)) {
	    //
	case RW_VECTOR_DATA_BTAG:
	    len = GET_LENGTH_IN_WORDS_FROM_TAGWORD(tagword);
	    break;

	case WEAK_POINTER_OR_SUSPENSION_BTAG:
	    len = 1;
	    break;

	default:
	    ERROR;
	    debug_say ("** @%#x: strange tag (%#x) in vector sib\n",
		p-1, GET_BTAG_FROM_TAGWORD(tagword));
	    return;
	}

	for (i = 0;  i < len;  i++, p++) {
	    //
	    w = *p;
	    if (IS_TAGWORD(w)) {
		ERROR;
		debug_say (
		    "** @%#x: Unexpected tagword %#x in rw_vector slot %d of %d\n",
		    p, w, i, GET_LENGTH_IN_WORDS_FROM_TAGWORD(tagword));
		for (p -= (i+1), j = 0;  j <= len;  j++, p++) {
		    debug_say ("  %#x: %#10x\n", p, *p);
		}
		return;
	    } else if (IS_POINTER(w)) {
		check_pointer(p, w, gen, RW_POINTERS_KIND, CHUNKC_any);
	    }
	}
    }
}								// fun check_rw_pointer_sib
Example #29
Val   raise_error__may_heapclean (
    //==========================
    //
    Task*	    task,
    const char*	    altMsg,
    const char*     at,			// C sourcefile and line number raising this error:  "<foo.c:37>"
    Roots*	    extra_roots
) {
    // Raise the Mythryl exception RUNTIME_EXCEPTION, which is defined as:
    //
    //    exception RUNTIME_EXCEPTION (String, Null_Or(System_Error) );
    //
    // We normally get invoked via either the
    // RAISE_SYSERR__MAY_HEAPCLEAN or RAISE_ERROR__MAY_HEAPCLEAN macro from
    //
    //     src/c/lib/raise-error.h 
    //
    // For the time being, we use the errno value as the System_Error; eventually that
    // will be represented by an (Int, String) pair.  If altMsg is non-NULL,
    // then use it as the error string and use NULL for the System_Error.

    int error_number = errno;		// Various calls can trash this value so preserve it early.


    const char*	    msg;
    char	    buf[32];

    Val  null_or_errno;

    if (altMsg != NULL) {
	//
	msg           =  altMsg;
	null_or_errno =  OPTION_NULL;

    } else if ((msg = strerror(error_number)) != NULL) {

        null_or_errno =  OPTION_THE( task, TAGGED_INT_FROM_C_INT(error_number) );

    } else {

	sprintf(buf, "<unknown error %d>", error_number);
	msg = buf;
	null_or_errno =  OPTION_THE(  task,  TAGGED_INT_FROM_C_INT(error_number)  );
    }

    #if (defined(DEBUG_OS_INTERFACE) || defined(DEBUG_TRACE_CCALL))
	debug_say ("RaiseSysError: errno = %d, msg = \"%s\"\n",
	    (altMsg != NULL) ? -1 : error_number, msg);
    #endif

    Roots roots1 = { &null_or_errno, extra_roots };

    Val errno_string = make_ascii_string_from_c_string__may_heapclean (task, msg, &roots1 );

    Val at_list;			// [] or [ "<foo.c:187>" ].
    //
    if (at != NULL) {
        //
	Roots roots2 = { &errno_string, &roots1 };

	Val at_cstring
            =
	    make_ascii_string_from_c_string__may_heapclean (task, at, &roots2 );

	at_list = LIST_CONS(task, at_cstring, LIST_NIL);

    } else {

	at_list = LIST_NIL;
    }

    Val arg = make_two_slot_record( task,  errno_string, null_or_errno);

    Val syserr_exception =   MAKE_EXCEPTION(task, PTR_CAST( Val, RUNTIME_EXCEPTION__GLOBAL), arg, at_list);

    // Modify the task state so that 'syserr_exception'
    // will be raised when Mythryl execution resumes:
    //
    raise_mythryl_exception( task, syserr_exception );		// raise_mythryl_exception	is from    src/c/main/run-mythryl-code-and-runtime-eventloop.c

    return  syserr_exception;
}								// fun raise_error__may_heapclean
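For context, a hypothetical caller might look like the sketch below. It is not from the sources: the binding name, the TAGGED_INT_TO_C_INT accessor, and HEAP_VOID are assumptions, and real callers go through the RAISE_SYSERR__MAY_HEAPCLEAN / RAISE_ERROR__MAY_HEAPCLEAN macros (which supply the "<file:line>" string) rather than calling the function directly.

// Hypothetical syscall binding, for illustration only
// (presumes the runtime's usual headers plus <unistd.h>):
//
Val   _lib7_example_close   (Task* task,  Val arg) {
    //
    int fd = TAGGED_INT_TO_C_INT( arg );			// Assumed accessor name.

    if (close( fd ) < 0) {
	//
	// errno is still set from close(); passing altMsg == NULL
	// makes raise_error__may_heapclean use strerror(errno):
	//
	return  raise_error__may_heapclean( task, NULL, "<example.c:42>", NULL );
    }

    return HEAP_VOID;						// Assumed unit value.
}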
Example #30
static Val   forward_special_chunk   (Agegroup* ag1,  Val* chunk,   Val tagword)   {
    //       =====================
    // 
    // Forward a special chunk (suspension or weak pointer).

    Sib*  sib =  ag1->sib[ RW_POINTERS_SIB ];						// Special chunks can be updated (modified)
											// so they have to go in RW_POINTERS_SIB.
    Val*  new_chunk = sib->tospace.first_free;

    sib->tospace.first_free += SPECIAL_CHUNK_SIZE_IN_WORDS;			// All specials are two words.

    switch (GET_LENGTH_IN_WORDS_FROM_TAGWORD( tagword )) {
        //
    case EVALUATED_LAZY_SUSPENSION_CTAG:
    case UNEVALUATED_LAZY_SUSPENSION_CTAG:
        //
	*new_chunk++ = tagword;
	*new_chunk = *chunk;
	break;

    case WEAK_POINTER_CTAG:
        {
      	    //
	    Val	v = *chunk;
									    #ifdef DEBUG_WEAKREFS
										debug_say ("MinorGC: weak [%#x ==> %#x] --> %#x", chunk, new_chunk+1, v);
									    #endif

	    if (! IS_POINTER( v )) {
										#ifdef DEBUG_WEAKREFS
										debug_say (" unboxed\n");
										#endif

	        // Weak references to unboxed chunks (i.e., immediate Int31)
		// can never be nullified, since Int31 values, being stored
		// in-pointer, take no actual heapspace and thus cannot actually
		// ever get garbage-collected.  Consequently, we can just copy
		// such weakrefs over and skip the rest of our usual processing:
                //
		new_chunk[0] = WEAKREF_TAGWORD;
		new_chunk[1] = v;

		++new_chunk;

	    } else {

		Sibid sibid =  SIBID_FOR_POINTER( book_to_sibid__global, v );
		Val*  vp    =  PTR_CAST( Val*, v );

		if (sibid != AGEGROUP0_SIBID) {

		    // Weakref points to a value in an older heap agegroup.
		    // Since we are only heapcleaning agegroup0 in
		    // this file, the referenced value cannot get
		    // garbage-collected this pass, so we can skip
		    // the usual work to check for that and if necessary
		    // null out the weakref:
		    //
										    #ifdef DEBUG_WEAKREFS
											debug_say (" old chunk\n");
										    #endif

		    new_chunk[0] =  WEAKREF_TAGWORD;
		    new_chunk[1] =  v;

		    ++new_chunk;

		} else {

		    //
		    if (vp[-1] == FORWARDED_CHUNK_TAGWORD) {
		        //
			// Reference to a chunk that has already been forwarded.
			// Note that we have to put the pointer to the non-forwarded
			// copy of the chunk (i.e., v) into the to-space copy
			// of the weak pointer, since the heapcleaner has the invariant
			// that it never sees to-space pointers during sweeping.
											#ifdef DEBUG_WEAKREFS
											    debug_say (" already forwarded to %#x\n", PTR_CAST( Val, FOLLOW_FORWARDING_POINTER(vp)));
											#endif

			new_chunk[0] =  WEAKREF_TAGWORD;
			new_chunk[1] =  v;

			++new_chunk;

		    } else {

			// This is the important case: We are copying a weakref
			// of an agegroup0 value.  That agegroup0 value might
			// get garbage-collected this pass; if it does, we must null
			// out the weakref.
			//
			// To do this efficiently, as we copy such weakrefs from
			// agegroup0 into agegroup1 we chain them together via
			// their tagword fields with the root pointer kept
                        // in ag1->heap->weakrefs_forwarded_during_heapcleaning.
			//
			// At the end of heapcleaning we will consume this chain of
			// weakrefs in null_out_newly_dead_weakrefs() where					// null_out_newly_dead_weakrefs	is from   src/c/heapcleaner/heapcleaner-stuff.c
			// we will null out any newly dead weakrefs and then
			// replace the chainlinks with valid tagwords -- either
			// WEAKREF_TAGWORD or NULLED_WEAKREF_TAGWORD,
			// as appropriate, thus erasing our weakref chain and
			// restoring sanity.
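			//
			// (A reconstruction of that chain walk is sketched
			// just after this function, below.)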
			//
                        // We mark the chunk reference field in the forwarded copy
			// to make it look like a Tagged_Int so that the to-space
			// sweeper does not follow the weak reference.
											#ifdef DEBUG_WEAKREFS
											    debug_say (" forward\n");
											#endif

			new_chunk[0] =  MARK_POINTER(PTR_CAST( Val, ag1->heap->weakrefs_forwarded_during_heapcleaning ));		// MARK_POINTER just sets the low bit to 1, making it look like an Int31 value
			new_chunk[1] =  MARK_POINTER( vp );										// MARK_POINTER		is from   src/c/h/heap-tags.h

			ag1->heap->weakrefs_forwarded_during_heapcleaning =  new_chunk;

			++new_chunk;
		    }
		}
	    }
	}
	break;

    case NULLED_WEAK_POINTER_CTAG:					// Shouldn't happen in agegroup0.
    default:
	die (
            "strange/unexpected special chunk @ %#x; tagword = %#x\n",
            chunk, tagword
	);
    }								// switch (GET_LENGTH_IN_WORDS_FROM_TAGWORD(tagword))

    chunk[-1] =  FORWARDED_CHUNK_TAGWORD;
    chunk[ 0] =  (Val) (Vunt) new_chunk;

    return   PTR_CAST( Val, new_chunk );
}								// fun forward_special_chunk
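The weakref chain built above is consumed at the end of heapcleaning. The following is a reconstruction from the comments, NOT the actual null_out_newly_dead_weakrefs() body in src/c/heapcleaner/heapcleaner-stuff.c; in particular UNMARK_POINTER, HEAP_VOID and the NULL chain terminator are assumptions.

// Sketch only -- reconstructed from the comments above:
//
static void   null_out_newly_dead_weakrefs__sketch   (Heap* heap) {
    //
    Val* w =  heap->weakrefs_forwarded_during_heapcleaning;	// 'w' points at a forwarded weakref's tagword slot.

    while (w != NULL) {						// NULL terminator is an assumption.
	//
	Val* next =  (Val*) UNMARK_POINTER( w[0] );		// Chain link was stored (marked) in the tagword slot.
	Val* vp   =  (Val*) UNMARK_POINTER( w[1] );		// From-space chunk the weakref referenced.

	if (vp[-1] == FORWARDED_CHUNK_TAGWORD) {
	    //
	    w[0] =  WEAKREF_TAGWORD;				// Referent survived: repoint at its to-space copy.
	    w[1] =  FOLLOW_FORWARDING_POINTER( vp );
	} else {
	    //
	    w[0] =  NULLED_WEAKREF_TAGWORD;			// Referent died: null out the weakref.
	    w[1] =  HEAP_VOID;					// Assumed 'nulled' payload value.
	}
	w = next;
    }

    heap->weakrefs_forwarded_during_heapcleaning = NULL;
}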