// Push on L a table describing the keeper-side storage (fifos) of the linda identified by ptr.
// Returns 0 (nothing pushed) if the linda has no storage, 1 (table pushed) otherwise.
int keeper_push_linda_storage( struct s_Universe* U, lua_State* L, void* ptr, unsigned long magic_)
{
	struct s_Keeper* K = keeper_acquire( U->keepers, magic_);
	lua_State* KL = K ? K->L : NULL;
	if( KL == NULL)
		return 0;
	STACK_GROW( KL, 4);
	STACK_CHECK( KL);
	lua_pushlightuserdata( KL, fifos_key);                      // fifos_key
	lua_rawget( KL, LUA_REGISTRYINDEX);                         // fifos
	lua_pushlightuserdata( KL, ptr);                            // fifos ud
	lua_rawget( KL, -2);                                        // fifos storage
	lua_remove( KL, -2);                                        // storage
	if( !lua_istable( KL, -1))
	{
		// this linda has no storage in the keeper
		lua_pop( KL, 1);                                        //
		STACK_MID( KL, 0);
		// BUGFIX: the keeper was left acquired (its lock held) on this early-out,
		// which would deadlock the next thread touching this keeper
		keeper_release( K);
		return 0;
	}
	// move data from keeper to destination state                  KEEPER                       MAIN
	lua_pushnil( KL);                                           // storage nil
	STACK_GROW( L, 5);
	STACK_CHECK( L);
	lua_newtable( L);                                           //                              out
	while( lua_next( KL, -2))                                   // storage key fifo
	{
		keeper_fifo* fifo = prepare_fifo_access( KL, -1);       // storage key fifo
		lua_pushvalue( KL, -2);                                 // storage key fifo key
		luaG_inter_move( U, KL, L, 1, eLM_FromKeeper);          // storage key fifo             out key
		STACK_MID( L, 2);
		lua_newtable( L);                                       //                              out key keyout
		luaG_inter_move( U, KL, L, 1, eLM_FromKeeper);          // storage key                  out key keyout fifo
		lua_pushinteger( L, fifo->first);                       //                              out key keyout fifo first
		STACK_MID( L, 5);
		lua_setfield( L, -3, "first");                          //                              out key keyout fifo
		lua_pushinteger( L, fifo->count);                       //                              out key keyout fifo count
		STACK_MID( L, 5);
		lua_setfield( L, -3, "count");                          //                              out key keyout fifo
		lua_pushinteger( L, fifo->limit);                       //                              out key keyout fifo limit
		STACK_MID( L, 5);
		lua_setfield( L, -3, "limit");                          //                              out key keyout fifo
		lua_setfield( L, -2, "fifo");                           //                              out key keyout
		lua_rawset( L, -3);                                     // out
		STACK_MID( L, 1);
	}
	STACK_END( L, 1);
	lua_pop( KL, 1);                                            //
	STACK_END( KL, 0);
	keeper_release( K);
	return 1;
}
/*
 * Return the registered ID function for 'index' (deep userdata proxy),
 * or NULL if 'index' is not a deep userdata proxy.
 */
static inline luaG_IdFunction get_idfunc( lua_State* L, int index, enum eLookupMode mode_)
{
	// inside a keeper state, everything we are asked about is guaranteed to be
	// a deep userdata: fetch the idfunc pointer stored in its prelude directly
	if( mode_ == eLM_FromKeeper)
	{
		DEEP_PRELUDE** proxy = (DEEP_PRELUDE**) lua_touserdata( L, index);
		return (*proxy)->idfunc;
	}
	// outside a keeper we can't just trust the caller: the only proof that this
	// userdata is a deep userdata is that its metatable is registered in our
	// metatable/idfunc lookup database
	{
		luaG_IdFunction result;
		STACK_GROW( L, 1);
		STACK_CHECK( L);
		if( !lua_getmetatable( L, index))                  // deep ... metatable?
		{
			// no metatable: can't be a deep userdata object!
			return NULL;
		}
		// replace the metatable with the idfunc it maps to, if any
		get_deep_lookup( L);                               // deep ... idfunc|nil
		result = (luaG_IdFunction) lua_touserdata( L, -1); // NULL if not a userdata
		lua_pop( L, 1);
		STACK_END( L, 0);
		return result;
	}
}
// cause each keeper state to populate its database of transferable functions with those from the specified module // do do this we simply require the module inside the keeper state, then populate the lookup database void populate_keepers( lua_State* L) { size_t name_len; char const* name = luaL_checklstring( L, -1, &name_len); int i; DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "populate_keepers %s BEGIN\n" INDENT_END, name)); DEBUGSPEW_CODE( ++ debugspew_indent_depth); for( i = 0; i < GNbKeepers; ++ i) { lua_State* K = GKeepers[i].L; int res; MUTEX_LOCK( &GKeepers[i].lock_); STACK_CHECK( K); STACK_GROW( K, 2); lua_getglobal( K, "require"); lua_pushlstring( K, name, name_len); res = lua_pcall( K, 1, 1, 0); if( res != LUA_OK) { char const* err = luaL_checkstring( K, -1); luaL_error( L, "error requiring '%s' in keeper state: %s", name, err); } // after requiring the module, register the functions it exported in our name<->function database populate_func_lookup_table( K, -1, name); lua_pop( K, 1); STACK_END( K, 0); MUTEX_UNLOCK( &GKeepers[i].lock_); } DEBUGSPEW_CODE( -- debugspew_indent_depth); }
/*
 * Initialize keeper states
 *
 * If there is a problem, return an error message (NULL for okay).
 *
 * Note: Any problems would be design flaws; the created Lua state is left
 * unclosed, because it does not really matter. In production code, this
 * function never fails.
 */
char const* init_keepers( int const _nbKeepers, lua_CFunction _on_state_create)
{
	int i;
	assert( _nbKeepers >= 1);
	GNbKeepers = _nbKeepers;
	GKeepers = malloc( _nbKeepers * sizeof( struct s_Keeper));
	if( GKeepers == NULL) // BUGFIX: the allocation was used unchecked, crashing on OOM
	{
		return "out of memory";
	}
	for( i = 0; i < _nbKeepers; ++ i)
	{
		// We need to load all base libraries in the keeper states so that the transfer databases are populated properly
		//
		// 'io' for debugging messages, 'package' because we need to require modules exporting idfuncs
		// the others because they export functions that we may store in a keeper for transfer between lanes
		lua_State* K = luaG_newstate( "*", _on_state_create);
		if (!K)
			return "out of memory";
		STACK_CHECK( K)
		// to see VM name in Decoda debugger
		lua_pushliteral( K, "Keeper #");
		lua_pushinteger( K, i + 1);
		lua_concat( K, 2);
		lua_setglobal( K, "decoda_name");
#if KEEPER_MODEL == KEEPER_MODEL_C
		// create the fifos table in the keeper state
		lua_pushlightuserdata( K, fifos_key);
		lua_newtable( K);
		lua_rawset( K, LUA_REGISTRYINDEX);
#endif // KEEPER_MODEL == KEEPER_MODEL_C
#if KEEPER_MODEL == KEEPER_MODEL_LUA
		// use package.loaders[2] to find keeper microcode
		lua_getglobal( K, "package");                  // package
		lua_getfield( K, -1, "loaders");               // package package.loaders
		lua_rawgeti( K, -1, 2);                        // package package.loaders package.loaders[2]
		lua_pushliteral( K, "lanes-keeper");           // package package.loaders package.loaders[2] "lanes-keeper"
		STACK_MID( K, 4);
		// first pcall loads lanes-keeper.lua, second one runs the chunk
		if( lua_pcall( K, 1 /*args*/, 1 /*results*/, 0 /*errfunc*/) || lua_pcall( K, 0 /*args*/, 0 /*results*/, 0 /*errfunc*/))
		{
			// LUA_ERRRUN / LUA_ERRMEM / LUA_ERRERR
			// the string stays valid because K is left open (see note above)
			char const* err = lua_tostring( K, -1);
			assert( err);
			return err;
		}                                              // package package.loaders
		STACK_MID( K, 2);
		lua_pop( K, 2);
#endif // KEEPER_MODEL == KEEPER_MODEL_LUA
		STACK_END( K, 0)
		MUTEX_INIT( &GKeepers[i].lock_);
		GKeepers[i].L = K;
	}
#if HAVE_KEEPER_ATEXIT_DESINIT
	atexit( atexit_close_keepers);
#endif // HAVE_KEEPER_ATEXIT_DESINIT
	return NULL; // ok
}
/*
 * Push a registry subtable (keyed by unique 'token') onto the stack.
 * If the subtable does not exist, it is created and chained.
 */
static void push_registry_subtable( lua_State *L, void *token )
{
    STACK_GROW(L,3);
    STACK_CHECK(L)
    lua_pushlightuserdata( L, token );
    lua_rawget( L, LUA_REGISTRYINDEX );
    //
    // [-1]: nil/subtable
    if (lua_isnil(L,-1))
    {
        // first access: create the subtable and anchor it in the registry
        lua_pop(L,1);
        lua_newtable(L);                    // value
        lua_pushlightuserdata( L, token );  // key
        lua_pushvalue(L,-2);
        //
        // [-3]: value (2nd ref)
        // [-2]: key
        // [-1]: value
        lua_rawset( L, LUA_REGISTRYINDEX );
        // the extra reference pushed above is what remains on the stack for the caller
    }
    STACK_END(L,1)
    ASSERT_L( lua_istable(L,-1) );
}
/*
 * Create a deep userdata
 *
 * proxy_ud= deep_userdata( idfunc [, ...] )
 *
 * Creates a deep userdata entry of the type defined by 'idfunc'.
 * Other parameters are passed on to the 'idfunc' "new" invocation.
 *
 * 'idfunc' must fulfill the following features:
 *
 *   lightuserdata = idfunc( eDO_new [, ...] )      -- creates a new deep data instance
 *   void = idfunc( eDO_delete, lightuserdata )     -- releases a deep data instance
 *   tbl = idfunc( eDO_metatable )                  -- gives metatable for userdata proxies
 *
 * Reference counting and true userdata proxying are taken care of for the
 * actual data type.
 *
 * Types using the deep userdata system (and only those!) can be passed between
 * separate Lua states via 'luaG_inter_move()'.
 *
 * Returns: 'proxy' userdata for accessing the deep data via 'luaG_todeep()'
 */
int luaG_newdeepuserdata( lua_State* L, luaG_IdFunction idfunc)
{
	char const* errmsg;
	DEEP_PRELUDE* prelude = DEEP_MALLOC( sizeof(DEEP_PRELUDE));
	if( prelude == NULL)
	{
		// BUGFIX: message read "couldn't not allocate" (double negative)
		return luaL_error( L, "could not allocate deep prelude: out of memory");
	}
	prelude->refcount = 0; // 'push_deep_proxy' will lift it to 1
	prelude->idfunc = idfunc;
	STACK_GROW( L, 1);
	STACK_CHECK( L);
	{
		int oldtop = lua_gettop( L);
		prelude->deep = idfunc( L, eDO_new);
		if( prelude->deep == NULL)
		{
			DEEP_FREE( prelude); // BUGFIX: don't leak the prelude when raising
			luaL_error( L, "idfunc(eDO_new) failed to create deep userdata (out of memory)");
		}
		if( lua_gettop( L) - oldtop != 0)
		{
			luaL_error( L, "Bad idfunc(eDO_new): should not push anything on the stack");
		}
	}
	errmsg = push_deep_proxy( get_universe( L), L, prelude, eLM_LaneBody);  // proxy
	if( errmsg != NULL)
	{
		// BUGFIX: errmsg was passed as the format string itself; any '%' in it
		// would have been interpreted as a conversion specifier (UB/crash)
		luaL_error( L, "%s", errmsg);
	}
	STACK_END( L, 1);
	return 1;
}
/*
 * Does what the original 'push_registry_subtable' function did, but adds an optional mode argument to it.
 * 'mode_', when non-NULL, is stored in the new subtable's metatable under '__mode'
 * (e.g. "k", "v" or "kv"), making the subtable weak; NULL yields a regular table.
 * The mode is only applied on first creation, when the subtable does not exist yet.
 */
void push_registry_subtable_mode( lua_State* L, void* key_, const char* mode_)
{
	STACK_GROW( L, 3);
	STACK_CHECK( L);
	lua_pushlightuserdata( L, key_);                         // key
	lua_rawget( L, LUA_REGISTRYINDEX);                       // {}|nil
	if( lua_isnil( L, -1))
	{
		lua_pop( L, 1);                                      //
		lua_newtable( L);                                    // {}
		lua_pushlightuserdata( L, key_);                     // {} key
		lua_pushvalue( L, -2);                               // {} key {}
		// _R[key_] = {}
		lua_rawset( L, LUA_REGISTRYINDEX);                   // {}
		// Set its metatable if requested
		if( mode_)
		{
			lua_newtable( L);                                // {} mt
			lua_pushliteral( L, "__mode");                   // {} mt "__mode"
			lua_pushstring( L, mode_);                       // {} mt "__mode" mode
			lua_rawset( L, -3);                              // {} mt
			lua_setmetatable( L, -2);                        // {}
		}
	}
	STACK_END( L, 1);
	ASSERT_L( lua_istable( L, -1));
}
// cause each keeper state to populate its database of transferable functions with those from the specified module void populate_keepers( lua_State *L) { size_t name_len; char const *name = luaL_checklstring( L, -1, &name_len); size_t package_path_len; char const *package_path; size_t package_cpath_len; char const *package_cpath; int i; // we need to make sure that package.path & package.cpath are the same in the keepers // than what is currently in use when the module is required in the caller's Lua state STACK_CHECK(L) STACK_GROW( L, 3); lua_getglobal( L, "package"); lua_getfield( L, -1, "path"); package_path = luaL_checklstring( L, -1, &package_path_len); lua_getfield( L, -2, "cpath"); package_cpath = luaL_checklstring( L, -1, &package_cpath_len); for( i = 0; i < GNbKeepers; ++ i) { lua_State *K = GKeepers[i].L; int res; MUTEX_LOCK( &GKeepers[i].lock_); STACK_CHECK(K) STACK_GROW( K, 2); lua_getglobal( K, "package"); lua_pushlstring( K, package_path, package_path_len); lua_setfield( K, -2, "path"); lua_pushlstring( K, package_cpath, package_cpath_len); lua_setfield( K, -2, "cpath"); lua_pop( K, 1); lua_getglobal( K, "require"); lua_pushlstring( K, name, name_len); res = lua_pcall( K, 1, 0, 0); if( res != 0) { char const *err = luaL_checkstring( K, -1); luaL_error( L, "error requiring '%s' in keeper state: %s", name, err); } STACK_END(K, 0) MUTEX_UNLOCK( &GKeepers[i].lock_); } lua_pop( L, 3); STACK_END(L, 0) }
/*
 * Initialize keeper states
 *
 * If there is a problem, return an error message (NULL for okay).
 *
 * Note: Any problems would be design flaws; the created Lua state is left
 * unclosed, because it does not really matter. In production code, this
 * function never fails.
 * settings table is at position 1 on the stack
 */
char const* init_keepers( lua_State* L)
{
	int i;
	PROPAGATE_ALLOCF_PREP( L);

	// read the configured number of keepers from the settings table
	STACK_CHECK( L);
	lua_getfield( L, 1, "nb_keepers");
	GNbKeepers = (int) lua_tointeger( L, -1);
	lua_pop( L, 1);
	STACK_END( L, 0);
	assert( GNbKeepers >= 1);

	GKeepers = malloc( GNbKeepers * sizeof( struct s_Keeper));
	if( GKeepers == NULL) // BUGFIX: allocation result was used unchecked
	{
		return "init_keepers() failed to allocate keeper array; out of memory";
	}
	for( i = 0; i < GNbKeepers; ++ i)
	{
		lua_State* K = PROPAGATE_ALLOCF_ALLOC();
		if( K == NULL)
		{
			(void) luaL_error( L, "init_keepers() failed while creating keeper state; out of memory");
		}
		STACK_CHECK( K);

		// to see VM name in Decoda debugger
		lua_pushliteral( K, "Keeper #");
		lua_pushinteger( K, i + 1);
		lua_concat( K, 2);
		lua_setglobal( K, "decoda_name");

		// create the fifos table in the keeper state
		lua_pushlightuserdata( K, fifos_key);
		lua_newtable( K);
		lua_rawset( K, LUA_REGISTRYINDEX);
		STACK_END( K, 0);

		// we can trigger a GC from inside keeper_call(), where a keeper is acquired
		// from there, GC can collect a linda, which would acquire the keeper again, and deadlock the thread.
		// hence the recursive mutex.
		MUTEX_RECURSIVE_INIT( &GKeepers[i].lock_);
		GKeepers[i].L = K;
	}
#if HAVE_KEEPER_ATEXIT_DESINIT
	atexit( atexit_close_keepers);
#endif // HAVE_KEEPER_ATEXIT_DESINIT
	return NULL; // ok
}
// Push on L a table describing the keeper-side storage (fifos) of the linda identified by ptr.
// Returns 0 (nothing pushed) if the linda has no storage, 1 (table pushed) otherwise.
int keeper_push_linda_storage( lua_State* L, void* ptr)
{
	struct s_Keeper* K = keeper_acquire( ptr);
	lua_State* KL = K->L;
	STACK_CHECK( KL)
	lua_pushlightuserdata( KL, fifos_key);                      // fifos_key
	lua_rawget( KL, LUA_REGISTRYINDEX);                         // fifos
	lua_pushlightuserdata( KL, ptr);                            // fifos ud
	lua_rawget( KL, -2);                                        // fifos storage
	lua_remove( KL, -2);                                        // storage
	if( !lua_istable( KL, -1))
	{
		lua_pop( KL, 1);                                        //
		STACK_MID( KL, 0);
		// BUGFIX: the keeper was left acquired on this early-out (its lock held),
		// deadlocking the next thread touching this keeper
		keeper_release( K);
		return 0;
	}
	lua_pushnil( KL);                                           // storage nil
	lua_newtable( L);                                           // out
	while( lua_next( KL, -2))                                   // storage key fifo
	{
		keeper_fifo* fifo = prepare_fifo_access( KL, -1);       // storage key fifo
		lua_pushvalue( KL, -2);                                 // storage key fifo key
		luaG_inter_move( KL, L, 1);                             // storage key fifo / out key
		STACK_CHECK( L)
		lua_newtable( L);                                       // out key keyout
		luaG_inter_move( KL, L, 1);                             // storage key / out key keyout fifo
		lua_pushinteger( L, fifo->first);                       // out key keyout fifo first
		lua_setfield( L, -3, "first");                          // out key keyout fifo
		lua_pushinteger( L, fifo->count);                       // out key keyout fifo count
		lua_setfield( L, -3, "count");                          // out key keyout fifo
		lua_pushinteger( L, fifo->limit);                       // out key keyout fifo limit
		lua_setfield( L, -3, "limit");                          // out key keyout fifo
		lua_setfield( L, -2, "fifo");                           // out key keyout
		lua_rawset( L, -3);                                     // out
		STACK_END( L, 0)
	}
	lua_pop( KL, 1);                                            //
	STACK_END( KL, 0)
	keeper_release( K);
	return 1;
}
/*
 * Sets up [-1]<->[-2] two-way lookups, and ensures the lookup table exists.
 * Pops the both values off the stack.
 */
static void set_deep_lookup( lua_State* L)
{
	STACK_GROW( L, 3);
	STACK_CHECK( L);                                         // a b
	push_registry_subtable( L, DEEP_LOOKUP_KEY);             // a b {}
	STACK_MID( L, 1);
	lua_insert( L, -3);                                      // {} a b
	lua_pushvalue( L, -1);                                   // {} a b b
	lua_pushvalue( L,-3);                                    // {} a b b a
	lua_rawset( L, -5);                                      // {} a b      (lookup[b] = a)
	lua_rawset( L, -3);                                      // {}          (lookup[a] = b)
	lua_pop( L, 1);                                          //
	STACK_END( L, -2);
}
/*
 * Pops the key (metatable or idfunc) off the stack, and replaces with the
 * deep lookup value (idfunc/metatable/nil).
 */
static void get_deep_lookup( lua_State* L)
{
	STACK_GROW( L, 1);
	STACK_CHECK( L);                                         // a
	lua_pushlightuserdata( L, DEEP_LOOKUP_KEY);              // a DLK
	lua_rawget( L, LUA_REGISTRYINDEX);                       // a {}
	// if the lookup table doesn't exist yet, no key can have a mapping:
	// the nil left by lua_rawget becomes the result
	if( !lua_isnil( L, -1))
	{
		lua_insert( L, -2);                                  // {} a
		lua_rawget( L, -2);                                  // {} b
	}
	lua_remove( L, -2);                                      // a|b
	STACK_END( L, 0);
}
/*
 * Access deep userdata through a proxy.
 *
 * Returns NULL if the value at 'index' is not a deep userdata of the type
 * identified by 'idfunc'.
 *
 * Reference count is not changed, and access to the deep userdata is not
 * serialized. It is the module's responsibility to prevent conflicting usage.
 */
void* luaG_todeep( lua_State* L, luaG_IdFunction idfunc, int index)
{
	DEEP_PRELUDE** proxy;
	STACK_CHECK( L);
	// ensure it is actually a deep userdata (its idfunc must match)
	if( get_idfunc( L, index, eLM_LaneBody) != idfunc)
	{
		return NULL; // no metatable, or wrong kind
	}
	proxy = (DEEP_PRELUDE**) lua_touserdata( L, index);
	STACK_END( L, 0);
	return (*proxy)->deep;
}
/* Apply 'fn' to 'argc' arguments; if the application yields a tail
 * form, finish it with the fast-op evaluator, otherwise return the
 * value produced directly. */
lref_t apply1(lref_t fn, size_t argc, lref_t argv[])
{
     checked_assert((argc == 0) || (argv != NULL));

     STACK_CHECK(&fn);

     lref_t result = NIL;
     lref_t env = NIL;
     lref_t tail_form = apply(fn, argc, argv, &env, &result);

     // a non-null tail form means evaluation is not finished yet
     if (!NULLP(tail_form))
          return execute_fast_op(tail_form, env);

     return result;
}
/*
 * Sets up [-1]<->[-2] two-way lookups, and ensures the lookup table exists.
 * Pops the both values off the stack.
 */
void set_deep_lookup( lua_State *L )
{
    STACK_GROW(L,3);
    STACK_CHECK(L)
    // ensure the registry lookup table exists and push it
    // (CLEANUP: removed the dead '#if 1 / #else' fallback that duplicated
    // push_registry_subtable inline — it was marked "..to be removed..")
    push_registry_subtable( L, DEEP_LOOKUP_KEY );
    STACK_MID(L,1)
    lua_insert(L,-3);
    // [-3]: lookup table
    // [-2]: A
    // [-1]: B
    lua_pushvalue( L,-1 );    // B
    lua_pushvalue( L,-3 );    // A
    lua_rawset( L, -5 );      // B->A
    lua_rawset( L, -3 );      // A->B
    lua_pop( L,1 );
    STACK_END(L,-2)
}
// Debugging helper: dump the entire Lua stack of 'L' to stderr,
// using the global 'tostring' (if available) to render each value.
void luaG_dump( lua_State* L )
{
    int top= lua_gettop(L);
    int i;
    fprintf( stderr, "\n\tDEBUG STACK:\n" );
    if (top==0)
        fprintf( stderr, "\t(none)\n" );
    for( i=1; i<=top; i++ )
    {
        int type= lua_type( L, i );
        fprintf( stderr, "\t[%d]= (%s) ", i, lua_typename(L,type) );
        // Print item contents here...
        //
        // Note: this requires 'tostring()' to be defined. If it is NOT,
        // enable it for more debugging.
        //
        STACK_CHECK(L)
        STACK_GROW( L, 2 )
        lua_getglobal( L, "tostring" );
        //
        // [-1]: tostring function, or nil
        if (!lua_isfunction(L,-1))
        {
            fprintf( stderr, "('tostring' not available)" );
        }
        else
        {
            lua_pushvalue( L, i );
            lua_call( L, 1 /*args*/, 1 /*retvals*/ );
            // Don't trust the string contents
            //
            fprintf( stderr, "%s", lua_tostring(L,-1) );
        }
        lua_pop(L,1);                   // pop the result (or the non-function)
        STACK_END(L,0)
        fprintf( stderr, "\n" );
    }
    fprintf( stderr, "\n" );
}
/*
 * Get a unique ID for metatable at [i].
 * IDs are memoized in the registry subtable REG_MTID (both id->mt and mt->id);
 * a fresh id is allocated under 'mtid_lock' on first sight of a metatable.
 */
static uint_t get_mt_id( lua_State *L, int i )
{
    static uint_t last_id= 0;   // process-wide counter, guarded by 'mtid_lock'
    uint_t id;

    i= STACK_ABS(L,i);

    STACK_GROW(L,3);

    STACK_CHECK(L)
    push_registry_subtable( L, REG_MTID );
    lua_pushvalue(L, i);
    lua_rawget( L, -2 );
    //
    // [-2]: reg[REG_MTID]
    // [-1]: nil/uint
    id= lua_tointeger(L,-1);    // 0 for nil
    lua_pop(L,1);
    STACK_MID(L,1)

    if (id==0)
    {
        // first time we see this metatable: allocate a new id under the lock
        MUTEX_LOCK( &mtid_lock );
        id= ++last_id;
        MUTEX_UNLOCK( &mtid_lock );

        /* Create two-way references: id_uint <-> table */
        lua_pushvalue(L,i);
        lua_pushinteger(L,id);
        lua_rawset( L, -3 );

        lua_pushinteger(L,id);
        lua_pushvalue(L,i);
        lua_rawset( L, -3 );
    }
    lua_pop(L,1);               // remove 'reg[REG_MTID]' reference
    STACK_END(L,0)

    return id;
}
// Push on the stack the fifos table associated with the linda userdata at [idx]
// (the per-linda key->fifo map stored under fifos_key in the registry),
// creating it on first use.
static void push_table( lua_State* L, int idx)
{
	STACK_GROW( L, 4);
	STACK_CHECK( L);
	idx = lua_absindex( L, idx);
	lua_pushlightuserdata( L, fifos_key);                    // ud fifos_key
	lua_rawget( L, LUA_REGISTRYINDEX);                       // ud fifos
	lua_pushvalue( L, idx);                                  // ud fifos ud
	lua_rawget( L, -2);                                      // ud fifos fifos[ud]
	STACK_MID( L, 2);
	if( lua_isnil( L, -1))
	{
		lua_pop( L, 1);                                      // ud fifos
		// add a new fifos table for this linda
		lua_newtable( L);                                    // ud fifos fifos[ud]
		lua_pushvalue( L, idx);                              // ud fifos fifos[ud] ud
		lua_pushvalue( L, -2);                               // ud fifos fifos[ud] ud fifos[ud]
		lua_rawset( L, -4);                                  // ud fifos fifos[ud]
	}
	lua_remove( L, -2);                                      // ud fifos[ud]
	STACK_END( L, 1);
}
/*
 * Pops the key (metatable or idfunc) off the stack, and replaces with the
 * deep lookup value (idfunc/metatable/nil).
 */
void get_deep_lookup( lua_State *L )
{
    STACK_GROW(L,1);
    STACK_CHECK(L)
    lua_pushlightuserdata( L, DEEP_LOOKUP_KEY );
    lua_rawget( L, LUA_REGISTRYINDEX );

    if (!lua_isnil(L,-1))
    {
        // [-2]: key (metatable or idfunc)
        // [-1]: lookup table
        lua_insert( L, -2 );
        lua_rawget( L, -2 );
        // [-2]: lookup table
        // [-1]: value (metatable / idfunc / nil)
    }
    // if the lookup table did not exist, the nil left by lua_rawget is the result
    lua_remove(L,-2);   // remove lookup, or unused key
    STACK_END(L,0)
}
/*
 * Create a deep userdata
 *
 * proxy_ud= deep_userdata( idfunc [, ...] )
 *
 * Creates a deep userdata entry of the type defined by 'idfunc'.
 * Other parameters are passed on to the 'idfunc' "new" invocation.
 *
 * 'idfunc' must fulfill the following features:
 *
 *   lightuserdata= idfunc( "new" [, ...] )      -- creates a new deep data instance
 *   void= idfunc( "delete", lightuserdata )     -- releases a deep data instance
 *   tbl= idfunc( "metatable" )                  -- gives metatable for userdata proxies
 *
 * Reference counting and true userdata proxying are taken care of for the
 * actual data type.
 *
 * Types using the deep userdata system (and only those!) can be passed between
 * separate Lua states via 'luaG_inter_move()'.
 *
 * Returns: 'proxy' userdata for accessing the deep data via 'luaG_todeep()'
 */
int luaG_deep_userdata( lua_State *L )
{
    lua_CFunction idfunc= lua_tocfunction( L,1 );
    int pushed;

    DEEP_PRELUDE *prelude= DEEP_MALLOC( sizeof(DEEP_PRELUDE) );
    // NOTE(review): OOM is handled by assertion rather than a Lua error —
    // confirm this matches the project's allocation-failure policy
    ASSERT_L(prelude);

    prelude->refcount= 0;   // 'luaG_push_proxy' will lift it to 1

    STACK_GROW(L,1);
    STACK_CHECK(L)

    // Replace 'idfunc' with "new" in the stack (keep possible other params)
    //
    lua_remove(L,1);
    lua_pushliteral( L, "new" );
    lua_insert(L,1);

    // lightuserdata= idfunc( "new" [, ...] )
    //
    pushed= idfunc(L);

    // NOTE(review): 'prelude' is leaked if this error fires — verify whether
    // it should be freed before raising
    if ((pushed!=1) || lua_type(L,-1) != LUA_TLIGHTUSERDATA)
        luaL_error( L, "Bad idfunc on \"new\": did not return light userdata" );

    prelude->deep= lua_touserdata(L,-1);
    ASSERT_L(prelude->deep);

    lua_pop(L,1);   // pop deep data

    luaG_push_proxy( L, idfunc, prelude );
    //
    // [-1]: proxy userdata
    STACK_END(L,1)
    return 1;
}
// Push on L2 a copy of the function at [i] in L, reusing an already-made copy
// from the cache table at [L2_cache_i] when one exists. The identity string
// produced by 'tostring(func)' in L is used as the cache key; a 'true' marker
// in the cache means the copy is currently in progress (recursion guard).
static void push_cached_func( lua_State *L2, uint_t L2_cache_i, lua_State *L, uint_t i )
{
    // TBD: Merge this and same code for tables

    ASSERT_L( hijacked_tostring );
    ASSERT_L( L2_cache_i != 0 );

    STACK_GROW(L,2);
    STACK_GROW(L2,3);

    STACK_CHECK(L)
    lua_pushcfunction( L, hijacked_tostring );
    lua_pushvalue( L, i );
    lua_call( L, 1 /*args*/, 1 /*retvals*/ );
    //
    // [-1]: "function: 0x...."
    STACK_END(L,1)
    ASSERT_L( lua_type(L,-1) == LUA_TSTRING );

    // L2_cache[id_str]= function
    //
    STACK_CHECK(L2)

    // We don't need to use the from state ('L') in ID since the life span
    // is only for the duration of a copy (both states are locked).
    //
    lua_pushstring( L2, lua_tostring(L,-1) );
    lua_pop(L,1);   // remove the 'tostring(tbl)' value (in L!)

    //fprintf( stderr, "<< ID: %s >>\n", lua_tostring(L2,-1) );

    lua_pushvalue( L2, -1 );
    lua_rawget( L2, L2_cache_i );
    //
    // [-2]: identity string ("function: 0x...")
    // [-1]: function|nil|true  (true means: we're working on it; recursive)

    if (lua_isnil(L2,-1))
    {
        lua_pop(L2,1);

        // Set to 'true' for the duration of creation; need to find self-references
        // via upvalues
        //
        lua_pushboolean(L2,TRUE);
        lua_setfield( L2, L2_cache_i, lua_tostring(L2,-2) );

        inter_copy_func( L2, L2_cache_i, L, i );    // pushes a copy of the func

        // replace the 'true' marker with the finished copy so later lookups hit the cache
        lua_pushvalue(L2,-1);
        lua_insert(L2,-3);
        //
        // [-3]: function (2nd ref)
        // [-2]: identity string
        // [-1]: function
        lua_rawset(L2,L2_cache_i);
        //
        // [-1]: function (tied to 'L2_cache' table')

    }
    else if (lua_isboolean(L2,-1))
    {
        // Loop in preparing upvalues; either direct or via a table
        //
        // Note: This excludes the case where a function directly addresses
        //       itself as an upvalue (recursive lane creation).
        //
        luaL_error( L, "Recursive use of upvalues; cannot copy the function" );
    }
    else
    {
        // cache hit: drop the identity string, keep the cached copy
        lua_remove(L2,-2);
    }
    STACK_END(L2,1)
    //
    // L2 [-1]: function

    ASSERT_L( lua_isfunction(L2,-1) );
}
/*
 * Check if we've already copied the same table from 'L', and
 * reuse the old copy. This allows table upvalues shared by multiple
 * local functions to point to the same table, also in the target.
 *
 * Always pushes a table to 'L2'.
 *
 * Returns TRUE if the table was cached (no need to fill it!); FALSE if
 * it's a virgin.
 */
static bool_t push_cached_table( lua_State *L2, uint_t L2_cache_i, lua_State *L, uint_t i )
{
    bool_t ret;

    ASSERT_L( hijacked_tostring );
    ASSERT_L( L2_cache_i != 0 );

    STACK_GROW(L,2);
    STACK_GROW(L2,3);

    // Create an identity string for table at [i]; it should stay unique at
    // least during copying of the data (then we can clear the caches).
    //
    STACK_CHECK(L)
    lua_pushcfunction( L, hijacked_tostring );
    lua_pushvalue( L, i );
    lua_call( L, 1 /*args*/, 1 /*retvals*/ );
    //
    // [-1]: "table: 0x...."
    STACK_END(L,1)
    ASSERT_L( lua_type(L,-1) == LUA_TSTRING );

    // L2_cache[id_str]= [{...}]
    //
    STACK_CHECK(L2)

    // We don't need to use the from state ('L') in ID since the life span
    // is only for the duration of a copy (both states are locked).
    //
    lua_pushstring( L2, lua_tostring(L,-1) );
    lua_pop(L,1);   // remove the 'tostring(tbl)' value (in L!)

    //fprintf( stderr, "<< ID: %s >>\n", lua_tostring(L2,-1) );

    lua_pushvalue( L2, -1 );
    lua_rawget( L2, L2_cache_i );
    //
    // [-2]: identity string ("table: 0x...")
    // [-1]: table|nil
    if (lua_isnil(L2,-1))
    {
        // not seen before: create the destination table and record it in the cache
        lua_pop(L2,1);
        lua_newtable(L2);
        lua_pushvalue(L2,-1);
        lua_insert(L2,-3);
        //
        // [-3]: new table (2nd ref)
        // [-2]: identity string
        // [-1]: new table
        lua_rawset(L2, L2_cache_i);
        //
        // [-1]: new table (tied to 'L2_cache' table')
        ret= FALSE;     // brand new
    }
    else
    {
        // cache hit: drop the identity string, keep the cached table
        lua_remove(L2,-2);
        ret= TRUE;      // from cache
    }
    STACK_END(L2,1)
    //
    // L2 [-1]: table to use as destination

    ASSERT_L( lua_istable(L2,-1) );
    return ret;
}
void Interpret (int pop_return_p) { long dispatch_code; struct interpreter_state_s new_state; /* Primitives jump back here for errors, requests to evaluate an expression, apply a function, or handle an interrupt request. On errors or interrupts they leave their arguments on the stack, the primitive itself in GET_EXP. The code should do a primitive backout in these cases, but not in others (apply, eval, etc.), since the primitive itself will have left the state of the interpreter ready for operation. */ bind_interpreter_state (&new_state); dispatch_code = (setjmp (interpreter_catch_env)); preserve_signal_mask (); fixup_float_environment (); switch (dispatch_code) { case 0: /* first time */ if (pop_return_p) goto pop_return; /* continue */ else break; /* fall into eval */ case PRIM_APPLY: PROCEED_AFTER_PRIMITIVE (); goto internal_apply; case PRIM_NO_TRAP_APPLY: PROCEED_AFTER_PRIMITIVE (); goto Apply_Non_Trapping; case PRIM_APPLY_INTERRUPT: PROCEED_AFTER_PRIMITIVE (); PREPARE_APPLY_INTERRUPT (); SIGNAL_INTERRUPT (PENDING_INTERRUPTS ()); case PRIM_APPLY_ERROR: PROCEED_AFTER_PRIMITIVE (); APPLICATION_ERROR (prim_apply_error_code); case PRIM_DO_EXPRESSION: SET_VAL (GET_EXP); PROCEED_AFTER_PRIMITIVE (); REDUCES_TO (GET_VAL); case PRIM_NO_TRAP_EVAL: SET_VAL (GET_EXP); PROCEED_AFTER_PRIMITIVE (); NEW_REDUCTION (GET_VAL, GET_ENV); goto eval_non_trapping; case PRIM_POP_RETURN: PROCEED_AFTER_PRIMITIVE (); goto pop_return; case PRIM_RETURN_TO_C: PROCEED_AFTER_PRIMITIVE (); unbind_interpreter_state (interpreter_state); return; case PRIM_NO_TRAP_POP_RETURN: PROCEED_AFTER_PRIMITIVE (); goto pop_return_non_trapping; case PRIM_INTERRUPT: back_out_of_primitive (); SIGNAL_INTERRUPT (PENDING_INTERRUPTS ()); case PRIM_ABORT_TO_C: back_out_of_primitive (); unbind_interpreter_state (interpreter_state); return; case ERR_ARG_1_WRONG_TYPE: back_out_of_primitive (); Do_Micro_Error (ERR_ARG_1_WRONG_TYPE, true); goto internal_apply; case ERR_ARG_2_WRONG_TYPE: back_out_of_primitive (); 
Do_Micro_Error (ERR_ARG_2_WRONG_TYPE, true); goto internal_apply; case ERR_ARG_3_WRONG_TYPE: back_out_of_primitive (); Do_Micro_Error (ERR_ARG_3_WRONG_TYPE, true); goto internal_apply; default: back_out_of_primitive (); Do_Micro_Error (dispatch_code, true); goto internal_apply; } do_expression: /* GET_EXP has an Scode item in it that should be evaluated and the result left in GET_VAL. A "break" after the code for any operation indicates that all processing for this operation has been completed, and the next step will be to pop a return code off the stack and proceed at pop_return. This is sometimes called "executing the continuation" since the return code can be considered the continuation to be performed after the operation. An operation can terminate with a REDUCES_TO or REDUCES_TO_NTH macro. This indicates that the value of the current Scode item is the value returned when the new expression is evaluated. Therefore no new continuation is created and processing continues at do_expression with the new expression in GET_EXP. Finally, an operation can terminate with a DO_NTH_THEN macro. This indicates that another expression must be evaluated and them some additional processing will be performed before the value of this S-Code item available. Thus a new continuation is created and placed on the stack (using SAVE_CONT), the new expression is placed in the GET_EXP, and processing continues at do_expression. */ /* Handling of Eval Trapping. If we are handling traps and there is an Eval Trap set, turn off all trapping and then go to internal_apply to call the user supplied eval hook with the expression to be evaluated and the environment. 
*/ #ifdef COMPILE_STEPPER if (trapping && (!WITHIN_CRITICAL_SECTION_P ()) && ((FETCH_EVAL_TRAPPER ()) != SHARP_F)) { trapping = false; Will_Push (4); PUSH_ENV (); PUSH_EXP (); STACK_PUSH (FETCH_EVAL_TRAPPER ()); PUSH_APPLY_FRAME_HEADER (2); Pushed (); goto Apply_Non_Trapping; } #endif /* COMPILE_STEPPER */ eval_non_trapping: #ifdef EVAL_UCODE_HOOK EVAL_UCODE_HOOK (); #endif switch (OBJECT_TYPE (GET_EXP)) { case TC_BIG_FIXNUM: /* The self evaluating items */ case TC_BIG_FLONUM: case TC_CHARACTER_STRING: case TC_CHARACTER: case TC_COMPILED_CODE_BLOCK: case TC_COMPLEX: case TC_CONTROL_POINT: case TC_DELAYED: case TC_ENTITY: case TC_ENVIRONMENT: case TC_EXTENDED_PROCEDURE: case TC_FIXNUM: case TC_HUNK3_A: case TC_HUNK3_B: case TC_INTERNED_SYMBOL: case TC_LIST: case TC_NON_MARKED_VECTOR: case TC_NULL: case TC_PRIMITIVE: case TC_PROCEDURE: case TC_QUAD: case TC_RATNUM: case TC_REFERENCE_TRAP: case TC_RETURN_CODE: case TC_UNINTERNED_SYMBOL: case TC_CONSTANT: case TC_VECTOR: case TC_VECTOR_16B: case TC_VECTOR_1B: default: SET_VAL (GET_EXP); break; case TC_ACCESS: Will_Push (CONTINUATION_SIZE); PUSH_NTH_THEN (RC_EXECUTE_ACCESS_FINISH, ACCESS_ENVIRONMENT); case TC_ASSIGNMENT: Will_Push (CONTINUATION_SIZE + 1); PUSH_ENV (); PUSH_NTH_THEN (RC_EXECUTE_ASSIGNMENT_FINISH, ASSIGN_VALUE); case TC_BROKEN_HEART: Microcode_Termination (TERM_BROKEN_HEART); case TC_COMBINATION: { long length = ((VECTOR_LENGTH (GET_EXP)) - 1); Will_Push (length + 2 + CONTINUATION_SIZE); stack_pointer = (STACK_LOC (-length)); STACK_PUSH (MAKE_OBJECT (TC_MANIFEST_NM_VECTOR, length)); /* The finger: last argument number */ Pushed (); if (length == 0) { PUSH_APPLY_FRAME_HEADER (0); /* Frame size */ DO_NTH_THEN (RC_COMB_APPLY_FUNCTION, COMB_FN_SLOT); } PUSH_ENV (); DO_NTH_THEN (RC_COMB_SAVE_VALUE, (length + 1)); } case TC_COMBINATION_1: Will_Eventually_Push (CONTINUATION_SIZE + STACK_ENV_FIRST_ARG + 1); PUSH_ENV (); DO_NTH_THEN (RC_COMB_1_PROCEDURE, COMB_1_ARG_1); case TC_COMBINATION_2: Will_Eventually_Push 
(CONTINUATION_SIZE + STACK_ENV_FIRST_ARG + 2); PUSH_ENV (); DO_NTH_THEN (RC_COMB_2_FIRST_OPERAND, COMB_2_ARG_2); case TC_COMMENT: REDUCES_TO_NTH (COMMENT_EXPRESSION); case TC_CONDITIONAL: Will_Push (CONTINUATION_SIZE + 1); PUSH_ENV (); PUSH_NTH_THEN (RC_CONDITIONAL_DECIDE, COND_PREDICATE); #ifdef CC_SUPPORT_P case TC_COMPILED_ENTRY: dispatch_code = (enter_compiled_expression ()); goto return_from_compiled_code; #endif case TC_DEFINITION: Will_Push (CONTINUATION_SIZE + 1); PUSH_ENV (); PUSH_NTH_THEN (RC_EXECUTE_DEFINITION_FINISH, DEFINE_VALUE); case TC_DELAY: /* Deliberately omitted: EVAL_GC_CHECK (2); */ SET_VAL (MAKE_POINTER_OBJECT (TC_DELAYED, Free)); (Free[THUNK_ENVIRONMENT]) = GET_ENV; (Free[THUNK_PROCEDURE]) = (MEMORY_REF (GET_EXP, DELAY_OBJECT)); Free += 2; break; case TC_DISJUNCTION: Will_Push (CONTINUATION_SIZE + 1); PUSH_ENV (); PUSH_NTH_THEN (RC_DISJUNCTION_DECIDE, OR_PREDICATE); case TC_EXTENDED_LAMBDA: /* Deliberately omitted: EVAL_GC_CHECK (2); */ SET_VAL (MAKE_POINTER_OBJECT (TC_EXTENDED_PROCEDURE, Free)); (Free[PROCEDURE_LAMBDA_EXPR]) = GET_EXP; (Free[PROCEDURE_ENVIRONMENT]) = GET_ENV; Free += 2; break; case TC_IN_PACKAGE: Will_Push (CONTINUATION_SIZE); PUSH_NTH_THEN (RC_EXECUTE_IN_PACKAGE_CONTINUE, IN_PACKAGE_ENVIRONMENT); case TC_LAMBDA: case TC_LEXPR: /* Deliberately omitted: EVAL_GC_CHECK (2); */ SET_VAL (MAKE_POINTER_OBJECT (TC_PROCEDURE, Free)); (Free[PROCEDURE_LAMBDA_EXPR]) = GET_EXP; (Free[PROCEDURE_ENVIRONMENT]) = GET_ENV; Free += 2; break; case TC_MANIFEST_NM_VECTOR: EVAL_ERROR (ERR_EXECUTE_MANIFEST_VECTOR); case TC_PCOMB0: /* The argument to Will_Eventually_Push is determined by how much will be on the stack if we back out of the primitive. 
*/ Will_Eventually_Push (CONTINUATION_SIZE + STACK_ENV_FIRST_ARG); Finished_Eventual_Pushing (CONTINUATION_SIZE + STACK_ENV_FIRST_ARG); SET_EXP (OBJECT_NEW_TYPE (TC_PRIMITIVE, GET_EXP)); goto primitive_internal_apply; case TC_PCOMB1: Will_Eventually_Push (CONTINUATION_SIZE + STACK_ENV_FIRST_ARG + 1); DO_NTH_THEN (RC_PCOMB1_APPLY, PCOMB1_ARG_SLOT); case TC_PCOMB2: Will_Eventually_Push (CONTINUATION_SIZE + STACK_ENV_FIRST_ARG + 2); PUSH_ENV (); DO_NTH_THEN (RC_PCOMB2_DO_1, PCOMB2_ARG_2_SLOT); case TC_PCOMB3: Will_Eventually_Push (CONTINUATION_SIZE + STACK_ENV_FIRST_ARG + 3); PUSH_ENV (); DO_NTH_THEN (RC_PCOMB3_DO_2, PCOMB3_ARG_3_SLOT); case TC_SCODE_QUOTE: SET_VAL (MEMORY_REF (GET_EXP, SCODE_QUOTE_OBJECT)); break; case TC_SEQUENCE_2: Will_Push (CONTINUATION_SIZE + 1); PUSH_ENV (); PUSH_NTH_THEN (RC_SEQ_2_DO_2, SEQUENCE_1); case TC_SEQUENCE_3: Will_Push (CONTINUATION_SIZE + 1); PUSH_ENV (); PUSH_NTH_THEN (RC_SEQ_3_DO_2, SEQUENCE_1); case TC_SYNTAX_ERROR: EVAL_ERROR (ERR_SYNTAX_ERROR); case TC_THE_ENVIRONMENT: SET_VAL (GET_ENV); break; case TC_VARIABLE: { SCHEME_OBJECT val = GET_VAL; SCHEME_OBJECT name = (GET_VARIABLE_SYMBOL (GET_EXP)); long temp = (lookup_variable (GET_ENV, name, (&val))); if (temp != PRIM_DONE) { /* Back out of the evaluation. */ if (temp == PRIM_INTERRUPT) { PREPARE_EVAL_REPEAT (); SIGNAL_INTERRUPT (PENDING_INTERRUPTS ()); } EVAL_ERROR (temp); } SET_VAL (val); } } /* Now restore the continuation saved during an earlier part of the EVAL cycle and continue as directed. 
*/ pop_return: #ifdef COMPILE_STEPPER if (trapping && (!WITHIN_CRITICAL_SECTION_P ()) && ((FETCH_RETURN_TRAPPER ()) != SHARP_F)) { Will_Push (3); trapping = false; PUSH_VAL (); STACK_PUSH (FETCH_RETURN_TRAPPER ()); PUSH_APPLY_FRAME_HEADER (1); Pushed (); goto Apply_Non_Trapping; } #endif /* COMPILE_STEPPER */ pop_return_non_trapping: #ifdef POP_RETURN_UCODE_HOOK POP_RETURN_UCODE_HOOK (); #endif RESTORE_CONT (); #ifdef ENABLE_DEBUGGING_TOOLS if (!RETURN_CODE_P (GET_RET)) { PUSH_VAL (); /* For possible stack trace */ SAVE_CONT (); Microcode_Termination (TERM_BAD_STACK); } #endif /* Dispatch on the return code. A BREAK here will cause a "goto pop_return" to occur, since this is the most common occurrence. */ switch (OBJECT_DATUM (GET_RET)) { case RC_COMB_1_PROCEDURE: POP_ENV (); PUSH_VAL (); /* Arg. 1 */ STACK_PUSH (SHARP_F); /* Operator */ PUSH_APPLY_FRAME_HEADER (1); Finished_Eventual_Pushing (CONTINUATION_SIZE); DO_ANOTHER_THEN (RC_COMB_APPLY_FUNCTION, COMB_1_FN); case RC_COMB_2_FIRST_OPERAND: POP_ENV (); PUSH_VAL (); PUSH_ENV (); DO_ANOTHER_THEN (RC_COMB_2_PROCEDURE, COMB_2_ARG_1); case RC_COMB_2_PROCEDURE: POP_ENV (); PUSH_VAL (); /* Arg 1, just calculated */ STACK_PUSH (SHARP_F); /* Function */ PUSH_APPLY_FRAME_HEADER (2); Finished_Eventual_Pushing (CONTINUATION_SIZE); DO_ANOTHER_THEN (RC_COMB_APPLY_FUNCTION, COMB_2_FN); case RC_COMB_APPLY_FUNCTION: END_SUBPROBLEM (); goto internal_apply_val; case RC_COMB_SAVE_VALUE: { long Arg_Number; POP_ENV (); Arg_Number = ((OBJECT_DATUM (STACK_REF (STACK_COMB_FINGER))) - 1); (STACK_REF (STACK_COMB_FIRST_ARG + Arg_Number)) = GET_VAL; (STACK_REF (STACK_COMB_FINGER)) = (MAKE_OBJECT (TC_MANIFEST_NM_VECTOR, Arg_Number)); /* DO NOT count on the type code being NMVector here, since the stack parser may create them with #F here! 
*/ if (Arg_Number > 0) { PUSH_ENV (); DO_ANOTHER_THEN (RC_COMB_SAVE_VALUE, ((COMB_ARG_1_SLOT - 1) + Arg_Number)); } /* Frame Size */ STACK_PUSH (MEMORY_REF (GET_EXP, 0)); DO_ANOTHER_THEN (RC_COMB_APPLY_FUNCTION, COMB_FN_SLOT); } #ifdef CC_SUPPORT_P #define DEFINE_COMPILER_RESTART(return_code, entry) \ case return_code: \ { \ dispatch_code = (entry ()); \ goto return_from_compiled_code; \ } DEFINE_COMPILER_RESTART (RC_COMP_INTERRUPT_RESTART, comp_interrupt_restart); DEFINE_COMPILER_RESTART (RC_COMP_LOOKUP_TRAP_RESTART, comp_lookup_trap_restart); DEFINE_COMPILER_RESTART (RC_COMP_ASSIGNMENT_TRAP_RESTART, comp_assignment_trap_restart); DEFINE_COMPILER_RESTART (RC_COMP_OP_REF_TRAP_RESTART, comp_op_lookup_trap_restart); DEFINE_COMPILER_RESTART (RC_COMP_CACHE_REF_APPLY_RESTART, comp_cache_lookup_apply_restart); DEFINE_COMPILER_RESTART (RC_COMP_SAFE_REF_TRAP_RESTART, comp_safe_lookup_trap_restart); DEFINE_COMPILER_RESTART (RC_COMP_UNASSIGNED_TRAP_RESTART, comp_unassigned_p_trap_restart); DEFINE_COMPILER_RESTART (RC_COMP_LINK_CACHES_RESTART, comp_link_caches_restart); DEFINE_COMPILER_RESTART (RC_COMP_ERROR_RESTART, comp_error_restart); case RC_REENTER_COMPILED_CODE: dispatch_code = (return_to_compiled_code ()); goto return_from_compiled_code; #endif case RC_CONDITIONAL_DECIDE: END_SUBPROBLEM (); POP_ENV (); REDUCES_TO_NTH ((GET_VAL == SHARP_F) ? 
COND_ALTERNATIVE : COND_CONSEQUENT); case RC_DISJUNCTION_DECIDE: /* Return predicate if it isn't #F; else do ALTERNATIVE */ END_SUBPROBLEM (); POP_ENV (); if (GET_VAL != SHARP_F) goto pop_return; REDUCES_TO_NTH (OR_ALTERNATIVE); case RC_END_OF_COMPUTATION: { /* Signals bottom of stack */ interpreter_state_t previous_state; previous_state = (interpreter_state -> previous_state); if (previous_state == NULL_INTERPRETER_STATE) { termination_end_of_computation (); /*NOTREACHED*/ } else { dstack_position = interpreter_catch_dstack_position; interpreter_state = previous_state; return; } } case RC_EVAL_ERROR: /* Should be called RC_REDO_EVALUATION. */ POP_ENV (); REDUCES_TO (GET_EXP); case RC_EXECUTE_ACCESS_FINISH: { SCHEME_OBJECT val; long code; code = (lookup_variable (GET_VAL, (MEMORY_REF (GET_EXP, ACCESS_NAME)), (&val))); if (code == PRIM_DONE) SET_VAL (val); else if (code == PRIM_INTERRUPT) { PREPARE_POP_RETURN_INTERRUPT (RC_EXECUTE_ACCESS_FINISH, GET_VAL); SIGNAL_INTERRUPT (PENDING_INTERRUPTS ()); } else POP_RETURN_ERROR (code); } END_SUBPROBLEM (); break; case RC_EXECUTE_ASSIGNMENT_FINISH: { SCHEME_OBJECT variable = (MEMORY_REF (GET_EXP, ASSIGN_NAME)); SCHEME_OBJECT old_val; long code; POP_ENV (); if (TC_VARIABLE == (OBJECT_TYPE (variable))) code = (assign_variable (GET_ENV, (GET_VARIABLE_SYMBOL (variable)), GET_VAL, (&old_val))); else code = ERR_BAD_FRAME; if (code == PRIM_DONE) SET_VAL (old_val); else { PUSH_ENV (); if (code == PRIM_INTERRUPT) { PREPARE_POP_RETURN_INTERRUPT (RC_EXECUTE_ASSIGNMENT_FINISH, GET_VAL); SIGNAL_INTERRUPT (PENDING_INTERRUPTS ()); } else POP_RETURN_ERROR (code); } } END_SUBPROBLEM (); break; case RC_EXECUTE_DEFINITION_FINISH: { SCHEME_OBJECT name = (MEMORY_REF (GET_EXP, DEFINE_NAME)); SCHEME_OBJECT value = GET_VAL; long result; POP_ENV (); result = (define_variable (GET_ENV, name, value)); if (result == PRIM_DONE) { END_SUBPROBLEM (); SET_VAL (name); break; } PUSH_ENV (); if (result == PRIM_INTERRUPT) { PREPARE_POP_RETURN_INTERRUPT 
(RC_EXECUTE_DEFINITION_FINISH, value); SIGNAL_INTERRUPT (PENDING_INTERRUPTS ()); } SET_VAL (value); POP_RETURN_ERROR (result); } case RC_EXECUTE_IN_PACKAGE_CONTINUE: if (ENVIRONMENT_P (GET_VAL)) { END_SUBPROBLEM (); SET_ENV (GET_VAL); REDUCES_TO_NTH (IN_PACKAGE_EXPRESSION); } POP_RETURN_ERROR (ERR_BAD_FRAME); case RC_HALT: Microcode_Termination (TERM_TERM_HANDLER); case RC_HARDWARE_TRAP: { /* This just reinvokes the handler */ SCHEME_OBJECT info = (STACK_REF (0)); SCHEME_OBJECT handler = SHARP_F; SAVE_CONT (); if (VECTOR_P (fixed_objects)) handler = (VECTOR_REF (fixed_objects, TRAP_HANDLER)); if (handler == SHARP_F) { outf_fatal ("There is no trap handler for recovery!\n"); termination_trap (); /*NOTREACHED*/ } Will_Push (STACK_ENV_EXTRA_SLOTS + 2); STACK_PUSH (info); STACK_PUSH (handler); PUSH_APPLY_FRAME_HEADER (1); Pushed (); } goto internal_apply; /* internal_apply, the core of the application mechanism. Branch here to perform a function application. At this point the top of the stack contains an application frame which consists of the following elements (see sdata.h): - A header specifying the frame length. - A procedure. - The actual (evaluated) arguments. No registers (except the stack pointer) are meaning full at this point. Before interrupts or errors are processed, some registers are cleared to avoid holding onto garbage if a garbage collection occurs. 
*/ case RC_INTERNAL_APPLY_VAL: internal_apply_val: (APPLY_FRAME_PROCEDURE ()) = GET_VAL; case RC_INTERNAL_APPLY: internal_apply: #ifdef COMPILE_STEPPER if (trapping && (!WITHIN_CRITICAL_SECTION_P ()) && ((FETCH_APPLY_TRAPPER ()) != SHARP_F)) { unsigned long frame_size = (APPLY_FRAME_SIZE ()); (* (STACK_LOC (0))) = (FETCH_APPLY_TRAPPER ()); PUSH_APPLY_FRAME_HEADER (frame_size); trapping = false; } #endif /* COMPILE_STEPPER */ Apply_Non_Trapping: if (PENDING_INTERRUPTS_P) { unsigned long interrupts = (PENDING_INTERRUPTS ()); PREPARE_APPLY_INTERRUPT (); SIGNAL_INTERRUPT (interrupts); } perform_application: #ifdef APPLY_UCODE_HOOK APPLY_UCODE_HOOK (); #endif { SCHEME_OBJECT Function = (APPLY_FRAME_PROCEDURE ()); apply_dispatch: switch (OBJECT_TYPE (Function)) { case TC_ENTITY: { unsigned long frame_size = (APPLY_FRAME_SIZE ()); SCHEME_OBJECT data = (MEMORY_REF (Function, ENTITY_DATA)); if ((VECTOR_P (data)) && (frame_size < (VECTOR_LENGTH (data))) && ((VECTOR_REF (data, frame_size)) != SHARP_F) && ((VECTOR_REF (data, 0)) == (VECTOR_REF (fixed_objects, ARITY_DISPATCHER_TAG)))) { Function = (VECTOR_REF (data, frame_size)); (APPLY_FRAME_PROCEDURE ()) = Function; goto apply_dispatch; } (STACK_REF (0)) = (MEMORY_REF (Function, ENTITY_OPERATOR)); PUSH_APPLY_FRAME_HEADER (frame_size); /* This must be done to prevent an infinite push loop by an entity whose handler is the entity itself or some other such loop. Of course, it will die if stack overflow interrupts are disabled. 
*/ STACK_CHECK (0); goto internal_apply; } case TC_PROCEDURE: { unsigned long frame_size = (APPLY_FRAME_SIZE ()); Function = (MEMORY_REF (Function, PROCEDURE_LAMBDA_EXPR)); { SCHEME_OBJECT formals = (MEMORY_REF (Function, LAMBDA_FORMALS)); if ((frame_size != (VECTOR_LENGTH (formals))) && (((OBJECT_TYPE (Function)) != TC_LEXPR) || (frame_size < (VECTOR_LENGTH (formals))))) APPLICATION_ERROR (ERR_WRONG_NUMBER_OF_ARGUMENTS); } if (GC_NEEDED_P (frame_size + 1)) { PREPARE_APPLY_INTERRUPT (); IMMEDIATE_GC (frame_size + 1); } { SCHEME_OBJECT * end = (Free + 1 + frame_size); SCHEME_OBJECT env = (MAKE_POINTER_OBJECT (TC_ENVIRONMENT, Free)); (*Free++) = (MAKE_OBJECT (TC_MANIFEST_VECTOR, frame_size)); (void) STACK_POP (); while (Free < end) (*Free++) = (STACK_POP ()); SET_ENV (env); REDUCES_TO (MEMORY_REF (Function, LAMBDA_SCODE)); } } case TC_CONTROL_POINT: if ((APPLY_FRAME_SIZE ()) != 2) APPLICATION_ERROR (ERR_WRONG_NUMBER_OF_ARGUMENTS); SET_VAL (* (APPLY_FRAME_ARGS ())); unpack_control_point (Function); RESET_HISTORY (); goto pop_return; /* After checking the number of arguments, remove the frame header since primitives do not expect it. NOTE: This code must match the application code which follows primitive_internal_apply. */ case TC_PRIMITIVE: if (!IMPLEMENTED_PRIMITIVE_P (Function)) APPLICATION_ERROR (ERR_UNIMPLEMENTED_PRIMITIVE); { unsigned long n_args = (APPLY_FRAME_N_ARGS ()); /* Note that the first test below will fail for lexpr primitives. 
*/ if (n_args != (PRIMITIVE_ARITY (Function))) { if ((PRIMITIVE_ARITY (Function)) != LEXPR_PRIMITIVE_ARITY) APPLICATION_ERROR (ERR_WRONG_NUMBER_OF_ARGUMENTS); SET_LEXPR_ACTUALS (n_args); } stack_pointer = (APPLY_FRAME_ARGS ()); SET_EXP (Function); APPLY_PRIMITIVE_FROM_INTERPRETER (Function); POP_PRIMITIVE_FRAME (n_args); goto pop_return; } case TC_EXTENDED_PROCEDURE: { SCHEME_OBJECT lambda; SCHEME_OBJECT temp; unsigned long nargs; unsigned long nparams; unsigned long formals; unsigned long params; unsigned long auxes; long rest_flag; long size; long i; SCHEME_OBJECT * scan; nargs = (POP_APPLY_FRAME_HEADER ()); lambda = (MEMORY_REF (Function, PROCEDURE_LAMBDA_EXPR)); Function = (MEMORY_REF (lambda, ELAMBDA_NAMES)); nparams = ((VECTOR_LENGTH (Function)) - 1); Function = (Get_Count_Elambda (lambda)); formals = (Elambda_Formals_Count (Function)); params = ((Elambda_Opts_Count (Function)) + formals); rest_flag = (Elambda_Rest_Flag (Function)); auxes = (nparams - (params + rest_flag)); if ((nargs < formals) || (!rest_flag && (nargs > params))) { PUSH_APPLY_FRAME_HEADER (nargs); APPLICATION_ERROR (ERR_WRONG_NUMBER_OF_ARGUMENTS); } /* size includes the procedure slot, but not the header. */ size = (params + rest_flag + auxes + 1); if (GC_NEEDED_P (size + 1 + ((nargs > params) ? (2 * (nargs - params)) : 0))) { PUSH_APPLY_FRAME_HEADER (nargs); PREPARE_APPLY_INTERRUPT (); IMMEDIATE_GC (size + 1 + ((nargs > params) ? (2 * (nargs - params)) : 0)); } scan = Free; temp = (MAKE_POINTER_OBJECT (TC_ENVIRONMENT, scan)); (*scan++) = (MAKE_OBJECT (TC_MANIFEST_VECTOR, size)); if (nargs <= params) { for (i = (nargs + 1); (--i) >= 0; ) (*scan++) = (STACK_POP ()); for (i = (params - nargs); (--i) >= 0; ) (*scan++) = DEFAULT_OBJECT; if (rest_flag) (*scan++) = EMPTY_LIST; for (i = auxes; (--i) >= 0; ) (*scan++) = UNASSIGNED_OBJECT; } else { /* rest_flag must be true. 
*/ SCHEME_OBJECT list = (MAKE_POINTER_OBJECT (TC_LIST, (scan + size))); for (i = (params + 1); (--i) >= 0; ) (*scan++) = (STACK_POP ()); (*scan++) = list; for (i = auxes; (--i) >= 0; ) (*scan++) = UNASSIGNED_OBJECT; /* Now scan == OBJECT_ADDRESS (list) */ for (i = (nargs - params); (--i) >= 0; ) { (*scan++) = (STACK_POP ()); (*scan) = MAKE_POINTER_OBJECT (TC_LIST, (scan + 1)); scan += 1; } (scan[-1]) = EMPTY_LIST; } Free = scan; SET_ENV (temp); REDUCES_TO (Get_Body_Elambda (lambda)); } #ifdef CC_SUPPORT_P case TC_COMPILED_ENTRY: { guarantee_cc_return (1 + (APPLY_FRAME_SIZE ())); dispatch_code = (apply_compiled_procedure ()); return_from_compiled_code: switch (dispatch_code) { case PRIM_DONE: goto pop_return; case PRIM_APPLY: goto internal_apply; case PRIM_INTERRUPT: SIGNAL_INTERRUPT (PENDING_INTERRUPTS ()); case PRIM_APPLY_INTERRUPT: PREPARE_APPLY_INTERRUPT (); SIGNAL_INTERRUPT (PENDING_INTERRUPTS ()); case ERR_INAPPLICABLE_OBJECT: case ERR_WRONG_NUMBER_OF_ARGUMENTS: APPLICATION_ERROR (dispatch_code); default: Do_Micro_Error (dispatch_code, true); goto internal_apply; } } #endif default: APPLICATION_ERROR (ERR_INAPPLICABLE_OBJECT); } } case RC_JOIN_STACKLETS: unpack_control_point (GET_EXP); break; case RC_NORMAL_GC_DONE: SET_VAL (GET_EXP); /* Paranoia */ if (GC_NEEDED_P (gc_space_needed)) termination_gc_out_of_space (); gc_space_needed = 0; EXIT_CRITICAL_SECTION ({ SAVE_CONT (); }); break; case RC_PCOMB1_APPLY: END_SUBPROBLEM (); PUSH_VAL (); /* Argument value */ Finished_Eventual_Pushing (CONTINUATION_SIZE + STACK_ENV_FIRST_ARG); SET_EXP (MEMORY_REF (GET_EXP, PCOMB1_FN_SLOT)); primitive_internal_apply: #ifdef COMPILE_STEPPER if (trapping && (!WITHIN_CRITICAL_SECTION_P ()) && ((FETCH_APPLY_TRAPPER ()) != SHARP_F)) { Will_Push (3); PUSH_EXP (); STACK_PUSH (FETCH_APPLY_TRAPPER ()); PUSH_APPLY_FRAME_HEADER (1 + (PRIMITIVE_N_PARAMETERS (GET_EXP))); Pushed (); trapping = false; goto Apply_Non_Trapping; } #endif /* COMPILE_STEPPER */ /* NOTE: This code must match the 
code in the TC_PRIMITIVE case of internal_apply. This code is simpler because: 1) The arity was checked at syntax time. 2) We don't have to deal with "lexpr" primitives. 3) We don't need to worry about unimplemented primitives because unimplemented primitives will cause an error at invocation. */ { SCHEME_OBJECT primitive = GET_EXP; APPLY_PRIMITIVE_FROM_INTERPRETER (primitive); POP_PRIMITIVE_FRAME (PRIMITIVE_ARITY (primitive)); break; } case RC_PCOMB2_APPLY: END_SUBPROBLEM (); PUSH_VAL (); /* Value of arg. 1 */ Finished_Eventual_Pushing (CONTINUATION_SIZE + STACK_ENV_FIRST_ARG); SET_EXP (MEMORY_REF (GET_EXP, PCOMB2_FN_SLOT)); goto primitive_internal_apply; case RC_PCOMB2_DO_1: POP_ENV (); PUSH_VAL (); /* Save value of arg. 2 */ DO_ANOTHER_THEN (RC_PCOMB2_APPLY, PCOMB2_ARG_1_SLOT); case RC_PCOMB3_APPLY: END_SUBPROBLEM (); PUSH_VAL (); /* Save value of arg. 1 */ Finished_Eventual_Pushing (CONTINUATION_SIZE + STACK_ENV_FIRST_ARG); SET_EXP (MEMORY_REF (GET_EXP, PCOMB3_FN_SLOT)); goto primitive_internal_apply; case RC_PCOMB3_DO_1: { SCHEME_OBJECT Temp = (STACK_POP ()); /* Value of arg. 3 */ POP_ENV (); STACK_PUSH (Temp); /* Save arg. 3 again */ PUSH_VAL (); /* Save arg. 2 */ DO_ANOTHER_THEN (RC_PCOMB3_APPLY, PCOMB3_ARG_1_SLOT); } case RC_PCOMB3_DO_2: SET_ENV (STACK_REF (0)); PUSH_VAL (); /* Save value of arg. 3 */ DO_ANOTHER_THEN (RC_PCOMB3_DO_1, PCOMB3_ARG_2_SLOT); case RC_POP_RETURN_ERROR: case RC_RESTORE_VALUE: SET_VAL (GET_EXP); break; /* The following two return codes are both used to restore a saved history object. The difference is that the first does not copy the history object while the second does. In both cases, the GET_EXP contains the history object and the next item to be popped off the stack contains the offset back to the previous restore history return code. 
*/ case RC_RESTORE_DONT_COPY_HISTORY: { prev_restore_history_offset = (OBJECT_DATUM (STACK_POP ())); (void) STACK_POP (); history_register = (OBJECT_ADDRESS (GET_EXP)); break; } case RC_RESTORE_HISTORY: { if (!restore_history (GET_EXP)) { SAVE_CONT (); Will_Push (CONTINUATION_SIZE); SET_EXP (GET_VAL); SET_RC (RC_RESTORE_VALUE); SAVE_CONT (); Pushed (); IMMEDIATE_GC (HEAP_AVAILABLE); } prev_restore_history_offset = (OBJECT_DATUM (STACK_POP ())); (void) STACK_POP (); if (prev_restore_history_offset > 0) (STACK_LOCATIVE_REFERENCE (STACK_BOTTOM, (-prev_restore_history_offset))) = (MAKE_RETURN_CODE (RC_RESTORE_HISTORY)); break; } case RC_RESTORE_INT_MASK: SET_INTERRUPT_MASK (UNSIGNED_FIXNUM_TO_LONG (GET_EXP)); if (GC_NEEDED_P (0)) REQUEST_GC (0); if (PENDING_INTERRUPTS_P) { SET_RC (RC_RESTORE_VALUE); SET_EXP (GET_VAL); SAVE_CONT (); SIGNAL_INTERRUPT (PENDING_INTERRUPTS ()); } break; case RC_STACK_MARKER: /* Frame consists of the return code followed by two objects. The first object has already been popped into GET_EXP, so just pop the second argument. */ stack_pointer = (STACK_LOCATIVE_OFFSET (stack_pointer, 1)); break; case RC_SEQ_2_DO_2: END_SUBPROBLEM (); POP_ENV (); REDUCES_TO_NTH (SEQUENCE_2); case RC_SEQ_3_DO_2: SET_ENV (STACK_REF (0)); DO_ANOTHER_THEN (RC_SEQ_3_DO_3, SEQUENCE_2); case RC_SEQ_3_DO_3: END_SUBPROBLEM (); POP_ENV (); REDUCES_TO_NTH (SEQUENCE_3); case RC_SNAP_NEED_THUNK: /* Don't snap thunk twice; evaluation of the thunk's body might have snapped it already. */ if ((MEMORY_REF (GET_EXP, THUNK_SNAPPED)) == SHARP_T) SET_VAL (MEMORY_REF (GET_EXP, THUNK_VALUE)); else { MEMORY_SET (GET_EXP, THUNK_SNAPPED, SHARP_T); MEMORY_SET (GET_EXP, THUNK_VALUE, GET_VAL); } break; default: POP_RETURN_ERROR (ERR_INAPPLICABLE_CONTINUATION); }
/*
 * Push a proxy userdata on the stack.
 *
 * Initializes necessary structures if it's the first time 'idfunc' is being
 * used in this Lua state (creates and registers the metatable). Otherwise,
 * reuses the memorized metatable. In both cases the deep data's reference
 * count is incremented: one more proxy points to it (the matching decrement
 * is performed by the '__gc' handler installed here).
 *
 * On exit, exactly one value has been pushed on L's stack: the proxy userdata.
 * Raises a Lua error if 'idfunc' misbehaves when asked for its metatable.
 */
void luaG_push_proxy( lua_State *L, lua_CFunction idfunc, DEEP_PRELUDE *prelude )
{
    DEEP_PRELUDE **proxy;

    // refcount is shared between states: protect the increment with the global deep lock
    MUTEX_LOCK( &deep_lock );
    ++(prelude->refcount);  // one more proxy pointing to this deep data
    MUTEX_UNLOCK( &deep_lock );

    STACK_GROW(L,4);
    STACK_CHECK(L)

    // the proxy is a full userdata that holds a single pointer to the prelude
    proxy= lua_newuserdata( L, sizeof( DEEP_PRELUDE* ) );
    ASSERT_L(proxy);
    *proxy= prelude;

    // Get/create metatable for 'idfunc' (in this state)
    // the idfunc's C function address is used as the lookup key
    //
    lua_pushcfunction( L, idfunc );    // key
    get_deep_lookup(L);
    //
    // [-2]: proxy
    // [-1]: metatable / nil

    if (lua_isnil(L,-1))
    {
        // No metatable yet; make one and register it
        //
        lua_pop(L,1);

        // ask the idfunc itself to build its metatable: tbl= idfunc( "metatable" )
        //
        lua_pushcfunction( L, idfunc );
        lua_pushliteral( L, "metatable" );
        lua_call( L, 1 /*args*/, 1 /*results*/ );
        //
        // [-2]: proxy
        // [-1]: metatable (returned by 'idfunc')

        if (!lua_istable(L,-1))
            luaL_error( L, "Bad idfunc on \"metatable\": did not return one" );

        // Add '__gc' method so that collecting a proxy releases its reference
        // on the deep data
        //
        lua_pushcfunction( L, deep_userdata_gc );
        lua_setfield( L, -2, "__gc" );

        // Memorize for later rounds: store metatable under the idfunc key
        //
        lua_pushvalue( L,-1 );
        lua_pushcfunction( L, idfunc );
        //
        // [-4]: proxy
        // [-3]: metatable (2nd ref)
        // [-2]: metatable
        // [-1]: idfunc
        set_deep_lookup(L);
    }
    STACK_MID(L,2)
    ASSERT_L( lua_isuserdata(L,-2) );
    ASSERT_L( lua_istable(L,-1) );

    // [-2]: proxy userdata
    // [-1]: metatable to use
    lua_setmetatable( L, -2 );

    STACK_END(L,1)
    // [-1]: proxy userdata
}
/*
 * Initialize keeper states
 *
 * NOTE(review): the original header claimed "returns NULL and pushes the error
 * message" — this version is void and reports problems by raising a Lua error
 * in L (via luaL_error/lua_error) instead.
 *
 * Note: Any problems would be design flaws; the created Lua state is left
 * unclosed, because it does not really matter. In production code, this
 * function never fails.
 * settings table is at position 1 on the stack
 */
void init_keepers( struct s_Universe* U, lua_State* L)
{
	int i;
	int nb_keepers;
	void* allocUD;
	// reuse the master state's allocator so keeper bookkeeping follows the same allocation policy
	lua_Alloc allocF = lua_getallocf( L, &allocUD);

	STACK_CHECK( L);                                       // L                            K
	// read the keeper count from the settings table (stack slot 1)
	lua_getfield( L, 1, "nb_keepers");                     // nb_keepers
	nb_keepers = (int) lua_tointeger( L, -1);
	lua_pop( L, 1);                                        //
	assert( nb_keepers >= 1);

	// struct s_Keepers contains an array of 1 s_Keeper, adjust for the actual number of keeper states
	{
		size_t const bytes = sizeof( struct s_Keepers) + (nb_keepers - 1) * sizeof(struct s_Keeper);
		U->keepers = (struct s_Keepers*) allocF( allocUD, NULL, 0, bytes);
		if( U->keepers == NULL)
		{
			(void) luaL_error( L, "init_keepers() failed while creating keeper array; out of memory");
			return;
		}
		memset( U->keepers, 0, bytes);
		U->keepers->nb_keepers = nb_keepers;
	}
	for( i = 0; i < nb_keepers; ++ i)                      // keepersUD
	{
		// create one keeper Lua state per slot
		lua_State* K = PROPAGATE_ALLOCF_ALLOC();
		if( K == NULL)
		{
			(void) luaL_error( L, "init_keepers() failed while creating keeper states; out of memory");
			return;
		}
		U->keepers->keeper_array[i].L = K;
		// we can trigger a GC from inside keeper_call(), where a keeper is acquired
		// from there, GC can collect a linda, which would acquire the keeper again, and deadlock the thread.
		// therefore, we need a recursive mutex.
		MUTEX_RECURSIVE_INIT( &U->keepers->keeper_array[i].keeper_cs);
		STACK_CHECK( K);

		// copy the universe pointer in the keeper itself
		lua_pushlightuserdata( K, UNIVERSE_REGKEY);
		lua_pushlightuserdata( K, U);
		lua_rawset( K, LUA_REGISTRYINDEX);
		STACK_MID( K, 0);

		// make sure 'package' is initialized in keeper states, so that we have require()
		// this because this is needed when transferring deep userdata object
		luaL_requiref( K, "package", luaopen_package, 1);  //                              package
		lua_pop( K, 1);                                    //
		STACK_MID( K, 0);
		serialize_require( U, K);
		STACK_MID( K, 0);

		// copy package.path and package.cpath from the source state
		lua_getglobal( L, "package");                      // "..." keepersUD package
		if( !lua_isnil( L, -1))
		{
			// when copying with mode eLM_ToKeeper, error message is pushed at the top of the stack, not raised immediately
			if( luaG_inter_copy_package( U, L, K, -1, eLM_ToKeeper))
			{
				// if something went wrong, the error message is at the top of the stack
				lua_remove( L, -2);                            // error_msg
				(void) lua_error( L);
				return;
			}
		}
		lua_pop( L, 1);                                    //
		STACK_MID( L, 0);

		// attempt to call on_state_create(), if we have one and it is a C function
		// (only support a C function because we can't transfer executable Lua code in keepers)
		// will raise an error in L in case of problem
		call_on_state_create( U, K, L, eLM_ToKeeper);

		// to see VM name in Decoda debugger
		lua_pushliteral( K, "Keeper #");                   //                              "Keeper #"
		lua_pushinteger( K, i + 1);                        //                              "Keeper #" n
		lua_concat( K, 2);                                 //                              "Keeper #n"
		lua_setglobal( K, "decoda_name");                  //

		// create the fifos table in the keeper state
		lua_pushlightuserdata( K, fifos_key);              //                              fifo_key
		lua_newtable( K);                                  //                              fifo_key {}
		lua_rawset( K, LUA_REGISTRYINDEX);                 //
		STACK_END( K, 0);
	}
	STACK_END( L, 0);
}
/*
 * Initialize keeper states
 *
 * If there is a problem, return an error message (NULL for okay).
 *
 * Note: Any problems would be design flaws; the created Lua state is left
 * unclosed, because it does not really matter. In production code, this
 * function never fails.
 */
char const* init_keepers( lua_State* L, int _on_state_create, int const _nbKeepers)
{
	int i;
	assert( _nbKeepers >= 1);
	GNbKeepers = _nbKeepers;
	GKeepers = malloc( _nbKeepers * sizeof( struct s_Keeper));
	// FIX: the allocation result was previously unchecked; on OOM the loop
	// below would dereference NULL (GKeepers[i].lock_ / GKeepers[i].L).
	// Use the function's existing error-string channel instead.
	if( GKeepers == NULL)
	{
		GNbKeepers = 0; // keep bookkeeping consistent: no keeper array, no keepers
		return "init_keepers() failed to allocate keeper array; out of memory";
	}
	for( i = 0; i < _nbKeepers; ++ i)
	{
		lua_State* K;
		DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "### init_keepers %d BEGIN\n" INDENT_END, i));
		DEBUGSPEW_CODE( ++ debugspew_indent_depth);

		// We need to load all base libraries in the keeper states so that the transfer databases are populated properly
		//
		// 'io' for debugging messages, 'package' because we need to require modules exporting idfuncs
		// the others because they export functions that we may store in a keeper for transfer between lanes
		K = luaG_newstate( L, _on_state_create, "K");
		STACK_CHECK( K);

		// replace default 'package' contents with stuff gotten from the master state
		lua_getglobal( L, "package");
		luaG_inter_copy_package( L, K, -1);
		lua_pop( L, 1);

		DEBUGSPEW_CODE( fprintf( stderr, INDENT_BEGIN "### init_keepers %d END\n" INDENT_END, i));
		DEBUGSPEW_CODE( -- debugspew_indent_depth);

		// to see VM name in Decoda debugger
		lua_pushliteral( K, "Keeper #");
		lua_pushinteger( K, i + 1);
		lua_concat( K, 2);
		lua_setglobal( K, "decoda_name");

#if KEEPER_MODEL == KEEPER_MODEL_C
		// create the fifos table in the keeper state
		lua_pushlightuserdata( K, fifos_key);
		lua_newtable( K);
		lua_rawset( K, LUA_REGISTRYINDEX);
#endif // KEEPER_MODEL == KEEPER_MODEL_C

#if KEEPER_MODEL == KEEPER_MODEL_LUA
		// use package.loaders[2] to find keeper microcode (NOTE: this works only if nobody tampered with the loaders table...)
		lua_getglobal( K, "package");                  // package
		lua_getfield( K, -1, "loaders");               // package package.loaders
		lua_rawgeti( K, -1, 2);                        // package package.loaders package.loaders[2]
		lua_pushliteral( K, "lanes-keeper");           // package package.loaders package.loaders[2] "lanes-keeper"
		STACK_MID( K, 4);
		// first pcall loads lanes-keeper.lua, second one runs the chunk
		if( lua_pcall( K, 1 /*args*/, 1 /*results*/, 0 /*errfunc*/) || lua_pcall( K, 0 /*args*/, 0 /*results*/, 0 /*errfunc*/))
		{
			// LUA_ERRRUN / LUA_ERRMEM / LUA_ERRERR
			//
			char const* err = lua_tostring( K, -1);
			assert( err);
			return err;
		}                                              // package package.loaders
		STACK_MID( K, 2);
		lua_pop( K, 2);
#endif // KEEPER_MODEL == KEEPER_MODEL_LUA

		STACK_END( K, 0);
		MUTEX_INIT( &GKeepers[i].lock_);
		GKeepers[i].L = K;
		//GKeepers[i].count = 0;
	}
#if HAVE_KEEPER_ATEXIT_DESINIT
	atexit( atexit_close_keepers);
#endif // HAVE_KEEPER_ATEXIT_DESINIT
	return NULL; // ok
}
/*
 * Push a proxy userdata on the stack.
 * returns NULL if ok, else some error string related to bad idfunc behavior or module require problem
 * (error cannot happen with mode_ == eLM_ToKeeper)
 *
 * Initializes necessary structures if it's the first time 'idfunc' is being
 * used in this Lua state (metatable, registering it). Otherwise, increments the
 * reference count.
 *
 * On success, exactly one value (the proxy userdata) has been pushed on L's
 * stack; on error, the stack is restored and an error string is returned.
 */
char const* push_deep_proxy( struct s_Universe* U, lua_State* L, DEEP_PRELUDE* prelude, enum eLookupMode mode_)
{
	DEEP_PRELUDE** proxy;

	// Check if a proxy already exists in the per-state cache (values are weak,
	// so a collected proxy does not keep the cache entry alive)
	push_registry_subtable_mode( L, DEEP_PROXY_CACHE_KEY, "v");                                        // DPC
	lua_pushlightuserdata( L, prelude->deep);                                                          // DPC deep
	lua_rawget( L, -2);                                                                                // DPC proxy
	if ( !lua_isnil( L, -1))
	{
		// cache hit: reuse the existing proxy, no refcount change needed
		lua_remove( L, -2);                                                                              // proxy
		return NULL;
	}
	else
	{
		lua_pop( L, 1);                                                                                  // DPC
	}

	// refcount is shared between states: protect the increment with the universe's deep lock
	MUTEX_LOCK( &U->deep_lock);
	++ (prelude->refcount);  // one more proxy pointing to this deep data
	MUTEX_UNLOCK( &U->deep_lock);

	STACK_GROW( L, 7);
	STACK_CHECK( L);

	// the proxy is a full userdata holding a single pointer to the prelude
	proxy = lua_newuserdata( L, sizeof( DEEP_PRELUDE*));                                               // DPC proxy
	ASSERT_L( proxy);
	*proxy = prelude;

	// Get/create metatable for 'idfunc' (in this state)
	lua_pushlightuserdata( L, prelude->idfunc);                                                        // DPC proxy idfunc
	get_deep_lookup( L);                                                                               // DPC proxy metatable?

	if( lua_isnil( L, -1)) // // No metatable yet.
	{
		char const* modname;
		int oldtop = lua_gettop( L);                                                                     // DPC proxy nil
		lua_pop( L, 1);                                                                                  // DPC proxy

		// 1 - make one and register it
		if( mode_ != eLM_ToKeeper)
		{
			// the idfunc is expected to push exactly a metatable and a deep version string
			prelude->idfunc( L, eDO_metatable);                                                            // DPC proxy metatable deepversion
			if( lua_gettop( L) - oldtop != 1 || !lua_istable( L, -2) || !lua_isstring( L, -1))
			{
				lua_settop( L, oldtop);                                                                      // DPC proxy X
				lua_pop( L, 3);                                                                              //
				return "Bad idfunc(eOP_metatable): unexpected pushed value";
			}
			// reject modules compiled against a different deep userdata protocol version
			luaG_pushdeepversion( L);                                                                      // DPC proxy metatable deepversion deepversion
			if( !lua501_equal( L, -1, -2))
			{
				lua_pop( L, 5);                                                                              //
				return "Bad idfunc(eOP_metatable): mismatched deep version";
			}
			lua_pop( L, 2);                                                                                // DPC proxy metatable
			// make sure the idfunc didn't export __gc, as we will store our own
			lua_getfield( L, -1, "__gc");                                                                  // DPC proxy metatable __gc
			if( !lua_isnil( L, -1))
			{
				lua_pop( L, 4);                                                                              //
				return "idfunc-created metatable shouldn't contain __gc";
			}
			lua_pop( L, 1);                                                                                // DPC proxy metatable
		}
		else
		{
			// keepers need a minimal metatable that only contains __gc
			lua_newtable( L);                                                                              // DPC proxy metatable
		}

		// Add our own '__gc' method
		lua_pushcfunction( L, deep_userdata_gc);                                                         // DPC proxy metatable __gc
		lua_setfield( L, -2, "__gc");                                                                    // DPC proxy metatable

		// Memorize for later rounds
		lua_pushvalue( L, -1);                                                                           // DPC proxy metatable metatable
		lua_pushlightuserdata( L, prelude->idfunc);                                                      // DPC proxy metatable metatable idfunc
		set_deep_lookup( L);                                                                             // DPC proxy metatable

		// 2 - cause the target state to require the module that exported the idfunc
		// this is needed because we must make sure the shared library is still loaded as long as we hold a pointer on the idfunc
		{
			int oldtop = lua_gettop( L);
			modname = (char const*) prelude->idfunc( L, eDO_module);                                       // DPC proxy metatable
			// make sure the function pushed nothing on the stack!
			if( lua_gettop( L) - oldtop != 0)
			{
				lua_pop( L, 3);                                                                              //
				return "Bad idfunc(eOP_module): should not push anything";
			}
		}
		if( modname) // we actually got a module name
		{
			// somehow, L.registry._LOADED can exist without having registered the 'package' library.
			lua_getglobal( L, "require");                                                                  // DPC proxy metatable require()
			// check that the module is already loaded (or being loaded, we are happy either way)
			if( lua_isfunction( L, -1))
			{
				lua_pushstring( L, modname);                                                                 // DPC proxy metatable require() "module"
				lua_getfield( L, LUA_REGISTRYINDEX, "_LOADED");                                              // DPC proxy metatable require() "module" _R._LOADED
				if( lua_istable( L, -1))
				{
					bool_t alreadyloaded;
					lua_pushvalue( L, -2);                                                                     // DPC proxy metatable require() "module" _R._LOADED "module"
					lua_rawget( L, -2);                                                                        // DPC proxy metatable require() "module" _R._LOADED module
					alreadyloaded = lua_toboolean( L, -1);
					if( !alreadyloaded) // not loaded
					{
						int require_result;
						lua_pop( L, 2);                                                                          // DPC proxy metatable require() "module"
						// require "modname"
						require_result = lua_pcall( L, 1, 0, 0);                                                 // DPC proxy metatable error?
						if( require_result != LUA_OK)
						{
							// failed, return the error message with a descriptive prefix
							lua_pushfstring( L, "error while requiring '%s' identified by idfunc(eOP_module): ", modname);
							lua_insert( L, -2);                                                                    // DPC proxy metatable prefix error
							lua_concat( L, 2);                                                                     // DPC proxy metatable error
							return lua_tostring( L, -1);
						}
					}
					else // already loaded, we are happy
					{
						lua_pop( L, 4);                                                                          // DPC proxy metatable
					}
				}
				else // no L.registry._LOADED; can this ever happen?
				{
					lua_pop( L, 6);                                                                            //
					return "unexpected error while requiring a module identified by idfunc(eOP_module)";
				}
			}
			else // a module name, but no require() function :-(
			{
				lua_pop( L, 4);                                                                              //
				return "lanes receiving deep userdata should register the 'package' library";
			}
		}
	}
	STACK_MID( L, 2);                                                                                  // DPC proxy metatable

	ASSERT_L( lua_isuserdata( L, -2));
	ASSERT_L( lua_istable( L, -1));
	lua_setmetatable( L, -2);                                                                          // DPC proxy

	// If we're here, we obviously had to create a new proxy, so cache it.
	lua_pushlightuserdata( L, (*proxy)->deep);                                                         // DPC proxy deep
	lua_pushvalue( L, -2);                                                                             // DPC proxy deep proxy
	lua_rawset( L, -4);                                                                                // DPC proxy
	lua_remove( L, -2);                                                                                // proxy
	ASSERT_L( lua_isuserdata( L, -1));
	STACK_END( L, 0);
	return NULL;
}
/*
 * Evaluate a chain of fast-op cells ('fop') in environment 'env'.
 *
 * The evaluator is a trampoline: each opcode updates the accumulator
 * 'retval' and/or the next op to run ('fop'), looping until the chain
 * terminates (fop is NIL). Operand subexpressions are evaluated by direct
 * recursion into execute_fast_op. An eval frame is entered for the whole
 * run so that throw/catch can unwind through it.
 *
 * Returns the value left in 'retval' by the last opcode executed.
 */
static lref_t execute_fast_op(lref_t fop, lref_t env)
{
     lref_t retval = NIL;
     lref_t sym;
     lref_t binding;
     lref_t fn;
     lref_t args;
     size_t argc;
     lref_t argv[ARG_BUF_LEN];
     lref_t after;
     lref_t tag;
     lref_t cell;
     lref_t escape_retval;
     jmp_buf *jmpbuf;

     STACK_CHECK(&fop);
     _process_interrupts();

     fstack_enter_eval_frame(&fop, fop, env);

     while(!NULLP(fop)) {
          switch(fop->header.opcode) {
          case FOP_LITERAL:
               /* Constant: arg1 is the literal value itself. */
               retval = fop->as.fast_op.arg1;
               fop = fop->as.fast_op.next;
               break;

          case FOP_GLOBAL_REF:
               /* Read a global symbol's value cell; error if unbound. */
               sym = fop->as.fast_op.arg1;
               binding = SYMBOL_VCELL(sym);

               if (UNBOUND_MARKER_P(binding))
                    vmerror_unbound(sym);

               retval = binding;
               fop = fop->as.fast_op.next;
               break;

          case FOP_GLOBAL_SET:
               /* Store the current retval into an already-bound global
                * (setting an unbound symbol is an error). */
               sym = fop->as.fast_op.arg1;
               binding = SYMBOL_VCELL(sym);

               if (UNBOUND_MARKER_P(binding))
                    vmerror_unbound(sym);

               SET_SYMBOL_VCELL(sym, retval);
               fop = fop->as.fast_op.next;
               break;

          case FOP_APPLY_GLOBAL:
               /* Call the function bound to global 'sym'. arg2 is the list
                * of actual-argument fast-ops, evaluated left to right into
                * argv[]. apply() yields the next op chain to run (which
                * makes callee dispatch an effective tail call). */
               sym = fop->as.fast_op.arg1;
               fn = SYMBOL_VCELL(sym);

               if (UNBOUND_MARKER_P(fn))
                    vmerror_unbound(sym);

               argc = 0;
               args = fop->as.fast_op.arg2;

               while (CONSP(args)) {
                    if (argc >= ARG_BUF_LEN) {
                         /* vmerror_* presumably escapes; 'break' only
                          * guards against a fall-through return. */
                         vmerror_unsupported(_T("too many actual arguments"));
                         break;
                    }

                    argv[argc] = execute_fast_op(CAR(args), env);

                    args = CDR(args);
                    argc++;
               }

               /* A non-NIL tail here means the argument list was improper. */
               if (!NULLP(args))
                    vmerror_arg_out_of_range(fop->as.fast_op.arg2, _T("bad formal argument list"));

               fop = apply(fn, argc, argv, &env, &retval);
               break;

          case FOP_APPLY:
               /* Same as FOP_APPLY_GLOBAL, except the callee is computed
                * by evaluating arg1 rather than read from a global. */
               argc = 0;
               fn = execute_fast_op(fop->as.fast_op.arg1, env);
               args = fop->as.fast_op.arg2;

               while (CONSP(args)) {
                    if (argc >= ARG_BUF_LEN) {
                         vmerror_unsupported(_T("too many actual arguments"));
                         break;
                    }

                    argv[argc] = execute_fast_op(CAR(args), env);

                    args = CDR(args);
                    argc++;
               }

               if (!NULLP(args))
                    vmerror_arg_out_of_range(fop->as.fast_op.arg2, _T("bad formal argument list"));

               fop = apply(fn, argc, argv, &env, &retval);
               break;

          case FOP_IF_TRUE:
               /* Branch on the current retval: arg1 if true, else arg2. */
               if (TRUEP(retval))
                    fop = fop->as.fast_op.arg1;
               else
                    fop = fop->as.fast_op.arg2;
               break;

          case FOP_RETVAL:
               /* No-op: retval already holds the value to pass along. */
               fop = fop->as.fast_op.next;
               break;

          case FOP_SEQUENCE:
               /* Evaluate arg1, then continue with the arg2 op chain. */
               retval = execute_fast_op(fop->as.fast_op.arg1, env);
               fop = fop->as.fast_op.arg2;
               break;

          case FOP_THROW:
               /* Non-local exit: evaluate tag (arg1) and value (arg2),
                * locate the innermost matching escape frame, stash the
                * escape value in the TIB, and unwind to it. */
               tag = execute_fast_op(fop->as.fast_op.arg1, env);
               escape_retval = execute_fast_op(fop->as.fast_op.arg2, env);

               dscwritef(DF_SHOW_THROWS, (_T("; DEBUG: throw ~a, retval = ~a\n"), tag, escape_retval));

               CURRENT_TIB()->escape_frame = find_matching_escape(CURRENT_TIB()->frame, tag);
               CURRENT_TIB()->escape_value = escape_retval;

               if (CURRENT_TIB()->escape_frame == NULL) {
                    /* If we don't find a matching catch for the throw, we have a
                     * problem and need to invoke a trap. */
                    vmtrap(TRAP_UNCAUGHT_THROW,
                           (enum vmt_options_t)(VMT_MANDATORY_TRAP | VMT_HANDLER_MUST_ESCAPE),
                           2, tag, escape_retval);
               }

               unwind_stack_for_throw();

               fop = fop->as.fast_op.next;
               break;

          case FOP_CATCH:
               /* Establish a catch frame for the evaluated tag, then run
                * the body (arg2) under setjmp. A longjmp from a matching
                * FOP_THROW lands in the else-branch with the thrown value
                * waiting in the TIB's escape_value slot. */
               tag = execute_fast_op(fop->as.fast_op.arg1, env);

               jmpbuf = fstack_enter_catch_frame(tag, CURRENT_TIB()->frame);

               dscwritef(DF_SHOW_THROWS, (_T("; DEBUG: setjmp tag: ~a, frame: ~c&, jmpbuf: ~c&\n"), tag, CURRENT_TIB()->frame, jmpbuf));

               if (setjmp(*jmpbuf) == 0) {
                    retval = execute_fast_op(fop->as.fast_op.arg2, env);
               } else {
                    dscwritef(DF_SHOW_THROWS, (_T("; DEBUG: catch, retval = ~a\n"), CURRENT_TIB()->escape_value));

                    retval = CURRENT_TIB()->escape_value;
                    CURRENT_TIB()->escape_value = NIL;
               }

               fstack_leave_frame();

               fop = fop->as.fast_op.next;
               break;

          case FOP_WITH_UNWIND_FN:
               /* unwind-protect style: arg1 evaluates to the after-thunk,
                * arg2 is the protected body. On normal completion the
                * after-thunk is fetched from the frame, the frame popped,
                * then the thunk applied. NOTE(review): on a throw past this
                * frame the thunk is presumably run by the stack unwinder
                * rather than here — confirm against unwind_stack_for_throw. */
               fstack_enter_unwind_frame(execute_fast_op(fop->as.fast_op.arg1, env));

               retval = execute_fast_op(fop->as.fast_op.arg2, env);

               after = CURRENT_TIB()->frame[FOFS_UNWIND_AFTER];

               fstack_leave_frame();

               apply1(after, 0, NULL);

               fop = fop->as.fast_op.next;
               break;

          case FOP_CLOSURE:
               /* Construct a closure capturing the current environment. */
               retval = lclosurecons(env,
                                     lcons(lcar(fop->as.fast_op.arg1),
                                           fop->as.fast_op.arg2),
                                     lcdr(fop->as.fast_op.arg1));
               fop = fop->as.fast_op.next;
               break;

          case FOP_CAR:
               /* Accumulator ops: transform retval in place. */
               retval = lcar(retval);
               fop = fop->as.fast_op.next;
               break;

          case FOP_CDR:
               retval = lcdr(retval);
               fop = fop->as.fast_op.next;
               break;

          case FOP_NOT:
               retval = boolcons(!TRUEP(retval));
               fop = fop->as.fast_op.next;
               break;

          case FOP_NULLP:
               retval = boolcons(NULLP(retval));
               fop = fop->as.fast_op.next;
               break;

          case FOP_EQP:
               /* Identity comparison of the two evaluated operands. */
               retval = boolcons(EQ(execute_fast_op(fop->as.fast_op.arg1, env),
                                    execute_fast_op(fop->as.fast_op.arg2, env)));
               fop = fop->as.fast_op.next;
               break;

          case FOP_GET_ENV:
               /* Reify the current lexical environment. */
               retval = env;
               fop = fop->as.fast_op.next;
               break;

          case FOP_GLOBAL_DEF: // three args, third was genv, but currently unused
               retval = lidefine_global(fop->as.fast_op.arg1, fop->as.fast_op.arg2);
               fop = fop->as.fast_op.next;
               break;

          case FOP_GET_FSP:
               /* Expose the frame-stack pointer as a fixnum. */
               retval = fixcons((fixnum_t)CURRENT_TIB()->fsp);
               fop = fop->as.fast_op.next;
               break;

          case FOP_GET_FRAME:
               /* Expose the current frame pointer as a fixnum. */
               retval = fixcons((fixnum_t)CURRENT_TIB()->frame);
               fop = fop->as.fast_op.next;
               break;

          case FOP_GET_HFRAMES:
               /* Read/write the per-thread handler-frame list. */
               retval = CURRENT_TIB()->handler_frames;
               fop = fop->as.fast_op.next;
               break;

          case FOP_SET_HFRAMES:
               CURRENT_TIB()->handler_frames = execute_fast_op(fop->as.fast_op.arg1, env);
               fop = fop->as.fast_op.next;
               break;

          case FOP_GLOBAL_PRESERVE_FRAME:
               /* Record the current frame pointer into global 'sym'
                * (which must already be bound), then evaluate arg2. */
               sym = fop->as.fast_op.arg1;
               binding = SYMBOL_VCELL(sym);

               if (UNBOUND_MARKER_P(binding))
                    vmerror_unbound(sym);

               SET_SYMBOL_VCELL(sym, fixcons((fixnum_t)CURRENT_TIB()->frame));

               retval = execute_fast_op(fop->as.fast_op.arg2, env);
               fop = fop->as.fast_op.next;
               break;

          case FOP_STACK_BOUNDARY:
               /* Evaluate arg2 inside a named boundary frame; the frame
                * name comes from evaluating arg1. */
               sym = execute_fast_op(fop->as.fast_op.arg1, env);

               fstack_enter_boundary_frame(sym);

               retval = execute_fast_op(fop->as.fast_op.arg2, env);

               fstack_leave_frame();

               fop = fop->as.fast_op.next;
               break;

          case FOP_FAST_ENQUEUE_CELL:
               /* Append 'cell' (arg1) at the tail of the queue produced by
                * arg2: link it after the cell CAR(retval) points at, then
                * advance the tail pointer. NOTE(review): assumes the queue
                * keeps its tail cell in its CAR — confirm representation. */
               retval = execute_fast_op(fop->as.fast_op.arg2, env);

               cell = execute_fast_op(fop->as.fast_op.arg1, env);

               SET_CDR(CAR(retval), cell);
               SET_CAR(retval, cell);

               fop = fop->as.fast_op.next;
               break;

          case FOP_WHILE_TRUE:
               /* Re-evaluate body (arg2) while the test (arg1) is true;
                * retval keeps the last body value (NIL if zero trips). */
               while(TRUEP(execute_fast_op(fop->as.fast_op.arg1, env))) {
                    retval = execute_fast_op(fop->as.fast_op.arg2, env);
               }
               fop = fop->as.fast_op.next;
               break;

          case FOP_LOCAL_REF_BY_INDEX:
               /* Lexical variable access by fixnum indices (arg1/arg2). */
               retval = lenvlookup_by_index(FIXNM(fop->as.fast_op.arg1),
                                            FIXNM(fop->as.fast_op.arg2),
                                            env);
               fop = fop->as.fast_op.next;
               break;

          case FOP_LOCAL_REF_RESTARG:
               retval = lenvlookup_restarg_by_index(FIXNM(fop->as.fast_op.arg1),
                                                    FIXNM(fop->as.fast_op.arg2),
                                                    env);
               fop = fop->as.fast_op.next;
               break;

          case FOP_LOCAL_SET_BY_INDEX:
               /* Store the current retval into an indexed lexical slot;
                * retval itself is left unchanged. */
               lenvlookup_set_by_index(FIXNM(fop->as.fast_op.arg1),
                                       FIXNM(fop->as.fast_op.arg2),
                                       env, retval);
               fop = fop->as.fast_op.next;
               break;

          default:
               panic("Unsupported fast-op");
          }
     }

     fstack_leave_frame();

     return retval;
}
/*
 * Macro-expand the preprocessing token sequence 'ts'.
 *
 * When 'recursive' is false this is the top-level expansion pass: extra
 * input is pulled on demand via next_tokenseq() (e.g. when a function-like
 * macro invocation is split across sequences). When true, only the given
 * sequence is consumed.
 *
 * Each token carries a 'hide' list of macro names; a token whose name is
 * in its own hide list is not expanded again, which is what stops
 * self-referential macros from recursing forever. Output tokens are
 * pushed (so accumulated in reverse) onto 'res' and un-reversed before
 * returning.
 */
TokenSeq *glsl_expand(TokenSeq *ts, bool recursive)
{
   TokenSeq *res = NULL;

   STACK_CHECK();

   while (ts || !res) {
      /* Top-level pass may refill the input stream. */
      if (!recursive && ts == NULL)
         ts = next_tokenseq();
      if (ts == NULL)
         break;

      /* Only consider expansion if this token isn't hidden from itself. */
      if (!glsl_tokenlist_contains(ts->hide, ts->token)) {
         Macro *m = glsl_macrolist_find(directive_macros, ts->token);

         if (m != NULL) {
            switch (m->type) {
            case MACRO_OBJECT:
               /* Splice the substituted body (with this macro added to the
                * hide set) in front of the remaining input and re-scan. */
               ts = glsl_tokenseq_destructive_reverse(subst(m->body, NULL, NULL, glsl_tokenlist_construct(ts->token, ts->hide), NULL), ts->next);
               continue; // effective tail call
            case MACRO_FUNCTION:
               /* A function-like macro name only expands when followed by
                * '('; pull more input first if needed. */
               if (!recursive && ts->next == NULL)
                  ts->next = next_tokenseq();
               if (ts->next && is_lparen(ts->next->token)) {
                  int formal_count = glsl_tokenlist_length(m->args);
                  int depth = 0;   /* nesting level of inner parentheses */
                  int count = 0;   /* number of actuals collected so far */

                  /* gather up actual parameters */
                  TokenSeq *tail = ts->next->next;
                  TokenSeq *curr = NULL;         /* current actual, reversed */
                  TokenSeqList *actuals = NULL;

                  while (true) {
                     if (!recursive && tail == NULL)
                        tail = next_tokenseq();
                     if (tail == NULL)
                        glsl_compile_error(ERROR_PREPROCESSOR, 1, g_LineNumber, "mismatched parenthesis in macro invocation");

                     if (is_lparen(tail->token))
                        depth++;
                     if (is_rparen(tail->token)) {
                        /* Depth-0 ')' ends the invocation; 'tail' is left
                         * pointing at it for the splice below. */
                        if (depth == 0)
                           break;
                        else
                           depth--;
                     }
                     /* A depth-0 ',' terminates the current actual; commas
                      * inside nested parens are ordinary tokens. */
                     if (is_comma(tail->token))
                        if (depth == 0) {
                           actuals = glsl_tokenseqlist_construct(glsl_tokenseq_destructive_reverse(curr, NULL), actuals);
                           count++;

                           tail = tail->next;
                           curr = NULL;
                           continue;
                        }

                     curr = glsl_tokenseq_construct(tail->token, tail->hide, curr);
                     tail = tail->next;
                  }

                  /* Flush the final actual. The formal_count == 1 case also
                   * records an empty actual for a one-parameter macro called
                   * with no argument tokens, e.g. M(). */
                  if (count > 0 || formal_count == 1 || curr != NULL) {
                     actuals = glsl_tokenseqlist_construct(glsl_tokenseq_destructive_reverse(curr, NULL), actuals);
                     count++;
                  }

                  /* check against arity of macro */
                  if (count == formal_count) {
                     /* Substitute, hiding this macro in the intersection of
                      * the name's and closing-paren's hide sets, and splice
                      * the result before the tokens after the ')'. */
                     ts = glsl_tokenseq_destructive_reverse(subst(m->body, m->args, actuals, glsl_tokenlist_construct(ts->token, glsl_tokenlist_intersect(ts->hide, tail->hide)), NULL), tail->next);
                     continue; // effective tail call
                  } else
                     glsl_compile_error(ERROR_PREPROCESSOR, 1, g_LineNumber, "arity mismatch in macro invocation");
               }
               /* No '(' follows: emit the macro name as a plain token. */
               break;
            case MACRO_LINE:
               /* __LINE__ / __FILE__: replace with a pp-number token. */
               ts = glsl_tokenseq_construct(glsl_token_construct_ppnumberi(g_LineNumber), NULL, ts->next);
               break;
            case MACRO_FILE:
               ts = glsl_tokenseq_construct(glsl_token_construct_ppnumberi(g_FileNumber), NULL, ts->next);
               break;
            default:
               UNREACHABLE();
               break;
            }
         }
      }

      /* Not (or no longer) a macro invocation: copy token to the output. */
      res = glsl_tokenseq_construct(ts->token, ts->hide, res);
      ts = ts->next;
   }

   return glsl_tokenseq_destructive_reverse(res, NULL);
}