/* Pre-allocate a pool of page-sized RAM caps, then install my_ram_alloc()
 * as the generic RAM allocator so later requests are served from the pool. */
static void ram_hack(void)
{
    errval_t err;

    for (int i = 0; i < 100; i++) {
        err = ram_alloc(&ram_caps[i], BASE_PAGE_BITS);
        assert(err_is_ok(err));
    }

    err = ram_alloc_set(my_ram_alloc);
    assert(err_is_ok(err));
}
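/* ram_hack() registers my_ram_alloc() without showing it. Below is a minimal
 * sketch of what such a pool-backed allocator could look like; the
 * ram_alloc_func_t signature (ret cap, size in bits, min base, max limit) and
 * the pool index next_cap are assumptions, not part of the original source. */
static int next_cap = 0;

static errval_t my_ram_alloc(struct capref *ret, uint8_t size_bits,
                             uint64_t minbase, uint64_t maxlimit)
{
    // hand out the caps pre-allocated by ram_hack(), one per request
    if (size_bits > BASE_PAGE_BITS || next_cap >= 100) {
        return LIB_ERR_RAM_ALLOC;   // request too large or pool exhausted
    }
    *ret = ram_caps[next_cap++];
    return SYS_ERR_OK;
}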
static void use_local_memserv_handler(struct spawn_binding *b)
{
    // re-bind the generic RAM allocator (NULL selects the default mem_serv connection)
    ram_alloc_set(NULL);

    errval_t err;
    err = b->tx_vtbl.use_local_memserv_response(b, NOP_CONT);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "error sending use_local_memserv reply");
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            err = b->register_send(b, get_default_waitset(),
                                   MKCONT(retry_use_local_memserv_response, b));
            if (err_is_fail(err)) {
                // note that only one continuation may be registered at a time
                DEBUG_ERR(err, "register_send failed!");
            }
        }
    }
}
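/* The handler above registers retry_use_local_memserv_response() via MKCONT,
 * but the continuation itself is not shown. A minimal sketch, assuming it
 * simply re-attempts the reply once the binding can send again: */
static void retry_use_local_memserv_response(void *arg)
{
    struct spawn_binding *b = arg;
    errval_t err = b->tx_vtbl.use_local_memserv_response(b, NOP_CONT);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "retrying use_local_memserv reply failed");
    }
}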
/**
 * \brief Initialize monitor running on bsp core
 */
static errval_t boot_bsp_core(int argc, char *argv[])
{
    errval_t err;

    // First argument contains the bootinfo location
    bi = (struct bootinfo*)strtol(argv[1], NULL, 10);

    bsp_monitor = true;

    err = monitor_client_setup_mem_serv();
    assert(err_is_ok(err));

    /* Wait for mem_serv to advertise its iref to us */
    while (mem_serv_iref == 0) {
        messages_wait_and_handle_next();
    }
    update_ram_alloc_binding = false;

    /* Can now connect to and use mem_serv */
    err = ram_alloc_set(NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_RAM_ALLOC_SET);
    }

    // Export ram_alloc service
    err = mon_ram_alloc_serve();
    assert(err_is_ok(err));

    /* Set up monitor rpc channel */
    err = monitor_rpc_init();
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "monitor rpc init failed");
        return err;
    }

    /* SKB needs vfs for ECLiPSe so we need to start ramfsd first... */
    err = spawn_domain("ramfsd");
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed spawning ramfsd");
        return err;
    }
    // XXX: Wait for ramfsd to initialize
    while (ramfs_serv_iref == 0) {
        messages_wait_and_handle_next();
    }

    /* Spawn skb (new nameserver) before other domains */
    err = spawn_domain("skb");
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed spawning skb");
        return err;
    }
    // XXX: Wait for name_server to initialize
    while (name_serv_iref == 0) {
        messages_wait_and_handle_next();
    }

#ifdef __k1om__
    char args[40];
    snprintf(args, sizeof(args), "0x%016lx 0x%02x",
             bi->host_msg, bi->host_msg_bits);
    char *mgr_argv[MAX_CMDLINE_ARGS + 1];
    spawn_tokenize_cmdargs(args, mgr_argv, ARRAY_LENGTH(mgr_argv));

    err = spawn_domain_with_args("xeon_phi", mgr_argv, environ);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed spawning xeon_phi");
        return err;
    }
#endif

    /* Spawn boot domains in menu.lst */
    err = spawn_all_domains();
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "spawn_all_domains failed");
        return err;
    }

    return SYS_ERR_OK;
}
/**
 * \brief Use cmdline args to figure out which core the monitor is running on
 * and which cores to boot.
 */
int main(int argc, char *argv[])
{
    printf("monitor: invoked as:");
    for (int i = 0; i < argc; i++) {
        printf(" %s", argv[i]);
    }
    printf("\n");

    errval_t err;

    /* Initialize the library */
    bench_init();

    /* Set core id */
    err = invoke_kernel_get_core_id(cap_kernel, &my_core_id);
    assert(err_is_ok(err));
    disp_set_core_id(my_core_id);

    // Setup all channels and channel support code
    err = monitor_client_setup_monitor();
    assert(err_is_ok(err));

    if (argc == 2) {
        /* Bsp monitor */
        err = boot_bsp_core(argc, argv);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "failed to boot BSP core");
            return EXIT_FAILURE;
        }
    } else {
        /* Non bsp monitor */
        err = boot_app_core(argc, argv);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "starting app monitor");
            return EXIT_FAILURE;
        }
    }

#if defined(TRACING_EXISTS) && defined(CONFIG_TRACE)
    err = trace_my_setup();
    assert(err_is_ok(err));
    trace_reset_buffer();

    struct capref tracecap;
    err = trace_setup_on_core(&tracecap);
    if (err_is_fail(err)) {
        if (err_no(err) != TRACE_ERR_NO_BUFFER) {
            DEBUG_ERR(err, "trace_setup_on_core failed");
            printf("Warning: tracing not available on core %d\n", my_core_id);
        }
    } else {
        err = invoke_trace_setup(tracecap);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "invoke_trace_setup failed");
            printf("Warning: tracing not available on core %d\n", my_core_id);
        }
    }
#endif // tracing

    domain_mgmt_init();

#ifdef MONITOR_HEARTBEAT
    struct deferred_event ev;
    mon_heartbeat(&ev);
#endif

    for (;;) {
        err = event_dispatch(get_default_waitset());
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "event_dispatch");
        }

        if (update_ram_alloc_binding) {
            update_ram_alloc_binding = false;
            err = ram_alloc_set(NULL);
            if (err_is_fail(err)) {
                DEBUG_ERR(err, "ram_alloc_set to local allocator failed. "
                               "Will stick with intermon memory allocation.");
            }
        }
    }
}
errval_t initialize_mem_serv(void)
{
    errval_t err;

    /* Step 1: Initialize slot allocator by passing a cnode cap for it to start with */
    struct capref cnode_cap;
    err = slot_alloc(&cnode_cap);
    assert(err_is_ok(err));

    struct capref cnode_start_cap = { .slot = 0 };

    struct capref ram;
    err = ram_alloc_fixed(&ram, BASE_PAGE_BITS, 0, 0);
    assert(err_is_ok(err));
    err = cnode_create_from_mem(cnode_cap, ram, &cnode_start_cap.cnode,
                                DEFAULT_CNODE_BITS);
    assert(err_is_ok(err));

    /* location where slot allocator will place its top-level cnode */
    struct capref top_slot_cap = {
        .cnode = cnode_root,
        .slot = ROOTCN_SLOT_SLOT_ALLOCR,
    };

    /* clear mm_ram struct */
    memset(&mm_ram, 0, sizeof(mm_ram));

    /* init slot allocator */
    err = slot_prealloc_init(&ram_slot_alloc, top_slot_cap, MAXCHILDBITS,
                             CNODE_BITS, cnode_start_cap,
                             1UL << DEFAULT_CNODE_BITS, &mm_ram);
    assert(err_is_ok(err));

    // FIXME: remove magic constant for lowest valid RAM address
    err = mm_init(&mm_ram, ObjType_RAM, 0x80000000, MAXSIZEBITS, MAXCHILDBITS,
                  NULL, slot_alloc_prealloc, &ram_slot_alloc, true);
    assert(err_is_ok(err));

    /* Step 2: give MM allocator static storage to get it started */
    static char nodebuf[SLAB_STATIC_SIZE(MINSPARENODES, MM_NODE_SIZE(MAXCHILDBITS))];
    slab_grow(&mm_ram.slabs, nodebuf, sizeof(nodebuf));

    /* Step 3: walk bootinfo and add all unused RAM caps to allocator */
    struct capref mem_cap = {
        .cnode = cnode_super,
        .slot = 0,
    };

    for (int i = 0; i < bi->regions_length; i++) {
        if (bi->regions[i].mr_type == RegionType_Empty) {
            //dump_ram_region(i, bi->regions + i);
            mem_total += ((size_t)1) << bi->regions[i].mr_bits;

            if (bi->regions[i].mr_consumed) {
                // region consumed by init, skipped
                mem_cap.slot++;
                continue;
            }

            err = mm_add(&mm_ram, mem_cap, bi->regions[i].mr_bits,
                         bi->regions[i].mr_base);
            if (err_is_ok(err)) {
                mem_avail += ((size_t)1) << bi->regions[i].mr_bits;
            } else {
                DEBUG_ERR(err, "Warning: adding RAM region %d (%p/%d) FAILED",
                          i, bi->regions[i].mr_base, bi->regions[i].mr_bits);
            }

            /* try to refill slot allocator (may fail if the mem allocator is empty) */
            err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
            if (err_is_fail(err) && err_no(err) != MM_ERR_SLOT_MM_ALLOC) {
                DEBUG_ERR(err, "in slot_prealloc_refill() while initialising"
                          " memory allocator");
                abort();
            }

            /* refill slab allocator if needed and possible */
            if (slab_freecount(&mm_ram.slabs) <= MINSPARENODES
                && mem_avail > (1UL << (CNODE_BITS + OBJBITS_CTE)) * 2
                               + 10 * BASE_PAGE_SIZE) {
                slab_default_refill(&mm_ram.slabs); // may fail
            }

            mem_cap.slot++;
        }
    }

    err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
    if (err_is_fail(err)) {
        debug_printf("Fatal internal error in RAM allocator: failed to initialise "
                     "slot allocator\n");
        DEBUG_ERR(err, "failed to init slot allocator");
        abort();
    }

    debug_printf("RAM allocator initialised, %zd MB (of %zd MB) available\n",
                 mem_avail / 1024 / 1024, mem_total / 1024 / 1024);

    // setup proper multi slot alloc
    err = multi_slot_alloc_init(&msa, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "multi_slot_alloc_init");
    }
    debug_printf("MSA initialised\n");

    // switch over ram alloc to proper ram allocator
    ram_alloc_set(memserv_alloc);

    return SYS_ERR_OK;
}
/**
 * \brief Sets up a local memory allocator for init to use until the memory
 * server is ready to be used.
 */
errval_t initialize_ram_alloc(void)
{
    errval_t err;

    /* walk bootinfo looking for suitable RAM cap to use
     * we pick the first cap equal to MM_REQUIREDBITS,
     * or else the next closest less than MM_MAXSIZEBITS */
    int mem_region = -1, mem_slot = 0;
    struct capref mem_cap = {
        .cnode = cnode_super,
        .slot = 0,
    };

    assert(bi != NULL);
    for (int i = 0; i < bi->regions_length; i++) {
        assert(!bi->regions[i].mr_consumed);
        if (bi->regions[i].mr_type == RegionType_Empty) {
            if (bi->regions[i].mr_bits >= MM_REQUIREDBITS
                && bi->regions[i].mr_bits <= MM_MAXSIZEBITS
                && (mem_region == -1
                    || bi->regions[i].mr_bits < bi->regions[mem_region].mr_bits)) {
                mem_region = i;
                mem_cap.slot = mem_slot;
                if (bi->regions[i].mr_bits == MM_REQUIREDBITS) {
                    break;
                }
            }
            mem_slot++;
        }
    }

    if (mem_region < 0) {
        printf("Error: no RAM capability found in the size range "
               "2^%d to 2^%d bytes\n", MM_REQUIREDBITS, MM_MAXSIZEBITS);
        return INIT_ERR_NO_MATCHING_RAM_CAP;
    }
    bi->regions[mem_region].mr_consumed = true;

    /* init slot allocator */
    static struct slot_alloc_basecn init_slot_alloc;
    err = slot_alloc_basecn_init(&init_slot_alloc);
    if (err_is_fail(err)) {
        return err_push(err, MM_ERR_SLOT_ALLOC_INIT);
    }

    /* init MM allocator */
    assert(bi->regions[mem_region].mr_type != RegionType_Module);
    err = mm_init(&mymm, ObjType_RAM, bi->regions[mem_region].mr_base,
                  bi->regions[mem_region].mr_bits, MM_MAXCHILDBITS, NULL,
                  slot_alloc_basecn, &init_slot_alloc, true);
    if (err_is_fail(err)) {
        return err_push(err, MM_ERR_MM_INIT);
    }

    /* give MM allocator enough static storage for its node allocator */
    static char nodebuf[SLAB_STATIC_SIZE(MM_NNODES, MM_NODE_SIZE(MM_MAXCHILDBITS))];
    slab_grow(&mymm.slabs, nodebuf, sizeof(nodebuf));

    /* add single RAM cap to allocator */
    err = mm_add(&mymm, mem_cap, bi->regions[mem_region].mr_bits,
                 bi->regions[mem_region].mr_base);
    if (err_is_fail(err)) {
        return err_push(err, MM_ERR_MM_ADD);
    }

    // initialise generic RAM allocator to use local allocator
    err = ram_alloc_set(mymm_alloc);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_RAM_ALLOC_SET);
    }

    return SYS_ERR_OK;
}
static void set_local_bindings(void)
{
    ram_alloc_set(NULL);
}
/**
 * \brief Initialize monitor running on bsp core
 */
static errval_t boot_bsp_core(int argc, char *argv[])
{
    errval_t err;

    // First argument contains the bootinfo location
    bi = (struct bootinfo*)strtol(argv[1], NULL, 10);

    bsp_monitor = true;

    err = monitor_client_setup_mem_serv();
    assert(err_is_ok(err));

    /* Wait for mem_serv to advertise its iref to us */
    while (mem_serv_iref == 0) {
        messages_wait_and_handle_next();
    }
    update_ram_alloc_binding = false;

    /* Can now connect to and use mem_serv */
    err = ram_alloc_set(NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_RAM_ALLOC_SET);
    }

    // Export ram_alloc service
    err = mon_ram_alloc_serve();
    assert(err_is_ok(err));

    /* Set up monitor rpc channel */
    err = monitor_rpc_init();
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "monitor rpc init failed");
        return err;
    }

    /* SKB needs vfs for ECLiPSe so we need to start ramfsd first... */
    err = spawn_domain("ramfsd");
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed spawning ramfsd");
        return err;
    }
    // XXX: Wait for ramfsd to initialize
    while (ramfs_serv_iref == 0) {
        messages_wait_and_handle_next();
    }

    /* Spawn skb (new nameserver) before other domains */
    err = spawn_domain("skb");
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed spawning skb");
        return err;
    }
    // XXX: Wait for name_server to initialize
    while (name_serv_iref == 0) {
        messages_wait_and_handle_next();
    }

    /* initialise rcap_db */
    err = rcap_db_init();
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "monitor rcap_db init failed");
        return err;
    }

    /* Spawn boot domains in menu.lst */
    err = spawn_all_domains();
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "spawn_all_domains failed");
        return err;
    }

    return SYS_ERR_OK;
}