/**
 * \brief allocates a frame on a specific node
 *
 * \param dest      capref to store the frame
 * \param size      size of the frame to allocated
 * \param node      node on which the frame should be allocated
 * \param ret_size  returned size of the frame capability
 *
 * \returns SYS_ERR_OK on SUCCESS
 *          errval on FAILURE
 */
errval_t numa_frame_alloc_on_node(struct capref *dest,
                                  size_t size,
                                  nodeid_t node,
                                  size_t *ret_size)
{
    NUMA_DEBUG_ALLOC("allocating frame on node %" PRIuNODEID "\n", node);

    /* remember the caller's RAM affinity window so it can be restored later */
    uint64_t saved_base, saved_limit;
    ram_get_affinity(&saved_base, &saved_limit);

    if (node >= numa_topology.num_nodes) {
        return NUMA_ERR_NODEID_INVALID;
    }

    /* narrow the allocator's affinity to the physical range of this node */
    uint64_t range_lo = numa_node_base(node);
    uint64_t range_hi = range_lo + numa_node_size(node, NULL);

    NUMA_DEBUG_ALLOC("setting affinity to 0x%" PRIx64 "..0x%" PRIx64 "\n",
                     range_lo, range_hi);

    ram_set_affinity(range_lo, range_hi);
    errval_t result = frame_alloc(dest, size, ret_size);
    ram_set_affinity(saved_base, saved_limit);

    NUMA_DEBUG_ALLOC("restore affinity to 0x%" PRIx64 "..0x%" PRIx64 "\n",
                     saved_base, saved_limit);

    return result;
}
void alloc_local(void) { errval_t err; #ifndef __k1om__ uint64_t minbase, maxlimit; ram_get_affinity(&minbase, &maxlimit); ram_set_affinity(XPHI_BENCH_RAM_MINBASE, XPHI_BENCH_RAM_MAXLIMIT); #endif size_t alloced_size = 0; err = frame_alloc(&local_frame, XPHI_BENCH_MSG_FRAME_SIZE, &alloced_size); EXPECT_SUCCESS(err, "frame_alloc"); #ifndef __k1om__ ram_set_affinity(minbase, maxlimit); #endif struct frame_identity id; err = invoke_frame_identify(local_frame, &id); EXPECT_SUCCESS(err, "invoke_frame_identify"); local_base = id.base; local_frame_sz = alloced_size; debug_printf("alloc_local | Frame base: %016lx, size=%lx\n", id.base, 1UL << id.bits); err = vspace_map_one_frame(&local_buf, alloced_size, local_frame, NULL, NULL); EXPECT_SUCCESS(err, "vspace_map_one_frame"); }
/**
 * \brief initializes a dma descriptor ring and allocates memory for it
 *
 * \param ring the ring structure to initialize
 * \param size number of elements in the ring
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 */
errval_t xeon_phi_dma_desc_ring_alloc(struct xdma_ring *ring,
                                      uint16_t size)
{
    errval_t err;

    // start from a clean ring structure; all fields are filled in below
    memset(ring, 0, sizeof(*ring));

    // ring length must fit the hardware limit and be a power of two
    // (the latter presumably for cheap index wrapping — TODO confirm)
    assert(size < (XEON_PHI_DMA_DESC_RING_MAX));
    assert(IS_POW2(size));

#ifndef __k1om__
    /*
     * we set the ram affinity to the maximum range mapped by the system memory
     * page tables when being on the host. Otherwise the card cannot access it.
     */
    uint64_t minbase, maxlimit;
    ram_get_affinity(&minbase, &maxlimit);
    ram_set_affinity(0, XEON_PHI_SYSMEM_SIZE-8*XEON_PHI_SYSMEM_PAGE_SIZE);
#endif

    // one fixed-size descriptor per ring element
    size_t frame_size = ((size_t) size) * XEON_PHI_DMA_DESC_SIZE;
    err = frame_alloc(&ring->cap, frame_size, NULL);

#ifndef __k1om__
    // restore the caller's affinity window before checking the result
    ram_set_affinity(minbase, maxlimit);
#endif

    if (err_is_fail(err)) {
        return err;
    }

    err = vspace_map_one_frame_attr(&ring->vbase, frame_size, ring->cap,
                                    VREGION_FLAGS_READ_WRITE, NULL, NULL);
    if (err_is_fail(err)) {
        // undo the allocation on mapping failure
        cap_destroy(ring->cap);
        return err;
    }

    // record the physical base so the DMA engine can be pointed at the ring
    struct frame_identity id;
    err = invoke_frame_identify(ring->cap, &id);
    assert(err_is_ok(err));
    ring->pbase = id.base;
    ring->size = size;

    // descriptors must start out zeroed before the engine reads them
    memset(ring->vbase, 0, frame_size);

    return SYS_ERR_OK;
}
/**
 * \brief callback invoked once a client is bound to this RCCE service.
 *
 * Allocates and maps a shared frame (two BULK_SIZE halves: sender first,
 * receiver second), initializes the bulk transport endpoints and sends the
 * init_request carrying the shared frame cap to the peer.
 *
 * \param st  per-connection rcce_state (stored as binding user state)
 * \param err binding error from the connect; must be SYS_ERR_OK
 * \param b   the established rcce binding
 */
static void client_connected(void *st, errval_t err, struct rcce_binding *b)
{
    struct rcce_state *cs = st;
    assert(err_is_ok(err));

    /* printf("%s: Am connected to client\n", my_name); */

    b->rx_vtbl = rcce_vtbl;
    b->st = cs;

    // Create a Frame Capability
    size_t allocated_size;
    struct capref shared_mem;
#ifdef __scc__
    // constrain the frame to this core's private memory window
    ram_set_affinity(SHARED_MEM_MIN + (PERCORE_MEM_SIZE * disp_get_core_id()),
                     SHARED_MEM_MIN + (PERCORE_MEM_SIZE * (disp_get_core_id() + 1)));
#endif
    errval_t r = frame_alloc(&shared_mem, BULK_SIZE * 2, &allocated_size);
    assert(err_is_ok(r));
#ifdef __scc__
    ram_set_affinity(0, 0);
#endif

    // Map the frame in local memory
    void *pool = NULL;
    r = vspace_map_one_frame_attr(&pool, allocated_size, shared_mem,
                                  BULK_PAGE_MAP, NULL, NULL);
    // BUGFIX: check the errval before inspecting pool — on mapping failure
    // the out-pointer would otherwise be read while uninitialized
    assert(err_is_ok(r));
    assert(pool != NULL);
    assert(allocated_size >= BULK_SIZE * 2);

    // Init sender (first half of the pool)
    err = bulk_init(pool, BULK_SIZE, BLOCK_SIZE, &cs->bt);
    assert(err_is_ok(err));

    // Init receiver (second half of the pool)
    err = bulk_slave_init(pool + BULK_SIZE, BULK_SIZE, &cs->btr);
    assert(err_is_ok(err));

    barrier_binding_init(b);
    barray[cs->index] = b;

    // hand the shared frame cap to the peer so it can map the same pool
    err = barray[cs->index]->tx_vtbl.
        init_request(barray[cs->index], NOP_CONT, my_core_id, bsp_id,
                     (uint64_t)(uintptr_t)cs, shared_mem);
    assert(err_is_ok(err));
}
/**
 * Allocates memory for kernel binary.
 *
 * For x86, the app kernel can only be loaded in the first 4GB
 * of memory. Further, it must not overlap the integer
 * boundaries, i.e. 0-1, 1-2, 2-3, or 3-4.
 *
 * Probably because we identity map this region during boot-phase
 * so we can't access anything higher. Not sure about overlap tough.
 *
 * \param cpu_binary     address of the loaded cpu driver ELF image
 * \param page_size      extra slack added on top of the ELF's virtual size
 * \param cpu_memory_cap [out] capability for the allocated frame
 * \param cpu_memory     [in/out] in: requested size; out: actual size
 * \param id             [out] physical identity of the allocated frame
 *
 * \returns SYS_ERR_OK on success, LIB_ERR_FRAME_ALLOC-wrapped error on
 *          failure (SCC path); panics if no 1GB window below 4GB has room.
 */
static errval_t allocate_kernel_memory(lvaddr_t cpu_binary, genpaddr_t page_size,
                                       struct capref* cpu_memory_cap,
                                       size_t* cpu_memory,
                                       struct frame_identity* id)
{
    errval_t err;
#ifdef __scc__
    // SCC: a single base page suffices for the kernel frame
    *cpu_memory = X86_32_BASE_PAGE_SIZE;
    err = frame_alloc_identify(cpu_memory_cap, *cpu_memory, cpu_memory, id);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }
#else
    // need room for the unpacked ELF plus one page of slack
    *cpu_memory = elf_virtual_size(cpu_binary) + page_size;

    // save the current affinity window so it can be restored on success
    uint64_t old_minbase;
    uint64_t old_maxlimit;
    ram_get_affinity(&old_minbase, &old_maxlimit);
    DEBUG("%s:%d: \n", __FILE__, __LINE__);

    // try each 1GB-aligned window below 4GB in turn; allocating inside a
    // single window guarantees the frame does not straddle a GB boundary
    for (uint64_t minbase = 0, maxlimit = (uint64_t)1 << 30;
         minbase < (uint64_t)4 << 30;
         minbase += (uint64_t)1 << 30, maxlimit += (uint64_t)1 << 30) {

        ram_set_affinity(minbase, maxlimit);
        err = frame_alloc_identify(cpu_memory_cap, *cpu_memory, cpu_memory, id);
        if (err_is_fail(err)) {
            // this window is exhausted — move on to the next one
            continue;
        } else {
            goto done;
        }
    }

    // all four windows failed; booting the core cannot proceed
    // (affinity is deliberately not restored — we never return from here)
    USER_PANIC("No memory in the first 4GB, cannot continue booting cores");

done:
    ram_set_affinity(old_minbase, old_maxlimit);
#endif
    return SYS_ERR_OK;
}
void benchmark_argument(char *arg) { if (!strcmp(arg, "use_udp=1")) { use_udp = true; } else if (!strcmp(arg, "elb_server=1")) { is_server = true; app_type = "server"; } else if (!strncmp(arg, "runs=", strlen("runs="))) { total_runs = atol(arg + strlen("runs=")); } else if (!strncmp(arg, "dry_runs=", strlen("dry_runs="))) { dry_runs = atol(arg + strlen("dry_runs=")); } else if (!strncmp(arg, "payload_size=", strlen("payload_size="))) { payload_size = atol(arg + strlen("payload_size=")); } else if (!strncmp(arg, "elp_outprefix=", strlen("elp_outprefix="))) { out_prefix = arg + strlen("elp_outprefix="); } else if (!strncmp(arg, "elb_nocache=", strlen("elb_nocache="))) { use_nocache = !!atol(arg + strlen("elb_nocache=")); } else if (!strncmp(arg, "read_incoming=", strlen("read_incoming="))) { read_incoming = !!atol(arg + strlen("read_incoming=")); } else if (!strncmp(arg, "dump_each=", strlen("dump_each="))) { dump_each_run = !!atol(arg + strlen("dump_each=")); } else if (!strncmp(arg, "affinitymin=", strlen("affinitymin="))) { minbase = atol(arg + strlen("affinitymin=")); } else if(!strncmp(arg, "affinitymax=", strlen("affinitymax="))) { maxbase = atol(arg + strlen("affinitymax=")); } else if(!strncmp(arg, "cardname=", strlen("cardname="))) { cardname = arg + strlen("cardname="); } else if(!strncmp(arg, "queue=", strlen("queue="))) { qi = atol(arg + strlen("queue=")); } else if(!strncmp(arg, "server_port=", strlen("server_port="))) { server_port = atol(arg + strlen("server_port=")); } else if(!strncmp(arg, "server_ip=", strlen("server_ip="))) { server_ip_addr = arg + strlen("server_ip="); } else { printf("Invalid command line argument [%s]\n", arg); abort(); } if (!affinity_set && minbase != -1ULL && maxbase != -1ULL) { ram_set_affinity(minbase, maxbase); affinity_set = true; } }
/**
 * \brief handles a single ethersrv command-line argument.
 *
 * Recognizes affinitymin=, affinitymax= and disable_sf=; once both
 * affinity bounds have been supplied the RAM affinity is set exactly once.
 */
void ethersrv_argument(const char* arg)
{
    static uint64_t minbase = -1ULL;
    static uint64_t maxbase = -1ULL;
    static bool affinity_set = false;

    /* sizeof(literal) - 1 == strlen(literal) for string literals */
    if (strncmp(arg, "affinitymin=", sizeof("affinitymin=") - 1) == 0) {
        minbase = atol(arg + sizeof("affinitymin=") - 1);
    } else if (strncmp(arg, "affinitymax=", sizeof("affinitymax=") - 1) == 0) {
        maxbase = atol(arg + sizeof("affinitymax=") - 1);
    } else if (strncmp(arg, "disable_sf=", sizeof("disable_sf=") - 1) == 0) {
        force_disable_sf = !!atol(arg + sizeof("disable_sf=") - 1);
    }

    /* apply the affinity window once both ends are known */
    if (!affinity_set && minbase != -1ULL && maxbase != -1ULL) {
        ram_set_affinity(minbase, maxbase);
        affinity_set = true;
    }
}
/**
 * \brief A monitor receives request to setup a connection
 * with another newly booted monitor from a third monitor
 *
 * Reconstructs the peer's URPC frame capability from the caprep,
 * allocates a matching frame locally (via RAM affinity over the same
 * physical range), maps it, wires up notification endpoints, and
 * initializes the intermon UMP/IPI binding before replying.
 */
static void bind_monitor_request_scc(struct intermon_binding *b,
                                     coreid_t core_id,
                                     intermon_caprep_t caprep,
                                     chanid_t chan_id,
                                     coreid_t from_core_id)
{
    struct intermon_ump_ipi_binding *umpb = NULL;
    errval_t err;

    /* Create the cap */
    struct capability cap_raw;
    caprep_to_capability(&caprep, &cap_raw);
    if (cap_raw.type != ObjType_Frame) {
        err = MON_ERR_WRONG_CAP_TYPE;
        goto error;
    }

    struct capref frame;
    err = slot_alloc(&frame);
    // NOTE(review): frame_alloc below appears to obtain its own slot,
    // which would make this slot_alloc a leaked slot — verify against
    // frame_alloc's contract before changing.
    if (err_is_fail(err)) {
        goto error;
    }

    // constrain allocation to the exact physical range of the peer's frame
    // so the "allocated" frame aliases the same memory
    ram_set_affinity(cap_raw.u.frame.base,
                     cap_raw.u.frame.base
                         + ((genpaddr_t)1 << cap_raw.u.frame.bits));
    err = frame_alloc(&frame, ((genpaddr_t)1 << cap_raw.u.frame.bits), NULL);
    ram_set_affinity(0,0);
    /* err = monitor_cap_create(frame, &cap_raw, core_id); */
    if (err_is_fail(err)) {
        goto error;
    }

    struct frame_identity frameid = { .base = 0, .bits = 0 };
    err = invoke_frame_identify(frame, &frameid);
    assert(err == SYS_ERR_OK);

    printf("bind_monitor_request: URPC physical frame at 0x%llx\n",
           frameid.base);

    /* Setup the connection */
    void *buf;
    err = vspace_map_one_frame_attr(&buf, MON_URPC_SIZE, frame,
                                    VREGION_FLAGS_READ_WRITE_NOCACHE,
                                    NULL, NULL);
    if (err_is_fail(err)) {
        err = err_push(err, LIB_ERR_VSPACE_MAP);
        goto error;
    }

    // Create remote notify cap
    struct capref notify_cap;
    err = notification_create_cap(chan_id, core_id, &notify_cap);
    assert(err == SYS_ERR_OK);

    // Allocate my own notification caps
    struct capref ep, my_notify_cap;
    struct lmp_endpoint *iep;
    int chanid;
    err = endpoint_create(LMP_RECV_LENGTH, &ep, &iep);
    assert(err_is_ok(err));
    err = notification_allocate(ep, &chanid);
    assert(err == SYS_ERR_OK);
    err = notification_create_cap(chanid, my_core_id, &my_notify_cap);
    assert(err == SYS_ERR_OK);

    // setup our side of the binding; the peer's send half is our receive
    // half and vice versa, hence the swapped buffer halves
    umpb = malloc(sizeof(struct intermon_ump_ipi_binding));
    assert(umpb != NULL);

    err = intermon_ump_ipi_init(umpb, get_default_waitset(),
                                buf + MON_URPC_CHANNEL_LEN,
                                MON_URPC_CHANNEL_LEN,
                                buf, MON_URPC_CHANNEL_LEN,
                                notify_cap, my_notify_cap, ep, iep);
    assert(err_is_ok(err));

    // Identify UMP frame for tracing
    struct frame_identity umpid;
    err = invoke_frame_identify(frame, &umpid);
    assert(err_is_ok(err));
    umpb->ump_state.chan.recvid = (uintptr_t)umpid.base;
    umpb->ump_state.chan.sendid =
        (uintptr_t)(umpid.base + MON_URPC_CHANNEL_LEN);

    // connect it to our request handlers
    err = intermon_init(&umpb->b, core_id);
    assert(err_is_ok(err));

    /* Send reply */
reply:
    assert(umpb != NULL);
    bind_monitor_reply_scc_cont(&umpb->b, err, chanid);
    return;

error:
    // NOTE(review): error path falls through to reply with umpb == NULL,
    // which the assert below would catch; currently it always aborts here
    assert(!"Argh"); // FIXME: cleanup!
    goto reply;
}

/**
 * \brief The monitor that proxied the request for one monitor to
 * setup a connection with another monitor gets the reply
 */
static void bind_monitor_reply_scc(struct intermon_binding *binding,
                                   errval_t err, chanid_t chan_id,
                                   coreid_t core_id)
{
    struct intermon_ump_ipi_binding *b =
        (struct intermon_ump_ipi_binding *)binding;

    // Create notify cap to that core
    struct capref notify_cap;
    err = notification_create_cap(chan_id, core_id, &notify_cap);
    assert(err == SYS_ERR_OK);

    // And assign it to the binding
    err = ipi_notify_set(&b->ipi_notify, notify_cap);
    assert(err_is_ok(err));
    // NOTE(review): this branch is unreachable when asserts are enabled
    // (the assert above already fired); dead code in debug builds
    if (err_is_fail(err)) {
        // XXX
        DEBUG_ERR(err, "Got error in bind monitor reply");
    }
}

/******* stack-ripped bind_monitor_proxy_scc *******/

static void bind_monitor_request_scc_handler(struct intermon_binding *b,
                                             struct intermon_msg_queue_elem *e);

// queued-send state: holds the message arguments while waiting for the
// binding's TX channel to become free
struct bind_monitor_request_scc_state {
    struct intermon_msg_queue_elem elem;
    struct intermon_bind_monitor_request_scc__args args;
};

/**
 * \brief attempts to forward a bind request; if the TX channel is busy,
 *        the arguments are queued for retry via the handler above.
 */
static void bind_monitor_request_scc_cont(struct intermon_binding *dst_binding,
                                          coreid_t src_core_id,
                                          intermon_caprep_t caprep,
                                          chanid_t chan_id,
                                          coreid_t core_id)
{
    errval_t err;

    err = dst_binding->tx_vtbl.
        bind_monitor_request_scc(dst_binding, NOP_CONT, src_core_id,
                                 caprep, chan_id, core_id);
    if (err_is_fail(err)) {
        if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            // channel busy: stash the arguments and enqueue for later send
            struct bind_monitor_request_scc_state *me =
                malloc(sizeof(struct bind_monitor_request_scc_state));
            assert(me != NULL);
            struct intermon_state *ist = dst_binding->st;
            assert(ist != NULL);
            me->args.core_id = src_core_id;
            me->args.cap = caprep;
            me->args.chan_id = chan_id;
            me->args.from_core_id = core_id;
            me->elem.cont = bind_monitor_request_scc_handler;

            err = intermon_enqueue_send(dst_binding, &ist->queue,
                                        get_default_waitset(),
                                        &me->elem.queue);
            assert(err_is_ok(err));
            return;
        }

        DEBUG_ERR(err, "forwarding bind request failed");
    }
}
/**
 * \brief Notification of a newly booted monitor.
 * Setup our connection and request the sender to proxy
 * the bind request to the monitor
 *
 * Allocates a URPC frame in this core's private memory window, maps and
 * clears it, sets up local notification endpoints and the UMP/IPI binding,
 * then asks the notifying monitor to proxy our bind request (carrying the
 * frame's caprep) to the new monitor.
 */
static void new_monitor_notify(struct intermon_binding *b,
                               coreid_t core_id)
{
    errval_t err;

    /* Setup the connection */
    // allocate the URPC frame inside this core's per-core memory window
    ram_set_affinity(SHARED_MEM_MIN + (PERCORE_MEM_SIZE * my_core_id),
                     SHARED_MEM_MIN + (PERCORE_MEM_SIZE * (my_core_id + 1)));
    struct capref frame;
    err = frame_alloc(&frame, MON_URPC_SIZE, NULL);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "frame_alloc failed");
        return; // FIXME: cleanup
    }
    ram_set_affinity(0, 0);     // Reset affinity

    void *buf;
    err = vspace_map_one_frame_attr(&buf, MON_URPC_SIZE, frame,
                                    VREGION_FLAGS_READ_WRITE_NOCACHE,
                                    NULL, NULL);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "vspace_map_one_frame failed");
        // NOTE(review): buf is uninitialized on this path, so this assert
        // reads an indeterminate value; marked XXX by the original author
        assert(buf); // XXX
    }

    // XXX: Clear the frame (kernel can't do it for us)
    memset(buf, 0, MON_URPC_SIZE);

    // Allocate my own notification caps
    struct capref ep, my_notify_cap;
    struct lmp_endpoint *iep;
    int chanid;
    err = endpoint_create(LMP_RECV_LENGTH, &ep, &iep);
    assert(err_is_ok(err));
    err = notification_allocate(ep, &chanid);
    assert(err == SYS_ERR_OK);
    err = notification_create_cap(chanid, my_core_id, &my_notify_cap);
    assert(err == SYS_ERR_OK);

    // init our end of the binding and channel: first half of the frame is
    // our receive channel, second half our send channel
    struct intermon_ump_ipi_binding *ump_binding =
        malloc(sizeof(struct intermon_ump_ipi_binding));
    assert(ump_binding != NULL);
    err = intermon_ump_ipi_init(ump_binding, get_default_waitset(),
                                buf, MON_URPC_CHANNEL_LEN,
                                buf + MON_URPC_CHANNEL_LEN,
                                MON_URPC_CHANNEL_LEN, NULL_CAP,
                                my_notify_cap, ep, iep);
    assert(err_is_ok(err));
    /* if (err_is_fail(err)) { */
    /*     cap_destroy(frame); */
    /*     return err_push(err, LIB_ERR_UMP_CHAN_BIND); */
    /* } */

    // Identify UMP frame for tracing
    struct frame_identity umpid = { .base = 0, .bits = 0 };
    err = invoke_frame_identify(frame, &umpid);
    assert(err_is_ok(err));
    ump_binding->ump_state.chan.recvid = (uintptr_t)umpid.base;
    ump_binding->ump_state.chan.sendid =
        (uintptr_t)(umpid.base + MON_URPC_CHANNEL_LEN);

    // connect the binding to our request handlers
    err = intermon_init(&ump_binding->b, core_id);
    assert(err_is_ok(err));

    /* Identify the frame cap */
    struct capability frame_cap;
    err = monitor_cap_identify(frame, &frame_cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "monitor_cap_identify failed");
        return; // FIXME: cleanup
    }

    intermon_caprep_t caprep;
    capability_to_caprep(&frame_cap, &caprep);

    /* reply to the sending monitor to proxy request */
    err = b->tx_vtbl.bind_monitor_proxy_scc(b, NOP_CONT, core_id,
                                            caprep, chanid, my_core_id);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "bind proxy request failed");
    }
}

/**
 * \brief installs the SCC-specific intermon receive handlers on a binding.
 *
 * \param b the intermon binding to initialize
 *
 * \returns SYS_ERR_OK (always)
 */
errval_t arch_intermon_init(struct intermon_binding *b)
{
    b->rx_vtbl.bind_monitor_request_scc = bind_monitor_request_scc;
    b->rx_vtbl.bind_monitor_reply_scc = bind_monitor_reply_scc;
    b->rx_vtbl.bind_monitor_proxy_scc = bind_monitor_proxy_scc;
    b->rx_vtbl.new_monitor_notify = new_monitor_notify;

    return SYS_ERR_OK;
}
/**
 * \brief Initialise a new UMP channel and initiate a binding
 *
 * \param uc              Storage for channel state
 * \param cont            Continuation for bind completion/failure
 * \param qnode           Storage for an event queue node (used for queuing bind request)
 * \param iref            IREF to which to bind
 * \param monitor_binding Monitor binding to use
 * \param inchanlen       Size of incoming channel, in bytes (rounded to #UMP_MSG_BYTES)
 * \param outchanlen      Size of outgoing channel, in bytes (rounded to #UMP_MSG_BYTES)
 * \param notify_cap      Capability to use for notifications, or #NULL_CAP
 */
errval_t ump_chan_bind(struct ump_chan *uc, struct ump_bind_continuation cont,
                       struct event_queue_node *qnode, iref_t iref,
                       struct monitor_binding *monitor_binding,
                       size_t inchanlen, size_t outchanlen,
                       struct capref notify_cap)
{
    errval_t err;

    // round up channel sizes to message size
    inchanlen = ROUND_UP(inchanlen, UMP_MSG_BYTES);
    outchanlen = ROUND_UP(outchanlen, UMP_MSG_BYTES);

    // compute size of frame needed and allocate it
    // (frame_alloc may round up further; framesize is updated in place)
    size_t framesize = inchanlen + outchanlen;
#ifdef __scc__
    // SCC: allocate the frame inside this core's private memory window
    ram_set_affinity(SHARED_MEM_MIN + (PERCORE_MEM_SIZE * disp_get_core_id()),
                     SHARED_MEM_MIN + (PERCORE_MEM_SIZE * (disp_get_core_id() + 1)));
#endif
    err = frame_alloc(&uc->frame, framesize, &framesize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);
    }
#ifdef __scc__
    ram_set_affinity(0, 0);
#endif

    // map it in
    void *buf;
    err = vspace_map_one_frame_attr(&buf, framesize, uc->frame, UMP_MAP_ATTR,
                                    NULL, &uc->vregion);
    if (err_is_fail(err)) {
        // unwind: release the frame allocated above
        cap_destroy(uc->frame);
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }

    // initialise channel state: incoming half first, outgoing half after it
    err = ump_chan_init(uc, buf, inchanlen, (char *)buf + inchanlen,
                        outchanlen);
    if (err_is_fail(err)) {
        // unwind in reverse order: mapping first, then the frame cap
        vregion_destroy(uc->vregion);
        cap_destroy(uc->frame);
        return err;
    }

    // Ids for tracing
    struct frame_identity id;
    err = invoke_frame_identify(uc->frame, &id);
    if (err_is_fail(err)) {
        vregion_destroy(uc->vregion);
        cap_destroy(uc->frame);
        return err_push(err, LIB_ERR_FRAME_IDENTIFY);
    }
    uc->recvid = (uintptr_t)id.base;
    uc->sendid = (uintptr_t)(id.base + inchanlen);

    // store bind args so send_bind_cont can issue the request later
    uc->bind_continuation = cont;
    uc->monitor_binding = monitor_binding;
    uc->iref = iref;
    uc->inchanlen = inchanlen;
    uc->outchanlen = outchanlen;
    uc->notify_cap = notify_cap;

    // wait for the ability to use the monitor binding
    uc->connstate = UMP_BIND_WAIT;
    event_mutex_enqueue_lock(&monitor_binding->mutex, qnode,
                             MKCLOSURE(send_bind_cont, uc));

    return SYS_ERR_OK;
}