static errval_t vsic_read(struct storage_vsic *vsic, struct storage_vsa *vsa,
                          off_t offset, size_t size, void *buffer)
{
    assert(vsic != NULL);
    assert(vsa != NULL);
    assert(buffer != NULL);
    struct ahci_vsic *mydata = vsic->data;
    uint8_t *buf = NULL;
    size_t bytes_read, toread = STORAGE_VSIC_ROUND(vsic, size);

    errval_t err = mydata->ata_rw28_rpc.vtbl.read_dma(&mydata->ata_rw28_rpc,
                                                      toread, offset,
                                                      &buf, &bytes_read);
    if (err_is_fail(err)) USER_PANIC_ERR(err, "read_dma rpc");
    if (!buf) USER_PANIC("read_dma -> !buf");
    if (bytes_read != toread) USER_PANIC("read_dma -> read_size != size");

    // XXX: Copy from DMA buffer to user buffer
    memcpy(buffer, buf, size);
    free(buf);

    return SYS_ERR_OK;
}
// initialize tcp connection for the client
int tcp_client_bm_init(char *ip_addr_str, uint16_t server_port)
{
    err_t r;

    // Prepare the IP address for use
    assert(ip_addr_str != NULL);
    struct in_addr addr;
    if (inet_aton(ip_addr_str, &addr) == 0) {
        printf("Invalid IP addr: %s\n", ip_addr_str);
        USER_PANIC("Invalid IP address %s", ip_addr_str);
        return -1;
    }
    struct ip_addr ip;
    ip.addr = addr.s_addr;

    // Prepare tcp_pcb
    client_pcb = tcp_new();
    if (client_pcb == NULL) {
        USER_PANIC("tcp_new failed");
        return -1;
    }

    // lwIP expects the port in host byte order, so no htons() here
    r = tcp_connect(client_pcb, &ip, server_port, tcp_is_connected_client);
    if (r != ERR_OK) {
        USER_PANIC("tcp_connect failed");
        return (r);
    }

    // Connection attempt is now in flight; tcp_is_connected_client is
    // called back once it is established
    printf("TCP benchmark client started\n");
    return (0);
} // end function: tcp_client_bm_init
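/*
 * Call sketch (hypothetical address and port, not from the original source):
 * start the benchmark client against a server at 10.0.0.1:5555. The function
 * panics on any setup failure, so a zero return only means the connection
 * attempt is in flight; completion is signalled via tcp_is_connected_client.
 */
static void example_start_bm_client(void)
{
    if (tcp_client_bm_init("10.0.0.1", 5555) != 0) {
        USER_PANIC("tcp_client_bm_init failed");
    }
}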
void qd_main(void)
{
    // Validate some settings
    if (qi == -1) {
        USER_PANIC("For queue driver the queue= parameter has to be specified "
                   "on the command line!");
    }

    if (use_interrupts && standalone && !use_msix) {
        USER_PANIC("Interrupts with standalone queue driver only work if MSI-X "
                   "is enabled.");
    }

#ifndef LIBRARY
    if (standalone) {
        connect_to_mngif();
    } else {
#endif
        idc_request_device_info();
#ifndef LIBRARY
    }

    if (use_interrupts) {
        eventloop_ints();
    } else {
        eventloop();
    }
#endif
}
int main(int argc, char *argv[])
{
    errval_t err;

    vfs_init();

    err = vfs_mkdir("/filetests");
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "vfs_mkdir failed");
    }

    /* Create a file with a lot of data */
    FILE *fh = fopen("/filetests/fread_test.dat", "w");
    if (!fh) {
        USER_PANIC("fopen failed");
    }
    for (int i = 0; i < AMOUNT; i++) {
        fprintf(fh, "h");
    }
    fclose(fh);

    /* Read out the data in chunks; with an item size of 1, fread returns
     * the number of bytes read */
    fh = fopen("/filetests/fread_test.dat", "r");
    if (!fh) {
        USER_PANIC("fopen failed");
    }
    char *ptr = malloc(AMOUNT);
    assert(ptr);
    size_t size = fread(ptr, 1, 10, fh);
    if (size != 10) {
        USER_PANIC("fread did not read full amount");
    }
    size = fread(ptr, 1, AMOUNT - 10, fh);
    if (size != AMOUNT - 10) {
        USER_PANIC("fread did not read full amount");
    }
    size = fread(ptr, 1, AMOUNT, fh);
    if (size != 0) {
        USER_PANIC("fread read past end of file");
    }
    fclose(fh);
    free(ptr);

    printf("client done\n");
    return 0;
}
static void export_cb(void *st, errval_t err, iref_t iref)
{
    size_t size = 0;
    char *service_name = NULL;
    char *driver_name = (char *) st;

    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Exporting basic interface failed.\n");
    }

    // build service name as driver_name.SERVICE_SUFFIX
    size = snprintf(NULL, 0, "%s.%s", driver_name, SERVICE_SUFFIX);
    service_name = (char *) malloc(size + 1);
    if (service_name == NULL) {
        USER_PANIC("Error allocating memory.");
    }
    snprintf(service_name, size + 1, "%s.%s", driver_name, SERVICE_SUFFIX);

    SERIAL_DEBUG("About to register basic interface '%s' at nameservice.\n",
                 service_name);

    // register basic serial driver service at nameservice
    err = nameservice_register(service_name, iref);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Registering basic interface at "
                            "nameserver failed.");
    }
    free(service_name);
}
// function to handle incoming mac address requests
static void get_mac_addr_qm(struct net_queue_manager_binding *cc,
                            uint64_t queueid)
{
    struct q_entry entry;

    memset(&entry, 0, sizeof(struct q_entry));
    entry.handler = wrapper_send_mac_addr_response;
    entry.binding_ptr = (void *) cc;
    struct client_closure *ccl = (struct client_closure *) cc->st;
    assert(ccl->queueid == queueid);

    entry.plist[0] = queueid;
    entry.plist[1] = get_mac_addr_from_device();
    // queueid, hwaddr

    struct waitset *ws = get_default_waitset();
    int passed_events = 0;
    while (can_enqueue_cont_q(ccl->q) == false) {
        // USER_PANIC("queue full, can't go further\n");
        if (passed_events > 5) {
            USER_PANIC("queue full, can't go further\n");
            // return CONT_ERR_NO_MORE_SLOTS;
        }
        event_dispatch_debug(ws);
        ++passed_events;
    }

    enqueue_cont_q(ccl->q, &entry);
}
// find out the proper filter manager based on requested type
static struct filters_tx_vtbl *lookup_filt_mng(uint8_t filt_mng_type)
{
    switch (filt_mng_type) {
        case 0: // software filter manager
            return get_soft_filt_mng_sign();
            break;

        case 1: // e10k hardware filter manager
            return get_e10k_filt_mng_sign();
            break;

        /*
        case 2: // Solarflare filter manager
            return NULL;
            break;
        */

        default: // Unknown filter manager
            USER_PANIC("Filter Manager type %"PRIu8" not supported\n",
                       filt_mng_type);
            abort();
            return NULL;
            break;
    } // end switch : for filter type
    return NULL;
} // end function: lookup_filt_mng
/**
 * \brief initializes and allocates a nested OpenMP lock
 *
 * \param arg returned pointer to the lock
 *
 * The effect of these routines is to initialize the lock to the unlocked state;
 * that is, no task owns the lock. In addition, the nesting count for a nestable
 * lock is set to zero.
 */
void omp_init_nest_lock(omp_nest_lock_t *arg)
{
    struct __omp_nested_lock *nlock = arg;
#ifdef BARRELFISH
    switch (g_bomp_state->backend_type) {
        case BOMP_BACKEND_BOMP:
            thread_mutex_init(&nlock->mutex);
            break;
        case BOMP_BACKEND_XOMP:
            assert(!"NYI");
            break;
        default:
            USER_PANIC("Invalid Backend Type");
            break;
    }
#else
    // arg already points at caller-provided storage; no separate allocation
    pthread_mutex_init(&nlock->mutex, NULL);
#endif
    nlock->owner = NULL;
    nlock->count = 0;
    nlock->initialized = 1;
}
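/*
 * Usage sketch (illustrative only, not part of the original source): the
 * standard OpenMP nestable-lock API that omp_init_nest_lock() backs. The
 * task holding a nestable lock may re-acquire it; the lock is released only
 * once the nesting count drops back to zero. Names are hypothetical.
 */
static omp_nest_lock_t example_nest_lock;

static void update_recursive(int depth)
{
    omp_set_nest_lock(&example_nest_lock);    // nesting count +1
    if (depth > 0) {
        update_recursive(depth - 1);          // re-acquire without deadlock
    }
    omp_unset_nest_lock(&example_nest_lock);  // nesting count -1
}

static void example_nest_lock_usage(void)
{
    omp_init_nest_lock(&example_nest_lock);
#pragma omp parallel
    update_recursive(3);
    omp_destroy_nest_lock(&example_nest_lock);
}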
size_t fread(void *ptr, size_t size, size_t nmemb, FILE *stream)
{
    if (size == 0 || nmemb == 0) {
        return 0;
    }

    lock_stream(stream);

    if (stream->unget_pos) {
        USER_PANIC("handling unget not implemented");
    }

    size_t actual_size = size * nmemb; // Total number of bytes requested
    unsigned char *p = ptr;
    size_t ret = stream->read_fn(p, stream->current_pos, actual_size,
                                 stream->handle);
    stream->current_pos += ret;
    if (ret != actual_size) {
        // Short read: mark end-of-file
        stream->eof = 1;
    }

    unlock_stream(stream);
    // fread() returns the number of complete items read, not bytes
    return ret / size;
}
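/*
 * Usage sketch (illustrative, not part of the original source): fread()
 * returns the number of complete items read, not bytes. To read N bytes and
 * get the byte count back, pass an item size of 1; with size == N and
 * nmemb == 1 a successful read returns just 1. The helper name is
 * hypothetical.
 */
static size_t read_exactly(FILE *f, void *buf, size_t nbytes)
{
    // item size 1 => return value equals the number of bytes read
    return fread(buf, 1, nbytes, f);
}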
static void report_register_buffer_result(struct net_queue_manager_binding *cc,
                                          errval_t err, uint64_t queueid,
                                          uint64_t buffer_id)
{
    struct q_entry entry;
    memset(&entry, 0, sizeof(struct q_entry));

    entry.handler = send_new_buffer_id;
    entry.binding_ptr = (void *) cc;
    struct client_closure *ccl = (struct client_closure *) cc->st;

    entry.plist[0] = err;
    entry.plist[1] = queueid;
    entry.plist[2] = buffer_id;
    // error, queue_id, buffer_id

    struct waitset *ws = get_default_waitset();
    int passed_events = 0;
    while (can_enqueue_cont_q(ccl->q) == false) {
        // USER_PANIC("queue full, can't go further\n");
        if (passed_events > 5) {
            USER_PANIC("queue full, can't go further\n");
            // return CONT_ERR_NO_MORE_SLOTS;
        }
        event_dispatch_debug(ws);
        ++passed_events;
    }

    enqueue_cont_q(ccl->q, &entry);
}
errval_t rcap_db_remote_recursive_lock_req(struct capability *cap,
                                           coreid_t from_core,
                                           recordid_t ccast_recordid)
{
    USER_PANIC("NYI");
    return LIB_ERR_NOT_IMPLEMENTED;
}
errval_t domain_thread_move_to(struct thread *thread, coreid_t core_id)
{
    assert(thread == thread_self());
    dispatcher_handle_t mydisp = disp_disable();
    struct dispatcher_generic *disp_gen = get_dispatcher_generic(mydisp);
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(mydisp);

    struct thread *next = thread->next;
    thread_remove_from_queue(&disp_gen->runq, thread);

    errval_t err = domain_wakeup_on_coreid_disabled(core_id, thread, mydisp);
    if (err_is_fail(err)) {
        thread_enqueue(thread, &disp_gen->runq);
        disp_enable(mydisp);
        return err;
    }

    // run the next thread, if any
    if (next != thread) {
        disp_gen->current = next;
        disp_resume(mydisp, &next->regs);
    } else {
        disp_gen->current = NULL;
        disp->haswork = havework_disabled(mydisp);
        disp_yield_disabled(mydisp);
    }

    USER_PANIC("should never be reached");
}
// fill up the lo_map_tbl with valid entries
// Assumptions:
//  * client_no starts at 0
//  * First connection is RX (ie 0) and second is TX (ie 1)
static void populate_lo_mapping_table(int cur_cl_no,
                                      struct buffer_descriptor *buffer_ptr)
{
    // sanity checks
    printf("populate called for client %d\n", cur_cl_no);
    assert(is_loopback_device); // ensure this is the loopback device
    assert(cur_cl_no == lo_tbl_idx); // ensure monotonic increase
    assert(lo_tbl_idx < 4); // we currently support only 2 applications for lo
    // TODO: validate the buffer types
    assert(RX_BUFFER_ID == 0); // ensure RX is 0
    if ((lo_tbl_idx % 4) != (buffer_ptr->role)) {
        printf(" tbl_idx %d, role %"PRIu8"\n", lo_tbl_idx, buffer_ptr->role);
    }
    assert((lo_tbl_idx % 4) == (buffer_ptr->role));

    // populate the table entries
    lo_map_tbl[lo_tbl_idx].tx_cl_no = -1;
    lo_map_tbl[lo_tbl_idx].rx_cl_no = -1;
    lo_map_tbl[lo_tbl_idx].lo_rx_buf = buffer_ptr;

    switch (lo_tbl_idx) {
        case 0:
        case 2:
            // intermediate state, nothing to do!
            break;

        case 1:
            printf("populate case 1 crossing 0 %d\n", cur_cl_no);
            // Assuming only one app, so mapping its TX to its RX
            lo_map_tbl[lo_tbl_idx].tx_cl_no = cur_cl_no;
            lo_map_tbl[lo_tbl_idx].rx_cl_no = 0;
            lo_map_tbl[lo_tbl_idx].lo_rx_buf = lo_map_tbl[0].lo_rx_buf;
            break;

        case 3:
            // Assuming there are two apps, so map them to each other
            // mapping 3 to 0
            lo_map_tbl[lo_tbl_idx].tx_cl_no = cur_cl_no;
            lo_map_tbl[lo_tbl_idx].rx_cl_no = 0;
            lo_map_tbl[lo_tbl_idx].lo_rx_buf = lo_map_tbl[0].lo_rx_buf;

            // mapping 1 to 2
            lo_map_tbl[1].tx_cl_no = 1;
            lo_map_tbl[1].rx_cl_no = 2;
            lo_map_tbl[1].lo_rx_buf = lo_map_tbl[2].lo_rx_buf;
            break;

        default:
            USER_PANIC("More than two clients are not supported for lo");
            abort();
            break;
    } // end switch

    ++lo_tbl_idx;
} // end function: populate_lo_mapping_table
/**
 * \brief Query existing page mapping
 *
 * \param pmap      The pmap object
 * \param vaddr     The virtual address to query
 * \param retvaddr  Returns the base virtual address of the mapping
 * \param retsize   Returns the actual size of the mapping
 * \param retcap    Returns the cap mapped at this address
 * \param retoffset Returns the offset within the cap that is mapped
 * \param retflags  Returns the flags for this mapping
 *
 * All of the ret parameters are optional.
 */
static errval_t lookup(struct pmap *pmap, genvaddr_t vaddr,
                       genvaddr_t *retvaddr, size_t *retsize,
                       struct capref *retcap, genvaddr_t *retoffset,
                       vregion_flags_t *retflags)
{
    USER_PANIC("NYI");
    return 0;
}
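/*
 * Call sketch (hypothetical; assumes this function is installed in the
 * pmap's function table as pmap->f.lookup, matching how the other pmap
 * operations are wired up): once implemented, query which frame cap backs a
 * given virtual address. Per the doc comment above, any ret parameter that
 * is not needed may be passed as NULL.
 */
static void example_pmap_lookup(struct pmap *pmap, genvaddr_t vaddr)
{
    struct capref frame;
    genvaddr_t offset;
    errval_t err = pmap->f.lookup(pmap, vaddr, NULL, NULL, &frame, &offset,
                                  NULL);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "pmap lookup failed");
    }
}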
static errval_t mp_destroy(struct descq_binding *b, errval_t *err)
{
    struct descq *q = (struct descq *) b->st;
    *err = q->f.destroy(q);
    USER_PANIC("Destroy NYI \n");
    return SYS_ERR_OK;
}
static errval_t unpack_cpio(struct dirent *root, void *data, size_t len)
{
    if (!cpio_archive_valid(data, len)) {
        USER_PANIC("invalid CPIO archive");
    }

    cpio_generic_header_t h;
    cpio_visit(data, len, cpio_entry_handler, &h, root);
    return SYS_ERR_OK;
}
// get entry from the mapping table
static inline struct multihop_chan *multihop_chan_mappings_lookup(
        multihop_vci_t vci)
{
    assert(is_mapping_table_initialized);
    struct multihop_chan *chan_state = collections_hash_find(mappings, vci);

    if (chan_state == NULL) {
        USER_PANIC("invalid virtual circuit identifier in multi-hop channel");
    }

    return chan_state;
}
/* set an initial default environment for our boot-time children */
static void init_environ(void)
{
    int r;

    /* PATH=/arch/sbin */
    char pathstr[64];
    snprintf(pathstr, sizeof(pathstr), "/%s/sbin",
             cpu_type_to_archstr(CURRENT_CPU_TYPE));
    pathstr[sizeof(pathstr) - 1] = '\0';
    r = setenv("PATH", pathstr, 0);
    if (r != 0) {
        USER_PANIC("failed to set PATH");
    }

    /* HOME=/ */
    r = setenv("HOME", "/", 0);
    if (r != 0) {
        USER_PANIC("failed to set HOME");
    }
}
// checks if there are elements in the list that should be merged
static void check_consistency(struct memory_list *region)
{
    struct memory_ele *ele = region->buffers;
    if (ele == NULL) {
        // empty list: nothing to check
        return;
    }
    while (ele->next != NULL) {
        if (ele->offset + ele->length == ele->next->offset) {
            printf("offset=%lu length=%lu \n", ele->offset, ele->length);
            dump_list(region);
            USER_PANIC("Found entry that should be merged \n");
        }
        ele = ele->next;
    }
}
/** Tell card driver to stop this queue. */
static void idc_terminate_queue(void)
{
    errval_t r;

    INITDEBUG("idc_terminate_queue()\n");

    if (!standalone) {
        USER_PANIC("Terminating monolithic driver is not a good idea");
    }

    r = e10k_terminate_queue__tx(binding, NOP_CONT, qi);
    // TODO: handle busy
    assert(err_is_ok(r));
}
void cd_register_queue_memory(struct e10k_binding *b, uint8_t queue,
                              struct capref tx, struct capref txhwb,
                              struct capref rx, uint32_t rxbufsz,
                              int16_t msix_intvec, uint8_t msix_intdest,
                              bool use_ints, bool use_rsc_)
{
    USER_PANIC("Should not be called");
}
/**
 * \brief registers the Xeon Phi driver card with the Xeon Phi Manager
 *
 * \param svc_iref  iref of the own exported Xeon Phi driver interface
 * \param id        returns the assigned Xeon Phi card ID
 * \param num       returns the size of the cards array
 * \param irefs     returns array of irefs to the other cards
 *
 * NOTE: this is a blocking function. The function will only return after
 *       the Xeon Phi manager connection has been fully established and the
 *       registration protocol has been executed.
 *
 * \returns SYS_ERR_OK on success
 *          errval on failure
 */
errval_t xeon_phi_manager_client_register(iref_t svc_iref, uint8_t *id,
                                          uint8_t *num, iref_t **irefs)
{
    errval_t err, msgerr;

    if (strcmp(disp_name(), "xeon_phi") != 0) {
        USER_PANIC("client register called on non xeon phi driver");
        return -1;
    }

    if (conn_state >= XPM_STATE_REGISTER_OK) {
        return SYS_ERR_OK;
    }

    DEBUG_XPMC("Registration with Xeon Phi Manager service.\n");

    err = xpm_bind();
    if (err_is_fail(err)) {
        return err;
    }

    xpm_reg_data.svc_iref = svc_iref;

    xeon_phi_manager_cards_t cards;
    err = xpm_rpc_client.vtbl.register_driver(&xpm_rpc_client, svc_iref, id,
                                              &cards, &msgerr);
    if (err_is_fail(err)) {
        return err;
    }
    if (err_is_fail(msgerr)) {
        return msgerr;
    }

    conn_state = XPM_STATE_REGISTER_OK;

    iref_t *cardiref = calloc(cards.num, sizeof(iref_t));
    assert(cardiref);
    for (uint32_t i = 0; i < cards.num; ++i) {
        cardiref[i] = ((iref_t *) &cards.card0)[i];
    }

    *irefs = cardiref;
    *num = cards.num;

    return SYS_ERR_OK;
}
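/*
 * Call sketch (hypothetical, based only on the signature and doc comment
 * above): register this driver's exported iref and receive the assigned card
 * ID plus the irefs of the other cards. The call blocks until registration
 * has completed.
 */
static void example_register_with_manager(iref_t my_svc_iref)
{
    uint8_t card_id, num_cards;
    iref_t *card_irefs = NULL;

    errval_t err = xeon_phi_manager_client_register(my_svc_iref, &card_id,
                                                    &num_cards, &card_irefs);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "registration with Xeon Phi manager failed");
    }
    // card_irefs[0..num_cards-1] hold the irefs of the other cards; the
    // array was allocated with calloc() above, so the caller frees it
    free(card_irefs);
}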
static void span_slave_done_request(struct interdisp_binding *b)
{
    USER_PANIC("shouldn't be called");

    struct waitset_chanstate *cs = malloc(sizeof(struct waitset_chanstate));

    // Signal the default waitset of this event
    struct event_closure closure = {
        .handler = span_slave_done_handler,
        .arg = cs,
    };
    waitset_chanstate_init(cs, CHANTYPE_EVENT_QUEUE);
    errval_t err = waitset_chan_trigger_closure(get_default_waitset(), cs,
                                                closure);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "Triggering default waitset");
    }
}
/**
 * \brief destroys a simple OpenMP lock
 *
 * \param arg OpenMP lock to be destroyed (set to zero)
 *
 * The effect of these routines is to change the state of the lock to uninitialized.
 */
void omp_destroy_lock(omp_lock_t *arg)
{
    struct __omp_lock *lock = (struct __omp_lock *) arg;

    lock->initialized = 0x0;

#ifdef BARRELFISH
    switch (g_bomp_state->backend_type) {
        case BOMP_BACKEND_BOMP:
            break;
        case BOMP_BACKEND_XOMP:
            assert(!"NYI");
            break;
        default:
            USER_PANIC("Invalid Backend Type");
            break;
    }
#else
    pthread_mutex_destroy(&lock->mutex);
#endif
}
/**
 * \brief initializes and allocates a simple OpenMP lock
 *
 * \param lock pointer to the lock to initialize
 *
 * The effect of these routines is to initialize the lock to the unlocked state;
 * that is, no task owns the lock.
 */
void omp_init_lock(omp_lock_t *lock)
{
#ifdef BARRELFISH
    switch (g_bomp_state->backend_type) {
        case BOMP_BACKEND_BOMP:
            thread_mutex_init(&lock->mutex);
            break;
        case BOMP_BACKEND_XOMP:
            assert(!"NYI");
            break;
        default:
            USER_PANIC("Invalid Backend Type");
            break;
    }
#else
    pthread_mutex_init(&lock->mutex, NULL);
#endif
    lock->initialized = 0x1;
}
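/*
 * Usage sketch (illustrative only, not part of the original source): the
 * standard OpenMP simple-lock API that omp_init_lock()/omp_destroy_lock()
 * implement. Unlike a nestable lock, re-acquiring a simple lock from the
 * task that already holds it deadlocks. Names are hypothetical.
 */
static omp_lock_t example_lock;
static int shared_counter;

static void example_lock_usage(void)
{
    omp_init_lock(&example_lock);
#pragma omp parallel for
    for (int i = 0; i < 1000; i++) {
        omp_set_lock(&example_lock);   // mutual exclusion around the update
        shared_counter++;
        omp_unset_lock(&example_lock);
    }
    omp_destroy_lock(&example_lock);
}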
static void rx_myrpc_call(struct xmplrpc_binding *b, int i)
{
    debug_printf("server: received myrpc_call: %d\n", i);

    // prepare and send reply
    struct server_state *st = malloc(sizeof(struct server_state));
    if (st == NULL) {
        USER_PANIC("cannot reply, out of memory");
    }
    st->b = b;
    st->s = malloc(20);
    if (st->s != NULL) {
        snprintf(st->s, 20, "!%d!", i);
    }

    send_myrpc_response(st);
}
static void monitor_bind_ump_reply(struct monitor_binding *dom_binding,
                                   uintptr_t my_mon_id, uintptr_t domain_id,
                                   errval_t msgerr, struct capref notify)
{
    errval_t err;

    struct remote_conn_state *conn = remote_conn_lookup(my_mon_id);
    if (conn == NULL) {
        USER_PANIC("invalid mon_id in UMP bind reply");
        return;
    }

    uintptr_t your_mon_id = conn->mon_id;
    struct intermon_binding *mon_binding = conn->mon_binding;

    if (err_is_ok(msgerr)) {
        /* Connection accepted */
        conn->domain_id = domain_id;
        conn->domain_binding = dom_binding;
    } else {
        //error:
        /* Free the cap */
        err = cap_destroy(conn->x.ump.frame);
        assert(err_is_ok(err));

        err = remote_conn_free(my_mon_id);
        assert(err_is_ok(err));
    }

    // Identify notify cap
    struct capability capability;
    err = monitor_cap_identify(notify, &capability);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "monitor_cap_identify failed, ignored");
        return;
    }
    assert(capability.type == ObjType_Notify_RCK
           || capability.type == ObjType_Notify_IPI
           || capability.type == ObjType_Null);
    /* assert(capability.u.notify.coreid == my_core_id); */

    bind_ump_reply_cont(mon_binding, your_mon_id, my_mon_id, msgerr,
                        capability);
}
// initializes the hardware independent part of device manager
errval_t init_device_manager(char *dev_name, uint64_t valid_queues,
                             uint8_t filt_mng_type)
{
    // making sure that parameters passed are sensible
    assert(dev_name != NULL);
    assert(valid_queues > 0);

    NDM_DEBUG("init_device_manager: called for dev[%s] with %"PRIu64" queues\n",
              dev_name, valid_queues);

    // making sure that this is the first call to this function
    assert(qlist == NULL);
    assert(total_queues == 0);

    // set the total queues
    total_queues = valid_queues;

    // TODO: memory from local NUMA domain
    qlist = (struct NIC_q_closure *) malloc(sizeof(struct NIC_q_closure)
                                            * total_queues);
    if (qlist == NULL) {
        USER_PANIC("init_dev_mng: Not enough memory (malloc failed)\n");
        return PORT_ERR_NOT_ENOUGH_MEMORY;
    }

    // Based on what device it is, choose proper filter_manager
    struct filters_tx_vtbl *filt_mng_ptr = lookup_filt_mng(filt_mng_type);

    // initialize closures for all queues
    memset(qlist, 0, (sizeof(struct NIC_q_closure) * total_queues));
    for (qid_t i = 0; i < total_queues; ++i) {
        qlist[i].qid = i;
        qlist[i].filt_mng = filt_mng_ptr;
    } // for each queue

    // Also, for shared queue (qid = 0), use soft_filt_mng
    qlist[0].filt_mng = lookup_filt_mng(0);

    return init_ports_service(dev_name);
    // return SYS_ERR_OK;
} // end function: init_device_manager
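/*
 * Call sketch (hypothetical device name and queue count, based on the
 * signature above and the type codes in lookup_filt_mng()): bring up the
 * device manager for a NIC with 8 valid queues and the e10k hardware filter
 * manager (type 1). Queue 0 always falls back to the software filter
 * manager, as set up above.
 */
static void example_init_device_manager(void)
{
    errval_t err = init_device_manager("e10k", 8, 1);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "init_device_manager failed");
    }
}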
void remove_vnode(struct vnode *root, struct vnode *item)
{
    assert(root->is_vnode);
    struct vnode *walk = root->u.vnode.children;
    struct vnode *prev = NULL;
    while (walk) {
        if (walk == item) {
            if (prev) {
                prev->next = walk->next;
                return;
            } else {
                root->u.vnode.children = walk->next;
                return;
            }
        }
        prev = walk;
        walk = walk->next;
    }
    USER_PANIC("Should not get here");
}
static err_t idc_redirect(struct ip_addr *local_ip, u16_t local_port,
                          struct ip_addr *remote_ip, u16_t remote_port,
                          net_ports_port_type_t port_type)
{
    if (is_owner) {
        // redirecting doesn't make sense if we are the owner
        return ERR_USE; // TODO: correct error
    }

    errval_t msgerr;
    // errval_t err;

    USER_PANIC("Pause: NYI");
    abort();

#if 0
    /* getting the proper buffer id's here */
    err = net_ports_rpc.vtbl.redirect(&net_ports_rpc, port_type,
                                      local_ip->addr, local_port,
                                      remote_ip->addr, remote_port,
                                      /* buffer for RX */
                                      ((struct client_closure_NC *)
                                       driver_connection[RECEIVE_CONNECTION]->st)->
                                          buff_ptr->buffer_id,
                                      /* buffer for TX */
                                      ((struct client_closure_NC *)
                                       driver_connection[TRANSMIT_CONNECTION]->st)->
                                          buff_ptr->buffer_id,
                                      &msgerr);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "error sending redirect");
    }
#endif // 0

    if (msgerr == PORT_ERR_IN_USE) {
        return ERR_USE;
    } else if (msgerr == PORT_ERR_REDIRECT) {
        return ERR_USE; // TODO: correct error
    }

    // FIXME: other errors?
    return ERR_OK;
}