//Update instructions rofl_result_t __of1x_update_instructions(of1x_instruction_group_t* group, of1x_instruction_group_t* new_group){ //Apply Actions if(__of1x_update_apply_actions(&group->instructions[OF1X_IT_APPLY_ACTIONS].apply_actions, new_group->instructions[OF1X_IT_APPLY_ACTIONS].apply_actions)!=ROFL_SUCCESS) return ROFL_FAILURE; //Make sure apply actions inst is marked as NULL, so that is not released platform_memset(&new_group->instructions[OF1X_IT_APPLY_ACTIONS],0,sizeof(of1x_instruction_t)); //Write actions if(__of1x_update_write_actions(&group->instructions[OF1X_IT_WRITE_ACTIONS].write_actions, new_group->instructions[OF1X_IT_WRITE_ACTIONS].write_actions) != ROFL_SUCCESS) return ROFL_FAILURE; //Make sure write actions inst is marked as NULL, so that is not freed platform_memset(&new_group->instructions[OF1X_IT_WRITE_ACTIONS],0,sizeof(of1x_instruction_t)); //Static ones //TODO: METADATA && EXPERIMENTER //Static stuff group->instructions[OF1X_IT_CLEAR_ACTIONS] = new_group->instructions[OF1X_IT_CLEAR_ACTIONS]; group->instructions[OF1X_IT_GOTO_TABLE] = new_group->instructions[OF1X_IT_GOTO_TABLE]; //Static stuff group->num_of_instructions = new_group->num_of_instructions; return ROFL_SUCCESS; }
/*
 * Unload the VMM, then release every previously added module regardless of
 * whether the unload itself succeeded (the module list is reset either way).
 * Returns BF_IOCTL_SUCCESS on success, BF_IOCTL_FAILURE otherwise.
 */
static int64_t ioctl_unload_vmm(void)
{
    int64_t idx;
    int64_t rc;
    int64_t result = BF_IOCTL_SUCCESS;

    rc = common_unload_vmm();
    if (rc != BF_SUCCESS) {
        ALERT("IOCTL_UNLOAD_VMM: common_unload_vmm failed: %p - %s\n",
              (void *)rc, ec_to_str(rc));
        result = BF_IOCTL_FAILURE;
    }

    /* Free the modules even on failure, so state is consistent afterwards */
    for (idx = 0; idx < g_num_pmodules; idx++)
        platform_free_rwe(pmodules[idx].data, pmodules[idx].size);

    g_num_pmodules = 0;
    platform_memset(&pmodules, 0, sizeof(pmodules));

    if (result == BF_IOCTL_SUCCESS)
        DEBUG("IOCTL_UNLOAD_VMM: succeeded\n");

    return result;
}
//LSIs dpid_list_t* physical_switch_get_all_lsi_dpids(void){ int i,j; dpid_list_t* list; list = platform_malloc_shared(sizeof(dpid_list_t)); //Prevent management actions to screw the walk through the LSIs platform_mutex_lock(psw->mutex); //Set the number of elements list->num_of_lsis = psw->num_of_logical_switches; //Allocate the list space list->dpids = platform_malloc_shared(sizeof(uint64_t)*list->num_of_lsis); if(!list->dpids){ platform_mutex_unlock(psw->mutex); return NULL; } //Fill it with 0s platform_memset(list->dpids,0,sizeof(uint64_t)*list->num_of_lsis); for(i=0,j=0;i<PHYSICAL_SWITCH_MAX_LS;i++){ if(psw->logical_switches[i]){ list->dpids[j] = psw->logical_switches[i]->dpid; j++; } } platform_mutex_unlock(psw->mutex); return list; }
//Init rofl_result_t physical_switch_init(){ ROFL_PIPELINE_DEBUG("Initializing physical switch\n"); //Allocate memory for the physical switch structure psw = platform_malloc_shared(sizeof(physical_switch_t)); if( unlikely(psw==NULL) ) return ROFL_FAILURE; psw->mutex = platform_mutex_init(NULL); if(!psw->mutex) return ROFL_FAILURE; platform_memset(psw->logical_switches, 0, sizeof(psw->logical_switches)); psw->num_of_logical_switches = 0; platform_memset(psw->physical_ports, 0, sizeof(psw->physical_ports)); platform_memset(psw->tunnel_ports, 0, sizeof(psw->tunnel_ports)); platform_memset(psw->virtual_ports, 0, sizeof(psw->virtual_ports)); platform_memset(psw->meta_ports, 0, sizeof(psw->meta_ports)); //Generate metaports //Flood psw->meta_ports[META_PORT_FLOOD_INDEX].type = PORT_TYPE_META_FLOOD; strncpy(psw->meta_ports[META_PORT_FLOOD_INDEX].name, "Flood meta port", SWITCH_PORT_MAX_LEN_NAME); //In port psw->meta_ports[META_PORT_IN_PORT_INDEX].type = PORT_TYPE_META_IN_PORT; strncpy(psw->meta_ports[META_PORT_IN_PORT_INDEX].name, "In port meta port", SWITCH_PORT_MAX_LEN_NAME); //All psw->meta_ports[META_PORT_ALL_INDEX].type = PORT_TYPE_META_ALL; strncpy(psw->meta_ports[META_PORT_ALL_INDEX].name, "All meta port", SWITCH_PORT_MAX_LEN_NAME); //Set extern pointer flood_meta_port = &psw->meta_ports[META_PORT_FLOOD_INDEX]; in_port_meta_port = &psw->meta_ports[META_PORT_IN_PORT_INDEX]; all_meta_port = &psw->meta_ports[META_PORT_ALL_INDEX]; //Initialize monitoring data if(__monitoring_init(&psw->monitoring) != ROFL_SUCCESS) return ROFL_FAILURE; //Generate matching algorithm lists __physical_switch_generate_matching_algorithm_list(); return ROFL_SUCCESS; }
// Minimal mmap shim: only anonymous RW mappings at a kernel-chosen address
// are supported; the returned memory is zero-filled (commit is ignored here).
RANDO_SECTION void *API::mmap(void *addr, size_t size, PagePermissions perms, bool commit) {
    RANDO_ASSERT(perms == PagePermissions::RW);
    auto mem = platform_alloc_rw(size);
    // Callers must not demand a specific address we cannot honor
    RANDO_ASSERT(addr == nullptr || addr == mem);
    if (mem == nullptr)
        return nullptr;
    platform_memset(mem, 0, size);
    return mem;
}
/*
* Statistics
*/
//Collects per-flow statistics for all entries matching the given filter
//(matches, cookie/mask, out_port/out_group) and pushes them into msg.
//Returns ROFL_FAILURE on bad arguments or allocation failure.
rofl_result_t of1x_get_flow_stats_loop(struct of1x_flow_table *const table, uint64_t cookie, uint64_t cookie_mask, uint32_t out_port, uint32_t out_group, of1x_match_group_t *const matches, of1x_stats_flow_msg_t* msg){

	of1x_flow_entry_t* entry, flow_stats_entry;
	of1x_stats_single_flow_msg_t* flow_stats;
	bool check_cookie;

	//FIX: the original initialized check_cookie (dereferencing table)
	//BEFORE this NULL check; the check must come first
	if( unlikely(msg==NULL) || unlikely(table==NULL) )
		return ROFL_FAILURE;

	//Create a flow_stats_entry used purely as a comparison template
	platform_memset(&flow_stats_entry,0,sizeof(of1x_flow_entry_t));
	flow_stats_entry.matches = *matches;
	flow_stats_entry.cookie = cookie;
	flow_stats_entry.cookie_mask = cookie_mask;
	check_cookie = ( table->pipeline->sw->of_ver != OF_VERSION_10 ); //Ignore cookie in OF1.0

	//Mark table as being read
	platform_rwlock_rdlock(table->rwlock);

	//Loop over the table and calculate stats
	for(entry = table->entries; entry!=NULL; entry = entry->next){

		//Check if is contained
		if(__of1x_flow_entry_check_contained(&flow_stats_entry, entry, false, check_cookie, out_port, out_group, true)){

			// update statistics from platform
			platform_of1x_update_stats_hook(entry);

			//Create a new single flow entry and fillin
			flow_stats = __of1x_init_stats_single_flow_msg(entry);

			if(!flow_stats){
				//FIX: the original returned without releasing the rdlock,
				//leaving the table locked forever
				platform_rwlock_rdunlock(table->rwlock);
				return ROFL_FAILURE;
			}

			//Push this stat to the msg
			__of1x_push_single_flow_stats_to_msg(msg, flow_stats);
		}
	}

	//Release the table
	platform_rwlock_rdunlock(table->rwlock);

	return ROFL_SUCCESS;
}
// Page-granular allocator: rounds the request up to a whole number of pages
// (reserving a size_t header), optionally zeroes the block, stores the total
// size in the header and hands the caller the address just past it.
RANDO_SECTION void *API::mem_alloc(size_t size, bool zeroed) {
    // Since kernels allocate page-aligned blocks anyway,
    // we can ask for page-sized allocations
    const size_t total = (size + sizeof(size) + kPageSize - 1) & ~(kPageSize - 1);
    auto block = reinterpret_cast<size_t*>(platform_alloc_rw(total));
    if (block == nullptr)
        return nullptr;
    if (zeroed)
        platform_memset(block, 0, total);
    *block = total;                               // remember size for realloc/free
    return reinterpret_cast<void*>(block + 1);    // user data starts after header
}
/* Instruction groups init and destroy */
//Initializes a match group: zeroed, full OF version range, and the OF1.0
//wildcard bitmap pre-set for the fields OF1.0 allows to be wildcarded.
void __of1x_init_match_group(of1x_match_group_t* group){

	//Start from an all-zero state
	platform_memset(group, 0, sizeof(*group));

	//Version requirements span the whole supported range
	group->ver_req.min_ver = OF1X_MIN_VERSION;
	group->ver_req.max_ver = OF1X_MAX_VERSION;

	//OF1.0 full wildcard bitmap
	bitmap128_set(&group->of10_wildcard_bm, OF1X_MATCH_ETH_DST);
	bitmap128_set(&group->of10_wildcard_bm, OF1X_MATCH_ETH_SRC);
	bitmap128_set(&group->of10_wildcard_bm, OF1X_MATCH_NW_SRC);
	bitmap128_set(&group->of10_wildcard_bm, OF1X_MATCH_NW_DST);
}
static int64_t ioctl_add_module(char *file, int64_t len) { char *buf; int64_t ret; if (g_num_pmodules >= MAX_NUM_MODULES) { ALERT("IOCTL_ADD_MODULE: too many modules have been loaded\n"); return BF_IOCTL_FAILURE; } buf = platform_alloc_rwe(len); if (buf == NULL) { ALERT("IOCTL_ADD_MODULE: failed to allocate memory for the module\n"); return BF_IOCTL_FAILURE; } platform_memset(buf, 0, len); platform_memcpy(buf, file, len); ret = common_add_module(buf, len); if (ret != BF_SUCCESS) { ALERT("IOCTL_ADD_MODULE: common_add_module failed: %p - %s\n", (void *)ret, ec_to_str(ret)); goto failed; } pmodules[g_num_pmodules].data = buf; pmodules[g_num_pmodules].size = len; g_num_pmodules++; DEBUG("IOCTL_ADD_MODULE: succeeded\n"); return BF_IOCTL_SUCCESS; failed: platform_free_rwe(buf, len); DEBUG("IOCTL_ADD_MODULE: failed\n"); return BF_IOCTL_FAILURE; }
//Accumulates aggregate statistics (packets, bytes, flow count) over all
//table entries contained in the given filter. msg fields are incremented,
//not reset. Returns ROFL_FAILURE only on bad arguments.
rofl_result_t of1x_get_flow_aggregate_stats_loop(struct of1x_flow_table *const table, uint64_t cookie, uint64_t cookie_mask, uint32_t out_port, uint32_t out_group, of1x_match_group_t *const matches, of1x_stats_flow_aggregate_msg_t* msg){

	of1x_flow_entry_t *it, filter;
	bool check_cookie;

	if( unlikely(msg==NULL) || unlikely(table==NULL) )
		return ROFL_FAILURE;

	//Build a template entry used purely for the containment checks
	platform_memset(&filter, 0, sizeof(of1x_flow_entry_t));
	filter.matches = *matches;
	filter.cookie = cookie;
	filter.cookie_mask = cookie_mask;

	//OF1.0 has no cookies, so ignore them there
	check_cookie = ( table->pipeline->sw->of_ver != OF_VERSION_10 );

	//Mark table as being read
	platform_rwlock_rdlock(table->rwlock);

	//Walk the table, accumulating stats for the contained entries
	for(it = table->entries; it != NULL; it = it->next){
		if(!__of1x_flow_entry_check_contained(&filter, it, false, check_cookie, out_port, out_group, true))
			continue;

		msg->packet_count += it->stats.packet_count;
		msg->byte_count += it->stats.byte_count;
		msg->flow_count++;
	}

	//Release the table
	platform_rwlock_rdunlock(table->rwlock);

	return ROFL_SUCCESS;
}
// Reallocate a block previously returned by mem_alloc/mem_realloc.
// Sizes are page-rounded and stored in a size_t header before the user data.
// On growth the contents are copied; if zeroed, the tail of the NEW block is
// cleared. Returns nullptr on allocation failure (old block already freed).
RANDO_SECTION void *API::mem_realloc(void *old_ptr, size_t new_size, bool zeroed) {
    if (old_ptr == nullptr)
        return mem_alloc(new_size, zeroed);

    // Step back to the header that holds the block's total size
    auto *old_size_ptr = reinterpret_cast<size_t*>(old_ptr);
    old_size_ptr--;
    auto old_size = *old_size_ptr;
    new_size = (new_size + sizeof(new_size) + kPageSize - 1) & ~(kPageSize - 1);
    if (new_size == old_size)
        return old_ptr;

    void *res = old_size_ptr;
    if (new_size < old_size) {
        // We're shrinking the region: release the trailing pages in place
        auto new_end = reinterpret_cast<BytePointer>(old_size_ptr) + new_size;
        RANDO_ASSERT((reinterpret_cast<uintptr_t>(new_end) & (kPageSize - 1)) == 0);
        platform_free_rw(new_end, old_size - new_size);
        // Defensive only: rounding guarantees new_size >= kPageSize here
        if (new_size == 0)
            return nullptr;
        // Fall-through with res == old_size_ptr
    } else { // new_size > old_size
        // We're growing the region
        res = platform_alloc_rw(new_size);
        if (res == nullptr) {
            // Release the old memory, then return an error
            platform_free_rw(old_size_ptr, old_size);
            return nullptr;
        }
        // Copy over the old data, then release the old region
        platform_memcpy(res, old_size_ptr, old_size);
        if (zeroed) {
            // FIX: the original zeroed at old_size_ptr + old_size, i.e. PAST
            // the end of the old allocation (out-of-bounds write); the tail
            // of the freshly allocated NEW block is what must be cleared.
            auto new_tail = reinterpret_cast<BytePointer>(res) + old_size;
            platform_memset(new_tail, 0, new_size - old_size);
        }
        platform_free_rw(old_size_ptr, old_size);
    }
    // Stamp the new size header and return the user-visible pointer
    auto new_size_ptr = reinterpret_cast<size_t*>(res);
    *new_size_ptr = new_size;
    return reinterpret_cast<void*>(new_size_ptr + 1);
}
/* Instruction groups init and destroy */
//Initializes an instruction group to an empty (all-zero) state.
void __of1x_init_instruction_group(of1x_instruction_group_t* group){
	platform_memset(group, 0, sizeof(*group));
}