/*
 * Look up a symbol by name in the scope of the given context, stack frame
 * and instruction pointer.  On success stores a newly allocated Symbol in
 * *sym and returns 0; on failure returns -1 with errno set (errno is
 * ERR_SYM_NOT_FOUND when no provider recognizes the name).
 */
int find_symbol_by_name(Context * ctx, int frame, ContextAddress ip, char * name, Symbol ** sym) {
    int found = 0;
    *sym = alloc_symbol();
    (*sym)->ctx = ctx;
    /* Resolve the symbolic STACK_TOP_FRAME to a concrete frame number. */
    if (frame == STACK_TOP_FRAME && (frame = get_top_frame(ctx)) < 0) return -1;
    /* Primary lookup: PE symbol tables.  Any error other than
     * "symbol not found" aborts the whole search. */
    if (find_pe_symbol_by_name(ctx, frame, ip, name, *sym) >= 0) found = 1;
    else if (get_error_code(errno) != ERR_SYM_NOT_FOUND) return -1;
#if ENABLE_RCBP_TEST
    if (!found) {
        int sym_class = 0;
        void * address = NULL;
        /* Fallback: built-in test symbols used by the agent self-test. */
        if (find_test_symbol(ctx, name, &address, &sym_class) >= 0) found = 1;
        else if (get_error_code(errno) != ERR_SYM_NOT_FOUND) return -1;
        if (found) {
            /* Test symbols are not frame-relative: bind them to the
             * memory (process-wide) context. */
            (*sym)->ctx = ctx->mem;
            (*sym)->sym_class = sym_class;
            (*sym)->address = (ContextAddress)address;
        }
    }
#endif
    if (!found) {
        /* Last resort: predefined basic type names. */
        if (find_basic_type_symbol(ctx, name, *sym) >= 0) found = 1;
        else if (get_error_code(errno) != ERR_SYM_NOT_FOUND) return -1;
    }
    if (!found) {
        errno = ERR_SYM_NOT_FOUND;
        return -1;
    }
    /* Invariants: frame-less symbols must be bound to the memory context,
     * and the stored frame number must be consistent with that binding. */
    assert(frame >= 0 || (*sym)->ctx == ctx->mem);
    assert((*sym)->ctx == ((*sym)->frame ? ctx : ctx->mem));
    assert((*sym)->frame == ((*sym)->ctx == (*sym)->ctx->mem ? 0u : frame - STACK_NO_FRAME));
    return 0;
}
/*
 * Log a classified error to syslog, then the error detail (message or
 * errno text), then the caller-supplied formatted message, and finally
 * terminate the process with EXIT_FAILURE.  Never returns.
 *
 * Fix: the log messages misspelled "occurred" and used the wrong
 * article ("A application"/"A unknown").
 */
void die_with_error(meta_error e, const char *fmt, ...)
{
    va_list ap;
    int rc;

    va_start(ap, fmt);

    /* Classify the error so the log reader knows which subsystem failed. */
    if (is_tcpip_error(e))
        syslog(LOG_ERR, "A tcp/ip error has occurred");
    else if (is_protocol_error(e))
        syslog(LOG_ERR, "A protocol error has occurred");
    else if (is_app_error(e))
        syslog(LOG_ERR, "An application error has occurred");
    else if (is_os_error(e))
        syslog(LOG_ERR, "An os error has occurred");
    else if (is_db_error(e))
        syslog(LOG_ERR, "A database error has occurred");
    else if (is_other_error(e))
        syslog(LOG_ERR, "An unknown error has occurred");

    if (has_error_message(e))
        syslog(LOG_ERR, "Error message: %s", get_error_message(e));
    else if ((rc = get_error_code(e)) != 0)
        syslog(LOG_ERR, "Possible error: %d %s\n", rc, strerror(rc));

    meta_vsyslog(LOG_ERR, fmt, ap);
    va_end(ap);
    exit(EXIT_FAILURE);
}
static void command_find_by_addr_cache_client(void * x) { CommandFindByAddrArgs * args = (CommandFindByAddrArgs *)x; Channel * c = cache_channel(); Context * ctx = NULL; int frame = STACK_NO_FRAME; Symbol * sym = NULL; int err = 0; if (id2frame(args->id, &ctx, &frame) < 0) ctx = id2ctx(args->id); if (ctx == NULL) err = set_errno(ERR_INV_CONTEXT, args->id); else if (ctx->exited) err = ERR_ALREADY_EXITED; if (err == 0 && find_symbol_by_addr(ctx, frame, args->addr, &sym) < 0) err = errno; list_cnt = 0; if (err == 0) { list_add(sym); while (find_next_symbol(&sym) == 0) list_add(sym); if (get_error_code(errno) != ERR_SYM_NOT_FOUND) err = errno; } cache_exit(); write_stringz(&c->out, "R"); write_stringz(&c->out, args->token); write_errno(&c->out, err); write_symbol_list(&c->out); write_stream(&c->out, MARKER_EOM); }
static void command_find_in_scope_cache_client(void * x) { CommandFindInScopeArgs * args = (CommandFindInScopeArgs *)x; Channel * c = cache_channel(); Context * ctx = NULL; int frame = STACK_NO_FRAME; Symbol * scope = NULL; Symbol * sym = NULL; int err = 0; if (id2frame(args->frame_id, &ctx, &frame) < 0) ctx = id2ctx(args->frame_id); if (ctx == NULL) err = set_errno(ERR_INV_CONTEXT, args->frame_id); else if (ctx->exited) err = ERR_ALREADY_EXITED; if (err == 0 && args->scope_id[0] && id2symbol(args->scope_id, &scope) < 0) err = errno; if (err == 0 && args->name == NULL) err = set_errno(EINVAL, "Symbol name must not be null"); if (err == 0 && find_symbol_in_scope(ctx, frame, args->ip, scope, args->name, &sym) < 0) err = errno; list_cnt = 0; if (err == 0) { list_add(sym); while (find_next_symbol(&sym) == 0) list_add(sym); if (get_error_code(errno) != ERR_SYM_NOT_FOUND) err = errno; } cache_exit(); write_stringz(&c->out, "R"); write_stringz(&c->out, args->token); write_errno(&c->out, err); write_symbol_list(&c->out); write_stream(&c->out, MARKER_EOM); loc_free(args->name); }
void exn_page_fault(void) { void *addr; struct vma *vma; unsigned long error; error = get_error_code(current->esp); MOV("cr2", addr); // address not mapped in process address space if (!(vma = vma_get(¤t->mm, addr))) { segfault(SEGV_MAPERR, error, addr); return; } // page not present (demand paging) if (!(error & PGF_PERM)) { if (vm_map_page(vma, addr) < 0) // FIXME: stall until memory available? segfault(SEGV_MAPERR, error, addr); return; } // write permission if (error & PGF_WRITE) { if (vm_write_perm(vma, addr) < 0) segfault(SEGV_ACCERR, error, addr); return; } // read permission if (vm_read_perm(vma, addr) < 0) segfault(SEGV_ACCERR, error, addr); }
/*
 * Unpack a DNSServiceResolve reply from the wire buffer and forward it
 * to the application callback.  String-decoding failures are reported
 * as kDNSServiceErr_Unknown unless the daemon already reported an error.
 */
static void handle_resolve_response(DNSServiceRef sdr, ipc_msg_hdr *hdr, char *data)
{
    DNSServiceFlags flags;
    uint32_t interface_index;
    DNSServiceErrorType error;
    char fullname[kDNSServiceMaxDomainName];
    char target[kDNSServiceMaxDomainName];
    union { uint16_t s; u_char b[2]; } port;
    uint16_t txtlen;
    unsigned char *txtrecord;
    int bad_strings = 0;

    (void)hdr;  /* unused */

    flags = get_flags(&data);
    interface_index = get_long(&data);
    error = get_error_code(&data);
    if (get_string(&data, fullname, kDNSServiceMaxDomainName) < 0) bad_strings = 1;
    if (get_string(&data, target, kDNSServiceMaxDomainName) < 0) bad_strings = 1;
    /* Copy the two port bytes as-is; wire byte order is preserved in port.s. */
    port.b[0] = *data++;
    port.b[1] = *data++;
    txtlen = get_short(&data);
    txtrecord = (unsigned char *)get_rdata(&data, txtlen);

    if (!error && bad_strings) error = kDNSServiceErr_Unknown;

    ((DNSServiceResolveReply)sdr->app_callback)(sdr, flags, interface_index, error,
        fullname, target, port.s, txtlen, txtrecord, sdr->app_context);
}
/*
 * Unpack a DNSServiceQueryRecord reply from the wire buffer and forward
 * it to the application callback.  A string-decoding failure is reported
 * as kDNSServiceErr_Unknown unless the daemon already reported an error.
 */
static void handle_query_response(DNSServiceRef sdr, ipc_msg_hdr *hdr, char *data)
{
    DNSServiceFlags flags;
    uint32_t interface_index;
    uint32_t ttl;
    DNSServiceErrorType error;
    char name[kDNSServiceMaxDomainName];
    uint16_t rrtype;
    uint16_t rrclass;
    uint16_t rdlen;
    char *rdata;
    int bad_strings = 0;

    (void)hdr;  /* unused */

    flags = get_flags(&data);
    interface_index = get_long(&data);
    error = get_error_code(&data);
    if (get_string(&data, name, kDNSServiceMaxDomainName) < 0) bad_strings = 1;
    rrtype = get_short(&data);
    rrclass = get_short(&data);
    rdlen = get_short(&data);
    rdata = get_rdata(&data, rdlen);
    ttl = get_long(&data);

    if (!error && bad_strings) error = kDNSServiceErr_Unknown;

    ((DNSServiceQueryRecordReply)sdr->app_callback)(sdr, flags, interface_index, error,
        name, rrtype, rrclass, rdlen, rdata, ttl, sdr->app_context);
}
/* use software emulated X86 pgfault */
/*
 * TLB-miss handler.  If the PTE is missing or invalid, the miss is a real
 * page fault and is forwarded to pgfault_handler(); otherwise the TLB is
 * refilled from the valid PTE, with user read/write permission checks when
 * the fault came from user mode.  Unrecoverable faults panic in kernel
 * mode or kill the current process in user mode.
 */
static void handle_tlbmiss(struct trapframe* tf, int write)
{
#if 0
    if(!trap_in_kernel(tf)){
        print_trapframe(tf);
        while(1);
    }
#endif
    static int entercnt = 0;  /* diagnostic counter of handler invocations */
    entercnt ++;
    //kprintf("## enter handle_tlbmiss %d times\n", entercnt);
    int in_kernel = trap_in_kernel(tf);
    assert(current_pgdir != NULL);
    //print_trapframe(tf);
    uint32_t badaddr = tf->tf_vaddr;
    int ret = 0;
    /* Look up the PTE without creating intermediate tables (create == 0). */
    pte_t *pte = get_pte(current_pgdir, tf->tf_vaddr, 0);
    if(pte==NULL || ptep_invalid(pte)){   //PTE miss, pgfault
        //panic("unimpl");
        //TODO
        //tlb will not be refill in do_pgfault,
        //so a vmm pgfault will trigger 2 exception
        //permission check in tlb miss
        ret = pgfault_handler(tf, badaddr, get_error_code(write, pte));
    }else{ //tlb miss only, reload it
        /* refill two slot */
        /* check permission */
        if(in_kernel){
            /* Kernel accesses skip user-permission checks. */
            tlb_refill(badaddr, pte);
            //kprintf("## refill K\n");
            return;
        }else{
            /* User-mode access: PTE must grant the requested permission. */
            if(!ptep_u_read(pte)){
                ret = -1;
                goto exit;
            }
            if(write && !ptep_u_write(pte)){
                ret = -2;
                goto exit;
            }
            //kprintf("## refill U %d %08x\n", write, badaddr);
            tlb_refill(badaddr, pte);
            return ;
        }
    }
exit:
    /* Non-zero ret: the fault could not be resolved. */
    if(ret){
        print_trapframe(tf);
        if(in_kernel){
            panic("unhandled pgfault");
        }else{
            do_exit(-E_KILLED);
        }
    }
    return ;
}
static void run_cache_client(int retry) { Trap trap; unsigned i; unsigned id = current_client.id; void * args_copy = NULL; assert(id != 0); current_cache = NULL; cache_miss_cnt = 0; def_channel = NULL; if (current_client.args_copy) args_copy = current_client.args; for (i = 0; i < listeners_cnt; i++) listeners[i](retry ? CTLE_RETRY : CTLE_START); if (set_trap(&trap)) { current_client.client(current_client.args); clear_trap(&trap); assert(current_client.id == 0); assert(cache_miss_cnt == 0); } else if (id != current_client.id) { trace(LOG_ALWAYS, "Unhandled exception in data cache client: %s", errno_to_str(trap.error)); assert(current_client.id == 0); assert(cache_miss_cnt == 0); } else { if (get_error_code(trap.error) != ERR_CACHE_MISS || cache_miss_cnt == 0 || current_cache == NULL) { trace(LOG_ALWAYS, "Unhandled exception in data cache client: %s", errno_to_str(trap.error)); for (i = 0; i < listeners_cnt; i++) listeners[i](CTLE_COMMIT); } else { AbstractCache * cache = current_cache; if (cache->wait_list_cnt >= cache->wait_list_max) { cache->wait_list_max += 8; cache->wait_list_buf = (WaitingCacheClient *)loc_realloc(cache->wait_list_buf, cache->wait_list_max * sizeof(WaitingCacheClient)); } if (current_client.args != NULL && !current_client.args_copy) { void * mem = loc_alloc(current_client.args_size); memcpy(mem, current_client.args, current_client.args_size); current_client.args = mem; current_client.args_copy = 1; } if (cache->wait_list_cnt == 0) list_add_last(&cache->link, &cache_list); if (current_client.channel != NULL) channel_lock_with_msg(current_client.channel, channel_lock_msg); cache->wait_list_buf[cache->wait_list_cnt++] = current_client; for (i = 0; i < listeners_cnt; i++) listeners[i](CTLE_ABORT); args_copy = NULL; } memset(¤t_client, 0, sizeof(current_client)); current_cache = NULL; cache_miss_cnt = 0; def_channel = NULL; } if (args_copy != NULL) loc_free(args_copy); }
/*
 * Unpack a domain-enumeration reply from the wire buffer and forward it
 * to the application callback.  A string-decoding failure is reported as
 * kDNSServiceErr_Unknown unless the daemon already reported an error.
 */
static void handle_enumeration_response(DNSServiceRef sdr, ipc_msg_hdr *hdr, char *data)
{
    DNSServiceFlags flags;
    uint32_t interface_index;
    DNSServiceErrorType error;
    char domain[kDNSServiceMaxDomainName];
    int bad_strings = 0;

    (void)hdr;  /* unused */

    flags = get_flags(&data);
    interface_index = get_long(&data);
    error = get_error_code(&data);
    if (get_string(&data, domain, kDNSServiceMaxDomainName) < 0) bad_strings = 1;

    if (!error && bad_strings) error = kDNSServiceErr_Unknown;

    ((DNSServiceDomainEnumReply)sdr->app_callback)(sdr, flags, interface_index, error,
        domain, sdr->app_context);
}
/*
 * Unpack a register-record reply and forward it to the record's own
 * callback.  A reply arriving on a non-connection reference is reported
 * to the callback as kDNSServiceErr_Unknown.
 */
static void handle_regrecord_response(DNSServiceRef sdr, ipc_msg_hdr *hdr, char *data)
{
    DNSServiceFlags flags;
    uint32_t interface_index;
    DNSServiceErrorType error;
    DNSRecordRef rref = hdr->client_context.context;

    if (sdr->op != connection) {
        rref->app_callback(rref->sdr, rref, 0, kDNSServiceErr_Unknown, rref->app_context);
        return;
    }

    flags = get_flags(&data);
    interface_index = get_long(&data);
    error = get_error_code(&data);
    rref->app_callback(rref->sdr, rref, flags, error, rref->app_context);
}
/*
 * Unpack a service-registration reply from the wire buffer and forward it
 * to the application callback.  String-decoding failures are reported as
 * kDNSServiceErr_Unknown unless the daemon already reported an error.
 */
static void handle_regservice_response(DNSServiceRef sdr, ipc_msg_hdr *hdr, char *data)
{
    DNSServiceFlags flags;
    uint32_t interface_index;
    DNSServiceErrorType error;
    char name[256];
    char regtype[kDNSServiceMaxDomainName];
    char domain[kDNSServiceMaxDomainName];
    int bad_strings = 0;

    (void)hdr;  /* unused */

    flags = get_flags(&data);
    interface_index = get_long(&data);
    error = get_error_code(&data);
    if (get_string(&data, name, 256) < 0) bad_strings = 1;
    if (get_string(&data, regtype, kDNSServiceMaxDomainName) < 0) bad_strings = 1;
    if (get_string(&data, domain, kDNSServiceMaxDomainName) < 0) bad_strings = 1;

    if (!error && bad_strings) error = kDNSServiceErr_Unknown;

    ((DNSServiceRegisterReply)sdr->app_callback)(sdr, flags, error,
        name, regtype, domain, sdr->app_context);
}
static void run_cache_client(void) { Trap trap; cache_miss_cnt = 0; client_exited = 0; if (set_trap(&trap)) { current_client.client(current_client.args); clear_trap(&trap); assert(cache_miss_cnt == 0); assert(client_exited); } else if (get_error_code(trap.error) != ERR_CACHE_MISS || client_exited || cache_miss_cnt == 0) { trace(LOG_ALWAYS, "Unhandled exception in data cache client: %d %s", trap.error, errno_to_str(trap.error)); } if (cache_miss_cnt == 0 && current_client.args_copy) loc_free(current_client.args); memset(¤t_client, 0, sizeof(current_client)); cache_miss_cnt = 0; client_exited = 0; }
int set_errno(int no, const char * msg) { errno = no; if (no != 0 && msg != NULL) { ErrorMessage * m = alloc_msg(SRC_MESSAGE); /* alloc_msg() assigns new value to 'errno', * need to be sure it does not change until this function exits. */ int err = errno; m->error = get_error_code(no); if (no == ERR_OTHER) { m->text = loc_strdup(msg); } else { size_t msg_len = strlen(msg); if (msg_len == 0) { m->text = loc_strdup(errno_to_str(no)); } else { const char * text0 = tmp_strdup(msg); const char * text1 = errno_to_str(no); if (text0[msg_len - 1] == '.' || text0[msg_len - 1] == '\n') { size_t len = msg_len + strlen(text1) + 2; char * text2 = (char *)loc_alloc(len); snprintf(text2, len, "%s %s", text0, text1); m->text = text2; } else { size_t len = msg_len + strlen(text1) + 3; char * text2 = (char *)loc_alloc(len); snprintf(text2, len, "%s. %s", text0, text1); m->text = text2; } } } errno = err; } return errno; }
static void run_cache_client(int retry) { Trap trap; unsigned i; cache_miss_cnt = 0; client_exited = 0; def_channel = NULL; for (i = 0; i < listeners_cnt; i++) listeners[i](retry ? CTLE_RETRY : CTLE_START); if (set_trap(&trap)) { current_client.client(current_client.args); clear_trap(&trap); assert(cache_miss_cnt == 0); assert(client_exited); } else if (get_error_code(trap.error) != ERR_CACHE_MISS || client_exited || cache_miss_cnt == 0) { trace(LOG_ALWAYS, "Unhandled exception in data cache client: %d %s", trap.error, errno_to_str(trap.error)); } for (i = 0; i < listeners_cnt; i++) listeners[i](client_exited ? CTLE_COMMIT : CTLE_ABORT); if (cache_miss_cnt == 0 && current_client.args_copy) loc_free(current_client.args); memset(¤t_client, 0, sizeof(current_client)); cache_miss_cnt = 0; client_exited = 0; def_channel = NULL; }
/**************************************************************************//** * See mss_nvm.h for details of how to use this function. */ nvm_status_t NVM_unlock ( uint32_t start_addr, uint32_t length ) { nvm_status_t status; uint32_t nvm_offset; uint32_t first_page; uint32_t last_page; uint32_t current_page; uint32_t current_offset; /* Ignore upper address bits to ignore remapping setting. */ nvm_offset = start_addr & NVM_OFFSET_SIGNIFICANT_BITS; /* Ignore remapping. */ /* Check against attempt to write data larger than eNVM. */ ASSERT((nvm_offset + length) < MAX_512K_OFFSET); if((nvm_offset + length) < MAX_512K_OFFSET) { current_offset = nvm_offset; first_page = nvm_offset / BYTES_PER_PAGE; last_page = (nvm_offset + length) / BYTES_PER_PAGE; /* Gain exclusive access to eNVM controller */ status = get_ctrl_access(nvm_offset, length); /* Unlock eNVM one page at a time. */ if(NVM_SUCCESS == status) { uint32_t block; uint32_t inc; uint32_t first_word; uint32_t word_offset; uint32_t * p_nvm32; uint32_t errors; p_nvm32 = (uint32_t *)NVM_BASE_ADDRESS; first_word = nvm_offset / 4u; word_offset = first_word; for(current_page = first_page; (current_page <= last_page) && (NVM_SUCCESS == status); ++current_page) { uint32_t ctrl_status; if(word_offset >= BLOCK1_FIRST_WORD_OFFSET) { block = NVM_BLOCK_1; } else { block = NVM_BLOCK_0; } for(inc = 0u; inc < WD_WORD_SIZE; ++inc) { g_nvm32[block]->WD[inc] = p_nvm32[word_offset]; ++word_offset; } g_nvm[block]->PAGE_LOCK = NVM_DO_NOT_LOCK_PAGE; g_nvm[block]->CMD = USER_UNLOCK; /* Issue program command */ g_nvm[block]->CMD = PROG_ADS | (current_offset & PAGE_ADDR_MASK); current_offset += BYTES_PER_PAGE; /* Wait for NVM to become ready. */ ctrl_status = wait_nvm_ready(block); /* Check for errors. */ errors = ctrl_status & WRITE_ERROR_MASK; if(errors) { uint32_t nvm_hw_status; nvm_hw_status = g_nvm[block]->STATUS; status = get_error_code(nvm_hw_status); } } /* Release eNVM controllers so that other masters can gain access to it. 
*/ release_ctrl_access(); } } else { status = NVM_INVALID_PARAMETER; } return status; }
/*
 * Serve HTTP requests on one connection until the server shuts down, the
 * client closes, or the connection is non-persistent.
 * Returns 1 when the connection was handled to completion (or shutdown),
 * 0 when an error response was sent (or the send failed) and the caller
 * should drop the connection without further processing.
 */
static int serviceConnection2(
    http_server srv,
    connection conn,
    http_request request,
    http_response response,
    meta_error e)
{
    dynamic_page dp;
    size_t cbSent;
    int status, error = 0;
    size_t max_posted_content = http_server_get_post_limit(srv);

    while(!http_server_shutting_down(srv)) {
        /* No pending data: report EAGAIN so the caller can re-queue. */
        if(!data_on_socket(conn))
            return set_tcpip_error(e, EAGAIN);

        /* Were we able to read a valid http request?
         * If not, what is the cause of the error? If it is a http
         * protocol error, we try to send a response back to the client
         * and close the connection. If it is anything else(tcp/ip, os)
         * we stop processing.
         */
        error = !request_receive(request, conn, max_posted_content, e);

        /* So far, so good. We have a valid HTTP request.
         * Now see if we can locate a page handler function for it.
         * If we do, call it. If not, see if it on disk or if the
         * http_server has a default page handler. If neither is true,
         * then the page was not found(404).
         */
        if(error)
            ;
        else if( (dp = http_server_lookup(srv, request)) != NULL) {
            if(!handle_dynamic(srv, dp, request, response, e)) {
                error = 1;
            }
        }
        else if(http_server_can_read_files(srv)) {
            if(!send_disk_file(srv, conn, request, response, e)) {
                error = 1;
            }
        }
        else if(http_server_has_default_page_handler(srv)) {
            if(!http_server_run_default_page_handler(srv, request, response, e)) {
                error = 1;
            }
        }
        else {
            /* We didn't find the page */
            response_set_status(response, HTTP_404_NOT_FOUND);
            response_set_connection(response, "close");
        }

        if(error) {
            /* Protocol errors get an error response before we give up;
             * other error classes just terminate processing. */
            if(is_protocol_error(e)) {
                status = get_error_code(e);
                response_set_status(response, status);
                response_set_connection(response, "close");
                cbSent = response_send(response, conn, e);
                http_server_add_logentry(srv, conn, request, status, cbSent);
            }
            return 0;
        }

        /*
         * Some extra stuff for HTTP 1.0 clients. If client is 1.0
         * and connection_close() == 1 and connection header field
         * isn't set, then we set the connection flag to close.
         * Done so that 1.0 clients (Lynx) can detect closure.
         */
        if(request_get_version(request) != VERSION_11
        && !connection_is_persistent(conn)
        && strlen(response_get_connection(response)) == 0)
            response_set_connection(response, "close");

        cbSent = response_send(response, conn, e);
        http_server_add_logentry(srv, conn, request,
            response_get_status(response), cbSent);

        /* Nothing was sent: treat as a failed send and drop the connection. */
        if(cbSent == 0)
            return 0;

        /* Did the user set the Connection header field to "close" */
        if(strcmp(response_get_connection(response), "close") == 0)
            return 1;

        if(!connection_is_persistent(conn))
            return 1;

        /*
         * NOTE (translated from Norwegian): we should add new functionality
         * here — disconnect the connections that
         * a) have run the longest,
         * b) have transferred the most bytes (up or down),
         * or c) have the worst transfer rate.
         * The reason is that with n worker threads and n persistent
         * connections, all new connections end up queued and never get CPU
         * time.  So the disconnect rule above should only apply when the
         * queue has > 0 entries.
         */
        connection_flush(conn);
        request_recycle(request);
        response_recycle(response);
    }

    /* Shutdown detected */
    return 1;
}
/*
 * Cache client: disassemble a range of target memory and reply over the
 * channel with the disassembly (or an error).  The read window is aligned
 * to symbol or line-number boundaries when available so instructions are
 * decoded from a plausible start address.
 */
static void disassemble_cache_client(void * x) {
    DisassembleCmdArgs * args = (DisassembleCmdArgs *)x;
    int error = 0;
    Context * ctx = NULL;
    uint8_t * mem_buf = NULL;
    ContextAddress buf_addr = 0;   /* start of the memory window we read */
    ContextAddress buf_size = 0;   /* bytes of the window to disassemble */
    size_t mem_size = 0;           /* bytes actually read (window + slack) */
    ByteArrayOutputStream buf;
    OutputStream * buf_out = create_byte_array_output_stream(&buf);
    Channel * c = cache_channel();
    char * data = NULL;
    size_t size = 0;
    ContextISA isa;

    memset(&isa, 0, sizeof(isa));
    ctx = id2ctx(args->id);
    if (ctx == NULL) error = ERR_INV_CONTEXT;
    else if (ctx->exited) error = ERR_ALREADY_EXITED;
    if (!error) check_all_stopped(ctx);
    if (!error) {
        ContextAddress sym_addr = 0;
        ContextAddress sym_size = 0;
        int sym_addr_ok = 0;
        int sym_size_ok = 0;
#if SERVICE_Symbols
        {
            /* Try to anchor the window at the enclosing symbol. */
            Symbol * sym = NULL;
            if (find_symbol_by_addr(ctx, STACK_NO_FRAME, args->addr, &sym) == 0) {
                if (get_symbol_address(sym, &sym_addr) == 0) sym_addr_ok = 1;
                if (get_symbol_size(sym, &sym_size) == 0) sym_size_ok = 1;
            }
            if (sym_addr_ok && sym_addr <= args->addr) {
                /* Ignore symbols starting too far back (>= 4KB before). */
                if (args->addr - sym_addr >= 0x1000) {
                    sym_addr_ok = 0;
                    sym_size_ok = 0;
                }
                else if (sym_size_ok && sym_addr + sym_size > args->addr + args->size) {
                    /* Clip the symbol to the requested range end. */
                    sym_size = args->addr + args->size - sym_addr;
                }
            }
        }
#endif
#if SERVICE_LineNumbers
        if (!sym_addr_ok || !sym_size_ok) {
            /* Fall back to line-number info for the anchor. */
            CodeArea * area = NULL;
            address_to_line(ctx, args->addr, args->addr + 1, address_to_line_cb, &area);
            if (area != NULL) {
                sym_addr = area->start_address;
                sym_size = area->end_address - area->start_address;
                sym_addr_ok = 1;
                sym_size_ok = 1;
            }
        }
#endif
        if (sym_addr_ok && sym_size_ok && sym_addr <= args->addr && sym_addr + sym_size > args->addr) {
            /* Anchor covers the request: read exactly the anchored range. */
            buf_addr = sym_addr;
            buf_size = sym_size;
            mem_size = (size_t)sym_size;
        }
        else if (sym_addr_ok && sym_addr < args->addr) {
            /* Anchor start known but not its size: read from the anchor
             * through the requested range, plus max-instruction slack. */
            if (get_isa(ctx, sym_addr, &isa) < 0) {
                error = errno;
            }
            else {
                buf_addr = sym_addr;
                buf_size = args->addr + args->size - sym_addr;
                if (isa.max_instruction_size > 0) {
                    mem_size = (size_t)(buf_size + isa.max_instruction_size);
                }
                else {
                    mem_size = (size_t)(buf_size + MAX_INSTRUCTION_SIZE);
                }
            }
        }
        else {
            /* Use default address alignment */
            if (get_isa(ctx, args->addr, &isa) < 0) {
                error = errno;
            }
            else {
                if (isa.alignment > 0) {
                    buf_addr = args->addr & ~(ContextAddress)(isa.alignment - 1);
                }
                else {
                    buf_addr = args->addr & ~(ContextAddress)(DEFAULT_ALIGMENT - 1);
                }
                buf_size = args->addr + args->size - buf_addr;
                if (isa.max_instruction_size > 0) {
                    mem_size = (size_t)(buf_size + isa.max_instruction_size);
                }
                else {
                    mem_size = (size_t)(buf_size + MAX_INSTRUCTION_SIZE);
                }
            }
        }
        if (!error) {
            mem_buf = (uint8_t *)tmp_alloc(mem_size);
            if (context_read_mem(ctx, buf_addr, mem_buf, mem_size) < 0) error = errno;
            if (error) {
#if ENABLE_ExtendedMemoryErrorReports
                /* A partial read is still usable: shrink to the valid part. */
                MemoryErrorInfo info;
                if (context_get_mem_error_info(&info) == 0 && info.size_valid > 0) {
                    mem_size = info.size_valid;
                    error = 0;
                }
#endif
            }
        }
    }
    if (!error && disassemble_block(ctx, buf_out, mem_buf,
            buf_addr, buf_size, mem_size, &isa, args) < 0) error = errno;
    if (get_error_code(error) == ERR_CACHE_MISS) {
        /* On cache miss the client is re-run: discard partial output. */
        loc_free(buf.mem);
        buf.mem = NULL;
        buf.max = 0;
        buf.pos = 0;
    }
    cache_exit();
    get_byte_array_output_stream_data(&buf, &data, &size);
    if (!is_channel_closed(c)) {
        OutputStream * out = &c->out;
        write_stringz(out, "R");
        write_stringz(out, args->token);
        write_errno(out, error);
        if (size > 0) {
            write_block_stream(out, data, size);
        }
        else {
            write_string(out, "null");
        }
        write_stream(out, 0);
        write_stream(out, MARKER_EOM);
    }
    loc_free(data);
}
/**************************************************************************//**
 * See mss_nvm.h for details of how to use this function.
 *
 * Unlocks the eNVM pages covering [start_addr, start_addr + length) by
 * re-programming each page with its current content and the page-lock
 * bit cleared.  Addresses falling in the reserved/protected region are
 * rejected with NVM_PROTECTION_ERROR; other invalid parameters with
 * NVM_INVALID_PARAMETER.
 */
nvm_status_t NVM_unlock
(
    uint32_t start_addr,
    uint32_t length
)
{
    nvm_status_t status;
    uint32_t nvm_offset;
    uint32_t first_page;
    uint32_t last_page;
    uint32_t current_page;
    uint32_t current_offset;
    uint32_t initial_nvm_config;

    /*
     * SAR 57547: Set the FREQRNG field of the eNVM configuration register
     * to its maximum value (i.e. 15) to ensure successful writes to eNVM.
     * Store the value of the eNVM configuration before updating it, so
     * that the prior configuration can be restored when the eNVM write
     * operation has completed.
     */
    initial_nvm_config = SYSREG->ENVM_CR;
    SYSREG->ENVM_CR = (initial_nvm_config & NVM_FREQRNG_MASK) | NVM_FREQRNG_MAX;

    /* Check input parameters */
    if((start_addr >= (NVM_BASE_ADDRESS + NVM_RSV_PROTECTION_OFFSET)) || \
       ((start_addr >= NVM_RSV_PROTECTION_OFFSET) && \
        (start_addr < NVM_BASE_ADDRESS)) || \
       (!length) || \
       ((start_addr + length) >= (NVM_BASE_ADDRESS + NVM_RSV_PROTECTION_OFFSET))|| \
       (((start_addr + length) >= NVM_RSV_PROTECTION_OFFSET) && \
        ((start_addr + length) < NVM_BASE_ADDRESS)))
    {
        /* Distinguish protected-region accesses from plain bad parameters. */
        if(((start_addr >= (NVM_BASE_ADDRESS + NVM_RSV_PROTECTION_OFFSET)) && \
            (start_addr <= (NVM_BASE_ADDRESS + NVM_TOP_OFFSET))) || \
           ((start_addr >= NVM_RSV_PROTECTION_OFFSET) && (start_addr <= NVM_TOP_OFFSET)) || \
           (((start_addr + length) >= (NVM_BASE_ADDRESS + NVM_RSV_PROTECTION_OFFSET)) && \
            ((start_addr + length) <= (NVM_BASE_ADDRESS + NVM_TOP_OFFSET))) || \
           (((start_addr + length) >= NVM_RSV_PROTECTION_OFFSET) && \
            ((start_addr + length) <= NVM_TOP_OFFSET)))
        {
            status = NVM_PROTECTION_ERROR;
        }
        else
        {
            status = NVM_INVALID_PARAMETER;
        }
    }
    else
    {
        /* Ignore upper address bits to ignore remapping setting. */
        nvm_offset = start_addr & NVM_OFFSET_SIGNIFICANT_BITS;    /* Ignore remapping. */

        /* Check against attempt to write data larger than eNVM. */
        ASSERT((nvm_offset + length) <= MAX_504K_OFFSET);
        if((nvm_offset + length) <= MAX_504K_OFFSET)
        {
            first_page = nvm_offset / BYTES_PER_PAGE;
            /* Last page is determined by the last byte written. */
            last_page = (nvm_offset + (length - 1u)) / BYTES_PER_PAGE;

            /* Gain exclusive access to eNVM controller */
            status = get_ctrl_access(nvm_offset, length);

            /* Unlock eNVM one page at a time. */
            if(NVM_SUCCESS == status)
            {
                uint32_t block;
                uint32_t inc;
                uint32_t * p_nvm32;
                uint32_t errors_and_warnings;

                for(current_page = first_page;
                    (current_page <= last_page) &&
                    ((NVM_SUCCESS == status) ||(NVM_WRITE_THRESHOLD_WARNING == status));
                    ++current_page)
                {
                    uint32_t ctrl_status;

                    /* Select the eNVM block containing the current page. */
                    if(current_page > PAGES_PER_BLOCK)
                    {
                        block = NVM_BLOCK_1;
                    }
                    else
                    {
                        block = NVM_BLOCK_0;
                    }

                    if(g_nvm[block]->STATUS & MSS_NVM_WR_DENIED)
                    {
                        /* Clear the access denied flag */
                        g_nvm[block]->CLRHINT |= ACCESS_DENIED_FLAG_CLEAR;
                    }

                    /* Page offset in bytes (pages are 128 bytes: << 7). */
                    current_offset = (current_page << 0x7u);
                    p_nvm32 = (uint32_t *)(NVM_BASE_ADDRESS + current_offset);

                    /* Refill the write buffer with the page's current content. */
                    for(inc = 0u; inc < WD_WORD_SIZE; ++inc)
                    {
                        g_nvm32[block]->WD[inc] = p_nvm32[inc];
                    }
                    g_nvm[block]->PAGE_LOCK = NVM_DO_NOT_LOCK_PAGE;
                    g_nvm[block]->CMD = USER_UNLOCK | (current_offset & PAGE_ADDR_MASK);
                    /* Issue program command */
                    g_nvm[block]->CMD = PROG_ADS | (current_offset & PAGE_ADDR_MASK);
                    /* Wait for NVM to become ready. */
                    ctrl_status = wait_nvm_ready(block);
                    /* Check for errors and warnings. */
                    errors_and_warnings = ctrl_status & (WRITE_ERROR_MASK | MSS_NVM_WRCNT_OVER);
                    if(errors_and_warnings)
                    {
                        uint32_t nvm_hw_status;
                        nvm_hw_status = g_nvm[block]->STATUS;
                        status = get_error_code(nvm_hw_status);
                    }
                }
                /* Release eNVM controllers so that other masters can gain access to it. */
                release_ctrl_access();
            }
        }
        else
        {
            status = NVM_INVALID_PARAMETER;
        }
    }
    /* Restore back to original value. */
    SYSREG->ENVM_CR = initial_nvm_config;
    return status;
}
/**************************************************************************//**
 * See mss_nvm.h for details of how to use this function.
 *
 * Writes 'length' bytes from 'pidata' into eNVM at 'start_addr',
 * optionally locking each written page.  Addresses in the
 * reserved/protected region return NVM_PROTECTION_ERROR; other bad
 * parameters return NVM_INVALID_PARAMETER.  A write-count-over-threshold
 * warning from any page is propagated as NVM_WRITE_THRESHOLD_WARNING
 * without aborting the write.
 */
nvm_status_t NVM_write
(
    uint32_t start_addr,
    const uint8_t * pidata,
    uint32_t length,
    uint32_t lock_page
)
{
    nvm_status_t status;
    uint32_t nvm_offset;
    uint32_t device_version;
    uint32_t initial_nvm_config;

    /*
     * SAR 57547: Set the FREQRNG field of the eNVM configuration register
     * to its maximum value (i.e. 15) to ensure successful writes to eNVM.
     * Store the value of the eNVM configuration before updating it, so
     * that the prior configuration can be restored when the eNVM write
     * operation has completed.
     */
    initial_nvm_config = SYSREG->ENVM_CR;
    SYSREG->ENVM_CR = (initial_nvm_config & NVM_FREQRNG_MASK) | NVM_FREQRNG_MAX;

    /* Check input parameters */
    if((start_addr >= (NVM_BASE_ADDRESS + NVM_RSV_PROTECTION_OFFSET)) || \
       ((start_addr >= NVM_RSV_PROTECTION_OFFSET) && \
        (start_addr < NVM_BASE_ADDRESS)) || \
       (!pidata) || \
       (!length) || \
       ((start_addr + length) >= (NVM_BASE_ADDRESS + NVM_RSV_PROTECTION_OFFSET))|| \
       (((start_addr + length) >= NVM_RSV_PROTECTION_OFFSET) && \
        ((start_addr + length) < NVM_BASE_ADDRESS))|| \
       (lock_page > PARAM_LOCK_PAGE_FLAG))
    {
        /* Distinguish protected-region accesses from plain bad parameters. */
        if(((start_addr >= (NVM_BASE_ADDRESS + NVM_RSV_PROTECTION_OFFSET)) && \
            (start_addr <= (NVM_BASE_ADDRESS + NVM_TOP_OFFSET))) || \
           ((start_addr >= NVM_RSV_PROTECTION_OFFSET) && (start_addr <= NVM_TOP_OFFSET)) || \
           (((start_addr + length) >= (NVM_BASE_ADDRESS + NVM_RSV_PROTECTION_OFFSET)) && \
            ((start_addr + length) <= (NVM_BASE_ADDRESS + NVM_TOP_OFFSET))) || \
           (((start_addr + length) >= NVM_RSV_PROTECTION_OFFSET) && \
            ((start_addr + length) <= NVM_TOP_OFFSET)))
        {
            status = NVM_PROTECTION_ERROR;
        }
        else
        {
            status = NVM_INVALID_PARAMETER;
        }
    }
    else
    {
        /*
         * Prevent pages being locked for silicon versions which do not allow
         * locked pages to be unlocked.
         */
        device_version = SYSREG->DEVICE_VERSION;
        if((0x0000F802u == device_version) || (0x0001F802u == device_version))
        {
            lock_page = NVM_DO_NOT_LOCK_PAGE;
        }

        /* Ignore upper address bits to ignore remapping setting. */
        nvm_offset = start_addr & NVM_OFFSET_SIGNIFICANT_BITS;    /* Ignore remapping. */

        /* Check against attempt to write data larger than eNVM. */
        ASSERT((nvm_offset + length) <= MAX_504K_OFFSET);
        if((nvm_offset + length) <= MAX_504K_OFFSET)
        {
            /* Gain exclusive access to eNVM controller */
            status = get_ctrl_access(nvm_offset, length);

            /* Write eNVM one page at a time. */
            if(NVM_SUCCESS == status)
            {
                uint32_t remaining_length = length;
                uint32_t errors_and_warnings;

                while(remaining_length > 0u)
                {
                    uint32_t length_written;
                    uint32_t nvm_hw_status = 0u;

                    length_written = write_nvm(start_addr + (length - remaining_length),
                                               &pidata[length - remaining_length],
                                               remaining_length,
                                               lock_page,
                                               &nvm_hw_status);

                    /* Check for errors and warnings. */
                    errors_and_warnings = nvm_hw_status & (WRITE_ERROR_MASK | MSS_NVM_WRCNT_OVER);
                    if(errors_and_warnings)
                    {
                        /*
                         * Ensure that the status returned by the NVM_write()
                         * function is NVM_WRITE_THRESHOLD_WARNING if at least one
                         * of the written eNVM pages indicate a write over
                         * threshold condition.
                         */
                        status = get_error_code(nvm_hw_status);
                    }

                    /* Continue only on success or a mere threshold warning. */
                    if((NVM_SUCCESS == status) || (NVM_WRITE_THRESHOLD_WARNING == status ))
                    {
                        if(remaining_length > length_written)
                        {
                            remaining_length -= length_written;
                        }
                        else
                        {
                            remaining_length = 0u;
                        }
                    }
                    else
                    {
                        /* Hard error: abort the remaining writes. */
                        remaining_length = 0u;
                    }
                }
                /* Release eNVM controllers so that other masters can gain access to it. */
                release_ctrl_access();
            }
        }
        else
        {
            status = NVM_INVALID_PARAMETER;
        }
    }
    /* Restore back to original value. */
    SYSREG->ENVM_CR = initial_nvm_config;
    return status;
}
// Returns message corresponding to last errno std::string Socket::get_error_msg() { //Actually works on windows, but may be better use FormatMessage? return strerror(get_error_code()); }
/**************************************************************************//**
 * See mss_nvm.h for details of how to use this function.
 *
 * Writes 'length' bytes from 'pidata' into eNVM at 'start_addr',
 * optionally locking each written page.  Stops and returns the hardware
 * error code on the first failed page write.
 */
nvm_status_t NVM_write
(
    uint32_t start_addr,
    const uint8_t * pidata,
    uint32_t length,
    uint32_t lock_page
)
{
    nvm_status_t status;
    uint32_t nvm_offset;
    uint32_t device_version;

    /*
     * Prevent pages being locked for silicon versions which do not allow
     * locked pages to be unlocked.
     */
    device_version = SYSREG->DEVICE_VERSION;
    if((0x0000F802u == device_version) || (0x0001F802u == device_version))
    {
        lock_page = NVM_DO_NOT_LOCK_PAGE;
    }

    /* Ignore upper address bits to ignore remapping setting. */
    nvm_offset = start_addr & NVM_OFFSET_SIGNIFICANT_BITS;    /* Ignore remapping. */

    /* Check against attempt to write data larger than eNVM. */
    ASSERT((nvm_offset + length) < MAX_512K_OFFSET);
    if((nvm_offset + length) < MAX_512K_OFFSET)
    {
        /* Gain exclusive access to eNVM controller */
        status = get_ctrl_access(nvm_offset, length);

        /* Write eNVM one page at a time. */
        if(NVM_SUCCESS == status)
        {
            uint32_t remaining_length = length;

            while((remaining_length > 0u) && (NVM_SUCCESS == status))
            {
                uint32_t length_written;
                uint32_t nvm_hw_status = 0u;

                length_written = write_nvm(start_addr + (length - remaining_length),
                                           &pidata[length - remaining_length],
                                           remaining_length,
                                           lock_page,
                                           &nvm_hw_status);

                /* Zero bytes written signals a hardware failure. */
                if(0u == length_written)
                {
                    status = get_error_code(nvm_hw_status);
                }
                else if(remaining_length > length_written)
                {
                    remaining_length -= length_written;
                }
                else
                {
                    remaining_length = 0u;
                }
            }
            /* Release eNVM controllers so that other masters can gain access to it. */
            release_ctrl_access();
        }
    }
    else
    {
        status = NVM_INVALID_PARAMETER;
    }
    return status;
}
/*
 * UDP matchmaking server (lab 3).  Listens on the port given as argv[1].
 * The first valid client request is stored; the next valid request is
 * answered with the stored client's address and port, after which the
 * stored state is cleared.  Invalid requests get an error response.
 *
 * Fixes relative to the original:
 *  - the "no client waiting" response was allocated with
 *    sizeof(lab_3_client_waiting_response) — the wrong struct;
 *  - the per-iteration request/response allocations were never freed
 *    (one leak per datagram).
 */
int main(int argc, char *argv[]) // _M1
{
    int client_waiting = 0;
    int waiting_client_ip = -1;
    int waiting_client_port = -1;
    int sockfd;
    struct addrinfo hints, *servinfo, *p;
    int rv;
    int numbytes;
    struct sockaddr_storage their_addr;
    char buf[MAXBUFLEN];
    socklen_t addr_len;
    char s[INET6_ADDRSTRLEN];

    /* _M1 Begin */
    if (argc != 2) {
        fprintf(stderr,"usage: ServerUDP Port# \n");
        exit(1);
    }
    /* _M1 End*/

    memset(&hints, 0, sizeof hints);
    hints.ai_family = AF_UNSPEC; // set to AF_INET to force IPv4
    hints.ai_socktype = SOCK_DGRAM;
    hints.ai_flags = AI_PASSIVE; // use my IP

    if ((rv = getaddrinfo(NULL, argv[1] /* _M1 MYPORT */, &hints, &servinfo)) != 0) {
        fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(rv));
        return 1;
    }

    // loop through all the results and bind to the first we can
    for (p = servinfo; p != NULL; p = p->ai_next) {
        if ((sockfd = socket(p->ai_family, p->ai_socktype, p->ai_protocol)) == -1) {
            perror("listener: socket");
            continue;
        }
        if (bind(sockfd, p->ai_addr, p->ai_addrlen) == -1) {
            close(sockfd);
            perror("listener: bind");
            continue;
        }
        break;
    }

    if (p == NULL) {
        fprintf(stderr, "listener: failed to bind socket\n");
        return 2;
    }
    freeaddrinfo(servinfo);

    while (1) { // _M2
        printf("\n >>>> listener: waiting for a datagram...\n");
        addr_len = sizeof their_addr;
        lab_3_request *request = malloc(sizeof *request);
        if ((numbytes = recvfrom(sockfd, request, /*MAXBUFLEN*/sizeof(lab_3_request), 0,
                (struct sockaddr *)&their_addr, &addr_len)) == -1) {
            perror("recvfrom");
            exit(1);
        }

        printf("Received raw bytes:\n");
        displayBuffer((char *) request, numbytes);
        request_to_host_byte_order(request);
        printf("Bytes after endianness conversion:\n");
        displayBuffer((char *)request, numbytes);

        printf("Received from client:\n");
        printf("Magic number: %u\n", request->magic_number);
        printf("Group ID: %u\n", request->group_id);
        printf("Port Number: %u\n", request->port_number);

        printf("listener: got packet from %s\n",
            inet_ntop(their_addr.ss_family,
                get_in_addr((struct sockaddr *)&their_addr), s, sizeof s));
        printf("listener: packet is %d bytes long\n", numbytes);

        int error_code = get_error_code(request->group_id, request->port_number,
                                        request->magic_number);
        if (error_code) {
            /* Invalid request: report the validation error back. */
            lab_3_error_response *response = malloc(sizeof *response);
            response->magic_number = MAGIC_NUMBER;
            response->group_id = GROUP_ID;
            response->padding = PADDING;
            response->error_code = error_code;

            printf("Sending error response:\n");
            displayBuffer((char *)response, ERROR_RESPONSE_SIZE);
            printf("Magic Number: %u\n", response->magic_number);
            printf("Group ID: %u\n", response->group_id);
            printf("Padding: %u\n", response->padding);
            printf("Error Code: %u\n", response->error_code);

            error_to_network_byte_order(response);
            sendto(sockfd, response, ERROR_RESPONSE_SIZE, 0,
                (struct sockaddr *)&their_addr, addr_len);
            free(response);
        }
        else if (client_waiting) {
            /* A host is already waiting: send its address to this client. */
            lab_3_client_waiting_response *response = malloc(sizeof *response);
            response->magic_number = MAGIC_NUMBER;
            response->group_id = GROUP_ID;
            response->ip_address = waiting_client_ip;
            response->port_number = waiting_client_port;

            printf("\nSending response to matched player:");
            displayBuffer((char *)response, CLIENT_WAITING_RESPONSE_SIZE);
            printf("Magic Number: %u\n", response->magic_number);
            printf("Group ID: %u\n", response->group_id);
            printf("IP Address (in decimal): %u\n", response->ip_address);
            printf("Port Number: %u\n", response->port_number);

            client_waiting_response_to_network_byte_order(response);
            sendto(sockfd, response, CLIENT_WAITING_RESPONSE_SIZE, 0,
                (struct sockaddr *)&their_addr, addr_len);
            free(response);

            //Flush stored information
            client_waiting = 0;
            waiting_client_ip = -1;
            waiting_client_port = -1;
        }
        else {
            /* No host waiting yet: remember this client as the host. */
            client_waiting = -1;
            //TODO: verify this correctly converts IP addresses
            waiting_client_ip = get_ip_address(&their_addr);
            waiting_client_port = request->port_number;

            /* BUG FIX: was malloc(sizeof(lab_3_client_waiting_response)) —
             * the wrong struct for a no-client-waiting response. */
            lab_3_no_client_waiting_response *response = malloc(sizeof *response);
            response->magic_number = request->magic_number;
            response->group_id = request->group_id;
            response->port_number = request->port_number;

            printf("\nSending Response to new game host:");
            displayBuffer((char *)response, NO_CLIENT_WAITING_RESPONSE_SIZE);
            printf("Magic Number: %u\n", response->magic_number);
            printf("Group ID: %u\n", response->group_id);
            printf("Port Number: %u\n", response->port_number);

            no_client_waiting_response_to_network_byte_order(response);
            sendto(sockfd, response, NO_CLIENT_WAITING_RESPONSE_SIZE, 0,
                (struct sockaddr *)&their_addr, addr_len);
            free(response);
        }
        free(request);  /* fix: request was leaked every iteration */
    } // _M2

    close(sockfd);
    return 0;
}