/* Locates and clears a region for a new top level page table. */
void initialize_identity_maps(void)
{
	/* Init mapping_info with run-time function/buffer pointers. */
	mapping_info.alloc_pgt_page = alloc_pgt_page;
	mapping_info.context = &pgt_data;

	/*
	 * It should be impossible for this not to already be true,
	 * but since calling this a second time would rewind the other
	 * counters, let's just make sure this is reset too.
	 */
	pgt_data.pgt_buf_offset = 0;

	/*
	 * If we came here via startup_32(), cr3 will be _pgtable already
	 * and we must append to the existing area instead of entirely
	 * overwriting it.
	 */
	level4p = read_cr3();
	if (level4p == (unsigned long)_pgtable) {
		debug_putstr("booted via startup_32()\n");
		/* Keep the tables startup_32() built; hand out only the tail. */
		pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
		pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE;
		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
	} else {
		debug_putstr("booted via startup_64()\n");
		/* Whole buffer is free: wipe it and allocate a fresh top level. */
		pgt_data.pgt_buf = _pgtable;
		pgt_data.pgt_buf_size = BOOT_PGT_SIZE;
		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
		level4p = (unsigned long)alloc_pgt_page(&pgt_data);
	}
}
/*
 * Pretty-print every populated field of a t_info structure,
 * framed by a green "DEBUGATOR-INFO" banner.
 */
void debug_info(t_info *info)
{
	my_putstr("#==========\033[32mDEBUGATOR-INFO\033[0m==========#\n");
	if (info->expr != 0)
		debug_putstr("expr", info->expr);
	if (info->base != 0)
		debug_putstr("base", info->base);
	if (info->struc != 0)
		debug_putstr("structure", info->struc);
	if (info->baselen != 0)
		debug_put_nbr("baselen", info->baselen);
	if (info->size != 0)
		debug_put_nbr("size", info->size);
	if (info->var->a != 0)
		debug_put_nbr("var_a", info->var->a);
	if (info->var->b != 0)
		debug_put_nbr("var_b", info->var->b);
	if (info->var->c != 0)
		debug_put_nbr("var_c", info->var->c);
	if (info->var->d != 0)
		debug_put_nbr("var_d", info->var->d);
	if (info->var->e != 0)
		debug_put_nbr("var_e", info->var->e);
	my_putstr("#==================================#\n");
}
/*
 * Pretty-print the populated fields of a t_node list element,
 * framed by a green "DEBUGATOR-NODE" banner. Link pointers are
 * reported only as present ("ok"), not dereferenced.
 */
void debug_node(t_node *node)
{
	my_putstr("#==========\033[32mDEBUGATOR-NODE\033[0m==========#\n");
	if (node->str != 0)
		debug_putstr("str", node->str);
	if (node->i != 0)
		debug_put_nbr("i", node->i);
	if (node->next != 0)
		debug_putstr("next", "ok");
	if (node->prev != 0)
		debug_putstr("prev", "ok");
	my_putstr("#==================================#\n");
}
/*
 * Pretty-print the populated fields of a t_stack element,
 * framed by a green "DEBUGATOR-STACK" banner. Link pointers are
 * reported only as present ("ok"), not dereferenced.
 */
void debug_stack(t_stack *stack)
{
	my_putstr("#==========\033[32mDEBUGATOR-STACK\033[0m=========#\n");
	if (stack->i != 0)
		debug_put_nbr("i", stack->i);
	if (stack->start != 0)
		debug_put_nbr("start", stack->start);
	if (stack->end != 0)
		debug_put_nbr("end", stack->end);
	if (stack->side != 0)
		debug_put_nbr("side", stack->side);
	if (stack->next != 0)
		debug_putstr("next", "ok");
	if (stack->prev != 0)
		debug_putstr("prev", "ok");
	my_putstr("#==================================#\n");
}
/* Locates and clears a region for a new top level page table. */
void initialize_identity_maps(void)
{
	/* SME/SEV memory-encryption mask, OR'd into every mapping flag below. */
	unsigned long sev_me_mask = get_sev_encryption_mask();

	/* Init mapping_info with run-time function/buffer pointers. */
	mapping_info.alloc_pgt_page = alloc_pgt_page;
	mapping_info.context = &pgt_data;
	mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sev_me_mask;
	mapping_info.kernpg_flag = _KERNPG_TABLE | sev_me_mask;

	/*
	 * It should be impossible for this not to already be true,
	 * but since calling this a second time would rewind the other
	 * counters, let's just make sure this is reset too.
	 */
	pgt_data.pgt_buf_offset = 0;

	/*
	 * If we came here via startup_32(), cr3 will be _pgtable already
	 * and we must append to the existing area instead of entirely
	 * overwriting it.
	 *
	 * With 5-level paging, we use '_pgtable' to allocate the p4d page table,
	 * the top-level page table is allocated separately.
	 *
	 * p4d_offset(top_level_pgt, 0) would cover both the 4- and 5-level
	 * cases. On 4-level paging it's equal to 'top_level_pgt'.
	 */
	top_level_pgt = read_cr3_pa();
	if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) {
		debug_putstr("booted via startup_32()\n");
		/* Keep the tables startup_32() built; hand out only the tail. */
		pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
		pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE;
		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
	} else {
		debug_putstr("booted via startup_64()\n");
		/* Whole buffer is free: wipe it and allocate a fresh top level. */
		pgt_data.pgt_buf = _pgtable;
		pgt_data.pgt_buf_size = BOOT_PGT_SIZE;
		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
		top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data);
	}
}
/* * Allocates space for a page table entry, using struct alloc_pgt_data * above. Besides the local callers, this is used as the allocation * callback in mapping_info below. */ static void *alloc_pgt_page(void *context) { struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context; unsigned char *entry; /* Validate there is space available for a new page. */ if (pages->pgt_buf_offset >= pages->pgt_buf_size) { debug_putstr("out of pgt_buf in " __FILE__ "!?\n"); debug_putaddr(pages->pgt_buf_offset); debug_putaddr(pages->pgt_buf_size); return NULL; } entry = pages->pgt_buf + pages->pgt_buf_offset; pages->pgt_buf_offset += PAGE_SIZE; return entry; }
/*
 * Collect entropy for KASLR from whatever hardware sources are
 * available (RDRAND, RDTSC, falling back to the i8254 timer), XOR it
 * into a boot seed, then diffuse the bits with a circular multiply.
 * 'purpose' is only a label for the debug output.
 */
unsigned long kaslr_get_random_long(const char *purpose)
{
#ifdef CONFIG_X86_64
	const unsigned long mix_const = 0x5d6008cbf3848dd3UL;
#else
	const unsigned long mix_const = 0x3f39e593UL;
#endif
	unsigned long raw, random = get_boot_seed();
	bool use_i8254 = true;

	debug_putstr(purpose);
	debug_putstr(" KASLR using");

	if (has_cpuflag(X86_FEATURE_RDRAND)) {
		debug_putstr(" RDRAND");
		/* rdrand_long() can fail; only clear the fallback on success. */
		if (rdrand_long(&raw)) {
			random ^= raw;
			use_i8254 = false;
		}
	}

	if (has_cpuflag(X86_FEATURE_TSC)) {
		debug_putstr(" RDTSC");
		raw = rdtsc();

		random ^= raw;
		use_i8254 = false;
	}

	/* Legacy timer is used only when neither RDRAND nor TSC contributed. */
	if (use_i8254) {
		debug_putstr(" i8254");
		random ^= i8254();
	}

	/* Circular multiply for better bit diffusion */
	asm("mul %3"
	    : "=a" (random), "=d" (raw)
	    : "a" (random), "rm" (mix_const));
	random += raw;

	debug_putstr("...\n");

	return random;
}
/*
 * Validate a received BOOTP reply and apply its configuration: set our
 * host address, netmask, gateway and (when compiled in) DNS/NTP servers
 * from the vendor options, optionally persist everything to EEPROM, and
 * finally fire a TFTP download of the advertised boot file.
 */
void bootp_handle_reply(void)
{
	int i;
	struct bootp *pk = uip_appdata;

	if (pk->bp_op != BOOTREPLY)
		return; /* ugh? shouldn't happen */

	if (pk->bp_htype != HTYPE_ETHERNET)
		return;

	/* Reply must match both our transaction id and the magic cookie. */
	for (i = 0; i < 4; i++) {
		if (pk->bp_xid[i] != uip_udp_conn->appstate.bootp.xid[i])
			return; /* session id doesn't match */

		if (pk->bp_vend[i] != replycookie[i])
			return; /* reply cookie doesn't match */
	}

	/*
	 * looks like we have received a valid bootp reply,
	 * prepare to override eeprom configuration
	 */
	uip_ipaddr_t ips[5];
	memset(&ips, 0, sizeof(ips));

	/* extract our ip addresses, subnet-mask and gateway ... */
	memcpy(&ips[0], pk->bp_yiaddr, 4);
	uip_sethostaddr(&ips[0]);

	debug_printf("BOOTP: configured new ip address %d.%d.%d.%d\n",
	             ((unsigned char *) ips)[0],
	             ((unsigned char *) ips)[1],
	             ((unsigned char *) ips)[2],
	             ((unsigned char *) ips)[3]);

	/* Walk the tag/length/value vendor options; 0xFF terminates the list. */
	unsigned char *ptr = pk->bp_vend + 4;
	while (*ptr != 0xFF) {
		switch (*ptr) {
		case TAG_SUBNET_MASK:
			memcpy(&ips[1], &ptr[2], 4);
			uip_setnetmask(&ips[1]);
			break;

		case TAG_GATEWAY:
			memcpy(&ips[2], &ptr[2], 4);
			uip_setdraddr(&ips[2]);
			break;

#ifdef DNS_SUPPORT
		case TAG_DOMAIN_SERVER:
			memcpy(&ips[3], &ptr[2], 4);
			resolv_conf(&ips[3]);
			break;
#endif

#ifdef NTP_SUPPORT
		case TAG_NTP_SERVER:
			/* This will set the ntp connection to the server set by the bootp
			 * request */
			memcpy(&ips[4], &ptr[2], 4);
			ntp_conf(&ips[4]);
			break;
#endif
		}

		/* Advance past the tag byte, the length byte and the payload. */
		ptr = ptr + ptr[1] + 2;
	}

	/* Remove the bootp connection */
	uip_udp_remove(uip_udp_conn);

#ifdef BOOTP_TO_EEPROM_SUPPORT
	/* Persist the freshly learned configuration. */
	eeprom_save(ip, &ips[0], IPADDR_LEN);
	eeprom_save(netmask, &ips[1], IPADDR_LEN);
	eeprom_save(gateway, &ips[2], IPADDR_LEN);
#ifdef DNS_SUPPORT
	eeprom_save(dns_server, &ips[3], IPADDR_LEN);
#endif
#ifdef NTP_SUPPORT
	eeprom_save(ntp_server, &ips[4], IPADDR_LEN);
#endif
#endif /* BOOTP_TO_EEPROM_SUPPORT */

#ifdef DYNDNS_SUPPORT
	dyndns_update();
#endif

#if defined(TFTP_SUPPORT) && defined(BOOTLOADER_SUPPORT)
	if (pk->bp_file[0] == 0)
		return; /* no boot filename provided */

	debug_putstr("load:");
	debug_putstr(pk->bp_file);
	debug_putchar('\n');

	/* create tftp connection, which will fire the download request */
	uip_ipaddr_t ip;
	uip_ipaddr(&ip, pk->bp_siaddr[0], pk->bp_siaddr[1],
	           pk->bp_siaddr[2], pk->bp_siaddr[3]);
	tftp_fire_tftpomatic(&ip, pk->bp_file);
#endif /* TFTP_SUPPORT */
}
/*
 * TFTP state machine for the bootloader: serves flash contents to a
 * downloading client (RRQ/ACK path) and writes uploaded firmware blocks
 * to flash (WRQ/DATA path). Replies are built in place by rewriting the
 * incoming packet in uip_appdata; any protocol violation jumps to
 * error_out, which answers with a TFTP ERROR packet.
 */
void tftp_handle_packet(void)
{
	/*
	 * overwrite udp connection information (i.e. take from incoming packet)
	 */
	uip_ipaddr_copy(uip_udp_conn->ripaddr, BUF->srcipaddr);
	uip_udp_conn->rport = BUF->srcport;

	/*
	 * care for incoming tftp packet now ...
	 */
	uint16_t i;
	flash_base_t base;
	struct tftp_hdr *pk = uip_appdata;

	switch (HTONS(pk->type)) {
#ifndef TFTP_UPLOAD_ONLY
	/*
	 * streaming data back to the client (download) ...
	 */
	case 1: /* read request */
		uip_udp_conn->appstate.tftp.download = 1;
		uip_udp_conn->appstate.tftp.transfered = 0;
		uip_udp_conn->appstate.tftp.finished = 0;
		bootload_delay = 0; /* Stop bootloader. */
		goto send_data;

	case 4: /* acknowledgement */
		if (uip_udp_conn->appstate.tftp.download != 1)
			goto error_out;

		/* Accept only a re-ack of the current block or the next one. */
		if (HTONS(pk->u.ack.block) < uip_udp_conn->appstate.tftp.transfered
		    || (HTONS(pk->u.ack.block) >
		        uip_udp_conn->appstate.tftp.transfered + 1))
			goto error_out; /* ack out of order */

		uip_udp_conn->appstate.tftp.transfered = HTONS(pk->u.ack.block);

	send_data:
		if (uip_udp_conn->appstate.tftp.finished) {
			bootload_delay = CONF_BOOTLOAD_DELAY; /* Restart bootloader. */
			return; /* nothing more to do */
		}

		pk->type = HTONS(3); /* data packet */
		pk->u.data.block = HTONS(uip_udp_conn->appstate.tftp.transfered + 1);

		base = TFTP_BLOCK_SIZE * uip_udp_conn->appstate.tftp.transfered;

		/* base overflowed ! */
#if FLASHEND == UINT16_MAX
		if (uip_udp_conn->appstate.tftp.transfered && base == 0)
#else
		if (base > FLASHEND)
#endif
		{
			uip_udp_send(4); /* send empty packet to finish transfer */
			uip_udp_conn->appstate.tftp.finished = 1;
			return;
		}

		/* Copy one block of flash into the reply packet. */
		for (i = 0; i < TFTP_BLOCK_SIZE; i++)
			pk->u.data.data[i] = __pgm_read_byte(base + i);

		uip_udp_send(4 + TFTP_BLOCK_SIZE);
		uip_udp_conn->appstate.tftp.transfered++;
		break;
#endif /* not TFTP_UPLOAD_ONLY */

	/*
	 * streaming data from the client (firmware upload) ...
	 */
	case 2: /* write request */
		uip_udp_conn->appstate.tftp.download = 0;
		uip_udp_conn->appstate.tftp.transfered = 0;
		uip_udp_conn->appstate.tftp.finished = 0;

		pk->u.ack.block = HTONS(0);
		goto send_ack;

	case 3: /* data packet */
		bootload_delay = 0; /* Stop bootloader. */
		if (uip_udp_conn->appstate.tftp.download != 0)
			goto error_out;

		if (HTONS(pk->u.ack.block) < uip_udp_conn->appstate.tftp.transfered)
			goto error_out; /* too early */

		if (HTONS(pk->u.ack.block) == uip_udp_conn->appstate.tftp.transfered)
			goto send_ack; /* already handled */

		if (HTONS(pk->u.ack.block) >
		    uip_udp_conn->appstate.tftp.transfered + 1)
			goto error_out; /* too late */

		base = TFTP_BLOCK_SIZE * (HTONS(pk->u.ack.block) - 1);

		/* Pad a short (final) block with 0xFF before flashing it. */
		for (i = uip_datalen() - 4; i < TFTP_BLOCK_SIZE; i++)
			pk->u.data.data[i] = 0xFF; /* EOF reached, init rest */

		debug_putchar('.');

		/* Program the received block, one SPM page at a time. */
		for (i = 0; i < TFTP_BLOCK_SIZE / SPM_PAGESIZE; i++)
			flash_page(base + i * SPM_PAGESIZE,
			           pk->u.data.data + i * SPM_PAGESIZE);

		/* A short data packet marks the end of the transfer. */
		if (uip_datalen() < TFTP_BLOCK_SIZE + 4) {
			uip_udp_conn->appstate.tftp.finished = 1;
# ifdef TFTPOMATIC_SUPPORT
			bootload_delay = 1; /* ack, then start app */
# else
			bootload_delay = CONF_BOOTLOAD_DELAY; /* Restart bootloader. */
# endif
			debug_putstr("end\n");
		}

		uip_udp_conn->appstate.tftp.transfered = HTONS(pk->u.ack.block);

	send_ack:
		pk->type = HTONS(4);
		uip_udp_send(4); /* send ack */
		break;

	/*
	 * protocol errors
	 */
	error_out:
	case 5: /* error */
	default:
		pk->type = HTONS(5); /* data packet */
		pk->u.error.code = HTONS(0); /* undefined error code */
		pk->u.error.msg[0] = 0; /* yes, really expressive */
		uip_udp_send(5);
		break;
	}
}