/*
 * Identity-map one physical MMIO page into the current page tables.
 *
 * Walks PML4 -> PML3 (directory) -> PML2 (table) -> PML1 (page),
 * allocating any missing intermediate table from the bump allocator at
 * _page_offset.  Every entry on the path is created present + writable
 * with write-through and cache-disable set, as MMIO must not be cached.
 * An already-present leaf entry is left untouched.
 *
 * paddr:   physical address to map (identity mapping, page-granular).
 * returns: the normalized virtual address for paddr (numerically the
 *          canonical form of paddr, per page_normalize_vaddr).
 */
uint64 page_map_mmio(uint64 paddr){ // Do the identity map
    vaddr_t va;
    pm_t *pml3;
    pm_t *pml2;
    pm_t *pml1;
    va.raw = page_normalize_vaddr(paddr);

    /* Level 4: ensure the PML4 entry ("drawer") points at a PML3 table. */
    if (!_pml4[va.s.drawer_idx].s.present){
        /* Reserve the bump-allocator frame in the frame bitmap, then
         * carve a fresh zeroed 512-entry table out of it. */
        page_set_frame(_page_offset);
        pml3 = (pm_t *)_page_offset;
        mem_fill((uint8 *)pml3, sizeof(pm_t) * 512, 0);
        _pml4[va.s.drawer_idx].raw = (uint64)pml3;
        _pml4[va.s.drawer_idx].s.present = 1;
        _pml4[va.s.drawer_idx].s.writable = 1;
        _pml4[va.s.drawer_idx].s.write_through = 1;
        _pml4[va.s.drawer_idx].s.cache_disable = 1;
        _page_offset += (sizeof(pm_t) * 512);
    }
    /* Mask off the flag bits to recover the PML3 table address. */
    pml3 = (pm_t *)(_pml4[va.s.drawer_idx].raw & PAGE_MASK);

    /* Level 3: ensure the directory-pointer entry points at a PML2 table. */
    if (!pml3[va.s.directory_idx].s.present){
        page_set_frame(_page_offset);
        pml2 = (pm_t *)_page_offset;
        mem_fill((uint8 *)pml2, sizeof(pm_t) * 512, 0);
        pml3[va.s.directory_idx].raw = (uint64)pml2;
        pml3[va.s.directory_idx].s.present = 1;
        pml3[va.s.directory_idx].s.writable = 1;
        pml3[va.s.directory_idx].s.write_through = 1;
        pml3[va.s.directory_idx].s.cache_disable = 1;
        _page_offset += (sizeof(pm_t) * 512);
    }
    pml2 = (pm_t *)(pml3[va.s.directory_idx].raw & PAGE_MASK);

    /* Level 2: ensure the directory entry points at a PML1 page table. */
    if (!pml2[va.s.table_idx].s.present){
        page_set_frame(_page_offset);
        pml1 = (pm_t *)_page_offset;
        mem_fill((uint8 *)pml1, sizeof(pm_t) * 512, 0);
        pml2[va.s.table_idx].raw = (uint64)pml1;
        pml2[va.s.table_idx].s.present = 1;
        pml2[va.s.table_idx].s.writable = 1;
        pml2[va.s.table_idx].s.write_through = 1;
        pml2[va.s.table_idx].s.cache_disable = 1;
        _page_offset += (sizeof(pm_t) * 512);
    }
    pml1 = (pm_t *)(pml2[va.s.table_idx].raw & PAGE_MASK);

    /* Level 1: install the leaf mapping (physical frame + MMIO flags).
     * Existing mappings are deliberately not overwritten. */
    if (!pml1[va.s.page_idx].s.present){
        pml1[va.s.page_idx].raw = (paddr & PAGE_MASK);
        pml1[va.s.page_idx].s.present = 1;
        pml1[va.s.page_idx].s.writable = 1;
        pml1[va.s.page_idx].s.write_through = 1;
        pml1[va.s.page_idx].s.cache_disable = 1;
    }
    return va.raw;
}
/*
 * LP initialization: seed the simulation with g_mem_start_events events,
 * each carrying nbufs freshly allocated and filled memory buffers.
 *
 * s:  per-LP state; allocation/send statistics are updated here.
 * lp: the logical process being initialized.
 */
void mem_init(mem_state * s, tw_lp * lp)
{
    tw_event *e;
    tw_memory *b;
    int i;
    int j;

    for (i = 0; i < g_mem_start_events; i++)
    {
        e = tw_event_new(lp->gid, tw_rand_exponential(lp->rng, mean), lp);

        /* BUG FIX: the inner loop previously reused the outer index 'i',
         * clobbering the event counter so the outer loop terminated after
         * a single iteration whenever nbufs >= g_mem_start_events.  A
         * dedicated index 'j' restores the intended event count. */
        for (j = 0; j < nbufs; j++)
        {
            b = mem_alloc(lp);
            mem_fill(b);
            tw_event_memory_set(e, b, my_fd);
            s->stats.s_mem_alloc++;
        }

        tw_event_send(e);
        s->stats.s_sent++;
    }
}
/*
 * Event handler: consume and release the membufs attached to the inbound
 * event, then emit a replacement event — remote with probability
 * percent_remote, otherwise back to this LP — carrying nbufs newly
 * allocated, filled membufs.
 *
 * s:  per-LP state; recv/alloc/free statistics are updated here.
 * bf: bit field; c1 records whether the remote branch was taken
 *     (needed for reverse computation).
 * m:  inbound message payload (unused directly).
 * lp: the logical process handling the event.
 */
void mem_event_handler(mem_state * s, tw_bf * bf, mem_message * m, tw_lp * lp)
{
    tw_event *ev;
    tw_memory *buf;
    tw_lpid target;
    int n;

    s->stats.s_recv++;

    /* Detach every buffer carried by the inbound event, verify its
     * contents, and return it to the pool. */
    for (n = 0; n < nbufs; n++)
    {
        buf = tw_event_memory_get(lp);

        if (!buf)
            tw_error(TW_LOC, "Missing memory buffers: %d of %d", n + 1, nbufs);

        mem_verify(buf);
        tw_memory_free(lp, buf, my_fd);
        s->stats.s_mem_free++;
        s->stats.s_mem_get++;
    }

    /* Choose the destination LP.  The RNG call order (unif, then
     * integer, then exponential) is load-bearing for reproducibility. */
    if (tw_rand_unif(lp->rng) <= percent_remote)
    {
        bf->c1 = 1;
        target = tw_rand_integer(lp->rng, 0, ttl_lps - 1) + offset_lpid;
        if (target >= ttl_lps)
            target -= ttl_lps;
    }
    else
    {
        bf->c1 = 0;
        target = lp->gid;
    }

    ev = tw_event_new(target, tw_rand_exponential(lp->rng, mean), lp);

    /* Attach a fresh set of filled membufs to the outbound event. */
    for (n = 0; n < nbufs; n++)
    {
        buf = mem_alloc(lp);
        if (!buf)
            tw_error(TW_LOC, "no membuf allocated!");

        mem_fill(buf);
        tw_event_memory_set(ev, buf, my_fd);
        s->stats.s_mem_alloc++;
    }

    tw_event_send(ev);
}
/*
 * Create a new file entry in a YKFS filesystem.
 *
 * Entry layout (inferred from the offsets written below — confirm against
 * the on-disk format spec): bytes 0..63 hold the NUL-terminated name,
 * bytes 64..67 the data address, bytes 68..71 the size.
 *
 * ykfs:    base address of the filesystem image.
 * entry:   address of the entry slot to populate.
 * name:    NUL-terminated file name (truncated to 63 chars + NUL).
 * address: file data location, stored as uint32.
 * size:    file data size in bytes, stored as uint32.
 */
void ykfs_new_file(uintptr_t ykfs, uintptr_t entry, char* name, uintptr_t address, size_t size)
{
    ykfs_header_t* header = (ykfs_header_t*)ykfs;
    size_t name_len;

    if (!ykfs_check_format(ykfs)) {
        Output("\nInvalid filesystem format.");
        return;
    }

    /* Clear the 64-byte name field of the entry slot. */
    mem_fill((uint8_t*)entry, 64, 0);

    /* BUG FIX: the original passed `name + '\0'` (a no-op pointer add)
     * and `sizeof(name)` (size of a char*, not the string length).
     * Measure the actual string and clamp it so the copy — including
     * the terminating NUL — stays inside the 64-byte name field. */
    name_len = 0;
    while (name[name_len] != '\0')
        name_len++;
    if (name_len > 63)
        name_len = 63;

    memCopyRange(name, (char*)entry, name_len + 1);

    *(uint32_t*)(entry + 64) = address;
    *(uint32_t*)(entry + 68) = size;
}
void page_init(){ // Read E820 memory map and mark used regions e820map_t *mem_map = (e820map_t *)E820_LOC; // Sort memory map sort_e820(mem_map); // Single page (PML1 entry) holds 4KB of RAM uint64 page_count = INIT_MEM / PAGE_SIZE; if (INIT_MEM % PAGE_SIZE > 0){ page_count ++; } // Single table (PML2 entry) holds 2MB of RAM uint64 table_count = page_count / 512; if (page_count % 512 > 0){ table_count ++; } // Single directory (PML3 entry, directory table pointer) holds 1GB of RAM uint64 directory_count = table_count / 512; if (table_count % 512 > 0){ directory_count ++; } // Single drawer (PML4 entry) holds 512GB of RAM uint64 drawer_count = directory_count / 512; if (directory_count % 512 > 0){ drawer_count ++; } // Determine the end of PMLx structures to add new ones _page_offset += (sizeof(pm_t) * 512) * (1 + drawer_count + directory_count + table_count); // Calculate total frame count _page_count = _total_mem / PAGE_SIZE; // Allocate frame bitset at the next page boundary _page_frames = (uint64 *)_page_offset; // Clear bitset mem_fill((uint8 *)_page_count, _page_count / 8, 0); // Move offset further _page_offset += (_page_count / 8); // Align the offset if ((_page_offset & PAGE_MASK) != _page_offset){ _page_offset &= PAGE_MASK; _page_offset += PAGE_SIZE; } #if DEBUG == 1 debug_print(DC_WB, "Frames: %d", _page_count); #endif // Determine unsuable memory regions uint64 i; uint64 paddr_from; uint64 paddr_to; for (i = 0; i < mem_map->size; i ++){ if (mem_map->entries[i].type != kMemOk){ // Mark all unusable regions used paddr_from = (mem_map->entries[i].base & PAGE_MASK); paddr_to = ((mem_map->entries[i].base + mem_map->entries[i].length) & PAGE_MASK); while (paddr_from < paddr_to){ page_set_frame(paddr_from); paddr_from += PAGE_SIZE; } } } }
/*
 * Zero the entire entry table of a YKFS volume.  The table's location
 * comes from ykfs_get_entries() and its byte length from the header's
 * format.Length field.
 */
void ykfs_wipe_entries(uintptr_t ykfs)
{
    ykfs_header_t* hdr = (ykfs_header_t*)ykfs;
    uint8_t* entries = (uint8_t*)ykfs_get_entries(ykfs);

    mem_fill(entries, hdr->format.Length, 0);
}
/* K in a little-endian byte array */
/*
 * Scalar point multiplication on Curve25519 via the Montgomery ladder:
 * computes PublicKey = SecretKey * BasePoint.
 *
 * PublicKey: OUT, 32-byte little-endian x-coordinate of the result.
 * BasePoint: IN, 32-byte little-endian x-coordinate of the base point.
 * SecretKey: IN, scalar K, scanned from the most significant byte down.
 * len:       number of bytes of SecretKey to process.
 *
 * The ladder starts at the first set bit of K with a randomized
 * projective Z (edp_custom_blinding.zr) as a side-channel countermeasure,
 * then processes the remaining bits with the ECP_MONT ladder-step macro,
 * swapping the P/Q roles through the PP/QP pointer pairs.
 */
void ecp_PointMultiply(
    OUT U8 *PublicKey, IN const U8 *BasePoint, IN const U8 *SecretKey, IN int len)
{
    int i, j, k;
    U_WORD X[K_WORDS];
    XZ_POINT P, Q, *PP[2], *QP[2];

    ecp_BytesToWords(X, BasePoint);

    /* 1: P = (2k+1)G, Q = (2k+2)G */
    /* 0: Q = (2k+1)G, P = (2k)G */

    /* Find first non-zero bit */
    while (len-- > 0)
    {
        k = SecretKey[len];
        for (i = 0; i < 8; i++, k <<= 1)
        {
            /* P = kG, Q = (k+1)G */
            if (k & 0x80)
            {
                /* We have first non-zero bit
                // This is always bit 254 for keys created according to the spec.
                // Start with randomized base point
                */

                ecp_Add(P.Z, X, edp_custom_blinding.zr);   /* P.Z = random */
                ecp_MulReduce(P.X, X, P.Z);
                ecp_MontDouble(&Q, &P);

                PP[1] = &P; PP[0] = &Q;
                QP[1] = &Q; QP[0] = &P;

                /* Everything we reference in the below loop are on the stack
                // and already touched (cached)
                */

                /* Finish the remaining bits of the first byte. */
                while (++i < 8) { k <<= 1; ECP_MONT(7); }

                /* Full 8-bit ladder steps for every remaining byte. */
                while (len > 0)
                {
                    k = SecretKey[--len];
                    ECP_MONT(7);
                    ECP_MONT(6);
                    ECP_MONT(5);
                    ECP_MONT(4);
                    ECP_MONT(3);
                    ECP_MONT(2);
                    ECP_MONT(1);
                    ECP_MONT(0);
                }

                /* Convert back to affine: X = P.X / P.Z. */
                ecp_Inverse(Q.Z, P.Z);
                ecp_MulMod(X, P.X, Q.Z);
                ecp_WordsToBytes(PublicKey, X);
                return;
            }
        }
    }
    /* K is 0 */
    /* NOTE(review): other mem_fill calls in this file use the argument
     * order (dst, size, value); with that signature this call fills ZERO
     * bytes and leaves PublicKey unwritten for K == 0.  If mem_fill here
     * is memset-style (dst, value, count) this is correct — confirm the
     * prototype; if not, this should be mem_fill(PublicKey, 32, 0). */
    mem_fill(PublicKey, 0, 32);
}
/*
 * THC service loop for one SDMA client binding: receives mem_copy /
 * mem_fill / mem_copy_2d / mem_fill_2d requests, dispatches each to the
 * matching driver routine, and replies with its error code.  The loop
 * (and the binding's heap allocation) ends on the first unexpected
 * message.  Takes ownership of sv and frees it before returning.
 */
static void run_service(struct omap_sdma_thc_service_binding_t *sv)
{
    omap_sdma_service_msg_t msg;
    bool loop = true;

    // this is the bitmap of messages we are interested in receiving
    struct omap_sdma_service_selector selector = {
        .mem_copy = 1, .mem_copy_2d = 1,
        .mem_fill = 1, .mem_fill_2d = 1,
    };

    while (loop) {
        // receive any message
        sv->recv_any(sv, &msg, selector);

        errval_t reterr = SYS_ERR_OK;

        // dispatch it
        switch(msg.msg) {
        case omap_sdma_mem_copy:
            reterr = mem_copy(
                        msg.args.mem_copy.in.dst,
                        msg.args.mem_copy.in.src);
            sv->send.mem_copy(sv, reterr);
            break;
        case omap_sdma_mem_fill:
            reterr = mem_fill(
                        msg.args.mem_fill.in.dst,
                        msg.args.mem_fill.in.color);
            sv->send.mem_fill(sv, reterr);
            break;
        case omap_sdma_mem_copy_2d:
            reterr = mem_copy_2d(
                        msg.args.mem_copy_2d.in.dst,
                        msg.args.mem_copy_2d.in.src,
                        msg.args.mem_copy_2d.in.count,
                        msg.args.mem_copy_2d.in.transparent,
                        msg.args.mem_copy_2d.in.color);
            sv->send.mem_copy_2d(sv, reterr);
            break;
        case omap_sdma_mem_fill_2d:
            reterr = mem_fill_2d(
                        msg.args.mem_fill_2d.in.dst,
                        msg.args.mem_fill_2d.in.count,
                        msg.args.mem_fill_2d.in.color);
            sv->send.mem_fill_2d(sv, reterr);
            break;
        default:
            debug_printf("unexpected message: %d\n", msg.msg);
            loop = false;
            break;
        }
    }

    free(sv);
}

/*
 * Export the "sdma" service, then accept client bindings forever,
 * spawning an ASYNC run_service() instance per accepted binding.
 * (NOTE: the function body continues past the visible end of this
 * chunk — its closing brace is not shown here.)
 */
void start_service(void)
{
    errval_t err;

    struct omap_sdma_thc_export_info e_info;
    struct omap_sdma_thc_service_binding_t *sv;
    struct omap_sdma_binding *b;
    iref_t iref;

    err = omap_sdma_thc_export(&e_info, "sdma",
                             get_default_waitset(),
                             IDC_EXPORT_FLAGS_DEFAULT,
                             &iref);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "thc export failed");
    }

    DO_FINISH({
        while(true) {
            err = omap_sdma_thc_accept(&e_info, &b);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "thc accept failed");
            }

            sv = malloc(sizeof(struct omap_sdma_thc_service_binding_t));
            assert(sv != NULL);

            err = omap_sdma_thc_init_service(sv, b, b);
            if (err_is_fail(err)) {
                USER_PANIC_ERR(err, "thc init failed");
            }

            ASYNC({run_service(sv);});
        }
    });