/*
 * Map [first_page, first_page + npages) into vmm's address space starting
 * at addr, registering anonymous reverse mappings for present pages.
 * Caller must ensure addr lies inside an existing vmrange.
 * Returns 0 on success or the first mmap_page() error (already-mapped
 * pages from earlier iterations are left in place on failure).
 */
static int mmap_core(vmm_t *vmm, uintptr_t addr, page_idx_t first_page,
                     pgoff_t npages, kmap_flags_t flags)
{
    vmrange_t *vmr = vmrange_find(vmm, addr, addr + 1, NULL);
    pgoff_t idx;
    int status = 0;

    ASSERT(vmr != NULL);
    RPD_LOCK_WRITE(&vmm->rpd);
    for (idx = 0; idx < npages; idx++, first_page++, addr += PAGE_SIZE) {
        status = mmap_page(&vmm->rpd, addr, first_page, flags);
        if (status)
            break;

        if (likely(page_idx_is_present(first_page))) {
            page_frame_t *page = pframe_by_id(first_page);

            /* Pin before publishing the rmap so the frame cannot be
             * freed while it is reachable through the reverse map. */
            pin_page_frame(page);
            lock_page_frame(page, PF_LOCK);
            rmap_register_anon(page, vmm, addr);
            unlock_page_frame(page, PF_LOCK);
            page->offset = addr2pgoff(vmr, addr);
        }
    }

    RPD_UNLOCK_WRITE(&vmm->rpd);
    return status;
}
int main(int argc, char *argv[]) { unsigned char *mem = NULL; unsigned int page = 0, pos = 0; unsigned int offset = 0, data = 0; if(argc != 3) { printf("vocore command, used to set data to core.\n"); printf("usage: mems offset [data: 32bits]\n"); return 0; } // offset = atou(argv[1]) + 0x10000000; offset = atou(argv[1]); data = atou(argv[2]); printf("offset: 0x%08X, data: 0x%08X\n", offset, data); page = offset / PAGE_SIZE; pos = offset % PAGE_SIZE; mem = mmap_page(page); if(mem == NULL) { printf("can not map memory.\n"); return -1; } printf("old: 0x%08X\n", *(unsigned int *)(mem + pos)); *((unsigned int *)(mem + pos)) = data; printf("new: 0x%08X\n", *(unsigned int *)(mem + pos)); munmap(mem, PAGE_SIZE); return 0; }
/*
 * POSIX mmap() emulation on top of Win32 file mappings.
 * Unsupported: MAP_FIXED, PROT_EXEC, len == 0 — all fail with EINVAL.
 * For non-anonymous mappings `fildes` must be a valid CRT descriptor.
 * Returns the mapped address or MAP_FAILED with errno set.
 */
void* mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off)
{
    HANDLE fm, h;
    void* map = MAP_FAILED;
    /*
     * Widen to uint64_t before shifting: a plain `off >> 32` is undefined
     * behavior when off_t is only 32 bits wide, even inside the untaken
     * arm of a ternary — the shift amount must be < the operand width.
     */
    const uint64_t off64 = (uint64_t)off;
    const uint64_t maxSize = off64 + (uint64_t)len;
    const DWORD dwFileOffsetLow  = (DWORD)(off64 & 0xFFFFFFFFUL);
    const DWORD dwFileOffsetHigh = (DWORD)((off64 >> 32) & 0xFFFFFFFFUL);
    const DWORD dwMaxSizeLow  = (DWORD)(maxSize & 0xFFFFFFFFUL);
    const DWORD dwMaxSizeHigh = (DWORD)((maxSize >> 32) & 0xFFFFFFFFUL);
    const DWORD protect = mmap_page(prot);        /* PAGE_* protection */
    const DWORD desiredAccess = mmap_file(prot);  /* FILE_MAP_* access */

    (void)addr; /* placement hints are not supported */

    errno = 0;
    /* Reject unsupported flag combinations up front. */
    if (len == 0 || (flags & MAP_FIXED) != 0 || prot == PROT_EXEC) {
        errno = EINVAL;
        return MAP_FAILED;
    }

    h = ((flags & MAP_ANONYMOUS) == 0) ?
        (HANDLE)_get_osfhandle(fildes) : INVALID_HANDLE_VALUE;
    if ((flags & MAP_ANONYMOUS) == 0 && h == INVALID_HANDLE_VALUE) {
        errno = EBADF;
        return MAP_FAILED;
    }

    fm = CreateFileMapping(h, NULL, protect, dwMaxSizeHigh, dwMaxSizeLow, NULL);
    if (fm == NULL) {
        errno = windowsErrorToErrno(GetLastError());
        return MAP_FAILED;
    }

    map = MapViewOfFile(fm, desiredAccess, dwFileOffsetHigh, dwFileOffsetLow, len);
    /* The view keeps the mapping object alive; the handle can go now. */
    CloseHandle(fm);

    if (map == NULL) {
        errno = windowsErrorToErrno(GetLastError());
        return MAP_FAILED;
    }
    return map;
}
/*
 * POSIX mprotect() emulation: translate the POSIX protection bits to a
 * Win32 PAGE_* constant and apply it with VirtualProtect.
 * Returns 0 on success, -1 with errno set on failure.
 */
int mprotect(void *addr, size_t len, int prot)
{
    DWORD previous = 0;

    if (!VirtualProtect(addr, len, mmap_page(prot), &previous)) {
        errno = windowsErrorToErrno(GetLastError());
        return -1;
    }
    return 0;
}
/*
 * Resolve a copy-on-write fault at `addr` inside vmrange `vmr` by copying
 * `src_page` into `dst_page` and remapping the address to the copy.
 *
 * This function must be called after the page table lock is acquired.
 * It guarantees that src_page's refcount will be at least equal to 1.
 * This prevents tasks from simultaneously copying and (later)
 * freeing the same page. dst_page *must* be already pinned.
 *
 * Returns 0 on success (including the benign race below) or a negative
 * error from rmap_unregister_shared()/mmap_page()/rmap_register_anon().
 */
int handle_copy_on_write(vmrange_t *vmr, uintptr_t addr,
                         page_frame_t *dst_page, page_frame_t *src_page)
{
    int ret;
    vmm_t *vmm = vmr->parent_vmm;
    page_idx_t pidx;

    /* COW never applies to physical or unmapped-placeholder ranges. */
    ASSERT_DBG(!(vmr->flags & (VMR_PHYS | VMR_NONE)));
    ASSERT_DBG((addr >= vmr->bounds.space_start) &&
               (addr < vmr->bounds.space_end));
    pidx = vaddr_to_pidx(&vmm->rpd, addr);

    /*
     * Check if another thread hasn't already made all the work itself.
     * If so, there is nothing to do here anymore: the mapping no longer
     * points at src_page, so some other faulter won the race.
     */
    if (unlikely(pidx != pframe_number(src_page))) {
        return 0;
    }

    /* Copy the data first: the page table lock plus src_page's pin keep
     * the source stable while we duplicate it. */
    copy_page_frame(dst_page, src_page);
    dst_page->offset = src_page->offset;

    /* Drop the shared reverse mapping under the page-frame lock before
     * releasing our reference to the source page. */
    lock_page_frame(src_page, PF_LOCK);
    ret = rmap_unregister_shared(src_page, vmm, addr);
    if (ret) {
        unlock_page_frame(src_page, PF_LOCK);
        return ret;
    }

    unlock_page_frame(src_page, PF_LOCK);
    unpin_page_frame(src_page);

    /* Point the faulting address at the private copy. */
    ret = mmap_page(&vmm->rpd, addr, pframe_number(dst_page), vmr->flags);
    if (ret)
        return ret;

    /*
     * Ok, page is mapped now and we're free to register new anonymous
     * reverse mapping for it.
     */
    ret = rmap_register_anon(dst_page, vmm, addr);
    return ret;
}
/*
 * Map npages physical pages starting at first_page into the kernel root
 * page directory at virtual address va_from.  With KMAP_REMAP set, any
 * existing translation is torn down first.  Returns 0 on success or the
 * first mmap_page() error; earlier mappings are left intact on failure.
 */
int mmap_kern(uintptr_t va_from, page_idx_t first_page, pgoff_t npages, long flags)
{
    int status = 0;
    pgoff_t n;

    RPD_LOCK_WRITE(KERNEL_ROOT_PDIR());
    for (n = 0; n < npages; n++, va_from += PAGE_SIZE, first_page++) {
        /* Replace an already-present mapping only when asked to. */
        if (page_is_mapped(KERNEL_ROOT_PDIR(), va_from) && (flags & KMAP_REMAP))
            munmap_page(KERNEL_ROOT_PDIR(), va_from);

        status = mmap_page(KERNEL_ROOT_PDIR(), va_from, first_page, flags);
        if (status)
            break;
    }
    RPD_UNLOCK_WRITE(KERNEL_ROOT_PDIR());

    return status;
}
int main(void) { void *mptr; /* memory pointer */ pid_t pid1, pid2; cpu_set_t set, *setp = &set; int res; int *minimum_priority = (int*)&prio_min; *minimum_priority = sched_get_priority_min(policy); if (check_privs()) exit(-1); mptr = mmap_page(); /* Get a page of shared memory */ resource = (pthread_mutex_t*)mptr; /* point our lock to it */ mptr += sizeof(pthread_mutex_t); /* advance the memory pointer */ /* Initialize our mutex via the resource pointer */ init_shared_pthread_mutex(resource, PTHREAD_PRIO_INHERIT, policy); statep = (struct State*)mptr; mptr += sizeof(struct State); init_state(); /* Initialize the state structure */ statep->mutex = (pthread_mutex_t*)mptr; /* point the next lock to it */ mptr += sizeof(pthread_mutex_t); /* advance the memory pointer */ /* Initialize our State mutex */ init_shared_pthread_mutex(statep->mutex, PTHREAD_PRIO_NONE, policy); set_rt_prio(0, prio_min, policy); /* We restrict this program to the first cpu, inorder to increase * the likelihood of a priority inversion */ CPU_ZERO(setp); CPU_SET(0, setp); res = sched_setaffinity(0, sizeof(set), setp); if (res == -1) { int err = errno; err_msg("sched_setaffinity: "); err_exit(err, NULL); } pid1 = fork(); if (pid1 == -1) { perror("fork"); exit(1); } else if (pid1 != 0) { /* parent code */ low(pid1); } else { /* child code */ pid2 = fork(); /* parent code */ if (pid2 == -1) { perror("fork: "); exit(-1); } else if (pid2 != 0) { /* parent code */ high(pid2); } else { /* child code */ medium(); } } exit(0); }