/*
 * Unmap a previously mapped guest memory range.
 *
 * @uc:      engine handle
 * @address: start of the range; must be aligned to the target page size
 * @size:    length in bytes; must be a multiple of the target page size
 *
 * Returns UC_ERR_OK on success, UC_ERR_ARG on misaligned address/size,
 * UC_ERR_NOMEM if any part of the range is not currently mapped (or if
 * splitting a region fails).
 */
UNICORN_EXPORT
uc_err uc_mem_unmap(struct uc_struct *uc, uint64_t address, size_t size)
{
    MemoryRegion *mr;
    uint64_t addr;
    size_t count, len;

    if (size == 0)
        // nothing to unmap
        return UC_ERR_OK;

    // address must be aligned to uc->target_page_size
    if ((address & uc->target_page_align) != 0)
        return UC_ERR_ARG;

    // size must be multiple of uc->target_page_size
    // (fixed: was UC_ERR_MAP, but this is an invalid-argument condition and
    //  uc_mem_protect returns UC_ERR_ARG for the very same check)
    if ((size & uc->target_page_align) != 0)
        return UC_ERR_ARG;

    if (uc->mem_redirect) {
        address = uc->mem_redirect(address);
    }

    // check that user's entire requested block is mapped
    if (!check_mem_area(uc, address, size))
        return UC_ERR_NOMEM;

    // Now we know entire region is mapped, so do the unmap
    // We may need to split regions if this area spans adjacent regions
    addr = address;
    count = 0;
    while (count < size) {
        mr = memory_mapping(uc, addr);
        len = MIN(size - count, mr->end - addr);
        if (!split_region(uc, mr, addr, len, true))
            return UC_ERR_NOMEM;

        // if we can retrieve the mapping, then no splitting took place
        // so unmap here
        mr = memory_mapping(uc, addr);
        if (mr != NULL)
            uc->memory_unmap(uc, mr);

        count += len;
        addr += len;
    }

    return UC_ERR_OK;
}
/*
 * Copy @size bytes of guest memory starting at @address into @_bytes.
 * The range may span several adjacent memory regions; copying proceeds
 * region by region. Returns UC_ERR_OK only if the whole range was read,
 * UC_ERR_READ_UNMAPPED otherwise.
 */
UNICORN_EXPORT
uc_err uc_mem_read(uc_engine *uc, uint64_t address, void *_bytes, size_t size)
{
    uint8_t *dst = _bytes;
    size_t done = 0;

    if (uc->mem_redirect) {
        address = uc->mem_redirect(address);
    }

    if (!check_mem_area(uc, address, size))
        return UC_ERR_READ_UNMAPPED;

    // the requested range may cross region boundaries; read one chunk
    // per region until everything is copied or a read fails
    while (done < size) {
        MemoryRegion *region = memory_mapping(uc, address);
        size_t chunk;

        if (region == NULL)
            // hit an unmapped address mid-range
            break;

        chunk = MIN(size - done, region->end - address);
        if (uc->read_mem(&uc->as, address, dst, chunk) == false)
            break;

        done    += chunk;
        address += chunk;
        dst     += chunk;
    }

    return (done == size) ? UC_ERR_OK : UC_ERR_READ_UNMAPPED;
}
/*
 * Change the permissions of a mapped guest memory range.
 *
 * @uc:      engine handle
 * @address: start of the range; must be aligned to the target page size
 * @size:    length in bytes; must be a multiple of the target page size
 * @perms:   new permission bits (subset of UC_PROT_ALL)
 *
 * Returns UC_ERR_OK on success, UC_ERR_ARG on misaligned address/size or
 * invalid permission bits, UC_ERR_NOMEM if part of the range is unmapped.
 */
UNICORN_EXPORT
uc_err uc_mem_protect(struct uc_struct *uc, uint64_t address, size_t size, uint32_t perms)
{
    MemoryRegion *mr;
    uint64_t addr = address;
    size_t count, len;

    if (size == 0)
        // trivial case, no change
        return UC_ERR_OK;

    // address must be aligned to uc->target_page_size
    if ((address & uc->target_page_align) != 0)
        return UC_ERR_ARG;

    // size must be multiple of uc->target_page_size
    if ((size & uc->target_page_align) != 0)
        return UC_ERR_ARG;

    // check for only valid permissions
    if ((perms & ~UC_PROT_ALL) != 0)
        return UC_ERR_ARG;

    // apply the address redirect, consistent with uc_mem_map/uc_mem_unmap/
    // uc_mem_read/uc_mem_write (fixed: this function previously skipped it,
    // so redirected guests would protect the wrong physical range)
    if (uc->mem_redirect) {
        address = uc->mem_redirect(address);
    }

    // check that user's entire requested block is mapped
    if (!check_mem_area(uc, address, size))
        return UC_ERR_NOMEM;

    // Now we know entire region is mapped, so change permissions
    // We may need to split regions if this area spans adjacent regions
    addr = address;
    count = 0;
    while (count < size) {
        mr = memory_mapping(uc, addr);
        len = MIN(size - count, mr->end - addr);
        if (!split_region(uc, mr, addr, len, false))
            return UC_ERR_NOMEM;

        mr = memory_mapping(uc, addr);
        mr->perms = perms;
        // regions without UC_PROT_WRITE are enforced via read-only backing
        uc->readonly_mem(mr, (perms & UC_PROT_WRITE) == 0);

        count += len;
        addr += len;
    }

    return UC_ERR_OK;
}
// check if a memory area is mapped // this is complicated because an area can overlap adjacent blocks static bool check_mem_area(uc_engine *uc, uint64_t address, size_t size) { size_t count = 0, len; while(count < size) { MemoryRegion *mr = memory_mapping(uc, address); if (mr) { len = MIN(size - count, mr->end - address); count += len; address += len; } else // this address is not mapped in yet break; } return (count == size); }
/*
 * Copy @size bytes from @_bytes into guest memory at @address.
 * The range may span several adjacent memory regions. Write-protected
 * regions are temporarily made writable (this is the host tooling writing,
 * not guest code) and re-protected afterwards.
 *
 * Returns UC_ERR_OK only if the whole range was written,
 * UC_ERR_WRITE_UNMAPPED otherwise.
 */
UNICORN_EXPORT
uc_err uc_mem_write(uc_engine *uc, uint64_t address, const void *_bytes, size_t size)
{
    size_t count = 0, len;
    const uint8_t *bytes = _bytes;

    if (uc->mem_redirect) {
        address = uc->mem_redirect(address);
    }

    if (!check_mem_area(uc, address, size))
        return UC_ERR_WRITE_UNMAPPED;

    // memory area can overlap adjacent memory blocks
    while (count < size) {
        MemoryRegion *mr = memory_mapping(uc, address);
        if (mr) {
            uint32_t operms = mr->perms;
            if (!(operms & UC_PROT_WRITE)) // write protected
                // but this is not the program accessing memory, so temporarily mark writable
                uc->readonly_mem(mr, false);

            len = MIN(size - count, mr->end - address);
            if (uc->write_mem(&uc->as, address, bytes, len) == false) {
                // fixed: re-arm write protection before bailing out, otherwise
                // a failed write would leave the region permanently writable
                if (!(operms & UC_PROT_WRITE))
                    uc->readonly_mem(mr, true);
                break;
            }

            if (!(operms & UC_PROT_WRITE)) // write protected
                // now write protect it again
                uc->readonly_mem(mr, true);

            count += len;
            address += len;
            bytes += len;
        } else // this address is not mapped in yet
            break;
    }

    if (count == size)
        return UC_ERR_OK;
    else
        return UC_ERR_WRITE_UNMAPPED;
}
/*
 * Change the permissions of a mapped guest memory range.
 *
 * Address and size must be page-aligned and @perms restricted to
 * UC_PROT_ALL bits, otherwise UC_ERR_ARG. The whole range must already be
 * mapped, otherwise UC_ERR_NOMEM. Regions are split as needed so the new
 * permissions apply exactly to [address, address+size). If the change strips
 * UC_PROT_EXEC from any page, emulation is stopped so stale translated code
 * is not executed with the old permissions.
 */
UNICORN_EXPORT
uc_err uc_mem_protect(struct uc_struct *uc, uint64_t address, size_t size, uint32_t perms)
{
    bool exec_revoked = false;

    // empty range: nothing to do
    if (size == 0)
        return UC_ERR_OK;

    // reject misaligned address, misaligned size, or unknown permission bits
    if ((address & uc->target_page_align) != 0)
        return UC_ERR_ARG;
    if ((size & uc->target_page_align) != 0)
        return UC_ERR_ARG;
    if ((perms & ~UC_PROT_ALL) != 0)
        return UC_ERR_ARG;

    if (uc->mem_redirect) {
        address = uc->mem_redirect(address);
    }

    // the full range must be mapped before we touch anything
    if (!check_mem_area(uc, address, size))
        return UC_ERR_NOMEM;

    // walk the range region by region, splitting regions where the
    // requested area only partially covers them
    uint64_t cursor = address;
    size_t remaining = size;
    while (remaining > 0) {
        MemoryRegion *region = memory_mapping(uc, cursor);
        size_t span = MIN(remaining, region->end - cursor);

        if (!split_region(uc, region, cursor, span, false))
            return UC_ERR_NOMEM;

        region = memory_mapping(uc, cursor);

        // note if this change takes EXEC away from a page that had it
        if ((region->perms & UC_PROT_EXEC) != 0 && (perms & UC_PROT_EXEC) == 0)
            exec_revoked = true;

        region->perms = perms;
        uc->readonly_mem(region, (perms & UC_PROT_WRITE) == 0);

        cursor += span;
        remaining -= span;
    }

    // if EXEC permission is removed, then quit TB and continue at the same place
    if (exec_revoked) {
        uc->quit_request = true;
        uc_emu_stop(uc);
    }

    return UC_ERR_OK;
}