// mmap(2) implementation on top of Zircon VMOs and the root VMAR.
//
// Validates POSIX argument constraints, translates prot/flags into
// zx_vm_option_t bits, then either maps an anonymous VMO created here or
// delegates file-backed mappings to _mmap_file().  On failure, returns
// MAP_FAILED with errno set from the zx_status_t via the fail: switch.
void* __mmap(void* start, size_t len, int prot, int flags, int fd, off_t fd_off) {
  // POSIX: the file offset must be page-aligned.
  if (fd_off & (PAGE_SIZE - 1)) {
    errno = EINVAL;
    return MAP_FAILED;
  }
  if (len == 0) {
    errno = EINVAL;
    return MAP_FAILED;
  }
  // Cap the mapping so that (char*)end - (char*)start cannot overflow
  // ptrdiff_t.
  if (len >= PTRDIFF_MAX) {
    errno = ENOMEM;
    return MAP_FAILED;
  }
  // Exactly one of MAP_PRIVATE / MAP_SHARED must be given.
  if (!(flags & (MAP_PRIVATE | MAP_SHARED)) ||
      (flags & MAP_PRIVATE && flags & MAP_SHARED)) {
    errno = EINVAL;
    return MAP_FAILED;
  }

  // round up to page size
  len = (len + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

  // build zircon flags for this
  zx_vm_option_t zx_options = 0;
  zx_options |= (prot & PROT_READ) ? ZX_VM_PERM_READ : 0;
  zx_options |= (prot & PROT_WRITE) ? ZX_VM_PERM_WRITE : 0;
  zx_options |= (prot & PROT_EXEC) ? ZX_VM_PERM_EXECUTE : 0;

  size_t offset = 0;
  zx_status_t status = ZX_OK;
  if (flags & MAP_FIXED) {
    // ZX_VM_SPECIFIC places the mapping at an exact offset within the
    // root VMAR.  NOTE(review): unlike POSIX MAP_FIXED this presumably
    // fails rather than replacing an existing mapping — confirm whether
    // callers rely on clobbering semantics.
    zx_options |= ZX_VM_SPECIFIC;
    zx_info_vmar_t info;
    status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
                                 sizeof(info), NULL, NULL);
    // The requested address must lie inside the root VMAR; convert it to
    // a VMAR-relative offset.
    if (status < 0 || (uintptr_t)start < info.base) {
      goto fail;
    }
    offset = (uintptr_t)start - info.base;
  }

  zx_handle_t vmo;
  uintptr_t ptr = 0;
  if (flags & MAP_ANON) {
    // Anonymous mapping: back it with a fresh zero-filled VMO.
    if (_zx_vmo_create(len, 0, &vmo) < 0) {
      errno = ENOMEM;
      return MAP_FAILED;
    }
    _zx_object_set_property(vmo, ZX_PROP_NAME, mmap_vmo_name, strlen(mmap_vmo_name));
    if (flags & MAP_JIT) {
      // Exchange the handle for one with execute rights.  The input
      // handle is consumed by this call in all cases, so the failure
      // path below does not need to close it.
      status = _zx_vmo_replace_as_executable(vmo, ZX_HANDLE_INVALID, &vmo);
      if (status < 0) {
        goto fail;
      }
    }
  } else {
    // File-backed mapping: the fd-to-VMO translation lives in
    // _mmap_file(), which performs the map itself and yields the address.
    status = _mmap_file(offset, len, zx_options, flags, fd, fd_off, &ptr);
    if (status < 0) {
      goto fail;
    }
    return (void*) ptr;
  }

  status = _zx_vmar_map(_zx_vmar_root_self(), zx_options, offset, vmo, fd_off, len, &ptr);
  // The mapping (if any) keeps the VMO alive; the handle is no longer
  // needed either way.
  _zx_handle_close(vmo);
  // TODO: map this as shared if we ever implement forking
  if (status < 0) {
    goto fail;
  }
  return (void*)ptr;

fail:
  // Translate the Zircon status into the closest POSIX errno.
  switch(status) {
    case ZX_ERR_BAD_HANDLE: errno = EBADF; break;
    case ZX_ERR_NOT_SUPPORTED: errno = ENODEV; break;
    case ZX_ERR_ACCESS_DENIED: errno = EACCES; break;
    case ZX_ERR_NO_MEMORY: errno = ENOMEM; break;
    case ZX_ERR_INVALID_ARGS:
    case ZX_ERR_BAD_STATE:
    /* fallthrough */
    default: errno = EINVAL;
  }
  return MAP_FAILED;
}
/* Writer callback that appends profile data into a Zircon VMO which is
 * published to the system data sink.
 *
 * On first use it lazily creates the VMO, names it with the current
 * process KOID, publishes a duplicate handle (consumed by
 * __sanitizer_publish_data), and emits a symbolizer dumpfile markup line.
 * Subsequent calls grow the VMO and append at __llvm_profile_offset.
 *
 * \param This     unused writer context (ProfDataWriter convention).
 * \param IOVecs   array of (Data, ElmSize, NumElm) chunks to write.
 * \param NumIOVecs number of entries in IOVecs.
 * \returns 0 on success, (uint32_t)-1 on any failure.
 */
static uint32_t lprofVMOWriter(ProfDataWriter *This, ProfDataIOVec *IOVecs,
                               uint32_t NumIOVecs) {
  /* Allocate VMO if it hasn't been created yet. */
  if (__llvm_profile_vmo == ZX_HANDLE_INVALID) {
    /* Get information about the current process (we only need the KOID). */
    zx_info_handle_basic_t Info;
    zx_status_t Status =
        _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &Info,
                            sizeof(Info), NULL, NULL);
    if (Status != ZX_OK)
      return -1;

    /* Create VMO to hold the profile data. */
    Status = _zx_vmo_create(0, 0, &__llvm_profile_vmo);
    if (Status != ZX_OK) {
      /* The out-param is unspecified on failure; restore the sentinel so a
         later call can retry initialization instead of treating a garbage
         value as a valid handle. */
      __llvm_profile_vmo = ZX_HANDLE_INVALID;
      return -1;
    }

    /* Give the VMO a name including our process KOID so it's easy to spot. */
    char VmoName[ZX_MAX_NAME_LEN];
    snprintf(VmoName, sizeof(VmoName), "%s.%" PRIu64, ProfileSinkName,
             Info.koid);
    _zx_object_set_property(__llvm_profile_vmo, ZX_PROP_NAME, VmoName,
                            strlen(VmoName));

    /* Duplicate the handle since __sanitizer_publish_data consumes it. */
    zx_handle_t Handle;
    Status = _zx_handle_duplicate(__llvm_profile_vmo, ZX_RIGHT_SAME_RIGHTS,
                                  &Handle);
    if (Status != ZX_OK) {
      /* Don't leak the unpublished VMO or leave the global half-initialized
         (later calls would silently write into a VMO nobody can see):
         close it and reset the sentinel so initialization can be retried. */
      _zx_handle_close(__llvm_profile_vmo);
      __llvm_profile_vmo = ZX_HANDLE_INVALID;
      return -1;
    }

    /* Publish the VMO which contains profile data to the system. */
    __sanitizer_publish_data(ProfileSinkName, Handle);

    /* Use the dumpfile symbolizer markup element to write the name of VMO. */
    lprofWrite("LLVM Profile: {{{dumpfile:%s:%s}}}\n", ProfileSinkName,
               VmoName);
  }

  /* Compute the total length of data to be written. */
  size_t Length = 0;
  for (uint32_t I = 0; I < NumIOVecs; I++)
    Length += IOVecs[I].ElmSize * IOVecs[I].NumElm;

  /* Resize the VMO to ensure there's sufficient space for the data. */
  zx_status_t Status =
      _zx_vmo_set_size(__llvm_profile_vmo, __llvm_profile_offset + Length);
  if (Status != ZX_OK)
    return -1;

  /* Copy the data into the VMO.  An entry with a NULL Data pointer still
     advances the offset (the VMO pages there stay zero-filled). */
  for (uint32_t I = 0; I < NumIOVecs; I++) {
    size_t VecLength = IOVecs[I].ElmSize * IOVecs[I].NumElm;
    if (IOVecs[I].Data) {
      Status = _zx_vmo_write(__llvm_profile_vmo, IOVecs[I].Data,
                             __llvm_profile_offset, VecLength);
      if (Status != ZX_OK)
        return -1;
    }
    __llvm_profile_offset += VecLength;
  }

  return 0;
}
// Allocate and map the memory for a new thread: a TCB/TLS block plus a
// safe stack and an unsafe (SafeStack) stack, all carved out of a single
// VMO.  Returns the new thrd_t (which lives inside the mapped TCB via
// copy_tls) or NULL on failure, with all partial mappings unmapped and
// the VMO closed.
//
// requested_guard_size / requested_stack_size are rounded up to whole
// pages here (a zero guard request stays zero).  thread_name names the
// VMO; vmo_name, when non-NULL, is caller scratch space used to build a
// richer VMO name.  It is NULL for the initial thread (see the snprintf
// comment below).
__NO_SAFESTACK thrd_t __allocate_thread(size_t requested_guard_size,
                                        size_t requested_stack_size,
                                        const char* thread_name,
                                        char vmo_name[ZX_MAX_NAME_LEN]) {
  // Hold off dlopen (see release comment below) while we read global
  // TLS layout state.
  thread_allocation_acquire();

  const size_t guard_size =
      requested_guard_size == 0 ? 0 : round_up_to_page(requested_guard_size);
  const size_t stack_size = round_up_to_page(requested_stack_size);

  const size_t tls_size = libc.tls_size;
  const size_t tcb_size = round_up_to_page(tls_size);

  // One VMO backs the TCB plus both stacks, laid out back to back:
  // [tcb_size][safe stack][unsafe stack].
  const size_t vmo_size = tcb_size + stack_size * 2;
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(vmo_size, 0, &vmo);
  if (status != ZX_OK) {
    __thread_allocation_release();
    return NULL;
  }
  // Map the TCB region.  PAGE_SIZE is passed for the two trailing
  // map_block arguments here (vs. guard_size/0 for the stacks below) —
  // presumably guard sizes on either side; confirm against map_block.
  struct iovec tcb, tcb_region;
  if (map_block(_zx_vmar_root_self(), vmo, 0, tcb_size, PAGE_SIZE, PAGE_SIZE,
                &tcb, &tcb_region)) {
    __thread_allocation_release();
    _zx_handle_close(vmo);
    return NULL;
  }

  // Populate the TLS area and obtain the thread descriptor within it.
  thrd_t td = copy_tls(tcb.iov_base, tcb.iov_len);

  // At this point all our access to global TLS state is done, so we
  // can allow dlopen again.
  __thread_allocation_release();

  // For the initial thread, it's too early to call snprintf because
  // it's not __NO_SAFESTACK.
  if (vmo_name != NULL) {
    // For other threads, try to give the VMO a name that includes
    // the thrd_t value (and the TLS size if that fits too), but
    // don't use a truncated value since that would be confusing to
    // interpret.
    if (snprintf(vmo_name, ZX_MAX_NAME_LEN, "%s:%p/TLS=%#zx", thread_name, td,
                 tls_size) < ZX_MAX_NAME_LEN ||
        snprintf(vmo_name, ZX_MAX_NAME_LEN, "%s:%p", thread_name, td) <
            ZX_MAX_NAME_LEN)
      thread_name = vmo_name;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, thread_name, strlen(thread_name));

  // Map the safe stack; on failure, unmap the TCB region mapped above.
  if (map_block(_zx_vmar_root_self(), vmo, tcb_size, stack_size, guard_size, 0,
                &td->safe_stack, &td->safe_stack_region)) {
    _zx_vmar_unmap(_zx_vmar_root_self(), (uintptr_t)tcb_region.iov_base,
                   tcb_region.iov_len);
    _zx_handle_close(vmo);
    return NULL;
  }
  // Map the unsafe stack; on failure, unwind both earlier mappings.
  if (map_block(_zx_vmar_root_self(), vmo, tcb_size + stack_size, stack_size,
                guard_size, 0, &td->unsafe_stack, &td->unsafe_stack_region)) {
    _zx_vmar_unmap(_zx_vmar_root_self(),
                   (uintptr_t)td->safe_stack_region.iov_base,
                   td->safe_stack_region.iov_len);
    _zx_vmar_unmap(_zx_vmar_root_self(), (uintptr_t)tcb_region.iov_base,
                   tcb_region.iov_len);
    _zx_handle_close(vmo);
    return NULL;
  }
  // The mappings keep the VMO's pages alive; the handle itself is no
  // longer needed.
  _zx_handle_close(vmo);

  // Finish initializing the thread descriptor.
  td->tcb_region = tcb_region;
  td->locale = &libc.global_locale;
  td->head.tp = (uintptr_t)pthread_to_tp(td);
  td->abi.stack_guard = __stack_chk_guard;
  // Initial unsafe stack pointer: the top (highest address) of the
  // unsafe stack mapping.
  td->abi.unsafe_sp =
      (uintptr_t)td->unsafe_stack.iov_base + td->unsafe_stack.iov_len;
  return td;
}