/**
 * Initialize a new CFE reader using the provided memory object. Any resources held by a successfully initialized
 * instance must be freed via apigee_plcrash_async_cfe_reader_free();
 *
 * @param reader The reader instance to initialize.
 * @param mobj The memory object containing CFE data at the start address. This instance must survive for the lifetime
 * of the reader.
 * @param cputype The target architecture of the CFE data, encoded as a Mach-O CPU type. Interpreting CFE data is
 * architecture-specific, and Apple has not defined encodings for all supported architectures.
 *
 * @return Returns APIGEE_PLCRASH_ESUCCESS on success, APIGEE_PLCRASH_ENOTSUP if the CPU type or CFE version is
 * unsupported, or APIGEE_PLCRASH_EINVAL if the unwind info section header can not be mapped.
 */
apigee_plcrash_error_t apigee_plcrash_async_cfe_reader_init (apigee_plcrash_async_cfe_reader_t *reader, apigee_plcrash_async_mobject_t *mobj, cpu_type_t cputype) {
    reader->mobj = mobj;
    reader->cpu_type = cputype;

    /* Determine the expected encoding */
    switch (cputype) {
        case CPU_TYPE_X86:
        case CPU_TYPE_X86_64:
            reader->byteorder = apigee_plcrash_async_byteorder_little_endian();
            break;

        default:
            PLCF_DEBUG("Unsupported CPU type: %" PRIu32, cputype);
            return APIGEE_PLCRASH_ENOTSUP;
    }

    /* Fetch and verify the header */
    pl_vm_address_t base_addr = apigee_plcrash_async_mobject_base_address(mobj);
    struct unwind_info_section_header *header = apigee_plcrash_async_mobject_remap_address(mobj, base_addr, 0, sizeof(*header));
    if (header == NULL) {
        PLCF_DEBUG("Could not map the unwind info section header");
        return APIGEE_PLCRASH_EINVAL;
    }

    /* Verify the format version */
    uint32_t version = reader->byteorder->swap32(header->version);
    if (version != 1) {
        PLCF_DEBUG("Unsupported CFE version: %" PRIu32, version);
        return APIGEE_PLCRASH_ENOTSUP;
    }

    reader->header = *header;
    return APIGEE_PLCRASH_ESUCCESS;
}
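/*
 * Usage sketch (hypothetical): map a task's unwind info into a memory object and initialize a CFE reader over it.
 * This assumes the surrounding file's headers are already in scope; the unwind_info address/length parameters and
 * the apigee_plcrash_async_cfe_reader_free() call are assumptions based on the doc comment above, not code from
 * this excerpt.
 */
static apigee_plcrash_error_t demo_cfe_reader_usage (mach_port_t task, pl_vm_address_t unwind_info_addr, pl_vm_size_t unwind_info_len) {
    apigee_plcrash_async_mobject_t mobj;
    apigee_plcrash_async_cfe_reader_t reader;
    apigee_plcrash_error_t err;

    /* Map the target task's __unwind_info data into the local address space. */
    err = apigee_plcrash_async_mobject_init(&mobj, task, unwind_info_addr, unwind_info_len, true);
    if (err != APIGEE_PLCRASH_ESUCCESS)
        return err;

    /* Initialize the reader; only x86/x86-64 CPU types are supported above. */
    err = apigee_plcrash_async_cfe_reader_init(&reader, &mobj, CPU_TYPE_X86_64);
    if (err == APIGEE_PLCRASH_ESUCCESS) {
        /* ... perform CFE lookups against the reader here ... */
        apigee_plcrash_async_cfe_reader_free(&reader);
    }

    apigee_plcrash_async_mobject_free(&mobj);
    return err;
}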
/**
 * Remove a specific entry node from the list.
 *
 * @param deleted_node The node to be removed.
 *
 * @warning This method is not async safe.
 */
template <typename V> void async_list<V>::nasync_remove_node (node *deleted_node) {
    /* Lock the list from other writers. */
    OSSpinLockLock(&_write_lock); {
        /* Find the record. */
        node *item = _head;
        while (item != NULL) {
            if (item == deleted_node)
                break;
            
            item = item->_next;
        }
        
        /* If not found, nothing to do */
        if (item == NULL) {
            OSSpinLockUnlock(&_write_lock);
            return;
        }
        
        /*
         * Atomically make the item unreachable by readers.
         *
         * This serves as a synchronization point -- after the CAS, the item is no longer reachable via the list.
         */
        if (item == _head) {
            if (!OSAtomicCompareAndSwapPtrBarrier(item, item->_next, (void **) &_head)) {
                PLCF_DEBUG("Failed to remove image list head despite holding lock");
            }
        } else {
            /* There MUST be a non-NULL prev pointer, as this is not HEAD. */
            if (!OSAtomicCompareAndSwapPtrBarrier(item, item->_next, (void **) &item->_prev->_next)) {
                PLCF_DEBUG("Failed to remove image list item despite holding lock");
            }
        }
        
        /* Now that the item is unreachable, update the prev/tail pointers. These are never accessed without a lock,
         * and need not be updated atomically. */
        if (item->_next != NULL) {
            /* Item is not the tail (otherwise next would be NULL), so simply update the next item's prev pointer. */
            item->_next->_prev = item->_prev;
        } else {
            /* Item is the tail (next is NULL). Simply update the tail record. */
            _tail = item->_prev;
        }
        
        /* If a reader is active, place the node on the free list. The item is unreachable here when readers
         * aren't active, so if we have a 0 refcount, we can safely delete the item, and be sure that no
         * reader holds a reference to it. */
        if (_refcount > 0) {
            item->_prev = NULL;
            item->_next = _free;
            
            if (_free != NULL)
                _free->_prev = item;
            _free = item;
        } else {
            delete item;
        }
    } OSSpinLockUnlock(&_write_lock);
}
// PLFrameWalker API
plframe_error_t plframe_cursor_thread_init (plframe_cursor_t *cursor, thread_t thread, plcrash_async_image_list_t *image_list) {
    kern_return_t kr;
    ucontext_t *uap;
    
    /*
        Note: This code has been left untouched when implementing libunwind(3)
        usage, as 1) Apple's implementation of libunwind on x86_64 doesn't
        handle floating-point and vector registers, 2) libunwind's general API
        doesn't provide access to some of the other information retrieved here.
    */
    
    /* Perform basic initialization */
    uap = &cursor->_uap_data;
    uap->uc_mcontext = (void *) &cursor->_mcontext_data;
    
    /* Zero the signal mask */
    sigemptyset(&uap->uc_sigmask);
    
    /* Fetch the thread states */
    mach_msg_type_number_t state_count;
    
    /* Sanity check */
    assert(sizeof(cursor->_mcontext_data.__ss) == sizeof(x86_thread_state64_t));
    assert(sizeof(cursor->_mcontext_data.__es) == sizeof(x86_exception_state64_t));
    assert(sizeof(cursor->_mcontext_data.__fs) == sizeof(x86_float_state64_t));
    
    // thread state
    state_count = x86_THREAD_STATE64_COUNT;
    kr = thread_get_state(thread, x86_THREAD_STATE64, (thread_state_t) &cursor->_mcontext_data.__ss, &state_count);
    if (kr != KERN_SUCCESS) {
        PLCF_DEBUG("Fetch of x86-64 thread state failed with mach error: %d", kr);
        return PLFRAME_INTERNAL;
    }
    
    // floating point state
    state_count = x86_FLOAT_STATE64_COUNT;
    kr = thread_get_state(thread, x86_FLOAT_STATE64, (thread_state_t) &cursor->_mcontext_data.__fs, &state_count);
    if (kr != KERN_SUCCESS) {
        PLCF_DEBUG("Fetch of x86-64 float state failed with mach error: %d", kr);
        return PLFRAME_INTERNAL;
    }
    
    // exception state
    state_count = x86_EXCEPTION_STATE64_COUNT;
    kr = thread_get_state(thread, x86_EXCEPTION_STATE64, (thread_state_t) &cursor->_mcontext_data.__es, &state_count);
    if (kr != KERN_SUCCESS) {
        PLCF_DEBUG("Fetch of x86-64 exception state failed with mach error: %d", kr);
        return PLFRAME_INTERNAL;
    }
    
    /* Perform standard initialization and return result */
    return plframe_cursor_init(cursor, uap, image_list);
}
PLCR_CPP_BEGIN_ASYNC_NS

/**
 * @internal
 * @ingroup plcrash_async_dwarf_private
 * @defgroup plcrash_async_dwarf_private_opstream Generic DWARF Opcode Stream
 * @{
 */

/**
 * Initialize the DWARF opcode stream.
 *
 * @param mobj The memory object from which the expression opcodes will be read. This object must
 * remain valid for the lifetime of the opstream instance.
 * @param byteorder The byte order of the data referenced by @a mobj.
 * @param address The task-relative address within @a mobj at which the opcodes will be fetched.
 * @param offset An offset to be applied to @a address.
 * @param length The total length of the opcodes readable at @a address + @a offset.
 */
plcrash_error_t dwarf_opstream::init (plcrash_async_mobject_t *mobj,
                                      const plcrash_async_byteorder_t *byteorder,
                                      pl_vm_address_t address,
                                      pl_vm_off_t offset,
                                      pl_vm_size_t length)
{
    _mobj = mobj;
    _byteorder = byteorder;
    
    /* Calculate the start and end addresses */
    if (!plcrash_async_address_apply_offset(address, offset, &_start)) {
        PLCF_DEBUG("Offset overflows base address");
        return PLCRASH_EINVAL;
    }
    
    if (length > PL_VM_OFF_MAX || !plcrash_async_address_apply_offset(_start, length, &_end)) {
        PLCF_DEBUG("Length overflows base address");
        return PLCRASH_EINVAL;
    }
    
    /* Map in the full instruction range */
    _instr = plcrash_async_mobject_remap_address(mobj, _start, 0, _end - _start);
    if (_instr == NULL) {
        PLCF_DEBUG("Could not map the DWARF instructions; range falls outside mapped pages");
        return PLCRASH_EINVAL;
    }
    
    _instr_max = (uint8_t *) _instr + (_end - _start);
    _p = _instr;
    
    return PLCRASH_ESUCCESS;
}
/**
 * Prepend a new entry value to the list
 *
 * @param value The value to be prepended.
 *
 * @warning This method is not async safe.
 */
template <typename V> void async_list<V>::nasync_prepend (V value) {
    /* Lock the list from other writers. */
    OSSpinLockLock(&_write_lock); {
        /* Construct the new entry, or recycle an existing one. */
        node *new_node;
        if (_free != NULL) {
            /* Fetch a node from the free list */
            new_node = _free;
            new_node->reset(value);
            
            /* Update the free list */
            _free = _free->_next;
        } else {
            new_node = new node(value);
        }
        
        /* Issue a memory barrier to ensure a consistent view of the value. */
        OSMemoryBarrier();
        
        /* If this is the first entry, initialize the list. */
        if (_tail == NULL) {
            
            /* Update the list tail. This need not be done atomically, as tail is never accessed by a lockless reader. */
            _tail = new_node;
            
            /* Atomically update the list head; this will be iterated upon by lockless readers. */
            if (!OSAtomicCompareAndSwapPtrBarrier(NULL, new_node, (void **) (&_head))) {
                /* Should never occur */
                PLCF_DEBUG("An async image head was set with tail == NULL despite holding lock.");
            }
        }
        
        /* Otherwise, prepend to the head of the list */
        else {
            new_node->_next = _head;
            new_node->_prev = NULL;
            
            /* Update the prev pointers. This is never accessed without a lock, so no additional synchronization
             * is required here. */
            _head->_prev = new_node;

            /* Issue a memory barrier to ensure a consistent view of the nodes. */
            OSMemoryBarrier();

            /* Atomically slot the new record into place; this may be iterated on by a lockless reader. */
            if (!OSAtomicCompareAndSwapPtrBarrier(new_node->_next, new_node, (void **) (&_head))) {
                PLCF_DEBUG("Failed to prepend to image list despite holding lock");
            }
        }
    } OSSpinLockUnlock(&_write_lock);
}
// PLFrameWalker API
plframe_error_t plframe_cursor_thread_init (plframe_cursor_t *cursor, thread_t thread) {
    kern_return_t kr;
    ucontext_t *uap;
    
    /* Perform basic initialization */
    uap = &cursor->_uap_data;
    uap->uc_mcontext = (void *) &cursor->_mcontext_data;
    
    /* Zero the signal mask */
    sigemptyset(&uap->uc_sigmask);
    
    /* Fetch the thread states */
    mach_msg_type_number_t state_count;
    
    /* Sanity check */
    assert(sizeof(cursor->_mcontext_data.__ss) == sizeof(x86_thread_state64_t));
    assert(sizeof(cursor->_mcontext_data.__es) == sizeof(x86_exception_state64_t));
    assert(sizeof(cursor->_mcontext_data.__fs) == sizeof(x86_float_state64_t));
    
    // thread state
    state_count = x86_THREAD_STATE64_COUNT;
    kr = thread_get_state(thread, x86_THREAD_STATE64, (thread_state_t) &cursor->_mcontext_data.__ss, &state_count);
    if (kr != KERN_SUCCESS) {
        PLCF_DEBUG("Fetch of x86-64 thread state failed with mach error: %d", kr);
        return PLFRAME_INTERNAL;
    }
    
    // floating point state
    state_count = x86_FLOAT_STATE64_COUNT;
    kr = thread_get_state(thread, x86_FLOAT_STATE64, (thread_state_t) &cursor->_mcontext_data.__fs, &state_count);
    if (kr != KERN_SUCCESS) {
        PLCF_DEBUG("Fetch of x86-64 float state failed with mach error: %d", kr);
        return PLFRAME_INTERNAL;
    }
    
    // exception state
    state_count = x86_EXCEPTION_STATE64_COUNT;
    kr = thread_get_state(thread, x86_EXCEPTION_STATE64, (thread_state_t) &cursor->_mcontext_data.__es, &state_count);
    if (kr != KERN_SUCCESS) {
        PLCF_DEBUG("Fetch of x86-64 exception state failed with mach error: %d", kr);
        return PLFRAME_INTERNAL;
    }
    
    /* Perform standard initialization */
    plframe_cursor_init(cursor, uap);
    
    return PLFRAME_ESUCCESS;
}
/**
 * Write @a len bytes from @a data to @a fd, looping until all bytes are written
 * or an error occurs. For the local file system, only one call to write()
 * should be necessary.
 */
static ssize_t writen (int fd, const void *data, size_t len) {
    const void *p;
    size_t left;
    ssize_t written = 0;
    
    /* Loop until all bytes are written */
    p = data;
    left = len;
    while (left > 0) {
        if ((written = write(fd, p, left)) <= 0) {
            if (errno == EINTR) {
                // Try again
                written = 0;
            } else {
                PLCF_DEBUG("Error occured writing to crash log: %s", strerror(errno));
                return -1;
            }
        }
        
        left -= written;
        p += written;
    }
    
    return written;
}
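/*
 * Minimal usage sketch for the static writen() helper above: write a fixed header to an already-open descriptor.
 * The descriptor and payload are illustrative placeholders; only writen()'s contract (all bytes written, or -1 on
 * error) is relied upon here.
 */
static bool demo_writen_usage (int fd) {
    static const char header[] = "plcrash report\n";
    return writen(fd, header, sizeof(header) - 1) >= 0;
}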
/**
 * @internal
 * 32-bit implementation of plcrash_async_thread_state_get_regname()
 */
static char const *plcrash_async_thread_state_get_regname_32 (apigee_plcrash_regnum_t regnum) {
    /* All word-sized registers */
    switch (regnum) {
        case APIGEE_PLCRASH_X86_EAX:
            return "eax";
            
        case APIGEE_PLCRASH_X86_EDX:
            return "edx";
            
        case APIGEE_PLCRASH_X86_ECX:
            return "ecx";
            
        case APIGEE_PLCRASH_X86_EBX:
            return "ebx";
            
        case APIGEE_PLCRASH_X86_EBP:
            return "ebp";
            
        case APIGEE_PLCRASH_X86_ESI:
            return "esi";
            
        case APIGEE_PLCRASH_X86_EDI:
            return "edi";
            
        case APIGEE_PLCRASH_X86_ESP:
            return "esp";
            
        case APIGEE_PLCRASH_X86_EIP:
            return "eip";
            
        case APIGEE_PLCRASH_X86_EFLAGS:
            return "eflags";
            
        case APIGEE_PLCRASH_X86_TRAPNO:
            return "trapno";
            
        case APIGEE_PLCRASH_X86_CS:
            return "cs";
            
        case APIGEE_PLCRASH_X86_DS:
            return "ds";
            
        case APIGEE_PLCRASH_X86_ES:
            return "es";
            
        case APIGEE_PLCRASH_X86_FS:
            return "fs";
            
        case APIGEE_PLCRASH_X86_GS:
            return "gs";
            
        default:
            // Unsupported register
            break;
    }
    
    /* Unsupported register is an implementation error (checked in unit tests) */
    PLCF_DEBUG("Missing register name for register id: %d", regnum);
    abort();
}
/**
 * Free the memory mapping.
 *
 * @note Unlike most free() functions in this API, this function is async-safe.
 */
void plcrash_async_mobject_free (plcrash_async_mobject_t *mobj) {
    if (mobj->vm_address == 0x0)
        return;
    
    kern_return_t kt;
    if ((kt = vm_deallocate(mach_task_self(), mobj->vm_address, mobj->vm_length)) != KERN_SUCCESS)
        PLCF_DEBUG("vm_deallocate() failure: %d", kt);
}
/**
 * Iterate over the available Mach-O LC_CMD entries.
 *
 * @param image The image to iterate
 * @param previous The previously returned LC_CMD address value, or 0 to iterate from the first LC_CMD.
 * @return Returns the address of the next load_command on success, or NULL on failure.
 *
 * @note A returned command is guaranteed to be readable, and fully within mapped address space. If the command
 * can not be verified to have at least MAX(sizeof(struct load_command), cmd->cmdsize) bytes available, NULL will be
 * returned.
 */
void *apigee_plcrash_async_macho_next_command (apigee_plcrash_async_macho_t *image, void *previous) {
    struct load_command *cmd;

    /* On the first iteration, determine the LC_CMD offset from the Mach-O header. */
    if (previous == NULL) {
        /* Sanity check */
        if (image->byteorder->swap32(image->header.sizeofcmds) < sizeof(struct load_command)) {
            PLCF_DEBUG("Mach-O sizeofcmds is less than sizeof(struct load_command) in %s", image->name);
            return NULL;
        }

        return apigee_plcrash_async_mobject_remap_address(&image->load_cmds, image->header_addr, image->header_size, sizeof(struct load_command));
    }

    /* We need the size from the previous load command; first, verify the pointer. */
    cmd = previous;
    if (!apigee_plcrash_async_mobject_verify_local_pointer(&image->load_cmds, (uintptr_t) cmd, 0, sizeof(*cmd))) {
        PLCF_DEBUG("Failed to map LC_CMD at address %p in: %s", cmd, image->name);
        return NULL;
    }

    /* Advance to the next command */
    uint32_t cmdsize = image->byteorder->swap32(cmd->cmdsize);
    void *next = ((uint8_t *)previous) + cmdsize;

    /* Avoid walking off the end of the cmd buffer */
    if ((uintptr_t)next >= image->load_cmds.address + image->load_cmds.length)
        return NULL;

    /* Verify that it holds at least load_command */
    if (!apigee_plcrash_async_mobject_verify_local_pointer(&image->load_cmds, (uintptr_t) next, 0, sizeof(struct load_command))) {
        PLCF_DEBUG("Failed to map LC_CMD at address %p in: %s", cmd, image->name);
        return NULL;
    }

    /* Verify the actual size. */
    cmd = next;
    if (!apigee_plcrash_async_mobject_verify_local_pointer(&image->load_cmds, (uintptr_t) next, 0, image->byteorder->swap32(cmd->cmdsize))) {
        PLCF_DEBUG("Failed to map LC_CMD at address %p in: %s", cmd, image->name);
        return NULL;
    }

    return next;
}
/**
 * Close the backing file descriptor.
 */
bool plcrash_async_file_close (plcrash_async_file_t *file) {
    /* Flush any pending data */
    if (!plcrash_async_file_flush(file))
        return false;

    /* Close the file descriptor */
    if (close(file->fd) != 0) {
        PLCF_DEBUG("Error closing file: %s", strerror(errno));
        return false;
    }

    return true;
}
/**
 * Find the best-guess matching symbol name for a given @a pc address, using heuristics based on symbol and @a pc address locality.
 *
 * @param image The Mach-O image to search for this symbol.
 * @param strategy The look-up strategy to be used to find the symbol.
 * @param cache The task-specific cache to use for lookups.
 * @param pc The program counter (instruction pointer) address for which a symbol will be searched.
 * @param callback The callback to be issued when a matching symbol is found. If no symbol is found, the provided function will not be called, and an error other than PLCRASH_ESUCCESS
 * will be returned.
 * @param ctx The context to be provided to @a callback.
 *
 * @return Calls @a callback and returns PLCRASH_ESUCCESS if a matching symbol is found. Otherwise, returns one of the other defined plcrash_error_t error values.
 */
plcrash_error_t plcrash_async_find_symbol (plcrash_async_macho_t *image,
                                           plcrash_async_symbol_strategy_t strategy,
                                           plcrash_async_symbol_cache_t *cache,
                                           pl_vm_address_t pc,
                                           plcrash_async_found_symbol_cb callback,
                                           void *ctx)
{
    struct symbol_lookup_ctx lookup_ctx;
    plcrash_error_t machoErr = PLCRASH_ENOTFOUND;
    plcrash_error_t objcErr = PLCRASH_ENOTFOUND;

    lookup_ctx.symbol_address = 0x0;
    lookup_ctx.found = false;

    /* Perform lookups; our callbacks will only update the lookup_ctx if they find a better match than the
     * previously run callbacks */
    if (strategy & PLCRASH_ASYNC_SYMBOL_STRATEGY_SYMBOL_TABLE)
        machoErr = plcrash_async_macho_find_symbol_by_pc(image, pc, macho_symbol_callback, &lookup_ctx);
    
    if (strategy & PLCRASH_ASYNC_SYMBOL_STRATEGY_OBJC)
        objcErr = plcrash_async_objc_find_method(image, &cache->objc_cache, pc, objc_symbol_callback, &lookup_ctx);

    if (machoErr != PLCRASH_ESUCCESS && objcErr != PLCRASH_ESUCCESS) {
        PLCF_DEBUG("Could not find symbol for PC %" PRIx64 " image %p", (uint64_t) pc, image);
        PLCF_DEBUG("pl_async_macho_find_symbol error %d, pl_async_objc_find_method error %d", machoErr, objcErr);
        return machoErr;
    }

    /* Even if a symbol was found above, our callbacks could have errored out, in which case they would have
     * logged a debug message, not set 'found' */
    if (!lookup_ctx.found) {
        PLCF_DEBUG("Unexpected error occured in symbol lookup callbacks for PC %" PRIx64 "image %p; returning error", (uint64_t) pc, image);
        return PLCRASH_EINTERNAL;
    }

    callback(lookup_ctx.symbol_address, lookup_ctx.buffer, ctx);
    return PLCRASH_ESUCCESS;
}
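/*
 * Hypothetical caller sketch for plcrash_async_find_symbol(). The callback parameter types (address, symbol name,
 * context) are inferred from the callback invocation above; the image, cache, and pc values are assumed to have
 * been initialized elsewhere.
 */
static void demo_found_symbol_cb (pl_vm_address_t address, const char *name, void *ctx) {
    (void) ctx;
    PLCF_DEBUG("Found symbol %s starting at 0x%" PRIx64, name, (uint64_t) address);
}

static void demo_symbol_lookup (plcrash_async_macho_t *image, plcrash_async_symbol_cache_t *cache, pl_vm_address_t pc) {
    /* Search both the symbol table and the Objective-C metadata. */
    plcrash_error_t err = plcrash_async_find_symbol(image,
                                                    PLCRASH_ASYNC_SYMBOL_STRATEGY_SYMBOL_TABLE | PLCRASH_ASYNC_SYMBOL_STRATEGY_OBJC,
                                                    cache, pc, demo_found_symbol_cb, NULL);
    if (err != PLCRASH_ESUCCESS)
        PLCF_DEBUG("Symbol lookup failed: %d", err);
}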
/*
 * Loop over all test cases and the function pointers in each case's test list,
 * calling unwind_tester() on each. A non-zero return value indicates that the
 * test failed.
 */
bool unwind_test_harness (void) {
    for (struct unwind_test_case *tc = unwind_test_cases; tc->test_list != NULL; tc++) {
        global_harness_state.test_case = tc;
        for (void **tests = tc->test_list; *tests != NULL; tests++) {
            int ret;
            if ((ret = unwind_tester(*tests, &tc->expected_sp)) != 0) {
                PLCF_DEBUG("Tester returned error %d for %p", ret, *tests);
                __builtin_trap();
            }
        }
    }
    
    return true;
}
/**
 * Attempt to locate a symbol address and name for @a pc within @a image. This is performed using best-guess heuristics, and may
 * be incorrect.
 *
 * @param image The Mach-O image to search for @a pc
 * @param pc The PC value within the target process for which symbol information should be found.
 * @param symbol_cb A callback to be called if the symbol is found.
 * @param context Context to be passed to @a symbol_cb.
 *
 * @return Returns APIGEE_PLCRASH_ESUCCESS if the symbol is found. If the symbol is not found, @a symbol_cb will not be called.
 *
 * @todo Migrate this API to use the new non-callback based plcrash_async_macho_symtab_reader support for symbol (and symbol name)
 * reading.
 */
apigee_plcrash_error_t apigee_plcrash_async_macho_find_symbol_by_pc (apigee_plcrash_async_macho_t *image, pl_vm_address_t pc, apigee_pl_async_macho_found_symbol_cb symbol_cb, void *context) {
    apigee_plcrash_error_t retval;
    
    /* Initialize a symbol table reader */
    apigee_plcrash_async_macho_symtab_reader_t reader;
    retval = apigee_plcrash_async_macho_symtab_reader_init(&reader, image);
    if (retval != APIGEE_PLCRASH_ESUCCESS)
        return retval;

    /* Compute the on-disk PC. */
    pl_vm_address_t slide_pc = pc - image->vmaddr_slide;

    /* Walk the symbol table. */
    apigee_plcrash_async_macho_symtab_entry_t found_symbol;
    bool did_find_symbol;

    if (reader.symtab_global != NULL && reader.symtab_local != NULL) {
        /* dysymtab is available; use it to constrain our symbol search to the global and local sections of the symbol table. */
        apigee_plcrash_async_macho_find_best_symbol(&reader, slide_pc, reader.symtab_global, reader.nsyms_global, &found_symbol, NULL, &did_find_symbol);
        apigee_plcrash_async_macho_find_best_symbol(&reader, slide_pc, reader.symtab_local, reader.nsyms_local, &found_symbol, &found_symbol, &did_find_symbol);
    } else {
        /* If dysymtab is not available, search all symbols */
        apigee_plcrash_async_macho_find_best_symbol(&reader, slide_pc, reader.symtab, reader.nsyms, &found_symbol, NULL, &did_find_symbol);
    }

    /* No symbol found. */
    if (!did_find_symbol) {
        retval = APIGEE_PLCRASH_ENOTFOUND;
        goto cleanup;
    }

    /* Symbol found! */
    const char *sym_name = apigee_plcrash_async_macho_symtab_reader_symbol_name(&reader, found_symbol.n_strx);
    if (sym_name == NULL) {
        PLCF_DEBUG("Failed to read symbol name\n");
        retval = APIGEE_PLCRASH_EINVAL;
        goto cleanup;
    }

    /* Inform our caller */
    symbol_cb(found_symbol.normalized_value + image->vmaddr_slide, sym_name, context);

    // fall through to cleanup
    retval = APIGEE_PLCRASH_ESUCCESS;

cleanup:
    apigee_plcrash_async_macho_symtab_reader_free(&reader);
    return retval;
}
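/*
 * Hypothetical caller sketch: find the best-guess symbol for a PC within a single image. The callback parameter
 * types (address, name, context) are inferred from the symbol_cb invocation above.
 */
static void demo_macho_symbol_cb (pl_vm_address_t address, const char *name, void *ctx) {
    (void) ctx;
    PLCF_DEBUG("PC falls within %s (symbol address 0x%" PRIx64 ")", name, (uint64_t) address);
}

static void demo_macho_symbol_lookup (apigee_plcrash_async_macho_t *image, pl_vm_address_t pc) {
    apigee_plcrash_error_t err = apigee_plcrash_async_macho_find_symbol_by_pc(image, pc, demo_macho_symbol_cb, NULL);
    if (err != APIGEE_PLCRASH_ESUCCESS)
        PLCF_DEBUG("No symbol found for PC 0x%" PRIx64 ": %d", (uint64_t) pc, err);
}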
/**
 * Free the memory mapping.
 *
 * @note Unlike most free() functions in this API, this function is async-safe.
 */
void plcrash_async_mobject_free (plcrash_async_mobject_t *mobj) {
    kern_return_t kt;
    
#ifdef PL_HAVE_MACH_VM
    kt = mach_vm_deallocate(mach_task_self(), mobj->vm_address, mobj->vm_length);
#else
    kt = vm_deallocate(mach_task_self(), mobj->vm_address, mobj->vm_length);
#endif
    
    if (kt != KERN_SUCCESS)
        PLCF_DEBUG("vm_deallocate() failure: %d", kt);

    /* Decrement our task refcount */
    mach_port_mod_refs(mach_task_self(), mobj->task, MACH_PORT_RIGHT_SEND, -1);
}
/**
 * Flush all buffered bytes from the file buffer.
 */
bool plcrash_async_file_flush (plcrash_async_file_t *file) {
    /* Anything to do? */
    if (file->buflen == 0)
        return true;
    
    /* Write remaining */
    if (plcrash_async_writen(file->fd, file->buffer, file->buflen) < 0) {
        PLCF_DEBUG("Error occured writing to crash log: %s", strerror(errno));
        return false;
    }
    
    file->buflen = 0;
    
    return true;
}
/**
 * Write all bytes from @a data to the file buffer. Returns true on success,
 * or false if an error occurs.
 */
bool plcrash_async_file_write (plcrash_async_file_t *file, const void *data, size_t len) {
    /* Check and update output limit */
    if (file->limit_bytes != 0 && len + file->total_bytes > file->limit_bytes) {
        return false;
    } else if (file->limit_bytes != 0) {
        file->total_bytes += len;
    }

    /* Check if the buffer will fill */
    if (file->buflen + len > sizeof(file->buffer)) {
        /* Flush the buffer */
        if (plcrash_async_writen(file->fd, file->buffer, file->buflen) < 0) {
            PLCF_DEBUG("Error occured writing to crash log: %s", strerror(errno));
            return false;
        }
        
        file->buflen = 0;
    }
    
    /* Check if the new data fits within the buffer, if so, buffer it */
    if (len + file->buflen <= sizeof(file->buffer)) {
        plcrash_async_memcpy(file->buffer + file->buflen, data, len);
        file->buflen += len;
        
        return true;
        
    } else {
        /* Won't fit in the buffer, just write it */
        if (plcrash_async_writen(file->fd, data, len) < 0) {
            PLCF_DEBUG("Error occured writing to crash log: %s", strerror(errno));
            return false;
        }
        
        return true;
    } 
}
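/*
 * Hypothetical write path using the buffered file API above. The plcrash_async_file_t handle is assumed to have
 * been initialized elsewhere (its init routine is not part of this excerpt); only the write/close routines shown
 * in this file are used.
 */
static bool demo_write_report (plcrash_async_file_t *file, const void *report, size_t report_len) {
    if (!plcrash_async_file_write(file, report, report_len))
        return false;

    /* plcrash_async_file_close() flushes any buffered bytes before closing the descriptor. */
    return plcrash_async_file_close(file);
}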
PLCR_CPP_BEGIN_ASYNC_NS

/*
 * Shared new() implementation.
 */
static void *perform_new (size_t size, AsyncAllocator *allocator) {
    /* Try to allocate space for the instance. */
    void *buffer;
    plcrash_error_t err = allocator->alloc(&buffer, size);
    if (err != PLCRASH_ESUCCESS) {
        PLCF_DEBUG("async-safe new() allocation failed!");
        return NULL;
    }
    
    /* Return the buffer to be used for instance construction */
    return buffer;
}
/**
 * Find the first LC_CMD matching the given @a cmd type.
 *
 * @param image The image to search.
 * @param expectedCommand The LC_CMD type to find.
 *
 * @return Returns the address of the matching load_command on success, or NULL on failure.
 *
 * @note A returned command is guaranteed to be readable, and fully within mapped address space. If the command
 * can not be verified to have at least MAX(sizeof(struct load_command), cmd->cmdsize) bytes available, NULL will be
 * returned.
 */
void *apigee_plcrash_async_macho_find_command (apigee_plcrash_async_macho_t *image, uint32_t expectedCommand) {
    struct load_command *cmd = NULL;

    /* Iterate commands until we either find a match, or reach the end */
    while ((cmd = apigee_plcrash_async_macho_next_command(image, cmd)) != NULL) {
        /* Read the load command type */
        if (!apigee_plcrash_async_mobject_verify_local_pointer(&image->load_cmds, (uintptr_t) cmd, 0, sizeof(*cmd))) {
            PLCF_DEBUG("Failed to map LC_CMD at address %p in: %s", cmd, image->name);
            return NULL;
        }

        /* Return a match */
        if (image->byteorder->swap32(cmd->cmd) == expectedCommand) {
            return cmd;
        }
    }
    
    /* No match found */
    return NULL;
}
// called by test function
// we unwind through the test function
// and resume at caller (unwind_tester)
void uwind_to_main () {
    /* Invoke our handler with our current thread state; we use this state to try to roll back the tests
     * and verify that the expected registers are restored. */
    if (plcrash_async_thread_state_current(unwind_current_state, NULL) != PLCRASH_ESUCCESS) {
        __builtin_trap();
    }

    /* Now use libunwind to verify that our test data can be unwound successfully. This will unwind the current
     * thread to the unwind_tester, and we'll never return from this function */
#ifdef LIBUNWIND_VERIFICATION
    if (global_harness_state.test_case->skip_libunwind_verification)
        return;

    unw_cursor_t cursor;
	unw_context_t uc;
	
	unw_getcontext(&uc);
	unw_init_local(&cursor, &uc);
    
    /* Walk the frames until we hit the test function. Unlike our unwinder, the first frame is implicitly
     * available -- a step isn't required, and so we skip one call to unw_step(). */
    for (uint32_t i = 1; i < global_harness_state.test_case->intermediate_frames; i++) {
        int ret;
        if ((ret = unw_step(&cursor)) <= 0) {
            PLCF_DEBUG("Step %" PRIu32 " failed: %d", i, ret);
            __builtin_trap();
        }
    }


    /* Once inside the test implementation, resume */
    if (unw_step(&cursor) > 0) {
        unw_resume(&cursor);
    }

	/* This should be unreachable */
	__builtin_trap();
#endif
}
/**
 * Initialize the @a thread_state using thread state fetched from the given mach @a thread. If the thread is not
 * suspended, the fetched state may be inconsistent.
 *
 * All registers will be marked as available.
 *
 * @param thread_state The thread state to be initialized.
 * @param thread The thread from which to fetch thread state.
 *
 * @return Returns PLCRASH_ESUCCESS on success, or a standard plcrash_error_t code if an error occurs.
 */
plcrash_error_t plcrash_async_thread_state_mach_thread_init (plcrash_async_thread_state_t *thread_state, thread_t thread) {
    mach_msg_type_number_t state_count;
    kern_return_t kr;
    
#if defined(PLCRASH_ASYNC_THREAD_ARM_SUPPORT) && defined(PLCRASH_ASYNC_THREAD_ARM_UNIFIED_SUPPORT)
    /* Fetch the thread state */
    state_count = ARM_UNIFIED_THREAD_STATE_COUNT;
    kr = thread_get_state(thread, ARM_UNIFIED_THREAD_STATE, (thread_state_t) &thread_state->arm_state.thread, &state_count);
    if (kr != KERN_SUCCESS) {
        PLCF_DEBUG("Fetch of ARM thread state failed with Mach error: %d", kr);
        return PLCRASH_EINTERNAL;
    }
    
    /* Platform meta-data */
    thread_state->stack_direction = PLCRASH_ASYNC_THREAD_STACK_DIRECTION_DOWN;
    if (thread_state->arm_state.thread.ash.flavor == ARM_THREAD_STATE64) {
        thread_state->greg_size = 8;
    } else {
        thread_state->greg_size = 4;
    }

#elif defined(PLCRASH_ASYNC_THREAD_ARM_SUPPORT) && !defined(PLCRASH_ASYNC_THREAD_ARM_UNIFIED_SUPPORT)
    /* Legacy non-unified ARM32 thread state */
    // Sanity check to assert that the state32 and legacy state structures are identical.
    PLCF_ASSERT_STATIC(ARM_STATE_COUNT, ARM_THREAD_STATE32_COUNT == ARM_THREAD_STATE_COUNT);
    PLCF_ASSERT_STATIC(ARM_STATE_SIZE, sizeof(arm_thread_state_t) == sizeof(arm_thread_state32_t));
    
    state_count = ARM_THREAD_STATE_COUNT;
    kr = thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t) &thread_state->arm_state.thread.ts_32, &state_count);
    if (kr != KERN_SUCCESS) {
        PLCF_DEBUG("Fetch of ARM thread state failed with Mach error: %d", kr);
        return PLCRASH_EINTERNAL;
    }
    
    /* Configure the state header */
    thread_state->arm_state.thread.ash.flavor = ARM_THREAD_STATE32;
    thread_state->arm_state.thread.ash.count = ARM_THREAD_STATE32_COUNT;
    
    /* Platform meta-data */
    thread_state->stack_direction = PLCRASH_ASYNC_THREAD_STACK_DIRECTION_DOWN;
    thread_state->greg_size = 4;
    
#elif defined(PLCRASH_ASYNC_THREAD_X86_SUPPORT)
    /* Fetch the thread state */
    state_count = x86_THREAD_STATE_COUNT;
    kr = thread_get_state(thread, x86_THREAD_STATE, (thread_state_t) &thread_state->x86_state.thread, &state_count);
    if (kr != KERN_SUCCESS) {
        PLCF_DEBUG("Fetch of x86 thread state failed with Mach error: %d", kr);
        return PLCRASH_EINTERNAL;
    }
    
    /* Fetch the exception state */
    state_count = x86_EXCEPTION_STATE_COUNT;
    kr = thread_get_state(thread, x86_EXCEPTION_STATE, (thread_state_t) &thread_state->x86_state.exception, &state_count);
    if (kr != KERN_SUCCESS) {
        PLCF_DEBUG("Fetch of x86 exception state failed with Mach error: %d", kr);
        return PLCRASH_EINTERNAL;
    }
    
    /* Platform meta-data */
    thread_state->stack_direction = PLCRASH_ASYNC_THREAD_STACK_DIRECTION_DOWN;
    if (thread_state->x86_state.thread.tsh.flavor == x86_THREAD_STATE64) {
        thread_state->greg_size = 8;
    } else {
        thread_state->greg_size = 4;
    }

#else
#error Add platform support
#endif

    /* Mark all registers as available */
    memset(&thread_state->valid_regs, 0xFF, sizeof(thread_state->valid_regs));

    return PLCRASH_ESUCCESS;
}
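/*
 * Caller sketch (illustrative): suspend the target thread so the fetched register state is consistent, capture it,
 * then resume. thread_suspend()/thread_resume() are standard Mach calls; error handling is abbreviated.
 */
static plcrash_error_t demo_capture_thread_state (thread_t thread, plcrash_async_thread_state_t *thread_state) {
    if (thread_suspend(thread) != KERN_SUCCESS)
        return PLCRASH_EINTERNAL;

    plcrash_error_t err = plcrash_async_thread_state_mach_thread_init(thread_state, thread);

    thread_resume(thread);
    return err;
}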
/**
 * Initialize a new Mach-O binary image parser.
 *
 * @param image The image structure to be initialized.
 * @param task The Mach task in which the Mach-O image is loaded.
 * @param name The file name or path for the Mach-O image.
 * @param header The task-local address of the image's Mach-O header.
 *
 * @return PLCRASH_ESUCCESS on success. PLCRASH_EINVAL will be returned if the Mach-O file can not be parsed,
 * or PLCRASH_EINTERNAL if an error occurs reading from the target task.
 *
 * @warning This method is not async safe.
 */
apigee_plcrash_error_t apigee_plcrash_nasync_macho_init (apigee_plcrash_async_macho_t *image, mach_port_t task, const char *name, pl_vm_address_t header) {
    apigee_plcrash_error_t ret;

    /* Defaults checked in the error cleanup handler */
    bool mobj_initialized = false;
    bool task_initialized = false;
    image->name = NULL;

    /* Basic initialization */
    image->task = task;
    image->header_addr = header;
    image->name = strdup(name);

    mach_port_mod_refs(mach_task_self(), image->task, MACH_PORT_RIGHT_SEND, 1);
    task_initialized = true;

    /* Read in the Mach-O header */
    kern_return_t kt;
    if ((kt = apigee_plcrash_async_read_addr(image->task, image->header_addr, &image->header, sizeof(image->header))) != KERN_SUCCESS) {
        /* NOTE: The image struct must be fully initialized before returning here, as otherwise our _free() function
         * will crash */
        PLCF_DEBUG("Failed to read Mach-O header from 0x%" PRIx64 " for image %s, kern_error=%d", (uint64_t) image->header_addr, name, kt);
        ret = APIGEE_PLCRASH_EINTERNAL;
        goto error;
    }
    
    /* Set the default byte order*/
    image->byteorder = &apigee_plcrash_async_byteorder_direct;

    /* Parse the Mach-O magic identifier. */
    switch (image->header.magic) {
        case MH_CIGAM:
            // Enable byte swapping
            image->byteorder = &apigee_plcrash_async_byteorder_swapped;
            // Fall-through

        case MH_MAGIC:
            image->m64 = false;
            break;            
            
        case MH_CIGAM_64:
            // Enable byte swapping
            image->byteorder = &apigee_plcrash_async_byteorder_swapped;
            // Fall-through
            
        case MH_MAGIC_64:
            image->m64 = true;
            break;

        case FAT_CIGAM:
        case FAT_MAGIC:
            PLCF_DEBUG("%s called with an unsupported universal Mach-O archive in: %s", __func__, image->name);
            return APIGEE_PLCRASH_EINVAL;
            break;

        default:
            PLCF_DEBUG("Unknown Mach-O magic: 0x%" PRIx32 " in: %s", image->header.magic, image->name);
            return APIGEE_PLCRASH_EINVAL;
    }

    /* Save the header size */
    if (image->m64) {
        image->header_size = sizeof(struct mach_header_64);
    } else {
        image->header_size = sizeof(struct mach_header);
    }
    
    /* Map in header + load commands */
    pl_vm_size_t cmd_len = image->byteorder->swap32(image->header.sizeofcmds);
    pl_vm_size_t cmd_offset = image->header_addr + image->header_size;
    image->ncmds = image->byteorder->swap32(image->header.ncmds);

    ret = apigee_plcrash_async_mobject_init(&image->load_cmds, image->task, cmd_offset, cmd_len, true);
    if (ret != APIGEE_PLCRASH_ESUCCESS) {
        PLCF_DEBUG("Failed to map Mach-O load commands in image %s", image->name);
        goto error;
    } else {
        mobj_initialized = true;
    }

    /* Now that the image has been sufficiently initialized, determine the __TEXT segment size */
    void *cmdptr = NULL;
    image->text_size = 0x0;
    bool found_text_seg = false;
    while ((cmdptr = apigee_plcrash_async_macho_next_command_type(image, cmdptr, image->m64 ? LC_SEGMENT_64 : LC_SEGMENT)) != 0) {
        if (image->m64) {
            struct segment_command_64 *segment = cmdptr;
            if (!apigee_plcrash_async_mobject_verify_local_pointer(&image->load_cmds, (uintptr_t) segment, 0, sizeof(*segment))) {
                PLCF_DEBUG("LC_SEGMENT command was too short");
                ret = APIGEE_PLCRASH_EINVAL;
                goto error;
            }
            
            if (apigee_plcrash_async_strncmp(segment->segname, SEG_TEXT, sizeof(segment->segname)) != 0)
                continue;

            image->text_size = image->byteorder->swap64(segment->vmsize);
            image->text_vmaddr = image->byteorder->swap64(segment->vmaddr);
            found_text_seg = true;
            break;
        } else {
            struct segment_command *segment = cmdptr;
            if (!apigee_plcrash_async_mobject_verify_local_pointer(&image->load_cmds, (uintptr_t) segment, 0, sizeof(*segment))) {
                PLCF_DEBUG("LC_SEGMENT command was too short");
                ret = APIGEE_PLCRASH_EINVAL;
                goto error;
            }
            
            if (apigee_plcrash_async_strncmp(segment->segname, SEG_TEXT, sizeof(segment->segname)) != 0)
                continue;
            
            image->text_size = image->byteorder->swap32(segment->vmsize);
            image->text_vmaddr = image->byteorder->swap32(segment->vmaddr);
            found_text_seg = true;
            break;
        }
    }

    if (!found_text_seg) {
        PLCF_DEBUG("Could not find __TEXT segment!");
        ret = APIGEE_PLCRASH_EINVAL;
        goto error;
    }

    /* Compute the vmaddr slide */
    if (image->text_vmaddr < header) {
        image->vmaddr_slide = header - image->text_vmaddr;
    } else if (image->text_vmaddr > header) {
        image->vmaddr_slide = -((pl_vm_off_t) (image->text_vmaddr - header));
    } else {
        image->vmaddr_slide = 0;
    }

    return APIGEE_PLCRASH_ESUCCESS;
    
error:
    if (mobj_initialized)
        apigee_plcrash_async_mobject_free(&image->load_cmds);
    
    if (image->name != NULL)
        free(image->name);
    
    if (task_initialized)
        mach_port_mod_refs(mach_task_self(), image->task, MACH_PORT_RIGHT_SEND, -1);

    return ret;
}
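/*
 * Non-async usage sketch: initialize a parser for an image in the current task using dyld's image list. The
 * matching free routine (assumed to be apigee_plcrash_nasync_macho_free()) is not part of this excerpt.
 */
#include <mach-o/dyld.h> /* _dyld_get_image_name(), _dyld_get_image_header() */

static apigee_plcrash_error_t demo_parse_image (apigee_plcrash_async_macho_t *image, uint32_t dyld_index) {
    const char *name = _dyld_get_image_name(dyld_index);
    pl_vm_address_t header = (pl_vm_address_t) (uintptr_t) _dyld_get_image_header(dyld_index);

    apigee_plcrash_error_t err = apigee_plcrash_nasync_macho_init(image, mach_task_self(), name, header);
    if (err != APIGEE_PLCRASH_ESUCCESS)
        PLCF_DEBUG("Failed to initialize Mach-O parser for %s: %d", name, err);

    return err;
}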
/**
 * Find and map a named section within a named segment, initializing @a mobj.
 *
 * @param image The image to search for @a segname.
 * @param segname The name of the segment to search.
 * @param sectname The name of the section to map.
 * @param mobj The mobject to be initialized with a mapping of the section's data. It is the caller's responsibility to dealloc @a mobj after
 * a successful initialization.
 *
 * @return Returns PLCRASH_ESUCCESS on success, PLCRASH_ENOTFOUND if the section is not found, or an error result on failure.
 */
apigee_plcrash_error_t apigee_plcrash_async_macho_map_section (apigee_plcrash_async_macho_t *image, const char *segname, const char *sectname, apigee_plcrash_async_mobject_t *mobj) {
    struct segment_command *cmd_32;
    struct segment_command_64 *cmd_64;
    
    void *segment =  apigee_plcrash_async_macho_find_segment_cmd(image, segname);
    if (segment == NULL)
        return APIGEE_PLCRASH_ENOTFOUND;

    cmd_32 = segment;
    cmd_64 = segment;
    
    uint32_t nsects;
    uintptr_t cursor = (uintptr_t) segment;

    if (image->m64) {
        nsects = image->byteorder->swap32(cmd_64->nsects);
        cursor += sizeof(*cmd_64);
    } else {
        nsects = image->byteorder->swap32(cmd_32->nsects);
        cursor += sizeof(*cmd_32);
    }

    for (uint32_t i = 0; i < nsects; i++) {        
        struct section *sect_32 = NULL;
        struct section_64 *sect_64 = NULL;
       
        if (image->m64) {
            if (!apigee_plcrash_async_mobject_verify_local_pointer(&image->load_cmds, cursor, 0, sizeof(*sect_64))) {
                PLCF_DEBUG("Section table entry outside of expected range; searching for (%s,%s)", segname, sectname);
                return APIGEE_PLCRASH_EINVAL;
            }
            
            sect_64 = (void *) cursor;
            cursor += sizeof(*sect_64);
        } else {
            if (!apigee_plcrash_async_mobject_verify_local_pointer(&image->load_cmds, cursor, 0, sizeof(*sect_32))) {
                PLCF_DEBUG("Section table entry outside of expected range; searching for (%s,%s)", segname, sectname);
                return APIGEE_PLCRASH_EINVAL;
            }
            
            sect_32 = (void *) cursor;
            cursor += sizeof(*sect_32);
        }
        
        const char *image_sectname = image->m64 ? sect_64->sectname : sect_32->sectname;
        if (apigee_plcrash_async_strncmp(sectname, image_sectname, sizeof(sect_64->sectname)) == 0) {
            /* Calculate the in-memory address and size */
            pl_vm_address_t sectaddr;
            pl_vm_size_t sectsize;
            if (image->m64) {
                sectaddr = image->byteorder->swap64(sect_64->addr) + image->vmaddr_slide;
                sectsize = image->byteorder->swap64(sect_64->size);
            } else {
                sectaddr = image->byteorder->swap32(sect_32->addr) + image->vmaddr_slide;
                sectsize = image->byteorder->swap32(sect_32->size);
            }
            
            
            /* Perform and return the mapping */
            return apigee_plcrash_async_mobject_init(mobj, image->task, sectaddr, sectsize, true);
        }
    }
    
    return APIGEE_PLCRASH_ENOTFOUND;
}
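/*
 * Hypothetical usage: map a section by name (the segment/section names below are illustrative), use it, and
 * release the mapping as required on success.
 */
static apigee_plcrash_error_t demo_map_unwind_info (apigee_plcrash_async_macho_t *image) {
    apigee_plcrash_async_mobject_t mobj;

    apigee_plcrash_error_t err = apigee_plcrash_async_macho_map_section(image, "__TEXT", "__unwind_info", &mobj);
    if (err != APIGEE_PLCRASH_ESUCCESS)
        return err;

    /* ... read the mapped section contents via mobj ... */

    /* The caller must free the mapping after a successful initialization. */
    apigee_plcrash_async_mobject_free(&mobj);
    return APIGEE_PLCRASH_ESUCCESS;
}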
/**
 * Initialize a new decoded CFE entry using the provided encoded CFE data. Any resources held by a successfully
 * initialized instance must be freed via plcrash_async_cfe_entry_free();
 *
 * @param entry The entry instance to initialize.
 * @param cpu_type The target architecture of the CFE data, encoded as a Mach-O CPU type. Interpreting CFE data is
 * architecture-specific, and Apple has not defined encodings for all supported architectures.
 * @param encoding The CFE entry data, in the host's native byte order.
 *
 * @internal
 * This code supports sparse register lists for the EBP_FRAME and RBP_FRAME modes. It is unclear whether these
 * actually occur in the wild, but they are supported by Apple's unwinddump tool.
 */
apigee_plcrash_error_t apigee_plcrash_async_cfe_entry_init (apigee_plcrash_async_cfe_entry_t *entry, cpu_type_t cpu_type, uint32_t encoding) {
    apigee_plcrash_error_t ret;
    
    /* Target-neutral initialization */
    entry->cpu_type = cpu_type;
    entry->stack_adjust = 0;

    /* Perform target-specific decoding */
    if (cpu_type == CPU_TYPE_X86) {
        uint32_t mode = encoding & UNWIND_X86_MODE_MASK;
        switch (mode) {
            case UNWIND_X86_MODE_EBP_FRAME: {
                entry->type = APIGEE_PLCRASH_ASYNC_CFE_ENTRY_TYPE_FRAME_PTR;

                /* Extract the register frame offset */
                entry->stack_offset = -(EXTRACT_BITS(encoding, UNWIND_X86_EBP_FRAME_OFFSET) * sizeof(uint32_t));

                /* Extract the register values. They're stored as a bitfield of 3-bit values. We support
                 * sparse entries, but terminate the loop if no further entries remain. */
                uint32_t regs = EXTRACT_BITS(encoding, UNWIND_X86_EBP_FRAME_REGISTERS);
                entry->register_count = 0;
                for (uint32_t i = 0; i < PLCRASH_ASYNC_CFE_SAVED_REGISTER_MAX; i++) {
                    /* Check for completion */
                    uint32_t remaining = regs >> (3 * i);
                    if (remaining == 0)
                        break;

                    /* Map to the correct PLCrashReporter register name */
                    uint32_t reg = remaining & 0x7;
                    ret = apigee_plcrash_async_map_register_name(reg, &entry->register_list[i], cpu_type);
                    if (ret != APIGEE_PLCRASH_ESUCCESS) {
                        PLCF_DEBUG("Failed to map register value of %" PRIx32, reg);
                        return ret;
                    }

                    /* Update the register count */
                    entry->register_count++;
                }
                
                return APIGEE_PLCRASH_ESUCCESS;
            }

            case UNWIND_X86_MODE_STACK_IMMD:
            case UNWIND_X86_MODE_STACK_IND: {
                /* These two types are identical except for the interpretation of the stack offset and adjustment values */
                if (mode == UNWIND_X86_MODE_STACK_IMMD) {
                    entry->type = APIGEE_PLCRASH_ASYNC_CFE_ENTRY_TYPE_FRAMELESS_IMMD;
                    entry->stack_offset = EXTRACT_BITS(encoding, UNWIND_X86_FRAMELESS_STACK_SIZE) * sizeof(uint32_t);
                } else {
                    entry->type = APIGEE_PLCRASH_ASYNC_CFE_ENTRY_TYPE_FRAMELESS_INDIRECT;
                    entry->stack_offset = EXTRACT_BITS(encoding, UNWIND_X86_FRAMELESS_STACK_SIZE);
                    entry->stack_adjust = EXTRACT_BITS(encoding, UNWIND_X86_FRAMELESS_STACK_ADJUST) * sizeof(uint32_t);
                }

                /* Extract the register values */
                entry->register_count = EXTRACT_BITS(encoding, UNWIND_X86_FRAMELESS_STACK_REG_COUNT);
                uint32_t encoded_regs = EXTRACT_BITS(encoding, UNWIND_X86_FRAMELESS_STACK_REG_PERMUTATION);
                uint32_t decoded_regs[PLCRASH_ASYNC_CFE_SAVED_REGISTER_MAX];
                
                apigee_plcrash_async_cfe_register_decode(encoded_regs, entry->register_count, decoded_regs);
                
                /* Map to the correct PLCrashReporter register names */
                for (uint32_t i = 0; i < entry->register_count; i++) {
                    ret = apigee_plcrash_async_map_register_name(decoded_regs[i], &entry->register_list[i], cpu_type);
                    if (ret != APIGEE_PLCRASH_ESUCCESS) {
                        PLCF_DEBUG("Failed to map register value of %" PRIx32, entry->register_list[i]);
                        return ret;
                    }
                }

                return APIGEE_PLCRASH_ESUCCESS;
            }

            case UNWIND_X86_MODE_DWARF:
                entry->type = APIGEE_PLCRASH_ASYNC_CFE_ENTRY_TYPE_DWARF;

                /* Extract the register frame offset */
                entry->stack_offset = EXTRACT_BITS(encoding, UNWIND_X86_DWARF_SECTION_OFFSET);
                entry->register_count = 0;

                return APIGEE_PLCRASH_ESUCCESS;

            case 0:
                /* Handle a NULL encoding. This interpretation is derived from Apple's actual implementation; the correct interpretation of
                 * a 0x0 value is not defined in what documentation exists. */
                entry->type = APIGEE_PLCRASH_ASYNC_CFE_ENTRY_TYPE_NONE;
                entry->stack_offset = 0;
                entry->register_count = 0;
                return APIGEE_PLCRASH_ESUCCESS;
                
            default:
                PLCF_DEBUG("Unexpected entry mode of %" PRIx32, mode);
                return APIGEE_PLCRASH_ENOTSUP;
        }
        
        // Unreachable
        __builtin_trap();
        return APIGEE_PLCRASH_EINTERNAL;

    } else if (cpu_type == CPU_TYPE_X86_64) {
// PLFrameWalker API
const char *plframe_get_regname (plframe_regnum_t regnum) {
    switch (regnum) {
        case PLFRAME_X86_64_RAX:
            return "rax";

        case PLFRAME_X86_64_RBX:
            return "rbx";
            
        case PLFRAME_X86_64_RCX:
            return "rcx";
            
        case PLFRAME_X86_64_RDX:
            return "rdx";
            
        case PLFRAME_X86_64_RDI:
            return "rdi";
            
        case PLFRAME_X86_64_RSI:
            return "rsi";
            
        case PLFRAME_X86_64_RBP:
            return "rbp";
            
        case PLFRAME_X86_64_RSP:
            return "rsp";
            
        case PLFRAME_X86_64_R10:
            return "r10";
            
        case PLFRAME_X86_64_R11:
            return "r11";
            
        case PLFRAME_X86_64_R12:
            return "r12";
            
        case PLFRAME_X86_64_R13:
            return "r13";
            
        case PLFRAME_X86_64_R14:    
            return "r14";
            
        case PLFRAME_X86_64_R15:
            return "r15";
            
        case PLFRAME_X86_64_RIP:
            return "rip";
            
        case PLFRAME_X86_64_RFLAGS:
            return "rflags";
            
        case PLFRAME_X86_64_CS:
            return "cs";
            
        case PLFRAME_X86_64_FS:
            return "fs";
            
        case PLFRAME_X86_64_GS:
            return "gs";
            
        default:
            // Unsupported register
            break;
    }
    
    /* Unsupported register is an implementation error (checked in unit tests) */
    PLCF_DEBUG("Missing register name for register id: %d", regnum);
    abort();
}
/**
 * Initialize a new symbol table reader, mapping the LINKEDIT segment from @a image into the current process.
 *
 * @param reader The reader to be initialized.
 * @param image The image from which the symbol table will be mapped.
 *
 * @return On success, returns PLCRASH_ESUCCESS. On failure, one of the plcrash_error_t error values will be returned, and no
 * mapping will be performed.
 */
apigee_plcrash_error_t apigee_plcrash_async_macho_symtab_reader_init (apigee_plcrash_async_macho_symtab_reader_t *reader, apigee_plcrash_async_macho_t *image) {
    apigee_plcrash_error_t retval;

    /* Fetch the symtab commands, if available. */
    struct symtab_command *symtab_cmd = apigee_plcrash_async_macho_find_command(image, LC_SYMTAB);
    struct dysymtab_command *dysymtab_cmd = apigee_plcrash_async_macho_find_command(image, LC_DYSYMTAB);

    /* The symtab command is required */
    if (symtab_cmd == NULL) {
        PLCF_DEBUG("could not find LC_SYMTAB load command");
        return APIGEE_PLCRASH_ENOTFOUND;
    }
    
    /* Map in the __LINKEDIT segment, which includes the symbol and string tables */
    apigee_plcrash_error_t err = apigee_plcrash_async_macho_map_segment(image, "__LINKEDIT", &reader->linkedit);
    if (err != APIGEE_PLCRASH_ESUCCESS) {
        PLCF_DEBUG("plcrash_async_mobject_init() failure: %d in %s", err, image->name);
        return APIGEE_PLCRASH_EINTERNAL;
    }
    
    /* Determine the string and symbol table sizes. */
    uint32_t nsyms = image->byteorder->swap32(symtab_cmd->nsyms);
    size_t nlist_struct_size = image->m64 ? sizeof(struct nlist_64) : sizeof(struct nlist);
    size_t nlist_table_size = nsyms * nlist_struct_size;
    
    size_t string_size = image->byteorder->swap32(symtab_cmd->strsize);
    
    /* Fetch pointers to the symbol and string tables, and verify their size values */
    void *nlist_table;
    char *string_table;
    
    nlist_table = apigee_plcrash_async_mobject_remap_address(&reader->linkedit.mobj, reader->linkedit.mobj.task_address, (image->byteorder->swap32(symtab_cmd->symoff) - reader->linkedit.fileoff), nlist_table_size);
    if (nlist_table == NULL) {
        PLCF_DEBUG("plcrash_async_mobject_remap_address(mobj, %" PRIx64 ", %" PRIx64") returned NULL mapping __LINKEDIT.symoff in %s",
                   (uint64_t) reader->linkedit.mobj.address + image->byteorder->swap32(symtab_cmd->symoff), (uint64_t) nlist_table_size, image->name);
        retval = APIGEE_PLCRASH_EINTERNAL;
        goto cleanup;
    }
    
    string_table = apigee_plcrash_async_mobject_remap_address(&reader->linkedit.mobj, reader->linkedit.mobj.task_address, (image->byteorder->swap32(symtab_cmd->stroff) - reader->linkedit.fileoff), string_size);
    if (string_table == NULL) {
        PLCF_DEBUG("plcrash_async_mobject_remap_address(mobj, %" PRIx64 ", %" PRIx64") returned NULL mapping __LINKEDIT.stroff in %s",
                   (uint64_t) reader->linkedit.mobj.address + image->byteorder->swap32(symtab_cmd->stroff), (uint64_t) string_size, image->name);
        retval = APIGEE_PLCRASH_EINTERNAL;
        goto cleanup;
    }

    /* Initialize common elements. */
    reader->image = image;
    reader->string_table = string_table;
    reader->string_table_size = string_size;
    reader->symtab = nlist_table;
    reader->nsyms = nsyms;

    /* Initialize the local/global table pointers, if available */
    if (dysymtab_cmd != NULL) {
        /* dysymtab is available; use it to constrain our symbol search to the global and local sections of the symbol table. */
        
        uint32_t idx_syms_global = image->byteorder->swap32(dysymtab_cmd->iextdefsym);
        uint32_t idx_syms_local = image->byteorder->swap32(dysymtab_cmd->ilocalsym);
        
        uint32_t nsyms_global = image->byteorder->swap32(dysymtab_cmd->nextdefsym);
        uint32_t nsyms_local = image->byteorder->swap32(dysymtab_cmd->nlocalsym);
        
        /* Sanity check the symbol offsets to ensure they're within our known-valid ranges */
        if (idx_syms_global + nsyms_global > nsyms || idx_syms_local + nsyms_local > nsyms) {
            PLCF_DEBUG("iextdefsym=%" PRIx32 ", ilocalsym=%" PRIx32 " out of range nsym=%" PRIx32, idx_syms_global+nsyms_global, idx_syms_local+nsyms_local, nsyms);
            retval = APIGEE_PLCRASH_EINVAL;
            goto cleanup;
        }

        /* Initialize reader state */
        reader->nsyms_global = nsyms_global;
        reader->nsyms_local = nsyms_local;

        if (image->m64) {
            struct nlist_64 *n64 = nlist_table;
            reader->symtab_global = (pl_nlist_common *) (n64 + idx_syms_global);
            reader->symtab_local = (pl_nlist_common *) (n64 + idx_syms_local);
        } else {
            struct nlist *n32 = nlist_table;
            reader->symtab_global = (pl_nlist_common *) (n32 + idx_syms_global);
            reader->symtab_local = (pl_nlist_common *) (n32 + idx_syms_local);
        }        
    }

    return APIGEE_PLCRASH_ESUCCESS;
    
cleanup:
    apigee_plcrash_async_macho_mapped_segment_free(&reader->linkedit);
    return retval;
}
/**
 * Fetch the entry corresponding to @a index.
 *
 * @param reader The reader from which @a symtab was mapped.
 * @param symtab The symbol table to read.
 * @param index The index of the entry to return.
 *
 * @return Returns the decoded (byte-swapped) symbol table entry.
 *
 * @warning This function performs no bounds checking on @a index; it is the caller's responsibility to ensure
 * that an out-of-range entry is not read.
 */
apigee_plcrash_async_macho_symtab_entry_t apigee_plcrash_async_macho_symtab_reader_read (apigee_plcrash_async_macho_symtab_reader_t *reader, void *symtab, uint32_t index) {
    const apigee_plcrash_async_byteorder_t *byteorder = reader->image->byteorder;

    /* nlist_64 and nlist are identical other than the trailing address field, so we use
     * a union to share a common implementation of symbol lookup. The following asserts
     * provide a sanity-check of that assumption, in the case where this code is moved
     * to a new platform ABI. */
    {
#define pl_m_sizeof(type, field) sizeof(((type *)NULL)->field)
        
        PLCF_ASSERT(__offsetof(struct nlist_64, n_type) == __offsetof(struct nlist, n_type));
        PLCF_ASSERT(pl_m_sizeof(struct nlist_64, n_type) == pl_m_sizeof(struct nlist, n_type));
        
        PLCF_ASSERT(__offsetof(struct nlist_64, n_un.n_strx) == __offsetof(struct nlist, n_un.n_strx));
        PLCF_ASSERT(pl_m_sizeof(struct nlist_64, n_un.n_strx) == pl_m_sizeof(struct nlist, n_un.n_strx));
        
        PLCF_ASSERT(__offsetof(struct nlist_64, n_value) == __offsetof(struct nlist, n_value));
        
#undef pl_m_sizeof
    }

#define pl_sym_value(image, nl) (image->m64 ? image->byteorder->swap64((nl)->n64.n_value) : image->byteorder->swap32((nl)->n32.n_value))

    /* Perform 32-bit/64-bit dependent aliased pointer math. */
    pl_nlist_common *symbol;
    if (reader->image->m64) {
        symbol = (pl_nlist_common *) &(((struct nlist_64 *) symtab)[index]);
    } else {
        symbol = (pl_nlist_common *) &(((struct nlist *) symtab)[index]);
    }
    
    apigee_plcrash_async_macho_symtab_entry_t entry = {
        .n_strx = byteorder->swap32(symbol->n32.n_un.n_strx),
        .n_type = symbol->n32.n_type,
        .n_sect = symbol->n32.n_sect,
        .n_desc = byteorder->swap16(symbol->n32.n_desc),
        .n_value = pl_sym_value(reader->image, symbol)
    };
    
    /* Normalize the symbol address. We have to set the low-order bit ourselves for ARM THUMB functions. */
    if (entry.n_desc & N_ARM_THUMB_DEF)
        entry.normalized_value = (entry.n_value | 1);
    else
        entry.normalized_value = entry.n_value;
    
#undef pl_sym_value
    
    return entry;
}

/**
 * Given a string table offset for @a reader, returns a pointer to the validated, NUL-terminated string, or
 * NULL if the string does not fall entirely within the reader's mapped string table.
 *
 * @param reader The reader containing a mapped string table.
 * @param n_strx The index within the @a reader string table to a symbol name.
 */
const char *apigee_plcrash_async_macho_symtab_reader_symbol_name (apigee_plcrash_async_macho_symtab_reader_t *reader, uint32_t n_strx) {
    /*
     * It's possible, though unlikely, that the n_strx index value is invalid. To handle this,
     * we walk the string until \0 is hit, verifying that each byte falls entirely within the
     * mapped string table.
     *
     * TODO: Evaluate the efficiency of per-byte calls to plcrash_async_mobject_verify_local_pointer(). We should
     * probably validate whole pages at a time instead.
     */
    const char *sym_name = reader->string_table + n_strx;
    const char *p = sym_name;
    do {
        if (!apigee_plcrash_async_mobject_verify_local_pointer(&reader->linkedit.mobj, (uintptr_t) p, 0, 1)) {
            PLCF_DEBUG("End of mobject reached while walking string");
            return NULL;
        }
        p++;
    } while (*(p - 1) != '\0'); /* test the byte verified above; the byte at the incremented p is not yet validated */

    return sym_name;
}

/**
 * Free all mapped reader resources.
 *
 * @note Unlike most free() functions in this API, this function is async-safe.
 */
void apigee_plcrash_async_macho_symtab_reader_free (apigee_plcrash_async_macho_symtab_reader_t *reader) {
    apigee_plcrash_async_macho_mapped_segment_free(&reader->linkedit);
}
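
/*
 * Illustrative usage sketch (editorial addition, not part of the original implementation): walk an image's
 * symbol table, logging each symbol's name and THUMB-normalized address. The image type name
 * (apigee_plcrash_async_macho_t) and the initializer name and (reader, image) signature
 * (apigee_plcrash_async_macho_symtab_reader_init) are assumptions inferred from the _read()/_free() naming
 * above; the sketch is kept out of the build via #if 0.
 */
#if 0 /* example only */
static void example_dump_symbols (apigee_plcrash_async_macho_t *image) {
    apigee_plcrash_async_macho_symtab_reader_t reader;

    /* Map the image's symbol and string tables (assumed initializer). */
    if (apigee_plcrash_async_macho_symtab_reader_init(&reader, image) != APIGEE_PLCRASH_ESUCCESS)
        return;

    for (uint32_t i = 0; i < reader.nsyms; i++) {
        /* Decode the i'th entry; the loop bound provides the bounds checking that _read() does not. */
        apigee_plcrash_async_macho_symtab_entry_t entry =
            apigee_plcrash_async_macho_symtab_reader_read(&reader, reader.symtab, i);

        /* Resolve the symbol name; NULL is returned if n_strx falls outside the mapped string table. */
        const char *name = apigee_plcrash_async_macho_symtab_reader_symbol_name(&reader, entry.n_strx);
        if (name == NULL)
            continue;

        PLCF_DEBUG("symbol %s at 0x%" PRIx64, name, (uint64_t) entry.normalized_value);
    }

    apigee_plcrash_async_macho_symtab_reader_free(&reader);
}
#endif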
/**
 * @internal
 * 64-bit implementation of plcrash_async_thread_state_get_regname()
 */
static const char *plcrash_async_thread_state_get_regname_64 (apigee_plcrash_regnum_t regnum) {
    switch (regnum) {
        case APIGEE_PLCRASH_X86_64_RAX:
            return "rax";
            
        case APIGEE_PLCRASH_X86_64_RBX:
            return "rbx";
            
        case APIGEE_PLCRASH_X86_64_RCX:
            return "rcx";
            
        case APIGEE_PLCRASH_X86_64_RDX:
            return "rdx";
            
        case APIGEE_PLCRASH_X86_64_RDI:
            return "rdi";
            
        case APIGEE_PLCRASH_X86_64_RSI:
            return "rsi";
            
        case APIGEE_PLCRASH_X86_64_RBP:
            return "rbp";
            
        case APIGEE_PLCRASH_X86_64_RSP:
            return "rsp";
            
        case APIGEE_PLCRASH_X86_64_R8:
            return "r8";
            
        case APIGEE_PLCRASH_X86_64_R9:
            return "r9";
            
        case APIGEE_PLCRASH_X86_64_R10:
            return "r10";
            
        case APIGEE_PLCRASH_X86_64_R11:
            return "r11";
            
        case APIGEE_PLCRASH_X86_64_R12:
            return "r12";
            
        case APIGEE_PLCRASH_X86_64_R13:
            return "r13";
            
        case APIGEE_PLCRASH_X86_64_R14:
            return "r14";
            
        case APIGEE_PLCRASH_X86_64_R15:
            return "r15";
            
        case APIGEE_PLCRASH_X86_64_RIP:
            return "rip";
            
        case APIGEE_PLCRASH_X86_64_RFLAGS:
            return "rflags";
            
        case APIGEE_PLCRASH_X86_64_CS:
            return "cs";
            
        case APIGEE_PLCRASH_X86_64_FS:
            return "fs";
            
        case APIGEE_PLCRASH_X86_64_GS:
            return "gs";
            
        default:
            /* Unsupported register */
            break;
    }
    
    /* An unsupported register is an implementation error (verified by the unit tests) */
    PLCF_DEBUG("Missing register name for register id: %d", regnum);
    abort();
}
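
/*
 * Illustrative sketch (editorial addition): how a caller might use the public accessor documented above to
 * dump a thread's general-purpose registers. The accessor names plcrash_async_thread_state_get_reg_count()
 * and the (thread_state, regnum) signature of plcrash_async_thread_state_get_regname() are assumptions about
 * the surrounding thread-state API; plcrash_async_thread_state_get_reg() is used as it appears later in this
 * file. The sketch is kept out of the build via #if 0.
 */
#if 0 /* example only */
static void example_dump_registers (const plcrash_async_thread_state_t *thread_state) {
    /* Assumed accessor returning the number of registers defined for the target architecture. */
    size_t count = plcrash_async_thread_state_get_reg_count(thread_state);

    for (size_t i = 0; i < count; i++) {
        plcrash_regnum_t regnum = (plcrash_regnum_t) i;

        /* Log the register's symbolic name and current value. */
        plcrash_greg_t value = plcrash_async_thread_state_get_reg(thread_state, regnum);
        PLCF_DEBUG("%s = 0x%" PRIx64,
                   plcrash_async_thread_state_get_regname(thread_state, regnum),
                   (uint64_t) value);
    }
}
#endif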
/**
 * Attempt to fetch next frame using compact frame unwinding data from @a image_list.
 *
 * @param task The task containing the target frame stack.
 * @param image_list The list of images loaded in the target @a task.
 * @param current_frame The current stack frame.
 * @param previous_frame The previous stack frame, or NULL if this is the first frame.
 * @param next_frame The new frame to be initialized.
 *
 * @return Returns PLFRAME_ESUCCESS on success, PLFRAME_ENOFRAME if no additional frames are available, or a standard plframe_error_t code if an error occurs.
 */
plframe_error_t plframe_cursor_read_compact_unwind (task_t task,
                                                    plcrash_async_image_list_t *image_list,
                                                    const plframe_stackframe_t *current_frame,
                                                    const plframe_stackframe_t *previous_frame,
                                                    plframe_stackframe_t *next_frame)
{
    plframe_error_t result;
    plcrash_error_t err;

    /* Fetch the IP. It should always be available */
    if (!plcrash_async_thread_state_has_reg(&current_frame->thread_state, PLCRASH_REG_IP)) {
        PLCF_DEBUG("Frame is missing a valid IP register, skipping compact unwind encoding");
        return PLFRAME_EBADFRAME;
    }
    plcrash_greg_t pc = plcrash_async_thread_state_get_reg(&current_frame->thread_state, PLCRASH_REG_IP);
    
    /* Find the corresponding image */
    plcrash_async_macho_t *image = plcrash_async_image_containing_address(image_list, pc);
    if (image == NULL) {
        PLCF_DEBUG("Could not find a loaded image for the current frame pc: 0x%" PRIx64, (uint64_t) pc);
        return PLFRAME_ENOTSUP;
    }
    
    /* Map the unwind section */
    plcrash_async_mobject_t unwind_mobj;
    err = plcrash_async_macho_map_section(image, SEG_TEXT, "__unwind_info", &unwind_mobj);
    if (err != PLCRASH_ESUCCESS) {
        if (err != PLCRASH_ENOTFOUND)
            PLCF_DEBUG("Could not map the compact unwind info section for image %s: %d", image->name, err);
        return PLFRAME_ENOTSUP;
    }

    /* Initialize the CFE reader. */
    cpu_type_t cputype = image->byteorder->swap32(image->header.cputype);
    plcrash_async_cfe_reader_t reader;

    err = plcrash_async_cfe_reader_init(&reader, &unwind_mobj, cputype);
    if (err != PLCRASH_ESUCCESS) {
        PLCF_DEBUG("Could not parse the compact unwind info section for image '%s': %d", image->name, err);
        return PLFRAME_EINVAL;
    }

    /* Find the encoding entry (if any) and free the reader */
    pl_vm_address_t function_base;
    uint32_t encoding;
    err = plcrash_async_cfe_reader_find_pc(&reader, pc - image->header_addr, &function_base, &encoding);
    plcrash_async_cfe_reader_free(&reader);
    if (err != PLCRASH_ESUCCESS) {
        PLCF_DEBUG("Did not find CFE entry for PC 0x%" PRIx64 ": %d", (uint64_t) pc, err);
        return PLFRAME_ENOTSUP;
    }
    
    /* Decode the entry */
    plcrash_async_cfe_entry_t entry;
    err = plcrash_async_cfe_entry_init(&entry, cputype, encoding);
    if (err != PLCRASH_ESUCCESS) {
        PLCF_DEBUG("Could not decode CFE encoding 0x%" PRIx32 " for PC 0x%" PRIx64 ": %d", encoding, (uint64_t) pc, err);
        return PLFRAME_ENOTSUP;
    }

    /* Skip entries for which no unwind information is available */
    if (plcrash_async_cfe_entry_type(&entry) == PLCRASH_ASYNC_CFE_ENTRY_TYPE_NONE) {
        plcrash_async_cfe_entry_free(&entry);
        return PLFRAME_ENOFRAME;
    }
    
    /* Compute the in-core function address; the function base was found relative to the image's load address
     * (the PC was rebased by header_addr above), so re-apply header_addr here. */
    pl_vm_address_t function_address;
    if (!plcrash_async_address_apply_offset(image->header_addr, function_base, &function_address)) {
        PLCF_DEBUG("The provided function base (0x%" PRIx64 ") plus header address (0x%" PRIx64 ") will overflow pl_vm_address_t",
                   (uint64_t) function_base, (uint64_t) image->header_addr);

        plcrash_async_cfe_entry_free(&entry);
        return PLFRAME_EINVAL;
    }

    /* Apply the frame delta -- this may fail. */
    if ((err = plcrash_async_cfe_entry_apply(task, function_address, &current_frame->thread_state, &entry, &next_frame->thread_state)) == PLCRASH_ESUCCESS) {
        result = PLFRAME_ESUCCESS;
    } else {
        PLCF_DEBUG("Failed to apply CFE encoding 0x%" PRIx32 " for PC 0x%" PRIx64 ": %d", encoding, (uint64_t) pc, err);
        result = PLFRAME_ENOFRAME;
    }

    plcrash_async_cfe_entry_free(&entry);
    return result;
}
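
/*
 * Illustrative sketch (editorial addition): how a frame walker might prefer compact unwind data and fall
 * back to frame-pointer unwinding when no usable CFE entry covers the frame's PC. The fallback reader name
 * (plframe_cursor_read_frame_ptr) and its matching signature are assumptions; substitute whichever fallback
 * strategies the surrounding cursor implementation actually provides. Kept out of the build via #if 0.
 */
#if 0 /* example only */
static plframe_error_t example_read_next_frame (task_t task,
                                                plcrash_async_image_list_t *image_list,
                                                const plframe_stackframe_t *current_frame,
                                                const plframe_stackframe_t *previous_frame,
                                                plframe_stackframe_t *next_frame)
{
    /* Compact unwind first: it can recover frameless and register-permuted frames that FP walking cannot. */
    plframe_error_t ferr = plframe_cursor_read_compact_unwind(task, image_list, current_frame, previous_frame, next_frame);
    if (ferr == PLFRAME_ESUCCESS)
        return PLFRAME_ESUCCESS;

    /* No CFE coverage (or decoding failed); fall back to frame-pointer based unwinding (assumed reader). */
    return plframe_cursor_read_frame_ptr(task, image_list, current_frame, previous_frame, next_frame);
}
#endif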