Exemplo n.º 1
0
/* Deliver signal `info` to the current thread, capturing the interrupted
 * machine state from `context`. If the signal cannot be handled right now
 * (preemption disabled by an outer region, or the signal is blocked by the
 * thread's mask), the signal record is queued for later delivery instead. */
void deliver_signal (siginfo_t * info, PAL_CONTEXT * context)
{
    shim_tcb_t * tcb = SHIM_GET_TLS();
    struct shim_thread * cur_thread = (struct shim_thread *) tcb->tp;
    int sig = info->si_signo;

    /* Block nested signal delivery while we inspect/modify per-thread state;
     * re-enabled at `out` on every path. */
    __disable_preempt(tcb);

    /* Build the signal record on the stack first; it is only copied to the
     * heap (see remalloc under `delay`) if delivery must be postponed. */
    struct shim_signal * signal = __alloca(sizeof(struct shim_signal));
    /* save in signal */
    memset(signal, 0, sizeof(struct shim_signal));
    __store_info(info, signal);
    __store_context(tcb, context, signal);

    /* preempt > 1 means some outer region (beyond our own __disable_preempt
     * above) already disabled preemption — not safe to handle now. */
    if ((tcb->context.preempt & ~SIGNAL_DELAYED) > 1)
        goto delay;

    /* Signal currently blocked by the thread's signal mask — queue it. */
    if (__sigismember(&cur_thread->signal_mask, sig))
        goto delay;

    /* Deliverable immediately: run pending handling, then this signal. */
    __handle_signal(tcb, sig, &signal->context);
    __handle_one_signal(tcb, sig, signal);
    goto out;

delay:
    {
        /* The record lives in this frame's alloca'ed storage; move it to the
         * heap so it survives until the delayed delivery. On allocation
         * failure the signal is silently dropped. */
        if (!(signal = remalloc(signal,sizeof(struct shim_signal))))
            goto out;

        struct shim_signal ** signal_log = allocate_signal_log(cur_thread, sig);

        if (!signal_log) {
            /* Queue full: report and drop the heap copy we just made. */
            sys_printf("signal queue is full (TID = %u, SIG = %d)\n",
                       tcb->tid, sig);
            free(signal);
            goto out;
        }

        /* Publish the queued signal into the per-thread signal log slot. */
        *signal_log = signal;
    }

out:
    __enable_preempt(tcb);
}
Exemplo n.º 2
0
/* PAL resume upcall: when an application thread is resumed, give pending
 * signals a chance to be handled before returning from the exception.
 * Internal (non-application) threads skip signal handling entirely.
 * `arg` and `context` are unused here. */
static void resume_upcall (PAL_PTR event, PAL_NUM arg, PAL_CONTEXT * context)
{
    if (!IS_INTERNAL_TID(get_cur_tid())) {
        shim_tcb_t * shim_tcb = SHIM_GET_TLS();
        assert(shim_tcb && shim_tcb->tp);

        __disable_preempt(shim_tcb);

        if ((shim_tcb->context.preempt & ~SIGNAL_DELAYED) > 1) {
            /* An outer region holds preemption disabled; just mark that
             * signal delivery has been delayed. */
            shim_tcb->context.preempt |= SIGNAL_DELAYED;
        } else {
            /* Safe to handle pending signals now. */
            __handle_signal(shim_tcb, 0, NULL);
        }

        __enable_preempt(shim_tcb);
    }

    DkExceptionReturn(event);
}
Exemplo n.º 3
0
/* Handle pending signals for the current thread. If `delayed_only` is true,
 * only proceed when delivery was previously marked as delayed.
 *
 * Fix: the original asserted `tcb && tcb->tp` only AFTER dereferencing
 * `tcb->tp`, so the assertion could never catch the condition it guards.
 * The assert now precedes the first dereference. */
void handle_signal (bool delayed_only)
{
    shim_tcb_t * tcb = SHIM_GET_TLS();
    assert(tcb && tcb->tp);   /* must hold before we touch tcb->tp below */

    struct shim_thread * thread = (struct shim_thread *) tcb->tp;

    /* Fast path: nothing pending for this thread. */
    if (!thread->has_signal.counter)
        return;

    __disable_preempt(tcb);

    if ((tcb->context.preempt & ~SIGNAL_DELAYED) > 1) {
        /* An outer region holds preemption disabled; remember that
         * delivery has been delayed and bail out. */
        tcb->context.preempt |= SIGNAL_DELAYED;
        goto out;
    }

    /* Caller only wants previously-delayed signals, and none are marked. */
    if (delayed_only && !(tcb->context.preempt & SIGNAL_DELAYED))
        goto out;

    __handle_signal(tcb, 0, NULL);
out:
    __enable_preempt(tcb);
}
Exemplo n.º 4
0
/* execve() implementation path that reuses the in-process runtime loader:
 * tears down the current process image (old stack, loaded libraries, VMAs,
 * brk) and loads/executes the new ELF object on a freshly built stack.
 *
 * Returns a negative errno on early failure; on success control transfers
 * to the new executable via execute_elf_object (the trailing `return 0`
 * is presumably unreachable — TODO confirm execute_elf_object never returns).
 *
 * NOTE(review): globals (old_stack*, new_argc/new_argp/new_auxp, etc.) are
 * used instead of locals, apparently because SWITCH_STACK invalidates this
 * frame's locals — verify against the declarations outside this view. */
int shim_do_execve_rtld (struct shim_handle * hdl, const char ** argv,
                         const char ** envp)
{
    BEGIN_PROFILE_INTERVAL();

    struct shim_thread * cur_thread = get_cur_thread();
    int ret;

    /* Close all O_CLOEXEC handles before replacing the image. */
    if ((ret = close_cloexec_handle(cur_thread->handle_map)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec);

    /* Fresh TLS control block for the post-exec image. */
    void * tcb = malloc(sizeof(__libc_tcb_t));
    if (!tcb)
        return -ENOMEM;

    populate_tls(tcb, false);
    __disable_preempt(&((__libc_tcb_t *) tcb)->shim_tcb); // Temporarily disable preemption
                                                          // during execve().
    debug("set tcb to %p\n", tcb);

    /* Swap the thread's exec handle to the new executable. */
    put_handle(cur_thread->exec);
    get_handle(hdl);
    cur_thread->exec = hdl;

    /* Detach the old stack from the thread; the pointers are stashed in
     * globals so they survive the stack switch below. */
    old_stack_top = cur_thread->stack_top;
    old_stack     = cur_thread->stack;
    old_stack_red = cur_thread->stack_red;
    cur_thread->stack_top = NULL;
    cur_thread->stack     = NULL;
    cur_thread->stack_red = NULL;

    initial_envp = NULL;
    new_argc = 0;
    for (const char ** a = argv ; *a ; a++, new_argc++);

    /* Build the new user stack (argv/envp/auxv layout).
     * NOTE(review): on failure, `tcb` is not freed and preemption stays
     * disabled — confirm whether this leak/state is intentional. */
    new_argcp = &new_argc;
    if ((ret = init_stack(argv, envp, &new_argcp, &new_argp,
                          REQUIRED_ELF_AUXV, &new_auxp)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(alloc_new_stack_for_exec);

    /* Point of no return for this frame: locals below must be re-derived. */
    SWITCH_STACK(new_argp);
    cur_thread = get_cur_thread();

    UPDATE_PROFILE_INTERVAL();

    /* Release the old stack and its red zone, then drop their bookkeeping. */
    DkVirtualMemoryFree(old_stack, old_stack_top - old_stack);
    DkVirtualMemoryFree(old_stack_red, old_stack - old_stack_red);

    if (bkeep_munmap(old_stack, old_stack_top - old_stack, 0) < 0 ||
        bkeep_munmap(old_stack_red, old_stack - old_stack_red, 0) < 0)
        BUG();

    remove_loaded_libraries();
    clean_link_map_list();
    SAVE_PROFILE_INTERVAL(unmap_loaded_binaries_for_exec);

    reset_brk();

    /* Snapshot all VMAs; grow the buffer geometrically until it fits. */
    size_t count = DEFAULT_VMA_COUNT;
    struct shim_vma_val * vmas = malloc(sizeof(struct shim_vma_val) * count);

    if (!vmas)
        return -ENOMEM;

retry_dump_vmas:
    ret = dump_all_vmas(vmas, count);

    if (ret == -EOVERFLOW) {
        /* Buffer too small: allocate double, free the old one, retry. */
        struct shim_vma_val * new_vmas
                = malloc(sizeof(struct shim_vma_val) * count * 2);
        if (!new_vmas) {
            free(vmas);
            return -ENOMEM;
        }
        free(vmas);
        vmas = new_vmas;
        count *= 2;
        goto retry_dump_vmas;
    }

    if (ret < 0) {
        free(vmas);
        return ret;
    }

    /* Unmap every VMA of the old image except the stack we now run on. */
    count = ret;
    for (struct shim_vma_val * vma = vmas ; vma < vmas + count ; vma++) {
        /* Don't free the current stack */
        if (vma->addr == cur_thread->stack)
            continue;

        /* Free all the mapped VMAs */
        if (!(vma->flags & VMA_UNMAPPED))
            DkVirtualMemoryFree(vma->addr, vma->length);

        /* Remove the VMAs */
        bkeep_munmap(vma->addr, vma->length, vma->flags);
    }

    free_vma_val_array(vmas, count);

    SAVE_PROFILE_INTERVAL(unmap_all_vmas_for_exec);

    /* Load the new executable; a load failure here is fatal because the old
     * image is already destroyed. */
    if ((ret = load_elf_object(cur_thread->exec, NULL, 0)) < 0)
        shim_terminate(ret);

    init_brk_from_executable(cur_thread->exec);
    load_elf_interp(cur_thread->exec);

    SAVE_PROFILE_INTERVAL(load_new_executable_for_exec);

    /* Robust futex list belongs to the old image; discard it. */
    cur_thread->robust_list = NULL;

#ifdef PROFILE
    if (ENTER_TIME)
        SAVE_PROFILE_INTERVAL_SINCE(syscall_execve, ENTER_TIME);
#endif

    debug("execve: start execution\n");
    execute_elf_object(cur_thread->exec, new_argcp, new_argp,
                       REQUIRED_ELF_AUXV, new_auxp);

    return 0;
}