Example #1
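/**
 * \brief Allocator callback passed to elf_load(): backs one ELF segment
 *        with frames and maps it into both the spawner's and the child's
 *        vspace.
 */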
static errval_t elf_allocate(void *state, genvaddr_t base, size_t size,
                             uint32_t flags, void **retbase)
{
    errval_t err;

    struct spawninfo *si = state;

    // Increase size by space wasted on first page due to page-alignment
    size_t base_offset = BASE_PAGE_OFFSET(base);
    size += base_offset;
    base -= base_offset;
    // Page-align
    size = ROUND_UP(size, BASE_PAGE_SIZE);

    cslot_t vspace_slot = si->elfload_slot;

    // Allocate the frames
    size_t sz = 0;
    for (lpaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = frame_create(frame, sz, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_CREATE);
        }
    }

    cslot_t spawn_vspace_slot = si->elfload_slot;
    cslot_t new_slot_count = si->elfload_slot - vspace_slot;

    // create copies of the frame capabilities for spawn vspace
    for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot = vspace_slot + copy_idx,
        };
        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot = si->elfload_slot++,
        };
        err = cap_copy(spawn_frame, frame);
        if (err_is_fail(err)) {
            debug_printf("cap_copy failed for src_slot = %"PRIuCSLOT
                         ", dest_slot = %"PRIuCSLOT"\n",
                         frame.slot, spawn_frame.slot);
            return err_push(err, LIB_ERR_CAP_COPY);
        }
    }

    /* Map into my vspace */
    struct memobj *memobj = malloc(sizeof(struct memobj_anon));
    if (!memobj) {
        return LIB_ERR_MALLOC_FAIL;
    }
    struct vregion *vregion = malloc(sizeof(struct vregion));
    if (!vregion) {
        free(memobj);
        return LIB_ERR_MALLOC_FAIL;
    }
    // Create the objects
    err = memobj_create_anon((struct memobj_anon*)memobj, size, 0);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_CREATE_ANON);
    }
    err = vregion_map(vregion, get_current_vspace(), memobj, 0, size,
                      VREGION_FLAGS_READ_WRITE);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }
    for (lvaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot++,
        };
        genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset);
        err = memobj->f.fill(memobj, genvaddr, frame, sz);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = memobj->f.pagefault(memobj, vregion, offset, 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "lib_err_memobj_pagefault_handler");
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    /* Map into spawn vspace */
    struct memobj *spawn_memobj = NULL;
    struct vregion *spawn_vregion = NULL;
    err = spawn_vspace_map_anon_fixed_attr(si, base, size, &spawn_vregion,
                                           &spawn_memobj,
                                           elf_to_vregion_flags(flags));
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_VSPACE_MAP);
    }
    for (lvaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = spawn_vspace_slot++,
        };
        genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset);
        err = spawn_memobj->f.fill(spawn_memobj, genvaddr, frame, sz);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = spawn_memobj->f.pagefault(spawn_memobj, spawn_vregion, offset, 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "lib_err_memobj_pagefault_handler");
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    genvaddr_t genvaddr = vregion_get_base_addr(vregion) + base_offset;
    *retbase = (void*)vspace_genvaddr_to_lvaddr(genvaddr);
    return SYS_ERR_OK;
}

/**
 * \brief Load the ELF image
 */
errval_t spawn_arch_load(struct spawninfo *si,
                         lvaddr_t binary, size_t binary_size,
                         genvaddr_t *entry, void** arch_info)
{
    errval_t err;

    // Reset the elfloader_slot
    si->elfload_slot = 0;
    struct capref cnode_cap = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_SEGCN,
    };
    err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SEGCN);
    }

    // TLS is NYI
    si->tls_init_base = 0;
    si->tls_init_len = si->tls_total_len = 0;

    // Load the binary
    err = elf_load(EM_HOST, elf_allocate, si, binary, binary_size, entry);
    if (err_is_fail(err)) {
        return err;
    }

    struct Elf32_Shdr* got_shdr =
        elf32_find_section_header_name(binary, binary_size, ".got");
    if (got_shdr) {
        *arch_info = (void*)got_shdr->sh_addr;
    } else {
        return SPAWN_ERR_LOAD;
    }

    return SYS_ERR_OK;
}

void spawn_arch_set_registers(void *arch_load_info,
                              dispatcher_handle_t handle,
                              arch_registers_state_t *enabled_area,
                              arch_registers_state_t *disabled_area)
{
    assert(arch_load_info != NULL);
    uintptr_t got_base = (uintptr_t)arch_load_info;

    struct dispatcher_shared_arm* disp_arm = get_dispatcher_shared_arm(handle);
    disp_arm->got_base = got_base;

    enabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;
    disabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;

#ifndef __ARM_ARCH_7M__ //armv7-m does not support these flags
    enabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;
    disabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;
#endif
}
Example #2
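/**
 * \brief Allocator callback passed to elf_load(): backs one ELF segment
 *        with frames, maps it into both the spawner's and the child's
 *        vspace, and records the vregion for later use.
 */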
static errval_t elf_allocate(void *state, genvaddr_t base, size_t size,
                             uint32_t flags, void **retbase)
{
    errval_t err;

    struct spawninfo *si = state;

    // Increase size by space wasted on first page due to page-alignment
    size_t base_offset = BASE_PAGE_OFFSET(base);
    size += base_offset;
    base -= base_offset;
    // Page-align
    size = ROUND_UP(size, BASE_PAGE_SIZE);

    cslot_t vspace_slot = si->elfload_slot;

    // Allocate the frames
    size_t sz = 0;
    for (lpaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = frame_create(frame, sz, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_CREATE);
        }
    }

    cslot_t spawn_vspace_slot = si->elfload_slot;
    cslot_t new_slot_count = si->elfload_slot - vspace_slot;

    // create copies of the frame capabilities for spawn vspace
    for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot = vspace_slot + copy_idx,
        };
        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot = si->elfload_slot++,
        };
        err = cap_copy(spawn_frame, frame);
        if (err_is_fail(err)) {
            debug_printf("cap_copy failed for src_slot = %"PRIuCSLOT
                         ", dest_slot = %"PRIuCSLOT"\n",
                         frame.slot, spawn_frame.slot);
            return err_push(err, LIB_ERR_CAP_COPY);
        }
    }

    /* Map into my vspace */
    struct memobj *memobj = malloc(sizeof(struct memobj_anon));
    if (!memobj) {
        return LIB_ERR_MALLOC_FAIL;
    }
    struct vregion *vregion = malloc(sizeof(struct vregion));
    if (!vregion) {
        free(memobj);
        return LIB_ERR_MALLOC_FAIL;
    }
    // Create the objects
    err = memobj_create_anon((struct memobj_anon*)memobj, size, 0);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_CREATE_ANON);
    }
    err = vregion_map(vregion, get_current_vspace(), memobj, 0, size,
                      VREGION_FLAGS_READ_WRITE);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);
    }
    for (lvaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot++,
        };
        genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset);
        err = memobj->f.fill(memobj, genvaddr, frame, sz);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = memobj->f.pagefault(memobj, vregion, offset, 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "lib_err_memobj_pagefault_handler");
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    /* Map into spawn vspace */
    struct memobj *spawn_memobj = NULL;
    struct vregion *spawn_vregion = NULL;
    err = spawn_vspace_map_anon_fixed_attr(si, base, size, &spawn_vregion,
                                           &spawn_memobj,
                                           elf_to_vregion_flags(flags));
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_VSPACE_MAP);
    }
    for (lvaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot = spawn_vspace_slot++,
        };
        genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset);
        err = spawn_memobj->f.fill(spawn_memobj, genvaddr, spawn_frame, sz);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = spawn_memobj->f.pagefault(spawn_memobj, spawn_vregion, offset, 0);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "lib_err_memobj_pagefault_handler");
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    si->vregion[si->vregions] = vregion;
    si->base[si->vregions++] = base;

    genvaddr_t genvaddr = vregion_get_base_addr(vregion) + base_offset;
    *retbase = (void*)vspace_genvaddr_to_lvaddr(genvaddr);
    return SYS_ERR_OK;
}

/**
 * \brief Load the ELF image
 */
errval_t spawn_arch_load(struct spawninfo *si,
                         lvaddr_t binary, size_t binary_size,
                         genvaddr_t *entry, void** arch_load_info)
{
    errval_t err;

    // Reset the elfloader_slot
    si->elfload_slot = 0;
    si->vregions = 0;

    struct capref cnode_cap = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_SEGCN,
    };
    // XXX: this code assumes that elf_load never needs more than 32 slots for 
    // text frame capabilities.
    err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SEGCN);
    }

    // Load the binary
    si->tls_init_base = 0;
    si->tls_init_len = si->tls_total_len = 0;
    err = elf_load_tls(EM_HOST, elf_allocate, si, binary, binary_size, entry,
                       &si->tls_init_base, &si->tls_init_len, &si->tls_total_len);
    if (err_is_fail(err)) {
        return err;
    }

    return SYS_ERR_OK;
}

void spawn_arch_set_registers(void *arch_load_info,
                              dispatcher_handle_t handle,
                              arch_registers_state_t *enabled_area,
                              arch_registers_state_t *disabled_area)
{
#if defined(__x86_64__)
    /* XXX: 1st argument to _start is the dispatcher pointer
     * see lib/crt/arch/x86_64/crt0.s */
    disabled_area->rdi = get_dispatcher_shared_generic(handle)->udisp;
#elif defined(__i386__)
    /* XXX: 1st argument to _start is the dispatcher pointer
     * see lib/crt/arch/x86_32/crt0.s */
    disabled_area->edi = get_dispatcher_shared_generic(handle)->udisp;
#endif
}
Example #3
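/* Return 1 if n == m^b for some integers m and b >= 2, else 0. */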
/* Bach and Sorenson (1993) would be better */
static int is_perfect_power(UV n) {
  UV b, last;
  if ((n <= 3) || (n == UV_MAX)) return 0;
  if ((n & (n-1)) == 0)  return 1;          /* powers of 2    */
  last = log2floor(n-1) + 1;
#if (BITS_PER_WORD == 32) || (DBL_DIG > 19)
  if (1) {
#elif DBL_DIG == 10
  if (n < UVCONST(10000000000)) {
#elif DBL_DIG == 15
  if (n < UVCONST(1000000000000000)) {
#else
  if ( n < (UV) pow(10, DBL_DIG) ) {
#endif
    /* Simple floating point method.  Fast, but need enough mantissa. */
    b = sqrt(n)+0.5; if (b*b == n)  return 1; /* perfect square */
    for (b = 3; b < last; b = _XS_next_prime(b)) {
      UV root = pow(n, 1.0 / (double)b) + 0.5;
      if ( ((UV)(pow(root, b)+0.5)) == n)  return 1;
    }
  } else {
    /* Dietzfelbinger, algorithm 2.3.5 (without optimized exponential) */
    for (b = 2; b <= last; b++) {
      UV a = 1;
      UV c = n;
      while (c >= HALF_WORD) c = (1+c)>>1;
      while ((c-a) >= 2) {
        UV m, maxm, p, i;
        m = (a+c) >> 1;
        maxm = UV_MAX / m;
        p = m;
        for (i = 2; i <= b; i++) {
          if (p > maxm) { p = n+1; break; }
          p *= m;
        }
        if (p == n)  return 1;
        if (p < n)
          a = m;
        else
          c = m;
      }
    }
  }
  return 0;
}

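/* Smallest j <= limit with n^j == 1 (mod r); returns limit+1 if none found. */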
static UV order(UV r, UV n, UV limit) {
  UV j;
  UV t = 1;
  for (j = 1; j <= limit; j++) {
    t = (t * n) % r;
    if (t == 1)
      break;
  }
  return j;
}

static void poly_print(UV* poly, UV r)
{
  int i;
  for (i = r-1; i >= 1; i--) {
    if (poly[i] != 0)
      printf("%lux^%d + ", poly[i], i);
  }
  if (poly[0] != 0) printf("%lu", poly[0]);
  printf("\n");
}

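/* Multiply polynomials px and py modulo (x^r - 1, mod). res is r words of
 * caller-provided scratch; the product is written back into px. */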
static void poly_mod_mul(UV* px, UV* py, UV* res, UV r, UV mod)
{
  UV i, j, pxi, pyj, rindex;

  memset(res, 0, r * sizeof(UV));
  for (i = 0; i < r; i++) {
    pxi = px[i];
    if (pxi == 0)  continue;
    for (j = 0; j < r; j++) {
      pyj = py[j];
      if (pyj == 0)  continue;
      rindex = (i+j) < r ? i+j : i+j-r; /* (i+j) % r */
      if (mod < HALF_WORD) {
        res[rindex] = (res[rindex] + (pxi*pyj) ) % mod;
      } else {
        res[rindex] = muladdmod(pxi, pyj, res[rindex], mod);
      }
    }
  }
  memcpy(px, res, r * sizeof(UV)); /* put result in px */
}
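
/* A minimal sketch (not in the original source) of how poly_mod_mul is
 * typically driven: square-and-multiply exponentiation of a polynomial
 * modulo (x^r - 1, mod), as needed to evaluate (x+a)^n in an AKS-style
 * primality check. The name poly_mod_pow and the caller-provided scratch
 * buffer are assumptions; pn is clobbered (used for repeated squaring). */
static void poly_mod_pow(UV* pres, UV* pn, UV power, UV* scratch, UV r, UV mod)
{
  memset(pres, 0, r * sizeof(UV));
  pres[0] = 1;                                  /* pres = 1 */
  while (power) {
    if (power & 1)
      poly_mod_mul(pres, pn, scratch, r, mod);  /* pres = pres * pn */
    power >>= 1;
    if (power)
      poly_mod_mul(pn, pn, scratch, r, mod);    /* pn = pn^2 */
  }
}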
Example #4
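// Allocate bufsize bytes of RAM and retype it into bufsize/granularity
// Frame capabilities of granularity bytes each, placed in a fresh CNode.
// These frames later provide the backing for copy-on-write pages.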
static errval_t cow_init(size_t bufsize, size_t granularity,
        struct cnoderef *cow_cn, size_t *frame_count)
{
    assert(cow_cn);
    assert(frame_count);

    errval_t err;
    struct capref frame, cncap;
    struct cnoderef cnode;

    // round bufsize up to a whole number of granules, then get the RAM cap
    bufsize = (bufsize / granularity + 1) * granularity;
    err = slot_alloc(&frame);
    assert(err_is_ok(err));
    size_t rambits = log2floor(bufsize);
    debug_printf("bits = %zu\n", rambits);
    err = ram_alloc(&frame, rambits);
    assert(err_is_ok(err));
    // calculate #slots
    cslot_t cap_count = bufsize / granularity;
    cslot_t slots;
    // get CNode
    err = cnode_create(&cncap, &cnode, cap_count, &slots);
    assert(err_is_ok(err));
    assert(slots >= cap_count);
    // retype RAM into Frames
    struct capref first_frame = (struct capref) { .cnode = cnode, .slot = 0 };
    err = cap_retype(first_frame, frame, ObjType_Frame, log2floor(granularity));
    assert(err_is_ok(err));
    err = cap_destroy(frame);
    assert(err_is_ok(err));
    *frame_count = slots;
    *cow_cn = cnode;
    return SYS_ERR_OK;
}

// create cow-enabled vregion & backing
// Can copy-on-write in granularity-sized chunks
static errval_t vspace_map_one_frame_cow(void **buf, size_t size,
        struct capref frame, vregion_flags_t flags,
        struct memobj **memobj, struct vregion **vregion,
        size_t granularity)
{
    errval_t err;
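    // if the caller passed NULL for memobj/vregion, allocate throwaway
    // storage for the out-pointers so vspace_map_anon_attr can fill them in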
    if (!memobj) {
        memobj = malloc(sizeof(*memobj));
    }
    assert(memobj);
    if (!vregion) {
        vregion = malloc(sizeof(*vregion));
    }
    assert(vregion);
    err = vspace_map_anon_attr(buf, memobj, vregion, size, &size, flags);
    assert(err_is_ok(err));
    size_t chunks = size / granularity;
    cslot_t slots;
    struct capref cncap;
    struct cnoderef cnode;
    err = cnode_create(&cncap, &cnode, chunks, &slots);
    assert(err_is_ok(err));
    assert(slots >= chunks);
    struct capref fc = (struct capref) { .cnode = cnode, .slot = 0 };
    for (int i = 0; i < chunks; i++) {
        err = cap_copy(fc, frame);
        assert(err_is_ok(err));
        err = (*memobj)->f.fill_foff(*memobj, i * granularity, fc, granularity, i*granularity);
        assert(err_is_ok(err));
        err = (*memobj)->f.pagefault(*memobj, *vregion, i * granularity, 0);
        assert(err_is_ok(err));
        fc.slot++;
    }
    return SYS_ERR_OK;
}

int main(int argc, char *argv[])
{
    errval_t err;
    struct capref frame;
    size_t retsize;
    void *vbuf;
    struct vregion *vregion;
    uint8_t *buf;

    debug_printf("%s:%d\n", __FUNCTION__, __LINE__);
    err = frame_alloc(&frame, BUFSIZE, &retsize);
    if (err_is_fail(err)) {
        debug_printf("frame_alloc: %s\n", err_getstring(err));
        return 1;
    }
    assert(retsize >= BUFSIZE);
    debug_printf("%s:%d: %zu\n", __FUNCTION__, __LINE__, retsize);
    // setup region
    err = vspace_map_one_frame_attr(&vbuf, retsize, frame,
            VREGION_FLAGS_READ_WRITE, NULL, &vregion);
    if (err_is_fail(err)) {
        debug_printf("vspace_map: %s\n", err_getstring(err));
        return 1;
    }
    debug_printf("vaddr: %p\n", vbuf);

    // write stuff to region
    buf = vbuf;
    debug_printf("%s:%d: %p, %lu pages\n", __FUNCTION__, __LINE__, buf, BUFSIZE / BASE_PAGE_SIZE);
    memset(buf, 0xAA, BUFSIZE);

    debug_printf("%s:%d\n", __FUNCTION__, __LINE__);
    // create cow copy
    //  setup exception handler
    err = thread_set_exception_handler(handler, NULL, ex_stack,
            ex_stack + EX_STACK_SIZE, NULL, NULL);
    assert(err_is_ok(err));
    debug_printf("%s:%d\n", __FUNCTION__, __LINE__);
    err = cow_init(BUFSIZE, BASE_PAGE_SIZE, &cow_frames, &cow_frame_count);
    assert(err_is_ok(err));
    //  create r/o copy of region and tell exception handler bounds
    debug_printf("%s:%d\n", __FUNCTION__, __LINE__);
    err = vspace_map_one_frame_cow(&cow_vbuf, retsize, frame,
            VREGION_FLAGS_READ, NULL, &cow_vregion, BASE_PAGE_SIZE);
    if (err_is_fail(err)) {
        debug_printf("vspace_map: %s\n", err_getstring(err));
        return 1;
    }
    debug_printf("cow_vaddr: %p\n", cow_vbuf);

    // write to every other page of the CoW copy to trigger faults
    uint8_t *cbuf = cow_vbuf;
    for (int i = 0; i < BUFSIZE / BASE_PAGE_SIZE; i+=2) {
        cbuf[i * BASE_PAGE_SIZE + 1] = 0x55;
    }
    // verify results
    for (int i = 0; i < BUFSIZE / BASE_PAGE_SIZE; i++) {
        printf("page %d\n", i);
        printf("buf[0] = %d; cbuf[0] = %d\n", buf[i*BASE_PAGE_SIZE],
                cbuf[i*BASE_PAGE_SIZE]);
        printf("buf[1] = %d; cbuf[1] = %d\n", buf[i*BASE_PAGE_SIZE+1],
                cbuf[i*BASE_PAGE_SIZE+1]);
    }
    debug_dump_hw_ptables();
    return EXIT_SUCCESS;
}
Example #5
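/**
 * \brief Allocator callback passed to elf_load(): creates frames for one
 *        ELF segment and maps them into the parent's and the child's
 *        virtual address space via the paging API.
 */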
static errval_t elf_allocate(void *state, genvaddr_t base, size_t size,
                             uint32_t flags, void **retbase)
{
    errval_t err;
    lvaddr_t vaddr;
    size_t used_size;

    struct spawninfo *si = state;

    // Increase size by space wasted on first page due to page-alignment
    size_t base_offset = BASE_PAGE_OFFSET(base);
    size += base_offset;
    base -= base_offset;
    // Page-align
    size = ROUND_UP(size, BASE_PAGE_SIZE);

    cslot_t vspace_slot = si->elfload_slot;

    // Step 1: Allocate the frames
    size_t sz = 0;
    for (lpaddr_t offset = 0; offset < size; offset += sz) {
        sz = 1UL << log2floor(size - offset);
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = si->elfload_slot++,
        };
        err = frame_create(frame, sz, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_CREATE);
        }
    }

    cslot_t spawn_vspace_slot = si->elfload_slot;
    cslot_t new_slot_count = si->elfload_slot - vspace_slot;

    // Step 2: create copies of the frame capabilities for child vspace
    for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot = vspace_slot + copy_idx,
        };

        struct capref spawn_frame = {
            .cnode = si->segcn,
            .slot = si->elfload_slot++,
        };
        err = cap_copy(spawn_frame, frame);
        if (err_is_fail(err)) {
            debug_printf("cap_copy failed for src_slot = %"PRIuCSLOT
                    ", dest_slot = %"PRIuCSLOT"\n", frame.slot,
                    spawn_frame.slot);
            return err_push(err, LIB_ERR_CAP_COPY);
        }
    }

    // Step 3: map into own vspace

    // Get virtual address range to hold the module
    void *vaddr_range;
    err = paging_alloc(get_current_paging_state(), &vaddr_range, size);
    if (err_is_fail(err)) {
        debug_printf("elf_allocate: paging_alloc failed\n");
        return (err);
    }

    // map the allocated physical memory into the parent's virtual memory
    vaddr = (lvaddr_t)vaddr_range;
    used_size = size;

    while (used_size > 0) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = vspace_slot++,
        };

        // find out the size of the frame
        struct frame_identity id;
        err = invoke_frame_identify(frame, &id);
        assert(err_is_ok(err));
        size_t slot_size = (1UL << id.bits);

        // map frame to provide physical memory backing
        err = paging_map_fixed_attr(get_current_paging_state(), vaddr, frame, slot_size,
                VREGION_FLAGS_READ_WRITE);

        if (err_is_fail(err)) {
            debug_printf("elf_allocate: paging_map_fixed_attr failed\n");
            return err;
        }

        used_size -= slot_size;
        vaddr +=  slot_size;
    } // end while:


    // Step 4: map into child process
    struct paging_state *cp = si->vspace;

    // map the allocated physical memory into the child's virtual memory
    vaddr = (lvaddr_t)base;
    used_size = size;

    while (used_size > 0) {
        struct capref frame = {
            .cnode = si->segcn,
            .slot  = spawn_vspace_slot++,
        };

        // find out the size of the frame
        struct frame_identity id;
        err = invoke_frame_identify(frame, &id);
        assert(err_is_ok(err));
        size_t slot_size = (1UL << id.bits);

        // map frame to provide physical memory backing
        err = paging_map_fixed_attr(cp, vaddr, frame, slot_size,
                elf_to_vregion_flags(flags));

        if (err_is_fail(err)) {
            debug_printf("elf_allocate: paging_map_fixed_attr failed\n");
            return err;
        }

        used_size -= slot_size;
        vaddr +=  slot_size;
    } // end while:

    *retbase = (void*) vaddr_range + base_offset;

    return SYS_ERR_OK;
} // end function: elf_allocate

/**
 * \brief Load the ELF image
 */
errval_t spawn_arch_load(struct spawninfo *si,
                         lvaddr_t binary, size_t binary_size,
                         genvaddr_t *entry, void** arch_info)
{
    errval_t err;

    // Reset the elfloader_slot
    si->elfload_slot = 0;
    struct capref cnode_cap = {
        .cnode = si->rootcn,
        .slot  = ROOTCN_SLOT_SEGCN,
    };
    err = cnode_create_raw(cnode_cap, &si->segcn, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_CREATE_SEGCN);
    }

    // TLS is NYI
    si->tls_init_base = 0;
    si->tls_init_len = si->tls_total_len = 0;

    //debug_printf("spawn_arch_load: about to load elf %p\n", elf_allocate);
    // Load the binary
    err = elf_load(EM_HOST, elf_allocate, si, binary, binary_size, entry);
    if (err_is_fail(err)) {
        return err;
    }

    //debug_printf("hello here\n");
    struct Elf32_Shdr* got_shdr =
        elf32_find_section_header_name(binary, binary_size, ".got");
    if (got_shdr) {
        *arch_info = (void*)got_shdr->sh_addr;
    } else {
        return SPAWN_ERR_LOAD;
    }

    return SYS_ERR_OK;
}

void spawn_arch_set_registers(void *arch_load_info,
                              dispatcher_handle_t handle,
                              arch_registers_state_t *enabled_area,
                              arch_registers_state_t *disabled_area)
{
    assert(arch_load_info != NULL);
    uintptr_t got_base = (uintptr_t)arch_load_info;

    struct dispatcher_shared_arm* disp_arm = get_dispatcher_shared_arm(handle);
    disp_arm->got_base = got_base;

    enabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;
    enabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;

    disabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;
    disabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;
}
Example #6
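    // Client side of the PIR-based PSI protocol: simple-hashes the inputs
    // into bins, generates a pair of BGI PIR key shares per query (one for
    // each server), masks each input with a PRG-derived share, secret-shares
    // the query permutation between the servers, and runs the base PSI on
    // the masked values.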
    void DrrnPsiClient::recv(Channel s0, Channel s1, span<block> inputs)
    {
        if (inputs.size() != mClientSetSize)
            throw std::runtime_error(LOCATION);

        Matrix<u64> bins(mNumSimpleBins, mBinSize);
        std::vector<u64> binSizes(mNumSimpleBins);
        u64 cuckooSlotsPerBin = (mCuckooParams.numBins() + mNumSimpleBins) / mNumSimpleBins;

        // Simple hashing with a PRP
        std::vector<block> hashs(inputs.size());
        AES hasher(mHashingSeed);
        u64 numCuckooBins = mCuckooParams.numBins();
        for (u64 i = 0; i < u64(inputs.size());)
        {
            auto min = std::min<u64>(inputs.size() - i, 8);
            auto end = i + min;
            hasher.ecbEncBlocks(inputs.data() + i, min, hashs.data() + i);
            for (; i < end; ++i)
            {
                hashs[i] = hashs[i] ^ inputs[i];
                for (u64 j = 0; j < mCuckooParams.mNumHashes; ++j)
                {
                    u64 idx = CuckooIndex<>::getHash(hashs[i], j, numCuckooBins) * mNumSimpleBins / mCuckooParams.numBins();

                    // insert this item in this bin. pack together the hash index and input index
                    bins(idx, binSizes[idx]++) = (j << 56) | i;
                }

                //if (!i)
                //{
                //    ostreamLock(std::cout) << "cinput[" << i << "] = " << inputs[i] << " -> " << hashs[i] << " ("
                //        << CuckooIndex<>::getHash(hashs[i], 0, numCuckooBins) << ", "
                //        << CuckooIndex<>::getHash(hashs[i], 1, numCuckooBins) << ", "
                //        << CuckooIndex<>::getHash(hashs[i], 2, numCuckooBins) << ")"
                //        << std::endl;
                //}
            }
        }



        // leaf blocks per bin: ceil(cuckooSlotsPerBin / (mBigBlockSize * 128))
        u64 numLeafBlocks = (cuckooSlotsPerBin + mBigBlockSize * 128 - 1) / (mBigBlockSize * 128);
        u64 gDepth = 2;
        u64 kDepth = std::max<u64>(gDepth, log2floor(numLeafBlocks)) - gDepth;
        u64 groupSize = (numLeafBlocks + (u64(1) << kDepth) - 1) / (u64(1) << kDepth);
        if (groupSize > 8) throw std::runtime_error(LOCATION);

        //std::cout << "kDepth:   " << kDepth << std::endl;
        //std::cout << "mBinSize: " << mBinSize << std::endl;

        u64 numQueries = mNumSimpleBins * mBinSize;
        auto permSize = numQueries * mBigBlockSize;

        // mask generation
        block rSeed = CCBlock;  // XXX: fixed test seed; should be mPrng.get<block>()
        AES rGen(rSeed);


        std::vector<block> shares(mClientSetSize * mCuckooParams.mNumHashes), r(permSize), piS1(permSize), s(permSize);
        //std::vector<u32> rIdxs(numQueries);
        //std::vector<u64> sharesIdx(shares.size());

        //TODO("use real masks");
        //memset(r.data(), 0, r.size() * sizeof(block));
        rGen.ecbEncCounterMode(r.size() * 0, r.size(), r.data());
        rGen.ecbEncCounterMode(r.size() * 1, r.size(), piS1.data());
        rGen.ecbEncCounterMode(r.size() * 2, r.size(), s.data());

        //auto encIter = enc.begin();
        auto shareIter = shares.begin();
        //auto shareIdxIter = sharesIdx.begin();
        u64 queryIdx = 0, dummyPermIdx = mClientSetSize * mCuckooParams.mNumHashes;

        std::unordered_map<u64, u64> inputMap;
        inputMap.reserve(mClientSetSize * mCuckooParams.mNumHashes);

        std::vector<u32> pi(permSize);
        auto piIter = pi.begin();


        u64 keySize = kDepth + 1 + groupSize;
        u64 mask = (u64(1) << 56) - 1;
        auto binIter = bins.begin();
        for (u64 bIdx = 0; bIdx < mNumSimpleBins; ++bIdx)
        {
            u64 i = 0;

            auto binOffset = (bIdx * numCuckooBins + mNumSimpleBins - 1) / mNumSimpleBins;

            std::vector<block> k0(keySize * mBinSize), k1(keySize * mBinSize);
            //std::vector<u64> idx0(mBinSize), idx1(mBinSize);
            auto k0Iter = k0.data(), k1Iter = k1.data();
            //auto idx0Iter = idx0.data(), idx1Iter = idx1.data();

            for (; i < binSizes[bIdx]; ++i)
            {
                span<block>
                    kk0(k0Iter, kDepth + 1),
                    g0(k0Iter + kDepth + 1, groupSize),
                    kk1(k1Iter, kDepth + 1),
                    g1(k1Iter + kDepth + 1, groupSize);

                k0Iter += keySize;
                k1Iter += keySize;

                u8 hashIdx = *binIter >> 56;
                u64 itemIdx = *binIter & mask;
                u64 cuckooIdx = CuckooIndex<>::getHash(hashs[itemIdx], hashIdx, numCuckooBins) - binOffset;
                ++binIter;

                auto bigBlockoffset = cuckooIdx % mBigBlockSize;
                auto bigBlockIdx = cuckooIdx / mBigBlockSize;

                BgiPirClient::keyGen(bigBlockIdx, mPrng.get<block>(), kk0, g0, kk1, g1);



                // the index of the mask that will mask this item
                auto rIdx = *piIter = itemIdx * mCuckooParams.mNumHashes + hashIdx * mBigBlockSize + bigBlockoffset;

                // the masked value that will be inputted into the PSI
                *shareIter = r[rIdx] ^ inputs[itemIdx];
                //*shareIter = inputs[itemIdx];

                //if (itemIdx == 0)
                //    ostreamLock(std::cout)
                //    << "item[" << i << "]    bin " << bIdx
                //    << " block " << bigBlockIdx 
                //    << " offset " << bigBlockoffset 
                //    << " psi " << *shareIter << std::endl;

                // This will be used to map matched items in the intersection back to their input item
                inputMap.insert({ queryIdx, itemIdx });

                ++shareIter;
                ++piIter;
                ++queryIdx;
            }

            u64 rem = mBinSize - i;


            binIter += rem;
            for (u64 i = 0; i < rem; ++i)
            {
                *piIter++ = dummyPermIdx++;
            }

            //s0.asyncSendCopy(k0);
            //s0.asyncSendCopy(k1);
            //s1.asyncSendCopy(k1);
            //s1.asyncSendCopy(k0);

            s0.asyncSend(std::move(k0));
            s1.asyncSend(std::move(k1));

        }

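        // Split the query permutation pi into pi = pi1 o pi0: pi1 is a
        // shuffle derived from rSeed, while pi0 and the correction shares
        // piS0 are sent to server 0, so that neither server holds pi in the
        // clear.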
        std::vector<u32> pi1(permSize), pi0(permSize), pi1Inv(permSize);
        for (u32 i = 0; i < pi1.size(); ++i) pi1[i] = i;
        PRNG prng(rSeed ^ OneBlock);
        std::random_shuffle(pi1.begin(), pi1.end(), prng);


        //std::vector<block> pi1RS(pi.size());
        for (u64 i = 0; i < permSize; ++i)
        {
            //auto pi1i = pi1[i];
            //pi1RS[i] = r[pi1i] ^ s[pi1i];

            pi1Inv[pi1[i]] = i;
            //std::cout << "pi1(r + s)[" << i << "] " << pi1RS[i] << std::endl;
        }
        std::vector<block> piS0(r.size());
        for (u64 i = 0; i < permSize; ++i)
        {
            //std::cout << "r[" << i << "] " << r[i] << std::endl;
            //std::cout << "pi(r + s)[" << i << "]=" << (r[pi[i]] ^ s[pi[i]]) << std::endl;


            pi0[i] = pi1Inv[pi[i]];
            piS0[i] = piS1[i] ^ s[pi[i]];
            //std::cout << "pi (r + s)[" << i << "] = " << (r[pi[i]] ^ s[pi[i]]) << " = " << r[pi[i]] << " ^ " << s[pi[i]] << " c " << pi[i] << std::endl;
            //std::cout << "pi`(r + s)[" << i << "] = " << pi1RS[pi0[i]]  <<" c " << pi0[pi1[i]] << std::endl;
        }

        s0.asyncSend(std::move(pi0));
        s0.asyncSend(std::move(piS0));
        //rGen.ecbEncBlocks(r.data(), r.size(), r.data());
        //for (u64 i = 0; i < shares.size(); ++i)
        //{
        //    std::cout << IoStream::lock << "cshares[" << i << "] " << shares[i] << " input[" << sharesIdx[i]<<"]" << std::endl << IoStream::unlock;
        //}
        mPsi.sendInput(shares, s0);

        mIntersection.reserve(mPsi.mIntersection.size());
        for (u64 i = 0; i < mPsi.mIntersection.size(); ++i) {
            // map the PSI intersection index back to the original input index
            mIntersection.emplace(inputMap[mPsi.mIntersection[i]]);
        }

    }