Code example #1
File: boot.c Project: celskeggs/seL4
BOOT_CODE cap_t
create_root_cnode(void)
{
    pptr_t  pptr;
    cap_t   cap;

    /* write the number of root CNode slots to global state */
    ndks_boot.slot_pos_max = BIT(CONFIG_ROOT_CNODE_SIZE_BITS);

    /* create an empty root CNode */
    pptr = alloc_region(CONFIG_ROOT_CNODE_SIZE_BITS + CTE_SIZE_BITS);
    if (!pptr) {
        printf("Kernel init failing: could not create root cnode\n");
        return cap_null_cap_new();
    }
    memzero(CTE_PTR(pptr), 1U << (CONFIG_ROOT_CNODE_SIZE_BITS + CTE_SIZE_BITS));
    cap =
        cap_cnode_cap_new(
            CONFIG_ROOT_CNODE_SIZE_BITS,      /* radix      */
            wordBits - CONFIG_ROOT_CNODE_SIZE_BITS, /* guard size */
            0,                                /* guard      */
            pptr                              /* pptr       */
        );

    /* write the root CNode cap into the root CNode */
    write_slot(SLOT_PTR(pptr, BI_CAP_IT_CNODE), cap);

    return cap;
}
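Most of the boot.c examples on this page go through the same small helper: write_slot() stores a capability into a CNode slot (CTE) and resets the slot's mapping-database (MDB) node so the new cap starts out revocable. A sketch of how it is defined in these seL4 trees (simplified; the MDB setters come from seL4's generated bitfield API):

BOOT_CODE void write_slot(slot_ptr_t slot_ptr, cap_t cap)
{
    slot_ptr->cap = cap;                 /* the capability itself */
    slot_ptr->cteMDBNode = nullMDBNode;  /* fresh mapping-database node */
    mdb_node_ptr_set_mdbRevocable(&slot_ptr->cteMDBNode, true);
    mdb_node_ptr_set_mdbFirstBadged(&slot_ptr->cteMDBNode, true);
}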
Code example #2
File: boot.c Project: buaacszj/myseL4
BOOT_CODE static void
init_irqs(cap_t root_cnode_cap, bool_t mask_irqs)
{
    irq_t i;

    for (i = 0; i <= maxIRQ; i++) {
        if (i == irq_timer) {
            setIRQState(IRQTimer, i);
        } else if (i == irq_iommu) {
            setIRQState(IRQReserved, i);
#ifdef CONFIG_IRQ_PIC
        } else if (i == 2) {
            /* cascaded legacy PIC */
            setIRQState(IRQReserved, i);
#endif
        } else if (i >= irq_controller_min && i <= irq_controller_max) {
            if (mask_irqs) {
                /* Don't use setIRQState() here because it implicitly also enables
                 * the IRQ on the interrupt controller, which only node 0 is allowed to do. */
                intStateIRQTable[i] = IRQReserved;
            } else {
                setIRQState(IRQInactive, i);
            }
        } else if (i >= irq_msi_min && i <= irq_msi_max) {
            setIRQState(IRQInactive, i);
        }
    }

    /* provide the IRQ control cap */
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IRQ_CTRL), cap_irq_control_cap_new());
}
Code example #3
File: boot.c Project: UNIVERSAL-IT-SYSTEMS/seL4
BOOT_CODE static void
init_irqs(cap_t root_cnode_cap)
{
    irq_t i;

    for (i = 0; i <= maxIRQ; i++) {
        if (i == irq_timer) {
            setIRQState(IRQTimer, i);
        } else if (i == irq_iommu) {
            setIRQState(IRQReserved, i);
        } else if (i == 2 && config_set(CONFIG_IRQ_PIC)) {
            /* cascaded legacy PIC */
            setIRQState(IRQReserved, i);
        } else if (i >= irq_isa_min && i <= irq_isa_max) {
            if (config_set(CONFIG_IRQ_PIC)) {
                setIRQState(IRQInactive, i);
            } else {
                setIRQState(IRQReserved, i);
            }
        } else if (i >= irq_user_min && i <= irq_user_max) {
            if (config_set(CONFIG_IRQ_IOAPIC)) {
                setIRQState(IRQInactive, i);
            } else {
                setIRQState(IRQReserved, i);
            }
        } else {
            setIRQState(IRQReserved, i);
        }
    }
    Arch_irqStateInit();
    /* provide the IRQ control cap */
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapIRQControl), cap_irq_control_cap_new());
}
Code example #4
/* Finalizable objs fall back to objs with fin when resurrection fallback happens */
static void finalizable_objs_fallback(GC *gc)
{
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *finalizable_obj_pool = metadata->finalizable_obj_pool;
  Pool *obj_with_fin_pool = metadata->obj_with_fin_pool;
  Vector_Block *obj_with_fin_block = finref_get_free_block(gc);
    
  Vector_Block *block = pool_get_entry(finalizable_obj_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      /* Perhaps obj has been resurrected by previous resurrections. If the fin-obj was resurrected, we need to put it back into the obj_with_fin pool.
         For minor collection, the resurrected obj was forwarded, so we need to use the new copy.*/
      if(!gc_obj_is_dead(gc, p_obj) && obj_belongs_to_nos(p_obj)){
        /* Even in NOS, not all live objects are forwarded due to the partial-forward algorithm */
        if(!NOS_PARTIAL_FORWARD || fspace_obj_to_be_forwarded(p_obj)){
          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
          p_obj = read_slot(p_ref);
        }
      }
      /* Perhaps obj_with_fin_block has been allocated with a new free block if it is full */
      obj_with_fin_block = gc_add_finalizer(gc, obj_with_fin_block, p_obj);
    }
    block = pool_get_entry(finalizable_obj_pool);
  }
  
  pool_put_entry(obj_with_fin_pool, obj_with_fin_block);
  metadata->pending_finalizers = FALSE;
}
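The garbage-collector snippets interleaved below (they appear to come from Apache Harmony's DRLVM) use a matching read_slot()/write_slot() pair, where a REF may be a compressed reference rather than a raw pointer. A minimal sketch under that assumption; ref_to_obj_ptr() and obj_ptr_to_ref() stand in for the encode/decode helpers and are assumptions, not quoted source:

static inline Partial_Reveal_Object *read_slot(REF *p_ref)
{
  return ref_to_obj_ptr(*p_ref);   /* decode the (possibly compressed) REF */
}

static inline void write_slot(REF *p_ref, Partial_Reveal_Object *p_obj)
{
  *p_ref = obj_ptr_to_ref(p_obj);  /* encode the pointer back into a REF */
}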
Code example #5
File: boot.c Project: seL4/seL4
BOOT_CODE static void init_irqs(cap_t root_cnode_cap)
{
    irq_t i;

    for (i = 0; i <= maxIRQ; i++) {
        setIRQState(IRQInactive, i);
    }
    setIRQState(IRQTimer, KERNEL_TIMER_IRQ);
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    setIRQState(IRQReserved, INTERRUPT_VGIC_MAINTENANCE);
#endif
#ifdef CONFIG_ARM_SMMU
    setIRQState(IRQReserved, INTERRUPT_SMMU);
#endif

#ifdef CONFIG_ARM_ENABLE_PMU_OVERFLOW_INTERRUPT
#ifdef KERNEL_PMU_IRQ
    setIRQState(IRQReserved, KERNEL_PMU_IRQ);
#if (defined CONFIG_PLAT_TX1 && defined ENABLE_SMP_SUPPORT)
//SELFOUR-1252
#error "This platform doesn't support tracking CPU utilisation on multicore"
#endif /* CONFIG_PLAT_TX1 && ENABLE_SMP_SUPPORT */
#else
#error "This platform doesn't support tracking CPU utilisation feature"
#endif /* KERNEL_PMU_IRQ */
#endif /* CONFIG_ARM_ENABLE_PMU_OVERFLOW_INTERRUPT */

#ifdef ENABLE_SMP_SUPPORT
    setIRQState(IRQIPI, irq_remote_call_ipi);
    setIRQState(IRQIPI, irq_reschedule_ipi);
#endif /* ENABLE_SMP_SUPPORT */

    /* provide the IRQ control cap */
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapIRQControl), cap_irq_control_cap_new());
}
Code example #6
/* Only called in non-minor collections. The parameter pointer_addr_in_pool indicates whether the pool entries are the addresses of ref slots (p_ref) or the references themselves (p_obj). */
static void nondestructively_fix_finref_pool(GC *gc, Pool *pool, Boolean pointer_addr_in_pool, Boolean double_fix)
{
  Finref_Metadata *metadata = gc->finref_metadata;
  REF *p_ref;
  Partial_Reveal_Object *p_obj;
  
  /* NOTE:: this is nondestructive to the root sets. */
  pool_iterator_init(pool);
  Vector_Block *repset = pool_iterator_next(pool);
  while(repset){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(repset);
    for(; !vector_block_iterator_end(repset,iter); iter = vector_block_iterator_advance(repset,iter)){
      if(pointer_addr_in_pool)
        p_ref = (REF*)*iter;
      else
        p_ref = (REF*)iter;
      p_obj = read_slot(p_ref);
      
      if(collect_is_compact_move()){ /* include both unique move-compact and major move-compact */
        move_compaction_update_ref(gc, p_ref);
      } else if(collect_is_ms_compact()){
        if(obj_is_fw_in_oi(p_obj))
          moving_mark_sweep_update_ref(gc, p_ref, double_fix);
      } else { /* major slide compact */
        assert((obj_is_marked_in_vt(p_obj) && obj_is_fw_in_oi(p_obj)));
        write_slot(p_ref, obj_get_fw_in_oi(p_obj));
      }
    }
    repset = pool_iterator_next(pool);
  }
}
Code example #7
File: mcp_packet.c Project: louwers/mcbuild
ENCODE_BEGIN(SP_WindowItems,_1_8_1) {
    Wchar(wid);
    Wshort(count);
    int i;
    for(i=0; i<tpkt->count; i++) {
        w = write_slot(w, &tpkt->slots[i]);
    }
} ENCODE_END;
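For orientation: in these ENCODE_BEGIN blocks, w is an output cursor into the packet buffer, tpkt is the decoded packet struct, and each Wchar/Wshort macro appends one fixed-width big-endian field; write_slot() likewise serializes an inventory slot and returns the advanced cursor. A self-contained sketch of that cursor-writer pattern, using a hypothetical helper rather than mcbuild's actual macros:

#include <stdint.h>

static inline uint8_t *write_short_be(uint8_t *w, uint16_t v)
{
    *w++ = (uint8_t)(v >> 8);   /* high byte first (network byte order) */
    *w++ = (uint8_t)(v & 0xff);
    return w;                   /* hand back the advanced cursor */
}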
Code example #8
File: boot.c Project: celskeggs/seL4
BOOT_CODE bool_t
provide_cap(cap_t root_cnode_cap, cap_t cap)
{
    if (ndks_boot.slot_pos_cur >= ndks_boot.slot_pos_max) {
        printf("Kernel init failed: ran out of cap slots\n");
        return false;
    }
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), ndks_boot.slot_pos_cur), cap);
    ndks_boot.slot_pos_cur++;
    return true;
}
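provide_cap() is the boot code's contract for handing out caps in allocation order: callers snapshot ndks_boot.slot_pos_cur before and after a batch of calls and publish the resulting [start, end) range as a slot_region_t in the bootinfo frame, as create_it_address_space in example #21 does. A hedged usage sketch of that pattern; provide_caps_batch(), caps and n_caps are hypothetical:

BOOT_CODE static bool_t provide_caps_batch(cap_t root_cnode_cap, cap_t *caps, word_t n_caps)
{
    word_t i;
    slot_pos_t start = ndks_boot.slot_pos_cur;
    for (i = 0; i < n_caps; i++) {
        if (!provide_cap(root_cnode_cap, caps[i])) {
            return false;   /* ran out of root CNode slots */
        }
    }
    /* publish which slots the batch occupied as a [start, end) region */
    ndks_boot.bi_frame->ui_pt_caps = (slot_region_t) { start, ndks_boot.slot_pos_cur };
    return true;
}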
Code example #9
static inline void fallback_update_fw_ref(REF *p_ref)
{
  assert(collect_is_fallback());
  
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){
    assert(!obj_is_marked_in_vt(p_obj));
    assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj)));
    p_obj = obj_get_fw_in_oi(p_obj);
    assert(p_obj);
    write_slot(p_ref, p_obj);
  }
}
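Several of the GC snippets here test obj_is_fw_in_oi() and then chase obj_get_fw_in_oi(). The underlying idea is that the object-info header word doubles as a forwarding pointer once a tag bit is set in it. A sketch under that assumption; FORWARD_BIT and get_obj_info_raw() follow the DRLVM naming but are assumptions, not quoted source:

#define FORWARD_BIT 0x1

static inline Boolean obj_is_fw_in_oi(Partial_Reveal_Object *p_obj)
{
  /* a set FORWARD_BIT marks the header word as a forwarding pointer */
  return (get_obj_info_raw(p_obj) & FORWARD_BIT) != 0;
}

static inline Partial_Reveal_Object *obj_get_fw_in_oi(Partial_Reveal_Object *p_obj)
{
  /* mask the tag bit off to recover the new address of the moved object */
  return (Partial_Reveal_Object*)(get_obj_info_raw(p_obj) & ~(POINTER_SIZE_INT)FORWARD_BIT);
}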
Code example #10
static void resurrect_finalizable_objects(Collector *collector)
{
  GC *gc = collector->gc;
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *finalizable_obj_pool = metadata->finalizable_obj_pool;
  
  if(finalizable_obj_pool_is_empty(gc))
    return;
  
  DURING_RESURRECTION = TRUE;
  
  pool_iterator_init(finalizable_obj_pool);
  Vector_Block *block = pool_iterator_next(finalizable_obj_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      
      /* Perhaps obj has been resurrected by previous resurrections */
      if(!gc_obj_is_dead(gc, p_obj)){
        if(collect_is_minor() && obj_need_move(gc, p_obj))
          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
        continue;
      }
      
      resurrect_obj_tree(collector, p_ref);
      if(collector->result == FALSE){
        /* Resurrection fallback happens */
        assert(collect_is_minor());
        return; /* force return */
      }
    }
    
    block = pool_iterator_next(finalizable_obj_pool);
  }
  
  /* In major & fallback & sweep-compact collections we need to record the p_ref of the root dead obj to update it later.
   * Because it is outside the heap, we can't update it during ref fixing.
   * In minor collection p_ref of the root dead obj is automatically updated while tracing.
   */
  if(collect_need_update_repset())
    finref_add_repset_from_pool(gc, finalizable_obj_pool);
  metadata->pending_finalizers = TRUE;
  
  DURING_RESURRECTION = FALSE;
  
  /* finalizable objs have been added to the finref repset pool or updated by tracing */
}
Code example #11
File: boot.c Project: celskeggs/seL4
BOOT_CODE void
create_bi_frame_cap(
    cap_t      root_cnode_cap,
    cap_t      pd_cap,
    pptr_t     pptr,
    vptr_t     vptr
)
{
    cap_t cap;

    /* create a cap for it and write it into the root CNode */
    cap = create_mapped_it_frame_cap(pd_cap, pptr, vptr, false, false);
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_BI_FRAME), cap);
}
Code example #12
File: boot.c Project: dnm/seL4
BOOT_CODE cap_t
create_it_asid_pool(cap_t root_cnode_cap)
{
    pptr_t ap_pptr;
    cap_t  ap_cap;

    /* create ASID pool */
    ap_pptr = alloc_region(ASID_POOL_SIZE_BITS);
    if (!ap_pptr) {
        printf("Kernel init failed: failed to create initial thread asid pool\n");
        return cap_null_cap_new();
    }
    memzero(ASID_POOL_PTR(ap_pptr), 1 << ASID_POOL_SIZE_BITS);
    ap_cap = cap_asid_pool_cap_new(IT_ASID >> asidLowBits, ap_pptr);
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_ASID_POOL), ap_cap);

    /* create ASID control cap */
    write_slot(
        SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_ASID_CTRL),
        cap_asid_control_cap_new()
    );

    return ap_cap;
}
Code example #13
File: boot.c Project: celskeggs/seL4
BOOT_CODE void
create_domain_cap(cap_t root_cnode_cap)
{
    cap_t cap;
    word_t i;

    /* Check domain scheduler assumptions. */
    assert(ksDomScheduleLength > 0);
    for (i = 0; i < ksDomScheduleLength; i++) {
        assert(ksDomSchedule[i].domain < CONFIG_NUM_DOMAINS);
        assert(ksDomSchedule[i].length > 0);
    }

    cap = cap_domain_cap_new();
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_DOM), cap);
}
Code example #14
File: testmailslot.c Project: 0x24bin/winexe-1
int main(int argc, char **argv)
{
	if (argc < 3 || 
			(strcmp(argv[1], "read") && strcmp(argv[1], "write"))) {
		printf("Usage: %s read|write mailslot\n", argv[0]);
		return 1;
	}

	if (!strcmp(argv[1], "read")) {
		return read_slot(argv[2]);
	}

	if (!strcmp(argv[1], "write")) {
		return write_slot(argv[2]);
	}

	return 0;
}
Code example #15
/* In two cases mark-sweep needs fixing repointed refs:
 * 1. ms with compaction
 * 2. ms as a mos collection algorithm
 */
static inline void moving_mark_sweep_update_ref(GC *gc, REF *p_ref, Boolean double_fix)
{
  /* There are only two kinds of p_ref being added into finref_repset_pool:
   * 1. p_ref is in a vector block from one finref pool;
   * 2. p_ref is a referent field.
   * So if p_ref belongs to the heap, it must be a referent field pointer.
   * Resurrected objects other than a tree root need not be recorded in finref_repset_pool.
   */
  if(address_belongs_to_gc_heap((void*)p_ref, gc)){
    unsigned int offset = get_gc_referent_offset();
    Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_ref - offset);
    if(obj_is_fw_in_oi(p_old_ref)){
      Partial_Reveal_Object *p_new_ref = obj_get_fw_in_oi(p_old_ref);
      /* Only major collection in MS Gen GC might need double_fix.
       * Double fixing happens when both forwarding and compaction happen.
       */
      if(double_fix && obj_is_fw_in_oi(p_new_ref)){
        assert(major_is_marksweep());
        p_new_ref = obj_get_fw_in_oi(p_new_ref);
        assert(address_belongs_to_gc_heap(p_new_ref, gc));
      }
      p_ref = (REF*)((POINTER_SIZE_INT)p_new_ref + offset);
    }
  }
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  /* assert(obj_need_move(gc, p_obj));
   * This assertion is commented out because it asserts !obj_is_dead(gc, p_obj).
   * When gc_fix_rootset is invoked, the mark bit and alloc bit have already been flipped in Mark-Sweep,
   * so the assertion would fail.
   * But p_obj here is certainly one that needs moving.
   */
  p_obj = obj_get_fw_in_oi(p_obj);
  /* Only major collection in MS Gen GC might need double_fix.
   * Double fixing happens when both forwarding and compaction happen.
   */
  if(double_fix && obj_is_fw_in_oi(p_obj)){
    assert(major_is_marksweep());
    p_obj = obj_get_fw_in_oi(p_obj);
    assert(address_belongs_to_gc_heap(p_obj, gc));
  }
  write_slot(p_ref, p_obj);
}
Code example #16
File: boot.c Project: celskeggs/seL4
BOOT_CODE cap_t
create_ipcbuf_frame(cap_t root_cnode_cap, cap_t pd_cap, vptr_t vptr)
{
    cap_t cap;
    pptr_t pptr;

    /* allocate the IPC buffer frame */
    pptr = alloc_region(PAGE_BITS);
    if (!pptr) {
        printf("Kernel init failing: could not create ipc buffer frame\n");
        return cap_null_cap_new();
    }
    clearMemory((void*)pptr, PAGE_BITS);

    /* create a cap for it and write it into the root CNode */
    cap = create_mapped_it_frame_cap(pd_cap, pptr, vptr, false, false);
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_IPCBUF), cap);

    return cap;
}
Code example #17
static void identify_finalizable_objects(Collector *collector)
{
  GC *gc = collector->gc;
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *obj_with_fin_pool = metadata->obj_with_fin_pool;
  
  gc_reset_finalizable_objects(gc);
  pool_iterator_init(obj_with_fin_pool);
  Vector_Block *block = pool_iterator_next(obj_with_fin_pool);
  while(block){
    unsigned int block_has_ref = 0;
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      if(collect_is_fallback())
        fallback_update_fw_ref(p_ref);  // in case this collection is ALGO_MAJOR_FALLBACK
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      if(!p_obj)
        continue;
      if(gc_obj_is_dead(gc, p_obj)){
        gc_add_finalizable_obj(gc, p_obj);
        *p_ref = (REF)NULL;
      } else {
        if(collect_is_minor() && obj_need_move(gc, p_obj)){
          assert(obj_is_fw_in_oi(p_obj));
          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
        }
        ++block_has_ref;
      }
    }
    if(!block_has_ref)
      vector_block_clear(block);
    
    block = pool_iterator_next(obj_with_fin_pool);
  }
  gc_put_finalizable_objects(gc);
  
  if(collect_need_update_repset())
    finref_add_repset_from_pool(gc, obj_with_fin_pool);
}
Code example #18
static void update_referent_field_ignore_finref(GC *gc, Pool *pool)
{
  Vector_Block *block = pool_get_entry(pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);
        
      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);      
      if(!p_referent){  // referent field has been cleared
        *p_ref = (REF)NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent)){
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object *p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            if(gc_is_gen_mode() && addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
              collector_remset_add_entry(gc->collectors[0], (Partial_Reveal_Object**)p_referent_field);
          } else {
            finref_repset_add_entry(gc, p_referent_field);
          }
        }
        *p_ref = (REF)NULL;
        continue;
      }
      *p_referent_field = (REF)NULL; /* referent is weakly reachable: clear the referent field */
    }
    block = pool_get_entry(pool);
  }
}
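The obj_get_referent_field() calls above, together with the base-address arithmetic in examples #15 and #19 (p_ref minus get_gc_referent_offset()), imply that the referent field sits at one fixed, cached offset inside every java.lang.ref.Reference object. A sketch of the accessor that relationship suggests, offered as an assumption rather than the verbatim implementation:

static inline REF *obj_get_referent_field(Partial_Reveal_Object *p_obj)
{
  /* referent slot = object base + cached offset of Reference.referent */
  return (REF*)((POINTER_SIZE_INT)p_obj + get_gc_referent_offset());
}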
Code example #19
/* Move compaction needs special treatment when updating the referent field */
static inline void move_compaction_update_ref(GC *gc, REF *p_ref)
{
  /* There are only two kinds of p_ref being added into finref_repset_pool:
   * 1. p_ref is in a vector block from one finref pool;
   * 2. p_ref is a referent field.
   * So if p_ref belongs to the heap, it must be a referent field pointer.
   * Resurrected objects other than a tree root need not be recorded in finref_repset_pool.
   */
  if(address_belongs_to_gc_heap(p_ref, gc) && (p_ref >= los_boundary)){
    unsigned int offset = get_gc_referent_offset();
    Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_ref - offset);
    Partial_Reveal_Object *p_new_ref = obj_get_fw_in_table(p_old_ref);
    p_ref = (REF*)((POINTER_SIZE_INT)p_new_ref + offset);
  }
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  assert(space_of_addr(gc, p_obj)->move_object);
  
  if(p_obj < los_boundary)
    p_obj = obj_get_fw_in_oi(p_obj);
  else
    p_obj = obj_get_fw_in_table(p_obj);

  write_slot(p_ref, p_obj);
}
Code example #20
File: boot.c Project: UNIVERSAL-IT-SYSTEMS/seL4
BOOT_CODE bool_t
init_sys_state(
    cpu_id_t      cpu_id,
    mem_p_regs_t  mem_p_regs,
    dev_p_regs_t* dev_p_regs,
    ui_info_t     ui_info,
    p_region_t    boot_mem_reuse_p_reg,
    /* parameters below not modeled in abstract specification */
    uint32_t      num_drhu,
    paddr_t*      drhu_list,
    acpi_rmrr_list_t *rmrr_list
)
{
    cap_t         root_cnode_cap;
    vptr_t        bi_frame_vptr;
    vptr_t        ipcbuf_vptr;
    cap_t         it_vspace_cap;
    cap_t         it_ap_cap;
    cap_t         ipcbuf_cap;
    pptr_t        bi_frame_pptr;
    create_frames_of_region_ret_t create_frames_ret;
#ifdef CONFIG_ENABLE_BENCHMARKS
    vm_attributes_t buffer_attr = {{ 0 }};
    word_t paddr;
    pde_t pde;
#endif /* CONFIG_ENABLE_BENCHMARKS */

    /* convert from physical addresses to kernel pptrs */
    region_t ui_reg             = paddr_to_pptr_reg(ui_info.p_reg);
    region_t boot_mem_reuse_reg = paddr_to_pptr_reg(boot_mem_reuse_p_reg);

    /* convert from physical addresses to userland vptrs */
    v_region_t ui_v_reg;
    v_region_t it_v_reg;
    ui_v_reg.start = ui_info.p_reg.start - ui_info.pv_offset;
    ui_v_reg.end   = ui_info.p_reg.end   - ui_info.pv_offset;

    ipcbuf_vptr = ui_v_reg.end;
    bi_frame_vptr = ipcbuf_vptr + BIT(PAGE_BITS);

    /* The region of the initial thread is the user image + ipcbuf and boot info */
    it_v_reg.start = ui_v_reg.start;
    it_v_reg.end = bi_frame_vptr + BIT(PAGE_BITS);

    init_freemem(ui_info.p_reg, mem_p_regs);

    /* initialise virtual-memory-related data structures (not in abstract spec) */
    if (!init_vm_state()) {
        return false;
    }

#ifdef CONFIG_ENABLE_BENCHMARKS
    /* allocate and create the log buffer */
    buffer_attr.words[0] = IA32_PAT_MT_WRITE_THROUGH;

    paddr = pptr_to_paddr((void *) alloc_region(pageBitsForSize(X86_LargePage)));

    /* allocate a large frame for logging */
    pde = x86_make_pde_mapping(paddr, buffer_attr);
    ia32KSGlobalPD[IA32_KSLOG_IDX] = pde;


    /* flush the tlb */
    invalidateTranslationAll();

    /* if we crash here, the log isn't working */
#ifdef CONFIG_DEBUG_BUILD
#if CONFIG_MAX_NUM_TRACE_POINTS > 0
    printf("Testing log\n");
    ksLog[0].data = 0xdeadbeef;
    printf("Wrote to ksLog %x\n", ksLog[0].data);
    assert(ksLog[0].data == 0xdeadbeef);
#endif /* CONFIG_MAX_NUM_TRACE_POINTS */
#endif /* CONFIG_DEBUG_BUILD */
#endif /* CONFIG_ENABLE_BENCHMARKS */

    /* create the root cnode */
    root_cnode_cap = create_root_cnode();

    /* create the IO port cap */
    write_slot(
        SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapIOPort),
        cap_io_port_cap_new(
            0,                /* first port */
            NUM_IO_PORTS - 1 /* last port  */
        )
    );

    /* create the cap for managing thread domains */
    create_domain_cap(root_cnode_cap);

    /* create the IRQ CNode */
    if (!create_irq_cnode()) {
        return false;
    }

    /* initialise the IRQ states and provide the IRQ control cap */
    init_irqs(root_cnode_cap);

    /* create the bootinfo frame */
    bi_frame_pptr = allocate_bi_frame(0, 1, ipcbuf_vptr);
    if (!bi_frame_pptr) {
        return false;
    }

    /* Construct an initial address space with enough virtual addresses
     * to cover the user image + ipc buffer and bootinfo frames */
    it_vspace_cap = create_it_address_space(root_cnode_cap, it_v_reg);
    if (cap_get_capType(it_vspace_cap) == cap_null_cap) {
        return false;
    }

    /* Create and map bootinfo frame cap */
    create_bi_frame_cap(
        root_cnode_cap,
        it_vspace_cap,
        bi_frame_pptr,
        bi_frame_vptr
    );

    /* create the initial thread's IPC buffer */
    ipcbuf_cap = create_ipcbuf_frame(root_cnode_cap, it_vspace_cap, ipcbuf_vptr);
    if (cap_get_capType(ipcbuf_cap) == cap_null_cap) {
        return false;
    }

    /* create all userland image frames */
    create_frames_ret =
        create_frames_of_region(
            root_cnode_cap,
            it_vspace_cap,
            ui_reg,
            true,
            ui_info.pv_offset
        );
    if (!create_frames_ret.success) {
        return false;
    }
    ndks_boot.bi_frame->userImageFrames = create_frames_ret.region;

    /* create the initial thread's ASID pool */
    it_ap_cap = create_it_asid_pool(root_cnode_cap);
    if (cap_get_capType(it_ap_cap) == cap_null_cap) {
        return false;
    }
    write_it_asid_pool(it_ap_cap, it_vspace_cap);

    /*
     * Initialise the NULL FPU state. This is different from merely zero'ing it
     * out (i.e., the NULL FPU state is non-zero), and must be performed before
     * the first thread is created.
     */
    resetFpu();
    saveFpuState(&x86KSnullFpuState);
    x86KSfpuOwner = NULL;

    /* create the idle thread */
    if (!create_idle_thread()) {
        return false;
    }

    /* create the initial thread */
    if (!create_initial_thread(
                root_cnode_cap,
                it_vspace_cap,
                ui_info.v_entry,
                bi_frame_vptr,
                ipcbuf_vptr,
                ipcbuf_cap
            )) {
        return false;
    }

    if (config_set(CONFIG_IOMMU)) {
        /* initialise VTD-related data structures and the IOMMUs */
        if (!vtd_init(cpu_id, num_drhu, rmrr_list)) {
            return false;
        }

        /* write number of IOMMU PT levels into bootinfo */
        ndks_boot.bi_frame->numIOPTLevels = x86KSnumIOPTLevels;

        /* write IOSpace master cap */
        write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapIOSpace), master_iospace_cap());
    } else {
        ndks_boot.bi_frame->numIOPTLevels = -1;
    }

    /* convert the remaining free memory into UT objects and provide the caps */
    if (!create_untypeds(root_cnode_cap, boot_mem_reuse_reg)) {
        return false;
    }
    /* WARNING: alloc_region() must not be called anymore after here! */

    /* create device frames */
    if (!create_device_frames(root_cnode_cap, dev_p_regs)) {
        return false;
    }

    /* finalise the bootinfo frame */
    bi_finalise();

    return true;
}
Code example #21
File: boot.c Project: buaacszj/myseL4
/* Create an address space for the initial thread.
 * This includes page directory and page tables */
BOOT_CODE static cap_t
create_it_address_space(cap_t root_cnode_cap, v_region_t it_v_reg)
{
    cap_t      vspace_cap;
    vptr_t     vptr;
    pptr_t     pptr;
    slot_pos_t slot_pos_before;
    slot_pos_t slot_pos_after;

    slot_pos_before = ndks_boot.slot_pos_cur;
    if (PDPT_BITS == 0) {
        cap_t pd_cap;
        pptr_t pd_pptr;
        /* just create single PD obj and cap */
        pd_pptr = alloc_region(PD_SIZE_BITS);
        if (!pd_pptr) {
            return cap_null_cap_new();
        }
        memzero(PDE_PTR(pd_pptr), 1 << PD_SIZE_BITS);
        copyGlobalMappings(PDE_PTR(pd_pptr));
        pd_cap = create_it_page_directory_cap(cap_null_cap_new(), pd_pptr, 0, IT_ASID);
        if (!provide_cap(root_cnode_cap, pd_cap)) {
            return cap_null_cap_new();
        }
        write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_VSPACE), pd_cap);
        vspace_cap = pd_cap;
    } else {
        cap_t pdpt_cap;
        pptr_t pdpt_pptr;
        unsigned int i;
        /* create a PDPT obj and cap */
        pdpt_pptr = alloc_region(PDPT_SIZE_BITS);
        if (!pdpt_pptr) {
            return cap_null_cap_new();
        }
        memzero(PDPTE_PTR(pdpt_pptr), 1 << PDPT_SIZE_BITS);
        pdpt_cap = cap_pdpt_cap_new(
                       true,       /* capPDPTISMapped */
                       IT_ASID,    /* capPDPTMappedASID */
                       pdpt_pptr        /* capPDPTBasePtr */
                   );
        /* Create all PD objs and caps necessary to cover the userland image. For
         * simplicity, and to ensure we also cover the kernel window, we create all PDs. */
        for (i = 0; i < BIT(PDPT_BITS); i++) {
            /* The compiler is under the mistaken belief here that this shift could be
             * undefined. However, in the case that it would be undefined this code path
             * is not reachable because PDPT_BITS == 0 (see if statement at the top of
             * this function), so to work around it we must both put in a redundant
             * if statement AND place the shift in a variable. While the variable
             * will get compiled away it prevents the compiler from evaluating
             * the 1 << 32 as a constant when it shouldn't
             * tl;dr gcc evaluates constants even if code is unreachable */
            int shift = (PD_BITS + PT_BITS + PAGE_BITS);
            if (shift != 32) {
                vptr = i << shift;
            } else {
                return cap_null_cap_new();
            }

            pptr = alloc_region(PD_SIZE_BITS);
            if (!pptr) {
                return cap_null_cap_new();
            }
            memzero(PDE_PTR(pptr), 1 << PD_SIZE_BITS);
            if (!provide_cap(root_cnode_cap,
                             create_it_page_directory_cap(pdpt_cap, pptr, vptr, IT_ASID))
               ) {
                return cap_null_cap_new();
            }
        }
        /* now that PDs exist we can copy the global mappings */
        copyGlobalMappings(PDPTE_PTR(pdpt_pptr));
        write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_VSPACE), pdpt_cap);
        vspace_cap = pdpt_cap;
    }

    slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->ui_pd_caps = (slot_region_t) {
        slot_pos_before, slot_pos_after
    };
    /* create all PT objs and caps necessary to cover userland image */
    slot_pos_before = ndks_boot.slot_pos_cur;

    for (vptr = ROUND_DOWN(it_v_reg.start, PT_BITS + PAGE_BITS);
            vptr < it_v_reg.end;
            vptr += BIT(PT_BITS + PAGE_BITS)) {
        pptr = alloc_region(PT_SIZE_BITS);
        if (!pptr) {
            return cap_null_cap_new();
        }
        memzero(PTE_PTR(pptr), 1 << PT_SIZE_BITS);
        if (!provide_cap(root_cnode_cap,
                         create_it_page_table_cap(vspace_cap, pptr, vptr, IT_ASID))
           ) {
            return cap_null_cap_new();
        }
    }

    slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->ui_pt_caps = (slot_region_t) {
        slot_pos_before, slot_pos_after
    };

    return vspace_cap;
}
Code example #22
static FORCE_INLINE void forward_object(Collector *collector, REF *p_ref) 
{
  Space* space = collector->collect_space; 
  GC* gc = collector->gc;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);

  /* p_obj can also be in tospace because this p_ref is a redundant one in the mutator remset.
     We don't rem p_ref because it was remembered the first time it was met.
     FIXME:: obj_belongs_to_tospace() should never be true if we remembered
     objects rather than slots. Currently, the mutator remembers objects, and
     collectors remember slots. Even though collectors remember slots, there is
     no chance of a repeated p_ref, because an object is scanned only when it is
     marked or forwarded atomically, so only one collector gets the chance
     to do the scanning. */
  if(!obj_belongs_to_nos(p_obj) || obj_belongs_to_tospace(p_obj)) return; 

  Partial_Reveal_Object* p_target_obj = NULL;
  Boolean to_rem_slot = FALSE;

  /* Fastpath: object has already been forwarded, update the ref slot */
  if(obj_is_fw_in_oi(p_obj)){
    p_target_obj = obj_get_fw_in_oi(p_obj);
    write_slot(p_ref, p_target_obj);

    /* check if the target obj stays in NOS, and p_ref from MOS. If yes, rem p_ref. */
    if(obj_belongs_to_tospace(p_target_obj))
      if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
        collector_remset_add_entry(collector, ( Partial_Reveal_Object**) p_ref); 

    return; 
  }  
    
  /* following is the logic for forwarding */  
  p_target_obj = collector_forward_object(collector, p_obj);
  
  /* If p_target_obj is NULL, the object was forwarded by another thread.
      Note: there is a race condition here; it might have been forwarded by another thread
      that has not set the forwarding pointer yet. We need to spin here to get the
      forwarding pointer. We could implement collector_forward_object() so that the
      forwarding pointer is set in the atomic instruction, which requires rolling back
      the mos_alloced space. That is easy to cancel for thread-local block allocation. */
  if( p_target_obj == NULL ){
    if(collector->result == FALSE ){
      /* failed to forward, let's get back to controller. */
      vector_stack_clear(collector->trace_stack);
      return;
    }
    /* forwarded already */
    p_target_obj = obj_get_fw_in_oi(p_obj);
  
  }else{  /* otherwise, we successfully forwarded */

#ifdef GC_GEN_STATS
  if(gc_profile){
    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
    gc_gen_collector_update_marked_nos_obj_stats_minor(stats);
    gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj));
  }
#endif

    scan_object(collector, p_target_obj);
  }
  
  assert(p_target_obj);
  write_slot(p_ref, p_target_obj);
  
  /* check if the target obj stays in NOS, and p_ref from MOS. If yes, rem p_ref. */
  if(obj_belongs_to_tospace(p_target_obj)){
    if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
      collector_remset_add_entry(collector, ( Partial_Reveal_Object**) p_ref); 
  }
   
  return;
}
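The comments in this example explain why collector_forward_object() may return NULL: collectors race to claim the same object, and the loser must pick up the winner's forwarding pointer. A minimal sketch of the CAS-based claim the comment describes, reusing the FORWARD_BIT encoding assumed earlier; thread_local_alloc(), thread_local_unalloc(), obj_info_addr() and atomic_cas_ptr() are hypothetical names:

static Partial_Reveal_Object *try_forward_object(Collector *collector, Partial_Reveal_Object *p_obj)
{
  POINTER_SIZE_INT old_info = get_obj_info_raw(p_obj);
  if(old_info & FORWARD_BIT)
    return NULL;  /* already claimed by another collector */

  unsigned int size = vm_object_size(p_obj);
  Partial_Reveal_Object *new_addr = thread_local_alloc(collector, size);
  if(!new_addr){
    collector->result = FALSE;  /* forwarding space exhausted: report failure */
    return NULL;
  }
  memcpy(new_addr, p_obj, size);  /* speculative copy before claiming */

  /* exactly one collector's CAS on the header word succeeds */
  POINTER_SIZE_INT new_info = (POINTER_SIZE_INT)new_addr | FORWARD_BIT;
  if(atomic_cas_ptr(obj_info_addr(p_obj), old_info, new_info) != old_info){
    thread_local_unalloc(collector, size);  /* lost the race: roll back the bump allocation */
    return NULL;
  }
  return new_addr;
}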
Code example #23
/*
 * The reason why we don't use identify_dead_refs() to implement this function is
 * that we will differentiate phanref from weakref in the future.
 */
static void identify_dead_phanrefs(Collector *collector)
{
  GC *gc = collector->gc;
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *phanref_pool = metadata->phanref_pool;
  
  if(collect_need_update_repset())
    finref_reset_repset(gc);
//  collector_reset_repset(collector);
  pool_iterator_init(phanref_pool);
  Vector_Block *block = pool_iterator_next(phanref_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
      Partial_Reveal_Object *p_obj = read_slot((REF*)p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);

      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);      
      if(!p_referent){  // referent field has been cleared
        *p_ref = NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent)){
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); 

          } else { /* a collect_move_object() check would be redundant here because obj_need_move already checks it */
            finref_repset_add_entry(gc, p_referent_field);
          }
        }
        *p_ref = (REF)NULL;
        continue;
      }
      *p_referent_field = (REF)NULL;
#ifdef ORDER_DEBUG
      if(ref_file == NULL){
        if(order_record){
          ref_file = fopen64("RECORD_REF_LOG.log", "w+");
        } else {
          ref_file = fopen64("REPLAY_REF_LOG.log", "w+");
        }
      }
      assert(ref_file);
      fprintf(ref_file, "GC[%d]: ref (%d, %d) is DEAD!\n", gc->num_collections, p_referent->alloc_tid, p_referent->alloc_count);
      fflush(ref_file);
#endif
      /* Phantom status: for future use
       * if((unsigned int)p_referent & PHANTOM_REF_ENQUEUE_STATUS_MASK){
       *   // enqueued but not explicitly cleared OR pending for enqueueing
       *   *iter = NULL;
       * }
       * resurrect_obj_tree(collector, p_referent_field);
       */
    }
    block = pool_iterator_next(phanref_pool);
  }
//  collector_put_repset(collector);
  if(collect_need_update_repset()){
    finref_put_repset(gc);
    finref_add_repset_from_pool(gc, phanref_pool);
  }
}
Code example #24
static void identify_dead_refs(GC *gc, Pool *pool)
{
  if(collect_need_update_repset())
    finref_reset_repset(gc);

  pool_iterator_init(pool);
  Vector_Block *block = pool_iterator_next(pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);
        
      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
      
      if(!p_referent){  
        /* The referent field has been cleared. I forgot why we set p_ref to NULL here.
           I guess it's because this ref_obj was already processed through another p_ref,
           so there is no need to keep the same ref_obj in this p_ref. */
        *p_ref = (REF)NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent)){
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            /* If it's gen mode and the referent stays in NOS, we need to keep p_referent_field in the collector remset.
               This keeps the ref obj live even though it is actually only weakly reachable in the next gen-mode collection.
               This simplifies the design. Otherwise, we would need to remember ref objs in MOS separately and process them separately. */
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); 

          } else { /* a collect_move_object() check would be redundant here because obj_need_move already checks it */
            finref_repset_add_entry(gc, p_referent_field);
          }
        }
        *p_ref = (REF)NULL;
      }else{
        /* else, the referent is dead (weakly reachable), clear the referent field */
        *p_referent_field = (REF)NULL;
#ifdef ORDER_DEBUG
        if(ref_file == NULL){
          if(order_record){
            ref_file = fopen64("RECORD_REF_LOG.log", "w+");
          } else {
            ref_file = fopen64("REPLAY_REF_LOG.log", "w+");
          }
        }
        assert(ref_file);
        fprintf(ref_file, "GC[%d]: ref (%d, %d) is DEAD!\n", gc->num_collections, p_referent->alloc_tid, p_referent->alloc_count);
        fflush(ref_file);
#endif
        /* For a dead referent, p_ref is not set to NULL. p_ref keeps the ref object, which
           will be moved to the VM for enqueueing. */
      }
    }/* for each ref object */
    
    block = pool_iterator_next(pool);
  }
  
  if(collect_need_update_repset()){
    finref_put_repset(gc);
    finref_add_repset_from_pool(gc, pool);
  }
}
Code example #25
File: mcp_packet.c Project: louwers/mcbuild
ENCODE_BEGIN(SP_SetSlot,_1_8_1) {
    Wchar(wid);
    Wshort(sid);
    w = write_slot(w, &tpkt->slot);
} ENCODE_END;
Code example #26
File: boot.c Project: celskeggs/seL4
BOOT_CODE bool_t
create_initial_thread(
    cap_t  root_cnode_cap,
    cap_t  it_pd_cap,
    vptr_t ui_v_entry,
    vptr_t bi_frame_vptr,
    vptr_t ipcbuf_vptr,
    cap_t  ipcbuf_cap
)
{
    pptr_t pptr;
    cap_t  cap;
    tcb_t* tcb;
    deriveCap_ret_t dc_ret;

    /* allocate TCB */
    pptr = alloc_region(TCB_BLOCK_SIZE_BITS);
    if (!pptr) {
        printf("Kernel init failed: Unable to allocate tcb for initial thread\n");
        return false;
    }
    memzero((void*)pptr, 1 << TCB_BLOCK_SIZE_BITS);
    tcb = TCB_PTR(pptr + TCB_OFFSET);
    tcb->tcbTimeSlice = CONFIG_TIME_SLICE;
    Arch_initContext(&tcb->tcbArch.tcbContext);

    /* derive a copy of the IPC buffer cap for inserting */
    dc_ret = deriveCap(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_IPCBUF), ipcbuf_cap);
    if (dc_ret.status != EXCEPTION_NONE) {
        printf("Failed to derive copy of IPC Buffer\n");
        return false;
    }

    /* initialise TCB (corresponds directly to abstract specification) */
    cteInsert(
        root_cnode_cap,
        SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_CNODE),
        SLOT_PTR(pptr, tcbCTable)
    );
    cteInsert(
        it_pd_cap,
        SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_VSPACE),
        SLOT_PTR(pptr, tcbVTable)
    );
    cteInsert(
        dc_ret.cap,
        SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_IPCBUF),
        SLOT_PTR(pptr, tcbBuffer)
    );
    tcb->tcbIPCBuffer = ipcbuf_vptr;
    setRegister(tcb, capRegister, bi_frame_vptr);
    setNextPC(tcb, ui_v_entry);

    /* initialise TCB */
    tcb->tcbPriority = seL4_MaxPrio;
    setupReplyMaster(tcb);
    setThreadState(tcb, ThreadState_Running);
    ksSchedulerAction = SchedulerAction_ResumeCurrentThread;
    ksCurThread = ksIdleThread;
    ksCurDomain = ksDomSchedule[ksDomScheduleIdx].domain;
    ksDomainTime = ksDomSchedule[ksDomScheduleIdx].length;
    assert(ksCurDomain < CONFIG_NUM_DOMAINS && ksDomainTime > 0);

    /* initialise current thread pointer */
    switchToThread(tcb); /* initialises ksCurThread */

    /* create initial thread's TCB cap */
    cap = cap_thread_cap_new(TCB_REF(tcb));
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IT_TCB), cap);

#ifdef DEBUG
    setThreadName(tcb, "rootserver");
#endif

    return true;
}
Code example #27
File: boot.c Project: mewbak/seL4
BOOT_CODE bool_t
init_sys_state(
    cpu_id_t      cpu_id,
    mem_p_regs_t  mem_p_regs,
    ui_info_t     ui_info,
    p_region_t    boot_mem_reuse_p_reg,
    /* parameters below not modeled in abstract specification */
    uint32_t      num_drhu,
    paddr_t*      drhu_list,
    acpi_rmrr_list_t *rmrr_list,
    seL4_X86_BootInfo_VBE *vbe
)
{
    cap_t         root_cnode_cap;
    vptr_t        extra_bi_frame_vptr;
    vptr_t        bi_frame_vptr;
    vptr_t        ipcbuf_vptr;
    cap_t         it_vspace_cap;
    cap_t         it_ap_cap;
    cap_t         ipcbuf_cap;
    pptr_t        bi_frame_pptr;
    word_t        extra_bi_size = sizeof(seL4_BootInfoHeader);
    region_t      extra_bi_region;
    pptr_t        extra_bi_offset = 0;
    create_frames_of_region_ret_t create_frames_ret;
    create_frames_of_region_ret_t extra_bi_ret;

    /* convert from physical addresses to kernel pptrs */
    region_t ui_reg             = paddr_to_pptr_reg(ui_info.p_reg);
    region_t boot_mem_reuse_reg = paddr_to_pptr_reg(boot_mem_reuse_p_reg);

    /* convert from physical addresses to userland vptrs */
    v_region_t ui_v_reg;
    v_region_t it_v_reg;
    ui_v_reg.start = ui_info.p_reg.start - ui_info.pv_offset;
    ui_v_reg.end   = ui_info.p_reg.end   - ui_info.pv_offset;

    ipcbuf_vptr = ui_v_reg.end;
    bi_frame_vptr = ipcbuf_vptr + BIT(PAGE_BITS);
    extra_bi_frame_vptr = bi_frame_vptr + BIT(PAGE_BITS);

    if (vbe->vbeMode != -1) {
        extra_bi_size += sizeof(seL4_X86_BootInfo_VBE);
    }

    /* The region of the initial thread is the user image + ipcbuf and boot info */
    it_v_reg.start = ui_v_reg.start;
    it_v_reg.end = ROUND_UP(extra_bi_frame_vptr + extra_bi_size, PAGE_BITS);

    init_freemem(ui_info.p_reg, mem_p_regs);

    /* create the root cnode */
    root_cnode_cap = create_root_cnode();

    /* create the IO port cap */
    write_slot(
        SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapIOPort),
        cap_io_port_cap_new(
            0,                /* first port */
            NUM_IO_PORTS - 1, /* last port  */
            VPID_INVALID
        )
    );

    /* create the cap for managing thread domains */
    create_domain_cap(root_cnode_cap);

    /* create the IRQ CNode */
    if (!create_irq_cnode()) {
        return false;
    }

    /* initialise the IRQ states and provide the IRQ control cap */
    init_irqs(root_cnode_cap);

    /* create the bootinfo frame */
    bi_frame_pptr = allocate_bi_frame(0, ksNumCPUs, ipcbuf_vptr);
    if (!bi_frame_pptr) {
        return false;
    }

    extra_bi_region = allocate_extra_bi_region(extra_bi_size);
    if (extra_bi_region.start == 0) {
        return false;
    }

    /* populate vbe info block */
    if (vbe->vbeMode != -1) {
        vbe->header.id = SEL4_BOOTINFO_HEADER_X86_VBE;
        vbe->header.len = sizeof(seL4_X86_BootInfo_VBE);
        memcpy((void*)(extra_bi_region.start + extra_bi_offset), vbe, sizeof(seL4_X86_BootInfo_VBE));
        extra_bi_offset += sizeof(seL4_X86_BootInfo_VBE);
    }

    /* provide a chunk for any leftover padding in the extended boot info */
    seL4_BootInfoHeader padding_header;
    padding_header.id = SEL4_BOOTINFO_HEADER_PADDING;
    padding_header.len = (extra_bi_region.end - extra_bi_region.start) - extra_bi_offset;
    memcpy((void*)(extra_bi_region.start + extra_bi_offset), &padding_header, sizeof(seL4_BootInfoHeader));

    /* Construct an initial address space with enough virtual addresses
     * to cover the user image + ipc buffer and bootinfo frames */
    it_vspace_cap = create_it_address_space(root_cnode_cap, it_v_reg);
    if (cap_get_capType(it_vspace_cap) == cap_null_cap) {
        return false;
    }

    /* Create and map bootinfo frame cap */
    create_bi_frame_cap(
        root_cnode_cap,
        it_vspace_cap,
        bi_frame_pptr,
        bi_frame_vptr
    );

    /* create and map extra bootinfo region */
    extra_bi_ret =
        create_frames_of_region(
            root_cnode_cap,
            it_vspace_cap,
            extra_bi_region,
            true,
            pptr_to_paddr((void*)(extra_bi_region.start - extra_bi_frame_vptr))
        );
    if (!extra_bi_ret.success) {
        return false;
    }
    ndks_boot.bi_frame->extraBIPages = extra_bi_ret.region;

    /* create the initial thread's IPC buffer */
    ipcbuf_cap = create_ipcbuf_frame(root_cnode_cap, it_vspace_cap, ipcbuf_vptr);
    if (cap_get_capType(ipcbuf_cap) == cap_null_cap) {
        return false;
    }

    /* create all userland image frames */
    create_frames_ret =
        create_frames_of_region(
            root_cnode_cap,
            it_vspace_cap,
            ui_reg,
            true,
            ui_info.pv_offset
        );
    if (!create_frames_ret.success) {
        return false;
    }
    ndks_boot.bi_frame->userImageFrames = create_frames_ret.region;

    /* create the initial thread's ASID pool */
    it_ap_cap = create_it_asid_pool(root_cnode_cap);
    if (cap_get_capType(it_ap_cap) == cap_null_cap) {
        return false;
    }
    write_it_asid_pool(it_ap_cap, it_vspace_cap);

    ndks_boot.bi_frame->archInfo = tsc_init();

    /* create the idle thread */
    if (!create_idle_thread()) {
        return false;
    }

    /* create the initial thread */
    if (!create_initial_thread(
                root_cnode_cap,
                it_vspace_cap,
                ui_info.v_entry,
                bi_frame_vptr,
                ipcbuf_vptr,
                ipcbuf_cap
            )) {
        return false;
    }

    if (config_set(CONFIG_IOMMU)) {
        /* initialise VTD-related data structures and the IOMMUs */
        if (!vtd_init(cpu_id, num_drhu, rmrr_list)) {
            return false;
        }

        /* write number of IOMMU PT levels into bootinfo */
        ndks_boot.bi_frame->numIOPTLevels = x86KSnumIOPTLevels;

        /* write IOSpace master cap */
        write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapIOSpace), master_iospace_cap());
    } else {
        ndks_boot.bi_frame->numIOPTLevels = -1;
    }

    /* create all of the untypeds. Both devices and kernel window memory */
    if (!create_untypeds(root_cnode_cap, boot_mem_reuse_reg)) {
        return false;
    }
    /* WARNING: alloc_region() must not be called anymore after here! */

    /* finalise the bootinfo frame */
    bi_finalise();

    return true;
}
Code example #28
File: boot.c Project: buaacszj/myseL4
BOOT_CODE bool_t
init_node_state(
    p_region_t    avail_p_reg,
    p_region_t    sh_p_reg,
    dev_p_regs_t* dev_p_regs,
    ui_info_t     ui_info,
    p_region_t    boot_mem_reuse_p_reg,
    node_id_t     node_id,
    uint32_t      num_nodes,
    /* parameters below not modeled in abstract specification */
    pdpte_t*      kernel_pdpt,
    pde_t*        kernel_pd,
    pte_t*        kernel_pt
#ifdef CONFIG_IOMMU
    , cpu_id_t      cpu_id,
    uint32_t      num_drhu,
    paddr_t*      drhu_list,
    uint32_t      num_passthrough_dev,
    dev_id_t*     passthrough_dev_list,
    uint32_t*     pci_bus_used_bitmap
#endif
)
{
    cap_t         root_cnode_cap;
    vptr_t        bi_frame_vptr;
    vptr_t        ipcbuf_vptr;
    cap_t         it_vspace_cap;
    cap_t         it_ap_cap;
    cap_t         ipcbuf_cap;
    pptr_t        bi_frame_pptr;
    create_frames_of_region_ret_t create_frames_ret;
    int i;
#ifdef CONFIG_BENCHMARK
    vm_attributes_t buffer_attr = {{ 0 }};
    uint32_t paddr;
    pde_t pde;
#endif /* CONFIG_BENCHMARK */

    /* convert from physical addresses to kernel pptrs */
    region_t avail_reg          = paddr_to_pptr_reg(avail_p_reg);
    region_t ui_reg             = paddr_to_pptr_reg(ui_info.p_reg);
    region_t sh_reg             = paddr_to_pptr_reg(sh_p_reg);
    region_t boot_mem_reuse_reg = paddr_to_pptr_reg(boot_mem_reuse_p_reg);

    /* convert from physical addresses to userland vptrs */
    v_region_t ui_v_reg;
    v_region_t it_v_reg;
    ui_v_reg.start = ui_info.p_reg.start - ui_info.pv_offset;
    ui_v_reg.end   = ui_info.p_reg.end   - ui_info.pv_offset;

    ipcbuf_vptr = ui_v_reg.end;
    bi_frame_vptr = ipcbuf_vptr + BIT(PAGE_BITS);

    /* The region of the initial thread is the user image + ipcbuf and boot info */
    it_v_reg.start = ui_v_reg.start;
    it_v_reg.end = bi_frame_vptr + BIT(PAGE_BITS);

    /* make the free memory available to alloc_region() */
    ndks_boot.freemem[0] = avail_reg;
    for (i = 1; i < MAX_NUM_FREEMEM_REG; i++) {
        ndks_boot.freemem[i] = REG_EMPTY;
    }

    /* initialise virtual-memory-related data structures (not in abstract spec) */
    if (!init_vm_state(kernel_pdpt, kernel_pd, kernel_pt)) {
        return false;
    }

#ifdef CONFIG_BENCHMARK
    /* allocate and create the log buffer */
    buffer_attr.words[0] = IA32_PAT_MT_WRITE_THROUGH;

    paddr = pptr_to_paddr((void *) alloc_region(pageBitsForSize(IA32_LargePage)));

    /* allocate a large frame for logging */
    pde = pde_pde_large_new(
              paddr,                                   /* page_base_address    */
              vm_attributes_get_ia32PATBit(buffer_attr),      /* pat                  */
              0,                                       /* avl_cte_depth        */
              1,                                       /* global               */
              0,                                       /* dirty                */
              0,                                       /* accessed             */
              vm_attributes_get_ia32PCDBit(buffer_attr),      /* cache_disabled       */
              vm_attributes_get_ia32PWTBit(buffer_attr),      /* write_through        */
              0,                                       /* super_user           */
              1,                                       /* read_write           */
              1                                        /* present              */
          );

    /* TODO this shouldn't be hardcoded */
    ia32KSkernelPD[IA32_KSLOG_IDX] = pde;


    /* flush the tlb */
    invalidatePageStructureCache();

    /* if we crash here, the log isn't working */
#ifdef CONFIG_DEBUG_BUILD
    printf("Testing log\n");
    ksLog[0] = 0xdeadbeef;
    printf("Wrote to ksLog %x\n", ksLog[0]);
    assert(ksLog[0] == 0xdeadbeef);
#endif /* CONFIG_DEBUG_BUILD */
#endif /* CONFIG_BENCHMARK */

    /* create the root cnode */
    root_cnode_cap = create_root_cnode();

    /* create the IO port cap */
    write_slot(
        SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IO_PORT),
        cap_io_port_cap_new(
            0,                /* first port */
            NUM_IO_PORTS - 1 /* last port  */
        )
    );

    /* create the cap for managing thread domains */
    create_domain_cap(root_cnode_cap);

    /* create the IRQ CNode */
    if (!create_irq_cnode()) {
        return false;
    }

    /* initialise the IRQ states and provide the IRQ control cap */
    init_irqs(root_cnode_cap, node_id != 0);

    /* create the bootinfo frame */
    bi_frame_pptr = allocate_bi_frame(node_id, num_nodes, ipcbuf_vptr);
    if (!bi_frame_pptr) {
        return false;
    }

    /* Construct an initial address space with enough virtual addresses
     * to cover the user image + ipc buffer and bootinfo frames */
    it_vspace_cap = create_it_address_space(root_cnode_cap, it_v_reg);
    if (cap_get_capType(it_vspace_cap) == cap_null_cap) {
        return false;
    }

    /* Create and map bootinfo frame cap */
    create_bi_frame_cap(
        root_cnode_cap,
        it_vspace_cap,
        bi_frame_pptr,
        bi_frame_vptr
    );

    /* create the initial thread's IPC buffer */
    ipcbuf_cap = create_ipcbuf_frame(root_cnode_cap, it_vspace_cap, ipcbuf_vptr);
    if (cap_get_capType(ipcbuf_cap) == cap_null_cap) {
        return false;
    }

    /* create all userland image frames */
    create_frames_ret =
        create_frames_of_region(
            root_cnode_cap,
            it_vspace_cap,
            ui_reg,
            true,
            ui_info.pv_offset
        );
    if (!create_frames_ret.success) {
        return false;
    }
    ndks_boot.bi_frame->ui_frame_caps = create_frames_ret.region;

    /* create the initial thread's ASID pool */
    it_ap_cap = create_it_asid_pool(root_cnode_cap);
    if (cap_get_capType(it_ap_cap) == cap_null_cap) {
        return false;
    }
    write_it_asid_pool(it_ap_cap, it_vspace_cap);

    /*
     * Initialise the NULL FPU state. This is different from merely zero'ing it
     * out (i.e., the NULL FPU state is non-zero), and must be performed before
     * the first thread is created.
     */
    resetFpu();
    saveFpuState(&ia32KSnullFpuState);
    ia32KSfpuOwner = NULL;

    /* create the idle thread */
    if (!create_idle_thread()) {
        return false;
    }

    /* create the initial thread */
    if (!create_initial_thread(
                root_cnode_cap,
                it_vspace_cap,
                ui_info.v_entry,
                bi_frame_vptr,
                ipcbuf_vptr,
                ipcbuf_cap
            )) {
        return false;
    }

#ifdef CONFIG_IOMMU
    /* initialise VTD-related data structures and the IOMMUs */
    if (!vtd_init(cpu_id, num_drhu, pci_bus_used_bitmap, num_passthrough_dev, passthrough_dev_list)) {
        return false;
    }

    /* write number of IOMMU PT levels into bootinfo */
    ndks_boot.bi_frame->num_iopt_levels = ia32KSnumIOPTLevels;

    /* write IOSpace master cap */
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), BI_CAP_IO_SPACE), master_iospace_cap());
#endif

    /* convert the remaining free memory into UT objects and provide the caps */
    if (!create_untypeds(root_cnode_cap, boot_mem_reuse_reg)) {
        return false;
    }
    /* WARNING: alloc_region() must not be called anymore after here! */

    /* create device frames */
    if (!create_device_frames(root_cnode_cap, dev_p_regs)) {
        return false;
    }

    /* create all shared frames */
    create_frames_ret =
        create_frames_of_region(
            root_cnode_cap,
            it_vspace_cap,
            sh_reg,
            false,
            0
        );
    if (!create_frames_ret.success) {
        return false;
    }
    ndks_boot.bi_frame->sh_frame_caps = create_frames_ret.region;

    /* finalise the bootinfo frame */
    bi_finalise();

#ifdef DEBUG
    ia32KSconsolePort = console_port_of_node(node_id);
    ia32KSdebugPort = debug_port_of_node(node_id);
#endif

    return true;
}
Code example #29
static FORCE_INLINE void forward_object(Collector* collector, REF *p_ref) 
{
  GC* gc = collector->gc;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);

  if(obj_belongs_to_tospace(p_obj)) return;
    
  if(!obj_belongs_to_nos(p_obj)){
    if(obj_mark_in_oi(p_obj)){
#ifdef GC_GEN_STATS
      if(gc_profile){
        GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
        gc_gen_collector_update_marked_nonnos_obj_stats_minor(stats);
      }
#endif
      scan_object(collector, p_obj);
    }
    return;
  }

  Partial_Reveal_Object* p_target_obj = NULL;
  /* Fastpath: object has already been forwarded, update the ref slot */
  if(obj_is_fw_in_oi(p_obj)) {
    p_target_obj = obj_get_fw_in_oi(p_obj);
    assert(p_target_obj);
    write_slot(p_ref, p_target_obj);
    return;
  }

  /* following is the logic for forwarding */  
  p_target_obj = collector_forward_object(collector, p_obj);
  
  /* If p_target_obj is NULL, the object was forwarded by another thread.
      We could implement collector_forward_object() so that the forwarding pointer
      is set in the atomic instruction, which requires rolling back the mos_alloced
      space. That is easy to cancel for thread-local block allocation. */
  if( p_target_obj == NULL ){
    if(collector->result == FALSE ){
      /* failed to forward, let's get back to controller. */
      vector_stack_clear(collector->trace_stack);
      return;
    }

    p_target_obj = obj_get_fw_in_oi(p_obj);
    assert(p_target_obj);
    write_slot(p_ref, p_target_obj);
    return;
  }
  /* otherwise, we successfully forwarded */

#ifdef GC_GEN_STATS
  if(gc_profile){
    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
    gc_gen_collector_update_marked_nos_obj_stats_minor(stats);
    gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj));
  }
#endif
  write_slot(p_ref, p_target_obj);

  scan_object(collector, p_target_obj); 
  return;
}