/* Return the index of the first clear (0) bit in the bitmap.
 * Scans limb by limb, skipping limbs that are completely full.
 * NOTE(review): assumes at least one clear bit exists somewhere —
 * there is no length bound, so a fully-set bitmap reads past the end. */
static forceinline size_t bitmap_search_0(const BITMAP_LIMB_T *bitmap)
{
    size_t idx = 0;
    BITMAP_LIMB_T limb = *bitmap++;
    /* Advance past limbs with every bit set. */
    while (limb == ~(BITMAP_LIMB_T)0) {
        idx += sizeof(BITMAP_LIMB_T) * CHAR_BIT;
        limb = *bitmap++;
    }
    /* An all-zero limb: its very first bit is the answer. */
    if (limb == 0) {
        return idx;
    }
    /* Otherwise the lowest clear bit is the lowest set bit of ~limb. */
    return idx + CTZ(~limb);
}
/* Executes the registered callback for incoming IRQs.
 * Each set bit of the badge (after masking off bits this node does not
 * own) names one IRQ slot in the node's irq table; the slots are
 * dispatched lowest bit first until no pending bits remain. */
static void irq_server_node_handle_irq(struct irq_server_node *n, uint32_t badge)
{
    struct irq_data *irq_table = n->irqs;
    /* Mask out reserved bits. */
    uint32_t pending = badge & n->badge_mask;
    /* Pop and dispatch one pending IRQ per iteration. */
    while (pending) {
        int idx = CTZ(pending);
        struct irq_data *irq = &irq_table[idx];
        DIRQSERVER("Received IRQ %d, badge 0x%x, index %d\n", irq->irq, (unsigned)pending, idx);
        irq->cb(irq);
        pending &= ~BIT(idx);
    }
}
/* NOTE(review): this chunk is truncated. The first brace-delimited body
 * belongs to a function whose signature is above this view (it computes a
 * vaddr just below DITE_HEADER_START, rounded DOWN to a (1 << bits)
 * boundary), and __map_device_page_failsafe continues past the end of this
 * view — its `error:` label, the actual mapping of `vaddr`, and the return
 * are not visible here. Code left byte-identical; do not edit until the
 * full definition is in view.
 * What the visible part of __map_device_page_failsafe does: derives the
 * frame size in bits from `size` via CTZ (assumes size is a power of two —
 * TODO confirm at callers), spins forever if a device page was already
 * mapped (only a single page is supported, per the inline comment), copies
 * a frame capability for `paddr`, records it in device_cap, and allocates
 * a device vaddr — the mapping itself presumably follows in the truncated
 * remainder. */
{ seL4_Word va; va = DITE_HEADER_START - (1 << bits); /* Ensure we are aligned to bits. If not, round down. */ va = va & ~((1 << bits) - 1); return va; } static void* __map_device_page_failsafe(void* cookie UNUSED, uintptr_t paddr, size_t size, int cached UNUSED, ps_mem_flags_t flags UNUSED) { int bits = CTZ(size); int error; seL4_Word vaddr = 0; cspacepath_t dest; if (device_cap != 0) { /* we only support a single page for the serial */ for (;;); } error = sel4platsupport_copy_frame_cap(vka, simple, (void *) paddr, bits, &dest); if (error != seL4_NoError) { goto error; } device_cap = dest.capPtr; vaddr = platsupport_alloc_device_vaddr(bits);
/* Exercise every ARM cache-maintenance invocation on each supported frame
 * size.
 *
 * Allocates one frame per entry of frame_types[], maps them all into a
 * 32MiB-aligned window of reserved vspace (inserting a page table once,
 * the first time a frame type that needs one is mapped), then asserts that
 * each seL4 ARM page flush operation — both the per-frame capability form
 * and the page-directory virtual-range form — succeeds over the full
 * extent of every frame.
 *
 * Returns the accumulated sel4test result. */
static int test_large_page_flush_operation(env_t env)
{
    int num_frame_types = ARRAY_SIZE(frame_types);
    seL4_CPtr frames[num_frame_types];
    int error;
    vka_t *vka = &env->vka;
    bool pt_mapped = false;
    /* Grab some free vspace big enough to hold all the tests. */
    seL4_Word vstart;
    reservation_t reserve = vspace_reserve_range(&env->vspace, 2 * (1 << 25), seL4_AllRights,
                                                 1, (void **) &vstart);
    test_assert(reserve.res != 0);
    /* Doubled reservation above guarantees a 32MiB-aligned start fits. */
    vstart = ALIGN_UP(vstart, (1 << 25));
    /* Create us some frames to play with. */
    for (int i = 0; i < num_frame_types; i++) {
        frames[i] = vka_alloc_frame_leaky(vka, CTZ(frame_types[i].size));
        assert(frames[i]);
    }
    /* Also create a pagetable to map the pages into. */
    seL4_CPtr pt = vka_alloc_page_table_leaky(vka);
    /* Map the pages in. */
    for (int i = 0; i < num_frame_types; i++) {
        if (frame_types[i].need_pt && !pt_mapped) {
            /* Map the pagetable in. */
            error = seL4_ARCH_PageTable_Map(pt, env->page_directory,
                                            vstart + frame_types[i].vaddr_offset,
                                            seL4_ARCH_Default_VMAttributes);
            test_assert(error == 0);
            pt_mapped = true;
        }
        error = seL4_ARCH_Page_Map(frames[i], env->page_directory,
                                   vstart + frame_types[i].vaddr_offset,
                                   seL4_AllRights, seL4_ARCH_Default_VMAttributes);
        test_assert(error == 0);
    }
    /* See if we can invoke page flush on each of them */
    for (int i = 0; i < num_frame_types; i++) {
        /* Per-frame (capability-addressed) maintenance over the whole frame. */
        error = seL4_ARM_Page_Invalidate_Data(frames[i], 0, frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_Page_Clean_Data(frames[i], 0, frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_Page_CleanInvalidate_Data(frames[i], 0, frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_Page_Unify_Instruction(frames[i], 0, frame_types[i].size);
        test_assert(error == 0);
        /* Same maintenance again, addressed by virtual range through the
         * page directory instead of by frame capability. */
        error = seL4_ARM_PageDirectory_Invalidate_Data(env->page_directory,
                                                       vstart + frame_types[i].vaddr_offset,
                                                       vstart + frame_types[i].vaddr_offset + frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_PageDirectory_Clean_Data(env->page_directory,
                                                  vstart + frame_types[i].vaddr_offset,
                                                  vstart + frame_types[i].vaddr_offset + frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_PageDirectory_CleanInvalidate_Data(env->page_directory,
                                                            vstart + frame_types[i].vaddr_offset,
                                                            vstart + frame_types[i].vaddr_offset + frame_types[i].size);
        test_assert(error == 0);
        error = seL4_ARM_PageDirectory_Unify_Instruction(env->page_directory,
                                                         vstart + frame_types[i].vaddr_offset,
                                                         vstart + frame_types[i].vaddr_offset + frame_types[i].size);
        test_assert(error == 0);
    }
    return sel4test_get_result();
}
/* Find-first-set: returns the 1-based position of the lowest set bit
 * of x, or 0 when x is zero (the classic ffs() convention, built on
 * the CTZ count-trailing-zeros primitive). */
static uint32_t FFS(uint32_t x)
{
    return (x == 0) ? 0 : CTZ(x) + 1;
}
/* Boot-time count-trailing-zeros wrapper around the CTZ macro.
 * NOTE(review): the DONT_TRANSLATE marker below appears to exclude this
 * function from the C-to-formal translation tooling — presumably because
 * CTZ expands to a compiler builtin; confirm against the kernel's
 * verification tool conventions before altering. Marker kept immediately
 * adjacent to the definition; code untouched. */
/** DONT_TRANSLATE */ BOOT_CODE static uint32_t boot_ctz (uint32_t x) { return CTZ (x); }