/**
 * heap_init - one-time initialization of the kernel heap.
 *
 * Sets up the heap mutex, the free and delayed-free lists (plus the
 * spinlock guarding the latter), establishes the initial heap memory
 * range, and seeds the allocator with a single free chunk covering the
 * entire range. Must run before any heap allocation.
 */
void heap_init(void) {
    LTRACE_ENTRY;

    // create a mutex
    mutex_init(&theheap.lock);

    // initialize the free list
    list_initialize(&theheap.free_list);

    // initialize the delayed free list
    list_initialize(&theheap.delayed_free_list);
    spin_lock_init(&theheap.delayed_free_lock);

    // set the heap range
#if WITH_KERNEL_VM
    // VM build: carve the initial heap out of freshly allocated kernel
    // pages; the heap can be extended later (see heap_grow())
    theheap.base = pmm_alloc_kpages(HEAP_GROW_SIZE / PAGE_SIZE, NULL);
    theheap.len = HEAP_GROW_SIZE;

    if (theheap.base == 0) {
        // cannot continue without a heap
        panic("HEAP: error allocating initial heap size\n");
    }
#else
    // no VM: the heap occupies a fixed, configuration-provided range
    theheap.base = (void *)HEAP_START;
    theheap.len = HEAP_LEN;
#endif
    theheap.remaining = 0; // will get set by heap_insert_free_chunk()
    theheap.low_watermark = theheap.len;
    LTRACEF("base %p size %zd bytes\n", theheap.base, theheap.len);

    // create an initial free chunk
    heap_insert_free_chunk(heap_create_free_chunk(theheap.base, theheap.len, false));
}
/**
 * heap_grow - extend the heap by at least |size| bytes.
 *
 * Rounds the request up to whole pages, allocates them from the PMM,
 * hands the new region to the allocator as a free chunk, and widens the
 * tracked theheap.base/theheap.len span so it covers the new region.
 *
 * @size: minimum number of bytes to add.
 *
 * Returns the number of bytes actually added (page-rounded), or
 * ERR_NO_MEMORY if the PMM cannot satisfy the request (always the case
 * without WITH_KERNEL_VM).
 */
static ssize_t heap_grow(size_t size) {
#if WITH_KERNEL_VM
    size = ROUNDUP(size, PAGE_SIZE);

    void *new_region = pmm_alloc_kpages(size / PAGE_SIZE, NULL);
    if (!new_region)
        return ERR_NO_MEMORY;

    LTRACEF("growing heap by 0x%zx bytes, new ptr %p\n", size, new_region);

    heap_insert_free_chunk(heap_create_free_chunk(new_region, size, true));

    /* widen the tracked heap span to include the new region */
    uintptr_t region_start = (uintptr_t)new_region;
    uintptr_t region_end = region_start + size;

    if (region_start < (uintptr_t)theheap.base)
        theheap.base = new_region;

    if (region_end > (uintptr_t)theheap.base + theheap.len)
        theheap.len = region_end - (uintptr_t)theheap.base;

    return size;
#else
    return ERR_NO_MEMORY;
#endif
}
/**
 * page_alloc - allocate |pages| pages of kernel-usable memory.
 *
 * @pages: number of pages to allocate.
 * @arena: arena selector, consumed only by the non-VM (novm) allocator;
 *         ignored in the WITH_KERNEL_VM build.
 *
 * Returns a pointer to the allocated pages, or NULL on failure
 * (whatever the underlying allocator returns on failure).
 */
void *page_alloc(size_t pages, int arena) {
#if WITH_KERNEL_VM
    /* VM build: allocate straight from the PMM; arena is unused here */
    return pmm_alloc_kpages(pages, NULL);
#else
    /* no VM: allocate from the requested novm arena */
    return novm_alloc_pages(pages, arena);
#endif
}
/**
 * get_l2_table - find or create the L2 page table covering |l1_index|.
 *
 * One physical page backs L1E_PER_PAGE consecutive L2 tables, so first
 * scan the group of L1 entries that share a backing page for an existing
 * page-table descriptor; if one is found, derive this l1_index's
 * sub-page physical address from it. Otherwise allocate a fresh page,
 * zero it (so all entries mean "no access"), track it on the aspace's
 * pt_page_list, and compute the sub-page address the same way.
 *
 * @aspace: address space whose translation table is consulted (non-NULL)
 * @l1_index: index into the L1 translation table
 * @ppa: out parameter receiving the physical address of the L2 table
 *       slice (PAGE_SIZE / L1E_PER_PAGE bytes each) for this index
 *
 * Returns NO_ERROR on success, ERR_NO_MEMORY if a new page-table page
 * cannot be allocated.
 */
static status_t get_l2_table(arch_aspace_t *aspace, uint32_t l1_index, paddr_t *ppa) {
    status_t ret;
    paddr_t pa;
    uint32_t tt_entry;

    DEBUG_ASSERT(aspace);
    DEBUG_ASSERT(ppa);

    /* lookup an existing l2 pagetable */
    for (uint i = 0; i < L1E_PER_PAGE; i++) {
        tt_entry = aspace->tt_virt[ROUNDDOWN(l1_index, L1E_PER_PAGE) + i];
        if ((tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK)
                == MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE) {
            /* a sibling entry already points into a backing page; offset
             * to this l1_index's quarter of that page */
            *ppa = (paddr_t)ROUNDDOWN(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry), PAGE_SIZE)
                   + (PAGE_SIZE / L1E_PER_PAGE) * (l1_index & (L1E_PER_PAGE-1));
            return NO_ERROR;
        }
    }

    /* not found: allocate it */
    uint32_t *l2_va = pmm_alloc_kpages(1, &aspace->pt_page_list);
    if (!l2_va)
        return ERR_NO_MEMORY;

    /* wipe it clean to set no access */
    memset(l2_va, 0, PAGE_SIZE);

    /* get physical address */
    ret = arm_vtop((vaddr_t)l2_va, &pa);
    ASSERT(!ret);
    ASSERT(paddr_to_kvaddr(pa));

    DEBUG_ASSERT(IS_PAGE_ALIGNED((vaddr_t)l2_va));
    DEBUG_ASSERT(IS_PAGE_ALIGNED(pa));

    *ppa = pa + (PAGE_SIZE / L1E_PER_PAGE) * (l1_index & (L1E_PER_PAGE-1));

    /* fix: second value is *ppa, so label it "ppa" (was printed as "pa" twice) */
    LTRACEF("allocated pagetable at %p, pa 0x%lx, ppa 0x%lx\n", l2_va, pa, *ppa);
    return NO_ERROR;
}
/**
 * arch_mmu_init_aspace - initialize the architecture-specific half of an
 * address space.
 *
 * Kernel aspaces share the pre-existing kernel translation table; user
 * aspaces get a freshly allocated (and zeroed) single-page top-level
 * translation table, which limits them to 1GB of address space for now.
 *
 * @aspace: aspace structure to initialize (non-NULL)
 * @base:   base virtual address of the range the aspace manages
 * @size:   size of the range; must be > PAGE_SIZE and must not wrap
 * @flags:  ARCH_ASPACE_FLAG_KERNEL selects the kernel translation table
 *
 * Returns NO_ERROR on success, ERR_NO_MEMORY if the user translation
 * table page cannot be allocated.
 */
status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size, uint flags) {
    LTRACEF("aspace %p, base 0x%lx, size 0x%zx, flags 0x%x\n", aspace, base, size, flags);

    DEBUG_ASSERT(aspace);

    /* validate that the base + size is sane and doesn't wrap */
    DEBUG_ASSERT(size > PAGE_SIZE);
    DEBUG_ASSERT(base + size - 1 > base);

    list_initialize(&aspace->pt_page_list);

    if (flags & ARCH_ASPACE_FLAG_KERNEL) {
        aspace->base = base;
        aspace->size = size;
        aspace->tt_virt = arm_kernel_translation_table;
        aspace->tt_phys = vaddr_to_paddr(aspace->tt_virt);
    } else {
        // XXX at the moment we can only really deal with 1GB user space, and thus
        // needing only a single page for the top level translation table
        DEBUG_ASSERT(base < GB && (base + size) <= GB);

        aspace->base = base;
        aspace->size = size;

        uint32_t *va = pmm_alloc_kpages(1, &aspace->pt_page_list);
        if (!va)
            return ERR_NO_MEMORY;

        /* fix: zero the new top-level translation table so every entry
         * reads as "no access" — stale page contents would otherwise be
         * decoded by the MMU as live L1 descriptors (get_l2_table() does
         * the same for newly allocated L2 tables) */
        memset(va, 0, PAGE_SIZE);

        aspace->tt_virt = va;
        aspace->tt_phys = vaddr_to_paddr(aspace->tt_virt);
    }

    LTRACEF("tt_phys 0x%lx tt_virt %p\n", aspace->tt_phys, aspace->tt_virt);

    return NO_ERROR;
}
status_t virtio_gpu_start(struct virtio_device *dev) { status_t err; LTRACEF("dev %p\n", dev); struct virtio_gpu_dev *gdev = (struct virtio_gpu_dev *)dev->priv; /* get the display info and see if we find a valid pmode */ err = get_display_info(gdev); if (err < 0) { LTRACEF("failed to get display info\n"); return err; } if (gdev->pmode_id < 0) { LTRACEF("we failed to find a pmode, exiting\n"); return ERR_NOT_FOUND; } /* allocate a resource */ err = allocate_2d_resource(gdev, &gdev->display_resource_id, gdev->pmode.r.width, gdev->pmode.r.height); if (err < 0) { LTRACEF("failed to allocate 2d resource\n"); return err; } /* attach a backing store to the resource */ size_t len = gdev->pmode.r.width * gdev->pmode.r.height * 4; gdev->fb = pmm_alloc_kpages(ROUNDUP(len, PAGE_SIZE) / PAGE_SIZE, NULL); if (!gdev->fb) { TRACEF("failed to allocate framebuffer, wanted 0x%zx bytes\n", len); return ERR_NO_MEMORY; } printf("virtio-gpu: framebuffer at %p, 0x%zx bytes\n", gdev->fb, len); err = attach_backing(gdev, gdev->display_resource_id, gdev->fb, len); if (err < 0) { LTRACEF("failed to attach backing store\n"); return err; } /* attach this resource as a scanout */ err = set_scanout(gdev, gdev->pmode_id, gdev->display_resource_id, gdev->pmode.r.width, gdev->pmode.r.height); if (err < 0) { LTRACEF("failed to set scanout\n"); return err; } /* create the flush thread */ thread_t *t; t = thread_create("virtio gpu flusher", &virtio_gpu_flush_thread, (void *)gdev, HIGH_PRIORITY, DEFAULT_STACK_SIZE); thread_detach_and_resume(t); /* kick it once */ event_signal(&gdev->flush_event, true); LTRACE_EXIT; return NO_ERROR; }