/****************************************************************************
 * Name: mm_pginitialize
 *
 * Description:
 *   Initialize the page allocator over the region [heap_start,
 *   heap_start + heap_size) using the granule allocator with both the
 *   granule size and alignment set to one page (MM_PGSHIFT).
 *
 * Input Parameters:
 *   heap_start - Start of the memory region to manage
 *   heap_size  - Size of the memory region in bytes
 *
 * Returned Value:
 *   None.  Failure of the underlying gran_initialize() is caught only by
 *   DEBUGASSERT in debug builds.
 *
 ****************************************************************************/

void mm_pginitialize(FAR void *heap_start, size_t heap_size)
{
#ifdef CONFIG_GRAN_SINGLE
  int ret;

  ret = gran_initialize(heap_start, heap_size, MM_PGSHIFT, MM_PGSHIFT);
  DEBUGASSERT(ret == OK);
  UNUSED(ret);
#else
  g_pgalloc = gran_initialize(heap_start, heap_size, MM_PGSHIFT,
                              MM_PGSHIFT);

  /* FIX: the assertion referenced an undefined identifier 'pg_alloc';
   * the handle assigned above is 'g_pgalloc'.
   */

  DEBUGASSERT(g_pgalloc != NULL);
#endif
}
int up_mmuinit(void) { /* Here we use the granule allocator as a page allocator. We lie and * say that 1 page is 1 byte. */ #ifdef CONFIG_GRAN_SINGLE return gran_initialize((FAR void *)Z180_PHYSHEAP_STARTPAGE, Z180_PHYSHEAP_NPAGES, 0, 0); #else g_physhandle = gran_initialize((FAR void *)Z180_PHYSHEAP_STARTPAGE, Z180_PHYSHEAP_NPAGES, 0, 0); return g_physhandle ? OK : -ENOMEM; #endif }
/****************************************************************************
 * Name: dma_alloc_init
 *
 * Description:
 *   Set up the DMA-safe allocator over g_dma_heap using the granule
 *   allocator: 128-byte granules (2^7) with 64-byte alignment (2^6).
 *   (The original author notes the granule size must be greater than the
 *   alignment - XXX bug?)  On success a performance counter for DMA
 *   allocations is created; on failure a boot message is logged.
 *
 ****************************************************************************/

static void dma_alloc_init(void)
{
  dma_allocator = gran_initialize(g_dma_heap, sizeof(g_dma_heap), 7, 6);

  if (dma_allocator != NULL)
    {
      g_dma_perf = perf_alloc(PC_COUNT, "DMA allocations");
    }
  else
    {
      message("[boot] DMA allocator setup FAILED");
    }
}
/****************************************************************************
 * Name: stm32_dma_alloc_init
 *
 * Description:
 *   Initialize the DMA-capable allocator over g_dma_heap: 128-byte
 *   granules (2^7), 64-byte alignment (2^6).  (Granule size must be
 *   greater than the alignment - XXX bug?)
 *
 * Returned Value:
 *   OK on success; -ENOMEM if the granule allocator could not be created.
 *
 ****************************************************************************/

int stm32_dma_alloc_init(void)
{
  dma_allocator = gran_initialize(g_dma_heap, sizeof(g_dma_heap), 7, 6);

  return (dma_allocator != NULL) ? OK : -ENOMEM;
}
/**************************************************************************** * Public Functions ****************************************************************************/ __EXPORT int board_dma_alloc_init(void) { dma_allocator = gran_initialize(g_dma_heap, sizeof(g_dma_heap), 7, /* 128B granule - must be > alignment (XXX bug?) */ 6); /* 64B alignment */ if (dma_allocator == NULL) { return -ENOMEM; } else { dma_heap_inuse = 0; dma_heap_peak_use = 0; g_dma_perf = perf_alloc(PC_COUNT, "dma_alloc"); } return OK; }