/**
 * @brief provide a specific data area
 * @param start pointer to begin of data
 * @param end pointer to end of data
 */
void heap_provide_address(vaddr_t start, vaddr_t end)
{
	vaddr_t page_addr = start & PAGE_MASK;
	int remaining = NUM_PAGES(end - page_addr);

	/*
	 * Walk the range one page at a time; any page that is neither
	 * present nor already backed by a physical frame gets a fresh
	 * frame mapped writable into the current context.
	 */
	while (remaining-- > 0) {
		if (!vmm_is_present(current_context, page_addr) &&
		    vaddr2paddr(current_context, page_addr) == 0) {
			paddr_t frame = pmm_alloc_page();
			vmm_map(current_context, frame, page_addr, VMM_WRITABLE);
		}
		page_addr += PAGE_SIZE;
	}
}
/**
 * Allocate @size bytes of physically contiguous memory and map it into
 * the kernel address space.
 *
 * @param size  number of bytes to allocate (rounded up to whole pages)
 * @param vaddr out: virtual address of the mapping (must be non-NULL)
 * @param paddr out: physical address of the allocated range (must be non-NULL)
 * @return 0 on success, -1 on bad arguments or allocation/mapping failure
 */
int cdi_alloc_phys_mem(size_t size, void** vaddr, void** paddr)
{
	LOG
	cdi_check_init(-1);
	cdi_check_arg(vaddr, != NULL, -1);
	cdi_check_arg(paddr, != NULL, -1);

	size_t pages = NUM_PAGES(size);
	if (!pages)
		return -1;

	paddr_t phys = mm_alloc_range(pages);
	/*
	 * BUG FIX: the original compared the output parameter 'paddr'
	 * (already validated non-NULL above, so the test could never fire)
	 * against NO_PAGE instead of the freshly allocated range 'phys'.
	 * Allocation failures therefore went undetected and an invalid
	 * physical address was mapped and returned to the caller.
	 */
	if (phys == NO_PAGE)
		return -1;

	vaddr_t virt = km_alloc_addr(phys, PE_PRESENT | PE_READWRITE, size);
	if (!virt)
		/* NOTE(review): 'phys' appears to leak on this path — no
		 * release helper is visible in this chunk; confirm whether
		 * the range should be freed here. */
		return -1;

	*vaddr = virt;
	*paddr = phys;
	return 0;
}
/*
 * Ugh, this is ugly, but we want the default case to run
 * straight through, which is why we have the ugly goto's
 */
/*
 * Bucket-based kernel allocator: sizes[] holds one size_descriptor per
 * power-of-two block size; each bucket keeps linked lists of pages
 * (firstfree / dmafree) whose free blocks are chained via block_header.
 *
 * @param size     number of usable bytes requested (a block_header is
 *                 prepended internally)
 * @param priority GFP_* flags; GFP_DMA selects the DMA freelist
 * @return pointer to size usable bytes, or NULL on failure
 */
void *kmalloc(size_t size, int priority)
{
	unsigned long flags;
	unsigned long type;
	int order, dma;
	struct block_header *p;
	struct page_descriptor *page, **pg;
	struct size_descriptor *bucket = sizes;

	/* Get order */
	/* Find the smallest bucket whose block size fits size + header.
	 * BLOCKSIZE(order) == 0 marks the end of the table: a request
	 * larger than every bucket is rejected. */
	order = 0;
	{
		unsigned int realsize = size + sizeof(struct block_header);
		for (;;) {
			int ordersize = BLOCKSIZE(order);
			if (realsize <= ordersize)
				break;
			order++;
			bucket++;
			if (ordersize)
				continue;
			printk("kmalloc of too large a block (%d bytes).\n", (int) size);
			return NULL;
		}
	}

	/* Select the normal or the DMA freelist of the chosen bucket. */
	dma = 0;
	type = MF_USED;
	pg = &bucket->firstfree;
	if (priority & GFP_DMA) {
		dma = 1;
		type = MF_DMA;
		pg = &bucket->dmafree;
	}

	priority &= GFP_LEVEL_MASK;

	/* Sanity check... */
	/* A non-atomic allocation from interrupt context could sleep;
	 * warn (rate-limited to 5 messages) and force GFP_ATOMIC. */
	if (intr_count && priority != GFP_ATOMIC) {
		static int count = 0;
		if (++count < 5) {
			printk("kmalloc called nonatomically from interrupt %p\n",
			       __builtin_return_address(0));
			priority = GFP_ATOMIC;
		}
	}

	/* Freelist manipulation must not be interrupted: save the flags
	 * and disable interrupts until the block is unlinked. */
	save_flags(flags);
	cli();
	page = *pg;
	if (!page)
		goto no_bucket_page;

	p = page->firstfree;
	if (p->bh_flags != MF_FREE)
		goto not_free_on_freelist;

found_it:
	/* Unlink the first free block; if the page is now full, drop it
	 * from the bucket's page list. */
	page->firstfree = p->bh_next;
	page->nfree--;
	if (!page->nfree)
		*pg = page->next;
	restore_flags(flags);
	bucket->nmallocs++;
	bucket->nbytesmalloced += size;
	p->bh_flags = type;	/* As of now this block is officially in use */
	p->bh_length = size;
#ifdef SADISTIC_KMALLOC
	/* Poison the payload so use of uninitialized memory is visible. */
	memset(p + 1, 0xf0, size);
#endif
	return p + 1;		/* Pointer arithmetic: increments past header */

no_bucket_page:
	/*
	 * If we didn't find a page already allocated for this
	 * bucket size, we need to get one..
	 *
	 * This can be done with ints on: it is private to this invocation
	 */
	restore_flags(flags);

	{
		int i, realpages;

		/* Small buckets fit in one page; oversized blocks need
		 * enough pages for payload + both headers. */
		if (BLOCKSIZE(order) < PAGE_SIZE)
			realpages = 1;
		else
			realpages = NUM_PAGES(size + sizeof(struct block_header) +
					      sizeof(struct page_descriptor));

		page = get_kmalloc_pages(priority, realpages, dma);
		if (!page)
			goto no_free_page;
found_cached_page:
		bucket->npages++;
		/* Pack the bucket order and the byte size of the page run
		 * into one field (PAGE_SIZE-aligned, so the low bits are
		 * free for the order). */
		page->order = order | (realpages * PAGE_SIZE);

		/* Loop for all but last block: */
		/* Carve the fresh page(s) into blocks and chain them. */
		i = (page->nfree = bucket->nblocks) - 1;
		p = BH(page + 1);
		while (i > 0) {	/* doesn't happen except for small ^2 mallocs */
			i--;
			p->bh_flags = MF_FREE;
			p->bh_next = BH(((long) p) + BLOCKSIZE(order));
			p = p->bh_next;
		}

		/* Last block: */
		p->bh_flags = MF_FREE;
		p->bh_next = NULL;

		p = BH(page + 1);
	}

	/*
	 * Now we're going to muck with the "global" freelist
	 * for this size: this should be uninterruptible
	 */
	/* Note: found_it restores flags saved at the top; interrupts were
	 * re-enabled above, so this cli() pairs with that restore. */
	cli();
	page->next = *pg;
	*pg = page;
	goto found_it;

no_free_page:
	/*
	 * No free pages, check the kmalloc cache of
	 * pages to see if maybe we have something available
	 */
	if (!dma && order < MAX_CACHE_ORDER) {
		/* Atomically swap our NULL for any cached page run. */
		page = xchg(kmalloc_cache + order, page);
		if (page)
			goto found_cached_page;
	}

	{
		/* Rate-limit the out-of-memory message to once per 10s,
		 * and stay silent for buffer/IO allocations that are
		 * expected to fail under pressure. */
		static unsigned long last = 0;
		if (priority != GFP_BUFFER && priority != GFP_IO &&
		    (last + 10 * HZ < jiffies)) {
			last = jiffies;
			printk("Couldn't get a free page.....\n");
		}
		return NULL;
	}

not_free_on_freelist:
	/* Freelist corruption: a block on the free chain is not marked
	 * MF_FREE. Report and fail the allocation. */
	restore_flags(flags);
	printk("Problem: block on freelist at %08lx isn't free.\n", (long) p);
	return NULL;
}