/* Return -1 on error, 0 on success. */
int add_memarea_element(size_t start_address, int required_pages)
{
	int posizione;

	posizione = mem_info->next_free;
	if (posizione > 0 && posizione < 510) {
#ifdef DEBUG
		printf("Position < 510\n");
#endif
		mem_info->pages_info[posizione].inizio = (void *)start_address;
		mem_info->pages_info[posizione].num_pagine = required_pages;
		mem_info->next_free++;
		return 0;
	} else {
		/* Current descriptor page is full: chain a new one. */
		mem_info->next = request_pages(1, NOT_ADD_LIST);
		if (mem_info->next != NULL) {
			mem_info = (mem_area_pointer)mem_info->next;
#ifdef DEBUG
			printf("Position >= 510\n");
#endif
			mem_info->pages_info[0].inizio = (void *)start_address;
			mem_info->pages_info[0].num_pagine = required_pages;
			mem_info->next_free = 1;
			return 0;
		}
		return -1;
	}
}
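/*
 * Illustrative sketch only: add_memarea_element() above manipulates a
 * per-page descriptor list whose real definition lives elsewhere. The
 * field names (next_free, pages_info[].inizio, pages_info[].num_pagine,
 * next) come from the code above, but the entry struct name, field order
 * and exact array size below are guesses. The 510 limit is consistent
 * with one 4 KiB page holding a small header plus roughly 510 entries.
 */
struct pages_info_entry {
	void *inizio;      /* start address of the reserved area */
	int num_pagine;    /* number of pages in the area */
};

struct mem_area {
	int next_free;                           /* first free slot in pages_info */
	struct pages_info_entry pages_info[510]; /* assumed capacity per page */
	void *next;                              /* next descriptor page, or NULL */
};
typedef struct mem_area *mem_area_pointer;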
static struct page *alloc_new_pte_page(struct pte_cache_list *clist)
{
	struct page *page;
	unsigned long pte_free_base = 0;

	if (!clist->pte_free_size) {
		/*
		 * alloc a new page for the pte pgt
		 */
		page = request_pages(1, GFP_PGT);
		if (!page)
			return NULL;

		add_page_to_list_tail(page, &clist->pte_list);
		clist->pte_alloc_size += PAGE_SIZE;
	} else {
		/*
		 * fetch a new page from the pte_list
		 */
		clist->pte_current_page = list_next(clist->pte_current_page);
		page = list_to_page(clist->pte_current_page);
		clist->pte_free_size -= PAGE_SIZE;
	}

	pte_free_base = page_to_va(page);
	memset((char *)pte_free_base, 0, PAGE_SIZE);

	return page;
}
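/*
 * Hypothetical usage sketch: how a page-table mapping routine might draw a
 * zeroed PTE page from the cache above. map_new_pte() is not part of the
 * original code; it only illustrates the calling pattern, using the
 * page_to_va() helper already seen above.
 */
static unsigned long map_new_pte(struct pte_cache_list *clist)
{
	struct page *page = alloc_new_pte_page(clist);

	if (!page)
		return 0;

	/* the returned page is already zeroed and linked into clist */
	return page_to_va(page);
}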
/* To be moved as soon as the memory manager is finished. */
void *fis_malloc(const size_t size)
{
	/* Round the requested size up to a whole number of 4 KiB pages. */
	return request_pages(size % 4096 ? size / 4096 + 1 : size / 4096,
			     ADD_LIST);
}
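/*
 * Usage sketch with hypothetical sizes: fis_malloc() always hands back
 * whole 4 KiB pages, so any size that is not a multiple of 4096 is
 * rounded up to the next page.
 */
static void fis_malloc_rounding_example(void)
{
	void *p;

	p = fis_malloc(1);      /* 1 page:  request_pages(1, ADD_LIST) */
	p = fis_malloc(4096);   /* 1 page:  request_pages(1, ADD_LIST) */
	p = fis_malloc(4097);   /* 2 pages: request_pages(2, ADD_LIST) */
	p = fis_malloc(12288);  /* 3 pages: request_pages(3, ADD_LIST) */
	(void)p;
}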
int init_task_page_table(struct task_page_table *table)
{
	unsigned long base = 0;
	struct page *page;
	struct pgt_temp_buffer *tb;

	if (!table)
		return -EINVAL;

	tb = &table->pgt_temp_buffer;

	/*
	 * if the page table has already been alloced we only reinit
	 * the pde and pte page tables below; otherwise allocate the
	 * pde and the temp-buffer pte page first
	 */
	if (!table->pde_base) {
		memset((char *)table, 0, sizeof(struct task_page_table));

		page = alloc_new_pde();
		if (!page) {
			kernel_error("No memory for task PDE\n");
			return -ENOMEM;
		}

		table->pde_base = page_to_va(page);
		table->pde_base_pa = page_to_pa(page);

		/*
		 * init temp buffer
		 */
		tb->tbuf_pte_page = request_pages(1, GFP_PGT);
		if (!tb->tbuf_pte_page) {
			release_pde(table->pde_base);
			return -ENOMEM;
		}

		tb->tbuf_pte_base = page_to_va(tb->tbuf_pte_page);
		tb->tbuf_page_nr = PTES_PER_PDE;
	}

	/*
	 * doing a full memset here would cost too much time;
	 * to be fixed
	 */
	mmu_copy_kernel_pde(table->pde_base);
	init_pte(table);

	/*
	 * map the temp buffer's pte page into the pde
	 */
	base = pgt_get_pde_entry_addr(table->pde_base, KERNEL_TEMP_BUFFER_BASE);
	mmu_create_pde_entry(base, page_to_pa(tb->tbuf_pte_page),
			     KERNEL_TEMP_BUFFER_BASE);

	table->mmap_current_base = PROCESS_USER_MMAP_BASE;

	return 0;
}
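/*
 * Hypothetical usage sketch: how a task-creation path might call
 * init_task_page_table(). setup_task_mm() is not part of the original
 * code; it only illustrates that the same routine covers both first-time
 * allocation (pde_base == 0) and re-init of an existing table.
 */
static int setup_task_mm(struct task_page_table *table)
{
	int ret;

	ret = init_task_page_table(table);
	if (ret)
		return ret;    /* -EINVAL or -ENOMEM */

	/* pde_base, pde_base_pa and the temp buffer mapping are now valid */
	return 0;
}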