Example #1
void vm_map_page(struct vm_translation_map *map, unsigned int va, unsigned int pa)
{
    int vpindex = va / PAGE_SIZE;    // virtual page number
    int pgdindex = vpindex / 1024;   // index into the page directory
    int pgtindex = vpindex % 1024;   // index into the page table
    unsigned int *pgdir;
    unsigned int *pgtbl;
    struct list_node *other_map;
    unsigned int new_pgt;
    int old_flags;

    if (va >= KERNEL_BASE)
    {
        // Map into kernel space
        old_flags = acquire_spinlock_int(&kernel_space_lock);

        // The page tables for kernel space are shared by all page directories.
        // Check the first page directory to see if this is present. If not,
        // allocate a new one and stick it into all page directories.
        pgdir = (unsigned int*) PA_TO_VA(kernel_map.page_dir);
        if ((pgdir[pgdindex] & PAGE_PRESENT) == 0)
        {
            new_pgt = page_to_pa(vm_allocate_page()) | PAGE_PRESENT;
            list_for_each(&map_list, other_map, struct list_node)
            {
                pgdir = (unsigned int*) PA_TO_VA(((struct vm_translation_map*)other_map)->page_dir);
                pgdir[pgdindex] = new_pgt;
            }
        }
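The index arithmetic at the top of vm_map_page splits a virtual address into a page-directory slot and a page-table slot. A minimal stand-alone sketch of that decomposition, assuming 4 KiB pages and the usual two-level 32-bit layout (the constants below are illustrative, not taken from this project's headers):

#include <stdio.h>

#define PAGE_SIZE 4096u   // assumed 4 KiB pages

int main(void)
{
    unsigned int va = 0xc0123456;            // example kernel-space address
    unsigned int vpindex  = va / PAGE_SIZE;  // virtual page number
    unsigned int pgdindex = vpindex / 1024;  // page-directory entry index
    unsigned int pgtindex = vpindex % 1024;  // page-table entry index

    // Prints "768 291": with KERNEL_BASE at 0xc0000000, entry 768 is the
    // first kernel-space slot in the page directory.
    printf("%u %u\n", pgdindex, pgtindex);
    return 0;
}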
Example #2
void destroy_translation_map(struct vm_translation_map *map)
{
    int i;
    unsigned int *pgdir;
    int old_flags;

    old_flags = acquire_spinlock_int(&kernel_space_lock);
    list_remove_node(map);
    release_spinlock_int(&kernel_space_lock, old_flags);

    // Free user space page tables. Entries 768 and up cover kernel space
    // and are shared with every other map, so they are not touched here.
    pgdir = (unsigned int*) PA_TO_VA(map->page_dir);
    for (i = 0; i < 768; i++)
    {
        if (pgdir[i] & PAGE_PRESENT)
            dec_page_ref(pa_to_page(PAGE_ALIGN(pgdir[i])));
    }

    dec_page_ref(pa_to_page(map->page_dir));
    slab_free(&translation_map_slab, map);
}
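Only the first 768 page-directory entries are released above: the remaining 256 cover kernel space and are shared by every translation map (vm_map_page installs them into all directories), so they must stay alive. A hedged sketch of a caller tearing down an address space; the process structure and function name are assumptions made up for illustration:

// Hypothetical teardown path; struct process and its field name are
// assumptions, not part of this project.
struct process {
    struct vm_translation_map *translation_map;
    // ...
};

static void process_teardown_address_space(struct process *proc)
{
    // Frees the user page tables (directory entries 0..767), drops the page
    // directory page itself, and returns the map to its slab. Kernel
    // entries 768..1023 are shared and left untouched.
    destroy_translation_map(proc->translation_map);
    proc->translation_map = NULL;
}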
Example #3
struct vm_translation_map *create_translation_map(void)
{
    struct vm_translation_map *map;
    int old_flags;

    map = slab_alloc(&translation_map_slab);
    map->page_dir = page_to_pa(vm_allocate_page());

    old_flags = acquire_spinlock_int(&kernel_space_lock);
    // Copy kernel page tables into new page directory
    memcpy((unsigned int*) PA_TO_VA(map->page_dir) + 768,
           (unsigned int*) PA_TO_VA(kernel_map.page_dir) + 768,
           256 * sizeof(unsigned int));

    map->asid = next_asid++;
    map->lock = 0;

    list_add_tail(&map_list, (struct list_node*) map);
    release_spinlock_int(&kernel_space_lock, old_flags);

    return map;
}
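A minimal usage sketch pairing create_translation_map with vm_map_page from Example #1. USER_CODE_VA, the flag choice, and spawn_address_space itself are assumptions made up for this example:

#define USER_CODE_VA 0x1000u   // assumed user-space address

static struct vm_translation_map *spawn_address_space(void)
{
    struct vm_translation_map *map = create_translation_map();
    unsigned int frame = page_to_pa(vm_allocate_page());

    // Flag bits ride in the low bits of the physical address, matching the
    // PAGE_PRESENT usage in Example #1.
    vm_map_page(map, USER_CODE_VA, frame | PAGE_PRESENT);
    return map;
}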
Example #4
File: sync.c Project: AlexSnet/toy
INTERNAL err_code __sleep_in_mutex(struct mutex *mutex) {
  struct __mutex_node *node = NULL;
  bool acquired;

  acquire_spinlock(&mutex->ilock, 0);
  // Fast path: try to take the mutex's own lock; this attempt can fail
  // under contention.
  acquired = acquire_spinlock_int(&mutex->mlock, 1);
  if (!acquired) {
    // Slow path: queue this thread on the mutex's wait list and block.
    node = alloc_block(&mutex_node_pool);
    if (node) {
      node->next = NULL;
      node->id = get_thread();
      // Link the new waiter in at the head end; tail keeps pointing at the
      // oldest waiter.
      if (mutex->head)
        mutex->head->next = node;
      mutex->head = node;
      if (!mutex->tail)
        mutex->tail = node;
      // Suspend this thread; pause_this_thread() is expected to release
      // ilock as part of blocking.
      pause_this_thread(&mutex->ilock);
    }
  }
  if (!node)
    release_spinlock(&mutex->ilock);

  return (acquired || node) ? ERR_NONE : ERR_OUT_OF_MEMORY;
}
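A hedged sketch of how a blocking acquire might wrap this helper; the retry loop, the function name, and yield() are assumptions, and the real project's wrapper may differ:

// Hypothetical caller: retry the slow path if the wait-node pool is
// temporarily exhausted. Everything except __sleep_in_mutex and the error
// codes is made up for illustration.
static err_code mutex_lock_blocking(struct mutex *mutex) {
  err_code err;

  while ((err = __sleep_in_mutex(mutex)) == ERR_OUT_OF_MEMORY)
    yield();  // let other threads run and free wait nodes

  // ERR_NONE once the mutex has been acquired, either immediately or after
  // this thread was woken from the wait list.
  return err;
}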