Example #1
INTERNAL void __awake_in_mutex(struct mutex *mutex) {
  struct __mutex_node *node;
  err_code err;

  /* Pop waiters off the (non-empty) queue one at a time until
   * resume_thread() returns 0 or no waiters are left. */
  do {
    node = mutex->tail;
    mutex->tail = node->next;
    if (mutex->head == node)  /* node was the last queued waiter */
      mutex->head = NULL;
    err = resume_thread(node->id);
    free_block(&mutex_node_pool, node);  /* the node is no longer needed */
  }
  while (mutex->tail && err);

  /* The queue drained completely: drop the mutex's internal spinlock. */
  if (!mutex->tail)
    release_spinlock_int(&mutex->mlock);
}
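
For readers who want to poke at the wake-up logic outside the kernel, here is a minimal user-space sketch of the same drain loop. The wait_queue/wait_node layout, the enqueue helper, and the resume_thread_stub return convention (non-zero means the thread could not be resumed) are assumptions made for illustration, not the project's actual API.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the kernel's wait-queue types. */
struct wait_node {
    struct wait_node *next;
    int tid;
};

struct wait_queue {
    struct wait_node *head;   /* newest waiter, end of the chain */
    struct wait_node *tail;   /* oldest waiter, where we dequeue */
};

/* Hypothetical enqueue matching the pointer direction used above:
 * tail->next leads towards head, head->next is NULL. */
static void enqueue(struct wait_queue *q, struct wait_node *n)
{
    n->next = NULL;
    if (q->head)
        q->head->next = n;
    else
        q->tail = n;          /* queue was empty */
    q->head = n;
}

/* Stub resume: pretend odd thread ids fail (non-zero), even ones succeed. */
static int resume_thread_stub(int tid)
{
    return tid % 2;
}

/* Same drain loop as __awake_in_mutex: keep popping waiters until one
 * resume succeeds (returns 0) or the queue is empty. */
static void awake_one(struct wait_queue *q)
{
    struct wait_node *node;
    int err;

    do {
        node = q->tail;
        q->tail = node->next;
        if (q->head == node)      /* node was the last queued waiter */
            q->head = NULL;
        err = resume_thread_stub(node->tid);
        free(node);
    } while (q->tail && err);
}

int main(void)
{
    struct wait_queue q = { NULL, NULL };
    int tids[] = { 1, 2, 3 };     /* 1 "fails", 2 "succeeds", 3 stays queued */

    for (int i = 0; i < 3; i++) {
        struct wait_node *n = malloc(sizeof(*n));
        n->tid = tids[i];
        enqueue(&q, n);
    }

    awake_one(&q);
    printf("still waiting: %s\n", q.tail ? "yes" : "no");  /* prints "yes" */
    free(q.tail);                 /* the remaining waiter, for a clean exit */
    return 0;
}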
Example #2
void destroy_translation_map(struct vm_translation_map *map)
{
    int i;
    unsigned int *pgdir;
    int old_flags;

    /* Unlink the map from the global map list under the kernel space lock. */
    old_flags = acquire_spinlock_int(&kernel_space_lock);
    list_remove_node(map);
    release_spinlock_int(&kernel_space_lock, old_flags);

    // Free user space page tables (the lower 768 page directory entries;
    // the top 256 are kernel page tables shared with every other map).
    pgdir = (unsigned int*) PA_TO_VA(map->page_dir);
    for (i = 0; i < 768; i++)
    {
        if (pgdir[i] & PAGE_PRESENT)
            dec_page_ref(pa_to_page(PAGE_ALIGN(pgdir[i])));
    }

    /* Release the page directory itself, then the map structure. */
    dec_page_ref(pa_to_page(map->page_dir));
    slab_free(&translation_map_slab, map);
}
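
The constants in the loop above follow from the usual 32-bit two-level layout that the code implies: a 1024-entry page directory whose lower 768 entries map user space and whose top 256 map kernel space, with each entry holding a page-aligned physical address plus flag bits such as PAGE_PRESENT in the low bits. A small self-contained sketch, with the concrete values assumed rather than taken from the project's headers:

#include <assert.h>
#include <stdio.h>

/* Assumed values matching the layout implied by the code above:
 * 4 KiB pages, 4-byte entries, 1024-entry page directory. */
#define PAGE_SIZE       4096u
#define PAGE_PRESENT    1u                         /* low flag bit (assumed) */
#define PAGE_ALIGN(x)   ((x) & ~(PAGE_SIZE - 1))   /* strip flag bits */

#define NUM_PGDIR_ENTRIES    1024                  /* 4096 / sizeof(unsigned int) */
#define USER_PGDIR_ENTRIES   768                   /* entries 0..767: user space */
#define KERNEL_PGDIR_ENTRIES 256                   /* entries 768..1023: kernel */

int main(void)
{
    /* A directory entry packs a page-aligned physical address plus flags. */
    unsigned int page_table_pa = 0x00123000u;
    unsigned int pde = page_table_pa | PAGE_PRESENT;

    assert(USER_PGDIR_ENTRIES + KERNEL_PGDIR_ENTRIES == NUM_PGDIR_ENTRIES);
    assert((pde & PAGE_PRESENT) != 0);
    assert(PAGE_ALIGN(pde) == page_table_pa);  /* what dec_page_ref() receives */

    printf("page table at 0x%08x, flags 0x%03x\n",
           PAGE_ALIGN(pde), pde & (PAGE_SIZE - 1));
    return 0;
}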
Example #3
struct vm_translation_map *create_translation_map(void)
{
    struct vm_translation_map *map;
    int old_flags;

    map = slab_alloc(&translation_map_slab);
    map->page_dir = page_to_pa(vm_allocate_page());

    old_flags = acquire_spinlock_int(&kernel_space_lock);
    // Copy kernel page tables into the new page directory: the top 256 of
    // 1024 entries are taken from kernel_map, so kernel space is identical
    // in every address space.
    memcpy((unsigned int*) PA_TO_VA(map->page_dir) + 768,
           (unsigned int*) PA_TO_VA(kernel_map.page_dir) + 768,
           256 * sizeof(unsigned int));

    /* Assign a fresh address space ID and publish the map on the global
     * list while still holding the kernel space lock. */
    map->asid = next_asid++;
    map->lock = 0;

    list_add_tail(&map_list, (struct list_node*) map);
    release_spinlock_int(&kernel_space_lock, old_flags);

    return map;
}
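
Taken together, the two functions form a "shared kernel half" pattern: every new page directory copies the kernel's top 256 entries, so kernel mappings look the same in all address spaces, and teardown releases only the per-process user half. The sketch below models that pairing in plain user-space C; addr_space_create, addr_space_destroy, and the release_page_table stub are hypothetical names, not the kernel's API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NUM_ENTRIES   1024
#define KERNEL_BASE   768      /* entries 768..1023: kernel space */
#define PAGE_PRESENT  1u

/* Hypothetical per-address-space structure standing in for
 * struct vm_translation_map. */
struct addr_space {
    unsigned int pgdir[NUM_ENTRIES];
};

/* Shared kernel directory standing in for kernel_map.page_dir. */
static unsigned int kernel_pgdir[NUM_ENTRIES];

/* Stub: in the real kernel this would drop a reference on the page
 * that backs the user page table. */
static void release_page_table(unsigned int entry)
{
    printf("releasing user page table entry 0x%08x\n", entry);
}

/* Mirrors create_translation_map(): new directory, kernel half copied. */
static struct addr_space *addr_space_create(void)
{
    struct addr_space *as = calloc(1, sizeof(*as));
    memcpy(&as->pgdir[KERNEL_BASE], &kernel_pgdir[KERNEL_BASE],
           (NUM_ENTRIES - KERNEL_BASE) * sizeof(unsigned int));
    return as;
}

/* Mirrors destroy_translation_map(): only the user half is released;
 * the kernel page tables are shared and must survive. */
static void addr_space_destroy(struct addr_space *as)
{
    for (int i = 0; i < KERNEL_BASE; i++) {
        if (as->pgdir[i] & PAGE_PRESENT)
            release_page_table(as->pgdir[i]);
    }
    free(as);
}

int main(void)
{
    /* Pretend the kernel already has one page table mapped. */
    kernel_pgdir[KERNEL_BASE] = 0x00200000u | PAGE_PRESENT;

    struct addr_space *as = addr_space_create();
    as->pgdir[0] = 0x00300000u | PAGE_PRESENT;   /* one user page table */

    addr_space_destroy(as);   /* releases the user entry, not the kernel one */
    return 0;
}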