Example #1
void rwlock_acquire_write(rwlock* l)
{
    sched_thread* t = sched_thread_current();
    uint32 eflags = eflags_save();
    asm volatile ("cli");

    spinlock_acquire(&l->lock);
    l->writers++;

    if (l->writers == 1 && l->readers == 0)
    {
        spinlock_release(&l->lock);
    }
    else
    {
        spinlock_acquire(&l->write_queue.lock);
        t->status = STS_BLOCKING;
        sched_thread_enqueue(&l->write_queue, t);
        spinlock_release(&l->write_queue.lock);
        spinlock_release(&l->lock);
        sched_yield();
    }

    eflags_load(eflags);
}
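The excerpt only shows the write-acquire path. For context, a read-acquire counterpart might look like the sketch below; the read_queue field is an assumption (only lock, readers, writers and write_queue appear above), and the rest simply mirrors rwlock_acquire_write().

/* Hypothetical read-side counterpart; read_queue is an assumed field. */
void rwlock_acquire_read(rwlock* l)
{
    sched_thread* t = sched_thread_current();
    uint32 eflags = eflags_save();
    asm volatile ("cli");

    spinlock_acquire(&l->lock);

    if (l->writers == 0)
    {
        /* no writer holds or waits for the lock: take a read share now */
        l->readers++;
        spinlock_release(&l->lock);
    }
    else
    {
        /* a writer is active or queued: block on the (assumed) read_queue
         * until the release path wakes this thread */
        spinlock_acquire(&l->read_queue.lock);
        t->status = STS_BLOCKING;
        sched_thread_enqueue(&l->read_queue, t);
        spinlock_release(&l->read_queue.lock);
        spinlock_release(&l->lock);
        sched_yield();
    }

    eflags_load(eflags);
}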
/* detach shared memory from the calling process's address space */
void sys_shmdt() {
	Trapframe *tf = task_curr->registers();
	const void* shmaddr = (const void*)get_param2(tf);

	uint32_t eflags = eflags_read();
	cli();

	SharedMemList_t::iterator i = task_curr->shared_mem_list.begin();
	SharedMemList_t::iterator end = task_curr->shared_mem_list.end();
	for ( ; i != end ; i++) {
		if (i->value()->start == (uint32_t)shmaddr) {
			break;
		}
	}
	if (i == end) {
		eflags_load(eflags);
		return set_return(tf, -1);
	}

	detach_shared_mem(i->value());
	eflags_load(eflags);

	return set_return(tf, 0);
}
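sys_shmdt() delegates the real work to detach_shared_mem(), which is not part of this excerpt. The sketch below is only a guess at its shape, inferred from the attach path in sys_shmat() further down; page_remove(), erase() and uaddr2va() are hypothetical helper names, and whether the backing page's refcount is adjusted here is not visible in the excerpt.

/* Hypothetical sketch of detach_shared_mem(); helper names are assumptions. */
void detach_shared_mem(SharedMemDesc *desc) {
	SharedMemInfo *shm = desc->info;
	uint32_t va = uaddr2va(desc->start);	/* assumed inverse of va2uaddr() */

	/* undo the mapping set up by sys_shmat() */
	task_curr->pgdir.page_remove(va);	/* assumed helper */
	task_curr->pgdir.count_shared--;

	/* unlink this descriptor from the task's list and from the
	 * segment's task list */
	task_curr->shared_mem_list.erase(&desc->list_node);	/* assumed helper */
	shm->task_list.erase(&desc->info_task_list_node);	/* assumed helper */

	/* the SharedMemInfo and its physical page stay registered in the
	 * id/key hash tables; only this task's view is torn down */
	kfree(desc);
}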
Example #3
bool mutex_try_acquire(mutex* m)
{
    sched_thread* t = sched_thread_current();
    uint32 eflags = eflags_save();
    bool result;
    asm volatile ("cli");

    if (m->owner == t)
        crash("Kernel mutex recursive locking detected!");

    if ((result = mutex_acquire_fast(m)))
    {
        m->owner = t;
        m->owner_next = t->held_mutexes;
        t->held_mutexes = m;
    }

    eflags_load(eflags);
    return result;
}
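Both mutex_try_acquire() and mutex_acquire() rely on a mutex_acquire_fast() fast path that is not included in the excerpt. A minimal sketch of what such a fast path typically looks like on x86 is given below; the m->locked field and the cmpxchg-based implementation are assumptions, not the project's actual code.

/* Hypothetical fast path: atomically flip m->locked from 0 to 1.
 * Returns true when the mutex was free and is now held by the caller. */
static inline bool mutex_acquire_fast(mutex* m)
{
    uint32 expected = 0;
    asm volatile ("lock cmpxchgl %2, %1"
                  : "+a" (expected), "+m" (m->locked)
                  : "r" (1u)
                  : "memory", "cc");
    return expected == 0;
}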
Example #4
void mutex_acquire(mutex* m)
{
    sched_thread* t = sched_thread_current();
    uint32 eflags = eflags_save();
    asm volatile ("cli");

    if (m->owner == t)
        crash("Kernel mutex recursive locking detected!");

    if (!mutex_acquire_fast(m))
    {
        spinlock_acquire(&m->wait_queue.lock);

        if (!mutex_acquire_fast(m))
        {
            t->status = STS_BLOCKING;
            sched_thread_enqueue(&m->wait_queue, t);
            spinlock_release(&m->wait_queue.lock);
            sched_yield();
        }
        else
        {
            spinlock_release(&m->wait_queue.lock);

            m->owner = t;
            m->owner_next = t->held_mutexes;
            t->held_mutexes = m;
        }
    }
    else
    {
        m->owner = t;
        m->owner_next = t->held_mutexes;
        t->held_mutexes = m;
    }

    eflags_load(eflags);
}
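The matching mutex_release() is not shown either. The sketch below is an inferred counterpart to mutex_acquire(): it either hands the mutex straight to the first waiter on wait_queue or clears the owner and releases the fast-path state; sched_thread_dequeue(), sched_thread_wake() and mutex_release_fast() are assumed helper names.

/* Hypothetical counterpart to mutex_acquire(); helper names are assumptions. */
void mutex_release(mutex* m)
{
    sched_thread* t = sched_thread_current();
    uint32 eflags = eflags_save();
    asm volatile ("cli");

    if (m->owner != t)
        crash("Kernel mutex released by non-owner!");

    /* unlink m from the caller's held_mutexes list (singly linked through
     * owner_next, as set up in mutex_acquire()) */
    mutex** pp = &t->held_mutexes;
    while (*pp != m)
        pp = &(*pp)->owner_next;
    *pp = m->owner_next;

    spinlock_acquire(&m->wait_queue.lock);
    sched_thread* next = sched_thread_dequeue(&m->wait_queue); /* assumed helper */
    spinlock_release(&m->wait_queue.lock);

    if (next)
    {
        /* hand the mutex directly to the first waiter */
        m->owner = next;
        m->owner_next = next->held_mutexes;
        next->held_mutexes = m;
        sched_thread_wake(next); /* assumed helper: marks the thread runnable */
    }
    else
    {
        m->owner = NULL;
        mutex_release_fast(m); /* assumed counterpart of mutex_acquire_fast() */
    }

    eflags_load(eflags);
}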
/* allocate shared memory */
void sys_shmget() {
	Trapframe *tf = task_curr->registers();
	key_t key = (key_t)get_param2(tf);
	size_t size = (size_t)get_param3(tf);
	int shmflg = (int)get_param4(tf);

	SharedMemInfo *shm = NULL;
	Page *p = NULL;

	if (size > 0x1000) {
		print_warning(">> shm max size: 4096 byte\n");
		return set_return(tf, -1);
	}

	if ((shmflg != 0) && (shmflg != 1)) {
		print_warning(">> shmflg not supported. Use [0, 1]\n");
		return set_return(tf, -1);
	}

	uint32_t eflags = eflags_read();
	cli();

	/* look up an existing shared memory segment */
	if (shmflg == 0) {
		shm = shm_key_ht.get(key);
		eflags_load(eflags);
		if (shm == NULL)
			return set_return(tf, -1);
		return set_return(tf, shm->id);
	}

	// shmflg == 1

	shm = shm_key_ht.get(key);
	if (shm != NULL) {
		eflags_load(eflags);
		return set_return(tf, -1);
	}

	int shmid = next_shm_id++;
	if (shmid > 1023) {
		print_warning(">> no shm ids left\n");
		eflags_load(eflags);
		return set_return(tf, -1);
	}

	shm = (SharedMemInfo*)kmalloc(sizeof(SharedMemInfo));
	if (shm == NULL)
		goto bad_sys_shmget;

	/* physical memory to back the shared memory segment */
	if ( page_alloc(&p) < 0)
		goto bad_sys_shmget;
	p->refcount_inc();

	shm->init();
	shm->size = size;
	shm->id = shmid;
	shm->key = key;
	shm->page = p;

	shm_key_ht.put(&shm->key_hash_node);
	shm_id_ht.put(&shm->id_hash_node);

	eflags_load(eflags);
	return set_return(tf, shmid);

bad_sys_shmget:
	if (shm)
		kfree(shm);
	if (p)
		page_free(p);

	eflags_load(eflags);
	return set_return(tf, -1);
}
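The SharedMemInfo type itself is not part of the excerpt. The layout below is reconstructed from the fields sys_shmget() and sys_shmat() actually touch; HashNode_t and TaskList_t are placeholder type names, not the project's real ones.

/* Reconstructed from usage; node/list type names are placeholders. */
struct SharedMemInfo {
	int id;			/* allocated from next_shm_id, capped at 1023 */
	key_t key;		/* user-supplied key, looked up via shm_key_ht */
	size_t size;		/* at most 0x1000 bytes (one page) */
	Page *page;		/* single physical page backing the segment */

	HashNode_t key_hash_node;	/* linkage for shm_key_ht */
	HashNode_t id_hash_node;	/* linkage for shm_id_ht */
	TaskList_t task_list;		/* SharedMemDesc of every attached task */

	void init();		/* initialize the nodes and the task list */
};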
/* attach shared memory to the calling process's address space */
void sys_shmat() {
	Trapframe *tf = task_curr->registers();
	int shmid = (int)get_param2(tf);
	const void* shmaddr = (const void*)get_param3(tf);
	int shmflg = (int)get_param4(tf);

	PTE_t * pte = NULL;
	uint32_t va = -1;

	uint32_t eflags = eflags_read();
	cli();

	SharedMemInfo *shm = shm_id_ht.get(shmid);

	if (shm == NULL) {
		eflags_load(eflags);
		return set_return(tf, -1);
	}

	if (shmaddr != NULL) {
		print_warning(">> shmaddr not supported. Use NULL\n");
		eflags_load(eflags);
		return set_return(tf, -1);
	}

	if (shmflg != 0) {
		print_warning(">> shmflg not supported. Use 0\n");
		eflags_load(eflags);
		return set_return(tf, -1);
	}

	SharedMemDesc *desc = (SharedMemDesc*)kmalloc(sizeof(SharedMemDesc));
	if (desc == NULL)
		goto bad_sys_shmat;

	desc->init();

	/* find a free virtual address for the shared memory mapping */
	for (va = MMAP_USER_SHARED_MEM_BASE ;
		 va < MMAP_USER_SHARED_MEM_TOP ;
		 va += 0x1000) {
		PTE_t *p = task_curr->pgdir.page_get_c(VA_t(va));
		if (!p->present) {
			pte = p;
			break;
		}
	}

	if (pte == NULL) {
		print_warning(">> shm: no virtual memory\n");
		goto bad_sys_shmat;
	}

	task_curr->pgdir.page_insert(shm->page, va,  PTE_P | PTE_U | PTE_W);
	task_curr->pgdir.count_shared++;

	desc->start = va2uaddr(va);
	desc->end = desc->start + shm->size;
	desc->info = shm;

	/* add to the task's shared memory list */
	task_curr->shared_mem_list.push_back(&desc->list_node);
	/* add to the shared memory segment's task list */
	desc->info->task_list.push_back(&desc->info_task_list_node);

	/* temporary sanity check for the desc data structure */
	ASSERT(task_curr == desc->task());

	eflags_load(eflags);
	return set_return(tf, va2uaddr(va));

bad_sys_shmat:
	if (desc)
		kfree(desc);

	eflags_load(eflags);
	return set_return(tf, -1);
}
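Taken together, the three syscalls support a small SysV-like workflow. The fragment below is a hypothetical user-space sequence that stays within the checks enforced above (at most 4096 bytes, shmflg 1 to create and 0 to look up, shmat with a NULL address and zero flags); the shmget/shmat/shmdt wrapper declarations are assumptions about the user-side library, not part of the excerpt.

/* Hypothetical user-space wrappers for the syscalls above (assumed names). */
int   shmget(key_t key, size_t size, int shmflg);
void* shmat(int shmid, const void* shmaddr, int shmflg);
int   shmdt(const void* shmaddr);

int run_shared_mem_demo(key_t key) {
	/* create a new segment: shmflg == 1, size capped at one page (4096 bytes) */
	int shmid = shmget(key, 4096, 1);
	if (shmid < 0)
		return -1;

	/* attach it: the kernel only accepts shmaddr == NULL and shmflg == 0 */
	char *mem = (char*)shmat(shmid, NULL, 0);
	if (mem == (char*)-1)
		return -1;

	mem[0] = 'x';	/* visible to every task that attaches the same shmid */

	/* detach by the address shmat() returned */
	return shmdt(mem);
}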