Example #1
struct process *tm_process_get(pid_t pid)
{
	struct process *proc;
	mutex_acquire(&process_refs_lock);
	if((proc = hash_lookup(process_table, &pid, sizeof(pid))) == NULL) {
		mutex_release(&process_refs_lock);
		return 0;
	}
	atomic_fetch_add(&proc->refs, 1);
	mutex_release(&process_refs_lock);
	return proc;
}
Example #2
struct thread *tm_thread_get(pid_t tid)
{
	struct thread *thr;
	mutex_acquire(&thread_refs_lock);
	if((thr = hash_lookup(thread_table, &tid, sizeof(tid))) == NULL) {
		mutex_release(&thread_refs_lock);
		return 0;
	}
	atomic_fetch_add(&thr->refs, 1);
	mutex_release(&thread_refs_lock);
	return thr;
}
Example #3
void tm_thread_put(struct thread *thr)
{
	ASSERT(thr->refs >= 1);
	mutex_acquire(&thread_refs_lock);
	if(atomic_fetch_sub(&thr->refs, 1) == 1) {
		hash_delete(thread_table, &thr->tid, sizeof(thr->tid));
		mutex_release(&thread_refs_lock);
		kfree(thr);
	} else {
		mutex_release(&thread_refs_lock);
	}
}
Example #4
int mutex_test(void)
{
	static mutex_t imutex = MUTEX_INITIAL_VALUE(imutex);
	printf("preinitialized mutex:\n");
	hexdump(&imutex, sizeof(imutex));

	mutex_t m;
	mutex_init(&m);

	thread_t *threads[5];

	for (uint i=0; i < countof(threads); i++) {
		threads[i] = thread_create("mutex tester", &mutex_thread, &m, DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
		thread_resume(threads[i]);
	}

	for (uint i=0; i < countof(threads); i++) {
		thread_join(threads[i], NULL, INFINITE_TIME);
	}

	printf("done with simple mutex tests\n");

	printf("testing mutex timeout\n");

	mutex_t timeout_mutex;

	mutex_init(&timeout_mutex);
	mutex_acquire(&timeout_mutex);

	for (uint i=0; i < 2; i++) {
		threads[i] = thread_create("mutex timeout tester", &mutex_timeout_thread, (void *)&timeout_mutex, DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
		thread_resume(threads[i]);
	}

	for (uint i=2; i < 4; i++) {
		threads[i] = thread_create("mutex timeout tester", &mutex_zerotimeout_thread, (void *)&timeout_mutex, DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
		thread_resume(threads[i]);
	}

	thread_sleep(5000);
	mutex_release(&timeout_mutex);

	for (uint i=0; i < 4; i++) {
		thread_join(threads[i], NULL, INFINITE_TIME);
	}

	printf("done with mutex tests\n");

	mutex_destroy(&timeout_mutex);

	return 0;
}
Example #5
// decrement the ref to the mount structure, which may
// cause an unmount operation
static void put_mount(struct fs_mount *mount)
{
    mutex_acquire(&mount_lock);
    if ((--mount->ref) == 0) {
        list_delete(&mount->node);
        mount->api->unmount(mount->cookie);
        free(mount->path);
        if (mount->dev)
            bio_close(mount->dev);
        free(mount);
    }
    mutex_release(&mount_lock);
}
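For contrast with the comment above, here is a minimal companion sketch (get_mount is a hypothetical helper, not shown in the original source): taking a reference has to happen under the same mount_lock, so a ref count that has already dropped to zero can never be observed and revived.

/* hypothetical counterpart to put_mount(): bump the ref under mount_lock */
static void get_mount(struct fs_mount *mount)
{
    mutex_acquire(&mount_lock);
    mount->ref++;
    mutex_release(&mount_lock);
}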
Example #6
static status_t get_display_info(struct virtio_gpu_dev *gdev)
{
    status_t err;

    LTRACEF("gdev %p\n", gdev);

    DEBUG_ASSERT(gdev);

    /* hold the lock so only one message is outstanding at a time */
    mutex_acquire(&gdev->lock);

    /* construct the get display info message */
    struct virtio_gpu_ctrl_hdr req;
    memset(&req, 0, sizeof(req));
    req.type = VIRTIO_GPU_CMD_GET_DISPLAY_INFO;

    /* send the message and get a response */
    struct virtio_gpu_resp_display_info *info;
    err = send_command_response(gdev, &req, sizeof(req), (void **)&info, sizeof(*info));
    DEBUG_ASSERT(err == NO_ERROR);
    if (err < NO_ERROR) {
        mutex_release(&gdev->lock);
        return ERR_NOT_FOUND;
    }

    /* we got a response */
    if (info->hdr.type != VIRTIO_GPU_RESP_OK_DISPLAY_INFO) {
        mutex_release(&gdev->lock);
        return ERR_NOT_FOUND;
    }

    LTRACEF("response:\n");
    for (uint i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        if (info->pmodes[i].enabled) {
            LTRACEF("%u: x %u y %u w %u h %u flags 0x%x\n", i,
                    info->pmodes[i].r.x, info->pmodes[i].r.y, info->pmodes[i].r.width, info->pmodes[i].r.height,
                    info->pmodes[i].flags);
            if (gdev->pmode_id < 0) {
                /* save the first valid pmode we see */
                memcpy(&gdev->pmode, &info->pmodes[i], sizeof(gdev->pmode));
                gdev->pmode_id = i;
            }
        }
    }

    /* release the lock */
    mutex_release(&gdev->lock);

    return NO_ERROR;
}
Example #7
void cond_broadcast(COND * c) {
    if(c){
        mutex_acquire(c->lock, LOCK_INFINITE);
        if (c->waiting > c->signals) {
            int i, num_waiting;

            num_waiting = (c->waiting - c->signals);
            c->signals = c->waiting;
            for (i = 0; i < num_waiting; ++i) {
                mutex_release(c->wait_sem);
            }
            /* Now all released threads are blocked here, waiting for us.
               Collect them all (and win fabulous prizes!) :-)
             */
            mutex_release(c->lock);
            for (i = 0; i < num_waiting; ++i) {
                mutex_acquire(c->wait_done, LOCK_INFINITE);
            }
        } else {
            mutex_release(c->lock);
        }
    }
}
Example #8
void fs_inode_sync_region(struct inode *node, addr_t virt, size_t offset, size_t length)
{
	mutex_acquire(&node->mappings_lock);
	ASSERT(node->flags & INODE_PCACHE);
	ASSERT(!(offset & ~PAGE_MASK));
	ASSERT(!(virt & ~PAGE_MASK));
	int page_number = offset / PAGE_SIZE;
	int npages = ((length-1) / PAGE_SIZE) + 1;
	for(int i = page_number; i < (page_number + npages); i++)
	{
		fs_inode_sync_physical_page(node, virt + i * PAGE_SIZE, offset + i * PAGE_SIZE, PAGE_SIZE);
	}
	mutex_release(&node->mappings_lock);
}
Example #9
void consumer(struct buffer *b)
  /*@ requires [_]b->m |-> ?m &*&
               [_]b->v |-> ?v &*& 
               [_]mutex(m) &*& 
               inv(m) == buffer(b) &*& 
               [_]b->gid |-> ?gid &*&
               obs(?O) &*&
               tic(gid) &*&
               no_cycle(v,O) == true &*&
               no_cycle(m,O) == true; @*/
  /*@ ensures  [_]b->m |-> m &*&
               [_]b->v |-> v &*& 
               [_]mutex(m) &*& 
               obs(O); @*/
{
  //@ close mutex_inv(m,buffer(b));
  mutex_acquire(b->m);
  //@ open buffer(b)(?Wt1,?Ot1);
  //@ leak [_]b->v |-> v;
  
  while (size_of(b->q)==0)
  /*@ invariant [_]b->m |-> m &*& 
                [_]b->v |-> v &*& 
                b->q |-> ?q &*& 
                [_]b->gid |-> gid &*& 
                queue(q,?s) &*& 
                s>=0 &*& 
                mutex_held(m, _, ?Wt, ?Ot) &*& 
                ctr(gid,?Ct) &*&
                Wt(v) + Ct <= Ot(v) + s &*&
                Wt(v) <= Ot(v) &*&
                obs(cons(m,O)) &*&
                tic(gid); @*/
  {
    //@ dec_ctr(gid);
    //@ close buffer(b)(finc(Wt,v),Ot);
    //@ close mutex_inv(m,buffer(b));
    //@ close condvar_trn(v,vtrn(gid));
    condvar_wait(b->v, b->m);
    //@ open buffer(b)(_,_);
    //@ open vtrn(gid)();
  }
  dequeue(b->q);
  //@ dec_ctr(gid);
  //@ close buffer(b)(Wt, Ot);
  //@ close mutex_inv(m,buffer(b));
  mutex_release(b->m);
  //@ leak [_]mutex(m);
}
Example #10
static size_t pty_write_master(struct pty *pty, uint8_t *buffer, size_t length, bool block)
{
	if(pty->term.c_lflag & ICANON) {
		mutex_acquire(&pty->cbuf_lock);
		for(size_t i = 0;i<length;i++) {
			process_input(pty, *buffer++);
		}
		mutex_release(&pty->cbuf_lock);
		return length;
	} else {
		if(pty->term.c_lflag & ECHO)
			charbuffer_write(&pty->output, buffer, length, block);
		return charbuffer_write(&pty->input, buffer, length, block);
	}
}
Example #11
int vm_do_unmap(addr_t virt, unsigned locked)
{
	/* Walk to the page table that maps this virtual address and
	 * clear the corresponding entry */
	#if CONFIG_SWAP
	if(current_task && num_swapdev && current_task->num_swapped)
		swap_in_page((task_t *)current_task, virt & PAGE_MASK);
	#endif
	addr_t vpage = (virt&PAGE_MASK)/0x1000;
	unsigned vp4 = PML4_IDX(vpage);
	unsigned vpdpt = PDPT_IDX(vpage);
	unsigned vdir = PAGE_DIR_IDX(vpage);
	unsigned vtbl = PAGE_TABLE_IDX(vpage);
	if(kernel_task && (virt&PAGE_MASK) != PDIR_DATA && !locked)
		mutex_acquire(&pd_cur_data->lock);
	page_dir_t *pd;
	page_table_t *pt;
	pdpt_t *pdpt;
	pml4_t *pml4;
	
	pml4 = (pml4_t *)((kernel_task && current_task) ? current_task->pd : kernel_dir);
	if(!pml4[vp4])
		pml4[vp4] = pm_alloc_page() | PAGE_PRESENT | PAGE_WRITE;
	pdpt = (addr_t *)((pml4[vp4]&PAGE_MASK) + PHYS_PAGE_MAP);
	if(!pdpt[vpdpt])
		pdpt[vpdpt] = pm_alloc_page() | PAGE_PRESENT | PAGE_WRITE;
	pd = (addr_t *)((pdpt[vpdpt]&PAGE_MASK) + PHYS_PAGE_MAP);
	if(!pd[vdir])
		pd[vdir] = pm_alloc_page() | PAGE_PRESENT | PAGE_WRITE;
	pt = (addr_t *)((pd[vdir]&PAGE_MASK) + PHYS_PAGE_MAP);
	
	addr_t p = pt[vtbl];
	pt[vtbl] = 0;
	asm("invlpg (%0)"::"r" (virt));
	#if CONFIG_SMP
	if(kernel_task && (virt&PAGE_MASK) != PDIR_DATA) {
		if(IS_KERN_MEM(virt))
			send_ipi(LAPIC_ICR_SHORT_OTHERS, 0, LAPIC_ICR_LEVELASSERT | LAPIC_ICR_TM_LEVEL | IPI_TLB);
		else if((IS_THREAD_SHARED_MEM(virt) && pd_cur_data->count > 1))
			send_ipi(LAPIC_ICR_SHORT_OTHERS, 0, LAPIC_ICR_LEVELASSERT | LAPIC_ICR_TM_LEVEL | IPI_TLB);
	}
	#endif
	if(kernel_task && (virt&PAGE_MASK) != PDIR_DATA && !locked)
		mutex_release(&pd_cur_data->lock);
	if(p && !(p & PAGE_COW))
		pm_free_page(p & PAGE_MASK);
	return 0;
}
Example #12
ER
ploc_mtx(ID mtxid)
{
	MTXCB	*p_mtxcb;
	ER		ercd;

	LOG_PLOC_MTX_ENTER(mtxid);
	CHECK_TSKCTX_UNL();
	CHECK_ID(VALID_MTXID(mtxid));
	p_mtxcb = get_mtxcb(mtxid);

	lock_cpu();
	if (p_mtxcb->p_mtxinib->mtxatr == TA_NOEXS) {
		ercd = E_NOEXS;
	}
	else if (VIOLATE_ACPTN(p_mtxcb->p_mtxinib->acvct.acptn1)) {
		ercd = E_OACV;
	}
	else if (MTX_CEILING(p_mtxcb)
			&& p_mtxcb->p_mtxinib->ceilpri < p_runtsk->p_dominib->minpriority
			&& VIOLATE_ACPTN(p_runtsk->p_dominib->acvct.acptn2)) {
		ercd = E_OACV;									/*[NGKI5124]*/
	}
	else if (MTX_CEILING(p_mtxcb)
				&& p_runtsk->bpriority < p_mtxcb->p_mtxinib->ceilpri) {
		ercd = E_ILUSE;
	}
	else if (p_mtxcb->p_loctsk == NULL) {
		mutex_acquire(p_runtsk, p_mtxcb);
		/*
		 *  Locking a priority-ceiling mutex may raise the priority of
		 *  p_runtsk, but it never makes a dispatch necessary.
		 */
		assert(p_runtsk == p_schedtsk);
		ercd = E_OK;
	}
	else if (p_mtxcb->p_loctsk == p_runtsk) {
		ercd = E_OBJ;
	}
	else {
		ercd = E_TMOUT;
	}
	unlock_cpu();

  error_exit:
	LOG_PLOC_MTX_LEAVE(ercd);
	return(ercd);
}
Example #13
void queue_add(int sockfd, upload_queue_entry_t *entry)
{
  socket_data_t *socket_data = get_socket_data(sockfd);

  mutex_acquire(&g_mutex);
  if (socket_data->queue.head == NULL) {
    socket_data->queue.head = entry;
    socket_data->queue.tail = entry;
  } else {
    socket_data->queue.tail->next = entry;
    socket_data->queue.tail = entry;
  }

  manage_queue(socket_data);
  mutex_release(&g_mutex);
}
Example #14
ER
tloc_mtx(ID mtxid, TMO tmout)
{
	MTXCB	*p_mtxcb;
	WINFO_MTX winfo_mtx;
	TMEVTB	tmevtb;
	ER		ercd;

	LOG_TLOC_MTX_ENTER(mtxid, tmout);
	CHECK_DISPATCH();
	CHECK_MTXID(mtxid);
	CHECK_TMOUT(tmout);
	p_mtxcb = get_mtxcb(mtxid);

	t_lock_cpu();
	if (MTX_CEILING(p_mtxcb)
				&& p_runtsk->bpriority < p_mtxcb->p_mtxinib->ceilpri) {
		ercd = E_ILUSE;
	}
	else if (p_mtxcb->p_loctsk == NULL) {
		(void) mutex_acquire(p_runtsk, p_mtxcb);
		/*
		 *  Locking a priority-ceiling mutex may raise the priority of
		 *  p_runtsk, but it never makes a dispatch necessary.
		 */
		assert(!(p_runtsk != p_schedtsk && dspflg));
		ercd = E_OK;
	}
	else if (p_mtxcb->p_loctsk == p_runtsk) {
		ercd = E_OBJ;
	}
	else if (tmout == TMO_POL) {
		ercd = E_TMOUT;
	}
	else {
		p_runtsk->tstat = (TS_WAITING | TS_WAIT_MTX);
		wobj_make_wait_tmout((WOBJCB *) p_mtxcb, (WINFO_WOBJ *) &winfo_mtx,
														&tmevtb, tmout);
		dispatch();
		ercd = winfo_mtx.winfo.wercd;
	}
	t_unlock_cpu();

  error_exit:
	LOG_TLOC_MTX_LEAVE(ercd);
	return(ercd);
}
Example #15
void *pmm_alloc(void) {
	uint64_t addr;
	
	mutex_acquire(&pmm_mutex);

	if (pmm_alloc_base == pmm_alloc_limit) {
		addr = 0;
	}
	else {
		addr = pmm_alloc_base + pmm_base;
		pmm_alloc_base += 0x1000;
	}

	mutex_release(&pmm_mutex);

	return (void*) addr;
}
Example #16
int register_interrupt_handler(u8int num, isr_t stage1_handler, isr_t stage2_handler)
{
	mutex_acquire(&isr_lock);
	int i;
	for(i=0;i<MAX_HANDLERS;i++)
	{
		if(!interrupt_handlers[num][i][0] && !interrupt_handlers[num][i][1])
		{
			interrupt_handlers[num][i][0] = stage1_handler;
			interrupt_handlers[num][i][1] = stage2_handler;
			break;
		}
	}
	mutex_release(&isr_lock);
	if(i == MAX_HANDLERS) panic(0, "ran out of interrupt handlers");
	return i;
}
Example #17
int cpu_interrupt_register_handler(int num, void (*fn)(struct registers *, int, int))
{
	mutex_acquire(&isr_lock);
	int i;
	for(i = 0; i < MAX_HANDLERS; i++)
	{
		if(!interrupt_handlers[num][i].fn)
		{
			interrupt_handlers[num][i].fn = fn;
			break;
		}
	}
	mutex_release(&isr_lock);
	if(i == MAX_HANDLERS)
		PANIC(0, "Ran out of interrupt handlers.", EOOM);
	return i;
}
Example #18
void queue_complete_head(int sockfd)
{
  socket_data_t *socket_data = get_socket_data(sockfd);
  upload_queue_entry_t *entry = socket_data->queue.head;
  assert(entry != NULL);

  mutex_acquire(&g_mutex);
  socket_data->queue.head = entry->next;
  if (socket_data->queue.tail == entry)
    socket_data->queue.tail = NULL;

  free_entry(entry);
  socket_data->upload_active = 0;

  manage_queue(socket_data);
  mutex_release(&g_mutex);
}
Example #19
void task_b(void) {
	const char s[] = "BBBbbb";
	
	for ( ;; ) {
		mutex_acquire(&test_mutex);
		
		const char *p = s;
		while (*p != '\0') {
			uart_write(*(p++));
			_delay_ms(100);
		}
		
		mutex_release(&test_mutex);
		
		_delay_ms(300);
	}
}
Example #20
File: mutex.c Project: dankex/lk
/**
 * @brief  Mutex wait with timeout
 *
 * This function waits up to \a timeout ms for the mutex to become available.
 * Timeout may be zero, in which case this function returns immediately if
 * the mutex is not free.
 *
 * @return  NO_ERROR on success, ERR_TIMED_OUT on timeout,
 * other values on error
 */
status_t mutex_acquire_timeout(mutex_t *m, lk_time_t timeout)
{
	status_t ret = NO_ERROR;

#if MUTEX_CHECK
	if (timeout == INFINITE_TIME)
		return mutex_acquire(m); // Unnecessary overhead for correct calls; this function could handle this case anyway

	ASSERT(m->magic == MUTEX_MAGIC);

	if (current_thread == m->holder)
		panic("mutex_acquire_timeout: thread %p (%s) tried to acquire mutex %p it already owns.\n",
		      current_thread, current_thread->name, m);
#endif


	enter_critical_section();

	if (unlikely(++m->count > 1)) {
		ret = wait_queue_block(&m->wait, timeout);
		if (ret < NO_ERROR) {
			/* if the acquisition timed out, back out the acquire and exit */
			if (ret == ERR_TIMED_OUT) {
				/*
				 * XXX race: the mutex may have been destroyed after the timeout,
				 * but before we got scheduled again which makes messing with the
				 * count variable dangerous.
				 */
				m->count--;
				goto err;
			}
			/* if there was a general error, it may have been destroyed out from
			 * underneath us, so just exit (which is really an invalid state anyway)
			 */
		}
	}

#if MUTEX_CHECK
	m->holder = current_thread;
#endif

err:
	exit_critical_section();
	return ret;
}
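A minimal caller sketch for the function above (guarded_update is a hypothetical name; the calls and constants follow the lk API used throughout these examples): wait up to 100 ms for the mutex and treat ERR_TIMED_OUT as an expected, recoverable outcome rather than a failure.

/* hypothetical caller: bounded wait, bail out cleanly on timeout */
static status_t guarded_update(mutex_t *shared_lock)
{
	status_t err = mutex_acquire_timeout(shared_lock, 100);
	if (err == ERR_TIMED_OUT)
		return err;		/* contended; caller may retry later */
	if (err < NO_ERROR)
		return err;		/* some other error; don't touch the state */

	/* ... mutate the shared state here ... */

	mutex_release(shared_lock);
	return NO_ERROR;
}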
Example #21
int mutex_test(void)
{
	mutex_init(&m);

	int i;
	for (i = 0; i < 5; i++)
		thread_resume(thread_create
			      ("mutex tester", &mutex_thread, NULL,
			       DEFAULT_PRIORITY, DEFAULT_STACK_SIZE));

	thread_sleep(1000);

	while (mutex_thread_count > 0)
		thread_yield();

	printf("done with simple mutex tests\n");

	printf("testing mutex timeout\n");

	mutex_t timeout_mutex;

	mutex_init(&timeout_mutex);
	mutex_acquire(&timeout_mutex);

	for (i = 0; i < 2; i++)
		thread_resume(thread_create
			      ("mutex timeout tester", &mutex_timeout_thread,
			       (void *)&timeout_mutex, DEFAULT_PRIORITY,
			       DEFAULT_STACK_SIZE));
	for (i = 0; i < 2; i++)
		thread_resume(thread_create
			      ("mutex timeout tester",
			       &mutex_zerotimeout_thread,
			       (void *)&timeout_mutex, DEFAULT_PRIORITY,
			       DEFAULT_STACK_SIZE));

	thread_sleep(5000);
	mutex_release(&timeout_mutex);

	printf("done with mutex tests\n");

	mutex_destroy(&timeout_mutex);

	return 0;
}
Example #22
File: sys_arch.c Project: chychc/lk
err_t sys_mbox_trypost(sys_mbox_t * mbox, void *msg)
{
    status_t res;

    res = sem_trywait(&mbox->empty);
    if (res == ERR_NOT_READY)
        return ERR_TIMEOUT;

    mutex_acquire(&mbox->lock);

    mbox->queue[mbox->head] = msg;
    mbox->head = (mbox->head + 1) % mbox->size;

    mutex_release(&mbox->lock);
    sem_post(&mbox->full, true);

    return ERR_OK;
}
Example #23
void tm_process_put(struct process *proc)
{
	ASSERT(proc->magic == PROCESS_MAGIC);
	mutex_acquire(&process_refs_lock);
	if(!(proc->refs >= 1)) {
		printk(KERN_PANIC, "[TM/PANIC]: Panic in tm_process_put!\n");
		printk(KERN_PANIC, "[TM/PANIC]: Pid of the process: %d (%s), refs = %d\n", proc->pid, proc->command, proc->refs);
		PANIC(PANIC_NOSYNC, "Process refcount error (put)\n", EFAULT);
	}
	if(atomic_fetch_sub(&proc->refs, 1) == 1) {
		hash_delete(process_table, &proc->pid, sizeof(proc->pid));
		mutex_release(&process_refs_lock);
		// Do this here, since we must wait for every thread to give up their refs. This happens in schedule, after it gets scheduled away.
		mm_context_destroy(&proc->vmm_context);
		kfree(proc);
	} else {
		mutex_release(&process_refs_lock);
	}
}
Example #24
static void heap_dump(void)
{
	dprintf(INFO, "Heap dump:\n");
	dprintf(INFO, "\tbase %p, len 0x%zx\n", theheap.base, theheap.len);
	dprintf(INFO, "\tfree list:\n");

	mutex_acquire(&theheap.lock);

	struct free_heap_chunk *chunk;
	list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, node) {
		dump_free_chunk(chunk);
	}

	dprintf(INFO, "\tdelayed free list:\n");
	list_for_every_entry(&theheap.delayed_free_list, chunk, struct free_heap_chunk, node) {
		dump_free_chunk(chunk);
	}
	mutex_release(&theheap.lock);
}
Example #25
void putchar(struct buffer *b, int c)
/*@ requires
      [?f]buffer(?id, b)
      &*& token(?t1)
      &*& putchar_io(id, t1, c, ?t2);
@*/
/*@ ensures token(t2)
     &*& [f]buffer(id, b);
@*/
{
  //@ open buffer(id, b);
  mutex_acquire(b->mutex);
  //@ open buffer_invar(id, b)();
  b->c = c;
  //@ open putchar_io(id, t1, c, t2);
  //@ open token(t1);
  
  /*@
  if (place_iot(t1) == iot_split_left(iot_init)){
    // note: this merge_fractions needs the branching provided by the if.
    // we put them inside the if to make this clear, although for VeriFast
    // it'll also work if they are put after the if because the branching
    // that VeriFast performs continues after the if.
    merge_fractions(gcf_instance(id, iot_split_left(iot_init), _));
    merge_fractions(gcf_instance(id, iot_split_right(iot_init), _));
    open exists<pair<int, list<int> > >(pair('l', ?l_todo));
    close exists<pair<int, list<int> > >(pair('l', {}));
  }else{
    merge_fractions(gcf_instance(id, iot_split_left(iot_init), _));
    merge_fractions(gcf_instance(id, iot_split_right(iot_init), _));
    open exists<pair<int, list<int> > >(pair('r', ?r_todo));
    close exists<pair<int, list<int> > >(pair('r', {}));
  }
  @*/
    
  //@ gcf_update(id, place_iot(t1), {c});
  //@ assert place_id(t1) == place_id(t2);
  //@ close token(t2);
  //@ close buffer_invar(id, b)();
  mutex_release(b->mutex);
  //@ close [f]buffer(id, b);
}
Example #26
static status_t attach_backing(struct virtio_gpu_dev *gdev, uint32_t resource_id, void *ptr, size_t buf_len)
{
    status_t err;

    LTRACEF("gdev %p, resource_id %u, ptr %p, buf_len %zu\n", gdev, resource_id, ptr, buf_len);

    DEBUG_ASSERT(gdev);
    DEBUG_ASSERT(ptr);

    /* hold the lock so only one message is outstanding at a time */
    mutex_acquire(&gdev->lock);

    /* construct the request */
    struct {
        struct virtio_gpu_resource_attach_backing req;
        struct virtio_gpu_mem_entry mem;
    } req;
    memset(&req, 0, sizeof(req));

    req.req.hdr.type = VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING;
    req.req.resource_id = resource_id;
    req.req.nr_entries = 1;

    paddr_t pa;
    pa = vaddr_to_paddr(ptr);
    req.mem.addr = pa;
    req.mem.length = buf_len;

    /* send the command and get a response */
    struct virtio_gpu_ctrl_hdr *res;
    err = send_command_response(gdev, &req, sizeof(req), (void **)&res, sizeof(*res));
    DEBUG_ASSERT(err == NO_ERROR);

    /* see if we got a valid response */
    LTRACEF("response type 0x%x\n", res->type);
    err = (res->type == VIRTIO_GPU_RESP_OK_NODATA) ? NO_ERROR : ERR_NO_MEMORY;

    /* release the lock */
    mutex_release(&gdev->lock);

    return err;
}
Example #27
int vm_map(addr_t virt, addr_t phys, unsigned attr, unsigned opt)
{
	addr_t vpage = (virt&PAGE_MASK)/0x1000;
	unsigned vp4 = PML4_IDX(vpage);
	unsigned vpdpt = PDPT_IDX(vpage);
	unsigned vdir = PAGE_DIR_IDX(vpage);
	unsigned vtbl = PAGE_TABLE_IDX(vpage);
	if(kernel_task && !(opt & MAP_PDLOCKED))
		mutex_acquire(&pd_cur_data->lock);
	page_dir_t *pd;
	page_table_t *pt;
	pdpt_t *pdpt;
	pml4_t *pml4;
	
	pml4 = (pml4_t *)((kernel_task && current_task) ? current_task->pd : kernel_dir);
	if(!pml4[vp4])
		pml4[vp4] = pm_alloc_page_zero() | PAGE_PRESENT | PAGE_WRITE | (attr & PAGE_USER);
	pdpt = (addr_t *)((pml4[vp4]&PAGE_MASK) + PHYS_PAGE_MAP);
	if(!pdpt[vpdpt])
		pdpt[vpdpt] = pm_alloc_page_zero() | PAGE_PRESENT | PAGE_WRITE | (attr & PAGE_USER);
	pd = (addr_t *)((pdpt[vpdpt]&PAGE_MASK) + PHYS_PAGE_MAP);
	if(!pd[vdir])
		pd[vdir] = pm_alloc_page_zero() | PAGE_PRESENT | PAGE_WRITE | (attr & PAGE_USER);
	pt = (addr_t *)((pd[vdir]&PAGE_MASK) + PHYS_PAGE_MAP);
	
	pt[vtbl] = (phys & PAGE_MASK) | attr;
	asm("invlpg (%0)"::"r" (virt));
	if(!(opt & MAP_NOCLEAR)) 
		memset((void *)(virt&PAGE_MASK), 0, 0x1000);
	
	#if CONFIG_SMP
	if(kernel_task) {
		if(IS_KERN_MEM(virt))
			send_ipi(LAPIC_ICR_SHORT_OTHERS, 0, LAPIC_ICR_LEVELASSERT | LAPIC_ICR_TM_LEVEL | IPI_TLB);
		else if((IS_THREAD_SHARED_MEM(virt) && pd_cur_data->count > 1))
			send_ipi(LAPIC_ICR_SHORT_OTHERS, 0, LAPIC_ICR_LEVELASSERT | LAPIC_ICR_TM_LEVEL | IPI_TLB);
	}
	#endif
	if(kernel_task && !(opt & MAP_PDLOCKED))
		mutex_release(&pd_cur_data->lock);
	return 0;
}
Example #28
/*
 * Spinlock-based trylock: take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1))
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}
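For context, a sketch of how a trylock result is typically consumed (do_work is a hypothetical placeholder; mutex_trylock/mutex_unlock are the standard kernel entry points this slowpath sits behind): the caller must be prepared for failure, since a trylock never sleeps.

/* hypothetical caller: opportunistic, non-blocking use of a mutex */
static void poll_shared_state(struct mutex *lock)
{
	if (mutex_trylock(lock)) {	/* returns 1 on success, 0 on contention */
		do_work();		/* lock held: safe to touch the shared state */
		mutex_unlock(lock);
	}
	/* on contention, skip the work rather than block */
}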
Example #29
static int semaphore_consumer(void *unused)
{
	unsigned int iterations = 0;

	mutex_acquire(&sem_test_mutex);
	if (sem_remaining_its >= sem_thread_max_its) {
		iterations = rand();
		iterations %= sem_thread_max_its;
	} else {
		iterations = sem_remaining_its;
	}
	sem_remaining_its -= iterations;
	mutex_release(&sem_test_mutex);

	printf("semaphore consumer %p starting up, running for %u iterations\n", get_current_thread(), iterations);
	for (unsigned int x = 0; x < iterations; x++)
		sem_wait(&sem);
	printf("semaphore consumer %p done\n", get_current_thread());
	atomic_add(&sem_threads, -1);
	return 0;
}
Example #30
size_t fs_dirent_reclaim_lru(void)
{
	mutex_acquire(dirent_cache_lock);
	struct queue_item *qi = queue_dequeue_item(dirent_lru);
	if(!qi) {
		mutex_release(dirent_cache_lock);
		return 0;
	}
	struct dirent *dir = qi->ent;
	struct inode *parent = dir->parent;
	rwlock_acquire(&parent->lock, RWL_WRITER);
	atomic_fetch_add(&parent->count, 1);
	if(dir && dir->count == 0) {
		/* reclaim this node */
		vfs_inode_del_dirent(parent, dir);
		vfs_dirent_destroy(dir);
	}
	atomic_fetch_sub(&parent->count, 1);
	rwlock_release(&parent->lock, RWL_WRITER);
	mutex_release(dirent_cache_lock);
	return sizeof(struct dirent);
}