Example #1
//MUST PROVIDE LOCKED MUTEX!
int mesa_cond_wait(mesa_cond_t* cond, int mutex_id)
{
	printf(1,"%d | [%s] start \n",kthread_id(), __FUNCTION__);
	cond->numOfThreadsWaiting++;

	//releasing given mutex
	if( kthread_mutex_unlock(mutex_id) < 0){
		printf(1,"%d | [%s] failed, unlocking of given mutex %d failed \n",kthread_id(), __FUNCTION__, mutex_id);
		return -1;
	}

	//waiting till signaled
	if( kthread_mutex_lock(cond->inner_mutex_id) < 0){
		printf(1,"%d | [%s] failed, locking of  inner mutex %d after signaled failed \n",kthread_id(), __FUNCTION__, cond->inner_mutex_id);
		return -1;
	}

	//taking back given mutex
	if( kthread_mutex_lock(mutex_id) < 0){
		printf(1,"%d | [%s] failed, locking of  mutex %d after signaled failed \n",kthread_id(), __FUNCTION__, mutex_id);
		return -1;
	}

	cond->numOfThreadsWaiting--;

	printf(1,"%d | [%s] success \n",kthread_id(), __FUNCTION__);
	return 0;
}
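The comment at the top spells out the caller contract: the mutex identified by mutex_id must already be locked when mesa_cond_wait is entered, and it is locked again on return. A minimal caller sketch under that assumption (the shared ready flag and the consume wrapper are hypothetical, introduced only for illustration):

//hypothetical caller: waits until *ready becomes nonzero, then consumes it
int consume(mesa_cond_t* cond, int mutex_id, int* ready)
{
	if( kthread_mutex_lock(mutex_id) < 0)	//contract: mutex must be held before waiting
		return -1;
	while( !*ready ){	//Mesa semantics: re-check the predicate after every wakeup
		if( mesa_cond_wait(cond, mutex_id) < 0){
			kthread_mutex_unlock(mutex_id);
			return -1;
		}
	}
	*ready = 0;
	return kthread_mutex_unlock(mutex_id);
}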
Example #2
void hfree(void *ptr)
{
    unsigned long addr = (unsigned long)ptr;
    unsigned long index = (addr - heap_start) / HALLOC_CHUNK_SIZE;
    int e = (int)(index / BITS_PER_ENTRY);
    int b = (int)(index % BITS_PER_ENTRY);
    
    kthread_mutex_lock(&heap_mutex);
    
    bit_clear(e, b);
    if (cur_last_bitmap_entry > e) {
        goto done;
    }
    
    // Resize the heap
    for (; e; e--) {
        if (entry_inuse(e)) {
            cur_last_bitmap_entry = e;
            break;
        }
    }
    
    resize_heap();
    
done:
    atomic_membar();
    kthread_mutex_unlock(&heap_mutex);
}
Example #3
void * thread2 (void){
	kthread_mutex_lock(lock);
	printf(1,"I will wake him up!! %d", kthread_id());
	kthread_cond_signal(cond);
	kthread_exit();
	return (void *) 0;
}
Example #4
bool CopyToUser(void* userdst_ptr, const void* ksrc_ptr, size_t count)
{
	uintptr_t userdst = (uintptr_t) userdst_ptr;
	uintptr_t ksrc = (uintptr_t) ksrc_ptr;
	bool result = true;
	Process* process = CurrentProcess();
	assert(IsInProcessAddressSpace(process));
	kthread_mutex_lock(&process->segment_lock);
	while ( count )
	{
		struct segment* segment = FindSegment(process, userdst);
		if ( !segment || !(segment->prot & PROT_WRITE) )
		{
			errno = EFAULT;
			result = false;
			break;
		}
		size_t amount = count;
		size_t segment_available = segment->addr + segment->size - userdst;
		if ( segment_available < amount )
			amount = segment_available;
		memcpy((void*) userdst, (const void*) ksrc, amount);
		userdst += amount;
		ksrc += amount;
		count -= amount;
	}
	kthread_mutex_unlock(&process->segment_lock);
	return result;
}
Example #5
int
hoare_slots_monitor_takeslot(hoare_slots_monitor_t* monitor){
	if (kthread_mutex_lock(monitor->mutex_id) < 0){
		return -1;
	}
	
	if (monitor->count > 0){
		goto keepOnRolling;
	}
	
	if (hoare_cond_wait(monitor->hasElements, monitor->mutex_id) < 0){
		kthread_mutex_unlock(monitor->mutex_id);
		return -1;
	}
	
keepOnRolling:

	monitor->count--;
	if (monitor->count == 0){
		hoare_cond_signal(monitor->empty, monitor->mutex_id);
	}
	else{
		hoare_cond_signal(monitor->hasElements, monitor->mutex_id);
	}	
	return 0;
}
Example #6
int
main(void)
{
  printf(1,"testing mutex yield\n");

  mutexA = kthread_mutex_alloc();
  mutexB = kthread_mutex_alloc();

  kthread_mutex_lock(mutexA);

  void* stk1 = malloc(MAX_STACK_SIZE);
  void* stk2 = malloc(MAX_STACK_SIZE);
  int id1 = kthread_create(waitingThread, stk1, MAX_STACK_SIZE);
  int id2 = kthread_create(signalingThread, stk2, MAX_STACK_SIZE);

  kthread_join(id2);
  kthread_mutex_unlock(mutexA);
  kthread_join(id1);


  printf(1,"done\n");

  kthread_exit();
  return 0;
}
Example #7
void dlist_push_back(dlist_t *l, void *n)
{
    // Allocate a dlist node
    dlist_node_t *s = (dlist_node_t *)salloc(dlist_node_salloc_id);
    //assert(s);
    s->node = n;
    
    kthread_mutex_lock(&l->lock);
    
    // Push back
    s->next = NULL;
    s->prev = l->tail;
    
    if (l->tail) {
        l->tail->next = s;
    }
    l->tail = s;
    
    if (!l->head) {
        l->head = s;
    }
    
    l->count++;
    
    kthread_mutex_unlock(&l->lock);
}
Example #8
void* signalingThread(){
	kthread_mutex_lock(mutexB);
//	kthread_mutex_unlock(mutexB);

	kthread_mutex_yieldlock(mutexB,mutexA);
	kthread_exit();
	return 0;
}
Example #9
void dlist_remove(dlist_t *l, dlist_node_t *s)
{
    kthread_mutex_lock(&l->lock);
    dlist_detach(l, s);
    kthread_mutex_unlock(&l->lock);
    
    sfree(s);
}
Example #10
void * thread1 (void){
	printf(1, "I went to sleep %d\n", kthread_id());
	kthread_mutex_lock(lock);
	kthread_cond_wait(cond, lock);
	printf(1, "im came alive!! %d\n", kthread_id());
	kthread_mutex_unlock(lock);
	kthread_exit();
	return (void *) 0;
}
Example #11
int
hoare_slots_monitor_stopadding(hoare_slots_monitor_t* monitor){
	if (kthread_mutex_lock(monitor->mutex_id) < 0){
		return -1;
	}
	monitor->doneAddingSlots = 1;
	hoare_cond_signal(monitor->empty, monitor->mutex_id);		
	return 0;
}
Example #12
void* waitingThread(){

	kthread_mutex_lock(mutexA);

	kthread_mutex_unlock(mutexB);

	kthread_mutex_unlock(mutexA);

	kthread_exit();
	return 0;
}
Example #13
// NOTE: No overflow can happen here because the user can't make an infinitely
//       long string spanning the entire address space because the user can't
//       control the entire address space.
char* GetStringFromUser(const char* usersrc_str)
{
	uintptr_t usersrc = (uintptr_t) usersrc_str;
	size_t result_length = 0;
	Process* process = CurrentProcess();
	assert(IsInProcessAddressSpace(process));

	kthread_mutex_lock(&process->segment_lock);
	bool done = false;
	while ( !done )
	{
		uintptr_t current_at = usersrc + result_length;
		struct segment* segment = FindSegment(process, current_at);
		if ( !segment || !(segment->prot & PROT_READ) )
		{
			kthread_mutex_unlock(&process->segment_lock);
			return errno = EFAULT, (char*) NULL;
		}
		size_t segment_available = segment->addr + segment->size - current_at;
		volatile const char* str = (volatile const char*) current_at;
		size_t length = 0;
		for ( ; length < segment_available; length++ )
		{
			char c = str[length];
			if ( c == '\0' )
			{
				done = true;
				break;
			}
		}
		result_length += length;
	}

	char* result = new char[result_length + 1];
	if ( !result )
	{
		kthread_mutex_unlock(&process->segment_lock);
		return (char*) NULL;
	}

	memcpy(result, (const char*) usersrc, result_length);
	result[result_length] = '\0';

	// We have transferred a bunch of bytes from user-space and appended a zero
	// byte. This is a string. If no concurrent threads were modifying the
	// memory, this is the intended string. If the memory was modified, we got
	// potential garbage followed by a NUL byte. This is a string, but probably
	// not what was intended. If the garbage itself had a premature unexpected
	// NUL byte, that's okay, the garbage string just got truncated.

	kthread_mutex_unlock(&process->segment_lock);
	return result;
}
Example #14
void *halloc()
{
    int e;
    int b;
    int found = 0;
    
    int prev_entry;
    unsigned long addr;
    void *result = NULL;
    
    kthread_mutex_lock(&heap_mutex);
    
    // Find the first avail entry
    for (e = 0; e < BITMAP_ENTRY_COUNT; e++) {
        if (entry_avail(e)) {
            found = 1;
            break;
        }
    }
    
    if (!found) {
        goto done;
    }
    
    // Find and set the first avail bit
    b = find_first(e, 0);
    bit_set(e, b);
    
    // Resize the heap
    prev_entry = cur_last_bitmap_entry;
    if (e > cur_last_bitmap_entry) {
        cur_last_bitmap_entry = e;
    }
    
    if (!resize_heap()) {
        bit_clear(e, b);
        cur_last_bitmap_entry = prev_entry;
        
        goto done;
    }
    
    // Calculate the final address
    addr = heap_start + HALLOC_CHUNK_SIZE * (BITS_PER_ENTRY * e + b);
    result = (void *)addr;
    
done:
    atomic_membar();
    kthread_mutex_unlock(&heap_mutex);
    
    return result;
}
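Paired with hfree() from Example #2, this forms a bitmap-backed allocator of fixed HALLOC_CHUNK_SIZE chunks guarded by heap_mutex. A trivial usage sketch under those assumptions (the halloc_roundtrip wrapper is hypothetical):

static void halloc_roundtrip(void)
{
    void *chunk = halloc();          // NULL when no free chunk is left in the bitmap
    if (!chunk)
        return;
    memset(chunk, 0, HALLOC_CHUNK_SIZE);
    hfree(chunk);                    // clears the bit and may shrink the heap again
}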
Example #15
extern "C" void kthread_cond_wait(kthread_cond_t* cond, kthread_mutex_t* mutex)
{
	kthread_cond_elem_t elem;
	elem.next = NULL;
	elem.woken = 0;
	if ( cond->last ) { cond->last->next = &elem; }
	if ( !cond->last ) { cond->first = &elem; }
	cond->last = &elem;
	while ( !elem.woken )
	{
		kthread_mutex_unlock(mutex);
		Scheduler::Yield();
		kthread_mutex_lock(mutex);
	}
}
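The loop above releases the mutex, yields, and re-locks until a signaller sets elem.woken, so wakeups that arrive for any other reason are simply absorbed by the while condition. A caller-side sketch using the initializer macros seen in Example #25; the queue_items counter and wait_for_item wrapper are hypothetical, and a kthread_cond_signal counterpart is assumed but not shown in these examples:

static kthread_mutex_t queue_lock = KTHREAD_MUTEX_INITIALIZER;
static kthread_cond_t queue_nonempty = KTHREAD_COND_INITIALIZER;
static int queue_items = 0;

void wait_for_item(void)
{
	kthread_mutex_lock(&queue_lock);
	while ( queue_items == 0 )	// always re-check the predicate after waking up
		kthread_cond_wait(&queue_nonempty, &queue_lock);
	queue_items--;
	kthread_mutex_unlock(&queue_lock);
}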
Example #16
unsigned long ktls_alloc(size_t size)
{
    unsigned long result = 0;
    
    kthread_mutex_lock(&tls_mutex);
    
    if (cur_tls_offset + size <= tls_size) {
        result = cur_tls_offset;
        cur_tls_offset += size;
    }
    atomic_membar();
    
    kthread_mutex_unlock(&tls_mutex);
    
    return result;
}
Example #17
static asmlinkage void kapi_stdin_read_handler(msg_t *msg)
{
    unsigned long reply_mbox_id = msg->mailbox_id;
    
    unsigned long console_id = msg->params[0].value;
    struct console *con = get_console(console_id);
    
    unsigned long buf_size = msg->params[1].value;
    
    // Setup the msg
    msg_t *s = syscall_msg();
    s->mailbox_id = reply_mbox_id;
    
    if (con) {
        while (!con->stdin_buf.index) {
            sys_yield();
            atomic_membar();
        }
        
        kthread_mutex_lock(&con->stdin_mutex);
        
        if (buf_size >= con->stdin_buf.index) {
            msg_param_buffer(s, con->stdin_buf.data, con->stdin_buf.index);
            msg_param_value(s, (unsigned long)con->stdin_buf.index);
            con->stdin_buf.index = 0;
        } else {
            msg_param_buffer(s, con->stdin_buf.data, buf_size);
            msg_param_value(s, buf_size);
            con->stdin_buf.index -= buf_size;
            memcpy(con->stdin_buf.data, con->stdin_buf.data + buf_size, con->stdin_buf.index);
        }
        
        kthread_mutex_unlock(&con->stdin_mutex);
    } else {
        msg_param_buffer(s, NULL, 0);
        msg_param_value(s, 0);
    }
    
    // Issue the reply
    syscall_respond();
    
    // Should never reach here
    sys_unreahable();
}
Example #18
int stdin_write(unsigned long console_id, char *buf, size_t size)
{
    struct console *con = get_console(console_id);
    if (!con) {
        return 0;
    }
    
    kthread_mutex_lock(&con->stdin_mutex);
    
    if (con->stdin_buf.index + (int)size <= con->stdin_buf.size) {
        memcpy(&con->stdin_buf.data[con->stdin_buf.index], buf, size);
        con->stdin_buf.index += (int)size;
        atomic_membar();
    }
    
    kthread_mutex_unlock(&con->stdin_mutex);
    
    return (int)size;
}
Example #19
mesa_cond_t* mesa_cond_alloc()
{
	printf(1,"%d | [%s] start \n",kthread_id(), __FUNCTION__);

	mesa_cond_t* cv = malloc(sizeof(mesa_cond_t));
	if(cv == 0){
		printf(1,"%d | [%s] malloc cv failed \n",kthread_id(), __FUNCTION__);
		return 0;
	}
	cv->inner_mutex_id = kthread_mutex_alloc();
	cv->numOfThreadsWaiting = 0;

	//check the allocation result before touching the inner mutex
	if(cv->inner_mutex_id < 0){
		printf(1,"%d | [%s] inner mutex alloc failed \n",kthread_id(), __FUNCTION__);
		free(cv);
		return 0;
	}

	kthread_mutex_lock(cv->inner_mutex_id);

	printf(1,"%d | [%s] success \n",kthread_id(), __FUNCTION__);

	return cv;
}
Example #20
extern "C" unsigned long kthread_cond_wait_signal(kthread_cond_t* cond,
                                                  kthread_mutex_t* mutex)
{
	if ( Signal::IsPending() )
		return 0;
	kthread_cond_elem_t elem;
	elem.next = NULL;
	elem.woken = 0;
	if ( cond->last ) { cond->last->next = &elem; }
	if ( !cond->last ) { cond->first = &elem; }
	cond->last = &elem;
	while ( !elem.woken )
	{
		if ( Signal::IsPending() )
		{
			if ( cond->first == &elem )
			{
				cond->first = elem.next;
				if ( cond->last == &elem )
					cond->last = NULL;
			}
			else
			{
				kthread_cond_elem_t* prev = cond->first;
				while ( prev->next != &elem )
					prev = prev->next;
				prev->next = elem.next;
				if ( cond->last == &elem )
					cond->last = prev;
			}
			// Note that the thread still owns the mutex.
			return 0;
		}
		kthread_mutex_unlock(mutex);
		Scheduler::Yield();
		kthread_mutex_lock(mutex);
	}
	return 1;
}
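A return value of 0 means a signal is pending and, as the comment notes, the mutex is still held in that case, so the caller only has to record the interruption and fall through to its normal unlock. A minimal caller sketch mirroring the wait loop in Example #25 (the condition_met flag is hypothetical):

	kthread_mutex_t wakeup_mutex = KTHREAD_MUTEX_INITIALIZER;
	kthread_cond_t wakeup_cond = KTHREAD_COND_INITIALIZER;
	bool condition_met = false;	// hypothetical predicate set by the signalling side

	kthread_mutex_lock(&wakeup_mutex);
	while ( !condition_met )
	{
		if ( !kthread_cond_wait_signal(&wakeup_cond, &wakeup_mutex) )
		{
			errno = EINTR;	// interrupted by a signal; the mutex is still held
			break;
		}
	}
	kthread_mutex_unlock(&wakeup_mutex);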
Example #21
void UserCrashHandler(struct interrupt_context* intctx)
{
	Scheduler::SaveInterruptedContext(intctx, &CurrentThread()->registers);

	// Execute this crash handler with preemption on.
	Interrupt::Enable();

	// TODO: Also send signals for other types of user-space crashes.
	if ( intctx->int_no == 14 /* Page fault */ )
	{
		struct sigaction* act = &CurrentProcess()->signal_actions[SIGSEGV];
		kthread_mutex_lock(&CurrentProcess()->signal_lock);
		bool handled = act->sa_handler != SIG_DFL && act->sa_handler != SIG_IGN;
		if ( handled )
			CurrentThread()->DeliverSignalUnlocked(SIGSEGV);
		kthread_mutex_unlock(&CurrentProcess()->signal_lock);
		if ( handled )
		{
			assert(Interrupt::IsEnabled());
			return Signal::DispatchHandler(intctx, NULL);
		}
	}

	// Issue a diagnostic message to the kernel log concerning the crash.
	Log::PrintF("The current process (pid %ji `%s') crashed and was terminated:\n",
	            (intmax_t) CurrentProcess()->pid, CurrentProcess()->program_image_path);
	Log::PrintF("%s exception at ip=0x%zx (cr2=0x%zx, err_code=0x%zx)\n",
	            ExceptionName(intctx), ExceptionLocation(intctx), intctx->cr2,
	            intctx->err_code);

	// Exit the process with the right error code.
	// TODO: Send a SIGINT, SIGBUS, or whatever instead.
	CurrentProcess()->ExitThroughSignal(SIGSEGV);

	// Deliver signals to this thread so it can exit correctly.
	assert(Interrupt::IsEnabled());
	Signal::DispatchHandler(intctx, NULL);
}
Example #22
void *dlist_pop_back(dlist_t *l)
{
    dlist_node_t *s = NULL;
    void *n = NULL;
    
    kthread_mutex_lock(&l->lock);
    
    if (l->count) {
        //assert(l->next);
        
        s = l->tail;
        dlist_detach(l, s);
    }
    
    kthread_mutex_unlock(&l->lock);
    
    if (s) {
        n = s->node;
        sfree(s);
    }
    
    return n;
}
Example #23
int
hoare_slots_monitor_addslots(hoare_slots_monitor_t* monitor,int n){
	if (n <= 0 || monitor->doneAddingSlots || kthread_mutex_lock(monitor->mutex_id) < 0){
		return -1;
	}

	if (monitor->count > 0 && !monitor->doneAddingSlots){
		if (hoare_cond_wait(monitor->empty, monitor->mutex_id) < 0){
			kthread_mutex_unlock(monitor->mutex_id);
			return -1;
		}
	}

	if (monitor->doneAddingSlots){
		kthread_mutex_unlock(monitor->mutex_id);
		return -1;
	}
		
	monitor->count += n;
	
	hoare_cond_signal(monitor->hasElements,monitor->mutex_id);
	
	return 0;
}
Example #24
InterruptContext* Thread::handleSignal(InterruptContext* context) {
    kthread_mutex_lock(&signalMutex);
    assert(pendingSignals);
    assert(signalPending);

    // Choose the next unblocked pending signal.
    PendingSignal* pending;

    if (!sigismember(&signalMask, pendingSignals->siginfo.si_signo)) {
        pending = pendingSignals;
        pendingSignals = pending->next;
    } else {
        PendingSignal* currentSignal = pendingSignals;
        while (currentSignal->next && sigismember(&signalMask,
                currentSignal->next->siginfo.si_signo)) {
            currentSignal = currentSignal->next;
        }
        assert(currentSignal->next);

        pending = currentSignal->next;
        currentSignal->next = pending->next;
    }

    siginfo_t siginfo = pending->siginfo;
    delete pending;

    updatePendingSignals();
    kthread_mutex_unlock(&signalMutex);

    struct sigaction action = process->sigactions[siginfo.si_signo];
    assert(!(action.sa_handler == SIG_IGN || (action.sa_handler == SIG_DFL &&
            sigismember(&defaultIgnoredSignals, siginfo.si_signo))));

    if (action.sa_handler == SIG_DFL) {
        process->terminateBySignal(siginfo);
        sched_yield();
        __builtin_unreachable();
    }

    uintptr_t frameAddress = (context->STACK_POINTER - sizeof(SignalStackFrame))
            & ~0xF;
    SignalStackFrame* frame = (SignalStackFrame*) frameAddress;
    frame->siginfo = siginfo;

    frame->ucontext.uc_link = nullptr;
    frame->ucontext.uc_sigmask = signalMask;
    frame->ucontext.uc_stack.ss_sp = nullptr;
    frame->ucontext.uc_stack.ss_size = 0;
    frame->ucontext.uc_stack.ss_flags = SS_DISABLE;

    Registers::save(context, &frame->ucontext.uc_mcontext.__regs);
    Registers::saveFpu(&frame->ucontext.uc_mcontext.__fpuEnv);

#ifdef __i386__
    frame->signoParam = siginfo.si_signo;
    frame->infoParam = &frame->siginfo;
    frame->contextParam = &frame->ucontext;
    context->eflags &= ~0x400; // Direction Flag
#elif defined(__x86_64__)
    context->rdi = siginfo.si_signo;
    context->rsi = (uintptr_t) &frame->siginfo;
    context->rdx = (uintptr_t) &frame->ucontext;
    context->rflags &= ~0x400; // Direction Flag
#else
#  error "Signal handler parameters are unimplemented for this architecture."
#endif

    uintptr_t* sigreturnPointer = (uintptr_t*) frameAddress - 1;
    *sigreturnPointer = process->sigreturn;
    context->INSTRUCTION_POINTER = (uintptr_t) action.sa_sigaction;
    context->STACK_POINTER = (uintptr_t) sigreturnPointer;

    signalMask |= action.sa_mask | _SIGSET(siginfo.si_signo);
    return context;
}
Example #25
int sys_ppoll(struct pollfd* user_fds, size_t nfds,
              const struct timespec* user_timeout_ts,
              const sigset_t* user_sigmask)
{
	ioctx_t ctx; SetupKernelIOCtx(&ctx);

	struct timespec timeout_ts;
	if ( !FetchTimespec(&timeout_ts, user_timeout_ts) )
		return -1;

	if ( user_sigmask )
		return errno = ENOSYS, -1;

	struct pollfd* fds = CopyFdsFromUser(user_fds, nfds);
	if ( !fds ) { return -1; }

	PollNode* nodes = new PollNode[nfds];
	if ( !nodes ) { delete[] fds; return -1; }

	Process* process = CurrentProcess();

	kthread_mutex_t wakeup_mutex = KTHREAD_MUTEX_INITIALIZER;
	kthread_cond_t wakeup_cond = KTHREAD_COND_INITIALIZER;

	kthread_mutex_lock(&wakeup_mutex);

	int ret = -1;
	bool self_woken = false;
	bool remote_woken = false;
	bool unexpected_error = false;

	Timer timer;
	struct poll_timeout pts;
	if ( timespec_le(timespec_make(0, 1), timeout_ts) )
	{
		timer.Attach(Time::GetClock(CLOCK_MONOTONIC));
		struct itimerspec its;
		its.it_interval = timespec_nul();
		its.it_value = timeout_ts;
		pts.wake_mutex = &wakeup_mutex;
		pts.wake_cond = &wakeup_cond;
		pts.woken = &remote_woken;
		timer.Set(&its, NULL, 0, poll_timeout_callback, &pts);
	}

	size_t reqs;
	for ( reqs = 0; !unexpected_error && reqs < nfds; )
	{
		PollNode* node = nodes + reqs;
		if ( fds[reqs].fd < 0 )
		{
			fds[reqs].revents = POLLNVAL;
			// TODO: Should we set POLLNVAL in node->revents too? Should this
			// system call ignore this error and keep polling, or return to
			// user-space immediately? What if conditions are already true on
			// some of the file descriptors (those we have processed so far?)?
			node->revents = 0;
			reqs++;
			continue;
		}
		Ref<Descriptor> desc = process->GetDescriptor(fds[reqs].fd);
		if ( !desc ) { self_woken = unexpected_error = true; break; }
		node->events = fds[reqs].events | POLL__ONLY_REVENTS;
		node->revents = 0;
		node->wake_mutex = &wakeup_mutex;
		node->wake_cond = &wakeup_cond;
		node->woken = &remote_woken;
		reqs++;
		// TODO: How should errors be handled?
		if ( desc->poll(&ctx, node) == 0 )
			self_woken = true;
		else if ( errno == EAGAIN )
			errno = 0;
		else
			unexpected_error = self_woken = true;
	}

	if ( timeout_ts.tv_sec == 0 && timeout_ts.tv_nsec == 0 )
		self_woken = true;

	while ( !(self_woken || remote_woken) )
	{
		if ( !kthread_cond_wait_signal(&wakeup_cond, &wakeup_mutex) )
			errno = EINTR,
			self_woken = true;
	}

	kthread_mutex_unlock(&wakeup_mutex);

	for ( size_t i = 0; i < reqs; i++ )
		if ( 0 <= fds[i].fd )
			nodes[i].Cancel();

	if ( timespec_le(timespec_make(0, 1), timeout_ts) )
	{
		timer.Cancel();
		timer.Detach();
	}

	if ( !unexpected_error )
	{
		int num_events = 0;
		for ( size_t i = 0; i < reqs; i++ )
		{
			if ( fds[i].fd < -1 )
				continue;
			if ( (fds[i].revents = nodes[i].revents) )
				num_events++;
		}

		if ( CopyFdsToUser(user_fds, fds, nfds) )
			ret = num_events;
	}

	delete[] nodes;
	delete[] fds;
	return ret;
}