Example #1
/*
 * In this function, you will be modifying the run queue, which can
 * also be modified from an interrupt context. In order for thread
 * contexts and interrupt contexts to play nicely, you need to mask
 * all interrupts before reading or modifying the run queue and
 * re-enable interrupts when you are done. This is analogous to
 * locking a mutex before modifying a data structure shared between
 * threads. Masking interrupts is accomplished by setting the IPL to
 * high.
 *
 * Once you have masked interrupts, you need to remove a thread from
 * the run queue and switch into its context from the currently
 * executing context.
 *
 * If there are no threads on the run queue (assuming you do not have
 * any bugs), then all kernel threads are waiting for an interrupt
 * (for example, when reading from a block device, a kernel thread
 * will wait while the block device seeks). You will need to re-enable
 * interrupts and wait for one to occur in the hopes that a thread
 * gets put on the run queue from the interrupt context.
 *
 * The proper way to do this is with the intr_wait call. See
 * interrupt.h for more details on intr_wait.
 *
 * Note: When waiting for an interrupt, don't forget to modify the
 * IPL. If the IPL of the currently executing thread masks the
 * interrupt you are waiting for, the interrupt will never happen, and
 * your run queue will remain empty. This is very subtle, but
 * _EXTREMELY_ important.
 *
 * Note: Don't forget to set curproc and curthr. When sched_switch
 * returns, a different thread should be executing than the thread
 * which was executing when sched_switch was called.
 *
 * Note: The IPL is process specific.
 */
void
sched_switch(void)
{
        /* Mask all interrupts while we examine and modify the run queue. */
        uint8_t curr_intr_level = apic_getipl();
        apic_setipl(IPL_HIGH);

        if (list_empty(&kt_runq.tq_list)) {
                /* No runnable threads: unmask interrupts and wait for one
                 * to put a thread on the run queue, then try again. */
                apic_setipl(IPL_LOW);
                intr_wait();
                apic_setipl(curr_intr_level);
                sched_switch();
        } else {
                kthread_t *old_thr = curthr;
                kthread_t *next;

                dbg(DBG_THR, "PROCESS FORMERLY EXECUTING: %s\n",
                    curthr->kt_proc->p_comm);

                /* Dequeue until we find a thread that has not exited; if
                 * only exited threads were queued, start over. */
                do {
                        if (kt_runq.tq_size == 0) {
                                apic_setipl(curr_intr_level);
                                sched_switch();
                                return;
                        }
                        next = ktqueue_dequeue(&kt_runq);
                } while (next->kt_state == KT_EXITED);

                curthr = next;
                curproc = next->kt_proc;

                if (curthr->kt_cancelled == 1) {
                        dbg(DBG_THR, "%s was cancelled\n", curproc->p_comm);
                        do_exit(0);
                }

                apic_setipl(curr_intr_level);
                dbg(DBG_THR, "PROCESS CURRENTLY EXECUTING: %s\n",
                    curthr->kt_proc->p_comm);

                context_switch(&old_thr->kt_ctx, &curthr->kt_ctx);
        }
}
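
The race tests later in this collection pair sched_make_runnable with sched_switch to yield the CPU cooperatively. A minimal sketch of that pattern as a helper (the helper name is hypothetical; sched_make_runnable and sched_switch are the primitives shown in these examples):

/* Put the current thread back on the run queue, then switch away;
 * execution resumes here when the scheduler picks this thread again. */
static void
yield_current(void)
{
        sched_make_runnable(curthr);
        sched_switch();
}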
Example #2
/*
 * A thread function that exhibits a race condition on the race global.  It
 * loads, increments, and stores race, context switching between each line of C.
 */
void *racer_test(int arg1, void *arg2) {
    int local;

    sched_switch();
    local = race;
    sched_switch();
    local++;
    sched_switch();
    race = local;
    sched_switch();
    do_exit(race);
    return NULL;
}
Example #3
static int open_fifofs(struct vfs_node_t* node, int oflag, mode_t mode)
{
	(void)mode;

	struct fifofs_hdr_t* hdr = fifofs_nodes[node->status.st_ino];

	/* POSIX leaves behavior undefined when using O_RDWR, so we treat
	 * it as O_RDONLY */
	if ((oflag & O_ACCMODE) == O_RDWR)
		oflag &= ~O_WRONLY; /* Remove writable flag */

	if (oflag & O_RDONLY) /* Opening the read end */
	{
		hdr->readers++;

		/* Do not block if we are in non-blocking mode */
		if (!(oflag & O_NONBLOCK))
		{
			/* Wait until there is a writer */
			while(hdr->writers < 1)
				sched_switch();
		}

		/* Now we have a writer so we can return */
		return get_free_fd(3);
	}
	else if (oflag & O_WRONLY) /* Opening the write end */
	{
		hdr->writers++;

		/* Wait until there is a reader */
		while(hdr->readers < 1)
		{
			/* Or fail if we are in non-blocking mode */
			if (oflag & O_NONBLOCK)
			{
				errno = ENXIO;
				return -1;
			}

			sched_switch();
		}

		/* Now we have a reader -> return */
		return get_free_fd(3);
	}
	else
		return -1;
}
Example #4
/*
 * A thread function that shows the race condition on the race global
 * being removed by a mutex.  It loads, increments, and stores race,
 * context switching between each line of C after acquiring the mutex.
 * The mutex acquire cannot be cancelled.
 */
void *mutex_uncancellable_test(int arg1, void *arg2) {
    int local;

    kmutex_lock(&mutex); 
    sched_switch();
    local = race;
    sched_switch();
    local++;
    sched_switch();
    race = local;
    sched_switch();
    kmutex_unlock(&mutex);
    do_exit(race);
    return NULL;
}
Example #5
/*
 * Updates the thread's state and enqueues it on the given
 * queue. Returns when the thread has been woken up with wakeup_on or
 * broadcast_on.
 *
 * Use the private queue manipulation functions above.
 */
void
sched_sleep_on(ktqueue_t *q)
{
	curthr->kt_state = KT_SLEEP;
	ktqueue_enqueue(q,curthr);
	sched_switch();
}
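
For context, the wakeup side named in the comment can be sketched with the same queue primitives (a sketch, assuming the tq_size field and the ktqueue_dequeue/sched_make_runnable calls seen elsewhere in these examples):

/* Wake one thread sleeping on q, or return NULL if q is empty. */
kthread_t *
sched_wakeup_on(ktqueue_t *q)
{
	kthread_t *thr;
	if (q->tq_size == 0)
		return NULL;
	thr = ktqueue_dequeue(q);
	sched_make_runnable(thr); /* KT_RUN, back onto kt_runq */
	return thr;
}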
Example #6
int pthread_cond_broadcast(struct pthread_cond_t *cond)
{
    unsigned old_state = disableIRQ();

    int other_prio = -1;

    while (1) {
        queue_node_t *head = queue_remove_head(&(cond->queue));
        if (head == NULL) {
            break;
        }

        tcb_t *other_thread = (tcb_t *) sched_threads[head->data];
        if (other_thread) {
            other_prio = max_prio(other_prio, other_thread->priority);
            sched_set_status(other_thread, STATUS_PENDING);
        }
        head->data = -1u;
    }

    restoreIRQ(old_state);

    if (other_prio >= 0) {
        sched_switch(sched_active_thread->priority, other_prio);
    }

    return 0;
}
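
Caller-side usage follows the standard POSIX pattern: change the shared state under the mutex, then broadcast (lock, cond, and ready are hypothetical names):

pthread_mutex_lock(&lock);
ready = 1;                       /* publish the state change */
pthread_cond_broadcast(&cond);   /* wake every thread waiting on cond */
pthread_mutex_unlock(&lock);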
Example #7
int msg_reply(msg_t *m, msg_t *reply)
{
    unsigned state = irq_disable();

    thread_t *target = (thread_t*) sched_threads[m->sender_pid];
    assert(target != NULL);

    if (target->status != STATUS_REPLY_BLOCKED) {
        DEBUG("msg_reply(): %" PRIkernel_pid ": Target \"%" PRIkernel_pid
              "\" not waiting for reply.", sched_active_thread->pid, target->pid);
        irq_restore(state);
        return -1;
    }

    DEBUG("msg_reply(): %" PRIkernel_pid ": Direct msg copy.\n",
          sched_active_thread->pid);
    /* copy msg to target */
    msg_t *target_message = (msg_t*) target->wait_data;
    *target_message = *reply;
    sched_set_status(target, STATUS_PENDING);
    uint16_t target_prio = target->priority;
    irq_restore(state);
    sched_switch(target_prio);

    return 1;
}
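
On the other side of this exchange, RIOT's msg_send_receive() is what places a thread in STATUS_REPLY_BLOCKED so that msg_reply() can unblock it. A minimal client sketch (server_pid and the message contents are hypothetical):

msg_t m, reply;
m.type = 0;
/* blocks in STATUS_REPLY_BLOCKED until the server calls msg_reply(&m, ...) */
msg_send_receive(&m, &reply, server_pid);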
Example #8
/* This test is part of the reader-writer test: it reads a value and
 * checks that it is the same value that is expected.
 */
void *reader(int arg1, void *arg2) {
	kmutex_lock(&reader_mutex);
	kmutex_lock(&mutex);
	num_readers++;
	if(num_readers == 1)
	{
		kmutex_lock(&writer_mutex);
	}
	kmutex_unlock(&mutex);
	kmutex_unlock(&reader_mutex);

	sched_switch();

	dbg_print("reader_test: Expected to Read: %i, Did Read: %i\n", arg1, rwval);
	KASSERT(arg1 == rwval);

	kmutex_lock(&mutex);
	num_readers--;
	if(num_readers == 0)
	{
		kmutex_unlock(&writer_mutex);
	}
	kmutex_unlock(&mutex);

	return NULL;
}
Example #9
File: sched.c Project: tdz/opsys
/**
 * This function is the high-level entry point for the thread-schedule
 * interrupt. It triggers switches to other runnable threads.
 */
static timeout_t
alarm_handler(struct alarm* alarm)
{
    sched_switch(cpuid());

    return sched_timeout();
}
Example #10
void mutex_unlock(struct mutex_t *mutex)
{
    unsigned irqstate = disableIRQ();
    DEBUG("mutex_unlock(): val: %u pid: %" PRIkernel_pid "\n", ATOMIC_VALUE(mutex->val), sched_active_pid);

    if (ATOMIC_VALUE(mutex->val) == 0) {
        /* the mutex was not locked */
        restoreIRQ(irqstate);
        return;
    }

    priority_queue_node_t *next = priority_queue_remove_head(&(mutex->queue));
    if (!next) {
        /* the mutex was locked and no thread was waiting for it */
        ATOMIC_VALUE(mutex->val) = 0;
        restoreIRQ(irqstate);
        return;
    }

    thread_t *process = (thread_t *) next->data;
    DEBUG("mutex_unlock: waking up waiting thread %" PRIkernel_pid "\n", process->pid);
    sched_set_status(process, STATUS_PENDING);

    uint16_t process_priority = process->priority;
    restoreIRQ(irqstate);
    sched_switch(process_priority);
}
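
The matching lock side pairs with this in the usual way (a sketch; m and shared are hypothetical):

mutex_lock(&m);     /* may block and enqueue us on m.queue */
shared++;           /* critical section */
mutex_unlock(&m);   /* wakes one waiter and may sched_switch() to it */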
Example #11
/*
 * Updates the thread's state and enqueues it on the given
 * queue. Returns when the thread has been woken up with wakeup_on or
 * broadcast_on.
 *
 * Use the private queue manipulation functions above.
 */
void
sched_sleep_on(ktqueue_t *q)
{
        /* Update the thread state. curthr is currently running, so it is
         * not on the run queue and does not need to be removed from it. */
        curthr->kt_state = KT_SLEEP;

        /* Enqueue on the wait queue and record what we are waiting on. */
        ktqueue_enqueue(q, curthr);
        curthr->kt_wchan = q;

        /* Switch to another thread; we resume here once wakeup_on or
         * broadcast_on makes this thread runnable again. */
        sched_switch();
}
Example #12
/*
 * Similar to sleep on, but the sleep can be cancelled.
 *
 * Don't forget to check the kt_cancelled flag at the correct times.
 *
 * Use the private queue manipulation functions above.
 */
int
sched_cancellable_sleep_on(ktqueue_t *q)
{
        /* Do not sleep at all if the thread has already been cancelled. */
        if (curthr->kt_cancelled)
                return -EINTR;

        /* Update the thread state and enqueue on the wait queue. curthr
         * is currently running, so it is not on the run queue. */
        curthr->kt_state = KT_SLEEP_CANCELLABLE;
        ktqueue_enqueue(q, curthr);

        /* Record what we are waiting on. */
        curthr->kt_wchan = q;

        /* Switch away; we resume here after a wakeup or a cancellation. */
        sched_switch();

        /* The wakeup may have been a cancellation. */
        if (curthr->kt_cancelled)
                return -EINTR;
        return 0;
}
Example #13
/*
 * Similar to sleep on, but the sleep can be cancelled.
 *
 * Don't forget to check the kt_cancelled flag at the correct times.
 *
 * Use the private queue manipulation functions above.
 */
int
sched_cancellable_sleep_on(ktqueue_t *q)
{
	/*----Kernel1:PROCS:sched_cancellable:Begins---*/
	/* exit point here if it's been cancelled */
	if (curthr->kt_cancelled)
	{
		kthread_exit((void*)-EINTR);
		return -EINTR; /* not reached; kthread_exit does not return */
	}

	curthr->kt_state = KT_SLEEP_CANCELLABLE;
	ktqueue_enqueue(q,curthr);
	sched_switch();

	/* exit point here if it's been cancelled */
	if (curthr->kt_cancelled)
	{
		kthread_exit((void*)-EINTR);
		return -EINTR; /* not reached; kthread_exit does not return */
	}
	
	return 0;
	/*----Ends---*/
}
Example #14
static ssize_t read_fifofs(int fd, void* buf, size_t size)
{
	struct vfs_node_t* node = cur_task->files[fd].vfs_node;
	struct fifofs_hdr_t* hdr = fifofs_nodes[node->status.st_ino];
	struct fifofs_data_t* curnode = hdr->data;

	/* According to POSIX (and Oracle's man pages), reading from an empty fifo: */
	while (node->status.st_size == 0)
	{
		/* Return 0 if write side is closed */
		if (hdr->writers == 0)
			return 0;

		if (cur_task->files[fd].oflag & O_NONBLOCK)
		{
			errno = EAGAIN;
			return -1;
		}

		/* If O_NONBLOCK is not set and the fifo is empty, block
		 * until size > 0 or there are no writers */
		sched_switch();
	}

	/* Data is split into 256-byte blocks */
	/* Offset in the first node */
	off_t offset = hdr->read_at % 0x100;
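	/* Worked example: with hdr->read_at == 700, the next byte lives at
	 * offset 700 % 256 == 188 of the current head node, since the two
	 * earlier nodes were freed as they were drained. */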

	/* How much we can read */
	if (size > (size_t)node->status.st_size)
		size = node->status.st_size;

	/* Read the data to buf */
	size_t read = 0;
	while (read < size)
	{
		for (; offset < 256; offset++)
		{
			/* Stop once the requested/available amount is read */
			if (read >= size)
				goto exit;

			*(uint8_t*)(buf+read) = curnode->data[offset];
			read++;
			hdr->read_at++;
			node->status.st_size--;
		}

		/* The whole node is read, free it and go on to the next node */
		struct fifofs_data_t* temp = curnode->next;
		kfree(curnode);
		curnode = temp;
		hdr->write_at -= 256;
		offset = 0;
	}

exit:
	return read;
}
Example #15
/*
 * Updates the thread's state and enqueues it on the given
 * queue. Returns when the thread has been woken up with wakeup_on or
 * broadcast_on.
 *
 * Use the private queue manipulation functions above.
 */
void
sched_sleep_on(ktqueue_t *q)
{
        ktqueue_enqueue(q, curthr);
        curthr->kt_state = KT_SLEEP;
        sched_switch();
}
Example #16
/*
 * A thread function that shows the race condition on the race global
 * being removed by a mutex.  It loads, increments, and stores race,
 * context switching between each line of C after acquiring the mutex.
 * The mutex acquire can be cancelled, but the function prints an error
 * message if the acquire succeeds - it expects to be cancelled.
 */
void *mutex_test_cancelme(int arg1, void *arg2) {
    int local;

    if ( kmutex_lock_cancellable(&mutex) ) 
	do_exit(0);
    dbg_print("Mutex not cancelled? %d", curproc->p_pid);
    sched_switch();
    local = race;
    sched_switch();
    local++;
    sched_switch();
    race = local;
    sched_switch();
    kmutex_unlock(&mutex);
    do_exit(race);
    return NULL;
}
Example #17
/*
 * A thread function that exhibits a race condition on the race global.  It
 * loads, increments, and stores race, context switching between each line of C.
 */
void *racer_test(int arg1, void *arg2) {
    int local;

    sched_make_runnable(curthr);
    sched_switch();
    local = race;
    sched_make_runnable(curthr);
    sched_switch();
    local++;
    sched_make_runnable(curthr);
    sched_switch();
    race = local;
    sched_make_runnable(curthr);
    sched_switch();
    do_exit(race);
    return NULL;
}
Example #18
void *init_child9(int arg1, void *arg2)
{
        if (curtest == TEST_3) {
                /* Yield repeatedly until this thread is cancelled. */
                while (curthr->kt_cancelled != 1) {
                        sched_make_runnable(curthr);
                        sched_switch();
                }
        }
        return NULL;
}
Example #19
/*
 * Updates the thread's state and enqueues it on the given
 * queue. Returns when the thread has been woken up with wakeup_on or
 * broadcast_on.
 *
 * Use the private queue manipulation functions above.
 */
void
sched_sleep_on(ktqueue_t *q)
{
        kthread_t *temp = curthr;
        temp->kt_state = KT_SLEEP;
        ktqueue_enqueue(q, temp);
        sched_switch();
        return;
}
Example #20
/*
 * Updates the thread's state and enqueues it on the given
 * queue. Returns when the thread has been woken up with wakeup_on or
 * broadcast_on.
 *
 * Use the private queue manipulation functions above.
 */
void
sched_sleep_on(ktqueue_t *q)
{
    curthr->kt_state = KT_SLEEP;
    ktqueue_enqueue(q, curthr);
    dbg(DBG_PRINT, "(GRADING1D 1)\n");
    sched_switch();

}
Example #21
int pthread_create(pthread_t *newthread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
{
    pthread_thread_t *pt = calloc(1, sizeof(pthread_thread_t));

    kernel_pid_t pthread_pid = insert(pt);
    if (pthread_pid == KERNEL_PID_UNDEF) {
        free(pt);
        return -1;
    }
    *newthread = pthread_pid;

    pt->status = attr && attr->detached ? PTS_DETACHED : PTS_RUNNING;
    pt->start_routine = start_routine;
    pt->arg = arg;

    bool autofree = attr == NULL || attr->ss_sp == NULL || attr->ss_size == 0;
    size_t stack_size = attr && attr->ss_size > 0 ? attr->ss_size : PTHREAD_STACKSIZE;
    void *stack = autofree ? malloc(stack_size) : attr->ss_sp;
    pt->stack = autofree ? stack : NULL;

    if (autofree && pthread_reaper_pid != KERNEL_PID_UNDEF) {
        mutex_lock(&pthread_mutex);
        if (pthread_reaper_pid != KERNEL_PID_UNDEF) {
            /* volatile pid to overcome problems with double checking */
            volatile kernel_pid_t pid = thread_create(pthread_reaper_stack,
                                             PTHREAD_REAPER_STACKSIZE,
                                             0,
                                             THREAD_CREATE_STACKTEST,
                                             pthread_reaper,
                                             NULL,
                                             "pthread-reaper");
            pthread_reaper_pid = pid;
        }
        mutex_unlock(&pthread_mutex);
    }

    pt->thread_pid = thread_create(stack,
                                   stack_size,
                                   THREAD_PRIORITY_MAIN,
                                   THREAD_CREATE_WOUT_YIELD |
                                   THREAD_CREATE_STACKTEST,
                                   pthread_start_routine,
                                   pt,
                                   "pthread");
    if (pt->thread_pid == KERNEL_PID_UNDEF) {
        free(pt->stack);
        free(pt);
        pthread_sched_threads[pthread_pid-1] = NULL;
        return -1;
    }

    sched_switch(THREAD_PRIORITY_MAIN);

    return 0;
}
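
Usage is the standard POSIX pattern (worker and tid are hypothetical):

void *worker(void *arg) {
    return arg;
}

pthread_t tid;
if (pthread_create(&tid, NULL, worker, NULL) == 0) {
    void *ret;
    pthread_join(tid, &ret); /* reclaims the thread unless it is detached */
}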
Example #22
/* This test writes to the value and makes sure there are no race
 * conditions. */
void *writer(int arg1, void *arg2) {
	kmutex_lock(&reader_mutex);
	kmutex_lock(&writer_mutex);

	sched_switch();

	rwval++;

	kmutex_unlock(&writer_mutex);
	kmutex_unlock(&reader_mutex);
	return NULL;
}
Example #23
/*
 * A thread function that shows the race condition on the race global
 * being removed by a mutex.  It loads, increments, and stores race,
 * context switching between each line of C after acquiring the mutex.
 * The mutex acquire can be cancelled, but an error message will be
 * printed if this happens.
 */
void *mutex_test(int arg1, void *arg2) {
    int local;

    if ( kmutex_lock_cancellable(&mutex) ) {
	dbg_print("Mutex cancelled? %d", curproc->p_pid);
	do_exit(-1);
    }
    sched_make_runnable(curthr);
    sched_switch();
    local = race;
    sched_make_runnable(curthr);
    sched_switch();
    local++;
    sched_make_runnable(curthr);
    sched_switch();
    race = local;
    sched_make_runnable(curthr);
    sched_switch();
    kmutex_unlock(&mutex);
    do_exit(race);
    return NULL;
}
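
A typical driver for these racer functions creates two threads, lets them interleave, and checks the final value of race (a sketch; the proc_create and kthread_create signatures are assumptions based on the Weenix conventions visible above):

proc_t *p1 = proc_create("racer1");
kthread_t *t1 = kthread_create(p1, mutex_test, 0, NULL);
proc_t *p2 = proc_create("racer2");
kthread_t *t2 = kthread_create(p2, mutex_test, 0, NULL);
sched_make_runnable(t1);
sched_make_runnable(t2);
/* The parent then waits for both children; with the mutex in place the
 * final value of race is 2, without it one increment can be lost. */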
Example #24
/*
 * Similar to sleep on, but the sleep can be cancelled.
 *
 * Don't forget to check the kt_cancelled flag at the correct times.
 *
 * Use the private queue manipulation functions above.
 */
int
sched_cancellable_sleep_on(ktqueue_t *q)
{
	if (curthr->kt_cancelled)
		return -EINTR;
	curthr->kt_state = KT_SLEEP_CANCELLABLE;
	ktqueue_enqueue(q, curthr);
	
	sched_switch();
	
	if (curthr->kt_cancelled)
		return -EINTR;
	return 0;
}
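
A cancellable mutex acquire, as exercised by kmutex_lock_cancellable in the race tests above, sits naturally on top of this function. A sketch, assuming Weenix-style km_holder and km_waitq fields (not shown in this collection):

int
kmutex_lock_cancellable(kmutex_t *mtx)
{
	KASSERT(curthr != mtx->km_holder);
	if (mtx->km_holder != NULL) {
		/* -EINTR propagates if the sleep is cancelled; otherwise the
		 * unlocking thread has handed the mutex to us on wakeup. */
		return sched_cancellable_sleep_on(&mtx->km_waitq);
	}
	mtx->km_holder = curthr;
	return 0;
}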
Example #25
sys_thread_t sys_thread_new(const char *name, lwip_thread_fn thread, void *arg,
                            int stacksize, int prio)
{
    kernel_pid_t res;
    char *stack = mem_malloc((size_t)stacksize);

    if (stack == NULL) {
        return ERR_MEM;
    }
    if ((res = thread_create(stack, stacksize, prio, THREAD_CREATE_STACKTEST,
                             (thread_task_func_t)thread, arg, name)) <= KERNEL_PID_UNDEF) {
        abort();
    }
    sched_switch((char)prio);
    return res;
}
Example #26
int sem_post(sem_t *sem)
{
    int old_state = disableIRQ();
    ++sem->value;

    queue_node_t *next = queue_remove_head(&sem->queue);
    if (next) {
        tcb_t *next_process = (tcb_t*) next->data;
        DEBUG("%s: waking up %s\n", active_thread->name, next_process->name);
        sched_set_status(next_process, STATUS_PENDING);
        sched_switch(active_thread->priority, next_process->priority);
    }

    restoreIRQ(old_state);
    return 1;
}
Example #27
/*
 * Similar to sleep on, but the sleep can be cancelled.
 *
 * Don't forget to check the kt_cancelled flag at the correct times.
 *
 * Use the private queue manipulation functions above.
 */
int
sched_cancellable_sleep_on(ktqueue_t *q)
{
        if (curthr->kt_cancelled) {
                return -EINTR;
        }
        ktqueue_enqueue(q, curthr);
        curthr->kt_state = KT_SLEEP_CANCELLABLE;
        sched_switch();

        if (curthr->kt_cancelled) {
                return -EINTR;
        }
        return 0;
}
Example #28
/*
 * Similar to sleep on, but the sleep can be cancelled.
 *
 * Don't forget to check the kt_cancelled flag at the correct times.
 *
 * Use the private queue manipulation functions above.
 */
int
sched_cancellable_sleep_on(ktqueue_t *q)
{
        /* Return immediately if the thread has already been cancelled. */
        if (curthr->kt_cancelled == 1)
                return -EINTR;

        curthr->kt_state = KT_SLEEP_CANCELLABLE;
        ktqueue_enqueue(q, curthr);
        sched_switch();

        /* Re-check: the wakeup may have been a cancellation. */
        if (curthr->kt_cancelled == 1)
                return -EINTR;
        return 0;
}
Example #29
static int
process_sample_event(event_t *event)
{
	struct sample_data data;
	struct trace_entry *te;

	memset(&data, 0, sizeof(data));

	event__parse_sample(event, sample_type, &data);

	if (sample_type & PERF_SAMPLE_TIME) {
		if (!first_time || first_time > data.time)
			first_time = data.time;
		if (last_time < data.time)
			last_time = data.time;
	}

	te = (void *)data.raw_data;
	if (sample_type & PERF_SAMPLE_RAW && data.raw_size > 0) {
		char *event_str;
		struct power_entry *pe;

		pe = (void *)te;

		event_str = perf_header__find_event(te->type);

		if (!event_str)
			return 0;

		if (strcmp(event_str, "power:power_start") == 0)
			c_state_start(data.cpu, data.time, pe->value);

		if (strcmp(event_str, "power:power_end") == 0)
			c_state_end(data.cpu, data.time);

		if (strcmp(event_str, "power:power_frequency") == 0)
			p_state_change(data.cpu, data.time, pe->value);

		if (strcmp(event_str, "sched:sched_wakeup") == 0)
			sched_wakeup(data.cpu, data.time, data.pid, te);

		if (strcmp(event_str, "sched:sched_switch") == 0)
			sched_switch(data.cpu, data.time, te);
	}
	return 0;
}
Example #30
File: pipe.c Project: A-L-E-X/RIOT
static ssize_t pipe_rw(ringbuffer_t *rb,
                       void *buf,
                       size_t n,
                       tcb_t **other_op_blocked,
                       tcb_t **this_op_blocked,
                       ringbuffer_op_t ringbuffer_op)
{
    if (n == 0) {
        return 0;
    }

    while (1) {
        unsigned old_state = disableIRQ();

        unsigned count = ringbuffer_op(rb, buf, n);

        if (count > 0) {
            tcb_t *other_thread = *other_op_blocked;
            int other_prio = -1;
            if (other_thread) {
                *other_op_blocked = NULL;
                other_prio = other_thread->priority;
                sched_set_status(other_thread, STATUS_PENDING);
            }

            restoreIRQ(old_state);

            if (other_prio >= 0) {
                sched_switch(other_prio);
            }

            return count;
        }
        else if (*this_op_blocked || inISR()) {
            restoreIRQ(old_state);
            return 0;
        }
        else {
            *this_op_blocked = (tcb_t *) sched_active_thread;

            sched_set_status((tcb_t *) sched_active_thread, STATUS_SLEEPING);
            restoreIRQ(old_state);
            thread_yield();
        }
    }
}
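
The public read and write entry points can then share pipe_rw by swapping which side may block and which side gets woken. A sketch (the pipe_t field names are assumptions, not necessarily RIOT's exact layout):

ssize_t pipe_read(pipe_t *pipe, void *buf, size_t n)
{
    return pipe_rw(pipe->rb, buf, n,
                   &pipe->write_blocked, /* a blocked writer can be woken */
                   &pipe->read_blocked,  /* we may block as the reader */
                   (ringbuffer_op_t) ringbuffer_get);
}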