Example #1
static void task_func1(long dummy)
{
	rt_printk("Starting task1, waiting on the conditional variable to be 1.\n");
	rt_mutex_lock(&mtx);
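	/*
	 * Wait until another task raises cond_data to 1; rt_cond_wait
	 * drops the mutex while sleeping and re-acquires it on wakeup.
	 */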
	while(cond_data < 1) {
		rt_cond_wait(&cond, &mtx);
	}
	rt_mutex_unlock(&mtx);
	if(cond_data == 1) {
		rt_printk("task1, conditional variable signalled, value: %d.\n", cond_data);
	}
	rt_printk("task1 signals after setting data to 2.\n");
	rt_printk("task1 waits for a broadcast.\n");
	rt_mutex_lock(&mtx);
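	/* Set the new value and signal, then wait on the same mutex for the broadcast. */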
	cond_data = 2;
	rt_cond_signal(&cond);
	while(cond_data < 3) {
		rt_cond_wait(&cond, &mtx);
	}
	rt_mutex_unlock(&mtx);
	if(cond_data == 3) {
		rt_printk("task1, conditional variable broadcasted, value: %d.\n", cond_data);
	}
	rt_printk("Ending task1.\n");
	atomic_inc(&cleanup);
}
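For reference, the wait loop above is the standard condition-variable pattern: the predicate is re-checked in a loop because wakeups can be spurious. Below is a self-contained userspace sketch of the same pattern using POSIX threads rather than the RTAI-style primitives in these examples; it is an illustration, not code from the original module.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int cond_data = 0;

static void *waiter(void *arg)
{
	pthread_mutex_lock(&mtx);
	while (cond_data < 1)			/* re-check: wakeups can be spurious */
		pthread_cond_wait(&cond, &mtx);	/* atomically unlocks, sleeps, relocks */
	printf("waiter saw cond_data = %d\n", cond_data);
	pthread_mutex_unlock(&mtx);
	return NULL;
}

static void *signaler(void *arg)
{
	pthread_mutex_lock(&mtx);
	cond_data = 1;
	pthread_cond_signal(&cond);		/* wake one waiter */
	pthread_mutex_unlock(&mtx);
	return NULL;
}

int main(void)
{
	pthread_t w, s;
	pthread_create(&w, NULL, waiter, NULL);
	pthread_create(&s, NULL, signaler, NULL);
	pthread_join(w, NULL);
	pthread_join(s, NULL);
	return 0;
}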
Example #2
/**
 * Author: Christian Muller
 *
 * Fires a new shot from the allied ships
 */
void player_shots_handler() {
	int i, k;

	/* For each active ship */
	for(k=0; k<NB_PLAYER; k++) {

		/* If it is not active, move on to the next one */
		if(!player[k].enable)
			continue;

		/* Walk through the array of shots */
		for (i = 0; i < NB_MAX_SHOTS; i++) {

			rt_mutex_lock(&mutex_shots, TM_INFINITE);

			/* If the current shot is inactive */
			if (shot[i].enable == 0) {
				/* Initialize and activate it */
				shot[i].x = player[k].x + SHIP_SIZE / 2;
				shot[i].y = player[k].y;
				shot[i].direction = DIRECTION_UP; // Moves up
				shot[i].enable = 1;
				rt_mutex_unlock(&mutex_shots);
				break;
			} else {
				rt_mutex_unlock(&mutex_shots);
			}
		}
	}
}
Example #3
static int ach_ch_open(struct inode *inode, struct file *file)
{
    int ret = 0;
    struct ach_ch_device *device;

    /* Synchronize to protect refcounting */
    if (rt_mutex_lock_interruptible(&ctrl_data.lock)) {
        ret = -ERESTARTSYS;
        goto out;
    }

    device = &ctrl_data.devices[iminor(inode)];

    if (unlikely(device->minor != iminor(inode))) {
        printk(KERN_ERR "ach: Internal data problem\n");
        ret = -ERESTARTSYS;
        goto out_unlock;
    }

    file->private_data = ach_ch_file_alloc(device);

    if (!file->private_data) {
        printk(KERN_ERR "ach: Failed allocating file data\n");
        ret = -ENOBUFS;
        goto out_unlock;
    }

    KDEBUG( "ach: opened device %s\n", ach_ch_device_name(device) );

out_unlock:
    rt_mutex_unlock(&ctrl_data.lock);
out:
    return ret;
}
Example #4
static int rpm_clk_set_rate(struct clk *clk, unsigned long rate)
{
	struct rpm_clk *r = to_rpm_clk(clk);
	unsigned long this_khz, this_sleep_khz;
	int rc = 0;

	rt_mutex_lock(&rpm_clock_lock);

	if (r->enabled) {
		uint32_t value;
		struct rpm_clk *peer = r->peer;
		unsigned long peer_khz = 0, peer_sleep_khz = 0;

		to_active_sleep_khz(r, rate, &this_khz, &this_sleep_khz);

		/* Take peer clock's rate into account only if it's enabled. */
		if (peer->enabled)
			to_active_sleep_khz(peer, peer->c.rate,
					&peer_khz, &peer_sleep_khz);

		value = max(this_khz, peer_khz);
		rc = clk_rpmrs_set_rate_active(r, value);
		if (rc)
			goto out;

		value = max(this_sleep_khz, peer_sleep_khz);
		rc = clk_rpmrs_set_rate_sleep(r, value);
	}

out:
	rt_mutex_unlock(&rpm_clock_lock);

	return rc;
}
Example #5
static void rpm_clk_unprepare(struct clk *clk)
{
	struct rpm_clk *r = to_rpm_clk(clk);

	rt_mutex_lock(&rpm_clock_lock);

	if (r->c.rate) {
		uint32_t value;
		struct rpm_clk *peer = r->peer;
		unsigned long peer_khz = 0, peer_sleep_khz = 0;
		int rc;

		/* Take peer clock's rate into account only if it's enabled. */
		if (peer->enabled)
			to_active_sleep_khz(peer, peer->c.rate,
				&peer_khz, &peer_sleep_khz);

		value = r->branch ? !!peer_khz : peer_khz;
		rc = clk_rpmrs_set_rate_active(r, value);
		if (rc)
			goto out;

		value = r->branch ? !!peer_sleep_khz : peer_sleep_khz;
		rc = clk_rpmrs_set_rate_sleep(r, value);
	}
	r->enabled = false;
out:
	rt_mutex_unlock(&rpm_clock_lock);

	return;
}
Example #6
static inline void __down_complete(struct semaphore *sem)
{
	int count = atomic_dec_return(&sem->count);

	if (unlikely(count > 0))
		rt_mutex_unlock(&sem->lock);
}
Example #7
/************************************************************
 *
 * main state machine worker function
 *
 ************************************************************/
static void hdmi_state_machine_worker(struct work_struct *work)
{
	int pending_hpd_evt, cur_hpd;

	/* Observe and clear the pending flag and latch the current HPD state.
	 */
	rt_mutex_lock(&work_lock);
	pending_hpd_evt = work_state.pending_hpd_evt;
	work_state.pending_hpd_evt = 0;
	cur_hpd = tegra_dc_hpd(work_state.hdmi->dc);
	rt_mutex_unlock(&work_lock);

	pr_info("%s (tid %p): state %d (%s), hpd %d, pending_hpd_evt %d\n",
		__func__, current, work_state.state,
		state_names[work_state.state], cur_hpd, pending_hpd_evt);

	if (pending_hpd_evt) {
		/* If we were woken up because of HPD activity, just schedule
		 * the next appropriate task and get out.
		 */
		hdmi_state_machine_handle_hpd_l(cur_hpd);
	} else if (work_state.state < ARRAY_SIZE(state_machine_dispatch)) {
		dispatch_func_t func = state_machine_dispatch[work_state.state];

		if (NULL == func)
			pr_warn("NULL state machine handler while in state %d; how did we end up here?",
				work_state.state);
		else
			func(work_state.hdmi);
	} else {
		pr_warn("hdmi state machine worker scheduled unexpected state %d",
			work_state.state);
	}
}
Example #8
static void hdmi_state_machine_set_state_l(int target_state, int resched_time)
{
	rt_mutex_lock(&work_lock);

	pr_info("%s: switching from state %d (%s) to state %d (%s)\n",
		__func__, work_state.state, state_names[work_state.state],
		target_state, state_names[target_state]);
	work_state.state = target_state;

	/* If the pending_hpd_evt flag is already set, don't bother to
	 * reschedule the state machine worker.  We should be able to assert
	 * that there is a worker callback already scheduled, and that it is
	 * scheduled to run immediately.  This is particularly important when
	 * making the transition to the steady state ENABLED or DISABLED states.
	 * If an HPD event occurs while the worker is in flight, after the
	 * worker checks the state of the pending HPD flag, and then the state
	 * machine transitions to ENABLE or DISABLED, the system would end up
	 * canceling the callback to handle the HPD event were it not for this
	 * check.
	 */
	if (!work_state.pending_hpd_evt)
		hdmi_state_machine_sched_work_l(resched_time);

	rt_mutex_unlock(&work_lock);
}
Example #9
/**
 * msm_bus_scale_unregister_client() - Unregister the client from the bus driver
 * @cl: Handle to the client
 */
void msm_bus_scale_unregister_client(uint32_t cl)
{
	int i;
	struct msm_bus_client *client = (struct msm_bus_client *)(cl);
	bool warn = false;
	if (IS_ERR_OR_NULL(client))
		return;

	for (i = 0; i < client->pdata->usecase->num_paths; i++) {
		if ((client->pdata->usecase[0].vectors[i].ab) ||
			(client->pdata->usecase[0].vectors[i].ib)) {
			warn = true;
			break;
		}
	}

	if (warn) {
		int num_paths = client->pdata->usecase->num_paths;
		int ab[num_paths], ib[num_paths];
		WARN(1, "%s called unregister with non-zero vectors\n",
			client->pdata->name);

		/*
		 * Save client values and zero them out to
		 * cleanly unregister
		 */
		for (i = 0; i < num_paths; i++) {
			ab[i] = client->pdata->usecase[0].vectors[i].ab;
			ib[i] = client->pdata->usecase[0].vectors[i].ib;
			client->pdata->usecase[0].vectors[i].ab = 0;
			client->pdata->usecase[0].vectors[i].ib = 0;
		}

		msm_bus_scale_client_update_request(cl, 0);

		/* Restore client vectors if required for re-registering. */
		for (i = 0; i < num_paths; i++) {
			client->pdata->usecase[0].vectors[i].ab = ab[i];
			client->pdata->usecase[0].vectors[i].ib = ib[i];
		}
	} else if (client->curr != 0)
		msm_bus_scale_client_update_request(cl, 0);

	MSM_BUS_DBG("Unregistering client %d\n", cl);
#ifdef SEC_FEATURE_USE_RT_MUTEX
	rt_mutex_lock(&msm_bus_lock);
#else
	mutex_lock(&msm_bus_lock);
#endif
	msm_bus_scale_client_reset_pnodes(cl);
	msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_UNREGISTER, cl);
#ifdef SEC_FEATURE_USE_RT_MUTEX
	rt_mutex_unlock(&msm_bus_lock);
#else
	mutex_unlock(&msm_bus_lock);
#endif
	kfree(client->src_pnode);
	kfree(client);
}
Example #10
static enum ach_status
chan_lock( ach_channel_t *chan )
{
    int i = rt_mutex_lock_interruptible(&chan->shm->sync.mutex);
    if( -EINTR == i ) return ACH_EINTR;
    if( i ) return ACH_BUG;
    if( chan->cancel ) {
        rt_mutex_unlock(&chan->shm->sync.mutex);
        return ACH_CANCELED;
    }
    if( chan->shm->sync.dirty ) {
        rt_mutex_unlock(&chan->shm->sync.mutex);
        ACH_ERRF("ach bug: channel dirty on lock acquisition\n");
        return ACH_CORRUPT;
    }
    return ACH_OK;
}
Example #11
static void task_func4(long dummy)
{
	rt_printk("Starting task4, signalling after setting data to 1, then waits for a broadcast.\n");
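	/* Set cond_data under the mutex, then signal once it is released. */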
	rt_mutex_lock(&mtx);
	cond_data = 1;
	rt_mutex_unlock(&mtx);
	rt_cond_signal(&cond);
	rt_mutex_lock(&mtx);
	while(cond_data < 3) {
		rt_cond_wait(&cond, &mtx);
	}
	rt_mutex_unlock(&mtx);
	if(cond_data == 3) {
		rt_printk("task4, conditional variable broadcasted, value: %d.\n", cond_data);
	}
	rt_printk("Ending task4.\n");
	atomic_inc(&cleanup);
}
Example #12
static void i2c_parent_unlock_bus(struct i2c_adapter *adapter,
				  unsigned int flags)
{
	struct i2c_mux_priv *priv = adapter->algo_data;
	struct i2c_adapter *parent = priv->muxc->parent;

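	/* Unlock in reverse order of the lock side: parent bus first, then the mux lock. */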
	i2c_unlock_bus(parent, flags);
	rt_mutex_unlock(&parent->mux_lock);
}
Example #13
static enum ach_status unrdlock(struct ach_header *shm)
{
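    /* Snapshot the dirty flag while the mutex is still held, then release it. */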
    int dirty = shm->sync.dirty;
    rt_mutex_unlock(&shm->sync.mutex);
    if( dirty ) {
        ACH_ERRF("ach bug: channel dirty on read unlock\n");
        return ACH_CORRUPT;
    }
    return ACH_OK;
}
Example #14
void hdmi_state_machine_set_pending_hpd(void)
{
	rt_mutex_lock(&work_lock);

	/* We always schedule work any time there is a pending HPD event */
	work_state.pending_hpd_evt = 1;
	hdmi_state_machine_sched_work_l(0);

	rt_mutex_unlock(&work_lock);
}
Example #15
int hdmi_state_machine_get_state(void)
{
	int ret;

	rt_mutex_lock(&work_lock);
	ret = work_state.state;
	rt_mutex_unlock(&work_lock);

	return ret;
}
Example #16
msg_data_t channel_recv(channel_t *ch)
{
	msg_data_t ret = {-1, {0}};
	msg_t *msg;

	rt_mutex_lock(&ch->lock);
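	/* Dequeue under the lock; the message is freed only after unlocking. */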
	if ((msg = Q_GET_HEAD(&ch->msgs))) {
		Q_REMOVE(&ch->msgs, msg, q_link);
		ret = msg->data;

		rt_mutex_unlock(&ch->lock);
		free(msg);
	} else {
		rt_mutex_unlock(&ch->lock);
	}

	return ret;
}
Example #17
static enum ach_status unwrlock(struct ach_header *shm)
{
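    /* Clear the dirty flag set for the write, drop the mutex, then wake all readers sleeping on the queue. */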
    int dirty = shm->sync.dirty;
    shm->sync.dirty = 0;
    rt_mutex_unlock(&shm->sync.mutex);
    wake_up_all(&shm->sync.readq);
    if( !dirty ) {
        ACH_ERRF("ach bug: channel not dirty on write unlock\n");
        return ACH_CORRUPT;
    }
    return ACH_OK;
}
Example #18
void channel_send(channel_t *ch, int tag, data_t payload)
{
	msg_t *msg = calloc(1, sizeof(msg_t));
	if (!msg) fail(1, "allocating msg");
	msg->data.tag = tag;
	msg->data.payload = payload;

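	/* Append the message under the lock and notify any registered waiter. */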
	rt_mutex_lock(&ch->lock);
	Q_INSERT_TAIL(&ch->msgs, msg, q_link);
	handle_event(&ch->ev);
	rt_mutex_unlock(&ch->lock);
}
Example #19
static void task_func3(long dummy)
{
	rt_printk("Starting task3, waiting on the conditional variable to be 3 with a 2 s timeout.\n");
	rt_mutex_lock(&mtx);
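	/* Wait for cond_data to reach 3, giving up if rt_cond_timedwait returns a negative value (timeout). */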
	while(cond_data < 3) {
		if (rt_cond_timedwait(&cond, &mtx, rt_get_time() + nano2count(2000000000LL)) < 0) {
			break;
		}
	}
	rt_mutex_unlock(&mtx);
	if(cond_data < 3) {
		rt_printk("task3, timed out, conditional variable value: %d.\n", cond_data);
	}
	rt_mutex_lock(&mtx);
	cond_data = 3;
	rt_mutex_unlock(&mtx);
	rt_printk("task3 broadcasts after setting data to 3.\n");
	rt_cond_broadcast(&cond);
	rt_printk("Ending task3.\n");
	atomic_inc(&cleanup);
}
Example #20
void register_channel_event(thread_t *thread, event_t *e)
{
	channel_t *ch = e->u.ch;

	// if there is data in the channel, keep running
	rt_mutex_lock(&ch->lock);
	if (Q_GET_HEAD(&ch->msgs)) {
		make_runnable(thread);
	} else {
		e->thread = thread;
	}
	rt_mutex_unlock(&ch->lock);
}
Example #21
static int i2c_parent_trylock_bus(struct i2c_adapter *adapter,
				  unsigned int flags)
{
	struct i2c_mux_priv *priv = adapter->algo_data;
	struct i2c_adapter *parent = priv->muxc->parent;

	if (!rt_mutex_trylock(&parent->mux_lock))
		return 0;	/* mux_lock not locked, failure */
	if (i2c_trylock_bus(parent, flags))
		return 1;	/* parent locked too, success */
	rt_mutex_unlock(&parent->mux_lock);
	return 0;		/* parent not locked, failure */
}
Example #22
static void task_func2(long dummy)
{
	rt_printk("Starting task2, waiting on the conditional variable to be 2.\n");
	rt_mutex_lock(&mtx);
	while(cond_data < 2) {
		rt_cond_wait(&cond, &mtx);
	}
	rt_mutex_unlock(&mtx);
	if(cond_data == 2) {
		rt_printk("task2, conditional variable signalled, value: %d.\n", cond_data);
	}
	rt_printk("task2 waits for a broadcast.\n");
	rt_mutex_lock(&mtx);
	while(cond_data < 3) {
		rt_cond_wait(&cond, &mtx);
	}
	rt_mutex_unlock(&mtx);
	if(cond_data == 3) {
		rt_printk("task2, conditional variable broadcasted, value: %d.\n", cond_data);
	}
	rt_printk("Ending task2.\n");
	atomic_inc(&cleanup);
}
Example #23
static int rpm_clk_prepare(struct clk *clk)
{
	struct rpm_clk *r = to_rpm_clk(clk);
	uint32_t value;
	int rc = 0;
	unsigned long this_khz, this_sleep_khz;
	unsigned long peer_khz = 0, peer_sleep_khz = 0;
	struct rpm_clk *peer = r->peer;

	rt_mutex_lock(&rpm_clock_lock);

	to_active_sleep_khz(r, r->c.rate, &this_khz, &this_sleep_khz);

	/* Don't send requests to the RPM if the rate has not been set. */
	if (this_khz == 0)
		goto out;

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep_khz(peer, peer->c.rate,
				&peer_khz, &peer_sleep_khz);

	value = max(this_khz, peer_khz);
	if (r->branch)
		value = !!value;

	rc = clk_rpmrs_set_rate_active(r, value);
	if (rc)
		goto out;

	value = max(this_sleep_khz, peer_sleep_khz);
	if (r->branch)
		value = !!value;

	rc = clk_rpmrs_set_rate_sleep(r, value);
	if (rc) {
		/* Undo the active set vote and restore it to peer_khz */
		value = peer_khz;
		rc = clk_rpmrs_set_rate_active(r, value);
	}

out:
	if (!rc)
		r->enabled = true;

	rt_mutex_unlock(&rpm_clock_lock);

	return rc;
}
Example #24
void fastcall rt_up_read_non_owner(struct rw_semaphore *rwsem)
{
	unsigned long flags;
	/*
	 * Read locks within the self-held write lock succeed.
	 */
	spin_lock_irqsave(&rwsem->lock.wait_lock, flags);
	if (rt_mutex_real_owner(&rwsem->lock) == current && rwsem->read_depth) {
		spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
		rwsem->read_depth--;
		return;
	}
	spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
	rt_mutex_unlock(&rwsem->lock);
}
Example #25
void  rt_up(struct semaphore *sem)
{
	int count;

	/*
	 * Disable preemption to make sure a highprio trylock-er cannot
	 * preempt us here and get into an infinite loop:
	 */
	preempt_disable();
	count = atomic_inc_return(&sem->count);
	/*
	 * If we did the 0 -> 1 transition then we are the ones to unlock it:
	 */
	if (likely(count == 1))
		rt_mutex_unlock(&sem->lock);
	preempt_enable();
}
Example #26
void player_died()
{
	int i, j;

	player[0].lifes--;
	hp_update_leds();

	for(i=1; i<NB_PLAYER; i++) {
		player[i].enable = 0;
	}

	if(player[0].lifes == 0)
		return;

	player[0].enable = 1;
	player[0].x = LCD_MAX_X / 2 - 8;
	player[0].y = LCD_MAX_Y - 20;

	for(i = 0; i < NB_MAX_SHOTS; i++)
	{
		shot[i].enable = 0;
	}

	rt_mutex_lock(&mutex_ennemi, TM_INFINITE);

	// initialize the enemy ships
	for (i = 0; i < nbVagueEnnemis; i++) {

		for (j = 0; j < nbEnnemiParVague; j++) {

			// Reset the positions
			ennemi[i * nbEnnemiParVague + j].x = xStart + (j * (SHIP_SIZE
					+ X_SPACE));
			ennemi[i * nbEnnemiParVague + j].y = yStart + (i * (SHIP_SIZE
					+ Y_SPACE));

		}
	}
	rt_mutex_unlock(&mutex_ennemi);
}
Example #27
static enum ach_status
rdlock_wait(ach_channel_t * chan, const struct timespec *reltime)
{
    int res;
    struct ach_header *shm = chan->shm;
    volatile uint64_t *c_seq = &chan->seq_num, *s_seq = &shm->last_seq;
    volatile unsigned int *cancel = &chan->cancel;
    enum ach_status r;

    for(;;) {
        /* do the wait */
        if (reltime->tv_sec != 0 || reltime->tv_nsec != 0) {
            res = wait_event_interruptible_timeout( shm->sync.readq,
                                                    ((*c_seq != *s_seq) || *cancel),
                                                    timespec_to_jiffies(reltime) );
            if (0 == res) return ACH_TIMEOUT;
        } else {
            res = wait_event_interruptible( shm->sync.readq,
                                            ((*c_seq != *s_seq) || *cancel) );
        }

        /* check what happened */
        if (-ERESTARTSYS == res) return ACH_EINTR;
        if( res < 0 ) {
            ACH_ERRF("ach bug: rdlock_wait(), "
                     "could not wait for event, "
                     "timeout: (%lu,%ld), result=%d\n",
                     reltime->tv_sec, reltime->tv_nsec, res);
            return ACH_BUG;
        }

        r = chan_lock( chan );
        /* Check condition with the lock held in case someone
         * else flushed the channel, or someone else unset the
         * cancel */
        if( (*c_seq != *s_seq) || (ACH_OK != r) || *cancel ) {
            return r;
        }
        rt_mutex_unlock(&shm->sync.mutex);
    }
}
Example #28
/**********************************************************************************
 * ach channel device driver
 **********************************************************************************/
static int ach_ch_close(struct inode *inode, struct file *file)
{
    struct ach_ch_file *ch_file;
    int ret = 0;

    KDEBUG("ach: in ach_ch_close (inode %d)\n", iminor(inode));

    /* Synchronize to protect refcounting */
    if (rt_mutex_lock_interruptible(&ctrl_data.lock)) {
        ret = -ERESTARTSYS;
        goto out;
    }

    ch_file = (struct ach_ch_file *)file->private_data;
    kref_put( &ch_file->shm->refcount, ach_shm_release );
    kfree(ch_file);

    rt_mutex_unlock(&ctrl_data.lock);

out:
    return ret;
}
Example #29
static unsigned int ach_ch_poll(struct file *file, poll_table * wait)
{
    unsigned int mask = POLLOUT | POLLWRNORM;
    struct ach_ch_file *ch_file = (struct ach_ch_file *)file->private_data;
    struct ach_header *shm = ch_file->shm;
    enum ach_status r;

    /* KDEBUG1("In ach_ch_poll (minor=%d)\n", ch_file->dev->minor); */

    /* Add ourselves wait queue */
    poll_wait(file, &shm->sync.readq, wait);

    /* Lock channel and check what happened */
    r = chan_lock(ch_file);
    if( ACH_OK != r ) return -get_errno(r);

    if (ch_file->seq_num != shm->last_seq) {
        mask |= POLLIN | POLLRDNORM;
    }

    rt_mutex_unlock(&shm->sync.mutex);

    return mask;
}
Example #30
static int handle_op(struct test_thread_data *td, int lockwakeup)
{
	int i, id, ret = -EINVAL;

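	/* td->mutexes[] state encoding used below: 0 = unlocked, 1 = lock pending, 4 = locked. */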
	switch(td->opcode) {

	case RTTEST_NOP:
		return 0;

	case RTTEST_LOCKCONT:
		td->mutexes[td->opdata] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		return 0;

	case RTTEST_RESET:
		for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {
			if (td->mutexes[i] == 4) {
				rt_mutex_unlock(&mutexes[i]);
				td->mutexes[i] = 0;
			}
		}

		if (!lockwakeup && td->bkl == 4) {
#ifdef CONFIG_LOCK_KERNEL
			unlock_kernel();
#endif
			td->bkl = 0;
		}
		return 0;

	case RTTEST_RESETEVENT:
		atomic_set(&rttest_event, 0);
		return 0;

	default:
		if (lockwakeup)
			return ret;
	}

	switch(td->opcode) {

	case RTTEST_LOCK:
	case RTTEST_LOCKNOWAIT:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
			return ret;

		td->mutexes[id] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		rt_mutex_lock(&mutexes[id]);
		td->event = atomic_add_return(1, &rttest_event);
		td->mutexes[id] = 4;
		return 0;

	case RTTEST_LOCKINT:
	case RTTEST_LOCKINTNOWAIT:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
			return ret;

		td->mutexes[id] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
		td->event = atomic_add_return(1, &rttest_event);
		td->mutexes[id] = ret ? 0 : 4;
		return ret ? -EINTR : 0;

	case RTTEST_UNLOCK:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
			return ret;

		td->event = atomic_add_return(1, &rttest_event);
		rt_mutex_unlock(&mutexes[id]);
		td->event = atomic_add_return(1, &rttest_event);
		td->mutexes[id] = 0;
		return 0;

	case RTTEST_LOCKBKL:
		if (td->bkl)
			return 0;
		td->bkl = 1;
#ifdef CONFIG_LOCK_KERNEL
		lock_kernel();
#endif
		td->bkl = 4;
		return 0;

	case RTTEST_UNLOCKBKL:
		if (td->bkl != 4)
			break;
#ifdef CONFIG_LOCK_KERNEL
		unlock_kernel();
#endif
		td->bkl = 0;
		return 0;

	default:
		break;
	}
	return ret;
}