Example no. 1
static void task_func1(long dummy)
{
	rt_printk("Starting task1, waiting on the conditional variable to be 1.\n");
	rt_mutex_lock(&mtx);
	while(cond_data < 1) {
		rt_cond_wait(&cond, &mtx);
	}
	rt_mutex_unlock(&mtx);
	if(cond_data == 1) {
		rt_printk("task1, conditional variable signalled, value: %d.\n", cond_data);
	}
	rt_printk("task1 signals after setting data to 2.\n");
	rt_printk("task1 waits for a broadcast.\n");
	rt_mutex_lock(&mtx);
	cond_data = 2;
	rt_cond_signal(&cond);
	while(cond_data < 3) {
		rt_cond_wait(&cond, &mtx);
	}
	rt_mutex_unlock(&mtx);
	if(cond_data == 3) {
		rt_printk("task1, conditional variable broadcasted, value: %d.\n", cond_data);
	}
	rt_printk("Ending task1.\n");
	atomic_inc(&cleanup);
}
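The wait loop above (re-check the predicate after every wake-up) is the classic condition-variable pattern. For reference, a minimal user-space sketch of the same predicate-loop idiom using POSIX threads (names are illustrative, not taken from the example):

#include <pthread.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int cond_data;			/* guarded by mtx */

static void wait_for(int value)
{
	pthread_mutex_lock(&mtx);
	while (cond_data < value)	/* re-check: wake-ups may be spurious */
		pthread_cond_wait(&cond, &mtx);
	pthread_mutex_unlock(&mtx);
}

static void publish(int value)
{
	pthread_mutex_lock(&mtx);
	cond_data = value;
	pthread_mutex_unlock(&mtx);
	pthread_cond_broadcast(&cond);	/* wake every waiter, as task3 does below */
}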
Example no. 2
static int rpm_clk_set_rate(struct clk *clk, unsigned long rate)
{
	struct rpm_clk *r = to_rpm_clk(clk);
	unsigned long this_khz, this_sleep_khz;
	int rc = 0;

	rt_mutex_lock(&rpm_clock_lock);

	if (r->enabled) {
		uint32_t value;
		struct rpm_clk *peer = r->peer;
		unsigned long peer_khz = 0, peer_sleep_khz = 0;

		to_active_sleep_khz(r, rate, &this_khz, &this_sleep_khz);

		/* Take peer clock's rate into account only if it's enabled. */
		if (peer->enabled)
			to_active_sleep_khz(peer, peer->c.rate,
					&peer_khz, &peer_sleep_khz);

		value = max(this_khz, peer_khz);
		rc = clk_rpmrs_set_rate_active(r, value);
		if (rc)
			goto out;

		value = max(this_sleep_khz, peer_sleep_khz);
		rc = clk_rpmrs_set_rate_sleep(r, value);
	}

out:
	rt_mutex_unlock(&rpm_clock_lock);

	return rc;
}
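Examples 2, 3 and 19 follow the same pattern: each clock votes two values, one for the RPM's active set and one for its sleep set, and aggregates its own vote with its peer's via max(). A hedged sketch of what the to_active_sleep_khz() helper might look like; its real body is not in the listing, and the active_only field is an assumption:

/* Hypothetical sketch of the helper used above; the real implementation
 * is not shown in the listing.  'active_only' is an assumed field meaning
 * the vote should be dropped while the subsystem sleeps. */
static void to_active_sleep_khz(struct rpm_clk *r, unsigned long rate,
				unsigned long *khz, unsigned long *sleep_khz)
{
	*khz = rate;				/* active-set vote */
	*sleep_khz = r->active_only ? 0 : rate;	/* sleep-set vote */
}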
Example no. 3
static void rpm_clk_unprepare(struct clk *clk)
{
	struct rpm_clk *r = to_rpm_clk(clk);

	rt_mutex_lock(&rpm_clock_lock);

	if (r->c.rate) {
		uint32_t value;
		struct rpm_clk *peer = r->peer;
		unsigned long peer_khz = 0, peer_sleep_khz = 0;
		int rc;

		/* Take peer clock's rate into account only if it's enabled. */
		if (peer->enabled)
			to_active_sleep_khz(peer, peer->c.rate,
				&peer_khz, &peer_sleep_khz);

		value = r->branch ? !!peer_khz : peer_khz;
		rc = clk_rpmrs_set_rate_active(r, value);
		if (rc)
			goto out;

		value = r->branch ? !!peer_sleep_khz : peer_sleep_khz;
		rc = clk_rpmrs_set_rate_sleep(r, value);
	}
	r->enabled = false;
out:
	rt_mutex_unlock(&rpm_clock_lock);
}
Example no. 4
/************************************************************
 *
 * main state machine worker function
 *
 ************************************************************/
static void hdmi_state_machine_worker(struct work_struct *work)
{
	int pending_hpd_evt, cur_hpd;

	/* Observe and clear the pending flag and latch the current HPD state.
	 */
	rt_mutex_lock(&work_lock);
	pending_hpd_evt = work_state.pending_hpd_evt;
	work_state.pending_hpd_evt = 0;
	cur_hpd = tegra_dc_hpd(work_state.hdmi->dc);
	rt_mutex_unlock(&work_lock);

	pr_info("%s (tid %p): state %d (%s), hpd %d, pending_hpd_evt %d\n",
		__func__, current, work_state.state,
		state_names[work_state.state], cur_hpd, pending_hpd_evt);

	if (pending_hpd_evt) {
		/* If we were woken up because of HPD activity, just schedule
		 * the next appropriate task and get out.
		 */
		hdmi_state_machine_handle_hpd_l(cur_hpd);
	} else if (work_state.state < ARRAY_SIZE(state_machine_dispatch)) {
		dispatch_func_t func = state_machine_dispatch[work_state.state];

		if (NULL == func)
			pr_warn("NULL state machine handler while in state %d; how did we end up here?",
				work_state.state);
		else
			func(work_state.hdmi);
	} else {
		pr_warn("hdmi state machine worker scheduled unexpected state %d",
			work_state.state);
	}
}
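The worker's bounds check plus NULL check means the dispatch table may legitimately contain holes. A minimal sketch of the table shape it expects; the handler names are hypothetical, and the context type is inferred from the worker above:

/* Hypothetical dispatch table matching the checks in the worker above:
 * indexed by state, bounds-checked with ARRAY_SIZE(), NULL entries allowed. */
struct tegra_dc_hdmi_data;
typedef void (*dispatch_func_t)(struct tegra_dc_hdmi_data *hdmi);

static void handle_check_plug_state_l(struct tegra_dc_hdmi_data *hdmi) { /* ... */ }
static void handle_check_edid_l(struct tegra_dc_hdmi_data *hdmi)       { /* ... */ }

static const dispatch_func_t state_machine_dispatch[] = {
	handle_check_plug_state_l,	/* state 0 */
	handle_check_edid_l,		/* state 1 */
	NULL,				/* state 2: steady state, nothing to do */
};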
Example no. 5
/**
 * Author: Christian Muller
 *
 * Fires a new shot from the allied ships
 */
void player_shots_handler() {
	int i, k;

	/* For each active ship */
	for(k=0; k<NB_PLAYER; k++) {

		/* If it is not active, move on to the next one */
		if(!player[k].enable)
			continue;

		/* Walk the array of shots */
		for (i = 0; i < NB_MAX_SHOTS; i++) {

			rt_mutex_lock(&mutex_shots, TM_INFINITE);

			/* If the current shot is inactive */
			if (shot[i].enable == 0) {
				/* Initialize and activate it */
				shot[i].x = player[k].x + SHIP_SIZE / 2;
				shot[i].y = player[k].y;
				shot[i].direction = DIRECTION_UP; // Moves up
				shot[i].enable = 1;
				rt_mutex_unlock(&mutex_shots);
				break;
			} else {
				rt_mutex_unlock(&mutex_shots);
			}
		}
	}
}
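A sketch of the shared shot state the handler manipulates, inferred from the fields it touches; the project's actual declarations are not part of the example:

/* Inferred from usage above; the real definitions may differ. */
typedef struct {
	int x, y;		/* position on the LCD */
	int direction;		/* e.g. DIRECTION_UP for player shots */
	int enable;		/* 1 while the shot is live */
} shot_t;

static shot_t shot[NB_MAX_SHOTS];	/* guarded by mutex_shots */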
Example no. 6
static void hdmi_state_machine_set_state_l(int target_state, int resched_time)
{
	rt_mutex_lock(&work_lock);

	pr_info("%s: switching from state %d (%s) to state %d (%s)\n",
		__func__, work_state.state, state_names[work_state.state],
		target_state, state_names[target_state]);
	work_state.state = target_state;

	/* If the pending_hpd_evt flag is already set, don't bother to
	 * reschedule the state machine worker.  We should be able to assert
	 * that there is a worker callback already scheduled, and that it is
	 * scheduled to run immediately.  This is particularly important when
	 * making the transition to the steady ENABLED or DISABLED states.
	 * If an HPD event occurs while the worker is in flight, after the
	 * worker has checked the pending HPD flag, and the state machine then
	 * transitions to ENABLED or DISABLED, the system would end up
	 * canceling the callback to handle the HPD event were it not for this
	 * check.
	 */
	if (!work_state.pending_hpd_evt)
		hdmi_state_machine_sched_work_l(resched_time);

	rt_mutex_unlock(&work_lock);
}
Example no. 7
/**
 * msm_bus_scale_unregister_client() - Unregister the client from the bus driver
 * @cl: Handle to the client
 */
void msm_bus_scale_unregister_client(uint32_t cl)
{
	int i;
	struct msm_bus_client *client = (struct msm_bus_client *)(cl);
	bool warn = false;
	if (IS_ERR_OR_NULL(client))
		return;

	for (i = 0; i < client->pdata->usecase->num_paths; i++) {
		if ((client->pdata->usecase[0].vectors[i].ab) ||
			(client->pdata->usecase[0].vectors[i].ib)) {
			warn = true;
			break;
		}
	}

	if (warn) {
		int num_paths = client->pdata->usecase->num_paths;
		int ab[num_paths], ib[num_paths];
		WARN(1, "%s called unregister with non-zero vectors\n",
			client->pdata->name);

		/*
		 * Save client values and zero them out to
		 * cleanly unregister
		 */
		for (i = 0; i < num_paths; i++) {
			ab[i] = client->pdata->usecase[0].vectors[i].ab;
			ib[i] = client->pdata->usecase[0].vectors[i].ib;
			client->pdata->usecase[0].vectors[i].ab = 0;
			client->pdata->usecase[0].vectors[i].ib = 0;
		}

		msm_bus_scale_client_update_request(cl, 0);

		/* Restore client vectors if required for re-registering. */
		for (i = 0; i < num_paths; i++) {
			client->pdata->usecase[0].vectors[i].ab = ab[i];
			client->pdata->usecase[0].vectors[i].ib = ib[i];
		}
	} else if (client->curr != 0)
		msm_bus_scale_client_update_request(cl, 0);

	MSM_BUS_DBG("Unregistering client %d\n", cl);
#ifdef SEC_FEATURE_USE_RT_MUTEX
	rt_mutex_lock(&msm_bus_lock);
#else
	mutex_lock(&msm_bus_lock);
#endif
	msm_bus_scale_client_reset_pnodes(cl);
	msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_UNREGISTER, cl);
#ifdef SEC_FEATURE_USE_RT_MUTEX
	rt_mutex_unlock(&msm_bus_lock);
#else
	mutex_unlock(&msm_bus_lock);
#endif
	kfree(client->src_pnode);
	kfree(client);
}
Example no. 8
static void i2c_parent_lock_bus(struct i2c_adapter *adapter,
				unsigned int flags)
{
	struct i2c_mux_priv *priv = adapter->algo_data;
	struct i2c_adapter *parent = priv->muxc->parent;

	rt_mutex_lock(&parent->mux_lock);
	i2c_lock_bus(parent, flags);
}
Example no. 9
static void task_func4(long dummy)
{
	rt_printk("Starting task4, signalling after setting data to 1, then waits for a broadcast.\n");
	rt_mutex_lock(&mtx);
	cond_data = 1;
	rt_mutex_unlock(&mtx);
	rt_cond_signal(&cond);
	rt_mutex_lock(&mtx);
	while(cond_data < 3) {
		rt_cond_wait(&cond, &mtx);
	}
	rt_mutex_unlock(&mtx);
	if(cond_data == 3) {
		rt_printk("task4, conditional variable broadcasted, value: %d.\n", cond_data);
	}
	rt_printk("Ending task4.\n");
	atomic_inc(&cleanup);
}
Example no. 10
static void i2c_mux_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
{
	struct i2c_mux_priv *priv = adapter->algo_data;
	struct i2c_adapter *parent = priv->muxc->parent;

	rt_mutex_lock(&parent->mux_lock);
	if (!(flags & I2C_LOCK_ROOT_ADAPTER))
		return;
	i2c_lock_bus(parent, flags);
}
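The matching unlock path is not shown in the listing; presumably it releases in reverse order, touching the root adapter only when I2C_LOCK_ROOT_ADAPTER was part of the lock request. A sketch:

/* Presumed counterpart of i2c_mux_lock_bus() above (not shown in the listing). */
static void i2c_mux_unlock_bus(struct i2c_adapter *adapter, unsigned int flags)
{
	struct i2c_mux_priv *priv = adapter->algo_data;
	struct i2c_adapter *parent = priv->muxc->parent;

	if (flags & I2C_LOCK_ROOT_ADAPTER)
		i2c_unlock_bus(parent, flags);
	rt_mutex_unlock(&parent->mux_lock);
}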
Example no. 11
void hdmi_state_machine_set_pending_hpd(void)
{
	rt_mutex_lock(&work_lock);

	/* We always schedule work any time there is a pending HPD event */
	work_state.pending_hpd_evt = 1;
	hdmi_state_machine_sched_work_l(0);

	rt_mutex_unlock(&work_lock);
}
Example no. 12
static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
{
	struct rt_mutex *lock = &rwsem->lock;

	rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);

	if (rt_mutex_real_owner(lock) != current)
		rt_mutex_lock(&rwsem->lock);
	rwsem->read_depth++;
}
Example no. 13
int hdmi_state_machine_get_state(void)
{
	int ret;

	rt_mutex_lock(&work_lock);
	ret = work_state.state;
	rt_mutex_unlock(&work_lock);

	return ret;
}
Example no. 14
void channel_send(channel_t *ch, int tag, data_t payload)
{
	msg_t *msg = calloc(1, sizeof(msg_t));
	if (!msg) fail(1, "allocating msg");
	msg->data.tag = tag;
	msg->data.payload = payload;

	rt_mutex_lock(&ch->lock);
	Q_INSERT_TAIL(&ch->msgs, msg, q_link);
	handle_event(&ch->ev);
	rt_mutex_unlock(&ch->lock);
}
Example no. 15
static void task_func3(long dummy)
{
	rt_printk("Starting task3, waiting on the conditional variable to be 3 with a 2 s timeout.\n");
	rt_mutex_lock(&mtx);
	while(cond_data < 3) {
		if (rt_cond_timedwait(&cond, &mtx, rt_get_time() + nano2count(2000000000LL)) < 0) {
			break;
		}
	}
	rt_mutex_unlock(&mtx);
	if(cond_data < 3) {
		rt_printk("task3, timed out, conditional variable value: %d.\n", cond_data);
	}
	rt_mutex_lock(&mtx);
	cond_data = 3;
	rt_mutex_unlock(&mtx);
	rt_printk("task3 broadcasts after setting data to 3.\n");
	rt_cond_broadcast(&cond);
	rt_printk("Ending task3.\n");
	atomic_inc(&cleanup);
}
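Note that the deadline passed to rt_cond_timedwait() here is absolute: the current time from rt_get_time() plus a 2 s delta, converted from nanoseconds to timer counts with nano2count().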
Example no. 16
void register_channel_event(thread_t *thread, event_t *e)
{
	channel_t *ch = e->u.ch;

	// if there is data in the channel, keep running
	rt_mutex_lock(&ch->lock);
	if (Q_GET_HEAD(&ch->msgs)) {
		make_runnable(thread);
	} else {
		e->thread = thread;
	}
	rt_mutex_unlock(&ch->lock);
}
Example no. 17
void  __sema_init(struct semaphore *sem, int val,
			  char *name, char *file, int line)
{
	atomic_set(&sem->count, val);
	switch (val) {
	case 0:
		__rt_mutex_init(&sem->lock, name);
		rt_mutex_lock(&sem->lock);
		break;
	default:
		__rt_mutex_init(&sem->lock, name);
		break;
	}
}
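Initializing with val == 0 takes the underlying rt_mutex immediately, so the semaphore starts out unavailable: the next rt_down() (Example no. 26 below) blocks until a corresponding up releases the lock, reproducing classic down()/up() semantics on top of an rt_mutex.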
Example no. 18
static void task_func2(long dummy)
{
	rt_printk("Starting task2, waiting on the conditional variable to be 2.\n");
	rt_mutex_lock(&mtx);
	while(cond_data < 2) {
		rt_cond_wait(&cond, &mtx);
	}
	rt_mutex_unlock(&mtx);
	if(cond_data == 2) {
		rt_printk("task2, conditional variable signalled, value: %d.\n", cond_data);
	}
	rt_printk("task2 waits for a broadcast.\n");
	rt_mutex_lock(&mtx);
	while(cond_data < 3) {
		rt_cond_wait(&cond, &mtx);
	}
	rt_mutex_unlock(&mtx);
	if(cond_data == 3) {
		rt_printk("task2, conditional variable broadcasted, value: %d.\n", cond_data);
	}
	rt_printk("Ending task2.\n");
	atomic_inc(&cleanup);
}
Example no. 19
static int rpm_clk_prepare(struct clk *clk)
{
	struct rpm_clk *r = to_rpm_clk(clk);
	uint32_t value;
	int rc = 0;
	unsigned long this_khz, this_sleep_khz;
	unsigned long peer_khz = 0, peer_sleep_khz = 0;
	struct rpm_clk *peer = r->peer;

	rt_mutex_lock(&rpm_clock_lock);

	to_active_sleep_khz(r, r->c.rate, &this_khz, &this_sleep_khz);

	/* Don't send requests to the RPM if the rate has not been set. */
	if (this_khz == 0)
		goto out;

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep_khz(peer, peer->c.rate,
				&peer_khz, &peer_sleep_khz);

	value = max(this_khz, peer_khz);
	if (r->branch)
		value = !!value;

	rc = clk_rpmrs_set_rate_active(r, value);
	if (rc)
		goto out;

	value = max(this_sleep_khz, peer_sleep_khz);
	if (r->branch)
		value = !!value;

	rc = clk_rpmrs_set_rate_sleep(r, value);
	if (rc) {
		/* Undo the active set vote and restore it to peer_khz */
		value = peer_khz;
		rc = clk_rpmrs_set_rate_active(r, value);
	}

out:
	if (!rc)
		r->enabled = true;

	rt_mutex_unlock(&rpm_clock_lock);

	return rc;
}
Example no. 20
/*
 * Same as rt_down_read() but no lockdep calls:
 */
void fastcall rt_down_read_non_owner(struct rw_semaphore *rwsem)
{
	unsigned long flags;
	/*
	 * Read locks within the write lock succeed.
	 */
	spin_lock_irqsave(&rwsem->lock.wait_lock, flags);

	if (rt_mutex_real_owner(&rwsem->lock) == current) {
		spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
		rwsem->read_depth++;
		return;
	}
	spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
	rt_mutex_lock(&rwsem->lock);
}
Example no. 21
msg_data_t channel_recv(channel_t *ch)
{
	msg_data_t ret = {-1, {0}};
	msg_t *msg;

	rt_mutex_lock(&ch->lock);
	if ((msg = Q_GET_HEAD(&ch->msgs))) {
		Q_REMOVE(&ch->msgs, msg, q_link);
		ret = msg->data;

		rt_mutex_unlock(&ch->lock);
		free(msg);
	} else {
		rt_mutex_unlock(&ch->lock);
	}

	return ret;
}
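Taken together with channel_send() in Example no. 14, the queue gives simple message-passing semantics: channel_recv() returns a tag of -1 when the channel is empty rather than blocking. A hypothetical polling consumer, with an assumed application handler:

/* Hypothetical caller of the channel API above. */
void process(msg_data_t m);	/* assumed application handler */

void drain(channel_t *ch)
{
	for (;;) {
		msg_data_t m = channel_recv(ch);
		if (m.tag < 0)	/* empty: channel_recv returned {-1, {0}} */
			break;
		process(m);
	}
}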
Example no. 22
static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
{
	unsigned long flags;

	rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);

	/*
	 * Read locks within the write lock succeed.
	 */
	spin_lock_irqsave(&rwsem->lock.wait_lock, flags);

	if (rt_mutex_real_owner(&rwsem->lock) == current) {
		spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
		rwsem->read_depth++;
		return;
	}
	spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
	rt_mutex_lock(&rwsem->lock);
}
Example no. 23
void player_died()
{
	int i, j;

	player[0].lifes--;
	hp_update_leds();

	for(i=1; i<NB_PLAYER; i++) {
		player[i].enable = 0;
	}

	if(player[0].lifes == 0)
		return;

	player[0].enable = 1;
	player[0].x = LCD_MAX_X / 2 - 8;
	player[0].y = LCD_MAX_Y - 20;

	for(i = 0; i < NB_MAX_SHOTS; i++)
	{
		shot[i].enable = 0;
	}

	rt_mutex_lock(&mutex_ennemi, TM_INFINITE);

	// initialize the enemy ships
	for (i = 0; i < nbVagueEnnemis; i++) {

		for (j = 0; j < nbEnnemiParVague; j++) {

			// Reset the positions
			ennemi[i * nbEnnemiParVague + j].x = xStart + (j * (SHIP_SIZE
					+ X_SPACE));
			ennemi[i * nbEnnemiParVague + j].y = yStart + (i * (SHIP_SIZE
					+ Y_SPACE));

		}
	}
	rt_mutex_unlock(&mutex_ennemi);
}
Example no. 24
/*************I2C functions*******************/
static int ds2482_get_i2c_bus(struct i2c_client *client)
{
    struct i2c_adapter *adap = client->adapter;
    int ret;

    if (adap->algo->master_xfer) {
        if (in_atomic() || irqs_disabled()) {
            ret = rt_mutex_trylock(&adap->bus_lock);
            if (!ret)
                /* I2C activity is ongoing. */
                return -EAGAIN;
        } else {
            rt_mutex_lock(&adap->bus_lock);
        }

        return 0;
    } else {
        dev_err(&client->dev, "I2C level transfers not supported\n");
        return -EOPNOTSUPP;
    }
}
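The listing only shows the acquire side; a matching release helper would simply drop the adapter's bus_lock. A sketch (the function name is hypothetical):

/* Hypothetical counterpart to ds2482_get_i2c_bus(). */
static void ds2482_put_i2c_bus(struct i2c_client *client)
{
    rt_mutex_unlock(&client->adapter->bus_lock);
}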
Example no. 25
void fastcall rt_down_read(struct rw_semaphore *rwsem)
{
	unsigned long flags;

	/*
	 * NOTE: we handle it as a write-lock:
	 */
	rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);

	/*
	 * Read locks within the write lock succeed.
	 */
	spin_lock_irqsave(&rwsem->lock.wait_lock, flags);

	if (rt_mutex_real_owner(&rwsem->lock) == current) {
		spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
		/* TODO: lockdep: acquire-read here? */
		rwsem->read_depth++;
		return;
	}
	spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
	rt_mutex_lock(&rwsem->lock);
}
Example no. 26
void  rt_down(struct semaphore *sem)
{
	rt_mutex_lock(&sem->lock);
	__down_complete(sem);
}
Example no. 27
static int handle_op(struct test_thread_data *td, int lockwakeup)
{
	int i, id, ret = -EINVAL;

	switch(td->opcode) {

	case RTTEST_NOP:
		return 0;

	case RTTEST_LOCKCONT:
		td->mutexes[td->opdata] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		return 0;

	case RTTEST_RESET:
		for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {
			if (td->mutexes[i] == 4) {
				rt_mutex_unlock(&mutexes[i]);
				td->mutexes[i] = 0;
			}
		}

		if (!lockwakeup && td->bkl == 4) {
#ifdef CONFIG_LOCK_KERNEL
			unlock_kernel();
#endif
			td->bkl = 0;
		}
		return 0;

	case RTTEST_RESETEVENT:
		atomic_set(&rttest_event, 0);
		return 0;

	default:
		if (lockwakeup)
			return ret;
	}

	switch(td->opcode) {

	case RTTEST_LOCK:
	case RTTEST_LOCKNOWAIT:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
			return ret;

		td->mutexes[id] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		rt_mutex_lock(&mutexes[id]);
		td->event = atomic_add_return(1, &rttest_event);
		td->mutexes[id] = 4;
		return 0;

	case RTTEST_LOCKINT:
	case RTTEST_LOCKINTNOWAIT:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
			return ret;

		td->mutexes[id] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
		td->event = atomic_add_return(1, &rttest_event);
		td->mutexes[id] = ret ? 0 : 4;
		return ret ? -EINTR : 0;

	case RTTEST_UNLOCK:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
			return ret;

		td->event = atomic_add_return(1, &rttest_event);
		rt_mutex_unlock(&mutexes[id]);
		td->event = atomic_add_return(1, &rttest_event);
		td->mutexes[id] = 0;
		return 0;

	case RTTEST_LOCKBKL:
		if (td->bkl)
			return 0;
		td->bkl = 1;
#ifdef CONFIG_LOCK_KERNEL
		lock_kernel();
#endif
		td->bkl = 4;
		return 0;

	case RTTEST_UNLOCKBKL:
		if (td->bkl != 4)
			break;
#ifdef CONFIG_LOCK_KERNEL
		unlock_kernel();
#endif
		td->bkl = 0;
		return 0;

	default:
		break;
	}
	return ret;
}
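Throughout this tester, td->mutexes[id] is a small per-lock state code: 1 marks a lock attempt in flight and 4 a successfully acquired lock, which is what lets RTTEST_RESET find and release every mutex still held.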
Example no. 28
void __lockfunc _mutex_lock(struct mutex *lock)
{
	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	rt_mutex_lock(&lock->lock);
}
Example no. 29
/**
 * msm_bus_scale_client_update_request() - Update the request for bandwidth
 * from a particular client
 *
 * @cl: Handle to the client
 * @index: Index into the vector, to which the bw and clock values need to be
 * updated
 */
int msm_bus_scale_client_update_request(uint32_t cl, unsigned index)
{
	int i, ret = 0;
	struct msm_bus_scale_pdata *pdata;
	int pnode, src, curr, ctx;
	uint64_t req_clk, req_bw, curr_clk, curr_bw;
	struct msm_bus_client *client = (struct msm_bus_client *)cl;
#ifdef DEBUG_MSM_BUS_ARB_REQ
	static int log_cnt = 0;
#endif
	if (IS_ERR_OR_NULL(client)) {
		MSM_BUS_ERR("msm_bus_scale_client update req error %d\n",
				(uint32_t)client);
		return -ENXIO;
	}
#ifdef SEC_FEATURE_USE_RT_MUTEX
	rt_mutex_lock(&msm_bus_lock);
#else
	mutex_lock(&msm_bus_lock);
#endif
	if (client->curr == index)
		goto err;

	curr = client->curr;
	pdata = client->pdata;
	if (!pdata) {
		MSM_BUS_ERR("Null pdata passed to update-request\n");
		ret = -ENXIO;
		goto err;	/* must not return with msm_bus_lock held */
	}

	if (index >= pdata->num_usecases) {
		MSM_BUS_ERR("Client %u passed invalid index: %d\n",
			(uint32_t)client, index);
		ret = -ENXIO;
		goto err;
	}

	MSM_BUS_DBG("cl: %u index: %d curr: %d num_paths: %d\n",
		cl, index, client->curr, client->pdata->usecase->num_paths);

	for (i = 0; i < pdata->usecase->num_paths; i++) {
		src = msm_bus_board_get_iid(client->pdata->usecase[index].
			vectors[i].src);
		if (src == -ENXIO) {
			MSM_BUS_ERR("Master %d not supported. Request cannot"
				" be updated\n", client->pdata->usecase->
				vectors[i].src);
			ret = -ENXIO;	/* don't report success for an invalid master */
			goto err;
		}

		if (msm_bus_board_get_iid(client->pdata->usecase[index].
			vectors[i].dst) == -ENXIO) {
			MSM_BUS_ERR("Slave %d not supported. Request cannot"
				" be updated\n", client->pdata->usecase->
				vectors[i].dst);
		}

		pnode = client->src_pnode[i];
		req_clk = client->pdata->usecase[index].vectors[i].ib;
		req_bw = client->pdata->usecase[index].vectors[i].ab;

#ifdef DEBUG_MSM_BUS_ARB_REQ
		// Debug code to collect client info
		{
			struct msm_bus_fabric_device *fabdev_d = msm_bus_get_fabric_device(GET_FABID(src));
			if (MSM_BUS_FAB_APPSS == fabdev_d->id)
			{
				if (log_cnt >= 1000)
					log_cnt = 0;

				log_req[log_cnt].ab = client->pdata->usecase[index].vectors[i].ab;
				log_req[log_cnt].ib = client->pdata->usecase[index].vectors[i].ib;
				log_req[log_cnt].src = client->pdata->usecase[index].vectors[i].src;
				log_req[log_cnt].dst = client->pdata->usecase[index].vectors[i].dst;
				log_req[log_cnt].cnt = arch_counter_get_cntpct();
				strncpy(log_req[log_cnt].name, client->pdata->name, 19);
				log_cnt++;
				//printk("*** cl: %s ab: %llu ib: %llu\n", client->pdata->name, req_bw, req_clk);
			}
		}
			}
		}
#endif

		if (curr < 0) {
			curr_clk = 0;
			curr_bw = 0;
		} else {
			curr_clk = client->pdata->usecase[curr].vectors[i].ib;
			curr_bw = client->pdata->usecase[curr].vectors[i].ab;
			MSM_BUS_DBG("ab: %llu ib: %llu\n", curr_bw, curr_clk);
		}

		if (!pdata->active_only) {
			ret = update_path(src, pnode, req_clk, req_bw,
				curr_clk, curr_bw, 0, pdata->active_only);
			if (ret) {
				MSM_BUS_ERR("Update path failed! %d\n", ret);
				goto err;
			}
		}

		ret = update_path(src, pnode, req_clk, req_bw, curr_clk,
				curr_bw, ACTIVE_CTX, pdata->active_only);
		if (ret) {
			MSM_BUS_ERR("Update Path failed! %d\n", ret);
			goto err;
		}
	}

	client->curr = index;
	ctx = ACTIVE_CTX;
	msm_bus_dbg_client_data(client->pdata, index, cl);
	bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_commit_fn);

err:
#ifdef SEC_FEATURE_USE_RT_MUTEX
	rt_mutex_unlock(&msm_bus_lock);
#else
	mutex_unlock(&msm_bus_lock);
#endif
	return ret;
}
Example no. 30
/**
 * msm_bus_scale_register_client() - Register the clients with the msm bus
 * driver
 * @pdata: Platform data of the client, containing src, dest, ab, ib
 *
 * Client data contains the vectors specifying arbitrated bandwidth (ab)
 * and instantaneous bandwidth (ib) requested between a particular
 * src and dest.
 */
uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata)
{
	struct msm_bus_client *client = NULL;
	int i;
	int src, dest, nfab;
	struct msm_bus_fabric_device *deffab;

	deffab = msm_bus_get_fabric_device(MSM_BUS_FAB_DEFAULT);
	if (!deffab) {
		MSM_BUS_ERR("Error finding default fabric\n");
		return 0;
	}

	nfab = msm_bus_get_num_fab();
	if (nfab < deffab->board_algo->board_nfab) {
		MSM_BUS_ERR("Can't register client!\n"
				"Num of fabrics up: %d\n",
				nfab);
		return 0;
	}

	if (IS_ERR_OR_NULL(pdata) || (pdata->usecase->num_paths == 0)) {
		MSM_BUS_ERR("Cannot register client with null data\n");
		return 0;
	}

	client = kzalloc(sizeof(struct msm_bus_client), GFP_KERNEL);
	if (!client) {
		MSM_BUS_ERR("Error allocating client\n");
		return 0;
	}

#ifdef SEC_FEATURE_USE_RT_MUTEX
	rt_mutex_lock(&msm_bus_lock);
#else
	mutex_lock(&msm_bus_lock);
#endif
	client->pdata = pdata;
	client->curr = -1;
	for (i = 0; i < pdata->usecase->num_paths; i++) {
		int *pnode;
		struct msm_bus_fabric_device *srcfab;
		pnode = krealloc(client->src_pnode, ((i + 1) * sizeof(int)),
			GFP_KERNEL);
		if (ZERO_OR_NULL_PTR(pnode)) {
			MSM_BUS_ERR("Invalid Pnode ptr!\n");
			continue;
		} else
			client->src_pnode = pnode;

		if (!IS_MASTER_VALID(pdata->usecase->vectors[i].src)) {
			MSM_BUS_ERR("Invalid Master ID %d in request!\n",
				pdata->usecase->vectors[i].src);
			goto err;
		}

		if (!IS_SLAVE_VALID(pdata->usecase->vectors[i].dst)) {
			MSM_BUS_ERR("Invalid Slave ID %d in request!\n",
				pdata->usecase->vectors[i].dst);
			goto err;
		}

		src = msm_bus_board_get_iid(pdata->usecase->vectors[i].src);
		if (src == -ENXIO) {
			MSM_BUS_ERR("Master %d not supported. Client cannot be"
				" registered\n",
				pdata->usecase->vectors[i].src);
			goto err;
		}
		dest = msm_bus_board_get_iid(pdata->usecase->vectors[i].dst);
		if (dest == -ENXIO) {
			MSM_BUS_ERR("Slave %d not supported. Client cannot be"
				" registered\n",
				pdata->usecase->vectors[i].dst);
			goto err;
		}
		srcfab = msm_bus_get_fabric_device(GET_FABID(src));
		if (!srcfab) {
			MSM_BUS_ERR("Fabric not found\n");
			goto err;
		}

		srcfab->visited = true;
		pnode[i] = getpath(src, dest);
		bus_for_each_dev(&msm_bus_type, NULL, NULL, clearvisitedflag);
		if (pnode[i] == -ENXIO) {
			MSM_BUS_ERR("Cannot register client now! Try again!\n");
			goto err;
		}
	}
	msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_REGISTER,
		(uint32_t)client);
#ifdef SEC_FEATURE_USE_RT_MUTEX
	rt_mutex_unlock(&msm_bus_lock);
#else
	mutex_unlock(&msm_bus_lock);
#endif
	MSM_BUS_DBG("ret: %u num_paths: %d\n", (uint32_t)client,
		pdata->usecase->num_paths);
	return (uint32_t)(client);
err:
	kfree(client->src_pnode);
	kfree(client);
#ifdef SEC_FEATURE_USE_RT_MUTEX
	rt_mutex_unlock(&msm_bus_lock);
#else
	mutex_unlock(&msm_bus_lock);
#endif
	return 0;
}