Example #1
static int mpll_set_rate(struct clk_hw *hw,
			 unsigned long rate,
			 unsigned long parent_rate)
{
	struct clk_regmap *clk = to_clk_regmap(hw);
	struct meson_clk_mpll_data *mpll = meson_clk_mpll_data(clk);
	unsigned int sdm, n2;
	unsigned long flags = 0;

	params_from_rate(rate, parent_rate, &sdm, &n2, mpll->flags);

	if (mpll->lock)
		spin_lock_irqsave(mpll->lock, flags);
	else
		__acquire(mpll->lock);

	/* Set the fractional part */
	meson_parm_write(clk->map, &mpll->sdm, sdm);

	/* Set the integer divider part */
	meson_parm_write(clk->map, &mpll->n2, n2);

	if (mpll->lock)
		spin_unlock_irqrestore(mpll->lock, flags);
	else
		__release(mpll->lock);

	return 0;
}
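The clock-driver examples in this list all share one locking idiom: take the spinlock only when the driver registered one, and otherwise emit a sparse __acquire()/__release() annotation so static analysis still sees a balanced lock context. A minimal sketch of that idiom; struct foo and foo_update() are hypothetical names used only for illustration, not kernel API:

/* Hypothetical illustration of the conditional-lock idiom used below. */
#include <linux/compiler.h>
#include <linux/spinlock.h>

struct foo {
	spinlock_t *lock;	/* may be NULL when the caller needs no locking */
};

static void foo_update(struct foo *p)
{
	unsigned long flags = 0;

	if (p->lock)
		spin_lock_irqsave(p->lock, flags);	/* real lock present */
	else
		__acquire(p->lock);			/* sparse-only annotation */

	/* ... program the shared hardware registers here ... */

	if (p->lock)
		spin_unlock_irqrestore(p->lock, flags);
	else
		__release(p->lock);
}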
Example #2
/*
 * It works on the following logic:
 *
 * For enabling clock, enable = 1
 *	set2dis = 1	-> clear bit	-> set = 0
 *	set2dis = 0	-> set bit	-> set = 1
 *
 * For disabling clock, enable = 0
 *	set2dis = 1	-> set bit	-> set = 1
 *	set2dis = 0	-> clear bit	-> set = 0
 *
 * So, result is always: enable xor set2dis.
 */
static void clk_gate_endisable(struct clk_hw *hw, int enable)
{
	struct clk_gate *gate = to_clk_gate(hw);
	int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
	unsigned long uninitialized_var(flags);
	u32 reg;

	set ^= enable;

	if (gate->lock)
		spin_lock_irqsave(gate->lock, flags);
	else
		__acquire(gate->lock);

	if (gate->flags & CLK_GATE_HIWORD_MASK) {
		reg = BIT(gate->bit_idx + 16);
		if (set)
			reg |= BIT(gate->bit_idx);
	} else {
		reg = clk_readl(gate->reg);

		if (set)
			reg |= BIT(gate->bit_idx);
		else
			reg &= ~BIT(gate->bit_idx);
	}

	clk_writel(reg, gate->reg);

	if (gate->lock)
		spin_unlock_irqrestore(gate->lock, flags);
	else
		__release(gate->lock);
}
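The comment above collapses the four flag/enable combinations into a single XOR. A tiny stand-alone program (plain C, no kernel headers, written only to check that table) prints all four cases:

#include <stdio.h>

int main(void)
{
	/* set2dis mirrors the CLK_GATE_SET_TO_DISABLE flag. */
	for (int set2dis = 0; set2dis <= 1; set2dis++)
		for (int enable = 0; enable <= 1; enable++)
			printf("set2dis=%d enable=%d -> bit value %d\n",
			       set2dis, enable, set2dis ^ enable);
	return 0;
}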
Example #3
static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long flags = 0;
	unsigned long m, n;
	u32 val;

	rational_best_approximation(rate, parent_rate,
			GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
			&m, &n);

	if (fd->lock)
		spin_lock_irqsave(fd->lock, flags);
	else
		__acquire(fd->lock);

	val = clk_readl(fd->reg);
	val &= ~(fd->mmask | fd->nmask);
	val |= (m << fd->mshift) | (n << fd->nshift);
	clk_writel(val, fd->reg);

	if (fd->lock)
		spin_unlock_irqrestore(fd->lock, flags);
	else
		__release(fd->lock);

	return 0;
}
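GENMASK(width - 1, 0) passed to rational_best_approximation() is simply the largest value an m- or n-field of that width can hold, which caps the search. A stand-alone check of that relationship, with GENMASK redefined locally purely for illustration (the kernel's own definition lives in <linux/bits.h>):

#include <stdio.h>

/* Illustrative local copy: set bits h..l inclusive. */
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))

int main(void)
{
	unsigned int mwidth = 8, nwidth = 16;

	/* An 8-bit field holds up to 255, a 16-bit field up to 65535. */
	printf("max m = %lu, max n = %lu\n",
	       GENMASK(mwidth - 1, 0), GENMASK(nwidth - 1, 0));
	return 0;
}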
Example #4
static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long flags = 0;
	unsigned long div;
	unsigned n, m;
	u32 val;

	div = gcd(parent_rate, rate);
	m = rate / div;
	n = parent_rate / div;

	if (fd->lock)
		spin_lock_irqsave(fd->lock, flags);
	else
		__acquire(fd->lock);

	val = clk_readl(fd->reg);
	val &= ~(fd->mmask | fd->nmask);
	val |= (m << fd->mshift) | (n << fd->nshift);
	clk_writel(val, fd->reg);

	if (fd->lock)
		spin_unlock_irqrestore(fd->lock, flags);
	else
		__release(fd->lock);

	return 0;
}
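In this variant m/n is just rate/parent_rate reduced to lowest terms with gcd(); m programs the numerator field and n the denominator field. A stand-alone sketch with made-up rates (24 MHz parent, 48 kHz target) shows the result. Note that, unlike rational_best_approximation() in the other examples, this exact reduction is not clamped to the register field widths:

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;
		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	unsigned long parent_rate = 24000000, rate = 48000;
	unsigned long div = gcd(parent_rate, rate);

	/* 48000/24000000 reduces to 1/500: m = 1, n = 500. */
	printf("m = %lu, n = %lu\n", rate / div, parent_rate / div);
	return 0;
}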
Example #5
static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long flags = 0;
	u32 val, m, n;
	u64 ret;

	if (fd->lock)
		spin_lock_irqsave(fd->lock, flags);
	else
		__acquire(fd->lock);

	val = clk_readl(fd->reg);

	if (fd->lock)
		spin_unlock_irqrestore(fd->lock, flags);
	else
		__release(fd->lock);

	m = (val & fd->mmask) >> fd->mshift;
	n = (val & fd->nmask) >> fd->nshift;

	if (!n || !m)
		return parent_rate;

	ret = (u64)parent_rate * m;
	do_div(ret, n);

	return ret;
}
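recalc_rate() computes parent_rate * m / n in 64-bit arithmetic so the multiplication cannot overflow on 32-bit machines; do_div() then divides in place. An equivalent stand-alone computation with illustrative values (m = 1, n = 500 against a 24 MHz parent):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	unsigned long parent_rate = 24000000;
	uint32_t m = 1, n = 500;

	/* Widen before multiplying, exactly as the driver does. */
	uint64_t rate = (uint64_t)parent_rate * m;
	rate /= n;		/* stands in for do_div(rate, n) */

	printf("rate = %" PRIu64 " Hz\n", rate);	/* prints 48000 */
	return 0;
}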
Example #6
static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long flags = 0;
	unsigned long m, n;
	u32 val;

	rational_best_approximation(rate, parent_rate,
			GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
			&m, &n);

	if (fd->flags & CLK_FRAC_DIVIDER_ZERO_BASED) {
		m--;
		n--;
	}

	if (fd->lock)
		spin_lock_irqsave(fd->lock, flags);
	else
		__acquire(fd->lock);

	val = clk_fd_readl(fd);
	val &= ~(fd->mmask | fd->nmask);
	val |= (m << fd->mshift) | (n << fd->nshift);
	clk_fd_writel(fd, val);

	if (fd->lock)
		spin_unlock_irqrestore(fd->lock, flags);
	else
		__release(fd->lock);

	return 0;
}
Example #7
static void __acquires(agg_queue)
ieee80211_stop_queue_agg(struct ieee80211_local *local, int tid)
{
	int queue = ieee80211_ac_from_tid(tid);

	if (atomic_inc_return(&local->agg_queue_stop[queue]) == 1)
		ieee80211_stop_queue_by_reason(
			&local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
	__acquire(agg_queue);
}
Example #8
static void __acquires(agg_queue)
ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];

	if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
		ieee80211_stop_queue_by_reason(
			&sdata->local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
	__acquire(agg_queue);
}
Example #9
static void __acquires(agg_queue)
ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
{
	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];

	/* we do refcounting here, so don't use the queue reason refcounting */

	if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
		ieee80211_stop_queue_by_reason(
			&sdata->local->hw, queue,
			IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
			false);
	__acquire(agg_queue);
}
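All three mac80211 variants gate the real work on atomic_inc_return(...) == 1, so only the caller that takes the counter from 0 to 1 actually stops the queue; every later caller merely bumps the refcount. A stand-alone illustration of that gate using C11 atomics (the kernel's atomic_t API differs; this only mirrors the logic):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int agg_queue_stop;

static void stop_queue_agg(void)
{
	/* Only the 0 -> 1 transition performs the real stop. */
	if (atomic_fetch_add(&agg_queue_stop, 1) + 1 == 1)
		printf("queue stopped\n");
	else
		printf("already stopped, refcount bumped\n");
}

int main(void)
{
	stop_queue_agg();	/* first caller stops the queue */
	stop_queue_agg();	/* second caller only bumps the count */
	return 0;
}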
Example #10
irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
	struct cvm_mmc_host *host = dev_id;
	struct mmc_request *req;
	unsigned long flags = 0;
	u64 emm_int, rsp_sts;
	bool host_done;

	if (host->need_irq_handler_lock)
		spin_lock_irqsave(&host->irq_handler_lock, flags);
	else
		__acquire(&host->irq_handler_lock);

	/* Clear interrupt bits (write 1 clears). */
	emm_int = readq(host->base + MIO_EMM_INT(host));
	writeq(emm_int, host->base + MIO_EMM_INT(host));

	if (emm_int & MIO_EMM_INT_SWITCH_ERR)
		check_switch_errors(host);

	req = host->current_req;
	if (!req)
		goto out;

	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
	/*
	 * dma_val set means DMA is still in progress. Don't touch
	 * the request and wait for the interrupt indicating that
	 * the DMA is finished.
	 */
	if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
		goto out;

	if (!host->dma_active && req->data &&
	    (emm_int & MIO_EMM_INT_BUF_DONE)) {
		unsigned int type = (rsp_sts >> 7) & 3;

		if (type == 1)
			do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
		else if (type == 2)
			do_write(req);
	}