Example #1
static int __init pci_check_type1(void)
{
	unsigned long flags;
	unsigned int tmp;
	int works = 0;

	atomic_spin_lock_irqsave(&pci_config_lock, flags);

	outb(0x01, 0xCFB);
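	/* Save the current CONFIG_ADDRESS contents, write a test pattern to
	 * 0xCF8 and check whether it reads back (configuration mechanism #1).
	 */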
	tmp = inl(0xCF8);
	outl(0x80000000, 0xCF8);

	if (inl(0xCF8) == 0x80000000) {
		atomic_spin_unlock_irqrestore(&pci_config_lock, flags);

		if (pci_sanity_check(&pci_direct_conf1))
			works = 1;

		atomic_spin_lock_irqsave(&pci_config_lock, flags);
	}
	outl(tmp, 0xCF8);

	atomic_spin_unlock_irqrestore(&pci_config_lock, flags);

	return works;
}
Example #2
static int pci_conf1_read(unsigned int seg, unsigned int bus,
			  unsigned int devfn, int reg, int len, u32 *value)
{
	unsigned long flags;

	if ((bus > 255) || (devfn > 255) || (reg > 4095)) {
		*value = -1;
		return -EINVAL;
	}

	atomic_spin_lock_irqsave(&pci_config_lock, flags);

	outl(PCI_CONF1_ADDRESS(bus, devfn, reg), 0xCF8);

	switch (len) {
	case 1:
		*value = inb(0xCFC + (reg & 3));
		break;
	case 2:
		*value = inw(0xCFC + (reg & 2));
		break;
	case 4:
		*value = inl(0xCFC);
		break;
	}

	atomic_spin_unlock_irqrestore(&pci_config_lock, flags);

	return 0;
}
Example #3
static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
                                      int psize, int ssize, int local)
{
    unsigned long want_v;
    unsigned long lpar_rc;
    u64 dummy1, dummy2;
    unsigned long flags;

    DBG_LOW("    inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
            slot, va, psize, local);
    want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);

    atomic_spin_lock_irqsave(&beat_htab_lock, flags);
    dummy1 = beat_lpar_hpte_getword0(slot);

    if ((dummy1 & ~0x7FUL) != (want_v & ~0x7FUL)) {
        DBG_LOW("not found !\n");
        atomic_spin_unlock_irqrestore(&beat_htab_lock, flags);
        return;
    }

    lpar_rc = beat_write_htab_entry(0, slot, 0, 0, HPTE_V_VALID, 0,
                                    &dummy1, &dummy2);
    atomic_spin_unlock_irqrestore(&beat_htab_lock, flags);

    BUG_ON(lpar_rc != 0);
}
Example #4
acpi_status acpi_hw_clear_acpi_status(void)
{
	acpi_status status;
	acpi_cpu_flags lock_flags = 0;

	ACPI_FUNCTION_TRACE(hw_clear_acpi_status);

	ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %8.8X%8.8X\n",
			  ACPI_BITMASK_ALL_FIXED_STATUS,
			  ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));

	atomic_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);

	/* Clear the fixed events in PM1 A/B */

	status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
					ACPI_BITMASK_ALL_FIXED_STATUS);
	if (ACPI_FAILURE(status)) {
		goto unlock_and_exit;
	}

	/* Clear the GPE Bits in all GPE registers in all GPE blocks */

	status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL);

      unlock_and_exit:
	atomic_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
	return_ACPI_STATUS(status);
}
Example #5
static int pci_conf1_write(unsigned int seg, unsigned int bus,
			   unsigned int devfn, int reg, int len, u32 value)
{
	unsigned long flags;

	if ((bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	atomic_spin_lock_irqsave(&pci_config_lock, flags);

	outl(PCI_CONF1_ADDRESS(bus, devfn, reg), 0xCF8);

	switch (len) {
	case 1:
		outb((u8)value, 0xCFC + (reg & 3));
		break;
	case 2:
		outw((u16)value, 0xCFC + (reg & 2));
		break;
	case 4:
		outl((u32)value, 0xCFC);
		break;
	}

	atomic_spin_unlock_irqrestore(&pci_config_lock, flags);

	return 0;
}
Example #6
int show_interrupts(struct seq_file *p, void *v)
{
    int i = *(loff_t *) v, j;
    struct irqaction * action;
    unsigned long flags;

    if (i == 0) {
        seq_printf(p, "           ");
        for_each_online_cpu(j)
        seq_printf(p, "CPU%d       ",j);
        seq_putc(p, '\n');
    }

    if (i < NR_IRQS) {
        atomic_spin_lock_irqsave(&irq_desc[i].lock, flags);
        action = irq_desc[i].action;
        if (!action)
            goto skip;
        seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
        seq_printf(p, "%10u ", kstat_irqs(i));
#else
        for_each_online_cpu(j)
        seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
        seq_printf(p, " %14s", irq_desc[i].chip->typename);
        seq_printf(p, "  %s", action->name);

        for (action=action->next; action; action = action->next)
            seq_printf(p, ", %s", action->name);

        seq_putc(p, '\n');
skip:
        atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
    }

    return 0;
}
Example #7
static int macio_do_write_reg8(PMF_STD_ARGS, u32 offset, u8 value, u8 mask)
{
	struct macio_chip *macio = func->driver_data;
	unsigned long flags;

	atomic_spin_lock_irqsave(&feature_lock, flags);
	MACIO_OUT8(offset, (MACIO_IN8(offset) & ~mask) | (value & mask));
	atomic_spin_unlock_irqrestore(&feature_lock, flags);
	return 0;
}
Example #8
static inline void sync_writel(unsigned long val, unsigned long reg,
			       unsigned long complete_mask)
{
	unsigned long flags;

	atomic_spin_lock_irqsave(&l2x0_lock, flags);
	writel(val, l2x0_base + reg);
	/* wait for the operation to complete */
	while (readl(l2x0_base + reg) & complete_mask)
		;
	atomic_spin_unlock_irqrestore(&l2x0_lock, flags);
}
Example #9
/** 
 * irq_in_use - return true if this irq is being used 
 */
static int irq_in_use(unsigned int irq)
{
	int rc = 0;
	unsigned long flags;
	struct irq_desc *desc = irq_desc + irq;

	atomic_spin_lock_irqsave(&desc->lock, flags);
	if (desc->action)
		rc = 1;
	atomic_spin_unlock_irqrestore(&desc->lock, flags);
	return rc;
}
Example #10
static int unin_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value, u32 mask)
{
	unsigned long flags;

	atomic_spin_lock_irqsave(&feature_lock, flags);
	/* This is fairly bogus in darwin, but it should work for our needs
	 * implemented that way:
	 */
	UN_OUT(offset, (UN_IN(offset) & ~mask) | (value & mask));
	atomic_spin_unlock_irqrestore(&feature_lock, flags);
	return 0;
}
Example #11
static unsigned int get_time_pit(void)
{
	unsigned long flags;
	unsigned int count;

	atomic_spin_lock_irqsave(&i8253_lock, flags);
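	/* Latch counter 0 of the i8253, then read the count LSB and MSB */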
	outb_p(0x00, 0x43);
	count = inb_p(0x40);
	count |= inb_p(0x40) << 8;
	atomic_spin_unlock_irqrestore(&i8253_lock, flags);

	return count;
}
Example #12
static void i8259_unmask_irq(unsigned int irq_nr)
{
	unsigned long flags;

	pr_debug("i8259_unmask_irq(%d)\n", irq_nr);

	atomic_spin_lock_irqsave(&i8259_lock, flags);
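	/* Clear the mask bit for this irq in the cached master/slave mask,
	 * then push the updated mask out to the PIC.
	 */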
	if (irq_nr < 8)
		cached_21 &= ~(1 << irq_nr);
	else
		cached_A1 &= ~(1 << (irq_nr-8));
	i8259_set_irq_mask(irq_nr);
	atomic_spin_unlock_irqrestore(&i8259_lock, flags);
}
Example #13
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	atomic_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	atomic_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
Example #14
/*!
 * This function disables the DVFS module.
 */
static void stop_dvfs(void)
{
	u32 reg = 0;
	unsigned long flags;
	u32 curr_cpu;

	if (dvfs_core_is_active) {

		/* Mask dvfs irq, disable DVFS */
		reg = __raw_readl(dvfs_data->membase
				  + MXC_DVFSCORE_CNTR);
		/* FSVAIM=1 */
		reg |= MXC_DVFSCNTR_FSVAIM;
		__raw_writel(reg, dvfs_data->membase
				  + MXC_DVFSCORE_CNTR);

		curr_wp = 0;
		if (!high_bus_freq_mode)
			set_high_bus_freq(1);

		curr_cpu = clk_get_rate(cpu_clk);
		if (curr_cpu != cpu_wp_tbl[curr_wp].cpu_rate) {
			set_cpu_freq(curr_wp);
#if defined(CONFIG_CPU_FREQ)
			if (cpufreq_trig_needed == 1) {
				cpufreq_trig_needed = 0;
				cpufreq_update_policy(0);
			}
#endif
		}
		atomic_spin_lock_irqsave(&mxc_dvfs_core_lock, flags);

		reg = __raw_readl(dvfs_data->membase
				  + MXC_DVFSCORE_CNTR);
		reg = (reg & ~MXC_DVFSCNTR_DVFEN);
		__raw_writel(reg, dvfs_data->membase
				  + MXC_DVFSCORE_CNTR);

		atomic_spin_unlock_irqrestore(&mxc_dvfs_core_lock, flags);

		dvfs_core_is_active = 0;

		clk_disable(dvfs_clk);
	}

	printk(KERN_DEBUG "DVFS is stopped\n");
}
Example #15
static int name_unique(unsigned int irq, struct irqaction *new_action)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;
	int ret = 1;

	atomic_spin_lock_irqsave(&desc->lock, flags);
	for (action = desc->action ; action; action = action->next) {
		if ((action != new_action) && action->name &&
				!strcmp(new_action->name, action->name)) {
			ret = 0;
			break;
		}
	}
	atomic_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
Example #16
static int macio_do_write_reg8_slm(PMF_STD_ARGS, u32 offset, u32 shift,
				   u32 mask)
{
	struct macio_chip *macio = func->driver_data;
	unsigned long flags;
	u32 tmp, val;

	/* Check args */
	if (args == NULL || args->count == 0)
		return -EINVAL;

	atomic_spin_lock_irqsave(&feature_lock, flags);
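	/* Read-modify-write: shift the caller-supplied argument into position
	 * and update only the bits selected by mask.
	 */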
	tmp = MACIO_IN8(offset);
	val = args->u[0].v << shift;
	tmp = (tmp & ~mask) | (val & mask);
	MACIO_OUT8(offset, tmp);
	atomic_spin_unlock_irqrestore(&feature_lock, flags);
	return 0;
}
Example #17
static void i8259_mask_and_ack_irq(unsigned int irq_nr)
{
	unsigned long flags;

	atomic_spin_lock_irqsave(&i8259_lock, flags);
	if (irq_nr > 7) {
		cached_A1 |= 1 << (irq_nr-8);
		inb(0xA1); 	/* DUMMY */
		outb(cached_A1, 0xA1);
		outb(0x20, 0xA0);	/* Non-specific EOI */
		outb(0x20, 0x20);	/* Non-specific EOI to cascade */
	} else {
		cached_21 |= 1 << irq_nr;
		inb(0x21); 	/* DUMMY */
		outb(cached_21, 0x21);
		outb(0x20, 0x20);	/* Non-specific EOI */
	}
	atomic_spin_unlock_irqrestore(&i8259_lock, flags);
}
Example #18
static int macio_do_gpio_write(PMF_STD_ARGS, u8 value, u8 mask)
{
	u8 __iomem *addr = (u8 __iomem *)func->driver_data;
	unsigned long flags;
	u8 tmp;

	/* Check polarity */
	if (args && args->count && !args->u[0].v)
		value = ~value;

	/* Toggle the GPIO */
	atomic_spin_lock_irqsave(&feature_lock, flags);
	tmp = readb(addr);
	tmp = (tmp & ~mask) | (value & mask);
	DBG("Do write 0x%02x to GPIO %s (%p)\n",
	    tmp, func->node->full_name, addr);
	writeb(tmp, addr);
	atomic_spin_unlock_irqrestore(&feature_lock, flags);

	return 0;
}
Example #19
static int __init pci_check_type2(void)
{
	unsigned long flags;
	int works = 0;

	atomic_spin_lock_irqsave(&pci_config_lock, flags);

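	/* Probe configuration mechanism #2: clear the enable and forward
	 * registers, then check that 0xCF8 and 0xCFA read back as zero.
	 */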
	outb(0x00, 0xCFB);
	outb(0x00, 0xCF8);
	outb(0x00, 0xCFA);

	if (inb(0xCF8) == 0x00 && inb(0xCFA) == 0x00) {
		atomic_spin_unlock_irqrestore(&pci_config_lock, flags);

		if (pci_sanity_check(&pci_direct_conf2))
			works = 1;
	} else
		atomic_spin_unlock_irqrestore(&pci_config_lock, flags);

	return works;
}
Example #20
int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
{
	char tmpbuf[TMPBUFSIZE];
	unsigned long flags;

	if (!count)
		return 0;

	if (count > TMPBUFSIZE - 1)
		return -EINVAL;

	memset(tmpbuf, 0x0, TMPBUFSIZE);

	if (copy_from_user(tmpbuf, buf, count))
		return -EFAULT;

	atomic_spin_lock_irqsave(&oprofilefs_lock, flags);
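	/* Parse the user-supplied string and update *val while holding the lock */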
	*val = simple_strtoul(tmpbuf, NULL, 0);
	atomic_spin_unlock_irqrestore(&oprofilefs_lock, flags);
	return 0;
}
Example #21
static int pci_conf2_read(unsigned int seg, unsigned int bus,
			  unsigned int devfn, int reg, int len, u32 *value)
{
	unsigned long flags;
	int dev, fn;

	if ((bus > 255) || (devfn > 255) || (reg > 255)) {
		*value = -1;
		return -EINVAL;
	}

	dev = PCI_SLOT(devfn);
	fn = PCI_FUNC(devfn);

	if (dev & 0x10) 
		return PCIBIOS_DEVICE_NOT_FOUND;

	atomic_spin_lock_irqsave(&pci_config_lock, flags);

	outb((u8)(0xF0 | (fn << 1)), 0xCF8);
	outb((u8)bus, 0xCFA);

	switch (len) {
	case 1:
		*value = inb(PCI_CONF2_ADDRESS(dev, reg));
		break;
	case 2:
		*value = inw(PCI_CONF2_ADDRESS(dev, reg));
		break;
	case 4:
		*value = inl(PCI_CONF2_ADDRESS(dev, reg));
		break;
	}

	outb(0, 0xCF8);

	atomic_spin_unlock_irqrestore(&pci_config_lock, flags);

	return 0;
}
Example #22
/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	WARN_ON(mm->context.active != 0);

	atomic_spin_lock_irqsave(&context_lock, flags);
	id = mm->context.id;
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
		mm->context.active = 0;
#endif
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	atomic_spin_unlock_irqrestore(&context_lock, flags);
}
Example #23
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "    ");
		for_each_online_cpu(j)
			seq_printf(p, "       CPU%d", j);

#ifdef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, " [min/avg/max] (CPU cycle counts)");
#endif
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		struct irqaction *action;

		atomic_spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif

		seq_printf(p, " %14s", irq_desc[i].chip->typename);
#ifndef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, "  %s", action->name);

		while ((action = action->next))
			seq_printf(p, ", %s", action->name);
#else
		for ( ;action; action = action->next) {
			unsigned int k, avg, min, max;

			min = max = action->cr16_hist[0];

			for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
				int hist = action->cr16_hist[k];

				if (hist) {
					avg += hist;
				} else
					break;

				if (hist > max) max = hist;
				if (hist < min) min = hist;
			}

			avg /= k;
			seq_printf(p, " %s[%d/%d/%d]", action->name,
					min,avg,max);
		}
#endif

		seq_putc(p, '\n');
 skip:
		atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}

	return 0;
}
Example #24
/**
 * i8259_init - Initialize the legacy controller
 * @node: device node of the legacy PIC (can be NULL, but then, it will match
 *        all interrupts, so beware)
 * @intack_addr: PCI interrupt acknowledge (real) address which will return
 *               the active irq from the 8259
 */
void i8259_init(struct device_node *node, unsigned long intack_addr)
{
	unsigned long flags;

	/* initialize the controller */
	atomic_spin_lock_irqsave(&i8259_lock, flags);

	/* Mask all first */
	outb(0xff, 0xA1);
	outb(0xff, 0x21);

	/* init master interrupt controller */
	outb(0x11, 0x20); /* Start init sequence */
	outb(0x00, 0x21); /* Vector base */
	outb(0x04, 0x21); /* edge triggered, Cascade (slave) on IRQ2 */
	outb(0x01, 0x21); /* Select 8086 mode */

	/* init slave interrupt controller */
	outb(0x11, 0xA0); /* Start init sequence */
	outb(0x08, 0xA1); /* Vector base */
	outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */
	outb(0x01, 0xA1); /* Select 8086 mode */

	/* That thing is slow */
	udelay(100);

	/* always read ISR */
	outb(0x0B, 0x20);
	outb(0x0B, 0xA0);

	/* Unmask the internal cascade */
	cached_21 &= ~(1 << 2);

	/* Set interrupt masks */
	outb(cached_A1, 0xA1);
	outb(cached_21, 0x21);

	atomic_spin_unlock_irqrestore(&i8259_lock, flags);

	/* create a legacy host */
	i8259_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY,
				    0, &i8259_host_ops, 0);
	if (i8259_host == NULL) {
		printk(KERN_ERR "i8259: failed to allocate irq host !\n");
		return;
	}

	/* reserve our resources */
	/* XXX should we continue doing that ? it seems to cause problems
	 * with further requesting of PCI IO resources for that range...
	 * need to look into it.
	 */
	request_resource(&ioport_resource, &pic1_iores);
	request_resource(&ioport_resource, &pic2_iores);
	request_resource(&ioport_resource, &pic_edgectrl_iores);

	if (intack_addr != 0)
		pci_intack = ioremap(intack_addr, 1);

	printk(KERN_INFO "i8259 legacy interrupt controller initialized\n");
}
Example #25
static int start_dvfs(void)
{
	u32 reg;
	unsigned long flags;

	if (dvfs_core_is_active)
		return 0;

	atomic_spin_lock_irqsave(&mxc_dvfs_core_lock, flags);

	clk_enable(dvfs_clk);

	dvfs_load_config(0);

	/* config reg GPC_CNTR */
	reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);

	reg &= ~MXC_GPCCNTR_GPCIRQM;
	/* GPCIRQ=1, select ARM IRQ */
	reg |= MXC_GPCCNTR_GPCIRQ_ARM;
	/* ADU=1, select ARM domain */
	reg |= MXC_GPCCNTR_ADU;
	__raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);

	/* Set PREDIV bits */
	reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
	reg = (reg & ~(dvfs_data->prediv_mask));
	reg |= (dvfs_data->prediv_val) << (dvfs_data->prediv_offset);
	__raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_CNTR);

	/* Enable DVFS interrupt */
	reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
	/* FSVAIM=0 */
	reg = (reg & ~MXC_DVFSCNTR_FSVAIM);
	/* Set MAXF, MINF */
	reg = (reg & ~(MXC_DVFSCNTR_MAXF_MASK | MXC_DVFSCNTR_MINF_MASK));
	reg |= 1 << MXC_DVFSCNTR_MAXF_OFFSET;
	/* Select ARM domain */
	reg |= MXC_DVFSCNTR_DVFIS;
	/* Enable DVFS frequency adjustment interrupt */
	reg = (reg & ~MXC_DVFSCNTR_FSVAIM);
	/* Set load tracking buffer register source */
	reg = (reg & ~MXC_DVFSCNTR_LTBRSR_MASK);
	reg |= DVFS_LTBRSR;
	/* Set DIV3CK */
	reg = (reg & ~(dvfs_data->div3ck_mask));
	reg |= (dvfs_data->div3ck_val) << (dvfs_data->div3ck_offset);
	__raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_CNTR);

	/* Enable DVFS */
	reg = __raw_readl(dvfs_data->membase + MXC_DVFSCORE_CNTR);
	reg |= MXC_DVFSCNTR_DVFEN;
	__raw_writel(reg, dvfs_data->membase + MXC_DVFSCORE_CNTR);

	dvfs_core_is_active = 1;

	atomic_spin_unlock_irqrestore(&mxc_dvfs_core_lock, flags);

	printk(KERN_DEBUG "DVFS is started\n");

	return 0;
}
Example #26
static int set_cpu_freq(int wp)
{
	int arm_podf;
	int podf;
	int vinc = 0;
	int ret = 0;
	int org_cpu_rate;
	unsigned long rate = 0;
	int gp_volt = 0;
	u32 reg;
	u32 reg1;
	unsigned long flags;

	if (cpu_wp_tbl[wp].pll_rate != cpu_wp_tbl[old_wp].pll_rate) {
		org_cpu_rate = clk_get_rate(cpu_clk);
		rate = cpu_wp_tbl[wp].cpu_rate;

		if (org_cpu_rate == rate)
			return ret;

		gp_volt = cpu_wp_tbl[wp].cpu_voltage;
		if (gp_volt == 0)
			return ret;

		/* Set the voltage for the GP domain. */
		if (rate > org_cpu_rate) {
			ret = regulator_set_voltage(core_regulator, gp_volt,
						    gp_volt);
			if (ret < 0) {
				printk(KERN_DEBUG "COULD NOT SET GP VOLTAGE\n");
				return ret;
			}
			udelay(dvfs_data->delay_time);
		}
		atomic_spin_lock_irqsave(&mxc_dvfs_core_lock, flags);
		/* PLL_RELOCK, set ARM_FREQ_SHIFT_DIVIDER */
		reg = __raw_readl(ccm_base + dvfs_data->ccm_cdcr_offset);
		reg &= 0xFFFFFFFB;
		__raw_writel(reg, ccm_base + dvfs_data->ccm_cdcr_offset);

		setup_pll();
		/* START the GPC main control FSM */
		/* set VINC */
		reg = __raw_readl(gpc_base + dvfs_data->gpc_vcr_offset);
		reg &= ~(MXC_GPCVCR_VINC_MASK | MXC_GPCVCR_VCNTU_MASK |
			 MXC_GPCVCR_VCNT_MASK);

		if (rate > org_cpu_rate)
			reg |= 1 << MXC_GPCVCR_VINC_OFFSET;

		reg |= (1 << MXC_GPCVCR_VCNTU_OFFSET) |
		       (1 << MXC_GPCVCR_VCNT_OFFSET);
		__raw_writel(reg, gpc_base + dvfs_data->gpc_vcr_offset);

		reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);
		reg &= ~(MXC_GPCCNTR_ADU_MASK | MXC_GPCCNTR_FUPD_MASK);
		reg |= MXC_GPCCNTR_FUPD;
		reg |= MXC_GPCCNTR_ADU;
		__raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);

		reg |= MXC_GPCCNTR_STRT;
		__raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);
		while (__raw_readl(gpc_base + dvfs_data->gpc_cntr_offset)
				& 0x4000)
			udelay(10);
		atomic_spin_unlock_irqrestore(&mxc_dvfs_core_lock, flags);

		if (rate < org_cpu_rate) {
			ret = regulator_set_voltage(core_regulator,
						    gp_volt, gp_volt);
			if (ret < 0) {
				printk(KERN_DEBUG
				       "COULD NOT SET GP VOLTAGE!!!!\n");
				return ret;
			}
			udelay(dvfs_data->delay_time);
		}
		clk_set_rate(cpu_clk, rate);
	} else {
		podf = cpu_wp_tbl[wp].cpu_podf;
		gp_volt = cpu_wp_tbl[wp].cpu_voltage;

		/* Change arm_podf only */
		/* set ARM_FREQ_SHIFT_DIVIDER */
		reg = __raw_readl(ccm_base + dvfs_data->ccm_cdcr_offset);
		reg &= 0xFFFFFFFB;
		reg |= 1 << 2;
		__raw_writel(reg, ccm_base + dvfs_data->ccm_cdcr_offset);

		/* Get ARM_PODF */
		reg = __raw_readl(ccm_base + dvfs_data->ccm_cacrr_offset);
		arm_podf = reg & 0x07;
		if (podf == arm_podf) {
			printk(KERN_DEBUG
			       "No need to change freq and voltage!!!!\n");
			return 0;
		}

		/* Check if FSVAI indicate freq up */
		if (podf < arm_podf) {
			ret = regulator_set_voltage(core_regulator,
						    gp_volt, gp_volt);
			if (ret < 0) {
				printk(KERN_DEBUG
				       "COULD NOT SET GP VOLTAGE!!!!\n");
				return 0;
			}
			udelay(dvfs_data->delay_time);
			vinc = 1;
		} else {
			vinc = 0;
		}

		arm_podf = podf;
		/* Set ARM_PODF */
		reg &= 0xFFFFFFF8;
		reg |= arm_podf;

		reg1 = __raw_readl(ccm_base + dvfs_data->ccm_cdhipr_offset);
		if ((reg1 & 0x00010000) == 0)
			__raw_writel(reg,
				ccm_base + dvfs_data->ccm_cacrr_offset);
		else {
			printk(KERN_DEBUG "ARM_PODF still in busy!!!!\n");
			return 0;
		}

		/* START the GPC main control FSM */
		reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);
		reg |= MXC_GPCCNTR_FUPD;
		/* ADU=1, select ARM domain */
		reg |= MXC_GPCCNTR_ADU;
		__raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);
		/* set VINC */
		reg = __raw_readl(gpc_base + dvfs_data->gpc_vcr_offset);
		reg &=
		    ~(MXC_GPCVCR_VINC_MASK | MXC_GPCVCR_VCNTU_MASK |
		      MXC_GPCVCR_VCNT_MASK);
		reg |= (1 << MXC_GPCVCR_VCNTU_OFFSET) |
		    (100 << MXC_GPCVCR_VCNT_OFFSET) |
		    (vinc << MXC_GPCVCR_VINC_OFFSET);
		__raw_writel(reg, gpc_base + dvfs_data->gpc_vcr_offset);

		reg = __raw_readl(gpc_base + dvfs_data->gpc_cntr_offset);
		reg &= (~(MXC_GPCCNTR_ADU | MXC_GPCCNTR_FUPD));
		reg |= MXC_GPCCNTR_ADU | MXC_GPCCNTR_FUPD | MXC_GPCCNTR_STRT;
		__raw_writel(reg, gpc_base + dvfs_data->gpc_cntr_offset);

		/* Wait for arm podf Enable */
		while ((__raw_readl(gpc_base + dvfs_data->gpc_cntr_offset) &
			MXC_GPCCNTR_STRT) == MXC_GPCCNTR_STRT) {
			printk(KERN_DEBUG "Waiting arm_podf enabled!\n");
			udelay(10);
		}

		if (vinc == 0) {
			ret = regulator_set_voltage(core_regulator,
						    gp_volt, gp_volt);
			if (ret < 0) {
				printk(KERN_DEBUG
				       "COULD NOT SET GP VOLTAGE!!!!\n");
				return ret;
			}
			udelay(dvfs_data->delay_time);
		}

		propagate_rate(pll1_sw_clk);
		/* Clear the ARM_FREQ_SHIFT_DIVIDER */
		reg = __raw_readl(ccm_base + dvfs_data->ccm_cdcr_offset);
		reg &= 0xFFFFFFFB;
		__raw_writel(reg, ccm_base + dvfs_data->ccm_cdcr_offset);
	}
#if defined(CONFIG_CPU_FREQ)
		cpufreq_trig_needed = 1;
#endif
	old_wp = wp;

	return ret;
}