Code Example #1
static void native_stop_other_cpus(int wait)
{
	unsigned long flags;
	unsigned long timeout;

	if (reboot_force)
		return;

	/*
	 * Use our own vector here because smp_call_function()
	 * does lots of things not suitable in a panic situation.
	 */

	/*
	 * We start by using the REBOOT_VECTOR irq.
	 * The irq is treated as a sync point to allow critical
	 * regions of code on other cpus to release their spin locks
	 * and re-enable irqs.  Jumping straight to an NMI might
	 * accidentally cause deadlocks with further shutdown/panic
	 * code.  By syncing, we give the cpus up to one second to
	 * finish their work before we force them off with the NMI.
	 */
	if (num_online_cpus() > 1) {
		/* did someone beat us here? */
		if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
			return;

		/* sync above data before sending IRQ */
		wmb();

		apic->send_IPI_allbutself(REBOOT_VECTOR);

		/*
		 * Don't wait longer than a second if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_SEC;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}
	
	/* if the REBOOT_VECTOR didn't work, try with the NMI */
	if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi))  {
		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
					 NMI_FLAG_FIRST, "smp_stop"))
			/* Note: we ignore failures here */
			/* Hope the REBOOT_IRQ is good enough */
			goto finish;

		/* sync above data before sending IRQ */
		wmb();

		pr_emerg("Shutting down cpus with NMI\n");

		apic->send_IPI_allbutself(NMI_VECTOR);

		/*
	 * Don't wait longer than 10 ms if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_MSEC * 10;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

finish:
	local_irq_save(flags);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
	local_irq_restore(flags);
}
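
Note: every example on this page uses the same idiom: local_irq_save() masks interrupts on the local CPU and remembers the previous interrupt state in `flags`, and local_irq_restore() puts that state back, so the pair nests safely. A minimal sketch of the idiom (the `counter` datum and the function name are invented for illustration):

/* Sketch only: protect a tiny critical section against local IRQs. */
static unsigned long counter;

static void counter_bump(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* mask local IRQs, save previous state */
	counter++;			/* must not race with this CPU's handlers */
	local_irq_restore(flags);	/* restore exactly what was saved */
}
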
Code Example #2
File: fasttimer.c  Project: 0x0f/adam-kernel
static int proc_fasttimer_read(char *buf, char **start, off_t offset, int len
                       ,int *eof, void *data_unused)
{
  unsigned long flags;
  int i = 0;
  int num_to_show;
  struct fasttime_t tv;
  struct fast_timer *t, *nextt;
  static char *bigbuf = NULL;
  static unsigned long used;

  if (!bigbuf && !(bigbuf = vmalloc(BIG_BUF_SIZE)))
  {
    used = 0;
    if (buf)
      buf[0] = '\0';
    return 0;
  }

  if (!offset || !used)
  {
    do_gettimeofday_fast(&tv);

    used = 0;
    used += sprintf(bigbuf + used, "Fast timers added:     %i\n",
                    fast_timers_added);
    used += sprintf(bigbuf + used, "Fast timers started:   %i\n",
                    fast_timers_started);
    used += sprintf(bigbuf + used, "Fast timer interrupts: %i\n",
                    fast_timer_ints);
    used += sprintf(bigbuf + used, "Fast timers expired:   %i\n",
                    fast_timers_expired);
    used += sprintf(bigbuf + used, "Fast timers deleted:   %i\n",
                    fast_timers_deleted);
    used += sprintf(bigbuf + used, "Fast timer running:    %s\n",
                    fast_timer_running ? "yes" : "no");
    used += sprintf(bigbuf + used, "Current time:          %lu.%06lu\n",
                    (unsigned long)tv.tv_jiff,
                    (unsigned long)tv.tv_usec);
#ifdef FAST_TIMER_SANITY_CHECKS
    used += sprintf(bigbuf + used, "Sanity failed:         %i\n",
                    sanity_failed);
#endif
    used += sprintf(bigbuf + used, "\n");

#ifdef DEBUG_LOG_INCLUDED
    {
      int end_i = debug_log_cnt;
      i = 0;

      if (debug_log_cnt_wrapped)
      {
        i = debug_log_cnt;
      }

      while ((i != end_i || (debug_log_cnt_wrapped && !used)) &&
             used+100 < BIG_BUF_SIZE)
      {
        used += sprintf(bigbuf + used, debug_log_string[i],
                        debug_log_value[i]);
        i = (i+1) % DEBUG_LOG_MAX;
      }
    }
    used += sprintf(bigbuf + used, "\n");
#endif

    num_to_show = (fast_timers_started < NUM_TIMER_STATS ? fast_timers_started:
                   NUM_TIMER_STATS);
    used += sprintf(bigbuf + used, "Timers started: %i\n", fast_timers_started);
    for (i = 0; i < num_to_show && (used+100 < BIG_BUF_SIZE) ; i++)
    {
      int cur = (fast_timers_started - i - 1) % NUM_TIMER_STATS;

#if 1 //ndef FAST_TIMER_LOG
      used += sprintf(bigbuf + used, "div: %i freq: %i delay: %i"
                      "\n",
                      timer_div_settings[cur],
                      timer_freq_settings[cur],
                      timer_delay_settings[cur]
                      );
#endif
#ifdef FAST_TIMER_LOG
      t = &timer_started_log[cur];
      used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu "
                      "d: %6li us data: 0x%08lX"
                      "\n",
                      t->name,
                      (unsigned long)t->tv_set.tv_jiff,
                      (unsigned long)t->tv_set.tv_usec,
                      (unsigned long)t->tv_expires.tv_jiff,
                      (unsigned long)t->tv_expires.tv_usec,
                      t->delay_us,
                      t->data
                      );
#endif
    }
    used += sprintf(bigbuf + used, "\n");

#ifdef FAST_TIMER_LOG
    num_to_show = (fast_timers_added < NUM_TIMER_STATS ? fast_timers_added:
                   NUM_TIMER_STATS);
    used += sprintf(bigbuf + used, "Timers added: %i\n", fast_timers_added);
    for (i = 0; i < num_to_show && (used+100 < BIG_BUF_SIZE); i++)
    {
      t = &timer_added_log[(fast_timers_added - i - 1) % NUM_TIMER_STATS];
      used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu "
                      "d: %6li us data: 0x%08lX"
                      "\n",
                      t->name,
                      (unsigned long)t->tv_set.tv_jiff,
                      (unsigned long)t->tv_set.tv_usec,
                      (unsigned long)t->tv_expires.tv_jiff,
                      (unsigned long)t->tv_expires.tv_usec,
                      t->delay_us,
                      t->data
                      );
    }
    used += sprintf(bigbuf + used, "\n");

    num_to_show = (fast_timers_expired < NUM_TIMER_STATS ? fast_timers_expired:
                   NUM_TIMER_STATS);
    used += sprintf(bigbuf + used, "Timers expired: %i\n", fast_timers_expired);
    for (i = 0; i < num_to_show && (used+100 < BIG_BUF_SIZE); i++)
    {
      t = &timer_expired_log[(fast_timers_expired - i - 1) % NUM_TIMER_STATS];
      used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu "
                      "d: %6li us data: 0x%08lX"
                      "\n",
                      t->name,
                      (unsigned long)t->tv_set.tv_jiff,
                      (unsigned long)t->tv_set.tv_usec,
                      (unsigned long)t->tv_expires.tv_jiff,
                      (unsigned long)t->tv_expires.tv_usec,
                      t->delay_us,
                      t->data
                      );
    }
    used += sprintf(bigbuf + used, "\n");
#endif

    used += sprintf(bigbuf + used, "Active timers:\n");
    local_irq_save(flags);
    t = fast_timer_list;
    while (t != NULL && (used+100 < BIG_BUF_SIZE))
    {
      nextt = t->next;
      local_irq_restore(flags);
      used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu "
                      "d: %6li us data: 0x%08lX"
/*                      " func: 0x%08lX" */
                      "\n",
                      t->name,
                      (unsigned long)t->tv_set.tv_jiff,
                      (unsigned long)t->tv_set.tv_usec,
                      (unsigned long)t->tv_expires.tv_jiff,
                      (unsigned long)t->tv_expires.tv_usec,
                      t->delay_us,
                      t->data
/*                      , t->function */
                      );
      local_irq_save(flags);
      if (t->next != nextt)
      {
        printk(KERN_WARNING "timer removed!\n");
      }
      t = nextt;
    }
    local_irq_restore(flags);
  }

  if (used - offset < len)
  {
    len = used - offset;
  }

  memcpy(buf, bigbuf + offset, len);
  *start = buf;
  *eof = 1;

  return len;
}
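
The handler above implements the legacy procfs read_proc interface (buf/start/offset/eof). As a hedged sketch of how such a handler was typically registered in kernels of that vintage (the entry name here is an assumption):

/* Sketch only: wiring a read_proc handler into /proc on pre-3.10 kernels. */
static int __init fasttimer_proc_init(void)
{
	if (!create_proc_read_entry("fasttimer", 0, NULL,
				    proc_fasttimer_read, NULL))
		return -ENOMEM;
	return 0;
}
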
Code Example #3
File: smp.c  Project: AllenDou/linux
static int __cpuinit smp_85xx_kick_cpu(int nr)
{
	unsigned long flags;
	const u64 *cpu_rel_addr;
	struct epapr_spin_table __iomem *spin_table;
	struct device_node *np;
	int hw_cpu = get_hard_smp_processor_id(nr);
	int ioremappable;
	int ret = 0;

	WARN_ON(nr < 0 || nr >= NR_CPUS);
	WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);

	pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);

	np = of_get_cpu_node(nr, NULL);
	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);

	if (cpu_rel_addr == NULL) {
		printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
		return -ENOENT;
	}

	/*
	 * A secondary core could be in a spinloop in the bootpage
	 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
	 * The bootpage and highmem can be accessed via ioremap(), but
	 * we need to directly access the spinloop if it's in lowmem.
	 */
	ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);

	/* Map the spin table */
	if (ioremappable)
		spin_table = ioremap(*cpu_rel_addr,
				sizeof(struct epapr_spin_table));
	else
		spin_table = phys_to_virt(*cpu_rel_addr);

	local_irq_save(flags);
#ifdef CONFIG_PPC32
#ifdef CONFIG_HOTPLUG_CPU
	/* Corresponding to generic_set_cpu_dead() */
	generic_set_cpu_up(nr);

	if (system_state == SYSTEM_RUNNING) {
		out_be32(&spin_table->addr_l, 0);

		/*
		 * We don't set the BPTR register here since it already points
		 * to the boot page properly.
		 */
		mpic_reset_core(hw_cpu);

		/* wait until core is ready... */
		if (!spin_event_timeout(in_be32(&spin_table->addr_l) == 1,
						10000, 100)) {
			pr_err("%s: timeout waiting for core %d to reset\n",
							__func__, hw_cpu);
			ret = -ENOENT;
			goto out;
		}

		/*  clear the acknowledge status */
		__secondary_hold_acknowledge = -1;
	}
#endif
	out_be32(&spin_table->pir, hw_cpu);
	out_be32(&spin_table->addr_l, __pa(__early_start));

	if (!ioremappable)
		flush_dcache_range((ulong)spin_table,
			(ulong)spin_table + sizeof(struct epapr_spin_table));

	/* Wait a bit for the CPU to ack. */
	if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
					10000, 100)) {
		pr_err("%s: timeout waiting for core %d to ack\n",
						__func__, hw_cpu);
		ret = -ENOENT;
		goto out;
	}
out:
#else
	smp_generic_kick_cpu(nr);

	out_be32(&spin_table->pir, hw_cpu);
	out_be64((u64 *)(&spin_table->addr_h),
	  __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));

	if (!ioremappable)
		flush_dcache_range((ulong)spin_table,
			(ulong)spin_table + sizeof(struct epapr_spin_table));
#endif

	local_irq_restore(flags);

	if (ioremappable)
		iounmap(spin_table);

	return ret;
}
Code Example #4
/**
 * speedstep_set_state - set the SpeedStep state
 * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
 *
 */
static void speedstep_set_state(unsigned int state)
{
	unsigned int result = 0, command, new_state, dummy;
	unsigned long flags;
	unsigned int function = SET_SPEEDSTEP_STATE;
	unsigned int retry = 0;

	if (state > 0x1)
		return;

	/* Disable IRQs */
	preempt_disable();
	local_irq_save(flags);

	command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);

	pr_debug("trying to set frequency to state %u "
		"with command %x at port %x\n",
		state, command, smi_port);

	do {
		if (retry) {
			/*
			 * We need to enable interrupts, otherwise the blockage
			 * won't resolve.
			 *
			 * We disable preemption so that other processes don't
			 * run. If other processes were running, they could
			 * submit more DMA requests, making the blockage worse.
			 */
			pr_debug("retry %u, previous result %u, waiting...\n",
					retry, result);
			local_irq_enable();
			mdelay(retry * 50);
			local_irq_disable();
		}
		retry++;
		__asm__ __volatile__(
			"push %%ebp\n"
			"out %%al, (%%dx)\n"
			"pop %%ebp"
			: "=b" (new_state), "=D" (result),
			  "=c" (dummy), "=a" (dummy),
			  "=d" (dummy), "=S" (dummy)
			: "a" (command), "b" (function), "c" (state),
			  "d" (smi_port), "S" (0), "D" (0)
			);
	} while ((new_state != state) && (retry <= SMI_TRIES));

	/* enable IRQs */
	local_irq_restore(flags);
	preempt_enable();

	if (new_state == state)
		pr_debug("change to %u MHz succeeded after %u tries "
			"with result %u\n",
			(speedstep_freqs[new_state].frequency / 1000),
			retry, result);
	else
		printk(KERN_ERR "cpufreq: change to state %u "
			"failed with new_state %u and result %u\n",
			state, new_state, result);

	return;
}
Code Example #5
File: fasttimer.c  Project: 0x0f/adam-kernel
/* In version 1.4 this function takes 27 - 50 us */
void start_one_shot_timer(struct fast_timer *t,
                          fast_timer_function_type *function,
                          unsigned long data,
                          unsigned long delay_us,
                          const char *name)
{
  unsigned long flags;
  struct fast_timer *tmp;

  D1(printk("sft %s %lu us\n", name, delay_us));

  local_irq_save(flags);

  do_gettimeofday_fast(&t->tv_set);
  tmp = fast_timer_list;

#ifdef FAST_TIMER_SANITY_CHECKS
  /* Check that this timer is not in the list already... */
  while (tmp != NULL) {
    if (tmp == t) {
      printk(KERN_WARNING "timer name: %s data: "
             "0x%08lX already in list!\n", name, data);
      sanity_failed++;
      goto done;
    } else
      tmp = tmp->next;
  }
  tmp = fast_timer_list;
#endif

  t->delay_us = delay_us;
  t->function = function;
  t->data = data;
  t->name = name;

  t->tv_expires.tv_usec = t->tv_set.tv_usec + delay_us % 1000000;
  /* Each whole second of delay becomes HZ jiffies. */
  t->tv_expires.tv_jiff = t->tv_set.tv_jiff + delay_us / 1000000 * HZ;
  if (t->tv_expires.tv_usec >= 1000000)
  {
    t->tv_expires.tv_usec -= 1000000;
    t->tv_expires.tv_jiff += HZ;
  }
#ifdef FAST_TIMER_LOG
  timer_added_log[fast_timers_added % NUM_TIMER_STATS] = *t;
#endif
  fast_timers_added++;

  /* Check if this should timeout before anything else */
  if (tmp == NULL || fasttime_cmp(&t->tv_expires, &tmp->tv_expires) < 0)
  {
    /* Put first in list and modify the timer value */
    t->prev = NULL;
    t->next = fast_timer_list;
    if (fast_timer_list)
    {
      fast_timer_list->prev = t;
    }
    fast_timer_list = t;
#ifdef FAST_TIMER_LOG
    timer_started_log[fast_timers_started % NUM_TIMER_STATS] = *t;
#endif
    start_timer1(delay_us);
  } else {
    /* Put in correct place in list */
    while (tmp->next && fasttime_cmp(&t->tv_expires,
                                     &tmp->next->tv_expires) > 0)
    {
      tmp = tmp->next;
    }
    /* Insert t after tmp */
    t->prev = tmp;
    t->next = tmp->next;
    if (tmp->next)
    {
      tmp->next->prev = t;
    }
    tmp->next = t;
  }

  D2(printk("start_one_shot_timer: %lu us done\n", delay_us));

done:
  local_irq_restore(flags);
} /* start_one_shot_timer */
Code Example #6
File: clock24xx.c  Project: rminnich/lunacy
/* Sets basic clocks based on the specified rate */
static int omap2_select_table_rate(struct clk *clk, unsigned long rate)
{
	u32 cur_rate, done_rate, bypass = 0, tmp;
	struct prcm_config *prcm;
	unsigned long found_speed = 0;
	unsigned long flags;

	if (clk != &virt_prcm_set)
		return -EINVAL;

	for (prcm = rate_table; prcm->mpu_speed; prcm++) {
		if (!(prcm->flags & cpu_mask))
			continue;

		if (prcm->xtal_speed != sys_ck.rate)
			continue;

		if (prcm->mpu_speed <= rate) {
			found_speed = prcm->mpu_speed;
			break;
		}
	}

	if (!found_speed) {
		printk(KERN_INFO "Could not set MPU rate to %luMHz\n",
		       rate / 1000000);
		return -EINVAL;
	}

	curr_prcm_set = prcm;
	cur_rate = omap2xxx_clk_get_core_rate(&dpll_ck);

	if (prcm->dpll_speed == cur_rate / 2) {
		omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL, 1);
	} else if (prcm->dpll_speed == cur_rate * 2) {
		omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 1);
	} else if (prcm->dpll_speed != cur_rate) {
		local_irq_save(flags);

		if (prcm->dpll_speed == prcm->xtal_speed)
			bypass = 1;

		if ((prcm->cm_clksel2_pll & OMAP24XX_CORE_CLK_SRC_MASK) ==
		    CORE_CLK_SRC_DPLL_X2)
			done_rate = CORE_CLK_SRC_DPLL_X2;
		else
			done_rate = CORE_CLK_SRC_DPLL;

		/* MPU divider */
		cm_write_mod_reg(prcm->cm_clksel_mpu, MPU_MOD, CM_CLKSEL);

		/* dsp + iva1 div(2420), iva2.1(2430) */
		cm_write_mod_reg(prcm->cm_clksel_dsp,
				 OMAP24XX_DSP_MOD, CM_CLKSEL);

		cm_write_mod_reg(prcm->cm_clksel_gfx, GFX_MOD, CM_CLKSEL);

		/* Major subsystem dividers */
		tmp = cm_read_mod_reg(CORE_MOD, CM_CLKSEL1) & OMAP24XX_CLKSEL_DSS2_MASK;
		cm_write_mod_reg(prcm->cm_clksel1_core | tmp, CORE_MOD,
				 CM_CLKSEL1);

		if (cpu_is_omap2430())
			cm_write_mod_reg(prcm->cm_clksel_mdm,
					 OMAP2430_MDM_MOD, CM_CLKSEL);

		/* x2 to enter omap2xxx_sdrc_init_params() */
		omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 1);

		omap2_set_prcm(prcm->cm_clksel1_pll, prcm->base_sdrc_rfr,
			       bypass);

		omap2xxx_sdrc_init_params(omap2xxx_sdrc_dll_is_unlocked());
		omap2xxx_sdrc_reprogram(done_rate, 0);

		local_irq_restore(flags);
	}

	return 0;
}
Code Example #7
File: sun4m_irq.c  Project: 0-T-0/ps4-linux
static void __init sun4m_init_timers(void)
{
	struct device_node *dp = of_find_node_by_name(NULL, "counter");
	int i, err, len, num_cpu_timers;
	unsigned int irq;
	const u32 *addr;

	if (!dp) {
		printk(KERN_ERR "sun4m_init_timers: No 'counter' node.\n");
		return;
	}

	addr = of_get_property(dp, "address", &len);
	of_node_put(dp);
	if (!addr) {
		printk(KERN_ERR "sun4m_init_timers: No 'address' prop.\n");
		return;
	}

	num_cpu_timers = (len / sizeof(u32)) - 1;
	for (i = 0; i < num_cpu_timers; i++) {
		timers_percpu[i] = (void __iomem *)
			(unsigned long) addr[i];
	}
	timers_global = (void __iomem *)
		(unsigned long) addr[num_cpu_timers];

	/* Every per-cpu timer works in timer mode */
	sbus_writel(0x00000000, &timers_global->timer_config);

#ifdef CONFIG_SMP
	sparc_config.cs_period = SBUS_CLOCK_RATE * 2;  /* 2 seconds */
	sparc_config.features |= FEAT_L14_ONESHOT;
#else
	sparc_config.cs_period = SBUS_CLOCK_RATE / HZ; /* 1/HZ sec  */
	sparc_config.features |= FEAT_L10_CLOCKEVENT;
#endif
	sparc_config.features |= FEAT_L10_CLOCKSOURCE;
	sbus_writel(timer_value(sparc_config.cs_period),
	            &timers_global->l10_limit);

	master_l10_counter = &timers_global->l10_count;

	irq = sun4m_build_device_irq(NULL, SUN4M_TIMER_IRQ);

	err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
	if (err) {
		printk(KERN_ERR "sun4m_init_timers: Register IRQ error %d.\n",
			err);
		return;
	}

	for (i = 0; i < num_cpu_timers; i++)
		sbus_writel(0, &timers_percpu[i]->l14_limit);
	if (num_cpu_timers == 4)
		sbus_writel(SUN4M_INT_E14, &sun4m_irq_global->mask_set);

#ifdef CONFIG_SMP
	{
		unsigned long flags;
		struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];

		/* For SMP we use the level 14 ticker, however the bootup code
		 * has copied the firmware's level 14 vector into the boot cpu's
		 * trap table, we must fix this now or we get squashed.
		 */
		local_irq_save(flags);
		trap_table->inst_one = lvl14_save[0];
		trap_table->inst_two = lvl14_save[1];
		trap_table->inst_three = lvl14_save[2];
		trap_table->inst_four = lvl14_save[3];
		local_ops->cache_all();
		local_irq_restore(flags);
	}
#endif
}
Code Example #8
File: ds1302.c  Project: 10x-Amin/x10_Th_kernel
static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	unsigned long flags;

	switch(cmd) {
		case RTC_RD_TIME:	/* read the time/date from RTC	*/
		{
			struct rtc_time rtc_tm;

			memset(&rtc_tm, 0, sizeof (struct rtc_time));
			lock_kernel();
			get_rtc_time(&rtc_tm);
			unlock_kernel();
			if (copy_to_user((struct rtc_time*)arg, &rtc_tm, sizeof(struct rtc_time)))
				return -EFAULT;
			return 0;
		}

		case RTC_SET_TIME:	/* set the RTC */
		{
			struct rtc_time rtc_tm;
			unsigned char mon, day, hrs, min, sec, leap_yr;
			unsigned int yrs;

			if (!capable(CAP_SYS_TIME))
				return -EPERM;

			if (copy_from_user(&rtc_tm, (struct rtc_time*)arg, sizeof(struct rtc_time)))
				return -EFAULT;

			yrs = rtc_tm.tm_year + 1900;
			mon = rtc_tm.tm_mon + 1;   /* tm_mon starts at zero */
			day = rtc_tm.tm_mday;
			hrs = rtc_tm.tm_hour;
			min = rtc_tm.tm_min;
			sec = rtc_tm.tm_sec;


			if ((yrs < 1970) || (yrs > 2069))
				return -EINVAL;

			leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));

			if ((mon > 12) || (day == 0))
				return -EINVAL;

			if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
				return -EINVAL;

			if ((hrs >= 24) || (min >= 60) || (sec >= 60))
				return -EINVAL;

			if (yrs >= 2000)
				yrs -= 2000;	/* RTC (0, 1, ... 69) */
			else
				yrs -= 1900;	/* RTC (70, 71, ... 99) */

			sec = bin2bcd(sec);
			min = bin2bcd(min);
			hrs = bin2bcd(hrs);
			day = bin2bcd(day);
			mon = bin2bcd(mon);
			yrs = bin2bcd(yrs);

			lock_kernel();
			local_irq_save(flags);
			CMOS_WRITE(yrs, RTC_YEAR);
			CMOS_WRITE(mon, RTC_MONTH);
			CMOS_WRITE(day, RTC_DAY_OF_MONTH);
			CMOS_WRITE(hrs, RTC_HOURS);
			CMOS_WRITE(min, RTC_MINUTES);
			CMOS_WRITE(sec, RTC_SECONDS);
			local_irq_restore(flags);
			unlock_kernel();

			/* Notice that at this point, the RTC is updated but
			 * the kernel is still running with the old time.
			 * You need to set that separately with settimeofday
			 * or adjtimex.
			 */
			return 0;
		}

		case RTC_SET_CHARGE: /* set the RTC TRICKLE CHARGE register */
		{
			int tcs_val;

			if (!capable(CAP_SYS_TIME))
				return -EPERM;

			if(copy_from_user(&tcs_val, (int*)arg, sizeof(int)))
				return -EFAULT;

			lock_kernel();
			tcs_val = RTC_TCR_PATTERN | (tcs_val & 0x0F);
			ds1302_writereg(RTC_TRICKLECHARGER, tcs_val);
			unlock_kernel();
			return 0;
		}
		default:
			return -EINVAL;
	}
}
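
The DS1302 keeps each time field in binary-coded decimal, which is why every field is run through bin2bcd() before being written. For reference, the <linux/bcd.h> helpers used above boil down to the following (a re-implementation sketch, not the driver's code):

/* Sketch: equivalents of the kernel's bin2bcd()/bcd2bin() for values 0..99. */
static inline unsigned char sketch_bin2bcd(unsigned char val)
{
	return ((val / 10) << 4) | (val % 10);	/* e.g. 59 -> 0x59 */
}

static inline unsigned char sketch_bcd2bin(unsigned char val)
{
	return (val >> 4) * 10 + (val & 0x0f);	/* e.g. 0x59 -> 59 */
}
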
Code Example #9
File: leds-lart.c  Project: 7L/pi_plus
void lart_leds_event(led_event_t evt)
{
	unsigned long flags;

	local_irq_save(flags);

	switch(evt) {
	case led_start:
		/* pin 23 is output pin */
		GPDR |= LED_23;
		hw_led_state = LED_MASK;
		led_state = LED_STATE_ENABLED;
		break;

	case led_stop:
		led_state &= ~LED_STATE_ENABLED;
		break;

	case led_claim:
		led_state |= LED_STATE_CLAIMED;
		hw_led_state = LED_MASK;
		break;

	case led_release:
		led_state &= ~LED_STATE_CLAIMED;
		hw_led_state = LED_MASK;
		break;

#ifdef CONFIG_LEDS_TIMER
	case led_timer:
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state ^= LED_23;
		break;
#endif

#ifdef CONFIG_LEDS_CPU
	case led_idle_start:
		/* The LART people like the LED to be off when the
                   system is idle... */
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state &= ~LED_23;
		break;

	case led_idle_end:
		/* ... and on if the system is not idle */
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state |= LED_23;
		break;
#endif

	case led_red_on:
		if (led_state & LED_STATE_CLAIMED)
			hw_led_state &= ~LED_23;
		break;

	case led_red_off:
		if (led_state & LED_STATE_CLAIMED)
			hw_led_state |= LED_23;
		break;

	default:
		break;
	}

	/* Now set the GPIO state, or nothing will happen at all */
	if (led_state & LED_STATE_ENABLED) {
		GPSR = hw_led_state;
		GPCR = hw_led_state ^ LED_MASK;
	}

	local_irq_restore(flags);
}
Code Example #10
File: spi.c  Project: AntMinerCE/A8-spi
int spi_tranfer(uint8_t cmd, uint8_t *tx_data, uint16_t tx_len, uint8_t *rx_data, uint16_t *rx_len)
{
	uint16_t i, j;
	uint32_t cnt;
	int32_t value32;
	uint16_t h_tx_len;
	unsigned long flags;
	iowrite32(CH_ENA, spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_CHCTRL0);
	//printk("OMAP2_MCSPI_CHCTRL0 %#x\n", ioread32(spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_CHCTRL0));
	//cs low  FORCE==1
	iowrite32(ioread32(spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_CHCONF0) | (0x01 << 20), spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_CHCONF0);
	value32 = ioread32(spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_CHCONF0);
	//printk("OMAP2_MCSPI_CHCONF0 %#x\n", value32);
	//printk( "000status %#x\n", ioread32(spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_CHSTAT0));
#if SPI_USE_INTERRUPT
		spi_dev.p_rx = rx_data;
		spi_dev.p_tx = tx_data;
		spi_dev.length = tx_len;
		spi_dev.rev_len = 0;
		spi_dev.have_wake_up = false;
		h_tx_len = tx_len < 32 ? tx_len: tx_len - 32;
		while((ioread32(spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_CHSTAT0) & (0x01<<5)) == 0)
		{
			spi_dev.p_rx[spi_dev.rev_len]= ioread32(spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_RX0);
		}
		cnt = 0;
		while((ioread32(spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_CHSTAT0) & (0x01<<4)) != 0)
		{
			if(cnt++ == 0xff)
			{
				printk(KERN_ERR "wait tx fifo empty timeout\n");
				return 0;
			}
		}
		local_irq_save(flags);
		for(i = 0; i < 32; i++) /* 32-byte FIFO; total data length is 52 bytes */
		{
			iowrite32(tx_data[i], spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_TX0);
			/*
			if((ioread32(spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_CHSTAT0) & (0x01<<4)) != 0)
			{
				printk(KERN_ERR "fifo full status %#x\n", ioread32(spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_CHSTAT0));
			}
			if(i == (tx_len - 1)) //total < 32
				break;
			*/
		}
		//while((spi_dev.rev_len < tx_len ) && (spi_dev.rev_len < (tx_len - 32)))
		while(spi_dev.rev_len < h_tx_len)
		{
			if((ioread32(spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_CHSTAT0) & (0x01<<5)) == 0)
			{
				spi_dev.p_rx[spi_dev.rev_len++]= ioread32(spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_RX0);
				cnt = 0;
			}
			else if(cnt++ == 0xff)
			{
				printk(KERN_ERR "status %#x\n", ioread32(spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_CHSTAT0));
				break;
			}
		}
		iowrite32(0x01<<8, gpio2_vaddr + GPIO_SETDATAOUT);
		if(spi_dev.rev_len != tx_len)
		{
			for(; i< tx_len; i++)
			{
				iowrite32(tx_data[i], spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_TX0);
				/*
				if((ioread32(spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_CHSTAT0) & (0x01<<4)) != 0)
				{
					printk(KERN_ERR "fifo full status %#x\n", ioread32(spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_CHSTAT0));
				}
				*/
			}
			//clear tx empty
			iowrite32(0x0f, spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_IRQSTATUS);
			//enable interrupt
			iowrite32(ioread32(spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_IRQENABLE) | (TX0_EMPTY), spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_IRQENABLE);
			local_irq_restore(flags);
			iowrite32(0x01<<8, gpio2_vaddr + GPIO_CLEARDATAOUT);
			//printk(KERN_ERR "OMAP2_MCSPI_IRQENABLE{%#x}\n", ioread32(spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_IRQSTATUS), ioread32(spi_vaddr + OMAP2_MCSPI_OFFSET + OMAP2_MCSPI_IRQENABLE));
			#if 0
			if(spi_dev.have_wake_up == false)
			{
				//if(0 == interruptible_sleep_on_timeout(&spi_dev.wait_transfer_complete, 10 * HZ/1000))//10ms
				if(0 == wait_event_interruptible_timeout(&spi_dev.wait_transfer_complete, spi_dev.have_wake_up == true, 10 * HZ/1000))//10ms
				{
					iowrite32(0x01<<6, gpio2_vaddr + GPIO_SETDATAOUT);
					printk(KERN_ERR "spi transfer timeout spi_dev.rev_len{%d}\n", spi_dev.rev_len);
					//*rx_len = 0;
					iowrite32(0x01<<6, gpio2_vaddr + GPIO_CLEARDATAOUT);
				}
			}
			#endif
			if(0 == wait_event_interruptible_timeout(spi_dev.wait_transfer_complete, spi_dev.have_wake_up == true, 10 * HZ/1000))//10ms
			{
				iowrite32(0x01<<6, gpio2_vaddr + GPIO_SETDATAOUT);
				printk(KERN_ERR "spi transfer timeout spi_dev.rev_len{%d}\n", spi_dev.rev_len);
				//*rx_len = 0;
				iowrite32(0x01<<6, gpio2_vaddr + GPIO_CLEARDATAOUT);
			}
			*rx_len = spi_dev.rev_len;
			//printk(KERN_ERR "tx_len %d\n", i);
		}
		else
		{
Code Example #11
File: edac_pci_sysfs.c  Project: 020gzh/linux
/*
 *  PCI Parity polling
 *
 *	Function to retrieve the current parity status
 *	and decode it
 *
 */
static void edac_pci_dev_parity_test(struct pci_dev *dev)
{
	unsigned long flags;
	u16 status;
	u8 header_type;

	/* stop any interrupts until we can acquire the status */
	local_irq_save(flags);

	/* read the STATUS register on this device */
	status = get_pci_parity_status(dev, 0);

	/* read the device TYPE, looking for bridges */
	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);

	local_irq_restore(flags);

	edac_dbg(4, "PCI STATUS= 0x%04x %s\n", status, dev_name(&dev->dev));

	/* check the status reg for errors on boards NOT marked as broken
	 * if broken, we cannot trust any of the status bits
	 */
	if (status && !dev->broken_parity_status) {
		if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
			edac_printk(KERN_CRIT, EDAC_PCI,
				"Signaled System Error on %s\n",
				pci_name(dev));
			atomic_inc(&pci_nonparity_count);
		}

		if (status & (PCI_STATUS_PARITY)) {
			edac_printk(KERN_CRIT, EDAC_PCI,
				"Master Data Parity Error on %s\n",
				pci_name(dev));

			atomic_inc(&pci_parity_count);
		}

		if (status & (PCI_STATUS_DETECTED_PARITY)) {
			edac_printk(KERN_CRIT, EDAC_PCI,
				"Detected Parity Error on %s\n",
				pci_name(dev));

			atomic_inc(&pci_parity_count);
		}
	}


	edac_dbg(4, "PCI HEADER TYPE= 0x%02x %s\n",
		 header_type, dev_name(&dev->dev));

	if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
		/* On bridges, need to examine secondary status register  */
		status = get_pci_parity_status(dev, 1);

		edac_dbg(4, "PCI SEC_STATUS= 0x%04x %s\n",
			 status, dev_name(&dev->dev));

		/* check the secondary status reg for errors,
		 * on NOT broken boards
		 */
		if (status && !dev->broken_parity_status) {
			if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
				edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
					"Signaled System Error on %s\n",
					pci_name(dev));
				atomic_inc(&pci_nonparity_count);
			}

			if (status & (PCI_STATUS_PARITY)) {
				edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
					"Master Data Parity Error on "
					"%s\n", pci_name(dev));

				atomic_inc(&pci_parity_count);
			}

			if (status & (PCI_STATUS_DETECTED_PARITY)) {
				edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
					"Detected Parity Error on %s\n",
					pci_name(dev));

				atomic_inc(&pci_parity_count);
			}
		}
	}
}
Code Example #12
int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
		unsigned long delta_ns, const enum hrtimer_mode mode,
		int wakeup)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret, leftmost;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

	if (mode & HRTIMER_MODE_REL) {
		tim = ktime_add_safe(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add_safe(tim, base->resolution);
#endif
	}

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	timer_stats_hrtimer_set_start_info(timer);

	leftmost = enqueue_hrtimer(timer, new_base);

	/*
	 * Only allow reprogramming if the new base is on this CPU.
	 * (it might still be on another CPU if the timer was pending)
	 *
	 * XXX send_remote_softirq() ?
	 */
	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
		&& hrtimer_enqueue_reprogram(timer, new_base)) {
		if (wakeup) {
			/*
			 * We need to drop cpu_base->lock to avoid a
			 * lock ordering issue vs. rq->lock.
			 */
			raw_spin_unlock(&new_base->cpu_base->lock);
			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
			local_irq_restore(flags);
			return ret;
		} else {
			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
		}
	}

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
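
__hrtimer_start_range_ns() is the internal worker; drivers normally reach it through the public hrtimer API. A hedged usage sketch (the callback and the 100 ms value are invented for illustration):

/* Sketch: arm a one-shot hrtimer that fires 100 ms from now. */
static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	pr_info("demo timer fired\n");
	return HRTIMER_NORESTART;	/* one-shot: do not re-arm */
}

static void demo_timer_arm(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_fn;
	hrtimer_start(&demo_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}
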
Code Example #13
static int tlb_seq_show(struct seq_file *file, void *iter)
{
	unsigned int tlb_type = (unsigned int)file->private;
	unsigned long addr1, addr2, data1, data2;
	unsigned long flags;
	unsigned long mmucr;
	unsigned int nentries, entry;
	unsigned int urb;

	mmucr = __raw_readl(MMUCR);
	if ((mmucr & 0x1) == 0) {
		seq_printf(file, "address translation disabled\n");
		return 0;
	}

	if (tlb_type == TLB_TYPE_ITLB) {
		addr1 = MMU_ITLB_ADDRESS_ARRAY;
		addr2 = MMU_ITLB_ADDRESS_ARRAY2;
		data1 = MMU_ITLB_DATA_ARRAY;
		data2 = MMU_ITLB_DATA_ARRAY2;
		nentries = 4;
	} else {
		addr1 = MMU_UTLB_ADDRESS_ARRAY;
		addr2 = MMU_UTLB_ADDRESS_ARRAY2;
		data1 = MMU_UTLB_DATA_ARRAY;
		data2 = MMU_UTLB_DATA_ARRAY2;
		nentries = 64;
	}

	local_irq_save(flags);
	jump_to_uncached();

	urb = (mmucr & MMUCR_URB) >> MMUCR_URB_SHIFT;

	/* Make the "entry >= urb" test fail. */
	if (urb == 0)
		urb = MMUCR_URB_NENTRIES + 1;

	seq_printf(file, "entry:     vpn        ppn     asid  size valid wired\n");

	for (entry = 0; entry < nentries; entry++) {
		unsigned long vpn, ppn, asid, size;
		unsigned long valid;
		unsigned long val;
		const char *sz = "    ?";
		int i;

		val = __raw_readl(addr1 | (entry << MMU_TLB_ENTRY_SHIFT));
		ctrl_barrier();
		vpn = val & 0xfffffc00;
		valid = val & 0x100;

		val = __raw_readl(addr2 | (entry << MMU_TLB_ENTRY_SHIFT));
		ctrl_barrier();
		asid = val & MMU_CONTEXT_ASID_MASK;

		val = __raw_readl(data1 | (entry << MMU_TLB_ENTRY_SHIFT));
		ctrl_barrier();
		ppn = (val & 0x0ffffc00) << 4;

		val = __raw_readl(data2 | (entry << MMU_TLB_ENTRY_SHIFT));
		ctrl_barrier();
		size = (val & 0xf0) >> 4;

		for (i = 0; i < ARRAY_SIZE(tlb_sizes); i++) {
			if (tlb_sizes[i].bits == size)
				break;
		}

		if (i != ARRAY_SIZE(tlb_sizes))
			sz = tlb_sizes[i].size;

		seq_printf(file, "%2d:    0x%08lx 0x%08lx %5lu %s   %s     %s\n",
			   entry, vpn, ppn, asid,
			   sz, valid ? "V" : "-",
			   (urb <= entry) ? "W" : "-");
	}

	back_to_cached();
	local_irq_restore(flags);

	return 0;
}
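
A show routine with this signature is typically exposed through debugfs via the seq_file single_open() helpers; the hookup below is a sketch of that common pattern (the open function and ops symbol names are assumptions):

/* Sketch: expose tlb_seq_show() through single_open()/seq_file. */
static int tlb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, tlb_seq_show, inode->i_private);
}

static const struct file_operations tlb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= tlb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
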
Code Example #14
static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
			  int exception_state)
{
	unsigned long flags;
	int sstep_tries = 100;
	int error;
	int cpu;
	int trace_on = 0;
	int online_cpus = num_online_cpus();

	kgdb_info[ks->cpu].enter_kgdb++;
	kgdb_info[ks->cpu].exception_state |= exception_state;

	if (exception_state == DCPU_WANT_MASTER)
		atomic_inc(&masters_in_kgdb);
	else
		atomic_inc(&slaves_in_kgdb);

	if (arch_kgdb_ops.disable_hw_break)
		arch_kgdb_ops.disable_hw_break(regs);

acquirelock:
	/*
	 * Interrupts will be restored by the 'trap return' code, except when
	 * single stepping.
	 */
	local_irq_save(flags);

	cpu = ks->cpu;
	kgdb_info[cpu].debuggerinfo = regs;
	kgdb_info[cpu].task = current;
	kgdb_info[cpu].ret_state = 0;
	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;

	/* Make sure the above info reaches the primary CPU */
	smp_mb();

	if (exception_level == 1) {
		if (raw_spin_trylock(&dbg_master_lock))
			atomic_xchg(&kgdb_active, cpu);
		goto cpu_master_loop;
	}

	/*
	 * CPU will loop if it is a slave or request to become a kgdb
	 * master cpu and acquire the kgdb_active lock:
	 */
	while (1) {
cpu_loop:
		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
			goto cpu_master_loop;
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
			if (raw_spin_trylock(&dbg_master_lock)) {
				atomic_xchg(&kgdb_active, cpu);
				break;
			}
		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
			if (!raw_spin_is_locked(&dbg_slave_lock))
				goto return_normal;
		} else {
return_normal:
			/* Return to normal operation by executing any
			 * hw breakpoint fixup.
			 */
			if (arch_kgdb_ops.correct_hw_break)
				arch_kgdb_ops.correct_hw_break();
			if (trace_on)
				tracing_on();
			kgdb_info[cpu].exception_state &=
				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
			kgdb_info[cpu].enter_kgdb--;
			smp_mb__before_atomic_dec();
			atomic_dec(&slaves_in_kgdb);
			dbg_touch_watchdogs();
			local_irq_restore(flags);
			return 0;
		}
		cpu_relax();
	}

	/*
	 * For single stepping, try to only enter on the processor
	 * that was single stepping. To guard against a deadlock, the
	 * kernel will only try for the value of sstep_tries before
	 * giving up and continuing on.
	 */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
	    (kgdb_info[cpu].task &&
	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
		atomic_set(&kgdb_active, -1);
		raw_spin_unlock(&dbg_master_lock);
		dbg_touch_watchdogs();
		local_irq_restore(flags);

		goto acquirelock;
	}

	if (!kgdb_io_ready(1)) {
		kgdb_info[cpu].ret_state = 1;
		goto kgdb_restore; /* No I/O connection, resume the system */
	}

	/*
	 * Don't enter if we have hit a removed breakpoint.
	 */
	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
		goto kgdb_restore;

	/* Call the I/O driver's pre_exception routine */
	if (dbg_io_ops->pre_exception)
		dbg_io_ops->pre_exception();

	/*
	 * Get the passive CPU lock which will hold all the non-primary
	 * CPU in a spin state while the debugger is active
	 */
	if (!kgdb_single_step)
		raw_spin_lock(&dbg_slave_lock);

#ifdef CONFIG_SMP
	/* Signal the other CPUs to enter kgdb_wait() */
	if ((!kgdb_single_step) && kgdb_do_roundup)
		kgdb_roundup_cpus(flags);
#endif

	/*
	 * Wait for the other CPUs to be notified and be waiting for us:
	 */
	while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
				atomic_read(&slaves_in_kgdb)) != online_cpus)
		cpu_relax();

	/*
	 * At this point the primary processor is completely
	 * in the debugger and all secondary CPUs are quiescent
	 */
	dbg_deactivate_sw_breakpoints();
	kgdb_single_step = 0;
	kgdb_contthread = current;
	exception_level = 0;
	trace_on = tracing_is_on();
	if (trace_on)
		tracing_off();

	while (1) {
cpu_master_loop:
		if (dbg_kdb_mode) {
			kgdb_connected = 1;
			error = kdb_stub(ks);
			if (error == -1)
				continue;
			kgdb_connected = 0;
		} else {
			error = gdb_serial_stub(ks);
		}

		if (error == DBG_PASS_EVENT) {
			dbg_kdb_mode = !dbg_kdb_mode;
		} else if (error == DBG_SWITCH_CPU_EVENT) {
			kgdb_info[dbg_switch_cpu].exception_state |=
				DCPU_NEXT_MASTER;
			goto cpu_loop;
		} else {
			kgdb_info[cpu].ret_state = error;
			break;
		}
	}

	/* Call the I/O driver's post_exception routine */
	if (dbg_io_ops->post_exception)
		dbg_io_ops->post_exception();

	if (!kgdb_single_step) {
		raw_spin_unlock(&dbg_slave_lock);
		/* Wait till all the CPUs have quit from the debugger. */
		while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
			cpu_relax();
	}

kgdb_restore:
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
		if (kgdb_info[sstep_cpu].task)
			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
		else
			kgdb_sstep_pid = 0;
	}
	if (arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	if (trace_on)
		tracing_on();

	kgdb_info[cpu].exception_state &=
		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
	kgdb_info[cpu].enter_kgdb--;
	smp_mb__before_atomic_dec();
	atomic_dec(&masters_in_kgdb);
	/* Free kgdb_active */
	atomic_set(&kgdb_active, -1);
	raw_spin_unlock(&dbg_master_lock);
	dbg_touch_watchdogs();
	local_irq_restore(flags);

	return kgdb_info[cpu].ret_state;
}
Code Example #15
/*balong v8r1 soc related function, to enable slave power down*/
void platform_cpu_power_down(int cpu)
{
    unsigned int reg, cnt = 0;
    unsigned int tmp = 0;
    unsigned long flag = 0;
    acpu_core_sc_stru *acpu_core_sc;

    if (cpu >= (sizeof(g_acpu_core_sc_baseaddr) / sizeof(g_acpu_core_sc_baseaddr[0])))
    {
        printk(KERN_ERR "%s : cpu id:%d supports max:%lu.\n", __FUNCTION__, cpu,
               (sizeof(g_acpu_core_sc_baseaddr) / sizeof(g_acpu_core_sc_baseaddr[0])));
        return;
    }

    acpu_core_sc = (acpu_core_sc_stru *)g_acpu_core_sc_baseaddr[cpu];

    if(NULL == acpu_core_sc)
    {
        printk(KERN_ERR"%s : error acore(%d) virtual is NULL!!!.\n", __FUNCTION__, cpu);
        return;
    }

    tmp = readl((unsigned long)&(acpu_core_sc->acpu_sc_isostat)) & 0x1;
    if(0 != tmp)
    {
        printk(KERN_ERR"%s: cpu:%d already power down.\n",__FUNCTION__,cpu);
        return;
    }

    /*make sure cpu in wfi status*/
    do {
        reg = readl((unsigned long)&(acpu_core_sc->acpu_sc_stat));
        //printk(KERN_ERR"%s : cpu %d stat %d addr:0x%x.\n", __FUNCTION__, cpu, reg, (unsigned int)&(acpu_core_sc->acpu_sc_stat));
        if (reg & BIT(0x1)) {
            break;
        }

        //msleep(1);
        if (cnt++ > 5000) {
            printk(KERN_ERR"%s : cpu %d not in wfi state.\n", __FUNCTION__, cpu);
            return;
        }
    }while(1);
    local_irq_save(flag);

    /* Additionally perform the operation that shuts down the HPM */
    writel(0x1, (unsigned long)&(acpu_core_sc->acpu_sc_isoen));
    do{
        tmp = readl((unsigned long)&(acpu_core_sc->acpu_sc_isostat)) & 0x1;
     } while (0x1 != tmp);

    /* The reset signals can all be asserted together */
    writel(0x1F, (unsigned long)&(acpu_core_sc->acpu_sc_rsten));
    do{
        tmp = readl((unsigned long)&(acpu_core_sc->acpu_sc_rststat)) & 0x1F;
     } while (0x1F != tmp);

    writel(0x7, (unsigned long)&(acpu_core_sc->acpu_sc_clkdis));
    do{
        tmp = readl((unsigned long)&(acpu_core_sc->acpu_sc_clkstat)) & 0x7;
     } while (0x0 != tmp);

    writel(0x1, (unsigned long)&(acpu_core_sc->acpu_sc_mtcmos_dis));
    do{
        tmp = readl((unsigned long)&(acpu_core_sc->acpu_sc_mtcmos_timer_state)) & 0x1;
     } while (0x0 != tmp);
    g_acpu_core_hotplug_stat[cpu]=1;
    local_irq_restore(flag);

    return;
}
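
Note that only the initial WFI poll above is bounded; the isolation, reset, clock and MTCMOS polls spin forever if the hardware never acknowledges. A defensive variant could bound them with a helper like this sketch (the helper name and tries parameter are invented):

/* Sketch: bounded MMIO poll; 0 on success, -ETIMEDOUT if the bits never match. */
static int poll_reg_eq(void __iomem *reg, unsigned int mask,
		       unsigned int want, unsigned int max_tries)
{
	unsigned int i;

	for (i = 0; i < max_tries; i++) {
		if ((readl(reg) & mask) == want)
			return 0;
		cpu_relax();
	}
	return -ETIMEDOUT;
}
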
Code Example #16
/*
 * Copy memory by briefly enabling incoherent cacheline-at-a-time mode.
 *
 * We set up our own source and destination PTEs that we fully control.
 * This is the only way to guarantee that we don't race with another
 * thread that is modifying the PTE; we can't afford to try the
 * copy_{to,from}_user() technique of catching the interrupt, since
 * we must run with interrupts disabled to avoid the risk of some
 * other code seeing the incoherent data in our cache.  (Recall that
 * our cache is indexed by PA, so even if the other code doesn't use
 * our KM_MEMCPY virtual addresses, they'll still hit in cache using
 * the normal VAs that aren't supposed to hit in cache.)
 */
static void memcpy_multicache(void *dest, const void *source,
			      pte_t dst_pte, pte_t src_pte, int len)
{
	int idx;
	unsigned long flags, newsrc, newdst;
	pmd_t *pmdp;
	pte_t *ptep;
	int cpu = get_cpu();

	/*
	 * Disable interrupts so that we don't recurse into memcpy()
	 * in an interrupt handler, nor accidentally reference
	 * the PA of the source from an interrupt routine.  Also
	 * notify the simulator that we're playing games so we don't
	 * generate spurious coherency warnings.
	 */
	local_irq_save(flags);
	sim_allow_multiple_caching(1);

	/* Set up the new dest mapping */
	idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + KM_MEMCPY0;
	newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1));
	pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst);
	ptep = pte_offset_kernel(pmdp, newdst);
	if (pte_val(*ptep) != pte_val(dst_pte)) {
		set_pte(ptep, dst_pte);
		local_flush_tlb_page(NULL, newdst, PAGE_SIZE);
	}

	/* Set up the new source mapping */
	idx += (KM_MEMCPY0 - KM_MEMCPY1);
	src_pte = hv_pte_set_nc(src_pte);
	src_pte = hv_pte_clear_writable(src_pte);  /* be paranoid */
	newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1));
	pmdp = pmd_offset(pud_offset(pgd_offset_k(newsrc), newsrc), newsrc);
	ptep = pte_offset_kernel(pmdp, newsrc);
	*ptep = src_pte;   /* set_pte() would be confused by this */
	local_flush_tlb_page(NULL, newsrc, PAGE_SIZE);

	/* Actually move the data. */
	__memcpy_asm((void *)newdst, (const void *)newsrc, len);

	/*
	 * Remap the source as locally-cached and not OLOC'ed so that
	 * we can inval without also invaling the remote cpu's cache.
	 * This also avoids known errata with inv'ing cacheable oloc data.
	 */
	src_pte = hv_pte_set_mode(src_pte, HV_PTE_MODE_CACHE_NO_L3);
	src_pte = hv_pte_set_writable(src_pte); /* need write access for inv */
	*ptep = src_pte;   /* set_pte() would be confused by this */
	local_flush_tlb_page(NULL, newsrc, PAGE_SIZE);

	/*
	 * Do the actual invalidation, covering the full L2 cache line
	 * at the end since __memcpy_asm() is somewhat aggressive.
	 */
	__inv_buffer((void *)newsrc, len);

	/*
	 * We're done: notify the simulator that all is back to normal,
	 * and re-enable interrupts and pre-emption.
	 */
	sim_allow_multiple_caching(0);
	local_irq_restore(flags);
	put_cpu();
}
Code Example #17
File: s3c-i2s-v2.c  Project: 12rafael/jellytimekernel
static int s3c2412_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
			       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct s3c_i2sv2_info *i2s = to_info(rtd->dai->cpu_dai);
	int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
	unsigned long irqs;
	int ret = 0;
	struct s3c_dma_params *dma_data =
		snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream);

	pr_debug("Entered %s\n", __func__);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		/* On start, ensure that the FIFOs are cleared and reset. */

		writel(capture ? S3C2412_IISFIC_RXFLUSH : S3C2412_IISFIC_TXFLUSH,
		       i2s->regs + S3C2412_IISFIC);

		/* clear again, just in case */
		writel(0x0, i2s->regs + S3C2412_IISFIC);

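		/* fall through: START also runs the RESUME start-up path below */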
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		if (!i2s->master) {
			ret = s3c2412_snd_lrsync(i2s);
			if (ret)
				goto exit_err;
		}

		local_irq_save(irqs);

		if (capture)
			s3c2412_snd_rxctrl(i2s, 1);
		else
			s3c2412_snd_txctrl(i2s, 1);

		local_irq_restore(irqs);

		/*
		 * Load the next buffer to DMA to meet the requirement
		 * of the auto reload mechanism of S3C24XX.
		 * This call won't bother S3C64XX.
		 */
		s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);

		break;

	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		local_irq_save(irqs);

		if (capture)
			s3c2412_snd_rxctrl(i2s, 0);
		else
			s3c2412_snd_txctrl(i2s, 0);

		local_irq_restore(irqs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

exit_err:
	return ret;
}
Code Example #18
static int
jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state)
{
	unsigned int pa_dwr_mask, pa_dwr_set;
	int ret;

	printk(KERN_INFO "%s(): config socket %d vcc %d vpp %d\n", __func__,
		skt->nr, state->Vcc, state->Vpp);

	switch (skt->nr) {
	case 0:
		pa_dwr_mask = SOCKET0_POWER | SOCKET0_3V;

		switch (state->Vcc) {
		default:
		case  0:
			pa_dwr_set = 0;
			break;
		case 33:
			pa_dwr_set = SOCKET0_POWER | SOCKET0_3V;
			break;
		case 50:
			pa_dwr_set = SOCKET0_POWER;
			break;
		}
		break;

	case 1:
		pa_dwr_mask = SOCKET1_POWER;

		switch (state->Vcc) {
		default:
		case 0:
			pa_dwr_set = 0;
			break;
		case 33:
			pa_dwr_set = SOCKET1_POWER;
			break;
		case 50:
			pa_dwr_set = SOCKET1_POWER;
			break;
		}
		break;

	default:
		return -1;
	}

	if (state->Vpp != state->Vcc && state->Vpp != 0) {
		printk(KERN_ERR "%s(): slot cannot support VPP %u\n",
			__func__, state->Vpp);
		return -EPERM;
	}

	ret = sa1111_pcmcia_configure_socket(skt, state);
	if (ret == 0) {
		unsigned long flags;

		local_irq_save(flags);
		sa1111_set_io(SA1111_DEV(skt->dev), pa_dwr_mask, pa_dwr_set);
		local_irq_restore(flags);
	}

	return ret;
}
Code Example #19
/* Probe for the CS8900 card in slot E.  We won't bother looking
   anywhere else until we have a really good reason to do so. */
struct net_device * __init mac89x0_probe(int unit)
{
	struct net_device *dev;
	static int once_is_enough;
	struct net_local *lp;
	static unsigned version_printed;
	int i, slot;
	unsigned rev_type = 0;
	unsigned long ioaddr;
	unsigned short sig;
	int err = -ENODEV;

	dev = alloc_etherdev(sizeof(struct net_local));
	if (!dev)
		return ERR_PTR(-ENOMEM);

	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
	}

	SET_MODULE_OWNER(dev);

	if (once_is_enough)
		goto out;
	once_is_enough = 1;

	/* We might have to parameterize this later */
	slot = 0xE;
	/* Get out now if there's a real NuBus card in slot E */
	if (nubus_find_slot(slot, NULL) != NULL)
		goto out;

	/* The pseudo-ISA bits always live at offset 0x300 (gee,
           wonder why...) */
	ioaddr = (unsigned long)
		nubus_slot_addr(slot) | (((slot&0xf) << 20) + DEFAULTIOBASE);
	{
		unsigned long flags;
		int card_present;

		local_irq_save(flags);
		card_present = hwreg_present((void*) ioaddr+4)
		  && hwreg_present((void*) ioaddr + DATA_PORT);
		local_irq_restore(flags);

		if (!card_present)
			goto out;
	}

	nubus_writew(0, ioaddr + ADD_PORT);
	sig = nubus_readw(ioaddr + DATA_PORT);
	if (sig != swab16(CHIP_EISA_ID_SIG))
		goto out;

	/* Initialize the net_device structure. */
	lp = netdev_priv(dev);

	/* Fill in the 'dev' fields. */
	dev->base_addr = ioaddr;
	dev->mem_start = (unsigned long)
		nubus_slot_addr(slot) | (((slot&0xf) << 20) + MMIOBASE);
	dev->mem_end = dev->mem_start + 0x1000;

	/* Turn on shared memory */
	writereg_io(dev, PP_BusCTL, MEMORY_ON);

	/* get the chip type */
	rev_type = readreg(dev, PRODUCT_ID_ADD);
	lp->chip_type = rev_type &~ REVISON_BITS;
	lp->chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A';

	/* Check the chip type and revision in order to set the correct send
	 * command: CS8920 revision C and CS8900 revision F can use the faster
	 * send. */
	lp->send_cmd = TX_AFTER_381;
	if (lp->chip_type == CS8900 && lp->chip_revision >= 'F')
		lp->send_cmd = TX_NOW;
	if (lp->chip_type != CS8900 && lp->chip_revision >= 'C')
		lp->send_cmd = TX_NOW;

	if (net_debug && version_printed++ == 0)
		printk(version);

	printk(KERN_INFO "%s: cs89%c0%s rev %c found at %#8lx",
	       dev->name,
	       lp->chip_type==CS8900?'0':'2',
	       lp->chip_type==CS8920M?"M":"",
	       lp->chip_revision,
	       dev->base_addr);

	/* Try to read the MAC address */
	if ((readreg(dev, PP_SelfST) & (EEPROM_PRESENT | EEPROM_OK)) == 0) {
		printk("\nmac89x0: No EEPROM, giving up now.\n");
		goto out1;
	} else {
		for (i = 0; i < ETH_ALEN; i += 2) {
			/* Big-endian (why??!) */
			unsigned short s = readreg(dev, PP_IA + i);
			dev->dev_addr[i] = s >> 8;
			dev->dev_addr[i+1] = s & 0xff;
		}
	}

	dev->irq = SLOT2IRQ(slot);
	printk(" IRQ %d ADDR ", dev->irq);

	/* print the ethernet address. */
	for (i = 0; i < ETH_ALEN; i++)
		printk("%2.2x%s", dev->dev_addr[i],
		       ((i < ETH_ALEN-1) ? ":" : ""));
	printk("\n");

	dev->open		= net_open;
	dev->stop		= net_close;
	dev->hard_start_xmit = net_send_packet;
	dev->get_stats	= net_get_stats;
	dev->set_multicast_list = &set_multicast_list;
	dev->set_mac_address = &set_mac_address;

	err = register_netdev(dev);
	if (err)
		goto out1;
	return dev;
out1:
	nubus_writew(0, dev->base_addr + ADD_PORT);
out:
	free_netdev(dev);
	return ERR_PTR(err);
}
Code Example #20
static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
{
	unsigned long flags;
	struct sas_task *task;
	struct scatterlist *sg;
	int ret = AC_ERR_SYSTEM;
	unsigned int si, xfer = 0;
	struct ata_port *ap = qc->ap;
	struct domain_device *dev = ap->private_data;
	struct sas_ha_struct *sas_ha = dev->port->ha;
	struct Scsi_Host *host = sas_ha->core.shost;
	struct sas_internal *i = to_sas_internal(host->transportt);

	/* TODO: audit callers to ensure they are ready for qc_issue to
	 * unconditionally re-enable interrupts
	 */
	local_irq_save(flags);
	spin_unlock(ap->lock);

	/* If the device fell off, no sense in issuing commands */
	if (test_bit(SAS_DEV_GONE, &dev->state))
		goto out;

	task = sas_alloc_task(GFP_ATOMIC);
	if (!task)
		goto out;
	task->dev = dev;
	task->task_proto = SAS_PROTOCOL_STP;
	task->task_done = sas_ata_task_done;

	if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
	    qc->tf.command == ATA_CMD_FPDMA_READ) {
		/* Need to zero out the tag libata assigned us */
		qc->tf.nsect = 0;
	}

	ata_tf_to_fis(&qc->tf, 1, 0, (u8*)&task->ata_task.fis);
	task->uldd_task = qc;
	if (ata_is_atapi(qc->tf.protocol)) {
		memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
		task->total_xfer_len = qc->nbytes;
		task->num_scatter = qc->n_elem;
	} else {
		for_each_sg(qc->sg, sg, qc->n_elem, si)
			xfer += sg->length;

		task->total_xfer_len = xfer;
		task->num_scatter = si;
	}

	task->data_dir = qc->dma_dir;
	task->scatter = qc->sg;
	task->ata_task.retry_count = 1;
	task->task_state_flags = SAS_TASK_STATE_PENDING;
	qc->lldd_task = task;

	switch (qc->tf.protocol) {
	case ATA_PROT_NCQ:
		task->ata_task.use_ncq = 1;
		/* fall through */
	case ATAPI_PROT_DMA:
	case ATA_PROT_DMA:
		task->ata_task.dma_xfer = 1;
		break;
	}

	if (qc->scsicmd)
		ASSIGN_SAS_TASK(qc->scsicmd, task);

	if (sas_ha->lldd_max_execute_num < 2)
		ret = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
	else
		ret = sas_queue_up(task);

	/* Examine */
	if (ret) {
		SAS_DPRINTK("lldd_execute_task returned: %d\n", ret);

		if (qc->scsicmd)
			ASSIGN_SAS_TASK(qc->scsicmd, NULL);
		sas_free_task(task);
		ret = AC_ERR_SYSTEM;
	}

 out:
	spin_lock(ap->lock);
	local_irq_restore(flags);
	return ret;
}
Code Example #21
File: rtc.c  Project: 12rafael/jellytimekernel
static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
	unsigned char msr;
	unsigned long flags;
	struct rtc_time wtime;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case RTC_RD_TIME:	/* Read the time/date from RTC	*/
	{
		local_irq_save(flags);
		/* Ensure clock and real-time-mode-register are accessible */
		msr = rtc->msr & 0xc0;
		rtc->msr = 0x40;
		memset(&wtime, 0, sizeof(struct rtc_time));
		do {
			wtime.tm_sec =  bcd2bin(rtc->bcd_sec);
			wtime.tm_min =  bcd2bin(rtc->bcd_min);
			wtime.tm_hour = bcd2bin(rtc->bcd_hr);
			wtime.tm_mday =  bcd2bin(rtc->bcd_dom);
			wtime.tm_mon =  bcd2bin(rtc->bcd_mth)-1;
			wtime.tm_year = bcd2bin(rtc->bcd_year);
			if (wtime.tm_year < 70)
				wtime.tm_year += 100;
			wtime.tm_wday = bcd2bin(rtc->bcd_dow)-1;
		} while (wtime.tm_sec != bcd2bin(rtc->bcd_sec));
		rtc->msr = msr;
		local_irq_restore(flags);
		return copy_to_user(argp, &wtime, sizeof wtime) ?
								-EFAULT : 0;
	}
	case RTC_SET_TIME:	/* Set the RTC */
	{
		struct rtc_time rtc_tm;
		unsigned char mon, day, hrs, min, sec, leap_yr;
		unsigned int yrs;

		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;

		if (copy_from_user(&rtc_tm, argp, sizeof(struct rtc_time)))
			return -EFAULT;

		yrs = rtc_tm.tm_year;
		if (yrs < 1900)
			yrs += 1900;
		mon = rtc_tm.tm_mon + 1;   /* tm_mon starts at zero */
		day = rtc_tm.tm_mday;
		hrs = rtc_tm.tm_hour;
		min = rtc_tm.tm_min;
		sec = rtc_tm.tm_sec;

		leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));

		if ((mon > 12) || (mon < 1) || (day == 0))
			return -EINVAL;

		if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
			return -EINVAL;

		if ((hrs >= 24) || (min >= 60) || (sec >= 60))
			return -EINVAL;

		if (yrs >= 2070)
			return -EINVAL;

		local_irq_save(flags);
		/* Ensure clock and real-time-mode-register are accessible */
		msr = rtc->msr & 0xc0;
		rtc->msr = 0x40;

		rtc->t0cr_rtmr = yrs%4;
		rtc->bcd_tenms = 0;
		rtc->bcd_sec   = bin2bcd(sec);
		rtc->bcd_min   = bin2bcd(min);
		rtc->bcd_hr    = bin2bcd(hrs);
		rtc->bcd_dom   = bin2bcd(day);
		rtc->bcd_mth   = bin2bcd(mon);
		rtc->bcd_year  = bin2bcd(yrs%100);
		if (rtc_tm.tm_wday >= 0)
			rtc->bcd_dow = bin2bcd(rtc_tm.tm_wday+1);
		rtc->t0cr_rtmr = yrs%4 | 0x08;

		rtc->msr = msr;
		local_irq_restore(flags);
		return 0;
	}
	default:
		return -EINVAL;
	}
}
Code Example #22
File: sync-rlx.c  Project: LXiong/openwrt-rtk
void __cpuinit synchronise_count_master(int cpu)
{
	int i;
	unsigned long flags;
	unsigned int initcount;

	printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);

	local_irq_save(flags);

	/*
	 * Notify the slaves that it's time to start
	 */
	atomic_set(&count_reference, read_c0_count());
	atomic_set(&count_start_flag, cpu);
	smp_wmb();

	/* Count will be initialised to current timer for all CPU's */
	initcount = read_c0_count();

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronised and
	 * the master and slaves each set their cycle counters to a known
	 * value all at once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */

	for (i = 0; i < NR_LOOPS; i++) {
		/* slaves loop on '!= 2' */
		while (atomic_read(&count_count_start) != 1)
			mb();
		atomic_set(&count_count_stop, 0);
		smp_wmb();

		/* this lets the slaves write their count register */
		atomic_inc(&count_count_start);

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		/*
		 * Wait for all slaves to leave the synchronization point:
		 */
		while (atomic_read(&count_count_stop) != 1)
			mb();
		atomic_set(&count_count_start, 0);
		smp_wmb();
		atomic_inc(&count_count_stop);
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);
	atomic_set(&count_start_flag, 0);

	local_irq_restore(flags);

	/*
	 * i386 code reported the skew here, but the
	 * count registers were almost certainly out of sync
	 * so no point in alarming people
	 */
	printk("done.\n");
}
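
The master's handshake only works together with a matching slave. Inferred from the master's own comments ("slaves loop on '!= 2'"), a simplified slave-side sketch for the two-CPU case might look like this (an illustration only, not the project's actual synchronise_count_slave()):

/* Sketch: slave side of the count_count_start/count_count_stop handshake. */
void synchronise_count_slave_sketch(int cpu)
{
	int i;
	unsigned int initcount;

	/* Wait until the master names this CPU in count_start_flag. */
	while (atomic_read(&count_start_flag) != cpu)
		mb();

	/* Count is initialised to the master's reference value. */
	initcount = atomic_read(&count_reference);

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != 2)
			mb();

		/* Everyone writes the counter in the last pass. */
		if (i == NR_LOOPS - 1)
			write_c0_count(initcount);

		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != 2)
			mb();
	}

	/* Arrange for a timer interrupt in a short while, like the master. */
	write_c0_compare(read_c0_count() + COUNTON);
}
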
Code Example #23
static int vpif_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{

	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	struct v4l2_buffer tbuf = *buf;
	struct videobuf_buffer *buf1;
	unsigned long addr = 0;
	unsigned long flags;
	int ret = 0;

	if (common->fmt.type != tbuf.type)
		return -EINVAL;

	if (!fh->io_allowed[VPIF_VIDEO_INDEX]) {
		vpif_err("fh->io_allowed\n");
		return -EACCES;
	}

	if (!(list_empty(&common->dma_queue)) ||
	    (common->cur_frm != common->next_frm) ||
	    !(common->started) ||
	    (common->started && (0 == ch->field_id)))
		return videobuf_qbuf(&common->buffer_queue, buf);

	/* buffer queue is empty; store the buffer address in the VPIF registers */
	mutex_lock(&common->buffer_queue.vb_lock);
	buf1 = common->buffer_queue.bufs[tbuf.index];
	if (buf1->memory != tbuf.memory) {
		vpif_err("invalid buffer type\n");
		goto qbuf_exit;
	}

	if ((buf1->state == VIDEOBUF_QUEUED) ||
	    (buf1->state == VIDEOBUF_ACTIVE)) {
		vpif_err("invalid state\n");
		goto qbuf_exit;
	}

	switch (buf1->memory) {
	case V4L2_MEMORY_MMAP:
		if (buf1->baddr == 0)
			goto qbuf_exit;
		break;

	case V4L2_MEMORY_USERPTR:
		if (tbuf.length < buf1->bsize)
			goto qbuf_exit;

		if ((VIDEOBUF_NEEDS_INIT != buf1->state)
			    && (buf1->baddr != tbuf.m.userptr)) {
			vpif_buffer_release(&common->buffer_queue, buf1);
			buf1->baddr = tbuf.m.userptr;
		}
		break;

	default:
		goto qbuf_exit;
	}

	local_irq_save(flags);
	ret = vpif_buffer_prepare(&common->buffer_queue, buf1,
					common->buffer_queue.field);
	if (ret < 0) {
		local_irq_restore(flags);
		goto qbuf_exit;
	}

	buf1->state = VIDEOBUF_ACTIVE;
	addr = buf1->boff;
	common->next_frm = buf1;
	if (tbuf.type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
		common->set_addr((addr + common->ytop_off),
				 (addr + common->ybtm_off),
				 (addr + common->ctop_off),
				 (addr + common->cbtm_off));
	}

	local_irq_restore(flags);
	list_add_tail(&buf1->stream, &common->buffer_queue.stream);
	mutex_unlock(&common->buffer_queue.vb_lock);
	return 0;

qbuf_exit:
	mutex_unlock(&common->buffer_queue.vb_lock);
	return -EINVAL;
}
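
For reference, the user-space call that lands in this handler is an ordinary VIDIOC_QBUF ioctl. A minimal sketch, with the buffer type and memory mode matching the MMAP path above (the file descriptor and index are illustrative):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Queue one previously mmap()ed buffer on an output stream. */
static int queue_buffer(int fd, unsigned int index)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;	/* VPIF display is an output device */
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = index;

	return ioctl(fd, VIDIOC_QBUF, &buf);	/* ends up in the driver's qbuf handler */
}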
コード例 #24
0
/* Change the current ring parameters, stopping the controller if
 * necessary so that we don't mess things up while we're in
 * motion.  We wait for the ring to be clean before reallocating
 * the rings. */
static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0, i = 0;

	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->rx_pending)) {
		printk(KERN_ERR "%s: Ring sizes must be a power of 2\n",
		       dev->name);
		return -EINVAL;
	}

	if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->tx_pending)) {
		printk(KERN_ERR "%s: Ring sizes must be a power of 2\n",
		       dev->name);
		return -EINVAL;
	}


	if (dev->flags & IFF_UP) {
		unsigned long flags;

		/* Halt TX and RX, and process the frames which
		 * have already been received */
		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt(dev);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		for (i = 0; i < priv->num_rx_queues; i++)
			gfar_clean_rx_ring(priv->rx_queue[i],
					priv->rx_queue[i]->rx_ring_size);

		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);
	}

	/* Change the size */
	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
		priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size;
	}

	/* Rebuild the rings with the new size */
	if (dev->flags & IFF_UP) {
		err = startup_gfar(dev);
		netif_tx_wake_all_queues(dev);
	}
	return err;
}
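
User space reaches this handler through the SIOCETHTOOL ioctl (what ethtool -G issues under the hood). A minimal sketch, assuming an already-open AF_INET socket and taking the interface name as a parameter:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Request 256-entry RX/TX rings; gianfar insists on powers of 2. */
static int set_ring_sizes(int sock, const char *ifname)
{
	struct ethtool_ringparam ering;
	struct ifreq ifr;

	memset(&ering, 0, sizeof(ering));
	ering.cmd = ETHTOOL_SRINGPARAM;
	ering.rx_pending = 256;
	ering.tx_pending = 256;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ering;

	return ioctl(sock, SIOCETHTOOL, &ifr);
}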
コード例 #25
0
ファイル: fasttimer.c プロジェクト: 0x0f/adam-kernel
static irqreturn_t
timer1_handler(int irq, void *dev_id)
{
  struct fast_timer *t;
  unsigned long flags;

	/* We keep interrupts disabled not only when we modify the
	 * fast timer list, but any time we hold a reference to a
	 * timer in the list, since del_fast_timer may be called
	 * from (another) interrupt context.  Thus, the only time
	 * when interrupts are enabled is when calling the timer
	 * callback function.
	 */
  local_irq_save(flags);

  /* Clear timer1 irq */
  *R_IRQ_MASK0_CLR = IO_STATE(R_IRQ_MASK0_CLR, timer1, clr);

  /* First stop timer, then ack interrupt */
  /* Stop timer */
  *R_TIMER_CTRL = r_timer_ctrl_shadow =
    (r_timer_ctrl_shadow & ~IO_MASK(R_TIMER_CTRL, tm1)) |
    IO_STATE(R_TIMER_CTRL, tm1, stop_ld);

  /* Ack interrupt */
  *R_TIMER_CTRL =  r_timer_ctrl_shadow | IO_STATE(R_TIMER_CTRL, i1, clr);

  fast_timer_running = 0;
  fast_timer_ints++;

  t = fast_timer_list;
  while (t)
  {
		struct fasttime_t tv;
		fast_timer_function_type *f;
		unsigned long d;

    /* Has it really expired? */
    do_gettimeofday_fast(&tv);
		D1(printk(KERN_DEBUG "t: %is %06ius\n",
			tv.tv_jiff, tv.tv_usec));

		if (fasttime_cmp(&t->tv_expires, &tv) <= 0)
    {
      /* Yes it has expired */
#ifdef FAST_TIMER_LOG
      timer_expired_log[fast_timers_expired % NUM_TIMER_STATS] = *t;
#endif
      fast_timers_expired++;

      /* Remove this timer before call, since it may reuse the timer */
      if (t->prev)
      {
        t->prev->next = t->next;
      }
      else
      {
        fast_timer_list = t->next;
      }
      if (t->next)
      {
        t->next->prev = t->prev;
      }
      t->prev = NULL;
      t->next = NULL;

			/* Save function callback data before enabling
			 * interrupts, since the timer may be removed and
			 * we don't know how it was allocated
			 * (e.g. ->function and ->data may become overwritten
			 * after deletion if the timer was stack-allocated).
			 */
			f = t->function;
			d = t->data;

			if (f != NULL) {
				/* Run callback with interrupts enabled. */
				local_irq_restore(flags);
				f(d);
				local_irq_save(flags);
			} else
        DEBUG_LOG("!timer1 %i function==NULL!\n", fast_timer_ints);
    }
    else
    {
      /* Timer is too early; set it again using the normal routines */
      D1(printk(".\n"));
    }

    if ((t = fast_timer_list) != NULL)
    {
      /* Start next timer.. */
			long us = 0;
			struct fasttime_t tv;

      do_gettimeofday_fast(&tv);

			/* time_after_eq takes care of wrapping */
			if (time_after_eq(t->tv_expires.tv_jiff, tv.tv_jiff))
				us = ((t->tv_expires.tv_jiff - tv.tv_jiff) *
					1000000 / HZ + t->tv_expires.tv_usec -
					tv.tv_usec);

      if (us > 0)
      {
        if (!fast_timer_running)
        {
#ifdef FAST_TIMER_LOG
          timer_started_log[fast_timers_started % NUM_TIMER_STATS] = *t;
#endif
          start_timer1(us);
        }
        break;
      }
      else
      {
        /* Timer already expired, let's handle it better late than never.
         * The normal loop handles it
         */
        D1(printk("e! %d\n", us));
      }
    }
  }

	local_irq_restore(flags);

  if (!t)
  {
    D1(printk("t1 stop!\n"));
  }

  return IRQ_HANDLED;
}
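
The restart path above folds a jiffies delta and a microsecond delta into one microsecond count. The same computation as a standalone sketch (a hypothetical helper over the fasttime_t fields used above):

/* Microseconds until *expires; may be <= 0 if it is already due.
 * time_after_eq() takes care of jiffies wrap-around. */
static long fast_timer_us_left(const struct fasttime_t *expires,
			       const struct fasttime_t *now)
{
	if (!time_after_eq(expires->tv_jiff, now->tv_jiff))
		return 0;

	return (long)(expires->tv_jiff - now->tv_jiff) * 1000000 / HZ +
	       expires->tv_usec - now->tv_usec;
}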
コード例 #26
0
void platform_cluster_power_down(int clusterId)
{

	volatile unsigned int tmp=0;
	unsigned long flag = 0;
	unsigned int cnt=0;
	switch(clusterId)
		{
		case ACPU_PD_ID_CLUSTER1:
			if (2 == (readl(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_STA_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_STA_pw_iso_a53_1_sta_START)))
			{
				printk(KERN_ERR "cluster1 already powered down.\n");
				return;
			}
			/*-------------cluster1--------------*/

			/* System-control state machine: stop accepting snoops and start power-down state detection */
			tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_ADDR(acpu_sctrl_base_addr)) | BIT(SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_pd_detect_start1_START);
			writel(tmp, SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_ADDR(acpu_sctrl_base_addr));
			/*pwc_set_bits(SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_ADDR(SOC_ACPU_SCTRL_BASE_ADDR),BIT(SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_set_acinactm_high1_START));  */


			/* make sure the cluster is in standby state */
			do {
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_CPU_STAT_ADDR(acpu_sctrl_base_addr));
				if ((0x1f << SOC_ACPU_SCTRL_ACPU_SC_CPU_STAT_a53_1_standbywfil2_START) == (tmp & (0x1f << SOC_ACPU_SCTRL_ACPU_SC_CPU_STAT_a53_1_standbywfil2_START)))
					break;

				if (cnt++ > 0x100000) {
					printk(KERN_ERR "cluster1 not in standby state, tmp=0x%x.\n", tmp);
					return;
				}
			} while (1);

			/*iso*/
			local_irq_save(flag);
			writel(BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_EN_pw_iso_a53_1_en_START),SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_EN_ADDR(acpu_sctrl_base_addr));
			do{
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_STA_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_STA_pw_iso_a53_1_sta_START);
			} while (0x0 == tmp);


			/*hpm clk*/
			writel(BIT(SOC_ACPU_SCTRL_ACPU_SC_CLKDIS_hpm_l2_1_clkdis_START),SOC_ACPU_SCTRL_ACPU_SC_CLKDIS_ADDR(acpu_sctrl_base_addr));
			do{
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_CLK_STAT_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_CLK_STAT_hpm_l2_1_clksta_START);
			} while (0x0 != tmp);
			writel(BIT(SOC_ACPU_SCTRL_ACPU_SC_CLKDIS_g_cpu_1_clkdis_START),SOC_ACPU_SCTRL_ACPU_SC_CLKDIS_ADDR(acpu_sctrl_base_addr));
			do{
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_CLK_STAT_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_CLK_STAT_g_cpu_1_clksta_START);
			}while(0x0 != tmp);

			/*mtcmos*/
			writel(BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_MTCMOS_DIS_pw_mtcmos_en_a53_1_dis_START),SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_MTCMOS_DIS_ADDR(acpu_sctrl_base_addr));
			do{
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_MTCMOS_STA_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_MTCMOS_STA_pw_mtcmos_en_a53_1_sta_START);
			} while (0x0 != tmp);
			g_cluster_pd_stat[1]=1;
			local_irq_restore(flag);

			break;
		case ACPU_PD_ID_CLUSTER0:
			/*---------------cluster0-----------------*/
			if (1 == (readl(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_STA_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_STA_pw_iso_a53_0_sta_START)))
			{
				printk(KERN_ERR "cluster0 already powered down.\n");
				return;
			}

			/* System-control state machine: stop accepting snoops and start power-down state detection */
			tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_ADDR(acpu_sctrl_base_addr)) | BIT(SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_pd_detect_start0_START);
			writel(tmp, SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_ADDR(acpu_sctrl_base_addr));
			/*pwc_set_bits(SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_ADDR(SOC_ACPU_SCTRL_BASE_ADDR),BIT(SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_set_acinactm_high0_START)); */


			/* make sure the cluster is in standby state */
			do {
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_CPU_STAT_ADDR(acpu_sctrl_base_addr));
				if (0x1f == (tmp & 0x1f)) {
					break;
				}

				if (cnt++ > 5000) {
					printk(KERN_ERR"cluster0 not in standby state.\n");
					return ;
				}
			}while(1);

			/*iso*/
			local_irq_save(flag);
			writel(BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_EN_pw_iso_a53_0_en_START),SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_EN_ADDR(acpu_sctrl_base_addr));
			do{
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_STA_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_STA_pw_iso_a53_0_sta_START);
			} while (0x0 == tmp);



			/*hpm clk*/
			writel(BIT(SOC_ACPU_SCTRL_ACPU_SC_CLKDIS_hpm_l2_clkdis_START),SOC_ACPU_SCTRL_ACPU_SC_CLKDIS_ADDR(acpu_sctrl_base_addr));
			do{
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_CLK_STAT_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_CLK_STAT_hpm_l2_clksta_START);
			} while (0x0 != tmp);
			writel(BIT(SOC_ACPU_SCTRL_ACPU_SC_CLKDIS_g_cpu_clkdis_START),SOC_ACPU_SCTRL_ACPU_SC_CLKDIS_ADDR(acpu_sctrl_base_addr));
			do{
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_CLK_STAT_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_CLK_STAT_g_cpu_clksta_START);
			}while(0x0 != tmp);

			/*mtcmos*/
			writel(BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_MTCMOS_DIS_pw_mtcmos_en_a53_0_dis_START),SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_MTCMOS_DIS_ADDR(acpu_sctrl_base_addr));
			do{
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_MTCMOS_STA_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_MTCMOS_STA_pw_mtcmos_en_a53_0_sta_START);
			} while (0x0 != tmp);
			g_cluster_pd_stat[0]=1;
			local_irq_restore(flag);
			break;
		default:
			break;
		}
}
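
Every status poll in this function is an unbounded do/while; only the standby check bails out after a retry budget. If a hang here is a concern, the same reads can be wrapped in a bounded helper, sketched below (the name, retry count, and -ETIMEDOUT policy are illustrative, not part of this driver):

/* Poll until (readl(addr) & mask) == want, giving up after max_tries reads. */
static int poll_status_bits(void __iomem *addr, unsigned int mask,
			    unsigned int want, unsigned int max_tries)
{
	while (max_tries--) {
		if ((readl(addr) & mask) == want)
			return 0;
		cpu_relax();	/* ease off the bus between reads */
	}
	return -ETIMEDOUT;
}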
コード例 #27
0
ファイル: lirc_parallel.c プロジェクト: 020gzh/linux
static ssize_t lirc_write(struct file *filep, const char __user *buf, size_t n,
			  loff_t *ppos)
{
	int count;
	unsigned int i;
	unsigned int level, newlevel;
	unsigned long flags;
	int counttimer;
	int *wbuf;
	ssize_t ret;

	if (!is_claimed)
		return -EBUSY;

	count = n / sizeof(int);

	if (n % sizeof(int) || count % 2 == 0)
		return -EINVAL;

	wbuf = memdup_user(buf, n);
	if (IS_ERR(wbuf))
		return PTR_ERR(wbuf);

#ifdef LIRC_TIMER
	if (timer == 0) {
		/* try again if device is ready */
		timer = init_lirc_timer();
		if (timer == 0) {
			ret = -EIO;
			goto out;
		}
	}

	/* adjust values from usecs */
	for (i = 0; i < count; i++) {
		__u64 helper;

		helper = ((__u64)wbuf[i]) * timer;
		do_div(helper, 1000000);
		wbuf[i] = (int)helper;
	}

	local_irq_save(flags);
	i = 0;
	while (i < count) {
		level = lirc_get_timer();
		counttimer = 0;
		lirc_on();
		do {
			newlevel = lirc_get_timer();
			if (level == 0 && newlevel != 0)
				counttimer++;
			level = newlevel;
			if (check_pselecd && (in(1) & LP_PSELECD)) {
				lirc_off();
				local_irq_restore(flags);
				ret = -EIO;
				goto out;
			}
		} while (counttimer < wbuf[i]);
		i++;

		lirc_off();
		if (i == count)
			break;
		counttimer = 0;
		do {
			newlevel = lirc_get_timer();
			if (level == 0 && newlevel != 0)
				counttimer++;
			level = newlevel;
			if (check_pselecd && (in(1) & LP_PSELECD)) {
				local_irq_restore(flags);
				ret = -EIO;
				goto out;
			}
		} while (counttimer < wbuf[i]);
		i++;
	}
	local_irq_restore(flags);
#else
	/* place code that handles write without external timer here */
#endif
	ret = n;
out:
	kfree(wbuf);

	return ret;
}
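
From user space this entry point is driven by a plain write(2) of int durations in microseconds, alternating pulse/space and ending on a pulse, which is exactly what the count % 2 check above enforces. A minimal sketch with made-up durations:

#include <unistd.h>

/* Send pulse/space/pulse; the byte count must be an odd multiple of
 * sizeof(int) or the driver rejects it with -EINVAL. */
static int send_one_code(int fd)
{
	int wbuf[3] = { 900, 450, 900 };	/* microseconds */

	return write(fd, wbuf, sizeof(wbuf)) == sizeof(wbuf) ? 0 : -1;
}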
コード例 #28
0
void platform_cluster_power_up(int clusterId)
{
	volatile unsigned int  tmp=0;
	unsigned long flag = 0;
	switch(clusterId)
	{
		case ACPU_PD_ID_CLUSTER0:
			/*-----------------cluster 0----------------------*/
			if (0 == (readl(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_STA_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_STA_pw_iso_a53_0_sta_START)))
			{
				printk(KERN_ERR "cluster0 already powered on.\n");
				return;
			}
			/*pwc_clr_bits(SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_ADDR(SOC_ACPU_SCTRL_BASE_ADDR),BIT(SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_set_acinactm_high0_START));*/
			/* System-control state machine: resume accepting snoops and stop power-down state detection */
			local_irq_save(flag);
			tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_ADDR(acpu_sctrl_base_addr)) & ~BIT(SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_pd_detect_start0_START);
			writel(tmp, SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_ADDR(acpu_sctrl_base_addr));

			/* MTCMOS settling time: 0xff (255) cycles of the 19.2 MHz reference clock, roughly 13.3 us */
			tmp = 0xff;
			writel(tmp, SOC_ACPU_SCTRL_ACPU_SC_A53_0_MTCMOS_TIMER_ADDR(acpu_sctrl_base_addr));

			/* assert resets */
			/*tmp = pwc_read_reg32(SOC_ACPU_SCTRL_ACPU_SC_RSTEN_ADDR(SOC_ACPU_SCTRL_BASE_ADDR));*/
			tmp = 0;
			tmp |= BIT(SOC_ACPU_SCTRL_ACPU_SC_RSTEN_srst_aarm_l2_rsten_START);
			tmp |= BIT(SOC_ACPU_SCTRL_ACPU_SC_RSTEN_srst_l2_hpm_rsten_START);
			tmp |= BIT(SOC_ACPU_SCTRL_ACPU_SC_RSTEN_srst_cluster0_rsten_START);
			tmp |= BIT(SOC_ACPU_SCTRL_ACPU_SC_RSTEN_srst_preset0_rsten_START);
			writel(tmp, SOC_ACPU_SCTRL_ACPU_SC_RSTEN_ADDR(acpu_sctrl_base_addr));
			do {
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_ADDR(acpu_sctrl_base_addr));
			} while ((0 == (tmp & BIT(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_srst_aarm_l2_rststa_START))) || (0 == (tmp & BIT(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_srst_l2_hpm_rststa_START))) ||
				(0 == (tmp & BIT(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_srst_cluster0_rststa_START))) || (0 == (tmp & BIT(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_srst_preset0_rststa_START))));

			/* enable MTCMOS and wait for the power-good timer */
			writel(BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_MTCMOS_EN_pw_mtcmos_en_a53_0_en_START), SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_MTCMOS_EN_ADDR(acpu_sctrl_base_addr));
			do {
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_A53_0_MTCMOS_TIMER_STAT_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_0_MTCMOS_TIMER_STAT_a53_0_mtcmos_timer_sta_START);
			} while (0x0 == tmp);
			/* Set ACPU_SC_CLKEN[hpm_l2_clken]=1, then wait for ACPU_SC_CLK_STAT[hpm_l2_clksta]=1, i.e. the hpm L2 cache clock is on */
			writel(BIT(SOC_ACPU_SCTRL_ACPU_SC_CLKEN_hpm_l2_clken_START), SOC_ACPU_SCTRL_ACPU_SC_CLKEN_ADDR(acpu_sctrl_base_addr));
			do {
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_CLK_STAT_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_CLK_STAT_hpm_l2_clksta_START);
			} while (0x0 == tmp);
			writel(BIT(SOC_ACPU_SCTRL_ACPU_SC_CLKEN_g_cpu_clken_START), SOC_ACPU_SCTRL_ACPU_SC_CLKEN_ADDR(acpu_sctrl_base_addr));
			do {
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_CLK_STAT_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_CLK_STAT_g_cpu_clksta_START);
			} while (0x0 == tmp);

			/* release isolation */
			writel(BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_DIS_pw_iso_a53_0_dis_START), SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_DIS_ADDR(acpu_sctrl_base_addr));
			do {
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_STA_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_STA_pw_iso_a53_0_sta_START);
			} while (0x0 != tmp);
			/* release resets */
			/*tmp = pwc_read_reg32(SOC_ACPU_SCTRL_ACPU_SC_RSTDIS_ADDR(SOC_ACPU_SCTRL_BASE_ADDR));*/
			tmp = 0;
			tmp |= BIT(SOC_ACPU_SCTRL_ACPU_SC_RSTDIS_srst_aarm_l2_rstdis_START);
			tmp |= BIT(SOC_ACPU_SCTRL_ACPU_SC_RSTDIS_srst_l2_hpm_rstdis_START);
			tmp |= BIT(SOC_ACPU_SCTRL_ACPU_SC_RSTDIS_srst_cluster0_rstdis_START);
			tmp |= BIT(SOC_ACPU_SCTRL_ACPU_SC_RSTDIS_srst_preset0_rstdis_START);
			writel(tmp, SOC_ACPU_SCTRL_ACPU_SC_RSTDIS_ADDR(acpu_sctrl_base_addr));
			do {
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_ADDR(acpu_sctrl_base_addr));
			} while ((0 != (tmp & BIT(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_srst_aarm_l2_rststa_START))) || (0 != (tmp & BIT(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_srst_l2_hpm_rststa_START))) ||
				(0 != (tmp & BIT(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_srst_cluster0_rststa_START))) || (0 != (tmp & BIT(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_srst_preset0_rststa_START))));
			g_cluster_pd_stat[0] = 0;
			local_irq_restore(flag);
			break;
		case ACPU_PD_ID_CLUSTER1:
			/*----------------cluster 1------------------*/
			if (0 == (readl(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_STA_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_STA_pw_iso_a53_1_sta_START)))
			{
				printk(KERN_ERR "cluster1 already powered on.\n");
				return;
			}
			/*pwc_clr_bits(SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_ADDR(SOC_ACPU_SCTRL_BASE_ADDR),BIT(SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_set_acinactm_high1_START));  */
			/* System-control state machine: resume accepting snoops and stop power-down state detection */
			local_irq_save(flag);
			tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_ADDR(acpu_sctrl_base_addr)) & ~BIT(SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_pd_detect_start1_START);
			writel(tmp, SOC_ACPU_SCTRL_ACPU_SC_SNOOP_PWD_ADDR(acpu_sctrl_base_addr));

			/* CPU0 sets the power-settling time ACPU_SC_A53_1_MTCMOS_TIMER[15:0] to 16'hff: with the
			 * 19.2 MHz reference clock, 255 cycles give roughly 13.3 us of MTCMOS settling time */
			tmp = 0xff;
			writel(tmp, SOC_ACPU_SCTRL_ACPU_SC_A53_1_MTCMOS_TIMER_ADDR(acpu_sctrl_base_addr));

			/* assert resets */
			/*tmp = pwc_read_reg32(SOC_ACPU_SCTRL_ACPU_SC_RSTEN_ADDR(SOC_ACPU_SCTRL_BASE_ADDR));*/
			tmp = 0;
			tmp |= BIT(SOC_ACPU_SCTRL_ACPU_SC_RSTEN_srst_aarm_l2_1_rsten_START);
			tmp |= BIT(SOC_ACPU_SCTRL_ACPU_SC_RSTEN_srst_l2_hpm_1_rsten_START);
			tmp |= BIT(SOC_ACPU_SCTRL_ACPU_SC_RSTEN_srst_cluster1_rsten_START);
			tmp |= BIT(SOC_ACPU_SCTRL_ACPU_SC_RSTEN_srst_preset1_rsten_START);
			writel(tmp, SOC_ACPU_SCTRL_ACPU_SC_RSTEN_ADDR(acpu_sctrl_base_addr));
			do {
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_ADDR(acpu_sctrl_base_addr));
			} while ((0 == (tmp & BIT(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_srst_aarm_l2_1_rststa_START))) || (0 == (tmp & BIT(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_srst_l2_hpm_1_rststa_START))) ||
				(0 == (tmp & BIT(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_srst_cluster1_rststa_START))) || (0 == (tmp & BIT(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_srst_preset1_rststa_START))));

			/* CPU0 sets ACPU_SC_A53_CLUSTER_MTCMOS_EN[pw_mtcmos_en_a53_1_en]=1'b1, then waits for
			 * ACPU_SC_A53_MTCMOS_TIMER_STAT[pw_mtcmos_en_a53_1_sta]=1, meaning the supply is stable */
			writel(BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_MTCMOS_EN_pw_mtcmos_en_a53_1_en_START), SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_MTCMOS_EN_ADDR(acpu_sctrl_base_addr));
			do {
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_A53_1_MTCMOS_TIMER_STAT_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_1_MTCMOS_TIMER_STAT_a53_1_mtcmos_timer_sta_START);
			} while (0x0 == tmp);
			/* Set ACPU_SC_CLKEN[hpm_l2_1_clken]=1, then wait for ACPU_SC_CLK_STAT[hpm_l2_1_clksta]=1, i.e. the hpm L2 cache 1 clock is on */
			writel(BIT(SOC_ACPU_SCTRL_ACPU_SC_CLKEN_hpm_l2_1_clken_START), SOC_ACPU_SCTRL_ACPU_SC_CLKEN_ADDR(acpu_sctrl_base_addr));
			do {
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_CLK_STAT_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_CLK_STAT_hpm_l2_1_clksta_START);
			} while (0x0 == tmp);
			writel(BIT(SOC_ACPU_SCTRL_ACPU_SC_CLKEN_g_cpu_1_clken_START), SOC_ACPU_SCTRL_ACPU_SC_CLKEN_ADDR(acpu_sctrl_base_addr));
			do {
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_CLK_STAT_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_CLK_STAT_g_cpu_1_clksta_START);
			} while (0x0 == tmp);
			/* release isolation */
			writel(BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_DIS_pw_iso_a53_1_dis_START), SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_DIS_ADDR(acpu_sctrl_base_addr));
			do {
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_STA_ADDR(acpu_sctrl_base_addr)) & BIT(SOC_ACPU_SCTRL_ACPU_SC_A53_CLUSTER_ISO_STA_pw_iso_a53_1_sta_START);
			} while (0x0 != tmp);
			/* release resets */
			/*tmp = pwc_read_reg32(SOC_ACPU_SCTRL_ACPU_SC_RSTDIS_ADDR(SOC_ACPU_SCTRL_BASE_ADDR));*/
			tmp = 0;
			tmp |= BIT(SOC_ACPU_SCTRL_ACPU_SC_RSTDIS_srst_aarm_l2_1_rstdis_START);
			tmp |= BIT(SOC_ACPU_SCTRL_ACPU_SC_RSTDIS_srst_l2_hpm_1_rstdis_START);
			tmp |= BIT(SOC_ACPU_SCTRL_ACPU_SC_RSTDIS_srst_cluster1_rstdis_START);
			tmp |= BIT(SOC_ACPU_SCTRL_ACPU_SC_RSTDIS_srst_preset1_rstdis_START);
			writel(tmp, SOC_ACPU_SCTRL_ACPU_SC_RSTDIS_ADDR(acpu_sctrl_base_addr));
			do {
				tmp = readl(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_ADDR(acpu_sctrl_base_addr));
			} while ((0 != (tmp & BIT(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_srst_aarm_l2_1_rststa_START))) || (0 != (tmp & BIT(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_srst_l2_hpm_1_rststa_START))) ||
				(0 != (tmp & BIT(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_srst_cluster1_rststa_START))) || (0 != (tmp & BIT(SOC_ACPU_SCTRL_ACPU_SC_RST_STAT_srst_preset1_rststa_START))));
			g_cluster_pd_stat[1] = 0;
			local_irq_restore(flag);
			break;
		default:
			break;
		}

}
コード例 #29
0
ファイル: sa1100_neponset.c プロジェクト: 3sOx/asuswrt-merlin
static int
neponset_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state)
{
	unsigned int ncr_mask, ncr_set, pa_dwr_mask, pa_dwr_set;
	int ret;

	switch (skt->nr) {
	case 0:
		pa_dwr_mask = GPIO_A0 | GPIO_A1;
		ncr_mask = NCR_A0VPP | NCR_A1VPP;

		if (state->Vpp == 0)
			ncr_set = 0;
		else if (state->Vpp == 120)
			ncr_set = NCR_A1VPP;
		else if (state->Vpp == state->Vcc)
			ncr_set = NCR_A0VPP;
		else {
			printk(KERN_ERR "%s(): unrecognized VPP %u\n",
			       __FUNCTION__, state->Vpp);
			return -1;
		}
		break;

	case 1:
		pa_dwr_mask = GPIO_A2 | GPIO_A3;
		ncr_mask = 0;
		ncr_set = 0;

		if (state->Vpp != state->Vcc && state->Vpp != 0) {
			printk(KERN_ERR "%s(): CF slot cannot support VPP %u\n",
			       __FUNCTION__, state->Vpp);
			return -1;
		}
		break;

	default:
		return -1;
	}

	/*
	 * pa_dwr_set is the mask for selecting Vcc on both sockets.
	 * pa_dwr_mask selects which bits (and therefore socket) we change.
	 */
	switch (state->Vcc) {
	default:
	case 0:  pa_dwr_set = 0;		break;
	case 33: pa_dwr_set = GPIO_A1|GPIO_A2;	break;
	case 50: pa_dwr_set = GPIO_A0|GPIO_A3;	break;
	}

	ret = sa1111_pcmcia_configure_socket(skt, state);
	if (ret == 0) {
		unsigned long flags;

		local_irq_save(flags);
		NCR_0 = (NCR_0 & ~ncr_mask) | ncr_set;
		local_irq_restore(flags);

		sa1111_set_io(SA1111_DEV(skt->dev), pa_dwr_mask, pa_dwr_set);
	}

	return ret;
}
コード例 #30
0
ファイル: omap-serial.c プロジェクト: ARMP/ARM-Project
static void
serial_omap_console_write(struct console *co, const char *s,
		unsigned int count)
{
	struct uart_omap_port *up = serial_omap_console_ports[co->index];
	unsigned long flags;
	unsigned int ier;
	int console_lock = 0, locked = 1;

	if (console_trylock())
		console_lock = 1;

	/*
	 * If the console lock is unavailable and the port is being
	 * runtime-suspended, skip the write entirely: touching the
	 * console here could recurse into further prints.  This
	 * typically happens while printing debug logs during boot.
	 */

	if (!console_lock &&
		up->pdev->dev.power.runtime_status == RPM_SUSPENDING)
		return;

	local_irq_save(flags);
	if (up->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&up->port.lock);
	else
		spin_lock(&up->port.lock);

	serial_omap_port_enable(up);

	/*
	 * First save the IER then disable the interrupts
	 */
	ier = serial_in(up, UART_IER);
	serial_out(up, UART_IER, 0);

	uart_console_write(&up->port, s, count, serial_omap_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty
	 * and restore the IER
	 */
	wait_for_xmitr(up);
	serial_out(up, UART_IER, ier);
	/*
	 * Receive handling will happen properly because the
	 * receive-ready bit is still set; it is not cleared on
	 * read.  Modem status handling, however, will not, so we
	 * must call check_modem_status() ourselves if anything was
	 * saved in msr_saved_flags while interrupts were off.
	 */
	if (up->msr_saved_flags)
		check_modem_status(up);

	if (console_lock)
		console_unlock();

	serial_omap_port_disable(up);
	if (locked)
		spin_unlock(&up->port.lock);
	local_irq_restore(flags);
}