Example #1
static int __init bfin_clocksource_init(void)
{
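    /*
     * Seed the cycles->ns scale with the core clock in kHz, then derive
     * the clocksource mult so (cycles * mult) >> shift yields nanoseconds.
     */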
    set_cyc2ns_scale(get_cclk() / 1000);

    clocksource_bfin.mult = clocksource_hz2mult(get_cclk(), clocksource_bfin.shift);

    if (clocksource_register(&clocksource_bfin))
        panic("failed to register clocksource");

    return 0;
}
Example #2
static int __init bfin_cs_cycles_init(void)
{
	set_cyc2ns_scale(get_cclk() / 1000);

	bfin_cs_cycles.mult =
		clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift);

	if (clocksource_register(&bfin_cs_cycles))
		panic("failed to register clocksource");

	return 0;
}
Example #3
int request_dma(unsigned int channel, const char *device_id)
{
	pr_debug("request_dma() : BEGIN\n");

	if (device_id == NULL)
		printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel);

#if defined(CONFIG_BF561) && ANOMALY_05000182
	if (channel >= CH_IMEM_STREAM0_DEST && channel <= CH_IMEM_STREAM1_DEST) {
		if (get_cclk() > 500000000) {
			printk(KERN_WARNING
			       "Request IMDMA failed due to ANOMALY 05000182\n");
			return -EFAULT;
		}
	}
#endif

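	/* Atomically claim the channel: cmpxchg returns nonzero if it was already in use */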
	if (atomic_cmpxchg(&dma_ch[channel].chan_status, 0, 1)) {
		pr_debug("DMA CHANNEL IN USE\n");
		return -EBUSY;
	}

	set_dma_peripheral_map(channel, device_id);
	dma_ch[channel].device_id = device_id;
	dma_ch[channel].irq = 0;

	pr_debug("request_dma() : END\n");
	return 0;
}
Example #4
static void bfin_timer_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC: {
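		/* Auto-reload value for one interrupt per tick (HZ),
		 * with the TSCALE prescaler factored out.
		 */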
		unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
		bfin_write_TCNTL(TMPWR);
		CSYNC();
		bfin_write_TSCALE(TIME_SCALE - 1);
		bfin_write_TPERIOD(tcount);
		bfin_write_TCOUNT(tcount);
		CSYNC();
		bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
		break;
	}
	case CLOCK_EVT_MODE_ONESHOT:
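		/* One-shot: power the timer and set the prescaler, but leave
		 * the period/count unprogrammed here.
		 */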
		bfin_write_TCNTL(TMPWR);
		CSYNC();
		bfin_write_TSCALE(TIME_SCALE - 1);
		bfin_write_TPERIOD(0);
		bfin_write_TCOUNT(0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		bfin_write_TCNTL(0);
		CSYNC();
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}
Example #5
static int __init bfin_cs_cycles_init(void)
{
	if (clocksource_register_hz(&bfin_cs_cycles, get_cclk()))
		panic("failed to register clocksource");

	return 0;
}
Example #6
static void
time_sched_init(irq_handler_t timer_routine)
{
    u32 tcount;

    /* power up the timer, but don't enable it just yet */
    bfin_write_TCNTL(1);
    CSYNC();

    /* program the TSCALE prescaler counter */
    bfin_write_TSCALE((TIME_SCALE - 1));

    tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
    bfin_write_TPERIOD(tcount);
    bfin_write_TCOUNT(tcount);

    /* now enable the timer */
    CSYNC();

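    /* TCNTL = 7 is TMPWR | TMREN | TAUTORLD (cf. the symbolic form in Example #4) */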
    bfin_write_TCNTL(7);

    bfin_timer_irq.handler = (irq_handler_t)timer_routine;
    /* call setup_irq instead of request_irq because request_irq calls
     * kmalloc, which has not been initialized yet
     */
    setup_irq(IRQ_CORETMR, &bfin_timer_irq);
}
Example #7
void show_regs(struct pt_regs *fp)
{
	char buf[150];
	struct irqaction *action;
	unsigned int i;
	unsigned long flags = 0;
	unsigned int cpu = raw_smp_processor_id();
	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();

	pr_notice("\n");
	if (CPUID != bfin_cpuid())
		pr_notice("Compiled for cpu family 0x%04x (Rev %d), "
			"but running on:0x%04x (Rev %d)\n",
			CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid());

	pr_notice("ADSP-%s-0.%d",
		CPU, bfin_compiled_revid());

	if (bfin_compiled_revid() !=  bfin_revid())
		pr_cont("(Detected 0.%d)", bfin_revid());

	pr_cont(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n",
		get_cclk()/1000000, get_sclk()/1000000,
#ifdef CONFIG_MPU
		"mpu on"
#else
		"mpu off"
#endif
		);

	if (board_rom_type())
		pr_notice("%s", linux_banner_stockui);
	else
		pr_notice("%s", linux_banner);

	pr_notice("\nSEQUENCER STATUS:\t\t%s\n", print_tainted());
	pr_notice(" SEQSTAT: %08lx  IPEND: %04lx  IMASK: %04lx  SYSCFG: %04lx\n",
		(long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg);
	if (fp->ipend & EVT_IRPTEN)
		pr_notice("  Global Interrupts Disabled (IPEND[4])\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 |
			EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR)))
		pr_notice("  Peripheral interrupts masked off\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14)))
		pr_notice("  Kernel interrupts masked off\n");
	if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) {
		pr_notice("  HWERRCAUSE: 0x%lx\n",
			(fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14);
#ifdef EBIU_ERRMST
		
		if (bfin_read_EBIU_ERRMST() & CORE_ERROR) {
			pr_notice("  EBIU Error Reason  : 0x%04x\n",
				bfin_read_EBIU_ERRMST());
			pr_notice("  EBIU Error Address : 0x%08x\n",
				bfin_read_EBIU_ERRADD());
		}
#endif
	}
Example #8
/**
 *	request_dma - request a DMA channel
 *
 * Request the specified DMA channel from the system if it is available.
 */
int request_dma(unsigned int channel, const char *device_id)
{
	pr_debug("request_dma() : BEGIN \n");

	if (device_id == NULL)
		printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel);

#if defined(CONFIG_BF561) && ANOMALY_05000182
	if (channel >= CH_IMEM_STREAM0_DEST && channel <= CH_IMEM_STREAM1_DEST) {
		if (get_cclk() > 500000000) {
			printk(KERN_WARNING
			       "Request IMDMA failed due to ANOMALY 05000182\n");
			return -EFAULT;
		}
	}
#endif

	mutex_lock(&(dma_ch[channel].dmalock));

	if ((dma_ch[channel].chan_status == DMA_CHANNEL_REQUESTED)
	    || (dma_ch[channel].chan_status == DMA_CHANNEL_ENABLED)) {
		mutex_unlock(&(dma_ch[channel].dmalock));
		pr_debug("DMA CHANNEL IN USE  \n");
		return -EBUSY;
	} else {
		dma_ch[channel].chan_status = DMA_CHANNEL_REQUESTED;
		pr_debug("DMA CHANNEL IS ALLOCATED  \n");
	}

	mutex_unlock(&(dma_ch[channel].dmalock));

#ifdef CONFIG_BF54x
	if (channel >= CH_UART2_RX && channel <= CH_UART3_TX) {
		unsigned int per_map;
		per_map = dma_ch[channel].regs->peripheral_map & 0xFFF;
		if (strncmp(device_id, "BFIN_UART", 9) == 0)
			dma_ch[channel].regs->peripheral_map = per_map |
				((channel - CH_UART2_RX + 0xC)<<12);
		else
			dma_ch[channel].regs->peripheral_map = per_map |
				((channel - CH_UART2_RX + 0x6)<<12);
	}
#endif

	dma_ch[channel].device_id = device_id;
	dma_ch[channel].irq = 0;

	/* Note the restriction: a DMA channel must be requested here
	 * before any descriptor/channel operations are performed on it.
	 */
	pr_debug("request_dma() : END  \n");
	return 0;
}
Example #9
int __init
__timerbench_init (void)
{
    int ret = 0;

    ret = register_chrdev (TB_MAJOR, TB_DEVNAME, &tb_fops);

    tb_cclk = get_cclk ();
    tb_sclk = get_sclk ();

    return ret;
}
Example #10
/*
 * __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
 * interrupts are off, and secondary CPUs are still lost in space.
 */
void __ipipe_enable_pipeline(void)
{
	unsigned irq;

	__ipipe_core_clock = get_cclk(); /* Fetch this once. */
	__ipipe_freq_scale = 1000000000UL / __ipipe_core_clock;

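	/* Hand every IRQ to the root I-pipe domain, with asm_do_IRQ as its handler */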
	for (irq = 0; irq < NR_IRQS; ++irq)
		ipipe_virtualize_irq(ipipe_root_domain,
				     irq,
				     (ipipe_irq_handler_t)&asm_do_IRQ,
				     NULL,
				     &__ipipe_ack_irq,
				     IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
}
Example #11
static int __init bfin_clockevent_init(void)
{
    unsigned long timer_clk;

    timer_clk = get_cclk() / TIME_SCALE;

    setup_irq(IRQ_CORETMR, &bfin_timer_irq);
    bfin_timer_init();

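    /*
     * mult/shift convert a nanosecond delta into ticks of the core timer,
     * which runs at CCLK / TIME_SCALE; the max delta spans the full
     * 32-bit TCOUNT range.
     */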
    clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift);
    clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin);
    clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin);
    clockevents_register_device(&clockevent_bfin);

    return 0;
}
Example #12
void __init setup_core_timer(void)
{
	u32 tcount;

	/* power up the timer, but don't enable it just yet */
	bfin_write_TCNTL(1);
	CSYNC();

	/* the TSCALE prescaler counter */
	bfin_write_TSCALE(TIME_SCALE - 1);

	tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
	bfin_write_TPERIOD(tcount);
	bfin_write_TCOUNT(tcount);

	/* now enable the timer */
	CSYNC();

	bfin_write_TCNTL(7);
}
Example #13
static int global_board_data_init(void)
{
#ifndef CONFIG_SYS_GBL_DATA_ADDR
# define CONFIG_SYS_GBL_DATA_ADDR 0
#endif
#ifndef CONFIG_SYS_BD_INFO_ADDR
# define CONFIG_SYS_BD_INFO_ADDR 0
#endif

	bd_t *bd;

	if (CONFIG_SYS_GBL_DATA_ADDR) {
		gd = (gd_t *)(CONFIG_SYS_GBL_DATA_ADDR);
		memset((void *)gd, 0, GENERATED_GBL_DATA_SIZE);
	} else {
		static gd_t _bfin_gd;
		gd = &_bfin_gd;
	}
	if (CONFIG_SYS_BD_INFO_ADDR) {
		bd = (bd_t *)(CONFIG_SYS_BD_INFO_ADDR);
		memset(bd, 0, GENERATED_BD_INFO_SIZE);
	} else {
		static bd_t _bfin_bd;
		bd = &_bfin_bd;
	}

	gd->bd = bd;

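	/* Record identification strings, clock rates and memory layout in the board info block */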
	bd->bi_r_version = version_string;
	bd->bi_cpu = __stringify(CONFIG_BFIN_CPU);
	bd->bi_board_name = CONFIG_SYS_BOARD;
	bd->bi_vco = get_vco();
	bd->bi_cclk = get_cclk();
	bd->bi_sclk = get_sclk();
	bd->bi_memstart = CONFIG_SYS_SDRAM_BASE;
	bd->bi_memsize = CONFIG_SYS_MAX_RAM_SIZE;

	gd->ram_size = CONFIG_SYS_MAX_RAM_SIZE;

	return 0;
}
Example #14
static unsigned long __init bfin_clockevent_check(void)
{
	setup_irq(IRQ_CORETMR, &bfin_timer_irq);
	return get_cclk() / TIME_SCALE;
}
Example #15
static int debug_cclk_get(void *data, u64 *val)
{
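	/* Report the current core clock frequency, in Hz, through *val */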
	*val = get_cclk();
	return 0;
}
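The (void *data, u64 *val) getter in Example #15 matches the signature expected by the kernel's simple-attribute helpers. As a hedged sketch, assuming the getter is meant for debugfs (the fops name, file name, and init function below are illustrative and not taken from the listing), it could be exposed roughly like this:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/init.h>

/* Generate read-only file_operations around debug_cclk_get();
 * reads print the value using the given format string. */
DEFINE_SIMPLE_ATTRIBUTE(fops_debug_cclk, debug_cclk_get, NULL, "%llu\n");

static int __init bfin_debug_cclk_init(void)
{
	/* Hypothetical debugfs entry exposing the core clock in Hz */
	debugfs_create_file("cclk", S_IRUSR, NULL, NULL, &fops_debug_cclk);
	return 0;
}
device_initcall(bfin_debug_cclk_init);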