Пример #1
0
/*
 * i2c_init - set up the ETRAX Port B I2C interface and register the
 * i2c character device.
 *
 * Returns 0 on success, or the negative error from register_chrdev()
 * if the major number could not be obtained.
 */
static int __init
i2c_init(void)
{
	int res;

	/* Setup and enable the Port B I2C interface */

#ifndef CONFIG_ETRAX_I2C_USES_PB_NOT_PB_I2C
	/* Enable the I2C function, drive data and clock high, and enable
	 * the output.  The shadow variable is updated in the same
	 * statement so it stays in sync with the (presumably write-only)
	 * hardware register. */
	*R_PORT_PB_I2C = port_pb_i2c_shadow |= 
		IO_STATE(R_PORT_PB_I2C, i2c_en,  on) |
		IO_FIELD(R_PORT_PB_I2C, i2c_d,   1)  |
		IO_FIELD(R_PORT_PB_I2C, i2c_clk, 1)  |
		IO_STATE(R_PORT_PB_I2C, i2c_oe_, enable);
#endif

	/* Clear both direction bits in the shadow first, then program
	 * pin 0 as input and pin 1 as output in a single register write. */
	port_pb_dir_shadow &= ~IO_MASK(R_PORT_PB_DIR, dir0);
	port_pb_dir_shadow &= ~IO_MASK(R_PORT_PB_DIR, dir1);

	*R_PORT_PB_DIR = (port_pb_dir_shadow |=
			  IO_STATE(R_PORT_PB_DIR, dir0, input)  |
			  IO_STATE(R_PORT_PB_DIR, dir1, output));

	/* register char device */

	res = register_chrdev(I2C_MAJOR, i2c_name, &i2c_fops);
	if(res < 0) {
		printk(KERN_ERR "i2c: couldn't get a major number.\n");
		return res;
	}

	printk("I2C driver v2.2, (c) 1999-2001 Axis Communications AB\n");
	
	return 0;
}
Пример #2
0
/*
 * flush_tlb_mm - invalidate all TLB entries belonging to one mm context.
 * @mm: the address space whose translations should be dropped.
 *
 * Walks every hardware TLB slot and rewrites each entry whose page_id
 * matches this context with INVALID_PAGEID and an all-cleared low word.
 * Interrupts are disabled around the loop because the select/read/write
 * register sequence must not be interleaved with another TLB access.
 */
void
flush_tlb_mm(struct mm_struct *mm)
{
	int i;
	int page_id = mm->context.page_id;
	unsigned long flags;

	D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));

	/* No hardware context was ever assigned, so nothing can be in
	 * the TLB for this mm. */
	if(page_id == NO_CONTEXT)
		return;


	local_irq_save(flags);
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
			/* vpn = i & 0xf spreads the invalidated entries over
			 * different 4-way entry groups (see flush_tlb_all). */
			*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
				      IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );

			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no  ) |
				      IO_STATE(R_TLB_LO, valid, no  ) |
				      IO_STATE(R_TLB_LO, kernel,no  ) |
				      IO_STATE(R_TLB_LO, we,    no  ) |
				      IO_FIELD(R_TLB_LO, pfn,   0   ) );
		}
	}
	local_irq_restore(flags);
}
Пример #3
0
/*
 * flush_tlb_page - invalidate any TLB entry mapping @addr in @vma's mm.
 * @vma:  the VMA the page belongs to (only vm_mm is used here).
 * @addr: virtual address of the page to flush.
 *
 * Scans all hardware TLB slots for an entry whose page_id matches the
 * mm context and whose virtual page number equals @addr, and rewrites
 * any match as invalid.  Runs with local interrupts disabled because
 * the select/read/write register sequence must be atomic.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int page_id = mm->context.page_id;
	int i;
	unsigned long flags;

	/* addr is an unsigned long, not a pointer: print it with %lx.
	 * Passing a long for %p is a format/argument mismatch. */
	D(printk("tlb: flush page 0x%lx in context %d (%p)\n", addr, page_id, mm));

	if(page_id == NO_CONTEXT)
		return;

	/* Keep only the virtual page number for the comparison below. */
	addr &= PAGE_MASK;


	local_irq_save(flags);
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		unsigned long tlb_hi;
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		tlb_hi = *R_TLB_HI;
		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
		    (tlb_hi & PAGE_MASK) == addr) {
			/* Reusing the same vpn (addr) keeps the entry in a
			 * valid slot while marking its page_id invalid. */
			*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
				addr; 

			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no  ) |
				      IO_STATE(R_TLB_LO, valid, no  ) |
				      IO_STATE(R_TLB_LO, kernel,no  ) |
				      IO_STATE(R_TLB_LO, we,    no  ) |
				      IO_FIELD(R_TLB_LO, pfn,   0   ) );
		}
	}
	local_irq_restore(flags);
}
Пример #4
0
/*
 * flush_tlb_all - invalidate every entry in the hardware TLB.
 *
 * Writes INVALID_PAGEID into all NUM_TLB_ENTRIES slots and clears the
 * low word of each.  Uses the old save_and_cli/restore_flags API to
 * keep the register select/write sequence atomic.
 */
void
flush_tlb_all(void)
{
	int i;
	unsigned long flags;

	/* the vpn of i & 0xf is so we dont write similar TLB entries
	 * in the same 4-way entry group. details.. 
	 */

	save_and_cli(flags); /* flush needs to be atomic */
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
		*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
			      IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );
		
		*R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no       ) |
			      IO_STATE(R_TLB_LO, valid, no       ) |
			      IO_STATE(R_TLB_LO, kernel,no	 ) |
			      IO_STATE(R_TLB_LO, we,    no       ) |
			      IO_FIELD(R_TLB_LO, pfn,   0        ) );
	}
	restore_flags(flags);
	D(printk("tlb: flushed all\n"));
}
Пример #5
0
/*
 * timer_interrupt - system timer tick handler.
 * @irq:    interrupt number (unused here).
 * @dev_id: device cookie (unused here).
 *
 * Acknowledges the timer interrupt in R_TIMER_CTRL, feeds the watchdog,
 * updates process accounting, advances the jiffies counter and records
 * profiling information.  Always returns IRQ_HANDLED.
 */
static inline irqreturn_t
timer_interrupt(int irq, void *dev_id)
{
	struct pt_regs *regs = get_irq_regs();
	/* acknowledge the timer irq */

#ifdef USE_CASCADE_TIMERS
	/* Cascaded mode: rewrite the full control word, clearing both
	 * timer interrupts while keeping both timers running. */
	*R_TIMER_CTRL =
		IO_FIELD( R_TIMER_CTRL, timerdiv1, 0) |
		IO_FIELD( R_TIMER_CTRL, timerdiv0, 0) |
		IO_STATE( R_TIMER_CTRL, i1, clr) |
		IO_STATE( R_TIMER_CTRL, tm1, run) |
		IO_STATE( R_TIMER_CTRL, clksel1, cascade0) |
		IO_STATE( R_TIMER_CTRL, i0, clr) |
		IO_STATE( R_TIMER_CTRL, tm0, run) |
		IO_STATE( R_TIMER_CTRL, clksel0, c6250kHz);
#else
	/* Single timer: re-issue the shadow value with the i0 clear bit. */
	*R_TIMER_CTRL = r_timer_ctrl_shadow |
		IO_STATE(R_TIMER_CTRL, i0, clr);
#endif

	/* reset watchdog otherwise it resets us! */
	reset_watchdog();

	/* Update statistics. */
	update_process_times(user_mode(regs));

	/* call the real timer interrupt handler */

	do_timer(1);

        cris_do_profile(regs); /* Save profiling information */
        return IRQ_HANDLED;
}
Пример #6
0
/*
 * flush_tlb_mm - invalidate all TLB entries for one mm context
 * (older variant: mm->context is a plain int here).
 * @mm: the address space whose translations should be dropped.
 */
void
flush_tlb_mm(struct mm_struct *mm)
{
	int i;
	int page_id = mm->context;
	unsigned long flags;

	D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));

	/* No hardware context assigned: nothing to invalidate. */
	if(page_id == NO_CONTEXT)
		return;
	
	/* mark the TLB entries that match the page_id as invalid.
	 * here we could also check the _PAGE_GLOBAL bit and NOT flush
	 * global pages. is it worth the extra I/O ? 
	 */

	save_and_cli(flags);  /* flush needs to be atomic */
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
			/* vpn = i & 0xf spreads invalid entries over
			 * different 4-way entry groups. */
			*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
				      IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );
			
			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no  ) |
				      IO_STATE(R_TLB_LO, valid, no  ) |
				      IO_STATE(R_TLB_LO, kernel,no  ) |
				      IO_STATE(R_TLB_LO, we,    no  ) |
				      IO_FIELD(R_TLB_LO, pfn,   0   ) );
		}
	}
	restore_flags(flags);
}
Пример #7
0
/*
 * i2c_init - one-shot initialisation of the ETRAX I2C pins.
 *
 * Uses static 'first'/'res' so repeated calls return the result of the
 * first invocation without touching the hardware again.  Depending on
 * the config, either claims the dedicated Port B I2C interface and
 * programs the port registers, or allocates individual data/clock pins.
 *
 * Returns 0 on success or the error code from the IO-interface layer.
 */
int __init
i2c_init(void)
{
    static int res = 0;
    static int first = 1;

    /* Already initialised (or attempted): hand back the cached result. */
    if (!first) {
        return res;
    }
    first = 0;

    /* Setup and enable the Port B I2C interface */

#ifndef CONFIG_ETRAX_I2C_USES_PB_NOT_PB_I2C
    if ((res = cris_request_io_interface(if_i2c, "I2C"))) {
        printk(KERN_CRIT "i2c_init: Failed to get IO interface\n");
        return res;
    }

    /* Enable the I2C function with data/clock high and output enabled;
     * the shadow mirrors the hardware register. */
    *R_PORT_PB_I2C = port_pb_i2c_shadow |=
                         IO_STATE(R_PORT_PB_I2C, i2c_en,  on) |
                         IO_FIELD(R_PORT_PB_I2C, i2c_d,   1)  |
                         IO_FIELD(R_PORT_PB_I2C, i2c_clk, 1)  |
                         IO_STATE(R_PORT_PB_I2C, i2c_oe_, enable);

    /* Pin 0 becomes input, pin 1 output. */
    port_pb_dir_shadow &= ~IO_MASK(R_PORT_PB_DIR, dir0);
    port_pb_dir_shadow &= ~IO_MASK(R_PORT_PB_DIR, dir1);

    *R_PORT_PB_DIR = (port_pb_dir_shadow |=
                          IO_STATE(R_PORT_PB_DIR, dir0, input)  |
                          IO_STATE(R_PORT_PB_DIR, dir1, output));
#else
    /* Generic GPIO mode: claim the data pin, then the clock pin.  If
     * the clock pin fails, release the data pin and fall through to
     * return the (non-zero) error in res. */
    if ((res = cris_io_interface_allocate_pins(if_i2c,
               'b',
               CONFIG_ETRAX_I2C_DATA_PORT,
               CONFIG_ETRAX_I2C_DATA_PORT))) {
        printk(KERN_WARNING "i2c_init: Failed to get IO pin for I2C data port\n");
        return res;
    } else if ((res = cris_io_interface_allocate_pins(if_i2c,
                      'b',
                      CONFIG_ETRAX_I2C_CLK_PORT,
                      CONFIG_ETRAX_I2C_CLK_PORT))) {
        cris_io_interface_free_pins(if_i2c,
                                    'b',
                                    CONFIG_ETRAX_I2C_DATA_PORT,
                                    CONFIG_ETRAX_I2C_DATA_PORT);
        printk(KERN_WARNING "i2c_init: Failed to get IO pin for I2C clk port\n");
    }
#endif

    return res;
}
Пример #8
0
/*
 * timer_interrupt - system timer tick handler (variant with RTC sync).
 * @irq:    interrupt number (unused here).
 * @dev_id: device cookie (unused here).
 *
 * Acknowledges the timer interrupt, feeds the watchdog, updates process
 * accounting and jiffies, records profiling information, and - when the
 * clock is NTP-synchronized - writes the system time back to the CMOS
 * RTC roughly every 11 minutes.  Always returns IRQ_HANDLED.
 */
static inline irqreturn_t
timer_interrupt(int irq, void *dev_id)
{
	struct pt_regs *regs = get_irq_regs();
	/* acknowledge the timer irq */

#ifdef USE_CASCADE_TIMERS
	/* Cascaded mode: rewrite the full control word, clearing both
	 * timer interrupts while keeping both timers running. */
	*R_TIMER_CTRL =
		IO_FIELD( R_TIMER_CTRL, timerdiv1, 0) |
		IO_FIELD( R_TIMER_CTRL, timerdiv0, 0) |
		IO_STATE( R_TIMER_CTRL, i1, clr) |
		IO_STATE( R_TIMER_CTRL, tm1, run) |
		IO_STATE( R_TIMER_CTRL, clksel1, cascade0) |
		IO_STATE( R_TIMER_CTRL, i0, clr) |
		IO_STATE( R_TIMER_CTRL, tm0, run) |
		IO_STATE( R_TIMER_CTRL, clksel0, c6250kHz);
#else
	/* Single timer: re-issue the shadow value with the i0 clear bit. */
	*R_TIMER_CTRL = r_timer_ctrl_shadow | 
		IO_STATE(R_TIMER_CTRL, i0, clr);
#endif

	/* reset watchdog otherwise it resets us! */
	reset_watchdog();
	
	/* Update statistics. */
	update_process_times(user_mode(regs));

	/* call the real timer interrupt handler */

	do_timer(1);
	
        cris_do_profile(regs); /* Save profiling information */

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
	 * called as close as possible to 500 ms before the new second starts.
	 *
	 * The division here is not time critical since it will run once in 
	 * 11 minutes
	 */
	if (ntp_synced() &&
	    xtime.tv_sec > last_rtc_update + 660 &&
	    (xtime.tv_nsec / 1000) >= 500000 - (tick_nsec / 1000) / 2 &&
	    (xtime.tv_nsec / 1000) <= 500000 + (tick_nsec / 1000) / 2) {
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
	}
        return IRQ_HANDLED;
}
Пример #9
0
/*
 * switch_mm - activate the MMU context of @next when switching address
 * spaces.
 * @prev: address space we are leaving.
 * @next: address space we are entering.
 * @tsk:  the task being switched to (unused here).
 *
 * Allocates a hardware context for @next if needed, publishes its pgd
 * in the per-CPU current_pgd (used by the fault handlers), and programs
 * R_MMU_CONTEXT with the new page_id.  No-op when prev == next.
 */
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	struct task_struct *tsk)
{
	if (prev != next) {
		/* make sure we have a context */
		get_mmu_context(next);

		/* remember the pgd for the fault handlers
		 * this is similar to the pgd register in some other CPU's.
		 * we need our own copy of it because current and active_mm
		 * might be invalid at points where we still need to derefer
		 * the pgd.
		 */

		per_cpu(current_pgd, smp_processor_id()) = next->pgd;

		/* switch context in the MMU */

		/* next->context is a struct (its .page_id member is used
		 * below), so pass the int member to %d instead of pushing
		 * the whole struct through varargs, which is undefined. */
		D(printk(KERN_DEBUG "switching mmu_context to %d (%p)\n",
			next->context.page_id, next));

		*R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT,
					  page_id, next->context.page_id);
	}
}
Пример #10
0
/*
 * hard_reset_now - force an immediate hardware reset and never return.
 *
 * With the watchdog configured (and not simulating), sets the magic
 * cause_of_death value that the watchdog handler in entry.S checks;
 * otherwise arms the watchdog directly so it fires.  Either way the
 * function then spins with interrupts disabled until the reset hits.
 */
void hard_reset_now (void)
{
    /*
     * Don't declare this variable elsewhere.  We don't want any other
     * code to know about it than the watchdog handler in entry.S and
     * this code, implementing hard reset through the watchdog.
     */
#if defined(CONFIG_ETRAX_WATCHDOG) && !defined(CONFIG_SVINTO_SIM)
    extern int cause_of_death;
#endif

    printk("*** HARD RESET ***\n");
    local_irq_disable();

#if defined(CONFIG_ETRAX_WATCHDOG) && !defined(CONFIG_SVINTO_SIM)
    cause_of_death = 0xbedead;
#else
    /* Since we dont plan to keep on resetting the watchdog,
       the key can be arbitrary hence three */
    *R_WATCHDOG = IO_FIELD(R_WATCHDOG, key, 3) |
                  IO_STATE(R_WATCHDOG, enable, start);
#endif

    while(1) /* waiting for RETRIBUTION! */ ;
}
Пример #11
0
/*
 * stop_watchdog - disable the hardware watchdog.
 *
 * The watchdog requires the bitwise inverse of the previously written
 * 3-bit key for each new command, so the key is inverted before the
 * stop command is issued.  No-op when the watchdog is not configured
 * or when running in the simulator.
 */
void
stop_watchdog(void)
{
#if defined(CONFIG_ETRAX_WATCHDOG) && !defined(CONFIG_SVINTO_SIM)
	watchdog_key ^= 0x7; /* invert key, which is 3 bits */
	*R_WATCHDOG = IO_FIELD(R_WATCHDOG, key, watchdog_key) |
		IO_STATE(R_WATCHDOG, enable, stop);
#endif
}
Пример #12
0
/*
 * flush_tlb_range - invalidate TLB entries for a virtual address range.
 * @mm:    the address space the range belongs to.
 * @start: first address of the range (inclusive, page-aligned below).
 * @end:   end of the range (exclusive).
 *
 * Scans every hardware TLB slot and invalidates entries whose page_id
 * matches the mm context and whose vpn falls in [start, end).
 */
void
flush_tlb_range(struct mm_struct *mm, 
		unsigned long start,
		unsigned long end)
{
	int page_id = mm->context;
	int i;
	unsigned long flags;

	/* start/end are unsigned longs, not pointers: print with %lx.
	 * Passing longs for %p is a format/argument mismatch. */
	D(printk("tlb: flush range 0x%lx<->0x%lx in context %d (%p)\n",
		 start, end, page_id, mm));

	if(page_id == NO_CONTEXT)
		return;

	start &= PAGE_MASK;  /* probably not necessary */
	end &= PAGE_MASK;    /* ditto */

	/* invalidate those TLB entries that match both the mm context
	 * and the virtual address range
	 */

	save_and_cli(flags);  /* flush needs to be atomic */
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		unsigned long tlb_hi, vpn;
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		tlb_hi = *R_TLB_HI;
		vpn = tlb_hi & PAGE_MASK;
		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
		    vpn >= start && vpn < end) {
			/* vpn = i & 0xf spreads invalid entries over
			 * different 4-way entry groups. */
			*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
				      IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );
			
			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no  ) |
				      IO_STATE(R_TLB_LO, valid, no  ) |
				      IO_STATE(R_TLB_LO, kernel,no  ) |
				      IO_STATE(R_TLB_LO, we,    no  ) |
				      IO_FIELD(R_TLB_LO, pfn,   0   ) );
		}
	}
	restore_flags(flags);
}
Пример #13
0
/*
 * flush_tlb_page - invalidate any TLB entry mapping @addr in @vma's mm
 * (older variant: mm->context is a plain int here).
 * @vma:  the VMA the page belongs to (only vm_mm is used).
 * @addr: virtual address of the page to flush.
 */
void
flush_tlb_page(struct vm_area_struct *vma, 
	       unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int page_id = mm->context;
	int i;
	unsigned long flags;

	/* addr is an unsigned long, not a pointer: print it with %lx.
	 * Passing a long for %p is a format/argument mismatch. */
	D(printk("tlb: flush page 0x%lx in context %d (%p)\n", addr, page_id, mm));

	if(page_id == NO_CONTEXT)
		return;

	addr &= PAGE_MASK; /* perhaps not necessary */

	/* invalidate those TLB entries that match both the mm context
	 * and the virtual address requested 
	 */

	save_and_cli(flags);  /* flush needs to be atomic */
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		unsigned long tlb_hi;
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		tlb_hi = *R_TLB_HI;
		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
		    (tlb_hi & PAGE_MASK) == addr) {
			*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
				addr; /* same addr as before works. */
			
			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no  ) |
				      IO_STATE(R_TLB_LO, valid, no  ) |
				      IO_STATE(R_TLB_LO, kernel,no  ) |
				      IO_STATE(R_TLB_LO, we,    no  ) |
				      IO_FIELD(R_TLB_LO, pfn,   0   ) );
		}
	}
	restore_flags(flags);
}
Пример #14
0
/*
 * flush_tlb_all - invalidate every entry in the hardware TLB
 * (variant using local_irq_save/local_irq_restore).
 *
 * The vpn written is i & 0xf so successive invalid entries do not all
 * land in the same 4-way entry group (see the comment in the other
 * flush_tlb_all variant).
 */
void
flush_tlb_all(void)
{
	int i;
	unsigned long flags;


	local_irq_save(flags);
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
		*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
			      IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );

		*R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no       ) |
			      IO_STATE(R_TLB_LO, valid, no       ) |
			      IO_STATE(R_TLB_LO, kernel,no	 ) |
			      IO_STATE(R_TLB_LO, we,    no       ) |
			      IO_FIELD(R_TLB_LO, pfn,   0        ) );
	}
	local_irq_restore(flags);
	D(printk("tlb: flushed all\n"));
}
Пример #15
0
/*
 * reset_watchdog - keep the hardware watchdog from firing.
 *
 * Re-arms the watchdog with the inverted 3-bit key, but only while the
 * system still has more than WATCHDOG_MIN_FREE_PAGES free pages - if
 * memory runs out the watchdog is deliberately allowed to reset the box.
 */
void
reset_watchdog(void)
{
#if defined(CONFIG_ETRAX_WATCHDOG) && !defined(CONFIG_SVINTO_SIM)
	/* only keep watchdog happy as long as we have memory left! */
	if(nr_free_pages() > WATCHDOG_MIN_FREE_PAGES) {
		/* reset the watchdog with the inverse of the old key */
		watchdog_key ^= 0x7; /* invert key, which is 3 bits */
		*R_WATCHDOG = IO_FIELD(R_WATCHDOG, key, watchdog_key) |
			IO_STATE(R_WATCHDOG, enable, start);
	}
#endif
}
Пример #16
0
/*
 * dump_tlb_all - print the raw HI/LO words of every TLB entry.
 *
 * Debug helper: selects each slot in turn and prints its contents.
 * Interrupts are disabled so the dump is a consistent snapshot.
 */
void
dump_tlb_all(void)
{
	int i;
	unsigned long flags;
	
	printk("TLB dump. LO is: pfn | reserved | global | valid | kernel | we  |\n");

	save_and_cli(flags);
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
		printk("Entry %d: HI 0x%08lx, LO 0x%08lx\n",
		       i, *R_TLB_HI, *R_TLB_LO);
	}
	restore_flags(flags);
}
Пример #17
0
/*
 * hard_reset_now - force an immediate hardware reset and never return.
 *
 * With the watchdog configured (and not simulating), sets the magic
 * cause_of_death value checked by the watchdog handler in entry.S;
 * otherwise arms the watchdog directly (key value is arbitrary since
 * we never feed it again).  Then spins with interrupts off until the
 * watchdog resets the chip.
 */
void hard_reset_now (void)
{
#if defined(CONFIG_ETRAX_WATCHDOG) && !defined(CONFIG_SVINTO_SIM)
	/* Deliberately declared only here and in entry.S - see the
	 * other hard_reset_now variant for the rationale. */
	extern int cause_of_death;
#endif

	printk("*** HARD RESET ***\n");
	local_irq_disable();

#if defined(CONFIG_ETRAX_WATCHDOG) && !defined(CONFIG_SVINTO_SIM)
	cause_of_death = 0xbedead;
#else
	*R_WATCHDOG = IO_FIELD(R_WATCHDOG, key, 3) |
		IO_STATE(R_WATCHDOG, enable, start);
#endif

	/* Busy-wait for the reset to take effect. */
	while(1)  ;
}
Пример #18
0
/*
 * switch_mm - activate the MMU context of @next when switching address
 * spaces.
 * @prev: address space we are leaving.
 * @next: address space we are entering.
 * @tsk:  the task being switched to (unused here).
 *
 * Allocates a hardware context for @next if needed, publishes its pgd
 * in the per-CPU current_pgd used by the fault handlers, then programs
 * R_MMU_CONTEXT with the new page_id.  No-op when prev == next.
 */
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	struct task_struct *tsk)
{
	if (prev != next) {
		/* make sure we have a hardware context */
		get_mmu_context(next);

		/* the fault handlers need their own copy of the pgd */
		per_cpu(current_pgd, smp_processor_id()) = next->pgd;

		/* switch context in the MMU */

		/* next->context is a struct (its .page_id member is used
		 * below), so pass the int member to %d instead of pushing
		 * the whole struct through varargs, which is undefined. */
		D(printk(KERN_DEBUG "switching mmu_context to %d (%p)\n",
			next->context.page_id, next));

		*R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT,
					  page_id, next->context.page_id);
	}
}
Пример #19
0
/*
 * paging_init - set up the kernel page tables, MMU segment mapping and
 * the memory-management zone layout for the ETRAX-100 LX.
 *
 * Clears swapper_pg_dir, initialises the TLB, programs the KSEG
 * segment/base registers (two layouts: the buggy LX v1 "low map" and
 * the corrected v2 map), enables MMU exceptions, allocates the zero
 * page and hands all memory to a single DMA-able zone.
 */
void __init 
paging_init(void)
{
	int i;
	unsigned long zones_size[MAX_NR_ZONES];

	printk("Setting up paging and the MMU.\n");
	
	/* clear out the init_mm.pgd that will contain the kernel's mappings */

	for(i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);
	
	/* make sure the current pgd table points to something sane
	 * (even if it is most probably not used until the next 
	 *  switch_mm)
	 */

	current_pgd = init_mm.pgd;

	/* initialise the TLB (tlb.c) */

	tlb_init();

	/* see README.mm for details on the KSEG setup */

#ifdef CONFIG_CRIS_LOW_MAP
	/* Etrax-100 LX version 1 has a bug so that we cannot map anything
	 * across the 0x80000000 boundary, so we need to shrink the user-virtual
	 * area to 0x50000000 instead of 0xb0000000 and map things slightly
	 * different. The unused areas are marked as paged so that we can catch
	 * freak kernel accesses there.
	 *
	 * The ARTPEC chip is mapped at 0xa so we pass that segment straight
	 * through. We cannot vremap it because the vmalloc area is below 0x8
	 * and Juliette needs an uncached area above 0x8.
	 *
	 * Same thing with 0xc and 0x9, which is memory-mapped I/O on some boards.
	 * We map them straight over in LOW_MAP, but use vremap in LX version 2.
	 */

#define CACHED_BOOTROM (KSEG_F | 0x08000000UL)

	*R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg  ) |  /* bootrom */
			IO_STATE(R_MMU_KSEG, seg_e, page ) |
			IO_STATE(R_MMU_KSEG, seg_d, page ) | 
			IO_STATE(R_MMU_KSEG, seg_c, page ) |   
			IO_STATE(R_MMU_KSEG, seg_b, seg  ) |  /* kernel reg area */
#ifdef CONFIG_JULIETTE
			IO_STATE(R_MMU_KSEG, seg_a, seg  ) |  /* ARTPEC etc. */
#else
			IO_STATE(R_MMU_KSEG, seg_a, page ) |
#endif
			IO_STATE(R_MMU_KSEG, seg_9, seg  ) |  /* LED's on some boards */
			IO_STATE(R_MMU_KSEG, seg_8, seg  ) |  /* CSE0/1, flash and I/O */
			IO_STATE(R_MMU_KSEG, seg_7, page ) |  /* kernel vmalloc area */
			IO_STATE(R_MMU_KSEG, seg_6, seg  ) |  /* kernel DRAM area */
			IO_STATE(R_MMU_KSEG, seg_5, seg  ) |  /* cached flash */
			IO_STATE(R_MMU_KSEG, seg_4, page ) |  /* user area */
			IO_STATE(R_MMU_KSEG, seg_3, page ) |  /* user area */
			IO_STATE(R_MMU_KSEG, seg_2, page ) |  /* user area */
			IO_STATE(R_MMU_KSEG, seg_1, page ) |  /* user area */
			IO_STATE(R_MMU_KSEG, seg_0, page ) ); /* user area */

	/* Physical base addresses (upper nibble) for segments f..8. */
	*R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x3 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_e, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_c, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
#ifdef CONFIG_JULIETTE
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0xa ) |
#else
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0x0 ) |
#endif
			    IO_FIELD(R_MMU_KBASE_HI, base_9, 0x9 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_8, 0x8 ) );
	
	/* Physical base addresses for segments 7..0. */
	*R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_6, 0x4 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
#else
	/* This code is for the corrected Etrax-100 LX version 2... */

#define CACHED_BOOTROM (KSEG_A | 0x08000000UL)

	*R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg  ) | /* cached flash */
			IO_STATE(R_MMU_KSEG, seg_e, seg  ) | /* uncached flash */
			IO_STATE(R_MMU_KSEG, seg_d, page ) | /* vmalloc area */
			IO_STATE(R_MMU_KSEG, seg_c, seg  ) | /* kernel area */
			IO_STATE(R_MMU_KSEG, seg_b, seg  ) | /* kernel reg area */
			IO_STATE(R_MMU_KSEG, seg_a, seg  ) | /* bootrom */
			IO_STATE(R_MMU_KSEG, seg_9, page ) | /* user area */
			IO_STATE(R_MMU_KSEG, seg_8, page ) |
			IO_STATE(R_MMU_KSEG, seg_7, page ) |
			IO_STATE(R_MMU_KSEG, seg_6, page ) |
			IO_STATE(R_MMU_KSEG, seg_5, page ) |
			IO_STATE(R_MMU_KSEG, seg_4, page ) |
			IO_STATE(R_MMU_KSEG, seg_3, page ) |
			IO_STATE(R_MMU_KSEG, seg_2, page ) |
			IO_STATE(R_MMU_KSEG, seg_1, page ) |
			IO_STATE(R_MMU_KSEG, seg_0, page ) );

	/* Physical base addresses (upper nibble) for segments f..8. */
	*R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_e, 0x8 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_c, 0x4 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0x3 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_9, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_8, 0x0 ) );
	
	/* Physical base addresses for segments 7..0. */
	*R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_6, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
#endif

	/* Start in kernel context 0. */
	*R_MMU_CONTEXT = ( IO_FIELD(R_MMU_CONTEXT, page_id, 0 ) );
	
	/* The MMU has been enabled ever since head.S but just to make
	 * it totally obvious we do it here as well.
	 */

	*R_MMU_CTRL = ( IO_STATE(R_MMU_CTRL, inv_excp, enable ) |
			IO_STATE(R_MMU_CTRL, acc_excp, enable ) |
			IO_STATE(R_MMU_CTRL, we_excp,  enable ) );
	
	*R_MMU_ENABLE = IO_STATE(R_MMU_ENABLE, mmu_enable, enable);

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */

	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/* All pages are DMA'able in Etrax, so put all in the DMA'able zone */

	zones_size[0] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;

	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;

	/* Use free_area_init_node instead of free_area_init, because the former
	 * is designed for systems where the DRAM starts at an address substantially
	 * higher than 0, like us (we start at PAGE_OFFSET). This saves space in the
	 * mem_map page array.
	 */

	free_area_init_node(0, &contig_page_data, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
	mem_map = contig_page_data.node_mem_map;
}
Пример #20
0
/*
 * start_timer1 - (re)arm hardware timer 1 to expire after @delay_us
 * microseconds.
 *
 * Picks the lowest usable clock frequency that still covers the delay,
 * computes the divider, records the settings in the statistics arrays,
 * then stops/loads/starts timer 1 and enables its interrupt.
 * (The blanked-out comments were restored from the identically-coded
 * variant of this function that retains them.)
 */
inline void start_timer1(unsigned long delay_us)
{
  int freq_index = 0; /* This is the lowest resolution */
  unsigned long upper_limit = MAX_DELAY_US;

  unsigned long div;
  /* Start/Restart the timer to the new shorter value */
  /* t = 1/freq = 1/19200 = 53us
   * T=div*t,  div = T/t = delay_us*freq/1000000
   */
#if 1 /* Adaptive timer settings */
  while (delay_us < upper_limit && freq_index < MAX_USABLE_TIMER_FREQ)
  {
    freq_index++;
    upper_limit >>= 1; /* Divide by 2 using shift */
  }
  if (freq_index > 0)
  {
    freq_index--;
  }
#else
  freq_index = 6;
#endif
  div = delay_us * timer_freq_100[freq_index]/10000;
  if (div < 2)
  {
    /* Maybe increase timer freq? */
    div = 2;
  }
  if (div > 255)
  {
    div = 0; /* This means 256, the max the timer takes */
    /* If a longer timeout than the timer can handle is used,
     * then we must restart it when it goes off.
     */
  }

  timer_div_settings[fast_timers_started % NUM_TIMER_STATS] = div;
  timer_freq_settings[fast_timers_started % NUM_TIMER_STATS] = freq_index;
  timer_delay_settings[fast_timers_started % NUM_TIMER_STATS] = delay_us;

	D1(printk(KERN_DEBUG "start_timer1 : %d us freq: %i div: %i\n",
            delay_us, freq_index, div));
  /* Clear timer1 irq */
  *R_IRQ_MASK0_CLR = IO_STATE(R_IRQ_MASK0_CLR, timer1, clr);

  /* Set timer values */
  *R_TIMER_CTRL = r_timer_ctrl_shadow =
    (r_timer_ctrl_shadow &
     ~IO_MASK(R_TIMER_CTRL, timerdiv1) &
     ~IO_MASK(R_TIMER_CTRL, tm1) &
     ~IO_MASK(R_TIMER_CTRL, clksel1)) |
    IO_FIELD(R_TIMER_CTRL, timerdiv1, div) |
    IO_STATE(R_TIMER_CTRL, tm1, stop_ld) |
    IO_FIELD(R_TIMER_CTRL, clksel1, freq_index ); /* timer1 clock select */

  /* Clear the timer1 interrupt flag */
  *R_TIMER_CTRL =  r_timer_ctrl_shadow |
    IO_STATE(R_TIMER_CTRL, i1, clr);

  /* Start timer1 */
  *R_TIMER_CTRL = r_timer_ctrl_shadow =
    (r_timer_ctrl_shadow & ~IO_MASK(R_TIMER_CTRL, tm1)) |
    IO_STATE(R_TIMER_CTRL, tm1, run);

  /* Enable timer1 irq */
  *R_IRQ_MASK0_SET = IO_STATE(R_IRQ_MASK0_SET, timer1, set);
  fast_timers_started++;
  fast_timer_running = 1;
}
Пример #21
0
/*
 * start_port - bring up a debug serial port described by @p.
 *
 * Idempotent (guarded by p->started).  Routes the DMA channels for the
 * chosen port in genconfig_shadow, disables XOFF handling, then
 * programs baudrate, parity and word length into the port's baud,
 * receiver-control and transmitter-control registers.  A NULL @p or an
 * already-started port is a no-op.
 */
static void
start_port(struct dbg_port* p)
{
	unsigned long rec_ctrl = 0;
	unsigned long tr_ctrl = 0;

	if (!p)
		return;

	/* Only configure the hardware once per port. */
	if (p->started)
		return;
	p->started = 1;

	/* Route the DMA channels / pin selection for this port index
	 * in the shared general-config shadow register. */
	if (p->index == 0)
	{
		genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma6);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma6, unused);
	}
	else if (p->index == 1)
	{
		genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma8);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma8, usb);
	}
	else if (p->index == 2)
	{
		genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma2);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma2, par0);
		genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma3);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma3, par0);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, ser2, select);
	}
	else
	{
		genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma4);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma4, par1);
		genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma5);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma5, par1);
		genconfig_shadow |= IO_STATE(R_GEN_CONFIG, ser3, select);
	}

	*R_GEN_CONFIG = genconfig_shadow;

	/* Disable XOFF flow control. */
	*p->xoff =
		IO_STATE(R_SERIAL0_XOFF, tx_stop, enable) |
		IO_STATE(R_SERIAL0_XOFF, auto_xoff, disable) |
		IO_FIELD(R_SERIAL0_XOFF, xoff_char, 0);

	/* Program the baud rate; unknown rates fall back to 115200. */
	switch (p->baudrate)
	{
	case 0:
	case 115200:
		*p->baud =
		  IO_STATE(R_SERIAL0_BAUD, tr_baud, c115k2Hz) |
		  IO_STATE(R_SERIAL0_BAUD, rec_baud, c115k2Hz);
		break;
	case 1200:
		*p->baud =
		  IO_STATE(R_SERIAL0_BAUD, tr_baud, c1200Hz) |
		  IO_STATE(R_SERIAL0_BAUD, rec_baud, c1200Hz);
		break;
	case 2400:
		*p->baud =
		  IO_STATE(R_SERIAL0_BAUD, tr_baud, c2400Hz) |
		  IO_STATE(R_SERIAL0_BAUD, rec_baud, c2400Hz);
		break;
	case 4800:
		*p->baud =
		  IO_STATE(R_SERIAL0_BAUD, tr_baud, c4800Hz) |
		  IO_STATE(R_SERIAL0_BAUD, rec_baud, c4800Hz);
		break;
	case 9600:
		*p->baud =
		  IO_STATE(R_SERIAL0_BAUD, tr_baud, c9600Hz) |
		  IO_STATE(R_SERIAL0_BAUD, rec_baud, c9600Hz);
		  break;
	case 19200:
		*p->baud =
		  IO_STATE(R_SERIAL0_BAUD, tr_baud, c19k2Hz) |
		  IO_STATE(R_SERIAL0_BAUD, rec_baud, c19k2Hz);
		 break;
	case 38400:
		*p->baud =
		  IO_STATE(R_SERIAL0_BAUD, tr_baud, c38k4Hz) |
		  IO_STATE(R_SERIAL0_BAUD, rec_baud, c38k4Hz);
		break;
	case 57600:
		*p->baud =
		  IO_STATE(R_SERIAL0_BAUD, tr_baud, c57k6Hz) |
		  IO_STATE(R_SERIAL0_BAUD, rec_baud, c57k6Hz);
		break;
	default:
		*p->baud =
		  IO_STATE(R_SERIAL0_BAUD, tr_baud, c115k2Hz) |
		  IO_STATE(R_SERIAL0_BAUD, rec_baud, c115k2Hz);
		  break;
        }

	/* Parity: 'E' = even, 'O' = odd, anything else = none. */
	if (p->parity == 'E') {
		rec_ctrl =
		  IO_STATE(R_SERIAL0_REC_CTRL, rec_par, even) |
		  IO_STATE(R_SERIAL0_REC_CTRL, rec_par_en, enable);
		tr_ctrl =
		  IO_STATE(R_SERIAL0_TR_CTRL, tr_par, even) |
		  IO_STATE(R_SERIAL0_TR_CTRL, tr_par_en, enable);
	} else if (p->parity == 'O') {
		rec_ctrl =
		  IO_STATE(R_SERIAL0_REC_CTRL, rec_par, odd) |
		  IO_STATE(R_SERIAL0_REC_CTRL, rec_par_en, enable);
		tr_ctrl =
		  IO_STATE(R_SERIAL0_TR_CTRL, tr_par, odd) |
		  IO_STATE(R_SERIAL0_TR_CTRL, tr_par_en, enable);
	} else {
		rec_ctrl =
		  IO_STATE(R_SERIAL0_REC_CTRL, rec_par, even) |
		  IO_STATE(R_SERIAL0_REC_CTRL, rec_par_en, disable);
		tr_ctrl =
		  IO_STATE(R_SERIAL0_TR_CTRL, tr_par, even) |
		  IO_STATE(R_SERIAL0_TR_CTRL, tr_par_en, disable);
	}
	/* Word length: 7 bits if requested, otherwise 8. */
	if (p->bits == 7)
	{
		rec_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_bitnr, rec_7bit);
		tr_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_bitnr, tr_7bit);
	}
	else
	{
		rec_ctrl |= IO_STATE(R_SERIAL0_REC_CTRL, rec_bitnr, rec_8bit);
		tr_ctrl |= IO_STATE(R_SERIAL0_TR_CTRL, tr_bitnr, tr_8bit);
	}

	/* Enable the receiver with the accumulated parity/size bits. */
	*p->rec_ctrl =
		IO_STATE(R_SERIAL0_REC_CTRL, dma_err, stop) |
		IO_STATE(R_SERIAL0_REC_CTRL, rec_enable, enable) |
		IO_STATE(R_SERIAL0_REC_CTRL, rts_, active) |
		IO_STATE(R_SERIAL0_REC_CTRL, sampling, middle) |
		IO_STATE(R_SERIAL0_REC_CTRL, rec_stick_par, normal) |
		rec_ctrl;

	/* Enable the transmitter with the accumulated parity/size bits. */
	*p->tr_ctrl =
		IO_FIELD(R_SERIAL0_TR_CTRL, txd, 0) |
		IO_STATE(R_SERIAL0_TR_CTRL, tr_enable, enable) |
		IO_STATE(R_SERIAL0_TR_CTRL, auto_cts, disabled) |
		IO_STATE(R_SERIAL0_TR_CTRL, stop_bits, one_bit) |
		IO_STATE(R_SERIAL0_TR_CTRL, tr_stick_par, normal) |
		tr_ctrl;
}
Пример #22
0
/*#---------------------------------------------------------------------------
 *#
 *# FUNCTION NAME: i2c_init
 *#
 *# DESCRIPTION  : initialises the I2C device driver
 *#
 *# PARAMETERS   :
 *#
 *#---------------------------------------------------------------------------
 */
/*
 * i2c_init - one-shot initialisation of the I2C pins (FoxBoard variant).
 *
 * Static 'first'/'res' make repeated calls return the cached result of
 * the first attempt.  Either claims the dedicated Port B I2C interface
 * and programs the port registers, or allocates generic data/clock
 * GPIO pins.  Returns 0 on success or the IO-layer error code.
 */
int __init i2c_init( void )
{
    static int res = 0;
    static int first = 1;

    /* Already initialised (or attempted): hand back the cached result. */
    if ( !first )
    {
        return res;
    }

    first = 0;

    /* Setup and enable the Port B I2C interface */

#ifndef CONFIG_ETRAX_I2C_USES_PB_NOT_PB_I2C
    /* here, we're using the dedicated I2C pins of FoxBoard */
    if ( ( res = cris_request_io_interface( if_i2c, "I2C" ) ) )
    {
        printk( KERN_CRIT "i2c_init: Failed to get IO interface\n" );
        return res;
    }

    /* Enable the I2C function with data/clock high and output enabled;
     * the shadow mirrors the hardware register. */
    *R_PORT_PB_I2C = port_pb_i2c_shadow |=
        IO_STATE( R_PORT_PB_I2C, i2c_en,  on ) |
        IO_FIELD( R_PORT_PB_I2C, i2c_d,   1 )  |
        IO_FIELD( R_PORT_PB_I2C, i2c_set_scl, 1 )  |
        IO_STATE( R_PORT_PB_I2C, i2c_oe_, enable );

    /* Pin 0 becomes input, pin 1 output. */
    port_pb_dir_shadow &= ~IO_MASK( R_PORT_PB_DIR, dir0 );
    port_pb_dir_shadow &= ~IO_MASK( R_PORT_PB_DIR, dir1 );

    *R_PORT_PB_DIR = ( port_pb_dir_shadow |=
              IO_STATE( R_PORT_PB_DIR, dir0, input )  |
              IO_STATE( R_PORT_PB_DIR, dir1, output ) );
#else
        /* If everything goes fine, res = 0, meaning "if" fails =>
         * will do the "else" too and as such initialise the clock port...
         * Clever trick!
         */
        if ( ( res = cris_io_interface_allocate_pins( if_i2c
                                                    , 'b'
                                                    , CONFIG_ETRAX_I2C_DATA_PORT
                                                    , CONFIG_ETRAX_I2C_DATA_PORT
                                                    )
             )
           )
        {
            printk( KERN_WARNING "i2c_init: Failed to get IO pin for I2C data port\n" );
            return ( res );
        }
        /* Same here...*/
        else if ( ( res = cris_io_interface_allocate_pins( if_i2c
                                                         , 'b'
                                                         , CONFIG_ETRAX_I2C_CLK_PORT
                                                         , CONFIG_ETRAX_I2C_CLK_PORT
                                                         )
                  )
                )
        {
            /* Clock pin failed: release the data pin again and fall
             * through to return the non-zero res. */
            cris_io_interface_free_pins( if_i2c
                                       , 'b'
                                       , CONFIG_ETRAX_I2C_DATA_PORT
                                       , CONFIG_ETRAX_I2C_DATA_PORT
                                       );
            printk( KERN_WARNING "i2c_init: Failed to get IO pin for I2C clk port\n" );
        }
#endif

    return ( res );
}   /* i2c_init */
Пример #23
0
/*
 * parport_etrax_init_registers - program the ETRAX parallel-port
 * hardware registers for each compiled-in port.
 *
 * For every enabled port (0 and/or 1): writes the config register
 * (inverted control lines, RLE off, manual mode, DMA per config),
 * initialises the control/data register, acknowledges any pending
 * peripheral interrupt, and sets the setup/strobe/hold delays.
 * Finally resets the TX DMA channels where DMA is configured.
 */
static void __init
parport_etrax_init_registers(void)
{
	struct etrax100par_struct *info;
	int i;

	for (i = 0, info = port_table; i < 2; i++, info++) {
#ifndef CONFIG_ETRAX_PARALLEL_PORT0
		if (i == 0)
			continue;
#endif
#ifndef CONFIG_ETRAX_PARALLEL_PORT1
		if (i == 1)
			continue;
#endif
		/* Base configuration: inverted control signals, RLE off,
		 * port enabled, manual mode. */
		info->reg_config_shadow = 
			IO_STATE(R_PAR0_CONFIG, iseli, inv)       |
			IO_STATE(R_PAR0_CONFIG, iautofd, inv)     |
			IO_STATE(R_PAR0_CONFIG, istrb, inv)       |
			IO_STATE(R_PAR0_CONFIG, iinit, inv)       |
			IO_STATE(R_PAR0_CONFIG, rle_in, disable)  |
			IO_STATE(R_PAR0_CONFIG, rle_out, disable) |
			IO_STATE(R_PAR0_CONFIG, enable, on)       |
			IO_STATE(R_PAR0_CONFIG, force, off)       |
			IO_STATE(R_PAR0_CONFIG, ign_ack, wait)    |
			IO_STATE(R_PAR0_CONFIG, oe_ack, wait_oe)  |
			IO_STATE(R_PAR0_CONFIG, mode, manual);

		/* DMA on/off per the port's build configuration. */
		if ((i == 0 && PAR0_USE_DMA) || (i == 1 && PAR1_USE_DMA))
			info->reg_config_shadow |=
				IO_STATE(R_PAR0_CONFIG, dma, enable);
		else
			info->reg_config_shadow |=
				IO_STATE(R_PAR0_CONFIG, dma, disable);

		*info->reg_config = info->reg_config_shadow;

		/* Output enabled, all control lines inactive, data 0. */
		info->reg_ctrl_data_shadow = 
			IO_STATE(R_PAR0_CTRL_DATA, peri_int, nop)    |
			IO_STATE(R_PAR0_CTRL_DATA, oe, enable)       |
			IO_STATE(R_PAR0_CTRL_DATA, seli, inactive)   |
			IO_STATE(R_PAR0_CTRL_DATA, autofd, inactive) |
			IO_STATE(R_PAR0_CTRL_DATA, strb, inactive)   |
			IO_STATE(R_PAR0_CTRL_DATA, init, inactive)   |
			IO_STATE(R_PAR0_CTRL_DATA, ecp_cmd, data)    |
			IO_FIELD(R_PAR0_CTRL_DATA, data, 0);
		*info->reg_ctrl_data = info->reg_ctrl_data_shadow;

		/* Clear peri int without setting shadow */
		*info->reg_ctrl_data = info->reg_ctrl_data_shadow |
			IO_STATE(R_PAR0_CTRL_DATA, peri_int, ack);

		info->reg_delay_shadow = 
			IO_FIELD(R_PAR0_DELAY, setup, 5)  |
			IO_FIELD(R_PAR0_DELAY, strobe, 5) |
			IO_FIELD(R_PAR0_DELAY, hold, 5);
		*info->reg_delay = info->reg_delay_shadow;
	}

#ifdef CONFIG_ETRAX_PARALLEL_PORT0
#ifdef CONFIG_ETRAX_PAR0_DMA
	RESET_DMA(PAR0_TX_DMA_NBR);
	WAIT_DMA(PAR0_TX_DMA_NBR);
#ifdef CONFIG_ETRAX_SERIAL_PORT2
	printk(" Warning - DMA clash with ser2!\n");
#endif /* SERIAL_PORT2 */
#endif /* DMA */
#endif /* PORT0 */

#ifdef CONFIG_ETRAX_PARALLEL_PORT1
#ifdef CONFIG_ETRAX_PAR1_DMA
	RESET_DMA(PAR1_TX_DMA_NBR);
	WAIT_DMA(PAR1_TX_DMA_NBR);
#ifdef CONFIG_ETRAX_SERIAL_PORT3
	printk(" Warning - DMA clash with ser3!\n");
#endif /* SERIAL_PORT3 */
#endif /* DMA */
#endif /* PORT1 */
} 
Пример #24
0
/* start_timer1 - (re)start ETRAX timer1 to fire after delay_us microseconds.
 *
 * Picks the highest usable timer clock (freq_index) that still lets the
 * requested delay fit in the 8-bit divider, records the chosen settings in
 * the timer_*_settings[] statistics ring buffers, then programs timer1,
 * acks any pending interrupt, starts the timer and unmasks its irq.
 *
 * delay_us: requested delay in microseconds.  A delay longer than the
 * timer can represent is clamped (div = 0 means 256 ticks); the interrupt
 * handler must restart the timer to complete such long timeouts.
 *
 * Fix vs. original: the debug printk used %d/%i for the unsigned long
 * arguments delay_us and div; the format now uses %lu to match the types.
 */
inline void start_timer1(unsigned long delay_us)
{
  int freq_index = 0; /* This is the lowest resolution */
  unsigned long upper_limit = MAX_DELAY_US;

  unsigned long div;
  /* Start/Restart the timer to the new shorter value */
  /* t = 1/freq = 1/19200 = 53us
   * T=div*t,  div = T/t = delay_us*freq/1000000
   */
#if 1 /* Adaptive timer settings */
  /* Step to faster clocks (halving the representable range each time)
   * while the delay still fits, then back off one step. */
  while (delay_us < upper_limit && freq_index < MAX_USABLE_TIMER_FREQ)
  {
    freq_index++;
    upper_limit >>= 1; /* Divide by 2 using shift */
  }
  if (freq_index > 0)
  {
    freq_index--;
  }
#else
  freq_index = 6;
#endif
  div = delay_us * timer_freq_100[freq_index]/10000;
  if (div < 2)
  {
    /* Maybe increase timer freq? */
    div = 2;
  }
  if (div > 255)
  {
    div = 0; /* This means 256, the max the timer takes */
    /* If a longer timeout than the timer can handle is used,
     * then we must restart it when it goes off.
     */
  }

  /* Record the settings for debugging/statistics. */
  timer_div_settings[fast_timers_started % NUM_TIMER_STATS] = div;
  timer_freq_settings[fast_timers_started % NUM_TIMER_STATS] = freq_index;
  timer_delay_settings[fast_timers_started % NUM_TIMER_STATS] = delay_us;

  D1(printk(KERN_DEBUG "start_timer1 : %lu us freq: %i div: %lu\n",
            delay_us, freq_index, div));
  /* Clear timer1 irq */
  *R_IRQ_MASK0_CLR = IO_STATE(R_IRQ_MASK0_CLR, timer1, clr);

  /* Set timer values */
  *R_TIMER_CTRL = r_timer_ctrl_shadow =
    (r_timer_ctrl_shadow &
     ~IO_MASK(R_TIMER_CTRL, timerdiv1) &
     ~IO_MASK(R_TIMER_CTRL, tm1) &
     ~IO_MASK(R_TIMER_CTRL, clksel1)) |
    IO_FIELD(R_TIMER_CTRL, timerdiv1, div) |
    IO_STATE(R_TIMER_CTRL, tm1, stop_ld) |
    IO_FIELD(R_TIMER_CTRL, clksel1, freq_index ); /* 6=c19k2Hz */

  /* Ack interrupt */
  *R_TIMER_CTRL =  r_timer_ctrl_shadow |
    IO_STATE(R_TIMER_CTRL, i1, clr);

  /* Start timer */
  *R_TIMER_CTRL = r_timer_ctrl_shadow =
    (r_timer_ctrl_shadow & ~IO_MASK(R_TIMER_CTRL, tm1)) |
    IO_STATE(R_TIMER_CTRL, tm1, run);

  /* Enable timer1 irq */
  *R_IRQ_MASK0_SET = IO_STATE(R_IRQ_MASK0_SET, timer1, set);
  fast_timers_started++;
  fast_timer_running = 1;
}
Пример #25
0
void __init 
paging_init(void)
{
	int i;
	unsigned long zones_size[MAX_NR_ZONES];

	printk("Setting up paging and the MMU.\n");

	/* Clear the initial kernel page directory; kernel addresses are
	 * mapped through the fixed KSEG segment registers programmed
	 * below, not through these page tables. */
	for(i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);

	/* The boot CPU starts out running on init_mm's page directory. */
	per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;

	/* Invalidate the whole TLB before the MMU is switched on. */
	tlb_init();

	/* Program the kernel segment map.  Each 256 MB segment is either
	 * 'seg' (linearly mapped to the physical base given in
	 * R_MMU_KBASE_HI/LO) or 'page' (translated via the page tables).
	 * The layout differs between the low-map and normal configs. */
#ifdef CONFIG_CRIS_LOW_MAP

#define CACHED_BOOTROM (KSEG_F | 0x08000000UL)

	*R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg  ) |  
			IO_STATE(R_MMU_KSEG, seg_e, page ) |
			IO_STATE(R_MMU_KSEG, seg_d, page ) | 
			IO_STATE(R_MMU_KSEG, seg_c, page ) |   
			IO_STATE(R_MMU_KSEG, seg_b, seg  ) |  
#ifdef CONFIG_JULIETTE
			IO_STATE(R_MMU_KSEG, seg_a, seg  ) |  
#else
			IO_STATE(R_MMU_KSEG, seg_a, page ) |
#endif
			IO_STATE(R_MMU_KSEG, seg_9, seg  ) |  
			IO_STATE(R_MMU_KSEG, seg_8, seg  ) |  
			IO_STATE(R_MMU_KSEG, seg_7, page ) |  
			IO_STATE(R_MMU_KSEG, seg_6, seg  ) |  
			IO_STATE(R_MMU_KSEG, seg_5, seg  ) |  
			IO_STATE(R_MMU_KSEG, seg_4, page ) |  
			IO_STATE(R_MMU_KSEG, seg_3, page ) |  
			IO_STATE(R_MMU_KSEG, seg_2, page ) |  
			IO_STATE(R_MMU_KSEG, seg_1, page ) |  
			IO_STATE(R_MMU_KSEG, seg_0, page ) ); 

	/* Physical bases (in 256 MB units) for the 'seg' entries above. */
	*R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x3 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_e, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_c, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
#ifdef CONFIG_JULIETTE
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0xa ) |
#else
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0x0 ) |
#endif
			    IO_FIELD(R_MMU_KBASE_HI, base_9, 0x9 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_8, 0x8 ) );
	
	*R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_6, 0x4 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
#else
	/* Normal (non low-map) segment layout. */

#define CACHED_BOOTROM (KSEG_A | 0x08000000UL)

	*R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg  ) | 
			IO_STATE(R_MMU_KSEG, seg_e, seg  ) | 
			IO_STATE(R_MMU_KSEG, seg_d, page ) | 
			IO_STATE(R_MMU_KSEG, seg_c, seg  ) | 
			IO_STATE(R_MMU_KSEG, seg_b, seg  ) | 
			IO_STATE(R_MMU_KSEG, seg_a, seg  ) | 
			IO_STATE(R_MMU_KSEG, seg_9, page ) | 
			IO_STATE(R_MMU_KSEG, seg_8, page ) |
			IO_STATE(R_MMU_KSEG, seg_7, page ) |
			IO_STATE(R_MMU_KSEG, seg_6, page ) |
			IO_STATE(R_MMU_KSEG, seg_5, page ) |
			IO_STATE(R_MMU_KSEG, seg_4, page ) |
			IO_STATE(R_MMU_KSEG, seg_3, page ) |
			IO_STATE(R_MMU_KSEG, seg_2, page ) |
			IO_STATE(R_MMU_KSEG, seg_1, page ) |
			IO_STATE(R_MMU_KSEG, seg_0, page ) );

	*R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_e, 0x8 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_c, 0x4 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0x3 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_9, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_8, 0x0 ) );
	
	*R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_6, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
#endif

	/* Start with kernel page_id 0. */
	*R_MMU_CONTEXT = ( IO_FIELD(R_MMU_CONTEXT, page_id, 0 ) );

	/* Enable invalid-page, access and write exceptions, then turn
	 * the MMU on. */
	*R_MMU_CTRL = ( IO_STATE(R_MMU_CTRL, inv_excp, enable ) |
			IO_STATE(R_MMU_CTRL, acc_excp, enable ) |
			IO_STATE(R_MMU_CTRL, we_excp,  enable ) );
	
	*R_MMU_ENABLE = IO_STATE(R_MMU_ENABLE, mmu_enable, enable);

	/* Shared zeroed page for anonymous read faults. */
	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/* All memory goes into the first zone; the rest stay empty. */
	zones_size[0] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;

	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;


	free_area_init_node(0, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
}
Пример #26
0
		/* DMA interrupt stuff */
		2,
		1U << 4, /* uses DMA 2 and 3 */
		R_DMA_CH2_CLR_INTR,
		R_DMA_CH2_FIRST,
		R_DMA_CH2_CMD,
		R_DMA_CH3_CLR_INTR,
		R_DMA_CH3_FIRST,
		R_DMA_CH3_CMD,
		/* Non DMA interrupt stuff */
		IO_BITNR(R_VECT_MASK_RD, par0),
		R_IRQ_MASK0_RD,
		R_IRQ_MASK0_CLR,
		R_IRQ_READ0,
		R_IRQ_MASK0_SET,
		IO_FIELD(R_IRQ_MASK0_RD, par0_ready, 1U), /* tx (ready)*/
		IO_FIELD(R_IRQ_MASK0_RD, par0_data, 1U), /* rx (data)*/
		IO_FIELD(R_IRQ_MASK0_RD, par0_ecp_cmd, 1U), /* ecp_cmd */
		IO_FIELD(R_IRQ_MASK0_RD, par0_peri, 1U), /* peri */
		0
	},
	{
		R_PAR1_CTRL_DATA,
		R_PAR1_STATUS_DATA,
		R_PAR1_CONFIG,
		R_PAR1_DELAY,
		/* DMA interrupt stuff */
		4,
		1U << 8, /* uses DMA 4 and 5 */
		
		R_DMA_CH4_CLR_INTR,
Пример #27
0
void __init
time_init(void)
{
	/* probe for the RTC and read it if it exists
	 * Before the RTC can be probed the loops_per_usec variable needs
	 * to be initialized to make usleep work. A better value for
	 * loops_per_usec is calculated by the kernel later once the
	 * clock has started.
	 */
	loops_per_usec = 50;

	if(RTC_INIT() < 0)
		have_rtc = 0;
	else
		have_rtc = 1;

	/* Setup the etrax timers
	 * Base frequency is 25000 hz, divider 250 -> 100 HZ
	 * In normal mode, we use timer0, so timer1 is free. In cascade
	 * mode (which we sometimes use for debugging) both timers are used.
	 * Remember that linux/timex.h contains #defines that rely on the
	 * timer settings below (hz and divide factor) !!!
	 */

#ifdef USE_CASCADE_TIMERS
	/* First load both timers stopped (stop_ld), then write again with
	 * tm0/tm1 = run to start them; timer1 is clocked from timer0
	 * (cascade0) to form one long counter. */
	*R_TIMER_CTRL =
		IO_FIELD( R_TIMER_CTRL, timerdiv1, 0) |
		IO_FIELD( R_TIMER_CTRL, timerdiv0, 0) |
		IO_STATE( R_TIMER_CTRL, i1, nop) |
		IO_STATE( R_TIMER_CTRL, tm1, stop_ld) |
		IO_STATE( R_TIMER_CTRL, clksel1, cascade0) |
		IO_STATE( R_TIMER_CTRL, i0, nop) |
		IO_STATE( R_TIMER_CTRL, tm0, stop_ld) |
		IO_STATE( R_TIMER_CTRL, clksel0, c6250kHz);

	*R_TIMER_CTRL = r_timer_ctrl_shadow =
		IO_FIELD( R_TIMER_CTRL, timerdiv1, 0) |
		IO_FIELD( R_TIMER_CTRL, timerdiv0, 0) |
		IO_STATE( R_TIMER_CTRL, i1, nop) |
		IO_STATE( R_TIMER_CTRL, tm1, run) |
		IO_STATE( R_TIMER_CTRL, clksel1, cascade0) |
		IO_STATE( R_TIMER_CTRL, i0, nop) |
		IO_STATE( R_TIMER_CTRL, tm0, run) |
		IO_STATE( R_TIMER_CTRL, clksel0, c6250kHz);
#else
	/* Same two-step load-then-run sequence for independent timers:
	 * timer0 drives the system tick (divider TIMER0_DIV, flexible
	 * clock with R_TIMER_PRESCALE), timer1 runs at 19.2 kHz / 192. */
	*R_TIMER_CTRL =
		IO_FIELD(R_TIMER_CTRL, timerdiv1, 192)      |
		IO_FIELD(R_TIMER_CTRL, timerdiv0, TIMER0_DIV)      |
		IO_STATE(R_TIMER_CTRL, i1,        nop)      |
		IO_STATE(R_TIMER_CTRL, tm1,       stop_ld)  |
		IO_STATE(R_TIMER_CTRL, clksel1,   c19k2Hz)  |
		IO_STATE(R_TIMER_CTRL, i0,        nop)      |
		IO_STATE(R_TIMER_CTRL, tm0,       stop_ld)  |
		IO_STATE(R_TIMER_CTRL, clksel0,   flexible);

	*R_TIMER_CTRL = r_timer_ctrl_shadow =
		IO_FIELD(R_TIMER_CTRL, timerdiv1, 192)      |
		IO_FIELD(R_TIMER_CTRL, timerdiv0, TIMER0_DIV)      |
		IO_STATE(R_TIMER_CTRL, i1,        nop)      |
		IO_STATE(R_TIMER_CTRL, tm1,       run)      |
		IO_STATE(R_TIMER_CTRL, clksel1,   c19k2Hz)  |
		IO_STATE(R_TIMER_CTRL, i0,        nop)      |
		IO_STATE(R_TIMER_CTRL, tm0,       run)      |
		IO_STATE(R_TIMER_CTRL, clksel0,   flexible);

	*R_TIMER_PRESCALE = PRESCALE_VALUE;
#endif

	*R_IRQ_MASK0_SET =
		IO_STATE(R_IRQ_MASK0_SET, timer0, set); /* unmask the timer irq */

	/* now actually register the timer irq handler that calls timer_interrupt() */

	setup_irq(2, &irq2); /* irq 2 is the timer0 irq in etrax */

	/* enable watchdog if we should use one */

#if defined(CONFIG_ETRAX_WATCHDOG) && !defined(CONFIG_SVINTO_SIM)
	printk("Enabling watchdog...\n");
	start_watchdog();

	/* If we use the hardware watchdog, we want to trap it as an NMI
	   and dump registers before it resets us.  For this to happen, we
	   must set the "m" NMI enable flag (which once set, is unset only
	   when an NMI is taken).

	   The same goes for the external NMI, but that doesn't have any
	   driver or infrastructure support yet.  */
	asm ("setf m");

	/* Unmask the watchdog NMI both in the mask-0 register and the
	 * vector mask so the trap actually reaches us. */
	*R_IRQ_MASK0_SET =
		IO_STATE(R_IRQ_MASK0_SET, watchdog_nmi, set);
	*R_VECT_MASK_SET =
		IO_STATE(R_VECT_MASK_SET, nmi, set);
#endif
}
Пример #28
0
void 
net_init(void)
{
  /* Bring up the ETRAX Ethernet controller and its rx/tx DMA
   * descriptors, then start the receive DMA channel (ch1). */

  /* Clear both group-address registers. */
  *R_NETWORK_GA_1 = *R_NETWORK_GA_0 = 0;

  /* NOTE(review): the same 6-byte address 01:40:8c:00:01:00 is passed
   * twice -- presumably the macro fills both station address registers
   * (ma0/ma1); confirm against the SET_ETHER_ADDR definition. */
  SET_ETHER_ADDR(0x01,0x40,0x8c,0x00,0x01,0x00,
                 0x01,0x40,0x8c,0x00,0x01,0x00);

  /* Receive config: half duplex, drop all malformed frames, accept
   * only frames matching station address 0. */
  *R_NETWORK_REC_CONFIG =  
    IO_STATE (R_NETWORK_REC_CONFIG, duplex,     half)    |
    IO_STATE (R_NETWORK_REC_CONFIG, bad_crc,    discard) |
    IO_STATE (R_NETWORK_REC_CONFIG, oversize,   discard) |
    IO_STATE (R_NETWORK_REC_CONFIG, undersize,  discard) |
    IO_STATE (R_NETWORK_REC_CONFIG, all_roots,  discard) |
    IO_STATE (R_NETWORK_REC_CONFIG, broadcast,  discard) |
    IO_STATE (R_NETWORK_REC_CONFIG, individual, discard) |
    IO_STATE (R_NETWORK_REC_CONFIG, ma1,        disable) |
    IO_STATE (R_NETWORK_REC_CONFIG, ma0,        enable);

  /* MII management interface idle/disabled. */
  *R_NETWORK_MGM_CTRL =  
    IO_FIELD(R_NETWORK_MGM_CTRL, txd_pins, 0)       |
    IO_FIELD(R_NETWORK_MGM_CTRL, txer_pin, 0)       |
    IO_FIELD(R_NETWORK_MGM_CTRL, mdck,     0)       |
    IO_STATE(R_NETWORK_MGM_CTRL, mdoe,     disable) |
    IO_FIELD(R_NETWORK_MGM_CTRL, mdio,     0);
  
  /* Transmit control: standard Ethernet behaviour (CD, padding, CRC,
   * retry on collision), clear any sticky error. */
  *R_NETWORK_TR_CTRL = 
    (IO_STATE (R_NETWORK_TR_CTRL, clr_error, clr)     |
     IO_STATE (R_NETWORK_TR_CTRL, delay,     none)    |
     IO_STATE (R_NETWORK_TR_CTRL, cancel,    dont)    |
     IO_STATE (R_NETWORK_TR_CTRL, cd,        enable)  |
     IO_STATE (R_NETWORK_TR_CTRL, pad,       enable) |
     IO_STATE (R_NETWORK_TR_CTRL, crc,       enable)  |
     IO_STATE (R_NETWORK_TR_CTRL, retry,     enable));
     
  /* Enable the MAC in plain Ethernet/MII mode, no loopback. */
  *R_NETWORK_GEN_CONFIG =  
    IO_STATE (R_NETWORK_GEN_CONFIG, loopback, off)   |
    IO_STATE (R_NETWORK_GEN_CONFIG, frame,    ether) |
    IO_STATE (R_NETWORK_GEN_CONFIG, vg,       off)   |
    IO_STATE (R_NETWORK_GEN_CONFIG, phy,      mii_clk)   |
    IO_STATE (R_NETWORK_GEN_CONFIG, enable,   on);

  /* Reset the network tx (ch0) and rx (ch1) DMA channels. */
  *R_DMA_CH0_CMD = IO_STATE(R_DMA_CH0_CMD, cmd, reset);
  *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, reset);

  /* Rx descriptor chain: header into rx_header, payload (up to 1500
   * bytes) into target_address.  NOTE(review): the TX_CTRL* constants
   * are reused for rx descriptors -- presumably the ctrl bit layout is
   * shared between directions; verify against the descriptor spec. */
  rx_descr.sw_len = NETWORK_HEADER_LENGTH;
  rx_descr.ctrl   = TX_CTRL;
  rx_descr.next   = (udword)&rx_descr2;
  rx_descr.buf    = (udword)&rx_header;

  rx_descr2.sw_len = 1500;
  rx_descr2.ctrl   = TX_CTRL_EOP;
  rx_descr2.buf    = target_address;

  /* Pre-build the constant part of the tx frame header. */
  /* Endian dependent, but saves a few bytes... */
  *(udword*)&tx_header.src[0] = htonl(0x02408c00); 
  *(uword*)&tx_header.src[4]  = htons(0x0100); 
  tx_header.length = htons(64);
  tx_header.snap1  = htonl(0xaaaa0300);
  tx_header.snap2  = htonl(0x408c8856);
  tx_header.tag    = htonl(0xfffffffe);
  tx_header.seq    = 0;
  tx_header.type   = 0;

  tx_descr.sw_len = NETWORK_HEADER_LENGTH;
  tx_descr.ctrl   = TX_CTRL_EOP;
  tx_descr.buf    = (udword)&tx_header;
  
  set_dest        = FALSE;
  seq = 0;

  /* Hand the rx chain to the DMA and start receiving. */
  *R_DMA_CH1_FIRST = (udword)&rx_descr;
  *R_DMA_CH1_CMD   = IO_STATE(R_DMA_CH1_CMD, cmd, start);
}