Example no. 1
0
/*
 * Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
 * then try to fall into the debugger
 */
static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
			    void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	int trap = (regs->cp0_cause & 0x7c) >> 2;

	/* Userspace events, ignore. */
	if (user_mode(regs))
		return NOTIFY_DONE;

	if (atomic_read(&kgdb_active) != -1)
		kgdb_nmicallback(smp_processor_id(), regs);

	if (kgdb_handle_exception(trap, compute_signal(trap), 0, regs))
		return NOTIFY_DONE;

	if (atomic_read(&kgdb_setting_breakpoint))
		if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
			regs->cp0_epc += 4;

	/* In SMP mode, __flush_cache_all does IPI */
	local_irq_enable();
	__flush_cache_all();

	return NOTIFY_STOP;
}
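For context, a die-notifier like this is normally registered on the kernel's die() notification chain. A minimal registration sketch, assuming the <linux/kdebug.h> interface; the struct and init-function names below are illustrative, not taken from the example:

#include <linux/init.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>

/* Hypothetical registration; the real file may name this differently. */
static struct notifier_block kgdb_notifier = {
	.notifier_call = kgdb_mips_notify,
};

static int __init kgdb_hook_die_chain(void)
{
	/* From here on, kgdb_mips_notify runs on every notify_die(). */
	return register_die_notifier(&kgdb_notifier);
}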
Example no. 2
0
asmlinkage int sys_cacheflush(void *addr, int bytes, int cache)
{
	/* This should flush more selectively ... */
	__flush_cache_all();

	return 0;
}
Example no. 3
0
/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems very atypical use ...
 */
asmlinkage int sys_cacheflush(unsigned long addr,
	unsigned long bytes, unsigned int cache)
{
	struct vm_area_struct *vma;

	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	if (cache & DCACHE) {
		vma = find_vma(current->mm, addr);
		if (vma)
			flush_cache_range(vma, addr, addr + bytes);
		else
			__flush_cache_all();
	}

	if (cache & ICACHE)
		flush_icache_range(addr, addr + bytes);

	return 0;
}
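From userspace this system call is normally reached through the MIPS-specific cacheflush() wrapper. A hedged usage sketch, assuming glibc's <sys/cachectl.h> declaration int cacheflush(void *addr, int nbytes, int op); the buffer is purely illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/cachectl.h>	/* MIPS only: cacheflush(), ICACHE/DCACHE/BCACHE */

int main(void)
{
	size_t len = 128;
	char *buf = malloc(len);	/* hypothetical code buffer */

	if (!buf)
		return 1;
	memset(buf, 0, len);		/* stand-in for emitting instructions */

	/* BCACHE = ICACHE | DCACHE: write the D-cache back and invalidate
	 * the I-cache over the range, as the branches above do. */
	if (cacheflush(buf, len, BCACHE) < 0) {
		perror("cacheflush");
		return 1;
	}

	free(buf);
	return 0;
}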
Example no. 4
0
static void __cpuinit per_hub_init(cnodeid_t cnode)
{
	struct hub_data *hub = hub_data(cnode);
	nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
	int i;

	cpu_set(smp_processor_id(), hub->h_cpus);

	if (test_and_set_bit(cnode, hub_init_mask))
		return;
	/*
	 * Set CRB timeout at 5ms, (< PI timeout of 10ms)
	 */
	REMOTE_HUB_S(nasid, IIO_ICTP, 0x800);
	REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);

	hub_rtc_init(cnode);
	xtalk_probe_node(cnode);

#ifdef CONFIG_REPLICATE_EXHANDLERS
	/*
	 * If this is not a headless node initialization,
	 * copy over the caliased exception handlers.
	 */
	if (get_compact_nodeid() == cnode) {
		extern char except_vec2_generic, except_vec3_generic;
		extern void build_tlb_refill_handler(void);

		memcpy((void *)(CKSEG0 + 0x100), &except_vec2_generic, 0x80);
		memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x80);
		build_tlb_refill_handler();
		memcpy((void *)(CKSEG0 + 0x100), (void *) CKSEG0, 0x80);
		memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x100);
		__flush_cache_all();
	}
#endif

	/*
	 * Some interrupts are reserved by hardware or by software convention.
	 * Mark these as reserved right away so they won't be used accidentally
	 * later.
	 */
	for (i = 0; i <= BASE_PCI_IRQ; i++) {
		__set_bit(i, hub->irq_alloc_mask);
		LOCAL_HUB_CLR_INTR(INT_PEND0_BASELVL + i);
	}

	__set_bit(IP_PEND0_6_63, hub->irq_alloc_mask);
	LOCAL_HUB_S(PI_INT_PEND_MOD, IP_PEND0_6_63);

	for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++) {
		__set_bit(i, hub->irq_alloc_mask);
		LOCAL_HUB_CLR_INTR(INT_PEND1_BASELVL + i);
	}
}
Example no. 5
0
static void __cpuinit per_hub_init(cnodeid_t cnode)
{
	struct hub_data *hub = hub_data(cnode);
	nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
	int i;

	cpu_set(smp_processor_id(), hub->h_cpus);

	if (test_and_set_bit(cnode, hub_init_mask))
		return;
	/*
	 * Set CRB timeout at 5ms, (< PI timeout of 10ms)
	 */
	REMOTE_HUB_S(nasid, IIO_ICTP, 0x800);
	REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);

	hub_rtc_init(cnode);
	xtalk_probe_node(cnode);

#ifdef CONFIG_REPLICATE_EXHANDLERS
	/*
	 * If this is not a headless node initialization,
	 * copy over the caliased exception handlers.
	 */
	if (get_compact_nodeid() == cnode) {
		extern char except_vec2_generic, except_vec3_generic;
		extern void build_tlb_refill_handler(void);

		memcpy((void *)(CKSEG0 + 0x100), &except_vec2_generic, 0x80);
		memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x80);
		build_tlb_refill_handler();
		memcpy((void *)(CKSEG0 + 0x100), (void *) CKSEG0, 0x80);
		memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x100);
		__flush_cache_all();
	}
#endif

	/*
	 * Some interrupts are reserved by hardware or by software convention.
	 * Mark these as reserved right away so they won't be used accidentally
	 * later.
	 */
	for (i = 0; i <= BASE_PCI_IRQ; i++) {
		__set_bit(i, hub->irq_alloc_mask);
		LOCAL_HUB_CLR_INTR(INT_PEND0_BASELVL + i);
	}

	__set_bit(IP_PEND0_6_63, hub->irq_alloc_mask);
	LOCAL_HUB_S(PI_INT_PEND_MOD, IP_PEND0_6_63);

	for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++) {
		__set_bit(i, hub->irq_alloc_mask);
		LOCAL_HUB_CLR_INTR(INT_PEND1_BASELVL + i);
	}
}
Example no. 6
0
asmlinkage int
_sys_sysmips(int cmd, int arg1, int arg2, int arg3)
{
	char	*name;
	int	tmp, len, retval;

	switch(cmd) {
	case SETNAME: {
		char nodename[__NEW_UTS_LEN + 1];

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		name = (char *) arg1;

		len = strncpy_from_user(nodename, name, sizeof(nodename) - 1);
		if (len < 0)
			return -EFAULT;
		/* Terminate and copy from the kernel-side buffer, not the
		 * raw user pointer, while still holding uts_sem. */
		nodename[len] = '\0';

		down_write(&uts_sem);
		strcpy(system_utsname.nodename, nodename);
		up_write(&uts_sem);
		return 0;
	}

	case MIPS_ATOMIC_SET:
		printk(KERN_CRIT "How did I get here?\n");
		retval = -EINVAL;
		goto out;

	case MIPS_FIXADE:
		tmp = current->thread.mflags & ~3;
		current->thread.mflags = tmp | (arg1 & 3);
		retval = 0;
		goto out;

	case FLUSH_CACHE:
		__flush_cache_all();
		retval = 0;
		goto out;

	case MIPS_RDNVRAM:
		retval = -EIO;
		goto out;

	default:
		retval = -EINVAL;
		goto out;
	}

out:
	return retval;
}
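The FLUSH_CACHE branch above is reachable from userspace as well. A hedged sketch invoking it via syscall(2), assuming a MIPS target where <sys/syscall.h> defines SYS_sysmips and <asm/sysmips.h> provides the command constants:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>	/* SYS_sysmips (MIPS only) */
#include <asm/sysmips.h>	/* SETNAME, MIPS_FIXADE, FLUSH_CACHE, ... */

int main(void)
{
	/* FLUSH_CACHE ignores the remaining arguments, so pass zeros;
	 * the handler above then calls __flush_cache_all() and returns 0. */
	if (syscall(SYS_sysmips, FLUSH_CACHE, 0, 0, 0) < 0) {
		perror("sysmips(FLUSH_CACHE)");
		return 1;
	}
	return 0;
}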
Example no. 7
0
void
machine_kexec(struct kimage *image)
{
	unsigned long reboot_code_buffer;
	unsigned long entry;
	unsigned long *ptr;

	reboot_code_buffer =
	  (unsigned long)page_address(image->control_code_page);

	kexec_start_address =
		(unsigned long) phys_to_virt(image->start);

	if (image->type == KEXEC_TYPE_DEFAULT) {
		kexec_indirection_page =
			(unsigned long) phys_to_virt(image->head & PAGE_MASK);
	} else {
		kexec_indirection_page = (unsigned long)&image->head;
	}

	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
	       relocate_new_kernel_size);

	/*
	 * The generic kexec code builds a page list with physical
	 * addresses. They are directly accessible through KSEG0 (or
	 * CKSEG0 or XKPHYS on a 64-bit system), hence the
	 * phys_to_virt() call.
	 */
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
	       phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
		    *ptr & IND_DESTINATION)
			*ptr = (unsigned long) phys_to_virt(*ptr);
	}

	/*
	 * We do not want to be bothered.
	 */
	local_irq_disable();

	printk("Will call new kernel at %08lx\n", image->start);
	printk("Bye ...\n");
	__flush_cache_all();
#ifdef CONFIG_SMP
	/* All secondary cpus now may jump to kexec_wait cycle */
	relocated_kexec_smp_wait = reboot_code_buffer +
		(void *)(kexec_smp_wait - relocate_new_kernel);
	smp_wmb();
	atomic_set(&kexec_ready_to_reboot, 1);
#endif
	((noretfun_t) reboot_code_buffer)();
}
Example no. 8
0
void Chip_Flush_Cache_All(void)
{
	unsigned long flags;

	local_irq_save(flags);

	/* Write back and invalidate the L1 caches. */
	__flush_cache_all();

	/* Write back and invalidate the whole L2 cache. */
	Chip_L2_cache_wback_inv(0, 0);

	local_irq_restore(flags);
}
Example no. 9
0
void
machine_kexec(struct kimage *image)
{
	unsigned long reboot_code_buffer;
	unsigned long entry;
	unsigned long *ptr;

	reboot_code_buffer =
	  (unsigned long)page_address(image->control_code_page);

	kexec_start_address =
		(unsigned long) phys_to_virt(image->start);

	kexec_indirection_page =
		(unsigned long) phys_to_virt(image->head & PAGE_MASK);

	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
	       relocate_new_kernel_size);

	/*
	 * The generic kexec code builds a page list with physical
	 * addresses. They are directly accessible through KSEG0 (or
	 * CKSEG0 or XKPHYS on a 64-bit system), hence the
	 * phys_to_virt() call.
	 */
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
	       phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
		    *ptr & IND_DESTINATION)
			*ptr = (unsigned long) phys_to_virt(*ptr);
	}

	/*
	 * We do not want to be bothered.
	 */
	local_irq_disable();

	printk("Will call new kernel at %08lx\n", image->start);
	printk("Bye ...\n");
	__flush_cache_all();
	((noretfun_t) reboot_code_buffer)();
}
Example no. 10
0
static int octeon_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	if (!octeon_bootloader_entry_addr)
		return -ENOTSUPP;

	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();
	octeon_fixup_irqs();

	__flush_cache_all();
	local_flush_tlb_all();

	return 0;
}
Example no. 11
0
/*
 * Set up exception handlers for tracing and breakpoints
 */
void handle_exception(struct pt_regs *regs)
{
	int trap = (regs->cp0_cause & 0x7c) >> 2;

	if (fixup_exception(regs)) {
		return;
	}

	if (atomic_read(&debugger_active))
		kgdb_nmihook(smp_processor_id(), regs);

	if (atomic_read(&kgdb_setting_breakpoint))
		if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
			regs->cp0_epc += 4;

	kgdb_handle_exception(0, compute_signal(trap), 0, regs);

	/* In SMP mode, __flush_cache_all does IPI */
	__flush_cache_all();
}
Example no. 12
0
static inline void software_reset(void)
{
	uint16_t pmucnt2;

	switch (current_cpu_type()) {
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4133:
		pmucnt2 = pmu_read(PMUCNT2REG);
		pmucnt2 |= SOFTRST;
		pmu_write(PMUCNT2REG, pmucnt2);
		break;
	default:
		set_c0_status(ST0_BEV | ST0_ERL);
		change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
		__flush_cache_all();
		write_c0_wired(0);
		__asm__("jr	%0"::"r"(0xbfc00000));
		break;
	}
}
Example no. 13
0
static void __init per_hub_init(cnodeid_t cnode)
{
	struct hub_data *hub = hub_data(cnode);
	nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);

	cpu_set(smp_processor_id(), hub->h_cpus);

	if (test_and_set_bit(cnode, hub_init_mask))
		return;

	/*
	 * Set CRB timeout at 5ms, (< PI timeout of 10ms)
	 */
	REMOTE_HUB_S(nasid, IIO_ICTP, 0x800);
	REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);

	hub_rtc_init(cnode);
	xtalk_probe_node(cnode);

#ifdef CONFIG_REPLICATE_EXHANDLERS
	/*
	 * If this is not a headless node initialization,
	 * copy over the caliased exception handlers.
	 */
	if (get_compact_nodeid() == cnode) {
		extern char except_vec2_generic, except_vec3_generic;
		extern void build_tlb_refill_handler(void);

		memcpy((void *)(CKSEG0 + 0x100), &except_vec2_generic, 0x80);
		memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x80);
		build_tlb_refill_handler();
		memcpy((void *)(CKSEG0 + 0x100), (void *) CKSEG0, 0x80);
		memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x100);
		__flush_cache_all();
	}
#endif
}
Example no. 14
0
void brcm_flush_cache_all(void)
{
	__flush_cache_all();
}