/* Return value == 0: success.
 * Return value < 0: the scm call itself failed.
 * Return value > 0: qfprom status error while reading the fuse.
 * This API can be used in the address range 0x700XXX.
 */
int qfuse_read_single_row(u32 fuse_addr, u32 addr_type, u32 *r_buf)
{
	struct qfprom_read_cmd_buffer request;
	u32 *p_status = NULL;
	u32 scm_ret = 0;
	int ret = 0;
	u32 tzbsp_boot_milestone_status = TZBSP_MILESTONE_TRUE;

	p_status = kzalloc(sizeof(u32), GFP_KERNEL);
	if (!p_status) {
		printk(KERN_ERR "%s: status memory alloc fail\n", __func__);
		ret = -ENOMEM;
		goto error_p_status;
	}

	request.qfprom_addr = fuse_addr;
	request.qfprom_addr_type = addr_type;
	request.read_buf = virt_to_phys((void *)r_buf);
	request.qfprom_status = virt_to_phys((void *)p_status);

	tzbsp_boot_milestone_status = TZBSP_MILESTONE_FALSE;
	if (scm_call(TZBSP_SVC_OEM, TZBSP_ADDITIONAL_CMD,
		     &tzbsp_boot_milestone_status,
		     sizeof(tzbsp_boot_milestone_status), &scm_ret,
		     sizeof(scm_ret)) != 0) {
		printk(KERN_ERR "change failed to %x, status = %x\n",
		       TZBSP_MILESTONE_FALSE, tzbsp_boot_milestone_status);
		ret = -EMILE;
		goto error_p_status;
	} else {
		printk(KERN_INFO "change succeeded to %x, status = %x\n",
		       TZBSP_MILESTONE_FALSE, tzbsp_boot_milestone_status);
	}

	msleep(10);
	ret = scm_call(QFPROM_SVC_ID, QFPROM_READ_CMD, &request,
			sizeof(request), &scm_ret, sizeof(scm_ret));

	if (ret < 0) {
		printk("%s: scm call fail\n", __func__);
		goto error_p_status;
	}
	ret = *p_status;	/* phys_to_virt(request.qfprom_status) == p_status */
	printk(KERN_INFO "%s: qfprom_status = 0x%x\n", __func__, ret);

	tzbsp_boot_milestone_status = TZBSP_MILESTONE_TRUE;
	msleep(10);
	if (scm_call(TZBSP_SVC_OEM, TZBSP_ADDITIONAL_CMD,
		     &tzbsp_boot_milestone_status,
		     sizeof(tzbsp_boot_milestone_status), &scm_ret,
		     sizeof(scm_ret)) != 0) {
		printk(KERN_ERR "change failed to %x, status = %x\n",
		       TZBSP_MILESTONE_TRUE, tzbsp_boot_milestone_status);
		ret = -EMILE;
		goto error_p_status;
	} else {
		printk(KERN_INFO "change succeeded to %x, status = %x\n",
		       TZBSP_MILESTONE_TRUE, tzbsp_boot_milestone_status);
	}

error_p_status:
	kfree(p_status);	/* kfree(NULL) is a no-op */
	return ret;
}
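
A minimal caller sketch for the return-value convention documented above; the fuse address, address type, and buffer layout here are illustrative, not taken from the original driver:

static int example_read_fuse_row(void)
{
	u32 row[2] = { 0, 0 };	/* one fuse row, two 32-bit words */
	int ret;

	/* 0x700100 is an illustrative address inside the documented range */
	ret = qfuse_read_single_row(0x700100, 0, row);
	if (ret < 0)
		return ret;	/* scm call failed */
	if (ret > 0)
		return -EIO;	/* qfprom reported a read status error */

	pr_info("fuse row: 0x%08x 0x%08x\n", row[0], row[1]);
	return 0;
}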
Example #2
unsigned long sleep_phys_sp(void *sp)
{
	return virt_to_phys(sp);
}
Example #3
static int tango_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	tango_set_aux_boot_addr(virt_to_phys(secondary_startup));
	tango_start_aux_core(cpu);
	return 0;
}
Example #4
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
Example #5
File: mmap.c  Project: Endika/linux
int __virt_addr_valid(const volatile void *kaddr)
{
	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
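
A hedged usage sketch: the typical reason to call __virt_addr_valid() is to guard a virt_to_page() translation; the helper below is invented for illustration and is not part of mmap.c:

/* Hypothetical helper, not from the original file. */
static struct page *page_of_kaddr_or_null(const void *kaddr)
{
	if (!__virt_addr_valid(kaddr))
		return NULL;	/* not a valid directly-mapped kernel address */
	return virt_to_page(kaddr);
}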
Example #6
File: smp.c  Project: AllenDou/linux
static int __cpuinit smp_85xx_kick_cpu(int nr)
{
	unsigned long flags;
	const u64 *cpu_rel_addr;
	__iomem struct epapr_spin_table *spin_table;
	struct device_node *np;
	int hw_cpu = get_hard_smp_processor_id(nr);
	int ioremappable;
	int ret = 0;

	WARN_ON(nr < 0 || nr >= NR_CPUS);
	WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);

	pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);

	np = of_get_cpu_node(nr, NULL);
	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);

	if (cpu_rel_addr == NULL) {
		printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
		return -ENOENT;
	}

	/*
	 * A secondary core could be in a spinloop in the bootpage
	 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
	 * The bootpage and highmem can be accessed via ioremap(), but
	 * we need to directly access the spinloop if it's in lowmem.
	 */
	ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);

	/* Map the spin table */
	if (ioremappable)
		spin_table = ioremap(*cpu_rel_addr,
				sizeof(struct epapr_spin_table));
	else
		spin_table = phys_to_virt(*cpu_rel_addr);

	local_irq_save(flags);
#ifdef CONFIG_PPC32
#ifdef CONFIG_HOTPLUG_CPU
	/* Corresponding to generic_set_cpu_dead() */
	generic_set_cpu_up(nr);

	if (system_state == SYSTEM_RUNNING) {
		out_be32(&spin_table->addr_l, 0);

		/*
		 * We don't set the BPTR register here since it already points
		 * to the boot page properly.
		 */
		mpic_reset_core(hw_cpu);

		/* wait until core is ready... */
		if (!spin_event_timeout(in_be32(&spin_table->addr_l) == 1,
						10000, 100)) {
			pr_err("%s: timeout waiting for core %d to reset\n",
							__func__, hw_cpu);
			ret = -ENOENT;
			goto out;
		}

		/*  clear the acknowledge status */
		__secondary_hold_acknowledge = -1;
	}
#endif
	out_be32(&spin_table->pir, hw_cpu);
	out_be32(&spin_table->addr_l, __pa(__early_start));

	if (!ioremappable)
		flush_dcache_range((ulong)spin_table,
			(ulong)spin_table + sizeof(struct epapr_spin_table));

	/* Wait a bit for the CPU to ack. */
	if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
					10000, 100)) {
		pr_err("%s: timeout waiting for core %d to ack\n",
						__func__, hw_cpu);
		ret = -ENOENT;
		goto out;
	}
out:
#else
	smp_generic_kick_cpu(nr);

	out_be32(&spin_table->pir, hw_cpu);
	out_be64((u64 *)(&spin_table->addr_h),
	  __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));

	if (!ioremappable)
		flush_dcache_range((ulong)spin_table,
			(ulong)spin_table + sizeof(struct epapr_spin_table));
#endif

	local_irq_restore(flags);

	if (ioremappable)
		iounmap(spin_table);

	return ret;
}
Example #7
static int s3c_pm_enter(suspend_state_t state)
{
	static unsigned long regs_save[16];

	/* ensure the debug is initialised (if enabled) */

	s3c_pm_debug_init();

	S3C_PMDBG("%s(%d)\n", __func__, state);

	if (pm_cpu_prep == NULL || pm_cpu_sleep == NULL) {
		printk(KERN_ERR "%s: error: no cpu sleep function\n", __func__);
		return -EINVAL;
	}

	/* check if we have anything to wake-up with... bad things seem
	 * to happen if you suspend with no wakeup (system will often
	 * require a full power-cycle)
	*/

	if (!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
	    !any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) {
		printk(KERN_ERR "%s: No wake-up sources!\n", __func__);
		printk(KERN_ERR "%s: Aborting sleep\n", __func__);
		return -EINVAL;
	}

	/* store the physical address of the register recovery block */

	s3c_sleep_save_phys = virt_to_phys(regs_save);

	S3C_PMDBG("s3c_sleep_save_phys=0x%08lx\n", s3c_sleep_save_phys);

	/* save all necessary core registers not covered by the drivers */

	s3c_pm_save_gpios();
	s3c_pm_save_uarts();
	s3c_pm_save_core();

	/* set the irq configuration for wake */

	s3c_pm_configure_extint();

	S3C_PMDBG("sleep: irq wakeup masks: %08lx,%08lx\n",
	    s3c_irqwake_intmask, s3c_irqwake_eintmask);

	s3c_pm_arch_prepare_irqs();

	/* call cpu specific preparation */

	pm_cpu_prep();

	/* flush cache back to ram */

	flush_cache_all();

	s3c_pm_check_store();

	/* send the cpu to sleep... */

	s3c_pm_arch_stop_clocks();

	/* s3c_cpu_save will also act as our return point from when
	 * we resume as it saves its own register state and restores it
	 * during the resume.  */

	S3C_PMINFO("%s: s3c_cpu_save\n", __func__);
	if (s3c_cpu_save(regs_save) == 0) {
		S3C_PMDBG("S3C PM Resume by early wakeup.\n");
		goto early_wakeup;
	}

	/* restore the cpu state using the kernel's cpu init code. */

	S3C_PMINFO("%s: cpu_init\n", __func__);
	cpu_init();

	/* restore the system state */
	S3C_PMINFO("%s: s3c_pm_restore_core\n", __func__);
	s3c_pm_restore_core();
	s3c_pm_restore_uarts();
	s3c_pm_restore_gpios();

	S3C_PMINFO("%s: s3c_pm_debug_init\n", __func__);
	s3c_pm_debug_init();

	/* check what irq (if any) restored the system */

	s3c_pm_arch_show_resume_irqs();

	S3C_PMDBG("%s: post sleep, preparing to return\n", __func__);

	/* LEDs should now be 1110 */
	/* s3c_pm_debug_smdkled(1 << 1, 0); */

	s3c_pm_check_restore();

	/* ok, let's return from sleep */

	S3C_PMDBG("S3C PM Resume (post-restore)\n");

early_wakeup:
	/* exit early wakeup */

	return 0;
}
Example #8
static unsigned long hv_perf_caps_get(struct hv_perf_caps *caps)
{
	unsigned long r;

	struct p {
		struct hv_get_perf_counter_info_params params;
		struct cv_system_performance_capabilities caps;
	} __packed __aligned(sizeof(uint64_t));

	struct p arg = {
		.params = {
			.counter_request = cpu_to_be32(
					CIR_SYSTEM_PERFORMANCE_CAPABILITIES),
			.starting_index = cpu_to_be32(-1),
			.counter_info_version_in = 0,
		}
	};

	r = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
			       virt_to_phys(&arg), sizeof(arg));

	if (r)
		return r;

	pr_devel("capability_mask: 0x%x\n", arg.caps.capability_mask);

	caps->version = arg.params.counter_info_version_out;
	caps->collect_privileged = !!arg.caps.perf_collect_privileged;
	caps->ga = !!(arg.caps.capability_mask & CV_CM_GA);
	caps->expanded = !!(arg.caps.capability_mask & CV_CM_EXPANDED);
	caps->lab = !!(arg.caps.capability_mask & CV_CM_LAB);

	return r;
}
Example #9
int ftmac110_initialize(bd_t *bis)
{
	int i, card_nr = 0;
	struct eth_device *dev;
	struct ftmac110_chip *chip;

	dev = malloc(sizeof(*dev) + sizeof(*chip));
	if (dev == NULL) {
		panic("ftmac110: out of memory 1\n");
		return -1;
	}
	chip = (struct ftmac110_chip *)(dev + 1);
	memset(dev, 0, sizeof(*dev) + sizeof(*chip));

	sprintf(dev->name, "FTMAC110#%d", card_nr);

	dev->iobase = CONFIG_FTMAC110_BASE;
	chip->regs = (void __iomem *)dev->iobase;
	dev->priv = chip;
	dev->init = ftmac110_probe;
	dev->halt = ftmac110_halt;
	dev->send = ftmac110_send;
	dev->recv = ftmac110_recv;

	if (!eth_getenv_enetaddr_by_index("eth", card_nr, dev->enetaddr))
		eth_random_addr(dev->enetaddr);

	/* allocate tx descriptors (they must be 16-byte aligned) */
	chip->txd = dma_alloc_coherent(
		sizeof(struct ftmac110_desc) * CFG_TXDES_NUM, &chip->txd_dma);
	if (!chip->txd)
		panic("ftmac110: out of memory 3\n");
	memset(chip->txd, 0,
	       sizeof(struct ftmac110_desc) * CFG_TXDES_NUM);
	for (i = 0; i < CFG_TXDES_NUM; ++i) {
		void *va = memalign(ARCH_DMA_MINALIGN, CFG_XBUF_SIZE);

		if (!va)
			panic("ftmac110: out of memory 4\n");
		chip->txd[i].vbuf = va;
		chip->txd[i].pbuf = cpu_to_le32(virt_to_phys(va));
		chip->txd[i].ctrl = 0;	/* owned by SW */
	}
	chip->txd[i - 1].ctrl |= cpu_to_le64(FTMAC110_TXD_END);
	chip->txd_idx = 0;

	/* allocate rx descriptors (they must be 16-byte aligned) */
	chip->rxd = dma_alloc_coherent(
		sizeof(struct ftmac110_desc) * CFG_RXDES_NUM, &chip->rxd_dma);
	if (!chip->rxd)
		panic("ftmac110: out of memory 4\n");
	memset((void *)chip->rxd, 0,
	       sizeof(struct ftmac110_desc) * CFG_RXDES_NUM);
	for (i = 0; i < CFG_RXDES_NUM; ++i) {
		void *va = memalign(ARCH_DMA_MINALIGN, CFG_XBUF_SIZE + 2);

		if (!va)
			panic("ftmac110: out of memory 5\n");
		/* offset by 2 bytes so the IP header lands 4-byte aligned */
		va = ((uint8_t *)va + 2);
		chip->rxd[i].vbuf = va;
		chip->rxd[i].pbuf = cpu_to_le32(virt_to_phys(va));
		chip->rxd[i].ctrl = cpu_to_le64(FTMAC110_RXD_OWNER
			| FTMAC110_RXD_BUFSZ(CFG_XBUF_SIZE));
	}
	chip->rxd[i - 1].ctrl |= cpu_to_le64(FTMAC110_RXD_END);
	chip->rxd_idx = 0;

	eth_register(dev);

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
	miiphy_register(dev->name, ftmac110_mdio_read, ftmac110_mdio_write);
#endif

	card_nr++;

	return card_nr;
}
Example #10
static ssize_t _rmparser_write(const char __user *buf, size_t count)
{
	size_t r = count;
	const char __user *p = buf;
	u32 len;
	int ret;
	static int halt_dropped_len;
	u32 vwp, awp;

	if (r > 0) {
		len = min(r, (size_t)FETCHBUF_SIZE);

		if (copy_from_user(fetchbuf_remap, p, len))
			return -EFAULT;

		fetch_done = 0;

		wmb();

		vwp = buf_wp(BUF_TYPE_VIDEO);
		awp = buf_wp(BUF_TYPE_AUDIO);
		WRITE_MPEG_REG(PARSER_FETCH_ADDR, virt_to_phys((u8 *)fetchbuf));
		WRITE_MPEG_REG(PARSER_FETCH_CMD, (7 << FETCH_ENDIAN) | len);

		ret = wait_event_interruptible_timeout(rm_wq, fetch_done != 0,
						       HZ / 10);
		if (ret == 0) {
			WRITE_MPEG_REG(PARSER_FETCH_CMD, 0);
			parse_halt++;
			printk("write timeout, retry, halt_count=%d parse_control=%x\n",
			       parse_halt, READ_MPEG_REG(PARSER_CONTROL));

			vreal_set_fatal_flag(1);

			if (parse_halt > 10) {
				WRITE_MPEG_REG(PARSER_CONTROL,
					       (ES_SEARCH | ES_PARSER_START));
				printk("reset parse_control=%x\n",
				       READ_MPEG_REG(PARSER_CONTROL));
			}
			return -EAGAIN;
		} else if (ret < 0) {
			return -ERESTARTSYS;
		}

		if (vwp == buf_wp(BUF_TYPE_VIDEO) &&
		    awp == buf_wp(BUF_TYPE_AUDIO)) {
			if ((parse_halt + 1) % 10 == 1)
				printk("Video&Audio WP not changed after write, video %x->%x, audio %x->%x, parse_halt=%d\n",
				       vwp, buf_wp(BUF_TYPE_VIDEO),
				       awp, buf_wp(BUF_TYPE_AUDIO), parse_halt);
			/* WP not changed: the parser is probably stuck. */
			parse_halt++;
			if (parse_halt > 10 &&
			    (stbuf_level(get_buf_by_type(BUF_TYPE_VIDEO)) < 1000 ||
			     stbuf_level(get_buf_by_type(BUF_TYPE_AUDIO)) < 100)) {
				/* Reset while at least one buffer is underflowing. */
				WRITE_MPEG_REG(PARSER_CONTROL,
					       (ES_SEARCH | ES_PARSER_START));
				printk("reset parse_control=%x\n",
				       READ_MPEG_REG(PARSER_CONTROL));
			}
			if (parse_halt <= 10 || halt_dropped_len < 100 * 1024) {
				/* Drop the first 10 pkts; sometimes there is no A/V data yet. */
				printk("drop this pkt=%d, len=%d\n", parse_halt, len);
				p += len;
				r -= len;
				halt_dropped_len += len;
			} else {
				return -EAGAIN;
			}
		} else {
			halt_dropped_len = 0;
			parse_halt = 0;
			p += len;
			r -= len;
		}
	}
	return count - r;
}
Example #11
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	int err = 0;
	int signal;
	unsigned long address = 0;
#ifdef CONFIG_MMU
	pmd_t *pmdp;
	pte_t *ptep;
#endif

	frame = get_sigframe(ka, regs, sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	signal = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	if (info)
		err |= copy_siginfo_to_user(&frame->info, info);

	/* Create the ucontext. */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(NULL, &frame->uc.uc_link);
	err |= __put_user((void __user *)current->sas_ss_sp,
			&frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->r1),
			&frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext(&frame->uc.uc_mcontext,
			regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	/* Set up to return from userspace. If provided, use a stub
	 already in userspace. */
	/* minus 8 is offset to cater for "rtsd r15,8" */
	/* addi r12, r0, __NR_sigreturn */
	err |= __put_user(0x31800000 | __NR_rt_sigreturn,
			frame->tramp + 0);
	/* brki r14, 0x8 */
	err |= __put_user(0xb9cc0008, frame->tramp + 1);

	/* Return from sighandler will jump to the tramp.
	 Negative 8 offset because return is rtsd r15, 8 */
	regs->r15 = ((unsigned long)frame->tramp)-8;

	address = ((unsigned long)frame->tramp);
#ifdef CONFIG_MMU
	pmdp = pmd_offset(pud_offset(
			pgd_offset(current->mm, address),
					address), address);

	preempt_disable();
	ptep = pte_offset_map(pmdp, address);
	if (pte_present(*ptep)) {
		address = (unsigned long) page_address(pte_page(*ptep));
		/* MS: I need add offset in page */
		address += ((unsigned long)frame->tramp) & ~PAGE_MASK;
		/* MS address is virtual */
		address = virt_to_phys((void *)address);
		invalidate_icache_range(address, address + 8);
		flush_dcache_range(address, address + 8);
	}
	pte_unmap(ptep);
	preempt_enable();
#else
	flush_icache_range(address, address + 8);
	flush_dcache_range(address, address + 8);
#endif
	if (err)
		goto give_sigsegv;

	/* Set up registers for signal handler */
	regs->r1 = (unsigned long) frame;

	/* Signal handler args: */
	regs->r5 = signal; /* arg 0: signum */
	regs->r6 = (unsigned long) &frame->info; /* arg 1: siginfo */
	regs->r7 = (unsigned long) &frame->uc; /* arg2: ucontext */
	/* Offset to handle microblaze rtid r14, 0 */
	regs->pc = (unsigned long)ka->sa.sa_handler;

	set_fs(USER_DS);

#ifdef DEBUG_SIG
	printk(KERN_INFO "SIG deliver (%s:%d): sp=%p pc=%08lx\n",
		current->comm, current->pid, frame, regs->pc);
#endif

	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return -EFAULT;
}
Example #12
int
elf_load(struct sys_info *info, ihandle_t dev, const char *cmdline, void **boot_notes)
{
    Elf_ehdr ehdr;
    Elf_phdr *phdr = NULL;
    unsigned long checksum_offset, file_size;
    unsigned short checksum = 0;
    int retval = -1;
    unsigned int offset;

    image_name = image_version = NULL;

    /* Mark the saved-program-state as invalid */
    feval("0 state-valid !");

    fd = open_ih(dev);
    if (fd == -1) {
        goto out;
    }

    offset = find_elf(&ehdr);
    if (!offset) {
        retval = LOADER_NOT_SUPPORT;
        goto out;
    }

#if DEBUG
    printk("ELF header:\n");
    printk(" ehdr.e_type    = %d\n", (int)ehdr.e_type);
    printk(" ehdr.e_machine = %d\n", (int)ehdr.e_machine);
    printk(" ehdr.e_version = %d\n", (int)ehdr.e_version);
    printk(" ehdr.e_entry   = 0x%08x\n", (int)ehdr.e_entry);
    printk(" ehdr.e_phoff   = 0x%08x\n", (int)ehdr.e_phoff);
    printk(" ehdr.e_shoff   = 0x%08x\n", (int)ehdr.e_shoff);
    printk(" ehdr.e_flags   = %d\n", (int)ehdr.e_flags);
    printk(" ehdr.e_ehsize  = 0x%08x\n", (int)ehdr.e_ehsize);
    printk(" ehdr.e_phentsize = 0x%08x\n", (int)ehdr.e_phentsize);
    printk(" ehdr.e_phnum   = %d\n", (int)ehdr.e_phnum);
#endif

    if (ehdr.e_phnum > MAX_HEADERS) {
        printk ("elfload: too many program headers (MAX_HEADERS)\n");
        retval = 0;
        goto out;
    }

    phdr = elf_readhdrs(offset, &ehdr);
    if (!phdr)
        goto out;

    if (!check_mem_ranges(info, phdr, ehdr.e_phnum))
        goto out;

    checksum_offset = process_image_notes(phdr, ehdr.e_phnum, &checksum, offset);

    printf("Loading %s", image_name ? image_name : "image");
    if (image_version)
        printf(" version %s", image_version);
    printf("...\n");

    if (!load_segments(phdr, ehdr.e_phnum, checksum_offset, offset, &file_size))
        goto out;

    if (checksum_offset) {
        if (!verify_image(&ehdr, phdr, ehdr.e_phnum, checksum))
            goto out;
    }

    /* If we are attempting an ELF boot image, we pass a non-NULL pointer
       into boot_notes and mark the image as elf-boot rather than standard
       ELF */
    if (boot_notes) {
        *boot_notes = (void *)virt_to_phys(build_boot_notes(info, cmdline));
        feval("elf-boot saved-program-state >sps.file-type !");
    } else {
        feval("elf saved-program-state >sps.file-type !");
    }

    //debug("current time: %lu\n", currticks());

    debug("entry point is " FMT_elf "\n", addr_fixup(ehdr.e_entry));

    // Initialise saved-program-state
    PUSH(addr_fixup(ehdr.e_entry));
    feval("saved-program-state >sps.entry !");
    PUSH(file_size);
    feval("saved-program-state >sps.file-size !");

    feval("-1 state-valid !");

out:
    close_io(fd);
    if (phdr)
        free(phdr);
    if (image_name)
        free(image_name);
    if (image_version)
        free(image_version);
    return retval;
}
Example #13
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory",
		virt_to_phys((void *) start),
		virt_to_phys((void *) end));
}
Example #14
static int falcon_probe_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data;
	struct falcon_board *board;
	int rc;

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	rc = -ENODEV;

	if (efx_nic_fpga_ver(efx) != 0) {
		netif_err(efx, probe, efx->net_dev,
			  "Falcon FPGA not supported\n");
		goto fail1;
	}

	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
		efx_oword_t nic_stat;
		struct pci_dev *dev;
		u8 pci_rev = efx->pci_dev->revision;

		if ((pci_rev == 0xff) || (pci_rev == 0)) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A0 not supported\n");
			goto fail1;
		}
		efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
		if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 1G not supported\n");
			goto fail1;
		}
		if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 PCI-X not supported\n");
			goto fail1;
		}

		dev = pci_dev_get(efx->pci_dev);
		while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
					     dev))) {
			if (dev->bus == efx->pci_dev->bus &&
			    dev->devfn == efx->pci_dev->devfn + 1) {
				nic_data->pci_dev2 = dev;
				break;
			}
		}
		if (!nic_data->pci_dev2) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to find secondary function\n");
			rc = -ENODEV;
			goto fail2;
		}
	}

	/* Now we can reset the NIC */
	rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
		goto fail3;
	}

	/* Allocate memory for INT_KER */
	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
	if (rc)
		goto fail4;
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	netif_dbg(efx, probe, efx->net_dev,
		  "INT_KER at %llx (virt %p phys %llx)\n",
		  (u64)efx->irq_status.dma_addr,
		  efx->irq_status.addr,
		  (u64)virt_to_phys(efx->irq_status.addr));

	falcon_probe_spi_devices(efx);

	/* Read in the non-volatile configuration */
	rc = falcon_probe_nvconfig(efx);
	if (rc) {
		if (rc == -EINVAL)
			netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
		goto fail5;
	}

	/* Initialise I2C adapter */
	board = falcon_board(efx);
	board->i2c_adap.owner = THIS_MODULE;
	board->i2c_data = falcon_i2c_bit_operations;
	board->i2c_data.data = efx;
	board->i2c_adap.algo_data = &board->i2c_data;
	board->i2c_adap.dev.parent = &efx->pci_dev->dev;
	strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
		sizeof(board->i2c_adap.name));
	rc = i2c_bit_add_bus(&board->i2c_adap);
	if (rc)
		goto fail5;

	rc = falcon_board(efx)->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise board\n");
		goto fail6;
	}

	nic_data->stats_disable_count = 1;
	setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
		    (unsigned long)efx);

	return 0;

 fail6:
	BUG_ON(i2c_del_adapter(&board->i2c_adap));
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
 fail5:
	efx_nic_free_buffer(efx, &efx->irq_status);
 fail4:
 fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
 fail2:
 fail1:
	kfree(efx->nic_data);
	return rc;
}
Example #15
static int siena_probe_nic(struct efx_nic *efx)
{
	struct siena_nic_data *nic_data;
	efx_oword_t reg;
	int rc;

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	nic_data->efx = efx;
	efx->nic_data = nic_data;

	if (efx_farch_fpga_ver(efx) != 0) {
		netif_err(efx, probe, efx->net_dev,
			  "Siena FPGA not supported\n");
		rc = -ENODEV;
		goto fail1;
	}

	efx->max_channels = EFX_MAX_CHANNELS;
	efx->max_tx_channels = EFX_MAX_CHANNELS;

	efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
	efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;

	rc = efx_mcdi_init(efx);
	if (rc)
		goto fail1;

	/* Now we can reset the NIC */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
		goto fail3;
	}

	siena_init_wol(efx);

	/* Allocate memory for INT_KER */
	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
				  GFP_KERNEL);
	if (rc)
		goto fail4;
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	netif_dbg(efx, probe, efx->net_dev,
		  "INT_KER at %llx (virt %p phys %llx)\n",
		  (unsigned long long)efx->irq_status.dma_addr,
		  efx->irq_status.addr,
		  (unsigned long long)virt_to_phys(efx->irq_status.addr));

	/* Read in the non-volatile configuration */
	rc = siena_probe_nvconfig(efx);
	if (rc == -EINVAL) {
		netif_err(efx, probe, efx->net_dev,
			  "NVRAM is invalid therefore using defaults\n");
		efx->phy_type = PHY_TYPE_NONE;
		efx->mdio.prtad = MDIO_PRTAD_NONE;
	} else if (rc) {
		goto fail5;
	}

	rc = efx_mcdi_mon_probe(efx);
	if (rc)
		goto fail5;

#ifdef CONFIG_SFC_SRIOV
	efx_siena_sriov_probe(efx);
#endif
	efx_ptp_defer_probe_with_channel(efx);

	return 0;

fail5:
	efx_nic_free_buffer(efx, &efx->irq_status);
fail4:
fail3:
	efx_mcdi_fini(efx);
fail1:
	kfree(efx->nic_data);
	return rc;
}
Example #16
void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space
	 */
	if (p < 16*1024*1024)
		p += _ISA_MEM_BASE;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if (mem_init_done && (p < virt_to_phys(high_memory))) {
		printk("__ioremap(): phys addr "PHYS_FMT" is RAM lr %p\n", p,
		       __builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped?  Perhaps overlapped by a previous
	 * BAT mapping.  If the whole area is mapped then we're done,
	 * otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */
	if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
		goto out;

	if ((v = p_mapped_by_tlbcam(p)))
		goto out;

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == 0)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	/*
	 * Should check if it is a candidate for a BAT mapping
	 */

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v+i, p+i, flags);
	if (err) {
		if (mem_init_done)
			vunmap((void *)v);
		return NULL;
	}

out:
	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
Example #17
int ixpdev_init(int __nds_count, struct net_device **__nds,
		void (*__set_port_admin_status)(int port, int up))
{
	int i;
	int err;

	BUILD_BUG_ON(RX_BUF_COUNT > 192 || TX_BUF_COUNT > 192);

	printk(KERN_INFO "IXP2000 MSF ethernet driver %s\n", DRV_MODULE_VERSION);

	nds_count = __nds_count;
	nds = __nds;
	set_port_admin_status = __set_port_admin_status;

	for (i = 0; i < RX_BUF_COUNT; i++) {
		void *buf;

		buf = (void *)get_zeroed_page(GFP_KERNEL);
		if (buf == NULL) {
			err = -ENOMEM;
			while (--i >= 0)
				free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
			goto err_out;
		}
		rx_desc[i].buf_addr = virt_to_phys(buf);
		rx_desc[i].buf_length = PAGE_SIZE;
	}

	/* @@@ Maybe we shouldn't be preallocating TX buffers.  */
	for (i = 0; i < TX_BUF_COUNT; i++) {
		void *buf;

		buf = (void *)get_zeroed_page(GFP_KERNEL);
		if (buf == NULL) {
			err = -ENOMEM;
			while (--i >= 0)
				free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
			goto err_free_rx;
		}
		tx_desc[i].buf_addr = virt_to_phys(buf);
	}

	/* 256 entries, ring status set means 'empty', base address 0x0000.  */
	ixp2000_reg_write(RING_RX_PENDING_BASE, 0x44000000);
	ixp2000_reg_write(RING_RX_PENDING_HEAD, 0x00000000);
	ixp2000_reg_write(RING_RX_PENDING_TAIL, 0x00000000);

	/* 256 entries, ring status set means 'full', base address 0x0400.  */
	ixp2000_reg_write(RING_RX_DONE_BASE, 0x40000400);
	ixp2000_reg_write(RING_RX_DONE_HEAD, 0x00000000);
	ixp2000_reg_write(RING_RX_DONE_TAIL, 0x00000000);

	for (i = 0; i < RX_BUF_COUNT; i++) {
		ixp2000_reg_write(RING_RX_PENDING,
			RX_BUF_DESC_BASE + (i * sizeof(struct ixpdev_rx_desc)));
	}

	ixp2000_uengine_load(0, &ixp2400_rx);
	ixp2000_uengine_start_contexts(0, 0xff);

	/* 256 entries, ring status set means 'empty', base address 0x0800.  */
	ixp2000_reg_write(RING_TX_PENDING_BASE, 0x44000800);
	ixp2000_reg_write(RING_TX_PENDING_HEAD, 0x00000000);
	ixp2000_reg_write(RING_TX_PENDING_TAIL, 0x00000000);

	/* 256 entries, ring status set means 'full', base address 0x0c00.  */
	ixp2000_reg_write(RING_TX_DONE_BASE, 0x40000c00);
	ixp2000_reg_write(RING_TX_DONE_HEAD, 0x00000000);
	ixp2000_reg_write(RING_TX_DONE_TAIL, 0x00000000);

	ixp2000_uengine_load(1, &ixp2400_tx);
	ixp2000_uengine_start_contexts(1, 0xff);

	for (i = 0; i < nds_count; i++) {
		err = register_netdev(nds[i]);
		if (err) {
			while (--i >= 0)
				unregister_netdev(nds[i]);
			goto err_free_tx;
		}
	}

	for (i = 0; i < nds_count; i++) {
		printk(KERN_INFO "%s: IXP2000 MSF ethernet (port %d), "
			"%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", nds[i]->name, i,
			nds[i]->dev_addr[0], nds[i]->dev_addr[1],
			nds[i]->dev_addr[2], nds[i]->dev_addr[3],
			nds[i]->dev_addr[4], nds[i]->dev_addr[5]);
	}

	return 0;

err_free_tx:
	for (i = 0; i < TX_BUF_COUNT; i++)
		free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));

err_free_rx:
	for (i = 0; i < RX_BUF_COUNT; i++)
		free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));

err_out:
	return err;
}
Example #18
static int pxa_pm_enter(suspend_state_t state)
{
	unsigned long sleep_save[SLEEP_SAVE_SIZE];
	unsigned long checksum = 0;
	struct timespec delta, rtc;
	int i;

	if (state != PM_SUSPEND_MEM)
		return -EINVAL;

#ifdef CONFIG_IWMMXT
	/* force any iWMMXt context to RAM */
	iwmmxt_task_disable(NULL);
#endif

	/* preserve current time */
	rtc.tv_sec = RCNR;
	rtc.tv_nsec = 0;
	save_time_delta(&delta, &rtc);

	SAVE(GPLR0); SAVE(GPLR1); SAVE(GPLR2);
	SAVE(GPDR0); SAVE(GPDR1); SAVE(GPDR2);
	SAVE(GRER0); SAVE(GRER1); SAVE(GRER2);
	SAVE(GFER0); SAVE(GFER1); SAVE(GFER2);
	SAVE(PGSR0); SAVE(PGSR1); SAVE(PGSR2);

	SAVE(GAFR0_L); SAVE(GAFR0_U);
	SAVE(GAFR1_L); SAVE(GAFR1_U);
	SAVE(GAFR2_L); SAVE(GAFR2_U);

#ifdef CONFIG_PXA27x
	SAVE(GPLR3); SAVE(GPDR3); SAVE(GRER3); SAVE(GFER3); SAVE(PGSR3);
	SAVE(GAFR3_L); SAVE(GAFR3_U);
#endif

	SAVE(ICMR);
	ICMR = 0;

	SAVE(CKEN);
	CKEN = 0;

	SAVE(PSTR);

	/* Note: wake up source are set up in each machine specific files */

	/* clear GPIO transition detect  bits */
	GEDR0 = GEDR0; GEDR1 = GEDR1; GEDR2 = GEDR2;
#ifdef CONFIG_PXA27x
	GEDR3 = GEDR3;
#endif

	/* Clear sleep reset status */
	RCSR = RCSR_SMR;

	/* set resume return address */
	PSPR = virt_to_phys(pxa_cpu_resume);

	/* before sleeping, calculate and save a checksum */
	for (i = 0; i < SLEEP_SAVE_SIZE - 1; i++)
		checksum += sleep_save[i];
	sleep_save[SLEEP_SAVE_CKSUM] = checksum;

	/* *** go zzz *** */
	pxa_cpu_suspend();

	/* after sleeping, validate the checksum */
	checksum = 0;
	for (i = 0; i < SLEEP_SAVE_SIZE - 1; i++)
		checksum += sleep_save[i];

	/* if invalid, display message and wait for a hardware reset */
	if (checksum != sleep_save[SLEEP_SAVE_CKSUM]) {
#ifdef CONFIG_ARCH_LUBBOCK
		LUB_HEXLED = 0xbadbadc5;
#endif
		while (1)
			pxa_cpu_suspend();
	}

	/* ensure not to come back here if it wasn't intended */
	PSPR = 0;

	/* restore registers */
	RESTORE(GAFR0_L); RESTORE(GAFR0_U);
	RESTORE(GAFR1_L); RESTORE(GAFR1_U);
	RESTORE(GAFR2_L); RESTORE(GAFR2_U);
	RESTORE_GPLEVEL(0); RESTORE_GPLEVEL(1); RESTORE_GPLEVEL(2);
	RESTORE(GPDR0); RESTORE(GPDR1); RESTORE(GPDR2);
	RESTORE(GRER0); RESTORE(GRER1); RESTORE(GRER2);
	RESTORE(GFER0); RESTORE(GFER1); RESTORE(GFER2);
	RESTORE(PGSR0); RESTORE(PGSR1); RESTORE(PGSR2);

#ifdef CONFIG_PXA27x
	RESTORE(GAFR3_L); RESTORE(GAFR3_U); RESTORE_GPLEVEL(3);
	RESTORE(GPDR3); RESTORE(GRER3); RESTORE(GFER3); RESTORE(PGSR3);
#endif

	PSSR = PSSR_RDH | PSSR_PH;

	RESTORE(CKEN);

	ICLR = 0;
	ICCR = 1;
	RESTORE(ICMR);

	RESTORE(PSTR);

	/* restore current time */
	rtc.tv_sec = RCNR;
	restore_time_delta(&delta, &rtc);

#ifdef DEBUG
	printk(KERN_DEBUG "*** made it back from resume\n");
#endif

	return 0;
}
Example #19
void mali_meson_poweron(void)
{
    unsigned long flags;
    u32 p, p_aligned;
    dma_addr_t p_phy;
    int i;
    unsigned int int_mask;

    if ((last_power_mode != -1) && (last_power_mode != MALI_POWER_MODE_DEEP_SLEEP)) {
        return;
    }

    if (READ_MALI_REG(MALI_PP_PP_VERSION) != MALI_PP_PP_VERSION_MAGIC) {
        printk("mali_meson_poweron: Mali APB bus access failed.");
        return;
    }

    if (READ_MALI_REG(MALI_MMU_DTE_ADDR) != 0) {
        printk("mali_meson_poweron: Mali is not really powered off.");
        return;
    }

    p = (u32)kcalloc(4096 * 4, 1, GFP_KERNEL);
    if (!p) {
        printk("mali_meson_poweron: NOMEM in meson_poweron\n");
        return;
    }

    p_aligned = ALIGN(p, 4096);    /* round up to a 4 KiB boundary */

    /* DTE */
    *(u32 *)(p_aligned) = (virt_to_phys((void *)p_aligned) + OFFSET_MMU_PTE) | MMU_FLAG_DTE_PRESENT;
    /* PTE */
    for (i=0; i<1024; i++) {
        *(u32 *)(p_aligned + OFFSET_MMU_PTE + i*4) = 
            (virt_to_phys((void *)p_aligned) + OFFSET_MMU_VIRTUAL_ZERO + 4096 * i) |
            MMU_FLAG_PTE_PAGE_PRESENT |
            MMU_FLAG_PTE_RD_PERMISSION;
    }

    /* command & data */
    memcpy((void *)(p_aligned + OFFSET_MMU_VIRTUAL_ZERO), poweron_data, 4096);

    p_phy = dma_map_single(NULL, (void *)p_aligned, 4096 * 3, DMA_TO_DEVICE);
    
    /* Set up Mali GP MMU */
    WRITE_MALI_REG(MALI_MMU_DTE_ADDR, p_phy);
    WRITE_MALI_REG(MALI_MMU_CMD, 0);

    if ((READ_MALI_REG(MALI_MMU_STATUS) & 1) != 1) {
        printk("mali_meson_poweron: MMU enabling failed.\n");
    }

    /* Set up Mali command registers */
    WRITE_MALI_REG(MALI_APB_GP_VSCL_START, 0);
    WRITE_MALI_REG(MALI_APB_GP_VSCL_END, 0x38);
    WRITE_MALI_REG(MALI_APB_GP_INT_MASK, 0x3ff);

    spin_lock_irqsave(&lock, flags);

    int_mask = READ_CBUS_REG(A9_0_IRQ_IN1_INTR_MASK);

    /* Set up ARM Mali interrupt */
    WRITE_CBUS_REG(A9_0_IRQ_IN1_INTR_STAT_CLR, 1 << 16);
    SET_CBUS_REG_MASK(A9_0_IRQ_IN1_INTR_MASK, 1 << 16);

    /* Start GP */
    WRITE_MALI_REG(MALI_APB_GP_CMD, 1);

    for (i = 0; i<100; i++)
        udelay(500);

    /* check Mali GP interrupt */
    if (READ_CBUS_REG(A9_0_IRQ_IN1_INTR_STAT) & (1<<16)) {
        printk("mali_meson_poweron: Interrupt received.\n");
    } else {
        printk("mali_meson_poweron: No interrupt received.\n");
    }

    WRITE_CBUS_REG(A9_0_IRQ_IN1_INTR_STAT_CLR, 1 << 16);
    CLEAR_CBUS_REG_MASK(A9_0_IRQ_IN1_INTR_MASK, 1 << 16);

    /* force reset GP */
    WRITE_MALI_REG(MALI_APB_GP_CMD, 1 << 5);

    /* stop MMU paging and reset */
    WRITE_MALI_REG(MALI_MMU_CMD, 1);
    WRITE_MALI_REG(MALI_MMU_CMD, 1 << 6);

    WRITE_CBUS_REG(A9_0_IRQ_IN1_INTR_MASK, int_mask);

    spin_unlock_irqrestore(&lock, flags);

    dma_unmap_single(NULL, p_phy, 4096 * 3, DMA_TO_DEVICE);

    kfree((void *)p);
}
Example #20
static int s5pc11x_pm_enter(suspend_state_t state)
{
	unsigned long regs_save[16];
	unsigned int tmp;


#ifdef CONFIG_HAS_WAKELOCK
	//wake_unlock(&pm_wake_lock);
#endif

	/* ensure the debug is initialised (if enabled) */
	DBG("s5pc11x_pm_enter(%d)\n", state);

	if (pm_cpu_prep == NULL || pm_cpu_sleep == NULL) {
		printk(KERN_ERR PFX "error: no cpu sleep functions set\n");
		return -EINVAL;
	}

#ifdef CONFIG_CPU_FREQ
	s5pc110_pm_target(BOOT_ARM_CLK);
#endif

	/* store the physical address of the register recovery block */
	s5pc110_sleep_save_phys = virt_to_phys(regs_save);

	DBG("s5pc11x_sleep_save_phys=0x%08lx\n", s5pc110_sleep_save_phys);

	s5pc11x_pm_do_save(gpio_save, ARRAY_SIZE(gpio_save));
#ifdef S5PC11X_ALIVEGPIO_STORE
	s5pc11x_pm_do_save(gpio_save_alive, ARRAY_SIZE(gpio_save_alive));
#endif
	s5pc11x_pm_do_save(irq_save, ARRAY_SIZE(irq_save));
	s5pc11x_pm_do_save(core_save, ARRAY_SIZE(core_save));
	s5pc11x_pm_do_save(sromc_save, ARRAY_SIZE(sromc_save));
	s5pc11x_pm_do_save(uart_save, ARRAY_SIZE(uart_save));


	/* ensure INF_REG0  has the resume address */
	__raw_writel(virt_to_phys(s5pc110_cpu_resume), S5P_INFORM0);

	/* call cpu specific preparation */
	pm_cpu_prep();

	/* flush cache back to ram */
	flush_cache_all();

#if 0		// To preserve 24MHz clock.
	/* USB & OSC Clock pad Enable */
	tmp = __raw_readl(S5P_SLEEP_CFG);
	//tmp |= (S5P_SLEEP_CFG_OSC_EN | S5P_SLEEP_CFG_USBOSC_EN);
	tmp &= ~(S5P_SLEEP_CFG_OSC_EN | S5P_SLEEP_CFG_USBOSC_EN);
	__raw_writel(tmp , S5P_SLEEP_CFG);
#endif
	__raw_writel(0xffffffff, S5P_EINT_WAKEUP_MASK);

	/* Power mode config setting */
	tmp = __raw_readl(S5P_PWR_CFG);
	tmp &= S5P_CFG_WFI_CLEAN;
	tmp |= S5P_CFG_WFI_SLEEP;
	__raw_writel(tmp, S5P_PWR_CFG);

	if (!hw_version_check()) {
		/* Set wakeup mask register */
		__raw_writel(0xFFED, S5P_WAKEUP_MASK);
	} else {
		if ((is_calling_or_playing & IS_VOICE_CALL_2G) ||
		    (is_calling_or_playing & IS_VOICE_CALL_3G) ||
		    (is_calling_or_playing & IS_DATA_CALL)) {
			__raw_writel(0xFFDD, S5P_WAKEUP_MASK); /* 0xFFDD: key, RTC_ALARM */
		} else {
			__raw_writel(0xFFFD, S5P_WAKEUP_MASK);
		}
	}

	__raw_writel(0xffffffff, S5PC110_VIC0REG(VIC_INT_ENABLE_CLEAR));
	__raw_writel(0xffffffff, S5PC110_VIC1REG(VIC_INT_ENABLE_CLEAR));
	__raw_writel(0xffffffff, S5PC110_VIC2REG(VIC_INT_ENABLE_CLEAR));
	__raw_writel(0xffffffff, S5PC110_VIC3REG(VIC_INT_ENABLE_CLEAR));
	__raw_writel(0xffffffff, S5PC110_VIC0REG(VIC_INT_SOFT_CLEAR));
	__raw_writel(0xffffffff, S5PC110_VIC1REG(VIC_INT_SOFT_CLEAR));
	__raw_writel(0xffffffff, S5PC110_VIC2REG(VIC_INT_SOFT_CLEAR));
	__raw_writel(0xffffffff, S5PC110_VIC3REG(VIC_INT_SOFT_CLEAR));

	/* SYSC INT disable */
	tmp = __raw_readl(S5P_OTHERS);
	tmp |= (S5P_OTHER_SYSC_INTOFF);
	__raw_writel(tmp, S5P_OTHERS);

	/* Clear WAKEUP_STAT register for next wakeup */
	tmp = __raw_readl(S5P_WAKEUP_STAT);
	__raw_writel(tmp, S5P_WAKEUP_STAT);

	/* Wake-up source setting */
	//s5pc11x_pm_configure_extint();

	// keypad direction control for evt0
	//s5pc11x_set_keypad_sleep_gpio();

	/* Set EINTs as wakeup sources */
	s5pc11x_pm_set_eint(11, 0x2);
	s5pc11x_pm_set_eint(22, 0x2);
	s5pc11x_pm_set_eint(15, 0x4);
	s5pc11x_pm_set_eint(21, 0x4);
	s5pc11x_pm_set_eint(7, 0x2);	/* PMIC */
	s5pc11x_pm_set_eint(6, 0x4);	/* det_3.5 */

	s5pc11x_pm_set_eint(28, 0x4);	/* T_FLASH_DETECT */
//[hdlnc_bp_ytkwon : 20100326
#ifdef CONFIG_KEPLER_AUDIO_A1026
	if (HWREV != 0x08) {
		if (get_headset_status() & SEC_HEADSET_4_POLE_DEVICE) {
			s5pc11x_pm_set_eint(30, 0x4);	/* send/end */
			s5pc11x_pm_set_eint(18, 0x4);	/* send/end 2.5 */
		} else {
			s5pc11x_pm_clear_eint(30);
			s5pc11x_pm_clear_eint(18);
		}
	}
#else
	if (HWREV == 0x0a || HWREV == 0x0c) {
		if (get_headset_status() & SEC_HEADSET_4_POLE_DEVICE)
			s5pc11x_pm_set_eint(30, 0x4);	/* send/end */
		else
			s5pc11x_pm_clear_eint(30);
	} else {
		if (get_headset_status() & SEC_HEADSET_4_POLE_DEVICE) {
			s5pc11x_pm_set_eint(30, 0x4);	/* send/end */
			s5pc11x_pm_set_eint(18, 0x4);	/* send/end 2.5 */
		} else {
			s5pc11x_pm_clear_eint(30);
			s5pc11x_pm_clear_eint(18);
		}
	}
#endif
//]hdlnc_bp_ytkwon : 20100326
		
	if (gp2a_get_proximity_enable())
		s5pc11x_pm_set_eint(2, 0x4);	/* proximity */

	s5pc11x_pm_set_eint(20, 0x3);	/* WiFi */
	s5pc11x_pm_set_eint(23, 0x2);	/* micro-USB */

#if defined(CONFIG_T959_VER_B0)
	s5pc11x_pm_set_eint(29, 0x4);
// [[junghyunseok edit for fuel_int interrupt control of fuel_gauge 20100504
#elif defined(CONFIG_KEPLER_VER_B0)
#elif defined(CONFIG_KEPLER_VER_B2) || defined(CONFIG_T959_VER_B5)
	s5pc11x_pm_set_eint(27, 0x2);
// ]]junghyunseok edit for fuel_int interrupt control of fuel_gauge 20100504
#else
	// gpio key
	if (HWREV >= 0xB) {
		s5pc11x_pm_set_eint(27, 0x4);
		s5pc11x_pm_set_eint(29, 0x4);
	}
#endif
	
	if (!hw_version_check()) {
		/* Set keypad as EINT for EVT0 wakeup workaround */
		s5pc11x_pm_set_eint(24, 0x2);
		s5pc11x_pm_set_eint(25, 0x2);
		s5pc11x_pm_set_eint(26, 0x2);
		s5pc11x_pm_set_eint(27, 0x2);

		/* Column pull-down enabled */
		tmp = readl(S5PC11X_GPH2PUD);
		tmp &= ~(0xFF);
		tmp |= 0x55;
		writel(tmp, S5PC11X_GPH2PUD);
	}

	s3c_config_sleep_gpio();

	s3c_gpio_slp_cfgpin(S5PC11X_MP03(3),  S3C_GPIO_SLP_OUT0);
	s3c_gpio_slp_setpull_updown(S5PC11X_MP03(3),  S3C_GPIO_SLP_OUT0);
#if 0
        tmp = __raw_readl(S5P_OTHERS);
        tmp &= ~(3 << 8);
        tmp |= (3 << 8);
        __raw_writel(tmp, S5P_OTHERS);

        __raw_writel(0,S5P_MIE_CONTROL);
        __raw_writel(0,S5P_HDMI_CONTROL);
        __raw_writel(0,S5P_USB_PHY_CONTROL);
        __raw_writel(0,S5P_DAC_CONTROL);
        __raw_writel(0,S5P_MIPI_PHY_CONTROL);
        __raw_writel(0,S5P_ADC_CONTROL);
        __raw_writel(0,S5P_PSHOLD_CONTROL);
#endif

#if (!defined(CONFIG_ARIES_VER_B0) && !defined(CONFIG_ARIES_VER_B4) && !defined(CONFIG_ARIES_VER_B5))
	/* Enable PS_HOLD pin to avoid reset failure */
	__raw_writel((0x5 << 12 | 0x1 << 9 | 0x1 << 8 | 0x1 << 0), S5P_PSHOLD_CONTROL);
#endif

	/* s5pc11x_cpu_save will also act as our return point from when
	 * we resume as it saves its own register state, so use the return
	 * code to differentiate return from save and return from sleep */

	if (s5pc110_cpu_save(regs_save) == 0) {
		flush_cache_all();
		if (!hw_version_check()) {
			/* Workaround for chip bug on EVT0 */
			tmp = __raw_readl(S5P_EINT_WAKEUP_MASK + 4);	/* PWR_MODE */
			tmp |= (1 << 2);
			__raw_writel(tmp, S5P_EINT_WAKEUP_MASK + 4);
		}
		pm_cpu_sleep();
	}

	/* restore the cpu state */
	cpu_init();

	s5pc11x_pm_do_restore(gpio_save, ARRAY_SIZE(gpio_save));
#ifdef S5PC11X_ALIVEGPIO_STORE
	s5pc11x_pm_do_restore_alive(gpio_save_alive, ARRAY_SIZE(gpio_save_alive));
#endif
	s5pc11x_pm_do_restore(irq_save, ARRAY_SIZE(irq_save));
	__raw_writel(0x0, S3C24XX_VA_UART2+S3C2410_UCON);
	__raw_writel(0xf, S3C24XX_VA_UART2+S5P_UINTP);
	__raw_writel(0xf, S3C24XX_VA_UART2+S5P_UINTSP);
	__raw_writel(0xf, S3C24XX_VA_UART2+S5P_UINTM);

	/* Temporary workaround to prevent lockup by UART - 20100316 */
	__raw_writel(0x0, S3C24XX_VA_UART3 + S3C2410_UCON);
	__raw_writel(0xf, S3C24XX_VA_UART3 + S5P_UINTM);
	__raw_writel(0xf, S3C24XX_VA_UART3 + S5P_UINTSP);
	__raw_writel(0xf, S3C24XX_VA_UART3 + S5P_UINTP);
	__raw_writel(0x0, S3C24XX_VA_UART2 + S3C2410_UCON);
	__raw_writel(0xf, S3C24XX_VA_UART2 + S5P_UINTM);
	__raw_writel(0xf, S3C24XX_VA_UART2 + S5P_UINTSP);
	__raw_writel(0xf, S3C24XX_VA_UART2 + S5P_UINTP);
	__raw_writel(0x0, S3C24XX_VA_UART1 + S3C2410_UCON);
	__raw_writel(0xf, S3C24XX_VA_UART1 + S5P_UINTM);
	__raw_writel(0xf, S3C24XX_VA_UART1 + S5P_UINTSP);
	__raw_writel(0xf, S3C24XX_VA_UART1 + S5P_UINTP);
	__raw_writel(0x0, S3C24XX_VA_UART0 + S3C2410_UCON);
	__raw_writel(0xf, S3C24XX_VA_UART0 + S5P_UINTM);
	__raw_writel(0xf, S3C24XX_VA_UART0 + S5P_UINTSP);
	__raw_writel(0xf, S3C24XX_VA_UART0 + S5P_UINTP);


	s5pc11x_pm_do_restore(uart_save, ARRAY_SIZE(uart_save));
	s5pc11x_pm_do_restore(core_save, ARRAY_SIZE(core_save));
	s5pc11x_pm_do_restore(sromc_save, ARRAY_SIZE(sromc_save));

	/* enable gpio, uart, mmc */
	tmp = __raw_readl(S5P_OTHERS);
#if (defined(CONFIG_ARIES_VER_B0) || defined(CONFIG_ARIES_VER_B4) || defined(CONFIG_ARIES_VER_B5))
	tmp |= (1 << 31) | (1 << 28) | (1 << 29);
#else
	tmp |= (1 << 31) | (0x1 << 30) | (1 << 28) | (1 << 29);
#endif
	__raw_writel(tmp, S5P_OTHERS);

	/* EINT22 pending clear */
	//s5pc11x_pm_clear_eint(22); //<= do action in s3c-keypad.c
//	s5pc11x_pm_clear_eint(21);
	if (!hw_version_check()) {
		/* EVT0 keypad wakeup workaround */
		s5pc11x_pm_clear_eint(24);
		s5pc11x_pm_clear_eint(25);
		s5pc11x_pm_clear_eint(26);
		s5pc11x_pm_clear_eint(27);
		s5pc11x_pm_clear_eint(21);
	} else {
		/* Clear WAKEUP_STAT register for next wakeup */
		tmp = __raw_readl(S5P_WAKEUP_STAT);
		__raw_writel(tmp, S5P_WAKEUP_STAT);

		printk("wakeup source is 0x%x\n", tmp);
		printk(" EXT_INT_0_PEND       %x\n", __raw_readl(S5PC11X_EINTPEND(0)));
		printk(" EXT_INT_1_PEND       %x\n", __raw_readl(S5PC11X_EINTPEND(1)));
		printk(" EXT_INT_2_PEND       %x\n", __raw_readl(S5PC11X_EINTPEND(2)));
		printk(" EXT_INT_3_PEND       %x\n", __raw_readl(S5PC11X_EINTPEND(3)));
	}

	DBG("\npost sleep, preparing to return 2\n");

	s5pc11x_pm_check_restore();

#ifdef CONFIG_HAS_WAKELOCK
	//wake_lock_timeout(&pm_wake_lock, 5 * HZ);
#endif

	/* ok, let's return from sleep */
	DBG("S5PC110 PM Resume (post-restore)\n");

	return 0;
}
Example #21
static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages)
{
	return phys_to_dma(dev, virt_to_phys(pages));
}
Example #22
void relocate(void)
{
	unsigned long addr, eaddr, size;
	unsigned i;
	/* Walk through the memory map and find the highest address
	 * below 4GB that etherboot will fit into.  Ensure etherboot
	 * lies entirely within a range with A20=0.  This means that
	 * even if something screws up the state of the A20 line, the
	 * etherboot code is still visible and we have a chance to
	 * diagnose the problem.
	 */
	/* First find the size of etherboot */
	addr = virt_to_phys(_text);
	eaddr = virt_to_phys(_end);
	size = (eaddr - addr + 0xf) & ~0xf;

	/* If the current etherboot is beyond MAX_ADDR pretend it is
	 * at the lowest possible address.
	 */
	if (eaddr > MAX_ADDR) {
		eaddr = 0;
	}

	for(i = 0; i < meminfo.map_count; i++) {
		unsigned long r_start, r_end;
		if (meminfo.map[i].type != E820_RAM) {
			continue;
		}
		if (meminfo.map[i].addr > MAX_ADDR) {
			continue;
		}
		if (meminfo.map[i].size > MAX_ADDR) {
			continue;
		}
		r_start = meminfo.map[i].addr;
		r_end = r_start + meminfo.map[i].size;
		/* Make the addresses 16 byte (128 bit) aligned */
		r_start = (r_start + 15) & ~15;
		r_end = r_end & ~15;
		if (r_end < r_start) {
			r_end = MAX_ADDR;
		}
		if (r_end < size) {
			/* Avoid overflow weirdness when r_end - size < 0 */
			continue;
		}
		/* Shrink the range down to use only even megabytes
		 * (i.e. A20=0).
		 */
		if ( r_end & 0x100000 ) {
			/* If r_end is in an odd megabyte, round down
			 * r_end to the top of the next even megabyte.
			 */
			r_end = r_end & ~0xfffff;
		} else if ( ( r_end - size ) & 0x100000 ) {
			/* If r_end is in an even megabyte, but the
			 * start of Etherboot would be in an odd
			 * megabyte, round down to the top of the next
			 * even megabyte.
			*/
			r_end = ( r_end - 0x100000 ) & ~0xfffff;
		}
		/* If we have rounded down r_end below r_start, skip
		 * this block.
		 */
		if ( r_end < r_start ) {
			continue;
		}
		if (eaddr < r_end - size) {
			addr = r_end - size;
			eaddr = r_end;
		}
	}
	if (addr != virt_to_phys(_text)) {
		unsigned long old_addr = virt_to_phys(_text);
#ifndef VBOX
		printf("Relocating _text from: [%lx,%lx) to [%lx,%lx)\n",
			old_addr, virt_to_phys(_end),
			addr, eaddr);
#endif /* !VBOX */
		arch_relocate_to(addr);
		cleanup();
		relocate_to(addr);
		arch_relocated_from(old_addr);
	}
}
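
The even-megabyte rounding is the subtle step above; the fragment below lifts just that arithmetic into a standalone helper so it can be checked in isolation (the helper name and sample values are illustrative only):

/* Round r_end down so that the whole of [r_end - size, r_end) has A20 = 0.
 * Mirrors the rounding logic inside relocate() above. */
static unsigned long round_to_even_mb(unsigned long r_end, unsigned long size)
{
	if (r_end & 0x100000) {
		/* r_end lies in an odd megabyte: round down to its base */
		r_end &= ~0xfffffUL;
	} else if ((r_end - size) & 0x100000) {
		/* r_end is even but the start would be odd: go one MB lower */
		r_end = (r_end - 0x100000) & ~0xfffffUL;
	}
	return r_end;
}

/* Example: round_to_even_mb(0x3f80000, 0x20000) == 0x3f00000, because
 * 0x3f80000 sits in megabyte 63 (odd), so the range is pushed below it. */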
Example #23
static int psci_system_suspend(unsigned long unused)
{
	struct psci_power_state state = {
		.id = 0,
		.type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
		.affinity_level = 2, /* system level */
	};

	return psci_cpu_suspend(state, virt_to_phys(cpu_resume));
}

static int psci_system_suspend_enter(suspend_state_t state)
{
	return cpu_suspend(0, psci_system_suspend);
}

static const struct platform_suspend_ops psci_suspend_ops = {
	.valid          = suspend_valid_only_mem,
	.enter          = psci_system_suspend_enter,
};

static void __init psci_init_system_suspend(void)
{
	if (!IS_ENABLED(CONFIG_SUSPEND))
		return;

	suspend_set_ops(&psci_suspend_ops);
}

/*
 * PSCI Function IDs for v0.2+ are well defined so use
 * standard values.
 */
static int psci_0_2_init(struct device_node *np)
{
	int err, ver;

	err = get_set_conduit_method(np);

	if (err)
		goto out_put_node;

	ver = psci_get_version();

	if (ver == PSCI_RET_NOT_SUPPORTED) {
		/* PSCI v0.2 mandates implementation of PSCI_ID_VERSION. */
		pr_err("PSCI firmware does not comply with the v0.2 spec.\n");
		err = -EOPNOTSUPP;
		goto out_put_node;
	} else {
		pr_info("PSCIv%d.%d detected in firmware.\n",
				PSCI_VERSION_MAJOR(ver),
				PSCI_VERSION_MINOR(ver));

		if (PSCI_VERSION_MAJOR(ver) == 0 &&
				PSCI_VERSION_MINOR(ver) < 2) {
			err = -EINVAL;
			pr_err("Conflicting PSCI version detected.\n");
			goto out_put_node;
		}
	}

	pr_info("Using standard PSCI v0.2 function IDs\n");
	psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_CPU_SUSPEND;
	psci_ops.cpu_suspend = psci_cpu_suspend;

	psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
	psci_ops.cpu_off = psci_cpu_off;

	psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_CPU_ON;
	psci_ops.cpu_on = psci_cpu_on;

	psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_MIGRATE;
	psci_ops.migrate = psci_migrate;

	psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN_AFFINITY_INFO;
	psci_ops.affinity_info = psci_affinity_info;

	psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
		PSCI_0_2_FN_MIGRATE_INFO_TYPE;
	psci_ops.migrate_info_type = psci_migrate_info_type;

	arm_pm_restart = psci_sys_reset;

	pm_power_off = psci_sys_poweroff;

	psci_init_system_suspend();

out_put_node:
	of_node_put(np);
	return err;
}
Example #24
static int __init mem_prot_init(void)
{
	phys_addr_t phys = virt_to_phys(_stext);
	mem_prot_region((u64)phys, (u64)(_etext - _stext), true);
	return 0;
}
Example #25
static int siena_probe_nic(struct efx_nic *efx)
{
	struct siena_nic_data *nic_data;
	bool already_attached = false;
	efx_oword_t reg;
	int rc;

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	if (efx_nic_fpga_ver(efx) != 0) {
		netif_err(efx, probe, efx->net_dev,
			  "Siena FPGA not supported\n");
		rc = -ENODEV;
		goto fail1;
	}

	efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
	efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;

	efx_mcdi_init(efx);

	/* Recover from a failed assertion before probing */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		goto fail1;

	/* Let the BMC know that the driver is now in charge of link and
	 * filter settings. We must do this before we reset the NIC */
	rc = efx_mcdi_drv_attach(efx, true, &already_attached);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Unable to register driver with MCPU\n");
		goto fail2;
	}
	if (already_attached)
		/* Not a fatal error */
		netif_err(efx, probe, efx->net_dev,
			  "Host already registered with MCPU\n");

	/* Now we can reset the NIC */
	rc = siena_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
		goto fail3;
	}

	siena_init_wol(efx);

	/* Allocate memory for INT_KER */
	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
	if (rc)
		goto fail4;
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	netif_dbg(efx, probe, efx->net_dev,
		  "INT_KER at %llx (virt %p phys %llx)\n",
		  (unsigned long long)efx->irq_status.dma_addr,
		  efx->irq_status.addr,
		  (unsigned long long)virt_to_phys(efx->irq_status.addr));

	/* Read in the non-volatile configuration */
	rc = siena_probe_nvconfig(efx);
	if (rc == -EINVAL) {
		netif_err(efx, probe, efx->net_dev,
			  "NVRAM is invalid therefore using defaults\n");
		efx->phy_type = PHY_TYPE_NONE;
		efx->mdio.prtad = MDIO_PRTAD_NONE;
	} else if (rc) {
		goto fail5;
	}

	rc = efx_mcdi_mon_probe(efx);
	if (rc)
		goto fail5;

	efx_sriov_probe(efx);
	efx_ptp_probe(efx);

	return 0;

fail5:
	efx_nic_free_buffer(efx, &efx->irq_status);
fail4:
fail3:
	efx_mcdi_drv_attach(efx, false, NULL);
fail2:
fail1:
	kfree(efx->nic_data);
	return rc;
}
Example #26
/* Note that this doesn't work with highmem page */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
{
	return phys_to_dma(hwdev, virt_to_phys(address));
}
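
For a highmem page there is no permanent kernel virtual mapping, so virt_to_phys() cannot be applied; a hedged sketch of the page-based route (the function below is hypothetical, not part of the swiotlb API):

static dma_addr_t swiotlb_page_to_bus(struct device *hwdev,
				      struct page *page, unsigned long offset)
{
	/* translate via the struct page instead of a kernel virtual address */
	return phys_to_dma(hwdev, page_to_phys(page) + offset);
}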
Example #27
/**
 * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
 * The purpose of this function is to manage low power programming
 * of OMAP4 MPUSS subsystem
 * @cpu : CPU ID
 * @power_state: Low power state.
 *
 * MPUSS states for the context save:
 * save_state =
 *	0 - Nothing lost and no need to save: MPUSS INACTIVE
 *	1 - CPUx L1 and logic lost: MPUSS CSWR
 *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *	3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF
 */
int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
{
	struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
	unsigned int save_state = 0;
	unsigned int wakeup_cpu;

	if (omap_rev() == OMAP4430_REV_ES1_0)
		return -ENXIO;

	switch (power_state) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_INACTIVE:
		save_state = 0;
		break;
	case PWRDM_POWER_OFF:
		save_state = 1;
		break;
	case PWRDM_POWER_RET:
	default:
		/*
		 * CPUx CSWR is an invalid hardware state. Also CPUx OSWR
		 * doesn't make much sense, since logic is lost and $L1
		 * needs to be cleaned because of coherency. This makes
		 * CPUx OSWR equivalent to CPUx OFF and hence not supported.
		 */
		WARN_ON(1);
		return -ENXIO;
	}

	pwrdm_pre_transition(NULL);

	/*
	 * Check MPUSS next state and save interrupt controller if needed.
	 * In MPUSS OSWR or device OFF, interrupt controller context is lost.
	 */
	mpuss_clear_prev_logic_pwrst();
	if ((pwrdm_read_next_pwrst(mpuss_pd) == PWRDM_POWER_RET) &&
		(pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF))
		save_state = 2;

	cpu_clear_prev_logic_pwrst(cpu);
	pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
	set_cpu_wakeup_addr(cpu, virt_to_phys(omap4_cpu_resume));
	scu_pwrst_prepare(cpu, power_state);
	l2x0_pwrst_prepare(cpu, save_state);

	/*
	 * Call low level function  with targeted low power state.
	 */
	cpu_suspend(save_state, omap4_finish_suspend);

	/*
	 * Restore the CPUx power state to ON, otherwise the CPUx
	 * power domain can transition to the programmed low power
	 * state while doing WFI outside the low power code. On
	 * secure devices, CPUx does WFI which can result in a
	 * domain transition.
	 */
	wakeup_cpu = smp_processor_id();
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	pwrdm_post_transition(NULL);

	return 0;
}
Example #28
/*
 * Consistent memory allocators. Used for DMA devices that want to
 * share uncached memory with the processor core.
 * My crufty no-MMU approach is simple. In the HW platform we can optionally
 * mirror the DDR up above the processor cacheable region.  So, memory accessed
 * in this mirror region will not be cached.  It's alloced from the same
 * pool as normal memory, but the handle we return is shifted up into the
 * uncached region.  This will no doubt cause big problems if memory allocated
 * here is not also freed properly. -- JW
 */
void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
{
	unsigned long order, vaddr;
	void *ret;
	unsigned int i, err = 0;
	struct page *page, *end;

#ifdef CONFIG_MMU
	phys_addr_t pa;
	struct vm_struct *area;
	unsigned long va;
#endif

	BUG_ON(in_interrupt());

	/* Only allocate page size areas. */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	vaddr = __get_free_pages(gfp, order);
	if (!vaddr)
		return NULL;

	/*
	 * we need to ensure that there are no cachelines in use,
	 * or worse dirty in this area.
	 */
	flush_dcache_range(virt_to_phys((void *)vaddr),
					virt_to_phys((void *)vaddr) + size);

#ifndef CONFIG_MMU
	ret = (void *)vaddr;
	/*
	 * Here's the magic!  Note if the uncached shadow is not implemented,
	 * it's up to the calling code to also test that condition and make
	 * other arrangements, such as manually flushing the cache and so on.
	 */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
# endif
	if ((unsigned int)ret > cpuinfo.dcache_base &&
				(unsigned int)ret < cpuinfo.dcache_high)
		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");

	/* dma_handle is same as physical (shadowed) address */
	*dma_handle = (dma_addr_t)ret;
#else
	/* Allocate some common virtual space to map the new pages. */
	area = get_vm_area(size, VM_ALLOC);
	if (!area) {
		free_pages(vaddr, order);
		return NULL;
	}
	va = (unsigned long) area->addr;
	ret = (void *)va;

	/* This gives us the real physical address of the first page. */
	*dma_handle = pa = __virt_to_phys(vaddr);
#endif

	/*
	 * free wasted pages.  We skip the first page since we know
	 * that it will have count = 1 and won't require freeing.
	 * We also mark the pages in use as reserved so that
	 * remap_page_range works.
	 */
	page = virt_to_page(vaddr);
	end = page + (1 << order);

	split_page(page, order);

	for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
#ifdef CONFIG_MMU
		/* MS: This is the whole magic - use cache inhibit pages */
		err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
#endif

		SetPageReserved(page);
		page++;
	}

	/* Free the otherwise unused pages. */
	while (page < end) {
		__free_page(page);
		page++;
	}

	if (err) {
		free_pages(vaddr, order);
		return NULL;
	}

	return ret;
}
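
A hedged usage sketch for the allocator above: the descriptor structure and function are invented for illustration, and the matching free path is elided:

struct ring_buf {
	u32 desc[256];		/* device-visible descriptor ring */
};

static struct ring_buf *ring;
static dma_addr_t ring_dma;

static int example_ring_alloc(void)
{
	/* returns an uncached CPU pointer; ring_dma is what the device gets */
	ring = consistent_alloc(GFP_KERNEL, sizeof(*ring), &ring_dma);
	if (!ring)
		return -ENOMEM;
	return 0;
}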
Example #29
/**
 * adv_check_dma - check DMA status until the DMA transfer terminated
 *
 * @device: Points to the device object
 */
static INT32S adv_check_dma(adv_device *device)
{
	private_data *privdata = (private_data *) (device->private_data);
	INT32U *trash_buf = NULL;
	INT32U tb_phyaddr;
	INT16U tmp;
	tmp = advInpDMAw(privdata, 0xa8);
	tmp &= 0x10;
	if (tmp != 0x10) {
		/* initialize PCI9054 */
		trash_buf = kmalloc(256, GFP_DMA);
		if (!trash_buf) {
			return -ENOMEM;
		}
		tb_phyaddr = virt_to_phys((void *) trash_buf);

		advOutpDMAw(privdata, 0x04, 0x0001); /* enable pci addresses */
		advOutpDMAdw(privdata, 0x08, 0x0000);
		advOutpDMAdw(privdata, 0x0a, 0x0024);
	
		advOutpDMAw(privdata, 0x68, 0x0000);
		advOutpDMAw(privdata, 0x6a, 0x0000);
		advOutpDMAw(privdata, 0x18, 0x000d); /* 32 bits width */
		advOutpDMAw(privdata, 0xb0, 0x0000);

		advOutpDMAdw(privdata, 0x80, 0x1d0d);
		advOutpDMAdw(privdata, 0x82, 0x0002);
		advOutpDMAdw(privdata, 0x84, tb_phyaddr);
		advOutpDMAdw(privdata, 0x88, 0x0040);
		advOutpDMAdw(privdata, 0x8c, 0x0080);
		advOutpDMAdw(privdata, 0x90, 0x0003);
		advOutpDMAdw(privdata, 0x92, 0x0000);


		/* init divisor */
		advOutpw(privdata, 0x36, 0x0076);
		advOutpw(privdata, 0x32, privdata->divisor2);
		advOutpw(privdata, 0x32, (privdata->divisor2 >> 8));

		advOutpw(privdata, 0x36, 0x00b6);
		advOutpw(privdata, 0x34, privdata->divisor1);
		advOutpw(privdata, 0x34, (privdata->divisor1 >> 8));

		
		advOutpw(privdata, 0x2a, 0x0000);
		advOutpw(privdata, 0x2c, 0x0000);
		
		advOutpDMAdw(privdata, 0xa8, 0x0009);
		advOutpDMAdw(privdata, 0xa8, 0x0009);

		tmp = advInpDMAw(privdata, 0xa8);
		tmp &= 0x10;
		while (tmp != 0x10) {
			tmp = advInpDMAw(privdata, 0xa8);
			tmp &= 0x10;
			advOutpw(privdata, 0x2c, 0x00);
		}
		advOutpw(privdata, 0x2c, 0x00);
		kfree(trash_buf);
	}

	return 0;
}
Example #30
static int __init ion_dummy_init(void)
{
	int i, err;

	idev = ion_device_create(NULL);
	heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr,
			GFP_KERNEL);
	if (!heaps)
		return -ENOMEM;


	/* Allocate a dummy carveout heap */
	carveout_ptr = alloc_pages_exact(
				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size,
				GFP_KERNEL);
	if (carveout_ptr)
		dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base =
						virt_to_phys(carveout_ptr);
	else
		pr_err("ion_dummy: Could not allocate carveout\n");

	/* Allocate a dummy chunk heap */
	chunk_ptr = alloc_pages_exact(
				dummy_heaps[ION_HEAP_TYPE_CHUNK].size,
				GFP_KERNEL);
	if (chunk_ptr)
		dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr);
	else
		pr_err("ion_dummy: Could not allocate chunk\n");

	for (i = 0; i < dummy_ion_pdata.nr; i++) {
		struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i];

		if (heap_data->type == ION_HEAP_TYPE_CARVEOUT &&
							!heap_data->base)
			continue;

		if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base)
			continue;

		heaps[i] = ion_heap_create(heap_data);
		if (IS_ERR_OR_NULL(heaps[i])) {
			err = PTR_ERR(heaps[i]);
			goto err;
		}
		ion_device_add_heap(idev, heaps[i]);
	}
	return 0;
err:
	for (i = 0; i < dummy_ion_pdata.nr; i++) {
		if (heaps[i])
			ion_heap_destroy(heaps[i]);
	}
	kfree(heaps);

	if (carveout_ptr) {
		free_pages_exact(carveout_ptr,
				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size);
		carveout_ptr = NULL;
	}
	if (chunk_ptr) {
		free_pages_exact(chunk_ptr,
				dummy_heaps[ION_HEAP_TYPE_CHUNK].size);
		chunk_ptr = NULL;
	}
	return err;
}