Example No. 1
/*lint -e960 */
unsigned long OM_Printf(char * pcformat, ... )
/*lint +e960 */
{
    VOS_UINT32          ulReturn = OM_OK;
    VOS_CHAR            *pcWarning;
    VOS_UINT32          ulTempLen;
    VOS_INT32           lRetLen;
    va_list             argument;
    VOS_UINT32          ulDataLen = 0;

    /* The first four bytes of the array hold the module ID, the next four
       bytes hold the print level, and the converted string starts at the
       ninth byte. Four extra bytes are reserved as a guard so that the
       conversion to a string cannot run out of bounds. */
    /*lint -e813 */
    VOS_CHAR            acOutput[VOS_MAX_PRINT_LEN + 12];
    /*lint +e813 */

#if (VOS_OS_VER == VOS_LINUX)
    if(in_interrupt())
    {
        printk("\r\nOM_Printf: RUN in the IRQ");

        return OM_ERR_RUNINIRQ;
    }
#endif

    *((VOS_UINT32*)acOutput)     = ACPU_PID_OM;
    *(((VOS_UINT32*)acOutput)+1) = LOG_LEVEL_INFO;

    /* Convert the format string and the variable arguments into a string */
    va_start( argument, pcformat );
    lRetLen = VOS_nvsprintf(acOutput + OM_PRINTF_OFFSET, VOS_MAX_PRINT_LEN, pcformat, argument);
    va_end( argument );

    /* Append the string terminator */
    acOutput[VOS_MAX_PRINT_LEN + OM_PRINTF_OFFSET - 1] = '\0';

    /* Check the conversion result and add a matching warning to the converted string */
    if( lRetLen >= (VOS_MAX_PRINT_LEN - 1) )
    {
        pcWarning = "OM_Printf: Warning! Print too long!!!";
        ulTempLen = VOS_StrLen(pcWarning );
        VOS_MemCpy(acOutput + OM_PRINTF_OFFSET, pcWarning, ulTempLen);

        /* Add a newline at the second-to-last byte of the converted string */
        acOutput[VOS_MAX_PRINT_LEN + OM_PRINTF_OFFSET - 2] = '\n';
        ulDataLen = VOS_MAX_PRINT_LEN + OM_PRINTF_OFFSET - 1;
    }
    else if( lRetLen < 0 )
    {
        pcWarning = "OM_Printf:unknown internal error.\r\n";
        VOS_StrCpy(acOutput + OM_PRINTF_OFFSET, pcWarning );
        ulDataLen = VOS_StrLen(pcWarning ) + OM_PRINTF_OFFSET;
    }
    else
    {
        ulDataLen = (VOS_UINT32)lRetLen + OM_PRINTF_OFFSET;
    }

    ulReturn = OM_PrintfDataPut(acOutput, ulDataLen);

    return ulReturn;
}
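
A minimal, hypothetical caller for OM_Printf; OM_OK comes from the function above, while the helper name and message are illustrative assumptions.

/* Sketch: OM_Printf formats like printf and returns an OM status code. */
static void Example_ReportValue(int lValue)
{
    unsigned long ulRet;

    ulRet = OM_Printf("example: value = %d\n", lValue);
    if (OM_OK != ulRet)
    {
        /* Logging failed (e.g. called from IRQ context on Linux). */
    }
}
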
/*****************************************************************************
 Function    : MNTN_ErrorLog
 Description : Record error information in the ERRORLOG
 Input       : cFileName: name of the file at the call site
               ulFileId: ID of the file at the call site
               ulLine: line number of the call site within that file
               ulErrNo: error number index
               pRecord: location where the record is stored
               ulLen: length of the whole pRecord, including the record
                      header, in bytes
 Output      : VOID
 Return      : VOS_ERR: an error occurred during execution
               VOS_OK: the function executed normally
 History     :
  1. Date    : 2011-07-01
     Author  : g47350
     Change  : Newly created function
*****************************************************************************/
unsigned int MNTN_ErrorLog(char * cFileName, unsigned int ulFileId, unsigned int ulLine,
                unsigned int ulErrNo, void *pRecord, unsigned int ulLen)
{
    ERRORLOG_REQ_STRU       *pstErrorLogReq;
    ERRORLOG_CNF_STRU       *pstErrorLogCnf;
    VOS_UINT32              ulResult;

    /* Parameter check */
    if ((VOS_NULL_PTR == cFileName) || (VOS_NULL_PTR == pRecord))
    {
        return OM_ACPU_PARA_ERR;
    }

#if (VOS_OS_VER == VOS_LINUX)
    if( in_interrupt() )
    {
        return OM_ACPU_RUN_IRQ;
    }
#endif

    pstErrorLogReq = (ERRORLOG_REQ_STRU*)VOS_AllocMsg(ACPU_PID_OMAGENT,
                                                ERRORLOG_HEAD_LEN + ulLen);

    /* Message allocation failed */
    if (VOS_NULL_PTR == pstErrorLogReq)
    {
        return OM_ACPU_ALLOC_FAIL;
    }

    pstErrorLogReq->ulReceiverPid = CCPU_PID_OMAGENT;
    pstErrorLogReq->usPrimId      = ERRORLOG_REQ;
    pstErrorLogReq->ulFileId      = ulFileId;
    pstErrorLogReq->ulLine        = ulLine;
    pstErrorLogReq->ulErrNo       = ulErrNo;
    pstErrorLogReq->ulLen         = ulLen;

    /* Copy one extra byte so that the last byte of aucFileName is guaranteed to be '\0' */
    VOS_MemCpy(pstErrorLogReq->aucFileName, cFileName, VOS_StrLen(cFileName) + 1);
    VOS_MemCpy(pstErrorLogReq->aucData, pRecord, ulLen);


    /* If another task is already in progress, wait for it to finish */
    if (VOS_OK != VOS_SmP(g_ulOmAcpuSyncSem, 0))
    {
        VOS_FreeMsg(ACPU_PID_OMAGENT, pstErrorLogReq);

        return OM_ACPU_SYNC_TIMEOUT;
    }

    /* Send the request message to the CCPU */
    if (VOS_OK != VOS_SendMsg(ACPU_PID_OMAGENT, pstErrorLogReq))
    {
        VOS_SmV(g_ulOmAcpuSyncSem);

        return OM_ACPU_SEND_FAIL;
    }

    /* Wait for the reply from the CCPU */
    if (VOS_OK != VOS_SmP(g_ulOmAcpuCnfSem, WAITING_CNF_TIMEOUT_LEN))
    {
        VOS_SmV(g_ulOmAcpuSyncSem);

        return OM_ACPU_CNF_TIMEOUT;
    }

    pstErrorLogCnf = (ERRORLOG_CNF_STRU*)g_pstOmAcpuCnfMsg;

    /* Check that the content of the reply message is correct */
    if (ERRORLOG_CNF != pstErrorLogCnf->usPrimId)
    {
        VOS_FreeReservedMsg(ACPU_PID_OMAGENT, g_pstOmAcpuCnfMsg);

        VOS_SmV(g_ulOmAcpuSyncSem);

        return OM_ACPU_CNF_ERR;
    }

    ulResult = (VOS_UINT32)pstErrorLogCnf->usResult;

    VOS_FreeReservedMsg(ACPU_PID_OMAGENT, g_pstOmAcpuCnfMsg);

    VOS_SmV(g_ulOmAcpuSyncSem);

    return ulResult;
}
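
A hedged sketch of a call site for MNTN_ErrorLog; every ID and the record layout below are placeholders, not values from this code base.

/* Hypothetical call site: log a small error record synchronously. */
static void Example_LogTimeout(void)
{
    unsigned char aucRecord[16] = {0};      /* record header + payload (ulLen bytes) */

    (void)MNTN_ErrorLog(__FILE__,           /* file name at the call site */
                        0x1234,             /* assumed file ID            */
                        __LINE__,           /* line number                */
                        0x01,               /* assumed error number index */
                        aucRecord,
                        sizeof(aucRecord));
}
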
Example No. 3
/**
 *	cpufreq_set - change the CPU clock frequency.
 *	@freq: frequency (in kHz) at which we should run.
 *
 *	Set the CPU clock frequency, informing all registered users of
 *	the change. We bound the frequency according to the cpufreq_max
 *	command line parameter, and the parameters the registered users
 *	will allow.
 *
 *	This function must be called from process context, and on the
 *	cpu that we wish to change the frequency of.
 *
 *	We return 0 if successful, or -ENXIO if no cpufreq set/validate
 *	handlers have been registered.
 */
int cpufreq_set(unsigned int freq)
{
	unsigned long old_cpus;
	struct cpufreq_info clkinfo;
	struct cpufreq_minmax minmax;
	int cpu = smp_processor_id();
	int ret;

	if (!cpufreq_initialised)
		panic("cpufreq_set() called before initialisation!");
	if (in_interrupt())
		panic("cpufreq_set() called from interrupt context!");

	/*
	 * Bind to the current CPU.
	 */
	old_cpus = current->cpus_allowed;
	current->cpus_allowed = 1UL << cpu_logical_map(cpu);

	down(&cpufreq_sem);
	ret = -ENXIO;
	if (!cpufreq_setspeed || !cpufreq_validatespeed)
		goto out;

	/*
	 * Don't allow the CPU to be clocked over the limit.
	 */
	minmax.min_freq = cpufreq_min(cpu);
	minmax.max_freq = cpufreq_max(cpu);
	minmax.cur_freq = cpufreq_current(cpu);
	minmax.new_freq = freq;

	/*
	 * Find out what the registered devices will currently tolerate,
	 * and limit the requested clock rate to these values.  Drivers
	 * must not rely on the 'new_freq' value - it is only a guide.
	 */
	notifier_call_chain(&cpufreq_notifier_list, CPUFREQ_MINMAX, &minmax);
	if (freq < minmax.min_freq)
		freq = minmax.min_freq;
	if (freq > minmax.max_freq)
		freq = minmax.max_freq;

	/*
	 * Ask the CPU specific code to validate the speed.  If the speed
	 * is not acceptable, make it acceptable.  Current policy is to
	 * round the frequency down to the value the processor actually
	 * supports.
	 */
	freq = cpufreq_validatespeed(freq);

	if (cpufreq_current(cpu) != freq) {
		clkinfo.old_freq = cpufreq_current(cpu);
		clkinfo.new_freq = freq;

		notifier_call_chain(&cpufreq_notifier_list, CPUFREQ_PRECHANGE,
				    &clkinfo);

		adjust_jiffies(CPUFREQ_PRECHANGE, &clkinfo);

		/*
		 * Actually set the CPU frequency.
		 */
		cpufreq_setspeed(freq);
		cpufreq_current(cpu) = freq;
		adjust_jiffies(CPUFREQ_POSTCHANGE, &clkinfo);

		notifier_call_chain(&cpufreq_notifier_list, CPUFREQ_POSTCHANGE,
				    &clkinfo);
	}

	ret = 0;

 out:
	up(&cpufreq_sem);

	current->cpus_allowed = old_cpus;

	return ret;
}
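
Per the kernel-doc above, cpufreq_set() must be called from process context on the CPU whose clock is being changed. A minimal sketch with an illustrative target frequency:

/* Sketch only: request 200000 kHz; the driver clamps the value to what the
 * registered notifiers and the processor actually support. */
static int example_throttle(void)
{
	return cpufreq_set(200000);	/* never call this from an IRQ */
}
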
Example No. 4
static void
common_shutdown_1(void *generic_ptr)
{
	struct halt_info *how = (struct halt_info *)generic_ptr;
	struct percpu_struct *cpup;
	unsigned long *pflags, flags;
	int cpuid = smp_processor_id();

	/* No point in taking interrupts anymore. */
	local_irq_disable();

	cpup = (struct percpu_struct *)
			((unsigned long)hwrpb + hwrpb->processor_offset
			 + hwrpb->processor_size * cpuid);
	pflags = &cpup->flags;
	flags = *pflags;

	/* Clear reason to "default"; clear "bootstrap in progress". */
	flags &= ~0x00ff0001UL;

#ifdef CONFIG_SMP
	/* Secondaries halt here. */
	if (cpuid != boot_cpuid) {
		flags |= 0x00040000UL; /* "remain halted" */
		*pflags = flags;
		set_cpu_present(cpuid, false);
		set_cpu_possible(cpuid, false);
		halt();
	}
#endif

	if (how->mode == LINUX_REBOOT_CMD_RESTART) {
		if (!how->restart_cmd) {
			flags |= 0x00020000UL; /* "cold bootstrap" */
		} else {
			/* For SRM, we could probably set environment
			   variables to get this to work.  We'd have to
			   delay this until after srm_paging_stop unless
			   we ever got srm_fixup working.

			   At the moment, SRM will use the last boot device,
			   but the file and flags will be the defaults, when
			   doing a "warm" bootstrap.  */
			flags |= 0x00030000UL; /* "warm bootstrap" */
		}
	} else {
		flags |= 0x00040000UL; /* "remain halted" */
	}
	*pflags = flags;

#ifdef CONFIG_SMP
	/* Wait for the secondaries to halt. */
	set_cpu_present(boot_cpuid, false);
	set_cpu_possible(boot_cpuid, false);
	while (cpus_weight(cpu_present_map))
		barrier();
#endif

	/* If booted from SRM, reset some of the original environment. */
	if (alpha_using_srm) {
#ifdef CONFIG_DUMMY_CONSOLE
		/* If we've gotten here after SysRq-b, leave interrupt
		   context before taking over the console. */
		if (in_interrupt())
			irq_exit();
		/* This has the effect of resetting the VGA video origin.  */
		take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1);
#endif
		pci_restore_srm_config();
		set_hae(srm_hae);
	}

	if (alpha_mv.kill_arch)
		alpha_mv.kill_arch(how->mode);

	if (! alpha_using_srm && how->mode != LINUX_REBOOT_CMD_RESTART) {
		/* Unfortunately, since MILO doesn't currently understand
		   the hwrpb bits above, we can't reliably halt the 
		   processor and keep it halted.  So just loop.  */
		return;
	}

	if (alpha_using_srm)
		srm_paging_stop();

	halt();
}
Example No. 5
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
			      unsigned long vector, int write_acc)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	siginfo_t info;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * NOTE2: This is done so that, when updating the vmalloc
	 * mappings we don't have to walk all processes pgdirs and
	 * add the high mappings all at once. Instead we do it as they
	 * are used. However vmalloc'ed page entries have the PAGE_GLOBAL
	 * bit set so sometimes the TLB can use a lingering entry.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection error.
	 */

	if (address >= VMALLOC_START &&
	    (vector != 0x300 && vector != 0x400) &&
	    !user_mode(regs))
		goto vmalloc_fault;

	/* If exceptions were enabled, we can reenable them here */
	if (user_mode(regs)) {
		/* Exception was in userspace: reenable interrupts */
		local_irq_enable();
		flags |= FAULT_FLAG_USER;
	} else {
		/* If exception was in a syscall, then IRQ's may have
		 * been enabled or disabled.  If they were enabled,
		 * reenable them.
		 */
		if (regs->sr & (SPR_SR_IEE | SPR_SR_TEE))
			local_irq_enable();
	}

	mm = tsk->mm;
	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */

	if (in_interrupt() || !mm)
		goto no_context;

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;

	if (vma->vm_start <= address)
		goto good_area;

	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (user_mode(regs)) {
		/*
		 * accessing the stack below usp is always a bug.
		 * we get page-aligned addresses so we can only check
		 * if we're within a page from usp, but that might be
		 * enough to catch brutal errors at least.
		 */
		if (address + PAGE_SIZE < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

good_area:
	info.si_code = SEGV_ACCERR;

	/* first do some preliminary protection checks */

	if (write_acc) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		/* not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/* are we trying to execute nonexecutable area */
	if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC))
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */

	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*RGD modeled on Cris */
		if (fault & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */

bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:

	/* User mode accesses just cause a SIGSEGV */

	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);

		printk("%s%s[%d]: segfault at %lx pc %p sp %p\n",
                       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
                       tsk->comm, task_pid_nr(tsk), address,
                       (void *)regs->pc, (void *)regs->sp);
		return;
	}

no_context:

	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
 *  when it accesses user-memory. When it fails in one
	 *  of those points, we find it in a table and do a jump
	 *  to some fixup code that loads an appropriate error
	 *  code)
	 */

	{
		const struct exception_table_entry *entry;

		__asm__ __volatile__("l.nop 42");

		if ((entry = search_exception_tables(regs->pc)) != NULL) {
			/* Adjust the instruction pointer in the stackframe */
			regs->pc = entry->fixup;
			return;
		}
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	if ((unsigned long)(address) < PAGE_SIZE)
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel access");
	printk(" at virtual address 0x%08lx\n", address);

	die("Oops", regs, write_acc);

	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */

out_of_memory:
	__asm__ __volatile__("l.nop 42");
	__asm__ __volatile__("l.nop 1");

	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Use current_pgd instead of tsk->active_mm->pgd
		 * since the latter might be unavailable if this
		 * code is executed in a misfortunately run irq
		 * (like inside schedule() between switch_mm and
		 *  switch_to...).
		 */

		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

/*
		phx_warn("do_page_fault(): vmalloc_fault will not work, "
			 "since current_pgd assign a proper value somewhere\n"
			 "anyhow we don't need this at the moment\n");

		phx_mmu("vmalloc_fault");
*/
		pgd = (pgd_t *)current_pgd[smp_processor_id()] + offset;
		pgd_k = init_mm.pgd + offset;

		/* Since we're two-level, we don't need to do both
		 * set_pgd and set_pmd (they do the same thing). If
		 * we go three-level at some point, do the right thing
		 * with pgd_present and set_pgd here.
		 *
		 * Also, since the vmalloc area is global, we don't
		 * need to copy individual PTE's, it is enough to
		 * copy the pgd pointer into the pte page of the
		 * root task. If that is there, we'll find our pte if
		 * it exists.
		 */

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);

		if (!pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		set_pmd(pmd, *pmd_k);

		/* Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		return;
	}
}
Example No. 6
/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault. For 400-family processors
 * the error_code parameter is ESR for a data fault, 0 for an instruction
 * fault.
 */
void do_page_fault(struct pt_regs *regs, unsigned long address,
		   unsigned long error_code)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	siginfo_t info;
	int code = SEGV_MAPERR;
#if defined(CONFIG_4xx)
	int is_write = error_code & ESR_DST;
#else
	int is_write = 0;

	/*
	 * Fortunately the bit assignments in SRR1 for an instruction
	 * fault and DSISR for a data fault are mostly the same for the
	 * bits we are interested in.  But there are some bits which
	 * indicate errors in DSISR but can validly be set in SRR1.
	 */
	if (regs->trap == 0x400)
		error_code &= 0x48200000;
	else
		is_write = error_code & 0x02000000;
#endif /* CONFIG_4xx */

#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
	if (debugger_fault_handler && regs->trap == 0x300) {
		debugger_fault_handler(regs);
		return;
	}
#if !defined(CONFIG_4xx)
	if (error_code & 0x00400000) {
		/* DABR match */
		if (debugger_dabr_match(regs))
			return;
	}
#endif /* !CONFIG_4xx */
#endif /* CONFIG_XMON || CONFIG_KGDB */

	if (in_interrupt() || mm == NULL) {
		bad_page_fault(regs, address);
		return;
	}
	down(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	code = SEGV_ACCERR;
#if defined(CONFIG_6xx)
	if (error_code & 0x95700000)
		/* an error such as lwarx to I/O controller space,
		   address matching DABR, eciwx, etc. */
		goto bad_area;
#endif /* CONFIG_6xx */
#if defined(CONFIG_8xx)
	/* The MPC8xx seems to always set 0x80000000, which is
	 * "undefined".  Of those that can be set, this is the only
	 * one which seems bad.
	 */
	if (error_code & 0x10000000)
		/* Guarded storage error. */
		goto bad_area;
#endif /* CONFIG_8xx */
	
	/* a write */
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	/* a read */
	} else {
		/* protection fault */
		if (error_code & 0x08000000)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	switch (handle_mm_fault(mm, vma, address, is_write)) {
	case 1:
		current->min_flt++;
		break;
	case 2:
		current->maj_flt++;
		break;
	case 0:
		goto do_sigbus;
	default:
		goto out_of_memory;
	}

	up(&mm->mmap_sem);
	/*
	 * keep track of tlb+htab misses that are good addrs but
	 * just need pte's created via handle_mm_fault()
	 * -- Cort
	 */
	pte_misses++;
	return;

bad_area:
	up(&mm->mmap_sem);
	pte_errors++;	

	/* User mode accesses cause a SIGSEGV */
	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = code;
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, current);
		return;
	}

	bad_page_fault(regs, address);
	return;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up(&mm->mmap_sem);
	printk("VM: killing process %s\n", current->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	bad_page_fault(regs, address);
	return;

do_sigbus:
	up(&mm->mmap_sem);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info (SIGBUS, &info, current);
	if (!user_mode(regs))
		bad_page_fault(regs, address);
}
Example No. 7
static int myri_open(struct net_device *dev)
{
	struct myri_eth *mp = netdev_priv(dev);

	return myri_init(mp, in_interrupt());
}
int mddi_host_register_multiwrite(uint32 reg_addr,
	uint32 *value_list_ptr,
	uint32 value_count, boolean wait, mddi_llist_done_cb_type done_cb,
	mddi_host_type host)
{
	mddi_linked_list_type *curr_llist_ptr;
	mddi_linked_list_type *curr_llist_dma_ptr;
	mddi_register_access_packet_type *regacc_pkt_ptr;
	uint16 curr_llist_idx;
	int ret = 0;

	if (!value_list_ptr || !value_count ||
		value_count > MDDI_HOST_MAX_CLIENT_REG_IN_SAME_ADDR) {
		MDDI_MSG_ERR("\n Invalid value_list or value_count");
		return -EINVAL;
	}

	if (in_interrupt())
		MDDI_MSG_CRIT("Called from ISR context\n");

	if (!mddi_host_powered) {
		MDDI_MSG_ERR("MDDI powered down!\n");
		mddi_init();
	}

	down(&mddi_host_mutex);

	curr_llist_idx = mddi_get_next_free_llist_item(host, TRUE);
	curr_llist_ptr = &llist_extern[host][curr_llist_idx];
	curr_llist_dma_ptr = &llist_dma_extern[host][curr_llist_idx];

	curr_llist_ptr->link_controller_flags = 1;
	curr_llist_ptr->packet_header_count = 14;
	curr_llist_ptr->packet_data_count =
		(uint16)(value_count * 4);

	curr_llist_ptr->next_packet_pointer = NULL;
	curr_llist_ptr->reserved = 0;

	regacc_pkt_ptr = &curr_llist_ptr->packet_header.register_pkt;

	regacc_pkt_ptr->packet_length = curr_llist_ptr->packet_header_count
		+ curr_llist_ptr->packet_data_count;
	regacc_pkt_ptr->packet_type = 146;	/* register access packet */
	regacc_pkt_ptr->bClient_ID = 0;
	regacc_pkt_ptr->read_write_info = value_count;
	regacc_pkt_ptr->register_address = reg_addr;
	memcpy((void *)&regacc_pkt_ptr->register_data_list[0], value_list_ptr,
		   curr_llist_ptr->packet_data_count);

	regacc_pkt_ptr = &curr_llist_dma_ptr->packet_header.register_pkt;
	curr_llist_ptr->packet_data_pointer =
		(void *)(&regacc_pkt_ptr->register_data_list[0]);
	MDDI_MSG_DEBUG("MultiReg Access write reg=0x%x, value[0]=0x%x\n",
		       regacc_pkt_ptr->register_address,
		       regacc_pkt_ptr->register_data_list[0]);

	/* now adjust pointers */
	mddi_queue_forward_packets(curr_llist_idx, curr_llist_idx, wait,
				   done_cb, host);

	up(&mddi_host_mutex);

	if (wait) {
		int wait_ret;

		mddi_linked_list_notify_type *llist_notify_ptr;
		llist_notify_ptr = &llist_extern_notify[host][curr_llist_idx];
		wait_ret = wait_for_completion_timeout(
					&(llist_notify_ptr->done_comp), 5 * HZ);

		if (wait_ret <= 0)
			ret = -EBUSY;

		if (wait_ret < 0)
			printk(KERN_ERR "%s: failed to wait for completion!\n",
				__func__);
		else if (!wait_ret)
			printk(KERN_ERR "%s: Timed out waiting!\n", __func__);
	}

	return ret;
}
int mddi_host_register_multiread(uint32 reg_addr,
	uint32 *value_list_ptr, uint32 value_count,
	boolean wait, mddi_host_type host) {
	mddi_linked_list_type *curr_llist_ptr;
	mddi_register_access_packet_type *regacc_pkt_ptr;
	uint16 curr_llist_idx;
	int ret = 0;

	if (!value_list_ptr || !value_count ||
		value_count >= MDDI_HOST_MAX_CLIENT_REG_IN_SAME_ADDR) {
		MDDI_MSG_ERR("\n Invalid value_list or value_count");
		return -EINVAL;
	}

	if (in_interrupt())
		MDDI_MSG_CRIT("Called from ISR context\n");

	if (!mddi_host_powered) {
		MDDI_MSG_ERR("MDDI powered down!\n");
		mddi_init();
	}

	down(&mddi_host_mutex);

	mddi_reg_read_value_ptr = value_list_ptr;
	curr_llist_idx = mddi_get_reg_read_llist_item(host, TRUE);
	if (curr_llist_idx == UNASSIGNED_INDEX) {
		up(&mddi_host_mutex);

		/* need to change this to some sort of wait */
		MDDI_MSG_ERR("Attempting to queue up more than 1 reg read\n");
		return -EINVAL;
	}

	curr_llist_ptr = &llist_extern[host][curr_llist_idx];
	curr_llist_ptr->link_controller_flags = 0x11;
	curr_llist_ptr->packet_header_count = 14;
	curr_llist_ptr->packet_data_count = 0;

	curr_llist_ptr->next_packet_pointer = NULL;
	curr_llist_ptr->packet_data_pointer = NULL;
	curr_llist_ptr->reserved = 0;

	regacc_pkt_ptr = &curr_llist_ptr->packet_header.register_pkt;

	regacc_pkt_ptr->packet_length = curr_llist_ptr->packet_header_count;
	regacc_pkt_ptr->packet_type = 146;	/* register access packet */
	regacc_pkt_ptr->bClient_ID = 0;
	regacc_pkt_ptr->read_write_info = 0x8000 | value_count;
	regacc_pkt_ptr->register_address = reg_addr;

	/* now adjust pointers */
	mddi_queue_forward_packets(curr_llist_idx, curr_llist_idx, wait,
				   NULL, host);
	/* need to check if we can write the pointer or not */

	up(&mddi_host_mutex);

	if (wait) {
		int wait_ret;

		mddi_linked_list_notify_type *llist_notify_ptr;
		llist_notify_ptr = &llist_extern_notify[host][curr_llist_idx];
		wait_ret = wait_for_completion_timeout(
					&(llist_notify_ptr->done_comp), 5 * HZ);

		if (wait_ret <= 0)
			ret = -EBUSY;

		if (wait_ret < 0)
			printk(KERN_ERR "%s: failed to wait for completion!\n",
				__func__);
		else if (!wait_ret)
			printk(KERN_ERR "%s: Timed out waiting!\n", __func__);

		if (!ret && (mddi_reg_read_value_ptr == value_list_ptr) &&
			(*value_list_ptr == -EBUSY)) {
			printk(KERN_ERR "%s - failed to get data from client",
				   __func__);
			mddi_reg_read_value_ptr = NULL;
			ret = -EBUSY;
		}
	}

	MDDI_MSG_DEBUG("MultiReg Read value[0]=0x%x\n", *value_list_ptr);

	return ret;
}
Example No. 10
void mddi_host_register_cmds_write8(unsigned reg_addr, unsigned count, unsigned char reg_val[], boolean wait, mddi_llist_done_cb_type done_cb, mddi_host_type host)
{
	mddi_linked_list_type *curr_llist_ptr;
	mddi_linked_list_type *curr_llist_dma_ptr;
	mddi_register_access_packet_type *regacc_pkt_ptr;
	unsigned curr_llist_idx;

	unsigned *data_list;
	unsigned data32, count32;
	unsigned i;

	if (in_interrupt()) {
		MDDI_MSG_CRIT("Called from ISR context\n");
	}

	if (!mddi_host_powered) {
		MDDI_MSG_ERR("MDDI powered down!\n");
		mddi_init();
	}

	count32 = (count + 3) / 4;

	down(&mddi_host_mutex);

	curr_llist_idx = mddi_get_next_free_llist_item(host, TRUE);
	curr_llist_ptr = &llist_extern[host][curr_llist_idx];
	curr_llist_dma_ptr = &llist_dma_extern[host][curr_llist_idx];

	curr_llist_ptr->link_controller_flags = 1;
	curr_llist_ptr->packet_header_count = 14;
	curr_llist_ptr->packet_data_count = count32 * 4;

	curr_llist_ptr->next_packet_pointer = NULL;
	curr_llist_ptr->reserved = 0;

	regacc_pkt_ptr = &curr_llist_ptr->packet_header.register_pkt;
	
	regacc_pkt_ptr->packet_length = curr_llist_ptr->packet_header_count + (count32 * 4);
	regacc_pkt_ptr->packet_type = 146;	/* register access packet */
	regacc_pkt_ptr->bClient_ID = 0;
	regacc_pkt_ptr->read_write_info = count32;
	regacc_pkt_ptr->register_address = reg_addr;
		
//	data_list = &regacc_pkt_ptr->register_data_list;
	data_list = (void *)(&(regacc_pkt_ptr->register_data_list[0]));

	for (i = 0; i < count32; i++) {
		data32 = *((unsigned*)(reg_val + (i * 4)));
		*(data_list + i) = data32;
		MDDI_MSG_DEBUG("Mddi command packet data: 0x%x\n", data32);
	}

	regacc_pkt_ptr = &curr_llist_dma_ptr->packet_header.register_pkt;
	curr_llist_ptr->packet_data_pointer =
//	    (void *)(&regacc_pkt_ptr->register_data_list);
		(void *)(&(regacc_pkt_ptr->register_data_list[0]));

	/* now adjust pointers */
	mddi_queue_forward_packets(curr_llist_idx, curr_llist_idx, wait,
				   done_cb, host);

	up(&mddi_host_mutex);

	if (wait) {
		mddi_linked_list_notify_type *llist_notify_ptr;
		llist_notify_ptr = &llist_extern_notify[host][curr_llist_idx];
		wait_for_completion_interruptible(&
						  (llist_notify_ptr->
						   done_comp));
	}

} /* mddi_host_register_cmds_write8 */
Example No. 11
void mddi_host_register_cmds_write32(unsigned reg_addr, unsigned count, unsigned int reg_val[], boolean wait, mddi_llist_done_cb_type done_cb, mddi_host_type host)
{
	mddi_linked_list_type *curr_llist_ptr;
	mddi_linked_list_type *curr_llist_dma_ptr;
	mddi_register_access_packet_type *regacc_pkt_ptr;
	unsigned curr_llist_idx;

	unsigned *data_list;
	unsigned i;

	MDDI_MSG_DEBUG("%s: started.\n", __func__);

	if (in_interrupt()) {
		MDDI_MSG_CRIT("Called from ISR context\n");
	}

	if (!mddi_host_powered) {
		MDDI_MSG_ERR("MDDI powered down!\n");
		mddi_init();
	}

	down(&mddi_host_mutex);

	curr_llist_idx = mddi_get_next_free_llist_item(host, TRUE);
	curr_llist_ptr = &llist_extern[host][curr_llist_idx];
	curr_llist_dma_ptr = &llist_dma_extern[host][curr_llist_idx];

	curr_llist_ptr->link_controller_flags = 1;
	curr_llist_ptr->packet_header_count = 14;
	curr_llist_ptr->packet_data_count = count << 2;

	curr_llist_ptr->next_packet_pointer = NULL;
	curr_llist_ptr->reserved = 0;

	regacc_pkt_ptr = &curr_llist_ptr->packet_header.register_pkt;
	
	regacc_pkt_ptr->packet_length = curr_llist_ptr->packet_header_count + (count << 2);
	regacc_pkt_ptr->packet_type = 146;	/* register access packet */
	regacc_pkt_ptr->bClient_ID = 0;
	regacc_pkt_ptr->read_write_info = count;
	regacc_pkt_ptr->register_address = reg_addr;

//	data_list = &regacc_pkt_ptr->register_data_list;
	data_list = (void *)(&(regacc_pkt_ptr->register_data_list[0]));

	for (i = 0; i < count; i++) {
		data_list[i] = reg_val[i];
	}
	
//	MDDI_MSG_DEBUG("Reg Access write reg=0x%x, value=0x%x\n",
//		       regacc_pkt_ptr->register_address,
//		       regacc_pkt_ptr->register_data_list);

	regacc_pkt_ptr = &curr_llist_dma_ptr->packet_header.register_pkt;
	curr_llist_ptr->packet_data_pointer =
//	    (void *)(&regacc_pkt_ptr->register_data_list);
		(void *)(&(regacc_pkt_ptr->register_data_list[0]));

	/* now adjust pointers */
	mddi_queue_forward_packets(curr_llist_idx, curr_llist_idx, wait,
				   done_cb, host);

	up(&mddi_host_mutex);

	if (wait) {
		mddi_linked_list_notify_type *llist_notify_ptr;
		llist_notify_ptr = &llist_extern_notify[host][curr_llist_idx];
		wait_for_completion_interruptible(&
						  (llist_notify_ptr->
						   done_comp));
	}

} /* mddi_host_register_cmds_write32 */
Example No. 12
int mddi_host_register_write(uint32 reg_addr,
     uint32 reg_val, enum mddi_data_packet_size_type packet_size,
     boolean wait, mddi_llist_done_cb_type done_cb, mddi_host_type host) {
	mddi_linked_list_type *curr_llist_ptr;
	mddi_linked_list_type *curr_llist_dma_ptr;
	mddi_register_access_packet_type *regacc_pkt_ptr;
	uint16 curr_llist_idx;
	int ret = 0;

	if (in_interrupt())
		MDDI_MSG_CRIT("Called from ISR context\n");

	if (!mddi_host_powered) {
		MDDI_MSG_ERR("MDDI powered down!\n");
		mddi_init();
	}

	down(&mddi_host_mutex);

	curr_llist_idx = mddi_get_next_free_llist_item(host, TRUE);
	curr_llist_ptr = &llist_extern[host][curr_llist_idx];
	curr_llist_dma_ptr = &llist_dma_extern[host][curr_llist_idx];

	curr_llist_ptr->link_controller_flags = 1;
	curr_llist_ptr->packet_header_count = 14;
	curr_llist_ptr->packet_data_count = 4;

	curr_llist_ptr->next_packet_pointer = NULL;
	curr_llist_ptr->reserved = 0;

	regacc_pkt_ptr = &curr_llist_ptr->packet_header.register_pkt;

	regacc_pkt_ptr->packet_length = curr_llist_ptr->packet_header_count +
					(uint16)packet_size;
	regacc_pkt_ptr->packet_type = 146;	/* register access packet */
	regacc_pkt_ptr->bClient_ID = 0;
	regacc_pkt_ptr->read_write_info = 0x0001;
	regacc_pkt_ptr->register_address = reg_addr;
	regacc_pkt_ptr->register_data_list[0] = reg_val;

	MDDI_MSG_DEBUG("Reg Access write reg=0x%x, value=0x%x\n",
		       regacc_pkt_ptr->register_address,
		       regacc_pkt_ptr->register_data_list[0]);

	regacc_pkt_ptr = &curr_llist_dma_ptr->packet_header.register_pkt;
	curr_llist_ptr->packet_data_pointer =
	    (void *)(&regacc_pkt_ptr->register_data_list[0]);

	/* now adjust pointers */
	mddi_queue_forward_packets(curr_llist_idx, curr_llist_idx, wait,
				   done_cb, host);

	up(&mddi_host_mutex);

	if (wait) {
		int wait_ret;

		mddi_linked_list_notify_type *llist_notify_ptr;
		llist_notify_ptr = &llist_extern_notify[host][curr_llist_idx];
		wait_ret = wait_for_completion_timeout(
					&(llist_notify_ptr->done_comp), 5 * HZ);

		if (wait_ret <= 0)
			ret = -EBUSY;

		if (wait_ret < 0)
			printk(KERN_ERR "%s: failed to wait for completion!\n",
				__func__);
		else if (!wait_ret)
			printk(KERN_ERR "%s: Timed out waiting!\n", __func__);
	}

	return ret;
}				/* mddi_host_register_write */
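
A hedged example of a synchronous single-register write built on the function above; the register address, packet-size enumerator, and host selector are assumptions about this driver's headers, not confirmed values.

/* Sketch: write one 32-bit value and block until the link layer is done.
 * MDDI_DATA_PACKET_4_BYTES and MDDI_HOST_PRIM are assumed enum values. */
static int example_write_reg(void)
{
	return mddi_host_register_write(0x0110, 0x1, MDDI_DATA_PACKET_4_BYTES,
					TRUE, NULL, MDDI_HOST_PRIM);
}
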
static void ieee80211p_tx(struct ieee80211_hw *hw, struct sk_buff *skb) {
	
	/* Netlink message header */
	struct nlmsghdr *nlh = NULL;

	/* Netlink skb */
	struct sk_buff *nlskb = NULL;

	/* The size of the skb */	
	int skblen;

	/* Get driver's private data */
	struct ieee80211p_priv *priv = hw->priv;

	/* Get the number of the TX queue */
	int qnum = skb_get_queue_mapping(skb);

	/* Return value */
	int ret = 0;

	printk(KERN_ERR "ieee80211p_tx: receiving data from ieee80211\n");
	
	if (qnum >= IEEE80211P_NUM_TXQ) {
		printk(KERN_ERR "ieee80211p_tx: wrong queue number\n");
		dev_kfree_skb_any(skb);
		return;
	}

	if (priv->pid_softmodem == 0) {
		printk(KERN_ERR "ieee80211p_tx: softmodem pid unknown\n");
		dev_kfree_skb_any(skb);
		return;
	}

	/* Get the size of the skb */
	if (skb->data_len == 0) {
		skblen = skb->len;
	}
	else {
		printk(KERN_ERR "ieee80211p_tx: skb not linear\n");
		dev_kfree_skb_any(skb);
		return;	
	}	

	/* Allocate nlskb */
	nlskb = alloc_skb(NLMSG_SPACE(skblen), in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);

	if (nlskb == NULL) {
		printk(KERN_ERR "ieee80211p_tx: alloc nlskb failed\n");
		return;
	}    

	/* Add room for the nlmsg header */
	skb_put(nlskb, NLMSG_SPACE(skblen));

	/* Configure the nlmsg header */
	nlh = (struct nlmsghdr *)nlskb->data;
	nlh->nlmsg_len = NLMSG_SPACE(skblen);
	nlh->nlmsg_pid = priv->pid_softmodem;
	nlh->nlmsg_flags = 0;

	NETLINK_CB(nlskb).pid = 0; /* nlmsg sent from kernel */
	NETLINK_CB(nlskb).dst_group = NETLINK_80211P_GROUP;

	/* Copy the data from the skb to the nlskb */
	memcpy(NLMSG_DATA(nlh), skb->data, skb->len);

	/* Free the old skb */
	dev_kfree_skb_any(skb);

	printk(KERN_ERR "ieee80211p_tx: sending data to PHY using pid = %d\n", priv->pid_softmodem);

	ret = netlink_unicast(priv->nl_sock, nlskb, priv->pid_softmodem,
			      NETLINK_80211P_GROUP);

	if (ret <= 0) {
		printk(KERN_ERR "ieee80211p_tx: netlink mesg not sent ret = %d\n", ret);
		return;
	}

} /* ieee80211p_tx */
Example No. 14
int
nvram_commit(void)
{
	char *buf;
	size_t erasesize, len, magic_len;
	unsigned int i;
	int ret;
	struct nvram_header *header;
	unsigned long flags;
	u_int32_t offset;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	struct erase_info erase;
	u_int32_t magic_offset = 0; /* Offset for writing MAGIC # */

	if (!nvram_mtd) {
		printk("nvram_commit: NVRAM not found\n");
		return -ENODEV;
	}

	if (in_interrupt()) {
		printk("nvram_commit: not committing in interrupt\n");
		return -EINVAL;
	}

	/* Backup sector blocks to be erased */
	erasesize = ROUNDUP(NVRAM_SPACE, nvram_mtd->erasesize);
	if (!(buf = kmalloc(erasesize, GFP_KERNEL))) {
		printk("nvram_commit: out of memory\n");
		return -ENOMEM;
	}

	down(&nvram_sem);

	if ((i = erasesize - NVRAM_SPACE) > 0) {
		offset = nvram_mtd->size - erasesize;
		len = 0;
		ret = MTD_READ(nvram_mtd, offset, i, &len, buf);
		if (ret || len != i) {
			printk("nvram_commit: read error ret = %d, len = %d/%d\n", ret, len, i);
			ret = -EIO;
			goto done;
		}
		header = (struct nvram_header *)(buf + i);
		magic_offset = i + ((void *)&header->magic - (void *)header);
	} else {
		offset = nvram_mtd->size - NVRAM_SPACE;
		header = (struct nvram_header *)buf;
		magic_offset = ((void *)&header->magic - (void *)header);
	}

	/* Clear the existing magic # to mark the NVRAM as unusable;
	 * we can pull the MAGIC bits low without an erase. */
	header->magic = NVRAM_CLEAR_MAGIC; /* All zeros magic */

	/* Unlock sector blocks (for Intel 28F320C3B flash) , 20060309 */
	if(nvram_mtd->unlock)
		nvram_mtd->unlock(nvram_mtd, offset, nvram_mtd->erasesize);

	ret = MTD_WRITE(nvram_mtd, offset + magic_offset, sizeof(header->magic), 
									&magic_len, (char *)&header->magic);
	if (ret || magic_len != sizeof(header->magic)) {
		printk("nvram_commit: clear MAGIC error\n");
		ret = -EIO;
		goto done;
	}

	header->magic = NVRAM_MAGIC; /* reset MAGIC before we regenerate the
				      * NVRAM, otherwise we'll have an
				      * incorrect CRC */
	/* Regenerate NVRAM */
	spin_lock_irqsave(&nvram_lock, flags);
	ret = _nvram_commit(header);
	spin_unlock_irqrestore(&nvram_lock, flags);
	if (ret)
		goto done;

	/* Erase sector blocks */
	init_waitqueue_head(&wait_q);
	for (; offset < nvram_mtd->size - NVRAM_SPACE + header->len; offset += nvram_mtd->erasesize) {
		erase.mtd = nvram_mtd;
		erase.addr = offset;
		erase.len = nvram_mtd->erasesize;
		erase.callback = erase_callback;
		erase.priv = (u_long) &wait_q;

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&wait_q, &wait);

		/* Unlock sector blocks */
		if (nvram_mtd->unlock)
			nvram_mtd->unlock(nvram_mtd, offset, nvram_mtd->erasesize);

		if ((ret = MTD_ERASE(nvram_mtd, &erase))) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&wait_q, &wait);
			printk("nvram_commit: erase error\n");
			goto done;
		}

		/* Wait for erase to finish */
		schedule();
		remove_wait_queue(&wait_q, &wait);
	}

	/* Write partition up to end of data area */
	header->magic = NVRAM_INVALID_MAGIC; /* All ones magic */
	offset = nvram_mtd->size - erasesize;
	i = erasesize - NVRAM_SPACE + header->len;
	ret = MTD_WRITE(nvram_mtd, offset, i, &len, buf);
	if (ret || len != i) {
		printk("nvram_commit: write error\n");
		ret = -EIO;
		goto done;
	}

	/* Now mark the NVRAM in flash as "valid" by setting the correct
	 * MAGIC # */
	header->magic = NVRAM_MAGIC;
	ret = MTD_WRITE(nvram_mtd, offset + magic_offset, sizeof(header->magic), 
									&magic_len, (char *)&header->magic);
	if (ret || magic_len != sizeof(header->magic)) {
		printk("nvram_commit: write MAGIC error\n");
		ret = -EIO;
		goto done;
	}

	/*
	 * Reading a few bytes back here will put the device
	 * back to the correct mode on certain flashes.
	 */
	offset = nvram_mtd->size - erasesize;
	ret = MTD_READ(nvram_mtd, offset, 4, &len, buf);

 done:
	up(&nvram_sem);
	kfree(buf);

	return ret;
}
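
nvram_commit() sleeps on a semaphore and waits for flash erases, so it must run in process context; a minimal caller sketch (the message text is illustrative):

/* Sketch: persist the in-memory NVRAM image and surface any error code. */
static int example_save_settings(void)
{
	int err = nvram_commit();

	if (err)
		printk("example: nvram_commit failed (%d)\n", err);
	return err;
}
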
Example No. 15
int usb_stor_Bulk_transport(Scsi_Cmnd *srb, struct us_data *us)
{
	struct bulk_cb_wrap *bcb;
	struct bulk_cs_wrap *bcs;
	int result;
	int pipe;
	int partial;
	int ret = USB_STOR_TRANSPORT_ERROR;

	bcb = kmalloc(sizeof *bcb, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
	if (!bcb) {
		return USB_STOR_TRANSPORT_ERROR;
	}
	bcs = kmalloc(sizeof *bcs, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
	if (!bcs) {
		kfree(bcb);
		return USB_STOR_TRANSPORT_ERROR;
	}

	/* set up the command wrapper */
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->DataTransferLength = cpu_to_le32(usb_stor_transfer_length(srb));
	bcb->Flags = srb->sc_data_direction == SCSI_DATA_READ ? 1 << 7 : 0;
	bcb->Tag = ++(us->tag);
	bcb->Lun = srb->cmnd[1] >> 5;
	if (us->flags & US_FL_SCM_MULT_TARG)
		bcb->Lun |= srb->target << 4;
	bcb->Length = srb->cmd_len;

	/* construct the pipe handle */
	pipe = usb_sndbulkpipe(us->pusb_dev, us->ep_out);

	/* copy the command payload */
	memset(bcb->CDB, 0, sizeof(bcb->CDB));
	memcpy(bcb->CDB, srb->cmnd, bcb->Length);

	/* send it to out endpoint */
	US_DEBUGP("Bulk command S 0x%x T 0x%x Trg %d LUN %d L %d F %d CL %d\n",
		  le32_to_cpu(bcb->Signature), bcb->Tag,
		  (bcb->Lun >> 4), (bcb->Lun & 0x0F), 
		  le32_to_cpu(bcb->DataTransferLength), bcb->Flags, bcb->Length);
	result = usb_stor_bulk_msg(us, bcb, pipe, US_BULK_CB_WRAP_LEN, 
				   &partial);
	US_DEBUGP("Bulk command transfer result=%d\n", result);

	/* if the command was aborted, indicate that */
	if (result == -ECONNRESET) {
		ret = USB_STOR_TRANSPORT_ABORTED;
		goto out;
	}

	/* if we stall, we need to clear it before we go on */
	if (result == -EPIPE) {
		US_DEBUGP("clearing endpoint halt for pipe 0x%x\n", pipe);
		result = usb_stor_clear_halt(us, pipe);

		/* if the command was aborted, indicate that */
		if (result == -ECONNRESET) {
			ret = USB_STOR_TRANSPORT_ABORTED;
			goto out;
		}
		result = -EPIPE;
	} else if (result) {
		/* unknown error -- we've got a problem */
		ret = USB_STOR_TRANSPORT_ERROR;
		goto out;
	}

	/* if the command transferred well, then we go to the data stage */
	if (result == 0) {
		/* send/receive data payload, if there is any */
		if (bcb->DataTransferLength) {
			usb_stor_transfer(srb, us);
			result = srb->result;
			US_DEBUGP("Bulk data transfer result 0x%x\n", result);

			/* if it was aborted, we need to indicate that */
			if (result == US_BULK_TRANSFER_ABORTED) {
				ret = USB_STOR_TRANSPORT_ABORTED;
				goto out;
			}
		}
	}

	/* See flow chart on pg 15 of the Bulk Only Transport spec for
	 * an explanation of how this code works.
	 */

	/* construct the pipe handle */
	pipe = usb_rcvbulkpipe(us->pusb_dev, us->ep_in);

	/* get CSW for device status */
	US_DEBUGP("Attempting to get CSW...\n");
	result = usb_stor_bulk_msg(us, bcs, pipe, US_BULK_CS_WRAP_LEN, 
				   &partial);

	/* if the command was aborted, indicate that */
	if (result == -ECONNRESET) {
		ret = USB_STOR_TRANSPORT_ABORTED;
		goto out;
	}

	/* did the attempt to read the CSW fail? */
	if (result == -EPIPE) {
		US_DEBUGP("clearing endpoint halt for pipe 0x%x\n", pipe);
		result = usb_stor_clear_halt(us, pipe);

		/* if the command was aborted, indicate that */
		if (result == -ECONNRESET) {
			ret = USB_STOR_TRANSPORT_ABORTED;
			goto out;
		}

		/* get the status again */
		US_DEBUGP("Attempting to get CSW (2nd try)...\n");
		result = usb_stor_bulk_msg(us, bcs, pipe,
					   US_BULK_CS_WRAP_LEN, &partial);

		/* if the command was aborted, indicate that */
		if (result == -ECONNRESET) {
			ret = USB_STOR_TRANSPORT_ABORTED;
			goto out;
		}

		/* if it fails again, we need a reset and return an error*/
		if (result == -EPIPE) {
			US_DEBUGP("clearing halt for pipe 0x%x\n", pipe);
			result = usb_stor_clear_halt(us, pipe);

			/* if the command was aborted, indicate that */
			if (result == -ECONNRESET) {
				ret = USB_STOR_TRANSPORT_ABORTED;
			} else {
				ret = USB_STOR_TRANSPORT_ERROR;
			}
			goto out;
		}
	}

	/* if we still have a failure at this point, we're in trouble */
	US_DEBUGP("Bulk status result = %d\n", result);
	if (result) {
		ret = USB_STOR_TRANSPORT_ERROR;
		goto out;
	}

	/* check bulk status */
	US_DEBUGP("Bulk status Sig 0x%x T 0x%x R %d Stat 0x%x\n",
		  le32_to_cpu(bcs->Signature), bcs->Tag, 
		  bcs->Residue, bcs->Status);
	if ((bcs->Signature != cpu_to_le32(US_BULK_CS_SIGN) && bcs->Signature != cpu_to_le32(US_BULK_CS_OLYMPUS_SIGN)) ||
	    bcs->Tag != bcb->Tag || 
	    bcs->Status > US_BULK_STAT_PHASE || partial != 13) {
		US_DEBUGP("Bulk logical error\n");
		ret = USB_STOR_TRANSPORT_ERROR;
		goto out;
	}

	/* based on the status code, we report good or bad */
	switch (bcs->Status) {
		case US_BULK_STAT_OK:
			/* command good -- note that data could be short */
			ret = USB_STOR_TRANSPORT_GOOD;
			goto out;

		case US_BULK_STAT_FAIL:
			/* command failed */
			ret = USB_STOR_TRANSPORT_FAILED;
			goto out;

		case US_BULK_STAT_PHASE:
			/* phase error -- note that a transport reset will be
			 * invoked by the invoke_transport() function
			 */
			ret = USB_STOR_TRANSPORT_ERROR;
			goto out;
	}

	/* we should never get here, but if we do, we're in trouble */

 out:
	kfree(bcb);
	kfree(bcs);
	return ret;
}
Example No. 16
int dtrace_getipl(void)
{
	return in_interrupt();
}
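
dtrace_getipl() above simply exposes the kernel's in_interrupt() predicate. As a sketch of the allocation pattern that recurs throughout these examples (see usb_stor_Bulk_transport and ieee80211p_tx), the context decides the GFP flags:

#include <linux/hardirq.h>
#include <linux/slab.h>

/* Sketch: GFP_ATOMIC never sleeps and is the only safe choice when
 * in_interrupt() is true; GFP_KERNEL may sleep and is preferred otherwise. */
static void *ctx_safe_alloc(size_t len)
{
	return kmalloc(len, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
}
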
Example No. 17
int ux500_msp_i2s_open(struct ux500_msp *msp,
                       struct ux500_msp_config *config)
{
    u32 old_reg, new_reg, mask;
    int res;
    unsigned int tx_sel, rx_sel, tx_busy, rx_busy;

    if (in_interrupt()) {
        dev_err(msp->dev,
                "%s: ERROR: Open called in interrupt context!\n",
                __func__);
        return -1;
    }

    tx_sel = (config->direction & MSP_DIR_TX) > 0;
    rx_sel = (config->direction & MSP_DIR_RX) > 0;
    if (!tx_sel && !rx_sel) {
        dev_err(msp->dev, "%s: Error: No direction selected!\n",
                __func__);
        return -EINVAL;
    }

    tx_busy = (msp->dir_busy & MSP_DIR_TX) > 0;
    rx_busy = (msp->dir_busy & MSP_DIR_RX) > 0;
    if (tx_busy && tx_sel) {
        dev_err(msp->dev, "%s: Error: TX is in use!\n", __func__);
        return -EBUSY;
    }
    if (rx_busy && rx_sel) {
        dev_err(msp->dev, "%s: Error: RX is in use!\n", __func__);
        return -EBUSY;
    }

    msp->dir_busy |= (tx_sel ? MSP_DIR_TX : 0) | (rx_sel ? MSP_DIR_RX : 0);

    /* First do the global config register */
    mask = RX_CLK_SEL_MASK | TX_CLK_SEL_MASK | RX_FSYNC_MASK |
           TX_FSYNC_MASK | RX_SYNC_SEL_MASK | TX_SYNC_SEL_MASK |
           RX_FIFO_ENABLE_MASK | TX_FIFO_ENABLE_MASK | SRG_CLK_SEL_MASK |
           LOOPBACK_MASK | TX_EXTRA_DELAY_MASK;

    new_reg = (config->tx_clk_sel | config->rx_clk_sel |
               config->rx_fsync_pol | config->tx_fsync_pol |
               config->rx_fsync_sel | config->tx_fsync_sel |
               config->rx_fifo_config | config->tx_fifo_config |
               config->srg_clk_sel | config->loopback_enable |
               config->tx_data_enable);

    old_reg = readl(msp->registers + MSP_GCR);
    old_reg &= ~mask;
    new_reg |= old_reg;
    writel(new_reg, msp->registers + MSP_GCR);

    res = enable_msp(msp, config);
    if (res < 0) {
        dev_err(msp->dev, "%s: ERROR: enable_msp failed (%d)!\n",
                __func__, res);
        return -EBUSY;
    }
    if (config->loopback_enable & 0x80)
        msp->loopback_enable = 1;

    /* Flush FIFOs */
    flush_fifo_tx(msp);
    flush_fifo_rx(msp);

    msp->msp_state = MSP_STATE_CONFIGURED;
    return 0;
}
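
A short sketch of opening the MSP above for transmit only, assuming the caller has already filled in the remaining config fields from platform data:

/* Sketch: select TX before opening; returns 0, -EINVAL, or -EBUSY as above. */
static int example_open_tx(struct ux500_msp *msp, struct ux500_msp_config *cfg)
{
    cfg->direction = MSP_DIR_TX;
    return ux500_msp_i2s_open(msp, cfg);
}
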
Example No. 18
 /* Send/receive messages over TCP/IP; modeled on drivers/block/nbd.c. */
int usbip_xmit(int send, struct socket *sock, char *buf, int size, int msg_flags)
{
	int result;
	struct msghdr msg;
	struct kvec iov;
	int total = 0;

	/* for blocks of if (dbg_flag_xmit) */
	char *bp = buf;
	int osize = size;

	dbg_xmit("enter\n");

	if (!sock || !buf || !size) {
		uerr("usbip_xmit: invalid arg, sock %p buff %p size %d\n",
				sock, buf, size);
		return -EINVAL;
	}


	if (dbg_flag_xmit) {
		if (send) {
			if (!in_interrupt())
				printk(KERN_DEBUG "%-10s:", current->comm);
			else
				printk(KERN_DEBUG "interrupt :");

			printk("usbip_xmit: sending... , sock %p, buf %p, size %d, msg_flags %d\n",
					sock, buf, size, msg_flags);
			usbip_dump_buffer(buf, size);
		}
	}


	do {
		sock->sk->sk_allocation = GFP_NOIO;
		iov.iov_base    = buf;
		iov.iov_len     = size;
		msg.msg_name    = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags      = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
		else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL);

		if (result <= 0) {
			udbg("usbip_xmit: %s sock %p buf %p size %u ret %d total %d\n",
					send ? "send" : "receive", sock, buf, size, result, total);
			goto err;
		}

		size -= result;
		buf += result;
		total += result;

	} while (size > 0);


	if (dbg_flag_xmit) {
		if (!send) {
			if (!in_interrupt())
				printk(KERN_DEBUG "%-10s:", current->comm);
			else
				printk(KERN_DEBUG "interrupt :");

			printk("usbip_xmit: receiving....\n");
			usbip_dump_buffer(bp, osize);
			printk("usbip_xmit: received, osize %d ret %d size %d total %d\n",
					osize, result, size, total);
		}

		if (send) {
			printk("usbip_xmit: send, total %d\n", total);
		}
	}

	return total;

err:
	return result;
}
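
A hedged sketch of using usbip_xmit() to send a fixed-size header over an already-connected socket; the helper name and the -EPIPE mapping are illustrative:

/* Sketch: send a whole buffer, treating a short transfer as an error.
 * usbip_xmit() loops until 'size' bytes move or the socket call fails. */
static int example_send_header(struct socket *sock, void *hdr, int hdrlen)
{
	int sent = usbip_xmit(1, sock, (char *)hdr, hdrlen, 0);

	return (sent == hdrlen) ? 0 : -EPIPE;
}
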
Example No. 19
static int myri_open(struct net_device *dev)
{
	struct myri_eth *mp = (struct myri_eth *) dev->priv;

	return myri_init(mp, in_interrupt());
}
Example No. 20
static void *ps2kdma_alloc(struct kdma_buffer *kdb, int min, int max, int *size)
{
    unsigned long flags;
    int free, amin;
    int poll;

    save_flags(flags);
#ifdef __mips__
    /* polling wait is used when
     *  - called from interrupt handler
     *  - interrupt is already disabled (in printk()) 
     */
    poll = in_interrupt() || !(flags & ST0_IE);
#else
#error "for MIPS CPU only"
#endif

    if (down_trylock(&kdb->sem) != 0) {
	if (poll)
	    return NULL;		/* cannot sleep */
	else
	    down(&kdb->sem);
    }

    amin = DMA_ALIGN(min) + sizeof(struct kdma_request);
    if (amin > kdb->size) {
	up(&kdb->sem);
	return NULL;			/* requested size is too large */
    }

    spin_lock_irqsave(&kdb->lock, flags);

    while (1) {
	if (kdb->top == kdb->bottom) {		/* whole buffer is free */
	    kdb->top = kdb->bottom = kdb->start;
	    free = kdb->size - DMA_TRUNIT;
	    break;
	}
	if (kdb->top > kdb->bottom) {		/* [...#####...] */
	    free = kdb->end - kdb->top;
	    if (amin <= free)
		break;
	    if (kdb->bottom > kdb->start) {
		kdb->top = kdb->start;		/* wrap around */
		continue;
	    }
	} else if (kdb->top < kdb->bottom) {	/* [###.....###] */
	    free = kdb->bottom - kdb->top - DMA_TRUNIT;
	    if (amin <= free)
		break;
	}

	spin_unlock_irqrestore(&kdb->lock, flags);
	kdb->error |= ps2dma_intr_safe_wait_for_completion(kdb->channel, poll, &kdb->c);
	spin_lock_irqsave(&kdb->lock, flags);
    }

    if (amin < kdb->allocmax && free > kdb->allocmax)
	free = kdb->allocmax;
    free -= sizeof(struct kdma_request);
    if (size)
	*size = free > max ? max : free;
    kdb->kreq = (struct kdma_request *)kdb->top;
    spin_unlock_irqrestore(&kdb->lock, flags);

    return (void *)kdb->kreq + sizeof(struct kdma_request);
}
Example No. 21
static int ehci_omap_bus_suspend(struct usb_hcd *hcd)
{
	int ret;
	u32 status;
	u32 sysconfig;
	unsigned long flags;
	struct ehci_hcd *ehci;
	struct ehci_hcd_omap_platform_data *pdata;
	struct ehci_hcd_omap *omap = dev_get_drvdata(hcd->self.controller);

	dev_dbg(hcd->self.controller, "%s %ld %lu\n", __func__,
		in_interrupt(), jiffies);
	ehci = hcd_to_ehci(hcd);
	pdata = omap->dev->dev.platform_data;

	/* mask interrupt 77 to avoid race condition with ehci_irq */
	/* omap_writel(0x2000, 0x482000CC); */
	disable_irq(hcd->irq);

	ret = ehci_bus_suspend(hcd);
	if (ret) {
		enable_irq(hcd->irq);
		return ret;
	}

	/* Put UHH in SmartStandby mode */
	sysconfig = ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG);
	sysconfig &= ~OMAP_UHH_SYSCONFIG_MIDLEMODE_MASK;
	sysconfig |= OMAP_UHH_SYSCONFIG_MIDLEMODE;
	ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, sysconfig);

	spin_lock_irqsave(&usb_clocks_lock, flags);
	if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
		if (pdata->usbhost_standby_status)
			ret = pdata->usbhost_standby_status();
		if (ret == 0) {
			printk(KERN_ERR "ehci: suspend failed!\n");
			ret = -EBUSY;
			goto end;
		} else
			ret = 0;
		status = ehci_readl(ehci, &ehci->regs->status);
		if (status & INTR_MASK) {
			printk(KERN_ERR "ehci: pending irq, resume!\n");
			ret = -EBUSY;
			goto end;
		}
		ehci_omap_writel(omap->tll_base, OMAP_TLL_SHARED_CONF,
			ehci_omap_readl(omap->tll_base, OMAP_TLL_SHARED_CONF) &
			~(1));
		/* Enable the interrupt so that the remote-wakeup
		 * can be detected */
		ehci_omap_writel(omap->tll_base, OMAP_USBTLL_IRQSTATUS, 7);
		ehci_omap_writel(omap->tll_base, OMAP_USBTLL_IRQENABLE, 1);

		/* Put UHH in ForceIdle mode */
		sysconfig = ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG);
		sysconfig &= ~OMAP_UHH_SYSCONFIG_SIDLEMODE_MASK;
		ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, sysconfig);

		if (omap->usbhost1_48m_fck)
			clk_disable(omap->usbhost1_48m_fck);
		if (omap->usbhost2_120m_fck)
			clk_disable(omap->usbhost2_120m_fck);
		if (omap->usbtll_fck)
			clk_disable(omap->usbtll_fck);
		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
		LOG_USBHOST_ACTIVITY(aUsbHostDbg, iUsbHostDbg, jiffies);
	}
	spin_unlock_irqrestore(&usb_clocks_lock, flags);
#ifdef CONFIG_HAS_WAKELOCK
	wake_unlock(&ehci->wake_lock_ehci_pm);
#endif
	return 0;

end:
	/* Put UHH in NoStandby mode */
	sysconfig = ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG);
	sysconfig &= ~OMAP_UHH_SYSCONFIG_MIDLEMODE_MASK;
	sysconfig |= OMAP_UHH_SYSCONFIG_NOSTBYMODE;
	ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, sysconfig);

	spin_unlock_irqrestore(&usb_clocks_lock, flags);
	ehci_bus_resume(hcd);
	/* unmask irq 77 */
	/* omap_writel(0x2000, 0x482000C8); */
	enable_irq(hcd->irq);
	return ret;
}
Example No. 22
u64 *ps2con_gsp_alloc(int request, int *avail)
{
    return ps2kdma_alloc(in_interrupt() ? &kkdb : &kdb, request, BUF_SIZE, avail);
}
Example No. 23
static void __free_pages_ok (struct page *page, unsigned int order)
{
	unsigned long index, page_idx, mask, flags;
	free_area_t *area;
	struct page *base;
	zone_t *zone;

	arch_free_page(page, order);
	/*
	 * Yes, think what happens when other parts of the kernel take 
	 * a reference to a page in order to pin it for io. -ben
	 */
	if (PageLRU(page)) {
		if (unlikely(in_interrupt()))
			BUG();
		lru_cache_del(page);
	}

	if (page->buffers)
		BUG();
	if (page->mapping)
		BUG();
	if (!VALID_PAGE(page))
		BUG();
	if (PageLocked(page))
		BUG();
	if (PageActive(page))
		BUG();
	ClearPageReferenced(page);
	ClearPageDirty(page);

	if (current->flags & PF_FREE_PAGES)
		goto local_freelist;
 back_local_freelist:

	zone = page_zone(page);

	mask = (~0UL) << order;
	base = zone->zone_mem_map;
	page_idx = page - base;
	if (page_idx & ~mask)
		BUG();
	index = page_idx >> (1 + order);

	area = zone->free_area + order;

	spin_lock_irqsave(&zone->lock, flags);

	zone->free_pages -= mask;

	while (mask + (1 << (MAX_ORDER-1))) {
		struct page *buddy1, *buddy2;

		if (area >= zone->free_area + MAX_ORDER)
			BUG();
		if (!__test_and_change_bit(index, area->map))
			/*
			 * the buddy page is still allocated.
			 */
			break;
		/*
		 * Move the buddy up one level.
		 * This code is taking advantage of the identity:
		 * 	-mask = 1+~mask
		 */
		buddy1 = base + (page_idx ^ -mask);
		buddy2 = base + page_idx;
		if (BAD_RANGE(zone,buddy1))
			BUG();
		if (BAD_RANGE(zone,buddy2))
			BUG();

		list_del(&buddy1->list);
		mask <<= 1;
		area++;
		index >>= 1;
		page_idx &= mask;
	}
	list_add(&(base + page_idx)->list, &area->free_list);

	spin_unlock_irqrestore(&zone->lock, flags);
	return;

 local_freelist:
	if (current->nr_local_pages)
		goto back_local_freelist;
	if (in_interrupt())
		goto back_local_freelist;		

	list_add(&page->list, &current->local_pages);
	page->index = order;
	current->nr_local_pages++;
}
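
The merge loop above leans on the identity -mask = 1 + ~mask, so page_idx ^ -mask flips exactly the order bit and yields the buddy's index. A tiny stand-alone illustration (not kernel code; the values are arbitrary):

#include <stdio.h>

/* For order k, mask = ~0UL << k and -mask == 1UL << k, so XOR-ing
 * page_idx with -mask toggles bit k: the buddy block's index. */
int main(void)
{
	unsigned int order = 2;
	unsigned long mask = (~0UL) << order;
	unsigned long page_idx = 8;	/* aligned to 1 << order */

	printf("buddy of %lu at order %u is %lu\n",
	       page_idx, order, page_idx ^ -mask);	/* prints 12 */
	return 0;
}
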
Example No. 24
void ps2con_gsp_send(int len)
{
    ps2kdma_send(in_interrupt() ? &kkdb : &kdb, len);
}    
Example No. 25
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long address;
	unsigned long page;
	unsigned long fixup;
	int write;
	siginfo_t info;

	/* get the address */
	__asm__("movl %%cr2,%0":"=r" (address));

	/* It's safe to allow irq's after cr2 has been saved */
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();

	tsk = current;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 1) == 0.
	 */
	if (address >= TASK_SIZE && !(error_code & 5))
		goto vmalloc_fault;

	mm = tsk->mm;
	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_interrupt() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & 4) {
		/*
		 * accessing the stack below %esp is always a bug.
		 * The "+ 32" is there due to some instructions (like
		 * pusha) doing post-decrement on the stack and that
		 * doesn't show up until later..
		 */
		if (address + 32 < regs->esp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & 3) {
		default:	/* 3: write, present */
#ifdef TEST_VERIFY_AREA
			if (regs->cs == KERNEL_CS)
				printk("WP fault at %08lx\n", regs->eip);
#endif
			/* fall through */
		case 2:		/* write, not present */
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
			write++;
			break;
		case 1:		/* read, present */
			goto bad_area;
		case 0:		/* read, not present */
			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
				goto bad_area;
	}

 survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	switch (handle_mm_fault(mm, vma, address, write)) {
	case 1:
		tsk->min_flt++;
		break;
	case 2:
		tsk->maj_flt++;
		break;
	case 0:
		goto do_sigbus;
	default:
		goto out_of_memory;
	}

	/*
	 * Did it hit the DOS screen memory VA from vm86 mode?
	 */
	if (regs->eflags & VM_MASK) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			tsk->thread.screen_bitmap |= 1 << bit;
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * The error-path labels referenced above (bad_area, no_context,
	 * do_sigbus, out_of_memory, vmalloc_fault) fall below this point
	 * in the full function and are omitted from this excerpt.
	 */
}
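The error_code bit layout documented in the function's header comment decodes mechanically; a standalone sketch (the function and bit names are illustrative):

#include <stdio.h>

/* Standalone sketch decoding the error_code bits documented in the
 * header comment above; names are descriptive only. */
static void decode_error_code(unsigned long error_code)
{
	printf("%s fault, %s access, %s mode\n",
	       (error_code & 1) ? "protection" : "not-present",
	       (error_code & 2) ? "write" : "read",
	       (error_code & 4) ? "user" : "kernel");
}

int main(void)
{
	decode_error_code(6);	/* write to a missing page from user mode */
	return 0;
}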
Example No. 26
static inline void gpio_lock(void)
{
	if (likely(!in_interrupt()))
		mutex_lock(&gpio_mutex);
}
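A mutex may sleep, which is illegal in interrupt context, hence the guard above; note that silently skipping the lock leaves the data unprotected on that path. A sketch of an alternative using a spinlock, which may be taken from both contexts (gpio_spinlock and the helper are illustrative):

#include <linux/spinlock.h>

/* Illustrative alternative, not from the driver: a spinlock with
 * interrupts disabled is legal in both process and interrupt context. */
static DEFINE_SPINLOCK(gpio_spinlock);

static void gpio_reg_set_bit(unsigned long *reg, int bit)
{
	unsigned long flags;

	spin_lock_irqsave(&gpio_spinlock, flags);
	*reg |= 1UL << bit;
	spin_unlock_irqrestore(&gpio_spinlock, flags);
}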
Example No. 27
/*
 * Allocate URBs and start IRQ
 */
int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
			  int num_bufs, int max_pkt_size,
			  int (*isoc_copy) (struct cx231xx *dev,
					    struct urb *urb))
{
	struct cx231xx_dmaqueue *dma_q = &dev->vbi_mode.vidq;
	int i;
	int sb_size, pipe;
	struct urb *urb;
	int rc;

	cx231xx_info(DRIVER_NAME ": called cx231xx_init_vbi_isoc\n");

	/* De-allocate any buffers left over from a previous run */
	cx231xx_uninit_vbi_isoc(dev);

	/* Clear any halt condition on the VBI endpoint */
	usb_clear_halt(dev->udev,
		       usb_rcvbulkpipe(dev->udev,
				       dev->vbi_mode.end_point_addr));

	dev->vbi_mode.isoc_ctl.isoc_copy = isoc_copy;
	dev->vbi_mode.isoc_ctl.num_bufs = num_bufs;
	dma_q->pos = 0;
	dma_q->is_partial_line = 0;
	dma_q->last_sav = 0;
	dma_q->current_field = -1;
	dma_q->bytes_left_in_line = dev->width << 1;
	dma_q->lines_per_field = ((dev->norm & V4L2_STD_625_50) ?
				  PAL_VBI_LINES : NTSC_VBI_LINES);
	dma_q->lines_completed = 0;
	for (i = 0; i < 8; i++)
		dma_q->partial_buf[i] = 0;

	dev->vbi_mode.isoc_ctl.urb = kzalloc(sizeof(void *) * num_bufs,
					     GFP_KERNEL);
	if (!dev->vbi_mode.isoc_ctl.urb) {
		cx231xx_errdev("cannot alloc memory for usb buffers\n");
		return -ENOMEM;
	}

	dev->vbi_mode.isoc_ctl.transfer_buffer =
	    kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
	if (!dev->vbi_mode.isoc_ctl.transfer_buffer) {
		cx231xx_errdev("cannot allocate memory for usbtransfer\n");
		kfree(dev->vbi_mode.isoc_ctl.urb);
		return -ENOMEM;
	}

	dev->vbi_mode.isoc_ctl.max_pkt_size = max_pkt_size;
	dev->vbi_mode.isoc_ctl.buf = NULL;

	sb_size = max_packets * dev->vbi_mode.isoc_ctl.max_pkt_size;

	/* allocate urbs and transfer buffers */
	for (i = 0; i < dev->vbi_mode.isoc_ctl.num_bufs; i++) {

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			cx231xx_err(DRIVER_NAME
				    ": cannot alloc isoc_ctl.urb %i\n", i);
			cx231xx_uninit_vbi_isoc(dev);
			return -ENOMEM;
		}
		dev->vbi_mode.isoc_ctl.urb[i] = urb;
		urb->transfer_flags = 0;

		dev->vbi_mode.isoc_ctl.transfer_buffer[i] =
		    kzalloc(sb_size, GFP_KERNEL);
		if (!dev->vbi_mode.isoc_ctl.transfer_buffer[i]) {
			cx231xx_err(DRIVER_NAME
				    ": unable to allocate %i bytes for transfer"
				    " buffer %i%s\n", sb_size, i,
				    in_interrupt() ? " while in int" : "");
			cx231xx_uninit_vbi_isoc(dev);
			return -ENOMEM;
		}

		pipe = usb_rcvbulkpipe(dev->udev, dev->vbi_mode.end_point_addr);
		usb_fill_bulk_urb(urb, dev->udev, pipe,
				  dev->vbi_mode.isoc_ctl.transfer_buffer[i],
				  sb_size, cx231xx_irq_vbi_callback, dma_q);
	}

	init_waitqueue_head(&dma_q->wq);

	/* Submit the URBs; completions arrive via cx231xx_irq_vbi_callback */
	for (i = 0; i < dev->vbi_mode.isoc_ctl.num_bufs; i++) {
		rc = usb_submit_urb(dev->vbi_mode.isoc_ctl.urb[i], GFP_ATOMIC);
		if (rc) {
			cx231xx_err(DRIVER_NAME
				    ": submit of urb %i failed (error=%i)\n", i,
				    rc);
			cx231xx_uninit_vbi_isoc(dev);
			return rc;
		}
	}

	cx231xx_capture_start(dev, 1, Vbi);

	return 0;
}
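Every error path above relies on cx231xx_uninit_vbi_isoc(), which is not shown; a minimal sketch of the usual URB teardown built from the fields used in this example (the driver's real function may differ):

/* Sketch of the teardown counterpart implied above; it reuses the
 * field names visible in this example, and the driver's actual
 * cx231xx_uninit_vbi_isoc() may differ. */
static void vbi_isoc_teardown_sketch(struct cx231xx *dev)
{
	int i;

	if (!dev->vbi_mode.isoc_ctl.urb)
		return;

	for (i = 0; i < dev->vbi_mode.isoc_ctl.num_bufs; i++) {
		struct urb *urb = dev->vbi_mode.isoc_ctl.urb[i];

		if (urb) {
			usb_kill_urb(urb);	/* cancel and wait for completion */
			usb_free_urb(urb);
		}
		if (dev->vbi_mode.isoc_ctl.transfer_buffer)
			kfree(dev->vbi_mode.isoc_ctl.transfer_buffer[i]);
	}
	kfree(dev->vbi_mode.isoc_ctl.urb);
	kfree(dev->vbi_mode.isoc_ctl.transfer_buffer);
	dev->vbi_mode.isoc_ctl.urb = NULL;
	dev->vbi_mode.isoc_ctl.transfer_buffer = NULL;
	dev->vbi_mode.isoc_ctl.num_bufs = 0;
}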
Example No. 28
static inline void sm_gpio_unlock(void)
{
	if (likely(!in_interrupt()))
		mutex_unlock(&sm_gpio_mutex);
}
Example No. 29
static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
        const unsigned char *buffer,
        int offset, size_t count)
{
    struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
    struct onenand_chip *this = mtd->priv;
    dma_addr_t dma_src, dma_dst;
    int bram_offset;
    unsigned long timeout;
    void *buf = (void *)buffer;
    volatile unsigned *done;

    bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
    if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
        goto out_copy;

    /* panic_write() may be in an interrupt context */
    if (in_interrupt() || oops_in_progress)
        goto out_copy;

    if (buf >= high_memory) {
        struct page *p1;

        if (((size_t)buf & PAGE_MASK) !=
                ((size_t)(buf + count - 1) & PAGE_MASK))
            goto out_copy;
        p1 = vmalloc_to_page(buf);
        if (!p1)
            goto out_copy;
        buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
    }

    dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
    dma_dst = c->phys_base + bram_offset;
    if (dma_mapping_error(&c->pdev->dev, dma_src)) {
        dev_err(&c->pdev->dev,
                "Couldn't DMA map a %zu byte buffer\n",
                count);
        return -1;
    }

    omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
                                 count >> 2, 1, 0, 0, 0);
    omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
                            dma_src, 0, 0);
    omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
                             dma_dst, 0, 0);

    INIT_COMPLETION(c->dma_done);
    omap_start_dma(c->dma_channel);

    timeout = jiffies + msecs_to_jiffies(20);
    done = &c->dma_done.done;
    while (time_before(jiffies, timeout))
        if (*done)
            break;

    dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

    if (!*done) {
        dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
        goto out_copy;
    }

    return 0;

out_copy:
    memcpy(this->base + bram_offset, buf, count);
    return 0;
}
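The function only attempts DMA when alignment, size, and context permit; otherwise it falls back to the memcpy() path. Those checks, collected into a single predicate as a sketch (dma_is_worthwhile is not from the driver):

/* Sketch only: the DMA-eligibility checks from the function above,
 * factored into one predicate. */
static int dma_is_worthwhile(int bram_offset, const void *buf, size_t count)
{
	if ((bram_offset & 3) || ((size_t)buf & 3))
		return 0;			/* unaligned: PIO copy only */
	if (count < 384)
		return 0;			/* too small to amortize DMA setup */
	if (in_interrupt() || oops_in_progress)
		return 0;			/* atomic/panic context: must not sleep */
	return 1;
}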
Example No. 30
/*lint -e960 */
unsigned long OM_PrintfWithModule(unsigned long ulModuleId, unsigned long ulLevel, char * pcformat, ... )
/*lint +e960 */
{
    VOS_UINT32          ulReturn = OM_OK;
    VOS_CHAR            *pcWarning;
    VOS_UINT32          ulTempLen;
    VOS_INT32           lRetLen;
    va_list             argument;
    VOS_UINT32          ulDataLen = 0;

    /* The first four bytes of the array hold the module ID and the next four
       the print level; the converted string starts at the ninth byte. Four
       extra bytes are reserved as a guard against overrun during conversion. */
    /*lint -e813 */
    VOS_CHAR            acOutput[VOS_MAX_PRINT_LEN + 12];
    /*lint +e813 */

#if (VOS_OS_VER == VOS_LINUX)
    if(in_interrupt())
    {
        printk("\r\nOM_PrintfWithModule: RUN in the IRQ");

        return OM_ERR_RUNINIRQ;
    }
#endif

    /* Validate input parameters */
    if ((LOG_MAX_MODULE_ID_APP < ulModuleId) || (LOG_MIN_MODULE_ID_ACPU_DRV > ulModuleId)
                                             || (LOG_LEVEL_BUTT <= ulLevel))
    {
        vos_printf("\r\nOM_PrintfWithModule: parameter error, ModuleId is %lu, Level is %lu", ulModuleId, ulLevel);
        return OM_ERR_UNVALIDPARA;
    }

    ulReturn = OM_PrintfGetModuleIdLev(ulModuleId);
    if((ulLevel > ulReturn)||(LOG_LEVEL_OFF == ulLevel))
    {
        return OM_ERR_LOWLEVEL;
    }

    *((VOS_UINT32*)acOutput) = ulModuleId;
    *(((VOS_UINT32*)acOutput)+1) = ulLevel;

    /* Convert the format string and variable arguments into a string */
    va_start( argument, pcformat );
    lRetLen = VOS_nvsprintf(acOutput + OM_PRINTF_OFFSET, VOS_MAX_PRINT_LEN, pcformat, argument);
    va_end( argument );

    /* Append the string terminator */
    acOutput[VOS_MAX_PRINT_LEN + OM_PRINTF_OFFSET - 1] = '\0';

    /* Check the conversion result and substitute a warning message when it
       indicates truncation or failure */
    if (lRetLen >= (VOS_MAX_PRINT_LEN - 1))
    {
        pcWarning = "OM_PrintfWithModule: Warning! Print too long!";
        ulTempLen = VOS_StrLen(pcWarning);
        VOS_MemCpy(acOutput + OM_PRINTF_OFFSET, pcWarning, ulTempLen);

        /* Place a newline in the second-to-last byte of the converted string */
        acOutput[VOS_MAX_PRINT_LEN + OM_PRINTF_OFFSET - 2] = '\n';
        ulDataLen = VOS_MAX_PRINT_LEN + OM_PRINTF_OFFSET - 1;
    }
    else if (lRetLen < 0)
    {
        pcWarning = "OM_PrintfWithModule: unknown internal error.\r\n";
        VOS_StrCpy(acOutput + OM_PRINTF_OFFSET, pcWarning);
        ulDataLen = VOS_StrLen(pcWarning) + OM_PRINTF_OFFSET;
    }
    }
    else
    {
        ulDataLen = (VOS_UINT32)lRetLen + OM_PRINTF_OFFSET;
    }

    ulReturn = OM_PrintfDataPut(acOutput, ulDataLen);

    return ulReturn;
}
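For reference, a call might look like the following; LOG_MIN_MODULE_ID_ACPU_DRV and LOG_LEVEL_INFO appear in the range checks above, while ulChanId and ulState are placeholder variables:

    /* Illustrative call; the arguments reuse macros seen in the range
       checks above, and ulChanId/ulState are placeholders. */
    OM_PrintfWithModule(LOG_MIN_MODULE_ID_ACPU_DRV, LOG_LEVEL_INFO,
                        "channel %d state %d\r\n", ulChanId, ulState);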