Example #1
static int lapic_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;

	if (!apic_pm_state.active)
		return 0;

	apic_pm_state.apic_id = apic_read(APIC_ID);
	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
	apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
	apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
	local_save_flags(flags);
	local_irq_disable();
	disable_local_APIC();
	local_irq_restore(flags);
	return 0;
}
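
Nearly every example in this listing repeats one idiom: local_save_flags() captures the CPU's interrupt flags without changing them, local_irq_disable() then closes the window, and local_irq_restore() puts back exactly the state that was saved, so the sequence nests safely inside callers that may already have interrupts off. A minimal sketch of the idiom follows; the shadow_reg variable and hw_write_reg() helper are hypothetical, for illustration only:

#include <linux/types.h>
#include <linux/irqflags.h>

extern void hw_write_reg(u32 val);	/* hypothetical MMIO helper */

static u32 shadow_reg;			/* hypothetical shared state */

static void update_shadow_reg(u32 val)
{
	unsigned long flags;

	local_save_flags(flags);	/* remember the current IRQ state */
	local_irq_disable();		/* keep the update atomic vs. interrupts */

	shadow_reg = val;		/* non-atomic section */
	hw_write_reg(val);

	local_irq_restore(flags);	/* restore saved state, not a blind enable */
}

local_irq_save(flags) collapses the first two calls into one; the split form shows up when code also wants to inspect or modify the saved flags, as several examples below do.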
Example #2
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
    struct trace_array_cpu *data;
    int cpu = smp_processor_id();
    unsigned long flags;
    long disabled;
    int pc;

    if (likely(!tracer_enabled))
        return;

    tracing_record_cmdline(p);
    tracing_record_cmdline(current);

    if ((wakeup_rt && !rt_task(p)) ||
            p->prio >= wakeup_prio ||
            p->prio >= current->prio)
        return;

    pc = preempt_count();
    disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
    if (unlikely(disabled != 1))
        goto out;

    /* interrupts should be off from try_to_wake_up */
    __raw_spin_lock(&wakeup_lock);

    /* check for races. */
    if (!tracer_enabled || p->prio >= wakeup_prio)
        goto out_locked;

    /* reset the trace */
    __wakeup_reset(wakeup_trace);

    wakeup_cpu = task_cpu(p);
    wakeup_current_cpu = wakeup_cpu;
    wakeup_prio = p->prio;

    wakeup_task = p;
    get_task_struct(wakeup_task);

    local_save_flags(flags);

    data = wakeup_trace->data[wakeup_cpu];
    data->preempt_timestamp = ftrace_now(cpu);
    tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

    /*
     * We must be careful in using CALLER_ADDR2. But since wake_up
     * is not called by an assembly function (whereas schedule is),
     * it should be safe to use it here.
     */
    trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
    __raw_spin_unlock(&wakeup_lock);
out:
    atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
Example #3
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
Example #4
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
Example #5
static inline void send_IPI_mask(cpumask_t cpumask, int vector)
{
	unsigned long mask = cpus_addr(cpumask)[0];
	unsigned long cfg;
	unsigned long flags;

	local_save_flags(flags);
	local_irq_disable();

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	apic_write_around(APIC_ICR2, cfg);

	/*
	 * program the ICR 
	 */
	cfg = __prepare_ICR(0, vector);
	
	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);
	local_irq_restore(flags);
}
Example #6
static void gta02_fiq_kick(void)
{
	unsigned long flags;
	u32 tcon;

	/* we have to take care about FIQ because this modification is
	 * non-atomic, FIQ could come in after the read and before the
	 * writeback and its changes to the register would be lost
	 * (platform INTMSK mod code is taken care of already)
	 */
	local_save_flags(flags);
	local_fiq_disable();
	/* allow FIQs to resume */
	__raw_writel(__raw_readl(S3C2410_INTMSK) &
		     ~(1 << (gta02_fiq_irq - S3C2410_CPUIRQ_OFFSET)),
		     S3C2410_INTMSK);
	tcon = __raw_readl(S3C2410_TCON) & ~S3C2410_TCON_T3START;
	/* fake the timer to a count of 1 */
	__raw_writel(1, S3C2410_TCNTB(gta02_fiq_timer_index));
	__raw_writel(tcon | S3C2410_TCON_T3MANUALUPD, S3C2410_TCON);
	__raw_writel(tcon | S3C2410_TCON_T3MANUALUPD | S3C2410_TCON_T3START,
		     S3C2410_TCON);
	__raw_writel(tcon | S3C2410_TCON_T3START, S3C2410_TCON);
	local_irq_restore(flags);
}
Example #7
/* Kretprobe handler */
static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
					  struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct kretprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
}
Example #8
void mac_do_irq_list(int irq, struct pt_regs *fp)
{
	irq_node_t *node, *slow_nodes;
	unsigned long flags;

	kstat_cpu(0).irqs[irq]++;

#ifdef DEBUG_SPURIOUS
	if (!mac_irq_list[irq] && (console_loglevel > 7)) {
		printk("mac_do_irq_list: spurious interrupt %d!\n", irq);
		return;
	}
#endif

	/* serve first fast and normal handlers */
	for (node = mac_irq_list[irq];
	     node && (!(node->flags & IRQ_FLG_SLOW));
	     node = node->next)
		node->handler(irq, node->dev_id, fp);
	if (!node) return;
	local_save_flags(flags);
	local_irq_restore((flags & ~0x0700) | (fp->sr & 0x0700));
	/* if slow handlers exist, serve them now */
	slow_nodes = node;
	for (; node; node = node->next) {
		node->handler(irq, node->dev_id, fp);
	}
}
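
The flags arithmetic above is m68k-specific: bits 8-10 of the status register (mask 0x0700) hold the interrupt priority level, so the local_irq_restore() call keeps the current flags but splices in the IPL of the interrupted context from fp->sr, letting the slow handlers run at the pre-interrupt priority. A sketch of that bit manipulation in isolation, with sr and frame_sr as stand-ins for the real registers:

/* keep everything but the IPL field of the current SR,
 * then adopt the IPL bits saved in the exception frame */
static unsigned long ipl_splice(unsigned long sr, unsigned long frame_sr)
{
	return (sr & ~0x0700) | (frame_sr & 0x0700);
}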
Example #9
/*
 * Source CPU calls into this - it waits for the freshly booted
 * target CPU to arrive and then starts the measurement:
 */
void __cpuinit check_tsc_sync_source(int cpu)
{
	unsigned long flags;
	int cpus = 2;

	/*
	 * No need to check if we already know that the TSC is not
	 * synchronized:
	 */
	if (unsynchronized_tsc())
		return;

	printk(KERN_INFO "checking TSC synchronization [CPU#%d -> CPU#%d]:",
			  smp_processor_id(), cpu);

	/*
	 * Reset it - in case this is a second bootup:
	 */
	atomic_set(&stop_count, 0);

	/*
	 * Wait for the target to arrive:
	 */
	local_save_flags(flags);
	local_irq_enable();
	while (atomic_read(&start_count) != cpus-1)
		cpu_relax();
	local_irq_restore(flags);
	/*
	 * Trigger the target to continue into the measurement too:
	 */
	atomic_inc(&start_count);

	check_tsc_warp();

	while (atomic_read(&stop_count) != cpus-1)
		cpu_relax();

	/*
	 * Reset it - just in case we boot another CPU later:
	 */
	atomic_set(&start_count, 0);

	if (nr_warps) {
		printk("\n");
		printk(KERN_WARNING "Measured %Ld cycles TSC warp between CPUs,"
				    " turning off TSC clock.\n", max_warp);
		mark_tsc_unstable();
		nr_warps = 0;
		max_warp = 0;
		last_tsc = 0;
	} else {
		printk(" passed.\n");
	}

	/*
	 * Let the target continue with the bootup:
	 */
	atomic_inc(&stop_count);
}
Example #10
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}
Example #11
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}
Example #12
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}
Example #13
void
handle_mmu_bus_fault(struct pt_regs *regs)
{
	int cause;
	int select;
#ifdef DEBUG
	int index;
	int page_id;
	int acc, inv;
#endif
	pgd_t* pgd = (pgd_t*)per_cpu(current_pgd, smp_processor_id());
	pmd_t *pmd;
	pte_t pte;
	int miss, we, writeac;
	unsigned long address;
	unsigned long flags;

	cause = *R_MMU_CAUSE;

	address = cause & PAGE_MASK; /* get faulting address */
	select = *R_TLB_SELECT;

#ifdef DEBUG
	page_id = IO_EXTRACT(R_MMU_CAUSE,  page_id,   cause);
	acc     = IO_EXTRACT(R_MMU_CAUSE,  acc_excp,  cause);
	inv     = IO_EXTRACT(R_MMU_CAUSE,  inv_excp,  cause);
	index   = IO_EXTRACT(R_TLB_SELECT, index,     select);
#endif
	miss    = IO_EXTRACT(R_MMU_CAUSE,  miss_excp, cause);
	we      = IO_EXTRACT(R_MMU_CAUSE,  we_excp,   cause);
	writeac = IO_EXTRACT(R_MMU_CAUSE,  wr_rd,     cause);

	D(printk("bus_fault from IRP 0x%lx: addr 0x%lx, miss %d, inv %d, we %d, acc %d, dx %d pid %d\n",
		 regs->irp, address, miss, inv, we, acc, index, page_id));

	/* leave it to the MM system fault handler */
	if (miss)
		do_page_fault(address, regs, 0, writeac);
	else
		do_page_fault(address, regs, 1, we);

	/* Reload TLB with new entry to avoid an extra miss exception.
	 * do_page_fault may have flushed the TLB so we have to restore
	 * the MMU registers.
	 */
	local_save_flags(flags);
	local_irq_disable();
	pmd = (pmd_t *)(pgd + pgd_index(address));
	if (pmd_none(*pmd))
		goto exit;
	pte = *pte_offset_kernel(pmd, address);
	if (!pte_present(pte))
		goto exit;
	*R_TLB_SELECT = select;
	*R_TLB_HI = cause;
	*R_TLB_LO = pte_val(pte);
exit:
	local_irq_restore(flags);
}
Example #14
void arch_enable_nmi(void)
{
	unsigned long flags;

	local_save_flags(flags);
	flags |= (1 << 30); /* NMI M flag is at bit 30 */
	local_irq_restore(flags);
}
Example #15
/*
 * This routine busy-waits for the drive status to be not "busy".
 * It then checks the status for all of the "good" bits and none
 * of the "bad" bits, and if all is okay it returns 0.  All other
 * cases return error -- caller may then invoke ide_error().
 *
 * This routine should get fixed to not hog the cpu during extra long waits..
 * That could be done by busy-waiting for the first jiffy or two, and then
 * setting a timer to wake up at half second intervals thereafter,
 * until timeout is achieved, before timing out.
 */
int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
		    unsigned long timeout, u8 *rstat)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	unsigned long flags;
	bool irqs_threaded = force_irqthreads;
	int i;
	u8 stat;

	udelay(1);	/* spec allows drive 400ns to assert "BUSY" */
	stat = tp_ops->read_status(hwif);

	if (stat & ATA_BUSY) {
		if (!irqs_threaded) {
			local_save_flags(flags);
			local_irq_enable_in_hardirq();
		}
		timeout += jiffies;
		while ((stat = tp_ops->read_status(hwif)) & ATA_BUSY) {
			if (time_after(jiffies, timeout)) {
				/*
				 * One last read after the timeout in case
				 * heavy interrupt load made us not make any
				 * progress during the timeout..
				 */
				stat = tp_ops->read_status(hwif);
				if ((stat & ATA_BUSY) == 0)
					break;

				if (!irqs_threaded)
					local_irq_restore(flags);
				*rstat = stat;
				return -EBUSY;
			}
		}
		if (!irqs_threaded)
			local_irq_restore(flags);
	}
	/*
	 * Allow status to settle, then read it again.
	 * A few rare drives vastly violate the 400ns spec here,
	 * so we'll wait up to 10usec for a "good" status
	 * rather than expensively fail things immediately.
	 * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
	 */
	for (i = 0; i < 10; i++) {
		udelay(1);
		stat = tp_ops->read_status(hwif);

		if (OK_STAT(stat, good, bad)) {
			*rstat = stat;
			return 0;
		}
	}
	*rstat = stat;
	return -EFAULT;
}
Example #16
void
enable_irq(unsigned int irq_nr)
{
	unsigned long flags;
	local_save_flags(flags);
	local_irq_disable();
	unmask_irq(irq_nr);
	local_irq_restore(flags);
}
Example #17
static int trace_ctl_write(struct file *fp, const char *buf, unsigned long count, void *data)
{
    char local_buf[31];
    int ret_val = 0;
    unsigned long flags;

    if (count > 30) {
        printk("Error : Buffer Overflow\n");
        trace_ctl_usage();
        return -EFAULT;
    }
    if (!count)		/* nothing to parse */
        return 0;

    if (copy_from_user(local_buf, buf, count))
        return -EFAULT;
    local_buf[count-1] = '\0'; /* Ignoring last \n char */
    ret_val = count;

    local_irq_save(flags);
    if (strcmp("0", local_buf) == 0) {
        printk("\nTrace disable\n");
        bm_disable();
    } else if (strcmp("1", local_buf) == 0) {
        printk("\nTrace enable\n");
        bm_enable();
    } else if (strcmp("l", local_buf) == 0 || strcmp("L", local_buf) == 0) {
        printk("\nPrinting Look Up Table\n");
        bm_act_print_lut();
    } else if (strcmp("r", local_buf) == 0 || strcmp("R", local_buf) == 0) {
        printk("\nReset trace logs\n");
        bm_entry_pointer = bm_entry;
    } else if (strcmp("s", local_buf) == 0 || strcmp("S", local_buf) == 0) {
        bm_disable();
    } else if (strcmp("a", local_buf) == 0 || strcmp("A", local_buf) == 0) {
        bm_test_all();
    } else {
        trace_ctl_usage();
    }
    local_irq_restore(flags); /* pair with local_irq_save() above */

    return ret_val;
}
Example #18
static void mipi_himax_set_backlight(struct msm_fb_data_type *mfd)
{
	static int first_enable = 0;
	static int prev_bl_level = 0;
	int cnt, bl_level;
	//int count = 0;
	unsigned long flags;
	bl_level = mfd->bl_level;

	PRINT("[LIVED] set_backlight=%d,prev=%d\n", bl_level, prev_bl_level);
	if (bl_level == prev_bl_level || himax_state.disp_on == 0) {
		PRINT("[LIVED] same! or not disp_on\n");
	} else {
		if (bl_level == 0) {
			gpio_set_value_cansleep(gpio16, GPIO_LOW_VALUE);
			usleep(250);      // Disable hold time
			PRINT("[LIVED] backlight off\n");
		} else {
			if (prev_bl_level == 0) {
				//count++;
				gpio_set_value_cansleep(gpio16, GPIO_HIGH_VALUE);
				if (first_enable == 0) {
					first_enable = 1;
					msleep(25); // Initial enable time
				} else {
					udelay(300);      // Turn on time
				}
				//PRINT("[LIVED] (0) init!\n");
			}

			if (prev_bl_level < bl_level) {
				gpio_set_value_cansleep(gpio16, GPIO_LOW_VALUE);
				udelay(200);// TDIS
				cnt = BL_MAX - bl_level + 1;
			} else {
				cnt = prev_bl_level - bl_level;
			}
			//pr_info("[LIVED] cnt=%d, prev_bl_level=%d, bl_level=%d\n",
			//		cnt, prev_bl_level, bl_level);
			while (cnt) {
				local_save_flags(flags);
				local_irq_disable();
				gpio_set_value_cansleep(gpio16, GPIO_LOW_VALUE);
				DELAY_3NS();	/* udelay(3): turn off time */
				gpio_set_value_cansleep(gpio16, GPIO_HIGH_VALUE);
				local_irq_restore(flags);
				udelay(300);      // Turn on time
				cnt--;
			}

		}
		prev_bl_level = bl_level;
	}
}
Example #19
static void sysrq_handle_showstate_blocked(int key)
{
	unsigned long flags;

	local_save_flags(flags);
	local_irq_enable();

	show_state_filter(TASK_UNINTERRUPTIBLE);

	local_irq_restore(flags);
}
Example #20
void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm) {
		unsigned long flags;
		local_save_flags(flags);
		__get_new_mmu_context(mm);
		__load_mmu_context(mm);
		local_irq_restore(flags);
	}
	else
		mm->context = 0;
}
Example #21
static inline unsigned long __apm_irq_save(void)
{
	unsigned long flags;
	local_save_flags(flags);
	if (apm_info.allow_ints) {
		if (irqs_disabled_flags(flags))
			local_irq_enable();
	} else
		local_irq_disable();

	return flags;
}
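
The helper hands the saved flags back to the caller, which is expected to bracket the BIOS call with it and a matching local_irq_restore(). A sketch of such a caller, assuming a hypothetical apm_bios_call_asm() entry point:

static int apm_do_call(u32 func)
{
	unsigned long flags;
	int err;

	flags = __apm_irq_save();	/* enable or disable IRQs per apm_info.allow_ints */
	err = apm_bios_call_asm(func);	/* hypothetical BIOS entry point */
	local_irq_restore(flags);	/* undo whatever the helper decided */

	return err;
}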
Example #22
int __ipipe_divert_exception(struct pt_regs *regs, int vector)
{
	bool root_entry = false;
	unsigned long flags = 0;

	if (ipipe_root_domain_p) {
		root_entry = true;

		local_save_flags(flags);

		if (irqs_disabled_hw()) {
			/*
			 * Same root state handling as in
			 * __ipipe_handle_exception.
			 */
			local_irq_disable();
		}
	}
#ifdef CONFIG_KGDB
	/* catch int1 and int3 over non-root domains */
	else {
#ifdef CONFIG_X86_32
		if (vector != ex_do_device_not_available)
#endif
		{
			unsigned int condition = 0;

			if (vector == 1)
				get_debugreg(condition, 6);
			if (!kgdb_handle_exception(vector, SIGTRAP, condition, regs))
				return 1;
		}
	}
#endif /* CONFIG_KGDB */

	if (unlikely(ipipe_trap_notify(vector, regs))) {
		if (root_entry)
			local_irq_restore_nosync(flags);
		return 1;
	}

	/* see __ipipe_handle_exception */
	if (likely(ipipe_root_domain_p))
		__fixup_if(root_entry ? raw_irqs_disabled_flags(flags) :
					raw_irqs_disabled(), regs);
	/*
	 * No need to restore root state in the 64-bit case, the Linux handler
	 * and the return code will take care of it.
	 */

	return 0;
}
Example #23
static void arc_floppy_data_enable_dma(dmach_t channel, dma_t *dma)
{
	DPRINTK("arc_floppy_data_enable_dma\n");
	switch (dma->dma_mode) {
	case DMA_MODE_READ: { /* read */
		extern unsigned char fdc1772_dma_read, fdc1772_dma_read_end;
		extern void fdc1772_setupdma(unsigned int count,unsigned int addr);
		unsigned long flags;
		DPRINTK("enable_dma fdc1772 data read\n");
		local_save_flags(flags);
		__clf();
			
		memcpy ((void *)0x1c, (void *)&fdc1772_dma_read,
			&fdc1772_dma_read_end - &fdc1772_dma_read);
		fdc1772_setupdma(dma->buf.length, dma->buf.address); /* Sets data pointer up */
		enable_fiq(FIQ_FLOPPYDATA);
		local_irq_restore(flags);
	   }
	   break;

	case DMA_MODE_WRITE: { /* write */
		extern unsigned char fdc1772_dma_write, fdc1772_dma_write_end;
		extern void fdc1772_setupdma(unsigned int count,unsigned int addr);
		unsigned long flags;
		DPRINTK("enable_dma fdc1772 data write\n");
		local_save_flags(flags);
		__clf();
		memcpy ((void *)0x1c, (void *)&fdc1772_dma_write,
			&fdc1772_dma_write_end - &fdc1772_dma_write);
		fdc1772_setupdma(dma->buf.length, dma->buf.address); /* Sets data pointer up */
		enable_fiq(FIQ_FLOPPYDATA);
		local_irq_restore(flags);
	    }
	    break;
	default:
		printk ("enable_dma: dma%d not initialised\n", channel);
	}
}
Example #24
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if (likely(!rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	__raw_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
	trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
		       CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	__raw_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
Example #25
static void
gpio_ext_set_irqmask(unsigned long mask, unsigned long value)
{
	unsigned long flags;
	unsigned long val;

	local_save_flags(flags); 
	local_irq_disable();
	val = GPIO_EXT_IRQMASK;
	val &= ~mask;
	val |= (value & mask);
	GPIO_EXT_IRQMASK = val;
	local_irq_restore(flags);
}
Example #26
static void common_backlight(int level)
{
	int bl_value, i;
	unsigned long flags;

#if 0	/* for 30.7MHz: H/W required it because of brightness (shinbrad) */
	if (level == 6)		/* android default */
		level += 1;
#endif
	bl_value = 32 - (level * 2);

	printk("[%s] Current Backlight value : %d, Dimming loop value : %d \n",
	       __FUNCTION__, level, bl_value);

	if (!level) {
		gpio_set_value(LCD_DIM_CON, GPIO_LOW_VALUE);
		udelay(T_DIS);

		printk("[%s] Backlight OFF!....\n", __FUNCTION__);
#ifdef F_SKYDISP_MDP_VG_CLEAR_HOLD_CHANGE
		force_mdp4_overlay_control(1);	/* screen_hold */
#endif
		return;
	}

#ifdef F_SKYDISP_MDP_VG_CLEAR_HOLD_CHANGE
	force_mdp4_overlay_control(0);	/* release screen_hold */
#endif

	/* shinbrad shinjg */
	local_save_flags(flags);
	local_irq_disable();

	gpio_set_value(LCD_DIM_CON, GPIO_LOW_VALUE);
	udelay(T_DIS);
	gpio_set_value(LCD_DIM_CON, GPIO_HIGH_VALUE);
	udelay(T_ON);

	for (i = 0; i < bl_value - 1; i++) {
		gpio_set_value(LCD_DIM_CON, GPIO_LOW_VALUE);
		udelay(T_OFF);
		gpio_set_value(LCD_DIM_CON, GPIO_HIGH_VALUE);
		udelay(T_ON);
	}
	local_irq_restore(flags);
}
Example #27
static int mcfrs_write(struct tty_struct * tty,
		    const unsigned char *buf, int count)
{
	volatile unsigned char	*uartp;
	struct mcf_serial	*info = (struct mcf_serial *)tty->driver_data;
	unsigned long		flags;
	int			c, total = 0;

#if 0
	printk("%s(%d): mcfrs_write(tty=%x,buf=%x,count=%d)\n",
		__FILE__, __LINE__, (int)tty, (int)buf, count);
#endif

	if (serial_paranoia_check(info, tty->name, "mcfrs_write"))
		return 0;

	if (!tty || !info->xmit_buf)
		return 0;
	
	local_save_flags(flags);
	while (1) {
		local_irq_disable();		
		c = min(count, (int) min(((int)SERIAL_XMIT_SIZE) - info->xmit_cnt - 1,
			((int)SERIAL_XMIT_SIZE) - info->xmit_head));
		local_irq_restore(flags);

		if (c <= 0)
			break;

		memcpy(info->xmit_buf + info->xmit_head, buf, c);

		local_irq_disable();
		info->xmit_head = (info->xmit_head + c) & (SERIAL_XMIT_SIZE-1);
		info->xmit_cnt += c;
		local_irq_restore(flags);

		buf += c;
		count -= c;
		total += c;
	}

	local_irq_disable();
	uartp = info->addr;
	info->imr |= MCFUART_UIR_TXREADY;
	uartp[MCFUART_UIMR] = info->imr;
	local_irq_restore(flags);

	return total;
}
Example #28
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	atomic_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	atomic_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
Example #29
File: process.c Project: 7L/pi_plus
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));
	/* store them in non-volatile registers */
	regs.r5 = (unsigned long)fn;
	regs.r6 = (unsigned long)arg;
	local_save_flags(regs.msr);
	regs.pc = (unsigned long)kernel_thread_helper;
	regs.pt_mode = 1;

	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
			&regs, 0, NULL, NULL);
}
Example #30
void efi_call_virt_check_flags(unsigned long flags, const char *call)
{
	unsigned long cur_flags, mismatch;

	local_save_flags(cur_flags);

	mismatch = flags ^ cur_flags;
	if (!WARN_ON_ONCE(mismatch & ARCH_EFI_IRQ_FLAGS_MASK))
		return;

	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_NOW_UNRELIABLE);
	pr_err_ratelimited(FW_BUG "IRQ flags corrupted (0x%08lx=>0x%08lx) by EFI %s\n",
			   flags, cur_flags, call);
	local_irq_restore(flags);
}
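
efi_call_virt_check_flags() expects the IRQ flags as they were before the firmware call, so callers snapshot them with local_save_flags() first and let the function warn, taint, and repair the state if the firmware corrupted it. A sketch of such a wrapper, assuming a hypothetical efi_rt_call() runtime entry point:

#define efi_checked_call(f, args...)					\
({									\
	unsigned long __flags;						\
	efi_status_t __s;						\
									\
	local_save_flags(__flags);	/* snapshot IRQ state */	\
	__s = efi_rt_call(f, args);	/* hypothetical runtime call */	\
	efi_call_virt_check_flags(__flags, #f);				\
	__s;								\
})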