Example 1
0
void synchronise_count_slave(int cpu)
{
	int i;

	/*
	 * Not every cpu is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready
	 */

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc_unchecked(&count_count_start);
		while (atomic_read_unchecked(&count_count_start) != 2)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		atomic_inc_unchecked(&count_count_stop);
		while (atomic_read_unchecked(&count_count_stop) != 2)
			mb();
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);
}
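A note on the primitives used throughout these examples: the atomic_*_unchecked() helpers are not part of the mainline kernel API. They come from the PaX/grsecurity patch set, where the plain atomic_t operations gain reference-counter overflow checking, and the _unchecked variants, operating on a separate atomic_unchecked_t type, deliberately skip that check for counters (statistics, sequence numbers) that are allowed to wrap. What follows is only a minimal sketch of the idea, with a GCC builtin standing in for the real per-architecture definitions:

/* Sketch only: the actual PaX definitions are per-architecture macros. */
typedef struct {
	int counter;
} atomic_unchecked_t;

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return *(volatile const int *)&v->counter;
}

static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
{
	v->counter = i;
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* No overflow trap here, unlike the checked atomic_inc(). */
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}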
Example 2
0
static void add_stats(struct seq_file *seq, const char *aal,
		      const struct k_atm_aal_stats *stats)
{
	seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
		   atomic_read_unchecked(&stats->tx),
		   atomic_read_unchecked(&stats->tx_err),
		   atomic_read_unchecked(&stats->rx),
		   atomic_read_unchecked(&stats->rx_err),
		   atomic_read_unchecked(&stats->rx_drop));
}
Example 3
0
u64 arch_irq_stat(void)
{
	u64 sum = atomic_read_unchecked(&irq_err_count);

#ifdef CONFIG_X86_IO_APIC
	sum += atomic_read_unchecked(&irq_mis_count);
#endif
	return sum;
}
Example 4
0
bool vmw_seqno_passed(struct vmw_private *dev_priv,
			 uint32_t seqno)
{
	struct vmw_fifo_state *fifo_state;
	bool ret;

	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	fifo_state = &dev_priv->fifo;
	vmw_update_seqno(dev_priv, fifo_state);
	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
	    vmw_fifo_idle(dev_priv, seqno))
		return true;

	/*
	 * Then check if the seqno is higher than what we've actually
	 * emitted. If so, the fence is stale and signaled.
	 */

	ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
	       > VMW_FENCE_WRAP);

	return ret;
}
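The test last_read_seqno - seqno < VMW_FENCE_WRAP relies on unsigned arithmetic: even if the sequence counter has wrapped past 2^32, the difference still comes out small whenever seqno was recently passed. A self-contained userspace illustration of the trick (the window size here is an assumption standing in for VMW_FENCE_WRAP):

#include <assert.h>
#include <stdint.h>

#define WRAP_WINDOW (1u << 23)	/* stand-in for VMW_FENCE_WRAP */

/* True iff 'cur' has reached or passed 'seqno', modulo 2^32. */
static int seqno_passed(uint32_t cur, uint32_t seqno)
{
	return cur - seqno < WRAP_WINDOW;
}

int main(void)
{
	assert(seqno_passed(100, 90));		/* plainly passed */
	assert(!seqno_passed(90, 100));		/* not yet reached */
	assert(seqno_passed(5, 0xfffffff0u));	/* passed across the wrap */
	return 0;
}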
Example 5
0
static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &lis3_dev.misc_wait, wait);
	if (atomic_read_unchecked(&lis3_dev.count))
		return POLLIN | POLLRDNORM;
	return 0;
}
Example 6
0
/**
 * sht15_update_single_val() - get a new value from device
 * @data:		device instance specific data
 * @command:		command sent to request value
 * @timeout_msecs:	timeout after which comms are assumed
 *			to have failed and are reset.
 **/
static inline int sht15_update_single_val(struct sht15_data *data,
					  int command,
					  int timeout_msecs)
{
	int ret;
	ret = sht15_send_cmd(data, command);
	if (ret)
		return ret;

	gpio_direction_input(data->pdata->gpio_data);
	atomic_set_unchecked(&data->interrupt_handled, 0);

	enable_irq(gpio_to_irq(data->pdata->gpio_data));
	if (gpio_get_value(data->pdata->gpio_data) == 0) {
		disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
		/* Only relevant if the interrupt hasn't occurred. */
		if (!atomic_read_unchecked(&data->interrupt_handled))
			schedule_work(&data->read_work);
	}
	ret = wait_event_timeout(data->wait_queue,
				 (data->flag == SHT15_READING_NOTHING),
				 msecs_to_jiffies(timeout_msecs));
	if (ret == 0) {/* timeout occurred */
		disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
		sht15_connection_reset(data);
		return -ETIME;
	}
	return 0;
}
Example 7
0
u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
{
	const drm_mga_private_t *const dev_priv =
		(drm_mga_private_t *) dev->dev_private;

	if (crtc != 0)
		return 0;

	return atomic_read_unchecked(&dev_priv->vbl_received);
}
Example 8
0
static unsigned int uio_poll(struct file *filep, poll_table *wait)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;

	if (!idev->info->irq)
		return -EIO;

	poll_wait(filep, &idev->wait, wait);
	if (listener->event_count != atomic_read_unchecked(&idev->event))
		return POLLIN | POLLRDNORM;
	return 0;
}
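uio_poll is the canonical event-counter poll: the listener stores a snapshot of the device's event counter at open time (and after each read), and poll reports data whenever the live counter differs from the snapshot. Stripped of the kernel plumbing, the core test is just the following sketch (the names here are illustrative, not the UIO structures):

#include <stdatomic.h>
#include <stdbool.h>

struct dev_state { atomic_int event; };
struct dev_listener { struct dev_state *dev; int event_snapshot; };

/* Readable iff at least one event arrived since the last snapshot. */
static bool listener_has_event(struct dev_listener *l)
{
	return atomic_load(&l->dev->event) != l->event_snapshot;
}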
Example 9
0
static ssize_t uio_read(struct file *filep, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t retval;
	s32 event_count;

	if (!idev->info->irq)
		return -EIO;

	if (count != sizeof(s32))
		return -EINVAL;

	add_wait_queue(&idev->wait, &wait);

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		event_count = atomic_read_unchecked(&idev->event);
		if (event_count != listener->event_count) {
			if (copy_to_user(buf, &event_count, count))
				retval = -EFAULT;
			else {
				listener->event_count = event_count;
				retval = count;
			}
			break;
		}

		if (filep->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		schedule();
	} while (1);

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&idev->wait, &wait);

	return retval;
}
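The open-coded loop above follows the standard sleep/wake discipline: mark the task sleeping with set_current_state() before testing the condition, so a wakeup arriving between the test and schedule() is not lost. uio_read cannot use the wait_event helpers directly because it has several distinct exits (new data, -EFAULT, -EAGAIN, signal), but the blocking core alone would collapse to roughly this sketch (illustrative helper, not actual UIO code):

/* Sketch: just the sleep-until-new-event part of uio_read, expressed
 * with the kernel's wait_event_interruptible() helper. Returns 0 when
 * the counter has moved, or -ERESTARTSYS if a signal arrived. */
static int uio_wait_for_event(struct uio_device *idev,
			      struct uio_listener *listener)
{
	return wait_event_interruptible(idev->wait,
			atomic_read_unchecked(&idev->event) !=
			listener->event_count);
}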
Example 10
0
static void sht15_bh_read_data(struct work_struct *work_s)
{
	int i;
	uint16_t val = 0;
	struct sht15_data *data =
		container_of(work_s, struct sht15_data, read_work);
	/* Firstly, verify the line is low */
	if (gpio_get_value(data->pdata->gpio_data)) {
		/*
		 * If not, then start the interrupt again - take care
		 * here as the line could have gone low in the meantime,
		 * so verify it hasn't!
		 */
		atomic_set_unchecked(&data->interrupt_handled, 0);
		enable_irq(gpio_to_irq(data->pdata->gpio_data));
		/* If it still hasn't occurred, or another handler has been scheduled */
		if (gpio_get_value(data->pdata->gpio_data)
		    || atomic_read_unchecked(&data->interrupt_handled))
			return;
	}
	/* Read the data back from the device */
	for (i = 0; i < 16; ++i) {
		val <<= 1;
		gpio_set_value(data->pdata->gpio_sck, 1);
		ndelay(SHT15_TSCKH);
		val |= !!gpio_get_value(data->pdata->gpio_data);
		gpio_set_value(data->pdata->gpio_sck, 0);
		ndelay(SHT15_TSCKL);
		if (i == 7)
			sht15_ack(data);
	}
	/* Tell the device we are done */
	sht15_end_transmission(data);

	switch (data->flag) {
	case SHT15_READING_TEMP:
		data->val_temp = val;
		break;
	case SHT15_READING_HUMID:
		data->val_humid = val;
		break;
	}

	data->flag = SHT15_READING_NOTHING;
	wake_up(&data->wait_queue);
}
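The read loop above clocks 16 bits in MSB-first: shift the accumulator left, raise SCK, sample the data line into bit 0, drop SCK, and acknowledge after the first byte. The accumulation step in isolation looks like the following sketch, where read_bit() is a hypothetical stand-in for the clock-and-sample GPIO sequence:

#include <stdint.h>

extern int read_bit(void);	/* hypothetical clock-and-sample step */

static uint16_t read_word_msb_first(void)
{
	uint16_t val = 0;
	int i;

	for (i = 0; i < 16; i++) {
		val <<= 1;		/* make room for the next bit */
		val |= !!read_bit();	/* newest bit lands in bit 0 */
	}
	return val;
}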
Example 11
0
static int uio_open(struct inode *inode, struct file *filep)
{
	struct uio_device *idev;
	struct uio_listener *listener;
	int ret = 0;

	mutex_lock(&minor_lock);
	idev = idr_find(&uio_idr, iminor(inode));
	mutex_unlock(&minor_lock);
	if (!idev) {
		ret = -ENODEV;
		goto out;
	}

	if (!try_module_get(idev->owner)) {
		ret = -ENODEV;
		goto out;
	}

	listener = kmalloc(sizeof(*listener), GFP_KERNEL);
	if (!listener) {
		ret = -ENOMEM;
		goto err_alloc_listener;
	}

	listener->dev = idev;
	listener->event_count = atomic_read_unchecked(&idev->event);
	filep->private_data = listener;

	if (idev->info->open) {
		ret = idev->info->open(idev->info, inode);
		if (ret)
			goto err_infoopen;
	}
	return 0;

err_infoopen:
	kfree(listener);

err_alloc_listener:
	module_put(idev->owner);

out:
	return ret;
}
Example 12
0
int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
{
	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
	unsigned int cur_fence;
	int ret = 0;

	/* Assume that the user has missed the current sequence number
	 * by about a day rather than that she wants to wait for years
	 * using fences.
	 */
	DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
		    (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
		      - *sequence) <= (1 << 23)));

	*sequence = cur_fence;

	return ret;
}
Example 13
0
static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr,
	       src = inet->inet_rcv_saddr;
	__u16 destp = 0,
	      srcp  = inet->inet_num;

	seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
		i, src, srcp, dest, destp, sp->sk_state,
		sk_wmem_alloc_get(sp),
		sk_rmem_alloc_get(sp),
		0, 0L, 0,
		from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		0, sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
}
Example 14
0
static int smtc_proc_show(struct seq_file *m, void *v)
{
	int i;
	extern unsigned long ebase;

	seq_printf(m, "SMTC Status Word: 0x%08x\n", smtc_status);
	seq_printf(m, "Config7: 0x%08x\n", read_c0_config7());
	seq_printf(m, "EBASE: 0x%08lx\n", ebase);
	seq_printf(m, "Counter Interrupts taken per CPU (TC)\n");
	for (i = 0; i < NR_CPUS; i++)
		seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].timerints);
	seq_printf(m, "Self-IPIs by CPU:\n");
	for (i = 0; i < NR_CPUS; i++)
		seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
	seq_printf(m, "%d Recoveries of \"stolen\" FPU\n",
		   atomic_read_unchecked(&smtc_fpu_recoveries));
	return 0;
}
Example 15
0
/**
 *	fill_read_buffer - allocate and fill buffer from object.
 *	@dentry:	dentry pointer.
 *	@buffer:	data buffer for file.
 *
 *	Allocate @buffer->page, if it hasn't been already, then call the
 *	kobject's show() method to fill the buffer with this attribute's 
 *	data. 
 *	This is called only once, on the file's first read unless an error
 *	is returned.
 */
static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer)
{
	struct sysfs_dirent *attr_sd = dentry->d_fsdata;
	struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
	const struct sysfs_ops * ops = buffer->ops;
	int ret = 0;
	ssize_t count;

	if (!buffer->page)
		buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
	if (!buffer->page)
		return -ENOMEM;

	/* need attr_sd for attr and ops, its parent for kobj */
	if (!sysfs_get_active(attr_sd))
		return -ENODEV;

	buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
	count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);

	sysfs_put_active(attr_sd);

	/*
	 * The code works fine with a PAGE_SIZE return, but it's likely to
	 * indicate a truncated result or overflow in normal use cases.
	 */
	if (count >= (ssize_t)PAGE_SIZE) {
		print_symbol("fill_read_buffer: %s returned bad count\n",
			(unsigned long)ops->show);
		/* Try to struggle along */
		count = PAGE_SIZE - 1;
	}
	if (count >= 0) {
		buffer->needs_read_fill = 0;
		buffer->count = count;
	} else {
		ret = count;
	}
	return ret;
}
Example 16
0
/* Sysfs attribute files are pollable.  The idea is that you read
 * the content and then you use 'poll' or 'select' to wait for
 * the content to change.  When the content changes (assuming the
 * manager for the kobject supports notification), poll will
 * return POLLERR|POLLPRI, and select will return the fd whether
 * it is waiting for read, write, or exceptions.
 * Once poll/select indicates that the value has changed, you
 * need to close and re-open the file, or seek to 0 and read again.
 * Reminder: this only works for attributes which actively support
 * it, and it is not possible to test an attribute from userspace
 * to see if it supports poll (Neither 'poll' nor 'select' return
 * an appropriate error code).  When in doubt, set a suitable timeout value.
 */
static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
{
	struct sysfs_buffer * buffer = filp->private_data;
	struct sysfs_dirent *attr_sd = filp->f_path.dentry->d_fsdata;
	struct sysfs_open_dirent *od = attr_sd->s_attr.open;

	/* need parent for the kobj, grab both */
	if (!sysfs_get_active(attr_sd))
		goto trigger;

	poll_wait(filp, &od->poll, wait);

	sysfs_put_active(attr_sd);

	if (buffer->event != atomic_read_unchecked(&od->event))
		goto trigger;

	return DEFAULT_POLLMASK;

 trigger:
	buffer->needs_read_fill = 1;
	return DEFAULT_POLLMASK|POLLERR|POLLPRI;
}
Example 17
0
static bool
statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_statistic_info *info = par->matchinfo;
	bool ret = info->flags & XT_STATISTIC_INVERT;
	int nval, oval;

	switch (info->mode) {
	case XT_STATISTIC_MODE_RANDOM:
		if ((prandom_u32() & 0x7FFFFFFF) < info->u.random.probability)
			ret = !ret;
		break;
	case XT_STATISTIC_MODE_NTH:
		do {
			oval = atomic_read_unchecked(&info->master->count);
			nval = (oval == info->u.nth.every) ? 0 : oval + 1;
		} while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
		if (nval == 0)
			ret = !ret;
		break;
	}

	return ret;
}
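The XT_STATISTIC_MODE_NTH branch is a lock-free modulo counter: read the current value, compute its successor (wrapping to zero on every Nth packet), and publish it with compare-and-swap, retrying if another CPU raced in between. The same loop in portable C11 form, as a sketch (a weak compare-exchange in a loop is the idiomatic equivalent of the kernel's atomic_cmpxchg retry):

#include <stdatomic.h>
#include <stdbool.h>

/* Returns true exactly once every 'every' + 1 calls, even under
 * concurrent callers: losers of the compare-exchange race retry
 * with the freshly observed value. */
static bool hit_every_nth(atomic_int *count, int every)
{
	int oval = atomic_load(count);
	int nval;

	do {
		nval = (oval == every) ? 0 : oval + 1;
	} while (!atomic_compare_exchange_weak(count, &oval, nval));

	return nval == 0;
}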
Example 18
0
void synchronise_count_master(int cpu)
{
	int i;
	unsigned long flags;

	printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);

	local_irq_save(flags);

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronised and
	 * the master and slaves each set their cycle counters to a known
	 * value all at once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */

	for (i = 0; i < NR_LOOPS; i++) {
		/* slaves loop on '!= 2' */
		while (atomic_read_unchecked(&count_count_start) != 1)
			mb();
		atomic_set_unchecked(&count_count_stop, 0);
		smp_wmb();

		/* Let the slave write its count register */
		atomic_inc_unchecked(&count_count_start);

		/* Count will be initialised to current timer */
		if (i == 1)
			initcount = read_c0_count();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		/*
		 * Wait for slave to leave the synchronization point:
		 */
		while (atomic_read_unchecked(&count_count_stop) != 1)
			mb();
		atomic_set_unchecked(&count_count_start, 0);
		smp_wmb();
		atomic_inc_unchecked(&count_count_stop);
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);

	/*
	 * The i386 code reported the skew here, but the
	 * count registers were almost certainly out of sync,
	 * so there is no point in alarming people.
	 */
	printk("done.\n");
}
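Paired with synchronise_count_slave() from Example 1, this implements a repeated two-CPU rendezvous on the count_count_start/count_count_stop counters: each side increments and spins until both have arrived, performs the synchronised work, then repeats the handshake on the stop counter so neither side can begin the next loop early. One round of that handshake, compressed into a userspace sketch with C11 atomics (the between-round resets done by the master, and the master/slave asymmetry, are omitted):

#include <stdatomic.h>

static atomic_int start_count, stop_count;

/* Both participants call this; nparticipants == 2 in the MIPS code. */
static void rendezvous_round(int nparticipants)
{
	atomic_fetch_add(&start_count, 1);
	while (atomic_load(&start_count) != nparticipants)
		;	/* spin until everyone has arrived */

	/* ...synchronised work, e.g. write_c0_count(initcount)... */

	atomic_fetch_add(&stop_count, 1);
	while (atomic_load(&stop_count) != nparticipants)
		;	/* spin until everyone has finished */
}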
Example 19
0
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_printf(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_printf(p, "  IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_printf(p, "  APIC ICR read retries\n");
#endif
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_printf(p, "  Platform interrupts\n");
	}
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_printf(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
					irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_printf(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_printf(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_printf(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_printf(p, "  Machine check polls\n");
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
#endif
	return 0;
}
Example 20
0
u64 arch_irq_stat(void)
{
	u64 sum = atomic_read_unchecked(&irq_err_count);
	return sum;
}
Example 21
0
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t seqno,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_seqno_passed;

	/*
	 * Block command submission while waiting for idle.
	 */

	if (fifo_idle)
		down_read(&fifo_state->rwsem);
	signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, seqno))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/*
			 * FIXME: Use schedule_hr_timeout here for
			 * newer kernels and lower CPU utilization.
			 */

			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle) {
		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
	}
	wake_up_all(&dev_priv->fence_queue);
	if (fifo_idle)
		up_read(&fifo_state->rwsem);

	return ret;
}
Example 22
0
static ssize_t show_event(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uio_device *idev = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n",
		       (unsigned int)atomic_read_unchecked(&idev->event));
}