Example #1
0
/**
 * sht15_update_single_val() - get a new value from device
 * @data:		device instance specific data
 * @command:		command sent to request value
 * @timeout_msecs:	timeout after which comms are assumed
 *			to have failed and are reset.
 **/
static inline int sht15_update_single_val(struct sht15_data *data,
					  int command,
					  int timeout_msecs)
{
	int ret;
	ret = sht15_send_cmd(data, command);
	if (ret)
		return ret;

	gpio_direction_input(data->pdata->gpio_data);
	atomic_set_unchecked(&data->interrupt_handled, 0);

	enable_irq(gpio_to_irq(data->pdata->gpio_data));
	if (gpio_get_value(data->pdata->gpio_data) == 0) {
		disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
		/* Only relevant if the interrupt hasn't occurred. */
		if (!atomic_read_unchecked(&data->interrupt_handled))
			schedule_work(&data->read_work);
	}
	ret = wait_event_timeout(data->wait_queue,
				 (data->flag == SHT15_READING_NOTHING),
				 msecs_to_jiffies(timeout_msecs));
	if (ret == 0) { /* timeout occurred */
		disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
		sht15_connection_reset(data);
		return -ETIME;
	}
	return 0;
}
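For context, a minimal sketch of how a caller might drive sht15_update_single_val() for a temperature reading; the SHT15_MEASURE_TEMP command, the read_lock mutex and the 1000 ms timeout are illustrative assumptions, not taken from the example above.
static int sht15_read_temp(struct sht15_data *data)
{
	int ret;

	/* Hypothetical serialising lock; one request in flight at a time. */
	mutex_lock(&data->read_lock);
	data->flag = SHT15_READING_TEMP;	/* consumed by the work handler */
	ret = sht15_update_single_val(data, SHT15_MEASURE_TEMP, 1000);
	mutex_unlock(&data->read_lock);

	return ret ? ret : data->val_temp;
}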
static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
{
	if (test_and_set_bit(0, &lis3_dev.misc_opened))
		return -EBUSY; /* already open */

	if (lis3_dev.pm_dev)
		pm_runtime_get_sync(lis3_dev.pm_dev);

	atomic_set_unchecked(&lis3_dev.count, 0);
	return 0;
}
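A matching release path, sketched on the assumption that misc_opened is cleared only on the last close; it mirrors the open() above rather than quoting the driver.
static int lis3lv02d_misc_release(struct inode *inode, struct file *file)
{
	if (lis3_dev.pm_dev)
		pm_runtime_put(lis3_dev.pm_dev);

	clear_bit(0, &lis3_dev.misc_opened);	/* allow the next open() */
	return 0;
}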
Example #3
0
static int uPD98402_start(struct atm_dev *dev)
{
	DPRINTK("phy_start\n");
	if (!(dev->dev_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL)))
		return -ENOMEM;
	spin_lock_init(&PRIV(dev)->lock);
	memset(&PRIV(dev)->sonet_stats,0,sizeof(struct k_sonet_stats));
	(void) GET(PCR); /* clear performance events */
	PUT(uPD98402_PFM_FJ,PCMR); /* ignore frequency adj */
	(void) GET(PCOCR); /* clear overflows */
	PUT(~uPD98402_PCO_HECC,PCOMR);
	(void) GET(PICR); /* clear interrupts */
	PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
	  uPD98402_INT_LOS),PIMR); /* enable them */
	(void) fetch_stats(dev,NULL,1); /* clear kernel counters */
	atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
	atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
	atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
	return 0;
}
Example #4
0
static int create_timed_output_class(void)
{
	if (!timed_output_class) {
		timed_output_class = class_create(THIS_MODULE, "timed_output");
		if (IS_ERR(timed_output_class))
			return PTR_ERR(timed_output_class);
		atomic_set_unchecked(&device_count, 0);
		timed_output_class->dev_groups = timed_output_groups;
	}

	return 0;
}
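A sketch of how a registration helper might lean on this lazy class creation; struct timed_output_dev, its index field and the _unchecked increment are assumptions modelled on the pattern above, not copied from the driver.
int timed_output_dev_register(struct timed_output_dev *tdev)
{
	int ret;

	ret = create_timed_output_class();
	if (ret < 0)
		return ret;

	/* Hypothetical per-device index drawn from the counter above. */
	tdev->index = atomic_inc_return_unchecked(&device_count);
	tdev->dev = device_create(timed_output_class, NULL,
				  MKDEV(0, tdev->index), NULL,
				  "%s", tdev->name);
	if (IS_ERR(tdev->dev))
		return PTR_ERR(tdev->dev);

	return 0;
}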
void init_smtc_stats(void)
{
	int i;

	for (i=0; i<NR_CPUS; i++) {
		smtc_cpu_stats[i].timerints = 0;
		smtc_cpu_stats[i].selfipis = 0;
	}

	atomic_set_unchecked(&smtc_fpu_recoveries, 0);

	proc_create("smtc", 0444, NULL, &smtc_proc_fops);
}
Example #6
0
static int statistic_mt_check(const struct xt_mtchk_param *par)
{
	struct xt_statistic_info *info = par->matchinfo;

	if (info->mode > XT_STATISTIC_MODE_MAX ||
	    info->flags & ~XT_STATISTIC_MASK)
		return -EINVAL;

	info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
	if (info->master == NULL)
		return -ENOMEM;
	atomic_set_unchecked(&info->master->count, info->u.nth.count);

	return 0;
}
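The counter initialised here is consumed by the per-packet match in nth mode; a minimal sketch of that side follows, assuming an atomic_cmpxchg_unchecked() helper exists alongside the other _unchecked accessors used in these examples.
static bool statistic_mt_nth(const struct xt_statistic_info *info)
{
	int oval, nval;

	/* Advance the shared counter atomically; fire on every Nth packet. */
	do {
		oval = atomic_read_unchecked(&info->master->count);
		nval = (oval == info->u.nth.every) ? 0 : oval + 1;
	} while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);

	return nval == 0;
}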
Example #7
0
static void sht15_bh_read_data(struct work_struct *work_s)
{
	int i;
	uint16_t val = 0;
	struct sht15_data *data
		= container_of(work_s, struct sht15_data,
			       read_work);
	/* Firstly, verify the line is low */
	if (gpio_get_value(data->pdata->gpio_data)) {
		/* If not, re-enable the interrupt - take care here
		   as the line could have gone low in the meantime,
		   so verify it hasn't!
		*/
		atomic_set_unchecked(&data->interrupt_handled, 0);
		enable_irq(gpio_to_irq(data->pdata->gpio_data));
		/* If it still hasn't occurred or another handler was scheduled */
		if (gpio_get_value(data->pdata->gpio_data)
		    || atomic_read_unchecked(&data->interrupt_handled))
			return;
	}
	/* Read the data back from the device */
	for (i = 0; i < 16; ++i) {
		val <<= 1;
		gpio_set_value(data->pdata->gpio_sck, 1);
		ndelay(SHT15_TSCKH);
		val |= !!gpio_get_value(data->pdata->gpio_data);
		gpio_set_value(data->pdata->gpio_sck, 0);
		ndelay(SHT15_TSCKL);
		if (i == 7)
			sht15_ack(data);
	}
	/* Tell the device we are done */
	sht15_end_transmission(data);

	switch (data->flag) {
	case SHT15_READING_TEMP:
		data->val_temp = val;
		break;
	case SHT15_READING_HUMID:
		data->val_humid = val;
		break;
	}

	data->flag = SHT15_READING_NOTHING;
	wake_up(&data->wait_queue);
}
/**
 *	sysfs_get_open_dirent - get or create sysfs_open_dirent
 *	@sd: target sysfs_dirent
 *	@buffer: sysfs_buffer for this instance of open
 *
 *	If @sd->s_attr.open exists, increment its reference count;
 *	otherwise, create one.  @buffer is chained to the buffers
 *	list.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
				 struct sysfs_buffer *buffer)
{
	struct sysfs_open_dirent *od, *new_od = NULL;

 retry:
	spin_lock_irq(&sysfs_open_dirent_lock);

	if (!sd->s_attr.open && new_od) {
		sd->s_attr.open = new_od;
		new_od = NULL;
	}

	od = sd->s_attr.open;
	if (od) {
		atomic_inc(&od->refcnt);
		list_add_tail(&buffer->list, &od->buffers);
	}

	spin_unlock_irq(&sysfs_open_dirent_lock);

	if (od) {
		kfree(new_od);
		return 0;
	}

	/* not there, initialize a new one and retry */
	new_od = kmalloc(sizeof(*new_od), GFP_KERNEL);
	if (!new_od)
		return -ENOMEM;

	atomic_set(&new_od->refcnt, 0);
	atomic_set_unchecked(&new_od->event, 1);
	init_waitqueue_head(&new_od->poll);
	INIT_LIST_HEAD(&new_od->buffers);
	goto retry;
}
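The reference taken here must be dropped on the last close; a sketch of the matching put helper, assuming the same lock and buffer-list discipline, could look like this.
static void sysfs_put_open_dirent(struct sysfs_dirent *sd,
				  struct sysfs_buffer *buffer)
{
	struct sysfs_open_dirent *od = sd->s_attr.open;
	unsigned long flags;

	spin_lock_irqsave(&sysfs_open_dirent_lock, flags);

	list_del(&buffer->list);
	if (atomic_dec_and_test(&od->refcnt))
		sd->s_attr.open = NULL;	/* last user: detach and free */
	else
		od = NULL;		/* still referenced: free nothing */

	spin_unlock_irqrestore(&sysfs_open_dirent_lock, flags);

	kfree(od);
}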
Example #9
0
static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif /* was needed for dnotify, and will be needed for inotify when the VFS is fixed */
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set_unchecked(&totBufAllocCount, 0);
	atomic_set_unchecked(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	spin_lock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&cifs_file_list_lock);
	spin_lock_init(&GlobalMid_Lock);

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	rc = cifs_fscache_register();
	if (rc)
		goto out_destroy_wq;

	rc = cifs_init_inodecache();
	if (rc)
		goto out_unreg_fscache;

	rc = cifs_init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_UPCALL
	rc = register_key_type(&cifs_spnego_key_type);
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_UPCALL */

#ifdef CONFIG_CIFS_ACL
	rc = init_cifs_idmap();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_ACL */

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	return 0;

out_init_cifs_idmap:
#ifdef CONFIG_CIFS_ACL
	exit_cifs_idmap();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	unregister_key_type(&cifs_spnego_key_type);
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	cifs_destroy_mids();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_unreg_fscache:
	cifs_fscache_unregister();
out_destroy_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}
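The error labels above unwind in strict reverse order of initialisation; a module exit routine would mirror the same sequence, roughly as sketched below (ifdef handling abbreviated, not quoted from the source).
static void __exit
exit_cifs(void)
{
	unregister_filesystem(&cifs_fs_type);
#ifdef CONFIG_CIFS_ACL
	exit_cifs_idmap();
#endif
#ifdef CONFIG_CIFS_UPCALL
	unregister_key_type(&cifs_spnego_key_type);
#endif
	cifs_destroy_request_bufs();
	cifs_destroy_mids();
	cifs_destroy_inodecache();
	cifs_fscache_unregister();
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}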
Example #10
0
/**
 * __uio_register_device - register a new userspace IO device
 * @owner:	module that creates the new device
 * @parent:	parent device
 * @info:	UIO device capabilities
 *
 * returns zero on success or a negative error code.
 */
int __uio_register_device(struct module *owner,
                          struct device *parent,
                          struct uio_info *info)
{
    struct uio_device *idev;
    int ret = 0;

    if (!parent || !info || !info->name || !info->version)
        return -EINVAL;

    info->uio_dev = NULL;

    idev = kzalloc(sizeof(*idev), GFP_KERNEL);
    if (!idev) {
        ret = -ENOMEM;
        goto err_kzalloc;
    }

    idev->owner = owner;
    idev->info = info;
    init_waitqueue_head(&idev->wait);
    atomic_set_unchecked(&idev->event, 0);

    ret = uio_get_minor(idev);
    if (ret)
        goto err_get_minor;

    idev->dev = device_create(&uio_class, parent,
                              MKDEV(uio_major, idev->minor), idev,
                              "uio%d", idev->minor);
    if (IS_ERR(idev->dev)) {
        printk(KERN_ERR "UIO: device register failed\n");
        ret = PTR_ERR(idev->dev);
        goto err_device_create;
    }

    ret = uio_dev_add_attributes(idev);
    if (ret)
        goto err_uio_dev_add_attributes;

    info->uio_dev = idev;

    if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
        ret = request_irq(info->irq, uio_interrupt,
                          info->irq_flags, info->name, idev);
        if (ret)
            goto err_request_irq;
    }

    return 0;

err_request_irq:
    uio_dev_del_attributes(idev);
err_uio_dev_add_attributes:
    device_destroy(&uio_class, MKDEV(uio_major, idev->minor));
err_device_create:
    uio_free_minor(idev);
err_get_minor:
    kfree(idev);
err_kzalloc:
    return ret;
}
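A minimal caller sketch, assuming the usual uio_register_device() wrapper that supplies THIS_MODULE for @owner; my_uio_probe(), the "my-uio" name and the devm allocation are hypothetical.
static int my_uio_probe(struct platform_device *pdev)
{
	struct uio_info *info;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->name = "my-uio";		/* hypothetical device name */
	info->version = "0.1";
	info->irq = UIO_IRQ_NONE;	/* no interrupt wired up in this sketch */

	/* Expands to __uio_register_device(THIS_MODULE, &pdev->dev, info). */
	return uio_register_device(&pdev->dev, info);
}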
Example #11
0
static int handle_op(struct test_thread_data *td, int lockwakeup)
{
	int i, id, ret = -EINVAL;

	switch(td->opcode) {

	case RTTEST_NOP:
		return 0;

	case RTTEST_LOCKCONT:
		td->mutexes[td->opdata] = 1;
		td->event = atomic_add_return_unchecked(1, &rttest_event);
		return 0;

	case RTTEST_RESET:
		for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {
			if (td->mutexes[i] == 4) {
				rt_mutex_unlock(&mutexes[i]);
				td->mutexes[i] = 0;
			}
		}
		return 0;

	case RTTEST_RESETEVENT:
		atomic_set_unchecked(&rttest_event, 0);
		return 0;

	default:
		if (lockwakeup)
			return ret;
	}

	switch(td->opcode) {

	case RTTEST_LOCK:
	case RTTEST_LOCKNOWAIT:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
			return ret;

		td->mutexes[id] = 1;
		td->event = atomic_add_return_unchecked(1, &rttest_event);
		rt_mutex_lock(&mutexes[id]);
		td->event = atomic_add_return_unchecked(1, &rttest_event);
		td->mutexes[id] = 4;
		return 0;

	case RTTEST_LOCKINT:
	case RTTEST_LOCKINTNOWAIT:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
			return ret;

		td->mutexes[id] = 1;
		td->event = atomic_add_return_unchecked(1, &rttest_event);
		ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
		td->event = atomic_add_return_unchecked(1, &rttest_event);
		td->mutexes[id] = ret ? 0 : 4;
		return ret ? -EINTR : 0;

	case RTTEST_UNLOCK:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
			return ret;

		td->event = atomic_add_return_unchecked(1, &rttest_event);
		rt_mutex_unlock(&mutexes[id]);
		td->event = atomic_add_return_unchecked(1, &rttest_event);
		td->mutexes[id] = 0;
		return 0;

	default:
		break;
	}
	return ret;
}
Example #12
0
void synchronise_count_master(int cpu)
{
	int i;
	unsigned long flags;

	printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);

	local_irq_save(flags);

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronised and
	 * the master and slaves each set their cycle counters to a known
	 * value all at once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */

	for (i = 0; i < NR_LOOPS; i++) {
		/* slaves loop on '!= 2' */
		while (atomic_read_unchecked(&count_count_start) != 1)
			mb();
		atomic_set_unchecked(&count_count_stop, 0);
		smp_wmb();

		/* Let the slave write its count register */
		atomic_inc_unchecked(&count_count_start);

		/* Count will be initialised to current timer */
		if (i == 1)
			initcount = read_c0_count();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		/*
		 * Wait for slave to leave the synchronization point:
		 */
		while (atomic_read_unchecked(&count_count_stop) != 1)
			mb();
		atomic_set_unchecked(&count_count_start, 0);
		smp_wmb();
		atomic_inc_unchecked(&count_count_stop);
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);

	/*
	 * i386 code reported the skew here, but the
	 * count registers were almost certainly out of sync
	 * so no point in alarming people
	 */
	printk("done.\n");
}
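The '!= 1' / '!= 2' handshake only makes sense next to the slave side; a sketch of that counterpart, reusing the same _unchecked accessors and the shared initcount/COUNTON values, is given below.
void synchronise_count_slave(int cpu)
{
	int i;

	for (i = 0; i < NR_LOOPS; i++) {
		/* Tell the master we are here, then wait for its go-ahead. */
		atomic_inc_unchecked(&count_count_start);
		while (atomic_read_unchecked(&count_count_start) != 2)
			mb();

		/* Everyone initialises count in the last loop. */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		/* Signal the master and wait for it to finish this pass. */
		atomic_inc_unchecked(&count_count_stop);
		while (atomic_read_unchecked(&count_count_stop) != 2)
			mb();
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);
}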