Example #1
1
/*
 * uart_device_open() - called when opening the device
 */
static int uart_device_open(struct inode *inode, struct file *file)
{
	struct uart_device_data *dev_data =
	    container_of(inode->i_cdev, struct uart_device_data, cdev);
	if (atomic_cmpxchg(&dev_data->access, 1, 0) != 1)
		return -EBUSY;
	file->private_data = dev_data;
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(10);
	return 0;
}
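For context, a device claimed this way is normally freed again in the matching release() handler. A minimal sketch of that counterpart, assuming the same uart_device_data layout (the function itself is hypothetical and not part of the original example):

/*
 * Hypothetical counterpart to uart_device_open(): clear the exclusive
 * access flag so the next open() can succeed again.
 */
static int uart_device_release(struct inode *inode, struct file *file)
{
	struct uart_device_data *dev_data = file->private_data;

	atomic_set(&dev_data->access, 1);	/* mark the device as free */
	return 0;
}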
Example #2
0
static int etb_open(struct inode *inode, struct file *file)
{
	struct etb_drvdata *drvdata = container_of(file->private_data,
						   struct etb_drvdata, miscdev);

	if (atomic_cmpxchg(&drvdata->in_use, 0, 1))
		return -EBUSY;

	dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
	return 0;
}
static int ami306daemon_open(struct inode *inode, struct file *file)
{
	int res = -1;

	if (atomic_cmpxchg(&daemon_open_count, 0, 1) == 0) {
		if (AMI306_DEBUG_FUNC_TRACE & ami306_debug_mask)
			AMID("Open device node:ami306daemon\n");
		res = 0;
	}
	return res;
}
FailoverStatus failover_set_state(FailoverStatus old_state,
                    FailoverStatus new_state)
{
    FailoverStatus old;

    old = atomic_cmpxchg(&failover_state, old_state, new_state);
    if (old == old_state) {
        trace_colo_failover_set_state(FailoverStatus_lookup[new_state]);
    }
    return old;
}
static int yas_pseudo_irq_disable(struct iio_dev *indio_dev)
{
	struct yas_state *st = iio_priv(indio_dev);
	if (atomic_cmpxchg(&st->pseudo_irq_enable, 1, 0)) {
		cancel_delayed_work_sync(&st->work);
		mutex_lock(&st->lock);
		st->mag.set_enable(0);
		mutex_unlock(&st->lock);
	}
	return 0;
}
Example #6
0
static int ami304daemon_open(struct inode *inode, struct file *file)
{
	//return nonseekable_open(inode, file);
	int ret = -1;
	if (atomic_cmpxchg(&daemon_open_count, 0, 1) == 0) {
		if (AMI304_DEBUG_FUNC_TRACE & ami304_debug_mask)
			AMID("Open device node:ami304daemon\n");
		ret = 0;
	}
	return ret;
}
struct data_path *data_path_open(struct data_path_callback *cbs)
{
	struct data_path *dp = &data_path;

	if (!cbs) {
		pr_err("%s: cbs is NULL\n", __func__);
		return NULL;
	}

	if (atomic_cmpxchg(&dp->state, dp_state_idle,
			   dp_state_opening) != dp_state_idle) {
		pr_err("%s: path is already opened(state %d)\n",
			 __func__, atomic_read(&dp->state));
		return NULL;
	}

	dp->tx_q_max_len = MAX_TX_Q_LEN;
	dp->is_tx_stopped = false;
	tx_q_init(dp);
	dp->tx_wm[dp_priority_high] = 0;
	dp->tx_wm[dp_priority_default]
		= dp->rbctl->tx_skbuf_num / 10;

	dp->enable_piggyback = true;

	dp->max_tx_shots = MAX_TX_SHOTS;
	dp->max_rx_shots = MAX_RX_SHOTS;

	dp->tx_sched_delay_in_ms = TX_SCHED_DELAY;
	dp->tx_q_min_sched_len = TX_MIN_SCHED_LEN;

	memset(&dp->stat, 0, sizeof(dp->stat));

	dp->cbs = cbs;
	tasklet_init(&dp->tx_tl, data_path_tx_func,
		     (unsigned long)dp);
	tasklet_init(&dp->rx_tl, data_path_rx_func,
		     (unsigned long)dp);

	init_timer(&dp->tx_sched_timer);
	dp->tx_sched_timer.function = tx_sched_timeout;
	dp->tx_sched_timer.data =
		(unsigned long)dp;

	if (dp_debugfs_init(dp) < 0) {
		pr_err("%s: debugfs failed\n", __func__);
		atomic_set(&dp->state, dp_state_idle);
		return NULL;
	}

	atomic_set(&dp->state, dp_state_opened);

	return dp;
}
static int yas_pseudo_irq_enable(struct iio_dev *indio_dev)
{
	struct yas_state *st = iio_priv(indio_dev);
	if (!atomic_cmpxchg(&st->pseudo_irq_enable, 0, 1)) {
		mutex_lock(&st->lock);
		st->mag.set_enable(1);
		mutex_unlock(&st->lock);
		schedule_delayed_work(&st->work, 0);
	}
	return 0;
}
Example #9
0
/*
 * free_dma_channel - free a previously allocated channel once its DMAs
 *                    have been submitted
 *
 * @chan - pointer to the dma_channel struct that was allocated
 *
 * Returns 0 on success, < 0 on error (errno)
 *
 * NOTE: This function must be called after all do_dma calls are finished,
 *  but can be called before the DMAs actually complete (as long as the comp_cb()
 *  handler in do_dma doesn't refer to the dma_channel struct).  If called with a
 *  dynamically allocated dma_chan, the caller must be the thread that called
 *  allocate_dma_chan.  When operating on a dynamic channel, free unlocks the
 *  mutex locked in allocate.  Statically allocated channels cannot be freed,
 *  and calling this function with that type of channel will return an error.
 */
int
free_dma_channel(struct dma_channel *chan)
{
	/*
	 * Why can't we use this function with channels that were statically allocated??
	 */
	BUG_ON(CHAN_INUSE !=
		atomic_cmpxchg(&chan->flags, CHAN_INUSE, CHAN_AVAILABLE));
	wake_up(&chan->access_wq);
	return 0;
}
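For illustration, a hypothetical caller that follows the contract above; allocate_dma_chan() and do_dma() are the helpers named in the comment, but their exact signatures are assumed here:

/*
 * Usage sketch only: the signatures of allocate_dma_chan() and do_dma()
 * are assumptions for illustration, not taken from the original driver.
 */
static int submit_and_release(dma_addr_t src, dma_addr_t dst, size_t len)
{
	struct dma_channel *chan;
	int err;

	chan = allocate_dma_chan();	/* dynamic channel: locks its mutex */
	if (!chan)
		return -EBUSY;

	err = do_dma(chan, src, dst, len, NULL);	/* no comp_cb */

	/* Legal even before the DMA completes, per the NOTE above. */
	free_dma_channel(chan);
	return err;
}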
Example #10
0
/*
 * queue_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure.
 */
void queue_write_lock_slowpath(rwlock_t *lock)
{
    u32 cnts;

    /* Put the writer into the wait queue. */
    spin_lock(&lock->lock);

    /* Try to acquire the lock directly if no reader is present. */
    if ( !atomic_read(&lock->cnts) &&
         (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0) )
        goto unlock;

    /*
     * Set the waiting flag to notify readers that a writer is pending,
     * or wait for a previous writer to go away.
     */
    for ( ; ; )
    {
        cnts = atomic_read(&lock->cnts);
        if ( !(cnts & _QW_WMASK) &&
             (atomic_cmpxchg(&lock->cnts, cnts,
                             cnts | _QW_WAITING) == cnts) )
            break;

        cpu_relax();
    }

    /* When no more readers, set the locked flag. */
    for ( ; ; )
    {
        cnts = atomic_read(&lock->cnts);
        if ( (cnts == _QW_WAITING) &&
             (atomic_cmpxchg(&lock->cnts, _QW_WAITING,
                             _QW_LOCKED) == _QW_WAITING) )
            break;

        cpu_relax();
    }
 unlock:
    spin_unlock(&lock->lock);
}
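The slow path above is only reached after a cheaper attempt fails; a sketch of what that companion fast path typically looks like in this qrwlock scheme (the helper name and exact shape are assumptions):

/*
 * Sketch of the assumed fast path: take the lock with a single cmpxchg
 * when no readers or writer are present, otherwise fall back to the
 * slow path above.
 */
static inline void queue_write_lock(rwlock_t *lock)
{
    /* Optimize for the unlocked case. */
    if ( atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0 )
        return;

    queue_write_lock_slowpath(lock);
}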
static void sensor_set_enable(struct device *dev, int enable)
{
	struct input_dev *inputdev = to_input_dev(dev);
	struct sensor_data *sensordata = input_get_drvdata(inputdev);
	int delay = atomic_read(&sensordata->delay);

	dbg_func_in();

	mutex_lock(&sensordata->enable_mutex);

	if (enable) {                   /* enable if state will be changed */
		if (!atomic_cmpxchg(&sensordata->enable, 0, 1)) {
			sensordata->status = apds9900_control_enable(APDS9900_TYPE_PROXIMITY, true) ? 0 : 1;
			if (sensordata->status) {
				schedule_delayed_work(&sensordata->work, delay_to_jiffies(delay) + 1);
			}
		}
	} else {                        /* disable if state will be changed */
		if (atomic_cmpxchg(&sensordata->enable, 1, 0) && sensordata->status) {
			if (sensordata->status) {
				cancel_delayed_work_sync(&sensordata->work);
				sensordata->status = apds9900_control_enable(APDS9900_TYPE_PROXIMITY, false) ? 0 : 1;
			}
		}
	}
	atomic_set(&sensordata->enable, enable);

	mutex_unlock(&sensordata->enable_mutex);
	
#if 1	/* to turn the LCD off while a call is in progress */
	if (input_pdev != NULL) {
		input_report_abs(input_pdev, ABS_X, -1);
		input_report_abs(input_pdev, ABS_Y, -1);
		input_report_abs(input_pdev, ABS_Z, -1);
		input_sync(input_pdev);
	}
#endif

	dbg_func_out();
}
Example #12
0
/**
 * Once-only initialization.
 * @param  once_control The control variable, which is initialized to PTHREAD_ONCE_INIT.
 * @param  init_routine The initialization code, which is executed at most once.
 * @return Always returns 0.
 */
int pthread_once(pthread_once_t *once_control, void (* init_routine)(void))
{
    if (atomic_cmpxchg((long volatile *) once_control, 1, 0) == 0) {
        init_routine();
        *(volatile int *) once_control = 2;
    } else {
        while(*(volatile int *) once_control != 2)
            SwitchToThread();
    }

    return 0;
}
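A minimal usage sketch of this API (the init_done variable and global_init() routine are illustrative names, not from the original source):

static pthread_once_t init_done = PTHREAD_ONCE_INIT;

static void global_init(void)
{
    /* one-time setup, e.g. allocating shared state */
}

void worker(void)
{
    /* global_init() runs exactly once, however many threads call this */
    pthread_once(&init_done, global_init);
    /* ... safe to use the initialized state here ... */
}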
Example #13
0
int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		/**
		 * Deadlock avoidance for multi-bo reserving.
		 */
		if (use_sequence && bo->seq_valid) {
			/**
			 * We've already reserved this one.
			 */
			if (unlikely(sequence == bo->val_seq))
				return -EDEADLK;
			/**
			 * Already reserved by a thread that will not back
			 * off for us. We need to back off.
			 */
			if (unlikely(sequence - bo->val_seq < (1 << 31)))
				return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_wait_unreserved(bo, interruptible);
		spin_lock(&glob->lru_lock);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
		/**
		 * Wake up waiters that may need to recheck for deadlock,
		 * if we decreased the sequence number.
		 */
		if (unlikely((bo->val_seq - sequence < (1 << 31))
			     || !bo->seq_valid))
			wake_up_all(&bo->event_queue);

		bo->val_seq = sequence;
		bo->seq_valid = true;
	} else {
		bo->seq_valid = false;
	}

	return 0;
}
Example #14
0
static void bma023_set_enable(struct device *dev, int enable)
{
	struct bma023_data *bma023 = dev_get_drvdata(dev);
	int delay = atomic_read(&bma023->delay);

	mutex_lock(&bma023->enable_mutex);
	if (enable) { /* enable if state will be changed */
		if (!atomic_cmpxchg(&bma023->enable, 0, 1)) {
			bma023_power_up(bma023);
			schedule_delayed_work(&bma023->work,
					      delay_to_jiffies(delay) + 1);
		}
	} else { /* disable if state will be changed */
		if (atomic_cmpxchg(&bma023->enable, 1, 0)) {
			cancel_delayed_work_sync(&bma023->work);
			bma023_power_down(bma023);
		}
	}
	atomic_set(&bma023->enable, enable);
	mutex_unlock(&bma023->enable_mutex);
}
Example #15
0
/**
 * bus1_active_deactivate() - deactivate object
 * @active:	object to deactivate
 *
 * This deactivates the given object, if not already done by someone else. Once
 * this returns, no new active references can be acquired.
 *
 * Return: True if this call deactivated the object, false if it was already
 *         deactivated by someone else.
 */
bool bus1_active_deactivate(struct bus1_active *active)
{
	int v;

	v = atomic_cmpxchg(&active->count,
			   BUS1_ACTIVE_NEW, BUS1_ACTIVE_RELEASE_DIRECT);
	if (v != BUS1_ACTIVE_NEW)
		v = bus1_atomic_add_unless_negative(&active->count,
						    BUS1_ACTIVE_BIAS);

	return v;
}
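A small call-site sketch (the surrounding teardown function is hypothetical): only the caller that wins the deactivation race should perform the one-time cleanup.

/* Hypothetical caller of bus1_active_deactivate() */
static void peer_teardown(struct bus1_active *active)
{
	if (bus1_active_deactivate(active)) {
		/* we deactivated the object: no new active references can
		 * be acquired, so release its resources exactly once here */
	}
}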
/*
 * Prior to WS2016, Debug-VM sends NMIs to all CPUs, which makes it
 * difficult to process CHANNELMSG_UNLOAD in case of crash. Handle the
 * unknown NMI on the first CPU that gets it.
 */
static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
{
	static atomic_t nmi_cpu = ATOMIC_INIT(-1);

	if (!unknown_nmi_panic)
		return NMI_DONE;

	if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1)
		return NMI_HANDLED;

	return NMI_DONE;
}
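For reference, a handler like this is hooked up through the x86 NMI handler API; a plausible registration call is sketched below (the flags and call site are assumptions, not taken from this example):

/* Assumed registration, e.g. from platform setup code: run first on
 * unknown NMIs so the Hyper-V quirk is handled before other handlers. */
register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST,
		     "hv_nmi_unknown");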
void kbase_timeline_pm_check_handle_event(kbase_device *kbdev, kbase_timeline_pm_event event)
{
	int uid = atomic_read(&kbdev->timeline.pm_event_uid[event]);

	if (uid != 0) {
		if (uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event], uid, 0))
			/* If it changed, raced with another consumer: we've lost this UID */
			uid = 0;

		KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event, uid);
	}
}
Example #18
0
/*
 * A variant of panic() called from NMI context. We return if we've already
 * panicked on this CPU. If another CPU already panicked, loop in
 * nmi_panic_self_stop() which can provide architecture dependent code such
 * as saving register state for crash dump.
 */
void nmi_panic(struct pt_regs *regs, const char *msg)
{
	int old_cpu, cpu;

	cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu);

	if (old_cpu == PANIC_CPU_INVALID)
		panic("%s", msg);
	else if (old_cpu != cpu)
		nmi_panic_self_stop(regs);
}
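The supporting definitions this pattern relies on look roughly like the following (reconstructed from memory, so treat them as an assumption rather than a quote of the upstream source):

/* Sentinel meaning "no CPU has panicked yet"; panic_cpu records the first
 * CPU to enter panic so other CPUs spin in nmi_panic_self_stop() instead
 * of racing into panic() themselves. */
#define PANIC_CPU_INVALID	-1

atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);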
Example #19
0
static int ami304_open(struct inode *inode, struct file *file)
{
	int ret = -1;
#if DEBUG_AMI304
	printk(KERN_ERR "ami304 - %s\n", __FUNCTION__);
#endif
	if (atomic_cmpxchg(&dev_open_count, 0, 1) == 0) {
		printk(KERN_INFO "Open device node:ami304\n");
		ret = nonseekable_open(inode, file);
	}
	return ret;
}
Example #20
0
static int boost_migration_notify(struct notifier_block *nb,
				unsigned long unused, void *arg)
{
	struct migration_notify_data *mnd = arg;
	unsigned long flags;
	struct cpu_sync *s = &per_cpu(sync_info, mnd->dest_cpu);

#ifdef CONFIG_IRLED_GPIO
	if (unlikely(gir_boost_disable)) {
		pr_debug("[GPIO_IR][%s] continue~!(cpu:%d)\n", 
			__func__, raw_smp_processor_id());
		return NOTIFY_OK;
	}
#endif

	if (load_based_syncs && (mnd->load <= migration_load_threshold))
		return NOTIFY_OK;

	if (load_based_syncs && ((mnd->load < 0) || (mnd->load > 100))) {
		pr_err("cpu-boost:Invalid load: %d\n", mnd->load);
		return NOTIFY_OK;
	}

	if (!load_based_syncs && (mnd->src_cpu == mnd->dest_cpu))
		return NOTIFY_OK;

	if (!boost_ms)
		return NOTIFY_OK;

	/* Avoid deadlock in try_to_wake_up() */
	if (s->thread == current)
		return NOTIFY_OK;

	pr_debug("Migration: CPU%d --> CPU%d\n", mnd->src_cpu, mnd->dest_cpu);
	spin_lock_irqsave(&s->lock, flags);
	s->pending = true;
	s->src_cpu = mnd->src_cpu;
	s->task_load = load_based_syncs ? mnd->load : 0;
	spin_unlock_irqrestore(&s->lock, flags);
	/*
	 * Avoid issuing a recursive wakeup call, as the sync thread itself
	 * could be seen as migrating, triggering this notification. Note that
	 * the sync thread of a cpu could be running for a short while with
	 * its affinity broken because of CPU hotplug.
	 */
	if (!atomic_cmpxchg(&s->being_woken, 0, 1)) {
		wake_up(&s->sync_wq);
		atomic_set(&s->being_woken, 0);
	}

	return NOTIFY_OK;
}
int open_send_current(int (*send)(int))
{
	if (!atomic_cmpxchg(&enabled, 0, 1)) {
		schedule_delayed_work(&read_current_work,
				msecs_to_jiffies(READ_CURRENT_INTERVAL));

		send_func = send;
	} else {
		printk("%s allready opend\n", __func__);
	}

	return 0;
}
int iom3_need_recovery(void)
{
	int ret = 0;
	int old_state;
	old_state = atomic_cmpxchg(&iom3_rec_state, IOM3_RECOVERY_IDLE, IOM3_RECOVERY_START);
	hwlog_err("recovery prev state %d\n", old_state);
	if (old_state == IOM3_RECOVERY_IDLE) { /* previous state was IDLE, start recovery */
		wake_lock_timeout(&iom3_rec_wl, 5*HZ);
		blocking_notifier_call_chain(&iom3_recovery_notifier_list, IOM3_RECOVERY_START, NULL);
		queue_delayed_work(iom3_rec_wq, &iom3_rec_work, 0);
	}
	return ret;
}
Example #23
0
int sfh7743_enable(struct sfh7743_data *sfh)
{
    int err;

    if (!atomic_cmpxchg(&sfh->enabled, 0, 1)) {
        err = sfh7743_device_power_on(sfh);
        if (err) {
            atomic_set(&sfh->enabled, 0);
            return err;
        }
    }
    return 0;
}
static int asetm2034a_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct asetm2034a_drvdata *drvdata = i2c_get_clientdata(client);

	if (atomic_cmpxchg(&drvdata->suspend_lock, 0, 1))
		return -EAGAIN;

	if (device_may_wakeup(dev))
		enable_irq_wake(drvdata->client->irq);

	return 0;
}
Example #25
0
/**
 * Updates the last value read from hardware.
 * (was nvhost_syncpt_update_min)
 */
static u32 t20_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);
	void __iomem *sync_regs = dev->sync_aperture;
	u32 old, live;

	do {
		old = nvhost_syncpt_read_min(sp, id);
		live = readl(sync_regs + (host1x_sync_syncpt_0_r() + id * 4));
	} while ((u32)atomic_cmpxchg(&sp->min_val[id], old, live) != old);

	return live;
}
Example #26
0
/* Drop ref count to 1 to effectively disable get_stor_device() */
static inline struct storvsc_device *release_stor_device(
					struct hv_device *device)
{
	struct storvsc_device *stor_device;

	stor_device = (struct storvsc_device *)device->ext;

	/* Busy-wait until the ref count drops to 2, then set it to 1 */
	while (atomic_cmpxchg(&stor_device->ref_count, 2, 1) != 2)
		udelay(100);

	return stor_device;
}
Example #27
0
static int ecs_ctrl_open(struct inode *inode, struct file *file)
{
#if 1
	atomic_set(&reserve_open_flag, 1);
	atomic_set(&open_flag, 1);
	atomic_set(&open_count, 1);
	wake_up(&open_wq);

	return 0;
#else
	int ret = -1;

	if (atomic_cmpxchg(&open_count, 0, 1) == 0) {
		if (atomic_cmpxchg(&open_flag, 0, 1) == 0) {
			atomic_set(&reserve_open_flag, 1);
			wake_up(&open_wq);
			ret = 0;
		}
	}

	return ret;
#endif
}
Example #28
0
int ring_append(struct ring_buf *ring, void *elem)
{
	unsigned int back;

	for (;;) {
		back = ring_back(ring);
		if (back - ring_front(ring) >= ring->length)
			return -1;
		if ((unsigned int) atomic_cmpxchg(&ring->back, back, back + 1) == back)
			break;
	}
	ring->data[back % ring->length] = elem;
	return 0;
}
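The ring_front()/ring_back() helpers and the ring_buf layout are not shown; a minimal sketch of what this lock-free append assumes (field and helper names are inferred for illustration):

/* Assumed layout: back is the producer cursor, front the consumer cursor;
 * both grow monotonically and indices are reduced modulo length. */
struct ring_buf {
	atomic_t	back;	/* next slot a producer will claim */
	atomic_t	front;	/* next slot the consumer will read */
	unsigned int	length;	/* capacity in slots */
	void		**data;	/* storage, indexed back % length */
};

static inline unsigned int ring_back(struct ring_buf *ring)
{
	return (unsigned int)atomic_read(&ring->back);
}

static inline unsigned int ring_front(struct ring_buf *ring)
{
	return (unsigned int)atomic_read(&ring->front);
}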
Example #29
0
static void nft_ng_inc_eval(const struct nft_expr *expr,
                            struct nft_regs *regs,
                            const struct nft_pktinfo *pkt)
{
    struct nft_ng_inc *priv = nft_expr_priv(expr);
    u32 nval, oval;

    do {
        oval = atomic_read(&priv->counter);
        nval = (oval + 1 < priv->modulus) ? oval + 1 : 0;
    } while (atomic_cmpxchg(&priv->counter, oval, nval) != oval);

    regs->data[priv->dreg] = nval + priv->offset;
}
Example #30
0
static struct netvsc_device *ReleaseOutboundNetDevice(struct hv_device *Device)
{
	struct netvsc_device *netDevice;

	netDevice = Device->Extension;
	if (netDevice == NULL)
		return NULL;

	/* Busy-wait until the ref count drops to 2, then set it to 1 */
	while (atomic_cmpxchg(&netDevice->RefCount, 2, 1) != 2)
		udelay(100);

	return netDevice;
}