* LIS302D spec says: 18 mG / digit
 * LIS3_ACCURACY is used to increase accuracy of the intermediate
 * calculation results.
 */
/* Fixed-point scale factor for intermediate sensitivity math. */
#define LIS3_ACCURACY			1024
/* Sensitivity values for -2G +2G scale */
/* 12-bit mode: 1 mG/digit, expressed in LIS3_ACCURACY units. */
#define LIS3_SENSITIVITY_12B		((LIS3_ACCURACY * 1000) / 1024)
/* 8-bit mode: 18 mG/digit (per the LIS302D datasheet note above). */
#define LIS3_SENSITIVITY_8B		(18 * LIS3_ACCURACY)

/* Default input-device fuzz/flat values per sample width (raw counts). */
#define LIS3_DEFAULT_FUZZ_12B		3
#define LIS3_DEFAULT_FLAT_12B		3
#define LIS3_DEFAULT_FUZZ_8B		1
#define LIS3_DEFAULT_FLAT_8B		1

/*
 * Single global device instance; exported so bus-glue modules
 * (e.g. the I2C/SPI front ends) can reference it.
 */
struct lis3lv02d lis3_dev = {
	.misc_wait   = __WAIT_QUEUE_HEAD_INITIALIZER(lis3_dev.misc_wait),
};
EXPORT_SYMBOL_GPL(lis3_dev);

/* just like param_set_int() but does sanity-check so that it won't point
 * over the axis array size
 */
static int param_set_axis(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);
	if (!ret) {
		int val = *(int *)kp->arg;
		if (val < 0)
			val = -val;
		if (!val || val > 3)
			return -EINVAL;
Exemple #2
0
	if (err)
		return err;

	err = bdi_register(bdi, NULL, "%.28s-%ld", name,
			   atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);

/*
 * Wait queues for tasks throttled on writeback congestion,
 * indexed by sync: [0] = async, [1] = sync (see clear_wb_congested()).
 */
static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
/* Count of currently congested writeback contexts, same indexing. */
static atomic_t nr_wb_congested[2];

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	wait_queue_head_t *wqh = &congestion_wqh[sync];
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (test_and_clear_bit(bit, &congested->state))
		atomic_dec(&nr_wb_congested[sync]);
	smp_mb__after_atomic();
	if (waitqueue_active(wqh))
		wake_up(wqh);
Exemple #3
0
#endif

#include "power.h"

/*
 * Sysfs-visible names for the supported suspend states, indexed by
 * suspend_state_t. "on" is only present with Android earlysuspend.
 */
const char *const pm_states[PM_SUSPEND_MAX] = {
#ifdef CONFIG_EARLYSUSPEND
	[PM_SUSPEND_ON]		= "on",
#endif
	[PM_SUSPEND_STANDBY]	= "standby",
	[PM_SUSPEND_MEM]	= "mem",
};

/* Platform-provided suspend callbacks; set via suspend_set_ops(). */
static const struct platform_suspend_ops *suspend_ops;

/*
 * NOTE(review): open-coded struct completion initializer — the leading 1
 * appears to pre-set .done; this is normally spelled
 * COMPLETION_INITIALIZER(second_cpu_complete). Confirm before changing.
 */
static struct completion second_cpu_complete = {1,
	__WAIT_QUEUE_HEAD_INITIALIZER((second_cpu_complete).wait)
};

/**
 *	suspend_set_ops - Set the global suspend method table.
 *	@ops:	Pointer to ops structure.
 *
 *	The assignment is done under pm_mutex so it cannot race with a
 *	suspend transition that is reading suspend_ops.
 */
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
	mutex_lock(&pm_mutex);
	suspend_ops = ops;
	mutex_unlock(&pm_mutex);
}

bool valid_state(suspend_state_t state)
{
#include "ccid.h"
#include "dccp.h"
#include "feat.h"

/* Per-CPU SNMP counters for the DCCP MIB; exported for the AF glue. */
DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

/* Count of orphaned DCCP sockets — presumably sockets closed by the
 * user but still held by the stack; verify against callers. */
atomic_t dccp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(dccp_orphan_count);

/* Global DCCP socket hash tables plus the lock/waitqueue guarding the
 * listening hash. */
struct inet_hashinfo __cacheline_aligned dccp_hashinfo = {
	.lhash_lock	= RW_LOCK_UNLOCKED,
	.lhash_users	= ATOMIC_INIT(0),
	.lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(dccp_hashinfo.lhash_wait),
};

EXPORT_SYMBOL_GPL(dccp_hashinfo);

/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;

void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p)  %s  -->  %s\n", dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);
/* Forward declaration: deferred-work handler, defined later in this file. */
static void exi_tasklet(unsigned long param);


/* Mapped (__iomem) register base for the EXI controller. */
static void __iomem *exi_io_mem;


/*
 * These are the available exi channels.
 */
static struct exi_channel exi_channels[EXI_MAX_CHANNELS] = {
	[0] = {
		.channel = 0,
		.lock = __SPIN_LOCK_UNLOCKED(exi_channels[0].lock),
		.io_lock = __SPIN_LOCK_UNLOCKED(exi_channels[0].io_lock),
		.wait_queue = __WAIT_QUEUE_HEAD_INITIALIZER(
				exi_channels[0].wait_queue),
	},
	[1] = {
		.channel = 1,
		.lock = __SPIN_LOCK_UNLOCKED(exi_channels[1].lock),
		.io_lock = __SPIN_LOCK_UNLOCKED(exi_channels[1].io_lock),
		.wait_queue = __WAIT_QUEUE_HEAD_INITIALIZER(
				exi_channels[1].wait_queue),
	},
	[2] = {
		.channel = 2,
		.lock = __SPIN_LOCK_UNLOCKED(exi_channels[2].lock),
		.io_lock = __SPIN_LOCK_UNLOCKED(exi_channels[2].io_lock),
		.wait_queue = __WAIT_QUEUE_HEAD_INITIALIZER(
				exi_channels[2].wait_queue),
	},
Exemple #6
0
/*
 * Report this adapter's feature set: plain I2C transfers plus the
 * SMBus commands that the core can emulate on top of master_xfer.
 */
static u32 i2c_vr_functionality(struct i2c_adapter *adap)
{
	u32 features = I2C_FUNC_SMBUS_EMUL;

	features |= I2C_FUNC_I2C;
	return features;
}

/* Transfer algorithm hooks registered with the I2C core for this bus. */
static struct i2c_algorithm i2c_vr_algorithm = {
	.id = I2C_ALGO_PXA,
	.master_xfer = i2c_vr_xfer,
	.functionality = i2c_vr_functionality,
};

static struct vr_i2c i2c_vr[I2C_VR_ADAP_NR] = {
	{
	 .lock = SPIN_LOCK_UNLOCKED,
	 .wait = __WAIT_QUEUE_HEAD_INITIALIZER(i2c_vr[0].wait),
	 .adap = {
		  .owner = THIS_MODULE,
		  .id = I2C_ALGO_PXA,
		  .algo = &i2c_vr_algorithm,
		  .name = "vr_i2c0",
		  .retries = 5,
		  },
	 },
	{
	 .lock = SPIN_LOCK_UNLOCKED,
	 .wait = __WAIT_QUEUE_HEAD_INITIALIZER(i2c_vr[1].wait),
	 .adap = {
		  .owner = THIS_MODULE,
		  .id = I2C_ALGO_PXA,
		  .algo = &i2c_vr_algorithm,
Exemple #7
0
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end().
 * These only instrument cpu_hotplug.dep_map; they take no real lock. */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)


void get_online_cpus(void)
hps_ctxt_t hps_ctxt = {
    //state
    .init_state = INIT_STATE_NOT_READY,
    .state = STATE_LATE_RESUME,

    //enabled
    .enabled = 0, /* don't allow hotplug so all cores are online */
    .early_suspend_enabled = 1,
    .suspend_enabled = 1,
    .cur_dump_enabled = 0,
    .stats_dump_enabled = 0,

    //core
    .lock = __MUTEX_INITIALIZER(hps_ctxt.lock), /* Synchronizes accesses to loads statistics */
    .tsk_struct_ptr = NULL,
    .wait_queue = __WAIT_QUEUE_HEAD_INITIALIZER(hps_ctxt.wait_queue),
#ifdef CONFIG_HAS_EARLYSUSPEND
    .es_handler = {
        .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 250,
        .suspend = hps_early_suspend,
        .resume  = hps_late_resume,
    },
#endif //#ifdef CONFIG_HAS_EARLYSUSPEND
    .pdrv = {
        .remove     = NULL,
        .shutdown   = NULL,
        .probe      = hps_probe,
        .driver     = {
            .name = "hps",
            .pm   = &hps_dev_pm_ops,
        },