int sbd_pio_tx(struct sbd_ring_buffer *rb, struct sk_buff *skb)
{
	int ret;
	unsigned int qlen = rb->len;
	unsigned int in = *rb->wp;
	unsigned int out = *rb->rp;
	unsigned int count = skb->len;
	unsigned int space = (rb->buff_size - rb->payload_offset);
	u8 *dst;

	ret = check_rb_space(rb, qlen, in, out);
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(count > space)) {
		mif_err("ERR! {id:%d ch:%d} count %d > space %d\n",
			rb->id, rb->ch, count, space);
		return -ENOSPC;
	}

	barrier();

	dst = rb->buff[in] + rb->payload_offset;

	barrier();

	skb_copy_from_linear_data(skb, dst, count);

	rb->size_v[in] = skb->len;

	barrier();

	*rb->wp = circ_new_ptr(qlen, in, 1);

	/* Commit the item before incrementing the head */
	smp_mb();

	return count;
}
Example #2
void au_dpri_sb(struct super_block *sb)
{
	struct au_sbinfo *sbinfo;
	aufs_bindex_t bindex;
	int err;
	/* to reduce stack size */
	struct {
		struct vfsmount mnt;
		struct au_branch fake;
	} *a;

	/* this function can be called from magic sysrq */
	a = kzalloc(sizeof(*a), GFP_ATOMIC);
	if (unlikely(!a)) {
		dpri("no memory\n");
		return;
	}

	a->mnt.mnt_sb = sb;
	a->fake.br_perm = 0;
	a->fake.br_mnt = &a->mnt;
	a->fake.br_xino.xi_file = NULL;
	atomic_set(&a->fake.br_count, 0);
	smp_mb(); /* atomic_set */
	err = do_pri_br(-1, &a->fake);
	kfree(a);
	dpri("dev 0x%x\n", sb->s_dev);
	if (err || !au_test_aufs(sb))
		return;

	sbinfo = au_sbi(sb);
	if (!sbinfo)
		return;
	dpri("nw %d, gen %u, kobj %d\n",
	     atomic_read(&sbinfo->si_nowait.nw_len), sbinfo->si_generation,
	     atomic_read(&sbinfo->si_kobj.kref.refcount));
	for (bindex = 0; bindex <= sbinfo->si_bend; bindex++)
		do_pri_br(bindex, sbinfo->si_branch[0 + bindex]);
}
static int
vmci_transport_notify_pkt_recv_post_dequeue(
				struct sock *sk,
				size_t target,
				ssize_t copied,
				bool data_read,
				struct vmci_transport_recv_notify_data *data)
{
	struct vsock_sock *vsk;
	int err;
	bool was_full = false;
	u64 free_space;

	vsk = vsock_sk(sk);
	err = 0;

	if (data_read) {
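		/* Order the caller's dequeue before the free-space read below,
		 * so a previously full queue is detected reliably.
		 */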
		smp_mb();

		free_space =
			vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair);
		was_full = free_space == copied;

		if (was_full)
			PKT_FIELD(vsk, peer_waiting_write) = true;

		err = vmci_transport_send_read_notification(sk);
		if (err < 0)
			return err;

		/* See the comment in
		 * vmci_transport_notify_pkt_send_post_enqueue().
		 */
		sk->sk_data_ready(sk);
	}

	return err;
}
Example #4
/**
 * ixgbe_ptp_adjfreq
 * @ptp: the ptp clock structure
 * @ppb: parts per billion adjustment from base
 *
 * adjust the frequency of the ptp cycle counter by the
 * indicated ppb from the base frequency.
 */
static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct ixgbe_adapter *adapter =
		container_of(ptp, struct ixgbe_adapter, ptp_caps);
	struct ixgbe_hw *hw = &adapter->hw;
	u64 freq;
	u32 diff, incval;
	int neg_adj = 0;

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}

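	/* Force any pending update of base_incval before reading it. */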
	smp_mb();
	incval = ACCESS_ONCE(adapter->base_incval);

	freq = incval;
	freq *= ppb;
	diff = div_u64(freq, 1000000000ULL);

	incval = neg_adj ? (incval - diff) : (incval + diff);

	switch (hw->mac.type) {
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
		break;
	case ixgbe_mac_82599EB:
		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
				(1 << IXGBE_INCPER_SHIFT_82599) |
				incval);
		break;
	default:
		break;
	}

	return 0;
}
int usbnet_resume (struct usb_interface *intf)
{
	struct usbnet		*dev = usb_get_intfdata(intf);
#if defined(CONFIG_ERICSSON_F3307_ENABLE)
	struct sk_buff          *skb;
	struct urb              *res;
	int                     retval;

	if (!--dev->suspend_count) {
		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {

			printk(KERN_INFO "%s has delayed data\n", __func__);
			skb = (struct sk_buff *)res->context;
			retval = usb_submit_urb(res, GFP_ATOMIC);
			if (retval < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				__skb_queue_tail(&dev->txq, skb);
			}
		}
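		/* Commit the requeued tx skbs before clearing EVENT_DEV_ASLEEP. */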
		smp_mb();
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);
		if (!(dev->txq.qlen >= TX_QLEN(dev)))
			netif_start_queue(dev->net);
#else
	if (!--dev->suspend_count)
#endif
		tasklet_schedule (&dev->bh);
#if defined(CONFIG_ERICSSON_F3307_ENABLE)
	}
#endif
	return 0;
}
static int
vmci_transport_notify_pkt_send_post_enqueue(
				struct sock *sk,
				ssize_t written,
				struct vmci_transport_send_notify_data *data)
{
	int err = 0;
	struct vsock_sock *vsk;
	bool sent_wrote = false;
	bool was_empty;
	int retries = 0;

	vsk = vsock_sk(sk);

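	/* Order the caller's enqueue before the buf-ready read below, so a
	 * previously empty queue is detected reliably.
	 */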
	smp_mb();

	was_empty =
		vmci_qpair_produce_buf_ready(vmci_trans(vsk)->qpair) == written;
	if (was_empty) {
		while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
		       !sent_wrote &&
		       retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
			err = vmci_transport_send_wrote(sk);
			if (err >= 0)
				sent_wrote = true;

			retries++;
		}
	}

	if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS && !sent_wrote) {
		pr_err("%p unable to send wrote notification to peer\n",
		       sk);
		return err;
	}

	return err;
}
Example #7
static int dyna_pci10xx_insn_read_ai(struct comedi_device *dev,
				     struct comedi_subdevice *s,
				     struct comedi_insn *insn,
				     unsigned int *data)
{
	struct dyna_pci10xx_private *devpriv = dev->private;
	int n;
	u16 d = 0;
	int ret = 0;
	unsigned int chan, range;

	/* get the channel number and range */
	chan = CR_CHAN(insn->chanspec);
	range = range_codes_pci1050_ai[CR_RANGE((insn->chanspec))];

	mutex_lock(&devpriv->mutex);
	/* convert n samples */
	for (n = 0; n < insn->n; n++) {
		/* trigger conversion */
		smp_mb();
		outw_p(0x0000 + range + chan, dev->iobase + 2);
		udelay(10);

		ret = comedi_timeout(dev, s, insn, dyna_pci10xx_ai_eoc, 0);
		if (ret)
			break;

		/* read data */
		d = inw_p(dev->iobase);
		/* mask the first 4 bits - EOC bits */
		d &= 0x0FFF;
		data[n] = d;
	}
	mutex_unlock(&devpriv->mutex);

	/* return the number of samples read/written */
	return ret ? ret : n;
}
Example #8
NDAS_SAL_API xint32     sal_file_write(sal_file file, xuchar* buf, xint32 size, xuint64 offset)
{
	struct file* filp = (struct file*) file;
	mm_segment_t oldfs;
	int retval;
	loff_t foffset = offset;
	
//	printk("SAL: pos=%llx, write size = %d\n", offset, size);
	oldfs = get_fs();
	set_fs(get_ds());
#if 1	
	retval = filp->f_op->write(filp, buf, size, &foffset);
#else
	// write throughput test. do nothing when read...
	retval = size;
#endif
	smp_mb();
	set_fs(oldfs);
	if (retval != size)
		return -1;
	else
		return size;
}
Example #9
void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			exit_flush_lazy_tlbs(mm);
			goto local;
		}
		_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	} else {
local:
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	}
	preempt_enable();
}
Example #10
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	long curr;
	long curr_nmi;
	long snap;
	long snap_nmi;

	curr = rdp->dynticks->dynticks;
	snap = rdp->dynticks_snap;
	curr_nmi = rdp->dynticks->dynticks_nmi;
	snap_nmi = rdp->dynticks_nmi_snap;
	smp_mb(); /* force ordering with CPUs entering/leaving dynticks */

	/*
	 * If the CPU passed through or is sitting in a dynticks idle phase
	 * (for both the irq and NMI counters), it has been in an extended
	 * quiescent state, so report a quiescent state for it.
	 */
	if ((curr != snap || (curr & 0x1) == 0) &&
	    (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) {
		rdp->dynticks_fqs++;
		return 1;
	}

	/* Otherwise, go check whether the CPU is offline. */
	return rcu_implicit_offline_qs(rdp);
}
Example #11
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
	}

	HMT_medium();
	clear_thread_flag(TIF_POLLING_NRFLAG);
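	/* Make the cleared polling flag visible before leaving the idle loop. */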
	smp_mb();

	idle_loop_epilog(in_purr);

	return index;
}
void synchronize_rcu(void)
{
	struct urcu_state *p;
	int t;
	thread_id_t tid;

	/* Memory barrier ensures mutation seen before grace period. */

	smp_mb();

	/* Only one synchronize_rcu() at a time. */

	spin_lock(&rcu_gp_lock);

	/* Request a quiescent state from each thread. */

	for_each_tid(t, tid) {
		p = per_thread(urcu_statep, t);
		if (p != NULL) {
			p->urcu_qs = URCU_QS_REQ;
			pthread_kill(tid, SIGUSR1);
		}
	}
void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
{
	int count = spin_retry;
	int owner, old;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_mb();
		if (old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
	}
}
Example #14
/*
 * release a single token back to a semaphore
 * - entered with lock held and interrupts disabled
 */
void __up(struct semaphore *sem)
{
	struct task_struct *tsk;
	struct sem_waiter *waiter;

	semtrace(sem, "Entering __up");

	/* grant the token to the process at the front of the queue */
	waiter = list_entry(sem->wait_list.next, struct sem_waiter, list);

	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
	 * It is an allocated on the waiter's stack and may become invalid at
	 * any time after that point (due to a wakeup from another source).
	 */
	list_del_init(&waiter->list);
	tsk = waiter->task;
	smp_mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);

	semtrace(sem, "Leaving __up");
}
Example #15
static int bcmsdh_sdmmc_suspend(struct device *pdev)
{
	int err;
	sdioh_info_t *sdioh;
	struct sdio_func *func = dev_to_sdio_func(pdev);
	mmc_pm_flag_t sdio_flags;

	dev_err(pdev, "%s(%d) [%p]\n", __func__, __LINE__, pdev);
	sd_err(("%s Enter\n", __FUNCTION__));
	if (func->num != 2)
		return 0;

	sdioh = sdio_get_drvdata(func);
	err = bcmsdh_suspend(sdioh->bcmsdh);
	if (err)
		return err;

	sdio_flags = sdio_get_host_pm_caps(func);
	if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
		sd_err(("%s: can't keep power while host is suspended\n", __FUNCTION__));
		return  -EINVAL;
	}

	/* keep power while host suspended */
	err = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
	if (err) {
		sd_err(("%s: error while trying to keep power\n", __FUNCTION__));
		return err;
	}
#if defined(OOB_INTR_ONLY)
	bcmsdh_oob_intr_set(sdioh->bcmsdh, FALSE);
#endif
	dhd_mmc_suspend = TRUE;
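	/* Make dhd_mmc_suspend visible to other CPUs before returning. */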
	smp_mb();

	return 0;
}
Example #16
static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
			       bool wakeup_kcm)
{
	struct sock *csk = psock->sk;
	struct kcm_mux *mux = psock->mux;

	/* Unrecoverable error in transmit */

	spin_lock_bh(&mux->lock);

	if (psock->tx_stopped) {
		spin_unlock_bh(&mux->lock);
		return;
	}

	psock->tx_stopped = 1;
	KCM_STATS_INCR(psock->stats.tx_aborts);

	if (!psock->tx_kcm) {
		/* Take off psocks_avail list */
		list_del(&psock->psock_avail_list);
	} else if (wakeup_kcm) {
		/* In this case psock is being aborted while outside of
		 * write_msgs and psock is reserved. Schedule tx_work
		 * to handle the failure there. Need to commit tx_stopped
		 * before queuing work.
		 */
		smp_mb();

		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
	}

	spin_unlock_bh(&mux->lock);

	/* Report error on lower socket */
	report_csk_error(csk, err);
}
Example #17
/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *	Mark an inode as dirty. Callers should use mark_inode_dirty() or
 *	mark_inode_dirty_sync().
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();
	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;
	if (unlikely(block_dump)) {
		struct dentry *dentry = NULL;
		const char *name = "?";
		if (!list_empty(&inode->i_dentry)) {
			dentry = list_entry(inode->i_dentry.next,
					    struct dentry, d_alias);
			if (dentry && dentry->d_name.name)
				name = (const char *) dentry->d_name.name;
		}

		if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev"))
			printk(KERN_DEBUG
			       "%s(%d): dirtied inode %lu (%s) on %s\n",
			       current->comm, current->pid, inode->i_ino,
			       name, inode->i_sb->s_id);
	}
Example #18
void domain_shutdown(struct domain *d, u8 reason)
{
    struct vcpu *v;

    spin_lock(&d->shutdown_lock);

    if ( d->shutdown_code == -1 )
        d->shutdown_code = reason;
    reason = d->shutdown_code;

    if ( d->domain_id == 0 )
        dom0_shutdown(reason);

    if ( d->is_shutting_down )
    {
        spin_unlock(&d->shutdown_lock);
        return;
    }

    d->is_shutting_down = 1;

    smp_mb(); /* set shutdown status /then/ check for per-cpu deferrals */

    for_each_vcpu ( d, v )
    {
        if ( reason == SHUTDOWN_crash )
            v->defer_shutdown = 0;
        else if ( v->defer_shutdown )
            continue;
        vcpu_pause_nosync(v);
        v->paused_for_shutdown = 1;
    }

    __domain_finalise_shutdown(d);

    spin_unlock(&d->shutdown_lock);
}
Example #19
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
						unsigned long va)
{
	int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/* Could avoid this lock when
	   num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	   probably not worth checking this for a cache-hot lock. */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/*
	 * Make the above memory operations globally visible before
	 * sending the IPI.
	 */
	smp_mb();
	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(f->flush_cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}
Example #20
int route_del(unsigned long addr)
{
	struct route_entry *rep;
	struct route_entry **repp;

	write_seqlock(&sl);
	repp = &route_list.re_next;
	for (;;) {
		rep = *repp;
		if (rep == NULL)
			break;
		if (rep->addr == addr) {
			*repp = rep->re_next;
			write_sequnlock(&sl);
			smp_mb();
			rep->re_freed = 1;
			free(rep);
			return 0;
		}
		repp = &rep->re_next;
	}
	write_sequnlock(&sl);
	return -ENOENT;
}
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;
	ktime_t kt_before;
	unsigned long start_snooze;
	long snooze = drv->states[0].target_residency;

	idle_loop_prolog(&in_purr, &kt_before);

	if (snooze) {
		start_snooze = get_tb() + snooze * tb_ticks_per_usec;
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);

		while ((snooze < 0) || (get_tb() < start_snooze)) {
			if (need_resched() || cpu_is_offline(dev->cpu))
				goto out;
			ppc64_runlatch_off();
			HMT_low();
			HMT_very_low();
		}

		HMT_medium();
		clear_thread_flag(TIF_POLLING_NRFLAG);
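		/* Make the cleared polling flag visible before irqs are
		 * disabled again.
		 */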
		smp_mb();
		local_irq_disable();
	}

out:
	HMT_medium();
	dev->last_residency =
		(int)idle_loop_epilog(in_purr, kt_before);
	return index;
}
Example #22
File: dir.c Project: acton393/linux
/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_zalloc(ceph_dentry_cachep, GFP_KERNEL);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	di->time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old, prev;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if (old >= 0 &&
		    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
	}
}
Example #24
void 
xcall_slave(void)
{
	cnt_ipi1++;

	xcall_slave2();

	smp_mb();

	/***********************************************/
	/*   We  want  to  call  ack_APIC_irq, but we  */
	/*   inline  the expansion because of the GPL  */
	/*   symbol issue.			       */
	/*   Once  we  do  this, we can have more IPI  */
	/*   interrupts arrive (but not until we exit  */
	/*   the   interrupt  routine  and  re-enable  */
	/*   interrupts).			       */
	/***********************************************/
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
	ack_APIC_irq();
# else
	x_apic->write(APIC_EOI, 0);
# endif
}
Example #25
static void expunge_all(struct msg_queue *msq, int res)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		/*
		 * Make sure that the wakeup doesn't preempt
		 * this CPU prematurely. (on PREEMPT_RT)
		 */
		preempt_disable_rt();

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		msr->r_msg = NULL;
		wake_up_process(msr->r_tsk);
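		/* Ensure the wakeup is ordered before the final r_msg store;
		 * the receiver polls r_msg without holding the queue lock.
		 */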
		smp_mb();
		msr->r_msg = ERR_PTR(res);

		preempt_enable_rt();
	}
}
Example #26
static int bcmsdh_sdmmc_suspend(struct device *pdev)
{
	struct sdio_func *func = dev_to_sdio_func(pdev);
	mmc_pm_flag_t sdio_flags;
	int ret;

	if (func->num != 2)
		return 0;

	sd_trace(("%s Enter\n", __FUNCTION__));

	if (dhd_os_check_wakelock(bcmsdh_get_drvdata()))
		return -EBUSY;

	sdio_flags = sdio_get_host_pm_caps(func);

	if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
		sd_err(("%s: can't keep power while host is suspended\n", __FUNCTION__));
		return  -EINVAL;
	}

	/* keep power while host suspended */
	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
	if (ret) {
		sd_err(("%s: error while trying to keep power\n", __FUNCTION__));
		return ret;
	}

#if defined(OOB_INTR_ONLY)
	bcmsdh_oob_intr_set(0);
#endif	/* defined(OOB_INTR_ONLY) */
	dhd_mmc_suspend = TRUE;
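	/* Publish dhd_mmc_suspend to other CPUs before the host suspends. */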
	smp_mb();

	return 0;
}
Example #27
/**
 * i40e_ptp_adjfreq - Adjust the PHC frequency
 * @ptp: The PTP clock structure
 * @ppb: Parts per billion adjustment from the base
 *
 * Adjust the frequency of the PHC by the indicated parts per billion from the
 * base frequency.
 **/
static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
	struct i40e_hw *hw = &pf->hw;
	u64 adj, freq, diff;
	int neg_adj = 0;

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}

	freq = I40E_PTP_40GB_INCVAL;
	freq *= ppb;
	diff = div_u64(freq, 1000000000ULL);

	if (neg_adj)
		adj = I40E_PTP_40GB_INCVAL - diff;
	else
		adj = I40E_PTP_40GB_INCVAL + diff;

	/* At some link speeds, the base incval is so large that directly
	 * multiplying by ppb would result in arithmetic overflow even when
	 * using a u64. Avoid this by instead calculating the new incval
	 * always in terms of the 40GbE clock rate and then multiplying by the
	 * link speed factor afterwards. This does result in slightly lower
	 * precision at lower link speeds, but it is fairly minor.
	 */
	smp_mb(); /* Force any pending update before accessing. */
	adj *= READ_ONCE(pf->ptp_adj_mult);

	wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
	wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);

	return 0;
}
Example #28
/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcu_ctrlblk.lock.
 */
static void rcu_start_batch(struct rcu_ctrlblk *rcp)
{
	if (rcp->next_pending &&
			rcp->completed == rcp->cur) {
		rcp->next_pending = 0;
		/*
		 * next_pending == 0 must be visible in
		 * __rcu_process_callbacks() before it can see the new value of cur.
		 */
		smp_wmb();
		rcp->cur++;

		/*
		 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
		 * barrier. Otherwise it can cause tickless idle CPUs to be
		 * included in rcp->cpumask, which will extend grace periods
		 * unnecessarily.
		 */
		smp_mb();
		cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);

		rcp->signaled = 0;
	}
}
int cq_item_3_func(void* data)
{
	int i;
	struct ts_cmd_node cmd;
	
	cmd.command = TS_TEST_CMD;
	cmd.cmd_param.prv_params = data;
	TS_LOG_INFO("called paras:%ld\n", (long)cmd.cmd_param.prv_params);
	memset(cq_test_buff, -1, sizeof(cq_test_buff));
	init_completion(&cq_test_sync_1);
	init_completion(&cq_test_sync_2);

	for (i = 0; i < TS_CMD_QUEUE_SIZE + 5; i++) {
		cmd.cmd_param.pub_params.algo_param.algo_order = i;
		cq_test_buff[i] = i;
		if (put_one_cmd(&cmd)) {
			TS_LOG_INFO("SEND COMPLETE i:%d\n", i);
			if (!i)
				goto out;	/* no command was queued; nothing to test */
			cq_test_buff[i] = -1;
			/* publish the buffer update before signalling the worker */
			smp_mb();
			complete(&cq_test_sync_1);
			if (try_wait_for_completion(&cq_test_sync_2) ||
			    wait_for_completion_timeout(&cq_test_sync_2, 10*HZ)) {
				TS_LOG_INFO("CMD PROCESS FINISH\n");
			} else {
				TS_LOG_ERR("ts thread process TEST CMD expire 10 sec\n");
			}
			goto out;	/* one test finished; leave the loop */
		}
	}
	
out:
	return 0;
}
/**
@brief		copy data in an skb to a circular TXQ

Enqueues a frame in @b @@skb to the @b @@dev TXQ if there is enough space in the
TXQ, then releases @b @@skb.

@param mld	the pointer to a mem_link_device instance
@param dev	the pointer to a mem_ipc_device instance
@param skb	the pointer to an sk_buff instance

@retval "> 0"	the size of the frame written in the TXQ
@retval "< 0"	an error code (-EBUSY, -ENOSPC, or -EIO)
*/
static int txq_write(struct mem_link_device *mld, struct mem_ipc_device *dev,
		     struct sk_buff *skb)
{
	char *src = skb->data;
	unsigned int count = skb->len;
	char *dst = get_txq_buff(dev);
	unsigned int qsize = get_txq_buff_size(dev);
	unsigned int in = get_txq_head(dev);
	unsigned int out = get_txq_tail(dev);
	int space;

	space = check_txq_space(mld, dev, qsize, in, out, count);
	if (unlikely(space < 0))
		return space;

#ifdef DEBUG_MODEM_IF_LINK_TX
	/* Record the time-stamp */
	getnstimeofday(&skbpriv(skb)->ts);
#endif

	circ_write(dst, src, qsize, in, count);

	set_txq_head(dev, circ_new_ptr(qsize, in, count));

	/* Commit the item before incrementing the head */
	smp_mb();

#ifdef DEBUG_MODEM_IF_LINK_TX
	if (likely(src))
		log_ipc_pkt(sipc5_get_ch_id(src), LINK, TX, skb, true, true);
#endif

	dev_kfree_skb_any(skb);

	return count;
}