Example #1
void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
{
	bool slow;

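	/* If we hold the last reference (users == 1), the read barrier
	 * pairs with the release implied by atomic_dec_and_test() on
	 * other CPUs, so their writes to the skb are visible before we
	 * free it; otherwise just drop our reference.
	 */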
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;

	slow = lock_sock_fast(sk);
	skb_orphan(skb);
	sk_mem_reclaim_partial(sk);
	unlock_sock_fast(sk, slow);

	/* skb is now orphaned, can be freed outside of locked section */
	trace_kfree_skb(skb, skb_free_datagram_locked);
	__kfree_skb(skb);
}
Example #2
static inline u16
vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
{
	struct vlan_priority_tci_mapping *mp;

	smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */

	mp = vlan_dev_priv(dev)->egress_priority_map[(skb->priority & 0xF)];
	while (mp) {
		if (mp->priority == skb->priority) {
			return mp->vlan_qos; /* This should already be shifted
					      * to mask correctly with the
					      * VLAN's TCI */
		}
		mp = mp->next;
	}
	return 0;
}
Example #3
void native_cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 10; i++) {
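		/* Pick up the dying CPU's latest write of cpu_state before
		 * testing it below. */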
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			return;
		}

		msleep(100);
	}

	pr_err("CPU %u didn't die...\n", cpu);
}
Example #4
void
gc_jd_queue_enqueue(gc_jd_queue_t *q, gc_job_desc_t *item)
{
  item->sys.next = 0;
  _mutex_lock(ptr_to_ea(&q->mutex));
  smp_rmb();		// import barrier

  if (q->tail == 0){    // currently empty
    q->tail = q->head = jdp_to_ea(item);
  }
  else {		// not empty, append
    ea_to_jdp(q->tail)->sys.next = jdp_to_ea(item);
    q->tail = jdp_to_ea(item);
  }

  smp_wmb();		// orders stores above before clearing of mutex
  _mutex_unlock(ptr_to_ea(&q->mutex));
}
Example #5
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	write_pen_release(cpu);

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	smp_cross_call(cpumask_of(cpu), 1);

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
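		/* The secondary clears pen_release on its way out of the
		 * holding pen (write_pen_release() typically pairs the store
		 * with an smp_wmb()); the read barrier lets this polling
		 * loop observe that store. */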
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
Example #6
static inline int calc_load_write_idx(void)
{
	int idx = calc_load_idx;

	/*
	 * See calc_global_nohz(), if we observe the new index, we also
	 * need to observe the new update time.
	 */
	smp_rmb();

	/*
	 * If the folding window started, make sure we start writing in the
	 * next idle-delta.
	 */
	if (!time_before(jiffies, calc_load_update))
		idx++;

	return idx & 1;
}
Example #7
static struct dsm_client *dsm_find_client(char *cname)
{
	int i;
	struct dsm_client *client = NULL;

	mutex_lock(&g_dsm_server.mtx_lock);
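	/* Presumably pairs with a write barrier on the registration path
	 * that publishes client_list[i] before DSM_CLIENT_VAILD_BIT is set. */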
	smp_rmb();
	for (i = 0; i < CLIENT_SIZE; i++) {
		if ((test_bit(DSM_CLIENT_VAILD_BIT, &g_dsm_server.client_flag[i]))
		    && (!strncasecmp(g_dsm_server.client_list[i]->client_name, cname, CLIENT_NAME_LEN))) {
			client = g_dsm_server.client_list[i];
			break;
		}
	}
	mutex_unlock(&g_dsm_server.mtx_lock);
	DSM_LOG_DEBUG("cname: %s find %s\n", cname, client ? "success" : "failed");

	return client;
}
Example #8
static void print_buffers(void)
{
	struct print_buffer *buffer;
	struct entry_head *head;
	off_t read_pos;
	int len, ret;

	while (1) {
		buffer = get_next_buffer();
		if (!buffer)
			break;

		read_pos = buffer->read_pos;
		head = buffer->ring + read_pos;
		len = head->len;

		if (len) {
			/* Print out non-empty entry and proceed */
			/* Check if output goes to syslog */
			if (head->dest == RT_PRINT_SYSLOG_STREAM) {
				syslog(head->priority,
				       "%s", head->data);
			} else {
				ret = fwrite(head->data,
					     head->len, 1, head->dest);
				(void)ret;
			}

			read_pos += sizeof(*head) + len;
		} else {
			/* Empty entries mark the wrap-around */
			read_pos = 0;
		}

		/* Make sure we have read the entry completely before
		   forwarding read_pos */
		smp_rmb();
		buffer->read_pos = read_pos;

		/* Enforce the read_pos update before proceeding */
		smp_wmb();
	}
}
Example #9
/* allocates a chunk for the reader from filled (and munged) buffer space */
unsigned int comedi_buf_read_alloc(struct comedi_async *async,
                                   unsigned int nbytes)
{
    unsigned int available;

    available = async->munge_count - async->buf_read_alloc_count;
    if (nbytes > available)
        nbytes = available;

    async->buf_read_alloc_count += nbytes;

    /*
     * ensure the async buffer 'counts' are read before we
     * attempt to read data from the read-alloc'ed buffer space
     */
    smp_rmb();

    return nbytes;
}
Example #10
static int dev_mce_log(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct mce *mce = (struct mce *)data;
	unsigned int next, entry;

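	/* Presumably flushes the caller's stores to the mce record before
	 * the loop below claims a log slot and publishes it. */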
	wmb();
	for (;;) {
		entry = mce_log_get_idx_check(mcelog.next);
		for (;;) {

			/*
			 * When the buffer fills up discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return NOTIFY_OK;
			}
			/* Old left over entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	/* wake processes polling /dev/mcelog */
	wake_up_interruptible(&mce_chrdev_wait);

	return NOTIFY_OK;
}
Example #11
static int
virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        vu_panic(dev, "Guest moved used index from %u to %u",
                 idx, vq->shadow_avail_idx);
        return -1;
    }
    if (num_heads) {
        /* On success, callers read a descriptor at vq->last_avail_idx.
         * Make sure descriptor read does not bypass avail index read. */
        smp_rmb();
    }

    return num_heads;
}
Example #12
void mce_log(struct mce *mce)
{
	unsigned next, entry;
	int ret = 0;

	/* Emit the trace record: */
	trace_mce_record(mce);

	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
	if (ret == NOTIFY_STOP)
		return;

	mce->finished = 0;
	wmb();
	for (;;) {
		entry = rcu_dereference_check_mce(mcelog.next);
		for (;;) {
			/*
			 * When the buffer fills up discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	mce->finished = 1;
	set_bit(0, &mce_need_notify);
}
Example #13
int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	write_pen_release(cpu_logical_map(cpu));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * it to jump to the secondary entrypoint.
	 */
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
Example #14
static int acpi_aml_readb_kern(void)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.in_crc;
	char *p;

	ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
	if (ret < 0)
		return ret;
	/* sync head before removing cmds */
	smp_rmb();
	p = &crc->buf[crc->tail];
	ret = (int)*p;
	/* sync tail before inserting cmds */
	smp_mb();
	crc->tail = (crc->tail + 1) & (ACPI_AML_BUF_SIZE - 1);
	acpi_aml_unlock_fifo(ACPI_AML_IN_KERN, true);
	return ret;
}
Example #15
/*
 * This does the RCU processing work from softirq context. 
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
                                    struct rcu_data *rdp)
{
    if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
        *rdp->donetail = rdp->curlist;
        rdp->donetail = rdp->curtail;
        rdp->curlist = NULL;
        rdp->curtail = &rdp->curlist;
    }

    local_irq_disable();
    if (rdp->nxtlist && !rdp->curlist) {
        rdp->curlist = rdp->nxtlist;
        rdp->curtail = rdp->nxttail;
        rdp->nxtlist = NULL;
        rdp->nxttail = &rdp->nxtlist;
        local_irq_enable();

        /*
         * start the next batch of callbacks
         */

        /* determine batch number */
        rdp->batch = rcp->cur + 1;
        /* see the comment and corresponding wmb() in
         * the rcu_start_batch()
         */
        smp_rmb();

        if (!rcp->next_pending) {
            /* and start it/schedule start if it's a new batch */
            spin_lock(&rcp->lock);
            rcp->next_pending = 1;
            rcu_start_batch(rcp);
            spin_unlock(&rcp->lock);
        }
    } else {
        local_irq_enable();
    }
    rcu_check_quiescent_state(rcp, rdp);
    if (rdp->donelist)
        rcu_do_batch(rdp);
}
Example #16
void mce_log(struct mce *mce)
{
	unsigned next, entry;

	/* Emit the trace record: */
	trace_mce_record(mce);

	mce->finished = 0;
	wmb();
	for (;;) {
		entry = rcu_dereference(mcelog.next);
		for (;;) {
			/*
			 * When the buffer fills up discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	mce->finished = 1;
	set_bit(0, &mce_need_notify);
}
Example #17
int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * This is really belt and braces; we hold unintended secondary
	 * CPUs in the holding pen until we're ready for them.  However,
	 * since we haven't sent them a soft interrupt, they shouldn't
	 * be there.
	 */
	write_pen_release(cpu_logical_map(cpu));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
Example #18
ssize_t rtlx_write(int index, const void __user *buffer, size_t count)
{
	struct rtlx_channel *rt;
	unsigned long failed;
	size_t rt_read;
	size_t fl;

	if (rtlx == NULL)
		return -ENOSYS;

	rt = &rtlx->channel[index];

	mutex_lock(&channel_wqs[index].mutex);
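	/* Presumably pairs with a write barrier the remote consumer issues
	 * after advancing rt->rt_read, so we read an up-to-date position. */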
	smp_rmb();
	rt_read = rt->rt_read;

	/* total number of bytes to copy */
	count = min(count, (size_t)write_spacefree(rt_read, rt->rt_write,
							rt->buffer_size));

	/* first bit from write pointer to the end of the buffer, or count */
	fl = min(count, (size_t) rt->buffer_size - rt->rt_write);

	failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl);
	if (failed)
		goto out;

	/* if there's any left copy to the beginning of the buffer */
	if (count - fl) {
		failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
	}

out:
	count -= failed;

	smp_wmb();
	rt->rt_write = (rt->rt_write + count) % rt->buffer_size;
	smp_wmb();
	mutex_unlock(&channel_wqs[index].mutex);

	return count;
}
Example #19
/* kcm sock is locked. */
static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;

	psock = kcm->tx_psock;

	smp_rmb(); /* Must read tx_psock before tx_wait */

	if (psock) {
		WARN_ON(kcm->tx_wait);
		if (unlikely(psock->tx_stopped))
			unreserve_psock(kcm);
		else
			return kcm->tx_psock;
	}

	spin_lock_bh(&mux->lock);

	/* Check again under lock to see if psock was reserved for this
	 * psock via psock_unreserve.
	 */
	psock = kcm->tx_psock;
	if (unlikely(psock)) {
		WARN_ON(kcm->tx_wait);
		spin_unlock_bh(&mux->lock);
		return kcm->tx_psock;
	}

	if (!list_empty(&mux->psocks_avail)) {
		psock = list_first_entry(&mux->psocks_avail,
					 struct kcm_psock,
					 psock_avail_list);
		list_del(&psock->psock_avail_list);
		if (kcm->tx_wait) {
			list_del(&kcm->wait_psock_list);
			kcm->tx_wait = false;
		}
		kcm->tx_psock = psock;
		psock->tx_kcm = kcm;
		KCM_STATS_INCR(psock->stats.reserved);
	} else if (!kcm->tx_wait) {
		list_add_tail(&kcm->wait_psock_list,
			      &mux->kcm_tx_waiters);
		kcm->tx_wait = true;
	}

	spin_unlock_bh(&mux->lock);

	return psock;
}
Example #20
static int tegra_idle_enter_lp2(struct cpuidle_device *dev,
	struct cpuidle_state *state)
{
	ktime_t enter, exit;
	s64 us;

	if (!lp2_in_idle || lp2_disabled_by_suspend ||
	    !tegra_lp2_is_allowed(dev, state)) {
		dev->last_state = &dev->states[0];
		return tegra_idle_enter_lp3(dev, state);
	}

	trace_printk("LP2 entry at %lu us\n",
		     (unsigned long)readl(IO_ADDRESS(TEGRA_TMR1_BASE)
					  + TIMERUS_CNTR_1US));

	local_irq_disable();
	enter = ktime_get();

	tegra_cpu_idle_stats_lp2_ready(dev->cpu);
	tegra_idle_lp2(dev, state);

	trace_printk("LP2 exit at %lu us\n",
		     (unsigned long)readl(IO_ADDRESS(TEGRA_TMR1_BASE)
					  + TIMERUS_CNTR_1US));

	exit = ktime_sub(ktime_get(), enter);
	us = ktime_to_us(exit);

	local_irq_enable();

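	/* Presumably ensures state updated during LP2 (such as
	 * dev->last_state) is observed before the checks below. */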
	smp_rmb();

	/* Update LP2 latency provided no fall back to LP3 */
	if (state == dev->last_state) {
		tegra_lp2_set_global_latency(state);
		tegra_lp2_update_target_residency(state);
	}
	tegra_cpu_idle_stats_lp2_time(dev->cpu, us);

	return (int)us;
}
Example #21
/**
 * __kfifo_get_to_user - gets some data from the FIFO, no locking version
 * @fifo: the fifo to be used.
 * @buffer: where the data must be copied. user buffer.
 * @len: the size of the destination buffer.
 *
 * This function copies at most @len bytes from the FIFO into the
 * user @buffer and returns the number of copied bytes.
 *
 * Note that with only one concurrent reader and one concurrent
 * writer, you don't need extra locking to use these functions.
 */
unsigned int __kfifo_get_to_user(struct kfifo *fifo,
				 unsigned char __user *buffer,
				 unsigned int len)
{
	unsigned int n1, n2;
	int ret;

	len = min(len, fifo->in - fifo->out);

	/*
	 * Ensure that we sample the fifo->in index -before- we
	 * start removing bytes from the kfifo.
	 */

	smp_rmb();

	/* first get the data from fifo->out until the end of the buffer */
	n1 = min(len, fifo->size - (fifo->out & (fifo->size - 1)));
	n2 = len - n1;
	ret = copy_to_user(buffer,
			   fifo->buffer + (fifo->out & (fifo->size - 1)), n1);
	if (ret) {
		len = n1 - ret;
		goto out;
	}

	/* then get the rest (if any) from the beginning of the buffer */
	ret = copy_to_user(buffer + n1, fifo->buffer, n2);
	if (ret)
		len = n1 + n2 - ret;

	/*
	 * Ensure that we remove the bytes from the kfifo -before-
	 * we update the fifo->out index.
	 */
out:
	smp_mb();

	fifo->out += len;

	return len;
}
Example #22
ssize_t rtlx_write(int index, const void __user *buffer, size_t count)
{
    struct rtlx_channel *rt;
    unsigned long failed;
    size_t rt_read;
    size_t fl;

    if (rtlx == NULL)
        return -ENOSYS;

    rt = &rtlx->channel[index];

    mutex_lock(&channel_wqs[index].mutex);
    smp_rmb();
    rt_read = rt->rt_read;

    /* total number of bytes to copy */
    count = min(count, (size_t)write_spacefree(rt_read, rt->rt_write,
                rt->buffer_size));

    /* first bit from write pointer to the end of the buffer, or count */
    fl = min(count, (size_t) rt->buffer_size - rt->rt_write);

    failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl);
    if (failed)
        goto out;

    /* if there's any left copy to the beginning of the buffer */
    if (count - fl) {
        failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
    }

out:
    count -= failed;

    smp_wmb();
    rt->rt_write = (rt->rt_write + count) % rt->buffer_size;
    smp_wmb();
    mutex_unlock(&channel_wqs[index].mutex);

    return count;
}
Example #23
int __cpuinit meson_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);
	 
	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 */
	printk("write pen_release: %d\n", cpu_logical_map(cpu));
	write_pen_release(cpu_logical_map(cpu));

#ifndef CONFIG_MESON_TRUSTZONE
	check_and_rewrite_cpu_entry();
	meson_set_cpu_power_ctrl(cpu, 1);
#endif
	meson_secondary_set(cpu);
	dsb_sev();

	smp_send_reschedule(cpu);
	timeout = jiffies + (10 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;
		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);
	return pen_release != -1 ? -ENOSYS : 0;
}
Example #24
File: rtas.c Project: 710leo/LVS
static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
{
	u16 slb_size = mmu_slb_size;
	int rc = H_MULTI_THREADS_ACTIVE;
	int cpu;

	slb_set_size(SLB_MIN_SIZE);
	printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());

	while (rc == H_MULTI_THREADS_ACTIVE && !data->done) {
		rc = rtas_call(data->token, 0, 1, NULL);
		if (rc && rc != H_MULTI_THREADS_ACTIVE)
			printk(KERN_DEBUG "ibm,suspend-me returned %d\n", rc);
	}

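	/* Presumably pairs with a write barrier on whichever CPU recorded
	 * data->error or data->done, so the reads below see current values. */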
	smp_rmb();
	if (rc || data->error)
		slb_set_size(slb_size);

	if (data->error)
		rc = data->error;

	data->error = rc;

	if (wake_when_done) {
		smp_wmb();
		data->done = 1;

		/* Ensure data->done is seen on all CPUs that are about to wake
		 * up as a result of the H_PROD below */
		mb();

		for_each_online_cpu(cpu)
			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
	}

	if (atomic_dec_return(&data->working) == 0)
		complete(data->complete);

	return rc;
}
Example #25
ssize_t rtlx_read(int index, void __user *buff, size_t count)
{
    size_t lx_write, fl = 0L;
    struct rtlx_channel *lx;
    unsigned long failed;

    if (rtlx == NULL)
        return -ENOSYS;

    lx = &rtlx->channel[index];

    mutex_lock(&channel_wqs[index].mutex);
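    /* Presumably pairs with a write barrier the remote producer issues
     * after advancing lx->lx_write, so we read an up-to-date position. */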
    smp_rmb();
    lx_write = lx->lx_write;

    /* find out how much in total */
    count = min(count,
                (size_t)(lx_write + lx->buffer_size - lx->lx_read)
                % lx->buffer_size);

    /* then how much from the read pointer onwards */
    fl = min(count, (size_t)lx->buffer_size - lx->lx_read);

    failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl);
    if (failed)
        goto out;

    /* and if there is anything left at the beginning of the buffer */
    if (count - fl)
        failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl);

out:
    count -= failed;

    smp_wmb();
    lx->lx_read = (lx->lx_read + count) % lx->buffer_size;
    smp_wmb();
    mutex_unlock(&channel_wqs[index].mutex);

    return count;
}
Example #26
/*
 * Returns true if the task does not share ->mm with another thread/process.
 */
bool current_is_single_threaded(void)
{
	struct task_struct *task = current;
	struct mm_struct *mm = task->mm;
	struct task_struct *p, *t;
	bool ret;

	if (atomic_read(&task->signal->live) != 1)
		return false;

	if (atomic_read(&mm->mm_users) == 1)
		return true;

	ret = false;
	rcu_read_lock();
	for_each_process(p) {
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		if (unlikely(p == task->group_leader))
			continue;

		t = p;
		do {
			if (unlikely(t->mm == mm))
				goto found;
			if (likely(t->mm))
				break;
			/*
			 * t->mm == NULL. Make sure next_thread/next_task
			 * will see other CLONE_VM tasks which might be
			 * forked before exiting.
			 */
			smp_rmb();
		} while_each_thread(p, t);
	}
	ret = true;
found:
	rcu_read_unlock();

	return ret;
}
Example #27
ssize_t rtlx_read(int index, void __user *buff, size_t count)
{
	size_t lx_write, fl = 0L;
	struct rtlx_channel *lx;
	unsigned long failed;

	if (rtlx == NULL)
		return -ENOSYS;

	lx = &rtlx->channel[index];

	mutex_lock(&channel_wqs[index].mutex);
	smp_rmb();
	lx_write = lx->lx_write;

	/* find out how much in total */
	count = min(count,
		     (size_t)(lx_write + lx->buffer_size - lx->lx_read)
		     % lx->buffer_size);

	/* then how much from the read pointer onwards */
	fl = min(count, (size_t)lx->buffer_size - lx->lx_read);

	failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl);
	if (failed)
		goto out;

	/* and if there is anything left at the beginning of the buffer */
	if (count - fl)
		failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl);

out:
	count -= failed;

	smp_wmb();
	lx->lx_read = (lx->lx_read + count) % lx->buffer_size;
	smp_wmb();
	mutex_unlock(&channel_wqs[index].mutex);

	return count;
}
Example #28
/**
 *  reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: First socket in the group.
 *  @hash: When no BPF filter is available, use this hash to select.
 *  @skb: skb to run through BPF filter.
 *  @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *    the skb does not yet point at the payload, this parameter represents
 *    how far the pointer needs to advance to reach the payload.
 *  Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (!prog || !skb)
			goto select_by_hash;

		if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
			sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
		else
			sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

select_by_hash:
		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2)
			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
	}

out:
	rcu_read_unlock();
	return sk2;
}
Example #29
static inline void __kfifo_out_data(struct kfifo *fifo,
		void *to, unsigned int len, unsigned int off)
{
	unsigned int l;

	/*
	 * Ensure that we sample the fifo->in index -before- we
	 * start removing bytes from the kfifo.
	 */

	smp_rmb();

	off = __kfifo_off(fifo, fifo->out + off);

	/* first get the data from fifo->out until the end of the buffer */
	l = min(len, fifo->size - off);
	memcpy(to, fifo->buffer + off, l);

	/* then get the rest (if any) from the beginning of the buffer */
	memcpy(to + l, fifo->buffer, len - l);
}
Example #30
// Called to wait on a barrier
int __cr_barrier_wait(cr_barrier_t *barrier)
{
#if CRI_DEBUG
    DECLARE_WAIT_QUEUE_HEAD_ONSTACK(dummy);
    const int interval = 60;
    int sum = 0;
    do {
        wait_event_timeout(barrier->wait, CR_BARRIER_WAIT_COND(barrier), interval * HZ);
        if (CR_BARRIER_WAIT_COND(barrier))
            break;
        sum += interval;
        CR_ERR("cr_barrier warning: tgid/pid %d/%d still blocked after %d seconds, with signal_pending=%d.",
               current->tgid, current->pid, sum, signal_pending(current));
    } while (1);
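    /* Presumably pairs with a write barrier on the path that sets
     * barrier->interrupted before waking barrier->wait. */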
    smp_rmb();
    if (barrier->interrupted) {
        CR_ERR("cr_barrier error: interrupt on non-interruptible barrier");
    }
#else
    wait_event(barrier->wait, CR_BARRIER_WAIT_COND(barrier));
#endif
    return do_barrier_once(barrier);
}