Example #1
static void get_data_point(void)
{
	struct scbs_data_point item;
	unsigned long head;
	unsigned long tail;

	//Caller must check that data is present
	spin_lock(&consumer_lock);

	head = ACCESS_ONCE(buffer.head);
	tail = buffer.tail;

	if (CIRC_CNT(head, tail, buffer.size) >= 1) {
		/* read index before reading contents at that index */
		smp_read_barrier_depends();

		/* extract one item from the buffer */
		item = buffer.buf[tail];

		scbs_data_point = item;

		smp_mb(); /* finish reading descriptor before incrementing tail */

		buffer.tail = (tail + 1) & (buffer.size - 1);
	}

	spin_unlock(&consumer_lock);
}
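
The consumer above follows the circular-buffer pattern from Documentation/circular-buffers.txt, so it only makes sense together with a producer that commits the item before moving the head. A minimal sketch of that paired producer follows; the put_data_point() name and producer_lock are assumptions for illustration, not part of the original code:

static void put_data_point(struct scbs_data_point item)
{
	unsigned long head;
	unsigned long tail;

	spin_lock(&producer_lock);

	head = buffer.head;
	/* The spin_unlock() and next spin_lock() provide needed ordering. */
	tail = ACCESS_ONCE(buffer.tail);

	if (CIRC_SPACE(head, tail, buffer.size) >= 1) {
		/* insert one item into the buffer */
		buffer.buf[head] = item;

		smp_wmb(); /* commit the item before incrementing the head */

		buffer.head = (head + 1) & (buffer.size - 1);
	}

	spin_unlock(&producer_lock);
}

The smp_wmb() here is what the consumer's smp_read_barrier_depends() pairs with: the item contents become visible before the new head index does.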
Example #2
File: seccomp.c Project: EvanHa/rbp
/**
 * seccomp_run_filters - evaluates all seccomp filters against @sd
 * @sd: optional seccomp data to be passed to filters
 *
 * Returns valid seccomp BPF response codes.
 */
static u32 seccomp_run_filters(struct seccomp_data *sd)
{
    struct seccomp_filter *f = ACCESS_ONCE(current->seccomp.filter);
    struct seccomp_data sd_local;
    u32 ret = SECCOMP_RET_ALLOW;

    /* Ensure unexpected behavior doesn't result in failing open. */
    if (unlikely(WARN_ON(f == NULL)))
        return SECCOMP_RET_KILL;

    /* Make sure cross-thread synced filter points somewhere sane. */
    smp_read_barrier_depends();

    if (!sd) {
        populate_seccomp_data(&sd_local);
        sd = &sd_local;
    }

    /*
     * All filters in the list are evaluated and the lowest BPF return
     * value always takes priority (ignoring the DATA).
     */
    for (; f; f = f->prev) {
        u32 cur_ret = BPF_PROG_RUN(f->prog, (void *)sd);

        if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
            ret = cur_ret;
    }
    return ret;
}
Example #3
int set_task_ioprio(struct task_struct *task, int ioprio)
{
#ifdef CONFIG_IOSCHED_BFQ
	int err, i;
#else
	int err;
#endif
	struct io_context *ioc;
	const struct cred *cred = current_cred(), *tcred;

	rcu_read_lock();
	tcred = __task_cred(task);
	if (tcred->uid != cred->euid &&
	    tcred->uid != cred->uid && !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	err = security_task_setioprio(task, ioprio);
	if (err)
		return err;

	task_lock(task);
	do {
		ioc = task->io_context;
		/* see wmb() in current_io_context() */
		smp_read_barrier_depends();
		if (ioc)
			break;

		ioc = alloc_io_context(GFP_ATOMIC, -1);
		if (!ioc) {
			err = -ENOMEM;
			break;
		}
#ifdef CONFIG_IOSCHED_BFQ
		/* let other ioc users see the new values */
		smp_wmb();
#endif
		task->io_context = ioc;
	} while (1);

	if (!err) {
		ioc->ioprio = ioprio;
#ifdef CONFIG_IOSCHED_BFQ
		/* make sure schedulers see the new ioprio value */
		wmb();
		for (i = 0; i < IOC_IOPRIO_CHANGED_BITS; i++)
			set_bit(i, ioc->ioprio_changed);
#else
		ioc->ioprio_changed = 1;
#endif
	}

	task_unlock(task);
	return err;
}
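
The "/* see wmb() in current_io_context() */" comment refers to the publish side in the block layer. A rough sketch of how that function pairs with the read barrier above (not a verbatim copy of any particular kernel version):

struct io_context *current_io_context(gfp_t gfp_flags, int node)
{
	struct task_struct *tsk = current;
	struct io_context *ret;

	ret = tsk->io_context;
	if (likely(ret))
		return ret;

	ret = alloc_io_context(gfp_flags, node);
	if (ret) {
		/* make sure set_task_ioprio() sees the settings above */
		smp_wmb();
		tsk->io_context = ret;
	}
	return ret;
}

The smp_wmb() makes the freshly initialised io_context visible before the task->io_context pointer is published, which is exactly what the smp_read_barrier_depends() in set_task_ioprio() relies on.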
Example #4
static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe)
{
	struct dentry *upperdentry = ACCESS_ONCE(oe->__upperdentry);
	/*
	 * Make sure to order reads to upperdentry wrt ovl_dentry_update()
	 */
	smp_read_barrier_depends();
	return upperdentry;
}
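
The comment points at ovl_dentry_update() as the paired writer. A trimmed sketch of that update path, with the sanity checks omitted:

void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry)
{
	struct ovl_entry *oe = dentry->d_fsdata;

	/*
	 * Publish the fully set up upperdentry; the write barrier pairs
	 * with the smp_read_barrier_depends() in the dereference above.
	 */
	smp_wmb();
	oe->__upperdentry = upperdentry;
}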
Example #5
void trusty_fiq_handler(struct pt_regs *regs, void *svc_sp)
{
	struct fiq_glue_handler *handler;

	for (handler = ACCESS_ONCE(fiq_handlers); handler;
	     handler = ACCESS_ONCE(handler->next)) {
		/* Barrier paired with smp_wmb in fiq_glue_register_handler */
		smp_read_barrier_depends();
		handler->fiq(handler, regs, svc_sp);
	}
}
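
The pairing is named in the comment: fiq_glue_register_handler() issues an smp_wmb() between filling in the handler and linking it into the fiq_handlers list. A reduced sketch of that publish path, with locking and FIQ setup omitted:

int fiq_glue_register_handler(struct fiq_glue_handler *handler)
{
	if (!handler || !handler->fiq)
		return -EINVAL;

	handler->next = fiq_handlers;
	/* make the handler contents visible before it becomes reachable */
	smp_wmb();
	fiq_handlers = handler;
	return 0;
}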
Example #6
struct pfq_sock *
pfq_get_sock_by_id(pfq_id_t id)
{
        struct pfq_sock *so;
        if (unlikely((__force int)id >= Q_MAX_ID)) {
                pr_devel("[PFQ] pfq_get_sock_by_id: bad id=%d!\n", id);
                return NULL;
        }
	so = (struct pfq_sock *)atomic_long_read(&pfq_sock_vector[(__force int)id]);
	smp_read_barrier_depends(); /* order later reads through the returned sock pointer */
	return so;
}
Example #7
File: qp.c Project: 020gzh/linux
/**
 * qp_get_savail - return number of avail send entries
 *
 * @qp - the qp
 *
 * This assumes the s_hlock is held but the s_last
 * qp variable is uncontrolled.
 */
static inline u32 qp_get_savail(struct rvt_qp *qp)
{
	u32 slast;
	u32 ret;

	smp_read_barrier_depends(); /* see rc.c */
	slast = ACCESS_ONCE(qp->s_last);
	if (qp->s_head >= slast)
		ret = qp->s_size - (qp->s_head - slast);
	else
		ret = slast - qp->s_head;
	return ret - 1;
}
Example #8
struct mpls_ilm* 
mpls_get_ilm (unsigned int key) 
{
	struct mpls_ilm *ilm = NULL;

	rcu_read_lock();
	ilm = radix_tree_lookup (&mpls_ilm_tree,key);
	smp_read_barrier_depends(); /* read ilm contents only after the pointer from the lookup */
	if (likely(ilm))
		mpls_ilm_hold(ilm);

	rcu_read_unlock();
	return ilm;
}
Example #9
/*
 *
 * hv_get_ringbuffer_availbytes()
 *
 * Get number of bytes available to read and to write to
 * for the specified ring buffer
 */
static inline void
hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
			  u32 *read, u32 *write)
{
	u32 read_loc, write_loc;

	smp_read_barrier_depends();

	/* Capture the read/write indices before they changed */
	read_loc = rbi->ring_buffer->read_index;
	write_loc = rbi->ring_buffer->write_index;

	*write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->ring_datasize);
	*read = rbi->ring_datasize - *write;
}
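
A small, hypothetical usage sketch: hv_ringbuffer_pending_percent() is not part of the driver, it just shows how a caller might consume the two values returned above:

static u32 hv_ringbuffer_pending_percent(struct hv_ring_buffer_info *rbi)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(rbi, &avail_read, &avail_write);

	/* fraction of the data area currently holding unread bytes */
	return (avail_read * 100) / rbi->ring_datasize;
}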
Example #10
struct mpls_nhlfe*  
mpls_get_nhlfe (unsigned int key) 
{
	struct mpls_nhlfe *nhlfe = NULL;

	rcu_read_lock();
	nhlfe = radix_tree_lookup (&mpls_nhlfe_tree, key);
	smp_read_barrier_depends();
	if (likely(nhlfe)) {
		mpls_nhlfe_hold(nhlfe);
	}
	rcu_read_unlock();

	return nhlfe;
}
Example #11
int set_task_ioprio(struct task_struct *task, int ioprio)
{
	int err, i;
	struct io_context *ioc;
	const struct cred *cred = current_cred(), *tcred;

	rcu_read_lock();
	tcred = __task_cred(task);
	if (tcred->uid != cred->euid &&
	    tcred->uid != cred->uid && !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	err = security_task_setioprio(task, ioprio);
	if (err)
		return err;

	task_lock(task);
	do {
		ioc = task->io_context;
		/* see wmb() in current_io_context() */
		smp_read_barrier_depends();
		if (ioc)
			break;

		ioc = alloc_io_context(GFP_ATOMIC, -1);
		if (!ioc) {
			err = -ENOMEM;
			break;
		}
		smp_wmb();
		task->io_context = ioc;
	} while (1);

	if (!err) {
		ioc->ioprio = ioprio;
		wmb();
		for (i = 0; i < IOC_IOPRIO_CHANGED_BITS; i++)
			set_bit(i, ioc->ioprio_changed);
	}

	task_unlock(task);
	return err;
}
Example #12
File: async.c Project: L0op/qemu
/* Multiple occurrences of aio_bh_poll cannot be called concurrently */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;

    ctx->walking_bh++;

    ret = 0;
    for (bh = ctx->first_bh; bh; bh = next) {
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
        next = bh->next;
        if (!bh->deleted && bh->scheduled) {
            bh->scheduled = 0;
            /* Paired with write barrier in bh schedule to ensure reading for
             * idle & callbacks coming after bh's scheduling.
             */
            smp_rmb();
            if (!bh->idle)
                ret = 1;
            bh->idle = 0;
            bh->cb(bh->opaque);
        }
    }

    ctx->walking_bh--;

    /* remove deleted bhs */
    if (!ctx->walking_bh) {
        qemu_mutex_lock(&ctx->bh_lock);
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_mutex_unlock(&ctx->bh_lock);
    }

    return ret;
}
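
The smp_read_barrier_depends() in the loop pairs with the write barrier on the creation side. In QEMU of this vintage, aio_bh_new() published a new bottom half roughly as follows (a sketch, not necessarily line-for-line identical):

QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;

    bh = g_new0(QEMUBH, 1);
    bh->ctx = ctx;
    bh->cb = cb;
    bh->opaque = opaque;

    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    /* Make sure the members are ready before putting bh into the list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);

    return bh;
}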
Example #13
/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, we will find it again. Or
	 * we raced with task_work_run(), *pprev == NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = ACCESS_ONCE(*pprev))) {
		smp_read_barrier_depends(); /* order reads of *work after the load of *pprev */
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}
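
For context, the producer side is task_work_add(); at around the same kernel version it queued work roughly like this (a sketch), with the cmpxchg() acting as the release that publishes the new entry:

int task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
{
	struct callback_head *head;

	do {
		head = ACCESS_ONCE(task->task_works);
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	if (notify)
		set_notify_resume(task);
	return 0;
}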
Example #14
static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe)
{
	struct dentry *upperdentry = ACCESS_ONCE(oe->__upperdentry);
	smp_read_barrier_depends();
	return upperdentry;
}
Example #15
/* Deferred service handler, run as interrupt-fired tasklet */
static void caam_jr_dequeue(unsigned long devarg)
{
	int hw_idx, sw_idx, i, head, tail;
	struct device *dev = (struct device *)devarg;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
	u32 *userdesc, userstatus;
	dma_addr_t outbusaddr;
	void *userarg;
	unsigned long flags;

	outbusaddr = rd_reg64(&jrp->rregs->outring_base);
	dma_sync_single_for_cpu(dev, outbusaddr,
				sizeof(struct jr_outentry) * JOBR_DEPTH,
				DMA_FROM_DEVICE);

	spin_lock_irqsave(&jrp->outlock, flags);

	head = ACCESS_ONCE(jrp->head);
	sw_idx = tail = jrp->tail;

	while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
	       rd_reg32(&jrp->rregs->outring_used)) {

		hw_idx = jrp->out_ring_read_index;
		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
			sw_idx = (tail + i) & (JOBR_DEPTH - 1);

			smp_read_barrier_depends();
			if (jrp->outring[hw_idx].desc ==
			    jrp->entinfo[sw_idx].desc_addr_dma)
				break; /* found */
		}
		/* we should never fail to find a matching descriptor */
		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);

		/* Unmap just-run descriptor so we can post-process */
		dma_unmap_single(dev, jrp->outring[hw_idx].desc,
				 jrp->entinfo[sw_idx].desc_size,
				 DMA_TO_DEVICE);

		/* mark completed, avoid matching on a recycled desc addr */
		jrp->entinfo[sw_idx].desc_addr_dma = 0;

		/* Stash callback params for use outside of lock */
		usercall = jrp->entinfo[sw_idx].callbk;
		userarg = jrp->entinfo[sw_idx].cbkarg;
		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
		userstatus = jrp->outring[hw_idx].jrstatus;

		smp_mb();

		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
					   (JOBR_DEPTH - 1);

		/*
		 * if this job completed out-of-order, do not increment
		 * the tail.  Otherwise, increment tail by 1 plus the
		 * number of subsequent jobs already completed out-of-order
		 */
		if (sw_idx == tail) {
			do {
				tail = (tail + 1) & (JOBR_DEPTH - 1);
				smp_read_barrier_depends();
			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
				 jrp->entinfo[tail].desc_addr_dma == 0);

			jrp->tail = tail;
		}

		/* set done */
		wr_reg32(&jrp->rregs->outring_rmvd, 1);

		spin_unlock_irqrestore(&jrp->outlock, flags);

		/* Finally, execute user's callback */
		usercall(dev, userdesc, userstatus, userarg);

		spin_lock_irqsave(&jrp->outlock, flags);

		head = ACCESS_ONCE(jrp->head);
		sw_idx = tail = jrp->tail;
	}

	spin_unlock_irqrestore(&jrp->outlock, flags);

	/* reenable / unmask IRQs */
	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
}
Example #16
static int uts_arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct uts_namespace *uts_ns = current->nsproxy->uts_ns;
	struct ve_struct *ve = get_exec_env();
	int i, n1, n2, n3, new_version;
	struct page **new_pages, **p;

	/*
	 * For the host node, or when the UTS release has not been
	 * changed, simply map the preallocated original vDSO.
	 *
	 * Likewise, if we have already allocated one for this UTS
	 * namespace, simply reuse it; this improves speed significantly.
	 */
	if (uts_ns == &init_uts_ns)
		goto map_init_uts;
	/*
	 * Dirty lockless hack. Strictly speaking
	 * we need to return @p here if it's non-NULL,
	 * but since there is only one transition possible
	 * { =0 ; !=0 } we simply return @uts_ns->vdso.pages
	 */
	p = ACCESS_ONCE(uts_ns->vdso.pages);
	smp_read_barrier_depends();
	if (p)
		goto map_uts;

	if (sscanf(uts_ns->name.release, "%d.%d.%d", &n1, &n2, &n3) == 3) {
		/*
		 * If the version has not changed, simply reuse the
		 * preallocated one.
		 */
		new_version = KERNEL_VERSION(n1, n2, n3);
		if (new_version == LINUX_VERSION_CODE)
			goto map_init_uts;
	} else {
		/*
		 * If the admin has passed a malformed release string
		 * here, warn once but keep working without vDSO
		 * virtualization at all. That is better than bailing
		 * out with an error.
		 */
		pr_warn_once("Wrong release uts name format detected."
			     " Ignoring vDSO virtualization.\n");
		goto map_init_uts;
	}

	mutex_lock(&vdso_mutex);
	if (uts_ns->vdso.pages) {
		mutex_unlock(&vdso_mutex);
		goto map_uts;
	}

	uts_ns->vdso.nr_pages	= init_uts_ns.vdso.nr_pages;
	uts_ns->vdso.size	= init_uts_ns.vdso.size;
	uts_ns->vdso.version_off= init_uts_ns.vdso.version_off;
	new_pages		= kmalloc(sizeof(struct page *) * init_uts_ns.vdso.nr_pages, GFP_KERNEL);
	if (!new_pages) {
		pr_err("Can't allocate vDSO pages array for VE %d\n", ve->veid);
		goto out_unlock;
	}

	for (i = 0; i < uts_ns->vdso.nr_pages; i++) {
		struct page *p = alloc_page(GFP_KERNEL);
		if (!p) {
			pr_err("Can't allocate page for VE %d\n", ve->veid);
			for (; i > 0; i--)
				put_page(new_pages[i - 1]);
			kfree(new_pages);
			goto out_unlock;
		}
		new_pages[i] = p;
		copy_page(page_address(p), page_address(init_uts_ns.vdso.pages[i]));
	}

	uts_ns->vdso.addr = vmap(new_pages, uts_ns->vdso.nr_pages, 0, PAGE_KERNEL);
	if (!uts_ns->vdso.addr) {
		pr_err("Can't map vDSO pages for VE %d\n", ve->veid);
		for (i = 0; i < uts_ns->vdso.nr_pages; i++)
			put_page(new_pages[i]);
		kfree(new_pages);
		goto out_unlock;
	}

	*((int *)(uts_ns->vdso.addr + uts_ns->vdso.version_off)) = new_version;
	smp_wmb();
	uts_ns->vdso.pages = new_pages;
	mutex_unlock(&vdso_mutex);

	pr_debug("vDSO version transition %d -> %d for VE %d\n",
		 LINUX_VERSION_CODE, new_version, ve->veid);

map_uts:
	return setup_additional_pages(bprm, uses_interp, uts_ns->vdso.pages, uts_ns->vdso.size);
map_init_uts:
	return setup_additional_pages(bprm, uses_interp, init_uts_ns.vdso.pages, init_uts_ns.vdso.size);
out_unlock:
	mutex_unlock(&vdso_mutex);
	return -ENOMEM;
}
Example #17
File: cq.c Project: AK101111/linux
/**
 * rvt_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This may be called with qp->s_lock held.
 */
void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
{
	struct rvt_cq_wc *wc;
	unsigned long flags;
	u32 head;
	u32 next;

	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Note that the head pointer might be writable by user processes.
	 * Take care to verify it is a sane value.
	 */
	wc = cq->queue;
	head = wc->head;
	if (head >= (unsigned)cq->ibcq.cqe) {
		head = cq->ibcq.cqe;
		next = 0;
	} else {
		next = head + 1;
	}

	if (unlikely(next == wc->tail)) {
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return;
	}
	if (cq->ip) {
		wc->uqueue[head].wr_id = entry->wr_id;
		wc->uqueue[head].status = entry->status;
		wc->uqueue[head].opcode = entry->opcode;
		wc->uqueue[head].vendor_err = entry->vendor_err;
		wc->uqueue[head].byte_len = entry->byte_len;
		wc->uqueue[head].ex.imm_data =
			(__u32 __force)entry->ex.imm_data;
		wc->uqueue[head].qp_num = entry->qp->qp_num;
		wc->uqueue[head].src_qp = entry->src_qp;
		wc->uqueue[head].wc_flags = entry->wc_flags;
		wc->uqueue[head].pkey_index = entry->pkey_index;
		wc->uqueue[head].slid = entry->slid;
		wc->uqueue[head].sl = entry->sl;
		wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
		wc->uqueue[head].port_num = entry->port_num;
		/* Make sure entry is written before the head index. */
		smp_wmb();
	} else {
		wc->kqueue[head] = *entry;
	}
	wc->head = next;

	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED &&
	     (solicited || entry->status != IB_WC_SUCCESS))) {
		struct kthread_worker *worker;
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		smp_read_barrier_depends(); /* see rvt_cq_exit */
		worker = cq->rdi->worker;
		if (likely(worker)) {
			cq->notify = RVT_CQ_NONE;
			cq->triggered++;
			queue_kthread_work(worker, &cq->comptask);
		}
	}

	spin_unlock_irqrestore(&cq->lock, flags);
}