Example #1
void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
{
	struct fscrypt_info *prev;

	if (ci == NULL)
		ci = ACCESS_ONCE(inode->i_crypt_info);
	if (ci == NULL)
		return;

	prev = cmpxchg(&inode->i_crypt_info, ci, NULL);
	if (prev != ci)
		return;

	put_crypt_info(ci);
}
Example #2
void f2fs_free_encryption_info(struct inode *inode, struct f2fs_crypt_info *ci)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_crypt_info *prev;

	if (ci == NULL)
		ci = ACCESS_ONCE(fi->i_crypt_info);
	if (ci == NULL)
		return;
	prev = cmpxchg(&fi->i_crypt_info, ci, NULL);
	if (prev != ci)
		return;

	f2fs_free_crypt_info(ci);
}
Example #3
int page_nid_xchg_last(struct page *page, int nid)
{
	unsigned long old_flags, flags;
	int last_nid;

	do {
		old_flags = flags = page->flags;
		last_nid = page_nid_last(page);

		flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
		flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
	} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));

	return last_nid;
}
Example #4
static void net_secret_init(void)
{
	u32 tmp;
	int i;

	if (likely(net_secret[0]))
		return;

	for (i = NET_SECRET_SIZE; i > 0;) {
		do {
			get_random_bytes(&tmp, sizeof(tmp));
		} while (!tmp);
		cmpxchg(&net_secret[--i], 0, tmp);
	}
}
Example #5
/*
 * FIXME: this is hack to save delta to linux buffer_head.
 * Inefficient, and this is not atomic with dirty bit change. And this
 * may not work on all arch (If set_bit() and cmpxchg() is not
 * exclusive, this has race).
 */
static void tux3_set_bufdelta(struct buffer_head *buffer, int delta)
{
	unsigned long state, old_state;

	delta = tux3_delta(delta);

	state = buffer->b_state;
	for (;;) {
		old_state = state;
		state = tux3_bufsta_update(old_state, delta);
		state = cmpxchg(&buffer->b_state, old_state, state);
		if (state == old_state)
			break;
	}
}
Example #6
void mce_log(struct mce *mce)
{
	unsigned next, entry;
	int ret = 0;

	/* Emit the trace record: */
	trace_mce_record(mce);

	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
	if (ret == NOTIFY_STOP)
		return;

	mce->finished = 0;
	wmb();
	for (;;) {
		entry = rcu_dereference_check_mce(mcelog.next);
		for (;;) {

			/*
			 * When the buffer fills up discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	mce->finished = 1;
	set_bit(0, &mce_need_notify);
}
Example #7
void ext4_free_encryption_info(struct inode *inode,
			       struct ext4_crypt_info *ci)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_crypt_info *prev;

	if (ci == NULL)
		ci = ACCESS_ONCE(ei->i_crypt_info);
	if (ci == NULL)
		return;
	prev = cmpxchg(&ei->i_crypt_info, ci, NULL);
	if (prev != ci)
		return;

	ext4_free_crypt_info(ci);
}
Example #8
static void check_lock(struct lock_debug *debug)
{
    int irq_safe = !local_irq_is_enabled();

    if ( unlikely(atomic_read(&spin_debug) <= 0) )
        return;

    /* A few places take liberties with this. */
    /* BUG_ON(in_irq() && !irq_safe); */

    if ( unlikely(debug->irq_safe != irq_safe) )
    {
        int seen = cmpxchg(&debug->irq_safe, -1, irq_safe);
        BUG_ON(seen == !irq_safe);
    }
}
Example #9
int bbgl_mutex_lock(bbgl_mutex_t *mutex) {
    int c;
    for (int i = 0; i < 100; i++) {
        c = cmpxchg(mutex, 0, 1);
        if (c == 0)
            return 0;
        __asm__ __volatile__("pause" ::: "memory");
    }
    if (c == 1)
        c = xchg(mutex, 2);
    while (c) {
        futex(mutex, FUTEX_WAIT, 2, NULL, NULL, 0);
        c = xchg(mutex, 2);
    }
    return 0;
}
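The lock above is one half of the classic futex-based mutex (states: 0 unlocked, 1 locked, 2 locked with possible waiters) and only makes sense next to a matching unlock. The source does not include one, so the following is a hypothetical counterpart sketch, assuming the same cmpxchg()/xchg()/futex() wrappers used above:

/* Hypothetical counterpart to bbgl_mutex_lock() -- not from the source.
 * Assumes the same xchg()/futex() wrappers and the 0/1/2 state encoding
 * (0 = unlocked, 1 = locked, 2 = locked with possible waiters). */
int bbgl_mutex_unlock(bbgl_mutex_t *mutex) {
    /* Drop the lock; the old value says whether someone may be sleeping. */
    if (xchg(mutex, 0) == 2)
        futex(mutex, FUTEX_WAKE, 1, NULL, NULL, 0);
    return 0;
}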
Example #10
static void j2_send_ipi(unsigned int cpu, unsigned int message)
{
	volatile unsigned *pmsg;
	unsigned old;
	unsigned long val;

	/* There is only one IPI interrupt shared by all messages, so
	 * we keep a separate interrupt flag per message type in sw. */
	pmsg = &per_cpu(j2_ipi_messages, cpu);
	do old = *pmsg;
	while (cmpxchg(pmsg, old, old|(1U<<message)) != old);

	/* Generate the actual interrupt by writing to CCRn bit 28. */
	val = __raw_readl(j2_ipi_trigger + cpu);
	__raw_writel(val | (1U<<28), j2_ipi_trigger + cpu);
}
Example #11
int gre_del_protocol(const struct gre_protocol *proto, u8 version)
{
	int ret;

	if (version >= GREPROTO_MAX)
		return -EINVAL;

	ret = (cmpxchg((const struct gre_protocol **)&gre_proto[version], proto, NULL) == proto) ?
		0 : -EBUSY;

	if (ret)
		return ret;

	synchronize_rcu();
	return 0;
}
Example #12
/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, nflags;

	for (;;) {
		flags = work->flags;
		if (flags & IRQ_WORK_PENDING)
			return false;
		nflags = flags | IRQ_WORK_FLAGS;
		if (cmpxchg(&work->flags, flags, nflags) == flags)
			break;
		cpu_relax();
	}

	return true;
}
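For reference, the flag bits that irq_work_claim() (and __irq_work_run() / __irq_work_queue() further below) manipulate were defined, in kernels of this vintage, roughly as sketched here; the layout is an assumption drawn from the contemporaneous irq_work header, not text from the source:

/* Assumed flag layout (cf. include/linux/irq_work.h of the same era):
 * PENDING means the work is queued, BUSY means the callback may still be
 * running.  irq_work_claim() sets both with a single cmpxchg(). */
#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL	/* IRQ_WORK_PENDING | IRQ_WORK_BUSY */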
Example #13
/*! 2017. 6. 3 study -ing */
int
task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
{
	struct callback_head *head;

	do {
		head = ACCESS_ONCE(task->task_works);
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	if (notify)
		set_notify_resume(task);
	return 0;
}
Example #14
unsigned long long
uisqueue_interlocked_and(unsigned long long __iomem *tgt,
			unsigned long long set)
{
	unsigned long long i;
	unsigned long long j;

	j = readq(tgt);
	do {
		i = j;
		j = cmpxchg((__force unsigned long long *)tgt, i, i & set);

	} while (i != j);

	return j;
}
Example #15
int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *ops,
			    unsigned int num)
{
	int ret;

	if (num >= MAX_IPTUN_ENCAP_OPS)
		return -ERANGE;

	ret = (cmpxchg((const struct ip_tunnel_encap_ops **)
		       &iptun_encaps[num],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
Example #16
/**
 * llist_add_batch - add several linked entries in batch
 * @new_first:	first entry in batch to be added
 * @new_last:	last entry in batch to be added
 * @head:	the head for your lock-less list
 *
 * Return whether list is empty before adding.
 */
bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
		     struct llist_head *head)
{
	struct llist_node *entry, *old_entry;

	entry = head->first;
	for (;;) {
		old_entry = entry;
		new_last->next = entry;
		entry = cmpxchg(&head->first, old_entry, new_first);
		if (entry == old_entry)
			break;
	}

	return old_entry == NULL;
}
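A minimal usage sketch for the API documented above: pushing a single node is just the batch call with new_first == new_last. The struct my_item and producer_push() names are illustrative, not from the source.

#include <linux/llist.h>

struct my_item {
	int value;
	struct llist_node llnode;	/* lock-less list linkage */
};

/* Hypothetical producer: push one item and report whether the list was
 * empty beforehand, i.e. whether the consumer may need to be woken. */
static bool producer_push(struct llist_head *head, struct my_item *item)
{
	return llist_add_batch(&item->llnode, &item->llnode, head);
}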
Example #17
static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	u16 qid = le16_to_cpu(c->qid);
	u16 sqsize = le16_to_cpu(c->sqsize);
	struct nvmet_ctrl *old;

	old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
	if (old) {
		pr_warn("queue already connected!\n");
		return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
	}

	nvmet_cq_setup(ctrl, req->cq, qid, sqsize);
	nvmet_sq_setup(ctrl, req->sq, qid, sqsize);
	return 0;
}
Example #18
static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (nb->owners[i] == event) {
			cmpxchg(nb->owners+i, event, NULL);
			break;
		}
	}
}
Example #19
/**
 * llist_del_first - delete the first entry of lock-less list
 * @head:	the head for your lock-less list
 *
 * If list is empty, return NULL, otherwise, return the first entry
 * deleted, this is the newest added one.
 *
 * Only one llist_del_first user can be used simultaneously with
 * multiple llist_add users without lock.  Because otherwise
 * llist_del_first, llist_add, llist_add (or llist_del_all, llist_add,
 * llist_add) sequence in another user may change @head->first->next,
 * but keep @head->first.  If multiple consumers are needed, please
 * use llist_del_all or use lock between consumers.
 */
struct llist_node *llist_del_first(struct llist_head *head)
{
	struct llist_node *entry, *old_entry, *next;

	entry = head->first;
	for (;;) {
		if (entry == NULL)
			return NULL;
		old_entry = entry;
		next = entry->next;
		entry = cmpxchg(&head->first, old_entry, next);
		if (entry == old_entry)
			break;
	}

	return entry;
}
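Given the single-consumer restriction spelled out above, callers of llist_del_first() are typically serialized with a lock of their own while producers keep using llist_add() lock-free. A sketch, reusing the illustrative my_item type from the note after Example #16:

/* Hypothetical consumer: llist_add() callers need no lock, but concurrent
 * llist_del_first() callers must be serialized, here with a spinlock,
 * per the restriction documented above. */
static struct my_item *consumer_pop(struct llist_head *head, spinlock_t *lock)
{
	struct llist_node *node;

	spin_lock(lock);
	node = llist_del_first(head);
	spin_unlock(lock);

	return node ? llist_entry(node, struct my_item, llnode) : NULL;
}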
Example #20
static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
#if defined(CONFIG_KSTAT_IRQS_ONDEMAND)
	struct irq_desc *desc;
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		desc = irq_to_desc(start + i);
		if (desc && !desc->kstat_irqs) {
			unsigned int __percpu *stats = alloc_percpu(unsigned int);

			if (!stats)
				return -1;
			if (cmpxchg(&desc->kstat_irqs, NULL, stats) != NULL)
				free_percpu(stats);
		}
	}
#endif
	return start;
}
Example #21
static irqreturn_t j2_ipi_interrupt_handler(int irq, void *arg)
{
	unsigned cpu = hard_smp_processor_id();
	volatile unsigned *pmsg = &per_cpu(j2_ipi_messages, cpu);
	unsigned messages, i;

	do messages = *pmsg;
	while (cmpxchg(pmsg, messages, 0) != messages);

	if (!messages) return IRQ_NONE;

	for (i=0; i<SMP_MSG_NR; i++)
		if (messages & (1U<<i))
			smp_message_recv(i);

	return IRQ_HANDLED;
}
Example #22
int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops,
			   unsigned int encap_type)
{
	int ret;

	if (encap_type == LWTUNNEL_ENCAP_NONE ||
	    encap_type > LWTUNNEL_ENCAP_MAX)
		return -ERANGE;

	ret = (cmpxchg((const struct lwtunnel_encap_ops **)
		       &lwtun_encaps[encap_type],
		       ops, NULL) == ops) ? 0 : -1;

	synchronize_net();

	return ret;
}
Example #23
static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (cmpxchg(lock, node, NULL) == node)
			return;
		/* Wait until the next pointer is set */
		while (!(next = cpu_relaxed_read_long(&(node->next))))
			cpu_read_relax();
	}
	ACCESS_ONCE(next->locked) = 1;
	smp_wmb();
}
Example #24
static void __irq_work_run(void)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;


	/*
	 * Reset the "raised" state right before we check the list because
	 * an NMI may enqueue after we find the list empty from the runner.
	 */
	__this_cpu_write(irq_work_raised, 0);
	barrier();

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty_relaxed(this_list))
		return;

	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}
Example #25
/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *entry)
{
	struct irq_work **head, *next;

	head = &get_cpu_var(irq_work_list);

	do {
		next = *head;
		/* Can assign non-atomic because we keep the flags set. */
		entry->next = next_flags(next, IRQ_WORK_FLAGS);
	} while (cmpxchg(head, next, entry) != next);

	/* The list was empty, raise self-interrupt to start processing. */
	if (!irq_work_next(entry))
		arch_irq_work_raise();

	put_cpu_var(irq_work_list);
}
Example #26
static void mspin_unlock(mspin_lock_t *lock,  mspin_node_t *node)
{
	mspin_node_t *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (cmpxchg(lock, node, NULL) == node)
			return;
		/* Wait until the next pointer is set */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	}
	barrier();
	ACCESS_ONCE(next->locked) = 1;
	smp_wmb();
}
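For context, the unlock above hands the MCS-style lock to the next queued node; the acquire side is not included in the source. The following sketch shows roughly what it looks like, assuming the same mspin_lock_t / mspin_node_t types and helpers:

/* Sketch of the acquire side matching mspin_unlock() above (assumed, not
 * from the source).  A thread enqueues its node with xchg() and, if there
 * was a predecessor, spins on its own ->locked flag until the previous
 * owner passes the lock down. */
static noinline void mspin_lock(mspin_lock_t *lock, mspin_node_t *node)
{
	mspin_node_t *prev;

	node->locked = 0;
	node->next = NULL;

	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/* No predecessor: lock acquired outright. */
		node->locked = 1;
		return;
	}
	ACCESS_ONCE(prev->next) = node;
	smp_wmb();
	while (!ACCESS_ONCE(node->locked))
		arch_mutex_cpu_relax();
}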
Example #27
static void catchup_quanta(quanta_t from, quanta_t target,
			   struct pfair_state* state)
{
	quanta_t cur = from, time;
	TRACE("+++< BAD catching up quanta from %lu to %lu\n",
	      from, target);
	while (time_before(cur, target)) {
		wait_for_quantum(cur, state);
		cur++;
		time = cmpxchg(&pfair_time,
			       cur - 1,   /* expected */
			       cur        /* next     */
			);
		if (time == cur - 1)
			schedule_next_quantum(cur);
	}
	TRACE("+++> catching up done\n");
}
Example #28
void *ring_pop(struct ring_buf *ring)
{
	void *elem, **slot;
	unsigned int front;

	for (;;) {
		front = ring_front(ring);
		if (front == ring_back(ring))
			return NULL;
		slot = ring->data + (front % ring->length);
		if (!(elem = *slot))
			continue;
		if (cmpxchg(slot, elem, NULL) == elem)
			break;
	}
	atomic_inc(&ring->front);
	return elem;
}
Example #29
/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to the user-mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add the
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			work = ACCESS_ONCE(task->task_works);
			head = !work && (task->flags & PF_EXITING) ?
				&work_exited : NULL;
		} while (cmpxchg(&task->task_works, work, head) != work);

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It can't remove
		 * the first entry == work, cmpxchg(task_works) should
		 * fail, but it can play with *work and other entries.
		 */
		raw_spin_unlock_wait(&task->pi_lock);
		smp_mb();

		/* Reverse the list to run the works in fifo order */
		head = NULL;
		do {
			next = work->next;
			work->next = head;
			head = work;
			work = next;
		} while (work);

		work = head;
		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
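Examples #13 and #29 are the producer and consumer halves of the same API. A hedged usage sketch of the producer side follows; the my_deferred / queue_deferred names are illustrative, not from the source.

#include <linux/task_work.h>
#include <linux/slab.h>

/* Hypothetical caller of task_work_add(): queue a callback that runs the
 * next time @task heads back to user mode (see task_work_run() above). */
struct my_deferred {
	struct callback_head cb;
	int payload;
};

static void my_deferred_func(struct callback_head *cb)
{
	struct my_deferred *d = container_of(cb, struct my_deferred, cb);

	/* ... act on d->payload in task context ... */
	kfree(d);
}

static int queue_deferred(struct task_struct *task, int payload)
{
	struct my_deferred *d = kzalloc(sizeof(*d), GFP_KERNEL);
	int err;

	if (!d)
		return -ENOMEM;
	d->payload = payload;
	init_task_work(&d->cb, my_deferred_func);

	err = task_work_add(task, &d->cb, true);	/* true: notify the task */
	if (err)	/* e.g. -ESRCH: the task is already exiting */
		kfree(d);
	return err;
}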
Example #30
void *SharedMemory::lock()
{
    if(rptr==MAP_FAILED)return 0;
    int c;
    c = cmpxchg(&SH_MUTEX(rptr), 0, 1);
    if(!c)return (char*)rptr+sizeof(SegmentHeader);

    if(c==1)c=xchg(&SH_MUTEX(rptr), 2);
    while(c)
    {
        sys_futex(&SH_MUTEX(rptr), FUTEX_WAIT, 2, NULL, NULL, 0);
        c = xchg(&SH_MUTEX(rptr), 2);
    }
    return (char*)rptr+sizeof(SegmentHeader);
}