Example #1
static void __init wakeup_secondary(void)
{
	/*
	 * Write the address of secondary startup routine into the
	 * AuxCoreBoot1 where ROM code will jump and start executing
	 * on secondary core once out of WFE
	 * A barrier is added to ensure that write buffer is drained
	 */
	omap_auxcoreboot_addr(virt_to_phys(omap_secondary_startup));
	smp_wmb();

	/*
	 * Send a 'sev' to wake the secondary core from WFE.
	 * Drain the outstanding writes to memory
	 */
	dsb();
	set_event();
	mb();
}
Example #2
/**
 * __bfq_exit_single_io_context - deassociate @cic from any running task.
 * @bfqd: bfq_data on which @cic is valid.
 * @cic: the cic being exited.
 *
 * Whenever no more tasks are using @cic or @bfqd is deallocated we
 * need to invalidate its entry in the radix tree hash table and to
 * release the queues it refers to.
 *
 * Called under the queue lock.
 */
static void __bfq_exit_single_io_context(struct bfq_data *bfqd,
					 struct cfq_io_context *cic)
{
	struct io_context *ioc = cic->ioc;

	list_del_init(&cic->queue_list);

	/*
	 * Make sure dead mark is seen for dead queues
	 */
	smp_wmb();
	rcu_assign_pointer(cic->key, bfqd_dead_key(bfqd));

	/*
	 * No write-side locking as no task is using @ioc (they're exited
	 * or bfqd is being deallocated).
	 */
	rcu_read_lock();
	if (rcu_dereference(ioc->ioc_data) == cic) {
		rcu_read_unlock();
		spin_lock(&ioc->lock);
		rcu_assign_pointer(ioc->ioc_data, NULL);
		spin_unlock(&ioc->lock);
	} else
		rcu_read_unlock();

	if (cic->cfqq[BLK_RW_ASYNC] != NULL) {
		bfq_exit_bfqq(bfqd, cic->cfqq[BLK_RW_ASYNC]);
		cic->cfqq[BLK_RW_ASYNC] = NULL;
	}

	if (cic->cfqq[BLK_RW_SYNC] != NULL) {
		/*
		 * If the bic is using a shared queue, put the reference
		 * taken on the io_context when the bic started using a
		 * shared bfq_queue.
		 */
		if (bfq_bfqq_coop(cic->cfqq[BLK_RW_SYNC]))
			put_io_context(ioc);
		bfq_exit_bfqq(bfqd, cic->cfqq[BLK_RW_SYNC]);
		cic->cfqq[BLK_RW_SYNC] = NULL;
	}
}
Example #3
struct in_device *inetdev_init(struct net_device *dev)
{
	struct in_device *in_dev;

	ASSERT_RTNL();

	in_dev = kmalloc(sizeof(*in_dev), GFP_KERNEL);
	if (!in_dev)
		goto out;
	memset(in_dev, 0, sizeof(*in_dev));
	INIT_RCU_HEAD(&in_dev->rcu_head);
	memcpy(&in_dev->cnf, &ipv4_devconf_dflt, sizeof(in_dev->cnf));
	in_dev->cnf.sysctl = NULL;
	in_dev->dev = dev;
	if ((in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl)) == NULL)
		goto out_kfree;
	/* Reference in_dev->dev */
	dev_hold(dev);
#ifdef CONFIG_SYSCTL
	neigh_sysctl_register(dev, in_dev->arp_parms, NET_IPV4,
			      NET_IPV4_NEIGH, "ipv4", NULL);
#endif

	/* Account for reference dev->ip_ptr (below) */
	in_dev_hold(in_dev);

#ifdef CONFIG_SYSCTL
	devinet_sysctl_register(in_dev, &in_dev->cnf);
#endif
	ip_mc_init_dev(in_dev);
	if (dev->flags & IFF_UP)
		ip_mc_up(in_dev);
out:
	/* we can receive as soon as ip_ptr is set -- do this last */
	smp_wmb();
	dev->ip_ptr = in_dev;
	return in_dev;
out_kfree:
	kfree(in_dev);
	in_dev = NULL;
	goto out;
}
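The pattern above is worth calling out: inetdev_init() fully initializes in_dev and only then, after smp_wmb(), publishes it through dev->ip_ptr, so any CPU that observes the pointer also observes the initialized fields. Below is a minimal userspace sketch of the same init-then-publish idea, using C11 release/acquire atomics in place of smp_wmb() and its paired read-side ordering (all identifiers are illustrative, not kernel code):

#include <stdatomic.h>
#include <stddef.h>

/* Illustrative stand-in for struct in_device. */
struct obj {
	int mtu;
	int flags;
};

/* Published pointer: stays NULL until the object is fully set up. */
static _Atomic(struct obj *) ip_ptr;

void publish(struct obj *o)
{
	o->mtu = 1500;
	o->flags = 1;
	/* Release store: all initialization above is visible before the
	 * pointer is, mirroring smp_wmb() followed by the ip_ptr store. */
	atomic_store_explicit(&ip_ptr, o, memory_order_release);
}

int reader(void)
{
	/* Acquire load is the reader-side counterpart of the barrier. */
	struct obj *o = atomic_load_explicit(&ip_ptr, memory_order_acquire);

	return o ? o->mtu : -1;
}

In the kernel the reader side typically goes through rcu_dereference()/READ_ONCE() on dev->ip_ptr; the acquire load above is a conservative stand-in for that dependency ordering.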
Example #4
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;
	int retval;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
			return -EPERM;
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
			return -EPERM;
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
			return -EPERM;
	}
	if (ruid != (uid_t) -1) {
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
			return -EAGAIN;
	}
	if (euid != (uid_t) -1) {
		if (euid != current->euid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		current->euid = euid;
	}
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}
Example #5
/*
 * munging is applied to data by core as it passes between user
 * and kernel space
 */
static unsigned int comedi_buf_munge(struct comedi_async *async,
                                     unsigned int num_bytes)
{
    struct comedi_subdevice *s = async->subdevice;
    unsigned int count = 0;
    const unsigned num_sample_bytes = bytes_per_sample(s);

    if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
        async->munge_count += num_bytes;
        count = num_bytes;
    } else {
        /* don't munge partial samples */
        num_bytes -= num_bytes % num_sample_bytes;
        while (count < num_bytes) {
            int block_size = num_bytes - count;
            unsigned int buf_end;

            buf_end = async->prealloc_bufsz - async->munge_ptr;
            if (block_size > buf_end)
                block_size = buf_end;

            s->munge(s->device, s,
                     async->prealloc_buf + async->munge_ptr,
                     block_size, async->munge_chan);

            /*
             * ensure data is munged in buffer before the
             * async buffer munge_count is incremented
             */
            smp_wmb();

            async->munge_chan += block_size / num_sample_bytes;
            async->munge_chan %= async->cmd.chanlist_len;
            async->munge_count += block_size;
            async->munge_ptr += block_size;
            async->munge_ptr %= async->prealloc_bufsz;
            count += block_size;
        }
    }

    return count;
}
Example #6
static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;

	if (cpu1start_addr) {
		memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);

		__raw_writel(virt_to_phys(socfpga_secondary_startup),
			(sys_manager_base_addr + (cpu1start_addr & 0x000000ff)));

		flush_cache_all();
		smp_wmb();
		outer_clean_range(0, trampoline_size);

		/* This will release CPU #1 out of reset.*/
		__raw_writel(0, rst_manager_base_addr + 0x10);
	}

	return 0;
}
Example #7
File: dtl.c  Project: 710leo/LVS
static int dtl_start(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->buf = dtl->buf;
	dtlr->buf_end = dtl->buf + dtl->buf_entries;
	dtlr->write_index = 0;

	/* setting write_ptr enables logging into our buffer */
	smp_wmb();
	dtlr->write_ptr = dtl->buf;

	/* enable event logging */
	dtlr->saved_dtl_mask = lppaca[dtl->cpu].dtl_enable_mask;
	lppaca[dtl->cpu].dtl_enable_mask |= dtl_event_mask;

	dtl_consumer = consume_dtle;
	atomic_inc(&dtl_count);
	return 0;
}
Example #8
static void chaos_read_callback(struct urb *urb)
{
	struct chaoskey *dev = urb->context;
	int status = urb->status;

	usb_dbg(dev->interface, "callback status (%d)", status);

	if (status == 0)
		dev->valid = urb->actual_length;
	else
		dev->valid = 0;

	dev->used = 0;

	/* must be seen first before validity is announced */
	smp_wmb();

	dev->reading = false;
	wake_up(&dev->wait_q);
}
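The callback writes the payload (dev->valid, dev->used), issues smp_wmb(), and only then clears dev->reading and wakes the waiter, so a reader that sees reading == false is guaranteed to also see the new payload once it performs the matching read-side ordering. A hedged userspace sketch of that flag-plus-wakeup handshake, with C11 atomics and a condition variable standing in for wait_q (names are illustrative, not from the driver):

#include <stdatomic.h>
#include <pthread.h>
#include <stdbool.h>

/* Payload written by the completion callback, read by the waiter. */
static int valid, used;
static atomic_bool reading = true;	/* true while the transfer is in flight */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_q = PTHREAD_COND_INITIALIZER;

/* Completion side: fill the payload, then publish "done" and wake up. */
void read_complete(int actual_length)
{
	valid = actual_length;
	used = 0;
	/* Release store plays the role of smp_wmb(): the payload is
	 * visible before the flag flips. */
	atomic_store_explicit(&reading, false, memory_order_release);
	pthread_mutex_lock(&lock);
	pthread_cond_broadcast(&wait_q);
	pthread_mutex_unlock(&lock);
}

/* Reader side: wait for completion, then consume the payload. */
int wait_for_data(void)
{
	pthread_mutex_lock(&lock);
	/* Acquire load pairs with the release store above. */
	while (atomic_load_explicit(&reading, memory_order_acquire))
		pthread_cond_wait(&wait_q, &lock);
	pthread_mutex_unlock(&lock);
	return valid - used;	/* bytes available; ordered after the flag */
}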
Example #9
static void __init wakeup_secondary(void)
{
#if defined(CHIPREG_BOOT_2ND_ADDR_OFFSET)
	void __iomem *chipRegBase;

	chipRegBase = IOMEM(KONA_CHIPREG_VA);

	writel((virt_to_phys(kona_secondary_startup) & (~0x3))|0x1, chipRegBase + CHIPREG_BOOT_2ND_ADDR_OFFSET);

	smp_wmb();

	/*
	 * Send a 'sev' to wake the secondary core from WFE.
	 * Drain the outstanding writes to memory
	 */
	dsb_sev();
	
	mb();
#endif
}
Example #10
/*
 * This assumes that it can't be called concurrently with itself
 * but only with dequeue_ih_ring_entry.
 */
static bool
enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
	unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);

	if ((rptr - wptr) % kfd->interrupt_ring_size == kfd->device_info->ih_ring_entry_size) {
		/* This is very bad, the system is likely to hang. */
		dev_err_ratelimited(radeon_kfd_chardev(),
			"Interrupt ring overflow, dropping interrupt.\n");
		return false;
	}

	memcpy(kfd->interrupt_ring + wptr, ih_ring_entry, kfd->device_info->ih_ring_entry_size);
	wptr = (wptr + kfd->device_info->ih_ring_entry_size) % kfd->interrupt_ring_size;
	smp_wmb(); /* Ensure memcpy'd data is visible before wptr update. */
	atomic_set(&kfd->interrupt_ring_wptr, wptr);

	return true;
}
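Here smp_wmb() orders the memcpy of the ring entry before the atomic_set() of the write pointer; the consumer needs the mirror-image ordering between reading the write pointer and reading the entry. A self-contained single-producer/single-consumer ring sketch in C11 showing both halves of that pairing (sizes and names are illustrative; this is not the kfd code):

#include <stdatomic.h>
#include <stdbool.h>
#include <string.h>

#define ENTRY_SIZE	16			/* bytes per entry */
#define RING_BYTES	(64 * ENTRY_SIZE)	/* power-of-two total size */

static unsigned char ring[RING_BYTES];
static atomic_uint rptr, wptr;			/* byte offsets into ring[] */

/* Single producer. */
bool ring_enqueue(const void *entry)
{
	unsigned int w = atomic_load_explicit(&wptr, memory_order_relaxed);
	/* Acquire: don't overwrite a slot the consumer is still reading. */
	unsigned int r = atomic_load_explicit(&rptr, memory_order_acquire);

	if ((r - w) % RING_BYTES == ENTRY_SIZE)
		return false;			/* full: one slot kept free */

	memcpy(ring + w, entry, ENTRY_SIZE);
	/* Release store stands in for smp_wmb() + atomic_set(): the copied
	 * bytes are visible before the new write pointer. */
	atomic_store_explicit(&wptr, (w + ENTRY_SIZE) % RING_BYTES,
			      memory_order_release);
	return true;
}

/* Single consumer. */
bool ring_dequeue(void *entry)
{
	unsigned int r = atomic_load_explicit(&rptr, memory_order_relaxed);
	/* Acquire pairs with the producer's release above. */
	unsigned int w = atomic_load_explicit(&wptr, memory_order_acquire);

	if (r == w)
		return false;			/* empty */

	memcpy(entry, ring + r, ENTRY_SIZE);
	/* Release: hand the slot back to the producer. */
	atomic_store_explicit(&rptr, (r + ENTRY_SIZE) % RING_BYTES,
			      memory_order_release);
	return true;
}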
Example #11
static void crash_kexec_prepare_cpus(void)
{
	unsigned int msecs;

	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */

	dump_send_ipi(crash_shutdown_secondary);
	smp_wmb();

	/*
	 * The crash CPU sends an IPI and waits up to 10 seconds for
	 * the other CPUs to respond.
	 */
	pr_emerg("Sending IPI to other cpus...\n");
	msecs = 10000;
	while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
		cpu_relax();
		mdelay(1);
	}
}
Example #12
static noinline void mspin_lock(mspin_lock_t *lock,  mspin_node_t *node)
{
	mspin_node_t *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/* Lock acquired */
		node->locked = 1;
		return;
	}
	ACCESS_ONCE(prev->next) = node;
	smp_wmb();
	/* Wait until the lock holder passes the lock down */
	while (!ACCESS_ONCE(node->locked))
		arch_mutex_cpu_relax();
}
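This is the MCS-style queued spinner used by the mutex optimistic-spinning path: each waiter spins on its own node->locked flag, and the previous holder hands the lock down by setting it, which is why the linking store needs the smp_wmb(). A self-contained C11 sketch of the same queued-lock idea, including the unlock-side handoff that the snippet omits (an illustration under C11 memory-order assumptions, not the kernel implementation):

#include <stdatomic.h>
#include <stddef.h>

struct mcs_node {
	_Atomic(struct mcs_node *) next;
	atomic_int locked;
};

typedef _Atomic(struct mcs_node *) mcs_lock_t;	/* points at the queue tail */

void mcs_lock(mcs_lock_t *lock, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_store_explicit(&node->locked, 0, memory_order_relaxed);
	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);

	/* Swap ourselves in as the new tail. */
	prev = atomic_exchange_explicit(lock, node, memory_order_acq_rel);
	if (!prev)
		return;				/* queue was empty: lock acquired */

	/* Link behind the predecessor (the snippet's ACCESS_ONCE + smp_wmb). */
	atomic_store_explicit(&prev->next, node, memory_order_release);
	/* Spin until the predecessor hands the lock down. */
	while (!atomic_load_explicit(&node->locked, memory_order_acquire))
		;
}

void mcs_unlock(mcs_lock_t *lock, struct mcs_node *node)
{
	struct mcs_node *next = atomic_load_explicit(&node->next,
						     memory_order_acquire);

	if (!next) {
		/* No visible successor: try to mark the queue empty. */
		struct mcs_node *expected = node;

		if (atomic_compare_exchange_strong_explicit(lock, &expected,
				NULL, memory_order_release,
				memory_order_relaxed))
			return;
		/* A successor is mid-enqueue: wait for it to link itself. */
		while (!(next = atomic_load_explicit(&node->next,
						     memory_order_acquire)))
			;
	}
	/* Hand the lock to the successor. */
	atomic_store_explicit(&next->locked, 1, memory_order_release);
}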
Example #13
File: kgdb.c  Project: 020gzh/linux
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;
	unsigned long addr_wr = writable_address(bpt->bpt_addr);

	if (addr_wr == 0)
		return -1;

	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;

	err = probe_kernel_write((char *)addr_wr, arch_kgdb_ops.gdb_bpt_instr,
				 BREAK_INSTR_SIZE);
	smp_wmb();
	flush_icache_range((unsigned long)bpt->bpt_addr,
			   (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
	return err;
}
Example #14
static int __init vdso_init(void)
{
	int data_pages = sizeof(vdso_data_store) >> PAGE_SHIFT;

	/*
	 * We can disable vDSO support generally, but we need to retain
	 * one page to support the two-bundle (16-byte) rt_sigreturn path.
	 */
	if (!vdso_enabled) {
		size_t offset = (unsigned long)&__vdso_rt_sigreturn;
		static struct page *sigret_page;
		sigret_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		BUG_ON(sigret_page == NULL);
		vdso_pagelist = &sigret_page;
		vdso_pages = 1;
		BUG_ON(offset >= PAGE_SIZE);
		memcpy(page_address(sigret_page) + offset,
		       vdso_start + offset, 16);
#ifdef CONFIG_COMPAT
		vdso32_pages = vdso_pages;
		vdso32_pagelist = vdso_pagelist;
#endif
		vdso_ready = 1;
		return 0;
	}

	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
	vdso_pages += data_pages;
	vdso_pagelist = vdso_setup(vdso_start, vdso_pages);

#ifdef CONFIG_COMPAT
	vdso32_pages = (vdso32_end - vdso32_start) >> PAGE_SHIFT;
	vdso32_pages += data_pages;
	vdso32_pagelist = vdso_setup(vdso32_start, vdso32_pages);
#endif

	smp_wmb();
	vdso_ready = 1;

	return 0;
}
Example #15
/**
 *  reuseport_add_sock - Add a socket to the reuseport group of another.
 *  @sk:  New socket to add to the group.
 *  @sk2: Socket belonging to the existing reuseport group.
 *  @bind_inany: Whether or not the group is bound to a local INANY address.
 *
 *  May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2, bind_inany);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					     lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
	return 0;
}
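The ordering contract is spelled out in the comment: the new socket is stored into reuse->socks[] first, smp_wmb() is issued, and only then num_socks is incremented, so a reader that observes the larger count (after its smp_rmb()) also observes the new slot. A hedged generic sketch of that fill-slot-then-publish-count pattern in C11 (structure and function names are illustrative, not the reuseport code):

#include <stdatomic.h>
#include <stddef.h>

#define MAX_SOCKS 16

struct group {
	void *socks[MAX_SOCKS];
	atomic_uint num_socks;
};

/* Writer side: callers are assumed to serialize among themselves (the
 * kernel holds reuseport_lock); only readers are lockless. */
int add_sock(struct group *g, void *sk)
{
	unsigned int n = atomic_load_explicit(&g->num_socks,
					      memory_order_relaxed);

	if (n == MAX_SOCKS)
		return -1;
	g->socks[n] = sk;
	/* Release store: the filled slot is visible before the larger
	 * count, mirroring smp_wmb() before num_socks++. */
	atomic_store_explicit(&g->num_socks, n + 1, memory_order_release);
	return 0;
}

/* Lockless reader, e.g. selecting a socket by hash. */
void *select_sock(struct group *g, unsigned int hash)
{
	/* Acquire load is the smp_rmb() counterpart. */
	unsigned int n = atomic_load_explicit(&g->num_socks,
					      memory_order_acquire);

	return n ? g->socks[hash % n] : NULL;
}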
Example #16
int vlan_dev_set_egress_priority(const struct net_device *dev,
				 u32 skb_prio, u16 vlan_prio)
{
	struct vlan_dev_info *vlan = vlan_dev_info(dev);
	struct vlan_priority_tci_mapping *mp = NULL;
	struct vlan_priority_tci_mapping *np;
	u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;

	/* See if a priority mapping exists.. */
	mp = vlan->egress_priority_map[skb_prio & 0xF];
	while (mp) {
		if (mp->priority == skb_prio) {
			if (mp->vlan_qos && !vlan_qos)
				vlan->nr_egress_mappings--;
			else if (!mp->vlan_qos && vlan_qos)
				vlan->nr_egress_mappings++;
			mp->vlan_qos = vlan_qos;
			return 0;
		}
		mp = mp->next;
	}

	/* Create a new mapping then. */
	mp = vlan->egress_priority_map[skb_prio & 0xF];
	np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL);
	if (!np)
		return -ENOBUFS;

	np->next = mp;
	np->priority = skb_prio;
	np->vlan_qos = vlan_qos;
	/* Before inserting this element in hash table, make sure all its fields
	 * are committed to memory.
	 * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask()
	 */
	smp_wmb();
	vlan->egress_priority_map[skb_prio & 0xF] = np;
	if (vlan_qos)
		vlan->nr_egress_mappings++;
	return 0;
}
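Same publication idea as in the previous example, but for a lockless-read linked list: the new mapping is fully initialized (next, priority, vlan_qos) before smp_wmb() and the head-pointer store, so a reader walking the chain can never see a half-built node. A hedged C11 sketch of head insertion into such a list (names are illustrative; writer serialization, e.g. by RTNL, is assumed as in the kernel code):

#include <stdatomic.h>
#include <stdlib.h>

struct tci_map {
	struct tci_map *next;
	unsigned int priority;
	unsigned int vlan_qos;
};

static _Atomic(struct tci_map *) map_head;

/* Writer: insert a fully built node at the head of the chain. */
int map_insert(unsigned int prio, unsigned int qos)
{
	struct tci_map *np = malloc(sizeof(*np));

	if (!np)
		return -1;
	np->next = atomic_load_explicit(&map_head, memory_order_relaxed);
	np->priority = prio;
	np->vlan_qos = qos;
	/* Release publish: every field above is committed before the head
	 * pointer moves, mirroring smp_wmb() before the final store. */
	atomic_store_explicit(&map_head, np, memory_order_release);
	return 0;
}

/* Lockless reader walking the chain. */
unsigned int map_lookup(unsigned int prio)
{
	struct tci_map *mp = atomic_load_explicit(&map_head,
						  memory_order_acquire);

	for (; mp; mp = mp->next)
		if (mp->priority == prio)
			return mp->vlan_qos;
	return 0;
}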
Example #17
File: rtas.c  Project: 710leo/LVS
static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
{
	u16 slb_size = mmu_slb_size;
	int rc = H_MULTI_THREADS_ACTIVE;
	int cpu;

	slb_set_size(SLB_MIN_SIZE);
	printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());

	while (rc == H_MULTI_THREADS_ACTIVE && !data->done) {
		rc = rtas_call(data->token, 0, 1, NULL);
		if (rc && rc != H_MULTI_THREADS_ACTIVE)
			printk(KERN_DEBUG "ibm,suspend-me returned %d\n", rc);
	}

	smp_rmb();
	if (rc || data->error)
		slb_set_size(slb_size);

	if (data->error)
		rc = data->error;

	data->error = rc;

	if (wake_when_done) {
		smp_wmb();
		data->done = 1;

		/* Ensure data->done is seen on all CPUs that are about to wake up
		 as a result of the H_PROD below */
		mb();

		for_each_online_cpu(cpu)
			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
	}

	if (atomic_dec_return(&data->working) == 0)
		complete(data->complete);

	return rc;
}
Example #18
static int
virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc,
                         int i, unsigned int max, unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc[i].flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc[i].next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        vu_panic(dev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    return VIRTQUEUE_READ_DESC_MORE;
}
Example #19
static int acpi_aml_write_kern(const char *buf, int len)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.out_crc;
	int n;
	char *p;

	ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN);
	if (ret < 0)
		return ret;
	/* sync tail before inserting logs */
	smp_mb();
	p = &crc->buf[crc->head];
	n = min(len, circ_space_to_end(crc));
	memcpy(p, buf, n);
	/* sync head after inserting logs */
	smp_wmb();
	crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
	acpi_aml_unlock_fifo(ACPI_AML_OUT_KERN, true);
	return n;
}
Example #20
/*
 * For a given cpu, push the previous batch of callbacks onto a (global)
 * pending list, then make the current batch the previous.  A new, empty
 * current batch exists after this operation.
 *
 * Locklessly tolerates changes being made by call_rcu() to the current
 * batch, locklessly tolerates the current batch becoming the previous
 * batch, and locklessly tolerates a new, empty current batch becoming
 * available.  Requires that the previous batch be quiescent by the time
 * rcu_end_batch is invoked.
 */
static void rcu_end_batch(struct rcu_data *rd, struct rcu_list *pending)
{
	int prev;
	struct rcu_list *plist;	/* some cpus' previous list */

	prev = (ACCESS_ONCE(rd->which) & 1) ^ 1;
	plist = &rd->cblist[prev];

	/* Chain previous batch of callbacks, if any, to the pending list */
	if (plist->head) {
		rcu_list_join(pending, plist);
		rcu_list_init(plist);
		smp_wmb();
	}
	/*
	 * Swap current and previous lists.  Other cpus must not see this
	 * out-of-order w.r.t. the just-completed plist init, hence the above
	 * smp_wmb().
	 */
	rd->which++;
}
Example #21
/*
 * Receive data into the buffer.
 */
int fifo_buf_recv(struct fifo_buf *fifo, int fd)
{
    int alen = 0, len = 0, outlen = 0, off = 0;
    alen = (fifo->mask + 1) - (fifo->in - fifo->out);
    off = fifo->in & fifo->mask;
    len = min(alen, fifo->size - off);

    outlen += recv(fd, fifo->data + off, len, 0);
    if(outlen == len && alen - len > 0)
        outlen += recv(fd, fifo->data, alen - len, 0);

    /*
     * make sure that the data is copied before
     * incrementing the fifo->in index counter
     */
    if(outlen > 0){
        smp_wmb();
        fifo->in += outlen;
    }
    return outlen;
}
Example #22
/* Called with ovs_mutex. */
void bpf_dp_disconnect_port(struct vport *p)
{
	struct datapath *dp = p->dp;
	struct plum *plum, *dest_plum;
	u32 dest;

	if (p->port_no == OVSP_LOCAL || p->port_no >= PLUM_MAX_PORTS)
		return;

	plum = ovsl_dereference(dp->plums[0]);

	dest = atomic_read(&plum->ports[p->port_no]);
	if (dest) {
		dest_plum = ovsl_dereference(dp->plums[dest >> 16]);
		atomic_set(&dest_plum->ports[dest & 0xffff], 0);
	}
	atomic_set(&plum->ports[p->port_no], 0);
	smp_wmb();

	/* leave the stats allocated until plum is freed */
}
Example #23
int generic_cpu_enable(unsigned int cpu)
{
	/* Do the normal bootup if we haven't
	 * already bootstrapped. */
	if (system_state != SYSTEM_RUNNING)
		return -ENOSYS;

	/* get the target out of its holding state */
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	smp_wmb();

	while (!cpu_online(cpu))
		cpu_relax();

#ifdef CONFIG_PPC64
	fixup_irqs(cpu_online_map);
	/* counter the irq disable in fixup_irqs */
	local_irq_enable();
#endif
	return 0;
}
Example #24
static irqreturn_t card_detect_isr(int irq, void *dev_id)
{
	struct sdhci_host *sdhost = dev_id;
	struct tegra_sdhci *host = sdhci_priv(sdhost);

#ifdef MMC_PATCH_1
	/* A non-zero count means the previous attach/detach has not finished. */
	if (atomic_add_return(1, &detect_lock) != 1)
		return IRQ_HANDLED;
#endif

	host->card_present =
		(gpio_get_value(host->gpio_cd) == host->gpio_polarity_cd);
	if (host->gpio_en != -1)
		gpio_set_value(host->gpio_en,
			       host->card_present ? host->gpio_polarity_en :
						    !host->gpio_polarity_en);
	smp_wmb();
	sdhci_card_detect_callback(sdhost);
	return IRQ_HANDLED;
}
Example #25
static enum hrtimer_restart controller_dma_timerfuc(struct hrtimer *timer)
{
	struct hw_controller *controller = NULL;
	unsigned int fifo_cnt = 0;

	controller = container_of(timer, struct hw_controller, timer);
	fifo_cnt = NFC_CMDFIFO_SIZE(controller);

	/* */
	smp_rmb();
	/* */
	smp_wmb();
	if (fifo_cnt == 0)
		complete(&controller_dma_completion);
	else
		hrtimer_start(&controller->timer,
			ktime_set(0, DMA_TIME_CNT_20US),
			HRTIMER_MODE_REL);

	return HRTIMER_NORESTART;
}
Example #26
static noinline
void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/* Lock acquired */
		node->locked = 1;
		return;
	}
	ACCESS_ONCE(prev->next) = node;
	smp_wmb();
	/* Wait until the lock holder passes the lock down */
	while (!cpu_relaxed_read(&(node->locked)))
		cpu_read_relax();
}
Example #27
/*
 * Samma på svenska.. ("Same in Swedish..")
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
	int old_fsgid;

	old_fsgid = current->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
		return old_fsgid;

	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid || 
	    capable(CAP_SETGID)) {
		if (gid != old_fsgid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		current->fsgid = gid;
		key_fsgid_changed(current);
		proc_id_connector(current, PROC_EVENT_GID);
	}
	return old_fsgid;
}
Example #28
static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}
Example #29
File: qp.c  Project: 020gzh/linux
/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_rwq *wq;
	unsigned long flags;

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	return 0;
}
Example #30
static void pnv_smp_cpu_kill_self(void)
{
	unsigned int cpu;

	/* If powersave_nap is enabled, use NAP mode, else just
	 * spin aimlessly
	 */
	if (!powersave_nap) {
		generic_mach_cpu_die();
		return;
	}

	/* Standard hot unplug procedure */
	local_irq_disable();
	idle_task_exit();
	current->active_mm = NULL; /* for sanity */
	cpu = smp_processor_id();
	DBG("CPU%d offline\n", cpu);
	generic_set_cpu_dead(cpu);
	smp_wmb();

	/* We don't want to take decrementer interrupts while we are offline,
	 * so clear LPCR:PECE1. We keep PECE2 enabled.
	 */
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
	while (!generic_check_cpu_restart(cpu)) {
		power7_idle();
		if (!generic_check_cpu_restart(cpu)) {
			DBG("CPU%d Unexpected exit while offline !\n", cpu);
			/* We may be getting an IPI, so we re-enable
			 * interrupts to process it, it will be ignored
			 * since we aren't online (hopefully)
			 */
			local_irq_enable();
			local_irq_disable();
		}
	}
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
	DBG("CPU%d coming online...\n", cpu);
}