Example #1
/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 *
 * Carefully guard against sma->complex_count changing between zero
 * and non-zero while we are spinning for the lock. The value of
 * sma->complex_count cannot change while we are holding the lock,
 * so sem_unlock should be fine.
 *
 * The global lock path checks that all the local locks have been released,
 * checking each local lock once. This means that the local lock paths
 * cannot start their critical sections while the global lock is held.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			      int nsops)
{
	int locknum;
 again:
	if (nsops == 1 && !sma->complex_count) {
		struct sem *sem = sma->sem_base + sops->sem_num;

		/* Lock just the semaphore we are interested in. */
		spin_lock(&sem->lock);

		/*
		 * If sma->complex_count was set while we were spinning,
		 * we may need to look at things we did not lock here.
		 */
		if (unlikely(sma->complex_count)) {
			spin_unlock(&sem->lock);
			goto lock_array;
		}

		/*
		 * Another process is holding the global lock on the
		 * sem_array; we cannot enter our critical section,
		 * but have to wait for the global lock to be released.
		 */
		if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
			spin_unlock(&sem->lock);
			spin_unlock_wait(&sma->sem_perm.lock);
			goto again;
		}

		locknum = sops->sem_num;
	} else {
		int i;
		/*
		 * Lock the semaphore array, and wait for all of the
		 * individual semaphore locks to go away.  The code
		 * above ensures no new single-lock holders will enter
		 * their critical section while the array lock is held.
		 */
 lock_array:
		spin_lock(&sma->sem_perm.lock);
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem *sem = sma->sem_base + i;
			spin_unlock_wait(&sem->lock);
		}
		locknum = -1;
	}
	return locknum;
}
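For reference, a minimal sketch of the matching unlock helper, assuming the locknum convention sem_lock() returns above (-1 for the global array lock, otherwise the index of the per-semaphore lock); the real ipc/sem.c helper may differ in detail:

static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == -1) {
		/* The whole array was locked via sem_perm.lock. */
		spin_unlock(&sma->sem_perm.lock);
	} else {
		/* Only the per-semaphore lock was taken. */
		struct sem *sem = sma->sem_base + locknum;
		spin_unlock(&sem->lock);
	}
}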
Example #2
/* Drop into the prom, with the chance to continue with the 'go'
 * prom command.
 */
void
prom_cmdline(void)
{
	unsigned long flags;

	__save_and_cli(flags);

#ifdef CONFIG_SUN_CONSOLE
	if(!serial_console && prom_palette)
		prom_palette (1);
#endif

#ifdef CONFIG_SMP
	smp_capture();
#endif

	p1275_cmd ("enter", P1275_INOUT(0,0));

#ifdef CONFIG_SMP
	smp_release();
	/* Wait until nobody else is holding the global IRQ brlock. */
	spin_unlock_wait(&__br_write_locks[BR_GLOBALIRQ_LOCK].lock);
#endif

#ifdef CONFIG_SUN_CONSOLE
	if(!serial_console && prom_palette)
		prom_palette (0);
#endif

	__restore_flags(flags);
}
Example #3
void dev_deactivate(struct net_device *dev)
{
	struct Qdisc *qdisc;

	spin_lock_bh(&dev->queue_lock);
	qdisc = dev->qdisc;
	dev->qdisc = &noop_qdisc;

	qdisc_reset(qdisc);

	spin_unlock_bh(&dev->queue_lock);

	dev_watchdog_down(dev);

	/* Wait until the device is no longer scheduled by the net tx softirq. */
	while (test_bit(__LINK_STATE_SCHED, &dev->state))
		yield();

	/* Wait for any transmit still holding dev->xmit_lock to finish. */
	spin_unlock_wait(&dev->xmit_lock);
}
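To see what the final spin_unlock_wait() pairs with, here is a hedged sketch of the transmit side (example_xmit is an illustrative stand-in, not the real qdisc_restart()): the transmit path takes dev->xmit_lock around the driver's hard_start_xmit hook, so once dev_deactivate() observes the lock free, no transmit that started before deactivation is still running.

/* Illustrative only: the real transmit path lives in qdisc_restart(). */
static void example_xmit(struct net_device *dev, struct sk_buff *skb)
{
	spin_lock(&dev->xmit_lock);
	dev->hard_start_xmit(skb, dev);	/* driver transmit hook */
	spin_unlock(&dev->xmit_lock);
}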
Example #4
/* Drop into the prom, with the chance to continue with the 'go'
 * prom command.
 */
void
prom_cmdline(void)
{
    unsigned long flags;

    __save_and_cli(flags);

#ifdef CONFIG_SUN_CONSOLE
    if(!serial_console && prom_palette)
        prom_palette (1);
#endif

    /* We always arrive here via a serial interrupt.
     * So in order for everything to work reliably, even
     * on SMP, we need to drop the IRQ locks we hold.
     */
#ifdef CONFIG_SMP
    irq_exit(smp_processor_id(), 0);
    smp_capture();
#else
    local_irq_count(smp_processor_id())--;
#endif

    p1275_cmd ("enter", P1275_INOUT(0,0));

#ifdef CONFIG_SMP
    smp_release();
    irq_enter(smp_processor_id(), 0);
    spin_unlock_wait(&__br_write_locks[BR_GLOBALIRQ_LOCK].lock);
#else
    local_irq_count(smp_processor_id())++;
#endif

#ifdef CONFIG_SUN_CONSOLE
    if(!serial_console && prom_palette)
        prom_palette (0);
#endif

    __restore_flags(flags);
}
Example #5
static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
	struct mm_struct * mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		/*
		 * There are cases where the PTL is held to ensure no
		 * new threads start up in user mode using an mm, which
		 * allows optimizing out ipis; the tlb_gather_mmu code
		 * is an example.
		 */
		spin_unlock_wait(&oldmm->page_table_lock);
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	/* Copy the current MM stuff.. */
	memcpy(mm, oldmm, sizeof(*mm));
	if (!mm_init(mm))
		goto fail_nomem;

	if (init_new_context(tsk,mm))
		goto fail_nocontext;

	retval = dup_mmap(mm, oldmm);
	if (retval)
		goto free_pt;

	mm->hiwater_rss = mm->rss;
	mm->hiwater_vm = mm->total_vm;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

free_pt:
	mmput(mm);
fail_nomem:
	return retval;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return retval;
}
Example #6
static __inline__ void net_family_read_lock(void)
{
	/* Register this reader, then wait for any writer currently
	 * holding net_family_lock to drop it. */
	atomic_inc(&net_family_lockct);
	spin_unlock_wait(&net_family_lock);
}
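The matching unlock and writer side are not shown on this page; below is a minimal sketch of what they typically look like in this counter-plus-spinlock pattern (the helper names mirror the reader above and are assumptions, not taken from net/socket.c): the writer grabs the spinlock and then spins until the reader count drains to zero.

/* Assumed counterparts to net_family_read_lock() above (sketch only). */
static __inline__ void net_family_read_unlock(void)
{
	atomic_dec(&net_family_lockct);
}

static __inline__ void net_family_write_lock(void)
{
	spin_lock(&net_family_lock);
	while (atomic_read(&net_family_lockct) != 0) {
		/* Drop the lock so pending readers can finish, then retry. */
		spin_unlock(&net_family_lock);
		yield();
		spin_lock(&net_family_lock);
	}
}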
Example #7
File: ccid.c  Project: 274914765/C
static inline void ccids_read_lock(void)
{
    /* Register this reader, order the increment before the check,
     * then wait for any writer currently holding ccids_lock. */
    atomic_inc(&ccids_lockct);
    smp_mb__after_atomic_inc();
    spin_unlock_wait(&ccids_lock);
}
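Compared with Example #6, this reader adds smp_mb__after_atomic_inc(): the increment of ccids_lockct must be globally visible before the load performed by spin_unlock_wait(), otherwise the reader could conclude that no writer holds ccids_lock while a concurrent writer, which checks the counter only after taking the lock, still sees the counter at zero, and both sides would enter their critical sections at once.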