Example #1
static void gdlm_recover_done(void *arg, struct dlm_slot *slots, int num_slots,
			      int our_slot, uint32_t generation)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	/* ensure the ls jid arrays are large enough */
	set_recover_size(sdp, slots, num_slots);

	spin_lock(&ls->ls_recover_spin);
	ls->ls_recover_start = generation;

	if (!ls->ls_recover_mount) {
		ls->ls_recover_mount = generation;
		ls->ls_jid = our_slot - 1;
	}

	if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);

	clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY);
	spin_unlock(&ls->ls_recover_spin);
}
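This is the release half of the kernel's bit-wait idiom: clear the flag, make the clear visible with smp_mb__after_clear_bit(), then wake sleepers with wake_up_bit(). Below is a minimal sketch of the matching waiter side, assuming the four-argument wait_on_bit() API of this era; gdlm_wait_recovery() and recover_bit_wait() are illustrative names, not actual gfs2 functions.

#include <linux/wait.h>
#include <linux/sched.h>

/* Illustrative action callback for wait_on_bit(): sleep until woken. */
static int recover_bit_wait(void *word)
{
	schedule();
	return 0;
}

/*
 * Hypothetical waiter pairing with gdlm_recover_done() above.  The
 * smp_mb__after_clear_bit() in the waker orders clear_bit() before
 * wake_up_bit(), so a waiter that still sees the bit set here is
 * guaranteed to receive the subsequent wakeup.
 */
static int gdlm_wait_recovery(struct lm_lockstruct *ls)
{
	return wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY,
			   recover_bit_wait, TASK_UNINTERRUPTIBLE);
}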
Example #2
fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned long cpu;

	cpu = get_cpu();

	if (!cpu_isset(cpu, flush_cpumask))
		goto out;
		/* 
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */
		 
	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpu_clear(cpu, flush_cpumask);
	smp_mb__after_clear_bit();
out:
	put_cpu_no_resched();
}
Example #3
void arch_trigger_all_cpu_backtrace(void)
{
	int i;

	if (test_and_set_bit(0, &backtrace_flag))
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		return;

	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);

	printk(KERN_INFO "sending NMI to all CPUs:\n");
	apic->send_IPI_all(NMI_VECTOR);

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
	}

	clear_bit(0, &backtrace_flag);
	smp_mb__after_clear_bit();
}
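The poll loop above works because each CPU's NMI handler dumps its own stack and then clears its bit in backtrace_mask. A simplified sketch of that handler side, modeled loosely on the x86 arch_trigger_all_cpu_backtrace_handler() of the same era (the NMI notifier plumbing is trimmed; treat names and details as assumptions):

/*
 * Sketch: on receiving the NMI, a CPU prints its registers/stack and
 * clears itself from backtrace_mask, which is what the mdelay() poll
 * loop above is watching.
 */
static int backtrace_nmi_handler_sketch(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return 1;	/* NMI handled */
	}
	return 0;
}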
Example #4
static void
__xprt_lock_write_next(struct rpc_xprt *xprt)
{
	struct rpc_task *task;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate))
		return;
	if (!xprt->nocong && RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}
	if (xprt->nocong || __xprt_get_cong(xprt, task)) {
		struct rpc_rqst *req = task->tk_rqstp;
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return;
	}
out_unlock:
	smp_mb__before_clear_bit();
	clear_bit(XPRT_LOCKED, &xprt->sockstate);
	smp_mb__after_clear_bit();
}
Example #5
void arch_trigger_all_cpu_backtrace(void)
{
	int i;

	if (test_and_set_bit(0, &backtrace_flag))
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		return;

	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);

	printk(KERN_INFO "sending NMI to all CPUs:\n");
	apic->send_IPI_all(NMI_VECTOR);

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
	}

	clear_bit(0, &backtrace_flag);
	smp_mb__after_clear_bit();
}
Example #6
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	unsigned long *bitlock = &NFS_I(inode)->flags;
	struct nfs_pageio_descriptor pgio;
	int err;

	/* Stop dirtying of new pages while we sync */
	err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
			nfs_wait_bit_killable, TASK_KILLABLE);
	if (err)
		goto out_err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
	nfs_pageio_complete(&pgio);

	clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
	smp_mb__after_clear_bit();
	wake_up_bit(bitlock, NFS_INO_FLUSHING);

	if (err < 0)
		goto out_err;
	err = pgio.pg_error;
	if (err < 0)
		goto out_err;
	return 0;
out_err:
	return err;
}
Example #7
/**
 * fscache_object_lookup_negative - Note negative cookie lookup
 * @object: Object pointing to cookie to mark
 *
 * Note negative lookup, permitting those waiting to read data from an already
 * existing backing object to continue as there's no data for them to read.
 */
void fscache_object_lookup_negative(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x,%s}",
	       object->debug_id, fscache_object_states[object->state]);

	spin_lock(&object->lock);
	if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
		fscache_stat(&fscache_n_object_lookups_negative);

		/* transit here to allow write requests to begin stacking up
		 * and read requests to begin returning ENODATA */
		object->state = FSCACHE_OBJECT_CREATING;
		spin_unlock(&object->lock);

		set_bit(FSCACHE_COOKIE_PENDING_FILL, &cookie->flags);
		set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

		_debug("wake up lookup %p", &cookie->flags);
		smp_mb__before_clear_bit();
		clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
		set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
	} else {
		ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
		spin_unlock(&object->lock);
	}

	_leave("");
}
Example #8
/*
 * Serialize write access to sockets, in order to prevent different
 * requests from interfering with each other.
 * Also prevents TCP socket connects from colliding with writes.
 */
static int
__xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) {
		if (task == xprt->snd_task)
			return 1;
		if (task == NULL)
			return 0;
		goto out_sleep;
	}
	if (xprt->nocong || __xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return 1;
	}
	smp_mb__before_clear_bit();
	clear_bit(XPRT_LOCKED, &xprt->sockstate);
	smp_mb__after_clear_bit();
out_sleep:
	dprintk("RPC: %4d failed to lock socket %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
	return 0;
}
Example #9
static int dvb_usb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{
	struct dvb_usb_adapter *adap = dvbdmxfeed->demux->priv;
	struct dvb_usb_device *d = adap_to_d(adap);
	int ret = 0;
	dev_dbg(&d->udev->dev,
			"%s: adap=%d active_fe=%d feed_type=%d setting pid [%s]: %04x (%04d) at index %d\n",
			__func__, adap->id, adap->active_fe, dvbdmxfeed->type,
			adap->pid_filtering ? "yes" : "no", dvbdmxfeed->pid,
			dvbdmxfeed->pid, dvbdmxfeed->index);

	if (adap->active_fe == -1)
		return -EINVAL;

	/* remove PID from device HW PID filter */
	if (adap->pid_filtering && adap->props->pid_filter) {
		ret = adap->props->pid_filter(adap, dvbdmxfeed->index,
				dvbdmxfeed->pid, 0);
		if (ret)
			dev_err(&d->udev->dev, "%s: pid_filter() failed=%d\n",
					KBUILD_MODNAME, ret);
	}

	/* we cannot stop streaming until last PID is removed */
	if (--adap->feed_count > 0)
		goto skip_feed_stop;

	/* ask device to stop streaming */
	if (d->props->streaming_ctrl) {
		ret = d->props->streaming_ctrl(adap->fe[adap->active_fe], 0);
		if (ret)
			dev_err(&d->udev->dev,
					"%s: streaming_ctrl() failed=%d\n",
					KBUILD_MODNAME, ret);
	}

	/* disable HW PID filter */
	if (adap->pid_filtering && adap->props->pid_filter_ctrl) {
		ret = adap->props->pid_filter_ctrl(adap, 0);
		if (ret)
			dev_err(&d->udev->dev,
					"%s: pid_filter_ctrl() failed=%d\n",
					KBUILD_MODNAME, ret);
	}

	/* kill USB streaming packets */
	usb_urb_killv2(&adap->stream);

	/* clear 'streaming' status bit */
	clear_bit(ADAP_STREAMING, &adap->state_bits);
	smp_mb__after_clear_bit();
	wake_up_bit(&adap->state_bits, ADAP_STREAMING);
skip_feed_stop:

	if (ret)
		dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
	return ret;
}
Example #10
/*
 * clearing the blocking flag will take the spinlock again.
 * After this you can't safely schedule
 */
void btrfs_clear_lock_blocking(struct extent_buffer *eb)
{
	if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
		spin_nested(eb);
		clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
		smp_mb__after_clear_bit();
	}
	/* exit with the spin lock held */
}
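For context, the matching btrfs_set_lock_blocking() does the inverse: setting the bit releases the spinlock so the holder may schedule while still logically holding the tree lock. A sketch of that inverse, based on same-era btrfs locking code (simplified; treat as an assumption):

/* Sketch of the inverse: mark the lock blocking and drop the spinlock,
 * allowing the holder to schedule.  btrfs_clear_lock_blocking() above
 * re-takes the spinlock and clears the bit. */
void btrfs_set_lock_blocking(struct extent_buffer *eb)
{
	if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
		set_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
		spin_unlock(&eb->lock);
	}
	/* exit with the spinlock released */
}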
Example #11
static int rtlx_release(struct inode *inode, struct file *filp)
{
	int minor = MINOR(inode->i_rdev);

	clear_bit(RTLX_STATE_OPENED, &rtlx->channel[minor].lx_state);
	smp_mb__after_clear_bit();

	return 0;
}
Example #12
/*
 * Unlock cookie management lock
 */
static inline void nfs_fscache_inode_unlock(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	smp_mb__before_clear_bit();
	clear_bit(NFS_INO_FSCACHE_LOCK, &nfsi->flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&nfsi->flags, NFS_INO_FSCACHE_LOCK);
}
Example #13
static void
nfs_iocounter_dec(struct nfs_io_counter *c)
{
	if (atomic_dec_and_test(&c->io_count)) {
		clear_bit(NFS_IO_INPROGRESS, &c->flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
	}
}
Example #14
/* Performing the configuration space reads/writes must not be done in atomic
 * context because some of the pci_* functions can sleep (mostly due to ACPI
 * use of semaphores). This function is intended to be called from a work
 * queue in process context taking a struct pciback_device as a parameter */
void pciback_do_op(struct work_struct *work)
{
	struct pciback_device *pdev = container_of(work, struct pciback_device, op_work);
	struct pci_dev *dev;
	struct xen_pci_op *op = &pdev->sh_info->op;

	dev = pciback_get_pci_dev(pdev, op->domain, op->bus, op->devfn);

	if (dev == NULL)
		op->err = XEN_PCI_ERR_dev_not_found;
	else {
		switch (op->cmd) {
		case XEN_PCI_OP_conf_read:
			op->err = pciback_config_read(dev,
				  op->offset, op->size, &op->value);
			break;
		case XEN_PCI_OP_conf_write:
			op->err = pciback_config_write(dev,
				  op->offset, op->size, op->value);
			break;
#ifdef CONFIG_PCI_MSI
		case XEN_PCI_OP_enable_msi:
			op->err = pciback_enable_msi(pdev, dev, op);
			break;
		case XEN_PCI_OP_disable_msi:
			op->err = pciback_disable_msi(pdev, dev, op);
			break;
		case XEN_PCI_OP_enable_msix:
			op->err = pciback_enable_msix(pdev, dev, op);
			break;
		case XEN_PCI_OP_disable_msix:
			op->err = pciback_disable_msix(pdev, dev, op);
			break;
#endif
		default:
			op->err = XEN_PCI_ERR_not_implemented;
			break;
		}
	}
	/* Tell the driver domain that we're done. */ 
	wmb();
	clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
	notify_remote_via_irq(pdev->evtchn_irq);

	/* Mark that we're done. */
	smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
	clear_bit(_PDEVF_op_active, &pdev->flags);
	smp_mb__after_clear_bit(); /* /before/ final check for work */

	/* Check to see if the driver domain tried to start another request
	 * in between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
	 */
	test_and_schedule_op(pdev);
}
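The final test_and_schedule_op() call closes the race the comment describes: the frontend may post a new request after _XEN_PCIF_active is cleared but before _PDEVF_op_active is cleared. A sketch of what that re-check plausibly looks like, modeled on pciback's test_and_schedule_op() (details assumed):

/* Sketch: requeue the work item if the frontend has posted a request
 * and no operation is currently marked active.  The barriers around
 * clear_bit(_PDEVF_op_active) above keep this test from missing a
 * request that raced with the flag clearing. */
static void test_and_schedule_op(struct pciback_device *pdev)
{
	if (test_bit(_XEN_PCIF_active,
		     (unsigned long *)&pdev->sh_info->flags) &&
	    !test_and_set_bit(_PDEVF_op_active, &pdev->flags))
		schedule_work(&pdev->op_work);
}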
Example #15
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) {
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_clear_bit();
	} else
		schedule_work(&xprt->task_cleanup);
}
Example #16
/*
 * Releases the socket for use by other requests.
 */
static void
__xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt->snd_task = NULL;
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->sockstate);
		smp_mb__after_clear_bit();
		__xprt_lock_write_next(xprt);
	}
}
Example #17
void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[rw];

	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
	clear_bit(bit, &bdi->state);
	smp_mb__after_clear_bit();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
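The barrier here is what makes the waitqueue_active() shortcut safe: the congestion bit must be visibly clear before the waker samples the wait queue, otherwise a task that had just queued itself after re-testing the bit could sleep forever. A simplified sketch of the waiter side, modeled on the kernel's congestion_wait() (treat details as assumptions):

/* Simplified waiter: enqueue on congestion_wqh[rw] first, then sleep.
 * Because clear_bdi_congested() clears the bit before checking
 * waitqueue_active(), a waiter enqueued here cannot miss the wakeup. */
long congestion_wait_sketch(int rw, long timeout)
{
	long ret;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[rw];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);
	return ret;
}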
Example #18
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to the request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
}
Example #19
static void xen_idle(void)
{
	local_irq_disable();

	if (need_resched())
		local_irq_enable();
	else {
		current_thread_info()->status &= ~TS_POLLING;
		smp_mb__after_clear_bit();
		safe_halt();
		current_thread_info()->status |= TS_POLLING;
	}
}
Example #20
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();

#ifdef CONFIG_X86_32
	if (current->active_mm)
		load_user_cs_desc(cpu, current->active_mm);
#endif

	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &flush_state[sender];

	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */

	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}
Example #21
/**
 * lc_put - give up refcnt of @e
 * @lc: the lru cache to operate on
 * @e: the element to put
 *
 * If refcnt reaches zero, the element is moved to the lru list,
 * and a %LC_STARVING (if set) is cleared.
 * Returns the new (post-decrement) refcnt.
 */
unsigned int lc_put(struct lru_cache *lc, struct lc_element *e)
{
	PARANOIA_ENTRY();
	PARANOIA_LC_ELEMENT(lc, e);
	BUG_ON(e->refcnt == 0);
	BUG_ON(e == lc->changing_element);
	if (--e->refcnt == 0) {
		/* move it to the front of LRU. */
		list_move(&e->list, &lc->lru);
		lc->used--;
		clear_bit(__LC_STARVING, &lc->flags);
		smp_mb__after_clear_bit();
	}
	RETURN(e->refcnt);
}
Example #22
/**
 * lc_changed - tell @lc that the change has been recorded
 * @lc: the lru cache to operate on
 * @e: the element pending label change
 */
void lc_changed(struct lru_cache *lc, struct lc_element *e)
{
	PARANOIA_ENTRY();
	BUG_ON(e != lc->changing_element);
	PARANOIA_LC_ELEMENT(lc, e);
	++lc->changed;
	e->lc_number = lc->new_number;
	list_add(&e->list, &lc->in_use);
	hlist_add_head(&e->colision, lc_hash_slot(lc, lc->new_number));
	lc->changing_element = NULL;
	lc->new_number = LC_FREE;
	clear_bit(__LC_DIRTY, &lc->flags);
	smp_mb__after_clear_bit();
	RETURN();
}
Example #23
int btrfs_tree_unlock(struct extent_buffer *eb)
{
	/*
	 * if we were a blocking owner, we don't have the spinlock held
	 * just clear the bit and look for waiters
	 */
	if (test_and_clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
		smp_mb__after_clear_bit();
	else
		spin_unlock(&eb->lock);

	if (waitqueue_active(&eb->lock_wq))
		wake_up(&eb->lock_wq);
	return 0;
}
Example #24
void default_idle(void)
{
	if (hlt_works()) {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();

		set_bl_bit();
		if (!need_resched()) {
			local_irq_enable();
			cpu_sleep();
		} else
			local_irq_enable();

		set_thread_flag(TIF_POLLING_NRFLAG);
		clear_bl_bit();
	} else
		poll_idle();
}
Example #25
static void default_idle(void)
{
	if (!hlt_counter) {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();
		set_bl_bit();
		stop_critical_timings();

		while (!need_resched())
			cpu_sleep();

		start_critical_timings();
		clear_bl_bit();
		set_thread_flag(TIF_POLLING_NRFLAG);
	} else
		while (!need_resched())
			cpu_relax();
}
Example #26
File: process.c  Project: 7L/pi_plus
void default_idle(void)
{
	if (likely(hlt_counter)) {
		local_irq_disable();
		stop_critical_timings();
		cpu_relax();
		start_critical_timings();
		local_irq_enable();
	} else {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();
		local_irq_disable();
		while (!need_resched())
			cpu_sleep();
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);
	}
}
Example #27
void ipipe_critical_exit(unsigned long flags)
{
	if (num_online_cpus() == 1) {
		hard_local_irq_restore(flags);
		return;
	}

#ifdef CONFIG_SMP
	if (atomic_dec_and_test(&__ipipe_critical_count)) {
		spin_unlock(&__ipipe_cpu_barrier);
		while (!cpus_empty(__ipipe_cpu_sync_map))
			cpu_relax();
		cpu_clear(ipipe_processor_id(), __ipipe_cpu_lock_map);
		clear_bit(0, &__ipipe_critical_lock);
		smp_mb__after_clear_bit();
	}
#endif /* CONFIG_SMP */

	hard_local_irq_restore(flags);
}
Example #28
static int dvb_usb_fe_sleep(struct dvb_frontend *fe)
{
	int ret;
	struct dvb_usb_adapter *adap = fe->dvb->priv;
	struct dvb_usb_device *d = adap_to_d(adap);
	dev_dbg(&d->udev->dev, "%s: adap=%d fe=%d\n", __func__, adap->id,
			fe->id);

	if (!adap->suspend_resume_active) {
		set_bit(ADAP_SLEEP, &adap->state_bits);
		wait_on_bit(&adap->state_bits, ADAP_STREAMING, wait_schedule,
				TASK_UNINTERRUPTIBLE);
	}

	if (adap->fe_sleep[fe->id]) {
		ret = adap->fe_sleep[fe->id](fe);
		if (ret < 0)
			goto err;
	}

	if (d->props->frontend_ctrl) {
		ret = d->props->frontend_ctrl(fe, 0);
		if (ret < 0)
			goto err;
	}

	ret = dvb_usbv2_device_power_ctrl(d, 0);
	if (ret < 0)
		goto err;
err:
	if (!adap->suspend_resume_active) {
		adap->active_fe = -1;
		clear_bit(ADAP_SLEEP, &adap->state_bits);
		smp_mb__after_clear_bit();
		wake_up_bit(&adap->state_bits, ADAP_SLEEP);
	}

	dev_dbg(&d->udev->dev, "%s: ret=%d\n", __func__, ret);
	return ret;
}
Example #29
/**
 * fscache_obtained_object - Note successful object lookup or creation
 * @object: Object pointing to cookie to mark
 *
 * Note successful lookup and/or creation, permitting those waiting to write
 * data to a backing object to continue.
 *
 * Note that after calling this, an object's cookie may be relinquished by the
 * netfs, and so must be accessed with object lock held.
 */
void fscache_obtained_object(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x,%s}",
	       object->debug_id, fscache_object_states[object->state]);

	/* if we were still looking up, then we must have a positive lookup
	 * result, in which case there may be data available */
	spin_lock(&object->lock);
	if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
		fscache_stat(&fscache_n_object_lookups_positive);

		clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

		object->state = FSCACHE_OBJECT_AVAILABLE;
		spin_unlock(&object->lock);

		smp_mb__before_clear_bit();
		clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
		set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
	} else {
		ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
		fscache_stat(&fscache_n_object_created);

		object->state = FSCACHE_OBJECT_AVAILABLE;
		spin_unlock(&object->lock);
		set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
		smp_wmb();
	}

	if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_CREATING);

	_leave("");
}
Example #30
static int dvb_usb_fe_init(struct dvb_frontend *fe)
{
	int ret;
	struct dvb_usb_adapter *adap = fe->dvb->priv;
	struct dvb_usb_device *d = adap_to_d(adap);
	dev_dbg(&d->udev->dev, "%s: adap=%d fe=%d\n", __func__, adap->id,
			fe->id);

	if (!adap->suspend_resume_active) {
		adap->active_fe = fe->id;
		set_bit(ADAP_INIT, &adap->state_bits);
	}

	ret = dvb_usbv2_device_power_ctrl(d, 1);
	if (ret < 0)
		goto err;

	if (d->props->frontend_ctrl) {
		ret = d->props->frontend_ctrl(fe, 1);
		if (ret < 0)
			goto err;
	}

	if (adap->fe_init[fe->id]) {
		ret = adap->fe_init[fe->id](fe);
		if (ret < 0)
			goto err;
	}
err:
	if (!adap->suspend_resume_active) {
		clear_bit(ADAP_INIT, &adap->state_bits);
		smp_mb__after_clear_bit();
		wake_up_bit(&adap->state_bits, ADAP_INIT);
	}

	dev_dbg(&d->udev->dev, "%s: ret=%d\n", __func__, ret);
	return ret;
}