Example #1
0
static void
__xprt_lock_write_next(struct rpc_xprt *xprt)
{
	struct rpc_task *task;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate))
		return;
	if (!xprt->nocong && RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}
	if (xprt->nocong || __xprt_get_cong(xprt, task)) {
		struct rpc_rqst *req = task->tk_rqstp;
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return;
	}
out_unlock:
	smp_mb__before_clear_bit();
	clear_bit(XPRT_LOCKED, &xprt->sockstate);
	smp_mb__after_clear_bit();
}
Example #2
0
irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
				     struct pt_regs *regs)
{
	unsigned long cpu;

	cpu = get_cpu();

	if (!cpu_isset(cpu, flush_cpumask))
		goto out;
		/* 
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * its staying as a return
		 *
		 * BUG();
		 */
		 
	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	smp_mb__before_clear_bit();
	cpu_clear(cpu, flush_cpumask);
	smp_mb__after_clear_bit();
out:
	put_cpu_no_resched();

	return IRQ_HANDLED;
}
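The barrier before cpu_clear() matters here because the initiating CPU spins until flush_cpumask is empty and then reuses flush_mm and flush_va for the next flush. A rough sketch of that sender side, modelled on the old i386 flush_tlb_others() (locking and naming details are from memory, not taken from the example above):

static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	/* Serialize senders: only one flush request is in flight at a time. */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
	cpus_or(flush_cpumask, cpumask, flush_cpumask);

	/* Ask the target CPUs to run smp_invalidate_interrupt(). */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

	/*
	 * Wait until every target has cleared its bit.  The receivers'
	 * smp_mb__before_clear_bit() guarantees they finished reading
	 * flush_mm/flush_va before the bit drops and we overwrite the
	 * globals below.
	 */
	while (!cpus_empty(flush_cpumask))
		cpu_relax();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}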
Example #3
0
void smp_invalidate_interrupt(struct pt_regs *regs)
{
    unsigned long cpu;

    cpu = get_cpu();

    if (!cpu_isset(cpu, flush_cpumask))
        goto out;
    /*
     * This was a BUG() but until someone can quote me the
     * line from the intel manual that guarantees an IPI to
     * multiple CPUs is retried _only_ on the erroring CPUs
     * its staying as a return
     *
     * BUG();
     */

    if (flush_mm == x86_read_percpu(cpu_tlbstate.active_mm)) {
        if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) {
            if (flush_va == TLB_FLUSH_ALL)
                local_flush_tlb();
            else
                __flush_tlb_one(flush_va);
        } else
            leave_mm(cpu);
    }
    ack_APIC_irq();
    smp_mb__before_clear_bit();
    cpu_clear(cpu, flush_cpumask);
    smp_mb__after_clear_bit();
out:
    put_cpu_no_resched();
    inc_irq_stat(irq_tlb_count);
}
Example #4
0
/* Performing the configuration space reads/writes must not be done in atomic
 * context because some of the pci_* functions can sleep (mostly due to ACPI
 * use of semaphores). This function is intended to be called from a work
 * queue in process context taking a struct pciback_device as a parameter */
void pciback_do_op(void *data)
{
	struct pciback_device *pdev = data;
	struct pci_dev *dev;
	struct xen_pci_op *op = &pdev->sh_info->op;

	dev = pciback_get_pci_dev(pdev, op->domain, op->bus, op->devfn);

	if (dev == NULL)
		op->err = XEN_PCI_ERR_dev_not_found;
	else if (op->cmd == XEN_PCI_OP_conf_read)
		op->err = pciback_config_read(dev, op->offset, op->size,
					      &op->value);
	else if (op->cmd == XEN_PCI_OP_conf_write)
		op->err = pciback_config_write(dev, op->offset, op->size,
					       op->value);
	else
		op->err = XEN_PCI_ERR_not_implemented;

	/* Tell the driver domain that we're done. */ 
	wmb();
	clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
	notify_remote_via_irq(pdev->evtchn_irq);

	/* Mark that we're done. */
	smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
	clear_bit(_PDEVF_op_active, &pdev->flags);
	smp_mb__after_clear_bit(); /* /before/ final check for work */

	/* Check to see if the driver domain tried to start another request in
	 * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active. */
	test_and_schedule_op(pdev);
}
Example #5
0
/**
 * xs_close - close a socket
 * @xprt: transport
 *
 * This is used when all requests are complete; ie, no DRC state remains
 * on the server we want to save.
 */
static void xs_close(struct rpc_xprt *xprt)
{
    struct socket *sock = xprt->sock;
    struct sock *sk = xprt->inet;

    if (!sk)
        goto clear_close_wait;

    dprintk("RPC:      xs_close xprt %p\n", xprt);

    write_lock_bh(&sk->sk_callback_lock);
    xprt->inet = NULL;
    xprt->sock = NULL;

    sk->sk_user_data = NULL;
    sk->sk_data_ready = xprt->old_data_ready;
    sk->sk_state_change = xprt->old_state_change;
    sk->sk_write_space = xprt->old_write_space;
    write_unlock_bh(&sk->sk_callback_lock);

    sk->sk_no_check = 0;

    sock_release(sock);
clear_close_wait:
    smp_mb__before_clear_bit();
    clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
    smp_mb__after_clear_bit();
}
Example #6
0
/**
 * smp_flush_tlb - Callback to invalidate the TLB.
 * @unused: Callback context (ignored).
 */
void smp_flush_tlb(void *unused)
{
	unsigned long cpu_id;

	cpu_id = get_cpu();

	if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
		/* This was a BUG() but until someone can quote me the line
		 * from the intel manual that guarantees an IPI to multiple
		 * CPUs is retried _only_ on the erroring CPUs its staying as a
		 * return
		 *
		 * BUG();
		 */
		goto out;

	if (flush_va == FLUSH_ALL)
		local_flush_tlb();
	else
		local_flush_tlb_page(flush_mm, flush_va);

	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu_id, &flush_cpumask);
	smp_mb__after_clear_bit();
out:
	put_cpu();
}
Example #7
0
static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
	if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
		unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_clear_bit();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}
Example #8
0
/**
 * fscache_object_lookup_negative - Note negative cookie lookup
 * @object: Object pointing to cookie to mark
 *
 * Note negative lookup, permitting those waiting to read data from an already
 * existing backing object to continue as there's no data for them to read.
 */
void fscache_object_lookup_negative(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x,%s}",
	       object->debug_id, fscache_object_states[object->state]);

	spin_lock(&object->lock);
	if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
		fscache_stat(&fscache_n_object_lookups_negative);

		/* transit here to allow write requests to begin stacking up
		 * and read requests to begin returning ENODATA */
		object->state = FSCACHE_OBJECT_CREATING;
		spin_unlock(&object->lock);

		set_bit(FSCACHE_COOKIE_PENDING_FILL, &cookie->flags);
		set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

		_debug("wake up lookup %p", &cookie->flags);
		smp_mb__before_clear_bit();
		clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
		set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
	} else {
		ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
		spin_unlock(&object->lock);
	}

	_leave("");
}
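The readers being released here sleep on FSCACHE_COOKIE_LOOKING_UP, which is why the clear_bit() is fenced and followed by wake_up_bit() on the same flag word. A hypothetical waiter against the four-argument wait_on_bit() of that kernel generation (both helper names below are illustrative assumptions, not fscache code):

static int example_wait_bit(void *word)
{
	/* Standard bit-wait action: sleep until woken by wake_up_bit(). */
	schedule();
	return 0;
}

static void example_wait_for_lookup(struct fscache_cookie *cookie)
{
	/* Returns once the bit is clear; woken by the clear_bit() +
	 * wake_up_bit() pair in fscache_object_lookup_negative(). */
	wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
		    example_wait_bit, TASK_UNINTERRUPTIBLE);
}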
Example #9
0
/*
 * Serialize write access to sockets, in order to prevent different
 * requests from interfering with each other.
 * Also prevents TCP socket connects from colliding with writes.
 */
static int
__xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) {
		if (task == xprt->snd_task)
			return 1;
		if (task == NULL)
			return 0;
		goto out_sleep;
	}
	if (xprt->nocong || __xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return 1;
	}
	smp_mb__before_clear_bit();
	clear_bit(XPRT_LOCKED, &xprt->sockstate);
	smp_mb__after_clear_bit();
out_sleep:
	dprintk("RPC: %4d failed to lock socket %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
	return 0;
}
Example #10
0
/*
 * Unlock cookie management lock
 */
static inline void nfs_fscache_inode_unlock(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	smp_mb__before_clear_bit();
	clear_bit(NFS_INO_FSCACHE_LOCK, &nfsi->flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&nfsi->flags, NFS_INO_FSCACHE_LOCK);
}
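This unlock only makes sense next to its lock side, which loops on test_and_set_bit() and sleeps on the same bit until woken by the wake_up_bit() above. A sketch of that counterpart, assuming the era's four-argument wait_on_bit() and an NFS-provided wait action (nfs_wait_bit_killable is quoted from memory and may differ by version):

static inline void nfs_fscache_inode_lock(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	/* Take the bit lock; sleep until the holder runs
	 * nfs_fscache_inode_unlock() and wakes the bit waitqueue. */
	while (test_and_set_bit(NFS_INO_FSCACHE_LOCK, &nfsi->flags))
		wait_on_bit(&nfsi->flags, NFS_INO_FSCACHE_LOCK,
			    nfs_wait_bit_killable, TASK_KILLABLE);
}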
Example #11
0
/* Performing the configuration space reads/writes must not be done in atomic
 * context because some of the pci_* functions can sleep (mostly due to ACPI
 * use of semaphores). This function is intended to be called from a work
 * queue in process context taking a struct pciback_device as a parameter */
void pciback_do_op(struct work_struct *work)
{
	struct pciback_device *pdev = container_of(work, struct pciback_device, op_work);
	struct pci_dev *dev;
	struct xen_pci_op *op = &pdev->sh_info->op;

	dev = pciback_get_pci_dev(pdev, op->domain, op->bus, op->devfn);

	if (dev == NULL)
		op->err = XEN_PCI_ERR_dev_not_found;
	else
	{
		switch (op->cmd)
		{
			case XEN_PCI_OP_conf_read:
				op->err = pciback_config_read(dev,
					  op->offset, op->size, &op->value);
				break;
			case XEN_PCI_OP_conf_write:
				op->err = pciback_config_write(dev,
					  op->offset, op->size,	op->value);
				break;
#ifdef CONFIG_PCI_MSI
			case XEN_PCI_OP_enable_msi:
				op->err = pciback_enable_msi(pdev, dev, op);
				break;
			case XEN_PCI_OP_disable_msi:
				op->err = pciback_disable_msi(pdev, dev, op);
				break;
			case XEN_PCI_OP_enable_msix:
				op->err = pciback_enable_msix(pdev, dev, op);
				break;
			case XEN_PCI_OP_disable_msix:
				op->err = pciback_disable_msix(pdev, dev, op);
				break;
#endif
			default:
				op->err = XEN_PCI_ERR_not_implemented;
				break;
		}
	}
	/* Tell the driver domain that we're done. */ 
	wmb();
	clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
	notify_remote_via_irq(pdev->evtchn_irq);

	/* Mark that we're done. */
	smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
	clear_bit(_PDEVF_op_active, &pdev->flags);
	smp_mb__after_clear_bit(); /* /before/ final check for work */

	/* Check to see if the driver domain tried to start another request in
	 * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active. 
	*/
	test_and_schedule_op(pdev);
}
Example #12
0
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) {
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_clear_bit();
	} else
		schedule_work(&xprt->task_cleanup);
}
Example #13
0
static void tasklet_action(struct softirq_action *a)
{
	/* ... */
	if (!test_and_set_bit(TASKLET_STATE_RUN, &t->state)) {
		clear_bit(TASKLET_STATE_SCHED, &t->state);
		t->func(t->data);
		smp_mb__before_clear_bit();
		clear_bit(TASKLET_STATE_RUN, &t->state);
	}
	/* ... */
}
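The SCHED/RUN pair in this excerpt works together with the scheduling side: SCHED is cleared before the handler runs so the tasklet can legally reschedule itself from within its own handler, while RUN stops two CPUs from executing it concurrently. The scheduling side is essentially a single test-and-set (a simplified sketch; the real helper defers the list and softirq manipulation to __tasklet_schedule()):

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	/* Only the first caller after SCHED was cleared queues the tasklet;
	 * further calls before it runs are collapsed into that one run. */
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}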
Example #14
0
/*
 * Releases the socket for use by other requests.
 */
static void
__xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt->snd_task = NULL;
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->sockstate);
		smp_mb__after_clear_bit();
		__xprt_lock_write_next(xprt);
	}
}
Example #15
0
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req:
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&req->wb_flags, PG_BUSY);
}
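nfs_unlock_request() pairs with a lock side that is just a non-blocking test_and_set_bit() on PG_BUSY; callers either retry or sleep on the bit themselves. A minimal sketch (the exact helper name in that era, e.g. nfs_lock_request_dontget(), is quoted from memory):

static inline int nfs_lock_request(struct nfs_page *req)
{
	/* Trylock: returns true if we now own PG_BUSY. */
	return !test_and_set_bit(PG_BUSY, &req->wb_flags);
}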
Example #16
0
void dwc3_put_device_id(int id)
{
	int			ret;

	if (id < 0)
		return;

	ret = test_bit(id, dwc3_devs);
	WARN(!ret, "dwc3: ID %d not in use\n", id);
	smp_mb__before_clear_bit();
	clear_bit(id, dwc3_devs);
}
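The release above is paired with an allocator that scans the same dwc3_devs bitmap. Roughly, as a sketch from memory (the DWC3_DEVS_POSSIBLE bound and the error value are assumptions):

int dwc3_get_device_id(void)
{
	int id;

again:
	id = find_first_zero_bit(dwc3_devs, DWC3_DEVS_POSSIBLE);
	if (id < DWC3_DEVS_POSSIBLE) {
		/* A racing caller may grab the slot between the scan and
		 * the test_and_set_bit(); retry the scan if so. */
		if (test_and_set_bit(id, dwc3_devs))
			goto again;
	} else {
		id = -ENOMEM;
	}

	return id;
}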
Example #17
0
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();

#ifdef CONFIG_X86_32
	if (current->active_mm)
		load_user_cs_desc(cpu, current->active_mm);
#endif

	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &flush_state[sender];

	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * its staying as a return
		 *
		 * BUG();
		 */

	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}
Example #18
0
static void hsictty_write_callback(struct urb *urb)
{
	struct usb_serial_port *port;
	struct hsictty_port_private *portdata;
	struct hsictty_intf_private *intfdata;
	int i;
	port = urb->context;
	intfdata = usb_get_serial_data(port->serial);
	portdata = usb_get_serial_port_data(port);

	if (urb->actual_length <= 0) {
		hsictty_error
		    ("%s: write failed, write length: %d in channel:%d, endpoint:%d\n",
		     __func__, urb->actual_length, portdata->channel,
		     usb_pipeendpoint(urb->pipe));
	} else {
		hsictty_dbg("%s: write length: %d in channel:%d, endpoint:%d\n",
			    __func__, urb->actual_length, portdata->channel,
			    usb_pipeendpoint(urb->pipe));
	}
#ifdef BACKUP_DATA_DUMP
	if (!dumped)
		backup_log(portdata->channel, 1,
				urb->transfer_buffer, urb->transfer_buffer_length);
#endif

	usb_serial_port_softint(port);

	usb_autopm_put_interface_async(port->serial->interface);
	portdata = usb_get_serial_port_data(port);
	spin_lock(&intfdata->susp_lock);
	intfdata->in_flight--;
	spin_unlock(&intfdata->susp_lock);

	for (i = 0; i < N_OUT_URB; ++i) {
		if (portdata->out_urbs[i] == urb) {
			smp_mb__before_clear_bit();
			hsictty_dbg
			    ("%s: urb(%d) freed on channel:%d, endpoint:%d, in_flight:%d, pm use cnt:%d\n",
			     __func__, i, portdata->channel,
			     usb_pipeendpoint(urb->pipe), intfdata->in_flight,
			     atomic_read(&port->serial->interface->dev.power.
					 usage_count));
			clear_bit(i, &portdata->out_busy);
			complete_all(&portdata->tx_notifier);
			break;
		}
	}
}
Example #19
0
static irqreturn_t xenoprof_ovf_interrupt(int irq, void *dev_id)
{
	struct xenoprof_buf * buf;
	static unsigned long flag;

	buf = xenoprof_buf[smp_processor_id()];

	xenoprof_add_pc(buf, 0);

	if (xenoprof_is_primary && !test_and_set_bit(0, &flag)) {
		xenoprof_handle_passive();
		smp_mb__before_clear_bit();
		clear_bit(0, &flag);
	}

	return IRQ_HANDLED;
}
Example #20
0
static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
{
	int queued;
	int err = ctx->err;

	if (!ctx->queue.qlen) {
		smp_mb__before_clear_bit();
		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);

		/* Requests may have been queued between the qlen check and
		 * the clear above.  If the queue is still empty, or another
		 * context has already re-taken CHAINIV_STATE_INUSE (and will
		 * therefore do the scheduling), there is nothing left to do.
		 * Otherwise we own the bit again and must schedule the
		 * postponed work ourselves. */
		if (!ctx->queue.qlen ||
		    test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
			goto out;
	}

	queued = schedule_work(&ctx->postponed);
	BUG_ON(!queued);

out:
	return err;
}
Example #21
0
static void option_outdat_callback(struct urb *urb)
{
	struct usb_serial_port *port;
	struct option_port_private *portdata;
	int i;

	dbg("%s", __func__);

	port =  urb->context;

	usb_serial_port_softint(port);

	portdata = usb_get_serial_port_data(port);
	for (i = 0; i < N_OUT_URB; ++i) {
		if (portdata->out_urbs[i] == urb) {
			smp_mb__before_clear_bit();
			clear_bit(i, &portdata->out_busy);
			break;
		}
	}
}
Example #22
0
static void linkwatch_do_dev(struct net_device *dev)
{
	/*
	 * Make sure the above read is complete since it can be
	 * rewritten as soon as we clear the bit below.
	 */
	smp_mb__before_clear_bit();

	clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

	rfc2863_policy(dev);
	if (dev->flags & IFF_UP) {
		if (netif_carrier_ok(dev))
			dev_activate(dev);
		else
			dev_deactivate(dev);

		netdev_state_change(dev);
	}
	dev_put(dev);
}
Example #23
0
/**
 * fscache_obtained_object - Note successful object lookup or creation
 * @object: Object pointing to cookie to mark
 *
 * Note successful lookup and/or creation, permitting those waiting to write
 * data to a backing object to continue.
 *
 * Note that after calling this, an object's cookie may be relinquished by the
 * netfs, and so must be accessed with object lock held.
 */
void fscache_obtained_object(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x,%s}",
	       object->debug_id, fscache_object_states[object->state]);

	/* if we were still looking up, then we must have a positive lookup
	 * result, in which case there may be data available */
	spin_lock(&object->lock);
	if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
		fscache_stat(&fscache_n_object_lookups_positive);

		clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

		object->state = FSCACHE_OBJECT_AVAILABLE;
		spin_unlock(&object->lock);

		smp_mb__before_clear_bit();
		clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
		smp_mb__after_clear_bit();
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
		set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
	} else {
		ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
		fscache_stat(&fscache_n_object_created);

		object->state = FSCACHE_OBJECT_AVAILABLE;
		spin_unlock(&object->lock);
		set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
		smp_wmb();
	}

	if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, &cookie->flags))
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_CREATING);

	_leave("");
}
Example #24
0
static void linkwatch_do_dev(struct net_device *dev)
{
	/*
	 * Make sure the above read is complete since it can be
	 * rewritten as soon as we clear the bit below.
	 */
	smp_mb__before_clear_bit();

	/* We are about to handle this device,
	 * so new events can be accepted
	 */
	clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

	rfc2863_policy(dev);
	if (dev->flags & IFF_UP) {
		if (netif_carrier_ok(dev))
			dev_activate(dev);
		else
			dev_deactivate(dev);

		netdev_state_change(dev);
	}
	dev_put(dev);
}
Example #25
0
static void __linkwatch_run_queue(int urgent_only)
{
	struct net_device *next;

	/*
	 * Limit the number of linkwatch events to one
	 * per second so that a runaway driver does not
	 * cause a storm of messages on the netlink
	 * socket.  This limit does not apply to up events
	 * while the device qdisc is down.
	 */
	if (!urgent_only)
		linkwatch_nextevent = jiffies + HZ;
	/* Limit wrap-around effect on delay. */
	else if (time_after(linkwatch_nextevent, jiffies + HZ))
		linkwatch_nextevent = jiffies;

	clear_bit(LW_URGENT, &linkwatch_flags);

	spin_lock_irq(&lweventlist_lock);
	next = lweventlist;
	lweventlist = NULL;
	spin_unlock_irq(&lweventlist_lock);

	while (next) {
		struct net_device *dev = next;

		next = dev->link_watch_next;

		if (urgent_only && !linkwatch_urgent_event(dev)) {
			linkwatch_add_event(dev);
			continue;
		}

		/*
		 * Make sure the above read is complete since it can be
		 * rewritten as soon as we clear the bit below.
		 */
		smp_mb__before_clear_bit();

		/* We are about to handle this device,
		 * so new events can be accepted
		 */
		clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

		rfc2863_policy(dev);
		if (dev->flags & IFF_UP)
		{
			if (netif_carrier_ok(dev)) 
			{
				WARN_ON(dev->qdisc_sleeping == &noop_qdisc);
				dev_activate(dev);
			} else
				dev_deactivate(dev);

			netdev_state_change(dev);
		}

		dev_put(dev);
	}

	if (lweventlist)
		linkwatch_schedule_work(0);
}
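Both linkwatch excerpts clear __LINK_STATE_LINKWATCH_PENDING before acting on the device because the event producer uses that bit to decide whether the device still has to be queued. A sketch of the producer, loosely after linkwatch_fire_event() (the urgency handling is simplified):

void linkwatch_fire_event(struct net_device *dev)
{
	bool urgent = linkwatch_urgent_event(dev);

	if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
		/* First event since the bit was cleared: take a reference
		 * and put the device on the list the work handler drains. */
		dev_hold(dev);
		linkwatch_add_event(dev);
	} else if (!urgent)
		return;

	linkwatch_schedule_work(urgent);
}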
Example #26
0
/* the core send_sem serializes this with other xmit and shutdown */
static int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len)
{
	struct kvec vec = {
                .iov_base = data,
                .iov_len = len,
	};
        struct msghdr msg = {
                .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
        };

	return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len);
}

/* the core send_sem serializes this with other xmit and shutdown */
int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
	         unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_tcp_connection *tc = conn->c_transport_data;
	int done = 0;
	int ret = 0;

	if (hdr_off == 0) {
		/*
		 * m_ack_seq is set to the sequence number of the last byte of
		 * header and data.  see rds_tcp_is_acked().
		 */
		tc->t_last_sent_nxt = rds_tcp_snd_nxt(tc);
		rm->m_ack_seq = tc->t_last_sent_nxt +
				sizeof(struct rds_header) +
				be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
		smp_mb__before_clear_bit();
		set_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags);
		tc->t_last_expected_una = rm->m_ack_seq + 1;

		rdsdebug("rm %p tcp nxt %u ack_seq %llu\n",
			 rm, rds_tcp_snd_nxt(tc),
			 (unsigned long long)rm->m_ack_seq);
	}

	if (hdr_off < sizeof(struct rds_header)) {
		/* see rds_tcp_write_space() */
		set_bit(SOCK_NOSPACE, &tc->t_sock->sk->sk_socket->flags);

		ret = rds_tcp_sendmsg(tc->t_sock,
				      (void *)&rm->m_inc.i_hdr + hdr_off,
				      sizeof(rm->m_inc.i_hdr) - hdr_off);
		if (ret < 0)
			goto out;
		done += ret;
		if (hdr_off + done != sizeof(struct rds_header))
			goto out;
	}

	while (sg < rm->data.op_nents) {
		ret = tc->t_sock->ops->sendpage(tc->t_sock,
						sg_page(&rm->data.op_sg[sg]),
						rm->data.op_sg[sg].offset + off,
						rm->data.op_sg[sg].length - off,
						MSG_DONTWAIT|MSG_NOSIGNAL);
		rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]),
			 rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off,
			 ret);
		if (ret <= 0)
			break;

		off += ret;
		done += ret;
		if (off == rm->data.op_sg[sg].length) {
			off = 0;
			sg++;
		}
	}

out:
	if (ret <= 0) {
		/* write_space will hit after EAGAIN, all else fatal */
		if (ret == -EAGAIN) {
			rds_tcp_stats_inc(s_tcp_sndbuf_full);
			ret = 0;
		} else {
			printk(KERN_WARNING "RDS/tcp: send to %pI4 "
			       "returned %d, disconnecting and reconnecting\n",
			       &conn->c_faddr, ret);
			rds_conn_drop(conn);
		}
	}
	if (done == 0)
		done = ret;
	return done;
}

/*
 * rm->m_ack_seq is set to the tcp sequence number that corresponds to the
 * last byte of the message, including the header.  This means that the
 * entire message has been received if rm->m_ack_seq is "before" the next
 * unacked byte of the TCP sequence space.  We have to do very careful
 * wrapping 32bit comparisons here.
 */
static int rds_tcp_is_acked(struct rds_message *rm, uint64_t ack)
{
	if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
		return 0;
	return (__s32)((u32)rm->m_ack_seq - (u32)ack) < 0;
}

void rds_tcp_write_space(struct sock *sk)
{
	void (*write_space)(struct sock *sk);
	struct rds_connection *conn;
	struct rds_tcp_connection *tc;

	read_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) {
		write_space = sk->sk_write_space;
		goto out;
	}

	tc = conn->c_transport_data;
	rdsdebug("write_space for tc %p\n", tc);
	write_space = tc->t_orig_write_space;
	rds_tcp_stats_inc(s_tcp_write_space_calls);

	rdsdebug("tcp una %u\n", rds_tcp_snd_una(tc));
	tc->t_last_seen_una = rds_tcp_snd_una(tc);
	rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);

        if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

out:
	read_unlock_bh(&sk->sk_callback_lock);

	/*
	 * write_space is only called when data leaves tcp's send queue if
	 * SOCK_NOSPACE is set.  We set SOCK_NOSPACE every time we put
	 * data in tcp's send queue because we use write_space to parse the
	 * sequence numbers and notice that rds messages have been fully
	 * received.
	 *
	 * tcp's write_space clears SOCK_NOSPACE if the send queue has more
	 * than a certain amount of space. So we need to set it again *after*
	 * we call tcp's write_space or else we might only get called on the
	 * first of a series of incoming tcp acks.
	 */
	write_space(sk);

	if (sk->sk_socket)
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}
Example #27
0
void xen_pcibk_do_op(struct work_struct *data)
{
	struct xen_pcibk_device *pdev =
		container_of(data, struct xen_pcibk_device, op_work);
	struct pci_dev *dev;
	struct xen_pcibk_dev_data *dev_data = NULL;
	struct xen_pci_op *op = &pdev->op;
	int test_intx = 0;
#ifdef CONFIG_PCI_MSI
	unsigned int nr = 0;
#endif

	*op = pdev->sh_info->op;
	barrier();
	dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);

	if (dev == NULL)
		op->err = XEN_PCI_ERR_dev_not_found;
	else {
		dev_data = pci_get_drvdata(dev);
		if (dev_data)
			test_intx = dev_data->enable_intx;
		switch (op->cmd) {
		case XEN_PCI_OP_conf_read:
			op->err = xen_pcibk_config_read(dev,
				  op->offset, op->size, &op->value);
			break;
		case XEN_PCI_OP_conf_write:
			op->err = xen_pcibk_config_write(dev,
				  op->offset, op->size,	op->value);
			break;
#ifdef CONFIG_PCI_MSI
		case XEN_PCI_OP_enable_msi:
			op->err = xen_pcibk_enable_msi(pdev, dev, op);
			break;
		case XEN_PCI_OP_disable_msi:
			op->err = xen_pcibk_disable_msi(pdev, dev, op);
			break;
		case XEN_PCI_OP_enable_msix:
			nr = op->value;
			op->err = xen_pcibk_enable_msix(pdev, dev, op);
			break;
		case XEN_PCI_OP_disable_msix:
			op->err = xen_pcibk_disable_msix(pdev, dev, op);
			break;
#endif
		default:
			op->err = XEN_PCI_ERR_not_implemented;
			break;
		}
	}
	if (!op->err && dev && dev_data) {
		/* Transition detected */
		if ((dev_data->enable_intx != test_intx))
			xen_pcibk_control_isr(dev, 0 /* no reset */);
	}
	pdev->sh_info->op.err = op->err;
	pdev->sh_info->op.value = op->value;
#ifdef CONFIG_PCI_MSI
	if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
		unsigned int i;

		for (i = 0; i < nr; i++)
			pdev->sh_info->op.msix_entries[i].vector =
				op->msix_entries[i].vector;
	}
#endif
	/* Tell the driver domain that we're done. */
	wmb();
	clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
	notify_remote_via_irq(pdev->evtchn_irq);

	/* Mark that we're done. */
	smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
	clear_bit(_PDEVF_op_active, &pdev->flags);
	smp_mb__after_clear_bit(); /* /before/ final check for work */

	/* Check to see if the driver domain tried to start another request in
	 * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
	*/
	xen_pcibk_test_and_schedule_op(pdev);
}
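The final check the closing comment refers to is a test-and-set on the same two flags: if the frontend raised _XEN_PCIF_active again while _PDEVF_op_active was still set, the backend has to requeue the work item itself. A sketch of that helper (the workqueue name is an assumption):

void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
{
	/* The frontend has posted a request and no op is currently in
	 * flight: claim _PDEVF_op_active and kick the work item again. */
	if (test_bit(_XEN_PCIF_active,
		     (unsigned long *)&pdev->sh_info->flags) &&
	    !test_and_set_bit(_PDEVF_op_active, &pdev->flags))
		queue_work(xen_pcibk_wq, &pdev->op_work);
}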
Example #28
0
/**
 * __blk_iopoll_complete - Mark this @iop as un-polled again
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     See blk_iopoll_complete(). This function must be called with interrupts
 *     disabled.
 **/
void __blk_iopoll_complete(struct blk_iopoll *iop)
{
	list_del(&iop->list);
	smp_mb__before_clear_bit();
	clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
}
Example #29
0
/**
 * blk_iopoll_enable - Enable iopoll on this @iop
 * @iop:      The parent iopoll structure
 *
 * Description:
 *     Enable iopoll on this @iop. Note that the handler run will not be
 *     scheduled, it will only mark it as active.
 **/
void blk_iopoll_enable(struct blk_iopoll *iop)
{
	BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state));
	smp_mb__before_clear_bit();
	clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
}
Example #30
0
void free_irqno(unsigned int irq)
{
	smp_mb__before_clear_bit();
	clear_bit(irq, irq_map);
	smp_mb__after_clear_bit();
}