Example #1
/* release_channel() must be called with the edma_mutex held */
static void release_channel(int chan)
{
    int localChan;
    int i;

    /*
     * The non-LSP_210 EDMA interface returns a "magic" value that represents
     * the controller number and channel number muxed together in one UInt32.
     * This module doesn't yet support a controller other than 0, however, this
     * function needs to accommodate being called with a controller > 0 since
     * it's called to release a channel on a controller > 0 when the
     * REQUESTDMA ioctl() receives a controller > 0 that it can't handle and
     * needs to clean up after itself.
     */
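    /*
     * Illustrative note (an assumption based on the stock DaVinci edma.h
     * layout, not this module's own headers): the muxed value is typically
     * built as ((ctlr << 16) | chan), so EDMA_CTLR() and EDMA_CHAN_SLOT()
     * in the non-LSP_210 branch below simply recover the two halves.
     */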
    /*
     * In order to not be dependent on the LSP #defines, we need to
     * translate our EDMA interface's #defines to the LSP ones.
     */
#if defined(LSP_210)

    localChan = chan;
    if (chan >= EDMA_QDMA0 && chan <= EDMA_QDMA7) {
        __D("  release_channel: translating QDMA channel %d to LSP namespace ...\n", chan);
        localChan = EDMA_QDMA_CHANNEL_0 + (chan - EDMA_QDMA0);
    }

    for (i = 0; i < channels[localChan].nParam; i++) {
        __D("  release_channel: freeing channel %d...\n", localChan + i);

        davinci_free_dma(localChan + i);
    }

    INIT_LIST_HEAD(&channels[localChan].users);
    channels[localChan].nParam = 0;
    channels[localChan].isParam = 0;

#else   /* defined(LSP_210) */

    localChan = EDMA_CHAN_SLOT(chan);
    if (localChan >= EDMA_QDMA0 && localChan <= EDMA_QDMA7) {
        __E("  release_channel: QDMA is not supported: chan %d\n", chan);
                
        return;
    }

    for (i = 0; i < channels[localChan].nParam; i++) {
        if (channels[localChan].isParam) {
            __D("  release_channel: calling edma_free_slot(%d)...\n",
                chan + i);

            edma_free_slot(chan + i);
        } else {
            __D("  release_channel: calling edma_free_channel(%d)...\n",
                chan + i);

            edma_clean_channel(chan + i);
            edma_free_channel(chan + i);
        }
    }

    if (EDMA_CTLR(chan) == 0) {
        INIT_LIST_HEAD(&channels[localChan].users);
        channels[localChan].nParam = 0;
        channels[localChan].isParam = 0;
    }

#endif  /* defined(LSP_210) */
}
Example #2
/*
 * Read in the on-disk dquot using dqtobp(), then copy it to an incore
 * version and release the buffer immediately.
 *
 * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if one is needed.
 */
int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	uint			flags,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_disk_dquot	*ddqp;
	struct xfs_buf		*bp;
	struct xfs_trans	*tp = NULL;
	int			error;
	int			cancelflags = 0;


	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);

	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);
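
	/*
	 * (Sketch of the intended usage, inferred from the comment above:
	 * acquiring the flush "lock" is a wait_for_completion(&dqp->q_flush)
	 * and releasing it is a complete(&dqp->q_flush), so the initial
	 * complete() leaves it in the unlocked state.)
	 */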

	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas.
	 */
	if (!(type & XFS_DQ_USER))
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);

	XFS_STATS_INC(xs_qm_dquot);

	trace_xfs_dqread(dqp);

	if (flags & XFS_QMOPT_DQALLOC) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
		error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
					  XFS_QM_DQALLOC_LOG_RES(mp), 0,
					  XFS_TRANS_PERM_LOG_RES,
					  XFS_WRITE_LOG_COUNT);
		if (error)
			goto error1;
		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
	}

	/*
	 * Get a pointer to the on-disk dquot and the buffer containing it.
	 * dqp already knows its own type (GROUP/USER).
	 */
	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
	if (error) {
		/*
		 * This can happen if quotas got turned off (ESRCH),
		 * or if the dquot didn't exist on disk and we ask to
		 * allocate (ENOENT).
		 */
		trace_xfs_dqread_fail(dqp);
		cancelflags |= XFS_TRANS_ABORT;
		goto error1;
	}

	/* copy everything from disk dquot to the incore dquot */
	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
	xfs_qm_dquot_logitem_init(dqp);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
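	/*
	 * (Illustration, not from the original comment: if d_bcount is 100
	 * and a further 20 blocks are reserved, q_res_bcount becomes 120
	 * while the on-disk d_bcount stays 100 until the usage is recorded.)
	 */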
	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);

	/* initialize the dquot speculative prealloc thresholds */
	xfs_dquot_set_prealloc_limits(dqp);

	/* Mark the buf so that this will stay incore a little longer */
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * We got the buffer with xfs_trans_read_buf() (in dqtobp()), so
	 * we need to release it with xfs_trans_brelse().
	 * The strategy here is identical to that of inodes; we lock
	 * the dquot in xfs_qm_dqget() before making it accessible to
	 * others. This is because dquots, like inodes, need a good level of
	 * concurrency, and we don't want to take locks on the entire buffer
	 * for dquot accesses.
	 * Note also that the dquot buffer may even be dirty at this point, if
	 * this particular dquot was repaired. We still aren't afraid to
	 * brelse it because we have the changes incore.
	 */
	ASSERT(xfs_buf_islocked(bp));
	xfs_trans_brelse(tp, bp);

	if (tp) {
		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		if (error)
			goto error0;
	}

	*O_dqpp = dqp;
	return error;

error1:
	if (tp)
		xfs_trans_cancel(tp, cancelflags);
error0:
	xfs_qm_dqdestroy(dqp);
	*O_dqpp = NULL;
	return error;
}
Example #3
static int ixgbe_set_ringparam(struct net_device *netdev,
                               struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_ring;
	int i, err;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD);
	new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD);
	new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
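
	/*
	 * (Illustration; the descriptor-multiple value is assumed, not taken
	 * from the driver headers: the requested counts are clamped to the
	 * MIN/MAX bounds and then rounded up to the hardware's descriptor
	 * multiple, e.g. with a multiple of 8 a request of 1025 becomes 1032.)
	 */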

	if ((new_tx_count == adapter->tx_ring->count) &&
	    (new_rx_count == adapter->rx_ring->count)) {
		/* nothing to do */
		return 0;
	}

	temp_ring = kcalloc(adapter->num_tx_queues,
	                    sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!temp_ring)
		return -ENOMEM;

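	/* serialize ring reconfiguration against any other reset in flight */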
	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		msleep(1);

	if (new_tx_count != adapter->tx_ring->count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(adapter,
					                        &temp_ring[i]);
				}
				goto err_setup;
			}
			temp_ring[i].v_idx = adapter->tx_ring[i].v_idx;
		}
		if (netif_running(netdev))
			netdev->netdev_ops->ndo_stop(netdev);
		ixgbe_reset_interrupt_capability(adapter);
		ixgbe_napi_del_all(adapter);
		INIT_LIST_HEAD(&netdev->napi_list);
		kfree(adapter->tx_ring);
		adapter->tx_ring = temp_ring;
		temp_ring = NULL;
		adapter->tx_ring_count = new_tx_count;
	}

	temp_ring = kcalloc(adapter->num_rx_queues,
	                    sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!temp_ring) {
		/*
		 * Route the failure through the common error path so the
		 * interrupt scheme is restored and __IXGBE_RESETTING is
		 * cleared before returning.
		 */
		err = -ENOMEM;
		goto err_setup;
	}

	if (new_rx_count != adapter->rx_ring->count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			temp_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(adapter,
					                        &temp_ring[i]);
				}
				goto err_setup;
			}
			temp_ring[i].v_idx = adapter->rx_ring[i].v_idx;
		}
		if (netif_running(netdev))
			netdev->netdev_ops->ndo_stop(netdev);
		ixgbe_reset_interrupt_capability(adapter);
		ixgbe_napi_del_all(adapter);
		INIT_LIST_HEAD(&netdev->napi_list);
		kfree(adapter->rx_ring);
		adapter->rx_ring = temp_ring;
		temp_ring = NULL;

		adapter->rx_ring_count = new_rx_count;
	}

	/* success! */
	err = 0;
err_setup:
	ixgbe_init_interrupt_scheme(adapter);
	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);

	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}
Example #4
/**
 * usb_alloc_dev - usb device constructor (usbcore-internal)
 * @parent: hub to which device is connected; null to allocate a root hub
 * @bus: bus used to access the device
 * @port1: one-based index of port; ignored for root hubs
 * Context: !in_interrupt()
 *
 * Only hub drivers (including virtual root hub drivers for host
 * controllers) should ever call this.
 *
 * This call may not be used in a non-sleeping context.
 */
struct usb_device *usb_alloc_dev(struct usb_device *parent,
				 struct usb_bus *bus, unsigned port1)
{
	struct usb_device *dev;
	struct usb_hcd *usb_hcd = container_of(bus, struct usb_hcd, self);
	unsigned root_hub = 0;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	if (!usb_get_hcd(bus_to_hcd(bus))) {
		kfree(dev);
		return NULL;
	}
	/* Root hubs aren't true devices, so don't allocate HCD resources */
	if (usb_hcd->driver->alloc_dev && parent &&
		!usb_hcd->driver->alloc_dev(usb_hcd, dev)) {
		usb_put_hcd(bus_to_hcd(bus));
		kfree(dev);
		return NULL;
	}

	device_initialize(&dev->dev);
	dev->dev.bus = &usb_bus_type;
	dev->dev.type = &usb_device_type;
	dev->dev.groups = usb_device_groups;
	dev->dev.dma_mask = bus->controller->dma_mask;
	set_dev_node(&dev->dev, dev_to_node(bus->controller));
	dev->state = USB_STATE_ATTACHED;
	atomic_set(&dev->urbnum, 0);

	INIT_LIST_HEAD(&dev->ep0.urb_list);
	dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
	dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT;
	/* ep0 maxpacket comes later, from device descriptor */
	usb_enable_endpoint(dev, &dev->ep0, false);
	dev->can_submit = 1;

	/* Save readable and stable topology id, distinguishing devices
	 * by location for diagnostics, tools, driver model, etc.  The
	 * string is a path along hub ports, from the root.  Each device's
	 * dev->devpath will be stable until USB is re-cabled, and hubs
	 * are often labeled with these port numbers.  The name isn't
	 * as stable:  bus->busnum changes easily from modprobe order,
	 * cardbus or pci hotplugging, and so on.
	 */
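	/*
	 * (Illustrative example, not from the original comment: a device on
	 * port 3 of a hub that sits on root port 1 of bus 2 gets devpath
	 * "1.3" and device name "2-1.3".)
	 */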
	if (unlikely(!parent)) {
		dev->devpath[0] = '0';
		dev->route = 0;

		dev->dev.parent = bus->controller;
		dev_set_name(&dev->dev, "usb%d", bus->busnum);
		root_hub = 1;
	} else {
		/* match any labeling on the hubs; it's one-based */
		if (parent->devpath[0] == '0') {
			snprintf(dev->devpath, sizeof dev->devpath,
				"%d", port1);
			/* Root ports are not counted in route string */
			dev->route = 0;
		} else {
			snprintf(dev->devpath, sizeof dev->devpath,
				"%s.%d", parent->devpath, port1);
			/* Route string assumes hubs have less than 16 ports */
			if (port1 < 15)
				dev->route = parent->route +
					(port1 << ((parent->level - 1)*4));
			else
				dev->route = parent->route +
					(15 << ((parent->level - 1)*4));
		}
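		/*
		 * (Illustrative route values, derived from the shifts above:
		 * the hub on a root port contributes nothing, a device on
		 * its port 3 gets route 0x3, and a device one tier further
		 * down on port 4 gets route 0x43, i.e. 4 bits per hub tier.)
		 */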

		dev->dev.parent = &parent->dev;
		dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath);

		/* hub driver sets up TT records */
	}

	dev->portnum = port1;
	dev->bus = bus;
	dev->parent = parent;
	INIT_LIST_HEAD(&dev->filelist);

#ifdef	CONFIG_PM
	if (usb_hcd->driver->set_autosuspend_delay)
		usb_hcd->driver->set_autosuspend_delay(dev);
	else
		pm_runtime_set_autosuspend_delay(&dev->dev,
				usb_autosuspend_delay * 1000);
	dev->connect_time = jiffies;
	dev->active_duration = -jiffies;
#endif
	if (root_hub)	/* Root hub always ok [and always wired] */
		dev->authorized = 1;
	else {
		dev->authorized = usb_hcd->authorized_default;
		dev->wusb = usb_bus_is_wusb(bus) ? 1 : 0;
	}
	return dev;
}