Example no. 1
0
static int usbtmc_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	struct usbtmc_device_data *data;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *bulk_in, *bulk_out, *int_in;
	int n;
	int retcode;

	dev_dbg(&intf->dev, "%s called\n", __func__);

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->intf = intf;
	data->id = id;
	data->usb_dev = usb_get_dev(interface_to_usbdev(intf));
	usb_set_intfdata(intf, data);
	kref_init(&data->kref);
	mutex_init(&data->io_mutex);
	init_waitqueue_head(&data->waitq);
	atomic_set(&data->iin_data_valid, 0);
	atomic_set(&data->srq_asserted, 0);
	data->zombie = 0;

	/* Determine if it is a Rigol or not */
	data->rigol_quirk = 0;
	dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n",
		le16_to_cpu(data->usb_dev->descriptor.idVendor),
		le16_to_cpu(data->usb_dev->descriptor.idProduct));
	for (n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) {
		if ((usbtmc_id_quirk[n].idVendor == le16_to_cpu(data->usb_dev->descriptor.idVendor)) &&
		    (usbtmc_id_quirk[n].idProduct == le16_to_cpu(data->usb_dev->descriptor.idProduct))) {
			dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n");
			data->rigol_quirk = 1;
			break;
		}
	}

	/* Initialize USBTMC bTag and other fields */
	data->bTag	= 1;
	data->TermCharEnabled = 0;
	data->TermChar = '\n';
	/*  2 <= bTag <= 127   USBTMC-USB488 subclass specification 4.3.1 */
	data->iin_bTag = 2;

	/* USBTMC devices have only one setting, so use that */
	iface_desc = data->intf->cur_altsetting;
	data->ifnum = iface_desc->desc.bInterfaceNumber;

	/* Find bulk endpoints */
	retcode = usb_find_common_endpoints(iface_desc,
			&bulk_in, &bulk_out, NULL, NULL);
	if (retcode) {
		dev_err(&intf->dev, "bulk endpoints not found\n");
		goto err_put;
	}

	data->bulk_in = bulk_in->bEndpointAddress;
	dev_dbg(&intf->dev, "Found bulk in endpoint at %u\n", data->bulk_in);

	data->bulk_out = bulk_out->bEndpointAddress;
	dev_dbg(&intf->dev, "Found Bulk out endpoint at %u\n", data->bulk_out);

	/* Find int endpoint */
	retcode = usb_find_int_in_endpoint(iface_desc, &int_in);
	if (!retcode) {
		data->iin_ep_present = 1;
		data->iin_ep = int_in->bEndpointAddress;
		data->iin_wMaxPacketSize = usb_endpoint_maxp(int_in);
		data->iin_interval = int_in->bInterval;
		dev_dbg(&intf->dev, "Found Int in endpoint at %u\n",
				data->iin_ep);
	}

	retcode = get_capabilities(data);
	if (retcode)
		dev_err(&intf->dev, "can't read capabilities\n");
	else
		retcode = sysfs_create_group(&intf->dev.kobj,
					     &capability_attr_grp);

	if (data->iin_ep_present) {
		/* allocate int urb */
		data->iin_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!data->iin_urb) {
			retcode = -ENOMEM;
			goto error_register;
		}

		/* Protect interrupt in endpoint data until iin_urb is freed */
		kref_get(&data->kref);

		/* allocate buffer for interrupt in */
		data->iin_buffer = kmalloc(data->iin_wMaxPacketSize,
					GFP_KERNEL);
		if (!data->iin_buffer) {
			retcode = -ENOMEM;
			goto error_register;
		}

		/* fill interrupt urb */
		usb_fill_int_urb(data->iin_urb, data->usb_dev,
				usb_rcvintpipe(data->usb_dev, data->iin_ep),
				data->iin_buffer, data->iin_wMaxPacketSize,
				usbtmc_interrupt,
				data, data->iin_interval);

		retcode = usb_submit_urb(data->iin_urb, GFP_KERNEL);
		if (retcode) {
			dev_err(&intf->dev, "Failed to submit iin_urb\n");
			goto error_register;
		}
	}

	retcode = sysfs_create_group(&intf->dev.kobj, &data_attr_grp);

	retcode = usb_register_dev(intf, &usbtmc_class);
	if (retcode) {
		dev_err(&intf->dev, "Not able to get a minor"
			" (base %u, slice default): %d\n", USBTMC_MINOR_BASE,
			retcode);
		goto error_register;
	}
	dev_dbg(&intf->dev, "Using minor number %d\n", intf->minor);

	return 0;

error_register:
	sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
	sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
	usbtmc_free_int(data);
err_put:
	kref_put(&data->kref, usbtmc_delete);
	return retcode;
}
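A hedged sketch, not the driver's actual code, of the disconnect counterpart implied by the error path above: it unwinds what probe set up and drops the initial reference taken by kref_init(), so usbtmc_delete() only runs once the last user of the device data is gone. The function name my_usbtmc_disconnect() is hypothetical; the other identifiers are the ones used in the example.

static void my_usbtmc_disconnect(struct usb_interface *intf)
{
	struct usbtmc_device_data *data = usb_get_intfdata(intf);

	usb_deregister_dev(intf, &usbtmc_class);
	sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
	sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
	usbtmc_free_int(data);
	/* drop the probe-time reference; usbtmc_delete() runs on the last put */
	kref_put(&data->kref, usbtmc_delete);
}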
Example no. 2
0
/**
 * zfcp_port_enqueue - enqueue port to port list of adapter
 * @adapter: adapter where remote port is added
 * @wwpn: WWPN of the remote port to be enqueued
 * @status: initial status for the port
 * @d_id: destination id of the remote port to be enqueued
 * Returns: pointer to enqueued port on success, ERR_PTR on error
 *
 * All port internal structures are set up and the sysfs entry is generated.
 * d_id is used to enqueue ports with a well known address like the Directory
 * Service for nameserver lookup.
 */
struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
				     u32 status, u32 d_id)
{
	struct zfcp_port *port;
	int retval = -ENOMEM;

	kref_get(&adapter->ref);

	port = zfcp_get_port_by_wwpn(adapter, wwpn);
	if (port) {
		put_device(&port->dev);
		retval = -EEXIST;
		goto err_out;
	}

	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
	if (!port)
		goto err_out;

	rwlock_init(&port->unit_list_lock);
	INIT_LIST_HEAD(&port->unit_list);

	INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
	INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
	INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);

	port->adapter = adapter;
	port->d_id = d_id;
	port->wwpn = wwpn;
	port->rport_task = RPORT_NONE;
	port->dev.parent = &adapter->ccw_device->dev;
	port->dev.release = zfcp_port_release;

	if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
		kfree(port);
		goto err_out;
	}
	retval = -EINVAL;

	if (device_register(&port->dev)) {
		put_device(&port->dev);
		goto err_out;
	}

	if (sysfs_create_group(&port->dev.kobj,
			       &zfcp_sysfs_port_attrs))
		goto err_out_put;

	write_lock_irq(&adapter->port_list_lock);
	list_add_tail(&port->list, &adapter->port_list);
	write_unlock_irq(&adapter->port_list_lock);

	atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);

	return port;

err_out_put:
	device_unregister(&port->dev);
err_out:
	zfcp_ccw_adapter_put(adapter);
	return ERR_PTR(retval);
}
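A minimal hedged usage sketch for the ERR_PTR convention documented above: the caller (the hypothetical my_add_remote_port(), passing 0 as the initial status) checks the result with IS_ERR()/PTR_ERR() before using the port.

#include <linux/err.h>

static int my_add_remote_port(struct zfcp_adapter *adapter, u64 wwpn, u32 d_id)
{
	struct zfcp_port *port;

	port = zfcp_port_enqueue(adapter, wwpn, 0, d_id);
	if (IS_ERR(port))
		return PTR_ERR(port);

	/* port is now on adapter->port_list and visible in sysfs */
	return 0;
}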
Example no. 3
0
/**
 * usb_get_urb - increments the reference count of the urb
 * @urb: pointer to the urb to modify, may be NULL
 *
 * This must be  called whenever a urb is transferred from a device driver to a
 * host controller driver.  This allows proper reference counting to happen
 * for urbs.
 *
 * A pointer to the urb with the incremented reference counter is returned.
 */
struct urb *usb_get_urb(struct urb *urb)
{
	if (urb)
		kref_get(&urb->kref);
	return urb;
}
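A hedged sketch of the pairing the comment above describes: whoever queues the URB on the host-controller side takes its own reference with usb_get_urb() and releases it with usb_put_urb() once the URB has been given back. The struct my_pending_xfer bookkeeping and the function names are hypothetical.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/usb.h>

struct my_pending_xfer {
	struct list_head node;
	struct urb *urb;
};

static int my_hcd_enqueue(struct list_head *pending, struct urb *urb)
{
	struct my_pending_xfer *xfer = kmalloc(sizeof(*xfer), GFP_ATOMIC);

	if (!xfer)
		return -ENOMEM;

	/* hold a reference for as long as the URB sits on our list */
	xfer->urb = usb_get_urb(urb);
	list_add_tail(&xfer->node, pending);
	return 0;
}

static void my_hcd_giveback(struct my_pending_xfer *xfer)
{
	list_del(&xfer->node);
	usb_put_urb(xfer->urb);	/* drop the reference taken at enqueue time */
	kfree(xfer);
}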
Example no. 4
0
int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	if (ion_validate_buffer_flags(buffer, flags)) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * If clients don't want a custom iova length, just use whatever
	 * the buffer size is
	 */
	if (!iova_length)
		iova_length = buffer->size;

	if (buffer->size > iova_length) {
		pr_debug("%s: iova length %lx is not at least buffer size"
			" %x\n", __func__, iova_length, buffer->size);
		ret = -EINVAL;
		goto out;
	}

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
			buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
			iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	if (_ion_map(&buffer->iommu_map_cnt, &handle->iommu_map_cnt) ||
		!iommu_map) {
		ret = __ion_iommu_map(buffer, domain_num, partition_num, align,
					iova_length, flags, iova);
		if (ret < 0)
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
	} else {
		if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length"
				" %x, trying to map with length %lx\n",
				__func__, handle, iommu_map->mapped_size,
				iova_length);
			_ion_unmap(&buffer->iommu_map_cnt,
				   &handle->iommu_map_cnt);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
Example no. 5
0
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	while (ret == -EBUSY) {
		if (unlikely(list_empty(&glob->swap_lru))) {
			spin_unlock(&glob->lru_lock);
			return -EBUSY;
		}

		bo = list_first_entry(&glob->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);

		if (!list_empty(&bo->ddestroy)) {
			spin_unlock(&glob->lru_lock);
			(void) ttm_bo_cleanup_refs(bo, false, false, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			continue;
		}

		/**
		 * Reserve buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap-list while
		 * we slept.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&glob->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->bdev->fence_lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:

	/**
	 *
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}
Example no. 6
0
void nvhost_job_get(struct nvhost_job *job)
{
    kref_get(&job->ref);
}
Example no. 7
0
static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}
Example no. 8
0
struct spu_context * get_spu_context(struct spu_context *ctx)
{
	kref_get(&ctx->kref);
	return ctx;
}
Example no. 9
0
static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
	kref_get(&tfile->refcount);
	return tfile;
}
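The one-line getters in examples 6 through 9 all follow the same kref idiom. A self-contained sketch of that idiom on a hypothetical struct foo, for reference:

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref ref;
	/* payload ... */
};

static void foo_release(struct kref *ref)
{
	struct foo *f = container_of(ref, struct foo, ref);

	kfree(f);
}

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		kref_init(&f->ref);	/* refcount starts at 1 */
	return f;
}

static struct foo *foo_get(struct foo *f)
{
	kref_get(&f->ref);
	return f;
}

static void foo_put(struct foo *f)
{
	kref_put(&f->ref, foo_release);	/* foo_release() runs on the last put */
}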
Example no. 10
0
static int skel_open(struct inode *inode, struct file *file)
{
	struct usb_skel *dev;
	struct usb_interface *interface;
	int subminor;
	int retval = 0;

	subminor = iminor(inode);

	pr_info("%s ++ (%d)\n", __func__, current->pid);

	interface = usb_find_interface(&skel_driver, subminor);
	if (!interface) {
		err("%s - error, can't find device for minor %d",
		     __func__, subminor);
		retval = -ENODEV;
		goto exit;
	}

	dev = usb_get_intfdata(interface);
	if (!dev) {
		retval = -ENODEV;
		goto exit;
	}

	if (!dev->initialized) {
		pr_info("%s: not initialized yet\n", __func__);
		return -ENODEV;
	}

	/* increment our usage count for the device */
	kref_get(&dev->kref);

	/* lock the device to allow correctly handling errors
	 * in resumption */
	mutex_lock(&dev->io_mutex);

	if (!dev->open_count++) {
		retval = usb_autopm_get_interface(interface);
		if (retval) {
			dev->open_count--;
			mutex_unlock(&dev->io_mutex);
			kref_put(&dev->kref, skel_delete);
			goto exit;
		}
	} /* else { //uncomment this block if you want exclusive open
		retval = -EBUSY;
		dev->open_count--;
		mutex_unlock(&dev->io_mutex);
		kref_put(&dev->kref, skel_delete);
		goto exit;
	} */
	/* prevent the device from being autosuspended */

	/* save our object in the file's private structure */
	file->private_data = dev;
	mutex_unlock(&dev->io_mutex);
/*
	if (skel_do_read_io(dev, dev->bulk_in_size) < 0) {
		err("%s - failed submitting read urb",
			__func__);
	}
*/
exit:
	pr_info("%s -- ret=%d\n", __func__, retval);
	return retval;
}
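A hedged sketch of the release path that balances the kref_get() and usb_autopm_get_interface() taken in skel_open(). It assumes the usual usb-skeleton layout in which dev->interface stores the bound interface; the function name is hypothetical.

static int my_skel_release(struct inode *inode, struct file *file)
{
	struct usb_skel *dev = file->private_data;

	if (!dev)
		return -ENODEV;

	/* allow the device to be autosuspended again */
	mutex_lock(&dev->io_mutex);
	if (!--dev->open_count && dev->interface)
		usb_autopm_put_interface(dev->interface);
	mutex_unlock(&dev->io_mutex);

	/* decrement the usage count taken in skel_open() */
	kref_put(&dev->kref, skel_delete);
	return 0;
}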
Example no. 11
0
/*
 * The parsing of the command line works exactly like the
 * serial.c code, except that the specifier is "ttyUSB" instead
 * of "ttyS".
 */
static int usb_console_setup(struct console *co, char *options)
{
	struct usbcons_info *info = &usbcons_info;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int doflow = 0;
	int cflag = CREAD | HUPCL | CLOCAL;
	char *s;
	struct usb_serial *serial;
	struct usb_serial_port *port;
	int retval;
	struct tty_struct *tty = NULL;
	struct ktermios dummy;

	if (options) {
		baud = simple_strtoul(options, NULL, 10);
		s = options;
		while (*s >= '0' && *s <= '9')
			s++;
		if (*s)
			parity = *s++;
		if (*s)
			bits   = *s++ - '0';
		if (*s)
			doflow = (*s++ == 'r');
	}
	
	/* Sane default */
	if (baud == 0)
		baud = 9600;

	switch (bits) {
	case 7:
		cflag |= CS7;
		break;
	default:
	case 8:
		cflag |= CS8;
		break;
	}
	switch (parity) {
	case 'o': case 'O':
		cflag |= PARODD;
		break;
	case 'e': case 'E':
		cflag |= PARENB;
		break;
	}
	co->cflag = cflag;

	/*
	 * no need to check the index here: if the index is wrong, console
	 * code won't call us
	 */
	port = usb_serial_port_get_by_minor(co->index);
	if (port == NULL) {
		/* no device is connected yet, sorry :( */
		pr_err("No USB device connected to ttyUSB%i\n", co->index);
		return -ENODEV;
	}
	serial = port->serial;

	retval = usb_autopm_get_interface(serial->interface);
	if (retval)
		goto error_get_interface;

	tty_port_tty_set(&port->port, NULL);

	info->port = port;

	++port->port.count;
	if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
		if (serial->type->set_termios) {
			/*
			 * allocate a fake tty so the driver can initialize
			 * the termios structure, then later call set_termios to
			 * configure according to command line arguments
			 */
			tty = kzalloc(sizeof(*tty), GFP_KERNEL);
			if (!tty) {
				retval = -ENOMEM;
				goto reset_open_count;
			}
			kref_init(&tty->kref);
			tty->driver = usb_serial_tty_driver;
			tty->index = co->index;
			init_ldsem(&tty->ldisc_sem);
			INIT_LIST_HEAD(&tty->tty_files);
			kref_get(&tty->driver->kref);
			tty->ops = &usb_console_fake_tty_ops;
			if (tty_init_termios(tty)) {
				retval = -ENOMEM;
				goto put_tty;
			}
			tty_port_tty_set(&port->port, tty);
		}

		/* only call the device specific open if this
		 * is the first time the port is opened */
		retval = serial->type->open(NULL, port);
		if (retval) {
			dev_err(&port->dev, "could not open USB console port\n");
			goto fail;
		}

		if (serial->type->set_termios) {
			tty->termios.c_cflag = cflag;
			tty_termios_encode_baud_rate(&tty->termios, baud, baud);
			memset(&dummy, 0, sizeof(struct ktermios));
			serial->type->set_termios(tty, port, &dummy);

			tty_port_tty_set(&port->port, NULL);
			tty_kref_put(tty);
		}
		set_bit(ASYNCB_INITIALIZED, &port->port.flags);
	}
	/* Now that any required fake tty operations are completed restore
	 * the tty port count */
	--port->port.count;
	/* The console is special in terms of closing the device so
	 * indicate this port is now acting as a system console. */
	port->port.console = 1;

	mutex_unlock(&serial->disc_mutex);
	return retval;

 fail:
	tty_port_tty_set(&port->port, NULL);
 put_tty:
	tty_kref_put(tty);
 reset_open_count:
	port->port.count = 0;
	usb_autopm_put_interface(serial->interface);
 error_get_interface:
	usb_serial_put(serial);
	mutex_unlock(&serial->disc_mutex);
	return retval;
}
Example no. 12
0
/*
 * Get an additional reference on an operation.
 */
void gb_operation_get(struct gb_operation *operation)
{
	kref_get(&operation->kref);
}
Example no. 13
0
/*
 * usb_set_configuration - Makes a particular device setting be current
 * @dev: the device whose configuration is being updated
 * @configuration: the configuration being chosen.
 * Context: !in_interrupt(), caller owns the device lock
 *
 * This is used to enable non-default device modes.  Not all devices
 * use this kind of configurability; many devices only have one
 * configuration.
 *
 * USB device configurations may affect Linux interoperability,
 * power consumption and the functionality available.  For example,
 * the default configuration is limited to using 100mA of bus power,
 * so that when certain device functionality requires more power,
 * and the device is bus powered, that functionality should be in some
 * non-default device configuration.  Other device modes may also be
 * reflected as configuration options, such as whether two ISDN
 * channels are available independently; and choosing between open
 * standard device protocols (like CDC) or proprietary ones.
 *
 * Note that USB has an additional level of device configurability,
 * associated with interfaces.  That configurability is accessed using
 * usb_set_interface().
 *
 * This call is synchronous. The calling context must be able to sleep,
 * must own the device lock, and must not hold the driver model's USB
 * bus rwsem; usb device driver probe() methods cannot use this routine.
 *
 * Returns zero on success, or else the status code returned by the
 * underlying call that failed.  On successful completion, each interface
 * in the original device configuration has been destroyed, and each one
 * in the new configuration has been probed by all relevant usb device
 * drivers currently known to the kernel.
 */
int usb_set_configuration(struct usb_device *dev, int configuration)
{
	int i, ret;
	struct usb_host_config *cp = NULL;
	struct usb_interface **new_interfaces = NULL;
	int n, nintf;

	for (i = 0; i < dev->descriptor.bNumConfigurations; i++) {
		if (dev->config[i].desc.bConfigurationValue == configuration) {
			cp = &dev->config[i];
			break;
		}
	}
	if ((!cp && configuration != 0))
		return -EINVAL;

	/* The USB spec says configuration 0 means unconfigured.
	 * But if a device includes a configuration numbered 0,
	 * we will accept it as a correctly configured state.
	 */
	if (cp && configuration == 0)
		dev_warn(&dev->dev, "config 0 descriptor??\n");

	if (dev->state == USB_STATE_SUSPENDED)
		return -EHOSTUNREACH;

	/* Allocate memory for new interfaces before doing anything else,
	 * so that if we run out then nothing will have changed. */
	n = nintf = 0;
	if (cp) {
		nintf = cp->desc.bNumInterfaces;
		new_interfaces = kmalloc(nintf * sizeof(*new_interfaces),
				GFP_KERNEL);
		if (!new_interfaces) {
			dev_err(&dev->dev, "Out of memory");
			return -ENOMEM;
		}

		for (; n < nintf; ++n) {
			new_interfaces[n] = kmalloc(
					sizeof(struct usb_interface),
					GFP_KERNEL);
			if (!new_interfaces[n]) {
				dev_err(&dev->dev, "Out of memory");
				ret = -ENOMEM;
free_interfaces:
				while (--n >= 0)
					kfree(new_interfaces[n]);
				kfree(new_interfaces);
				return ret;
			}
		}
	}

	/* if it's already configured, clear out old state first.
	 * getting rid of old interfaces means unbinding their drivers.
	 */
	if (dev->state != USB_STATE_ADDRESS)
		usb_disable_device (dev, 1);	// Skip ep0

	if ((ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
			USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
			NULL, 0, USB_CTRL_SET_TIMEOUT)) < 0)
		goto free_interfaces;

	dev->actconfig = cp;
	if (!cp)
		usb_set_device_state(dev, USB_STATE_ADDRESS);
	else {
		usb_set_device_state(dev, USB_STATE_CONFIGURED);

		/* Initialize the new interface structures and the
		 * hc/hcd/usbcore interface/endpoint state.
		 */
		for (i = 0; i < nintf; ++i) {
			struct usb_interface_cache *intfc;
			struct usb_interface *intf;
			struct usb_host_interface *alt;

			cp->interface[i] = intf = new_interfaces[i];
			memset(intf, 0, sizeof(*intf));
			intfc = cp->intf_cache[i];
			intf->altsetting = intfc->altsetting;
			intf->num_altsetting = intfc->num_altsetting;
			kref_get(&intfc->ref);

			alt = usb_altnum_to_altsetting(intf, 0);

			/* No altsetting 0?  We'll assume the first altsetting.
			 * We could use a GetInterface call, but if a device is
			 * so non-compliant that it doesn't have altsetting 0
			 * then I wouldn't trust its reply anyway.
			 */
			if (!alt)
				alt = &intf->altsetting[0];

			intf->cur_altsetting = alt;
			usb_enable_interface(dev, intf);
			intf->dev.parent = &dev->dev;
			intf->dev.driver = NULL;
			intf->dev.bus = &usb_bus_type;
			intf->dev.dma_mask = dev->dev.dma_mask;
			intf->dev.release = release_interface;
			device_initialize (&intf->dev);
			sprintf (&intf->dev.bus_id[0], "%d-%s:%d.%d",
				 dev->bus->busnum, dev->devpath,
				 configuration,
				 alt->desc.bInterfaceNumber);
		}
		kfree(new_interfaces);

		if ((cp->desc.iConfiguration) &&
		    (cp->string == NULL)) {
			cp->string = kmalloc(256, GFP_KERNEL);
			if (cp->string)
				usb_string(dev, cp->desc.iConfiguration, cp->string, 256);
		}

		/* Now that all the interfaces are set up, register them
		 * to trigger binding of drivers to interfaces.  probe()
		 * routines may install different altsettings and may
		 * claim() any interfaces not yet bound.  Many class drivers
		 * need that: CDC, audio, video, etc.
		 */
		for (i = 0; i < nintf; ++i) {
			struct usb_interface *intf = cp->interface[i];
			struct usb_interface_descriptor *desc;

			desc = &intf->altsetting [0].desc;
			dev_dbg (&dev->dev,
				"adding %s (config #%d, interface %d)\n",
				intf->dev.bus_id, configuration,
				desc->bInterfaceNumber);
			ret = device_add (&intf->dev);
			if (ret != 0) {
				dev_err(&dev->dev,
					"device_add(%s) --> %d\n",
					intf->dev.bus_id,
					ret);
				continue;
			}
			if ((intf->cur_altsetting->desc.iInterface) &&
			    (intf->cur_altsetting->string == NULL)) {
				intf->cur_altsetting->string = kmalloc(256, GFP_KERNEL);
				if (intf->cur_altsetting->string)
					usb_string(dev, intf->cur_altsetting->desc.iInterface,
						   intf->cur_altsetting->string, 256);
			}
			usb_create_sysfs_intf_files (intf);
		}
	}

	return 0;
}
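A minimal hedged usage sketch for the locking rule stated in the comment above: outside of a driver's probe() method, the caller takes the device lock before selecting a configuration. The helper name is hypothetical and the configuration number is just whatever value the caller wants.

#include <linux/usb.h>

static int my_select_config(struct usb_device *udev, int config)
{
	int ret;

	usb_lock_device(udev);
	ret = usb_set_configuration(udev, config);
	usb_unlock_device(udev);

	return ret;
}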
Example no. 14
0
static void vic03_get_hwctx (struct nvhost_hwctx *ctx)
{
	nvhost_dbg_fn("");
	kref_get(&ctx->ref);
}
Example no. 15
0
static int wf_sat_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct device_node *dev = client->dev.of_node;
	struct wf_sat *sat;
	struct wf_sat_sensor *sens;
	const u32 *reg;
	const char *loc, *type;
	u8 chip, core;
	struct device_node *child;
	int shift, cpu, index;
	char *name;
	int vsens[2], isens[2];

	sat = kzalloc(sizeof(struct wf_sat), GFP_KERNEL);
	if (sat == NULL)
		return -ENOMEM;
	sat->nr = -1;
	sat->node = of_node_get(dev);
	kref_init(&sat->ref);
	mutex_init(&sat->mutex);
	sat->i2c = client;
	INIT_LIST_HEAD(&sat->sensors);
	i2c_set_clientdata(client, sat);

	vsens[0] = vsens[1] = -1;
	isens[0] = isens[1] = -1;
	child = NULL;
	while ((child = of_get_next_child(dev, child)) != NULL) {
		reg = of_get_property(child, "reg", NULL);
		type = of_get_property(child, "device_type", NULL);
		loc = of_get_property(child, "location", NULL);
		if (reg == NULL || loc == NULL)
			continue;

		/* the cooked sensors are between 0x30 and 0x37 */
		if (*reg < 0x30 || *reg > 0x37)
			continue;
		index = *reg - 0x30;

		/* expect location to be CPU [AB][01] ... */
		if (strncmp(loc, "CPU ", 4) != 0)
			continue;
		chip = loc[4] - 'A';
		core = loc[5] - '0';
		if (chip > 1 || core > 1) {
			printk(KERN_ERR "wf_sat_create: don't understand "
			       "location %s for %s\n", loc, child->full_name);
			continue;
		}
		cpu = 2 * chip + core;
		if (sat->nr < 0)
			sat->nr = chip;
		else if (sat->nr != chip) {
			printk(KERN_ERR "wf_sat_create: can't cope with "
			       "multiple CPU chips on one SAT (%s)\n", loc);
			continue;
		}

		if (strcmp(type, "voltage-sensor") == 0) {
			name = "cpu-voltage";
			shift = 4;
			vsens[core] = index;
		} else if (strcmp(type, "current-sensor") == 0) {
			name = "cpu-current";
			shift = 8;
			isens[core] = index;
		} else if (strcmp(type, "temp-sensor") == 0) {
			name = "cpu-temp";
			shift = 10;
		} else
			continue;	/* hmmm shouldn't happen */

		/* the +16 is enough for "cpu-voltage-n" */
		sens = kzalloc(sizeof(struct wf_sat_sensor) + 16, GFP_KERNEL);
		if (sens == NULL) {
			printk(KERN_ERR "wf_sat_create: couldn't create "
			       "%s sensor %d (no memory)\n", name, cpu);
			continue;
		}
		sens->index = index;
		sens->index2 = -1;
		sens->shift = shift;
		sens->sat = sat;
		sens->sens.ops = &wf_sat_ops;
		sens->sens.name = (char *) (sens + 1);
		snprintf((char *)sens->sens.name, 16, "%s-%d", name, cpu);

		if (wf_register_sensor(&sens->sens))
			kfree(sens);
		else {
			list_add(&sens->link, &sat->sensors);
			kref_get(&sat->ref);
		}
	}

	/* make the power sensors */
	for (core = 0; core < 2; ++core) {
		if (vsens[core] < 0 || isens[core] < 0)
			continue;
		cpu = 2 * sat->nr + core;
		sens = kzalloc(sizeof(struct wf_sat_sensor) + 16, GFP_KERNEL);
		if (sens == NULL) {
			printk(KERN_ERR "wf_sat_create: couldn't create power "
			       "sensor %d (no memory)\n", cpu);
			continue;
		}
		sens->index = vsens[core];
		sens->index2 = isens[core];
		sens->shift = 0;
		sens->sat = sat;
		sens->sens.ops = &wf_sat_ops;
		sens->sens.name = (char *) (sens + 1);
		snprintf((char *)sens->sens.name, 16, "cpu-power-%d", cpu);

		if (wf_register_sensor(&sens->sens))
			kfree(sens);
		else {
			list_add(&sens->link, &sat->sensors);
			kref_get(&sat->ref);
		}
	}

	if (sat->nr >= 0)
		sats[sat->nr] = sat;

	return 0;
}
Example no. 16
0
/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}
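A hedged sketch of the balancing unref, not taken from the example above: the caller's pointer is cleared before the reference is dropped, and the release callback (my_radeon_fence_destroy() here) is a hypothetical stand-in for the driver's real one.

static void my_radeon_fence_destroy(struct kref *kref)
{
	struct radeon_fence *fence = container_of(kref, struct radeon_fence, kref);

	kfree(fence);	/* hypothetical: the real driver does more teardown here */
}

static void my_radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp)
		kref_put(&tmp->kref, my_radeon_fence_destroy);
}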
Example no. 17
0
static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
{
	int retval;
	unsigned long flags;
	struct zfcp_adapter *adapter = erp_action->adapter;

	kref_get(&adapter->ref);

	write_lock_irqsave(&adapter->erp_lock, flags);
	zfcp_erp_strategy_check_fsfreq(erp_action);

	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
		zfcp_erp_action_dequeue(erp_action);
		retval = ZFCP_ERP_DISMISSED;
		goto unlock;
	}

	if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
		retval = ZFCP_ERP_FAILED;
		goto check_target;
	}

	zfcp_erp_action_to_running(erp_action);

	/* no lock to allow for blocking operations */
	write_unlock_irqrestore(&adapter->erp_lock, flags);
	retval = zfcp_erp_strategy_do_action(erp_action);
	write_lock_irqsave(&adapter->erp_lock, flags);

	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
		retval = ZFCP_ERP_CONTINUES;

	switch (retval) {
	case ZFCP_ERP_NOMEM:
		if (!(erp_action->status & ZFCP_STATUS_ERP_LOWMEM)) {
			++adapter->erp_low_mem_count;
			erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
		}
		if (adapter->erp_total_count == adapter->erp_low_mem_count)
			_zfcp_erp_adapter_reopen(adapter, 0, "erstgy1");
		else {
			zfcp_erp_strategy_memwait(erp_action);
			retval = ZFCP_ERP_CONTINUES;
		}
		goto unlock;

	case ZFCP_ERP_CONTINUES:
		if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
			--adapter->erp_low_mem_count;
			erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
		}
		goto unlock;
	}

check_target:
	retval = zfcp_erp_strategy_check_target(erp_action, retval);
	zfcp_erp_action_dequeue(erp_action);
	retval = zfcp_erp_strategy_statechange(erp_action, retval);
	if (retval == ZFCP_ERP_EXIT)
		goto unlock;
	if (retval == ZFCP_ERP_SUCCEEDED)
		zfcp_erp_strategy_followup_success(erp_action);
	if (retval == ZFCP_ERP_FAILED)
		zfcp_erp_strategy_followup_failed(erp_action);

 unlock:
	write_unlock_irqrestore(&adapter->erp_lock, flags);

	if (retval != ZFCP_ERP_CONTINUES)
		zfcp_erp_action_cleanup(erp_action, retval);

	kref_put(&adapter->ref, zfcp_adapter_release);
	return retval;
}
Example no. 18
0
/* Expects to be always run from workqueue - which acts as
 * read-size critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
	unsigned out, in, s;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err;
	size_t hdr_size;
	struct socket *sock;
	struct vhost_ubuf_ref *uninitialized_var(ubufs);
	bool zcopy, zcopy_used;

	/* TODO: check that we are running from vhost_worker? */
	sock = rcu_dereference_check(vq->private_data, 1);
	if (!sock)
		return;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&net->dev, vq);

	hdr_size = vq->vhost_hlen;
	zcopy = vq->ubufs;

	for (;;) {
		/* Release DMAs done buffers first */
		if (zcopy)
			vhost_zerocopy_signal_used(net, vq);

		head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			int num_pends;

			/* If more outstanding DMAs, queue the work.
			 * Handle upend_idx wrap around
			 */
			num_pends = likely(vq->upend_idx >= vq->done_idx) ?
				    (vq->upend_idx - vq->done_idx) :
				    (vq->upend_idx + UIO_MAXIOV - vq->done_idx);
			if (unlikely(num_pends > VHOST_MAX_PEND))
				break;
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, int %d\n", out, in);
			break;
		}
		/* Skip header. TODO: support TSO. */
		s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
		msg.msg_iovlen = out;
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected header len for TX: "
			       "%zd expected %zd\n",
			       iov_length(vq->hdr, s), hdr_size);
			break;
		}
		zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
				       vq->upend_idx != vq->done_idx);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			vq->heads[vq->upend_idx].id = head;
			if (!vhost_net_tx_select_zcopy(net) ||
			    len < VHOST_GOODCOPY_LEN) {
				/* copy don't need to wait for DMA done */
				vq->heads[vq->upend_idx].len =
							VHOST_DMA_DONE_LEN;
				msg.msg_control = NULL;
				msg.msg_controllen = 0;
				ubufs = NULL;
			} else {
				struct ubuf_info *ubuf;
				ubuf = vq->ubuf_info + vq->upend_idx;

				vq->heads[vq->upend_idx].len =
					VHOST_DMA_IN_PROGRESS;
				ubuf->callback = vhost_zerocopy_callback;
				ubuf->ctx = vq->ubufs;
				ubuf->desc = vq->upend_idx;
				msg.msg_control = ubuf;
				msg.msg_controllen = sizeof(ubuf);
				ubufs = vq->ubufs;
				kref_get(&ubufs->kref);
			}
			vq->upend_idx = (vq->upend_idx + 1) % UIO_MAXIOV;
		}
		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(NULL, sock, &msg, len);
		if (unlikely(err < 0)) {
			if (zcopy_used) {
				if (ubufs)
					vhost_ubuf_put(ubufs);
				vq->upend_idx = ((unsigned)vq->upend_idx - 1) %
					UIO_MAXIOV;
			}
			vhost_discard_vq_desc(vq, 1);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		total_len += len;
		vhost_net_tx_packet(net);
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}

static int peek_head_len(struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (vlan_tx_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}

/* This is a multi-buffer version of vhost_get_desc, that works if
 *	vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- log offset
 * @quota       - headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
				"out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = d;
		heads[headcount].len = iov_length(vq->iov + seg, in);
		datalen -= heads[headcount].len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len += datalen;
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}
Example no. 19
0
static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
{
	kref_get(&block->kref);
}
Example no. 20
0
static inline struct autogroup *autogroup_kref_get(struct autogroup *ag)
{
	kref_get(&ag->kref);
	return ag;
}
Example no. 21
0
static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}
Example no. 22
0
static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
			     struct rpmsg_hdr *msg, unsigned int len)
{
	struct rpmsg_endpoint *ept;
	struct scatterlist sg;
	int err;
	struct rproc *vrp_rproc;
	void *msg_guest_addr_kva; /* virtual-address conversion of the message's DMA address */

	dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
					msg->src, msg->dst, msg->len,
					msg->flags, msg->reserved);
	print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
					msg, sizeof(*msg) + msg->len, true);

	/*
	 * We currently use fixed-sized buffers, so trivially sanitize
	 * the reported payload length.
	 */
	if (len > RPMSG_BUF_SIZE ||
		msg->len > (len - sizeof(struct rpmsg_hdr))) {
		dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len);
		return -EINVAL;
	}

	/* use the dst addr to fetch the callback of the appropriate user */
	mutex_lock(&vrp->endpoints_lock);

	ept = idr_find(&vrp->endpoints, msg->dst);

	/* let's make sure no one deallocates ept while we use it */
	if (ept)
		kref_get(&ept->refcount);

	mutex_unlock(&vrp->endpoints_lock);

	if (ept) {
		/* make sure ept->cb doesn't go away while we use it */
		mutex_lock(&ept->cb_lock);

		if (ept->cb)
			ept->cb(ept->rpdev, msg->data, msg->len, ept->priv,
				msg->src);

		mutex_unlock(&ept->cb_lock);

		/* farewell, ept, we don't need you anymore */
		kref_put(&ept->refcount, __ept_release);
	} else
		dev_warn(dev, "msg received with no recipient\n");

	vrp_rproc = vdev_to_rproc(vrp->vdev);
	msg_guest_addr_kva = msg;
	if (vrp_rproc->ops->kva_to_guest_addr_kva) {
		msg_guest_addr_kva = vrp_rproc->ops->kva_to_guest_addr_kva(vrp_rproc, msg, vrp->rvq);
	}
	/* publish the real size of the buffer */
	sg_init_one(&sg, msg_guest_addr_kva, RPMSG_BUF_SIZE);

	/* add the buffer back to the remote processor's virtqueue */
	err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, msg, GFP_KERNEL);
	if (err < 0) {
		dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
		return err;
	}

	return 0;
}
Example no. 23
0
/* called when an rx buffer is used, and it's time to digest a message */
static void rpmsg_recv_done(struct virtqueue *rvq)
{
	struct rpmsg_hdr *msg;
	unsigned int len;
	struct rpmsg_endpoint *ept;
	struct scatterlist sg;
	struct virtproc_info *vrp = rvq->vdev->priv;
	struct device *dev = &rvq->vdev->dev;
	int err;

	msg = virtqueue_get_buf(rvq, &len);
	if (!msg) {
		dev_err(dev, "uhm, incoming signal, but no used buffer ?\n");
		return;
	}

	dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
					msg->src, msg->dst, msg->len,
					msg->flags, msg->reserved);
	print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
					msg, sizeof(*msg) + msg->len, true);

	/*
	 * We currently use fixed-sized buffers, so trivially sanitize
	 * the reported payload length.
	 */
	if (len > RPMSG_BUF_SIZE ||
		msg->len > (len - sizeof(struct rpmsg_hdr))) {
		dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len);
		return;
	}

	/* use the dst addr to fetch the callback of the appropriate user */
	mutex_lock(&vrp->endpoints_lock);

	ept = idr_find(&vrp->endpoints, msg->dst);

	/* let's make sure no one deallocates ept while we use it */
	if (ept)
		kref_get(&ept->refcount);

	mutex_unlock(&vrp->endpoints_lock);

	if (ept) {
		/* make sure ept->cb doesn't go away while we use it */
		mutex_lock(&ept->cb_lock);

		if (ept->cb)
			ept->cb(ept->rpdev, msg->data, msg->len, ept->priv,
				msg->src);

		mutex_unlock(&ept->cb_lock);

		/* farewell, ept, we don't need you anymore */
		kref_put(&ept->refcount, __ept_release);
	} else
		dev_warn(dev, "msg received with no recipient\n");

	/* publish the real size of the buffer */
	sg_init_one(&sg, msg, RPMSG_BUF_SIZE);

	/* add the buffer back to the remote processor's virtqueue */
	err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, msg, GFP_KERNEL);
	if (err < 0) {
		dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
		return;
	}

	/* tell the remote processor we added another available rx buffer */
	virtqueue_kick(vrp->rvq);
}
Example no. 24
0
/**ltl
 * Purpose: set the configuration of a USB device and load the drivers bound to its interfaces.
 * Parameters: dev           -> the USB device
 *             configuration -> the USB configuration number
 * Return value:
 * Note: only after this interface is called does the USB device enter the configured stage.
 */
int usb_set_configuration(struct usb_device *dev, int configuration)
{
	int i, ret;
	struct usb_host_config *cp = NULL;
	struct usb_interface **new_interfaces = NULL;
	int n, nintf;

	for (i = 0; i < dev->descriptor.bNumConfigurations; i++) {
		if (dev->config[i].desc.bConfigurationValue == configuration) {
			cp = &dev->config[i];
			break;
		}
	}
	if ((!cp && configuration != 0))
		return -EINVAL;

	/* The USB spec says configuration 0 means unconfigured.
	 * But if a device includes a configuration numbered 0,
	 * we will accept it as a correctly configured state.
	 */
	if (cp && configuration == 0)
		dev_warn(&dev->dev, "config 0 descriptor??\n");

	if (dev->state == USB_STATE_SUSPENDED)
		return -EHOSTUNREACH;

	/* Allocate memory for new interfaces before doing anything else,
	 * so that if we run out then nothing will have changed. */
	n = nintf = 0;
	if (cp) {
		nintf = cp->desc.bNumInterfaces;	// number of interfaces
		new_interfaces = kmalloc(nintf * sizeof(*new_interfaces),
				GFP_KERNEL);
		if (!new_interfaces) {
			dev_err(&dev->dev, "Out of memory");
			return -ENOMEM;
		}

		for (; n < nintf; ++n) {
			new_interfaces[n] = kzalloc(
					sizeof(struct usb_interface),
					GFP_KERNEL);
			if (!new_interfaces[n]) {
				dev_err(&dev->dev, "Out of memory");
				ret = -ENOMEM;
free_interfaces:
				while (--n >= 0)
					kfree(new_interfaces[n]);
				kfree(new_interfaces);
				return ret;
			}
		}

		i = dev->bus_mA - cp->desc.bMaxPower * 2;
		if (i < 0)
			dev_warn(&dev->dev, "new config #%d exceeds power "
					"limit by %dmA\n",
					configuration, -i);
	}

	/* if it's already configured, clear out old state first.
	 * getting rid of old interfaces means unbinding their drivers.
	 */
	if (dev->state != USB_STATE_ADDRESS)
		usb_disable_device (dev, 1);	// Skip ep0

	// set the configuration number on the device
	if ((ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
			USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
			NULL, 0, USB_CTRL_SET_TIMEOUT)) < 0) {

		/* All the old state is gone, so what else can we do?
		 * The device is probably useless now anyway.
		 */
		cp = NULL;
	}
	// record the currently active configuration
	dev->actconfig = cp;
	if (!cp) {
		usb_set_device_state(dev, USB_STATE_ADDRESS);
		goto free_interfaces;
	}
	usb_set_device_state(dev, USB_STATE_CONFIGURED);

	/* Initialize the new interface structures and the
	 * hc/hcd/usbcore interface/endpoint state.
	 */
	// fill in the interface objects
	for (i = 0; i < nintf; ++i) {
		struct usb_interface_cache *intfc;
		struct usb_interface *intf;
		struct usb_host_interface *alt;

		cp->interface[i] = intf = new_interfaces[i];
		intfc = cp->intf_cache[i];
		intf->altsetting = intfc->altsetting;
		intf->num_altsetting = intfc->num_altsetting;
		kref_get(&intfc->ref);

		alt = usb_altnum_to_altsetting(intf, 0);

		/* No altsetting 0?  We'll assume the first altsetting.
		 * We could use a GetInterface call, but if a device is
		 * so non-compliant that it doesn't have altsetting 0
		 * then I wouldn't trust its reply anyway.
		 */
		if (!alt)
			alt = &intf->altsetting[0];

		intf->cur_altsetting = alt;
		usb_enable_interface(dev, intf);
		intf->dev.parent = &dev->dev;
		intf->dev.driver = NULL;
		/* In the bus driver model each interface is registered as a device.
		 * If the interface is a USB hub (bInterfaceClass=9) it matches the
		 * hub_driver; after device_add() this ends up in hub_probe(), which
		 * allocates the usb_hub object and hooks up the hub's interrupt
		 * handler. When a device is plugged into a hub port, the hub raises
		 * an interrupt (hub_irq) that eventually wakes hub_thread, and the
		 * new USB device gets its address assigned (hub_port_connect_change).
		 * If the interface is an ordinary USB function (bInterfaceClass=x),
		 * e.g. a USB mouse, it is matched against usb_mouse_driver instead.
		 */
		intf->dev.bus = &usb_bus_type;
		intf->dev.dma_mask = dev->dev.dma_mask;
		intf->dev.release = release_interface;
		device_initialize (&intf->dev);
		mark_quiesced(intf);
		sprintf (&intf->dev.bus_id[0], "%d-%s:%d.%d",
			 dev->bus->busnum, dev->devpath,
			 configuration, alt->desc.bInterfaceNumber);
	}
	kfree(new_interfaces);

	if (cp->string == NULL)
		cp->string = usb_cache_string(dev, cp->desc.iConfiguration);

	/* Now that all the interfaces are set up, register them
	 * to trigger binding of drivers to interfaces.  probe()
	 * routines may install different altsettings and may
	 * claim() any interfaces not yet bound.  Many class drivers
	 * need that: CDC, audio, video, etc.
	 */
	/* each interface represents one function and has its own driver; load it */
	for (i = 0; i < nintf; ++i) {
		struct usb_interface *intf = cp->interface[i];

		dev_dbg (&dev->dev,
			"adding %s (config #%d, interface %d)\n",
			intf->dev.bus_id, configuration,
			intf->cur_altsetting->desc.bInterfaceNumber);
		// after this call the matching driver's probe routine runs, e.g. hub_probe or storage_probe
		ret = device_add (&intf->dev);
		if (ret != 0) {
			dev_err(&dev->dev, "device_add(%s) --> %d\n",
				intf->dev.bus_id, ret);
			continue;
		}
		usb_create_sysfs_intf_files (intf);
	}

	return 0;
}
Example no. 25
0
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
				bool interruptible, bool no_wait_reserve,
				bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret, put_count = 0;

retry:
	spin_lock(&glob->lru_lock);
	if (list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_cleanup_refs(bo, interruptible,
					  no_wait_reserve, no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);

		if (likely(ret == 0 || ret == -ERESTARTSYS))
			return ret;

		goto retry;
	}

	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);

	if (unlikely(ret == -EBUSY)) {
		spin_unlock(&glob->lru_lock);
		if (likely(!no_wait_gpu))
			ret = ttm_bo_wait_unreserved(bo, interruptible);

		kref_put(&bo->list_kref, ttm_bo_release_list);

		/**
		 * We *need* to retry after releasing the lru lock.
		 */

		if (unlikely(ret != 0))
			return ret;
		goto retry;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}
Example no. 26
0
static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}
Example no. 27
0
/* to share a qh (cpu threads, or hc) */
static inline struct ehci_qh *qh_get (struct ehci_qh *qh)
{
	kref_get(&qh->kref);
	return qh;
}
Example no. 28
0
/**
 *	of_node_get - Increment refcount of a node
 *	@node:	Node to inc refcount, NULL is supported to
 *		simplify writing of callers
 *
 *	Returns node.
 */
struct device_node *of_node_get(struct device_node *node)
{
	if (node)
		kref_get(&node->kref);
	return node;
}
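A hedged sketch of the usual pairing for the helper above: each of_node_get(), whether called directly or implied by holding on to a node pointer, is balanced by an of_node_put() when the caller is done. The function names are hypothetical.

#include <linux/of.h>

static struct device_node *my_cache_node(struct device_node *np)
{
	/* keep the node pinned while we store the pointer */
	return of_node_get(np);
}

static void my_drop_node(struct device_node *np)
{
	of_node_put(np);	/* balances the of_node_get() above */
}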
Example no. 29
0
int diag_bridge_write(char *data, int size)
{
	struct urb		*urb = NULL;
	unsigned int		pipe;
	struct diag_bridge	*dev = __dev;
	int			ret;

	pr_debug("writing %d bytes", size);

	if (!dev) {
		pr_err("device is disconnected");
		return -ENODEV;
	}

	mutex_lock(&dev->ifc_mutex);
	if (!dev->ifc) {
		ret = -ENODEV;
		goto error;
	}

	if (!dev->ops) {
		pr_err("bridge is not open");
		ret = -ENODEV;
		goto error;
	}

	if (!size) {
		dev_err(&dev->ifc->dev, "invalid size:%d\n", size);
		ret = -EINVAL;
		goto error;
	}

	/* if there was a previous unrecoverable error, just quit */
	if (dev->err) {
		ret = -ENODEV;
		goto error;
	}

	kref_get(&dev->kref);

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		dev_err(&dev->ifc->dev, "unable to allocate urb\n");
		ret = -ENOMEM;
		goto put_error;
	}

	ret = usb_autopm_get_interface(dev->ifc);
	if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
		pr_err_ratelimited("write: autopm_get failed:%d", ret);
		goto free_error;
	}

	pipe = usb_sndbulkpipe(dev->udev, dev->out_epAddr);
	usb_fill_bulk_urb(urb, dev->udev, pipe, data, size,
				diag_bridge_write_cb, dev);
	urb->transfer_flags |= URB_ZERO_PACKET;
	usb_anchor_urb(urb, &dev->submitted);
	dev->pending_writes++;

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		pr_err_ratelimited("submitting urb failed err:%d", ret);
		dev->pending_writes--;
		usb_unanchor_urb(urb);
		usb_autopm_put_interface(dev->ifc);
		goto free_error;
	}

free_error:
	usb_free_urb(urb);
put_error:
	if (ret) /* otherwise this is done in the completion handler */
		kref_put(&dev->kref, diag_bridge_delete);
error:
	mutex_unlock(&dev->ifc_mutex);
	return ret;
}
Example no. 30
0
static void da9055_wdt_ref(struct watchdog_device *wdt_dev)
{
	struct da9055_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);

	kref_get(&driver_data->kref);
}
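A hedged sketch of the put-side helper such a driver would pair with da9055_wdt_ref(); the release callback body and both function names are hypothetical.

static void my_da9055_wdt_release(struct kref *ref)
{
	struct da9055_wdt_data *driver_data =
		container_of(ref, struct da9055_wdt_data, kref);

	kfree(driver_data);	/* hypothetical: free whatever the driver allocated */
}

static void my_da9055_wdt_unref(struct watchdog_device *wdt_dev)
{
	struct da9055_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);

	kref_put(&driver_data->kref, my_da9055_wdt_release);
}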