Example No. 1
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag  &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret);

	return ret;
#endif
}
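
The pattern above, resolving the device's NUMA node with dev_to_node() and feeding it to alloc_pages_node(), is what keeps the coherent buffer local to the device. A minimal sketch of the same idea for an ordinary driver allocation, assuming a hypothetical per-device structure (my_ring and my_ring_alloc are illustrative names, not taken from the example):

static struct my_ring *my_ring_alloc(struct device *dev)
{
	/* dev_to_node() may return NUMA_NO_NODE (-1); kzalloc_node()
	 * accepts that and then behaves like a plain kzalloc(). */
	int node = dev_to_node(dev);

	return kzalloc_node(sizeof(struct my_ring), GFP_KERNEL, node);
}
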
Example No. 2
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *assoc,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout, uint8_t *iv,
			       uint8_t ivlen,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, bufs = 0, sg_nctr = 0;
	int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(assoc, sg, assoc_n, i) {
		if (!sg->length)
			continue;
		bufl->bufers[bufs].addr = dma_map_single(dev,
							 sg_virt(sg),
							 sg->length,
							 DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
	}
	if (ivlen) {
		bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
							 DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = ivlen;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
	}

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr + bufs;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err;
		sg_nctr++;
	}
Example No. 3
static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
				 size_t count, loff_t *offp)
{
	struct perf_ctx *perf = filp->private_data;
	int node, i;
	DECLARE_WAIT_QUEUE_HEAD(wq);

	if (wait_event_interruptible(perf->link_wq, perf->link_is_up))
		return -ENOLINK;

	if (perf->perf_threads == 0)
		return -EINVAL;

	if (!mutex_trylock(&perf->run_mutex))
		return -EBUSY;

	perf_clear_thread_status(perf);

	if (perf->perf_threads > MAX_THREADS) {
		perf->perf_threads = MAX_THREADS;
		pr_info("Reset total threads to: %u\n", MAX_THREADS);
	}

	/* no greater than 1M */
	if (seg_order > MAX_SEG_ORDER) {
		seg_order = MAX_SEG_ORDER;
		pr_info("Fix seg_order to %u\n", seg_order);
	}

	if (run_order < seg_order) {
		run_order = seg_order;
		pr_info("Fix run_order to %u\n", run_order);
	}

	node = dev_to_node(&perf->ntb->pdev->dev);
	atomic_set(&perf->tdone, 0);

	/* launch kernel thread */
	for (i = 0; i < perf->perf_threads; i++) {
		struct pthr_ctx *pctx;

		pctx = &perf->pthr_ctx[i];
		atomic_set(&pctx->dma_sync, 0);
		pctx->perf = perf;
		pctx->wq = &wq;
		pctx->thread =
			kthread_create_on_node(ntb_perf_thread,
					       (void *)pctx,
					       node, "ntb_perf %d", i);
		if (IS_ERR(pctx->thread)) {
			pctx->thread = NULL;
			goto err;
		} else {
			wake_up_process(pctx->thread);
		}
	}

	wait_event_interruptible(wq,
		atomic_read(&perf->tdone) == perf->perf_threads);

	threads_cleanup(perf);
	mutex_unlock(&perf->run_mutex);
	return count;

err:
	threads_cleanup(perf);
	mutex_unlock(&perf->run_mutex);
	return -ENXIO;
}
Example No. 4
static int ntb_perf_thread(void *data)
{
	struct pthr_ctx *pctx = data;
	struct perf_ctx *perf = pctx->perf;
	struct pci_dev *pdev = perf->ntb->pdev;
	struct perf_mw *mw = &perf->mw;
	char __iomem *dst;
	u64 win_size, buf_size, total;
	void *src;
	int rc, node, i;
	struct dma_chan *dma_chan = NULL;

	pr_debug("kthread %s starting...\n", current->comm);

	node = dev_to_node(&pdev->dev);

	if (use_dma && !pctx->dma_chan) {
		dma_cap_mask_t dma_mask;

		dma_cap_zero(dma_mask);
		dma_cap_set(DMA_MEMCPY, dma_mask);
		dma_chan = dma_request_channel(dma_mask, perf_dma_filter_fn,
					       (void *)(unsigned long)node);
		if (!dma_chan) {
			pr_warn("%s: cannot acquire DMA channel, quitting\n",
				current->comm);
			return -ENODEV;
		}
		pctx->dma_chan = dma_chan;
	}

	for (i = 0; i < MAX_SRCS; i++) {
		pctx->srcs[i] = kmalloc_node(MAX_TEST_SIZE, GFP_KERNEL, node);
		if (!pctx->srcs[i]) {
			rc = -ENOMEM;
			goto err;
		}
	}

	win_size = mw->phys_size;
	buf_size = 1ULL << seg_order;
	total = 1ULL << run_order;

	if (buf_size > MAX_TEST_SIZE)
		buf_size = MAX_TEST_SIZE;

	dst = (char __iomem *)mw->vbase;

	atomic_inc(&perf->tsync);
	while (atomic_read(&perf->tsync) != perf->perf_threads)
		schedule();

	src = pctx->srcs[pctx->src_idx];
	pctx->src_idx = (pctx->src_idx + 1) & (MAX_SRCS - 1);

	rc = perf_move_data(pctx, dst, src, buf_size, win_size, total);

	atomic_dec(&perf->tsync);

	if (rc < 0) {
		pr_err("%s: failed\n", current->comm);
		rc = -ENXIO;
		goto err;
	}

	for (i = 0; i < MAX_SRCS; i++) {
		kfree(pctx->srcs[i]);
		pctx->srcs[i] = NULL;
	}

	atomic_inc(&perf->tdone);
	wake_up(pctx->wq);
	rc = 0;
	goto done;

err:
	for (i = 0; i < MAX_SRCS; i++) {
		kfree(pctx->srcs[i]);
		pctx->srcs[i] = NULL;
	}

	if (dma_chan) {
		dma_release_channel(dma_chan);
		pctx->dma_chan = NULL;
	}

done:
	/* Wait until we are told to stop */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	return rc;
}
Example No. 5
static bool perf_dma_filter_fn(struct dma_chan *chan, void *node)
{
	return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
}
Example No. 6
/**
 * \brief Setup gather lists
 * @param oct octeon device
 * @param lio per-network private data
 * @param num_iqs number of input queues to set up gather lists for
 */
int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	struct octnic_gather *g;
	int i, j;

	lio->glist_lock =
	    kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
	if (!lio->glist_lock)
		return -ENOMEM;

	lio->glist =
	    kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
	if (!lio->glist) {
		kfree(lio->glist_lock);
		lio->glist_lock = NULL;
		return -ENOMEM;
	}

	lio->glist_entry_size =
		ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

	/* allocate memory to store virtual and dma base address of
	 * per glist consistent memory
	 */
	lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
					GFP_KERNEL);
	lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
				       GFP_KERNEL);

	if (!lio->glists_virt_base || !lio->glists_dma_base) {
		lio_delete_glists(lio);
		return -ENOMEM;
	}

	for (i = 0; i < num_iqs; i++) {
		int numa_node = dev_to_node(&oct->pci_dev->dev);

		spin_lock_init(&lio->glist_lock[i]);

		INIT_LIST_HEAD(&lio->glist[i]);

		lio->glists_virt_base[i] =
			lio_dma_alloc(oct,
				      lio->glist_entry_size * lio->tx_qsize,
				      &lio->glists_dma_base[i]);

		if (!lio->glists_virt_base[i]) {
			lio_delete_glists(lio);
			return -ENOMEM;
		}

		for (j = 0; j < lio->tx_qsize; j++) {
			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
					 numa_node);
			if (!g)
				g = kzalloc(sizeof(*g), GFP_KERNEL);
			if (!g)
				break;

			g->sg = lio->glists_virt_base[i] +
				(j * lio->glist_entry_size);

			g->sg_dma_ptr = lio->glists_dma_base[i] +
					(j * lio->glist_entry_size);

			list_add_tail(&g->list, &lio->glist[i]);
		}

		if (j != lio->tx_qsize) {
			lio_delete_glists(lio);
			return -ENOMEM;
		}
	}

	return 0;
}
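
Worth noting in this example is the two-step allocation: kzalloc_node() is tried on the device's NUMA node first, and plain kzalloc() is used as a fallback when node-local memory is unavailable. A hypothetical helper condensing that pattern (alloc_near_dev is an illustrative name, not part of the driver):

static void *alloc_near_dev(struct device *dev, size_t size, gfp_t gfp)
{
	/* Prefer memory on the device's NUMA node... */
	void *p = kzalloc_node(size, gfp, dev_to_node(dev));

	/* ...but take memory from any node rather than failing outright. */
	if (!p)
		p = kzalloc(size, gfp);
	return p;
}
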
Example No. 7
static int
__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
		struct ablkcipher_request *req,
		struct data_queue *data_vq,
		__u8 op)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct virtio_crypto_ablkcipher_ctx *ctx = vc_req->ablkcipher_ctx;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	int i;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
			src_nents, dst_nents);

	/* Why 3?  outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kzalloc_node(sg_total * sizeof(*sgs), GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}

	vc_req->req_data = req_data;
	vc_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (op) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
			cpu_to_le32(req->nbytes);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
			req->nbytes, dst_len);

	if (unlikely(req->nbytes + dst_len + ivsize +
		sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
			cpu_to_le32((uint32_t)dst_len);

	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/* IV */

	/*
	 * Avoid DMA from the stack: use a dynamically allocated buffer
	 * for the IV.
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->info, ivsize);
	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_req->iv = iv;

	/* Source data */
	for (i = 0; i < src_nents; i++)
		sgs[num_out++] = &req->src[i];

	/* Destination data */
	for (i = 0; i < dst_nents; i++)
		sgs[num_out + num_in++] = &req->dst[i];

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kzfree(iv);
free:
	kzfree(req_data);
	kfree(sgs);
	return err;
}
Example No. 8
int kfioc_pci_has_numa_info(void)
{
    struct pci_dev pdev = { };
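    /* Feature probe: exercises dev_to_node() on the struct device embedded
     * in a pci_dev to confirm that this kernel exposes per-device NUMA
     * information. */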

    return dev_to_node(&pdev.dev);
}
Example No. 9
/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event).
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	int is_ram = region_intersects(res->start, resource_size(res),
			"System RAM");
	resource_size_t key, align_start, align_size, align_end;
	struct dev_pagemap *pgmap;
	struct page_map *page_map;
	unsigned long pfn;
	int error, nid;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (altmap && !IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP)) {
		dev_err(dev, "%s: altmap requires CONFIG_SPARSEMEM_VMEMMAP=y\n",
				__func__);
		return ERR_PTR(-ENXIO);
	}

	if (!ref)
		return ERR_PTR(-EINVAL);

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);
	pgmap = &page_map->pgmap;

	memcpy(&page_map->res, res, sizeof(*res));

	pgmap->dev = dev;
	if (altmap) {
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
	pgmap->ref = ref;
	pgmap->res = &page_map->res;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);
	align_end = align_start + align_size - 1;
	for (key = align_start; key <= align_end; key += SECTION_SIZE) {
		struct dev_pagemap *dup;

		rcu_read_lock();
		dup = find_dev_pagemap(key);
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
		error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
				page_map);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = arch_add_memory(nid, align_start, align_size, true);
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

		/* ZONE_DEVICE pages must never appear on a slab lru */
		list_force_poison(&page->lru);
		page->pgmap = pgmap;
	}
	devres_add(dev, page_map);
	return __va(res->start);

 err_add_memory:
 err_radix:
	pgmap_radix_release(res);
	devres_free(page_map);
	return ERR_PTR(error);
}
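
The kernel-doc above spells out the calling contract (a live percpu_ref and a System-RAM-like resource). A minimal hypothetical caller would look roughly like the sketch below, where dev, res and ref are assumed to have been set up elsewhere by the driver:

	void *addr;

	/* No altmap: the memmap is allocated from regular memory. */
	addr = devm_memremap_pages(dev, res, ref, NULL);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* On success the range is now backed by struct pages and addr is
	 * the kernel virtual address of res->start. */
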
Example No. 10
File: usb.c Project: guribe94/linux
/**
 * usb_alloc_dev - usb device constructor (usbcore-internal)
 * @parent: hub to which device is connected; null to allocate a root hub
 * @bus: bus used to access the device
 * @port1: one-based index of port; ignored for root hubs
 * Context: !in_interrupt()
 *
 * Only hub drivers (including virtual root hub drivers for host
 * controllers) should ever call this.
 *
 * This call may not be used in a non-sleeping context.
 *
 * Return: On success, a pointer to the allocated usb device. %NULL on
 * failure.
 */
struct usb_device *usb_alloc_dev(struct usb_device *parent,
				 struct usb_bus *bus, unsigned port1)
{
	struct usb_device *dev;
	struct usb_hcd *usb_hcd = bus_to_hcd(bus);
	unsigned root_hub = 0;
	unsigned raw_port = port1;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	if (!usb_get_hcd(usb_hcd)) {
		kfree(dev);
		return NULL;
	}
	/* Root hubs aren't true devices, so don't allocate HCD resources */
	if (usb_hcd->driver->alloc_dev && parent &&
		!usb_hcd->driver->alloc_dev(usb_hcd, dev)) {
		usb_put_hcd(bus_to_hcd(bus));
		kfree(dev);
		return NULL;
	}

	device_initialize(&dev->dev);
	dev->dev.bus = &usb_bus_type;
	dev->dev.type = &usb_device_type;
	dev->dev.groups = usb_device_groups;
	/*
	 * Fake a dma_mask/offset for the USB device:
	 * We cannot really use the dma-mapping API (dma_alloc_* and
	 * dma_map_*) for USB devices but instead need to use
	 * usb_alloc_coherent and pass data in 'urb's, but some subsystems
	 * manually look into the mask/offset pair to determine whether
	 * they need bounce buffers.
	 * Note: calling dma_set_mask() on a USB device would set the
	 * mask for the entire HCD, so don't do that.
	 */
	dev->dev.dma_mask = bus->sysdev->dma_mask;
	dev->dev.dma_pfn_offset = bus->sysdev->dma_pfn_offset;
	set_dev_node(&dev->dev, dev_to_node(bus->sysdev));
	dev->state = USB_STATE_ATTACHED;
	dev->lpm_disable_count = 1;
	atomic_set(&dev->urbnum, 0);

	INIT_LIST_HEAD(&dev->ep0.urb_list);
	dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
	dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT;
	/* ep0 maxpacket comes later, from device descriptor */
	usb_enable_endpoint(dev, &dev->ep0, false);
	dev->can_submit = 1;

	/* Save readable and stable topology id, distinguishing devices
	 * by location for diagnostics, tools, driver model, etc.  The
	 * string is a path along hub ports, from the root.  Each device's
	 * dev->devpath will be stable until USB is re-cabled, and hubs
	 * are often labeled with these port numbers.  The name isn't
	 * as stable:  bus->busnum changes easily from modprobe order,
	 * cardbus or pci hotplugging, and so on.
	 */
	if (unlikely(!parent)) {
		dev->devpath[0] = '0';
		dev->route = 0;

		dev->dev.parent = bus->controller;
		device_set_of_node_from_dev(&dev->dev, bus->sysdev);
		dev_set_name(&dev->dev, "usb%d", bus->busnum);
		root_hub = 1;
	} else {
		/* match any labeling on the hubs; it's one-based */
		if (parent->devpath[0] == '0') {
			snprintf(dev->devpath, sizeof dev->devpath,
				"%d", port1);
			/* Root ports are not counted in route string */
			dev->route = 0;
		} else {
			snprintf(dev->devpath, sizeof dev->devpath,
				"%s.%d", parent->devpath, port1);
			/* Route string assumes hubs have less than 16 ports */
			if (port1 < 15)
				dev->route = parent->route +
					(port1 << ((parent->level - 1)*4));
			else
				dev->route = parent->route +
					(15 << ((parent->level - 1)*4));
		}

		dev->dev.parent = &parent->dev;
		dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath);

		if (!parent->parent) {
			/* device under root hub's port */
			raw_port = usb_hcd_find_raw_port_number(usb_hcd,
				port1);
		}
		dev->dev.of_node = usb_of_get_device_node(parent, raw_port);

		/* hub driver sets up TT records */
	}

	dev->portnum = port1;
	dev->bus = bus;
	dev->parent = parent;
	INIT_LIST_HEAD(&dev->filelist);

#ifdef	CONFIG_PM
	pm_runtime_set_autosuspend_delay(&dev->dev,
			usb_autosuspend_delay * 1000);
	dev->connect_time = jiffies;
	dev->active_duration = -jiffies;
#endif
	if (root_hub)	/* Root hub always ok [and always wired] */
		dev->authorized = 1;
	else {
		dev->authorized = !!HCD_DEV_AUTHORIZED(usb_hcd);
		dev->wusb = usb_bus_is_wusb(bus) ? 1 : 0;
	}
	return dev;
}
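
Note the set_dev_node(&dev->dev, dev_to_node(bus->sysdev)) call above: it copies the host controller's NUMA node into the newly created USB device, so later allocations made on behalf of that device can stay node-local. A hypothetical follow-up (udev, len and buf are illustrative, not part of usbcore):

	/* Allocate near the controller this device hangs off of. */
	buf = kmalloc_node(len, GFP_KERNEL, dev_to_node(&udev->dev));
	if (!buf)
		return -ENOMEM;
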
Example No. 11
#include <asm/tlbflush.h>
#include <asm/homecache.h>

/* Generic DMA mapping functions: */

/*
 * Allocate what the DMA API calls "coherent" memory, preferring pages
 * on the NUMA node of the requesting device.
 */
void *dma_alloc_coherent(struct device *dev,
			 size_t size,
			 dma_addr_t *dma_handle,
			 gfp_t gfp)
{
	u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32);
	int node = dev_to_node(dev);
	int order = get_order(size);
	struct page *pg;
	dma_addr_t addr;

	gfp |= __GFP_ZERO;

	/*
	 * If the device cannot address more than 32 bits of DMA, fall back
	 * to node 0 rather than the device-local node, so that the returned
	 * physical address fits within the coherent DMA mask.
	 */
	if (dma_mask <= DMA_BIT_MASK(32))
		node = 0;
Example No. 12
static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
				 size_t count, loff_t *offp)
{
	struct perf_ctx *perf = filp->private_data;
	int node, i;

	if (!perf->link_is_up)
		return 0;

	if (perf->perf_threads == 0)
		return 0;

	if (atomic_read(&perf->tsync) == 0)
		perf->run = false;

	if (perf->run)
		threads_cleanup(perf);
	else {
		perf->run = true;

		if (perf->perf_threads > MAX_THREADS) {
			perf->perf_threads = MAX_THREADS;
			pr_info("Reset total threads to: %u\n", MAX_THREADS);
		}

		/* no greater than 1M */
		if (seg_order > MAX_SEG_ORDER) {
			seg_order = MAX_SEG_ORDER;
			pr_info("Fix seg_order to %u\n", seg_order);
		}

		if (run_order < seg_order) {
			run_order = seg_order;
			pr_info("Fix run_order to %u\n", run_order);
		}

		node = dev_to_node(&perf->ntb->pdev->dev);
		/* launch kernel thread */
		for (i = 0; i < perf->perf_threads; i++) {
			struct pthr_ctx *pctx;

			pctx = &perf->pthr_ctx[i];
			atomic_set(&pctx->dma_sync, 0);
			pctx->perf = perf;
			pctx->thread =
				kthread_create_on_node(ntb_perf_thread,
						       (void *)pctx,
						       node, "ntb_perf %d", i);
			if (IS_ERR(pctx->thread)) {
				pctx->thread = NULL;
				goto err;
			} else
				wake_up_process(pctx->thread);

			if (perf->run == false)
				return -ENXIO;
		}

	}

	return count;

err:
	threads_cleanup(perf);
	return -ENXIO;
}
Example No. 13
struct usb_device *usb_alloc_dev(struct usb_device *parent,
				 struct usb_bus *bus, unsigned port1)
{
	struct usb_device *dev;
	struct usb_hcd *usb_hcd = container_of(bus, struct usb_hcd, self);
	unsigned root_hub = 0;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	if (!usb_get_hcd(bus_to_hcd(bus))) {
		kfree(dev);
		return NULL;
	}
	/* Root hubs aren't true devices, so don't allocate HCD resources */
	if (usb_hcd->driver->alloc_dev && parent &&
		!usb_hcd->driver->alloc_dev(usb_hcd, dev)) {
		usb_put_hcd(bus_to_hcd(bus));
		kfree(dev);
		return NULL;
	}

	device_initialize(&dev->dev);
	dev->dev.bus = &usb_bus_type;
	dev->dev.type = &usb_device_type;
	dev->dev.groups = usb_device_groups;
	dev->dev.dma_mask = bus->controller->dma_mask;
	set_dev_node(&dev->dev, dev_to_node(bus->controller));
	dev->state = USB_STATE_ATTACHED;
	atomic_set(&dev->urbnum, 0);

	INIT_LIST_HEAD(&dev->ep0.urb_list);
	dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
	dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT;
	/* ep0 maxpacket comes later, from device descriptor */
	usb_enable_endpoint(dev, &dev->ep0, false);
	dev->can_submit = 1;

	/* Save readable and stable topology id, distinguishing devices
	 * by location for diagnostics, tools, driver model, etc.  The
	 * string is a path along hub ports, from the root.  Each device's
	 * dev->devpath will be stable until USB is re-cabled, and hubs
	 * are often labeled with these port numbers.  The name isn't
	 * as stable:  bus->busnum changes easily from modprobe order,
	 * cardbus or pci hotplugging, and so on.
	 */
	if (unlikely(!parent)) {
		dev->devpath[0] = '0';
		dev->route = 0;

		dev->dev.parent = bus->controller;
		dev_set_name(&dev->dev, "usb%d", bus->busnum);
		root_hub = 1;
	} else {
		/* match any labeling on the hubs; it's one-based */
		if (parent->devpath[0] == '0') {
			snprintf(dev->devpath, sizeof dev->devpath,
				"%d", port1);
			/* Root ports are not counted in route string */
			dev->route = 0;
		} else {
			snprintf(dev->devpath, sizeof dev->devpath,
				"%s.%d", parent->devpath, port1);
			/* Route string assumes hubs have less than 16 ports */
			if (port1 < 15)
				dev->route = parent->route +
					(port1 << ((parent->level - 1)*4));
			else
				dev->route = parent->route +
					(15 << ((parent->level - 1)*4));
		}

		dev->dev.parent = &parent->dev;
		dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath);

		/* hub driver sets up TT records */
	}

	dev->portnum = port1;
	dev->bus = bus;
	dev->parent = parent;
	INIT_LIST_HEAD(&dev->filelist);

#ifdef	CONFIG_PM
	mutex_init(&dev->pm_mutex);
	INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
	INIT_WORK(&dev->autoresume, usb_autoresume_work);
	dev->autosuspend_delay = usb_autosuspend_delay * HZ;
	dev->connect_time = jiffies;
	dev->active_duration = -jiffies;
#endif
	if (root_hub)	/* Root hub always ok [and always wired] */
		dev->authorized = 1;
	else {
		dev->authorized = usb_hcd->authorized_default;
		dev->wusb = usb_bus_is_wusb(bus)? 1 : 0;
	}
	return dev;
}
Example No. 14
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct adf_accel_dev *accel_dev;
	struct adf_accel_pci *accel_pci_dev;
	struct adf_hw_device_data *hw_data;
	char name[ADF_DEVICE_NAME_LENGTH];
	unsigned int i, bar_nr;
	int ret;

	switch (ent->device) {
	case ADF_DH895XCC_PCI_DEVICE_ID:
		break;
	default:
		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
		return -ENODEV;
	}

	if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
		/* If the accelerator is connected to a node with no memory
		 * there is no point in using the accelerator since the remote
		 * memory transaction will be very slow. */
		dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
		return -EINVAL;
	}

	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
				 dev_to_node(&pdev->dev));
	if (!accel_dev)
		return -ENOMEM;

	INIT_LIST_HEAD(&accel_dev->crypto_list);

	/* Add accel device to accel table.
	 * This should be called before adf_cleanup_accel is called */
	if (adf_devmgr_add_dev(accel_dev)) {
		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
		kfree(accel_dev);
		return -EFAULT;
	}

	accel_dev->owner = THIS_MODULE;
	/* Allocate and configure device configuration structure */
	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
			       dev_to_node(&pdev->dev));
	if (!hw_data) {
		ret = -ENOMEM;
		goto out_err;
	}

	accel_dev->hw_device = hw_data;
	switch (ent->device) {
	case ADF_DH895XCC_PCI_DEVICE_ID:
		adf_init_hw_data_dh895xcc(accel_dev->hw_device);
		break;
	default:
		return -ENODEV;
	}
	accel_pci_dev = &accel_dev->accel_pci_dev;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
	pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET,
			      &hw_data->fuses);

	/* Get Accelerators and Accelerators Engines masks */
	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
	accel_pci_dev->sku = hw_data->get_sku(hw_data);
	accel_pci_dev->pci_dev = pdev;
	/* If the device has no acceleration engines then ignore it. */
	if (!hw_data->accel_mask || !hw_data->ae_mask ||
	    ((~hw_data->ae_mask) & 0x01)) {
		dev_err(&pdev->dev, "No acceleration units found");
		ret = -EFAULT;
		goto out_err;
	}

	/* Create dev top level debugfs entry */
	snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX,
		 hw_data->dev_class->name, hw_data->instance_id);
	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
	if (!accel_dev->debugfs_dir) {
		dev_err(&pdev->dev, "Could not create debugfs dir\n");
		ret = -EINVAL;
		goto out_err;
	}

	/* Create device configuration table */
	ret = adf_cfg_dev_add(accel_dev);
	if (ret)
		goto out_err;

	pcie_set_readrq(pdev, 1024);

	/* enable PCI device */
	if (pci_enable_device(pdev)) {
		ret = -EFAULT;
		goto out_err;
	}

	/* set dma identifier */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
			dev_err(&pdev->dev, "No usable DMA configuration\n");
			ret = -EFAULT;
			goto out_err;
		} else {
			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		}

	} else {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	}

	if (pci_request_regions(pdev, adf_driver_name)) {
		ret = -EFAULT;
		goto out_err;
	}

	/* Read accelerator capabilities mask */
	pci_read_config_dword(pdev, ADF_DH895XCC_LEGFUSE_OFFSET,
			      &hw_data->accel_capabilities_mask);

	/* Find and map all the device's BARS */
	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];

		bar_nr = i * 2;
		bar->base_addr = pci_resource_start(pdev, bar_nr);
		if (!bar->base_addr)
			break;
		bar->size = pci_resource_len(pdev, bar_nr);
		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
		if (!bar->virt_addr) {
			dev_err(&pdev->dev, "Failed to map BAR %d\n", i);
			ret = -EFAULT;
			goto out_err;
		}
	}
	pci_set_master(pdev);

	if (adf_enable_aer(accel_dev, &adf_driver)) {
		dev_err(&pdev->dev, "Failed to enable aer\n");
		ret = -EFAULT;
		goto out_err;
	}

	if (pci_save_state(pdev)) {
		dev_err(&pdev->dev, "Failed to save pci state\n");
		ret = -ENOMEM;
		goto out_err;
	}

	ret = adf_dev_configure(accel_dev);
	if (ret)
		goto out_err;

	ret = adf_dev_init(accel_dev);
	if (ret)
		goto out_err;

	ret = adf_dev_start(accel_dev);
	if (ret) {
		adf_dev_stop(accel_dev);
		goto out_err;
	}

	return 0;
out_err:
	adf_cleanup_accel(accel_dev);
	return ret;
}
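
The early check in this probe, num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0, refuses to drive an accelerator attached to a node with no memory, since every memory transaction would then be remote and slow. A hypothetical condensed form of that guard (adf_dev_node_usable is an illustrative name):

static bool adf_dev_node_usable(struct device *dev)
{
	/* Single-node systems always pass; otherwise the device must
	 * report a valid NUMA node. */
	return num_possible_nodes() <= 1 || dev_to_node(dev) >= 0;
}
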
Example No. 15
static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
{
	struct device *dev = &p->pdev->dev;
	struct nd_mapping_desc mapping;
	struct nd_region_desc ndr_desc;
	unsigned long dimm_flags;

	p->bus_desc.ndctl = papr_scm_ndctl;
	p->bus_desc.module = THIS_MODULE;
	p->bus_desc.of_node = p->pdev->dev.of_node;
	p->bus_desc.attr_groups = bus_attr_groups;
	p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);

	if (!p->bus_desc.provider_name)
		return -ENOMEM;

	p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
	if (!p->bus) {
		dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
		return -ENXIO;
	}

	dimm_flags = 0;
	set_bit(NDD_ALIASING, &dimm_flags);

	p->nvdimm = nvdimm_create(p->bus, p, papr_scm_dimm_groups,
				dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
	if (!p->nvdimm) {
		dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn);
		goto err;
	}

	/* now add the region */

	memset(&mapping, 0, sizeof(mapping));
	mapping.nvdimm = p->nvdimm;
	mapping.start = 0;
	mapping.size = p->blocks * p->block_size; // XXX: potential overflow?

	memset(&ndr_desc, 0, sizeof(ndr_desc));
	ndr_desc.attr_groups = region_attr_groups;
	ndr_desc.numa_node = dev_to_node(&p->pdev->dev);
	ndr_desc.res = &p->res;
	ndr_desc.of_node = p->dn;
	ndr_desc.provider_data = p;
	ndr_desc.mapping = &mapping;
	ndr_desc.num_mappings = 1;
	ndr_desc.nd_set = &p->nd_set;
	set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);

	p->region = nvdimm_pmem_region_create(p->bus, &ndr_desc);
	if (!p->region) {
		dev_err(dev, "Error registering region %pR from %pOF\n",
				ndr_desc.res, p->dn);
		goto err;
	}

	return 0;

err:	nvdimm_bus_unregister(p->bus);
	kfree(p->bus_desc.provider_name);
	return -ENXIO;
}
Example No. 16
/**
 * usb_alloc_dev - usb device constructor (usbcore-internal)
 * @parent: hub to which device is connected; null to allocate a root hub
 * @bus: bus used to access the device
 * @port1: one-based index of port; ignored for root hubs
 * Context: !in_interrupt()
 *
 * Only hub drivers (including virtual root hub drivers for host
 * controllers) should ever call this.
 *
 * This call may not be used in a non-sleeping context.
 */
struct usb_device *usb_alloc_dev(struct usb_device *parent,
				 struct usb_bus *bus, unsigned port1)
{
	struct usb_device *dev;
	struct usb_hcd *usb_hcd = container_of(bus, struct usb_hcd, self);
	unsigned root_hub = 0;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	if (!usb_get_hcd(bus_to_hcd(bus))) {
		kfree(dev);
		return NULL;
	}

	device_initialize(&dev->dev);
	dev->dev.bus = &usb_bus_type;
	dev->dev.type = &usb_device_type;
	dev->dev.groups = usb_device_groups;
	dev->dev.dma_mask = bus->controller->dma_mask;
	set_dev_node(&dev->dev, dev_to_node(bus->controller));
	dev->state = USB_STATE_ATTACHED;
	atomic_set(&dev->urbnum, 0);

	INIT_LIST_HEAD(&dev->ep0.urb_list);
	dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
	dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT;
	/* ep0 maxpacket comes later, from device descriptor */
	usb_enable_endpoint(dev, &dev->ep0, true);
	dev->can_submit = 1;

	/* Save readable and stable topology id, distinguishing devices
	 * by location for diagnostics, tools, driver model, etc.  The
	 * string is a path along hub ports, from the root.  Each device's
	 * dev->devpath will be stable until USB is re-cabled, and hubs
	 * are often labeled with these port numbers.  The name isn't
	 * as stable:  bus->busnum changes easily from modprobe order,
	 * cardbus or pci hotplugging, and so on.
	 */
	if (unlikely(!parent)) {
		dev->devpath[0] = '0';

		dev->dev.parent = bus->controller;
		dev_set_name(&dev->dev, "usb%d", bus->busnum);
		root_hub = 1;
	} else {
		/* match any labeling on the hubs; it's one-based */
		if (parent->devpath[0] == '0')
			snprintf(dev->devpath, sizeof dev->devpath,
				"%d", port1);
		else
			snprintf(dev->devpath, sizeof dev->devpath,
				"%s.%d", parent->devpath, port1);

		dev->dev.parent = &parent->dev;
		dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath);

		/* hub driver sets up TT records */
	}

	dev->portnum = port1;
	dev->bus = bus;
	dev->parent = parent;
	INIT_LIST_HEAD(&dev->filelist);

#ifdef	CONFIG_PM
	mutex_init(&dev->pm_mutex);
	INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
	dev->autosuspend_delay = usb_autosuspend_delay * HZ;
	dev->connect_time = jiffies;
	dev->active_duration = -jiffies;
#endif
	if (root_hub)	/* Root hub always ok [and always wired] */
		dev->authorized = 1;
	else {
		dev->authorized = usb_hcd->authorized_default;
		dev->wusb = usb_bus_is_wusb(bus)? 1 : 0;
	}
	return dev;
}
Example No. 17
/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event).
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	resource_size_t key, align_start, align_size, align_end;
	pgprot_t pgprot = PAGE_KERNEL;
	struct dev_pagemap *pgmap;
	struct page_map *page_map;
	int error, nid, is_ram;
	unsigned long pfn;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (!ref)
		return ERR_PTR(-EINVAL);

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);
	pgmap = &page_map->pgmap;

	memcpy(&page_map->res, res, sizeof(*res));

	pgmap->dev = dev;
	if (altmap) {
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
	pgmap->ref = ref;
	pgmap->res = &page_map->res;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;
	for (key = align_start; key <= align_end; key += SECTION_SIZE) {
		struct dev_pagemap *dup;

		rcu_read_lock();
		dup = find_dev_pagemap(key);
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
		error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
				page_map);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();
	error = arch_add_memory(nid, align_start, align_size, true);
	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer.  It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list.  Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
	}
	devres_add(dev, page_map);
	return __va(res->start);

 err_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res);
	devres_free(page_map);
	return ERR_PTR(error);
}
Example No. 18
static int ndev_init_isr(struct amd_ntb_dev *ndev,
			 int msix_min, int msix_max)
{
	struct pci_dev *pdev;
	int rc, i, msix_count, node;

	pdev = ndev->ntb.pdev;

	node = dev_to_node(&pdev->dev);

	ndev->db_mask = ndev->db_valid_mask;

	/* Try to set up msix irq */
	ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec),
				 GFP_KERNEL, node);
	if (!ndev->vec)
		goto err_msix_vec_alloc;

	ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix),
				  GFP_KERNEL, node);
	if (!ndev->msix)
		goto err_msix_alloc;

	for (i = 0; i < msix_max; ++i)
		ndev->msix[i].entry = i;

	msix_count = pci_enable_msix_range(pdev, ndev->msix,
					   msix_min, msix_max);
	if (msix_count < 0)
		goto err_msix_enable;

	/* NOTE: Disable MSIX if msix count is less than 16 because of
	 * hardware limitation.
	 */
	if (msix_count < msix_min) {
		pci_disable_msix(pdev);
		goto err_msix_enable;
	}

	for (i = 0; i < msix_count; ++i) {
		ndev->vec[i].ndev = ndev;
		ndev->vec[i].num = i;
		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
				 "ndev_vec_isr", &ndev->vec[i]);
		if (rc)
			goto err_msix_request;
	}

	dev_dbg(&pdev->dev, "Using msix interrupts\n");
	ndev->db_count = msix_min;
	ndev->msix_vec_count = msix_max;
	return 0;

err_msix_request:
	while (i-- > 0)
		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
	pci_disable_msix(pdev);
err_msix_enable:
	kfree(ndev->msix);
err_msix_alloc:
	kfree(ndev->vec);
err_msix_vec_alloc:
	ndev->msix = NULL;
	ndev->vec = NULL;

	/* Try to set up msi irq */
	rc = pci_enable_msi(pdev);
	if (rc)
		goto err_msi_enable;

	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_msi_request;

	dev_dbg(&pdev->dev, "Using msi interrupts\n");
	ndev->db_count = 1;
	ndev->msix_vec_count = 1;
	return 0;

err_msi_request:
	pci_disable_msi(pdev);
err_msi_enable:

	/* Try to set up intx irq */
	pci_intx(pdev, 1);

	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_intx_request;

	dev_dbg(&pdev->dev, "Using intx interrupts\n");
	ndev->db_count = 1;
	ndev->msix_vec_count = 1;
	return 0;

err_intx_request:
	return rc;
}