Example #1
File: pb173.c Project: jkasztur/pb173
static int my_init(void)
{
	mm_segment_t fs;

	pr_info("Adding module.\n");
	task_mapped = 0;
	temp1 = vmalloc_user(PAGE_SIZE);
	temp2 = vmalloc_user(PAGE_SIZE);
	temp3 = (void *)__get_free_page(GFP_KERNEL);
	temp4 = (void *)__get_free_page(GFP_KERNEL);
	strcpy(temp1, "aaa");
	strcpy(temp2, "bbb");
	strcpy(temp3, "ccc");
	strcpy(temp4, "ddd");
	pr_info("temp1: %p\n", temp1);
	pr_info("temp2: %p\n", temp2);
	pr_info("temp3: %p\n", temp3);
	pr_info("temp4: %p\n", temp4);

	buffer = vmalloc(10000);
	memset(buffer, 0, 10000);
	f = filp_open("/home/vyvoj/repos/pb173/09/task.bin", O_RDONLY, 0);
	/* filp_open() returns ERR_PTR() on failure, never NULL */
	if (IS_ERR(f)) {
		pr_info("filp_open error!\n");
		return PTR_ERR(f);
	}
	fs = get_fs();
	set_fs(get_ds());
	f->f_op->read(f, buffer, 10000, &f->f_pos);
	set_fs(fs);
	pr_info("Loading task.bin to buffer finished\n");
	filp_close(f, NULL);
	misc_register(&my_device);
	return 0;
}
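Note: the get_fs()/set_fs(get_ds()) dance plus f->f_op->read() above is the older way to read a file from kernel space. On newer kernels that provide kernel_read(file, buf, count, &pos), the same load can be done without touching the address limit; a minimal sketch (the helper name load_task_bin is illustrative, not part of the module):

static int load_task_bin(void *buf, size_t len)
{
	struct file *f;
	loff_t pos = 0;
	ssize_t n;

	f = filp_open("/home/vyvoj/repos/pb173/09/task.bin", O_RDONLY, 0);
	if (IS_ERR(f))
		return PTR_ERR(f);

	/* kernel_read() copies into a kernel buffer, no set_fs() needed */
	n = kernel_read(f, buf, len, &pos);
	filp_close(f, NULL);

	return n < 0 ? n : 0;
}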
Example #2
static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	if (!buf->vaddr) {
		printk(KERN_ERR "vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return NULL;
	}

	atomic_inc(&buf->refcount);
	printk(KERN_DEBUG "Allocated vmalloc buffer of size %ld at vaddr=%p\n",
			buf->size, buf->vaddr);

	return buf;
}
Example #3
static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size,
			       enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return NULL;

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	buf->dma_dir = dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return NULL;
	}

	atomic_inc(&buf->refcount);
	return buf;
}
Example #4
int pfq_shared_queue_alloc(struct pfq_sock *so, size_t queue_mem)
{
        /* calculate the size of the buffer */

	size_t tm = PAGE_ALIGN(queue_mem);
        size_t tot_mem;

	/* align bufflen to page size */

	size_t num_pages = tm / PAGE_SIZE;
	void *addr;

	num_pages += (num_pages + (PAGE_SIZE-1)) & (PAGE_SIZE-1);
	tot_mem = num_pages*PAGE_SIZE;

	/* Memory is already zeroed */

        addr = vmalloc_user(tot_mem);
	if (addr == NULL)
	{
		printk(KERN_WARNING "[PFQ|%d] pfq_queue_alloc: out of memory (vmalloc %zu bytes)!", so->id, tot_mem);
		return -ENOMEM;
	}

        so->mem_addr = addr;
        so->mem_size = tot_mem;

	pr_devel("[PFQ|%d] pfq_queue_alloc: caplen:%zu maxlen:%zu memory:%zu bytes.\n", so->id, so->rx_opt.caplen, so->tx_opt.maxlen, tot_mem);
	return 0;
}
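For reference on the arithmetic above: PAGE_ALIGN() rounds up to the next page boundary, so with 4 KiB pages a queue_mem of 10000 bytes becomes 12288 bytes, i.e. 3 pages, and vmalloc_user() then returns whole, already-zeroed pages suitable for mapping to user space.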
Example #5
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	rb->page_order = ilog2(nr_pages);
	rb->nr_pages = 1;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}
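Note on the layout above: (nr_pages + 1) pages are allocated because the first page is the user-visible control page (rb->user_page) that perf exposes through mmap, and the data area proper starts one page further on, which is why data_pages[0] points PAGE_SIZE past the base of the vmalloc_user() area.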
Example #6
static int ion_mm_heap_allocate(struct ion_heap *heap,
                                struct ion_buffer *buffer,
                                unsigned long size, unsigned long align,
                                unsigned long flags)
{
    ion_mm_buffer_info* pBufferInfo = NULL;
    int ret;
    unsigned int addr;
    struct sg_table *table;
    struct scatterlist *sg;
    void* pVA;
    ION_FUNC_ENTER;
    pVA = vmalloc_user(size);
    buffer->priv_virt = NULL;
    if (IS_ERR_OR_NULL(pVA))
    {
        printk("[ion_mm_heap_allocate]: Error. Allocate buffer failed.\n");
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    pBufferInfo = (ion_mm_buffer_info*) kzalloc(sizeof(ion_mm_buffer_info), GFP_KERNEL);
    if (IS_ERR_OR_NULL(pBufferInfo))
    {
        vfree(pVA);
        printk("[ion_mm_heap_allocate]: Error. Allocate ion_buffer failed.\n");
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
    if (!table)
    {
        vfree(pVA);
        kfree(pBufferInfo);
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE, GFP_KERNEL);
    if (ret)
    {
        vfree(pVA);
        kfree(pBufferInfo);
        kfree(table);
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    sg = table->sgl;
    for (addr=(unsigned int)pVA; addr < (unsigned int) pVA + size; addr += PAGE_SIZE)
    {
        struct page *page = vmalloc_to_page((void*)addr);
        sg_set_page(sg, page, PAGE_SIZE, 0);
        sg = sg_next(sg);
    }
    buffer->sg_table = table;

    pBufferInfo->pVA = pVA;
    pBufferInfo->eModuleID = -1;
    buffer->priv_virt = pBufferInfo;
    ION_FUNC_LEAVE;
    return 0;
}
Example #7
File: kcov.c Project: plaes/linux
static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}
Example #8
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  struct vm_area_struct *vma)
{
	struct videobuf_vmalloc_memory *mem;
	struct videobuf_mapping *map;
	int retval, pages;

	dprintk(1, "%s\n", __func__);

	/* create mapping + update buffer list */
	map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
	if (NULL == map)
		return -ENOMEM;

	buf->map = map;
	map->start = vma->vm_start;
	map->end   = vma->vm_end;
	map->q     = q;

	buf->baddr = vma->vm_start;

	mem = buf->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

	pages = PAGE_ALIGN(vma->vm_end - vma->vm_start);
	mem->vmalloc = vmalloc_user(pages);
	if (!mem->vmalloc) {
		printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
		goto error;
	}
	dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vmalloc, pages);

	/* Try to remap memory */
	retval = remap_vmalloc_range(vma, mem->vmalloc, 0);
	if (retval < 0) {
		printk(KERN_ERR "mmap: remap failed with error %d. ", retval);
		vfree(mem->vmalloc);
		goto error;
	}

	vma->vm_ops          = &videobuf_vm_ops;
	vma->vm_flags       |= VM_DONTEXPAND | VM_RESERVED;
	vma->vm_private_data = map;

	dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
		map, q, vma->vm_start, vma->vm_end,
		(long int)buf->bsize,
		vma->vm_pgoff, buf->i);

	videobuf_vm_open(vma);

	return 0;

error:
	mem = NULL;
	kfree(map);
	return -ENOMEM;
}
Example #9
File: nommu.c Project: rochecr/linux
/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
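Because vmalloc_user()/vmalloc_32_user() mark the area VM_USERMAP, a driver can hand such an allocation to user space from its mmap file operation with remap_vmalloc_range(). A minimal sketch, assuming the allocation and its size live in my_buf and my_buf_size (hypothetical names):

static void *my_buf;		/* assumed to come from vmalloc_user(my_buf_size) */
static size_t my_buf_size;

static int my_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;

	/* only map from offset 0 and never past the end of the buffer */
	if (vma->vm_pgoff != 0 || len > PAGE_ALIGN(my_buf_size))
		return -EINVAL;

	/* permitted only because my_buf came from vmalloc_user() */
	return remap_vmalloc_range(vma, my_buf, 0);
}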
Example #10
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_vmalloc_memory *mem = vb->priv;
	int pages;

	BUG_ON(!mem);

	MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dprintk(1, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			printk(KERN_ERR "memory is not alloced/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		pages = PAGE_ALIGN(vb->size);

		dprintk(1, "%s memory method USERPTR\n", __func__);

		if (vb->baddr) {
			printk(KERN_ERR "USERPTR is currently not supported\n");
			return -EINVAL;
		}

		/* The only USERPTR currently supported is the one needed for
		 * read() method.
		 */

		mem->vaddr = vmalloc_user(pages);
		if (!mem->vaddr) {
			printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
			return -ENOMEM;
		}
		dprintk(1, "vmalloc is at addr %p (%d pages)\n",
			mem->vaddr, pages);


		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dprintk(1, "%s memory method OVERLAY/unknown\n", __func__);

		/* Currently, doesn't support V4L2_MEMORY_OVERLAY */
		printk(KERN_ERR "Memory method currently unsupported.\n");
		return -EINVAL;
	}

	return 0;
}
Example #11
static int ion_system_heap_allocate(struct ion_heap *heap,
				     struct ion_buffer *buffer,
				     unsigned long size, unsigned long align,
				     unsigned long flags)
{
	buffer->priv_virt = vmalloc_user(size);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}
Example #12
static void *
repl_vmalloc_user(unsigned long size)
{
    void *ret_val;
    ret_val = vmalloc_user(size);

    if (ret_val != NULL)
        klc_add_alloc(ret_val, size, stack_depth);

    return ret_val;
}
Example #13
struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
				 int *num_elem,
				 unsigned int elem_size)
{
	struct rxe_queue *q;
	size_t buf_size;
	unsigned int num_slots;

	/* num_elem == 0 is allowed, but uninteresting */
	if (*num_elem < 0)
		goto err1;

	q = kmalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		goto err1;

	q->rxe = rxe;

	/* used in resize, only need to copy used part of queue */
	q->elem_size = elem_size;

	/* pad element up to at least a cacheline and always a power of 2 */
	if (elem_size < cache_line_size())
		elem_size = cache_line_size();
	elem_size = roundup_pow_of_two(elem_size);

	q->log2_elem_size = order_base_2(elem_size);

	num_slots = *num_elem + 1;
	num_slots = roundup_pow_of_two(num_slots);
	q->index_mask = num_slots - 1;

	buf_size = sizeof(struct rxe_queue_buf) + num_slots * elem_size;

	q->buf = vmalloc_user(buf_size);
	if (!q->buf)
		goto err2;

	q->buf->log2_elem_size = q->log2_elem_size;
	q->buf->index_mask = q->index_mask;

	q->buf_size = buf_size;

	*num_elem = num_slots - 1;
	return q;

err2:
	kfree(q);
err1:
	return NULL;
}
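To make the rounding above concrete: with elem_size = 24 and a 64-byte cache line, the element is padded to 64 bytes and log2_elem_size becomes 6; a request of *num_elem = 100 gives num_slots = roundup_pow_of_two(101) = 128 and index_mask = 127, and 127 is written back as the usable capacity (the ring keeps one slot free, in the usual circular-buffer convention, so the producer and consumer indices can distinguish full from empty).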
Example #14
File: drm_ttm.c Project: jobi/drm-psb
static void ttm_alloc_pages(struct drm_ttm *ttm)
{
	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
	ttm->pages = NULL;

	if (size <= PAGE_SIZE)
		ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);

	if (!ttm->pages) {
		ttm->pages = vmalloc_user(size);
		if (ttm->pages)
			ttm->page_flags |= DRM_TTM_PAGE_VMALLOC;
	}
}
Example #15
/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
	ttm->pages = NULL;

	if (size <= PAGE_SIZE)
		ttm->pages = kzalloc(size, GFP_KERNEL);

	if (!ttm->pages) {
		ttm->pages = vmalloc_user(size);
		if (ttm->pages)
			ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
	}
}
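The flag set above is what lets the teardown pick the matching deallocator; a sketch of the free path this implies (the helper name mirrors the allocator above and is shown only for illustration):

static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	/* free with whichever allocator ttm_tt_alloc_page_directory() used */
	if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
		vfree(ttm->pages);
		ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
	} else {
		kfree(ttm->pages);
	}
	ttm->pages = NULL;
}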
Example #16
static int __init mapdrv_init(void)
{
	int i, result, err;
	dev_t dev = 0;
	unsigned long addr = 0;
	
	md = kmalloc(sizeof(struct mapdrv), GFP_KERNEL);
	if (!md)
		goto fail1;
	result = alloc_chrdev_region(&dev, 0, 1, "mapdrv0");
	major = MAJOR(dev);
	if (result < 0) {
		printk(KERN_WARNING "mapdrv: can't get major %d\n", major);
		goto fail2;
	}
	cdev_init(&md->mapdev, &mapdrv_fops);
	md->mapdev.owner = THIS_MODULE;
	md->mapdev.ops = &mapdrv_fops;
	err = cdev_add (&md->mapdev, dev, 1);
	if (err) 
	{
		printk(KERN_NOTICE "Error %d adding mapdrv", err);
		goto fail3;
	}
	atomic_set(&md->usage, 0);
	/* get a memory area that is only virtually contiguous. */
	vmalloc_area = vmalloc_user(MAPLEN);
	if (!vmalloc_area)
		goto fail4;
	/* set a hello message to kernel space for read by user */
	addr = (unsigned long)vmalloc_area;
	for (i=0; i<10; i++)
	{
		sprintf((char *)addr, "hello world from kernel space %d!", i);
		addr += PAGE_SIZE;
	}
	printk("vmalloc_area at 0x%p (phys 0x%lx)\n", vmalloc_area, page_to_pfn(vmalloc_to_page(vmalloc_area)) << PAGE_SHIFT);
	return 0;
fail4:
	cdev_del(&md->mapdev);	
fail3:
	unregister_chrdev_region(dev, 1);
fail2:
	kfree(md);
fail1:
	return -1;
}
Example #17
/**
 * qib_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @entries: the minimum size of the completion queue
 * @context: unused by the QLogic_IB driver
 * @udata: user data for libibverbs.so
 *
 * Returns a pointer to the completion queue or negative errno values
 * for failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
			    int comp_vector, struct ib_ucontext *context,
			    struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_cq *cq;
	struct qib_cq_wc *wc;
	struct ib_cq *ret;
	u32 sz;

	if (entries < 1 || entries > ib_qib_max_cqes) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	/* Allocate the completion queue structure. */
	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
	else
		sz += sizeof(struct ib_wc) * (entries + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_cq;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;

		cq->ip = qib_create_mmap_info(dev, sz, context, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		cq->ip = NULL;

	spin_lock(&dev->n_cqs_lock);
	if (dev->n_cqs_allocated == ib_qib_max_cqs) {
		spin_unlock(&dev->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_cqs_allocated++;
	spin_unlock(&dev->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
	 * an error.
	 */
	cq->ibcq.cqe = entries;
	cq->notify = IB_CQ_NONE;
	cq->triggered = 0;
	spin_lock_init(&cq->lock);
	INIT_WORK(&cq->comptask, send_complete);
	wc->head = 0;
	wc->tail = 0;
	cq->queue = wc;

	ret = &cq->ibcq;

	goto done;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);
bail_cq:
	kfree(cq);
done:
	return ret;
}
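Note on the sizing above: the queue gets entries + 1 slots rather than entries, matching the usual ring-buffer convention of leaving one slot unused so that head == tail can unambiguously mean empty; the resize path in qib_resize_cq() later in this listing relies on the same layout when it counts outstanding entries.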
Example #18
/**
 * qib_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 */
int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata)
{
	struct qib_srq *srq = to_isrq(ibsrq);
	struct qib_rwq *wq;
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct qib_rwq *owq;
		struct qib_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > ib_qib_max_srq_wrs) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr) {
			ret = -EINVAL;
			goto bail;
		}

		sz = sizeof(struct qib_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
		wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz);
		if (!wq) {
			ret = -ENOMEM;
			goto bail;
		}

		/* Check that we can write the offset to mmap. */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = 0;

			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret)
				goto bail_free;
			udata->outbuf =
				(void __user *) (unsigned long) offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret)
				goto bail_free;
		}

		spin_lock_irq(&srq->rq.lock);
		/*
		 * validate head and tail pointer values and compute
		 * the number of remaining WQEs.
		 */
		owq = srq->rq.wq;
		head = owq->head;
		tail = owq->tail;
		if (head >= srq->rq.size || tail >= srq->rq.size) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
		if (size <= n) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = 0;
		p = wq->wq;
		while (tail != head) {
			struct qib_rwqe *wqe;
			int i;

			wqe = get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct qib_rwqe *)((char *) p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
		srq->rq.wq = wq;
		srq->rq.size = size;
		wq->head = n;
		wq->tail = 0;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);

		vfree(owq);

		if (srq->ip) {
			struct qib_mmap_info *ip = srq->ip;
			struct qib_ibdev *dev = to_idev(srq->ibsrq.device);
			u32 s = sizeof(struct qib_rwq) + size * sz;

			qib_update_mmap_info(dev, ip, s, wq);

			/*
			 * Return the offset to mmap.
			 * See qib_mmap() for details.
			 */
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					goto bail;
			}

			/*
			 * Put user mapping info onto the pending list
			 * unless it already is on the list.
			 */
			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);
	}
	goto bail;

bail_unlock:
	spin_unlock_irq(&srq->rq.lock);
bail_free:
	vfree(wq);
bail:
	return ret;
}
Example #19
/**
 * qib_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 */
struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
			      struct ib_srq_init_attr *srq_init_attr,
			      struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibpd->device);
	struct qib_srq *srq;
	u32 sz;
	struct ib_srq *ret;

	if (srq_init_attr->attr.max_sge == 0 ||
	    srq_init_attr->attr.max_sge > ib_qib_max_srq_sges ||
	    srq_init_attr->attr.max_wr == 0 ||
	    srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct qib_rwqe);
	srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz);
	if (!srq->rq.wq) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_srq;
	}

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;
		u32 s = sizeof(struct qib_rwq) + srq->rq.size * sz;

		srq->ip =
		    qib_create_mmap_info(dev, s, ibpd->uobject->context,
					 srq->rq.wq);
		if (!srq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wq;
		}

		err = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		srq->ip = NULL;

	/*
	 * ib_create_srq() will initialize srq->ibsrq.
	 */
	spin_lock_init(&srq->rq.lock);
	srq->rq.wq->head = 0;
	srq->rq.wq->tail = 0;
	srq->limit = srq_init_attr->attr.srq_limit;

	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == ib_qib_max_srqs) {
		spin_unlock(&dev->n_srqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &srq->ibsrq;
	goto done;

bail_ip:
	kfree(srq->ip);
bail_wq:
	vfree(srq->rq.wq);
bail_srq:
	kfree(srq);
done:
	return ret;
}
Example #20
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata)
{
	struct ipath_qp *qp;
	int err;
	struct ipath_swqe *swq = NULL;
	struct ipath_ibdev *dev;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;

	if (init_attr->create_flags) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
		    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		sz = sizeof(struct ipath_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct ipath_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct ipath_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kmalloc(sz + sg_list_sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		if (sg_list_sz && (init_attr->qp_type == IB_QPT_UD ||
		    init_attr->qp_type == IB_QPT_SMI ||
		    init_attr->qp_type == IB_QPT_GSI)) {
			qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL);
			if (!qp->r_ud_sg_list) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		} else
			qp->r_ud_sg_list = NULL;
		if (init_attr->srq) {
			sz = 0;
			qp->r_rq.size = 0;
			qp->r_rq.max_sge = 0;
			qp->r_rq.wq = NULL;
			init_attr->cap.max_recv_wr = 0;
			init_attr->cap.max_recv_sge = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct ipath_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
					      qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_sg_list;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait_dma);
		tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
		INIT_LIST_HEAD(&qp->piowait);
		INIT_LIST_HEAD(&qp->timerwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
		else
			qp->s_flags = 0;
		dev = to_idev(ibpd->device);
		err = ipath_alloc_qpn(&dev->qp_table, qp,
				      init_attr->qp_type);
		if (err) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_sg_list;
		}
		qp->ip = NULL;
		qp->s_tx = NULL;
		ipath_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct ipath_rwq) +
				qp->r_rq.size * sz;

			qp->ip =
			    ipath_create_mmap_info(dev, s,
						   ibpd->uobject->context,
						   qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_ipath_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	ipath_free_qp(&dev->qp_table, qp);
	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
bail_sg_list:
	kfree(qp->r_ud_sg_list);
bail_qp:
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}
Example #21
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_vmalloc_memory *mem = vb->priv;
	int pages;

	BUG_ON(!mem);

	MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dprintk(1, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			printk(KERN_ERR "memory is not alloced/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		pages = PAGE_ALIGN(vb->size);

		dprintk(1, "%s memory method USERPTR\n", __func__);

		if (vb->baddr) {
			printk(KERN_ERR "USERPTR is currently not supported\n");
			return -EINVAL;
		}

		/* The only USERPTR currently supported is the one needed for
		 * read() method.
		 */

		mem->vaddr = vmalloc_user(pages);
		if (!mem->vaddr) {
			printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
			return -ENOMEM;
		}
		dprintk(1, "vmalloc is at addr %p (%d pages)\n",
			mem->vaddr, pages);

#if 0
		int rc;
		/* Kernel userptr is used also by read() method. In this case,
		   there's no need to remap, since data will be copied to user
		 */
		if (!vb->baddr)
			return 0;

		/* FIXME: to properly support USERPTR, remap should occur.
		   The code below won't work, since mem->vma = NULL
		 */
		/* Try to remap memory */
		rc = remap_vmalloc_range(mem->vma, (void *)vb->baddr, 0);
		if (rc < 0) {
			printk(KERN_ERR "mmap: remap failed with error %d", rc);
			return -ENOMEM;
		}
#endif

		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dprintk(1, "%s memory method OVERLAY/unknown\n", __func__);

		/* Currently, doesn't support V4L2_MEMORY_OVERLAY */
		printk(KERN_ERR "Memory method currently unsupported.\n");
		return -EINVAL;
	}

	return 0;
}
Example #22
File: cq.c Project: AK101111/linux
/**
 * rvt_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @attr: creation attributes
 * @context: unused by the QLogic_IB driver
 * @udata: user data for libibverbs.so
 *
 * Called by ib_create_cq() in the generic verbs code.
 *
 * Return: pointer to the completion queue or negative errno values
 * for failure.
 */
struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
			    const struct ib_cq_init_attr *attr,
			    struct ib_ucontext *context,
			    struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct rvt_cq *cq;
	struct rvt_cq_wc *wc;
	struct ib_cq *ret;
	u32 sz;
	unsigned int entries = attr->cqe;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	if (entries < 1 || entries > rdi->dparms.props.max_cqe)
		return ERR_PTR(-EINVAL);

	/* Allocate the completion queue structure. */
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
	else
		sz += sizeof(struct ib_wc) * (entries + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_cq;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;

		cq->ip = rvt_create_mmap_info(rdi, sz, context, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	}

	spin_lock(&rdi->n_cqs_lock);
	if (rdi->n_cqs_allocated == rdi->dparms.props.max_cq) {
		spin_unlock(&rdi->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_cqs_allocated++;
	spin_unlock(&rdi->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
	 * an error.
	 */
	cq->rdi = rdi;
	cq->ibcq.cqe = entries;
	cq->notify = RVT_CQ_NONE;
	spin_lock_init(&cq->lock);
	init_kthread_work(&cq->comptask, send_complete);
	cq->queue = wc;

	ret = &cq->ibcq;

	goto done;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);
bail_cq:
	kfree(cq);
done:
	return ret;
}
Example #23
static int tditrace_create_buffer(void) {

  /*
   * [TDIT]
   * [RACE]
   * [    ]timeofday_start.tv_usec
   * [    ]timeofday_start.tv_sec
   * [    ]clock_monotonic_start.tv_nsec
   * [    ]clock_monotonic_start.tv_sec
   * ------
   * [    ]marker, byte 1,0 is total length in dwords,
   *               byte 2   is nr numbers
   *               byte 3   is identifier
   * [    ]clock_monotonic_timestamp.tv_nsec
   * [    ]clock_monotonic_timestamp.tv_sec
   * [    ]<optional> numbers
   * [    ]<optional> text, padded with 0's to multiple of 4 bytes
   * ...
   */

  unsigned int *p;
  _u64 atimeofday_start;
  _u64 amonotonic_start;

  gtracebuffersize = 32 * 1024 * 1024;
  gtracebuffer = (char *)vmalloc_user(gtracebuffersize);

  if (gtracebuffer == NULL) {
    printk("ktdim: unable to allocate %dMB tracebuffer\n",
           gtracebuffersize / (1024 * 1024));
    return -1;
  }

  /* vmalloc memory is not physically contiguous, so the first backing page
   * must come from vmalloc_to_page(), not virt_to_page() */
  gtracebuffer_shared_page = vmalloc_to_page(gtracebuffer);

  memset(gtracebuffer, 0, gtracebuffersize);

  printk("ktdim: allocated %dMB @0x%08x tracebuffer\n",
         gtracebuffersize / (1024 * 1024), (u32)gtracebuffer);

  gtracebuffer_dword_ptr = (unsigned int *)gtracebuffer;

  /*
   * write one time start text
   */
  sprintf((char *)gtracebuffer_dword_ptr, (char *)"TDITRACE");
  gtracebuffer_dword_ptr += 2;

  p = gtracebuffer_dword_ptr;

  do_gettimeofday((struct timeval *)gtracebuffer_dword_ptr);
  gtracebuffer_dword_ptr += 2;

  do_posix_clock_monotonic_gettime((struct timespec *)gtracebuffer_dword_ptr);
  gtracebuffer_dword_ptr += 2;

  atimeofday_start = (_u64)*p++ * 1000000000;
  atimeofday_start += (_u64)*p++ * 1000;

  amonotonic_start = (_u64)*p++ * 1000000000;
  amonotonic_start += (_u64)*p++;

  *gtracebuffer_dword_ptr = 0;

  gtditrace_enabled = 1;

  return 0;
}
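The layout comment at the top of this function implies a simple bit packing for the marker dword (assuming byte 0 is the least significant byte); a small sketch just to make it concrete, with a hypothetical helper name that is not part of the driver:

/* bits 15..0 = record length in dwords, bits 23..16 = number count,
 * bits 31..24 = identifier, per the layout comment above */
static inline unsigned int tditrace_pack_marker(unsigned int len_dwords,
						unsigned int nr_numbers,
						unsigned int id)
{
	return (len_dwords & 0xffffu) |
	       ((nr_numbers & 0xffu) << 16) |
	       ((id & 0xffu) << 24);
}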
Example #24
File: qp.c Project: 020gzh/linux
/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a reserved
 * range for PSM.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret = ERR_PTR(-ENOMEM);
	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
	void *priv = NULL;
	gfp_t gfp;

	if (!rdi)
		return ERR_PTR(-EINVAL);

	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	/* GFP_NOIO is applicable to RC QP's only */

	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
	    init_attr->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
						GFP_NOIO : GFP_KERNEL;

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
			return ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0)
			return ERR_PTR(-EINVAL);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt)
			return ERR_PTR(-EINVAL);
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct rvt_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct rvt_swqe);
		if (gfp == GFP_NOIO)
			swq = __vmalloc(
				(init_attr->cap.max_send_wr + 1) * sz,
				gfp, PAGE_KERNEL);
		else
			swq = vmalloc_node(
				(init_attr->cap.max_send_wr + 1) * sz,
				rdi->dparms.node);
		if (!swq)
			return ERR_PTR(-ENOMEM);

		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
		if (!qp)
			goto bail_swq;

		RCU_INIT_POINTER(qp->next, NULL);

		/*
		 * Driver needs to set up its private QP structure and do any
		 * initialization that is needed.
		 */
		priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
		if (!priv)
			goto bail_qp;
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
		if (init_attr->srq) {
			sz = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			if (udata)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			else if (gfp == GFP_NOIO)
				qp->r_rq.wq = __vmalloc(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						gfp, PAGE_KERNEL);
			else
				qp->r_rq.wq = vmalloc_node(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						rdi->dparms.node);
			if (!qp->r_rq.wq)
				goto bail_driver_priv;
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_hlock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_avail = init_attr->cap.max_send_wr;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;

		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
				init_attr->qp_type,
				init_attr->port_num, gfp);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto bail_rq_wq;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		rvt_reset_qp(rdi, qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_qpn;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(rdi, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qpn;
			}

			err = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
		qp->pid = current->pid;
	}

	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_qps_allocated++;
	/*
	 * Maintain a busy_jiffies variable that will be added to the timeout
	 * period in mod_retry_timer and add_retry_timer. This busy jiffies
	 * is scaled by the number of rc qps created for the device to reduce
	 * the number of timeouts occurring when there is a large number of
	 * qps. busy_jiffies is incremented every rc qp scaling interval.
	 * The scaling interval is selected based on extensive performance
	 * evaluation of targeted workloads.
	 */
	if (init_attr->qp_type == IB_QPT_RC) {
		rdi->n_rc_qps++;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	ret = &qp->ibqp;

	/*
	 * We have our QP and it's good; now keep track of what types of opcodes
	 * can be processed on this QP. We do this by keeping track of what the
	 * 3 high order bits of the opcode are.
	 */
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	case IB_QPT_RC:
		qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	case IB_QPT_UC:
		qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	default:
		ret = ERR_PTR(-EINVAL);
		goto bail_ip;
	}

	return ret;

bail_ip:
	kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
	vfree(qp->r_rq.wq);

bail_driver_priv:
	rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
	kfree(qp);

bail_swq:
	vfree(swq);

	return ret;
}
Example #25
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_vmalloc_memory *mem = vb->priv;
	int pages;

	BUG_ON(!mem);

	MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dprintk(1, "%s memory method MMAP\n", __func__);

		
		if (!mem->vaddr) {
			printk(KERN_ERR "memory is not alloced/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		pages = PAGE_ALIGN(vb->size);

		dprintk(1, "%s memory method USERPTR\n", __func__);

		if (vb->baddr) {
			printk(KERN_ERR "USERPTR is currently not supported\n");
			return -EINVAL;
		}


		mem->vaddr = vmalloc_user(pages);
		if (!mem->vaddr) {
			printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
			return -ENOMEM;
		}
		dprintk(1, "vmalloc is at addr %p (%d pages)\n",
			mem->vaddr, pages);

#if 0
		int rc;
		if (!vb->baddr)
			return 0;

		
		rc = remap_vmalloc_range(mem->vma, (void *)vb->baddr, 0);
		if (rc < 0) {
			printk(KERN_ERR "mmap: remap failed with error %d", rc);
			return -ENOMEM;
		}
#endif

		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dprintk(1, "%s memory method OVERLAY/unknown\n", __func__);

		
		printk(KERN_ERR "Memory method currently unsupported.\n");
		return -EINVAL;
	}

	return 0;
}
Example #26
File: srq.c Project: Anjali05/linux
/**
 * rvt_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 *
 * Return: Allocated srq object
 */
struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
			      struct ib_srq_init_attr *srq_init_attr,
			      struct ib_udata *udata)
{
	struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
	struct rvt_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct rvt_ucontext, ibucontext);
	struct rvt_srq *srq;
	u32 sz;
	struct ib_srq *ret;

	if (srq_init_attr->srq_type != IB_SRQT_BASIC)
		return ERR_PTR(-EOPNOTSUPP);

	if (srq_init_attr->attr.max_sge == 0 ||
	    srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
	    srq_init_attr->attr.max_wr == 0 ||
	    srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
		return ERR_PTR(-EINVAL);

	srq = kzalloc_node(sizeof(*srq), GFP_KERNEL, dev->dparms.node);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct rvt_rwqe);
	srq->rq.wq = udata ?
		vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz) :
		vzalloc_node(sizeof(struct rvt_rwq) + srq->rq.size * sz,
			     dev->dparms.node);
	if (!srq->rq.wq) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_srq;
	}

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;
		u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;

		srq->ip =
		    rvt_create_mmap_info(dev, s, &ucontext->ibucontext,
					 srq->rq.wq);
		if (!srq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wq;
		}

		err = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	}

	/*
	 * ib_create_srq() will initialize srq->ibsrq.
	 */
	spin_lock_init(&srq->rq.lock);
	srq->limit = srq_init_attr->attr.srq_limit;

	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
		spin_unlock(&dev->n_srqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	return &srq->ibsrq;

bail_ip:
	kfree(srq->ip);
bail_wq:
	vfree(srq->rq.wq);
bail_srq:
	kfree(srq);
	return ret;
}
Example #27
/**
 * qib_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 *
 * Returns 0 for success.
 */
int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct qib_cq *cq = to_icq(ibcq);
	struct qib_cq_wc *old_wc;
	struct qib_cq_wc *wc;
	u32 head, tail, n;
	int ret;
	u32 sz;

	if (cqe < 1 || cqe > ib_qib_max_cqes) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
	else
		sz += sizeof(struct ib_wc) * (cqe + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = -ENOMEM;
		goto bail;
	}

	/* Check that we can write the offset to mmap. */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = 0;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail_free;
	}

	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	old_wc = cq->queue;
	head = old_wc->head;
	if (head > (u32) cq->ibcq.cqe)
		head = (u32) cq->ibcq.cqe;
	tail = old_wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	if (unlikely((u32)cqe < n)) {
		ret = -EINVAL;
		goto bail_unlock;
	}
	for (n = 0; tail != head; n++) {
		if (cq->ip)
			wc->uqueue[n] = old_wc->uqueue[tail];
		else
			wc->kqueue[n] = old_wc->kqueue[tail];
		if (tail == (u32) cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	wc->head = n;
	wc->tail = 0;
	cq->queue = wc;
	spin_unlock_irq(&cq->lock);

	vfree(old_wc);

	if (cq->ip) {
		struct qib_ibdev *dev = to_idev(ibcq->device);
		struct qib_mmap_info *ip = cq->ip;

		qib_update_mmap_info(dev, ip, sz, wc);

		/*
		 * Return the offset to mmap.
		 * See qib_mmap() for details.
		 */
		if (udata && udata->outlen >= sizeof(__u64)) {
			ret = ib_copy_to_udata(udata, &ip->offset,
					       sizeof(ip->offset));
			if (ret)
				goto bail;
		}

		spin_lock_irq(&dev->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = 0;
	goto bail;

bail_unlock:
	spin_unlock_irq(&cq->lock);
bail_free:
	vfree(wc);
bail:
	return ret;
}
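A concrete pass through the occupancy check above: with ibcq.cqe = 16, head = 5 and tail = 10, head < tail, so n = 16 + 1 + 5 - 10 = 12 entries are still queued, and the resize is rejected with -EINVAL unless the requested cqe is at least 12.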
Example #28
File: qp.c Project: DenisLug/mptcp
/**
 * hfi1_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd,
			     struct ib_qp_init_attr *init_attr,
			     struct ib_udata *udata)
{
	struct hfi1_qp *qp;
	int err;
	struct hfi1_swqe *swq = NULL;
	struct hfi1_ibdev *dev;
	struct hfi1_devdata *dd;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;

	if (init_attr->cap.max_send_sge > hfi1_max_sges ||
	    init_attr->cap.max_send_wr > hfi1_max_qp_wrs ||
	    init_attr->create_flags) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > hfi1_max_sges ||
		    init_attr->cap.max_recv_wr > hfi1_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct hfi1_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct hfi1_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct hfi1_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		RCU_INIT_POINTER(qp->next, NULL);
		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
		if (!qp->s_hdr) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp;
		}
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
		if (init_attr->srq)
			sz = 0;
		else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct hfi1_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct hfi1_rwq) +
						   qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = HFI1_S_SIGNAL_REQ_WR;
		dev = to_idev(ibpd->device);
		dd = dd_from_dev(dev);
		err = alloc_qpn(dd, &dev->qp_dev->qpn_table, init_attr->qp_type,
				init_attr->port_num);
		if (err < 0) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_qp;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		reset_qp(qp, init_attr->qp_type);

		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See hfi1_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct hfi1_rwq) + qp->r_rq.size * sz;

			qp->ip = hfi1_create_mmap_info(dev, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == hfi1_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;

	/*
	 * We have our QP and it's good; now keep track of what types of opcodes
	 * can be processed on this QP. We do this by keeping track of what the
	 * 3 high order bits of the opcode are.
	 */
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & OPCODE_QP_MASK;
		break;
	case IB_QPT_RC:
		qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & OPCODE_QP_MASK;
		break;
	case IB_QPT_UC:
		qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & OPCODE_QP_MASK;
		break;
	default:
		ret = ERR_PTR(-EINVAL);
		goto bail_ip;
	}

	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, hfi1_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
	kfree(qp->s_hdr);
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}
Example #29
static void external_exception(const char *assert_type, const int *log, int log_size, const int *phy, int phy_size, const char *detail)
{
    int *ee_log = NULL;
    unsigned long flags = 0;

    xlog_printk(ANDROID_LOG_DEBUG, AEK_LOG_TAG, 
                "%s : [%s] log size %d phy ptr %p size %d\n", __func__,
                assert_type, log_size, phy, phy_size);

    if ((log_size > 0) && (log != NULL)) {
		aed_dev.eerec.ee_log_size = log_size;
		ee_log = (int*)kmalloc(log_size, GFP_ATOMIC);
		if(NULL != ee_log)
            memcpy(ee_log, log, log_size);
	} else {
		aed_dev.eerec.ee_log_size = 16;
		ee_log = (int*)kzalloc(aed_dev.eerec.ee_log_size, GFP_ATOMIC);
	}

    if(NULL == ee_log){
        xlog_printk(ANDROID_LOG_ERROR, AEK_LOG_TAG, "%s : memory alloc() fail\n", __func__);
		return;
    }
    
    /*
     * Don't hold the lock across the whole function, since its duration is
     * uncertain; we rely on ee_log being non-NULL if a race happens here.
     */
    spin_lock_irqsave(&aed_device_lock, flags);
    
    if (aed_dev.eerec.ee_log == NULL) {
        aed_dev.eerec.ee_log = ee_log;
    } else {
        /*no EE before aee_ee_write destroy the ee log*/
        kfree(ee_log);
        spin_unlock_irqrestore(&aed_device_lock, flags);
        xlog_printk(ANDROID_LOG_WARN, AEK_LOG_TAG, "%s: More than one EE message queued\n", __func__);
        return;
    }
    spin_unlock_irqrestore(&aed_device_lock, flags);

	memset(aed_dev.eerec.assert_type, 0, sizeof(aed_dev.eerec.assert_type));
	strncpy(aed_dev.eerec.assert_type, assert_type, sizeof(aed_dev.eerec.assert_type)-1);
	memset(aed_dev.eerec.exp_filename, 0, sizeof(aed_dev.eerec.exp_filename));
	strncpy(aed_dev.eerec.exp_filename, detail, sizeof(aed_dev.eerec.exp_filename)-1);
    xlog_printk(ANDROID_LOG_DEBUG, AEK_LOG_TAG, "EE [%s] \n", aed_dev.eerec.assert_type);

	aed_dev.eerec.exp_linenum = 0;
	aed_dev.eerec.fatal1 = 0;
	aed_dev.eerec.fatal2 = 0;	

	/* Check if we can dump memory */
	if (in_interrupt()) {
		/* kernel vmalloc cannot be used in interrupt context */
		xlog_printk(ANDROID_LOG_DEBUG, AEK_LOG_TAG, "Modem exception occur in interrupt context, no modem coredump");
		phy_size = 0;
	} 
	else if ((phy == NULL) || (phy_size > MAX_EE_COREDUMP)) {
		xlog_printk(ANDROID_LOG_DEBUG, AEK_LOG_TAG, "Modem Physical memory size(%d) too large or invalid", phy_size);
		phy_size = 0;
	}

	if (phy_size > 0) {
		aed_dev.eerec.ee_phy = (int*)vmalloc_user(phy_size);
		if (aed_dev.eerec.ee_phy != NULL) {
			memcpy(aed_dev.eerec.ee_phy, phy, phy_size);
			aed_dev.eerec.ee_phy_size = phy_size;
		}
		else {
			xlog_printk(ANDROID_LOG_DEBUG, AEK_LOG_TAG, "Losing ee phy mem due to vmalloc return NULL");
			aed_dev.eerec.ee_phy_size = 0;
		}
	}
	else {
		aed_dev.eerec.ee_phy = NULL;
		aed_dev.eerec.ee_phy_size = 0;
	}
	ee_gen_ind_msg();
	xlog_printk(ANDROID_LOG_DEBUG, AEK_LOG_TAG, "external_exception out\n") ;
	wake_up(&aed_dev.eewait);
}
Example #30
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
			 struct vm_area_struct *vma)
{
	struct videobuf_vmalloc_memory *mem;
	struct videobuf_mapping *map;
	unsigned int first;
	int retval, pages;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

	dprintk(1, "%s\n", __func__);
	if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	/* look for first buffer to map */
	for (first = 0; first < VIDEO_MAX_FRAME; first++) {
		if (NULL == q->bufs[first])
			continue;

		if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
			continue;
		if (q->bufs[first]->boff == offset)
			break;
	}
	if (VIDEO_MAX_FRAME == first) {
		dprintk(1,"mmap app bug: offset invalid [offset=0x%lx]\n",
			(vma->vm_pgoff << PAGE_SHIFT));
		return -EINVAL;
	}

	/* create mapping + update buffer list */
	map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
	if (NULL == map)
		return -ENOMEM;

	q->bufs[first]->map = map;
	map->start = vma->vm_start;
	map->end   = vma->vm_end;
	map->q     = q;

	q->bufs[first]->baddr = vma->vm_start;

	mem = q->bufs[first]->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

	pages = PAGE_ALIGN(vma->vm_end - vma->vm_start);
	mem->vmalloc = vmalloc_user(pages);
	if (!mem->vmalloc) {
		printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
		goto error;
	}
	dprintk(1, "vmalloc is at addr %p (%d pages)\n",
		mem->vmalloc, pages);

	/* Try to remap memory */
	retval = remap_vmalloc_range(vma, mem->vmalloc, 0);
	if (retval < 0) {
		printk(KERN_ERR "mmap: remap failed with error %d. ", retval);
		vfree(mem->vmalloc);
		goto error;
	}

	vma->vm_ops          = &videobuf_vm_ops;
	vma->vm_flags       |= VM_DONTEXPAND | VM_RESERVED;
	vma->vm_private_data = map;

	dprintk(1,"mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
		map, q, vma->vm_start, vma->vm_end,
		(long int) q->bufs[first]->bsize,
		vma->vm_pgoff, first);

	videobuf_vm_open(vma);

	return 0;

error:
	mem = NULL;
	kfree(map);
	return -ENOMEM;
}