Example #1
static void jazz_fd_dma_mem_free(unsigned long addr,
                                        unsigned long size)
{       
	vdma_free(vdma_phys2log(PHYSADDR(addr)));
	free_pages(addr, get_order(size));	
}
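For context, a minimal sketch of the allocation side that a free routine like this undoes, assuming the usual __get_free_pages()/get_order() pairing; the function name is hypothetical and the vdma registration step (implied by vdma_free() above) is only noted in a comment.

#include <linux/gfp.h>

/* Hypothetical counterpart to the free routine above: allocate enough
 * physically contiguous pages for "size" bytes. get_order() rounds the
 * request up to a whole power-of-two number of pages, so the same size
 * must later be passed to free_pages(). Registering the region with the
 * Jazz vdma mapper is omitted here. */
static unsigned long example_fd_dma_mem_alloc(unsigned long size)
{
	return __get_free_pages(GFP_KERNEL, get_order(size));
}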
Example #2
static int __init context_init(void)
{
	int i;
	void __iomem *ux500_backup_ptr;

	/* allocate backup pointer for RAM data */
	ux500_backup_ptr = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(U8500_BACKUPRAM_SIZE));

	if (!ux500_backup_ptr) {
		pr_warning("context: could not allocate backup memory\n");
		return -ENOMEM;
	}

	/*
	 * ROM code addresses to store backup contents,
	 * pass the physical address of back up to ROM code
	 */
	writel(virt_to_phys(ux500_backup_ptr),
	       IO_ADDRESS(U8500_EXT_RAM_LOC_BACKUPRAM_ADDR));

	if (cpu_is_u5500()) {
		writel(IO_ADDRESS(U5500_PUBLIC_BOOT_ROM_BASE),
		       IO_ADDRESS(U8500_CPU0_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));

		writel(IO_ADDRESS(U5500_PUBLIC_BOOT_ROM_BASE),
		       IO_ADDRESS(U8500_CPU1_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));

		context_tpiu.base = ioremap(U5500_TPIU_BASE, SZ_4K);
		context_stm_ape.base = ioremap(U5500_STM_REG_BASE, SZ_4K);
		context_scu.base = ioremap(U5500_SCU_BASE, SZ_4K);

		context_prcc[0].base = ioremap(U5500_CLKRST1_BASE, SZ_4K);
		context_prcc[1].base = ioremap(U5500_CLKRST2_BASE, SZ_4K);
		context_prcc[2].base = ioremap(U5500_CLKRST3_BASE, SZ_4K);
		context_prcc[3].base = ioremap(U5500_CLKRST5_BASE, SZ_4K);
		context_prcc[4].base = ioremap(U5500_CLKRST6_BASE, SZ_4K);

		context_gic_dist_common.base = ioremap(U5500_GIC_DIST_BASE, SZ_4K);
		per_cpu(context_gic_cpu, 0).base = ioremap(U5500_GIC_CPU_BASE, SZ_4K);
	} else if (cpu_is_u8500() || cpu_is_u9540()) {
		/* Give logical address to backup RAM. For both CPUs */
		if (cpu_is_u9540()) {
			writel(IO_ADDRESS_DB9540_ROM(U9540_PUBLIC_BOOT_ROM_BASE),
					IO_ADDRESS(U8500_CPU0_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));

			writel(IO_ADDRESS_DB9540_ROM(U9540_PUBLIC_BOOT_ROM_BASE),
					IO_ADDRESS(U8500_CPU1_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
		} else {
			writel(IO_ADDRESS(U8500_PUBLIC_BOOT_ROM_BASE),
					IO_ADDRESS(U8500_CPU0_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));

			writel(IO_ADDRESS(U8500_PUBLIC_BOOT_ROM_BASE),
					IO_ADDRESS(U8500_CPU1_BACKUPRAM_ADDR_PUBLIC_BOOT_ROM_LOG_ADDR));
		}

		context_tpiu.base = ioremap(U8500_TPIU_BASE, SZ_4K);
		context_stm_ape.base = ioremap(U8500_STM_REG_BASE, SZ_4K);
		context_scu.base = ioremap(U8500_SCU_BASE, SZ_4K);

		/* PERIPH4 is always on, so there is no need to save prcc */
		context_prcc[0].base = ioremap(U8500_CLKRST1_BASE, SZ_4K);
		context_prcc[1].base = ioremap(U8500_CLKRST2_BASE, SZ_4K);
		context_prcc[2].base = ioremap(U8500_CLKRST3_BASE, SZ_4K);
		context_prcc[3].base = ioremap(U8500_CLKRST5_BASE, SZ_4K);
		context_prcc[4].base = ioremap(U8500_CLKRST6_BASE, SZ_4K);

		context_gic_dist_common.base = ioremap(U8500_GIC_DIST_BASE, SZ_4K);
		per_cpu(context_gic_cpu, 0).base = ioremap(U8500_GIC_CPU_BASE, SZ_4K);
	}

	per_cpu(context_gic_dist_cpu, 0).base = context_gic_dist_common.base;

	for (i = 1; i < num_possible_cpus(); i++) {
		per_cpu(context_gic_cpu, i).base
			= per_cpu(context_gic_cpu, 0).base;
		per_cpu(context_gic_dist_cpu, i).base
			= per_cpu(context_gic_dist_cpu, 0).base;
	}

	for (i = 0; i < ARRAY_SIZE(context_prcc); i++) {
		const int clusters[] = {1, 2, 3, 5, 6};
		char clkname[10];

		snprintf(clkname, sizeof(clkname), "PERIPH%d", clusters[i]);

		context_prcc[i].clk = clk_get_sys(clkname, NULL);
		BUG_ON(IS_ERR(context_prcc[i].clk));
	}

	if (cpu_is_u8500()) {
		u8500_context_init();
	} else if (cpu_is_u5500()) {
		u5500_context_init();
	} else if (cpu_is_u9540()) {
		u9540_context_init();
	} else {
		printk(KERN_ERR "context: unknown hardware!\n");
		return -EINVAL;
	}

	return 0;
}
Example #3
/**
 * dma_declare_contiguous() - reserve area for contiguous memory handling
 *			      for particular device
 * @dev:   Pointer to device structure.
 * @size:  Size of the reserved memory.
 * @base:  Start address of the reserved memory (optional, 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory for the specified device. It should be
 * called by board-specific code while the early allocator (memblock or
 * bootmem) is still active.
 */
int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
				  phys_addr_t base, phys_addr_t limit)
{
	struct cma_reserved *r = &cma_reserved[cma_reserved_count];
	phys_addr_t alignment;

	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);

	/* Sanity checks */
	if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	r->size = PAGE_ALIGN(size);

	/* Sanitise input arguments */
#ifndef CMA_NO_MIGRATION
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
#else
	/* constraints for memory protection */
	alignment = (size < SZ_1M) ? (SZ_4K << get_order(size)): SZ_1M;
#endif
	if (base & (alignment - 1)) {
		pr_err("Invalid alignment of base address %pa\n", &base);
		return -EINVAL;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* Reserve memory */
	if (base) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			base = -EBUSY;
			goto err;
		}
	} else {
		/*
		 * Use __memblock_alloc_base() since
		 * memblock_alloc_base() panic()s.
		 */
		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
		if (!addr) {
			base = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	r->carved_out_start = base;
	r->carved_out_size = size;
	r->dev = dev;
	cma_reserved_count++;
	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;
err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return base;
}
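As the kernel-doc above says, this is meant to be called from board-specific code while the early allocator is still active. A hedged sketch of such a call site follows; the device, the 16 MiB size and the reserve hook are illustrative assumptions (e.g. an ARM machine ->reserve() callback), not taken from any particular board file, and header locations vary slightly across kernel versions.

#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/init.h>
#include <linux/sizes.h>

static struct device example_cam_dev;	/* hypothetical consumer of the region */

/* Runs before the page allocator is up; base = 0 and limit = 0 let
 * memblock place the region anywhere (see the kernel-doc above). */
static void __init example_board_reserve(void)
{
	if (dma_declare_contiguous(&example_cam_dev, SZ_16M, 0, 0))
		pr_warn("example: CMA reservation for camera failed\n");
}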
Example #4
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}
Example #5
int imp_common_request_buffer(struct device *dev,
			      struct imp_logical_channel *channel,
			      struct imp_reqbufs *reqbufs)
{
	struct imp_buffer *buffer = NULL;
	int count = 0;
	unsigned long adr;
	u32 size;

	if (!reqbufs || !channel) {
		dev_err(dev, "request_buffer: error in argument\n");
		return -EINVAL;
	}

	/* if the number of buffers requested is more than supported, return error */
	if (reqbufs->count > MAX_BUFFERS) {
		dev_err(dev, "request_buffer: invalid buffer count\n");
		return -EINVAL;
	}

	if ((reqbufs->buf_type != IMP_BUF_IN)
	    && (reqbufs->buf_type != IMP_BUF_OUT1)
	    && (reqbufs->buf_type != IMP_BUF_OUT2)) {
		dev_err(dev, "request_buffer: invalid buffer type %d\n",
			reqbufs->buf_type);
		return -EINVAL;
	}
	if (reqbufs->count < 0) {
		dev_err(dev, "request_buffer: invalid buffer count %d\n",
			reqbufs->count);
		return -EINVAL;
	}
	/* if buf_type is input then allocate buffers for input */
	if (reqbufs->buf_type == IMP_BUF_IN) {
		/*if buffer count is zero, free all the buffers */
		if (reqbufs->count == 0) {
			/* free all the buffers */
			for (count = 0; count < channel->in_numbufs; count++) {
				/* free memory allocated for the image */
				if (channel->in_bufs[count]) {
					adr =
					    (unsigned long)channel->
					    in_bufs[count]->offset;
					if (adr)
						imp_common_free_pages((unsigned
							long)
							phys_to_virt
							(adr),
							channel->
							in_bufs
							[count]->
							size);

					/* free the memory allocated
					   to ipipe_buffer */
					kfree(channel->in_bufs[count]);

					channel->in_bufs[count] = NULL;
				}
			}
			channel->in_numbufs = 0;
			return 0;
		}

		/* free the extra buffers */
		if (channel->in_numbufs > reqbufs->count &&
		    reqbufs->size == channel->in_bufs[0]->size) {
			for (count = reqbufs->count;
			     count < channel->in_numbufs; count++) {
				/* free memory allocated for the image */
				if (channel->in_bufs[count]) {
					adr = channel->in_bufs[count]->offset;
					if (adr)
						imp_common_free_pages((unsigned
								long)
								phys_to_virt
								(adr),
								channel->
								in_bufs
								[count]->
								size);

					/* free the memory allocated
					   to ipipe_buffer */
					kfree(channel->in_bufs[count]);

					channel->in_bufs[count] = NULL;
				}
			}
			channel->in_numbufs = reqbufs->count;
			return 0;
		}
		/* if size requested is different from already allocated,
		   free memory of all already allocated buffers */
		if (channel->in_numbufs) {
			if (reqbufs->size != channel->in_bufs[0]->size) {
				for (count = 0;
				     count < channel->in_numbufs; count++) {
					if (channel->in_bufs[count]) {
						adr =
						    channel->
						    in_bufs[count]->offset;
						if (adr)
							imp_common_free_pages(
							(unsigned long)
							phys_to_virt(adr),
							channel->in_bufs
							[count]->size);

						kfree(channel->in_bufs[count]);

						channel->in_bufs[count] = NULL;
					}
				}
				channel->in_numbufs = 0;
			}
		}

		/* allocate the buffer */
		for (count = channel->in_numbufs; count < reqbufs->count;
		     count++) {
			/* Allocate memory for struct ipipe_buffer */
			buffer = kmalloc(sizeof(struct imp_buffer), GFP_KERNEL);

			/* if memory allocation fails then return error */
			if (!buffer) {
				/* free all the buffers */
				while (--count >= channel->in_numbufs) {
					adr = channel->in_bufs[count]->offset;
					if (adr)
						imp_common_free_pages((unsigned
								long)
								phys_to_virt
								(adr),
								channel->
								in_bufs
								[count]->
								size);
					kfree(channel->in_bufs[count]);
					channel->in_bufs[count] = NULL;
				}
				dev_err(dev,
					"1.request_buffer:not enough memory\n");
				return -ENOMEM;
			}

			/* assign buffer's address in configuration */
			channel->in_bufs[count] = buffer;

			/* set the buffer's index, buf_type and size parameters */
			buffer->index = count;
			buffer->buf_type = IMP_BUF_IN;
			buffer->size = reqbufs->size;
			/* allocate memory for buffer of size passed
			   in reqbufs */
			buffer->offset =
			    (unsigned long)__get_free_pages(GFP_KERNEL |
							    GFP_DMA,
							    get_order
							    (reqbufs->size));

			/* if memory allocation fails, return error */
			if (!(buffer->offset)) {
				/* free all the buffer's space */
				kfree(buffer);
				channel->in_bufs[count] = NULL;
				while (--count >= channel->in_numbufs) {
					adr = channel->in_bufs[count]->offset;
					if (adr)
						imp_common_free_pages((unsigned
								long)
								phys_to_virt
								(adr),
								channel->
								in_bufs
								[count]->
								size);
					kfree(channel->in_bufs[count]);
					channel->in_bufs[count] = NULL;
				}
				dev_err(dev,
					"2.request_buffer:not enough memory\n");

				return -ENOMEM;
			}

			adr = (unsigned long)buffer->offset;
			size = PAGE_SIZE << (get_order(reqbufs->size));
			while (size > 0) {
				/* make sure the frame buffers
				   are never swapped out of memory */
				SetPageReserved(virt_to_page(adr));
				adr += PAGE_SIZE;
				size -= PAGE_SIZE;
			}
			/* convert virtual address to physical */
			buffer->offset = (unsigned long)
			    virt_to_phys((void *)(buffer->offset));
		}
		channel->in_numbufs = reqbufs->count;
	}
	/* if buf_type is output then allocate buffers for output */
	else if (reqbufs->buf_type == IMP_BUF_OUT1) {
		if (reqbufs->count == 0) {
			/* free all the buffers */
			for (count = 0; count < channel->out_numbuf1s;
				count++) {
				/* free memory allocated for the image */
				if (channel->out_buf1s[count]) {
					adr = channel->out_buf1s[count]->offset;
					if (adr)
						imp_common_free_pages((unsigned
								long)
								phys_to_virt
								(adr),
								channel->
								out_buf1s
								[count]->
								size);

					/* free the memory allocated to
					   ipipe_buffer */
					kfree(channel->out_buf1s[count]);

					channel->out_buf1s[count] = NULL;
				}
			}
			channel->out_numbuf1s = 0;

			return 0;
		}
		/* free the buffers */
		if (channel->out_numbuf1s > reqbufs->count &&
		    reqbufs->size == channel->out_buf1s[0]->size) {
			for (count = reqbufs->count;
			     count < channel->out_numbuf1s; count++) {
				/* free memory allocated for the image */
				if (channel->out_buf1s[count]) {
					adr = channel->out_buf1s[count]->offset;
					if (adr)
						imp_common_free_pages((unsigned
								long)
								phys_to_virt
								(adr),
								channel->
								out_buf1s
								[count]->
								size);

					/* free the memory allocated to
					   ipipe_buffer */
					kfree(channel->out_buf1s[count]);

					channel->out_buf1s[count] = NULL;
				}
			}
			channel->out_numbuf1s = reqbufs->count;

			return 0;
		}
		/* if size requested is different from already allocated,
		   free memory of all already allocated buffers */
		if (channel->out_numbuf1s) {
			if (reqbufs->size != channel->out_buf1s[0]->size) {
				for (count = 0;
				     count < channel->out_numbuf1s; count++) {
					if (channel->out_buf1s[count]) {
						adr =
						    channel->
						    out_buf1s[count]->offset;

						if (adr)
							imp_common_free_pages(
								(unsigned long)
								phys_to_virt
								(adr),
								channel->
								out_buf1s
								[count]->
								size);

						kfree(channel->
						      out_buf1s[count]);

						channel->out_buf1s[count] =
						    NULL;
					}
				}
				channel->out_numbuf1s = 0;
			}
		}

		/* allocate the buffer */
		for (count = channel->out_numbuf1s;
		     count < reqbufs->count; count++) {
			/* Allocate memory for struct ipipe_buffer */
			buffer = kmalloc(sizeof(struct imp_buffer), GFP_KERNEL);

			/* if memory allocation fails then return error */
			if (!buffer) {
				/* free all the buffers */
				while (--count >= channel->out_numbuf1s) {
					adr = channel->out_buf1s[count]->offset;
					if (adr)
						imp_common_free_pages((unsigned
								long)
								phys_to_virt
								(adr),
								channel->
								out_buf1s
								[count]->
								size);
					kfree(channel->out_buf1s[count]);
					channel->out_buf1s[count] = NULL;
				}

				dev_err(dev,
					"3.request_buffer:not enough memory\n");

				return -ENOMEM;
			}

			/* assign buffer's address in configuration */
			channel->out_buf1s[count] = buffer;

			/* set the buffer's index, buf_type and size parameters */
			buffer->index = count;
			buffer->buf_type = IMP_BUF_OUT1;
			buffer->size = reqbufs->size;
			/* allocate memory for buffer of size passed
			   in reqbufs */
			buffer->offset =
			    (unsigned long)__get_free_pages(GFP_KERNEL |
							    GFP_DMA,
							    get_order
							    (reqbufs->size));

			/* if memory allocation fails, return error */
			if (!(buffer->offset)) {
				/* free all the buffer's space */
				kfree(buffer);
				channel->out_buf1s[count] = NULL;
				while (--count >= channel->out_numbuf1s) {
					adr = channel->out_buf1s[count]->offset;
					if (adr)
						imp_common_free_pages((unsigned
								long)
								phys_to_virt
								(adr),
								channel->
								out_buf1s
								[count]->
								size);
					kfree(channel->out_buf1s[count]);
					channel->out_buf1s[count] = NULL;
				}
				dev_err(dev,
					"4.request_buffer:not enough memory\n");

				return -ENOMEM;
			}

			adr = (unsigned long)buffer->offset;
			size = PAGE_SIZE << (get_order(reqbufs->size));
			while (size > 0) {
				/* make sure the frame buffers
				   are never swapped out of memory */
				SetPageReserved(virt_to_page(adr));
				adr += PAGE_SIZE;
				size -= PAGE_SIZE;
			}
			/* convert virtual address to physical */
			buffer->offset = (unsigned long)
			    virt_to_phys((void *)(buffer->offset));
		}
		channel->out_numbuf1s = reqbufs->count;

	} else if (reqbufs->buf_type == IMP_BUF_OUT2) {
		if (reqbufs->count == 0) {
			/* free all the buffers */
			for (count = 0; count < channel->out_numbuf2s;
				 count++) {
				/* free memory allocated for the image */
				if (channel->out_buf2s[count]) {
					adr = channel->out_buf2s[count]->offset;
					if (adr)
						imp_common_free_pages((unsigned
								long)
								phys_to_virt
								(adr),
								channel->
								out_buf2s
								[count]->
								size);

					/* free the memory allocated to
					   ipipe_buffer */
					kfree(channel->out_buf2s[count]);

					channel->out_buf2s[count] = NULL;
				}
			}
			channel->out_numbuf2s = 0;

			return 0;
		}
		/* free the buffers */
		if (channel->out_numbuf2s > reqbufs->count &&
		    reqbufs->size == channel->out_buf2s[0]->size) {
			for (count = reqbufs->count;
			     count < channel->out_numbuf2s; count++) {
				/* free memory allocated for the image */
				if (channel->out_buf2s[count]) {
					adr = channel->out_buf2s[count]->offset;
					if (adr)
						imp_common_free_pages((unsigned
								long)
								phys_to_virt
								(adr),
								channel->
								out_buf2s
								[count]->
								size);

					/* free the memory allocated to
					   ipipe_buffer */
					kfree(channel->out_buf2s[count]);

					channel->out_buf2s[count] = NULL;
				}
			}
			channel->out_numbuf2s = reqbufs->count;

			return 0;
		}
		/* if size requested is different from already allocated,
		   free memory of all already allocated buffers */
		if (channel->out_numbuf2s) {
			if (reqbufs->size != channel->out_buf2s[0]->size) {
				for (count = 0;
				     count < channel->out_numbuf2s; count++) {
					if (channel->out_buf2s[count]) {
						adr =
						    channel->
						    out_buf2s[count]->offset;

						if (adr)
							imp_common_free_pages(
								(unsigned long)
								phys_to_virt
								(adr),
								channel->
								out_buf2s
								[count]->
								size);

						kfree(channel->
						      out_buf2s[count]);

						channel->out_buf2s[count] =
						    NULL;
					}
				}
				channel->out_numbuf2s = 0;
			}
		}

		/* allocate the buffer */
		for (count = channel->out_numbuf2s;
		     count < reqbufs->count; count++) {
			/* Allocate memory for struct ipipe_buffer */
			buffer = kmalloc(sizeof(struct imp_buffer), GFP_KERNEL);

			/* if memory allocation fails then return error */
			if (!buffer) {
				/* free all the buffers */
				while (--count >= channel->out_numbuf2s) {
					adr = channel->out_buf2s[count]->offset;
					if (adr)
						imp_common_free_pages((unsigned
							long)
							phys_to_virt
							(adr),
							channel->
							out_buf2s
							[count]->
							size);
					kfree(channel->out_buf2s[count]);
					channel->out_buf2s[count] = NULL;
				}

				dev_err(dev,
					"5.request_buffer:not enough memory\n");

				return -ENOMEM;
			}

			/* assign buffer's address in configuration */
			channel->out_buf2s[count] = buffer;

			/* set the buffer's index, buf_type and size parameters */
			buffer->index = count;
			buffer->buf_type = IMP_BUF_OUT2;
			buffer->size = reqbufs->size;
			/* allocate memory for buffer of size passed
			   in reqbufs */
			buffer->offset =
			    (unsigned long)__get_free_pages(GFP_KERNEL |
							    GFP_DMA,
							    get_order
							    (reqbufs->size));

			/* if memory allocation fails, return error */
			if (!(buffer->offset)) {
				/* free all the buffer's space */
				kfree(buffer);
				channel->out_buf2s[count] = NULL;
				while (--count >= channel->out_numbuf2s) {
					adr = channel->out_buf2s[count]->offset;
					if (adr)
						imp_common_free_pages((unsigned
								long)
								phys_to_virt
								(adr),
								channel->
								out_buf2s
								[count]->
								size);
					kfree(channel->out_buf2s[count]);
					channel->out_buf2s[count] = NULL;
				}
				dev_err(dev,
					"6.request_buffer:not enough memory\n");

				return -ENOMEM;
			}

			adr = (unsigned long)buffer->offset;
			size = PAGE_SIZE << (get_order(reqbufs->size));
			while (size > 0) {
				/* make sure the frame buffers
				   are never swapped out of memory */
				SetPageReserved(virt_to_page(adr));
				adr += PAGE_SIZE;
				size -= PAGE_SIZE;
			}
			/* convert virtual address to physical */
			buffer->offset = (unsigned long)
			    virt_to_phys((void *)(buffer->offset));
		}
		channel->out_numbuf2s = reqbufs->count;

	} else {
		dev_err(dev, "request_buffer: invalid buffer type\n");

		return -EINVAL;
	}

	return 0;
}
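The imp_common_free_pages() helper used throughout Example #5 is not shown on this page. Assuming it simply undoes the SetPageReserved() loop from the allocation path before returning the pages, a plausible shape would be the following sketch (illustrative only, not the actual DaVinci IMP code):

#include <linux/gfp.h>
#include <linux/mm.h>

static void example_imp_free_pages(unsigned long addr, unsigned long bufsize)
{
	unsigned long tmp = addr;
	unsigned long size = PAGE_SIZE << get_order(bufsize);

	if (!addr)
		return;
	/* undo the SetPageReserved() calls made at allocation time */
	while (size > 0) {
		ClearPageReserved(virt_to_page(tmp));
		tmp += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	free_pages(addr, get_order(bufsize));
}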
Example #6
File: dma.c Project: 020gzh/linux
static void v32_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}
Example #7
static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf,
                                      size_t count, loff_t *ppos)
{
    struct b43legacy_wldev *dev;
    struct b43legacy_debugfs_fops *dfops;
    struct b43legacy_dfs_file *dfile;
    ssize_t uninitialized_var(ret);
    char *buf;
    const size_t bufsize = 1024 * 16; /* 16 KiB buffer */
    const size_t buforder = get_order(bufsize);
    int err = 0;

    if (!count)
        return 0;
    dev = file->private_data;
    if (!dev)
        return -ENODEV;

    mutex_lock(&dev->wl->mutex);
    if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) {
        err = -ENODEV;
        goto out_unlock;
    }

    dfops = container_of(file->f_op, struct b43legacy_debugfs_fops, fops);
    if (!dfops->read) {
        err = -ENOSYS;
        goto out_unlock;
    }
    dfile = fops_to_dfs_file(dev, dfops);

    if (!dfile->buffer) {
        buf = (char *)__get_free_pages(GFP_KERNEL, buforder);
        if (!buf) {
            err = -ENOMEM;
            goto out_unlock;
        }
        memset(buf, 0, bufsize);
        if (dfops->take_irqlock) {
            spin_lock_irq(&dev->wl->irq_lock);
            ret = dfops->read(dev, buf, bufsize);
            spin_unlock_irq(&dev->wl->irq_lock);
        } else
            ret = dfops->read(dev, buf, bufsize);
        if (ret <= 0) {
            free_pages((unsigned long)buf, buforder);
            err = ret;
            goto out_unlock;
        }
        dfile->data_len = ret;
        dfile->buffer = buf;
    }

    ret = simple_read_from_buffer(userbuf, count, ppos,
                                  dfile->buffer,
                                  dfile->data_len);
    if (*ppos >= dfile->data_len) {
        free_pages((unsigned long)dfile->buffer, buforder);
        dfile->buffer = NULL;
        dfile->data_len = 0;
    }
out_unlock:
    mutex_unlock(&dev->wl->mutex);

    return err ? err : ret;
}
Example #8
static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap,
			int partition, unsigned long prot)
{
	unsigned long left_to_map = cp_heap->total_size;
	unsigned long order = get_order(SZ_64K);
	unsigned long page_size = SZ_64K;
	int ret_value = 0;
	unsigned long virt_addr_len = cp_heap->total_size;
	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);

	/* If we are mapping into the video domain we need to map twice the
	 * size of the heap to account for a prefetch issue in the video core.
	 */
	if (domain_num == cp_heap->iommu_2x_map_domain)
		virt_addr_len <<= 1;

	if (cp_heap->total_size & (SZ_64K-1)) {
		pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (cp_heap->base & (SZ_64K-1)) {
		pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (!ret_value && domain) {
		unsigned long temp_phys = cp_heap->base;
		unsigned long temp_iova =
				msm_allocate_iova_address(domain_num, partition,
						virt_addr_len, SZ_64K);
		if (!temp_iova) {
			pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
				__func__, domain_num, partition);
			ret_value = -ENOMEM;
			goto out;
		}
		cp_heap->iommu_iova[domain_num] = temp_iova;

		while (left_to_map) {
			int ret = iommu_map(domain, temp_iova, temp_phys,
					    order, prot);
			if (ret) {
				pr_err("%s: could not map %lx in domain %p, error: %d\n",
					__func__, temp_iova, domain, ret);
				ret_value = -EAGAIN;
				goto free_iova;
			}
			temp_iova += page_size;
			temp_phys += page_size;
			left_to_map -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			ret_value = msm_iommu_map_extra(domain, temp_iova,
							cp_heap->total_size,
							SZ_64K, prot);
		if (ret_value)
			goto free_iova;
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
		ret_value = -ENOMEM;
	}
	goto out;

free_iova:
	msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num,
			      partition, virt_addr_len);
out:
	return ret_value;
}
Example #9
static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
			     unsigned long nr_segments,
			     struct kexec_segment __user *segments,
			     unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_ON_CRASH;

	if (kexec_on_panic) {
		/* Verify we have a valid entry point */
		if ((entry < phys_to_boot_phys(crashk_res.start)) ||
		    (entry > phys_to_boot_phys(crashk_res.end)))
			return -EADDRNOTAVAIL;
	}

	/* Allocate and initialize a controlling structure */
	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->start = entry;

	ret = copy_user_segment_list(image, nr_segments, segments);
	if (ret)
		goto out_free_image;

	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_image;

	/*
	 * Find a location for the control code buffer, and add it to
	 * the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_image;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_image:
	kfree(image);
	return ret;
}
Example #10
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int nasid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				     nasid, mmr_value->vector, mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}
Example #11
/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	void *p;
	int irq;
	struct iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);

	mutex_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}
	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE));
	if (!p) {
		err = -ENOMEM;
		goto err_pgd;
	}
	memset(p, 0, IOPGD_TABLE_SIZE);
	clean_dcache_area(p, IOPGD_TABLE_SIZE);
	obj->iopgd = p;

	BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE));

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_pgd:
	free_irq(irq, obj);
err_irq:
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}
Example #12
/**
 * Set up the MobiCore kernel log. It assumes it is running on CORE 0!
 * The fastcall will complain if that is not the case!
 */
long mobicore_log_setup(void *data)
{
	unsigned long phys_log_buf;
	union fc_generic fc_log;

	long ret;
	log_pos = 0;
	log_buf = NULL;
	log_thread = NULL;
	log_line = NULL;
	log_line_len = 0;

	/* Sanity check for the log size */
	if (log_size < PAGE_SIZE)
		return -EFAULT;
	else
		log_size = PAGE_ALIGN(log_size);

	log_line = kzalloc(LOG_LINE_SIZE, GFP_KERNEL);
	if (IS_ERR(log_line)) {
		MCDRV_DBG_ERROR("failed to allocate log line!");
		return -ENOMEM;
	}

	log_thread = kthread_create(log_worker, NULL, "mobicore_log");
	if (IS_ERR(log_thread)) {
		MCDRV_DBG_ERROR("mobicore log thread creation failed!");
		ret = -EFAULT;
		goto mobicore_log_setup_log_line;
	}

	/* We are going to map this buffer into virtual address space in SWd.
	 * To reduce complexity there, we use a contiguous buffer. */
	log_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(log_size));
	if (!log_buf) {
		MCDRV_DBG_ERROR("Failed to get page for logger!");
		ret = -ENOMEM;
		goto mobicore_log_setup_kthread;
	}
	phys_log_buf = virt_to_phys(log_buf);

	memset(&fc_log, 0, sizeof(fc_log));
	fc_log.as_in.cmd      = MC_FC_NWD_TRACE;
	fc_log.as_in.param[0] = phys_log_buf;
	fc_log.as_in.param[1] = log_size;

	MCDRV_DBG("fc_log virt=%p phys=%p ", log_buf, (void *)phys_log_buf);
	mc_fastcall(&fc_log);
	MCDRV_DBG("fc_log out ret=0x%08x", fc_log.as_out.ret);
	/* If the setup failed we must free the memory allocated */
	if (fc_log.as_out.ret) {
		MCDRV_DBG_ERROR("MobiCore shared traces setup failed!");
		free_pages((unsigned long)log_buf, get_order(log_size));
		log_buf = NULL;
		ret = -EIO;
		goto mobicore_log_setup_kthread;
	}

	MCDRV_DBG("fc_log Logger version %u\n", log_buf->version);
	return 0;

mobicore_log_setup_kthread:
	kthread_stop(log_thread);
	log_thread = NULL;
mobicore_log_setup_log_line:
	kfree(log_line);
	log_line = NULL;
	return ret;
}
Example #13
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf)
{
	dma_addr_t t;

	if (size <= max_direct) {
		buf->nbufs        = 1;
		buf->npages       = 1;
		buf->page_shift   = get_order(size) + PAGE_SHIFT;
		buf->direct.buf   = dma_alloc_coherent(&dev->pdev->dev,
						       size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}

		memset(buf->direct.buf, 0, size);
	} else {
		int i;

		buf->direct.buf  = NULL;
		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages      = buf->nbufs;
		buf->page_shift  = PAGE_SHIFT;
		buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
					   GFP_KERNEL);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;

			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}

		if (BITS_PER_LONG == 64) {
			struct page **pages;
			pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
			if (!pages)
				goto err_free;
			for (i = 0; i < buf->nbufs; ++i)
				pages[i] = virt_to_page(buf->page_list[i].buf);
			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}

	return 0;

err_free:
	mlx4_buf_free(dev, size, buf);

	return -ENOMEM;
}
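The loop that shrinks buf->page_shift until the DMA address is naturally aligned is easy to miss, so here it is in isolation with illustrative names (this is not mlx4 driver code): start from the largest "hardware page" that covers the whole buffer and halve it until the address fits, doubling the page count each time.

#include <linux/gfp.h>
#include <linux/types.h>

static int example_fit_page_shift(dma_addr_t t, int size, int *npages)
{
	int shift = get_order(size) + PAGE_SHIFT;	/* one "page" covers it all */

	*npages = 1;
	while (t & ((1ULL << shift) - 1)) {
		--shift;		/* halve the hardware page size ... */
		*npages *= 2;		/* ... and double the page count */
	}
	return shift;
}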
Example #14
static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	unsigned int npages;
	int max_order;
	gfp_t gfp;

	max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		unsigned int max_segment;

		max_segment = swiotlb_max_segment();
		if (max_segment) {
			max_segment = max_t(unsigned int, max_segment,
					    PAGE_SIZE) >> PAGE_SHIFT;
			max_order = min(max_order, ilog2(max_segment));
		}
	}
#endif

	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		gfp &= ~__GFP_HIGHMEM;
		gfp |= __GFP_DMA32;
	}

create_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	npages = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;

	do {
		int order = min(fls(npages) - 1, max_order);
		struct page *page;

		do {
			page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
					   order);
			if (page)
				break;
			if (!order--)
				goto err;

			/* Limit subsequent allocations as well */
			max_order = order;
		} while (1);

		sg_set_page(sg, page, PAGE_SIZE << order, 0);
		sg_page_sizes |= PAGE_SIZE << order;
		st->nents++;

		npages -= 1 << order;
		if (!npages) {
			sg_mark_end(sg);
			break;
		}

		sg = __sg_next(sg);
	} while (1);

	if (i915_gem_gtt_prepare_pages(obj, st)) {
		/* Failed to dma-map; try again with single-page sg segments */
		if (get_order(st->sgl->length)) {
			internal_free_pages(st);
			max_order = 0;
			goto create_st;
		}
		goto err;
	}

	/* Mark the pages as dontneed whilst they are still pinned. As soon
	 * as they are unpinned they are allowed to be reaped by the shrinker,
	 * and the caller is expected to repopulate - the contents of this
	 * object are only valid whilst active and pinned.
	 */
	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	internal_free_pages(st);

	return -ENOMEM;
}
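The allocation loop above backs off to smaller orders whenever a large contiguous chunk is unavailable. A stand-alone sketch of that back-off pattern follows, using plain GFP_KERNEL (QUIET and MAYFAIL above appear to be driver-local gfp tweaks and are omitted); the function name is illustrative.

#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

static struct page *example_alloc_backoff(unsigned int npages, int *max_order)
{
	int order = min(fls(npages) - 1, *max_order);
	struct page *page;

	for (;;) {
		page = alloc_pages(GFP_KERNEL, order);
		if (page)
			return page;	/* PAGE_SIZE << order bytes obtained */
		if (!order--)
			return NULL;	/* even a single page failed */
		/* remember the lower ceiling so later attempts start there */
		*max_order = order;
	}
}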
Example #15
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}
Example #16
static void dma_free(void *vaddr, size_t size)
{
	vaddr = (void *)KSEG0ADDR(vaddr);
	free_pages((unsigned long) vaddr, get_order(size));
}
Example #17
int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
		    union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
		    int hca_write, struct mthca_mr *mr)
{
	int err = -ENOMEM;
	int npages, shift;
	u64 *dma_list = NULL;
	dma_addr_t t;
	int i;

	if (size <= max_direct) {
		*is_direct = 1;
		npages     = 1;
		shift      = get_order(size) + PAGE_SHIFT;

		buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
						     size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		pci_unmap_addr_set(&buf->direct, mapping, t);

		memset(buf->direct.buf, 0, size);

		while (t & ((1 << shift) - 1)) {
			--shift;
			npages *= 2;
		}

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_free;

		for (i = 0; i < npages; ++i)
			dma_list[i] = t + i * (1 << shift);
	} else {
		*is_direct = 0;
		npages     = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		shift      = PAGE_SHIFT;

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			return -ENOMEM;

		buf->page_list = kmalloc(npages * sizeof *buf->page_list,
					 GFP_KERNEL);
		if (!buf->page_list)
			goto err_out;

		for (i = 0; i < npages; ++i)
			buf->page_list[i].buf = NULL;

		for (i = 0; i < npages; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			dma_list[i] = t;
			pci_unmap_addr_set(&buf->page_list[i], mapping, t);

			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}
	}

	err = mthca_mr_alloc_phys(dev, pd->pd_num,
				  dma_list, shift, npages,
				  0, size,
				  MTHCA_MPT_FLAG_LOCAL_READ |
				  (hca_write ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0),
				  mr);
	if (err)
		goto err_free;

	kfree(dma_list);

	return 0;

err_free:
	mthca_buf_free(dev, size, buf, *is_direct, NULL);

err_out:
	kfree(dma_list);

	return err;
}
Example #18
struct net_device * __init ltpc_probe(void)
{
	struct net_device *dev;
	int err = -ENOMEM;
	int x=0,y=0;
	int autoirq;
	unsigned long f;
	unsigned long timeout;

	dev = alloc_ltalkdev(sizeof(struct ltpc_private));
	if (!dev)
		goto out;

	SET_MODULE_OWNER(dev);

	/* probe for the I/O port address */
	
	if (io != 0x240 && request_region(0x220,8,"ltpc")) {
		x = inb_p(0x220+6);
		if ( (x!=0xff) && (x>=0xf0) ) {
			io = 0x220;
			goto got_port;
		}
		release_region(0x220,8);
	}
	if (io != 0x220 && request_region(0x240,8,"ltpc")) {
		y = inb_p(0x240+6);
		if ( (y!=0xff) && (y>=0xf0) ){ 
			io = 0x240;
			goto got_port;
		}
		release_region(0x240,8);
	} 

	/* give up in despair */
	printk(KERN_ERR "LocalTalk card not found; 220 = %02x, 240 = %02x.\n", x,y);
	err = -ENODEV;
	goto out1;

 got_port:
	/* probe for the IRQ line */
	if (irq < 2) {
		unsigned long irq_mask;

		irq_mask = probe_irq_on();
		/* reset the interrupt line */
		inb_p(io+7);
		inb_p(io+7);
		/* trigger an interrupt (I hope) */
		inb_p(io+6);
		mdelay(2);
		autoirq = probe_irq_off(irq_mask);

		if (autoirq == 0) {
			printk(KERN_ERR "ltpc: probe at %#x failed to detect IRQ line.\n", io);
		} else {
			irq = autoirq;
		}
	}

	/* allocate a DMA buffer */
	ltdmabuf = (unsigned char *) dma_mem_alloc(1000);
	if (!ltdmabuf) {
		printk(KERN_ERR "ltpc: mem alloc failed\n");
		err = -ENOMEM;
		goto out2;
	}

	ltdmacbuf = &ltdmabuf[800];

	if(debug & DEBUG_VERBOSE) {
		printk("ltdmabuf pointer %08lx\n",(unsigned long) ltdmabuf);
	}

	/* reset the card */

	inb_p(io+1);
	inb_p(io+3);

	msleep(20);

	inb_p(io+0);
	inb_p(io+2);
	inb_p(io+7); /* clear reset */
	inb_p(io+4); 
	inb_p(io+5);
	inb_p(io+5); /* enable dma */
	inb_p(io+6); /* tri-state interrupt line */

	ssleep(1);
	
	/* now, figure out which dma channel we're using, unless it's
	   already been specified */
	/* well, 0 is a legal DMA channel, but the LTPC card doesn't
	   use it... */
	dma = ltpc_probe_dma(io, dma);
	if (!dma) {  /* no dma channel */
		printk(KERN_ERR "No DMA channel found on ltpc card.\n");
		err = -ENODEV;
		goto out3;
	}

	/* print out friendly message */
	if(irq)
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, IR%d, DMA%d.\n",io,irq,dma);
	else
		printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d.  Using polled mode.\n",io,dma);

	/* Fill in the fields of the device structure with ethernet-generic values. */
	dev->hard_start_xmit = ltpc_xmit;
	dev->hard_header = ltpc_hard_header;
	dev->get_stats = ltpc_get_stats;

	/* add the ltpc-specific things */
	dev->do_ioctl = &ltpc_ioctl;

	dev->set_multicast_list = &set_multicast_list;
	dev->mc_list = NULL;
	dev->base_addr = io;
	dev->irq = irq;
	dev->dma = dma;

	/* the card will want to send a result at this point */
	/* (I think... leaving out this part makes the kernel crash,
           so I put it back in...) */

	f=claim_dma_lock();
	disable_dma(dma);
	clear_dma_ff(dma);
	set_dma_mode(dma,DMA_MODE_READ);
	set_dma_addr(dma,virt_to_bus(ltdmabuf));
	set_dma_count(dma,0x100);
	enable_dma(dma);
	release_dma_lock(f);

	(void) inb_p(io+3);
	(void) inb_p(io+2);
	timeout = jiffies+100*HZ/100;

	while(time_before(jiffies, timeout)) {
		if( 0xf9 == inb_p(io+6))
			break;
		schedule();
	}

	if(debug & DEBUG_VERBOSE) {
		printk("setting up timer and irq\n");
	}

	/* grab it and don't let go :-) */
	if (irq && request_irq( irq, &ltpc_interrupt, 0, "ltpc", dev) >= 0)
	{
		(void) inb_p(io+7);  /* enable interrupts from board */
		(void) inb_p(io+7);  /* and reset irq line */
	} else {
		if( irq )
			printk(KERN_ERR "ltpc: IRQ already in use, using polled mode.\n");
		dev->irq = 0;
		/* polled mode -- 20 times per second */
		/* this is really, really slow... should it poll more often? */
		init_timer(&ltpc_timer);
		ltpc_timer.function=ltpc_poll;
		ltpc_timer.data = (unsigned long) dev;

		ltpc_timer.expires = jiffies + HZ/20;
		add_timer(&ltpc_timer);
	}
	err = register_netdev(dev);
	if (err)
		goto out4;

	return NULL;
out4:
	del_timer_sync(&ltpc_timer);
	if (dev->irq)
		free_irq(dev->irq, dev);
out3:
	free_pages((unsigned long)ltdmabuf, get_order(1000));
out2:
	release_region(io, 8);
out1:
	free_netdev(dev);
out:
	return ERR_PTR(err);
}
Example #19
/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk:	Target disk
 *
 * Helper function for low-level device drivers to (re)allocate and initialize
 * a disk request queue's zone bitmaps. This function should normally be called
 * within the disk ->revalidate method. For BIO based queues, no zone bitmap
 * is allocated.
 */
int blk_revalidate_disk_zones(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	unsigned int nr_zones = __blkdev_nr_zones(q, get_capacity(disk));
	unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
	unsigned int i, rep_nr_zones = 0, z = 0, nrz;
	struct blk_zone *zones = NULL;
	sector_t sector = 0;
	int ret = 0;

	/*
	 * BIO based queues do not use a scheduler so only q->nr_zones
	 * needs to be updated so that the sysfs exposed value is correct.
	 */
	if (!queue_is_rq_based(q)) {
		q->nr_zones = nr_zones;
		return 0;
	}

	if (!blk_queue_is_zoned(q) || !nr_zones) {
		nr_zones = 0;
		goto update;
	}

	/* Allocate bitmaps */
	ret = -ENOMEM;
	seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones);
	if (!seq_zones_wlock)
		goto out;
	seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones);
	if (!seq_zones_bitmap)
		goto out;

	/* Get zone information and initialize seq_zones_bitmap */
	rep_nr_zones = nr_zones;
	zones = blk_alloc_zones(q->node, &rep_nr_zones);
	if (!zones)
		goto out;

	while (z < nr_zones) {
		nrz = min(nr_zones - z, rep_nr_zones);
		ret = blk_report_zones(disk, sector, zones, &nrz, GFP_NOIO);
		if (ret)
			goto out;
		if (!nrz)
			break;
		for (i = 0; i < nrz; i++) {
			if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
				set_bit(z, seq_zones_bitmap);
			z++;
		}
		sector += nrz * blk_queue_zone_sectors(q);
	}

	if (WARN_ON(z != nr_zones)) {
		ret = -EIO;
		goto out;
	}

update:
	/*
	 * Install the new bitmaps, making sure the queue is stopped and
	 * all I/Os are completed (i.e. a scheduler is not referencing the
	 * bitmaps).
	 */
	blk_mq_freeze_queue(q);
	q->nr_zones = nr_zones;
	swap(q->seq_zones_wlock, seq_zones_wlock);
	swap(q->seq_zones_bitmap, seq_zones_bitmap);
	blk_mq_unfreeze_queue(q);

out:
	free_pages((unsigned long)zones,
		   get_order(rep_nr_zones * sizeof(struct blk_zone)));
	kfree(seq_zones_wlock);
	kfree(seq_zones_bitmap);

	if (ret) {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		blk_mq_freeze_queue(q);
		blk_queue_free_zone_bitmaps(q);
		blk_mq_unfreeze_queue(q);
	}

	return ret;
}
Example #20
static unsigned long dma_mem_alloc(int size)
{
        int order = get_order(size);

        return __get_dma_pages(GFP_KERNEL, order);
}
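__get_dma_pages() is simply __get_free_pages() with GFP_DMA or'ed into the flags, so the buffer must be released with free_pages() and the same order, as the ltpc example above does with free_pages(..., get_order(1000)). A matching free helper might look like this (the name is hypothetical):

#include <linux/gfp.h>

static void dma_mem_free(unsigned long addr, int size)
{
	free_pages(addr, get_order(size));
}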
Example #21
static void ccio_free_consistent(struct pci_dev *dev, size_t size,
			       void *vaddr, dma_addr_t handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}
Example #22
/* routine to allocate DMA buffer RAM of size (in bytes).
   Returns 0 on success, and <0 on failure */
static int get_dma_buffer(ssize_t size) {
  ssize_t bytes_to_get = size & ~0x3; /* get long-aligned */
  ssize_t usedbytes;
  unsigned long page_order;
  void * bufferpiece;
#ifdef bla_pages
  struct page * page; /* just to check */
#endif
  dma_addr_t busaddress; /* bus address of DMA buffer */
  struct dma_page_pointer * currbuf;
  struct dma_page_pointer * tmpbuf;
  /* check multi pages */
  struct page *spa;
  unsigned long pflags;
  int pcnt,i;  

  /* reset dma pointer buffer */
  currbuf=dma_main_pointer; /* NULL if no buffer exists */
#ifdef bla1
  printk("I should get %d bytes now.\n",bytes_to_get);
#endif
 
  /* still have to get only small pieces.... */
  page_order = 3;
    
  page_order = get_order(bytes_to_get);
  if (page_order >= MAX_ORDER) page_order=MAX_ORDER;

  while (bytes_to_get>0) {
      /* shrink size if possible */
      while((page_order>0) && (PAGE_SIZE<<(page_order-1))>=bytes_to_get)
	  page_order--;
      
#ifdef bla_pages
      printk("current page order: %d\n",(int)page_order);
#endif
      /* bufferpiece = pci_alloc_consistent (this_pci_device,
	 bytes_to_get, &busaddress); */ /* does not help... */
      bufferpiece = (void *)__get_free_pages(GFP_KERNEL,page_order);
      
      if (bufferpiece) {

	  /* repair missing page counts */
	  add_individual_page_counts(bufferpiece, page_order);
	  
	  /* get block structure */
	  for (i=0;i<(1 << page_order);i++) {
	      spa=virt_to_page(bufferpiece+i*PAGE_SIZE);
	      pcnt=page_count(spa);
	      pflags = spa->flags;
	      /*      printk("subpage index: %d, count: %d, flags: %x\n",i,pcnt,(int)pflags); */
      }	  
	  busaddress = virt_to_bus(bufferpiece);
	  /* success: make new entry in chain */
	  tmpbuf = (dma_page_pointer *) kmalloc(sizeof(dma_page_pointer),
						GFP_KERNEL); /* first, get buffer */
	  if (!tmpbuf) {
	      printk(" Wruagh - kmalloc failed for buffer pointer....\n");
	      release_individual_page_counts(bufferpiece,page_order);
	      free_pages((unsigned long)bufferpiece,page_order); /* give it back */
	      printk("kmalloc failed during DMA buffer alloc. better reboot.\n");
	      return -ENOMEM;
	  }
	  
#ifdef bla_pages
	  page = virt_to_page(bufferpiece);
	  printk(" page of %x is: %x\n",(int)bufferpiece, (int)page);
#endif
	  
	  if (currbuf) { /* there is already a structure */
	      /* fill new struct; currbuf points to last structure filled  */
	      tmpbuf->next=currbuf->next; tmpbuf->previous=currbuf;
	      /* insert in chain */
	      currbuf->next->previous=tmpbuf;currbuf->next=tmpbuf;
	      currbuf=tmpbuf;
	  } else {
	      tmpbuf->previous=tmpbuf; tmpbuf->next=tmpbuf; /* fill new struct */
	      currbuf=tmpbuf; dma_main_pointer=currbuf; /* set main pointer */
	  };
	  
	  /* fill structure with actual buffer info */
	  usedbytes = PAGE_SIZE<<page_order;
	  currbuf->fullsize = usedbytes; /* all allocated bytes */
	  usedbytes = (usedbytes>bytes_to_get?bytes_to_get:usedbytes);
	  currbuf->size=usedbytes; /* get useful size into buffer */
	  currbuf->order=page_order; /* needed for free_pages */
	  currbuf->buffer=bufferpiece;  /* kernel address of buffer */
	  currbuf->physbuf=busaddress; /* PCI bus address */
	  
      /* less work to do.. */
	  bytes_to_get -= usedbytes;
      } else {
	  /* could not get the large mem piece. try smaller ones */
	  if (page_order>0) {
	      page_order--; continue;
	  } else {
	      break; /* stop and clean up in case of problems */
	  };
      }
  }
  if (bytes_to_get <=0)  return 0; /* everything went fine.... */
  /* cleanup of unused buffers and pointers with standard release code */
  release_dma_buffer();
  return -ENOMEM;
}
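The release_dma_buffer() routine called at the end of Example #22 is not shown. Assuming it walks the circular list built above and mirrors the per-piece cleanup already visible in the error path (release_individual_page_counts() followed by free_pages() and kfree()), it would look roughly like this sketch; it relies on the driver's own declarations of dma_page_pointer, dma_main_pointer and the page-count helpers being in scope.

static void example_release_dma_buffer(void)
{
	struct dma_page_pointer *head = dma_main_pointer;
	struct dma_page_pointer *cur, *next;

	if (!head)
		return;
	cur = head;
	do {
		next = cur->next;
		release_individual_page_counts(cur->buffer, cur->order);
		free_pages((unsigned long)cur->buffer, cur->order);
		kfree(cur);
		cur = next;
	} while (cur != head);
	dma_main_pointer = NULL;	/* list is empty again */
}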
Example #23
		}

		sg_init_table(sglist, nrpages);

		for (i = 0; i < nrpages; i++)
			sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);

		ret = iommu_map_range(domain, temp_iova, sglist, size, cached);
		if (ret) {
			pr_err("%s: could not map extra %lx in domain %p\n",
				__func__, start_iova, domain);
		}

		vfree(sglist);
	} else {
		unsigned long order = get_order(page_size);
		unsigned long aligned_size = ALIGN(size, page_size);
		unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);

		for (i = 0; i < nrpages; i++) {
			ret = iommu_map(domain, temp_iova, phy_addr, page_size,
						cached);
			if (ret) {
				pr_err("%s: could not map %lx in domain %p, error: %d\n",
					__func__, start_iova, domain, ret);
				ret = -EAGAIN;
				goto out;
			}
			temp_iova += page_size;
		}
	}
Example #24
void *co_os_alloc_pages(unsigned long pages)
{
	return (void *)__get_free_pages(GFP_KERNEL, get_order(pages << PAGE_SHIFT));
}
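Because get_order() rounds up to a power of two, co_os_alloc_pages() can hand back more memory than asked for: a request for 3 pages allocates 4. A small illustrative helper (hypothetical name) that reports the real footprint:

#include <linux/gfp.h>
#include <asm/page.h>

static unsigned long co_os_real_pages(unsigned long pages)
{
	/* e.g. pages = 3 -> get_order(3 * PAGE_SIZE) = 2 -> 4 pages */
	return 1UL << get_order(pages << PAGE_SHIFT);
}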
Example #25
static int dovefb_ovly_ioctl(struct fb_info *fi, unsigned int cmd,
		unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct dovefb_layer_info *dfli = fi->par;
	u32 x;
	int vmode = 0;
	int gfx_on = 1;
	int vid_on = 1;
	int interpolation = 0;

	switch (cmd) {
	case DOVEFB_IOCTL_WAIT_VSYNC:
		wait_for_vsync(dfli);
		break;
	case DOVEFB_IOCTL_GET_VIEWPORT_INFO:
		return copy_to_user(argp, &dfli->surface.viewPortInfo,
			sizeof(struct _sViewPortInfo)) ? -EFAULT : 0;
	case DOVEFB_IOCTL_SET_VIEWPORT_INFO:
		mutex_lock(&dfli->access_ok);
		if (copy_from_user(&gViewPortInfo, argp,
				sizeof(gViewPortInfo))) {
			mutex_unlock(&dfli->access_ok);
			return -EFAULT;
		}

		if (check_surface(fi, -1, &gViewPortInfo, 0, 0))
			dovefb_ovly_set_par(fi);

		mutex_unlock(&dfli->access_ok);
		break;
	case DOVEFB_IOCTL_SET_VIDEO_MODE:
		/*
		 * Get data from user space.
		 */
		if (copy_from_user(&vmode, argp, sizeof(vmode)))
			return -EFAULT;

		if (check_surface(fi, vmode, 0, 0, 0))
			dovefb_ovly_set_par(fi);
		break;
	case DOVEFB_IOCTL_GET_VIDEO_MODE:
		return copy_to_user(argp, &dfli->surface.videoMode,
			sizeof(u32)) ? -EFAULT : 0;
	case DOVEFB_IOCTL_CREATE_VID_BUFFER:
	{
		struct _sOvlySurface OvlySurface;

		mutex_lock(&dfli->access_ok);
		if (copy_from_user(&OvlySurface, argp,
				sizeof(struct _sOvlySurface))) {
			mutex_unlock(&dfli->access_ok);
			return -EFAULT;
		}

		/* Request a video buffer. */
		dovefb_ovly_create_surface(&OvlySurface);

		if (copy_to_user(argp, &OvlySurface,
				sizeof(struct _sOvlySurface))) {
			mutex_unlock(&dfli->access_ok);
			return -EFAULT;
		}

		mutex_unlock(&dfli->access_ok);

		break;
	}
	case DOVEFB_IOCTL_FLIP_VID_BUFFER:
	{
		struct _sOvlySurface *surface = 0;
		u8 *start_addr, *input_data, *dst_addr;
		u32 length;
		surface = kmalloc(sizeof(struct _sOvlySurface),
				GFP_KERNEL);
		if (!surface)
			return -ENOMEM;

		/* Get user-mode data. */
		if (copy_from_user(surface, argp,
		    sizeof(struct _sOvlySurface))) {
			kfree(surface);
			return -EFAULT;
		}
		mutex_lock(&dfli->access_ok);
		length = surface->videoBufferAddr.length;
		dst_addr = dfli->surface.videoBufferAddr.startAddr;
		start_addr = surface->videoBufferAddr.startAddr;
		input_data = surface->videoBufferAddr.inputData;

		/*
		 * Has DMA addr?
		 */
		if (start_addr &&
		    (!input_data)) {
			if (0 != addFreeBuf(freeBufList, (u8 *)surface)) {
				pr_debug("Error: addFreeBuf()\n");
				mutex_unlock(&dfli->access_ok);
				kfree(surface);
				return -EFAULT;
			} else {
				/* pr_debug("addFreeBuf(0x%08x) ok.\n",
					start_addr); */
			}
		} else {
			if (check_surface(fi, surface->videoMode,
					&surface->viewPortInfo,
					&surface->viewPortOffset,
					&surface->videoBufferAddr))
				dovefb_ovly_set_par(fi);

			/* copy buffer */
			if (input_data) {
				wait_for_vsync(dfli);
				/* if support hw DMA, replace this. */
				if (copy_from_user(dfli->fb_start,
						   input_data, length)) {
					mutex_unlock(&dfli->access_ok);
					kfree(surface);
					return -EFAULT;
				}
				mutex_unlock(&dfli->access_ok);
				kfree(surface);
				return 0;
			}

			kfree(surface);
#if 0
			/*
			 * Fix me: Currently not implemented yet.
			 * The application allocates a physically contiguous
			 * buffer and passes it into the driver. Here we would
			 * update the fb info to the new buffer and free the
			 * old one.
			 */
			if (start_addr) {
				if (dfli->mem_status)
					free_pages(
					    (unsigned long)dfli->fb_start,
					    get_order(dfli->fb_size));
				else
					dma_free_writecombine(dfli->dev,
					    dfli->fb_size,
					    dfli->fb_start,
					    dfli->fb_start_dma);

				dfli->fb_start = __va(start_addr);
				dfli->fb_size = length;
				dfli->fb_start_dma =
				    (dma_addr_t)__pa(dfli->fb_start);
				dfli->mem_status = 1;
				fi->fix.smem_start = dfli->fb_start_dma;
				fi->fix.smem_len = dfli->fb_size;
				fi->screen_base = dfli->fb_start;
				fi->screen_size = dfli->fb_size;
			}
#endif
		}
		mutex_unlock(&dfli->access_ok);
		return 0;
	}
	case DOVEFB_IOCTL_GET_FREELIST:
	{
		mutex_lock(&dfli->access_ok);

		if (copy_to_user(argp, filterBufList,
				MAX_QUEUE_NUM*sizeof(u8 *))) {
			mutex_unlock(&dfli->access_ok);
			return -EFAULT;
		}

		clearFreeBuf(filterBufList, RESET_BUF);

		mutex_unlock(&dfli->access_ok);
		return 0;
	}
	case DOVEFB_IOCTL_GET_BUFF_ADDR:
	{
		return copy_to_user(argp, &dfli->surface.videoBufferAddr,
			sizeof(struct _sVideoBufferAddr)) ? -EFAULT : 0;
	}
	case DOVEFB_IOCTL_SET_VID_OFFSET:
		mutex_lock(&dfli->access_ok);
		if (copy_from_user(&gViewPortOffset, argp,
				sizeof(gViewPortOffset))) {
			mutex_unlock(&dfli->access_ok);
			return -EFAULT;
		}

		if (check_surface(fi, -1, 0, &gViewPortOffset, 0))
			dovefb_ovly_set_par(fi);
		mutex_unlock(&dfli->access_ok);
		break;
	case DOVEFB_IOCTL_GET_VID_OFFSET:
		return copy_to_user(argp, &dfli->surface.viewPortOffset,
			sizeof(struct _sViewPortOffset)) ? -EFAULT : 0;
	case DOVEFB_IOCTL_SET_MEMORY_TOGGLE:
		break;
	case DOVEFB_IOCTL_SET_COLORKEYnALPHA:
		if (copy_from_user(&dfli->ckey_alpha, argp,
		    sizeof(struct _sColorKeyNAlpha)))
			return -EFAULT;

		dovefb_ovly_set_colorkeyalpha(dfli);
		break;
	case DOVEFB_IOCTL_GET_COLORKEYnALPHA:
		if (copy_to_user(argp, &dfli->ckey_alpha,
		    sizeof(struct _sColorKeyNAlpha)))
			return -EFAULT;
		break;
	case DOVEFB_IOCTL_SWITCH_VID_OVLY:
		if (copy_from_user(&vid_on, argp, sizeof(int)))
			return -EFAULT;
		if (0 == vid_on) {
			x = readl(dfli->reg_base + LCD_SPU_DMA_CTRL0) &
				~CFG_DMA_ENA_MASK;
			writel(x, dfli->reg_base + LCD_SPU_DMA_CTRL0);
		} else {
			x = readl(dfli->reg_base + LCD_SPU_DMA_CTRL0) |
				CFG_DMA_ENA(0x1);
			writel(x, dfli->reg_base + LCD_SPU_DMA_CTRL0);
			/* Enable VID & VSync. */
			x = readl(dfli->reg_base + SPU_IRQ_ENA) |
				DOVEFB_VID_INT_MASK | DOVEFB_VSYNC_INT_MASK;
			writel(x, dfli->reg_base + SPU_IRQ_ENA);
		}
		break;
	case DOVEFB_IOCTL_SWITCH_GRA_OVLY:
		if (copy_from_user(&gfx_on, argp, sizeof(int)))
			return -EFAULT;
		if (0 == gfx_on) {
			x = readl(dfli->reg_base + LCD_SPU_DMA_CTRL0) &
				~CFG_GRA_ENA_MASK;
			writel(x, dfli->reg_base + LCD_SPU_DMA_CTRL0);
		} else {
			x = readl(dfli->reg_base + LCD_SPU_DMA_CTRL0) |
				CFG_GRA_ENA(0x1);
			writel(x, dfli->reg_base + LCD_SPU_DMA_CTRL0);
		}
		break;
	case DOVEFB_IOCTL_GET_FBID:
		mutex_lock(&dfli->access_ok);
		if (copy_to_user(argp, &dfli->cur_fbid, sizeof(unsigned int))) {
			mutex_unlock(&dfli->access_ok);
			return -EFAULT;
		}
		mutex_unlock(&dfli->access_ok);
		break;
	case DOVEFB_IOCTL_GET_SRC_MODE:
		mutex_lock(&dfli->access_ok);
		if (copy_to_user(argp, &dfli->src_mode, sizeof(int))) {
			mutex_unlock(&dfli->access_ok);
			return -EFAULT;
		}
		mutex_unlock(&dfli->access_ok);
		break;
	case DOVEFB_IOCTL_SET_SRC_MODE:
		mutex_lock(&dfli->access_ok);
		if (copy_from_user(&dfli->src_mode, argp, sizeof(int))) {
			mutex_unlock(&dfli->access_ok);
			return -EFAULT;
		}

		if (SHM_NORMAL == dfli->src_mode) {
			int i;

			/*
			 * Recycle all video buffers.
			 */
			/* 1. Collect the buffers currently on the free list. */
			for (i = (MAX_QUEUE_NUM-1); i >= 0; i--) {
				if (freeBufList[i])
					break;
			}
			collectFreeBuf(filterBufList, freeBufList, (i));

			/* 2. Recycle the current frame into the filter list. */
			for (i = 0; i < MAX_QUEUE_NUM; i++) {
				if (!filterBufList[i])
					filterBufList[i] = (u8 *)dfli->new_addr;
			}

			/* Clear and reset the related resources. */
			clearFreeBuf(freeBufList, RESET_BUF|FREE_ENTRY);
			dfli->new_addr = 0;
			dfli->cur_fbid = 0;
			memset(dfli->fb_start, 0, dfli->fb_size);
		}

		mutex_unlock(&dfli->access_ok);
		break;
	case DOVEFB_IOCTL_GET_FBPA:
		{
		struct shm_private_info info;
		int index;

		if (copy_from_user(&info, argp,
		    sizeof(struct shm_private_info)))
			return -EFAULT;

		/* Which frame to look up. */
		index = info.fbid;

		/* Calculate the physical address of that frame. */
		info.fb_pa = (unsigned long)(dfli->fb_start_dma +
				(index * info.width * info.height * MAX_YUV_PIXEL));
		if (copy_to_user(argp, &info, sizeof(struct shm_private_info)))
			return -EFAULT;

		break;
		}
	case DOVEFB_IOCTL_NEXT_FRAME_PRESENT:
		{
		unsigned int phy_addr[3];
		mutex_lock(&dfli->access_ok);
		if (copy_from_user(&phy_addr, argp, 3*sizeof(unsigned int))) {
			mutex_unlock(&dfli->access_ok);
			return -EFAULT;
		}
		mutex_unlock(&dfli->access_ok);
		dfli->vid_ovly_phys_addr_y = phy_addr[0];
		dfli->vid_ovly_phys_addr_u = phy_addr[1];
		dfli->vid_ovly_phys_addr_v = phy_addr[2];
		break;
		}
	case DOVEFB_IOCTL_SET_INTERPOLATION_MODE:
		/*
		 * Get data from user space.
		 */
		if (copy_from_user(&interpolation, argp, sizeof(interpolation)))
			return -EFAULT;
		if ((interpolation == 0) || (interpolation == 3))
			writel(CFG_VSC_LINEAR(interpolation) |
				(readl(dfli->reg_base + SPU_IOPAD_CONTROL) &
				~CFG_VSC_LINEAR_MASK),
				dfli->reg_base + SPU_IOPAD_CONTROL);
		break;
	default:
		pr_debug("ioctl_ovly(0x%x) No match.\n", cmd);
		break;
	}

	return 0;
}
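
The DOVEFB_IOCTL_SWITCH_VID_OVLY and DOVEFB_IOCTL_SWITCH_GRA_OVLY cases above both perform the same read-modify-write sequence on LCD_SPU_DMA_CTRL0. Below is a minimal sketch of a helper that factors that pattern out; dovefb_set_reg_bits() is a hypothetical name, not something the driver provides.

static void dovefb_set_reg_bits(void __iomem *reg, u32 mask, bool on)
{
	u32 x = readl(reg);	/* read current register contents */

	if (on)
		x |= mask;	/* set the requested bits */
	else
		x &= ~mask;	/* clear them, leaving the other bits untouched */
	writel(x, reg);
}

With such a helper, each switch case reduces to one call, e.g. dovefb_set_reg_bits(dfli->reg_base + LCD_SPU_DMA_CTRL0, CFG_DMA_ENA_MASK, vid_on), assuming the enable field is a single bit so CFG_DMA_ENA(0x1) equals CFG_DMA_ENA_MASK.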
Example #26
void co_os_free_pages(void *ptr, unsigned long pages)
{
	free_pages((unsigned long)ptr, get_order(pages << PAGE_SHIFT));
}
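
For context, the allocation that co_os_free_pages() undoes would look roughly like the sketch below; co_os_alloc_pages() is an assumed counterpart used purely for illustration, and the real coLinux helper may differ in shape.

/* Assumed counterpart: allocate 'pages' pages and return a kernel
 * virtual pointer that co_os_free_pages() can later release. */
void *co_os_alloc_pages(unsigned long pages)
{
	return (void *)__get_free_pages(GFP_KERNEL,
					get_order(pages << PAGE_SHIFT));
}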
Example #27
/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	void *memory;
	unsigned long dma_mask = 0;
	u64 bus;

	if (!dev)
		dev = &fallback_dev;
	dma_mask = dev->coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = DMA_32BIT_MASK;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	/* Kludge to make it bug-to-bug compatible with i386. i386
	   uses the normal dma_mask for alloc_coherent. */
	dma_mask &= *dev->dma_mask;

	/* Why <=? Even when the mask is smaller than 4GB it is often
	   larger than 16MB and in this case we have a chance of
	   finding fitting memory in the next higher zone first. If
	   not retry with true GFP_DMA. -AK */
	if (dma_mask <= DMA_32BIT_MASK)
		gfp |= GFP_DMA32;

 again:
	memory = dma_alloc_pages(dev, gfp, get_order(size));
	if (memory == NULL)
		return NULL;

	{
		int high, mmu;
		bus = virt_to_bus(memory);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		else if (high) {
			free_pages((unsigned long)memory,
				   get_order(size));

			/* Don't use the 16MB ZONE_DMA unless absolutely
			   needed. It's better to use remapping first. */
			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}

			/* Let low level make its own zone decisions */
			gfp &= ~(GFP_DMA32|GFP_DMA);

			if (dma_ops->alloc_coherent)
				return dma_ops->alloc_coherent(dev, size,
							   dma_handle, gfp);
			return NULL;
		}

		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = virt_to_bus(memory);
			return memory;
		}
	}

	if (dma_ops->alloc_coherent) {
		free_pages((unsigned long)memory, get_order(size));
		gfp &= ~(GFP_DMA|GFP_DMA32);
		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
	}

	if (dma_ops->map_simple) {
		*dma_handle = dma_ops->map_simple(dev, memory,
					      size,
					      PCI_DMA_BIDIRECTIONAL);
		if (*dma_handle != bad_dma_address)
			return memory;
	}

	if (panic_on_overflow)
		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",size);
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
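
The free path in this same generation of the x86-64 DMA code is essentially the inverse of the function above. A minimal sketch, assuming the same dma_ops table as above (and that unmap_single may be absent, as in the nommu case):

/*
 * Sketch of the matching free path: let the low-level implementation
 * drop any IOMMU mapping it created, then return the pages that
 * dma_alloc_pages()/dma_alloc_coherent() handed out.
 */
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t bus)
{
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, bus, size, 0);
	free_pages((unsigned long)vaddr, get_order(size));
}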
Example #28
/**
 * percpu_ida_destroy - release a tag pool's resources
 * @pool: pool to free
 *
 * Frees the resources allocated by percpu_ida_init().
 */
void percpu_ida_destroy(struct percpu_ida *pool)
{
	free_percpu(pool->tag_cpu);
	free_pages((unsigned long) pool->freelist,
		   get_order(pool->nr_tags * sizeof(unsigned)));
}
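
The freelist released here is set up by percpu_ida_init(); roughly, the allocation it mirrors looks like the sketch below. percpu_ida_alloc_freelist() is a hypothetical helper name, and the real init function does more work (per-CPU state, locks).

/* Sketch: one zeroed, page-order buffer holding nr_tags unsigned tags,
 * later released by percpu_ida_destroy() via free_pages(). */
static int percpu_ida_alloc_freelist(struct percpu_ida *pool,
				     unsigned long nr_tags)
{
	pool->freelist = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
				get_order(nr_tags * sizeof(unsigned)));
	return pool->freelist ? 0 : -ENOMEM;
}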
Example #29
void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}
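
For symmetry, the allocation side is typically a single multi-page block of THREAD_SIZE bytes. A sketch assuming the common __get_free_pages() pattern (real architectures may also set up xstate and ignore the task argument, as done here):

/* Sketch of the matching allocator: a THREAD_SIZE block whose order is
 * computed the same way free_thread_info() computes it when freeing. */
struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	return (struct thread_info *)__get_free_pages(GFP_KERNEL,
						      get_order(THREAD_SIZE));
}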
Example #30
static int netvsc_destroy_buf(struct netvsc_device *net_device)
{
	struct nvsp_message *revoke_packet;
	int ret = 0;
	struct net_device *ndev = net_device->ndev;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie we sent a
	 * NvspMessage1TypeSendReceiveBuffer msg), therefore we need
	 * to send a revoke msg here.
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.revoke_recv_buf.id =
			NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and risk a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				"revoke receive buffer to netvsp\n");
			return ret;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
			   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and risk a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return ret;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		free_pages((unsigned long)net_device->recv_buf,
			get_order(net_device->recv_buf_size));
		net_device->recv_buf = NULL;
	}

	if (net_device->recv_section) {
		net_device->recv_section_cnt = 0;
		kfree(net_device->recv_section);
		net_device->recv_section = NULL;
	}

	/* Deal with the send buffer we may have set up.
	 * If we got a send section size, it means we received a
	 * SendSendBufferComplete msg (ie we sent a
	 * NvspMessage1TypeSendSendBuffer msg), therefore we need
	 * to send a revoke msg here.
	 */
	if (net_device->send_section_size) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id = 0;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If we failed here, we might as well return and
		 * have a leak rather than continue and risk a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return ret;
		}
	}
	/* Teardown the gpadl on the vsp end */
	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and risk a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return ret;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
	if (net_device->send_buf) {
		/* Free up the send buffer */
		free_pages((unsigned long)net_device->send_buf,
			   get_order(net_device->send_buf_size));
		net_device->send_buf = NULL;
	}
	kfree(net_device->send_section_map);

	return ret;
}
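
The receive and send buffers torn down above are allocated during buffer setup (netvsc_init_buf()-style code) as contiguous, page-order blocks. A hedged sketch of the receive-buffer allocation only; the real code additionally establishes the GPADL and sends the NVSP "send receive buffer" message to the host:

	/* Sketch: the allocation that netvsc_destroy_buf() later undoes
	 * with free_pages()/get_order(). */
	net_device->recv_buf =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(net_device->recv_buf_size));
	if (!net_device->recv_buf)
		return -ENOMEM;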