Example #1
/* stores the physical address (+offset) of each handle relocation entry
 * into its output location. see nvmap_pin_array for more details.
 *
 * each entry in arr (i.e., each relocation request) specifies two handles:
 * the handle to pin (pin), and the handle where the address of pin should be
 * written (patch). in pseudocode, this loop basically looks like:
 *
 * for (i = 0; i < nr; i++) {
 *     (pin, pin_offset, patch, patch_offset) = arr[i];
 *     patch[patch_offset] = address_of(pin) + pin_offset;
 * }
 */
static int nvmap_reloc_pin_array(struct nvmap_client *client,
				 const struct nvmap_pinarray_elem *arr,
				 int nr, struct nvmap_handle *gather)
{
	struct nvmap_handle *last_patch = NULL;
	unsigned int last_pfn = 0;
	pte_t **pte;
	void *addr;
	int i;

	/* reserve a kernel PTE slot and matching virtual address for
	 * temporary mappings of the pages being patched */
	pte = nvmap_alloc_pte(client->dev, &addr);
	if (IS_ERR(pte))
		return PTR_ERR(pte);

	for (i = 0; i < nr; i++) {
		struct nvmap_handle *patch;
		struct nvmap_handle *pin;
		phys_addr_t reloc_addr;
		phys_addr_t phys;
		unsigned int pfn;

		/* all of the handles are validated and referenced (get'ted)
		 * prior to calling this function, so casting is safe here */
		pin = (struct nvmap_handle *)arr[i].pin_mem;

		if (arr[i].patch_mem == (unsigned long)last_patch) {
			patch = last_patch;
		} else if (arr[i].patch_mem == (unsigned long)gather) {
			patch = gather;
		} else {
			if (last_patch)
				nvmap_handle_put(last_patch);

			patch = nvmap_get_handle_id(client, arr[i].patch_mem);
			if (!patch) {
				nvmap_free_pte(client->dev, pte);
				return -EPERM;
			}
			last_patch = patch;
		}

		if (!patch) {
			nvmap_free_pte(client->dev, pte);
			return -EPERM;
		}

		/* compute the physical address of the word inside the patch
		 * handle that will receive the relocated address */
		if (patch->heap_pgalloc) {
			unsigned int page = arr[i].patch_offset >> PAGE_SHIFT;
			phys = page_to_phys(patch->pgalloc.pages[page]);
			phys += (arr[i].patch_offset & ~PAGE_MASK);
		} else {
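
The header comment of nvmap_reloc_pin_array above describes the relocation loop in pseudocode: each array entry asks for the physical address of one handle (pin, plus pin_offset) to be written into another handle (patch, at patch_offset). The caller-side sketch below only illustrates how such an entry is populated; the stand-in struct mirrors the four fields the loop reads (patch_mem, patch_offset, pin_mem, pin_offset), and fill_reloc() is a hypothetical helper, not part of the driver.

/* Illustrative stand-in for struct nvmap_pinarray_elem, limited to the
 * four fields nvmap_reloc_pin_array() reads above. */
struct pinarray_elem_sketch {
	unsigned long patch_mem;	/* handle to patch (or the gather handle) */
	unsigned int  patch_offset;	/* byte offset of the word to patch       */
	unsigned long pin_mem;		/* handle whose address gets written      */
	unsigned int  pin_offset;	/* offset added to the pinned address     */
};

/* Hypothetical helper: request "patch[patch_offset] = address_of(pin) + pin_offset",
 * i.e. exactly one iteration of the pseudocode in the header comment. */
static void fill_reloc(struct pinarray_elem_sketch *elem,
		       unsigned long patch, unsigned int patch_offset,
		       unsigned long pin, unsigned int pin_offset)
{
	elem->patch_mem    = patch;
	elem->patch_offset = patch_offset;
	elem->pin_mem      = pin;
	elem->pin_offset   = pin_offset;
}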
Example #2
void nvmap_kunmap(struct nvmap_handle_ref *ref, unsigned int pagenum,
		  void *addr)
{
	struct nvmap_handle *h;
	phys_addr_t paddr;
	pte_t **pte;

	BUG_ON(!addr || !ref);
	h = ref->handle;

	if (nvmap_find_cache_maint_op(h->dev, h)) {
		struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
		/* acquire pin lock to ensure maintenance is done before
		 * handle is pinned */
		mutex_lock(&share->pin_lock);
		nvmap_cache_maint_ops_flush(h->dev, h);
		mutex_unlock(&share->pin_lock);
	}

	/* physical address of the page backing this mapping: a system-memory
	 * page for pgalloc handles, an offset into the carveout otherwise */
	if (h->heap_pgalloc)
		paddr = page_to_phys(h->pgalloc.pages[pagenum]);
	else
		paddr = h->carveout->base + pagenum * PAGE_SIZE;

	/* cacheable mappings need the CPU (inner) and outer caches flushed
	 * over this page before the kernel alias is torn down */
	if (h->flags != NVMAP_HANDLE_UNCACHEABLE &&
	    h->flags != NVMAP_HANDLE_WRITE_COMBINE) {
		dmac_flush_range(addr, addr + PAGE_SIZE);
		outer_flush_range(paddr, paddr + PAGE_SIZE);
	}

	pte = nvmap_vaddr_to_pte(nvmap_dev, (unsigned long)addr);
	nvmap_free_pte(nvmap_dev, pte);
	nvmap_handle_put(h);
}
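
nvmap_kunmap tears down a single-page kernel mapping of an nvmap handle: it performs any pending cache maintenance, flushes the CPU and outer caches for cacheable handles, releases the PTE backing the temporary kernel virtual address, and drops the handle reference taken by the matching map call. A minimal usage sketch follows; it assumes a counterpart nvmap_kmap(ref, pagenum) that returns the kernel virtual address, which is not shown in this listing.

/* Sketch only: zero one page of an nvmap handle through a temporary kernel
 * mapping. nvmap_kmap() is assumed to be the counterpart that creates the
 * mapping and takes the handle reference that nvmap_kunmap() drops. */
static void nvmap_zero_page(struct nvmap_handle_ref *ref, unsigned int pagenum)
{
	void *vaddr = nvmap_kmap(ref, pagenum);	/* assumed map-side API */

	if (!vaddr)
		return;

	memset(vaddr, 0, PAGE_SIZE);	/* CPU writes through the kernel alias */

	/* flushes caches if needed, frees the PTE, drops the handle reference */
	nvmap_kunmap(ref, pagenum, vaddr);
}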