/*
 * Map a per-context mmio space into the given vma.
 */
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
	u64 len = vma->vm_end - vma->vm_start;
	len = min(len, ctx->psn_size);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		return vm_iomap_memory(vma, ctx->afu->psn_phys,
				       ctx->afu->adapter->ps_size);
	}

	/* make sure there is a valid per process space for this AFU */
	if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
		pr_devel("AFU doesn't support mmio space\n");
		return -EINVAL;
	}

	/* Can't mmap until the AFU is enabled */
	if (!ctx->afu->enabled)
		return -EBUSY;

	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
		 ctx->psn_phys, ctx->pe, ctx->master);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return vm_iomap_memory(vma, ctx->psn_phys, len);
}
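A minimal userspace sketch of the matching mmap call. The device node path /dev/cxl/afu0.0m and the 64KiB mapping length are illustrative assumptions, not part of the driver above; the handler rejects mappings larger than the per-process problem state area (ctx->psn_size) and fails with EBUSY until the AFU is enabled.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical master-context device node for AFU 0.0 */
	int fd = open("/dev/cxl/afu0.0m", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* cxl_context_iomap() limits the backing region to ctx->psn_size,
	 * so a request longer than that is refused with EINVAL; the 64KiB
	 * here is a guess, not a value from the driver. */
	size_t len = 64 * 1024;
	void *psa = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	if (psa == MAP_FAILED) {
		perror("mmap");	/* EBUSY if the AFU is not yet enabled */
		close(fd);
		return 1;
	}

	/* MMIO registers in the problem state area are now reachable
	 * through *psa. */
	munmap(psa, len);
	close(fd);
	return 0;
}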
/* Sysfs API to allow mmap of the ring buffers
 * The ring buffer is allocated as contiguous memory by vmbus_open
 */
static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
			    struct bin_attribute *attr,
			    struct vm_area_struct *vma)
{
	struct vmbus_channel *channel
		= container_of(kobj, struct vmbus_channel, kobj);
	void *ring_buffer = page_address(channel->ringbuffer_page);

	if (channel->state != CHANNEL_OPENED_STATE)
		return -ENODEV;

	return vm_iomap_memory(vma, virt_to_phys(ring_buffer),
			       channel->ringbuffer_pagecount << PAGE_SHIFT);
}
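A hedged sketch of the userspace side: mmapping the channel's "ring" binary attribute through sysfs. The exact path layout (device UUID and channel id below are placeholders) is an assumption about how vmbus exposes the attribute, not something shown in the handler above.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	/* Assumed sysfs location of the "ring" bin_attribute; replace the
	 * bracketed components with a real device UUID and channel id. */
	const char *path =
		"/sys/bus/vmbus/devices/<device-uuid>/channels/<relid>/ring";
	struct stat st;

	int fd = open(path, O_RDWR);
	if (fd < 0 || fstat(fd, &st) < 0) {
		perror(path);
		return 1;
	}

	/* The attribute size should match the length the driver passes to
	 * vm_iomap_memory() (ringbuffer_pagecount << PAGE_SHIFT). */
	void *ring = mmap(NULL, st.st_size, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED) {
		perror("mmap");	/* ENODEV if the channel is not opened */
		close(fd);
		return 1;
	}

	munmap(ring, st.st_size);
	close(fd);
	return 0;
}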
static int sprdfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct sprdfb_device *dev = NULL;

	if (NULL == info) {
		printk(KERN_ERR "sprdfb: sprdfb_mmap error. (Invalid Parameter)\n");
		return -EINVAL;
	}

	dev = info->par;
	printk("sprdfb: sprdfb_mmap, vma=%p\n", vma);

	vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
	dev->ctrl->set_vma(vma);

	return vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len);
}
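For comparison, the standard fbdev userspace side of this handler: query the fixed screen info to learn smem_len, then mmap the framebuffer. This uses only the stock fbdev UAPI; /dev/fb0 is the usual node but may differ on a given board.

#include <fcntl.h>
#include <linux/fb.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	struct fb_fix_screeninfo fix;
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/fb0");
		return 1;
	}
	if (ioctl(fd, FBIOGET_FSCREENINFO, &fix) < 0) {
		perror("FBIOGET_FSCREENINFO");
		close(fd);
		return 1;
	}

	/* fix.smem_len is the same length sprdfb_mmap() hands to
	 * vm_iomap_memory() above. */
	void *fb = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, 0);
	if (fb == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	munmap(fb, fix.smem_len);
	close(fd);
	return 0;
}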
/* This function maps kernel space memory to user space memory. */
static int bridge_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	dev_dbg(bridge, "%s: vm filp %p start %lx end %lx page_prot %lx flags %lx\n",
		__func__, filp, vma->vm_start, vma->vm_end,
		pgprot_val(vma->vm_page_prot), vma->vm_flags);

	return vm_iomap_memory(vma, pdata->phys_mempool_base,
			       pdata->phys_mempool_size);
}
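A sketch of the corresponding userspace call, under the assumption that the bridge driver exposes a character device (the node name /dev/DspBridge is a guess, as is the 1MiB length). vm_iomap_memory() honours a nonzero mmap offset within the pool via vma->vm_pgoff, and rejects any mapping whose offset or length runs past phys_mempool_size.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical device node for the DSP bridge driver */
	int fd = open("/dev/DspBridge", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* A mapping that fits inside phys_mempool_size succeeds; one that
	 * runs past it makes vm_iomap_memory() return -EINVAL. The 1MiB
	 * length and zero offset here are illustrative. */
	size_t len = 1024 * 1024;
	void *pool = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
	if (pool == MAP_FAILED)
		fprintf(stderr, "mmap failed: %d\n", errno);
	else
		munmap(pool, len);

	close(fd);
	return 0;
}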
static int log_buf_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return vm_iomap_memory(vma, log_start, log_size);
}
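All five handlers lean on the same helper. As a simplified sketch of what vm_iomap_memory() does (condensed from mm/memory.c, with the overflow checks trimmed; not verbatim): it validates that the vma's offset and length fit inside the physical region, then delegates to io_remap_pfn_range(), which is why none of the callers above need their own bounds checks.

/* Simplified sketch of mm/memory.c:vm_iomap_memory(); not verbatim. */
int vm_iomap_memory_sketch(struct vm_area_struct *vma, phys_addr_t start,
			   unsigned long len)
{
	unsigned long pfn = start >> PAGE_SHIFT;
	unsigned long pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
	unsigned long vm_len = vma->vm_end - vma->vm_start;

	/* The requested offset must lie inside the backing region... */
	if (vma->vm_pgoff > pages)
		return -EINVAL;
	pfn += vma->vm_pgoff;
	pages -= vma->vm_pgoff;

	/* ...and so must the requested length. */
	if (vm_len >> PAGE_SHIFT > pages)
		return -EINVAL;

	/* Everything checks out: hand off to the raw PFN remapper. */
	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len,
				  vma->vm_page_prot);
}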