Example #1
/*
 * mspec_close
 *
 * Called when unmapping a device mapping. Frees all mspec pages
 * belonging to all the vmas sharing this vma_data structure.
 */
static void
mspec_close(struct vm_area_struct *vma)
{
	struct vma_data *vdata;
	int index, last_index;
	unsigned long my_page;

	vdata = vma->vm_private_data;

	if (!atomic_dec_and_test(&vdata->refcnt))
		return;

	last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
	for (index = 0; index < last_index; index++) {
		if (vdata->maddr[index] == 0)
			continue;
		/*
		 * Clear the page before sticking it back
		 * into the pool.
		 */
		my_page = vdata->maddr[index];
		vdata->maddr[index] = 0;
		if (!mspec_zero_block(my_page, PAGE_SIZE))
			uncached_free_page(my_page, 1);
		else
			printk(KERN_WARNING "mspec_close(): "
			       "failed to zero page %ld\n", my_page);
	}

	kvfree(vdata);
}
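For reference, a minimal sketch of the vma_data bookkeeping structure these handlers share (field layout assumed from this era of drivers/char/mspec.c; the exact declaration varies across kernel versions):

struct vma_data {
	atomic_t refcnt;	/* number of vmas sharing the data */
	spinlock_t lock;	/* protects maddr and count */
	unsigned long count;	/* number of pages allocated */
	unsigned long vm_start;	/* start of original vma */
	unsigned long vm_end;	/* end of original vma */
	enum mspec_page_type type; /* type of pages allocated */
	unsigned long maddr[0];	/* pages allocated (flexible array) */
};

The refcnt lets several vmas produced by fork() or a vma split share one vma_data, so only the last close actually returns the uncached pages to the pool.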
Example #2
/*
 * mspec_nopfn
 *
 * Creates an mspec page and maps it to user space.
 */
static unsigned long
mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
{
	unsigned long paddr, maddr;
	unsigned long pfn;
	int index;
	struct vma_data *vdata = vma->vm_private_data;

	index = (address - vma->vm_start) >> PAGE_SHIFT;
	maddr = (volatile unsigned long) vdata->maddr[index];
	if (maddr == 0) {
		maddr = uncached_alloc_page(numa_node_id());
		if (maddr == 0)
			return NOPFN_OOM;

		spin_lock(&vdata->lock);
		if (vdata->maddr[index] == 0) {
			vdata->count++;
			vdata->maddr[index] = maddr;
		} else {
			uncached_free_page(maddr);
			maddr = vdata->maddr[index];
		}
		spin_unlock(&vdata->lock);
	}

	if (vdata->type == MSPEC_FETCHOP)
		paddr = TO_AMO(maddr);
	else
		paddr = maddr & ~__IA64_UNCACHED_OFFSET;

	pfn = paddr >> PAGE_SHIFT;

	return pfn;
}
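A handler like this only runs once it is installed in the vma's operations table. In kernels that still had the .nopfn interface, mspec.c wired it up roughly like this (mspec_open, which takes the refcnt, is assumed):

static struct vm_operations_struct mspec_vm_ops = {
	.open = mspec_open,
	.close = mspec_close,
	.nopfn = mspec_nopfn
};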
Example #3
/*
 * mspec_close
 *
 * Called when unmapping a device mapping. Frees all mspec pages
 * belonging to the vma.
 */
static void
mspec_close(struct vm_area_struct *vma)
{
	struct vma_data *vdata;
	int i, pages, result, vdata_size;

	vdata = vma->vm_private_data;
	if (!atomic_dec_and_test(&vdata->refcnt))
		return;

	pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
	for (i = 0; i < pages; i++) {
		if (vdata->maddr[i] == 0)
			continue;
		/*
		 * Clear the page before sticking it back
		 * into the pool.
		 */
		result = mspec_zero_block(vdata->maddr[i], PAGE_SIZE);
		if (!result)
			uncached_free_page(vdata->maddr[i]);
		else
			printk(KERN_WARNING "mspec_close(): "
			       "failed to zero page %ld\n",
			       vdata->maddr[i]);
	}

	if (vdata_size <= PAGE_SIZE)
		kfree(vdata);
	else
		vfree(vdata);
}
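The kfree()/vfree() split at the end mirrors how vdata was allocated at mmap time: an array that fits in a page comes from kmalloc(), anything larger from vmalloc(). A hedged sketch of that counterpart in the driver's mmap handler:

	pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
	if (vdata_size <= PAGE_SIZE)
		vdata = kmalloc(vdata_size, GFP_KERNEL);
	else
		vdata = vmalloc(vdata_size);
	if (vdata == NULL)
		return -ENOMEM;
	memset(vdata, 0, vdata_size);
	vma->vm_private_data = vdata;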
Example #4
/*
 * mspec_fault
 *
 * Creates an mspec page and maps it to user space.
 */
static int
mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long paddr, maddr;
	unsigned long pfn;
	pgoff_t index = vmf->pgoff;
	struct vma_data *vdata = vma->vm_private_data;

	maddr = (volatile unsigned long) vdata->maddr[index];
	if (maddr == 0) {
		maddr = uncached_alloc_page(numa_node_id(), 1);
		if (maddr == 0)
			return VM_FAULT_OOM;

		spin_lock(&vdata->lock);
		if (vdata->maddr[index] == 0) {
			vdata->count++;
			vdata->maddr[index] = maddr;
		} else {
			uncached_free_page(maddr, 1);
			maddr = vdata->maddr[index];
		}
		spin_unlock(&vdata->lock);
	}

	if (vdata->type == MSPEC_FETCHOP)
		paddr = TO_AMO(maddr);
	else
		paddr = maddr & ~__IA64_UNCACHED_OFFSET;

	pfn = paddr >> PAGE_SHIFT;

	/*
	 * vm_insert_pfn can fail with -EBUSY, but in that case it will
	 * be because another thread has installed the pte first, so it
	 * is no problem.
	 */
	vm_insert_pfn(vma, vmf->address, pfn);

	return VM_FAULT_NOPAGE;
}
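Compared with the older nopfn variant above, the fault API hands the handler a ready-made page offset in vmf->pgoff, so the (address - vma->vm_start) >> PAGE_SHIFT computation disappears, and the handler hangs off the .fault hook instead. A sketch of the registration in this era (mspec_open again assumed):

static const struct vm_operations_struct mspec_vm_ops = {
	.open = mspec_open,
	.close = mspec_close,
	.fault = mspec_fault,
};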
Example #5
/*
 * Fill the partition reserved page with the information needed by
 * other partitions to discover we are alive and establish initial
 * communications.
 */
struct xpc_rsvd_page *
xpc_rsvd_page_init(void)
{
	struct xpc_rsvd_page *rp;
	AMO_t *amos_page;
	u64 rp_pa, nasid_array = 0;
	int i, ret;

	/* get the local reserved page's address */

	preempt_disable();
	rp_pa = xpc_get_rsvd_page_pa(cpuid_to_nasid(smp_processor_id()));
	preempt_enable();
	if (rp_pa == 0) {
		dev_err(xpc_part, "SAL failed to locate the reserved page\n");
		return NULL;
	}
	rp = (struct xpc_rsvd_page *) __va(rp_pa);

	if (rp->partid != sn_partition_id) {
		dev_err(xpc_part, "the reserved page's partid of %d should be "
			"%d\n", rp->partid, sn_partition_id);
		return NULL;
	}

	rp->version = XPC_RP_VERSION;

	/* establish the actual sizes of the nasid masks */
	if (rp->SAL_version == 1) {
		/* SAL_version 1 didn't set the nasids_size field */
		rp->nasids_size = 128;
	}
	xp_nasid_mask_bytes = rp->nasids_size;
	xp_nasid_mask_words = xp_nasid_mask_bytes / 8;

	/* setup the pointers to the various items in the reserved page */
	xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
	xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);
	xpc_vars = XPC_RP_VARS(rp);
	xpc_vars_part = XPC_RP_VARS_PART(rp);

	/*
	 * Before clearing xpc_vars, see if a page of AMOs had been previously
	 * allocated. If not we'll need to allocate one and set permissions
	 * so that cross-partition AMOs are allowed.
	 *
	 * The allocated AMO page needs MCA reporting to remain disabled after
	 * XPC has unloaded.  To make this work, we keep a copy of the pointer
	 * to this page (i.e., amos_page) in the struct xpc_vars structure,
	 * which is pointed to by the reserved page, and re-use that saved copy
	 * on subsequent loads of XPC. This AMO page is never freed, and its
	 * memory protections are never restricted.
	 */
	if ((amos_page = xpc_vars->amos_page) == NULL) {
		amos_page = (AMO_t *) TO_AMO(uncached_alloc_page(0));
		if (amos_page == NULL) {
			dev_err(xpc_part, "can't allocate page of AMOs\n");
			return NULL;
		}

		/*
		 * Open up AMO-R/W to cpu.  This is done for Shub 1.1 systems
		 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
		 */
		if (!enable_shub_wars_1_1()) {
			ret = sn_change_memprotect(ia64_tpa((u64) amos_page),
					PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1,
					&nasid_array);
			if (ret != 0) {
				dev_err(xpc_part, "can't change memory "
					"protections\n");
				uncached_free_page(__IA64_UNCACHED_OFFSET |
						   TO_PHYS((u64) amos_page));
				return NULL;
			}
		}
	} else if (!IS_AMO_ADDRESS((u64) amos_page)) {
		/*
		 * EFI's XPBOOT can also set amos_page in the reserved page,
		 * but it happens to leave it as an uncached physical address
		 * and we need it to be an uncached virtual, so we'll have to
		 * convert it.
		 */
		if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) {
			dev_err(xpc_part, "previously used amos_page address "
				"is bad = 0x%p\n", (void *) amos_page);
			return NULL;
		}
		amos_page = (AMO_t *) TO_AMO((u64) amos_page);
	}

	/* clear xpc_vars */
	memset(xpc_vars, 0, sizeof(struct xpc_vars));

	xpc_vars->version = XPC_V_VERSION;
	xpc_vars->act_nasid = cpuid_to_nasid(0);
	xpc_vars->act_phys_cpuid = cpu_physical_id(0);
	xpc_vars->vars_part_pa = __pa(xpc_vars_part);
	xpc_vars->amos_page_pa = ia64_tpa((u64) amos_page);
	xpc_vars->amos_page = amos_page;  /* save for next load of XPC */

	/* clear xpc_vars_part */
	memset((u64 *) xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
							XP_MAX_PARTITIONS);

	/* initialize the activate IRQ related AMO variables */
	for (i = 0; i < xp_nasid_mask_words; i++) {
		(void) xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
	}

	/* initialize the engaged remote partitions related AMO variables */
	(void) xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
	(void) xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);

	/* timestamp of when reserved page was setup by XPC */
	rp->stamp = CURRENT_TIME;

	/*
	 * This signifies to the remote partition that our reserved
	 * page is initialized.
	 */
	rp->vars_pa = __pa(xpc_vars);

	return rp;
}
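One subtlety in the error path above: uncached_alloc_page() hands back an address in the uncached kernel window, which TO_AMO() then re-maps into AMO space, so the failure path must convert the AMO alias back before it can free the page. A sketch of the round trip as this code performs it (macro behavior assumed from the sn2 headers):

	u64 uaddr = uncached_alloc_page(0);	/* uncached virtual address, 0 on failure */
	AMO_t *amos = (AMO_t *) TO_AMO(uaddr);	/* same page, AMO-space alias */

	/* to free, recover the uncached address from the alias: */
	uncached_free_page(__IA64_UNCACHED_OFFSET | TO_PHYS((u64) amos));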