Example #1
/*
 * Given a nasid, get the physical address of the partition's reserved page
 * for that nasid. This function returns 0 on any error.
 */
static unsigned long
xpc_get_rsvd_page_pa(int nasid)
{
	enum xp_retval ret;
	u64 cookie = 0;
	unsigned long rp_pa = nasid;	/* seed with nasid */
	size_t len = 0;
	size_t buf_len = 0;
	void *buf = NULL;
	void *buf_base = NULL;
	enum xp_retval (*get_partition_rsvd_page_pa)
		(void *, u64 *, unsigned long *, size_t *) =
		xpc_arch_ops.get_partition_rsvd_page_pa;

	while (1) {

		/* !!! rp_pa will need to be _gpa on UV.
		 * ??? So do we save it into the architecture specific parts
		 * ??? of the xpc_partition structure? Do we rename this
		 * ??? function or have two versions? Rename rp_pa for UV to
		 * ??? rp_gpa?
		 */
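		/*
		 * The arch hook is iterative: as long as it returns
		 * xpNeedMoreInfo, 'len' is the buffer size the next call
		 * requires, 'rp_pa' points at data that must be pulled
		 * into that buffer first, and 'cookie' carries its state
		 * between calls.  On success, rp_pa holds the reserved
		 * page's address.
		 */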
		ret = get_partition_rsvd_page_pa(buf, &cookie, &rp_pa, &len);

		dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, "
			"address=0x%016lx, len=0x%016lx\n", ret,
			(unsigned long)cookie, rp_pa, len);

		if (ret != xpNeedMoreInfo)
			break;

		/* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
		if (L1_CACHE_ALIGN(len) > buf_len) {
			kfree(buf_base);
			buf_len = L1_CACHE_ALIGN(len);
			buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
							    &buf_base);
			if (buf_base == NULL) {
				dev_err(xpc_part, "unable to kmalloc "
					"len=0x%016lx\n", buf_len);
				ret = xpNoMemory;
				break;
			}
		}

		ret = xp_remote_memcpy(xp_pa(buf), rp_pa, buf_len);
		if (ret != xpSuccess) {
			dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
			break;
		}
	}

	kfree(buf_base);

	if (ret != xpSuccess)
		rp_pa = 0;

	dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
	return rp_pa;
}
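xpc_kmalloc_cacheline_aligned() is not shown here; the sketch below is a simplified statement of the contract the loop relies on (an assumption about the helper, not its exact implementation): the returned pointer is cacheline aligned for the remote copy, while the raw allocation is handed back through the last argument so that kfree(buf_base) releases it.

static void *
xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* over-allocate, then round the returned pointer up to a line */
	*base = kmalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL)
		return NULL;

	return (void *)L1_CACHE_ALIGN((unsigned long)*base);
}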
Example #2
/*
 * Get a copy of a portion of the remote partition's rsvd page.
 *
 * remote_rp points to a buffer that is cacheline aligned for BTE copies and
 * is large enough to contain a copy of their reserved page header and
 * part_nasids mask.
 */
enum xp_retval
xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids,
		  struct xpc_rsvd_page *remote_rp, unsigned long *remote_rp_pa)
{
	int l;
	enum xp_retval ret;

	/* get the reserved page's physical address */

	*remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
	if (*remote_rp_pa == 0)
		return xpNoRsvdPageAddr;

	/* pull over the reserved page header and part_nasids mask */
	ret = xp_remote_memcpy(xp_pa(remote_rp), *remote_rp_pa,
			       XPC_RP_HEADER_SIZE + xpc_nasid_mask_nbytes);
	if (ret != xpSuccess)
		return ret;

	if (discovered_nasids != NULL) {
		unsigned long *remote_part_nasids =
		    XPC_RP_PART_NASIDS(remote_rp);

		for (l = 0; l < xpc_nasid_mask_nlongs; l++)
			discovered_nasids[l] |= remote_part_nasids[l];
	}

	/* zero timestamp indicates the reserved page has not been set up */
	if (remote_rp->ts_jiffies == 0)
		return xpRsvdPageNotSet;

	if (XPC_VERSION_MAJOR(remote_rp->version) !=
	    XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
		return xpBadVersion;
	}

	/* check that both remote and local partids are valid for each side */
	if (remote_rp->SAL_partid < 0 ||
	    remote_rp->SAL_partid >= xp_max_npartitions ||
	    remote_rp->max_npartitions <= xp_partition_id) {
		return xpInvalidPartid;
	}

	if (remote_rp->SAL_partid == xp_partition_id)
		return xpLocalPartid;

	return xpSuccess;
}
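As context for the discovered_nasids and remote_rp parameters, a hypothetical discovery loop might look like the sketch below; the bound XPC_SKETCH_MAX_NASID, the stride of 2, and the bookkeeping are illustrative assumptions, not the driver's actual discovery code:

static void
xpc_discovery_sketch(void)
{
	int nasid;
	enum xp_retval ret;
	unsigned long remote_rp_pa;
	unsigned long *discovered_nasids;
	struct xpc_rsvd_page *remote_rp;
	void *remote_rp_base;

	/* buffer big enough for the rp header plus the part_nasids mask */
	remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
						  xpc_nasid_mask_nbytes,
						  GFP_KERNEL, &remote_rp_base);
	if (remote_rp == NULL)
		return;

	discovered_nasids = kcalloc(xpc_nasid_mask_nlongs, sizeof(long),
				    GFP_KERNEL);
	if (discovered_nasids == NULL)
		goto out;

	for (nasid = 0; nasid < XPC_SKETCH_MAX_NASID; nasid += 2) {
		ret = xpc_get_remote_rp(nasid, discovered_nasids, remote_rp,
					&remote_rp_pa);
		if (ret != xpSuccess)
			continue;	/* xpLocalPartid etc. are expected */

		/* a real caller would record remote_rp->SAL_partid here */
	}

	kfree(discovered_nasids);
out:
	kfree(remote_rp_base);
}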
Example #3
/*
 * Given a partid, get the nasids owned by that partition from the
 * remote partition's reserved page.
 */
enum xp_retval
xpc_initiate_partid_to_nasids(short partid, void *nasid_mask)
{
	struct xpc_partition *part;
	unsigned long part_nasid_pa;

	part = &xpc_partitions[partid];
	if (part->remote_rp_pa == 0)
		return xpPartitionDown;

	memset(nasid_mask, 0, xpc_nasid_mask_nbytes);

	part_nasid_pa = (unsigned long)XPC_RP_PART_NASIDS(part->remote_rp_pa);

	return xp_remote_memcpy(xp_pa(nasid_mask), part_nasid_pa,
				xpc_nasid_mask_nbytes);
}
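A caller could use it like the following hypothetical helper; the function name and the hex dump are illustrative. Note that the mask buffer only needs to be xpc_nasid_mask_nbytes long and need not be pre-zeroed, since the function memsets it itself:

static enum xp_retval
xpc_dump_partition_nasids(short partid)
{
	enum xp_retval ret;
	void *nasid_mask = kmalloc(xpc_nasid_mask_nbytes, GFP_KERNEL);

	if (nasid_mask == NULL)
		return xpNoMemory;

	ret = xpc_initiate_partid_to_nasids(partid, nasid_mask);
	if (ret == xpSuccess)
		print_hex_dump_bytes("nasids: ", DUMP_PREFIX_OFFSET,
				     nasid_mask, xpc_nasid_mask_nbytes);

	kfree(nasid_mask);
	return ret;
}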
Example #4
File: xpc_uv.c  Project: 020gzh/linux
static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions from accessing this GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable irq generation when a GRU mq operation occurs on this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq);
}
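Note that this teardown unwinds xpc_create_gru_mq_uv() (Example #5 below) in reverse order: revoke cross-partition access, release the irq and its vector mapping, drop the GRU watchlist entry, then free the pages and the descriptor. A caller's shutdown path could be as simple as this hypothetical wrapper (the name and the guard are assumptions):

static void
xpc_teardown_sketch_uv(struct xpc_gru_mq_uv *mq)
{
	/* guard against a queue that was never successfully created */
	if (!IS_ERR_OR_NULL(mq))
		xpc_destroy_gru_mq_uv(mq);
}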
Example #5
File: xpc_uv.c  Project: 020gzh/linux
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int nasid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = __alloc_pages_node(nid,
				  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				  pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable irq generation when a GRU mq operation occurs on this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				     nasid, mmr_value->vector, mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}
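The out_6 through out_0 ladder releases resources in exactly the reverse order they were acquired, the usual kernel goto-unwind idiom, so each label undoes only the steps that had already succeeded. A hypothetical init-time call site is sketched below; the global, handler, irq name, queue size, and CPU choice are all illustrative assumptions:

static struct xpc_gru_mq_uv *xpc_sketch_activate_mq;	/* assumed global */

static irqreturn_t
xpc_sketch_irq_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;	/* stub for the sketch */
}

static int
xpc_init_sketch_uv(void)
{
	/* 64 KB queue serviced on CPU 0; names and size are assumptions */
	xpc_sketch_activate_mq = xpc_create_gru_mq_uv(64 * 1024, 0,
						      "xpc_sketch_activate",
						      xpc_sketch_irq_handler);
	if (IS_ERR(xpc_sketch_activate_mq))
		return PTR_ERR(xpc_sketch_activate_mq);

	return 0;
}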