Example #1
struct page *LinuxMemAreaOffsetToPage(struct LinuxMemArea *psLinuxMemArea,
				      u32 ui32ByteOffset)
{
	u32 ui32PageIndex;
	char *pui8Addr;

	switch (psLinuxMemArea->eAreaType) {
	case LINUX_MEM_AREA_ALLOC_PAGES:
		ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
		return
		    psLinuxMemArea->uData.sPageList.pvPageList[ui32PageIndex];
	case LINUX_MEM_AREA_VMALLOC:
		pui8Addr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
		pui8Addr += ui32ByteOffset;
		return vmalloc_to_page(pui8Addr);
	case LINUX_MEM_AREA_SUB_ALLOC:
		return LinuxMemAreaOffsetToPage(
			psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea,
			psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset +
				ui32ByteOffset);
	default:
		PVR_DPF(PVR_DBG_ERROR, "%s: Unsupported request for "
			 "struct page from struct LinuxMemArea with type=%s",
			 __func__,
			 LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType));
		return NULL;
	}
}
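
A minimal usage sketch built on the function above; the wrapper name is a hypothetical illustration (not part of the driver), while page_to_pfn() is the standard kernel helper:

/* Hypothetical helper: resolve the struct page backing a byte offset and
 * convert it to a page frame number. Returns 0 when the area type is
 * unsupported, mirroring the NULL return above. */
static unsigned long ExampleAreaOffsetToPfn(struct LinuxMemArea *psLinuxMemArea,
					    u32 ui32ByteOffset)
{
	struct page *psPage =
	    LinuxMemAreaOffsetToPage(psLinuxMemArea, ui32ByteOffset);

	return psPage ? page_to_pfn(psPage) : 0;
}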
Example #2
struct IMG_CPU_PHYADDR LinuxMemAreaToCpuPAddr(
					struct LinuxMemArea *psLinuxMemArea,
					u32 ui32ByteOffset)
{
	struct IMG_CPU_PHYADDR CpuPAddr;

	CpuPAddr.uiAddr = 0;

	switch (psLinuxMemArea->eAreaType) {
	case LINUX_MEM_AREA_IOREMAP:
		{
			CpuPAddr = psLinuxMemArea->uData.sIORemap.CPUPhysAddr;
			CpuPAddr.uiAddr += ui32ByteOffset;
			break;
		}
	case LINUX_MEM_AREA_EXTERNAL_KV:
		{
			if (psLinuxMemArea->uData.sExternalKV.bPhysContig) {
				CpuPAddr = SysSysPAddrToCpuPAddr(
					psLinuxMemArea->uData.sExternalKV.
						uPhysAddr.SysPhysAddr);
				CpuPAddr.uiAddr += ui32ByteOffset;
			} else {
				u32 ui32PageIndex =
					PHYS_TO_PFN(ui32ByteOffset);
				struct IMG_SYS_PHYADDR SysPAddr =
					psLinuxMemArea->uData.sExternalKV.
						uPhysAddr.pSysPhysAddr[ui32PageIndex];

				CpuPAddr = SysSysPAddrToCpuPAddr(SysPAddr);
				CpuPAddr.uiAddr +=
				    ADDR_TO_PAGE_OFFSET(ui32ByteOffset);
			}
			break;
		}
	case LINUX_MEM_AREA_IO:
		{
			CpuPAddr = psLinuxMemArea->uData.sIO.CPUPhysAddr;
			CpuPAddr.uiAddr += ui32ByteOffset;
			break;
		}
	case LINUX_MEM_AREA_VMALLOC:
		{
			char *pCpuVAddr;
			pCpuVAddr = (char *)
				psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
			pCpuVAddr += ui32ByteOffset;
			CpuPAddr.uiAddr = VMallocToPhys(pCpuVAddr);
			break;
		}
	case LINUX_MEM_AREA_ALLOC_PAGES:
		{
			struct page *page;
			u32 ui32PageIndex = PHYS_TO_PFN(ui32ByteOffset);
			page = psLinuxMemArea->
				uData.sPageList.pvPageList[ui32PageIndex];
			CpuPAddr.uiAddr = page_to_phys(page);
			CpuPAddr.uiAddr += ADDR_TO_PAGE_OFFSET(ui32ByteOffset);
			break;
		}
	case LINUX_MEM_AREA_SUB_ALLOC:
		{
			CpuPAddr = OSMemHandleToCpuPAddr(
				psLinuxMemArea->uData.sSubAlloc.
					psParentLinuxMemArea,
				psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset +
					ui32ByteOffset);
			break;
		}
	default:
		PVR_DPF(PVR_DBG_ERROR,
			 "%s: Unknown struct LinuxMemArea type (%d)\n",
			 __func__, psLinuxMemArea->eAreaType);
	}

	PVR_ASSERT(CpuPAddr.uiAddr);
	return CpuPAddr;
}
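
Both examples split a byte offset with PHYS_TO_PFN() and ADDR_TO_PAGE_OFFSET(): applied to an offset, the former yields the page index within the area and the latter the remainder inside that page. A minimal sketch of plausible definitions, assuming the kernel's PAGE_SHIFT/PAGE_SIZE; the driver's own header supplies the real ones:

/* Assumed definitions for illustration only. */
#define PHYS_TO_PFN(phys)	  ((phys) >> PAGE_SHIFT)
#define ADDR_TO_PAGE_OFFSET(addr) (((unsigned long)(addr)) & (PAGE_SIZE - 1))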
Example #3
File: v2v.c  Project: OpenXT/xc-windows
NTSTATUS
v2v_listen(const char *xenbus_prefix, struct v2v_channel **channel,
           unsigned prod_ring_page_order, unsigned cons_ring_page_order,
           struct v2v_async *async_values)
{
    NTSTATUS status = STATUS_SUCCESS;
    unsigned prod_ring_size = PAGE_SIZE << prod_ring_page_order;
    unsigned cons_ring_size = PAGE_SIZE << cons_ring_page_order;
    struct v2v_channel *chan;
    xenbus_transaction_t xbt = {0};
    BOOLEAN xbt_pending = FALSE;
    PHYSICAL_ADDRESS pa;
    unsigned x;
    unsigned xen_receive_port, xen_send_port;
    uint32_t xen_gref;

    XM_ASSERT(channel != NULL);
    XM_ASSERT(xenbus_prefix != NULL);

    if (prod_ring_page_order > MAX_RING_PAGE_ORDER ||
        cons_ring_page_order > MAX_RING_PAGE_ORDER)
        return STATUS_INVALID_PARAMETER;

    if (async_values && 
       (!async_values->receive_dpc || !async_values->send_dpc))
        return STATUS_INVALID_PARAMETER;

    *channel = NULL;

    if (!xenbus_await_initialisation())
        return STATUS_NO_SUCH_DEVICE;

    chan = v2v_make_channel(xenbus_prefix, async_values);
    if (!chan)
        return STATUS_NO_MEMORY;

    chan->is_temple = TRUE;

    chan->prod_sring = ExAllocatePoolWithTag(NonPagedPool, prod_ring_size, V2V_TAG);
    chan->cons_sring = ExAllocatePoolWithTag(NonPagedPool, cons_ring_size, V2V_TAG);
    chan->control = ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, V2V_TAG);
    if (!chan->prod_sring || !chan->cons_sring || !chan->control)
        goto err_nomem;

    RtlZeroMemory(chan->prod_sring, prod_ring_size);
    RtlZeroMemory((void *)chan->cons_sring, cons_ring_size);
    RtlZeroMemory(chan->control, PAGE_SIZE);

    chan->nr_prod_ring_pages = 1 << prod_ring_page_order;
    chan->nr_cons_ring_pages = 1 << cons_ring_page_order;

    /* Pre-allocate, in a grant cache, the grant refs we will need below:
       one per ring page on each side, plus one for the control page. */
    chan->u.temple.grant_cache =
        GnttabAllocCache(chan->nr_prod_ring_pages + chan->nr_cons_ring_pages + 1);
    if (!chan->u.temple.grant_cache)
        goto err_nomem;

    v2v_nc2_attach_rings_temple(&chan->nc2_rings,
                                chan->cons_sring,
                                cons_ring_size,
                                chan->prod_sring,
                                prod_ring_size,
                                chan->control);

    for (;;) {
        xenbus_transaction_start(&xbt);        
        xbt_pending = TRUE;
        
        status = v2v_connect_channel_xenbus(chan, xbt);
        if (!NT_SUCCESS(status))
            goto err;        

        for (x = 0; x < 1u << prod_ring_page_order; x++) {
            pa = MmGetPhysicalAddress((void *)((ULONG_PTR)chan->prod_sring + x * PAGE_SIZE));
            chan->u.temple.prod_grefs[x] =
                GnttabGrantForeignAccessCache(chan->peer_domid,
                                              PHYS_TO_PFN(pa),
                                              GRANT_MODE_RO,
                                              chan->u.temple.grant_cache);
            XM_ASSERT(!is_null_GRANT_REF(chan->u.temple.prod_grefs[x]));

            status = v2v_write_grantref(chan, xbt, x, TRUE);
            if (!NT_SUCCESS(status))          
                goto err;
        }

        for (x = 0; x < 1u << cons_ring_page_order; x++) {
            pa = MmGetPhysicalAddress((void *)((ULONG_PTR)chan->cons_sring + x * PAGE_SIZE));
            chan->u.temple.cons_grefs[x] =
                GnttabGrantForeignAccessCache(chan->peer_domid,
                                              PHYS_TO_PFN(pa),
                                              GRANT_MODE_RW,
                                              chan->u.temple.grant_cache);
            XM_ASSERT(!is_null_GRANT_REF(chan->u.temple.cons_grefs[x]));

            status = v2v_write_grantref(chan, xbt, x, FALSE);
            if (!NT_SUCCESS(status))
                goto err;
        }

        pa = MmGetPhysicalAddress((void *)((ULONG_PTR)chan->control));
        chan->u.temple.control_gref =
            GnttabGrantForeignAccessCache(chan->peer_domid,
                                          PHYS_TO_PFN(pa),
                                          GRANT_MODE_RW,
                                          chan->u.temple.grant_cache);
        XM_ASSERT(!is_null_GRANT_REF(chan->u.temple.control_gref));

        chan->receive_evtchn_port = 
            EvtchnAllocUnboundDpc(chan->peer_domid,
                                  (chan->is_sync ? v2v_dpc : async_values->receive_dpc),
                                  (chan->is_sync ? &chan->s.sync.receive_event : async_values->receive_ctx));
        if (is_null_EVTCHN_PORT(chan->receive_evtchn_port)) {
            status = STATUS_INSUFFICIENT_RESOURCES;
            goto err;
        }
        xen_receive_port = xen_EVTCHN_PORT(chan->receive_evtchn_port);

        chan->send_evtchn_port = 
            EvtchnAllocUnboundDpc(chan->peer_domid,
                                  (chan->is_sync ? v2v_dpc : async_values->send_dpc),
                                  (chan->is_sync ? &chan->s.sync.send_event : async_values->send_ctx));
        if (is_null_EVTCHN_PORT(chan->send_evtchn_port)) {
            status = STATUS_INSUFFICIENT_RESOURCES;
            goto err;
        }
        xen_send_port = xen_EVTCHN_PORT(chan->send_evtchn_port);

        xen_gref = xen_GRANT_REF(chan->u.temple.control_gref);
        status = 
            v2v_xenstore_scatter(xbt, chan->local_prefix,
                                 "prod-order", xenstore_scatter_type_int,
                                     prod_ring_page_order,
                                 "cons-order", xenstore_scatter_type_int,
                                     cons_ring_page_order,
                                 "control-gref", xenstore_scatter_type_grant_ref,
                                     xen_gref,
                                 "prod-evtchn",xenstore_scatter_type_evtchn_port,
                                     xen_send_port,
                                 "cons-evtchn",xenstore_scatter_type_evtchn_port,
                                     xen_receive_port,
                                 NULL);
        if (!NT_SUCCESS(status))
            goto err;

        status = v2v_change_local_state(chan, xbt, v2v_state_listening);
        if (!NT_SUCCESS(status))
            goto err;

        status = xenbus_transaction_end(xbt, 0);
        xbt_pending = FALSE;
        if (NT_SUCCESS(status))
            break;
        if (status != STATUS_RETRY)
            goto err;

        /* cleanup for retry */
        for (x = 0; x < 1u << prod_ring_page_order; x++) {
            GnttabEndForeignAccessCache(chan->u.temple.prod_grefs[x],
                                        chan->u.temple.grant_cache);
        }
        RtlZeroMemory(chan->u.temple.prod_grefs, sizeof(chan->u.temple.prod_grefs));

        for (x = 0; x < 1u << cons_ring_page_order; x++) {
            GnttabEndForeignAccessCache(chan->u.temple.cons_grefs[x],
                                        chan->u.temple.grant_cache);
        }
        RtlZeroMemory(chan->u.temple.cons_grefs, sizeof(chan->u.temple.cons_grefs));

        GnttabEndForeignAccessCache(chan->u.temple.control_gref,
                                    chan->u.temple.grant_cache);
        chan->u.temple.control_gref = null_GRANT_REF();

        EvtchnClose(chan->receive_evtchn_port);
        chan->receive_evtchn_port = null_EVTCHN_PORT();
        EvtchnClose(chan->send_evtchn_port);
        chan->send_evtchn_port = null_EVTCHN_PORT();

        xenbus_unregister_watch(chan->remote_state_watch);
        chan->remote_state_watch = NULL;
        ExFreePoolWithTag(chan->remote_prefix, V2V_TAG);
        chan->remote_prefix = NULL;
    }

    *channel = chan;

    return STATUS_SUCCESS;

err_nomem:
    status = STATUS_NO_MEMORY;
err:
    if (xbt_pending)
        xenbus_transaction_end(xbt, 1);
    /* since the channel has never been connected here, it is safe 
       to free any temple resources that may have been allocated in 
       this routine */
    v2v_destroy_channel(chan, TRUE);
    return status;
}
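
A hedged caller sketch for v2v_listen(): the xenbus prefix is made up, and the v2v_disconnect() teardown is an assumption about the surrounding API rather than something shown in this file.

/* Hypothetical caller: listen with single-page rings (order 0) in
   synchronous mode (async_values == NULL). The prefix string and the
   v2v_disconnect() teardown are assumptions, not verified API. */
static NTSTATUS example_listen(void)
{
    struct v2v_channel *chan;
    NTSTATUS status;

    status = v2v_listen("data/v2v/example", &chan, 0, 0, NULL);
    if (!NT_SUCCESS(status))
        return status;

    /* ... wait for the peer to connect and exchange messages ... */

    v2v_disconnect(chan);
    return STATUS_SUCCESS;
}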