// Sub-allocates a page-aligned region from the reserved bulk RX memory.
// Bulk blocks are lazily allocated per slot; Address->size carries the
// request on entry, and Physical/Virtual are filled in on success.
// Returns FALSE if the size is not page aligned, cannot fit in one bulk
// block, or no slot can satisfy the request.
BOOLEAN CParaNdisRX::InitialAllocatePhysicalMemory(tCompletePhysicalAddress* Address)
{
    // Size of one reserved bulk block that sub-allocations are carved from.
    const ULONG ulBulkBlockSize = 1024 * 256;

    if (Address->size % PAGE_SIZE)
    {
        DPrintf(0, ("[%s] size (%d) is not page aligned\n", __FUNCTION__, Address->size));
        return FALSE;
    }
    // A request larger than one bulk block can never be satisfied; reject it
    // up front instead of exhausting every slot with blocks that cannot fit.
    if (Address->size > ulBulkBlockSize)
    {
        DPrintf(0, ("[%s] size (%d) is too large\n", __FUNCTION__, Address->size));
        return FALSE;
    }
    while (m_RxBufferIndex < ARRAYSIZE(m_ReservedRxBufferMemory))
    {
        tCompletePhysicalAddress* bulkBuffer = &m_ReservedRxBufferMemory[m_RxBufferIndex];
        if (bulkBuffer->size == 0)
        {
            // Slot not yet backed by memory: allocate its bulk block now.
            bulkBuffer->size = ulBulkBlockSize;
            if (!ParaNdis_InitialAllocatePhysicalMemory(m_Context, bulkBuffer))
            {
                DPrintf(0, ("[%s] fail to allocate memory with slot %d\n", __FUNCTION__, m_RxBufferIndex));
                // Reset the size so the slot is not mistaken for an allocated
                // block on a later call (the allocator does not clean up on
                // failure - see CParaNdisCX::Create nulling Virtual itself).
                bulkBuffer->size = 0;
                break;
            }
        }
        if (bulkBuffer->size - m_RxBufferOffset >= Address->size)
        {
            // Carve the request out of the current bulk block.
            Address->Physical.QuadPart = bulkBuffer->Physical.QuadPart + m_RxBufferOffset;
            Address->Virtual = (PCHAR)(bulkBuffer->Virtual) + m_RxBufferOffset;
            m_RxBufferOffset += Address->size;
            return TRUE;
        }
        else
        {
            // Remainder of this block is too small; move on to the next slot.
            m_RxBufferIndex++;
            m_RxBufferOffset = 0;
        }
    }
    return FALSE;
}
// Initializes the control queue object: records the adapter context and
// queue index, allocates the shared control data buffer, and creates the
// underlying virtqueue. Returns false on any failure.
bool CParaNdisCX::Create(PPARANDIS_ADAPTER Context, UINT DeviceQueueIndex)
{
    m_Context = Context;
    m_queueIndex = (u16)DeviceQueueIndex;

    bool controlDataAllocated = (ParaNdis_InitialAllocatePhysicalMemory(m_Context, &m_ControlData) != 0);
    if (!controlDataAllocated)
    {
        DPrintf(0, ("CParaNdisCX::Create - ParaNdis_InitialAllocatePhysicalMemory failed for %u\n", DeviceQueueIndex));
        // Make the failure visible to any later cleanup path.
        m_ControlData.Virtual = nullptr;
        return false;
    }

    bool publishIndices = (m_Context->bDoPublishIndices != 0);
    return m_VirtQueue.Create(DeviceQueueIndex,
                              m_Context->IODevice,
                              m_Context->MiniportHandle,
                              publishIndices);
}
// Builds one RX descriptor at initialization time: allocates the descriptor,
// its SG array and physical-page table, backs it with page-aligned memory,
// caches the indirect area location, and binds it to an NBL packet.
// Returns NULL (after freeing partial allocations) on any failure.
pRxNetDescriptor CParaNdisRX::CreateRxDescriptorOnInit()
{
    // For RX packets we allocate the following pages:
    //   1 page for the virtio header and indirect buffers array
    //   X pages needed to fit a maximal length buffer of data
    // The assumption is the virtio header and indirect buffers array fit 1 page.
    ULONG ulNumPages = m_Context->MaxPacketSize.nMaxDataSizeHwRx / PAGE_SIZE + 2;

    pRxNetDescriptor p = (pRxNetDescriptor)ParaNdis_AllocateMemory(m_Context, sizeof(*p));
    if (p == NULL)
        return NULL;
    NdisZeroMemory(p, sizeof(*p));

    p->BufferSGArray = (struct VirtIOBufferDescriptor *)
        ParaNdis_AllocateMemory(m_Context, sizeof(*p->BufferSGArray) * ulNumPages);
    if (p->BufferSGArray == NULL)
        goto error_exit;

    p->PhysicalPages = (tCompletePhysicalAddress *)
        ParaNdis_AllocateMemory(m_Context, sizeof(*p->PhysicalPages) * ulNumPages);
    if (p->PhysicalPages == NULL)
        goto error_exit;

    p->BufferSGLength = 0;
    while (ulNumPages > 0)
    {
        // Allocate the first page separately, the rest can be one contiguous block
        ULONG ulPagesToAlloc = (p->BufferSGLength == 0) ? 1 : ulNumPages;

        for (;;)
        {
            // The allocator takes the request in the size member (matching the
            // two-argument convention used everywhere else in this file); the
            // member function sub-allocates from the reserved bulk RX memory.
            p->PhysicalPages[p->BufferSGLength].size = PAGE_SIZE * ulPagesToAlloc;
            if (InitialAllocatePhysicalMemory(&p->PhysicalPages[p->BufferSGLength]))
                break;
            // Retry with half the pages
            if (ulPagesToAlloc == 1)
                goto error_exit;
            ulPagesToAlloc /= 2;
        }

        p->BufferSGArray[p->BufferSGLength].physAddr = p->PhysicalPages[p->BufferSGLength].Physical;
        p->BufferSGArray[p->BufferSGLength].length = p->PhysicalPages[p->BufferSGLength].size;

        ulNumPages -= ulPagesToAlloc;
        p->BufferSGLength++;
    }

    // First page is for the virtio header, size needs to be adjusted correspondingly.
    p->BufferSGArray[0].length = m_Context->nVirtioHeaderSize;

    // Scoped so that the earlier gotos do not jump over an initialized
    // variable (ill-formed in C++, MSVC error C2362).
    {
        // Indirect descriptors start right after the header, 8-byte aligned.
        ULONG indirectAreaOffset = ALIGN_UP(m_Context->nVirtioHeaderSize, ULONGLONG);
        // Pre-cache indirect area addresses.
        p->IndirectArea.Physical.QuadPart = p->PhysicalPages[0].Physical.QuadPart + indirectAreaOffset;
        p->IndirectArea.Virtual = RtlOffsetToPointer(p->PhysicalPages[0].Virtual, indirectAreaOffset);
        p->IndirectArea.size = PAGE_SIZE - indirectAreaOffset;
    }

    if (!ParaNdis_BindRxBufferToPacket(m_Context, p))
        goto error_exit;

    return p;

error_exit:
    // Frees the descriptor and any pages/arrays allocated so far.
    ParaNdis_FreeRxBufferDescriptor(m_Context, p);
    return NULL;
}