// Read procedure of the device
//
// IRP_MJ_READ dispatch handler for both SeLow device types:
//  - basic device:   fills the caller's buffer with an SL_ADAPTER_INFO_LIST
//                    snapshot of the global adapter list;
//  - adapter device: drains up to SL_MAX_PACKET_EXCHANGE queued received
//                    packets into the caller's exchange buffer.
// Completes the IRP itself and returns the same status stored in it.
NTSTATUS SlDeviceReadProc(DEVICE_OBJECT *device_object, IRP *irp)
{
	SL_DEVICE *dev = *((SL_DEVICE **)device_object->DeviceExtension);
	NTSTATUS ret = STATUS_UNSUCCESSFUL;
	UINT ret_size = 0;
	IO_STACK_LOCATION *irp_stack = IoGetCurrentIrpStackLocation(irp);

	if (dev->IsBasicDevice)
	{
		// Return the adapter list in the case of basic device
		if (irp_stack->Parameters.Read.Length >= sizeof(SL_ADAPTER_INFO_LIST))
		{
			SL_ADAPTER_INFO_LIST *dst = irp->UserBuffer;
			if (dst != NULL)
			{
				MDL *mdl;

				// Lock the user buffer down while we write the list into it.
				// NOTE(review): MmProbeAndLockPages raises an exception on an
				// invalid user buffer and there is no __try/__except here —
				// confirm the buffer is trusted at this point.
				mdl = IoAllocateMdl(dst, irp_stack->Parameters.Read.Length, false, false, NULL);
				if (mdl != NULL)
				{
					MmProbeAndLockPages(mdl, KernelMode, IoWriteAccess);
				}
				// NOTE(review): if IoAllocateMdl failed, the user buffer is
				// still written below without being locked — confirm this
				// best-effort behavior is intentional.

				SlZero(dst, sizeof(SL_ADAPTER_INFO_LIST));
				dst->Signature = SL_SIGNATURE;
				dst->SeLowVersion = SL_VER;
				// NOTE(review): magic values 8 / 1 — their meaning is not
				// visible in this file; verify against the user-mode reader.
				dst->EnumCompleted = sl->IsEnumCompleted ? 8 : 1;

				// Snapshot the adapter list under its lock.
				SlLockList(sl->AdapterList);
				{
					UINT i;
					dst->NumAdapters = MIN(SL_LIST_NUM(sl->AdapterList), SL_MAX_ADAPTER_INFO_LIST_ENTRY);
					for (i = 0;i < dst->NumAdapters;i++)
					{
						SL_ADAPTER *a = SL_LIST_DATA(sl->AdapterList, i);
						SL_ADAPTER_INFO *d = &dst->Adapters[i];
						d->MtuSize = a->MtuSize;
						SlCopy(d->MacAddress, a->MacAddress, 6);
						SlCopy(d->AdapterId, a->AdapterId, sizeof(a->AdapterId));
						// NOTE(review): strcpy is unbounded; assumes
						// a->FriendlyName always fits in d->FriendlyName —
						// verify both buffer sizes.
						strcpy(d->FriendlyName, a->FriendlyName);
					}
				}
				SlUnlockList(sl->AdapterList);

				// NOTE(review): reports sizeof(SL_ADAPTER_INFO), not
				// sizeof(SL_ADAPTER_INFO_LIST), as the byte count — confirm
				// callers expect this.
				ret_size = sizeof(SL_ADAPTER_INFO);
				ret = STATUS_SUCCESS;

				if (mdl != NULL)
				{
					MmUnlockPages(mdl);
					IoFreeMdl(mdl);
				}
			}
		}
	}
	else
	{
		// Adapter device
		SL_FILE *f = irp_stack->FileObject->FsContext;
		if (irp_stack->Parameters.Read.Length == SL_EXCHANGE_BUFFER_SIZE)
		{
			UCHAR *buf = irp->UserBuffer;
			if (dev->Halting || f->Adapter->Halt || buf == NULL)
			{
				// Halting
			}
			else
			{
				UINT num = 0;
				bool left = true;
				MDL *mdl;

				mdl = IoAllocateMdl(buf, SL_EXCHANGE_BUFFER_SIZE, false, false, NULL);
				if (mdl != NULL)
				{
					MmProbeAndLockPages(mdl, KernelMode, IoWriteAccess);
				}

				// Lock the receive queue
				SlLock(f->RecvLock);
				{
					// Pop packets off the singly-linked receive queue and copy
					// them into the exchange buffer until it is full or the
					// queue is empty.  "left" reports whether packets remain.
					while (true)
					{
						SL_PACKET *q;
						if (num >= SL_MAX_PACKET_EXCHANGE)
						{
							if (f->RecvPacketHead == NULL)
							{
								left = false;
							}
							break;
						}
						q = f->RecvPacketHead;
						if (q != NULL)
						{
							f->RecvPacketHead = f->RecvPacketHead->Next;
							q->Next = NULL;
							f->NumRecvPackets--;
							if (f->RecvPacketHead == NULL)
							{
								f->RecvPacketTail = NULL;
							}
						}
						else
						{
							left = false;
							break;
						}
						SL_SIZE_OF_PACKET(buf, num) = q->Size;
						SlCopy(SL_ADDR_OF_PACKET(buf, num), q->Data, q->Size);
						num++;
						SlFree(q);
					}
				}
				SlUnlock(f->RecvLock);

				if (mdl != NULL)
				{
					MmUnlockPages(mdl);
					IoFreeMdl(mdl);
				}

				// NOTE(review): the header fields are written after the pages
				// were unlocked above — confirm this ordering is intentional.
				SL_NUM_PACKET(buf) = num;
				SL_LEFT_FLAG(buf) = left;

				// Keep the event signaled while packets remain so the reader
				// comes back immediately; reset it when the queue is drained.
				if (left == false)
				{
					SlReset(f->Event);
				}
				else
				{
					SlSet(f->Event);
				}
				ret = STATUS_SUCCESS;
				ret_size = SL_EXCHANGE_BUFFER_SIZE;
			}
		}
	}

	irp->IoStatus.Status = ret;
	irp->IoStatus.Information = ret_size;
	IoCompleteRequest(irp, IO_NO_INCREMENT);
	return ret;
}
// Read length bytes at logical address addr into buf by issuing one read IRP
// per chunk stripe and returning the data from the first stripe that
// succeeds.
//
// Returns STATUS_SUCCESS on success; otherwise the first stripe error
// encountered, STATUS_INSUFFICIENT_RESOURCES on allocation failure, or
// STATUS_INTERNAL_ERROR for internal inconsistencies.
static NTSTATUS STDCALL read_data(device_extension* Vcb, UINT64 addr, UINT32 length, UINT8* buf) {
    CHUNK_ITEM* ci;
    CHUNK_ITEM_STRIPE* cis;
    read_data_context* context;
    UINT64 i, offset;
    NTSTATUS Status;
    device** devices;

    // FIXME - make this work with RAID (only SINGLE/DUP layouts are handled)

    if (Vcb->log_to_phys_loaded) {
        chunk* c = get_chunk_from_address(Vcb, addr);

        if (!c) {
            ERR("get_chunk_from_address failed\n");
            return STATUS_INTERNAL_ERROR;
        }

        ci = c->chunk_item;
        offset = c->offset;
        devices = c->devices;
    } else {
        // BUGFIX: ci, offset and devices were previously used uninitialized
        // when the logical-to-physical mapping had not been loaded yet.
        ERR("logical-to-physical mapping not loaded\n");
        return STATUS_INTERNAL_ERROR;
    }

    // The stripe array immediately follows the CHUNK_ITEM header.
    cis = (CHUNK_ITEM_STRIPE*)&ci[1];

    context = ExAllocatePoolWithTag(NonPagedPool, sizeof(read_data_context), ALLOC_TAG);
    if (!context) {
        ERR("out of memory\n");
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    RtlZeroMemory(context, sizeof(read_data_context));
    KeInitializeEvent(&context->Event, NotificationEvent, FALSE);

    context->stripes = ExAllocatePoolWithTag(NonPagedPool, sizeof(read_data_stripe) * ci->num_stripes, ALLOC_TAG);
    if (!context->stripes) {
        ERR("out of memory\n");
        ExFreePool(context); // BUGFIX: context used to leak on this path
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    RtlZeroMemory(context->stripes, sizeof(read_data_stripe) * ci->num_stripes);

    context->buflen = length;
    context->num_stripes = ci->num_stripes;

    // FIXME - for RAID, check beforehand whether there's enough devices to satisfy request

    // Build one read IRP per available stripe, each reading into its own
    // scratch buffer.
    for (i = 0; i < ci->num_stripes; i++) {
        PIO_STACK_LOCATION IrpSp;

        if (!devices[i]) {
            context->stripes[i].status = ReadDataStatus_MissingDevice;
            context->stripes[i].buf = NULL;
        } else {
            context->stripes[i].context = (struct read_data_context*)context;
            context->stripes[i].buf = ExAllocatePoolWithTag(NonPagedPool, length, ALLOC_TAG);

            if (!context->stripes[i].buf) {
                ERR("out of memory\n");
                Status = STATUS_INSUFFICIENT_RESOURCES;
                goto exit;
            }

            context->stripes[i].Irp = IoAllocateIrp(devices[i]->devobj->StackSize, FALSE);

            if (!context->stripes[i].Irp) {
                ERR("IoAllocateIrp failed\n");
                Status = STATUS_INSUFFICIENT_RESOURCES;
                goto exit;
            }

            IrpSp = IoGetNextIrpStackLocation(context->stripes[i].Irp);
            IrpSp->MajorFunction = IRP_MJ_READ;

            // Hand the scratch buffer to the target device in whatever I/O
            // transfer style it expects.
            if (devices[i]->devobj->Flags & DO_BUFFERED_IO) {
                FIXME("FIXME - buffered IO\n");
            } else if (devices[i]->devobj->Flags & DO_DIRECT_IO) {
                context->stripes[i].Irp->MdlAddress = IoAllocateMdl(context->stripes[i].buf, length, FALSE, FALSE, NULL);
                if (!context->stripes[i].Irp->MdlAddress) {
                    ERR("IoAllocateMdl failed\n");
                    Status = STATUS_INSUFFICIENT_RESOURCES;
                    goto exit;
                }

                MmProbeAndLockPages(context->stripes[i].Irp->MdlAddress, KernelMode, IoWriteAccess);
            } else {
                context->stripes[i].Irp->UserBuffer = context->stripes[i].buf;
            }

            IrpSp->Parameters.Read.Length = length;
            // Translate the logical address to the stripe's physical offset.
            IrpSp->Parameters.Read.ByteOffset.QuadPart = addr - offset + cis[i].offset;

            context->stripes[i].Irp->UserIosb = &context->stripes[i].iosb;

            IoSetCompletionRoutine(context->stripes[i].Irp, read_data_completion, &context->stripes[i], TRUE, TRUE, TRUE);

            context->stripes[i].status = ReadDataStatus_Pending;
        }
    }

    for (i = 0; i < ci->num_stripes; i++) {
        if (context->stripes[i].status != ReadDataStatus_MissingDevice) {
            IoCallDriver(devices[i]->devobj, context->stripes[i].Irp);
        }
    }

    // NOTE(review): if every stripe is MissingDevice, nothing ever signals
    // context->Event and this wait never returns — confirm callers guarantee
    // at least one device is present.
    KeWaitForSingleObject(&context->Event, Executive, KernelMode, FALSE, NULL);

    // FIXME - if checksum error, write good data over bad

    // check if any of the stripes succeeded

    for (i = 0; i < ci->num_stripes; i++) {
        if (context->stripes[i].status == ReadDataStatus_Success) {
            RtlCopyMemory(buf, context->stripes[i].buf, length);
            Status = STATUS_SUCCESS;
            goto exit;
        }
    }

    // failing that, return the first error we encountered

    for (i = 0; i < ci->num_stripes; i++) {
        if (context->stripes[i].status == ReadDataStatus_Error) {
            Status = context->stripes[i].iosb.Status;
            goto exit;
        }
    }

    // if we somehow get here, return STATUS_INTERNAL_ERROR

    Status = STATUS_INTERNAL_ERROR;

exit:
    // Tear down every stripe's IRP, MDL and scratch buffer.  An allocated
    // Irp implies devices[i] was non-NULL, so the flag test is safe.
    for (i = 0; i < ci->num_stripes; i++) {
        if (context->stripes[i].Irp) {
            if (devices[i]->devobj->Flags & DO_DIRECT_IO) {
                MmUnlockPages(context->stripes[i].Irp->MdlAddress);
                IoFreeMdl(context->stripes[i].Irp->MdlAddress);
            }

            IoFreeIrp(context->stripes[i].Irp);
        }

        if (context->stripes[i].buf)
            ExFreePool(context->stripes[i].buf);
    }

    ExFreePool(context->stripes);
    ExFreePool(context);

    return Status;
}
// Dispatch table for control NTSTATUS NeoNdisDispatch(DEVICE_OBJECT *DeviceObject, IRP *Irp) { NTSTATUS status; IO_STACK_LOCATION *stack; void *buf; BOOL ok; status = STATUS_SUCCESS; if (ctx == NULL) { return NDIS_STATUS_FAILURE; } // Get the IRP stack stack = IoGetCurrentIrpStackLocation(Irp); // Initialize the number of bytes Irp->IoStatus.Information = 0; Irp->IoStatus.Status = STATUS_SUCCESS; buf = Irp->UserBuffer; if (ctx->Halting != FALSE) { // Device driver is terminating Irp->IoStatus.Information = STATUS_UNSUCCESSFUL; IoCompleteRequest(Irp, IO_NO_INCREMENT); return STATUS_UNSUCCESSFUL; } // Branch to each operation switch (stack->MajorFunction) { case IRP_MJ_CREATE: // Device is opened if (NeoNdisOnOpen(Irp, stack) == FALSE) { Irp->IoStatus.Status = STATUS_UNSUCCESSFUL; status = STATUS_UNSUCCESSFUL; } break; case IRP_MJ_CLOSE: // Device is closed if (NeoNdisOnClose(Irp, stack) == FALSE) { Irp->IoStatus.Status = STATUS_UNSUCCESSFUL; status = STATUS_UNSUCCESSFUL; } break; case IRP_MJ_READ: #ifndef WIN9X // Read (Reading of the received packet) ok = false; if (buf != NULL) { if (ctx->Opened && ctx->Inited) { if (stack->Parameters.Read.Length == NEO_EXCHANGE_BUFFER_SIZE) { // Address check MDL *mdl = IoAllocateMdl(buf, NEO_EXCHANGE_BUFFER_SIZE, false, false, NULL); if (mdl != NULL) { MmProbeAndLockPages(mdl, KernelMode, IoWriteAccess); } if (NeoIsKernelAddress(buf) == FALSE) { // Read NeoRead(buf); Irp->IoStatus.Information = NEO_EXCHANGE_BUFFER_SIZE; ok = true; } if (mdl != NULL) { MmUnlockPages(mdl); IoFreeMdl(mdl); } } } } if (ok == FALSE) { // An error occurred Irp->IoStatus.Status = STATUS_UNSUCCESSFUL; status = STATUS_UNSUCCESSFUL; } #endif // WIN9X break; case IRP_MJ_WRITE: #ifndef WIN9X // Write (Writing of a transmission packet) ok = false; if (buf != NULL) { if (ctx->Opened && ctx->Inited) { if (stack->Parameters.Write.Length == NEO_EXCHANGE_BUFFER_SIZE) { // Address check MDL *mdl = IoAllocateMdl(buf, NEO_EXCHANGE_BUFFER_SIZE, false, false, NULL); 
if (mdl != NULL) { MmProbeAndLockPages(mdl, KernelMode, IoReadAccess); } if (NeoIsKernelAddress(buf) == FALSE) { // Write NeoWrite(buf); Irp->IoStatus.Information = stack->Parameters.Write.Length; ok = true; } if (mdl != NULL) { MmUnlockPages(mdl); IoFreeMdl(mdl); } } } } if (ok == FALSE) { // An error occurred Irp->IoStatus.Status = STATUS_UNSUCCESSFUL; status = STATUS_UNSUCCESSFUL; } break; #endif // WIN9X case IRP_MJ_DEVICE_CONTROL: #ifdef WIN9X // IO Control switch (stack->Parameters.DeviceIoControl.IoControlCode) { case NEO_IOCTL_SET_EVENT: // Specify a event if (Irp->AssociatedIrp.SystemBuffer == NULL || stack->Parameters.DeviceIoControl.InputBufferLength != sizeof(DWORD)) { // An error occurred Irp->IoStatus.Status = STATUS_UNSUCCESSFUL; } else { DWORD value = *((DWORD *)Irp->AssociatedIrp.SystemBuffer); ctx->Event = NeoCreateWin9xEvent(value); Irp->IoStatus.Information = sizeof(DWORD); } break; case NEO_IOCTL_PUT_PACKET: // Write a packet ok = false; buf = Irp->AssociatedIrp.SystemBuffer; if (buf != NULL) { if (stack->Parameters.DeviceIoControl.InputBufferLength == NEO_EXCHANGE_BUFFER_SIZE) { // Write NeoWrite(buf); Irp->IoStatus.Information = NEO_EXCHANGE_BUFFER_SIZE; ok = true; } } if (ok == false) { // An error occurred Irp->IoStatus.Status = STATUS_UNSUCCESSFUL; status = STATUS_UNSUCCESSFUL; } break; case NEO_IOCTL_GET_PACKET: // Get the packet ok = false; buf = Irp->AssociatedIrp.SystemBuffer; if (buf != NULL) { if (stack->Parameters.DeviceIoControl.OutputBufferLength == NEO_EXCHANGE_BUFFER_SIZE) { // Read NeoRead(buf); Irp->IoStatus.Information = NEO_EXCHANGE_BUFFER_SIZE; ok = true; } } if (ok == false) { // An error occurred Irp->IoStatus.Status = STATUS_UNSUCCESSFUL; status = STATUS_UNSUCCESSFUL; } break; } #endif // WIN9X break; } IoCompleteRequest(Irp, IO_NO_INCREMENT); return STATUS_SUCCESS; }
//
// Allocate the realtime audio buffer and the hardware position register for
// this pin, lock both down and map them into system address space so they
// can be accessed in an arbitrary thread context (e.g. the timer DPC), and
// compute m_FifoSizeInFrames from the reported hardware latency.
// On any failure the pin is closed and an error status returned.
//
NTSTATUS RtAudioPin::InitRtBuffer(ULONG size)
{
    KSRTAUDIO_BUFFER_PROPERTY_WITH_NOTIFICATION RtAudioProperty = { 0 };
    // NOTE(review): RtNotificationProperty is never used in this function.
    KSRTAUDIO_NOTIFICATION_EVENT_PROPERTY RtNotificationProperty = { 0 };
    KSRTAUDIO_HWREGISTER_PROPERTY HwRegProperty = { 0 };
    KSRTAUDIO_HWREGISTER HwRegister = { 0 };
    IO_STATUS_BLOCK StatusBlock = { 0 };
    KSPROPERTY KsProperty;
    KSRTAUDIO_HWLATENCY Latency = { 0 };
    NTSTATUS status;

    PAGED_CODE();

    ASSERT(m_BufferMdl == NULL);
    ASSERT(m_PositionPointerMdl == NULL);

    //
    // Allocate a realtime audio buffer
    //
    RtAudioProperty.Property.Set = KSPROPSETID_RtAudio;
    RtAudioProperty.Property.Id = KSPROPERTY_RTAUDIO_BUFFER_WITH_NOTIFICATION;
    RtAudioProperty.Property.Flags = KSPROPERTY_TYPE_GET;
    RtAudioProperty.BaseAddress = NULL;
    RtAudioProperty.RequestedBufferSize = size;
    RtAudioProperty.NotificationCount = 2;
    status = SendIoctl(IOCTL_KS_PROPERTY, &RtAudioProperty, sizeof(RtAudioProperty), &m_RtBuffer, sizeof(m_RtBuffer), &StatusBlock, TRUE);
    ASSERT(NT_SUCCESS(status));
    ASSERT(NT_SUCCESS(StatusBlock.Status));
    ASSERT(StatusBlock.Information == sizeof(m_RtBuffer));
    if (!NT_SUCCESS(status))
    {
        RtlZeroMemory(&m_RtBuffer, sizeof(m_RtBuffer));
        ClosePin();
        return status;
    }

    //
    // Portcls.sys assumes that the RtAudio buffer will be accessed via user
    // mode and we need a kernel mode mapping (a.k.a. 'system address') in order to access the buffer
    // in an arbitrary thread context via the timer DPC.
    //
    if (m_RtBuffer.BufferAddress)
    {
        m_BufferMdl = IoAllocateMdl(m_RtBuffer.BufferAddress, m_RtBuffer.ActualBufferSize, FALSE, FALSE, NULL);
        ASSERT(m_BufferMdl != NULL);
        if (m_BufferMdl != NULL)
        {
            MmProbeAndLockPages(m_BufferMdl, KernelMode, IoModifyAccess);
            // NOTE(review): MmGetSystemAddressForMdlSafe may return NULL on
            // low resources; that NULL is stored without a check — verify
            // downstream users tolerate it.
            m_RtBuffer.BufferAddress = MmGetSystemAddressForMdlSafe(m_BufferMdl, NormalPagePriority | MdlMappingNoExecute);
        }
        else
        {
            ClosePin();
            return STATUS_NO_MEMORY;
        }
    }

    //
    // Setup m_pPositionRegister.
    //
    HwRegProperty.Property.Set = KSPROPSETID_RtAudio;
    HwRegProperty.Property.Id = KSPROPERTY_RTAUDIO_POSITIONREGISTER;
    HwRegProperty.Property.Flags = KSPROPERTY_TYPE_GET;
    HwRegProperty.BaseAddress = NULL;
    status = SendIoctl(IOCTL_KS_PROPERTY, &HwRegProperty, sizeof(HwRegProperty), &HwRegister, sizeof(HwRegister), &StatusBlock, TRUE);
    ASSERT(NT_SUCCESS(status));
    ASSERT(NT_SUCCESS(StatusBlock.Status));
    ASSERT(StatusBlock.Information == sizeof(HwRegister));
    if (!NT_SUCCESS(status))
    {
        RtlZeroMemory(&m_RtBuffer, sizeof(m_RtBuffer));
        ClosePin();
        return status;
    }
    // Only a 32-bit wide position register is expected here.
    ASSERT(HwRegister.Width == 32);

    m_PositionPointerMdl = IoAllocateMdl(HwRegister.Register, HwRegister.Width / 8, FALSE, FALSE, NULL);
    ASSERT(m_PositionPointerMdl != NULL);
    if (m_PositionPointerMdl != NULL)
    {
        MmProbeAndLockPages(m_PositionPointerMdl, KernelMode, IoModifyAccess);
        // NOTE(review): possible NULL from MmGetSystemAddressForMdlSafe is
        // stored unchecked here as well.
        m_pPositionRegister = (PULONG) MmGetSystemAddressForMdlSafe(m_PositionPointerMdl, NormalPagePriority | MdlMappingNoExecute);
    }
    else
    {
        ClosePin();
        return STATUS_NO_MEMORY;
    }

    //
    // Calculate m_FifoSizeInFrames.
    //
    if (NT_SUCCESS(status))
    {
        KsProperty.Set = KSPROPSETID_RtAudio;
        KsProperty.Id = KSPROPERTY_RTAUDIO_HWLATENCY;
        KsProperty.Flags = KSPROPERTY_TYPE_GET;
        status = SendIoctl(IOCTL_KS_PROPERTY, &KsProperty, sizeof(KsProperty), &Latency, sizeof(Latency), &StatusBlock, TRUE);
        ASSERT(NT_SUCCESS(status));
        ASSERT(NT_SUCCESS(StatusBlock.Status));
        ASSERT(StatusBlock.Information == sizeof(Latency));
    }
    if (NT_SUCCESS(status))
    {
        WORD BlockAlign = m_Format.WaveFormatExt.Format.nBlockAlign;
        // Make sure we round up instead of down
        m_FifoSizeInFrames = (Latency.FifoSize + BlockAlign - 1) / BlockAlign;
    }
    else
    {
        RtlZeroMemory(&m_RtBuffer, sizeof(m_RtBuffer));
        ClosePin();
        return status;
    }

    return status;
}
// Handle the "bulk write" IOCTL on this pipe file object.
//
// Validates the request buffers, captures the caller-supplied transfer
// parameters, builds and probe-locks an MDL over the caller's data buffer,
// and hands the transfer to CommonBulkReadWrite.  The request is completed
// on every failure path; on the success path CommonBulkReadWrite takes over
// (per the cleanup comment below it owns the MDL from then on).
void AndroidUsbPipeFileObject::OnCtlBulkWrite(WDFREQUEST request,
                                              size_t output_buf_len,
                                              size_t input_buf_len) {
  ASSERT_IRQL_LOW_OR_DISPATCH();

  // Make sure that this is an output pipe
  if (is_input_pipe()) {
    GoogleDbgPrint("\n!!!! Attempt to IOCTL write to input pipe %p", this);
    WdfRequestComplete(request, STATUS_ACCESS_DENIED);
    return;
  }

  // Verify buffers
  ASSERT(input_buf_len >= sizeof(AdbBulkTransfer));
  // Output buffer points to ULONG that receives number of transferred bytes
  ASSERT(output_buf_len >= sizeof(ULONG));
  if ((input_buf_len < sizeof(AdbBulkTransfer)) ||
      (output_buf_len < sizeof(ULONG))) {
    WdfRequestComplete(request, STATUS_INVALID_BUFFER_SIZE);
    return;
  }

  // Get the input buffer
  NTSTATUS status = STATUS_SUCCESS;
  AdbBulkTransfer* transfer_param =
    reinterpret_cast<AdbBulkTransfer*>(InAddress(request, &status));
  ASSERT(NT_SUCCESS(status) && (NULL != transfer_param));
  if (!NT_SUCCESS(status)) {
    WdfRequestComplete(request, status);
    return;
  }

  // Get the output buffer
  ULONG* ret_transfer =
    reinterpret_cast<ULONG*>(OutAddress(request, &status));
  ASSERT(NT_SUCCESS(status) && (NULL != ret_transfer));
  if (!NT_SUCCESS(status)) {
    WdfRequestComplete(request, status);
    return;
  }

  // Cache these params to prevent us from sudden change after we've checked them.
  // This is common practice in protecting ourselves from malicious code:
  // 1. Never trust anything that comes from the User Mode.
  // 2. Never assume that anything that User Mode buffer has will remain
  // unchanged.
  void* transfer_buffer = transfer_param->GetWriteBuffer();
  ULONG transfer_size = transfer_param->transfer_size;

  // Make sure zero length I/O doesn't go through
  if (0 == transfer_size) {
    *ret_transfer = 0;
    WdfRequestCompleteWithInformation(request, STATUS_SUCCESS, sizeof(ULONG));
    return;
  }

  // Make sure that buffer is not NULL
  ASSERT(NULL != transfer_buffer);
  if (NULL == transfer_buffer) {
    WdfRequestComplete(request, STATUS_INVALID_PARAMETER);
    return;
  }

  // At this point we are ready to build MDL for the user buffer.
  PMDL write_mdl =
    IoAllocateMdl(transfer_buffer, transfer_size, FALSE, FALSE, NULL);
  ASSERT(NULL != write_mdl);
  if (NULL == write_mdl) {
    WdfRequestComplete(request, STATUS_INSUFFICIENT_RESOURCES);
    return;
  }

  // Now we need to probe/lock this mdl
  // (probe at the requestor's mode so a user-mode caller cannot pass a
  // kernel address; MmProbeAndLockPages raises on a bad buffer, hence the
  // __try/__except guard).
  __try {
    MmProbeAndLockPages(write_mdl,
                        WdfRequestGetRequestorMode(request),
                        IoReadAccess);
    status = STATUS_SUCCESS;
  } __except (EXCEPTION_EXECUTE_HANDLER) {
    status = GetExceptionCode();
    ASSERTMSG("\n!!!!! AndroidUsbPipeFileObject::OnCtlBulkWrite exception",
              false);
  }
  if (!NT_SUCCESS(status)) {
    IoFreeMdl(write_mdl);
    WdfRequestComplete(request, status);
    return;
  }

  // Perform the write
  status = CommonBulkReadWrite(request,
                               write_mdl,
                               transfer_size,
                               false,
                               transfer_param->time_out,
                               true);
  if (!NT_SUCCESS(status)) {
    // If CommonBulkReadWrite failed we need to unlock and free MDL here
    MmUnlockPages(write_mdl);
    IoFreeMdl(write_mdl);
  }
}
VOID
CcPrepareMdlWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    OUT PMDL *MdlChain,
    OUT PIO_STATUS_BLOCK IoStatus
    )

/*++

Routine Description:

    This routine attempts to lock the specified file data in the cache
    and return a description of it in an Mdl along with the correct
    I/O status.  Pages to be completely overwritten may be satisfied
    with empty pages.  It is *not* safe to call this routine from Dpc level.

    This call is synchronous and raises on error.

    When this call returns, the caller may immediately begin to transfer
    data into the buffers via the Mdl.

    When the call returns with TRUE, the pages described by the Mdl are
    locked in memory, but not mapped in system space.  If the caller needs
    the pages mapped in system space, then it must map them.  On the
    subsequent call to CcMdlWriteComplete the pages will be unmapped if they
    were mapped, and in any case unlocked and the Mdl deallocated.

Arguments:

    FileObject - Pointer to the file object for a file which was
                 opened with NO_INTERMEDIATE_BUFFERING clear, i.e., for
                 which CcInitializeCacheMap was called by the file system.

    FileOffset - Byte offset in file for desired data.

    Length - Length of desired data in bytes.

    MdlChain - On output it returns a pointer to an Mdl chain describing
               the desired data.  Note that even if FALSE is returned,
               one or more Mdls may have been allocated, as may be ascertained
               by the IoStatus.Information field (see below).

    IoStatus - Pointer to standard I/O status block to receive the status
               for the in-transfer of the data.  (STATUS_SUCCESS guaranteed
               for cache hits, otherwise the actual I/O status is returned.)
               The I/O Information Field indicates how many bytes have been
               successfully locked down in the Mdl Chain.

Return Value:

    None

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    PVOID CacheBuffer;
    LARGE_INTEGER FOffset;
    PMDL Mdl = NULL;
    PMDL MdlTemp;
    LARGE_INTEGER Temp;
    ULONG SavedState = 0;
    ULONG ZeroFlags = 0;
    ULONG Information = 0;

    KIRQL OldIrql;
    ULONG ActivePage;
    ULONG PageIsDirty;
    PVACB Vacb = NULL;

    DebugTrace(+1, me, "CcPrepareMdlWrite\n", 0 );
    DebugTrace( 0, me, "    FileObject = %08lx\n", FileObject );
    DebugTrace2(0, me, "    FileOffset = %08lx, %08lx\n", FileOffset->LowPart,
                                                          FileOffset->HighPart );
    DebugTrace( 0, me, "    Length = %08lx\n", Length );

    //
    //  Get pointer to SharedCacheMap.
    //

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    //
    //  See if we have an active Vacb, that we need to free.
    //

    GetActiveVacb( SharedCacheMap, OldIrql, Vacb, ActivePage, PageIsDirty );

    //
    //  If there is an end of a page to be zeroed, then free that page now,
    //  so it does not cause our data to get zeroed.  If there is an active
    //  page, free it so we have the correct ValidDataGoal.
    //

    if ((Vacb != NULL) || (SharedCacheMap->NeedToZero != NULL)) {

        CcFreeActiveVacb( SharedCacheMap, Vacb, ActivePage, PageIsDirty );
        Vacb = NULL;
    }

    FOffset = *FileOffset;

    //
    //  Put try-finally around the loop to deal with exceptions
    //

    try {

        //
        //  Not all of the transfer will come back at once, so we have to loop
        //  until the entire transfer is complete.
        //

        while (Length != 0) {

            ULONG ReceivedLength;
            LARGE_INTEGER BeyondLastByte;

            //
            //  Map and see how much we could potentially access at this
            //  FileOffset, then cut it down if it is more than we need.
            //

            CacheBuffer = CcGetVirtualAddress( SharedCacheMap,
                                               FOffset,
                                               &Vacb,
                                               &ReceivedLength );

            if (ReceivedLength > Length) {
                ReceivedLength = Length;
            }

            BeyondLastByte.QuadPart = FOffset.QuadPart + (LONGLONG)ReceivedLength;

            //
            //  At this point we can calculate the ZeroFlags.
            //

            //
            //  We can always zero middle pages, if any.
            //

            ZeroFlags = ZERO_MIDDLE_PAGES;

            //
            //  See if we are completely overwriting the first or last page.
            //

            if (((FOffset.LowPart & (PAGE_SIZE - 1)) == 0) &&
                (ReceivedLength >= PAGE_SIZE)) {
                ZeroFlags |= ZERO_FIRST_PAGE;
            }

            if ((BeyondLastByte.LowPart & (PAGE_SIZE - 1)) == 0) {
                ZeroFlags |= ZERO_LAST_PAGE;
            }

            //
            //  See if the entire transfer is beyond valid data length,
            //  or at least starting from the second page.
            //

            Temp = FOffset;
            Temp.LowPart &= ~(PAGE_SIZE -1);
            ExAcquireFastLock( &SharedCacheMap->BcbSpinLock, &OldIrql );
            Temp.QuadPart = SharedCacheMap->ValidDataGoal.QuadPart - Temp.QuadPart;
            ExReleaseFastLock( &SharedCacheMap->BcbSpinLock, OldIrql );

            if (Temp.QuadPart <= 0) {
                ZeroFlags |= ZERO_FIRST_PAGE | ZERO_MIDDLE_PAGES | ZERO_LAST_PAGE;
            } else if ((Temp.HighPart == 0) && (Temp.LowPart <= PAGE_SIZE)) {
                ZeroFlags |= ZERO_MIDDLE_PAGES | ZERO_LAST_PAGE;
            }

            //
            //  Fault the data in (zeroing pages that will be fully
            //  overwritten rather than reading them from disk).
            //

            (VOID)CcMapAndRead( SharedCacheMap,
                                &FOffset,
                                ReceivedLength,
                                ZeroFlags,
                                TRUE,
                                CacheBuffer );

            //
            //  Now attempt to allocate an Mdl to describe the mapped data.
            //

            DebugTrace( 0, mm, "IoAllocateMdl:\n", 0 );
            DebugTrace( 0, mm, "    BaseAddress = %08lx\n", CacheBuffer );
            DebugTrace( 0, mm, "    Length = %08lx\n", ReceivedLength );

            Mdl = IoAllocateMdl( CacheBuffer,
                                 ReceivedLength,
                                 FALSE,
                                 FALSE,
                                 NULL );

            DebugTrace( 0, mm, "    <Mdl = %08lx\n", Mdl );

            if (Mdl == NULL) {
                DebugTrace( 0, 0, "Failed to allocate Mdl\n", 0 );

                ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
            }

            DebugTrace( 0, mm, "MmProbeAndLockPages:\n", 0 );
            DebugTrace( 0, mm, "    Mdl = %08lx\n", Mdl );

            //
            //  Lock the pages with fault clustering disabled; SavedState is
            //  reset to 0 afterwards so the finally clause knows clustering
            //  has been re-enabled.
            //

            MmDisablePageFaultClustering(&SavedState);
            MmProbeAndLockPages( Mdl, KernelMode, IoWriteAccess );
            MmEnablePageFaultClustering(SavedState);
            SavedState = 0;

            //
            //  Now that some data (maybe zeros) is locked in memory and
            //  set dirty, it is safe, and necessary for us to advance
            //  valid data goal, so that we will not subsequently ask
            //  for a zero page.  Note if we are extending valid data,
            //  our caller has the file exclusive.
            //

            ExAcquireFastLock( &SharedCacheMap->BcbSpinLock, &OldIrql );
            if (BeyondLastByte.QuadPart > SharedCacheMap->ValidDataGoal.QuadPart) {
                SharedCacheMap->ValidDataGoal = BeyondLastByte;
            }
            ExReleaseFastLock( &SharedCacheMap->BcbSpinLock, OldIrql );

            //
            //  Unmap the data now, now that the pages are locked down.
            //

            CcFreeVirtualAddress( Vacb );
            Vacb = NULL;

            //
            //  Now link the Mdl into the caller's chain
            //

            if ( *MdlChain == NULL ) {
                *MdlChain = Mdl;
            } else {
                MdlTemp = CONTAINING_RECORD( *MdlChain, MDL, Next );
                while (MdlTemp->Next != NULL) {
                    MdlTemp = MdlTemp->Next;
                }
                MdlTemp->Next = Mdl;
            }
            Mdl = NULL;

            //
            //  Assume we did not get all the data we wanted, and set FOffset
            //  to the end of the returned data.
            //

            FOffset = BeyondLastByte;

            //
            //  Update number of bytes transferred.
            //

            Information += ReceivedLength;

            //
            //  Calculate length left to transfer.
            //

            Length -= ReceivedLength;
        }
    } finally {

        if (AbnormalTermination()) {

            //
            //  Re-enable clustering if we raised between disable and enable.
            //

            if (SavedState != 0) {
                MmEnablePageFaultClustering(SavedState);
            }

            if (Vacb != NULL) {
                CcFreeVirtualAddress( Vacb );
            }

            if (Mdl != NULL) {
                IoFreeMdl( Mdl );
            }

            //
            //  Otherwise loop to deallocate the Mdls
            //

            FOffset = *FileOffset;
            while (*MdlChain != NULL) {
                MdlTemp = (*MdlChain)->Next;

                DebugTrace( 0, mm, "MmUnlockPages/IoFreeMdl:\n", 0 );
                DebugTrace( 0, mm, "    Mdl = %08lx\n", *MdlChain );

                MmUnlockPages( *MdlChain );

                //
                //  Extract the File Offset for this part of the transfer, and
                //  tell the lazy writer to write these pages, since we have
                //  marked them dirty.  Ignore the only exception (allocation
                //  error), and console ourselves for having tried.
                //

                CcSetDirtyInMask( SharedCacheMap, &FOffset, (*MdlChain)->ByteCount );

                FOffset.QuadPart = FOffset.QuadPart + (LONGLONG)((*MdlChain)->ByteCount);

                IoFreeMdl( *MdlChain );

                *MdlChain = MdlTemp;
            }

            DebugTrace(-1, me, "CcPrepareMdlWrite -> Unwinding\n", 0 );
        }
        else {

            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = Information;

            //
            //  Make sure the SharedCacheMap does not go away while
            //  the Mdl write is in progress.  We decrement below.
            //

            CcAcquireMasterLock( &OldIrql );
            CcIncrementOpenCount( SharedCacheMap, 'ldmP' );
            CcReleaseMasterLock( OldIrql );
        }
    }

    DebugTrace( 0, me, "    <MdlChain = %08lx\n", *MdlChain );
    DebugTrace(-1, me, "CcPrepareMdlWrite -> VOID\n", 0 );

    return;
}
VOID
CcMdlRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    OUT PMDL *MdlChain,
    OUT PIO_STATUS_BLOCK IoStatus
    )

/*++

Routine Description:

    This routine attempts to lock the specified file data in the cache
    and return a description of it in an Mdl along with the correct
    I/O status.  It is *not* safe to call this routine from Dpc level.

    This routine is synchronous, and raises on errors.

    As each call returns, the pages described by the Mdl are
    locked in memory, but not mapped in system space.  If the caller
    needs the pages mapped in system space, then it must map them.

    Note that each call is a "single shot" which should be followed by
    a call to CcMdlReadComplete.  To resume an Mdl-based transfer, the
    caller must form one or more subsequent calls to CcMdlRead with
    appropriately adjusted parameters.

Arguments:

    FileObject - Pointer to the file object for a file which was
                 opened with NO_INTERMEDIATE_BUFFERING clear, i.e., for
                 which CcInitializeCacheMap was called by the file system.

    FileOffset - Byte offset in file for desired data.

    Length - Length of desired data in bytes.

    MdlChain - On output it returns a pointer to an Mdl chain describing
               the desired data.  Note that even if FALSE is returned,
               one or more Mdls may have been allocated, as may be ascertained
               by the IoStatus.Information field (see below).

    IoStatus - Pointer to standard I/O status block to receive the status
               for the transfer.  (STATUS_SUCCESS guaranteed for cache
               hits, otherwise the actual I/O status is returned.)  The
               I/O Information Field indicates how many bytes have been
               successfully locked down in the Mdl Chain.

Return Value:

    None

Raises:

    STATUS_INSUFFICIENT_RESOURCES - If a pool allocation failure occurs.

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    PPRIVATE_CACHE_MAP PrivateCacheMap;
    PVOID CacheBuffer;
    LARGE_INTEGER FOffset;
    PMDL Mdl = NULL;
    PMDL MdlTemp;
    PETHREAD Thread = PsGetCurrentThread();
    ULONG SavedState = 0;
    ULONG OriginalLength = Length;
    ULONG Information = 0;
    PVACB Vacb = NULL;
    ULONG SavedMissCounter = 0;

    KIRQL OldIrql;
    ULONG ActivePage;
    ULONG PageIsDirty;
    PVACB ActiveVacb = NULL;

    DebugTrace(+1, me, "CcMdlRead\n", 0 );
    DebugTrace( 0, me, "    FileObject = %08lx\n", FileObject );
    DebugTrace2(0, me, "    FileOffset = %08lx, %08lx\n", FileOffset->LowPart,
                                                          FileOffset->HighPart );
    DebugTrace( 0, me, "    Length = %08lx\n", Length );

    //
    //  Save the current readahead hints.
    //

    MmSavePageFaultReadAhead( Thread, &SavedState );

    //
    //  Get pointer to SharedCacheMap.
    //

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    PrivateCacheMap = FileObject->PrivateCacheMap;

    //
    //  See if we have an active Vacb, that we need to free.
    //

    GetActiveVacb( SharedCacheMap, OldIrql, ActiveVacb, ActivePage, PageIsDirty );

    //
    //  If there is an end of a page to be zeroed, then free that page now,
    //  so we don't send Greg the uninitialized data...
    //

    if ((ActiveVacb != NULL) || (SharedCacheMap->NeedToZero != NULL)) {

        CcFreeActiveVacb( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );
    }

    //
    //  If read ahead is enabled, then do the read ahead here so it
    //  overlaps with the copy (otherwise we will do it below).
    //  Note that we are assuming that we will not get ahead of our
    //  current transfer - if read ahead is working it should either
    //  already be in memory or else underway.
    //

    if (PrivateCacheMap->ReadAheadEnabled && (PrivateCacheMap->ReadAheadLength[1] == 0)) {
        CcScheduleReadAhead( FileObject, FileOffset, Length );
    }

    //
    //  Increment performance counters
    //

    CcMdlReadWait += 1;

    //
    //  This is not an exact solution, but when IoPageRead gets a miss,
    //  it cannot tell whether it was CcCopyRead or CcMdlRead, but since
    //  the miss should occur very soon, by loading the pointer here
    //  probably the right counter will get incremented, and in any case,
    //  we hope the errors average out!
    //

    CcMissCounter = &CcMdlReadWaitMiss;

    FOffset = *FileOffset;

    //
    //  Check for read past file size, the caller must filter this case out.
    //

    ASSERT( ( FOffset.QuadPart + (LONGLONG)Length ) <= SharedCacheMap->FileSize.QuadPart );

    //
    //  Put try-finally around the loop to deal with any exceptions
    //

    try {

        //
        //  Not all of the transfer will come back at once, so we have to loop
        //  until the entire transfer is complete.
        //

        while (Length != 0) {

            ULONG ReceivedLength;
            LARGE_INTEGER BeyondLastByte;

            //
            //  Map the data and read it in (if necessary) with the
            //  MmProbeAndLockPages call below.
            //

            CacheBuffer = CcGetVirtualAddress( SharedCacheMap,
                                               FOffset,
                                               &Vacb,
                                               &ReceivedLength );

            if (ReceivedLength > Length) {
                ReceivedLength = Length;
            }

            BeyondLastByte.QuadPart = FOffset.QuadPart + (LONGLONG)ReceivedLength;

            //
            //  Now attempt to allocate an Mdl to describe the mapped data.
            //

            DebugTrace( 0, mm, "IoAllocateMdl:\n", 0 );
            DebugTrace( 0, mm, "    BaseAddress = %08lx\n", CacheBuffer );
            DebugTrace( 0, mm, "    Length = %08lx\n", ReceivedLength );

            Mdl = IoAllocateMdl( CacheBuffer,
                                 ReceivedLength,
                                 FALSE,
                                 FALSE,
                                 NULL );

            DebugTrace( 0, mm, "    <Mdl = %08lx\n", Mdl );

            if (Mdl == NULL) {
                DebugTrace( 0, 0, "Failed to allocate Mdl\n", 0 );

                ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
            }

            DebugTrace( 0, mm, "MmProbeAndLockPages:\n", 0 );
            DebugTrace( 0, mm, "    Mdl = %08lx\n", Mdl );

            //
            //  Set to see if the miss counter changes in order to
            //  detect when we should turn on read ahead.
            //
            //  (Add before / subtract after: SavedMissCounter accumulates
            //  the negated delta of CcMdlReadWaitMiss across the lock, so
            //  it ends up nonzero iff any miss occurred.)
            //

            SavedMissCounter += CcMdlReadWaitMiss;

            MmSetPageFaultReadAhead( Thread, COMPUTE_PAGES_SPANNED( CacheBuffer, ReceivedLength ) - 1);
            MmProbeAndLockPages( Mdl, KernelMode, IoReadAccess );

            SavedMissCounter -= CcMdlReadWaitMiss;

            //
            //  Unmap the data now, now that the pages are locked down.
            //

            CcFreeVirtualAddress( Vacb );
            Vacb = NULL;

            //
            //  Now link the Mdl into the caller's chain
            //

            if ( *MdlChain == NULL ) {
                *MdlChain = Mdl;
            } else {
                MdlTemp = CONTAINING_RECORD( *MdlChain, MDL, Next );
                while (MdlTemp->Next != NULL) {
                    MdlTemp = MdlTemp->Next;
                }
                MdlTemp->Next = Mdl;
            }
            Mdl = NULL;

            //
            //  Assume we did not get all the data we wanted, and set FOffset
            //  to the end of the returned data.
            //

            FOffset = BeyondLastByte;

            //
            //  Update number of bytes transferred.
            //

            Information += ReceivedLength;

            //
            //  Calculate length left to transfer.
            //

            Length -= ReceivedLength;
        }
    } finally {

        CcMissCounter = &CcThrowAway;

        //
        //  Restore the readahead hints.
        //

        MmResetPageFaultReadAhead( Thread, SavedState );

        if (AbnormalTermination()) {

            //
            //  We may have failed to allocate an Mdl while still having
            //  data mapped.
            //

            if (Vacb != NULL) {
                CcFreeVirtualAddress( Vacb );
            }

            if (Mdl != NULL) {
                IoFreeMdl( Mdl );
            }

            //
            //  Otherwise loop to deallocate the Mdls
            //

            while (*MdlChain != NULL) {
                MdlTemp = (*MdlChain)->Next;

                DebugTrace( 0, mm, "MmUnlockPages/IoFreeMdl:\n", 0 );
                DebugTrace( 0, mm, "    Mdl = %08lx\n", *MdlChain );

                MmUnlockPages( *MdlChain );
                IoFreeMdl( *MdlChain );

                *MdlChain = MdlTemp;
            }

            DebugTrace(-1, me, "CcMdlRead -> Unwinding\n", 0 );

        }
        else {

            //
            //  Now enable read ahead if it looks like we got any misses, and do
            //  the first one.
            //

            if (!FlagOn( FileObject->Flags, FO_RANDOM_ACCESS ) &&
                !PrivateCacheMap->ReadAheadEnabled &&
                (SavedMissCounter != 0)) {

                PrivateCacheMap->ReadAheadEnabled = TRUE;
                CcScheduleReadAhead( FileObject, FileOffset, OriginalLength );
            }

            //
            //  Now that we have described our desired read ahead, let's
            //  shift the read history down.
            //

            PrivateCacheMap->FileOffset1 = PrivateCacheMap->FileOffset2;
            PrivateCacheMap->BeyondLastByte1 = PrivateCacheMap->BeyondLastByte2;
            PrivateCacheMap->FileOffset2 = *FileOffset;
            PrivateCacheMap->BeyondLastByte2.QuadPart =
                                FileOffset->QuadPart + (LONGLONG)OriginalLength;

            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = Information;
        }
    }

    DebugTrace( 0, me, "    <MdlChain = %08lx\n", *MdlChain );
    DebugTrace2(0, me, "    <IoStatus = %08lx, %08lx\n", IoStatus->Status,
                                                         IoStatus->Information );
    DebugTrace(-1, me, "CcMdlRead -> VOID\n", 0 );

    return;
}
/* BOOLEAN KTdiStreamSocket::Listen(IN USHORT wRemotePort, IN ULONG dwRemoteAddress) { //KLocker locker(&m_KSynchroObject); BOOLEAN bRes = FALSE; PIRP pIrp = NULL, pIrpError = NULL; PDEVICE_OBJECT pDeviceObject; NTSTATUS NtStatus; //PTDI_CONNECTION_INFORMATION pRequestListenInfo = NULL; //PTDI_CONNECTION_INFORMATION pReturnConnectionInfo; PTA_IP_ADDRESS pRequestAddress; PTDI_ADDRESS_IP pIp; IO_STATUS_BLOCK IoStatusBlock; __try { if (m_bOpen == TRUE && m_bBind == TRUE && m_bConnected == FALSE && Disconnect() == TRUE) { m_nRemotePort = wPort; m_nRemoteAddress = dwAddress; pDeviceObject = IoGetRelatedDeviceObject(m_pTdiConnectionObject); m_pRequestListenInfo = (PTDI_CONNECTION_INFORMATION) new char[2*sizeof(TDI_CONNECTION_INFORMATION) + 2*sizeof(TA_IP_ADDRESS) + sizeof(ULONG)]; if (m_pRequestListenInfo != NULL) { memset(m_pRequestListenInfo, 0, sizeof(TDI_CONNECTION_INFORMATION) + sizeof(TA_IP_ADDRESS) + sizeof(ULONG)); m_pReturnListenInfo = (PTDI_CONNECTION_INFORMATION)((PUCHAR)m_pRequestListenInfo + sizeof(TDI_CONNECTION_INFORMATION) + sizeof(TA_IP_ADDRESS)); m_pReturnListenInfo->RemoteAddressLength = sizeof(TA_IP_ADDRESS); m_pReturnListenInfo->RemoteAddress = (PUCHAR)m_pRequestListenInfo + sizeof(TDI_CONNECTION_INFORMATION); m_pRequestListenInfo->Options = (PVOID) ((PUCHAR)m_pReturnListenInfo + sizeof(TDI_CONNECTION_INFORMATION) + sizeof(TA_IP_ADDRESS)); *((ULONG*)(m_pRequestListenInfo->Options)) = TDI_QUERY_ACCEPT; m_pRequestListenInfo->OptionsLength = sizeof(ULONG); if (m_nRemoteAddress != 0) { m_pRequestListenInfo->RemoteAddressLength = sizeof(TA_IP_ADDRESS); m_pRequestListenInfo->RemoteAddress = (PUCHAR)m_pRequestListenInfo + sizeof(TDI_CONNECTION_INFORMATION); pRequestAddress = (PTA_IP_ADDRESS)(m_pRequestListenInfo->RemoteAddress); pRequestAddress->TAAddressCount = 1; pRequestAddress->Address[0].AddressLength = sizeof(TDI_ADDRESS_IP); pRequestAddress->Address[0].AddressType = TDI_ADDRESS_TYPE_IP; pIp = (PTDI_ADDRESS_IP)(pRequestAddress->Address[0].Address); 
pIp->sin_port = W_LITTLE_TO_BIG_ENDIAN(m_nRemotePort); pIp->in_addr = D_LITTLE_TO_BIG_ENDIAN(m_nRemoteAddress);; } else { m_pRequestListenInfo->RemoteAddressLength = 0; m_pRequestListenInfo->RemoteAddress = NULL; } pIrp = TdiBuildInternalDeviceControlIrp( TDI_LISTEN, pDeviceObject, m_pTdiConnectionObject, NULL, NULL); pIrpError = pIrp; if (pIrp != NULL) { TdiBuildListen( pIrp, pDeviceObject, m_pTdiConnectionObject, NULL, NULL, TDI_QUERY_ACCEPT, // flags m_pRequestListenInfo, m_pReturnListenInfo); pIrpError = NULL; KeInitializeEvent(&m_kAcceptDestroyEvent, NotificationEvent, FALSE); KeInitializeEvent(&m_kListenEvent, NotificationEvent, FALSE); pIrp->UserEvent = &m_kListenEvent; NtStatus = TdiCall(pIrp, pDeviceObject, &IoStatusBlock, FALSE); if (NT_SUCCESS(NtStatus)) { DbgPrint ("TdiListen: OK (%08x)!!!\n", NtStatus); m_bListen = TRUE; bRes = TRUE; } else { DbgPrint ("TdiListen: ERROR (%08x)!!!\n", NtStatus); delete[] m_pRequestListenInfo; m_pRequestListenInfo = NULL; } } } } } __finally { if (pIrpError != NULL) IoFreeIrp(pIrpError); if (m_bListen == FALSE && m_pRequestListenInfo != NULL) delete[] m_pRequestListenInfo; } return bRes; } BOOLEAN KTdiStreamSocket::Accept(ULONG dwTimeOut) { //KLocker locker(&m_KSynchroObject); BOOLEAN bRes = FALSE; PIRP pIrp = NULL, pIrpError = NULL; PDEVICE_OBJECT pDeviceObject; NTSTATUS NtStatus; PTA_IP_ADDRESS pReturnAddress; PTDI_ADDRESS_IP pIp; IO_STATUS_BLOCK IoStatusBlock; PVOID pkEvents[2]; LARGE_INTEGER TimeOut; PLARGE_INTEGER pTimeOut = NULL; //m_KSynchroObject.Lock(); __try { if (m_bOpen == TRUE && m_bBind == TRUE && m_bConnected == FALSE && m_bListen == TRUE) { if (dwTimeOut != 0) { pTimeOut = &TimeOut; TimeOut.QuadPart = dwTimeOut * 10000; // msec -> 100 nsec intervals TimeOut.QuadPart = -TimeOut.QuadPart; } pkEvents[0] = &m_kListenEvent; pkEvents[1] = &m_kAcceptDestroyEvent; NtStatus = KeWaitForMultipleObjects(2, pkEvents, WaitAny, Suspended, KernelMode, FALSE, pTimeOut, NULL); if (NtStatus == 0) { pDeviceObject = 
IoGetRelatedDeviceObject(m_pTdiConnectionObject); pReturnAddress = (PTA_IP_ADDRESS)(m_pReturnListenInfo->RemoteAddress); pReturnAddress->TAAddressCount = 1; pReturnAddress->Address[0].AddressLength = sizeof(TDI_ADDRESS_IP); pReturnAddress->Address[0].AddressType = TDI_ADDRESS_TYPE_IP; pIrp = TdiBuildInternalDeviceControlIrp( TDI_ACCEPT, pDeviceObject, m_pTdiConnectionObject, NULL, NULL); pIrpError = pIrp; if (pIrp != NULL) { TdiBuildAccept( pIrp, pDeviceObject, m_pTdiConnectionObject, NULL, NULL, m_pRequestListenInfo, m_pReturnListenInfo); pIrpError = NULL; NtStatus = TdiCall(pIrp, pDeviceObject, &IoStatusBlock, TRUE); if (NT_SUCCESS(NtStatus)) { m_bConnected = TRUE; bRes = TRUE; pIp = (PTDI_ADDRESS_IP)(pReturnAddress->Address[0].Address); m_nRemotePort = W_BIG_TO_LITTLE_ENDIAN(pIp->sin_port); m_nRemoteAddress = D_BIG_TO_LITTLE_ENDIAN(pIp->in_addr); DbgPrint ("TdiAccept: OK (%08x : %04x)!!!\n", m_nRemoteAddress, m_nRemotePort); } else { DbgPrint ("TdiAccept: ERROR (%08x)!!!\n", NtStatus); } } } } } __finally { if (pIrpError != NULL) IoFreeIrp(pIrpError); } //m_KSynchroObject.UnLock(); return bRes; } */ ULONG KTdiStreamSocket::Send(PVOID pData, ULONG dwSize) { //KLocker locker(&m_KSynchroObject); PIRP pIrp = NULL, pIrpError = NULL; PMDL pMdl; PDEVICE_OBJECT pDeviceObject; NTSTATUS NtStatus; IO_STATUS_BLOCK IoStatusBlock; ULONG dwBytesSended = 0; //m_KSynchroObject.Lock(); __try { if (m_bOpen == TRUE && m_bConnected == TRUE && dwSize != 0) { pDeviceObject = IoGetRelatedDeviceObject(m_pTdiConnectionObject); pIrp = TdiBuildInternalDeviceControlIrp ( TDI_SEND, // sub function pDeviceObject, // pointer to device object m_pTdiConnectionObject, // pointer to control object NULL, // pointer to event NULL); // pointer to return buffer pIrpError = pIrp; if (pIrp == NULL) // validate pointer { NtStatus = STATUS_INSUFFICIENT_RESOURCES; } else { pMdl = IoAllocateMdl( pData, // buffer pointer - virtual address dwSize, // length FALSE, // not secondary FALSE, // don't charge quota 
NULL); // don't use irp if (pMdl != NULL) // validate mdl pointer { __try { MmProbeAndLockPages(pMdl, KernelMode, IoModifyAccess); // probe & lock } __except(EXCEPTION_EXECUTE_HANDLER) { DbgPrint("EXCEPTION: MmProbeAndLockPages\n"); IoFreeMdl(pMdl); pMdl = NULL; } } if (pMdl != NULL) { TdiBuildSend( pIrp, pDeviceObject, m_pTdiConnectionObject, NULL, NULL, pMdl, 0, dwSize); pIrpError = NULL; //m_KSynchroObject.UnLock(); NtStatus = TdiCall(pIrp, pDeviceObject, &IoStatusBlock); //m_KSynchroObject.Lock(); if (NT_SUCCESS(NtStatus)) { dwBytesSended = IoStatusBlock.Information; } else { DbgPrint ("TdiSend: ERROR (%08x)!!!\n", NtStatus); Disconnect(); } } } }
// Write procedure of the device NTSTATUS SlDeviceWriteProc(DEVICE_OBJECT *device_object, IRP *irp) { SL_DEVICE *dev = *((SL_DEVICE **)device_object->DeviceExtension); NTSTATUS ret = STATUS_UNSUCCESSFUL; IO_STACK_LOCATION *irp_stack = IoGetCurrentIrpStackLocation(irp); UINT ret_size = 0; if (dev->IsBasicDevice == false) { // Adapter device SL_FILE *f = irp_stack->FileObject->FsContext; if (irp_stack->Parameters.Write.Length == SL_EXCHANGE_BUFFER_SIZE) { UCHAR *buf = irp->UserBuffer; if (dev->Halting || dev->Adapter->Halt || buf == NULL) { // Halting } else { // Write the packet MDL *mdl; UINT num = SL_NUM_PACKET(buf); mdl = IoAllocateMdl(buf, SL_EXCHANGE_BUFFER_SIZE, false, false, NULL); if (mdl != NULL) { MmProbeAndLockPages(mdl, KernelMode, IoReadAccess); } ret = true; ret_size = SL_EXCHANGE_BUFFER_SIZE; if (num >= 1 && num <= SL_MAX_PACKET_EXCHANGE) { UINT i, j; NET_BUFFER_LIST *nbl_head = NULL; NET_BUFFER_LIST *nbl_tail = NULL; UINT num_packets = 0; NDIS_HANDLE adapter_handle = NULL; SlLock(f->Adapter->Lock); if (f->Adapter->NumPendingSendPackets <= SL_MAX_PACKET_QUEUED) { // Admit to send only if the number of packets being transmitted does not exceed the specified limit adapter_handle = f->Adapter->AdapterHandle; } if (adapter_handle != NULL) { // Lock the file list which opens the same adapter SlLockList(dev->FileList); for (j = 0;j < SL_LIST_NUM(dev->FileList);j++) { SL_FILE *other = SL_LIST_DATA(dev->FileList, j); if (other != f) { // Lock the receive queue of other file lists SlLock(other->RecvLock); other->SetEventFlag = false; } } for (i = 0;i < num;i++) { UINT packet_size = SL_SIZE_OF_PACKET(buf, i); UCHAR *packet_buf; NET_BUFFER_LIST *nbl = NULL; bool ok = false; if (packet_size > SL_MAX_PACKET_SIZE) { packet_size = SL_MAX_PACKET_SIZE; } else if (packet_size < SL_PACKET_HEADER_SIZE) { packet_size = SL_PACKET_HEADER_SIZE; } packet_buf = (UCHAR *)SL_ADDR_OF_PACKET(buf, i); for (j = 0;j < SL_LIST_NUM(dev->FileList);j++) { SL_FILE *other = 
SL_LIST_DATA(dev->FileList, j); if (other != f) { // Insert into the receive queue of the other file lists if (other->NumRecvPackets < SL_MAX_PACKET_QUEUED) { SL_PACKET *q = SlMalloc(sizeof(SL_PACKET)); SlCopy(q->Data, packet_buf, packet_size); q->Size = packet_size; q->Next = NULL; if (other->RecvPacketHead == NULL) { other->RecvPacketHead = q; } else { other->RecvPacketTail->Next = q; } other->RecvPacketTail = q; other->NumRecvPackets++; other->SetEventFlag = true; } } } // Allocate a new NET_BUFFER_LIST if (f->NetBufferListPool != NULL) { nbl = NdisAllocateNetBufferList(f->NetBufferListPool, 16, 0); if (nbl != NULL) { nbl->SourceHandle = adapter_handle; } } if (nbl != NULL) { // Get the NET_BUFFER from the NET_BUFFER_LIST NET_BUFFER *nb = NET_BUFFER_LIST_FIRST_NB(nbl); NET_BUFFER_LIST_NEXT_NBL(nbl) = NULL; if (nb != NULL && OK(NdisRetreatNetBufferDataStart(nb, packet_size, 0, NULL))) { // Buffer copy UCHAR *dst = NdisGetDataBuffer(nb, packet_size, NULL, 1, 0); if (dst != NULL) { SlCopy(dst, packet_buf, packet_size); ok = true; } else { NdisAdvanceNetBufferDataStart(nb, packet_size, false, NULL); } } } if (ok == false) { if (nbl != NULL) { NdisFreeNetBufferList(nbl); } } else { if (nbl_head == NULL) { nbl_head = nbl; } if (nbl_tail != NULL) { NET_BUFFER_LIST_NEXT_NBL(nbl_tail) = nbl; } nbl_tail = nbl; *((void **)NET_BUFFER_LIST_CONTEXT_DATA_START(nbl)) = f; num_packets++; } } for (j = 0;j < SL_LIST_NUM(dev->FileList);j++) { SL_FILE *other = SL_LIST_DATA(dev->FileList, j); if (other != f) { // Release the receive queue of other file lists SlUnlock(other->RecvLock); // Set an event if (other->SetEventFlag) { SlSet(other->Event); } } } SlUnlockList(dev->FileList); if (nbl_head != NULL) { InterlockedExchangeAdd(&f->NumSendingPacketets, num_packets); InterlockedExchangeAdd(&f->Adapter->NumPendingSendPackets, num_packets); SlUnlock(f->Adapter->Lock); NdisSendNetBufferLists(adapter_handle, nbl_head, 0, 0); } else { SlUnlock(f->Adapter->Lock); } } else { 
SlUnlock(f->Adapter->Lock); } } if (mdl != NULL) { MmUnlockPages(mdl); IoFreeMdl(mdl); } } } } irp->IoStatus.Information = ret_size; irp->IoStatus.Status = ret; IoCompleteRequest(irp, IO_NO_INCREMENT); return ret; }