EXTERN_C static NTSTATUS ScvnpWriteFile(_In_ PCFLT_RELATED_OBJECTS FltObjects,
                                        _In_ const wchar_t *OutPathW,
                                        _In_ void *Buffer,
                                        _In_ ULONG BufferSize,
                                        _In_ ULONG CreateDisposition) {
  PAGED_CODE();

  UNICODE_STRING outPath = {};
  RtlInitUnicodeString(&outPath, OutPathW);
  OBJECT_ATTRIBUTES objAttr = RTL_INIT_OBJECT_ATTRIBUTES(
      &outPath, OBJ_KERNEL_HANDLE | OBJ_CASE_INSENSITIVE);

  HANDLE fileHandle = nullptr;
  IO_STATUS_BLOCK ioStatus = {};
  auto status = FltCreateFile(
      FltObjects->Filter, FltObjects->Instance, &fileHandle, GENERIC_WRITE,
      &objAttr, &ioStatus, nullptr, FILE_ATTRIBUTE_NORMAL, 0,
      CreateDisposition,
      FILE_SEQUENTIAL_ONLY | FILE_SYNCHRONOUS_IO_NONALERT |
          FILE_NON_DIRECTORY_FILE,
      nullptr, 0, 0);
  if (status == STATUS_OBJECT_NAME_COLLISION ||
      status == STATUS_DELETE_PENDING) {
    return status;
  }
  if (!NT_SUCCESS(status)) {
    LOG_ERROR_SAFE("FltCreateFile failed (%08x) for %S", status, OutPathW);
    return status;
  }

  PFILE_OBJECT fileObject = nullptr;
  status = ObReferenceObjectByHandle(fileHandle, 0, nullptr, KernelMode,
                                     reinterpret_cast<void **>(&fileObject),
                                     nullptr);
  if (!NT_SUCCESS(status)) {
    LOG_ERROR_SAFE("ObReferenceObjectByHandle failed (%08x) for %S", status,
                   OutPathW);
    goto End;
  }

  status = FltWriteFile(FltObjects->Instance, fileObject, nullptr, BufferSize,
                        Buffer, 0, nullptr, nullptr, nullptr);
  if (!NT_SUCCESS(status)) {
    LOG_ERROR_SAFE("FltWriteFile failed (%08x) for %S", status, OutPathW);
    goto End;
  }

End:
  if (fileObject) {
    ObDereferenceObject(fileObject);
  }
  if (fileHandle) {
    FltClose(fileHandle);
  }
  return status;
}
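//
// A minimal usage sketch (not part of the original source): how a caller that
// already has the FLT_RELATED_OBJECTS of the current operation might invoke
// ScvnpWriteFile. The function name ScvnpSaveCopyExample, the output path, and
// the sample data below are hypothetical; only the ScvnpWriteFile prototype
// and its status handling are taken from the function above.
//
static NTSTATUS ScvnpSaveCopyExample(_In_ PCFLT_RELATED_OBJECTS FltObjects) {
  PAGED_CODE();

  // Data to persist; a real caller would pass captured file contents instead.
  static char data[] = "example payload";

  // FILE_OVERWRITE_IF creates the destination or overwrites an existing one.
  // With FILE_CREATE, an existing file would surface as
  // STATUS_OBJECT_NAME_COLLISION, which ScvnpWriteFile deliberately returns
  // to the caller without logging, so it can be treated as a benign condition.
  return ScvnpWriteFile(FltObjects, L"\\??\\C:\\Backup\\example.bin", data,
                        sizeof(data) - 1, FILE_OVERWRITE_IF);
}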
static
_Check_return_
NTSTATUS
LcFetchFileByChunks (
    _In_  PCFLT_RELATED_OBJECTS FltObjects,
    _In_  HANDLE                SourceFileHandle,
    _In_  PLARGE_INTEGER        SourceFileSize,
    _Out_ PLARGE_INTEGER        BytesCopied
    )
/*++

Summary:

    This function copies the original file from the 'SourceFileHandle' to the
    currently opened file ('FltObjects->FileObject') by chunks.

    It maintains its own list of chunks and extends it if there are no chunks
    available to read into.

    The write operation goes from one chunk to another in sequential order.
    If the next chunk is empty, the write waits for the read to complete and
    then proceeds.

    There are simple rules for chunk allocation:
    1. Up to two chunks are initially allocated:
       a) If the file is smaller than the 'ChunkSize', only one chunk is
          allocated, with a buffer equal to the file size.
       b) If the file is larger than the 'ChunkSize', two chunks are
          allocated, taking the file size into account for the second chunk
          size.
    2. If all currently allocated chunks are full and waiting to be written
       to disk, and the current number of chunks is less than 'MaxChunks',
       an additional chunk is allocated.

    There is a corner case when the actual file size differs from the
    reported one. In this case one of the chunks in the list will be smaller
    than the 'ChunkSize'.

    For example, 'MaxChunks' is 3, 'ChunkSize' is 10, the reported file size
    is 12, and the actual file size is 25. Two chunks will be initially
    allocated: [1] 10b; [2] (12-10)=2b. Later on, when all of them are filled
    with data and EOF has not been reached (because the actual size is 25b),
    another chunk [3] of size 10b (ChunkSize) will be allocated. In total,
    there will be three chunks: 10b, 2b, and 10b.
    We don't reallocate the 2nd chunk, because this driver is supposed to be
    used with proper file systems, but making this modification might be a
    valuable TODO item.

    Let's look at how chunks work.
    All chunks are stored in a doubly-linked list. The [Head] node doesn't
    contain any buffer to store data in. Refer to the 'FILE_CHUNK' structure
    for details (a hypothetical sketch of it is given after this function).

    MSDN about lists:
    http://msdn.microsoft.com/en-us/library/windows/hardware/ff563802(v=vs.85).aspx

    For large files we will have two chunks from the beginning.

    [Head] <-> [1] <-> [2] <-> [Head]

    There are pointers for [R]ead and [W]rite. When the first chunk is being
    read, the list will look like the following:

    [Head] <-> [1] <-> [2] <-> [Head]
               [W]
               [R]

    [W] is waiting for the [1] to be filled in with data before writing it
    to the disk.

    When the [1] chunk is filled with data:

    [Head] <-> [1*] <-> [2] <-> [Head]
               [W]      [R]

    [1] is full and is being written to disk, and we're reading into
    chunk [2].

    Let's also assume that reads are faster than writes. When the [2] chunk
    is full, there are no free chunks available:

    [Head] <-> [1*] <-> [2*] <-> [Head]
               [W]      [R]

    If the current number of chunks is less than the 'MaxChunks' value, a new
    chunk will be allocated before the next [R] node. In this case it will be
    added before [Head], to the end of the list, and the read will continue:

    [Head] <-> [1*] <-> [2*] <-> [3] <-> [Head]
               [W]               [R]

    Then [W] and [R] finish, and [W] moves to the [2] chunk.

    [Head] <-> [1] <-> [2*] <-> [3*] <-> [Head]
                       [W]      [R]

    [R] sees that the [1] chunk is available, and reads into it:

    [Head] <-> [1] <-> [2*] <-> [3*] <-> [Head]
               [R]     [W]

    After [R] finishes reading, there are no free chunks again:

    [Head] <-> [1*] <-> [2*] <-> [3*] <-> [Head]
               [R]      [W]

    A new chunk can be allocated again before the [2]:

    [Head] <-> [1*] <-> [4] <-> [2*] <-> [3*] <-> [Head]
                        [R]     [W]

    With this approach, [R] will fill chunks [1]->[2]->[3]->[1]->[4], and the
    write will store them in the same order. I.e. allocating a new chunk
    before the next filled chunk (if the number of chunks is less than
    'MaxChunks') makes sure that the data is written sequentially, and there
    is no need to constantly seek in the file.

Arguments:

    FltObjects       - Pointer to the 'FLT_RELATED_OBJECTS' data structure
                       containing opaque handles to this filter, instance,
                       its associated volume and file object.

    SourceFileHandle - Handle to the source file to copy content from.

    SourceFileSize   - Size of the source file.

    BytesCopied      - Pointer to the LARGE_INTEGER structure that receives
                       the number of bytes copied.

Return Value:

    The return value is the status of the operation.

--*/
{
    NTSTATUS               status                = STATUS_SUCCESS;

    LIST_ENTRY             chunksListHead        = { 0 };
    ULONG                  chunkListLength       = 0;

    // State of the R/W operations.
    BOOLEAN                readComplete          = FALSE;
    BOOLEAN                writeComplete         = FALSE;
    PFILE_CHUNK            readChunk             = NULL;
    PFILE_CHUNK            writeChunk            = NULL;
    BOOLEAN                eof                   = FALSE;
    BOOLEAN                waitingForRead        = FALSE;

    KEVENT                 writeEvent            = { 0 };
    WRITE_CALLBACK_CONTEXT writeCallbackContext  = { 0 };

    LARGE_INTEGER          waitTimeout           = { 0 };
    LARGE_INTEGER          zeroTimeout           = { 0 };
    IO_STATUS_BLOCK        statusBlock           = { 0 };

    LARGE_INTEGER          remainingBytes        = { 0 };
    LARGE_INTEGER          totalBytesRead        = { 0 };
    LARGE_INTEGER          totalBytesWritten     = { 0 };

    LARGE_INTEGER          sourceFileOffset      = { 0 };
    LARGE_INTEGER          destinationFileOffset = { 0 };

    PAGED_CODE();

    FLT_ASSERT(FltObjects       != NULL);
    FLT_ASSERT(SourceFileHandle != NULL);
    FLT_ASSERT(SourceFileSize   != NULL);
    FLT_ASSERT(SourceFileSize->QuadPart > 0);
    FLT_ASSERT(BytesCopied      != NULL);
    FLT_ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);

    *BytesCopied = RtlConvertLongToLargeInteger(0);

    __try
    {
        // Set the relative timeout (1 stands for 100 nanoseconds).
        waitTimeout = RtlConvertLongToLargeInteger(-10000);
        waitTimeout.QuadPart *= TimeoutMilliseconds;

        KeInitializeEvent(&writeEvent, NotificationEvent, TRUE);
        writeCallbackContext.Event = &writeEvent;

        remainingBytes.QuadPart = SourceFileSize->QuadPart;

        NT_IF_FAIL_LEAVE(LcInitializeChunksList(FltObjects->Instance, &chunksListHead, remainingBytes, &chunkListLength));

        for (;;)
        {
            if (waitingForRead)
            {
                // Wait for the read operation to finish.
                NT_IF_FAIL_LEAVE(ZwWaitForSingleObject(SourceFileHandle, FALSE, &waitTimeout));
                readComplete = TRUE;
            }
            else
            {
                readComplete = ZwWaitForSingleObject(SourceFileHandle, FALSE, &zeroTimeout) == STATUS_SUCCESS;
            }

            writeComplete = KeReadStateEvent(&writeEvent) != 0;

            if (!eof && readComplete)
            {
                // If it's not the first read, update the status of the current chunk.
                if (readChunk != NULL)
                {
                    status = statusBlock.Status;

                    if (NT_SUCCESS(status) || status == STATUS_END_OF_FILE)
                    {
                        ULONG bytesRead = (ULONG)statusBlock.Information;

                        readChunk->BytesInBuffer   = bytesRead;
                        remainingBytes.QuadPart   -= bytesRead;
                        totalBytesRead.QuadPart   += bytesRead;
                        sourceFileOffset.QuadPart += bytesRead;

                        if (status == STATUS_END_OF_FILE || bytesRead < readChunk->BufferSize)
                        {
                            eof    = TRUE;
                            status = STATUS_SUCCESS;

                            // Will not be used later in this case, only to have the proper data here.
                            remainingBytes.QuadPart = 0;
                        }
                    }

                    NT_IF_FAIL_LEAVE(status);
                }

                // Move to the next available chunk and schedule read.
                if (!eof)
                {
                    // If the remote file system returned an invalid file size, when we started reading it,
                    // this value might be negative. Set it to the default, so the newly allocated chunk
                    // will have the maximum allowed size.
                    if (remainingBytes.QuadPart <= 0)
                    {
                        remainingBytes.QuadPart = ChunkSize;
                    }

                    NT_IF_FAIL_LEAVE(LcGetNextAvailableChunk(
                        FltObjects->Instance,
                        &chunksListHead,
                        &readChunk,
                        &chunkListLength,
                        TRUE,            // Read operation.
                        &remainingBytes,
                        &writeEvent,
                        &waitTimeout));

                    // Schedule read operation for the current chunk.
                    status = ZwReadFile(
                        SourceFileHandle,
                        NULL,
                        NULL,
                        NULL,
                        &statusBlock,
                        readChunk->Buffer,
                        readChunk->BufferSize,
                        &sourceFileOffset,
                        NULL);

                    NT_IF_FALSE_LEAVE(status == STATUS_PENDING || status == STATUS_SUCCESS, status);
                }
            }

            if (writeComplete)
            {
                if (!waitingForRead)
                {
                    // If it's not the first write, update status of the current chunk.
                    if (writeChunk != NULL)
                    {
                        NT_IF_FAIL_LEAVE(writeCallbackContext.Status);

                        writeChunk->BytesInBuffer       = 0;
                        totalBytesWritten.QuadPart     += writeCallbackContext.BytesWritten;
                        destinationFileOffset.QuadPart += writeCallbackContext.BytesWritten;
                    }

                    NT_IF_FAIL_LEAVE(LcGetNextAvailableChunk(
                        FltObjects->Instance,
                        &chunksListHead,
                        &writeChunk,
                        &chunkListLength,
                        FALSE,           // Write operation.
                        NULL,
                        NULL,
                        NULL));
                }

                waitingForRead = FALSE;

                // If we don't have any data in the current chunk, restart from the beginning of the loop.
                if (writeChunk->BytesInBuffer == 0)
                {
                    if (eof)
                    {
                        // We're done!
                        break;
                    }
                    else
                    {
                        // Since we're waiting for the read to complete for the current chunk,
                        // don't change the chunk position on next iteration.
                        waitingForRead = TRUE;
                        continue;
                    }
                }

                KeClearEvent(&writeEvent);

                NT_IF_FAIL_LEAVE(FltWriteFile(
                    FltObjects->Instance,
                    FltObjects->FileObject,
                    &destinationFileOffset,
                    writeChunk->BytesInBuffer,
                    writeChunk->Buffer,
                    FLTFL_IO_OPERATION_DO_NOT_UPDATE_BYTE_OFFSET,
                    NULL,
                    (PFLT_COMPLETED_ASYNC_IO_CALLBACK)&LcWriteCallback,
                    &writeCallbackContext));
            }
        }

        *BytesCopied = totalBytesWritten;
    }
    __finally
    {
        LcClearChunksList(FltObjects->Instance, &chunksListHead);
    }

    return status;
}
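//
// Hypothetical sketch (not the original definitions): the members of
// 'FILE_CHUNK' and 'WRITE_CALLBACK_CONTEXT' that LcFetchFileByChunks relies
// on, plus a completion routine shaped like 'LcWriteCallback'. Only the
// fields actually touched above are shown; the real driver may define more,
// and the exact types may differ.
//
typedef struct _FILE_CHUNK
{
    LIST_ENTRY ListEntry;       // Links the chunk into 'chunksListHead'.
    PVOID      Buffer;          // Data buffer read from the source file.
    ULONG      BufferSize;      // Allocated size of 'Buffer'.
    ULONG      BytesInBuffer;   // Valid bytes waiting to be written (0 == empty).
} FILE_CHUNK, *PFILE_CHUNK;

typedef struct _WRITE_CALLBACK_CONTEXT
{
    PKEVENT   Event;            // Signaled when the asynchronous write completes.
    NTSTATUS  Status;           // Completion status of the last write.
    ULONG_PTR BytesWritten;     // Number of bytes written by the last write.
} WRITE_CALLBACK_CONTEXT, *PWRITE_CALLBACK_CONTEXT;

// Completion callback in the shape expected by FltWriteFile above: it records
// the result of the asynchronous write and signals the event that the copy
// loop polls with KeReadStateEvent.
static VOID FLTAPI
LcWriteCallbackSketch(
    _In_ PFLT_CALLBACK_DATA CallbackData,
    _In_ PFLT_CONTEXT Context
    )
{
    PWRITE_CALLBACK_CONTEXT writeContext = (PWRITE_CALLBACK_CONTEXT)Context;

    writeContext->Status       = CallbackData->IoStatus.Status;
    writeContext->BytesWritten = CallbackData->IoStatus.Information;

    KeSetEvent(writeContext->Event, IO_NO_INCREMENT, FALSE);
}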
/*---------------------------------------------------------
Function:    FileWriteEncryptionHeader
Description: Writes the encryption header (non-reentrant).
Input:       pfiInstance          - filter instance
             pfoFileObject        - file object
             pvcVolumeContext     - volume context
             pscFileStreamContext - file stream context
Output:      None
Return:      STATUS_SUCCESS on success; otherwise an appropriate status.
Notes:       For the encryption header layout, see ENCRYPTION_HEADER in
             FileFunction.h.
History:     2011.4.9 - switched to the FltXxx versions of the APIs.
---------------------------------------------------------*/
NTSTATUS
FileWriteEncryptionHeader(
    __in PFLT_INSTANCE pfiInstance,
    __in PFILE_OBJECT pfoFileObject,
    __in PVOLUME_CONTEXT pvcVolumeContext,
    __in PFILE_STREAM_CONTEXT pscFileStreamContext
    )
{
    // File header data.
    // static WCHAR wHeader[CONFIDENTIAL_FILE_HEAD_SIZE/sizeof(WCHAR)] = ENCRYPTION_HEADER;

    // Completion event.
    KEVENT keEventComplete;
    // File size.
    LARGE_INTEGER nFileSize;
    // Offset.
    LARGE_INTEGER nOffset;
    // Length, set to the standard encryption header size.
    ULONG ulLength = CONFIDENTIAL_FILE_HEAD_SIZE;
    // Return value.
    NTSTATUS status;
    // Encryption header buffer.
    PVOID pHeader;
    // Whether the file size needs to be set.
    BOOLEAN bSetSize = FALSE;

    //
    // Initialize the completion event before starting.
    //
    KeInitializeEvent(
        &keEventComplete,
        SynchronizationEvent,   // Synchronization event.
        FALSE                   // Initially non-signaled.
        );

    //
    // Set the file size.
    //
    FILE_STREAM_CONTEXT_LOCK_ON(pscFileStreamContext);

    //
    // For a new file, add the encryption header size to the file length.
    //
    if (pscFileStreamContext->nFileValidLength.QuadPart == 0) {
        pscFileStreamContext->nFileSize.QuadPart = CONFIDENTIAL_FILE_HEAD_SIZE;
        nFileSize.QuadPart = CONFIDENTIAL_FILE_HEAD_SIZE;   // Size to apply via FileSetSize below.
        bSetSize = TRUE;
    } else {
        ASSERT(pscFileStreamContext->nFileSize.QuadPart >= CONFIDENTIAL_FILE_HEAD_SIZE);
    }

    FILE_STREAM_CONTEXT_LOCK_OFF(pscFileStreamContext);

    if (bSetSize) {
        status = FileSetSize(
            pfiInstance,
            pfoFileObject,
            &nFileSize
            );
        if (!NT_SUCCESS(status))
            return status;
    }

    //
    // If this is not a new file and an encryption header is being written,
    // it must be an update of the header, so the length must be greater than
    // one header.
    //
    /*
    nFileSize.QuadPart = pscFileStreamContext->nFileValidLength.QuadPart + CONFIDENTIAL_FILE_HEAD_SIZE;
    */

    //
    // Write the encryption identification header.
    //
    nOffset.QuadPart = 0;

    ulLength = ROUND_TO_SIZE(ulLength, pvcVolumeContext->ulSectorSize);

    pHeader = ExAllocateFromNPagedLookasideList(&nliNewFileHeaderLookasideList);
    if (pHeader == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    FctConstructFileHead(pscFileStreamContext, pHeader);

    status = FltWriteFile(
        pfiInstance,                 // Initiating instance; prevents re-entry.
        pfoFileObject,               // File object.
        &nOffset,                    // Offset: write from the beginning.
        CONFIDENTIAL_FILE_HEAD_SIZE, // ulLength, // Size of one header.
        pHeader,                     // Header data.
        FLTFL_IO_OPERATION_DO_NOT_UPDATE_BYTE_OFFSET | FLTFL_IO_OPERATION_NON_CACHED, // Non-cached write.
        NULL,                        // The number of bytes written is not needed.
        FileCompleteCallback,        // Callback used to confirm completion.
        &keEventComplete             // Callback context: the completion event.
        );

    //
    // Wait for the write to complete. The wait is non-alertable so the header
    // buffer is not freed while the write is still in flight.
    //
    KeWaitForSingleObject(&keEventComplete, Executive, KernelMode, FALSE, NULL);

    ExFreeToNPagedLookasideList(&nliNewFileHeaderLookasideList, pHeader);

    return status;
}
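//
// Hypothetical sketch (the real FileCompleteCallback is defined elsewhere in
// this driver): a completion routine matching the FltWriteFile callback used
// above. FltWriteFile can return before the non-cached write finishes, so the
// callback signals the KEVENT passed as the callback context, which is what
// KeWaitForSingleObject in FileWriteEncryptionHeader waits on before freeing
// the header buffer.
//
VOID
FileCompleteCallbackSketch(
    __in PFLT_CALLBACK_DATA CallbackData,
    __in PFLT_CONTEXT Context
    )
{
    UNREFERENCED_PARAMETER(CallbackData);

    // The context is the completion event supplied by the caller.
    KeSetEvent((PKEVENT)Context, IO_NO_INCREMENT, FALSE);
}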