// RETURN VALUE: TRUE if success, FALSE if failure BOOL AsyncFuzzer::init(tstring deviceName, ULONG nbThreads) { BOOL bResult=FALSE; UINT nbThreadsValid=0; hDev = CreateFile(deviceName, MAXIMUM_ALLOWED, FILE_SHARE_READ|FILE_SHARE_WRITE, NULL, OPEN_EXISTING, FILE_FLAG_OVERLAPPED, NULL); if(hDev!=INVALID_HANDLE_VALUE) { // Get a valid nb of threads: MAX_THREADS if too big, twice the nb of procs if too small if(nbThreads>MAX_THREADS) { nbThreadsValid = MAX_THREADS; TPRINT(VERBOSITY_INFO, _T("Nb of threads too big, using %d\n"), MAX_THREADS); } else { nbThreadsValid = nbThreads ? nbThreads : GetNumberOfProcs()*2; } threads = (PHANDLE)HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(HANDLE)*nbThreadsValid); if(threads) { startingNbThreads = nbThreadsValid; if(InitializeThreadsAndCompletionPort()) { TPRINT(VERBOSITY_INFO, _T("%u threads and IOCP created successfully\n"), startingNbThreads); bResult = TRUE; } else { TPRINT(VERBOSITY_ERROR, _T("Failed to create Threads and IOCP\n")); } } } return bResult; }
//DESCRIPTION:
// Creates the dibf input named pipe, blocks until a fuzzing client connects,
// then starts the thread that drains fuzz packets off the pipe.
//
//OUTPUT:
// TRUE for success
// FALSE for error
//
BOOL NamedPipeInputFuzzer::Init()
{
    BOOL clientConnected;

    dibf_pipe = CreateNamedPipe(_T("\\\\.\\pipe\\dibf_pipe"),
                                PIPE_ACCESS_INBOUND,
                                PIPE_TYPE_MESSAGE|PIPE_READMODE_MESSAGE|PIPE_WAIT|PIPE_REJECT_REMOTE_CLIENTS,
                                1, MAX_BUFSIZE/2, MAX_BUFSIZE/2, 0, NULL);
    if(dibf_pipe==INVALID_HANDLE_VALUE) {
        return FALSE;
    }
    TPRINT(VERBOSITY_DEFAULT, _T("Named pipe created, waiting for connection...\n"));
    // A client that raced in before ConnectNamedPipe surfaces as ERROR_PIPE_CONNECTED and counts as success
    clientConnected = ConnectNamedPipe(dibf_pipe, NULL) ? TRUE : (GetLastError()==ERROR_PIPE_CONNECTED);
    if(!clientConnected) {
        return FALSE;
    }
    TPRINT(VERBOSITY_DEFAULT, _T("Fuzzing client connected to named pipe\n"));
    inputThread = CreateThread(NULL, 0, FuzzInputProc, this, 0, NULL);
    if(!inputThread) {
        TPRINT(VERBOSITY_ERROR, _T("Failed to create fuzz input thread with error %#.8x\n"), GetLastError());
        return FALSE;
    }
    return TRUE;
}
//DESCRIPTION: // This function is the entry point for the async fuzzer. It packs all params in the config structure // and passes it to its initialization function. It then associates the device passed as parameter to // the completion port and passes control to the worker threads by posting an empty completion status. // //INPUT: // hDev - device to fuzz // pIoctlstorage - the list of ioctls // dwIOCTLCount - the count of ioctls in pIoctlstorage // nbOfThreadsRequested - the requested number of threads // timeLimit - an array containing the 3 timouts (for each fuzzer) // maxPending - the max number of pending requests for the async fuzzer // cancelRate - percentage of pending requests to attempt to cancel for the async fuzzer // pStats - the statistics stats pointer //OUTPUT: // TRUE for success // FALSE for error // BOOL AsyncFuzzer::start() { BOOL bResult = FALSE; DWORD waitResult; // Pass control to the iocp handler if(!PostQueuedCompletionStatus(hIocp, 0, SPECIAL_PACKET, SPECIAL_OVERLAPPED_START)) { TPRINT(VERBOSITY_ERROR, _T("Failed to post completion status to completion port\n")); } // Wait for ctrl-c or timout bResult = WaitOnTerminationEvents(timeLimit); if(bResult) { state = STATE_CLEANUP; waitResult = WaitForMultipleObjects(startingNbThreads, threads, TRUE, ASYNC_CLEANUP_TIMEOUT); if(waitResult==WAIT_OBJECT_0) { TPRINT(VERBOSITY_INFO, _T("All fuzzer threads exited timely\n")); bResult = TRUE; } else { TPRINT(VERBOSITY_ERROR, _T("Not all worker threads exited timely\n")); } } else { TPRINT(VERBOSITY_ERROR, _T("Failed wait on termination event\n")); } return bResult; }
//DESCRIPTION: // This function creates the completion port and the requested number of threads. // If threads creation fails, the successfully created threads' handles are closed before returning. // //INPUT: // nbOfThreads - the number of threads to create // pWorkerThreads - the ouput pointer to the thread handle array // pAsync_config - the config struct // //OUTPUT: // TRUE for success // FALSE for error // BOOL AsyncFuzzer::InitializeThreadsAndCompletionPort() { BOOL bResult = FALSE; hIocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, (ULONG_PTR)NULL, 0); if(hIocp) { // Associate the device handle to iocp bResult = (NULL!=CreateIoCompletionPort(hDev, hIocp, 0, 0)); if(bResult) { // Configure io completion port bResult = SetFileCompletionNotificationModes(hDev, FILE_SKIP_COMPLETION_PORT_ON_SUCCESS); if(bResult) { bResult = CreateThreads(); if(!bResult){ TPRINT(VERBOSITY_ERROR, _T("Failed to create worker threads\n")); } } else { TPRINT(VERBOSITY_ERROR, _T("Failed to configure iocompletion port with error %#.8x\n"), GetLastError()); } } else { TPRINT(VERBOSITY_ERROR, _T("Failed to associate device with iocompletion port with error %#.8x\n"), GetLastError()); } } else { TPRINT(VERBOSITY_ERROR, _T("Failed to create I/O completion port with error %#.8x\n"), GetLastError()); } return bResult; }
//DESCRIPTION:
// Thread proc reading fuzz packets from the dibf named pipe. Accumulates
// message fragments into a local buffer (handling ERROR_MORE_DATA), wraps each
// complete message of at least 4 bytes into a heap-allocated vector and queues
// it on the shared iopackets queue under the lock. Exits on client disconnect,
// unexpected read errors only abort the current packet, and bExit is polled
// between packets.
//
//INPUT:
// param - the NamedPipeInputFuzzer instance (passed as PVOID by CreateThread)
//
//OUTPUT:
// ERROR_SUCCESS always; signals hEvent on exit
//
DWORD WINAPI NamedPipeInputFuzzer::FuzzInputProc(PVOID param)
{
    BOOL bDone, bResult=FALSE;
    UCHAR input[MAX_BUFSIZE+4];
    UINT index;
    DWORD bytesRead, error;
    vector<UCHAR> *packet;
    NamedPipeInputFuzzer *npif = (NamedPipeInputFuzzer*)param;

    // Double while is not as bad as it looks
    // Outer loop: one iteration per pipe message; inner loop: per read fragment
    while(!npif->bExit) {
        index = 0;
        bDone = FALSE;
        while(!bDone) {
            // Read into the buffer at the current accumulation offset
            bResult = ReadFile(npif->dibf_pipe, &input[index], (MAX_BUFSIZE+4)-index, &bytesRead, NULL);
            // Check for data reception
            if (bResult&&bytesRead) {
                // Update index
                index+=bytesRead;
                // Sanity check received data
                // NOTE(review): packets shorter than 4 bytes are silently dropped --
                // presumably 4 bytes is the minimum valid packet (ioctl code?); confirm with consumer
                if(index>=4) {
                    // Create new packet (ownership passes to the queue consumer)
                    packet = new vector<UCHAR>(input, &input[index]);
                    // Enqueue new packet
                    EnterCriticalSection(&npif->lock);
                    npif->iopackets.push(packet);
                    LeaveCriticalSection(&npif->lock);
                }
                bDone = TRUE;
            }
            else {
                error = GetLastError();
                switch(error) {
                case ERROR_BROKEN_PIPE:
                    // Client went away: stop both loops
                    TPRINT(VERBOSITY_ERROR, _T("Named pipe client disconnected\n"));
                    bDone = TRUE;
                    npif->bExit = TRUE;
                    break;
                case ERROR_MORE_DATA:
                    // Partial message: keep accumulating into the buffer
                    if(bytesRead) {
                        // Update index
                        index+=bytesRead;
                    }
                    else {
                        // Packet too big for the buffer: drop it and start over
                        bDone = TRUE;
                    }
                    break;
                default:
                    // Unexpected error: abandon this packet but keep the thread alive
                    TPRINT(VERBOSITY_ERROR, _T("Reading from named pipe failed with error %#.8x\n"), error);
                    bDone = TRUE;
                    break;
                }
            }
        }
    }
    // Notify the owner that the input thread is done
    SetEvent(npif->hEvent);
    return ERROR_SUCCESS;
}
/* Round-trips a captured index header binary through decode -> encode ->
 * decode -> encode, asserting that each re-encoding is byte-identical to the
 * original blob (i.e. the codec is lossless and stable). */
void test_index_headers()
{
    /* Captured on-disk index header used as the reference fixture */
    char header_bin[] = {
        5,226,251,160,170,107,207,39,248,218,139,62,137,58,95,46,204,10,12,1,0,64,0,
        254,1,0,218,1,0,0,136,5,1,4,0,136,254,127,0,218,127,0,8,0,83,119,9,1,254,128,
        0,222,128,0,0,36,5,121,20,136,0,0,58,0,1,1,11,12,4,197,0,2,13,8,0,3,13,8,0,4,
        13,8,0,5,13,8,0,6,13,8,0,7,13,8,0,8,13,8,0,9,13,8,0,10,13,8,0,12,13,8,0,13,
        13,8,0,14,13,8,0,16,13,8,0,17,13,8,0,18,13,8,0,19,13,8,0,20,13,8,0,21,13,8,0,
        22,13,8,0,23,13,8,0,24,13,8,0,25,13,8,0,26,13,8,0,27,13,8,0,28,13,8,0,29,13,
        8,0,30,13,8,0,31,13,8,0,32,13,8,0,33,13,8,0,34,13,8,0,35,13,8,37,19,12,4,197,
        0,37,13,16,0,38,13,8,0,39,13,8,0,40,13,8,0,41,13,8,0,42,13,8,0,43,13,8,0,44,
        13,8,0,45,13,8,0,46,13,8,0,47,13,8,0,48,13,8,0,49,13,8,0,50,13,8,0,51,13,8,0,
        52,13,8,0,53,13,8,0,54,13,8,0,55,13,8,0,56,13,8,0,57,13,8,0,59,13,8,0,60,13,
        8,0,62,13,8,64,145,0,0,0,24,174,99,0,0,0,19,159,140,0,0,1,49,254,101,3,226,
        101,3,0,255,13,1,32,2,0,152,0,0,0,44,71,93,1,148,8,152,106,0,254,148,0,254,
        148,0,1,148,24,0,5,55,56,49,52,52,5,154,8,63,200,207,1,154,4,129,243,254,154,
        0,254,154,0,46,154,0,112,1,0,4,0,5,0,10,0,60,0,62,0,2,0,11,0,15,0,2,0,58,0,
        61,0,2,0,15,0,58,105,173,44,0,0,4,197,0,63,0,0,0,0,4,197
    };

    /* First round trip: decode the fixture, re-encode, compare bytes */
    TPRINT("Decoding an index header...\n");
    index_header_t *header = test_index_header_decoding(header_bin, sizeof(header_bin));
    TPRINT("Encoding the previously decoded header...\n");
    char *header_bin2 = NULL;
    size_t header_bin2_size = 0;
    test_index_header_encoding(header, &header_bin2, &header_bin2_size);
    assert(header_bin2_size == sizeof(header_bin));
    assert(memcmp(header_bin2, header_bin, header_bin2_size) == 0);

    /* Second round trip: decode what we just encoded and encode it again */
    TPRINT("Decoding the previously encoded header...\n");
    index_header_t *header2 = test_index_header_decoding(header_bin2, header_bin2_size);
    TPRINT("Encoding the previously decoded header...\n");
    char *header_bin3 = NULL;
    size_t header_bin3_size = 0;
    test_index_header_encoding(header2, &header_bin3, &header_bin3_size);
    assert(header_bin3_size == sizeof(header_bin));
    assert(memcmp(header_bin3, header_bin, header_bin3_size) == 0);

    /* Release decoded structures and encoder-allocated buffers */
    free_index_header(header);
    free_index_header(header2);
    free(header_bin2);
    free(header_bin3);
}
//DESCRIPTION:
// Constructor: marks the provider as able to go cold, initializes the handle
// members to safe sentinel values and creates the packet-queue lock.
//
NamedPipeInputFuzzer::NamedPipeInputFuzzer() : bExit(FALSE)
{
    canGoCold =TRUE;
    // FIX: initialize handles so the destructor is safe even if Init() was
    // never called or failed before creating the pipe/thread.
    // NOTE(review): assumes these members have no in-class initializers -- confirm in header
    dibf_pipe = INVALID_HANDLE_VALUE;
    inputThread = NULL;
    TPRINT(VERBOSITY_DEBUG, _T("NamedPipeInputFuzzer constructor\n"));
    InitializeCriticalSection(&lock);
    return;
}
//DESCRIPTION:
// Constructor: stores the fuzzing parameters and zeroes the thread/IOCP
// members so the destructor behaves when init() fails early.
//
//INPUT:
// timeLimit - fuzzing duration limit
// maxPending - max number of simultaneously pending async requests
// cancelRate - percentage of pending requests to attempt to cancel
// provider - the fuzzing provider handed to the Fuzzer base
//
AsyncFuzzer::AsyncFuzzer(ULONG timeLimit, ULONG maxPending, ULONG cancelRate, FuzzingProvider *provider) : Fuzzer(provider)
{
    TPRINT(VERBOSITY_DEBUG, _T("AsyncFuzzer constructor\n"));
    this->currentNbThreads = 0;
    this->startingNbThreads = 0;
    this->timeLimit = timeLimit;
    this->maxPending = maxPending;
    this->cancelRate = cancelRate;
    // FIX: zero the IOCP and thread-array members; the destructor closes/frees
    // them unconditionally, which was UB when init() never allocated them.
    // NOTE(review): assumes no in-class initializers for these -- confirm in header
    this->hIocp = NULL;
    this->threads = NULL;
    return;
}
//DESCRIPTION:
// Thread proc for the synchronous fuzzer. Repeatedly asks the fuzzing
// provider for a fuzzed request, sends it synchronously, updates the global
// stats counters and warns after MAX_CONSECUTIVE_FAILURES failures in a row.
// Exits when the provider runs dry (signals the tracker event) or when the
// fuzzer leaves STATE_FUZZING.
//
//INPUT:
// param - the SyncFuzzer instance (passed as PVOID by CreateThread)
//
//OUTPUT:
// ERROR_SUCCESS always
//
DWORD SyncFuzzer::FuzzProc(PVOID param)
{
    SyncFuzzer *syncFuzzer = (SyncFuzzer*)param;
    BOOL bResult;
    ULONG nbConsecutiveFailures=0;
    IoRequest request(syncFuzzer->hDev);
    DWORD threadID;

    threadID = GetCurrentThreadId();
    // Initialize thread's PRNG
    mt19937 prng(UNLFOLD_LOW_WORD(GetCurrentThreadId())^GetTickCount());
    while(syncFuzzer->state==STATE_FUZZING) {
        // Ask the provider for the next fuzzed request
        bResult = request.fuzz(syncFuzzer->fuzzingProvider, &prng);
        if(bResult) {
            bResult = request.sendSync();
            // Sync sends always count as sent and completed
            InterlockedIncrement(&tracker.stats.SynchronousRequests);
            InterlockedIncrement(&tracker.stats.SentRequests);
            InterlockedIncrement(&tracker.stats.CompletedRequests);
            if(bResult) {
                // NOTE(review): &request prints the stack object's address via %#.8x -- presumably
                // intended as a request identifier in the logs; verify format on 64-bit builds
                TPRINT(VERBOSITY_ALL, _T("TID[%.5u]: Sync request %#.8x (iocode %#.8x) completed successfully\n"), threadID, &request, request.GetIoCode());
                InterlockedIncrement(&tracker.stats.SuccessfulRequests);
                nbConsecutiveFailures = 0;
            }
            else {
                TPRINT(VERBOSITY_ALL, _T("TID[%.5u]: Sync request %#.8x (iocode %#.8x) completed with error %#.8x\n"), threadID, &request, request.GetIoCode(), GetLastError());
                InterlockedIncrement(&tracker.stats.FailedRequests);
                nbConsecutiveFailures++;
            }
            // Warn (and re-arm the counter) when too many requests fail back to back
            if(nbConsecutiveFailures==MAX_CONSECUTIVE_FAILURES) {
                TPRINT(VERBOSITY_DEFAULT, _T(" %u IOCTL failures in a row -- check config?\n"), nbConsecutiveFailures);
                nbConsecutiveFailures = 0;
            }
        }
        // No more fuzzing available from provider
        else {
            SetEvent(syncFuzzer->tracker.hEvent);
            break;
        }
    }
    return ERROR_SUCCESS;
}
//DESCRIPTION:
// Destructor: closes every created worker-thread handle, frees the handle
// array and closes the I/O completion port.
//
AsyncFuzzer::~AsyncFuzzer()
{
    TPRINT(VERBOSITY_DEBUG, _T("AsyncFuzzer destructor\n"));
    // FIX: guard against init() having failed before the array was allocated --
    // HeapFree on a NULL block is documented as undefined behavior
    if(threads) {
        // Close all handles array (zero-allocated: stop at the first empty slot)
        for(ULONG i=0; i<startingNbThreads&&threads[i]; i++) {
            CloseHandle(threads[i]);
        }
        // Free thread handles array
        HeapFree(GetProcessHeap(), 0, threads);
    }
    // Close IO completion port
    // FIX: guard against the IOCP never having been created
    if(hIocp) {
        CloseHandle(hIocp);
    }
    return;
}
//DESCRIPTION:
// Starts the synchronous fuzzer: spawns the FuzzProc worker thread, waits for
// ctrl-c or the time limit, then flips the state to done and waits (bounded)
// for the worker to exit.
//
//OUTPUT:
// TRUE for success
// FALSE for error
//
BOOL SyncFuzzer::start()
{
    BOOL bResult=FALSE;
    HANDLE hThread;
    DWORD waitResult;

    hThread = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)FuzzProc, this, 0, NULL);
    if(hThread) {
        // Wait for ctrl-c or timout
        bResult = WaitOnTerminationEvents(timeLimit);
        if(bResult) {
            // FuzzProc polls this state and exits its loop
            state = STATE_DONE;
            waitResult = WaitForSingleObject(hThread, SYNC_CLEANUP_TIMEOUT);
            if(waitResult==WAIT_OBJECT_0) {
                TPRINT(VERBOSITY_INFO, _T("Fuzzer thread exited timely\n"));
                bResult = TRUE;
            }
            else {
                TPRINT(VERBOSITY_ERROR, _T("Fuzzer thread failed to exited timely\n"));
            }
        }
        // FIX: the thread handle was leaked on every run
        CloseHandle(hThread);
    }
    return bResult;
}
//DESCRIPTION:
// Destructor: signals the input thread to exit, gives it 2 seconds before
// forcefully terminating it, then releases the lock, the pipe and the thread
// handle.
//
NamedPipeInputFuzzer::~NamedPipeInputFuzzer()
{
    DWORD waitResult;

    TPRINT(VERBOSITY_DEBUG, _T("NamedPipeInputFuzzer destructor\n"));
    bExit = TRUE;
    // FIX: guard the thread handle -- Init() may have failed before creating it
    if(inputThread) {
        // Wait 2 seconds then kill the input thread
        waitResult = WaitForSingleObject(inputThread, 2000);
        if(waitResult!=WAIT_OBJECT_0) {
            TerminateThread(inputThread, 0);
        }
        CloseHandle(inputThread);
    }
    DeleteCriticalSection(&lock);
    if(dibf_pipe!=INVALID_HANDLE_VALUE) {
        CloseHandle(dibf_pipe);
    }
    return;
}
//DESCRIPTION:
// Constructor: stores the time limit and hands the provider to the Fuzzer base.
//
//INPUT:
// timeLimit - fuzzing duration limit
// provider - the fuzzing provider handed to the Fuzzer base
//
SyncFuzzer::SyncFuzzer(ULONG timeLimit, FuzzingProvider *provider) : Fuzzer(provider)
{
    // FIX: debug message previously said "AsyncFuzzer constructor" (copy-paste)
    TPRINT(VERBOSITY_DEBUG, _T("SyncFuzzer constructor\n"));
    this->timeLimit = timeLimit;
}
//DESCRIPTION:
// Destructor: releases the exhaustion-notification event created by the
// constructor.
//
FuzzingProvider::~FuzzingProvider()
{
    TPRINT(VERBOSITY_DEBUG, _T("FuzzingProvider destructor\n"));
    // FIX: the event created in the constructor was leaked
    if(hEvent) {
        CloseHandle(hEvent);
    }
    return;
}
void test_reductions() { char reduction_bin[] = { 0,0,0,6,46,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,4, 49,53,56,50,0,5,45,49,53,56,50,0,9,49,49,48,49,50,48,54,52,55 }; char id_btree_reduction_bin[] = { 0,0,0,11,210,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,30,0,0,0,0,0,0 }; char r_bin2[MAX_REDUCTION_SIZE]; size_t r_bin2_size = 0; char id_btree_r_bin2[MAX_REDUCTION_SIZE]; size_t id_btree_r_bin2_size = 0; char r_bin3[MAX_REDUCTION_SIZE]; size_t r_bin3_size = 0; char id_btree_r_bin3[MAX_REDUCTION_SIZE]; size_t id_btree_r_bin3_size = 0; TPRINT("Decoding a view btree reduction ...\n"); view_btree_reduction_t *r = test_view_btree_reduction_decoding(reduction_bin, sizeof(reduction_bin)); TPRINT("Decoding a view id btree reduction ...\n"); view_id_btree_reduction_t *id_btree_r = test_view_id_btree_reduction_decoding(id_btree_reduction_bin); TPRINT("Encoding the previously decoded view btree reduction ...\n"); test_view_btree_reduction_encoding(r, r_bin2, &r_bin2_size); assert(r_bin2_size == sizeof(reduction_bin)); assert(memcmp(r_bin2, reduction_bin, r_bin2_size) == 0); TPRINT("Encoding the previously decoded view id btree reduction ...\n"); test_view_id_btree_reduction_encoding(id_btree_r, id_btree_r_bin2, &id_btree_r_bin2_size); assert(id_btree_r_bin2_size == sizeof(id_btree_reduction_bin)); assert(memcmp(id_btree_r_bin2, id_btree_reduction_bin, id_btree_r_bin2_size) == 0); TPRINT("Decoding the previously encoded view btree reduction ...\n"); view_btree_reduction_t *r2 = test_view_btree_reduction_decoding(r_bin2, r_bin2_size); 
TPRINT("Decoding the previously encoded view id btree reduction ...\n"); view_id_btree_reduction_t *id_btree_r2 = test_view_id_btree_reduction_decoding(id_btree_r_bin2); TPRINT("Encoding the previously decoded view btree reduciton ...\n"); test_view_btree_reduction_encoding(r2, r_bin3, &r_bin3_size); assert(r_bin3_size == sizeof(reduction_bin)); assert(memcmp(r_bin3, reduction_bin, r_bin3_size) == 0); TPRINT("Encoding the previously decoded view id btree reduciton ...\n"); test_view_id_btree_reduction_encoding(id_btree_r2, id_btree_r_bin3, &id_btree_r_bin3_size); assert(id_btree_r_bin3_size == sizeof(id_btree_reduction_bin)); assert(memcmp(id_btree_r_bin3, id_btree_reduction_bin, id_btree_r_bin3_size) == 0); free_view_btree_reduction(r); free_view_btree_reduction(r2); free_view_id_btree_reduction(id_btree_r); free_view_id_btree_reduction(id_btree_r2); }
void test_values() { char value_bin[] = { 0,10,0,0,4,54,49,53,53,0,0,4,54,49,53,52 }; char id_btree_value_bin[] = { 0,67,0,0,2,0,14,91,49,50,51,44,34,102,111,111,98,97,114, 34,93,0,4,45,51,50,49,1,0,1,0,7,91,53,44,54,44,55,93 }; char *v_bin2 = NULL; size_t v_bin2_size = 0; char *id_btree_v_bin2 = NULL; size_t id_btree_v_bin2_size = 0; char *v_bin3 = NULL; size_t v_bin3_size = 0; char *id_btree_v_bin3 = NULL; size_t id_btree_v_bin3_size = 0; TPRINT("Decoding a view btree value ...\n"); view_btree_value_t *v = test_view_btree_value_decoding(value_bin, sizeof(value_bin)); TPRINT("Decoding a view id btree value ...\n"); view_id_btree_value_t *id_btree_v; id_btree_v = test_view_id_btree_value_decoding(id_btree_value_bin, sizeof(id_btree_value_bin)); TPRINT("Encoding the previously decoded view btree value ...\n"); test_view_btree_value_encoding(v, &v_bin2, &v_bin2_size); assert(v_bin2_size == sizeof(value_bin)); assert(memcmp(v_bin2, value_bin, v_bin2_size) == 0); TPRINT("Encoding the previously decoded view id btree value ...\n"); test_view_id_btree_value_encoding(id_btree_v, &id_btree_v_bin2, &id_btree_v_bin2_size); assert(id_btree_v_bin2_size == sizeof(id_btree_value_bin)); assert(memcmp(id_btree_v_bin2, id_btree_value_bin, id_btree_v_bin2_size) == 0); TPRINT("Decoding the previously encoded view btree value ...\n"); view_btree_value_t *v2 = test_view_btree_value_decoding(v_bin2, v_bin2_size); TPRINT("Decoding the previously encoded view id btree value ...\n"); view_id_btree_value_t *id_btree_v2; id_btree_v2 = test_view_id_btree_value_decoding(id_btree_v_bin2, id_btree_v_bin2_size); TPRINT("Encoding the previously decoded view btree value ...\n"); test_view_btree_value_encoding(v2, &v_bin3, &v_bin3_size); assert(v_bin3_size == sizeof(value_bin)); assert(memcmp(v_bin3, value_bin, v_bin3_size) == 0); TPRINT("Encoding the previously decoded view id btree value ...\n"); test_view_id_btree_value_encoding(id_btree_v2, &id_btree_v_bin3, &id_btree_v_bin3_size); 
assert(id_btree_v_bin3_size == sizeof(id_btree_value_bin)); assert(memcmp(id_btree_v_bin3, id_btree_value_bin, id_btree_v_bin3_size) == 0); free_view_btree_value(v); free_view_btree_value(v2); free(v_bin2); free(v_bin3); free_view_id_btree_value(id_btree_v); free_view_id_btree_value(id_btree_v2); free(id_btree_v_bin2); free(id_btree_v_bin3); }
//DESCRIPTION:
// Constructor: keeps a reference to the caller-owned ioctl definition store.
//
Dumbfuzzer::Dumbfuzzer(const vector<IoctlDef> &ioctlStorage) : ioStore(ioctlStorage)
{
    TPRINT(VERBOSITY_DEBUG, _T("Dumbfuzzer constructor\n"));
}
//DESCRIPTION:
// Destructor: nothing to release, only traces destruction.
//
Dumbfuzzer::~Dumbfuzzer()
{
    TPRINT(VERBOSITY_DEBUG, _T("Dumbfuzzer destructor\n"));
}
//DESCRIPTION:
// Constructor: keeps a reference to the caller-owned ioctl definition store
// and zeroes the sliding-DWORD iteration state.
//
SlidingDwordFuzzer::SlidingDwordFuzzer(const vector<IoctlDef> &ioctlStorage) : ioStore(ioctlStorage), iteration(0), position(0), ioctlIndex(0)
{
    TPRINT(VERBOSITY_DEBUG, _T("SlidingDwordFuzzer constructor\n"));
}
//DESCRIPTION:
// This function is the thread proc for the async fuzzer's worker threads. It
// dequeues completed requests from the io completion port, handles special
// control OVERLAPPED packets, fires IOCTLs asynchronously until the pending
// limit is reached (optionally attempting cancellation on a percentage of
// them) and finally drains and frees requests during the cleanup phase. The
// last thread to free a request flips the state to done and wakes the others.
//
//INPUT:
// param - the AsyncFuzzer instance (passed as PVOID by CreateThread)
//
//OUTPUT:
// Always 0 (thread exit code)
//
DWORD WINAPI AsyncFuzzer::Iocallback(PVOID param)
{
    UINT status;
    BOOL bResult, canceled, gotAPacket;
    DWORD threadID;
    IoRequest *request;

    // Get current TID
    threadID = GetCurrentThreadId();
    // Get asyncfuzzer
    AsyncFuzzer *asyncfuzzer = (AsyncFuzzer*)param;
    // Initialize thread's PRNG
    mt19937 prng(UNLFOLD_LOW_WORD(threadID)^GetTickCount());
    do {
        // Block until a completion (or special control packet) arrives;
        // gotAPacket means "we own a reusable request object"
        gotAPacket = asyncfuzzer->DequeueIoPacket(&request);
        // Keep firing until enough requests are pending or we are finishing
        while(asyncfuzzer->state==STATE_FUZZING) {
            if(!gotAPacket) {
                // Loose request allocation limit
                if(asyncfuzzer->AllowNewAllocation()) {
                    TPRINT(VERBOSITY_ALL, _T("TID[%.5u]: Allocating new request in addition to the %u existing ones (%u pending)\n"), threadID, Fuzzer::tracker.stats.AllocatedRequests, Fuzzer::tracker.stats.PendingRequests);
                    request = new IoRequest(asyncfuzzer->hDev);
                    // Create new request
                    // try/catch this? -> TPRINT(VERBOSITY_ERROR, _T("TID[%.5u]: Failed to allocate new request (keep going with existing %u request allocations)\n"), threadID, Fuzzer::tracker.stats.AllocatedRequests);
                    InterlockedIncrement(&Fuzzer::tracker.stats.AllocatedRequests);
                    gotAPacket=TRUE;
                }
                else {
                    // At the allocation cap: stop firing and go back to dequeueing
                    TPRINT(VERBOSITY_DEBUG, _T("TID[%u]: ENOUGH REQUESTS ALLOCATED (%d) FOR THE CURRENTLY PENDING NUMBER REQUESTS OF %d\n"), threadID, Fuzzer::tracker.stats.AllocatedRequests, Fuzzer::tracker.stats.PendingRequests);
                    break;
                }
            }
            else {
                // Make sure overlapped is zeroed
                request->reset();
            }
            if(gotAPacket) {
                // Craft a fuzzed request
                bResult = request->fuzz(asyncfuzzer->fuzzingProvider, &prng);
                // If request fuzzed and ready for sending
                if(bResult) {
                    // Fire IOCTL
                    status = request->sendAsync();
                    TPRINT(VERBOSITY_ALL, _T("TID[%.5u]: Sent request %#.8x (iocode %#.8x)\n"), threadID, request, request->GetIoCode());
                    InterlockedIncrement(&Fuzzer::tracker.stats.SentRequests);
                    // Handle pending IOs
                    if(status==DIBF_PENDING) {
                        // Cancel a portion of requests
                        // NOTE(review): uses rand() here rather than the per-thread mt19937 prng -- confirm intentional
                        canceled=FALSE;
                        if((ULONG)(rand()%100)<asyncfuzzer->cancelRate) {
                            canceled = CancelIoEx(asyncfuzzer->hDev, &request->overlp);
                            if(canceled) {
                                TPRINT(VERBOSITY_ALL, _T("TID[%.5u]: Sent a cancel for request %#.8x (iocode %#.8x)\n"), threadID, request, request->GetIoCode());
                            }
                            else {
                                TPRINT(VERBOSITY_ALL, _T("TID[%.5u]: Failed to attempt cancelation of request %#.8x (iocode %#.8x), error %#.8x\n"), threadID, request, request->GetIoCode(), GetLastError());
                            }
                        }
                        // Whether cancellation was sent or not, the request is pending
                        InterlockedIncrement(&Fuzzer::tracker.stats.PendingRequests);
                        // Request is processing and not to be reused
                        gotAPacket=FALSE;
                    }
                    else {
                        // Displaying synchronous completion result
                        InterlockedIncrement(&Fuzzer::tracker.stats.CompletedRequests);
                        InterlockedIncrement(&Fuzzer::tracker.stats.SynchronousRequests);
                        if(status==DIBF_SUCCESS){
                            InterlockedIncrement(&Fuzzer::tracker.stats.SuccessfulRequests);
                            TPRINT(VERBOSITY_ALL, _T("TID[%.5u]: Request %#.8x (iocode %#.8x) synchronously completed successfully\n"), threadID, request, request->GetIoCode());
                        }
                        else {
                            InterlockedIncrement(&Fuzzer::tracker.stats.FailedRequests);
                            TPRINT(VERBOSITY_ALL, _T("TID[%.5u]: Request %#.8x (iocode %#.8x) synchronously completed with error %#.8x\n"), threadID, request, request->GetIoCode(), GetLastError());
                        }
                    }
                }
                else {
                    // Can only fuzz as fast as the fuzzing provider fuzzes
                    TPRINT(VERBOSITY_DEBUG, _T("TID[%.5u]: Failed to craft fuzzed request\n"), threadID);
                }
            }
        } // while firing ioctl
        // Cleanup stage only if we have a packet
        if(gotAPacket && asyncfuzzer->state==STATE_CLEANUP) {
            TPRINT(VERBOSITY_ALL, _T("TID[%.5u]: Freeing request %#.8x (%u currently allocated requests)\n"), threadID, request, Fuzzer::tracker.stats.AllocatedRequests);
            delete request;
            // Only one thread shall be allowed through
            if(InterlockedDecrement(&Fuzzer::tracker.stats.AllocatedRequests)==0) {
                TPRINT(VERBOSITY_INFO, _T("TID[%.5u]: Last request was processed - exiting\n"), threadID);
                asyncfuzzer->state=STATE_DONE;
                // Wake the other startingNbThreads-1 workers blocked in GetQueuedCompletionStatus
                for(UINT i=0; i<asyncfuzzer->startingNbThreads-1; i++) {
                    // Unblock other threads
                    PostQueuedCompletionStatus(asyncfuzzer->hIocp, 0, SPECIAL_PACKET, SPECIAL_OVERLAPPED_DONE);
                }
            }
        }
    } while(asyncfuzzer->state!=STATE_DONE);
    return 0;
}
//DESCRIPTION:
// Destructor: nothing to release, only traces destruction.
//
SlidingDwordFuzzer::~SlidingDwordFuzzer()
{
    TPRINT(VERBOSITY_DEBUG, _T("SlidingDwordFuzzer destructor\n"));
}
// Empty constructor and destructor FuzzingProvider::FuzzingProvider() : canGoCold(FALSE) { hEvent = CreateEvent(NULL, FALSE, FALSE, NULL); TPRINT(VERBOSITY_DEBUG, _T("FuzzingProvider constructor\n")); return; }
//DESCRIPTION:
// Destructor: nothing to release, only traces destruction.
//
SyncFuzzer::~SyncFuzzer()
{
    // FIX: debug message previously said "AsyncFuzzer destructor" (copy-paste)
    TPRINT(VERBOSITY_DEBUG, _T("SyncFuzzer destructor\n"));
    return;
}
//DESCRIPTION:
// Dequeues one packet from the I/O completion port. Special control packets
// (start/done notices) are logged and yield no request; real completions
// (successful or failed) are mapped back to their owning IoRequest via
// CONTAINING_RECORD and accounted in the global stats.
//
//INPUT:
// request - output pointer receiving the completed IoRequest; valid only when
//           the function returns TRUE
//
//OUTPUT:
// TRUE when a completed request was dequeued
// FALSE otherwise (special control packet or internal error)
//
BOOL AsyncFuzzer::DequeueIoPacket(IoRequest **request)
{
    BOOL gotRequest=FALSE, ioSuccess=FALSE;
    DWORD threadID, nbOfBytes, error;
    ULONG_PTR specialPacket;
    LPOVERLAPPED pOvrlp;

    // Get current TID
    threadID = GetCurrentThreadId();
    // Dequeue I/O packet
    // If request was successful
    if(GetQueuedCompletionStatus(hIocp, &nbOfBytes, &specialPacket, &pOvrlp, INFINITE)) {
        // Handle special control overlapped types
        if(specialPacket) {
            switch((DWORD)pOvrlp) {
            case SPECIAL_OVERLAPPED_START:
                TPRINT(VERBOSITY_INFO, _T("TID[%.5u]: Control passed to worker threads\n"), threadID);
                break;
            case SPECIAL_OVERLAPPED_DONE:
                TPRINT(VERBOSITY_INFO, _T("TID[%.5u]: Received status complete notice - exiting\n"), threadID);
                break;
            default:
                TPRINT(VERBOSITY_ERROR, _T("TID[%.5u]: Received unexpected special OVERLAPPED\n"), threadID);
                break;
            }
        }
        else {
            if(pOvrlp){
                // Capture the request that just completed
                *request = CONTAINING_RECORD(pOvrlp, IoRequest, overlp);
                gotRequest = TRUE;
                ioSuccess = TRUE;
            }
        }
    }
    else {
        // This should NEVER happen with an INFINITE timeout and no overlapped
        if(!pOvrlp) {
            TPRINT(VERBOSITY_ERROR, _T("TID[%.5u]: Timeout/internal error waiting for I/O completion\n"), threadID);
        }
        else {
            // Capture the request that just completed with error
            *request = CONTAINING_RECORD(pOvrlp, IoRequest, overlp);
            gotRequest = TRUE;
            ioSuccess = FALSE;
        }
    }
    // Got request, do accounting
    if(gotRequest) {
        // Accounting for completed requests
        InterlockedIncrement(&Fuzzer::tracker.stats.CompletedRequests);
        InterlockedIncrement(&Fuzzer::tracker.stats.ASyncRequests);
        if(!ioSuccess) {
            // Capture the completion error before any further API calls can clobber it
            error = GetLastError();
            if(error == ERROR_OPERATION_ABORTED) {
                TPRINT(VERBOSITY_ALL, _T("TID[%.5u]: Async request %#.8x (iocode %#.8x) canceled successfully\n"), threadID, *request, (*request)->GetIoCode());
                InterlockedIncrement(&Fuzzer::tracker.stats.CanceledRequests);
            }
            else {
                InterlockedIncrement(&Fuzzer::tracker.stats.FailedRequests);
                // FIX: log the captured completion error instead of re-calling
                // GetLastError(), which may have been overwritten by the
                // intervening calls
                TPRINT(VERBOSITY_ALL, _T("TID[%.5u]: Async request %#.8x (iocode %#.8x) completed with error %#.8x\n"), threadID, *request, (*request)->GetIoCode(), error);
            }
        }
        else {
            InterlockedIncrement(&Fuzzer::tracker.stats.SuccessfulRequests);
            TPRINT(VERBOSITY_ALL, _T("TID[%.5u]: Async request %#.8x (iocode %#.8x) completed successfully\n"), threadID, *request, (*request)->GetIoCode());
        }
        // One fewer request in flight
        InterlockedDecrement(&Fuzzer::tracker.stats.PendingRequests);
    }
    return gotRequest;
}