/*
 * Remove the first occurrence of 'item' from the list.
 * Returns true on success; false with errno = ENOENT when the item
 * is not present.
 */
bool rig_list_remove(RIG_LIST l, void *item) {
	NULLCHECK_EXIT(l);
	NULLCHECK_EXIT(item);

	/* Hash outside of the lock: it only depends on the item itself. */
	size_t key = rig_hash(item, l->typeinfo.size, RIG_HASH_MURMUR2);

	MX_LOCK(l);

	/* Locate the first node matching 'item', or its would-be position. */
	Node prev, curr;
	list_traverse(l, l->head, key, item, &prev, &curr);

	bool found = (curr != NULL && curr->key == key);
	if (!found) {
		MX_UNLOCK(l);
		errno = ENOENT;
		return (false);
	}

	/* Unlink the node under the lock; free it afterwards, since once
	 * detached it is no longer reachable by other threads. */
	prev->next = curr->next;
	l->count--;
	MX_UNLOCK(l);

	free(curr);
	return (true);
}
/*
 * Pop the first element of the list, copying its payload into 'item'.
 * Returns false with errno = ENOENT when the list is empty.
 */
bool rig_list_get(RIG_LIST l, void *item) {
	NULLCHECK_EXIT(l);
	NULLCHECK_EXIT(item);

	MX_LOCK(l);

	Node first = l->head->next;
	if (first == NULL) {
		/* Nothing to hand out. */
		MX_UNLOCK(l);
		errno = ENOENT;
		return (false);
	}

	/* Detach the first element; once unlinked it is private to this
	 * thread, so the data copy can happen outside of the lock. */
	l->head->next = first->next;
	l->count--;
	MX_UNLOCK(l);

	str_ops_copy(item, first->data, l->typeinfo.size);
	free(first);
	return (true);
}
/*
 * Insert 'item' into the list, positioned by its hash key.
 * Fails with errno = ENOMEM (allocation failure), EXFULL (capacity
 * reached) or EEXIST (duplicate while RIG_LIST_ALLOWDUPS is unset).
 */
bool rig_list_insert(RIG_LIST l, void *item) {
	NULLCHECK_EXIT(l);
	NULLCHECK_EXIT(item);

	/* Build the node up-front, outside of the lock. */
	Node node = malloc(sizeof(*node) + l->typeinfo.size);
	if (node == NULL) {
		errno = ENOMEM;
		return (false);
	}

	size_t key = rig_hash(item, l->typeinfo.size, RIG_HASH_MURMUR2);
	node->key = key;
	str_ops_copy(node->data, item, l->typeinfo.size);

	MX_LOCK(l);

	if (l->count == l->capacity) {
		/* Capacity exhausted: roll back the allocation. */
		MX_UNLOCK(l);
		free(node);
		errno = EXFULL;
		return (false);
	}

	/* Find the first node with this key, or the right insertion spot. */
	Node prev, curr;
	list_traverse(l, l->head, key, item, &prev, &curr);

	if (!TEST_BITFIELD(l->flags, RIG_LIST_ALLOWDUPS) && curr != NULL && curr->key == key) {
		/* Duplicates forbidden and the item is already present. */
		MX_UNLOCK(l);
		free(node);
		errno = EEXIST;
		return (false);
	}

	/* Splice the new node in between prev and curr. */
	node->next = curr;
	prev->next = node;
	l->count++;
	MX_UNLOCK(l);
	return (true);
}
/*
 * Drop one reference to the list; when the last reference goes away,
 * free every node (sentinel head included), destroy the lock (if the
 * list is thread-safe) and release the descriptor itself.
 * The caller's pointer is always reset to NULL.
 */
void rig_list_destroy(RIG_LIST *l) {
	NULLCHECK_EXIT(l);
	NULLCHECK_EXIT(*l);

	if (rig_atomic_uint_dec_and_test(&(*l)->refcount)) {
		MX_LOCK(*l);
		/* Walk the whole chain and release every node. */
		Node node = (*l)->head;
		while (node != NULL) {
			Node next = node->next;
			free(node);
			node = next;
		}
		MX_UNLOCK(*l);

		/* Tear down the lock (only present when thread-safe),
		 * then the list descriptor. */
		if (TEST_BITFIELD((*l)->flags, RIG_LIST_THREADSAFE)) {
			pthread_mutex_destroy(&(*l)->mxlock);
		}
		free(*l);
	}

	*l = NULL;
}
/* Return the current number of elements (snapshot taken under the lock). */
size_t rig_list_count(RIG_LIST l) {
	NULLCHECK_EXIT(l);

	MX_LOCK(l);
	size_t count = l->count;
	MX_UNLOCK(l);

	return (count);
}
/*
 * Check whether the list has reached its capacity.
 *
 * Fix: the original read l->capacity AFTER releasing the lock while the
 * count was snapshotted under it, yielding an inconsistent (and, if
 * capacity is ever mutable, racy) comparison. Both fields are now read
 * and compared under the lock, matching the locking discipline of the
 * sibling accessors (rig_list_count / rig_list_empty).
 */
bool rig_list_full(RIG_LIST l) {
	NULLCHECK_EXIT(l);

	MX_LOCK(l);
	bool full = (l->count == l->capacity);
	MX_UNLOCK(l);

	return (full);
}
/* Check whether the list currently holds no elements. */
bool rig_list_empty(RIG_LIST l) {
	NULLCHECK_EXIT(l);

	MX_LOCK(l);
	size_t count = l->count;
	MX_UNLOCK(l);

	return (count == 0);
}
/*
 * Copy the first element's payload into 'item' without removing it.
 * Returns false with errno = ENOENT when the list is empty.
 */
bool rig_list_look(RIG_LIST l, void *item) {
	NULLCHECK_EXIT(l);
	NULLCHECK_EXIT(item);

	MX_LOCK(l);

	Node first = l->head->next;
	if (first == NULL) {
		MX_UNLOCK(l);
		errno = ENOENT;
		return (false);
	}

	/* Copy while still holding the lock: the node stays linked and a
	 * concurrent rig_list_get() could otherwise free it under us. */
	str_ops_copy(item, first->data, l->typeinfo.size);
	MX_UNLOCK(l);
	return (true);
}
/*
 * Disassemble the instruction bytes in 'mem' (up to 'size' bytes), taken
 * from the process 'pid', writing the textual form into 'instr'.
 * On any libbfd failure the function logs and returns, leaving 'instr'
 * untouched (or "[DIS-ASM_FAILURE]" when the disassembler itself fails).
 */
void arch_bfdDisasm(pid_t pid, uint8_t * mem, size_t size, char *instr)
{
    /* libbfd is not thread-safe: serialize all users behind one mutex;
     * DEFER releases it on every exit path. */
    MX_LOCK(&arch_bfd_mutex);
    DEFER(MX_UNLOCK(&arch_bfd_mutex));
    bfd_init();

    /* Open the target's binary image via its /proc/<pid>/exe link. */
    char fname[PATH_MAX];
    snprintf(fname, sizeof(fname), "/proc/%d/exe", pid);
    bfd *bfdh = bfd_openr(fname, NULL);
    if (bfdh == NULL) {
        LOG_W("bfd_openr('/proc/%d/exe') failed", pid);
        return;
    }
    DEFER(bfd_close_all_done(bfdh));

    if (!bfd_check_format(bfdh, bfd_object)) {
        LOG_W("bfd_check_format() failed");
        return;
    }

    disassembler_ftype disassemble = disassembler(bfdh);
    if (disassemble == NULL) {
        LOG_W("disassembler() failed");
        return;
    }

    /* 'instr' is passed as the stream argument: arch_bfdFPrintF presumably
     * appends the printed disassembly into it — confirm against its impl. */
    struct disassemble_info info;
    init_disassemble_info(&info, instr, arch_bfdFPrintF);
    info.arch = bfd_get_arch(bfdh);
    info.mach = bfd_get_mach(bfdh);
    info.buffer = mem;
    info.buffer_length = size;
    info.section = NULL;
    info.endian = bfd_little_endian(bfdh) ? BFD_ENDIAN_LITTLE : BFD_ENDIAN_BIG;
    disassemble_init_for_target(&info);

    /* Reset the output buffer, then disassemble one instruction at vma 0
     * of the supplied buffer. */
    strcpy(instr, "");
    if (disassemble(0, &info) <= 0) {
        snprintf(instr, _HF_INSTR_SZ, "[DIS-ASM_FAILURE]");
    }
}
/*
 * Resolve the symbol name and line number for each of the 'num' program
 * counters in 'funcs', using debug info from the binary of process 'pid'.
 * Entries that cannot be resolved keep the "[UNKNOWN]" placeholder.
 */
void arch_bfdResolveSyms(pid_t pid, funcs_t * funcs, size_t num)
{
    /* Guess what? libbfd is not multi-threading safe */
    MX_LOCK(&arch_bfd_mutex);
    DEFER(MX_UNLOCK(&arch_bfd_mutex));
    bfd_init();

    __block bfd_t bfdParams = {
        .bfdh = NULL,
        .section = NULL,
        .syms = NULL,
    };

    if (arch_bfdInit(pid, &bfdParams) == false) {
        return;
    }
    DEFER(arch_bfdDestroy(&bfdParams));

    const char *func;
    const char *file;
    unsigned int line;
    for (unsigned int i = 0; i < num; i++) {
        /* Default for entries that fail any check below. */
        snprintf(funcs[i].func, sizeof(funcs->func), "[UNKNOWN]");
        if (funcs[i].pc == NULL) {
            continue;
        }
        /* The PC must fall inside the section resolved by arch_bfdInit();
         * offsets outside [0, section size] are skipped. */
        long offset = (long)funcs[i].pc - bfdParams.section->vma;
        if ((offset < 0 || (unsigned long)offset > bfdParams.section->size)) {
            continue;
        }
        if (bfd_find_nearest_line
            (bfdParams.bfdh, bfdParams.section, bfdParams.syms, offset, &file, &func, &line)) {
            snprintf(funcs[i].func, sizeof(funcs->func), "%s", func);
            funcs[i].line = line;
        }
    }
}
/*
 * Empty the list. The node chain is detached under the lock and freed
 * afterwards, so other threads are blocked only for the unlink itself.
 * Always returns true.
 */
bool rig_list_reset(RIG_LIST l) {
	NULLCHECK_EXIT(l);

	MX_LOCK(l);
	/* Detach the whole chain and reset the bookkeeping. */
	Node chain = l->head->next;
	l->head->next = NULL;
	l->count = 0;
	MX_UNLOCK(l);

	/* The detached chain is now private: free it without the lock. */
	while (chain != NULL) {
		Node next = chain->next;
		free(chain);
		chain = next;
	}

	return (true);
}
/*
 * NOTE(review): the two functions below are kept byte-identical; only
 * review comments are added, since their goto-based cleanup, mmap'ed
 * buffers and mutex-wrapped Trie accesses are too order-sensitive for a
 * safe restyle.
 *
 * arch_sanCovParseRaw(): parse the <pid>.sancov.map and <pid>.sancov.raw
 * files emitted by sanitizer coverage, register each loaded DSO in the
 * global Trie, maintain per-DSO basic-block bitmaps, and fill the worker's
 * sanCovCnts counters. Returns true on successful parsing.
 *
 * NOTE(review): when the map file's PC-length header is neither 32 nor 64
 * the code only logs an error and continues with the 32-bit default —
 * presumably it should bail; confirm against upstream honggfuzz.
 * NOTE(review): 'fCovMap' appears to leak on the realloc-failure 'goto
 * bail' path inside the map-entry loop (fclose happens only after the
 * loop) — verify.
 * NOTE(review): memcpy() into mapData.mapName copies strlen(mapName) bytes
 * with no explicit NUL; termination relies on the {.start = 0} zero-init
 * and overflows if the DSO name exceeds the field — verify field size.
 */
static bool arch_sanCovParseRaw(honggfuzz_t * hfuzz, fuzzer_t * fuzzer) { int dataFd = -1; uint8_t *dataBuf = NULL; off_t dataFileSz = 0, pos = 0; bool is32bit = true, ret = false, isSeedFirstRun = false; char covFile[PATH_MAX] = { 0 }; /* Fuzzer local runtime data structs - need free() before exit */ uint64_t *startMapsIndex = NULL; memMap_t *mapsBuf = NULL; /* Local counters */ uint64_t nBBs = 0; /* Total BB hits found in raw file */ uint64_t nZeroBBs = 0; /* Number of non-hit instrumented BBs */ uint64_t mapsNum = 0; /* Total number of entries in map file */ uint64_t noCovMapsNum = 0; /* Loaded DSOs not compiled with coverage */ /* File line-by-line read help buffers */ char *pLine = NULL; size_t lineSz = 0; /* Coverage data analysis starts by parsing map file listing */ snprintf(covFile, sizeof(covFile), "%s/%s/%d.sancov.map", hfuzz->workDir, _HF_SANCOV_DIR, fuzzer->pid); if (!files_exists(covFile)) { LOG_D("sancov map file not found"); return false; } FILE *fCovMap = fopen(covFile, "rb"); if (fCovMap == NULL) { PLOG_E("Couldn't open '%s' - R/O mode", covFile); goto bail; } /* First line contains PC length (32/64-bit) */ if (getline(&pLine, &lineSz, fCovMap) == -1) { LOG_E("Invalid map file '%s'", covFile); fclose(fCovMap); goto bail; } int pcLen = atoi(pLine); if (pcLen == 32) { is32bit = true; } else if (pcLen == 64) { is32bit = false; } else { LOG_E("Invalid PC length (%d) in map file '%s'", pcLen, covFile); } /* Interaction with global Trie should mutex wrap to avoid threads races */ MX_LOCK(&hfuzz->sanCov_mutex); { /* If runtime data destroy flag, new seed has been picked so destroy old & create new Trie */ if (hfuzz->clearCovMetadata == true) { /* Since this path is invoked on first run too, destroy old Trie only if exists */ if (hfuzz->covMetadata != NULL) { arch_trieDestroy(hfuzz->covMetadata); } arch_trieCreate(&hfuzz->covMetadata); hfuzz->clearCovMetadata = false; isSeedFirstRun = true; } } MX_UNLOCK(&hfuzz->sanCov_mutex); /* See if #maps is available 
from previous run to avoid realloc inside loop */ uint64_t prevMapsNum = __sync_fetch_and_add(&hfuzz->sanCovCnts.dsoCnt, 0UL); if (prevMapsNum > 0) { if ((mapsBuf = malloc(prevMapsNum * sizeof(memMap_t))) == NULL) { PLOG_E("malloc failed (sz=%" PRIu64 ")", prevMapsNum * sizeof(memMap_t)); /* This will be picked-up later from realloc branch */ prevMapsNum = 0; } } /* Iterate map entries */ for (;;) { if (getline(&pLine, &lineSz, fCovMap) == -1) { break; } /* Trim trailing whitespaces, not sure if needed copied from upstream sancov.py */ char *lineEnd = pLine + strlen(pLine) - 1; while (lineEnd > pLine && isspace(*lineEnd)) { lineEnd--; } *(lineEnd + 1) = 0; /* * Each line has following format: * Start End Base bin/DSO name * b5843000 b584e6ac b5843000 liblog.so */ memMap_t mapData = {.start = 0 }; char *savePtr = NULL; mapData.start = strtoull(strtok_r(pLine, " ", &savePtr), NULL, 16); mapData.end = strtoull(strtok_r(NULL, " ", &savePtr), NULL, 16); mapData.base = strtoull(strtok_r(NULL, " ", &savePtr), NULL, 16); char *mapName = strtok_r(NULL, " ", &savePtr); memcpy(mapData.mapName, mapName, strlen(mapName)); /* Interaction with global Trie should mutex wrap to avoid threads races */ MX_LOCK(&hfuzz->sanCov_mutex); { /* Add entry to Trie with zero data if not already */ if (!arch_trieSearch(hfuzz->covMetadata->children, mapData.mapName)) { arch_trieAdd(&hfuzz->covMetadata, mapData.mapName); } } MX_UNLOCK(&hfuzz->sanCov_mutex); /* If not DSO number history (first run) or new DSO loaded, realloc local maps metadata buf */ if (prevMapsNum == 0 || prevMapsNum < mapsNum) { if ((mapsBuf = realloc(mapsBuf, (size_t) (mapsNum + 1) * sizeof(memMap_t))) == NULL) { PLOG_E("realloc failed (sz=%" PRIu64 ")", (mapsNum + 1) * sizeof(memMap_t)); goto bail; } } /* Add entry to local maps metadata array */ memcpy(&mapsBuf[mapsNum], &mapData, sizeof(memMap_t)); /* Increase loaded maps counter (includes non-instrumented DSOs too) */ mapsNum++; } /* Delete .sancov.map file */ 
/*
 * Map listing fully parsed. Below: close and delete the map file, build a
 * sorted index of map start addresses for interpolation search, mmap() the
 * raw BB-hit list and attribute every non-zero address to its DSO's bitmap
 * in the global Trie, then sum the counters into the worker's runtime data.
 */
fclose(fCovMap); unlink(covFile); /* Create a quick index array with maps start addresses */ startMapsIndex = malloc(mapsNum * sizeof(uint64_t)); if (startMapsIndex == NULL) { PLOG_E("malloc failed (sz=%" PRIu64 ")", mapsNum * sizeof(uint64_t)); goto bail; } /* Sort quick maps index */ qsort(mapsBuf, mapsNum, sizeof(memMap_t), arch_qsortCmp); for (size_t i = 0; i < mapsNum; i++) { startMapsIndex[i] = mapsBuf[i].start; } /* mmap() .sancov.raw file */ snprintf(covFile, sizeof(covFile), "%s/%s/%d.sancov.raw", hfuzz->workDir, _HF_SANCOV_DIR, fuzzer->pid); dataBuf = files_mapFile(covFile, &dataFileSz, &dataFd, false); if (dataBuf == NULL) { LOG_E("Couldn't open and map '%s' in R/O mode", covFile); goto bail; } /* * Avoid cost of size checks inside raw data read loop by defining the read function * & pivot size based on PC length. */ uint64_t(*pReadRawBBAddrFunc) (const uint8_t *) = NULL; uint8_t pivot = 0; if (is32bit) { pReadRawBBAddrFunc = &util_getUINT32; pivot = 4; } else { pReadRawBBAddrFunc = &util_getUINT64; pivot = 8; } /* * Take advantage of data locality (next processed addr is very likely to belong * to same map) to avoid Trie node search for each read entry. 
*/ node_t *curMap = NULL; uint64_t prevIndex = 0; /* Iterate over data buffer containing list of hit BB addresses */ while (pos < dataFileSz) { uint64_t bbAddr = pReadRawBBAddrFunc(dataBuf + pos); pos += pivot; /* Don't bother for zero BB addr (inserted checks without hit) */ if (bbAddr == 0x0) { nZeroBBs++; continue; } else { /* Find best hit based on start addr & verify range for errors */ uint64_t bestFit = arch_interpSearch(startMapsIndex, mapsNum, bbAddr); if (bbAddr >= mapsBuf[bestFit].start && bbAddr < mapsBuf[bestFit].end) { /* Increase exe/DSO total BB counter */ mapsBuf[bestFit].bbCnt++; /* Update current Trie node if map changed */ if (curMap == NULL || (prevIndex != bestFit)) { prevIndex = bestFit; /* Interaction with global Trie should mutex wrap to avoid threads races */ MX_LOCK(&hfuzz->sanCov_mutex); { curMap = arch_trieSearch(hfuzz->covMetadata->children, mapsBuf[bestFit].mapName); if (curMap == NULL) { LOG_E("Corrupted Trie - '%s' not found", mapsBuf[bestFit].mapName); MX_UNLOCK(&hfuzz->sanCov_mutex); continue; } /* Maintain bitmaps only for exec/DSOs with coverage enabled - allocate on first use */ if (curMap->data.pBM == NULL) { LOG_D("Allocating bitmap for map '%s'", mapsBuf[bestFit].mapName); curMap->data.pBM = arch_newBitmap(_HF_BITMAP_SIZE); /* * If bitmap allocation failed, unset cached Trie node ptr * to execute this selection branch again. 
*/ if (curMap->data.pBM == NULL) { curMap = NULL; MX_UNLOCK(&hfuzz->sanCov_mutex); continue; } } } MX_UNLOCK(&hfuzz->sanCov_mutex); } /* If new relative BB addr update DSO's bitmap */ uint32_t relAddr = (uint32_t) (bbAddr - mapsBuf[bestFit].base); if (!arch_queryBitmap(curMap->data.pBM, relAddr)) { /* Interaction with global Trie should mutex wrap to avoid threads races */ MX_LOCK(&hfuzz->sanCov_mutex); { arch_setBitmap(curMap->data.pBM, relAddr); } MX_UNLOCK(&hfuzz->sanCov_mutex); /* Also increase new BBs counter at worker's thread runtime data */ mapsBuf[bestFit].newBBCnt++; } } else { /* * Normally this should never get executed. If hit, sanitizer * coverage data collection come across some kind of bug. */ LOG_E("Invalid BB addr (%" PRIx64 ") at offset %ld", bbAddr, pos); } } nBBs++; } /* Finally iterate over all instrumented maps to sum-up the number of newly met BB addresses */ for (uint64_t i = 0; i < mapsNum; i++) { if (mapsBuf[i].bbCnt > 0 && !isSeedFirstRun) { fuzzer->sanCovCnts.newBBCnt += mapsBuf[i].newBBCnt; } else { noCovMapsNum++; } } /* Successful parsing - update fuzzer worker's counters */ fuzzer->sanCovCnts.hitBBCnt = nBBs; fuzzer->sanCovCnts.totalBBCnt = nBBs + nZeroBBs; fuzzer->sanCovCnts.dsoCnt = mapsNum; fuzzer->sanCovCnts.iDsoCnt = mapsNum - noCovMapsNum; /* Instrumented DSOs */ ret = true; bail: unlink(covFile); if (dataBuf) { munmap(dataBuf, dataFileSz); } if (dataFd != -1) { close(dataFd); } if (mapsBuf) { free(mapsBuf); } if (startMapsIndex) { free(startMapsIndex); } if (pLine) { free(pLine); } return ret; } static bool arch_sanCovParse(honggfuzz_t * hfuzz, fuzzer_t * fuzzer) { int dataFd = -1; uint8_t *dataBuf = NULL; off_t dataFileSz = 0, pos = 0; bool is32bit = true; char covFile[PATH_MAX] = { 0 }; DIR *pSanCovDir = NULL; bool ret = false; snprintf(covFile, sizeof(covFile), "%s/%s/%s.%d.sancov", hfuzz->workDir, _HF_SANCOV_DIR, files_basename(hfuzz->cmdline[0]), fuzzer->pid); if (!files_exists(covFile)) { LOG_D("Target sancov file not 
found"); return false; } /* Local cache file suffix to use for file search of worker pid data */ char pidFSuffix[13] = { 0 }; snprintf(pidFSuffix, sizeof(pidFSuffix), "%d.sancov", fuzzer->pid); /* Total BBs counter summarizes all DSOs */ uint64_t nBBs = 0; /* Iterate sancov dir for files generated against fuzzer pid */ snprintf(covFile, sizeof(covFile), "%s/%s", hfuzz->workDir, _HF_SANCOV_DIR); pSanCovDir = opendir(covFile); struct dirent *pDir = NULL; while ((pDir = readdir(pSanCovDir)) != NULL) { /* Parse files with worker's PID */ if (strstr(pDir->d_name, pidFSuffix)) { snprintf(covFile, sizeof(covFile), "%s/%s/%s", hfuzz->workDir, _HF_SANCOV_DIR, pDir->d_name); dataBuf = files_mapFile(covFile, &dataFileSz, &dataFd, false); if (dataBuf == NULL) { LOG_E("Couldn't open and map '%s' in R/O mode", covFile); goto bail; } if (dataFileSz < 8) { LOG_E("Coverage data file too short"); goto bail; } /* Check magic values & derive PC length */ uint64_t magic = util_getUINT64(dataBuf); if (magic == kMagic32) { is32bit = true; } else if (magic == kMagic64) { is32bit = false; } else { LOG_E("Invalid coverage data file"); goto bail; } pos += 8; /* * Avoid cost of size checks inside raw data read loop by defining the read function * & pivot size based on PC length. */ uint64_t(*pReadRawBBAddrFunc) (const uint8_t *) = NULL; uint8_t pivot = 0; if (is32bit) { pReadRawBBAddrFunc = &util_getUINT32; pivot = 4; } else { pReadRawBBAddrFunc = &util_getUINT64; pivot = 8; } while (pos < dataFileSz) { uint32_t bbAddr = pReadRawBBAddrFunc(dataBuf + pos); pos += pivot; if (bbAddr == 0x0) { continue; } nBBs++; } } } /* Successful parsing - update fuzzer worker counters */ fuzzer->sanCovCnts.hitBBCnt = nBBs; ret = true; bail: unlink(covFile); if (dataBuf) { munmap(dataBuf, dataFileSz); } if (dataFd != -1) { close(dataFd); } if (pSanCovDir) { closedir(pSanCovDir); } return ret; }
/*
 * Prepare the fuzz-input buffer for the next iteration from the shared
 * "dynamic best" file (coverage-feedback mode), optionally mangling it,
 * and write it out to fuzzer->fileName. Returns false on read/write error.
 *
 * NOTE(review): code kept byte-identical (only this comment added) — the
 * locking here is deliberately asymmetric and too order-sensitive for a
 * safe restyle:
 * - when 'isDynFileLocked' is set (first run of a new seed),
 *   workersBlock_mutex is intentionally left LOCKED across the return,
 *   presumably released elsewhere once the seed's first execution ends,
 *   so other workers measure coverage against the same baseline — confirm
 *   against the callers before touching this.
 * - consequently the 'return false' after a files_writeBufToFile() failure
 *   can also exit with workersBlock_mutex held — verify this is handled.
 */
static bool fuzz_prepareFileDynamically(honggfuzz_t * hfuzz, fuzzer_t * fuzzer, int rnd_index) { MX_LOCK(&hfuzz->dynamicFile_mutex); /* If max dynamicFile iterations counter, pick new seed file when working with input file corpus */ if (hfuzz->inputFile && __sync_fetch_and_add(&hfuzz->dynFileIterExpire, 0UL) >= _HF_MAX_DYNFILE_ITER) { size_t fileSz = files_readFileToBufMax(hfuzz->files[rnd_index], hfuzz->dynamicFileBest, hfuzz->maxFileSz); if (fileSz == 0) { MX_UNLOCK(&hfuzz->dynamicFile_mutex); LOG_E("Couldn't read '%s'", hfuzz->files[rnd_index]); return false; } hfuzz->dynamicFileBestSz = fileSz; /* Reset counter since new seed pick */ __sync_fetch_and_and(&hfuzz->dynFileIterExpire, 0UL); fuzz_resetFeedbackCnts(hfuzz); /* * In order to have accurate comparison base for coverage, first iteration * of a new seed is executed without mangling. Also workersBlock_mutex mutex * is maintain until execution is finished to ensure that other threads will * work against the same coverage data vs. original seed. */ hfuzz->isDynFileLocked = true; } else if (hfuzz->inputFile == NULL && (fuzz_isPerfCntsSet(hfuzz) == false)) { /* * When working with an empty input file corpus (allowed if perf feedback enabled for Linux archs), * first iteration is executed without mangling. First iteration need to be executed by one thread * blocking other workers from continuing until finished. */ hfuzz->isDynFileLocked = true; } if (hfuzz->dynamicFileBestSz > hfuzz->maxFileSz) { LOG_F("Current BEST file Sz > maxFileSz (%zu > %zu)", hfuzz->dynamicFileBestSz, hfuzz->maxFileSz); } fuzzer->dynamicFileSz = hfuzz->dynamicFileBestSz; memcpy(fuzzer->dynamicFile, hfuzz->dynamicFileBest, hfuzz->dynamicFileBestSz); MX_UNLOCK(&hfuzz->dynamicFile_mutex); /* * true isDynFileLocked indicates first run for a new seed, so skip mangling * without unlocking threads block mutex. 
*/ MX_LOCK(&hfuzz->workersBlock_mutex); if (hfuzz->isDynFileLocked) { goto skipMangling; } MX_UNLOCK(&hfuzz->workersBlock_mutex); /* * if flip rate is 0.0, early abort file mangling. This will leave perf counters * with values equal to dry runs against input corpus. */ if (hfuzz->flipRate == 0.0L) { goto skipMangling; } mangle_Resize(hfuzz, fuzzer->dynamicFile, &fuzzer->dynamicFileSz); mangle_mangleContent(hfuzz, fuzzer->dynamicFile, fuzzer->dynamicFileSz); skipMangling: if (files_writeBufToFile (fuzzer->fileName, fuzzer->dynamicFile, fuzzer->dynamicFileSz, O_WRONLY | O_CREAT | O_EXCL | O_TRUNC) == false) { LOG_E("Couldn't write buffer to file '%s'", fuzzer->fileName); return false; } return true; }