/*
 * Verify that all source files recorded for one manifest object are still
 * unchanged. Two memoization tables are shared across objects:
 * stated_files (path -> struct file_stats) caches stat results and
 * hashed_files (path -> struct file_hash) caches content hashes, so each
 * path is stat'ed/hashed at most once per manifest lookup.
 * Returns 1 if every file matches, 0 otherwise.
 */
static int verify_object(struct conf *conf, struct manifest *mf,
                         struct object *obj, struct hashtable *stated_files,
                         struct hashtable *hashed_files)
{
	for (uint32_t i = 0; i < obj->n_file_info_indexes; i++) {
		struct file_info *fi = &mf->file_infos[obj->file_info_indexes[i]];
		char *path = mf->files[fi->index];

		/* Cheap check first: stat the file (memoized per path). */
		struct file_stats *st = hashtable_search(stated_files, path);
		if (!st) {
			struct stat file_stat;
			if (x_stat(path, &file_stat) != 0) {
				return 0;
			}
			st = x_malloc(sizeof(*st));
			st->size = file_stat.st_size;
			st->mtime = file_stat.st_mtime;
			st->ctime = file_stat.st_ctime;
			/* The table owns both the duplicated key and the value. */
			hashtable_insert(stated_files, x_strdup(path), st);
		}

		if (fi->size != st->size) {
			return 0;
		}

		/* Optionally accept a pure mtime/ctime match without rehashing. */
		if (conf->sloppiness & SLOPPY_FILE_STAT_MATCHES) {
			if (fi->mtime == st->mtime && fi->ctime == st->ctime) {
				cc_log("mtime/ctime hit for %s", path);
				continue;
			} else {
				cc_log("mtime/ctime miss for %s", path);
			}
		}

		/* Expensive check: hash the file contents (memoized per path). */
		struct file_hash *actual = hashtable_search(hashed_files, path);
		if (!actual) {
			struct mdfour hash;
			hash_start(&hash);
			int result = hash_source_code_file(conf, &hash, path);
			if (result & HASH_SOURCE_CODE_ERROR) {
				cc_log("Failed hashing %s", path);
				return 0;
			}
			if (result & HASH_SOURCE_CODE_FOUND_TIME) {
				/* __TIME__ makes the hash time-dependent; treat as a miss. */
				return 0;
			}
			actual = x_malloc(sizeof(*actual));
			hash_result_as_bytes(&hash, actual->hash);
			actual->size = hash.totalN;
			hashtable_insert(hashed_files, x_strdup(path), actual);
		}
		if (memcmp(fi->hash, actual->hash, mf->hash_size) != 0
		    || fi->size != actual->size) {
			return 0;
		}
	}

	return 1;
}
/*
 * Try to get the object hash from a manifest file. Caller frees. Returns NULL
 * on failure.
 */
struct file_hash *
manifest_get(struct conf *conf, const char *manifest_path)
{
	int fd;
	gzFile f = NULL;
	struct manifest *mf = NULL;
	struct hashtable *hashed_files = NULL; /* path --> struct file_hash */
	struct hashtable *stated_files = NULL; /* path --> struct file_stats */
	uint32_t i;
	struct file_hash *fh = NULL;

	fd = open(manifest_path, O_RDONLY | O_BINARY);
	if (fd == -1) {
		/* Cache miss. */
		cc_log("No such manifest file");
		goto out;
	}
	/*
	 * gzdopen takes ownership of fd on success; we only close fd ourselves
	 * on the failure path below.
	 */
	f = gzdopen(fd, "rb");
	if (!f) {
		close(fd);
		cc_log("Failed to gzdopen manifest file");
		goto out;
	}
	mf = read_manifest(f);
	if (!mf) {
		cc_log("Error reading manifest file");
		goto out;
	}

	hashed_files = create_hashtable(1000, hash_from_string, strings_equal);
	stated_files = create_hashtable(1000, hash_from_string, strings_equal);

	/* Check newest object first since it's a bit more likely to match. */
	/* Loop counts down via i-1 to avoid underflowing the unsigned index. */
	for (i = mf->n_objects; i > 0; i--) {
		if (verify_object(conf, mf, &mf->objects[i - 1],
		                  stated_files, hashed_files)) {
			fh = x_malloc(sizeof(*fh));
			*fh = mf->objects[i - 1].hash; /* struct copy */
			goto out;
		}
	}

out:
	/* hashtable_destroy(.., 1) also frees the stored values. */
	if (hashed_files) {
		hashtable_destroy(hashed_files, 1);
	}
	if (stated_files) {
		hashtable_destroy(stated_files, 1);
	}
	if (f) {
		gzclose(f);
	}
	if (mf) {
		free_manifest(mf);
	}
	return fh;
}
/*
 * The main ccache driver function.
 *
 * NOTE(review): the control flow relies on failed() never returning (it
 * execs the real compiler — see failed()) and on a successful from_cache()
 * presumably not returning either — confirm against from_cache()'s
 * definition.
 */
static void ccache(int argc, char *argv[])
{
	/* find the real compiler */
	find_compiler(argc, argv);

	/* use the real compiler if HOME is not set */
	if (!cache_dir) {
		cc_log("Unable to determine home directory\n");
		cc_log("ccache is disabled\n");
		failed();
	}

	/* we might be disabled */
	if (getenv("CCACHE_DISABLE")) {
		cc_log("ccache is disabled\n");
		failed();
	}

	if (getenv("CCACHE_STRIPC")) {
		strip_c_option = 1;
	}

	if (getenv("CCACHE_UNIFY")) {
		enable_unify = 1;
	}

	detect_swig();

	/* process argument list, returning a new set of arguments for
	   pre-processing */
	process_args(orig_args->argc, orig_args->argv);

	/* run with -E to find the hash */
	find_hash(stripped_args);

	/* if we can return from cache at this point then do */
	from_cache(1);

	/* In read-only mode a miss must not populate the cache. */
	if (getenv("CCACHE_READONLY")) {
		cc_log("read-only set - doing real compile\n");
		failed();
	}

	/* run real compiler, sending output to cache */
	to_cache(stripped_args);

	/* return from cache */
	from_cache(0);

	/* oh oh! The second from_cache() right after to_cache() should have
	   succeeded; reaching this point is an internal error. */
	cc_log("secondary from_cache failed!\n");
	stats_update(STATS_ERROR);
	failed();
}
// Clean up one cache subdirectory. void clean_up_dir(struct conf *conf, const char *dir, double limit_multiple) { cc_log("Cleaning up cache directory %s", dir); // When "max files" or "max cache size" is reached, one of the 16 cache // subdirectories is cleaned up. When doing so, files are deleted (in LRU // order) until the levels are below limit_multiple. cache_size_threshold = (uint64_t)round(conf->max_size * limit_multiple / 16); files_in_cache_threshold = (size_t)round(conf->max_files * limit_multiple / 16); num_files = 0; cache_size = 0; files_in_cache = 0; // Build a list of files. traverse(dir, traverse_fn); // Clean the cache. cc_log("Before cleanup: %.0f KiB, %.0f files", (double)cache_size / 1024, (double)files_in_cache); bool cleaned = sort_and_clean(); cc_log("After cleanup: %.0f KiB, %.0f files", (double)cache_size / 1024, (double)files_in_cache); if (cleaned) { cc_log("Cleaned up cache directory %s", dir); stats_add_cleanup(dir, 1); } stats_set_sizes(dir, files_in_cache, cache_size); // Free it up. for (unsigned i = 0; i < num_files; i++) { free(files[i]->fname); free(files[i]); files[i] = NULL; } if (files) { free(files); } allocated = 0; files = NULL; num_files = 0; cache_size = 0; files_in_cache = 0; }
/* execute a compiler backend, capturing all output to the given paths the full path to the compiler to run is in argv[0] */ int execute(char **argv, const char *path_stdout, const char *path_stderr) { pid_t pid; int status; cc_log_argv("Executing ", argv); pid = fork(); if (pid == -1) fatal("Failed to fork: %s", strerror(errno)); if (pid == 0) { int fd; tmp_unlink(path_stdout); fd = open(path_stdout, O_WRONLY|O_CREAT|O_TRUNC|O_EXCL|O_BINARY, 0666); if (fd == -1) { cc_log("Error creating %s: %s", path_stdout, strerror(errno)); exit(FAILED_TO_CREATE_STDOUT); } dup2(fd, 1); close(fd); tmp_unlink(path_stderr); fd = open(path_stderr, O_WRONLY|O_CREAT|O_TRUNC|O_EXCL|O_BINARY, 0666); if (fd == -1) { cc_log("Error creating %s: %s", path_stderr, strerror(errno)); exit(FAILED_TO_CREATE_STDERR); } dup2(fd, 2); close(fd); exit(execv(argv[0], argv)); } if (waitpid(pid, &status, 0) != pid) { fatal("waitpid failed: %s", strerror(errno)); } if (WEXITSTATUS(status) == 0 && WIFSIGNALED(status)) { return -1; } if (status == FAILED_TO_CREATE_STDOUT) { fatal("Could not create %s (permission denied?)", path_stdout); } else if (status == FAILED_TO_CREATE_STDERR) { fatal("Could not create %s (permission denied?)", path_stderr); } return WEXITSTATUS(status); }
// Wipe one cache subdirectory. void wipe_dir(const char *dir) { cc_log("Clearing out cache directory %s", dir); files_in_cache = 0; traverse(dir, wipe_fn); if (files_in_cache > 0) { cc_log("Cleared out cache directory %s", dir); stats_add_cleanup(dir, 1); } files_in_cache = 0; }
/*
 * Write out a stats file. The counters are written to a temporary file which
 * is then renamed into place so readers never see a partially written file.
 */
void
stats_write(const char *path, struct counters *counters)
{
	size_t i;
	char *tmp_file;
	FILE *f;

	tmp_file = format("%s.tmp.%s", path, tmp_string());
	f = fopen(tmp_file, "wb");
	if (!f && errno == ENOENT) {
		/* The stats directory may not exist yet; create it and retry. */
		if (create_parent_dirs(path) == 0) {
			f = fopen(tmp_file, "wb");
		}
	}
	if (!f) {
		cc_log("Failed to open %s", tmp_file);
		goto end;
	}
	for (i = 0; i < counters->size; i++) {
		if (fprintf(f, "%u\n", counters->data[i]) < 0) {
			fatal("Failed to write to %s", tmp_file);
		}
	}
	/*
	 * Bug fix: fclose() flushes buffered data and can fail (e.g. on a full
	 * disk); previously its return value was ignored, which could silently
	 * install a truncated stats file.
	 */
	if (fclose(f) == EOF) {
		fatal("Failed to write to %s", tmp_file);
	}
	x_rename(tmp_file, path);

end:
	free(tmp_file);
}
/* * Hash a string. Returns a bitmask of HASH_SOURCE_CODE_* results. */ int hash_source_code_string( struct conf *conf, struct mdfour *hash, const char *str, size_t len, const char *path) { int result = HASH_SOURCE_CODE_OK; /* * Check for __DATE__ and __TIME__ if the sloppiness configuration tells us * we should. */ if (!(conf->sloppiness & SLOPPY_TIME_MACROS)) { result |= check_for_temporal_macros(str, len); } /* * Hash the source string. */ hash_buffer(hash, str, len); if (result & HASH_SOURCE_CODE_FOUND_DATE) { /* * Make sure that the hash sum changes if the (potential) expansion of * __DATE__ changes. */ time_t t = time(NULL); struct tm *now = localtime(&t); cc_log("Found __DATE__ in %s", path); hash_delimiter(hash, "date"); hash_buffer(hash, &now->tm_year, sizeof(now->tm_year)); hash_buffer(hash, &now->tm_mon, sizeof(now->tm_mon)); hash_buffer(hash, &now->tm_mday, sizeof(now->tm_mday)); } if (result & HASH_SOURCE_CODE_FOUND_TIME) { /* * We don't know for sure that the program actually uses the __TIME__ * macro, but we have to assume it anyway and hash the time stamp. However, * that's not very useful since the chance that we get a cache hit later * the same second should be quite slim... So, just signal back to the * caller that __TIME__ has been found so that the direct mode can be * disabled. */ cc_log("Found __TIME__ in %s", path); } return result; }
/*
 * Release the lockfile for the given path. Assumes that we are the legitimate
 * owner (otherwise someone else's lock would be removed).
 */
void
lockfile_release(const char *path)
{
	char *lockfile_path = format("%s.lock", path);
	cc_log("Releasing lock %s", lockfile_path);
	unlink(lockfile_path);
	free(lockfile_path);
}
/* Remove one cache file and adjust the global size/count accounting. */
static void
delete_file(const char *path, size_t size)
{
	if (unlink(path) != 0) {
		/* A vanished file is not an error: someone else may have
		   cleaned it up concurrently. */
		if (errno != ENOENT) {
			cc_log("Failed to unlink %s (%s)", path, strerror(errno));
		}
		return;
	}
	cache_size -= size;
	files_in_cache--;
}
/* Make a copy of stderr that will not be cached, so things like
   distcc can send networking errors to it. */
static void setup_uncached_err(void)
{
	char *buf;
	int uncached_fd;

	uncached_fd = dup(2);
	if (uncached_fd == -1) {
		cc_log("dup(2) failed\n");
		failed();
	}

	/* leak a pointer to the environment */
	x_asprintf(&buf, "UNCACHED_ERR_FD=%d", uncached_fd);

	/*
	 * Bug fix: POSIX only guarantees that putenv() returns a nonzero value
	 * on failure, not specifically -1, so test != 0 rather than == -1.
	 */
	if (putenv(buf) != 0) {
		cc_log("putenv failed\n");
		failed();
	}
}
// Copy the current log memory buffer to an output file. void cc_dump_debug_log_buffer(const char *path) { FILE *file = fopen(path, "w"); if (file) { (void) fwrite(debug_log_buffer, 1, debug_log_size, file); fclose(file); } else { cc_log("Failed to open %s: %s", path, strerror(errno)); } }
/* Hash the contents of the named file. Returns false if it can't be read. */
bool
hash_file(struct hash *hash, const char *fname)
{
	int fd = open(fname, O_RDONLY|O_BINARY);
	if (fd == -1) {
		cc_log("Failed to open %s: %s", fname, strerror(errno));
		return false;
	}

	bool success = hash_fd(hash, fd);
	close(fd);
	return success;
}
/* Delete the sibling file "<base><extension>" if it exists, updating the
   cache accounting (sizes are tracked in KiB, hence the /1024). */
static void
delete_sibling_file(const char *base, const char *extension)
{
	char *path;
	x_asprintf(&path, "%s%s", base, extension);

	struct stat st;
	if (lstat(path, &st) == 0) {
		delete_file(path, file_size(&st) / 1024);
	} else if (errno != ENOENT) {
		cc_log("Failed to stat %s (%s)", path, strerror(errno));
	}
	free(path);
}
/* Delete the sibling file "<base><extension>" if it exists, passing its
   size on to delete_file() for the cache accounting. */
static void
delete_sibling_file(const char *base, const char *extension)
{
	char *path = format("%s%s", base, extension);

	struct stat st;
	if (lstat(path, &st) == 0) {
		delete_file(path, file_size(&st));
	} else if (errno != ENOENT) {
		cc_log("Failed to stat %s: %s", path, strerror(errno));
	}
	free(path);
}
// Something went badly wrong! Format the message, log it, print it to
// stderr and exit.
void fatal(const char *format, ...)
{
	char msg[8192];

	va_list ap;
	va_start(ap, format);
	vsnprintf(msg, sizeof(msg), format, ap);
	va_end(ap);

	cc_log("FATAL: %s", msg);
	fprintf(stderr, "ccache: error: %s\n", msg);

	x_exit(1);
}
static void delete_file(const char *path, size_t size, bool update_counters) { bool deleted = x_try_unlink(path) == 0; if (!deleted && errno != ENOENT && errno != ESTALE) { cc_log("Failed to unlink %s (%s)", path, strerror(errno)); } else if (update_counters) { // The counters are intentionally subtracted even if there was no file to // delete since the final cache size calculation will be incorrect if they // aren't. (This can happen when there are several parallel ongoing // cleanups of the same directory.) cache_size -= size; files_in_cache--; } }
/*
 * Verify that all source files recorded for one manifest object still hash
 * to the values stored in the manifest, notifying the cloud hooks about each
 * include file as it is verified. hashed_files memoizes results per path;
 * note that the cached value is an anonymous struct carrying both the
 * hash_source_code_file() result bits and the file hash.
 * Returns 1 on a full match, 0 otherwise.
 */
static int verify_object(struct conf *conf, struct manifest *mf,
                         struct object *obj, struct hashtable *hashed_files)
{
	uint32_t i;
	struct file_info *fi;
	struct {int result; struct file_hash fh;} *actual;
	struct mdfour hash;
	int result;

	for (i = 0; i < obj->n_file_info_indexes; i++) {
		fi = &mf->file_infos[obj->file_info_indexes[i]];
		actual = hashtable_search(hashed_files, mf->files[fi->index]);
		if (!actual) {
			actual = x_malloc(sizeof(*actual));
			hash_start(&hash);
			result = hash_source_code_file(conf, &hash, mf->files[fi->index]);
			if (result & HASH_SOURCE_CODE_ERROR) {
				cc_log("Failed hashing %s", mf->files[fi->index]);
				/* Undo any include notifications made so far. */
				cloud_hook_reset_includes();
				free(actual);
				return 0;
			}
			if (result & HASH_SOURCE_CODE_FOUND_TIME) {
				/* __TIME__ makes the hash time-dependent; treat as a miss. */
				cloud_hook_reset_includes();
				free(actual);
				return 0;
			}
			actual->result = result;
			hash_result_as_bytes(&hash, actual->fh.hash);
			actual->fh.size = hash.totalN;
			/* The table owns the duplicated key and the cached entry. */
			hashtable_insert(hashed_files, x_strdup(mf->files[fi->index]),
			                 actual);
		}
		if (memcmp(fi->hash, actual->fh.hash, mf->hash_size) != 0
		    || fi->size != actual->fh.size) {
			cloud_hook_reset_includes();
			return 0;
		}
		/* Passing the hash here is an optimization, but it's not the right
		   hash if a time macro was present (nonzero result bits). */
		cloud_hook_include_file(mf->files[fi->index],
		                        actual->result ? NULL : &actual->fh);
	}

	return 1;
}
// Find an executable by name in $PATH. Exclude any that are links to // exclude_name. char * find_executable(const char *name, const char *exclude_name) { if (is_absolute_path(name)) { return x_strdup(name); } char *path = conf->path; if (str_eq(path, "")) { path = getenv("PATH"); } if (!path) { cc_log("No PATH variable"); return NULL; } return find_executable_in_path(name, exclude_name, path); }
/*
 * Update cached file sizes and count; helper function for to_cache().
 *
 * With zlib enabled the cached file's on-disk (compressed) size differs
 * from what the caller already knows, so an extra stat is done here;
 * otherwise the caller's stat result in *pstat is used as-is.
 */
static void to_cache_stats_helper(struct stat *pstat, char *cached_filename,
                                  char *tmp_outfiles, int *files_size,
                                  int *cached_files_count)
{
#if ENABLE_ZLIB
	/* do an extra stat on the cache file for the size statistics */
	if (stat(cached_filename, pstat) != 0) {
		cc_log("failed to stat cache files - %s\n", strerror(errno));
		stats_update(STATS_ERROR);
		/* Remove the partially written temporary output, if any. */
		if (tmp_outfiles) {
			unlink(tmp_outfiles);
		}
		failed();
	}
#else
	/* Parameters only needed in the zlib path; silence warnings. */
	(void)cached_filename;
	(void)tmp_outfiles;
#endif
	(*files_size) += file_size(pstat);
	(*cached_files_count)++;
}
/*
 * Serialize a manifest to the gzip stream f. Returns 1 on success, 0 on
 * write error (the WRITE_* macros jump to the error label on failure).
 */
static int write_manifest(gzFile f, const struct manifest *mf)
{
	/*
	 * Bug fix: the counts (n_files, n_file_infos, n_objects,
	 * n_file_info_indexes) are 32-bit values written as 4-byte fields, so
	 * the loop counters must be 32-bit as well. The previous uint16_t
	 * counters would wrap at 65536 entries, looping forever and/or writing
	 * a corrupt manifest.
	 */
	uint32_t i, j;

	/* Header: magic, version, hash size, reserved. */
	WRITE_INT(4, MAGIC);
	WRITE_INT(1, MANIFEST_VERSION);
	WRITE_INT(1, 16);
	WRITE_INT(2, 0);

	WRITE_INT(4, mf->n_files);
	for (i = 0; i < mf->n_files; i++) {
		WRITE_STR(mf->files[i]);
	}

	WRITE_INT(4, mf->n_file_infos);
	for (i = 0; i < mf->n_file_infos; i++) {
		WRITE_INT(4, mf->file_infos[i].index);
		WRITE_BYTES(mf->hash_size, mf->file_infos[i].hash);
		WRITE_INT(4, mf->file_infos[i].size);
		WRITE_INT(8, mf->file_infos[i].mtime);
		WRITE_INT(8, mf->file_infos[i].ctime);
	}

	WRITE_INT(4, mf->n_objects);
	for (i = 0; i < mf->n_objects; i++) {
		WRITE_INT(4, mf->objects[i].n_file_info_indexes);
		for (j = 0; j < mf->objects[i].n_file_info_indexes; j++) {
			WRITE_INT(4, mf->objects[i].file_info_indexes[j]);
		}
		WRITE_BYTES(mf->hash_size, mf->objects[i].hash.hash);
		WRITE_INT(4, mf->objects[i].hash.size);
	}

	return 1;

error:
	cc_log("Error writing to manifest file");
	return 0;
}
/* find the real compiler. We just search the PATH to find a executable of the same name that isn't a link to ourselves */ static void find_compiler(int argc, char **argv) { char *base; char *path; orig_args = args_init(argc, argv); base = str_basename(argv[0]); /* we might be being invoked like "ccache gcc -c foo.c" */ if (strcmp(base, MYNAME) == 0) { args_remove_first(orig_args); free(base); if (strchr(argv[1],'/') #ifdef _WIN32 || strchr(argv[1],'\\') #endif ) { /* a full path was given */ return; } base = str_basename(argv[1]); } /* support user override of the compiler */ if ((path=getenv("CCACHE_CC"))) { base = x_strdup(path); } orig_args->argv[0] = find_executable(base, MYNAME); /* can't find the compiler! */ if (!orig_args->argv[0]) { stats_update(STATS_COMPILER); cc_log("could not find compiler (%s)\n", base); perror(base); exit(1); } }
/* cleanup in one cache subdir */ void cleanup_dir(const char *dir, size_t maxfiles, size_t maxsize) { unsigned i; cc_log("Cleaning up cache directory %s", dir); cache_size_threshold = maxsize * LIMIT_MULTIPLE; files_in_cache_threshold = maxfiles * LIMIT_MULTIPLE; num_files = 0; cache_size = 0; files_in_cache = 0; /* build a list of files */ traverse(dir, traverse_fn); /* clean the cache */ sort_and_clean(); stats_set_sizes(dir, files_in_cache, cache_size); /* free it up */ for (i = 0; i < num_files; i++) { free(files[i]->fname); free(files[i]); files[i] = NULL; } if (files) { free(files); } allocated = 0; files = NULL; num_files = 0; cache_size = 0; files_in_cache = 0; }
/*
 * Verify that all source files recorded for one manifest object still hash
 * to the values stored in the manifest. hashed_files memoizes per-path hash
 * results (path -> struct file_hash) across objects. Returns 1 on a full
 * match, 0 otherwise.
 */
static int verify_object(struct conf *conf, struct manifest *mf,
                         struct object *obj, struct hashtable *hashed_files)
{
	uint32_t i;
	struct file_info *fi;
	struct file_hash *actual;
	struct mdfour hash;
	int result;

	for (i = 0; i < obj->n_file_info_indexes; i++) {
		fi = &mf->file_infos[obj->file_info_indexes[i]];
		actual = hashtable_search(hashed_files, mf->files[fi->index]);
		if (!actual) {
			actual = x_malloc(sizeof(*actual));
			hash_start(&hash);
			result = hash_source_code_file(conf, &hash, mf->files[fi->index]);
			if (result & HASH_SOURCE_CODE_ERROR) {
				cc_log("Failed hashing %s", mf->files[fi->index]);
				free(actual);
				return 0;
			}
			if (result & HASH_SOURCE_CODE_FOUND_TIME) {
				/* __TIME__ makes the hash time-dependent; treat as a miss. */
				free(actual);
				return 0;
			}
			hash_result_as_bytes(&hash, actual->hash);
			actual->size = hash.totalN;
			/* The table owns the duplicated key and the cached entry. */
			hashtable_insert(hashed_files, x_strdup(mf->files[fi->index]),
			                 actual);
		}
		if (memcmp(fi->hash, actual->hash, mf->hash_size) != 0
		    || fi->size != actual->size) {
			return 0;
		}
	}

	return 1;
}
/* something went badly wrong - clean up our temporary files and hand the
   compilation over to the real compiler via execv(); never returns unless
   the exec itself fails */
static void failed(void)
{
	/* delete intermediate pre-processor file if needed */
	if (i_tmpfile) {
		if (!direct_i_file) {
			unlink(i_tmpfile);
		}
		free(i_tmpfile);
		i_tmpfile = NULL;
	}

	/* delete the cpp stderr file if necessary */
	if (cpp_stderr) {
		unlink(cpp_stderr);
		free(cpp_stderr);
		cpp_stderr = NULL;
	}

	/* strip any local args */
	args_strip(orig_args, "--ccache-");

	/* honor CCACHE_PREFIX by prepending it to the command */
	char *prefix = getenv("CCACHE_PREFIX");
	if (prefix) {
		char *prefix_full = find_executable(prefix, MYNAME);
		if (!prefix_full) {
			perror(prefix);
			exit(1);
		}
		args_add_prefix(orig_args, prefix_full);
	}

	execv(orig_args->argv[0], orig_args->argv);

	/* only reached if execv failed */
	cc_log("execv returned (%s)!\n", strerror(errno));
	perror(orig_args->argv[0]);
	exit(1);
}
/* hash a file that consists of preprocessor output, but remove any line
   number information from the hash

   The file is mapped into memory (MapViewOfFile on Windows, mmap elsewhere)
   and fed through unify(). Returns 0 on success, -1 on failure (after
   bumping the preprocessor error statistic). */
int unify_hash(const char *fname)
{
#ifdef _WIN32
	HANDLE file;
	HANDLE section;
	DWORD filesize_low;
	char *map;
	int ret = -1; /* stays -1 until the mapping fully succeeds */

	file = CreateFileA(fname, GENERIC_READ, FILE_SHARE_READ, NULL,
	                   OPEN_EXISTING, 0, NULL);
	if (file != INVALID_HANDLE_VALUE) {
		/* INVALID_FILE_SIZE is also a legal size; disambiguate via
		   GetLastError(). */
		filesize_low = GetFileSize(file, NULL);
		if (!(filesize_low == INVALID_FILE_SIZE
		      && GetLastError() != NO_ERROR)) {
			section = CreateFileMappingA(file, NULL, PAGE_READONLY,
			                             0, 0, NULL);
			/* The mapping keeps its own reference to the file. */
			CloseHandle(file);
			if (section != NULL) {
				map = MapViewOfFile(section, FILE_MAP_READ, 0, 0, 0);
				CloseHandle(section);
				if (map != NULL) ret = 0;
			}
		}
		/* NOTE(review): if the size check fails, `file` is never
		   closed here — possible handle leak; confirm and fix. */
	}
	if (ret == -1) {
		cc_log("Failed to open preprocessor output %s\n", fname);
		stats_update(STATS_PREPROCESSOR);
		return -1;
	}

	/* pass it through the unifier */
	unify((unsigned char *)map, filesize_low);
	UnmapViewOfFile(map);
	return 0;
#else
	int fd;
	struct stat st;
	char *map;

	fd = open(fname, O_RDONLY|O_BINARY);
	if (fd == -1 || fstat(fd, &st) != 0) {
		cc_log("Failed to open preprocessor output %s\n", fname);
		if (fd != -1) close(fd);
		stats_update(STATS_PREPROCESSOR);
		return -1;
	}

	/* we use mmap() to make it easy to handle arbitrarily long
	   lines in preprocessor output. I have seen lines of over
	   100k in length, so this is well worth it */
	map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	/* The mapping stays valid after the descriptor is closed. */
	close(fd);
	if (map == (char *)-1) { /* i.e. MAP_FAILED */
		cc_log("Failed to mmap %s\n", fname);
		stats_update(STATS_PREPROCESSOR);
		return -1;
	}

	/* pass it through the unifier */
	unify((unsigned char *)map, st.st_size);
	munmap(map, st.st_size);
	return 0;
#endif
}
/*
 * Windows replacement for fork/exec: run the command described by argv via
 * CreateProcess, optionally redirecting the child's stdout/stderr to the
 * given file descriptors (fd_stdout == -1 means inherit our handles).
 * Returns the child's exit code, or -1 on setup/launch failure. If doreturn
 * is false, exits the current process with the child's exit code instead.
 */
int win32execute(char *path, char **argv, int doreturn, int fd_stdout,
                 int fd_stderr)
{
	PROCESS_INFORMATION pi;
	memset(&pi, 0x00, sizeof(pi));
	STARTUPINFO si;
	memset(&si, 0x00, sizeof(si));

	/* Scripts need to be run through their interpreter shell, if any. */
	char *sh = win32getshell(path);
	if (sh) {
		path = sh;
	}

	si.cb = sizeof(STARTUPINFO);
	if (fd_stdout != -1) {
		/* Capture child stdout/stderr into the caller's descriptors. */
		si.hStdOutput = (HANDLE)_get_osfhandle(fd_stdout);
		si.hStdError = (HANDLE)_get_osfhandle(fd_stderr);
		si.hStdInput = GetStdHandle(STD_INPUT_HANDLE);
		si.dwFlags = STARTF_USESTDHANDLES;
		if (si.hStdOutput == INVALID_HANDLE_VALUE
		    || si.hStdError == INVALID_HANDLE_VALUE) {
			return -1;
		}
	} else {
		// Redirect subprocess stdout, stderr into current process.
		si.hStdOutput = GetStdHandle(STD_OUTPUT_HANDLE);
		si.hStdError = GetStdHandle(STD_ERROR_HANDLE);
		si.hStdInput = GetStdHandle(STD_INPUT_HANDLE);
		si.dwFlags = STARTF_USESTDHANDLES;
		if (si.hStdOutput == INVALID_HANDLE_VALUE
		    || si.hStdError == INVALID_HANDLE_VALUE) {
			return -1;
		}
	}

	/* CreateProcess takes a single command-line string, not an argv. */
	char *args = win32argvtos(sh, argv);
	const char *ext = strrchr(path, '.');
	char full_path_win_ext[MAX_PATH] = {0};
	add_exe_ext_if_no_to_fullpath(full_path_win_ext, MAX_PATH, ext, path);
	BOOL ret = CreateProcess(full_path_win_ext, args, NULL, NULL, 1, 0, NULL,
	                         NULL, &si, &pi);
	if (fd_stdout != -1) {
		/* The child holds its own handles now. */
		close(fd_stdout);
		close(fd_stderr);
	}
	free(args);
	if (ret == 0) {
		/* Launch failed: render the Win32 error into a readable message. */
		LPVOID lpMsgBuf;
		DWORD dw = GetLastError();
		FormatMessage(
			FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM
			| FORMAT_MESSAGE_IGNORE_INSERTS,
			NULL, dw, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
			(LPTSTR) &lpMsgBuf, 0, NULL);

		LPVOID lpDisplayBuf = (LPVOID) LocalAlloc(
			LMEM_ZEROINIT,
			(lstrlen((LPCTSTR) lpMsgBuf) + lstrlen((LPCTSTR) __FILE__) + 200)
			* sizeof(TCHAR));
		_snprintf((LPTSTR) lpDisplayBuf,
		          LocalSize(lpDisplayBuf) / sizeof(TCHAR),
		          TEXT("%s failed with error %lu: %s"), __FILE__, dw,
		          (const char *)lpMsgBuf);

		cc_log("can't execute %s; OS returned error: %s",
		       full_path_win_ext, (char *)lpDisplayBuf);

		LocalFree(lpMsgBuf);
		LocalFree(lpDisplayBuf);

		return -1;
	}
	/* Wait for the child and collect its exit code. */
	WaitForSingleObject(pi.hProcess, INFINITE);

	DWORD exitcode;
	GetExitCodeProcess(pi.hProcess, &exitcode);
	CloseHandle(pi.hProcess);
	CloseHandle(pi.hThread);
	if (!doreturn) {
		x_exit(exitcode);
	}
	return exitcode;
}
/* Something went badly wrong: log the message and abort the program. */
void fatal(const char *msg)
{
	cc_log("FATAL: %s\n", msg);
	exit(1);
}
/*
 * Write counter updates in counter_updates to disk.
 *
 * Merges the in-memory deltas into the on-disk stats file under a lockfile,
 * and triggers a cleanup of the stats file's subdirectory when its share
 * (1/16) of the file/size limits is exceeded.
 */
void
stats_flush(void)
{
	struct counters *counters;
	bool need_cleanup = false;
	bool should_flush = false;
	int i;

	assert(conf);

	if (!conf->stats) {
		return;
	}

	if (!counter_updates) {
		return;
	}

	/* Nothing to do if no counter changed. */
	for (i = 0; i < STATS_END; ++i) {
		if (counter_updates->data[i] > 0) {
			should_flush = true;
			break;
		}
	}
	if (!should_flush) {
		return;
	}

	if (!stats_file) {
		char *stats_dir;

		/*
		 * A NULL stats_file means that we didn't get past calculate_object_hash(),
		 * so we just choose one of stats files in the 16 subdirectories.
		 */
		stats_dir = format("%s/%x", conf->cache_dir,
		                   hash_from_int(getpid()) % 16);
		stats_file = format("%s/stats", stats_dir);
		free(stats_dir);
	}

	/* Read-modify-write the stats file under the lock. */
	if (!lockfile_acquire(stats_file, lock_staleness_limit)) {
		return;
	}
	counters = counters_init(STATS_END);
	stats_read(stats_file, counters);
	for (i = 0; i < STATS_END; ++i) {
		counters->data[i] += counter_updates->data[i];
	}
	stats_write(stats_file, counters);
	lockfile_release(stats_file);

	/* Log this run's results when a log file is configured. */
	if (!str_eq(conf->log_file, "")) {
		for (i = 0; i < STATS_END; ++i) {
			if (counter_updates->data[stats_info[i].stat] != 0
			    && !(stats_info[i].flags & FLAG_NOZERO)) {
				cc_log("Result: %s", stats_info[i].message);
			}
		}
	}

	/* Each of the 16 subdirectories gets 1/16 of the configured limits. */
	if (conf->max_files != 0
	    && counters->data[STATS_NUMFILES] > conf->max_files / 16) {
		need_cleanup = true;
	}
	if (conf->max_size != 0
	    && counters->data[STATS_TOTALSIZE] > conf->max_size / 1024 / 16) {
		need_cleanup = true;
	}

	if (need_cleanup) {
		/* NOTE(review): assumes a project dirname() returning malloc'ed
		   memory; POSIX dirname() may return static storage — confirm. */
		char *p = dirname(stats_file);
		cleanup_dir(conf, p);
		free(p);
	}

	counters_free(counters);
}
// Write counter updates in counter_updates to disk. void stats_flush(void) { assert(conf); if (!conf->stats) { return; } if (!counter_updates) { return; } bool should_flush = false; for (int i = 0; i < STATS_END; ++i) { if (counter_updates->data[i] > 0) { should_flush = true; break; } } if (!should_flush) { return; } if (!stats_file) { char *stats_dir; // A NULL stats_file means that we didn't get past calculate_object_hash(), // so we just choose one of stats files in the 16 subdirectories. stats_dir = format("%s/%x", conf->cache_dir, hash_from_int(getpid()) % 16); stats_file = format("%s/stats", stats_dir); free(stats_dir); } if (!lockfile_acquire(stats_file, lock_staleness_limit)) { return; } struct counters *counters = counters_init(STATS_END); stats_read(stats_file, counters); for (int i = 0; i < STATS_END; ++i) { counters->data[i] += counter_updates->data[i]; } stats_write(stats_file, counters); lockfile_release(stats_file); if (!str_eq(conf->log_file, "") || conf->debug) { for (int i = 0; i < STATS_END; ++i) { if (counter_updates->data[stats_info[i].stat] != 0 && !(stats_info[i].flags & FLAG_NOZERO)) { cc_log("Result: %s", stats_info[i].message); } } } char *subdir = dirname(stats_file); bool need_cleanup = false; if (conf->max_files != 0 && counters->data[STATS_NUMFILES] > conf->max_files / 16) { cc_log("Need to clean up %s since it holds %u files (limit: %u files)", subdir, counters->data[STATS_NUMFILES], conf->max_files / 16); need_cleanup = true; } if (conf->max_size != 0 && counters->data[STATS_TOTALSIZE] > conf->max_size / 1024 / 16) { cc_log("Need to clean up %s since it holds %u KiB (limit: %lu KiB)", subdir, counters->data[STATS_TOTALSIZE], (unsigned long)conf->max_size / 1024 / 16); need_cleanup = true; } if (need_cleanup) { clean_up_dir(conf, subdir, conf->limit_multiple); } free(subdir); counters_free(counters); }