RmBuffer *rm_buffer_pool_get(RmBufferPool *pool) {
    RmBuffer *buffer = NULL;
    g_mutex_lock(&pool->lock);
    {
        while(!buffer) {
            if(pool->stack) {
                buffer = pool->stack->data;
                pool->stack = g_slist_delete_link(pool->stack, pool->stack);
            } else if(pool->avail_buffers > 0) {
                buffer = rm_buffer_new(pool);
            } else {
                if(!pool->mem_warned) {
                    rm_log_warning_line(
                        "read buffer limit reached - waiting for "
                        "processing to catch up");
                    pool->mem_warned = true;
                }
                g_cond_wait(&pool->change, &pool->lock);
            }
        }
        pool->avail_buffers--;

        if(pool->avail_buffers < pool->min_kept_buffers) {
            pool->min_kept_buffers = pool->avail_buffers;
        }
    }
    g_mutex_unlock(&pool->lock);

    rm_assert_gentle(buffer);
    return buffer;
}
/* Get a buffer from the pool, allocating a new one while below the limit;
 * once the limit is hit, blocks until pool->change is signalled. */
RmBuffer *rm_buffer_get(RmBufferPool *pool) {
    RmBuffer *buffer = NULL;
    g_mutex_lock(&pool->lock);
    {
        while(!buffer) {
            buffer = rm_util_slist_pop(&pool->stack, NULL);
            if(!buffer && pool->avail_buffers > 0) {
                buffer = rm_buffer_new(pool);
            }
            if(!buffer) {
                if(!pool->mem_warned) {
                    rm_log_warning_line(
                        "read buffer limit reached - waiting for "
                        "processing to catch up");
                    pool->mem_warned = true;
                }
                g_cond_wait(&pool->change, &pool->lock);
            }
        }
        pool->avail_buffers--;
    }
    g_mutex_unlock(&pool->lock);

    rm_assert_gentle(buffer);
    return buffer;
}
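/*
 * Usage sketch (not part of the original sources): how a reader thread would
 * typically pair rm_buffer_get() with a release call.  The helper
 * rm_buffer_release(), the placeholder queue_for_hashing(), and the fields
 * buffer->data, buffer->len and pool->buffer_size are assumptions for
 * illustration only; the release side is expected to push the buffer back
 * onto pool->stack, bump pool->avail_buffers and signal pool->change, which
 * is what wakes the g_cond_wait() above.
 */
static void queue_for_hashing(RmBuffer *buffer); /* placeholder consumer hand-off */

static gboolean read_one_chunk(RmBufferPool *pool, FILE *fp) {
    /* Blocks (after a one-time warning) once the pool limit is reached. */
    RmBuffer *buffer = rm_buffer_get(pool);

    buffer->len = fread(buffer->data, 1, pool->buffer_size, fp);
    if(buffer->len == 0) {
        /* Nothing read: hand the buffer straight back to the pool. */
        rm_buffer_release(buffer);
        return FALSE;
    }

    /* Hand off to a consumer thread; the consumer calls
     * rm_buffer_release(buffer) once it has processed the data,
     * which unblocks readers waiting in rm_buffer_get(). */
    queue_for_hashing(buffer);
    return TRUE;
}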
int rm_json_cache_read(RmTrie *file_trie, const char *json_path) {
#if !HAVE_JSON_GLIB
    (void)file_trie;
    (void)json_path;

    rm_log_info_line(_("caching is not supported due to missing json-glib library."));
    return EXIT_FAILURE;
#else
    rm_assert_gentle(file_trie);
    rm_assert_gentle(json_path);

    int result = EXIT_FAILURE;
    GError *error = NULL;
    size_t keys_in_table = rm_trie_size(file_trie);
    JsonParser *parser = json_parser_new();

    rm_log_info_line(_("Loading json-cache `%s'"), json_path);

    if(!json_parser_load_from_file(parser, json_path, &error)) {
        rm_log_warning_line(_("FAILED: %s\n"), error->message);
        g_error_free(error);
        goto failure;
    }

    JsonNode *root = json_parser_get_root(parser);
    if(JSON_NODE_TYPE(root) != JSON_NODE_ARRAY) {
        rm_log_warning_line(_("No valid json cache (no array in /)"));
        goto failure;
    }

    /* Iterate over all objects in it */
    json_array_foreach_element(json_node_get_array(root),
                               (JsonArrayForeach)rm_json_cache_parse_entry,
                               file_trie);

    /* check if some entries were added */
    result = (keys_in_table >= rm_trie_size(file_trie));

failure:
    if(parser) {
        g_object_unref(parser);
    }
    return result;
#endif
}
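/*
 * Usage sketch (illustration only): feeding a previously written cache file
 * into a trie before traversal.  rm_trie_init() and rm_trie_destroy() are
 * assumed setup/teardown helpers; only rm_trie_size() and
 * rm_json_cache_read() appear in the code above.  Note that the function
 * returns EXIT_SUCCESS (0) only if parsing added at least one entry.
 */
static void load_cache_example(const char *json_path) {
    RmTrie file_trie;
    rm_trie_init(&file_trie);

    if(rm_json_cache_read(&file_trie, json_path) != EXIT_SUCCESS) {
        rm_log_warning_line("no usable entries in cache `%s'", json_path);
    }

    /* ...look up files in file_trie during traversal... */

    rm_trie_destroy(&file_trie);
}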
bool rm_session_was_aborted() {
    /* SESSION_ABORTED holds the abort state:
     * 0 = running, 1 = interrupted once, 2 = interrupted twice. */
    gint rc = g_atomic_int_get(&SESSION_ABORTED);

    static GOnce print_once = G_ONCE_INIT;

    switch(rc) {
    case 1:
        /* warn only once, even when polled from many threads */
        g_once(&print_once, rm_session_print_first_abort_warn, NULL);
        break;
    case 2:
        rm_log_warning_line(_("Received second Interrupt, stopping hard."));
        exit(EXIT_FAILURE);
        break;
    }

    return rc;
}
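/*
 * Usage sketch (illustration only): rm_session_was_aborted() is meant to be
 * polled from worker loops, so a first Ctrl-C lets work drain gracefully and
 * a second one exits hard inside the call itself.  process_one_job() is a
 * placeholder, not a function from the sources above.
 */
static void process_one_job(gpointer job); /* placeholder */

static void hash_worker(GQueue *work_queue) {
    gpointer job = NULL;

    /* Finish the current job, then stop as soon as an abort was requested. */
    while(!rm_session_was_aborted() && (job = g_queue_pop_head(work_queue)) != NULL) {
        process_one_job(job);
    }
}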
int rm_hasher_main(int argc, const char **argv) {
    RmHasherSession tag;

    /* List of paths we got passed (or NULL) */
    tag.paths = NULL;

    /* Print hashes in the same order as files in command line args */
    tag.print_in_order = TRUE;

    /* Print a hash with builtin identifier */
    tag.print_multihash = FALSE;

    /* Digest type (user option, default SHA1) */
    tag.digest_type = RM_DIGEST_SHA1;
    gint threads = 8;
    gint64 buffer_mbytes = 256;

    ////////////// Option Parsing ///////////////

    /* clang-format off */
    const GOptionEntry entries[] = {
        {"digest-type"   , 'd' , 0                     , G_OPTION_ARG_CALLBACK       , (GOptionArgFunc)rm_hasher_parse_type , _("Digest type [SHA1]")                                                            , "[TYPE]"}  ,
        {"num-threads"   , 't' , 0                     , G_OPTION_ARG_INT            , &threads                             , _("Number of hashing threads [8]")                                                 , "N"}       ,
        {"multihash"     , 'm' , 0                     , G_OPTION_ARG_NONE           , &tag.print_multihash                 , _("Print hash as self identifying multihash")                                      , NULL}      ,
        {"buffer-mbytes" , 'b' , 0                     , G_OPTION_ARG_INT64          , &buffer_mbytes                       , _("Megabytes read buffer [256 MB]")                                                , "MB"}      ,
        {"ignore-order"  , 'i' , G_OPTION_FLAG_REVERSE , G_OPTION_ARG_NONE           , &tag.print_in_order                  , _("Print hashes in order completed, not in order entered (reduces memory usage)") , NULL}      ,
        {""              , 0   , 0                     , G_OPTION_ARG_FILENAME_ARRAY , &tag.paths                           , _("Space-separated list of files")                                                 , "[FILE…]"} ,
        {NULL            , 0   , 0                     , 0                           , NULL                                 , NULL                                                                               , NULL}};
    /* clang-format on */

    GError *error = NULL;
    GOptionContext *context = g_option_context_new(_("Hash a list of files"));
    GOptionGroup *main_group =
        g_option_group_new(argv[0], _("Hash a list of files"), "", &tag, NULL);

    char summary[4096];
    memset(summary, 0, sizeof(summary));
    g_snprintf(summary, sizeof(summary),
               _("Multi-threaded file digest (hash) calculator.\n"
                 "\n Available digest types:"
                 "\n %s\n"
                 "\n Versions with different bit numbers:"
                 "\n %s\n"
                 "\n Supported, but not useful:"
                 "\n %s\n"),
               "spooky, city, xxhash, sha{1,256,512}, md5, murmur",
               "spooky{32,64,128}, city{128,256,512}, murmur{512}",
               "farmhash, cumulative, paranoid, ext, bastard");

    g_option_group_add_entries(main_group, entries);
    g_option_context_set_main_group(context, main_group);
    g_option_context_set_summary(context, summary);

    if(!g_option_context_parse(context, &argc, (char ***)&argv, &error)) {
        /* print g_option error message */
        rm_log_error_line("%s", error->message);
        exit(EXIT_FAILURE);
    }

    if(tag.paths == NULL) {
        /* read paths from stdin */
        char path_buf[PATH_MAX];
        char *tokbuf = NULL;
        GPtrArray *paths = g_ptr_array_new();

        while(fgets(path_buf, PATH_MAX, stdin)) {
            char *abs_path = realpath(strtok_r(path_buf, "\n", &tokbuf), NULL);
            g_ptr_array_add(paths, abs_path);
        }

        /* NULL-terminate so the result can be used as a strv */
        g_ptr_array_add(paths, NULL);
        tag.paths = (char **)g_ptr_array_free(paths, FALSE);
    }

    if(tag.paths == NULL || tag.paths[0] == NULL) {
        rm_log_error_line(_("No valid paths given."));
        exit(EXIT_FAILURE);
    }

    g_option_context_free(context);

    ////////// Implementation //////

    /* Remember the path count now: failed entries are NULLed out in the
     * loop below, which would make g_strv_length() under-report the size
     * that was passed to g_slice_alloc0(). */
    guint path_count = g_strv_length(tag.paths);

    if(tag.print_in_order) {
        /* allocate buffer to collect results */
        tag.completed_digests_buffer =
            g_slice_alloc0((path_count + 1) * sizeof(RmDigest *));
        tag.path_index = 0;
    }

    /* initialise structures */
    g_mutex_init(&tag.lock);
    RmHasher *hasher = rm_hasher_new(tag.digest_type,
                                     threads,
                                     FALSE,
                                     4096,
                                     1024 * 1024 * buffer_mbytes,
                                     0,
                                     (RmHasherCallback)rm_hasher_callback,
                                     &tag);

    /* Iterate over paths, pushing to hasher threads */
    for(int i = 0; tag.paths && tag.paths[i]; ++i) {
        /* check it is a regular file */
        RmStat stat_buf;
        if(rm_sys_stat(tag.paths[i], &stat_buf) == -1) {
            rm_log_warning_line(_("Can't open directory or file \"%s\": %s"),
                                tag.paths[i], strerror(errno));
        } else if(S_ISDIR(stat_buf.st_mode)) {
            rm_log_warning_line(_("Directories are not supported: %s"), tag.paths[i]);
        } else if(S_ISREG(stat_buf.st_mode)) {
            RmHasherTask *task = rm_hasher_task_new(hasher, NULL, GINT_TO_POINTER(i));
            rm_hasher_task_hash(task, tag.paths[i], 0, 0, FALSE);
            rm_hasher_task_finish(task);
            continue;
        } else {
            rm_log_warning_line(_("%s: Unknown file type"), tag.paths[i]);
        }

        /* dummy callback for failed paths */
        g_free(tag.paths[i]);
        tag.paths[i] = NULL;
        rm_hasher_callback(hasher, NULL, &tag, GINT_TO_POINTER(i));
    }

    /* wait for all hasher threads to finish... */
    rm_hasher_free(hasher, TRUE);

    /* tidy up */
    if(tag.print_in_order) {
        g_slice_free1((path_count + 1) * sizeof(RmDigest *),
                      tag.completed_digests_buffer);
    }

    g_strfreev(tag.paths);
    return EXIT_SUCCESS;
}
static gpointer rm_session_print_first_abort_warn(_UNUSED gpointer data) {
    rm_log_warning("\r");
    rm_log_warning_line(_("Received Interrupt, stopping..."));
    return NULL;
}
/* Method to test if a file is a non-stripped binary. Uses libelf. */
bool rm_util_is_nonstripped(_U const char *path, _U RmStat *statp) {
    bool is_ns = false;

#if HAVE_LIBELF
    g_return_val_if_fail(path, false);

    if(statp && (statp->st_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
        return false;
    }

    /* inspired by "jschmier"'s answer at http://stackoverflow.com/a/5159890 */
    int fd;

    /* ELF handle */
    Elf *elf;

    /* section descriptor pointer */
    Elf_Scn *scn;

    /* section header */
    GElf_Shdr shdr;

    /* Open ELF file to obtain file descriptor */
    if((fd = rm_sys_open(path, O_RDONLY)) == -1) {
        rm_log_warning_line(_("cannot open file '%s' for nonstripped test: "), path);
        rm_log_perror("");
        return false;
    }

    /* Protect program from using an older library */
    if(elf_version(EV_CURRENT) == EV_NONE) {
        rm_log_error_line(_("ELF Library is out of date!"));
        rm_sys_close(fd);
        return false;
    }

    /* Initialize elf pointer for examining contents of file */
    elf = elf_begin(fd, ELF_C_READ, NULL);

    /* Initialize section descriptor pointer so that elf_nextscn()
     * returns a pointer to the section descriptor at index 1. */
    scn = NULL;

    /* Iterate through ELF sections */
    while((scn = elf_nextscn(elf, scn)) != NULL) {
        /* Retrieve section header */
        gelf_getshdr(scn, &shdr);

        /* If a section header holding a symbol table (.symtab)
         * is found, this ELF file has not been stripped. */
        if(shdr.sh_type == SHT_SYMTAB) {
            is_ns = true;
            break;
        }
    }

    elf_end(elf);
    rm_sys_close(fd);
#endif

    return is_ns;
}
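/*
 * Usage sketch (illustration only): calling the check from a traversal step,
 * mirroring the stat pattern used in rm_hasher_main() above.  The log
 * messages are illustrative; only rm_sys_stat(), RmStat and
 * rm_util_is_nonstripped() are taken from the code in this section.
 */
static void report_if_nonstripped(const char *path) {
    RmStat stat_buf;
    if(rm_sys_stat(path, &stat_buf) == -1) {
        rm_log_warning_line("cannot stat `%s': %s", path, strerror(errno));
        return;
    }

    if(rm_util_is_nonstripped(path, &stat_buf)) {
        rm_log_info_line("non-stripped binary: %s", path);
    }
}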