s3_device::s3_device(const std::string& filename, const bool write) {
  m_filename = filename;
  // split out the access key and secret key
  webstor::s3url url;
  webstor::parse_s3url(filename, url);
  m_s3fs = std::make_shared<dmlc::io::S3FileSystem>();
  m_s3fs->SetCredentials(url.access_key_id, url.secret_key);
  std::string url_without_credentials;
  if (url.endpoint.empty()) {
    url_without_credentials = "s3://" + url.bucket + "/" + url.object_name;
  } else {
    url_without_credentials =
        "s3://" + url.endpoint + "/" + url.bucket + "/" + url.object_name;
  }
  auto uri = dmlc::io::URI(url_without_credentials.c_str());
  if (write) {
    m_write_stream.reset(m_s3fs->Open(uri, "w"));
  } else {
    try {
      auto pathinfo = m_s3fs->GetPathInfo(uri);
      m_filesize = pathinfo.size;
      if (pathinfo.type != dmlc::io::kFile) {
        log_and_throw("Cannot open " + sanitize_url(filename));
      }
      m_read_stream.reset(m_s3fs->OpenForRead(uri));
    } catch (...) {
      log_and_throw("Cannot open " + sanitize_url(filename));
    }
  }
}
void unity_global::save_model(std::shared_ptr<model_base> model,
                              const std::string& model_wrapper,
                              const std::string& url) {
  logstream(LOG_INFO) << "Save model to " << sanitize_url(url) << std::endl;
  logstream(LOG_INFO) << "Model name: " << model->name() << std::endl;
  try {
    dir_archive dir;
    dir.open_directory_for_write(url);
    dir.set_metadata("contents", "model");
    oarchive oarc(dir);
    oarc.write(CLASS_MAGIC_HEADER, strlen(CLASS_MAGIC_HEADER));
    oarc << model->name();
    oarc << model_wrapper;
    oarc << *model;
    if (dir.get_output_stream()->fail()) {
      std::string message = "Failed to write.";
      log_and_throw_io_failure(message);
    }
    dir.close();
  } catch (std::ios_base::failure& e) {
    std::string message = "Unable to save model to " + sanitize_url(url) +
                          ": " + e.what();
    log_and_throw_io_failure(message);
  } catch (std::string& e) {
    log_and_throw(std::string("Unable to save model to ") +
                  sanitize_url(url) + ": " + e);
  } catch (...) {
    log_and_throw(std::string("Unknown Error: Unable to save model to ") +
                  sanitize_url(url));
  }
}
bool unity_sgraph::save_graph(std::string target, std::string format) {
  log_func_entry();
  try {
    if (format == "binary") {
      dir_archive dir;
      dir.open_directory_for_write(target);
      dir.set_metadata("contents", "graph");
      oarchive oarc(dir);
      if (dir.get_output_stream()->fail()) {
        log_and_throw_io_failure("Failed to write.");
      }
      save(oarc);
      dir.close();
    } else if (format == "json") {
      save_sgraph_to_json(get_graph(), target);
    } else if (format == "csv") {
      save_sgraph_to_csv(get_graph(), target);
    } else {
      log_and_throw("Unable to save to format: " + format);
    }
  } catch (std::ios_base::failure& e) {
    std::string message = "Unable to save graph to " + sanitize_url(target) +
                          ": " + e.what();
    log_and_throw_io_failure(message);
  } catch (std::string& e) {
    std::string message = "Unable to save graph to " + sanitize_url(target) +
                          ": " + e;
    log_and_throw(message);
  } catch (...) {
    std::string message = "Unable to save graph to " + sanitize_url(target) +
                          ": Unknown Error.";
    log_and_throw(message);
  }
  return true;
}
void unity_sgraph::fast_validate_add_edges(const sframe& edges,
                                           std::string src_field,
                                           std::string dst_field,
                                           size_t groupa, size_t groupb) const {
  if (!edges.contains_column(src_field)) {
    log_and_throw("Input sframe does not contain source id column: " + src_field);
  }
  if (!edges.contains_column(dst_field)) {
    log_and_throw("Input sframe does not contain target id column: " + dst_field);
  }
  flex_type_enum src_id_type = edges.column_type(edges.column_index(src_field));
  flex_type_enum dst_id_type = edges.column_type(edges.column_index(dst_field));
  if (src_id_type != dst_id_type) {
    std::string msg = "Source and target ids have different types: ";
    msg += std::string(flex_type_enum_to_name(src_id_type)) + " != " +
           flex_type_enum_to_name(dst_id_type);
    log_and_throw(msg);
  }
  if (src_id_type != flex_type_enum::INTEGER &&
      src_id_type != flex_type_enum::STRING) {
    log_and_throw(std::string("Invalid id column type: ") +
                  flex_type_enum_to_name(src_id_type) +
                  ". Supported types are: integer and string.");
  }
}
bool unity_sgraph::load_graph(std::string target_dir) {
  log_func_entry();
  try {
    dir_archive dir;
    dir.open_directory_for_read(target_dir);
    std::string contents;
    if (dir.get_metadata("contents", contents) == false || contents != "graph") {
      log_and_throw(std::string("Archive does not contain a graph."));
    }
    iarchive iarc(dir);
    load(iarc);
    dir.close();
  } catch (std::ios_base::failure& e) {
    std::string message = "Unable to load graph from " +
                          sanitize_url(target_dir) + ": " + e.what();
    log_and_throw_io_failure(message);
  } catch (std::string& e) {
    std::string message = "Unable to load graph from " +
                          sanitize_url(target_dir) + ": " + e;
    log_and_throw(message);
  } catch (...) {
    std::string message = "Unable to load graph from " +
                          sanitize_url(target_dir) + ": Unknown Error.";
    log_and_throw(message);
  }
  return true;
}
std::shared_ptr<unity_sgraph_base> unity_sgraph::lambda_triple_apply(
    const std::string& lambda_str,
    const std::vector<std::string>& mutated_fields) {
  log_func_entry();
  if (mutated_fields.empty()) {
    log_and_throw("mutated_fields cannot be empty");
  }
  std::shared_ptr<sgraph> g = std::make_shared<sgraph>((*m_graph)());
  std::vector<std::string> mutated_vertex_fields, mutated_edge_fields;
  const auto& all_vertex_fields = g->get_vertex_fields();
  const auto& all_edge_fields = g->get_edge_fields();
  std::set<std::string> all_vertex_field_set(all_vertex_fields.begin(),
                                             all_vertex_fields.end());
  std::set<std::string> all_edge_field_set(all_edge_fields.begin(),
                                           all_edge_fields.end());
  for (auto& f : mutated_fields) {
    if (f == sgraph::VID_COLUMN_NAME || f == sgraph::SRC_COLUMN_NAME ||
        f == sgraph::DST_COLUMN_NAME) {
      log_and_throw("mutated fields cannot contain id field: " + f);
    }
    if (!all_vertex_field_set.count(f) && !all_edge_field_set.count(f)) {
      log_and_throw("mutated field \"" + f + "\" cannot be found in graph");
    }
    if (all_vertex_field_set.count(f)) mutated_vertex_fields.push_back(f);
    if (all_edge_field_set.count(f)) mutated_edge_fields.push_back(f);
  }
  DASSERT_FALSE(mutated_fields.empty());
  sgraph_compute::triple_apply(*g, lambda_str, mutated_vertex_fields,
                               mutated_edge_fields);
  std::shared_ptr<unity_sgraph> ret(new unity_sgraph(g));
  return ret;
}
std::vector<std::pair<flexible_type, gl_sframe>>
grouped_sframe::iterator_get_next(size_t len) {
  if (!m_inited)
    log_and_throw("The 'group' operation needs to occur before iteration!");
  if (!m_iterating)
    log_and_throw("Must begin iteration before iterating!");

  std::vector<std::pair<flexible_type, gl_sframe>> ret;
  if (len < 1) {
    return ret;
  } else {
    auto items_left = m_range_directory.size() - m_cur_iterator_idx;
    if (len > items_left) {
      len = items_left;
    }
    ret.resize(len);
  }

  size_t cur_cnt = 0;
  for (; (m_cur_iterator_idx < m_range_directory.size()) && (cur_cnt < len);
       ++m_cur_iterator_idx, ++cur_cnt) {
    auto sf = this->get_group_by_index(m_cur_iterator_idx);
    auto name = m_group_names[m_cur_iterator_idx];
    ret[cur_cnt] = std::make_pair(name, sf);
  }

  if (cur_cnt < len) {
    m_iterating = false;
  }
  return ret;
}
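// A minimal usage sketch for iterator_get_next() -- not from the source. It
// drains every group in batches of 16, and assumes the grouping step and the
// iteration-begin call (not shown in this file) have already been made on
// `gsf`; the loop terminates because a fully drained iterator returns an
// empty batch.
static void example_drain_groups(grouped_sframe& gsf) {
  for (auto batch = gsf.iterator_get_next(16); !batch.empty();
       batch = gsf.iterator_get_next(16)) {
    for (auto& kv : batch) {
      // kv.first is the group key; kv.second holds the group's rows.
      logstream(LOG_INFO) << "group " << kv.first << " has "
                          << kv.second.size() << " rows" << std::endl;
    }
  }
}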
void process::close_read_pipe() {
  if (!m_launched)
    log_and_throw("No process launched!");
  if (!m_launched_with_popen || m_read_handle == NULL)
    log_and_throw("Cannot close read pipe from child, no pipe initialized.");

  CloseHandle(m_read_handle);
  m_read_handle = NULL;
}
sframe join(sframe& sf_left,
            sframe& sf_right,
            std::string join_type,
            const std::map<std::string, std::string> join_columns,
            size_t max_buffer_size) {
  // ***SANITY CHECKS

  // check that each sframe is valid
  if (!sf_left.num_rows() || !sf_left.num_columns()) {
    log_and_throw("Current SFrame has nothing to join!");
  }
  if (!sf_right.num_rows() || !sf_right.num_columns()) {
    log_and_throw("Given SFrame has nothing to join!");
  }

  std::vector<size_t> left_join_positions;
  std::vector<size_t> right_join_positions;
  for (const auto& col_pair : join_columns) {
    // Check that all columns exist (in both sframes).
    // These will throw if not found.
    left_join_positions.push_back(sf_left.column_index(col_pair.first));
    right_join_positions.push_back(sf_right.column_index(col_pair.second));

    // Each column must have matching types to compare effectively
    if (sf_left.column_type(left_join_positions.back()) !=
        sf_right.column_type(right_join_positions.back())) {
      log_and_throw("Columns " + col_pair.first + " and " + col_pair.second +
                    " do not have the same type in both SFrames.");
    }
  }

  // Figure out what join type we have to do
  boost::algorithm::to_lower(join_type);
  join_type_t in_join_type;
  if (join_type == "outer") {
    in_join_type = FULL_JOIN;
  } else if (join_type == "left") {
    in_join_type = LEFT_JOIN;
  } else if (join_type == "right") {
    in_join_type = RIGHT_JOIN;
  } else if (join_type == "inner") {
    in_join_type = INNER_JOIN;
  } else {
    log_and_throw("Invalid join type given!");
  }

  // execute join (perhaps multiplex algorithm based on something?)
  join_impl::hash_join_executor join_executor(sf_left, sf_right,
                                              left_join_positions,
                                              right_join_positions,
                                              in_join_type, max_buffer_size);
  return join_executor.grace_hash_join();
}
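// A minimal usage sketch for join() -- not from the source. The two SFrames,
// the key column "user_id", and the buffer size are illustrative assumptions.
static sframe example_inner_join(sframe& users, sframe& purchases) {
  // Match rows whose "user_id" values are equal in both SFrames.
  return join(users, purchases, "inner",
              {{"user_id", "user_id"}},
              /* max_buffer_size */ 1024 * 1024);
}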
/**
 * Return the begin iterator of the segment.
 * The iterator (\ref sarray_iterator) is of the input iterator type and
 * has value_type T. See \ref end() to get the end iterator of the segment.
 *
 * The iterator is invalid once the originating sarray is destroyed.
 * Accessing the iterator after the sarray is destroyed is undefined behavior.
 *
 * \code
 * // example to print segment 1 to screen
 * auto iter = sarr.begin(1);
 * auto enditer = sarr.end(1);
 * while (iter != enditer) {
 *   std::cout << *iter << "\n";
 *   ++iter;
 * }
 * \endcode
 *
 * Throws an exception if the sarray is invalid (i.e., there was an error
 * reading files). segmentid must also be a valid segment ID; an exception
 * is thrown otherwise.
 */
iterator begin(size_t segmentid) const {
  std::lock_guard<mutex> lck(lock);
  if (opened_segments.count(segmentid) == 0) {
    opened_segments.insert(segmentid);
  } else {
    log_and_throw(std::string("Must reset sarray iterators!"));
  }
  if (reader == NULL) log_and_throw(std::string("Invalid sarray"));
  if (segmentid >= num_segments()) log_and_throw(std::string("Invalid segment ID"));
  return iterator(&(m_read_buffers[segmentid]), segmentid, true);
}
/**
 * Parse the png image info from an in-memory buffer.
 */
void parse_png(const char* data, size_t length,
               size_t& width, size_t& height, size_t& channels) {
  // Begin of setup
  png_structp png_ptr;
  png_infop info_ptr;
  setup_png_reader(data, length, &png_ptr, &info_ptr);

  // Construct the simple in-memory buffer
  png_memory_buffer source;
  source.data = (char*)data;
  source.length = length;
  source.offset = 0;

  // Set custom read function
  png_set_read_fn(png_ptr, (png_voidp)&source, (png_rw_ptr)png_memread_func);
  png_read_info(png_ptr, info_ptr);
  // End of setup

  png_uint_32 _width = 0;
  png_uint_32 _height = 0;
  int bit_depth = 0;
  int color_type = -1;
  png_uint_32 retval = png_get_IHDR(png_ptr, info_ptr, &_width, &_height,
                                    &bit_depth, &color_type,
                                    NULL, NULL, NULL);
  if (retval != 1) {
    logstream(LOG_ERROR) << "Failed to parse PNG header" << std::endl;
    png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
    throw(std::string("Invalid PNG file"));
  }
  width = _width;
  height = _height;
  if (bit_depth != 8) {
    png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
    log_and_throw(std::string("Unsupported PNG bit depth: " +
                              std::to_string(bit_depth)));
  }
  channels = png_num_channels(color_type);
  if (channels != 1 && channels != 3 && channels != 4) {
    png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
    log_and_throw(std::string("Unsupported PNG color type: ") +
                  std::to_string(color_type));
  }
  png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
}
continuous_bins continuous_result::get_bins(flex_int num_bins) const {
  if (num_bins < 1) {
    log_and_throw("num_bins must be positive.");
  }

  continuous_bins ret;

  // determine the bin range that covers min to max
  size_t first_bin = get_bin_idx(min, scale_min, scale_max);
  size_t last_bin = get_bin_idx(max, scale_min, scale_max);
  size_t effective_bins = (last_bin - first_bin) + 1;

  // Might end up with fewer effective bins due to very small
  // number of unique values. For now, comment out this assert.
  // TODO -- what should we assert here instead, to make sure we have enough
  // effective range for the desired number of bins? Or should we force
  // discrete histogram for very low cardinality? (At which point, we keep
  // this assertion).
  //DASSERT_GE(effective_bins, (MAX_BINS/4));
  if (num_bins > (MAX_BINS/4)) {
    log_and_throw("num_bins must be less than or equal to the effective "
                  "number of bins available.");
  }

  // rescale to desired bins, taking more than the effective range if
  // necessary in order to get to num_bins total without resampling
  size_t bins_per_bin = effective_bins / num_bins;
  size_t overflow = effective_bins % num_bins;
  size_t before = 0;
  size_t after = 0;
  if (overflow) {
    overflow = num_bins - overflow;
    bins_per_bin = (effective_bins + overflow) / num_bins;
    before = overflow / 2;
    after = (overflow / 2) + (overflow % 2);
  }
  ret.bins = flex_list(num_bins, 0); // initialize empty
  ret.min = get_value_at_bin(std::max<ssize_t>(0, first_bin - before),
                             scale_min, scale_max, MAX_BINS);
  ret.max = get_value_at_bin(std::min<ssize_t>(last_bin + after + 1, MAX_BINS),
                             scale_min, scale_max, MAX_BINS);
  for (size_t i = 0; i < num_bins; i++) {
    for (size_t j = 0; j < bins_per_bin; j++) {
      ssize_t idx = (i * bins_per_bin) + j + (first_bin - before);
      if (idx < 0 || idx >= MAX_BINS) {
        // don't try to get values below 0, or past MAX_BINS, that would be silly
        continue;
      }
      ret.bins[i] += this->bins[idx];
    }
  }
  return ret;
}
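// Worked example of the rescaling arithmetic above (values illustrative):
// with effective_bins = 10 and num_bins = 4, overflow = 10 % 4 = 2 becomes
// 4 - 2 = 2, bins_per_bin = (10 + 2) / 4 = 3, and before = after = 1. The
// covered range is thus widened by one scale bin on each side, so 12 scale
// bins collapse cleanly into 4 output bins of 3 scale bins each, with no
// resampling.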
void unity_sgraph::fast_validate_add_vertices(const sframe& vertices,
                                              std::string id_field,
                                              size_t group) const {
  if (!vertices.contains_column(id_field)) {
    log_and_throw("Input sframe does not contain id column: " + id_field);
  }
  flex_type_enum id_type = vertices.column_type(vertices.column_index(id_field));
  if (id_type != flex_type_enum::INTEGER && id_type != flex_type_enum::STRING) {
    log_and_throw(std::string("Invalid id column type: ") +
                  flex_type_enum_to_name(id_type) +
                  ". Supported types are: integer and string.");
  }
}
variant_type simple_model::get_value(std::string key, variant_map_type& opts) {
  if (params.count(key)) {
    return params[key];
  } else {
    log_and_throw("Key " + key + " not found in model.");
  }
}
template <typename T>
std::vector<T> read_sequence_section(const boost::property_tree::ptree& data,
                                     std::string key,
                                     size_t expected_elements) {
  std::vector<T> ret;
  if (expected_elements == 0) return ret;
  const boost::property_tree::ptree& section = data.get_child(key);
  ret.resize(expected_elements);
  // loop through the children of the section
  size_t sid = 0;
  for (const auto& val : section) {
    const auto& entry_key = val.first;
    if (entry_key.empty()) {
      // this is an array-like sequence
      ret[sid] = boost::lexical_cast<T>(val.second.get_value<std::string>());
      ++sid;
    } else {
      // this is a dictionary-like sequence
      sid = std::stoi(entry_key);
      if (sid >= ret.size()) {
        log_and_throw(std::string("Invalid ID in ") + key + " section. "
                      "Segment IDs are expected to be sequential.");
      }
      ret[sid] = boost::lexical_cast<T>(val.second.get_value<std::string>());
    }
  }
  return ret;
}
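// Illustrative input shapes for read_sequence_section(); the key names are
// examples, not from the source. Boost's JSON parser stores array elements
// as children with empty keys, so {"column_names": ["a", "b"]} takes the
// sequential branch above, while {"segment_sizes": {"0": "10", "1": "20"}}
// takes the dictionary branch, with each child key used as an index.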
void jpeg_error_exit(j_common_ptr cinfo) {
  /* Always display the message. */
  /* We could postpone this until after returning, if we chose. */
  (*cinfo->err->output_message)(cinfo);
  log_and_throw(std::string("Unexpected JPEG decode failure"));
}
/**
 * Reads the value of a key associated with the sarray.
 * Returns true on success, false on failure.
 */
bool get_metadata(std::string key, std::string& val) const {
  ASSERT_NE(reader, NULL);
  if (reader == NULL) log_and_throw(std::string("Invalid sarray"));
  bool ret = false;
  std::tie(ret, val) = get_metadata(key);
  return ret;
}
void continuous::init(const gl_sarray& source) {
  continuous_parent::init(source);
  flex_type_enum dtype = m_source.dtype();
  if (dtype != flex_type_enum::INTEGER && dtype != flex_type_enum::FLOAT) {
    log_and_throw("dtype of the provided SArray is not valid for histogram. "
                  "Only int and float are valid dtypes.");
  }

  size_t input_size = m_source.size();
  if (input_size >= 2 &&
      m_source[0].get_type() != flex_type_enum::UNDEFINED &&
      m_source[1].get_type() != flex_type_enum::UNDEFINED) {
    // start with a sane range for the bins (somewhere near the data)
    // (it can be exceptionally small, since the doubling used in resize()
    // will make it converge to the real range quickly)
    m_transformer.init(m_source[0], m_source[1]);
  } else if (input_size == 1 &&
             m_source[0].get_type() != flex_type_enum::UNDEFINED) {
    // one value, not so interesting
    m_transformer.init(m_source[0], m_source[0]);
  } else {
    // no data
    m_transformer.init(0.0, 0.0);
  }
}
void read_raw_image(const std::string& url, char** data, size_t& length,
                    size_t& width, size_t& height, size_t& channels,
                    Format& format, const std::string& format_hint) {
  general_ifstream fin(url);
  length = fin.file_size();
  *data = new char[length];
  try {
    fin.read(*data, length);
    if (format_hint == "JPG") {
      format = Format::JPG;
    } else if (format_hint == "PNG") {
      format = Format::PNG;
    } else {
      if (boost::algorithm::iends_with(url, "jpg") ||
          boost::algorithm::iends_with(url, "jpeg")) {
        format = Format::JPG;
      } else if (boost::algorithm::iends_with(url, "png")) {
        format = Format::PNG;
      }
    }
    if (format == Format::JPG) {
      parse_jpeg(*data, length, width, height, channels);
    } else if (format == Format::PNG) {
      parse_png(*data, length, width, height, channels);
    } else {
      log_and_throw(std::string("Unsupported image format. "
                                "Supported formats are JPG and PNG"));
    }
  } catch (...) {
    delete[] *data;
    *data = NULL;
    length = 0;
    throw;
  }
  fin.close();
}
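// A minimal usage sketch for read_raw_image() -- not from the source; the
// URL is an illustrative assumption, and on success the caller owns the
// returned buffer.
static void example_read_image_header(const std::string& url) {
  char* data = NULL;
  size_t length = 0, width = 0, height = 0, channels = 0;
  Format format = Format::JPG;  // defensive default; overwritten by the
                                // hint/extension sniffing above
  read_raw_image(url, &data, length, width, height, channels, format,
                 /* format_hint */ "");
  logstream(LOG_INFO) << width << "x" << height << "x" << channels
                      << std::endl;
  delete[] data;
}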
static void call_lua_function(lua::Value& function,
                              const flexible_type& arg,
                              flexible_type& ret) {
  lua::Value valret;
  switch (arg.get_type()) {
    case flex_type_enum::INTEGER:
      valret = function(arg.get<flex_int>());
      break;
    case flex_type_enum::FLOAT:
      valret = function(arg.get<flex_float>());
      break;
    case flex_type_enum::STRING:
      valret = function(arg.get<flex_string>().c_str());
      break;
    default:
      log_and_throw("Not Supported at the moment");
  }
  if (valret.is<lua::Integer>()) {
    lua::Integer val = 0;
    valret.get<lua::Integer>(val);
    ret = val;
  } else if (valret.is<lua::Number>()) {
    lua::Number val = 0;
    valret.get<lua::Number>(val);
    ret = val;
  } else if (valret.is<lua::String>()) {
    std::string val;
    valret.get<std::string>(val);
    ret = std::move(val);
  } else {
    ret = FLEX_UNDEFINED;
  }
}
std::streamsize read(char* strm_ptr, std::streamsize n) {
  // there is an upper limit of how many bytes we can read
  // based on the file size
  n = std::min<std::streamsize>(n, m_file_size - m_file_pos);
  std::streamsize ret = 0;
  while (n > 0) {
    // the block number containing the offset.
    auto block_number = m_file_pos / READ_CACHING_BLOCK_SIZE;
    // the offset inside the block
    auto block_offset = m_file_pos % READ_CACHING_BLOCK_SIZE;
    // number of bytes I can read inside this block before I hit the next block
    size_t n_bytes = (block_number + 1) * READ_CACHING_BLOCK_SIZE - m_file_pos;
    n_bytes = std::min<size_t>(n_bytes, n);
    bool success = fetch_block(strm_ptr + ret, block_number,
                               block_offset, n_bytes);
    if (success == false) {
      log_and_throw(std::string("Unable to read ") + m_filename);
    }
    n -= n_bytes;
    ret += n_bytes;
    // advance the file position
    m_file_pos += n_bytes;
  }
  return ret;
}
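// Worked example of the block arithmetic above (values illustrative): with a
// block size B, a read of n = 2*B bytes starting at m_file_pos = B + 10 is
// served by three fetch_block() calls -- the trailing B - 10 bytes of block 1,
// all B bytes of block 2, and the first 10 bytes of block 3.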
flex_dict_view::flex_dict_view(const flexible_type& value) {
  if (value.get_type() == flex_type_enum::DICT) {
    m_flex_dict_ptr = &(value.get<flex_dict>());
    return;
  }
  log_and_throw(std::string("Cannot construct a flex_dict_view object from type ") +
                flex_type_enum_to_name(value.get_type()));
}
std::map<std::string, flexible_type>
unity_global::describe_toolkit_function(std::string name) {
  auto spec = toolkit_functions->get_toolkit_function_info(name);
  if (spec == NULL) {
    log_and_throw(std::string("No such toolkit function!"));
  } else {
    return spec->description;
  }
}
variant_map_type unity_global::load_model(const std::string& url) {
  logstream(LOG_INFO) << "Load model from " << sanitize_url(url) << std::endl;
  try {
    dir_archive dir;
    dir.open_directory_for_read(url);
    std::string contents;
    if (dir.get_metadata("contents", contents) == false || contents != "model") {
      log_and_throw(std::string("Archive does not contain a model."));
    }
    iarchive iarc(dir);
    std::string model_name;
    std::string model_wrapper;
    char buf[256] = "";
    size_t magic_header_size = strlen(CLASS_MAGIC_HEADER);
    iarc.read(buf, magic_header_size);
    if (strcmp(buf, CLASS_MAGIC_HEADER)) {
      log_and_throw(std::string("Invalid model file."));
    }
    iarc >> model_name;
    logstream(LOG_INFO) << "Model name: " << model_name << std::endl;
    iarc >> model_wrapper;
    std::shared_ptr<model_base> model_ptr = classes->get_toolkit_class(model_name);
    iarc >> *(model_ptr);
    if (dir.get_input_stream()->fail()) {
      std::string message = "Failed to read.";
      log_and_throw_io_failure(message);
    }
    dir.close();
    variant_map_type ret;
    variant_set_value<std::shared_ptr<model_base>>(ret["model_base"], model_ptr);
    flexible_type flex_model_wrapper = (flexible_type)model_wrapper;
    variant_set_value<flexible_type>(ret["model_wrapper"], flex_model_wrapper);
    return ret;
  } catch (std::ios_base::failure& e) {
    std::string message = "Unable to load model from " + sanitize_url(url) +
                          ": " + e.what();
    log_and_throw_io_failure(message);
  } catch (std::string& e) {
    log_and_throw(std::string("Unable to load model from ") +
                  sanitize_url(url) + ": " + e);
  } catch (const std::exception& e) {
    log_and_throw(std::string("Unable to load model from ") +
                  sanitize_url(url) + ": " + e.what());
  } catch (...) {
    log_and_throw(std::string("Unknown Error: Unable to load model from ") +
                  sanitize_url(url));
  }
}
std::map<std::string, flexible_type>
toolkit_class_registry::get_toolkit_class_description(const std::string& class_name) {
  if (descriptions.count(class_name)) {
    return descriptions[class_name];
  } else {
    log_and_throw(std::string("Class " + class_name + " does not exist."));
  }
}
template <typename T>
T safe_varmap_get(const variant_map_type& kv, std::string key) {
  if (kv.count(key) == 0) {
    log_and_throw("Required key " + key + " not found");
  } else {
    return variant_get_value<T>(kv.at(key));
  }
  __builtin_unreachable();
}
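// A minimal usage sketch for safe_varmap_get() -- not from the source; the
// key "num_iterations" is an illustrative assumption.
static size_t example_required_option(const variant_map_type& opts) {
  // Throws "Required key ... not found" if the option is missing.
  return safe_varmap_get<size_t>(opts, "num_iterations");
}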
std::shared_ptr<model_base> toolkit_class_registry::get_toolkit_class(
    const std::string& class_name) {
  if (registry.count(class_name)) {
    return std::shared_ptr<model_base>(registry[class_name]());
  } else {
    log_and_throw(std::string("Class " + class_name + " does not exist."));
  }
}
/**
 * Utility function to throw an error if the vectors in an SArray are not all
 * of the same length.
 * \param[in] in gl_sarray of type vector
 */
void check_vector_equal_size(const gl_sarray& in) {
  // Initialize.
  DASSERT_TRUE(in.dtype() == flex_type_enum::VECTOR);
  size_t n_threads = thread::cpu_count();
  n_threads = std::max(n_threads, size_t(1));
  size_t m_size = in.size();

  // Throw the following error.
  auto throw_error = [](size_t row_number, size_t expected, size_t current) {
    std::stringstream ss;
    ss << "Vectors must be of the same size. Row " << row_number
       << " contains a vector of size " << current << ". Expected a vector of"
       << " size " << expected << "." << std::endl;
    log_and_throw(ss.str());
  };

  // Within each block of the SArray, check that the vectors have the same size.
  std::vector<size_t> expected_sizes(n_threads, size_t(-1));
  in_parallel([&](size_t thread_idx, size_t n_threads) {
    size_t start_row = thread_idx * m_size / n_threads;
    size_t end_row = (thread_idx + 1) * m_size / n_threads;

    size_t expected_size = size_t(-1);
    size_t row_number = start_row;
    for (const auto& v : in.range_iterator(start_row, end_row)) {
      if (v != FLEX_UNDEFINED) {
        if (expected_size == size_t(-1)) {
          expected_size = v.size();
          expected_sizes[thread_idx] = expected_size;
        } else {
          DASSERT_TRUE(v.get_type() == flex_type_enum::VECTOR);
          if (expected_size != v.size()) {
            throw_error(row_number, expected_size, v.size());
          }
        }
      }
      row_number++;
    }
  });

  // Make sure sizes across blocks are also the same.
  size_t vector_size = size_t(-1);
  for (size_t thread_idx = 0; thread_idx < n_threads; thread_idx++) {
    // If this block contains all None values, skip it.
    if (expected_sizes[thread_idx] != size_t(-1)) {
      if (vector_size == size_t(-1)) {
        vector_size = expected_sizes[thread_idx];
      } else {
        if (expected_sizes[thread_idx] != vector_size) {
          throw_error(thread_idx * m_size / n_threads, vector_size,
                      expected_sizes[thread_idx]);
        }
      }
    }
  }
}
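// A minimal usage sketch for check_vector_equal_size() -- not from the
// source; the column name "features" is an illustrative assumption.
static void example_validate_feature_column(const gl_sframe& sf) {
  gl_sarray features = sf["features"];  // must have dtype flex_type_enum::VECTOR
  check_vector_equal_size(features);    // throws on the first ragged row
}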
static bool file_contains_substring(std::string file, std::string substring) {
  general_ifstream fin(file);
  if (fin.fail()) {
    log_and_throw("Cannot open " + file);
  }
  size_t fsize = fin.file_size();
  if (fsize == (size_t)(-1)) {
    log_and_throw("Cannot open " + file);
  }
  char* buf = new char[fsize];
  fin.read(buf, fsize);
  auto f = boost::algorithm::boyer_moore_search(buf, buf + fsize,
                                                substring.begin(),
                                                substring.end());
  // return is true if found
  bool ret = (f != buf + fsize);
  delete[] buf;
  return ret;
}
void* gds_realloc(void* ptr, size_t size) {
  ptr = realloc(ptr, size);
  if (ptr == NULL) {
    log_and_throw(NotEnoughMemoryException,
                  "realloc failed to allocate %zu bytes", size);
  }
  return ptr;
}