/**
 * Demo driver: builds an RLC network from the component factory helpers and
 * prints its impedance at the fixed frequency FREQUENCY.
 *
 * The network is: a 1-ohm resistor in series with the parallel combination of
 * (100-ohm resistor + 0.2 H inductor in series) and (1 uF capacitor in
 * parallel with a 10 Mohm resistor).
 *
 * \return 0 on success.
 */
int main (void) {
  cout << __FILE__ << endl;

  // Build the network bottom-up; direct initialization instead of
  // default-construct-then-assign.
  CoolComplex circuit = in_series (
      resistor (1.0),
      in_parallel (
          in_series (resistor (100.0), inductor (0.2)),
          in_parallel (capacitor (0.000001), resistor (10000000.0))));

  // BUGFIX(review): corrected misspelling "impedence" -> "impedance" in the
  // user-facing output string.
  cout << "Circuit impedance is " << circuit
       << " at frequency " << FREQUENCY << "\n";
  return 0; // Exit with OK status
}
/** * Utility function to throw an error if a vector is of unequal length. * \param[in] gl_sarray of type vector */ void check_vector_equal_size(const gl_sarray& in) { // Initialize. DASSERT_TRUE(in.dtype() == flex_type_enum::VECTOR); size_t n_threads = thread::cpu_count(); n_threads = std::max(n_threads, size_t(1)); size_t m_size = in.size(); // Throw the following error. auto throw_error = [] (size_t row_number, size_t expected, size_t current) { std::stringstream ss; ss << "Vectors must be of the same size. Row " << row_number << " contains a vector of size " << current << ". Expected a vector of" << " size " << expected << "." << std::endl; log_and_throw(ss.str()); }; // Within each block of the SArray, check that the vectors have the same size. std::vector<size_t> expected_sizes (n_threads, size_t(-1)); in_parallel([&](size_t thread_idx, size_t n_threads) { size_t start_row = thread_idx * m_size / n_threads; size_t end_row = (thread_idx + 1) * m_size / n_threads; size_t expected_size = size_t(-1); size_t row_number = start_row; for (const auto& v: in.range_iterator(start_row, end_row)) { if (v != FLEX_UNDEFINED) { if (expected_size == size_t(-1)) { expected_size = v.size(); expected_sizes[thread_idx] = expected_size; } else { DASSERT_TRUE(v.get_type() == flex_type_enum::VECTOR); if (expected_size != v.size()) { throw_error(row_number, expected_size, v.size()); } } } row_number++; } }); // Make sure sizes accross blocks are also the same. size_t vector_size = size_t(-1); for (size_t thread_idx = 0; thread_idx < n_threads; thread_idx++) { // If this block contains all None values, skip it. if (expected_sizes[thread_idx] != size_t(-1)) { if (vector_size == size_t(-1)) { vector_size = expected_sizes[thread_idx]; } else { if (expected_sizes[thread_idx] != vector_size) { throw_error(thread_idx * m_size / n_threads, vector_size, expected_sizes[thread_idx]); } } } } }
/**
 * Return an SFrame describing each group: the group's key column(s) followed
 * by a "group_size" column holding the number of rows in that group.
 *
 * \return gl_sframe with columns [key columns..., "group_size"].
 *
 * Throws if no groups are present.
 */
gl_sframe grouped_sframe::group_info() const {
  if (m_group_names.size() == 0) {
    log_and_throw("No groups present. Cannot obtain group info.");
  }

  // Output column names: the key columns plus the size column.
  std::vector<std::string> ret_column_names = m_key_col_names;
  ret_column_names.push_back("group_size");
  DASSERT_EQ(ret_column_names.size(), m_key_col_names.size() + 1);

  // Output column types, derived from the first group's key.
  // BUGFIX(review): was DASSERT_TRUE(m_group_names.size() > 1), which
  // incorrectly fired when exactly one group exists even though only
  // m_group_names[0] is inspected below.
  DASSERT_TRUE(m_group_names.size() >= 1);
  std::vector<flex_type_enum> ret_column_types;
  flexible_type first_key = m_group_names[0];
  flex_type_enum key_type = first_key.get_type();
  if (key_type == flex_type_enum::LIST) {
    // Composite key: one output column per key element.
    for (size_t k = 0; k < first_key.size(); k++) {
      ret_column_types.push_back(first_key.array_at(k).get_type());
    }
  } else {
    ret_column_types.push_back(key_type);
  }
  ret_column_types.push_back(flex_type_enum::INTEGER);  // group_size column
  DASSERT_EQ(ret_column_types.size(), ret_column_names.size());

  // Prepare for writing.
  size_t num_segments = thread::cpu_count();
  gl_sframe_writer writer(ret_column_names, ret_column_types, num_segments);
  size_t range_dir_size = m_range_directory.size();

  // Write the group info. Each thread handles a contiguous slice of the
  // range directory and writes to its own segment.
  in_parallel([&](size_t thread_idx, size_t num_threads) {
    size_t start_idx = range_dir_size * thread_idx / num_threads;
    size_t end_idx = range_dir_size * (thread_idx + 1) / num_threads;
    for (size_t i = start_idx; i < end_idx; i++) {
      // Row span of group i: [m_range_directory[i], next entry or SF end).
      size_t range_start = m_range_directory[i];
      size_t range_end = 0;
      if ((i + 1) == m_range_directory.size()) {
        range_end = m_grouped_sf.size();
      } else {
        range_end = m_range_directory[i + 1];
      }
      size_t num_rows = range_end - range_start;
      std::vector<flexible_type> vals = m_group_names[i];
      vals.push_back(num_rows);
      DASSERT_EQ(vals.size(), ret_column_names.size());
      writer.write(vals, thread_idx);
    }
  });

  // BUGFIX(review): the original had `return writer.close();` INSIDE the
  // parallel lambda, which (a) closed the writer from every worker thread
  // and (b) left group_info() itself without a return statement (undefined
  // behavior for a non-void function). Close once, after all segments are
  // written, and return the result here.
  return writer.close();
}
/**
 * Compute a cumulative (prefix) aggregate of this SArray with the given
 * group_aggregate_value, returning an SArray of running aggregate values,
 * one output row per input row.
 *
 * Three-phase parallel prefix computation:
 *   Phase 1: each thread aggregates its own contiguous block of rows.
 *   Phase 2: block aggregates are combined so aggregators[i] holds the
 *            aggregate of all blocks 0..i.
 *   Phase 3: each thread re-scans its block, seeded with the combined
 *            aggregate of all preceding blocks, emitting one value per row.
 *
 * Missing (FLEX_UNDEFINED) rows are not fed to the aggregator; their output
 * row repeats the last emitted value (or FLEX_UNDEFINED if none yet).
 *
 * \param[in] aggregator prototype aggregate; fresh copies are made via
 *            new_instance(), the prototype itself is only used for
 *            set_input_types()/support_type() and as a copy source.
 * \return gl_sarray of the running aggregate, same length as this SArray.
 *
 * Throws (via log_and_throw) if the aggregator does not support this
 * SArray's dtype.
 */
gl_sarray gl_sarray::cumulative_aggregate(
    std::shared_ptr<group_aggregate_value> aggregator) const {
  flex_type_enum input_type = this->dtype();
  flex_type_enum output_type = aggregator->set_input_types({input_type});
  if (! aggregator->support_type(input_type)) {
    std::stringstream ss;
    ss << "Cannot perform this operation on an SArray of type "
       << flex_type_enum_to_name(input_type) << "." << std::endl;
    log_and_throw(ss.str());
  }

  // Empty case.
  size_t m_size = this->size();
  if (m_size == 0) {
    return gl_sarray({}, output_type);
  }

  // Make a copy of a newly initialized aggregate for each thread.
  size_t n_threads = thread::cpu_count();
  gl_sarray_writer writer(output_type, n_threads);
  std::vector<std::shared_ptr<group_aggregate_value>> aggregators;
  for (size_t i = 0; i < n_threads; i++) {
    aggregators.push_back(
        std::shared_ptr<group_aggregate_value>(aggregator->new_instance()));
  }

  // Skip Phases 1,2 when single threaded or more threads than rows.
  if ((n_threads > 1) && (m_size > n_threads)) {

    // Phase 1: Compute prefix-sums for each block.
    // Each thread folds only the non-missing rows of its own block into its
    // private aggregator; no cross-thread writes besides distinct slots.
    in_parallel([&](size_t thread_idx, size_t n_threads) {
      size_t start_row = thread_idx * m_size / n_threads;
      size_t end_row = (thread_idx + 1) * m_size / n_threads;
      for (const auto& v: this->range_iterator(start_row, end_row)) {
        DASSERT_TRUE(thread_idx < aggregators.size());
        if (v != FLEX_UNDEFINED) {
          aggregators[thread_idx]->add_element_simple(v);
        }
      }
    });

    // Phase 2: Combine prefix-sum(s) at the end of each block.
    // Iterates i high-to-low so each aggregators[i] absorbs the ORIGINAL
    // per-block values of aggregators[0..i-1]; afterwards aggregators[i]
    // holds the combined aggregate of blocks 0..i.
    for (size_t i = n_threads - 1; i > 0; i--) {
      for (size_t j = 0; j < i; j++) {
        DASSERT_TRUE(i < aggregators.size());
        DASSERT_TRUE(j < aggregators.size());
        aggregators[i]->combine(*aggregators[j]);
      }
    }
  }

  // Phase 3: Re-aggregate with a re-initialized prefix-sum carried over from
  // the previous blocks.
  auto reagg_fn = [&](size_t thread_idx, size_t n_threads) {
    flexible_type y = FLEX_UNDEFINED;  // last emitted value for this block
    size_t start_row = thread_idx * m_size / n_threads;
    size_t end_row = (thread_idx + 1) * m_size / n_threads;
    std::shared_ptr<group_aggregate_value> re_aggregator (
        aggregator->new_instance());

    // Initialize with the merged value of all preceding blocks
    // (aggregators[thread_idx - 1] covers blocks 0..thread_idx-1 after
    // Phase 2).
    if (thread_idx >= 1) {
      DASSERT_TRUE(thread_idx - 1 < aggregators.size());
      y = aggregators[thread_idx - 1]->emit();
      re_aggregator->combine(*aggregators[thread_idx - 1]);
    }

    // Write prefix-sum: one output row per input row; missing rows repeat
    // the previous emitted value rather than advancing the aggregate.
    for (const auto& v: this->range_iterator(start_row, end_row)) {
      if (v != FLEX_UNDEFINED) {
        re_aggregator->add_element_simple(v);
        y = re_aggregator->emit();
      }
      writer.write(y, thread_idx);
    }
  };

  // Run single threaded if more threads than rows.
  if (m_size > n_threads) {
    in_parallel(reagg_fn);
  } else {
    reagg_fn(0, 1);
  }
  return writer.close();
}