inline void load(
    Archive & ar,
    STD::vector<bool, Allocator> &t,
    const unsigned int /* file_version */
){
    // retrieve number of elements
    unsigned int count;
    ar >> BOOST_SERIALIZATION_NVP(count);
    t.clear();
    while(count-- > 0){
        bool i;
        ar >> boost::serialization::make_nvp("item", i);
        t.push_back(i);
    }
}
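// --- Added usage sketch (not from the original source) ---
// A minimal round trip through Boost.Serialization; the load() overload above is
// what the archive layer dispatches to for std::vector<bool>. The text-archive
// choice and the header names are assumptions made for illustration.
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/serialization/vector.hpp>
#include <sstream>
#include <vector>

int main()
{
    std::vector<bool> flags = {true, false, true};
    std::stringstream ss;
    {
        boost::archive::text_oarchive oa(ss);
        oa << flags;                       // the save() counterpart writes count + items
    }
    std::vector<bool> restored;
    {
        boost::archive::text_iarchive ia(ss);
        ia >> restored;                    // dispatches to the load() shown above
    }
    return restored == flags ? 0 : 1;
}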
u32 ChatBuffer::formatChatLine(const ChatLine& line, u32 cols, std::vector<ChatFormattedLine>& destination) const { u32 num_added = 0; std::vector<ChatFormattedFragment> next_frags; ChatFormattedLine next_line; ChatFormattedFragment temp_frag; u32 out_column = 0; u32 in_pos = 0; u32 hanging_indentation = 0; // Format the sender name and produce fragments if (!line.name.empty()) { temp_frag.text = L"<"; temp_frag.column = 0; //temp_frag.bold = 0; next_frags.push_back(temp_frag); temp_frag.text = line.name; temp_frag.column = 0; //temp_frag.bold = 1; next_frags.push_back(temp_frag); temp_frag.text = L"> "; temp_frag.column = 0; //temp_frag.bold = 0; next_frags.push_back(temp_frag); } std::wstring name_sanitized = sanitizeChatString(line.name); // Choose an indentation level if (line.name.empty()) { // Server messages hanging_indentation = 0; } else if (name_sanitized.size() + 3 <= cols/2) { // Names shorter than about half the console width hanging_indentation = name_sanitized.size() + 3; } else { // Very long names hanging_indentation = 2; } FMColoredString line_text(line.text); next_line.first = true; bool text_processing = false; // Produce fragments and layout them into lines while (!next_frags.empty() || in_pos < line_text.size()) { // Layout fragments into lines while (!next_frags.empty()) { ChatFormattedFragment& frag = next_frags[0]; if (frag.text.size() <= cols - out_column) { // Fragment fits into current line frag.column = out_column; next_line.fragments.push_back(frag); out_column += frag.text.size(); next_frags.erase(next_frags.begin()); } else { // Fragment does not fit into current line // So split it up temp_frag.text = frag.text.substr(0, cols - out_column); temp_frag.column = out_column; //temp_frag.bold = frag.bold; next_line.fragments.push_back(temp_frag); frag.text = frag.text.substr(cols - out_column); out_column = cols; } if (out_column == cols || text_processing) { // End the current line destination.push_back(next_line); num_added++; next_line.fragments.clear(); next_line.first = false; out_column = text_processing ? hanging_indentation : 0; } } // Produce fragment if (in_pos < line_text.size()) { u32 remaining_in_input = line_text.size() - in_pos; u32 remaining_in_output = cols - out_column; // Determine a fragment length <= the minimum of // remaining_in_{in,out}put. Try to end the fragment // on a word boundary. u32 frag_length = 1, space_pos = 0; while (frag_length < remaining_in_input && frag_length < remaining_in_output) { if (std::isspace(line_text.getString()[in_pos + frag_length])) space_pos = frag_length; ++frag_length; } if (space_pos != 0 && frag_length < remaining_in_input) frag_length = space_pos + 1; temp_frag.text = line_text.substr(in_pos, frag_length); temp_frag.column = 0; //temp_frag.bold = 0; next_frags.push_back(temp_frag); in_pos += frag_length; text_processing = true; } } // End the last line if (num_added == 0 || !next_line.fragments.empty()) { destination.push_back(next_line); num_added++; } return num_added; }
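// --- Added illustration (not from the original source) ---
// Standalone sketch of the wrapping strategy used above: emit at most `cols`
// characters per line and prefer to break just after the last space seen. The
// helper name wrapLine and the free-function form are hypothetical; the real code
// operates on ChatFormattedFragment objects and hanging indentation as shown above.
#include <cwctype>
#include <string>
#include <vector>

std::vector<std::wstring> wrapLine(const std::wstring &text, size_t cols)
{
    std::vector<std::wstring> lines;
    size_t in_pos = 0;
    while (in_pos < text.size()) {
        size_t remaining = text.size() - in_pos;
        size_t frag_length = 1, space_pos = 0;
        while (frag_length < remaining && frag_length < cols) {
            if (std::iswspace(text[in_pos + frag_length]))
                space_pos = frag_length;      // remember the last breakable position
            ++frag_length;
        }
        if (space_pos != 0 && frag_length < remaining)
            frag_length = space_pos + 1;      // break just after the word boundary
        lines.push_back(text.substr(in_pos, frag_length));
        in_pos += frag_length;
    }
    return lines;
}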
sframe groupby_aggregate(const sframe& source, const std::vector<std::string>& keys, const std::vector<std::string>& output_column_names, const std::vector<std::pair<std::vector<std::string>, std::shared_ptr<group_aggregate_value>>>& groups, size_t max_buffer_size) { // first, sanity checks // check that group keys exist if (output_column_names.size() != groups.size()) { log_and_throw("There must be as many output columns as there are groups"); } { // check that output column names are all unique, and do not intersect with // keys. Since empty values will be automatically assigned, we will skip // those. std::set<std::string> all_output_columns(keys.begin(), keys.end()); size_t named_column_count = 0; for (auto s: output_column_names) { if (!s.empty()) { all_output_columns.insert(s); ++named_column_count; } } if (all_output_columns.size() != keys.size() + named_column_count) { log_and_throw("Output columns names are not unique"); } } for (const auto& key: keys) { // check that the column name is valid if (!source.contains_column(key)) { log_and_throw("SFrame does not contain column " + key); } } // check that each group is valid for (const auto& group: groups) { // check that the column name is valid if (group.first.size() > 0) { for(size_t index = 0; index < group.first.size();index++) { auto& col_name = group.first[index]; if (!source.contains_column(col_name)) { log_and_throw("SFrame does not contain column " + col_name); } if(graphlab::registered_arg_functions.count(group.second->name()) != 0 && index > 0) continue; // check that the types are valid size_t column_number = source.column_index(col_name); if (!group.second->support_type(source.column_type(column_number))) { log_and_throw("Requested operation: " + group.second->name() + " not supported on the type of column " + col_name); } } } } // key should not have repeated columns std::set<std::string> key_columns; std::set<std::string> group_columns; for (const auto& key: keys) key_columns.insert(key); for (const auto& group: groups) { for(auto& col_name : group.first) { group_columns.insert(col_name); } } if (key_columns.size() != keys.size()) { log_and_throw("Group by key cannot have repeated column names"); } // ok. 
select out just the columns I care about // begin with the key columns std::vector<std::string> all_columns(key_columns.begin(), key_columns.end()); // then all the group columns (as long as they are not also key columns) for (const auto& group_column: group_columns) { if (group_column != "" && key_columns.count(group_column) == 0) { all_columns.push_back(group_column); } } sframe frame_with_relevant_cols = source.select_columns(all_columns); // prepare the output frame sframe output; std::vector<std::string> column_names; std::vector<flex_type_enum> column_types; // output frame has the key column name and types for (const auto& key: key_columns) { column_names.push_back(key); column_types.push_back(source.column_type(source.column_index(key))); } // then for each group, make a unique name and determine the output group type for (size_t i = 0;i < groups.size(); ++i) { const auto& group = groups[i]; std::string candidate_name = output_column_names[i]; if (candidate_name.empty()) { std::string root_candidate_name; if(graphlab::registered_arg_functions.count(group.second->name()) == 0) { for (auto& col_name: group.first) { if (root_candidate_name.empty()) { root_candidate_name += " of " + col_name; } else { root_candidate_name += "_" + col_name; } } root_candidate_name = group.second->name() + root_candidate_name; } else { if(group.first.size() != 2) log_and_throw("arg functions takes exactly two arguments"); root_candidate_name += group.first[1] + " for " + group.second->name() + " of " + group.first[0]; } candidate_name = root_candidate_name; size_t ctr = 1; // keep trying to come up with a unique column name while (std::find(column_names.begin(), column_names.end(), candidate_name) != column_names.end()) { candidate_name = root_candidate_name + "." + std::to_string(ctr); ++ctr; } } column_names.push_back(candidate_name); std::vector<flex_type_enum> input_types; for(auto col_name : group.first) { input_types.push_back(source.column_type(source.column_index(col_name))); } // this statement is valid for argmax and argmin as well, because their // set_input_types(...) simply return input_types. auto output_type = group.second->set_input_types(input_types); column_types.push_back(output_type); } // done! now we can start on the groupby size_t nsegments = frame_with_relevant_cols.num_segments(); // either nsegments, or n*log n buckets nsegments = std::max(nsegments, thread::cpu_count() * std::max<size_t>(1, log2(thread::cpu_count()))); output.open_for_write(column_names, column_types, "", nsegments); groupby_aggregate_impl::group_aggregate_container container(max_buffer_size, nsegments); // ok the input sframe (frame_with_relevant_cols) contains all the values // we care about. However, the challenge here is to figure out how the keys // and values line up. By construction, all the key columns come first. // which is good. But group columns can be pretty much anywhere. size_t num_keys = keys.size(); for (const auto& group: groups) { std::vector<size_t> column_numbers; for(auto& col_name : group.first) { column_numbers.push_back(frame_with_relevant_cols.column_index(col_name)); } container.define_group(column_numbers, group.second); } // done. now we can begin parallel processing // shuffle the rows based on the value of the key column. 
auto input_reader = frame_with_relevant_cols.get_reader(thread::cpu_count()); graphlab::timer ti; logstream(LOG_INFO) << "Filling group container: " << std::endl; parallel_for (0, input_reader->num_segments(), [&](size_t i) { auto iter = input_reader->begin(i); auto enditer = input_reader->end(i); while(iter != enditer) { auto& row = *iter; container.add(row, num_keys); ++iter; } }); logstream(LOG_INFO) << "Group container filled in " << ti.current_time() << std::endl; logstream(LOG_INFO) << "Writing output: " << std::endl; ti.start(); container.group_and_write(output); logstream(LOG_INFO) << "Output written in: " << ti.current_time() << std::endl; output.close(); return output; }
/* ************************************************************************** * Compute Connector widths that this class requires in order to work * properly on a given hierarchy. ************************************************************************** */ void RefineScheduleConnectorWidthRequestor::computeRequiredConnectorWidths( std::vector<hier::IntVector>& self_connector_widths, std::vector<hier::IntVector>& fine_connector_widths, const hier::PatchHierarchy& patch_hierarchy) const { int max_levels = patch_hierarchy.getMaxNumberOfLevels(); const tbox::Dimension& dim(patch_hierarchy.getDim()); /* * Add one to max data ghost width to create overlaps of data * living on patch boundaries. */ const hier::IntVector max_data_gcw( patch_hierarchy.getPatchDescriptor()->getMaxGhostWidth(dim) + 1); hier::IntVector max_stencil_width = patch_hierarchy.getGridGeometry()->getMaxTransferOpStencilWidth(dim); max_stencil_width.max( RefinePatchStrategy::getMaxRefineOpStencilWidth(dim)); hier::IntVector zero_vector(hier::IntVector::getZero(dim), patch_hierarchy.getNumberBlocks()); /* * Compute the Connector width needed to ensure all edges are found * during mesh recursive refine schedule generation. It is safe to * be conservative, but carrying around a larger than necessary * width requires more memory and slows down Connector operations. * * All Connectors to self need to be at least wide enough to * support the copy of data from the same level into ghost cells. * Thus, the width should be at least that of the max ghost data * width. On the finest level, there is no other requirement. For * other levels, we need enough width for: * * - refining the next finer level * * - refining recursively starting at each of the levels finer than * it. */ hier::IntVector self_width(max_data_gcw * d_gcw_factor, patch_hierarchy.getNumberBlocks()); self_connector_widths.clear(); self_connector_widths.resize(max_levels, self_width); fine_connector_widths.clear(); if (max_levels > 1) { fine_connector_widths.resize(max_levels - 1, zero_vector); // to be computed below. } /* * Note that the following loops go from fine to coarse. This is * because Connector widths for coarse levels depend on those for * fine levels. */ for (int ln = max_levels - 1; ln > -1; --ln) { computeRequiredFineConnectorWidthsForRecursiveRefinement( fine_connector_widths, max_data_gcw, max_stencil_width, patch_hierarchy, ln); } }
/* Import a graph from a file. * Requires: * @nodes The list to be filled with the nodes * @container The container to put the vertexs * @edges The list of edges to be filled * @fname The filename * Returns: * true on success * false otherwise */ bool TriNavMeshBuilder::importGraph(std::vector<GNode *> &nodes, PolyStructsContainer<sm::Vertex *> &container, PolyStructsContainer<Triangle *> &triangles, std::vector<GEdge *> &edges, const Ogre::String &fname) { std::ifstream in; in.open(fname.c_str()); if(!in.is_open()){ debugERROR("Error while opening the file %s\n", fname.c_str()); return false; } if(!container.isEmpty()){ debugWARNING("Warning, Vertex container is not empty\n"); ASSERT(false); } if(!triangles.isEmpty()){ debugWARNING("Warning, triangles is not empty\n"); ASSERT(false); } // first read the vertexs list std::vector<sm::Vertex *> vertexs; int aux; in >> aux; ASSERT(aux > 0); vertexs.reserve(aux); for(int i = 0; i < aux; ++i){ sm::Vertex *v = new sm::Vertex; in >> v->x; in >> v->y; vertexs.push_back(v); } ASSERT(in.good()); // now read the number of nodes nodes.clear(); in >> aux; nodes.reserve(aux); std::vector<Triangle *> triangVec; int i1, i2, i3; for(int i = 0; i < aux; ++i){ in >> i1; in >> i2; in >> i3; ASSERT(i1 < vertexs.size() && i2 < vertexs.size() && i3 < vertexs.size()); Triangle *t = new Triangle(vertexs[i1], vertexs[i2], vertexs[i3]); triangVec.push_back(t); GNode *node = new GNode(t); nodes.push_back(node); } ASSERT(in.good()); // now get all the edges edges.clear(); in >> aux; ASSERT(aux > 0); edges.reserve(aux); bool ok = false; float weight; for(int i = 0; i < aux; ++i){ in >> i1; in >> i2; in >> weight; if(i1 >= nodes.size() || i2 >= nodes.size()){ debugRED("i1: %d, size: %zd, i2: %d, size: %zd\n", i1, nodes.size(), i2, nodes.size()); ASSERT(i1 < nodes.size() && i2 < nodes.size()); } GNode *n1 = nodes[i1], *n2 = nodes[i2]; GEdge *e = new GEdge(n1,n2); ok = n1->setNewEdge(e); ASSERT(ok); ok = n2->setNewEdge(e); ASSERT(ok); edges.push_back(e); e->setWeight(weight); } // Save the triangles and the vertexs for(int i = vertexs.size()-1; i >= 0; --i){ container.addObj(vertexs[i]); } vertexs.clear(); for(int i = triangVec.size()-1; i >= 0; --i){ triangles.addObj(triangVec[i]); } triangVec.clear(); return true; }
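// --- Added format note (not from the original source) ---
// Hypothetical example of the plain-text layout importGraph() parses (and that
// exportGraph() further below writes): counts followed by records.
//
//   4              <- number of vertices
//   0 0
//   1 0
//   1 1
//   0 1            <- x y per vertex
//   2              <- number of nodes (triangles)
//   0 1 2
//   0 2 3          <- three vertex indices per triangle
//   1              <- number of edges
//   0 1 1.5        <- node index, node index, edge weight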
bool ReportFixture(b2Fixture* fixture){ foundBodies.push_back(fixture->GetBody()); return true; }
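// --- Added usage sketch (not from the original source) ---
// ReportFixture() above is the b2QueryCallback hook Box2D invokes once per fixture
// whose AABB overlaps the query region; returning true keeps the query running.
// The surrounding class and the foundBodies member are assumed here, and the
// include path can differ between Box2D versions.
#include <Box2D/Box2D.h>
#include <vector>

class BodyQueryCallback : public b2QueryCallback
{
public:
    std::vector<b2Body*> foundBodies;

    bool ReportFixture(b2Fixture* fixture) override
    {
        foundBodies.push_back(fixture->GetBody());
        return true;                       // keep reporting further fixtures
    }
};

void queryRegion(b2World &world)
{
    b2AABB aabb;
    aabb.lowerBound.Set(-1.0f, -1.0f);
    aabb.upperBound.Set( 1.0f,  1.0f);

    BodyQueryCallback callback;
    world.QueryAABB(&callback, aabb);      // calls ReportFixture per overlapping fixture
    // callback.foundBodies now holds every body found in the region
}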
CravaTrend::CravaTrend(Simbox * timeSimbox, Simbox * timeCutSimbox, ModelSettings * modelSettings, bool & failed, std::string & errTxt, const InputFiles * inputFiles) { n_samples_ = 1000; const std::vector<std::string> trend_cube_parameters = modelSettings->getTrendCubeParameters(); const std::vector<int> trend_cube_type = modelSettings->getTrendCubeType(); n_trend_cubes_ = static_cast<int>(trend_cube_parameters.size()); std::vector<std::string> trendCubeNames(n_trend_cubes_); if(n_trend_cubes_ > 0) { std::string errorText = ""; const int nx = timeSimbox->getnx(); const int ny = timeSimbox->getny(); const int nz = timeSimbox->getnz(); const int nxp = nx; const int nyp = ny; const int nzp = nz; const int rnxp = 2*(nxp/2 + 1); for(int grid_number=0; grid_number<n_trend_cubes_; grid_number++) { FFTGrid * trend_cube = NULL; const std::string log_name = "trend cube '"+trend_cube_parameters[grid_number]+"'"; if(trend_cube_type[grid_number] == ModelSettings::CUBE_FROM_FILE) { trendCubeNames[grid_number] = inputFiles->getTrendCube(grid_number); const SegyGeometry * dummy1 = NULL; const TraceHeaderFormat * dummy2 = NULL; const float offset = modelSettings->getSegyOffset(0); //Facies estimation only allowed for one time lapse ModelGeneral::readGridFromFile(trendCubeNames[grid_number], log_name, offset, trend_cube, dummy1, dummy2, FFTGrid::PARAMETER, timeSimbox, timeCutSimbox, modelSettings, errorText, true); if(errorText != "") { errorText += "Reading of file \'"+trendCubeNames[grid_number]+"\' failed\n"; errTxt += errorText; failed = true; } } else if(trend_cube_type[grid_number] == ModelSettings::STRATIGRAPHIC_DEPTH) { LogKit::LogFormatted(LogKit::Low,"\nGenerating trend grid \'"+trend_cube_parameters[grid_number]+"\'\n"); trend_cube = ModelGeneral::createFFTGrid(nx, ny, nz, nxp, nyp, nzp, false); trend_cube->createRealGrid(); trend_cube->setAccessMode(FFTGrid::WRITE); for(int k=0; k<nzp; k++) { for(int j=0; j<nyp; j++) { for(int i=0; i<rnxp; i++) { if(i < nx) trend_cube->setRealValue(i, j, k, static_cast<float>(k)); else trend_cube->setRealValue(i, j, k, 0); } } } trend_cube->endAccess(); } else if(trend_cube_type[grid_number] == ModelSettings::TWT) { LogKit::LogFormatted(LogKit::Low,"\nGenerating trend grid \'"+trend_cube_parameters[grid_number]+"\'\n"); trend_cube = ModelGeneral::createFFTGrid(nx, ny, nz, nxp, nyp, nzp, false); trend_cube->createRealGrid(); trend_cube->setAccessMode(FFTGrid::WRITE); for(int k=0; k<nzp; k++) { for(int j=0; j<nyp; j++) { for(int i=0; i<rnxp; i++) { if(i < nx) { float value = static_cast<float>(timeSimbox->getTop(i,j) + timeSimbox->getdz(i,j)*k); trend_cube->setRealValue(i, j, k, value); } else trend_cube->setRealValue(i, j, k, 0); } } } trend_cube->endAccess(); } NRLib::Grid<double> grid_cube(nx, ny, nz); for(int k=0; k<nzp; k++) { for(int j=0; j<nyp; j++) { for(int i=0; i<rnxp; i++) { if (i < nx && j < ny && k < nz) grid_cube(i,j,k) = trend_cube->getRealValue(i,j,k); } } } trend_cubes_.push_back(grid_cube); // Calculate trend_cube_sampling_ // Sample all trends from min to max of the trend cube, using increment_ in the sampling trend_cube->calculateStatistics(); const float max = trend_cube->getMaxReal(); const float min = trend_cube->getMinReal(); const double increment = (max-min)/(n_samples_-1); std::vector<double> sampling(n_samples_); for(int j=0; j<n_samples_-1; j++) sampling[j] = min + j*increment; sampling[n_samples_-1] = max; trend_cube_sampling_.push_back(sampling); if((modelSettings->getOutputGridsOther() & IO::TREND_CUBES) > 0) { std::string fileName 
= IO::PrefixTrendCubes() + trend_cube_parameters[grid_number]; writeToFile(timeSimbox, trend_cube, fileName, "trend cube"); } delete trend_cube; } } }
bool basic_router_setup() { sai_status_t status; // setup saiport for each ethport LOGG(TEST_INFO, SETL3, "sai_switch_api->get_switch_attribute SAI_SWITCH_ATTR_PORT_NUMBER\n"); sai_attribute_t attr; attr.id = SAI_SWITCH_ATTR_PORT_NUMBER; status = sai_switch_api->get_switch_attribute(1, &attr); if (status != SAI_STATUS_SUCCESS) { LOGG(TEST_ERR, SETL3, "fail to get SAI_SWITCH_ATTR_PORT_NUMBER %d", -status); return false; } LOGG(TEST_DEBUG, SETL3, "SAI_SWITCH_ATTR_PORT_NUMBER %d\n", attr.value.u32); sai_uint32_t port_count = attr.value.u32; //We will cover all of the ports supoorted g_testcount = port_count; sai_object_id_t *port_list = new sai_object_id_t[port_count]; LOGG(TEST_INFO, SETL3, "sai_switch_api->get_switch_attribute SAI_SWITCH_ATTR_PORT_LIST\n"); attr.id = SAI_SWITCH_ATTR_PORT_LIST; attr.value.objlist.count = port_count; attr.value.objlist.list = port_list; status = sai_switch_api->get_switch_attribute(1, &attr); if (status != SAI_STATUS_SUCCESS) { LOGG(TEST_ERR, SETL3, "fail to get SAI_SWITCH_ATTR_PORT_LIST %d", -status); return false; } unsigned int i = 0; sai_object_id_t vlan_member_id; while (!vlan_member_list.empty()) { vlan_member_id = vlan_member_list.back(); if (!SAI_OID_TYPE_CHECK(vlan_member_id, SAI_OBJECT_TYPE_VLAN_MEMBER)) { LOGG(TEST_ERR, SETL3, "vlan_member_id retrieved is not the right type%d", -status); return false; } LOGG(TEST_INFO, SETL3, "sai_vlan_api->remove_vlan_member\n"); status = sai_vlan_api->remove_vlan_member(vlan_member_id); if (status != SAI_STATUS_SUCCESS ) { LOGG(TEST_ERR, SETL3, "fail to remove member ports from vlan 1. status=0x%x\n", -status); return false; } vlan_member_list.pop_back(); } LOGG(TEST_INFO, SETL3, "sai_hif_api->set_trap_attribute SAI_HOSTIF_TRAP_ATTR_PACKET_ACTION, TTL_ERROR\n"); attr.id = SAI_HOSTIF_TRAP_ATTR_PACKET_ACTION; attr.value.s32 = SAI_PACKET_ACTION_TRAP; status = sai_hif_api->set_trap_attribute(SAI_HOSTIF_TRAP_ID_TTL_ERROR, &attr); if (status != SAI_STATUS_SUCCESS) { LOGG(TEST_ERR, SETL3, "fail to trap ttl=1 packets to cpu. status=0x%x\n", -status); return false; } attr.id = SAI_HOSTIF_TRAP_ATTR_TRAP_CHANNEL; attr.value.s32 = SAI_HOSTIF_TRAP_CHANNEL_NETDEV; status = sai_hif_api->set_trap_attribute(SAI_HOSTIF_TRAP_ID_TTL_ERROR, &attr); if (status != SAI_STATUS_SUCCESS) { LOGG(TEST_ERR, SETL3, "fail to set trap channel for SAI_HOSTIF_TRAP_ID_TTL_ERROR. status=0x%x\n", -status); return false; } LOGG(TEST_DEBUG, SETL3, "set SAI_HOSTIF_TRAP_ID_TTL_ERROR \n"); LOGG(TEST_INFO, SETL3, "sai_hif_api->set_trap_attribute SAI_HOSTIF_TRAP_ATTR_PACKET_ACTION, ARP_REQUEST\n"); attr.id = SAI_HOSTIF_TRAP_ATTR_PACKET_ACTION; attr.value.s32 = SAI_PACKET_ACTION_TRAP; status = sai_hif_api->set_trap_attribute(SAI_HOSTIF_TRAP_ID_ARP_REQUEST, &attr); if (status != SAI_STATUS_SUCCESS) { LOGG(TEST_ERR, SETL3, "fail to trap arp request packets to cpu. status=0x%x\n", -status); return false; } attr.id = SAI_HOSTIF_TRAP_ATTR_TRAP_CHANNEL; attr.value.s32 = SAI_HOSTIF_TRAP_CHANNEL_NETDEV; status = sai_hif_api->set_trap_attribute(SAI_HOSTIF_TRAP_ID_ARP_REQUEST, &attr); if (status != SAI_STATUS_SUCCESS) { LOGG(TEST_ERR, SETL3, "fail to set trap channel for SAI_HOSTIF_TRAP_ID_ARP_REQUEST. 
status=0x%x\n", -status); return false; } LOGG(TEST_DEBUG, SETL3, "set SAI_HOSTIF_TRAP_ID_ARP_REQUEST \n"); LOGG(TEST_INFO, SETL3, "sai_hif_api->set_trap_attribute SAI_HOSTIF_TRAP_ATTR_PACKET_ACTION, ARP_RESPONSE\n"); attr.id = SAI_HOSTIF_TRAP_ATTR_PACKET_ACTION; attr.value.s32 = SAI_PACKET_ACTION_TRAP; status = sai_hif_api->set_trap_attribute(SAI_HOSTIF_TRAP_ID_ARP_RESPONSE, &attr); if (status != SAI_STATUS_SUCCESS) { LOGG(TEST_ERR, SETL3, "fail to trap arp reply packets to cpu. status=0x%x\n", -status); return false; } attr.id = SAI_HOSTIF_TRAP_ATTR_TRAP_CHANNEL; attr.value.s32 = SAI_HOSTIF_TRAP_CHANNEL_NETDEV; status = sai_hif_api->set_trap_attribute(SAI_HOSTIF_TRAP_ID_ARP_RESPONSE, &attr); if (status != SAI_STATUS_SUCCESS) { LOGG(TEST_ERR, SETL3, "fail to set trap channel for SAI_HOSTIF_TRAP_ID_ARP_RESPONSE. status=0x%x\n", -status); return false; } LOGG(TEST_DEBUG, SETL3, "set SAI_HOSTIF_TRAP_ID_ARP_RESPONSE \n"); LOGG(TEST_INFO, SETL3, "sai_hif_api->set_trap_attribute SAI_HOSTIF_TRAP_ATTR_PACKET_ACTION, LLDP\n"); attr.id = SAI_HOSTIF_TRAP_ATTR_PACKET_ACTION; attr.value.s32 = SAI_PACKET_ACTION_TRAP; status = sai_hif_api->set_trap_attribute(SAI_HOSTIF_TRAP_ID_LLDP, &attr); if (status != SAI_STATUS_SUCCESS) { LOGG(TEST_ERR, SETL3, "fail to trap lldp packets to cpu. status=0x%x\n", -status); return false; } attr.id = SAI_HOSTIF_TRAP_ATTR_TRAP_CHANNEL; attr.value.s32 = SAI_HOSTIF_TRAP_CHANNEL_NETDEV; status = sai_hif_api->set_trap_attribute(SAI_HOSTIF_TRAP_ID_LLDP, &attr); if (status != SAI_STATUS_SUCCESS) { LOGG(TEST_ERR, SETL3, "fail to set trap channel for SAI_HOSTIF_TRAP_ID_LLDP. status=0x%x\n", -status); return false; } LOGG(TEST_DEBUG, SETL3, "set SAI_HOSTIF_TRAP_ID_LLDP \n"); LOGG(TEST_INFO, SETL3, "sai_vr_api->create_virtual_router\n"); status = sai_vr_api->create_virtual_router(&g_vr_id, 0, NULL); if (status != SAI_STATUS_SUCCESS) { LOGG(TEST_ERR, SETL3, "fail to create virtual router. status=0x%x", -status); return false; } if (!SAI_OID_TYPE_CHECK(g_vr_id, SAI_OBJECT_TYPE_VIRTUAL_ROUTER)) { LOGG(TEST_ERR, SETL3, "virtual router oid generated is not the right type\n"); return false; } LOGG(TEST_DEBUG, SETL3, "virtual rounter id 0x%lx\n", g_vr_id); LOGG(TEST_INFO, SETL3, "for each port, sai_port_api->set_port_attribute SAI_PORT_ATTR_ADMIN_STATE true\n"); LOGG(TEST_INFO, SETL3, "for each port, sai_port_api->set_port_attribute, SAI_PORT_ATTR_FDB_LEARNING, SAI_PORT_LEARN_MODE_HW\n"); for (i = 0; i < port_count; i++) { attr.id = SAI_PORT_ATTR_ADMIN_STATE; attr.value.booldata = true; sai_status_t status = sai_port_api->set_port_attribute(port_list[i], &attr); if (status != SAI_STATUS_SUCCESS) { LOGG(TEST_ERR, SETL3, "fail to set port 0x%lx admin state to UP: %d\n", port_list[i], -status); return false; } attr.id = SAI_PORT_ATTR_FDB_LEARNING; attr.value.s32 = SAI_PORT_LEARN_MODE_HW; status = sai_port_api->set_port_attribute(port_list[i], &attr); if (status != SAI_STATUS_SUCCESS) { LOGG(TEST_ERR, SETL3, "fail to set port 0x%lx learning mode to hw: %d\n", port_list[i], -status); return false; } } //one interface for each port for (i = 0; i < g_testcount; i++) { g_intfAlias[i] = "et0_" + to_string(i + 1); g_ipAddr[i] = IpAddress("10.10." + to_string(140 + i + 1) + "." 
+ to_string(130)); g_ipMask[i] = IpAddress("255.255.255.252"); g_macAddr[i] = MacAddress("00:11:11:11:11:" + to_string(i + 1)); } for (i = 0; i < g_testcount; i++) { LOGG(TEST_DEBUG, SETL3, "--- interface %s %s/%s %s ---\n", g_intfAlias[i].c_str(), g_ipAddr[i].to_string().c_str(), g_ipMask[i].to_string().c_str(), g_macAddr[i].to_string().c_str() ); std::vector<sai_object_id_t> port_objlist; long unsigned int vlanid; sai_attribute_t attr; std::vector<sai_attribute_t> attr_list; //assuming interface is of PANEL_INTF vlanid = PANEL_PORT_VLAN_START + i + 1; port_objlist.push_back(port_list[i]); if (!setup_one_l3_interface(vlanid, port_objlist.size(), port_objlist.data(), g_macAddr[i], g_ipAddr[i], g_ipMask[i], g_rif_id[i])) { LOGG(TEST_ERR, SETL3, "fail to setup l3 interface for %s\n", g_intfAlias[i].c_str()); return false; } LOGG(TEST_DEBUG, SETL3, "setup_l3_interface for %s successfully\n", g_intfAlias[i].c_str()); attr.id = SAI_HOSTIF_ATTR_TYPE; attr.value.s32 = SAI_HOSTIF_TYPE_NETDEV; attr_list.push_back(attr); attr.id = SAI_HOSTIF_ATTR_RIF_OR_PORT_ID; attr.value.oid = port_list[i]; attr_list.push_back(attr); attr.id = SAI_HOSTIF_ATTR_NAME; strncpy((char *)&attr.value.chardata, g_intfAlias[i].c_str(), HOSTIF_NAME_SIZE); attr_list.push_back(attr); LOGG(TEST_INFO, SETL3, "sai_hif_api->create_hostif name %s\n", g_intfAlias[i].c_str()); sai_object_id_t hif_id; status = sai_hif_api->create_hostif(&hif_id, attr_list.size(), attr_list.data()); if (status != SAI_STATUS_SUCCESS) { LOGG(TEST_ERR, SETL3, "fail to create host interface name %s: %d\n", g_intfAlias[i].c_str(), -status); return false; } if (!SAI_OID_TYPE_CHECK(hif_id, SAI_OBJECT_TYPE_HOST_INTERFACE)) { LOGG(TEST_ERR, SETL3, "host interface oid generated is not the right type\n"); return false; } LOGG(TEST_DEBUG, SETL3, "hif_id 0x%lx \n", hif_id); } LOGG(TEST_DEBUG, SETL3, "--- end of loof of interface ---\n"); for (i = 0; i < g_testcount; i++) { g_dst_mac[i] = MacAddress("00:22:22:22:22:" + to_string(i + 1)); } fdb_mgr->Show(); for (i = 0; i < g_testcount; i++) { if (! fdb_mgr->Add(g_dst_mac[i], PANEL_PORT_VLAN_START + i + 1, SAI_FDB_ENTRY_STATIC, port_list[i], SAI_PACKET_ACTION_FORWARD)) { LOGG(TEST_ERR, SETL3, "fail to create sai_fdb_entry {mac %-15s vlan_id %hu}\n", g_dst_mac[i].to_string().c_str(), PANEL_PORT_VLAN_START + i + 1); return false; } } fdb_mgr->Show(); return true; }
unsigned long StaticRangeCoder::encodeCharVectorToStream (const std::vector<char>& inputByteVector_arg, std::ostream& outputByteStream_arg) { DWord freq[257]; uint8_t ch; int i, f; char out; // define numerical limits const DWord top = (DWord)1 << 24; const DWord bottom = (DWord)1 << 16; const DWord maxRange = (DWord)1 << 16; DWord low, range; unsigned int input_size; input_size = inputByteVector_arg.size (); unsigned int readPos; unsigned long streamByteCount; streamByteCount = 0; // init output vector outputCharVector_.clear(); outputCharVector_.reserve(sizeof(char) * input_size); uint64_t FreqHist[257]; // calculate frequency table memset (FreqHist, 0, sizeof(FreqHist)); readPos = 0; while (readPos < input_size) { uint8_t symbol = (uint8_t)inputByteVector_arg[readPos++]; FreqHist[symbol + 1]++; } // convert to cumulative frequency table freq[0] = 0; for (f = 1; f <= 256; f++) { freq[f] = freq[f - 1] + (DWord)FreqHist[f]; if (freq[f] <= freq[f - 1]) freq[f] = freq[f - 1] + 1; } // rescale if numerical limits are reached while (freq[256] >= maxRange) { for (f = 1; f <= 256; f++) { freq[f] /= 2; ; if (freq[f] <= freq[f - 1]) freq[f] = freq[f - 1] + 1; } } // write cumulative frequency table to output stream outputByteStream_arg.write ((const char *)&freq[0], sizeof(freq)); streamByteCount += sizeof(freq); readPos = 0; low = 0; range = (DWord)-1; // start encoding while (readPos < input_size) { // read symol ch = inputByteVector_arg[readPos++]; // map to range low += freq[ch] * (range /= freq[256]); range *= freq[ch + 1] - freq[ch]; // check range limits while ((low ^ (low + range)) < top || ((range < bottom) && ((range = -low & (bottom - 1)), 1))) { out = low >> 24; range <<= 8; low <<= 8; outputCharVector_.push_back(out); } } // flush remaining data for (i = 0; i < 4; i++) { out = low >> 24; outputCharVector_.push_back(out); low <<= 8; } // write encoded data to stream outputByteStream_arg.write (&outputCharVector_[0], outputCharVector_.size()); streamByteCount += outputCharVector_.size(); return streamByteCount; }
unsigned long StaticRangeCoder::decodeStreamToIntVector (std::istream& inputByteStream_arg, std::vector<unsigned int>& outputIntVector_arg) { uint8_t ch; unsigned int i, f; // define range limits const uint64_t top = (uint64_t)1 << 56; const uint64_t bottom = (uint64_t)1 << 48; uint64_t low, range; uint64_t code; unsigned int outputBufPos; unsigned long output_size; uint64_t frequencyTableSize; unsigned char frequencyTableByteSize; unsigned long streamByteCount; streamByteCount = 0; outputBufPos = 0; output_size = outputIntVector_arg.size (); // read size of cumulative frequency table from stream inputByteStream_arg.read ((char*)&frequencyTableSize, sizeof(frequencyTableSize)); inputByteStream_arg.read ((char*)&frequencyTableByteSize, sizeof(frequencyTableByteSize)); streamByteCount += sizeof(frequencyTableSize)+sizeof(frequencyTableByteSize); // check size of frequency table vector if (cFreqTable_.size () < frequencyTableSize) { cFreqTable_.resize (frequencyTableSize); } // init with zero memset (&cFreqTable_[0], 0, sizeof(uint64_t) * frequencyTableSize); // read cumulative frequency table for (f = 1; f < frequencyTableSize; f++) { inputByteStream_arg.read ((char *)&cFreqTable_[f], frequencyTableByteSize); streamByteCount += frequencyTableByteSize; } // initialize range & code code = 0; low = 0; range = (uint64_t)-1; // init code vector for (i = 0; i < 8; i++) { inputByteStream_arg.read ((char*)&ch, sizeof(char)); streamByteCount += sizeof(char); code = (code << 8) | ch; } // decoding for (i = 0; i < output_size; i++) { uint64_t count = (code - low) / (range /= cFreqTable_[frequencyTableSize - 1]); // symbol lookup in cumulative frequency table uint64_t symbol = 0; uint64_t sSize = (frequencyTableSize - 1) / 2; while (sSize > 0) { if (cFreqTable_[symbol + sSize] <= count) { symbol += sSize; } sSize /= 2; } // write symbol to output stream outputIntVector_arg[outputBufPos++] = symbol; // map to range low += cFreqTable_[symbol] * range; range *= cFreqTable_[symbol + 1] - cFreqTable_[symbol]; // check range limits while ((low ^ (low + range)) < top || ((range < bottom) && ((range = -low & (bottom - 1)), 1))) { inputByteStream_arg.read ((char*)&ch, sizeof(char)); streamByteCount += sizeof(char); code = code << 8 | ch; range <<= 8; low <<= 8; } } return streamByteCount; }
unsigned long StaticRangeCoder::encodeIntVectorToStream (std::vector<unsigned int>& inputIntVector_arg, std::ostream& outputByteStream_arg) { unsigned int inputsymbol; unsigned int i, f; char out; uint64_t frequencyTableSize; uint8_t frequencyTableByteSize; // define numerical limits const uint64_t top = (uint64_t)1 << 56; const uint64_t bottom = (uint64_t)1 << 48; const uint64_t maxRange = (uint64_t)1 << 48; unsigned long input_size = (unsigned) inputIntVector_arg.size (); uint64_t low, range; unsigned int inputSymbol; unsigned int readPos; unsigned long streamByteCount; streamByteCount = 0; // init output vector outputCharVector_.clear(); outputCharVector_.reserve(sizeof(char) * input_size * 2); frequencyTableSize = 1; readPos = 0; // calculate frequency table cFreqTable_[0] = cFreqTable_[1] = 0; while (readPos < input_size) { inputSymbol = inputIntVector_arg[readPos++]; if (inputSymbol + 1 >= frequencyTableSize) { // frequency table is to small -> adaptively extend it uint64_t oldfrequencyTableSize; oldfrequencyTableSize = frequencyTableSize; do { // increase frequency table size by factor 2 frequencyTableSize <<= 1; } while (inputSymbol + 1 > frequencyTableSize); if (cFreqTable_.size () < frequencyTableSize + 1) { // resize frequency vector cFreqTable_.resize (frequencyTableSize + 1); } // init new frequency range with zero memset (&cFreqTable_[oldfrequencyTableSize + 1], 0, sizeof(uint64_t) * (frequencyTableSize - oldfrequencyTableSize)); } cFreqTable_[inputSymbol + 1]++; } frequencyTableSize++; // convert to cumulative frequency table for (f = 1; f < frequencyTableSize; f++) { cFreqTable_[f] = cFreqTable_[f - 1] + cFreqTable_[f]; if (cFreqTable_[f] <= cFreqTable_[f - 1]) cFreqTable_[f] = cFreqTable_[f - 1] + 1; } // rescale if numerical limits are reached while (cFreqTable_[frequencyTableSize - 1] >= maxRange) { for (f = 1; f < cFreqTable_.size (); f++) { cFreqTable_[f] /= 2; ; if (cFreqTable_[f] <= cFreqTable_[f - 1]) cFreqTable_[f] = cFreqTable_[f - 1] + 1; } } // calculate amount of bytes per frequency table entry frequencyTableByteSize = (uint8_t)ceil (Log2 ((double) cFreqTable_[frequencyTableSize - 1]) / 8.0); // write size of frequency table to output stream outputByteStream_arg.write ((const char *)&frequencyTableSize, sizeof(frequencyTableSize)); outputByteStream_arg.write ((const char *)&frequencyTableByteSize, sizeof(frequencyTableByteSize)); streamByteCount += sizeof(frequencyTableSize)+sizeof(frequencyTableByteSize); // write cumulative frequency table to output stream for (f = 1; f < frequencyTableSize; f++) { outputByteStream_arg.write ((const char *)&cFreqTable_[f], frequencyTableByteSize); streamByteCount += frequencyTableByteSize; } readPos = 0; low = 0; range = (uint64_t)-1; // start encoding while (readPos < input_size) { // read symol inputsymbol = inputIntVector_arg[readPos++]; // map to range low += cFreqTable_[inputsymbol] * (range /= cFreqTable_[frequencyTableSize - 1]); range *= cFreqTable_[inputsymbol + 1] - cFreqTable_[inputsymbol]; // check range limits while ((low ^ (low + range)) < top || ((range < bottom) && ((range = -low & (bottom - 1)), 1))) { out = low >> 56; range <<= 8; low <<= 8; outputCharVector_.push_back(out); } } // flush remaining data for (i = 0; i < 8; i++) { out = low >> 56; outputCharVector_.push_back(out); low <<= 8; } // write encoded data to stream outputByteStream_arg.write (&outputCharVector_[0], outputCharVector_.size()); streamByteCount += outputCharVector_.size(); return streamByteCount; }
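// --- Added usage sketch (not from the original source) ---
// Hedged round trip pairing the integer encoder above with decodeStreamToIntVector()
// shown earlier. Assumes these methods live on PCL's pcl::StaticRangeCoder (the
// header path is an assumption); note the decoder fills a pre-sized vector, so the
// caller must know the symbol count up front.
#include <pcl/compression/entropy_range_coder.h>
#include <sstream>
#include <vector>

void roundTripInts(const std::vector<unsigned int> &symbols)
{
    pcl::StaticRangeCoder coder;
    std::stringstream compressed;

    std::vector<unsigned int> input(symbols);            // encoder takes a non-const reference
    coder.encodeIntVectorToStream(input, compressed);

    std::vector<unsigned int> output(symbols.size());    // decoder uses output.size() as the count
    coder.decodeStreamToIntVector(compressed, output);
}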
unsigned long AdaptiveRangeCoder::decodeStreamToCharVector (std::istream& inputByteStream_arg, std::vector<char>& outputByteVector_arg) { uint8_t ch; DWord freq[257]; unsigned int i, j, f; // define limits const DWord top = (DWord)1 << 24; const DWord bottom = (DWord)1 << 16; const DWord maxRange = (DWord)1 << 16; DWord low, range; DWord code; unsigned int outputBufPos; unsigned int output_size = (unsigned) outputByteVector_arg.size (); unsigned long streamByteCount; streamByteCount = 0; outputBufPos = 0; code = 0; low = 0; range = (DWord)-1; // init decoding for (i = 0; i < 4; i++) { inputByteStream_arg.read ((char*)&ch, sizeof(char)); streamByteCount += sizeof(char); code = (code << 8) | ch; } // init cumulative frequency table for (i = 0; i <= 256; i++) freq[i] = i; // decoding loop for (i = 0; i < output_size; i++) { uint8_t symbol = 0; uint8_t sSize = 256 / 2; // map code to range DWord count = (code - low) / (range /= freq[256]); // find corresponding symbol while (sSize > 0) { if (freq[symbol + sSize] <= count) { symbol += sSize; } sSize /= 2; } // output symbol outputByteVector_arg[outputBufPos++] = symbol; // update range limits low += freq[symbol] * range; range *= freq[symbol + 1] - freq[symbol]; // decode range limits while ((low ^ (low + range)) < top || ((range < bottom) && ((range = -low & (bottom - 1)), 1))) { inputByteStream_arg.read ((char*)&ch, sizeof(char)); streamByteCount += sizeof(char); code = code << 8 | ch; range <<= 8; low <<= 8; } // update cumulative frequency table for (j = symbol + 1; j < 257; j++) freq[j]++; // detect overflow if (freq[256] >= maxRange) { // rescale for (f = 1; f <= 256; f++) { freq[f] /= 2; if (freq[f] <= freq[f - 1]) freq[f] = freq[f - 1] + 1; } } } return streamByteCount; }
inline int nnz() const { return (int)data_.size(); }
inline int cols() const { return (int)cidx_.size() - 1; }
void change_capacity(int nnz_new) { ridx_.resize(nnz_new, rows_); data_.resize(nnz_new, 0.0); }
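// --- Added illustration (not from the original source) ---
// The accessors above suggest compressed column storage (CCS): data_ holds the
// nonzeros column by column, ridx_ their row indices, and cidx_ one offset per
// column plus a terminator. A hypothetical stand-in showing those invariants for a
// concrete 3x3 matrix:
//
//   [1 0 2]
//   [0 3 0]
//   [4 0 5]
#include <vector>

struct CcsExample
{
    int rows_ = 3;
    std::vector<int>    cidx_ = {0, 2, 3, 5};      // cols()+1 column offsets
    std::vector<int>    ridx_ = {0, 2, 1, 0, 2};   // row index of each nonzero
    std::vector<double> data_ = {1, 4, 3, 2, 5};   // nonzero values, column-major

    int nnz()  const { return (int)data_.size(); }       // 5
    int cols() const { return (int)cidx_.size() - 1; }   // 3
};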
//L3 Interface Initialization static bool setup_one_l3_interface(sai_vlan_id_t vlanid, int port_count, const sai_object_id_t *port_list, const MacAddress mac, const IpAddress ipaddr, const IpAddress ipmask, sai_object_id_t &rif_id) { LOGG(TEST_INFO, SETL3, "sai_vlan_api->create_vlan, create vlan %hu.\n", vlanid); sai_status_t status = sai_vlan_api->create_vlan(vlanid); if (status != SAI_STATUS_SUCCESS && status != SAI_STATUS_ITEM_ALREADY_EXISTS) { LOGG(TEST_ERR, SETL3, "fail to create vlan %hu. status=0x%x\n", vlanid, -status); return false; } std::vector<sai_attribute_t> member_attrs; sai_attribute_t member_attr; sai_object_id_t vlan_member_id; for (int i = 0; i < port_count; ++i) { member_attr.id = SAI_VLAN_MEMBER_ATTR_VLAN_ID; member_attr.value.u16 = vlanid; member_attrs.push_back(member_attr); member_attr.id = SAI_VLAN_MEMBER_ATTR_PORT_ID; member_attr.value.oid = port_list[i]; member_attrs.push_back(member_attr); member_attr.id = SAI_VLAN_MEMBER_ATTR_TAGGING_MODE; member_attr.value.s32 = SAI_VLAN_PORT_UNTAGGED; member_attrs.push_back(member_attr); LOGG(TEST_INFO, SETL3, "sai_vlan_api->create_vlan_member, with vlan %d.\n", vlanid); status = sai_vlan_api->create_vlan_member(&vlan_member_id, member_attrs.size(), member_attrs.data()); if (status != SAI_STATUS_SUCCESS) { LOGG(TEST_ERR, SETL3, "fail to create member vlan %hu. status=0x%x\n", vlanid, -status); return false; } vlan_member_list.push_back(vlan_member_id); } sai_attribute_t attr; attr.id = SAI_PORT_ATTR_PORT_VLAN_ID; attr.value.u16 = vlanid; for (int i = 0; i < port_count; ++i) { LOGG(TEST_INFO, SETL3, "sai_port_api->set_port_attribute SAI_PORT_ATTR_PORT_VLAN_ID %hu to port 0x%lx\n", vlanid, port_list[i]); status = sai_port_api->set_port_attribute(port_list[i], &attr); if (status != SAI_STATUS_SUCCESS) { LOGG(TEST_ERR, SETL3, "fail to set port %lu untagged vlan %hu. status=0x%x\n", port_list[i], vlanid, -status); return false; } } // create router interface std::vector<sai_attribute_t> rif_attrs; sai_attribute_t rif_attr; rif_attr.id = SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID; rif_attr.value.oid = g_vr_id; rif_attrs.push_back(rif_attr); rif_attr.id = SAI_ROUTER_INTERFACE_ATTR_TYPE; rif_attr.value.s32 = SAI_ROUTER_INTERFACE_TYPE_VLAN; rif_attrs.push_back(rif_attr); rif_attr.id = SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS; memcpy(rif_attr.value.mac, mac.to_bytes(), sizeof(sai_mac_t)); rif_attrs.push_back(rif_attr); rif_attr.id = SAI_ROUTER_INTERFACE_ATTR_VLAN_ID; rif_attr.value.u16 = vlanid; rif_attrs.push_back(rif_attr); LOGG(TEST_INFO, SETL3, "sai_rif_api->create_router_interface\n"); status = sai_rif_api->create_router_interface(&rif_id, rif_attrs.size(), rif_attrs.data()); if (status != SAI_STATUS_SUCCESS) { LOGG(TEST_ERR, SETL3, "fail to create router interface. 
status=0x%x\n", -status); return false; } if (!SAI_OID_TYPE_CHECK(rif_id, SAI_OBJECT_TYPE_ROUTER_INTERFACE)) { LOGG(TEST_ERR, SETL3, "router interface oid generated is not the right type\n"); return false; } LOGG(TEST_DEBUG, SETL3, "router_interface created, rif_id 0x%lx\n", rif_id); // add interface ip to l3 host table LOGG(TEST_INFO, SETL3, "sai_route_api->create_route, SAI_ROUTE_ATTR_PACKET_ACTION, SAI_PACKET_ACTION_TRAP\n"); sai_unicast_route_entry_t unicast_route_entry; unicast_route_entry.vr_id = g_vr_id; unicast_route_entry.destination.addr_family = SAI_IP_ADDR_FAMILY_IPV4; unicast_route_entry.destination.addr.ip4 = ipaddr.addr(); unicast_route_entry.destination.mask.ip4 = 0xffffffff; sai_attribute_t route_attr; route_attr.id = SAI_ROUTE_ATTR_PACKET_ACTION; route_attr.value.s32 = SAI_PACKET_ACTION_TRAP; status = sai_route_api->create_route(&unicast_route_entry, 1, &route_attr); if (status != SAI_STATUS_SUCCESS) { LOGG(TEST_ERR, SETL3, "fail to add route for l3 interface to cpu. status=0x%x\n", -status); return false; } // by default, drop all the traffic destined to the the ip subnet. // if we learn some of the neighbors, add them explicitly to the l3 host table. LOGG(TEST_INFO, SETL3, "sai_route_api->create_route, SAI_ROUTE_ATTR_PACKET_ACTION, SAI_PACKET_ACTION_DROP\n"); unicast_route_entry.vr_id = g_vr_id; unicast_route_entry.destination.addr_family = SAI_IP_ADDR_FAMILY_IPV4; unicast_route_entry.destination.addr.ip4 = ipaddr.addr() & ipmask.addr(); unicast_route_entry.destination.mask.ip4 = ipmask.addr(); route_attr.id = SAI_ROUTE_ATTR_PACKET_ACTION; route_attr.value.s32 = SAI_PACKET_ACTION_DROP; status = sai_route_api->create_route(&unicast_route_entry, 1, &route_attr); if (status != SAI_STATUS_SUCCESS) { LOGG(TEST_ERR, SETL3, "fail to add l3 intf subnet to blackhole. status=0x%x", -status); return false; } return true; }
unsigned long StaticRangeCoder::decodeStreamToCharVector (std::istream& inputByteStream_arg, std::vector<char>& outputByteVector_arg) { uint8_t ch; DWord freq[257]; unsigned int i; // define range limits const DWord top = (DWord)1 << 24; const DWord bottom = (DWord)1 << 16; DWord low, range; DWord code; unsigned int outputBufPos; unsigned int output_size; unsigned long streamByteCount; streamByteCount = 0; output_size = outputByteVector_arg.size (); outputBufPos = 0; // read cumulative frequency table inputByteStream_arg.read ((char*)&freq[0], sizeof(freq)); streamByteCount += sizeof(freq); code = 0; low = 0; range = (DWord)-1; // init code for (i = 0; i < 4; i++) { inputByteStream_arg.read ((char*)&ch, sizeof(char)); streamByteCount += sizeof(char); code = (code << 8) | ch; } // decoding for (i = 0; i < output_size; i++) { // symbol lookup in cumulative frequency table uint8_t symbol = 0; uint8_t sSize = 256 / 2; DWord count = (code - low) / (range /= freq[256]); while (sSize > 0) { if (freq[symbol + sSize] <= count) { symbol += sSize; } sSize /= 2; } // write symbol to output stream outputByteVector_arg[outputBufPos++] = symbol; low += freq[symbol] * range; range *= freq[symbol + 1] - freq[symbol]; // check range limits while ((low ^ (low + range)) < top || ((range < bottom) && ((range = -low & (bottom - 1)), 1))) { inputByteStream_arg.read ((char*)&ch, sizeof(char)); streamByteCount += sizeof(char); code = code << 8 | ch; range <<= 8; low <<= 8; } } return streamByteCount; }
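// --- Added usage sketch (not from the original source) ---
// Companion round trip for the char-based static coder: encodeCharVectorToStream()
// above paired with this decoder, under the same hedged assumptions as the integer
// example (pcl::StaticRangeCoder, pre-sized output vector).
#include <pcl/compression/entropy_range_coder.h>
#include <sstream>
#include <vector>

void roundTripChars(const std::vector<char> &bytes)
{
    pcl::StaticRangeCoder coder;
    std::stringstream compressed;
    coder.encodeCharVectorToStream(bytes, compressed);

    std::vector<char> restored(bytes.size());             // decoder fills, it does not grow
    coder.decodeStreamToCharVector(compressed, restored);
}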
bool loadOBJ(const char* path,
             std::vector<glm::vec3> &out_vertices,
             std::vector<glm::vec2> &out_uvs,
             std::vector<glm::vec3> &out_normals)
{
    std::vector<unsigned int> vertex_indices;
    std::vector<unsigned int> uv_indices;
    std::vector<unsigned int> normal_indices;
    std::vector<glm::vec3> temp_vertices;
    std::vector<glm::vec2> temp_uvs;
    std::vector<glm::vec3> temp_normals;

    // Opens file
    FILE* file = fopen(path, "r");
    if (file == NULL) {
        std::cout << "Couldn't open " << path << std::endl;
        return false;
    }

    // Reads the file and reshapes its contents
    while (true) {
        // Reads until End Of File
        char line_header[128];
        int line = fscanf(file, "%s", line_header);
        if (line == EOF)
            break;

        if (strcmp(line_header, "v") == 0) {
            glm::vec3 vertex;
            fscanf(file, "%f %f %f\n", &vertex.x, &vertex.y, &vertex.z);
            temp_vertices.push_back(vertex);
        } else if (strcmp(line_header, "vt") == 0) {
            glm::vec2 uv;
            fscanf(file, "%f %f", &uv.x, &uv.y);
            temp_uvs.push_back(uv);
        } else if (strcmp(line_header, "vn") == 0) {
            glm::vec3 normal;
            fscanf(file, "%f %f %f\n", &normal.x, &normal.y, &normal.z);
            temp_normals.push_back(normal);
        } else if (strcmp(line_header, "f") == 0) {
            std::string vertex1, vertex2, vertex3;
            unsigned int vertex_index[3], uv_index[3], normal_index[3];
            int matches = fscanf(file, "%d/%d/%d %d/%d/%d %d/%d/%d\n",
                                 &vertex_index[0], &uv_index[0], &normal_index[0],
                                 &vertex_index[1], &uv_index[1], &normal_index[1],
                                 &vertex_index[2], &uv_index[2], &normal_index[2]);
            if (matches != 9) {
                std::cout << "File wasn't standard" << std::endl;
                fclose(file);
                return false;
            }
            vertex_indices.push_back(vertex_index[0]);
            vertex_indices.push_back(vertex_index[1]);
            vertex_indices.push_back(vertex_index[2]);
            uv_indices.push_back(uv_index[0]);
            uv_indices.push_back(uv_index[1]);
            uv_indices.push_back(uv_index[2]);
            normal_indices.push_back(normal_index[0]);
            normal_indices.push_back(normal_index[1]);
            normal_indices.push_back(normal_index[2]);
        }
    }
    fclose(file);

    // Expand the indexed data into flat per-corner arrays (OBJ indices are 1-based)
    for (unsigned int i = 0; i < vertex_indices.size(); i++) {
        unsigned int vertex_index = vertex_indices[i];
        glm::vec3 vertex = temp_vertices[vertex_index - 1];
        out_vertices.push_back(vertex);

        unsigned int uv_index = uv_indices[i];
        glm::vec2 uv = temp_uvs[uv_index - 1];
        out_uvs.push_back(uv);

        unsigned int normal_index = normal_indices[i];
        glm::vec3 normal = temp_normals[normal_index - 1];
        out_normals.push_back(normal);
    }
    return true;
}
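// --- Added usage sketch (not from the original source) ---
// Typical call site: load a model, then hand the flat arrays to the GPU. The file
// name is a placeholder and the buffer upload is only outlined in a comment.
#include <vector>
#include <glm/glm.hpp>

void loadModel()
{
    std::vector<glm::vec3> vertices;
    std::vector<glm::vec2> uvs;
    std::vector<glm::vec3> normals;

    if (!loadOBJ("cube.obj", vertices, uvs, normals))
        return;                            // parse failure was already reported

    // vertices/uvs/normals are unindexed, one entry per face corner, ready for
    // e.g. glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(glm::vec3), vertices.data(), GL_STATIC_DRAW);
}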
//cached call
bool DCDTrajectoryWriter::write(const std::vector<Vector3DBlock> &cachedCoords) {
  //push out if sufficient
  if( cachedCoords.size() > 0 ){
    report << debug(1) <<"Writing DCD, multiple frames." << endr;
    //original code modified for caching, index 0 must exist here
    const unsigned int setcount = cachedCoords.size();
    const unsigned int count = cachedCoords[0].size();//ccoords.size();
    if (!reopen(count, setcount)) return false;

    //loop over each set of coordinates
    for( int i=0; i<setcount; i++){
      Vector3DBlock ccoords = cachedCoords[i];
      //original code
      //const unsigned int count = ccoords.size();
      //if (!reopen(count)) return false;
      myX.resize(count);
      myY.resize(count);
      myZ.resize(count);

      for (unsigned int j = 0; j < count; ++j) {
        myX[j] = static_cast<float>(ccoords[j].c[0]);
        myY[j] = static_cast<float>(ccoords[j].c[1]);
        myZ[j] = static_cast<float>(ccoords[j].c[2]);
        if (myIsLittleEndian != ISLITTLEENDIAN) {
          swapBytes(myX[j]);
          swapBytes(myY[j]);
          swapBytes(myZ[j]);
        }
      }

      int32 nAtoms = static_cast<int32>(count * 4);
      if (myIsLittleEndian != ISLITTLEENDIAN) swapBytes(nAtoms);
      file.write((char *)&nAtoms, sizeof(int32));
      file.write((char *)&(myX[0]), count * sizeof(float4));
      file.write((char *)&nAtoms, sizeof(int32));
      file.write((char *)&nAtoms, sizeof(int32));
      file.write((char *)&(myY[0]), count * sizeof(float4));
      file.write((char *)&nAtoms, sizeof(int32));
      file.write((char *)&nAtoms, sizeof(int32));
      file.write((char *)&(myZ[0]), count * sizeof(float4));
      file.write((char *)&nAtoms, sizeof(int32));
      //close();

      if( file.fail() ){
        close();
        return false;
      }//end of coordinate save
    }//end of loop

    //close file once stored
    close();
    return true;//!file.fail();
  }

  //nothing cached: report failure rather than falling off the end without a return
  return false;
}
/**
 * A canonical signature exists of: <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
 * Where R and S are not negative (their first byte has its highest bit not set), and not
 * excessively padded (do not start with a 0 byte, unless an otherwise negative number follows,
 * in which case a single 0 byte is necessary and even required).
 *
 * See https://cryptomailcointalk.org/index.php?topic=8392.msg127623#msg127623
 *
 * This function is consensus-critical since BIP66.
 */
bool static IsValidSignatureEncoding(const std::vector<unsigned char> &sig) {
    // Format: 0x30 [total-length] 0x02 [R-length] [R] 0x02 [S-length] [S] [sighash]
    // * total-length: 1-byte length descriptor of everything that follows,
    //   excluding the sighash byte.
    // * R-length: 1-byte length descriptor of the R value that follows.
    // * R: arbitrary-length big-endian encoded R value. It must use the shortest
    //   possible encoding for a positive integer (which means no null bytes at
    //   the start, except a single one when the next byte has its highest bit set).
    // * S-length: 1-byte length descriptor of the S value that follows.
    // * S: arbitrary-length big-endian encoded S value. The same rules apply.
    // * sighash: 1-byte value indicating what data is hashed (not part of the DER
    //   signature)

    // Minimum and maximum size constraints.
    if (sig.size() < 9) return false;
    if (sig.size() > 73) return false;

    // A signature is of type 0x30 (compound).
    if (sig[0] != 0x30) return false;

    // Make sure the length covers the entire signature.
    if (sig[1] != sig.size() - 3) return false;

    // Extract the length of the R element.
    unsigned int lenR = sig[3];

    // Make sure the length of the S element is still inside the signature.
    if (5 + lenR >= sig.size()) return false;

    // Extract the length of the S element.
    unsigned int lenS = sig[5 + lenR];

    // Verify that the length of the signature matches the sum of the length
    // of the elements.
    if ((size_t)(lenR + lenS + 7) != sig.size()) return false;

    // Check whether the R element is an integer.
    if (sig[2] != 0x02) return false;

    // Zero-length integers are not allowed for R.
    if (lenR == 0) return false;

    // Negative numbers are not allowed for R.
    if (sig[4] & 0x80) return false;

    // Null bytes at the start of R are not allowed, unless R would
    // otherwise be interpreted as a negative number.
    if (lenR > 1 && (sig[4] == 0x00) && !(sig[5] & 0x80)) return false;

    // Check whether the S element is an integer.
    if (sig[lenR + 4] != 0x02) return false;

    // Zero-length integers are not allowed for S.
    if (lenS == 0) return false;

    // Negative numbers are not allowed for S.
    if (sig[lenR + 6] & 0x80) return false;

    // Null bytes at the start of S are not allowed, unless S would otherwise be
    // interpreted as a negative number.
    if (lenS > 1 && (sig[lenR + 6] == 0x00) && !(sig[lenR + 7] & 0x80)) return false;

    return true;
}
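// --- Added self-check sketch (not from the original source) ---
// The shortest encoding the checks above accept is nine bytes: two one-byte
// positive integers for R and S plus the trailing sighash byte. This test helper
// is hypothetical and would have to live in the same translation unit, since the
// function is declared static.
#include <cassert>
#include <vector>

static void TestMinimalSignatureEncoding()
{
    const std::vector<unsigned char> sig = {
        0x30, 0x06,        // compound marker, length of the R/S payload (size - 3)
        0x02, 0x01, 0x01,  // R: one-byte positive integer
        0x02, 0x01, 0x01,  // S: one-byte positive integer
        0x01               // sighash byte
    };
    assert(IsValidSignatureEncoding(sig));

    std::vector<unsigned char> negative_r = sig;
    negative_r[4] = 0x80;                  // R with its high bit set must be rejected
    assert(!IsValidSignatureEncoding(negative_r));
}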
int FuzzerDriver(const std::vector<std::string> &Args, UserSuppliedFuzzer &USF) { using namespace fuzzer; assert(!Args.empty()); ProgName = new std::string(Args[0]); ParseFlags(Args); if (Flags.help) { PrintHelp(); return 0; } if (Flags.jobs > 0 && Flags.workers == 0) { Flags.workers = std::min(NumberOfCpuCores() / 2, Flags.jobs); if (Flags.workers > 1) Printf("Running %d workers\n", Flags.workers); } if (Flags.workers > 0 && Flags.jobs > 0) return RunInMultipleProcesses(Args, Flags.workers, Flags.jobs); Fuzzer::FuzzingOptions Options; Options.Verbosity = Flags.verbosity; Options.MaxLen = Flags.max_len; Options.UnitTimeoutSec = Flags.timeout; Options.MaxTotalTimeSec = Flags.max_total_time; Options.DoCrossOver = Flags.cross_over; Options.MutateDepth = Flags.mutate_depth; Options.ExitOnFirst = Flags.exit_on_first; Options.UseCounters = Flags.use_counters; Options.UseIndirCalls = Flags.use_indir_calls; Options.UseTraces = Flags.use_traces; Options.ShuffleAtStartUp = Flags.shuffle; Options.PreferSmallDuringInitialShuffle = Flags.prefer_small_during_initial_shuffle; Options.Reload = Flags.reload; Options.OnlyASCII = Flags.only_ascii; Options.TBMDepth = Flags.tbm_depth; Options.TBMWidth = Flags.tbm_width; if (Flags.runs >= 0) Options.MaxNumberOfRuns = Flags.runs; if (!Inputs->empty()) Options.OutputCorpus = (*Inputs)[0]; if (Flags.sync_command) Options.SyncCommand = Flags.sync_command; Options.SyncTimeout = Flags.sync_timeout; Options.ReportSlowUnits = Flags.report_slow_units; if (Flags.artifact_prefix) Options.ArtifactPrefix = Flags.artifact_prefix; if (Flags.dict) if (!ParseDictionaryFile(FileToString(Flags.dict), &Options.Dictionary)) return 1; if (Flags.verbosity > 0 && !Options.Dictionary.empty()) Printf("Dictionary: %zd entries\n", Options.Dictionary.size()); Options.SaveArtifacts = !Flags.test_single_input; Fuzzer F(USF, Options); // Timer if (Flags.timeout > 0) SetTimer(Flags.timeout / 2 + 1); if (Flags.test_single_input) return RunOneTest(&F, Flags.test_single_input); if (Flags.merge) { F.Merge(*Inputs); exit(0); } unsigned Seed = Flags.seed; // Initialize Seed. if (Seed == 0) Seed = time(0) * 10000 + getpid(); if (Flags.verbosity) Printf("Seed: %u\n", Seed); USF.GetRand().ResetSeed(Seed); F.RereadOutputCorpus(); for (auto &inp : *Inputs) if (inp != Options.OutputCorpus) F.ReadDir(inp, nullptr); if (F.CorpusSize() == 0) F.AddToCorpus(Unit()); // Can't fuzz empty corpus, so add an empty input. F.ShuffleAndMinimize(); if (Flags.save_minimized_corpus) F.SaveCorpus(); F.Loop(); if (Flags.verbosity) Printf("Done %d runs in %zd second(s)\n", F.getTotalNumberOfRuns(), F.secondsSinceProcessStartUp()); exit(0); // Don't let F destroy itself. }
template <typename PointT, typename FlannDistance> void
pcl::search::FlannSearch<PointT, FlannDistance>::radiusSearch (
    const PointCloud& cloud, const std::vector<int>& indices, double radius,
    std::vector< std::vector<int> >& k_indices, std::vector< std::vector<float> >& k_sqr_distances,
    unsigned int max_nn) const
{
  if (indices.empty ()) // full point cloud + trivial copy operation = no need to do any conversion/copying to the flann matrix!
  {
    k_indices.resize (cloud.size ());
    k_sqr_distances.resize (cloud.size ());

    if (! cloud.is_dense) // remove this check as soon as FLANN does NaN checks internally
    {
      for (size_t i = 0; i < cloud.size (); i++)
      {
        assert (point_representation_->isValid (cloud[i]) && "Invalid (NaN, Inf) point coordinates given to radiusSearch!");
      }
    }

    bool can_cast = point_representation_->isTrivial ();

    float* data = 0;
    if (!can_cast)
    {
      data = new float[dim_ * cloud.size ()];
      for (size_t i = 0; i < cloud.size (); ++i)
      {
        float* out = data + i * dim_;
        point_representation_->vectorize (cloud[i], out);
      }
    }

    float* cdata = can_cast ? const_cast<float*> (reinterpret_cast<const float*> (&cloud[0])) : data;
    const flann::Matrix<float> m (cdata, cloud.size (), dim_, can_cast ? sizeof (PointT) : dim_ * sizeof (float));

    flann::SearchParams p;
    p.sorted = sorted_results_;
    p.eps = eps_;
    p.checks = checks_;
    // here: max_nn==0: take all neighbors. flann: max_nn==0: return no neighbors, only count them. max_nn==-1: return all neighbors
    p.max_neighbors = max_nn != 0 ? max_nn : -1;
    index_->radiusSearch (m, k_indices, k_sqr_distances, static_cast<float> (radius * radius), p);

    delete [] data;
  }
  else // if indices are present, the cloud has to be copied anyway. Only copy the relevant parts of the points here.
  {
    k_indices.resize (indices.size ());
    k_sqr_distances.resize (indices.size ());

    if (! cloud.is_dense) // remove this check as soon as FLANN does NaN checks internally
    {
      for (size_t i = 0; i < indices.size (); i++)
      {
        assert (point_representation_->isValid (cloud[indices[i]]) && "Invalid (NaN, Inf) point coordinates given to radiusSearch!");
      }
    }

    float* data = new float[dim_ * indices.size ()];
    for (size_t i = 0; i < indices.size (); ++i)
    {
      float* out = data + i * dim_;
      point_representation_->vectorize (cloud[indices[i]], out);
    }
    // the matrix has one row per requested index, not one per cloud point
    const flann::Matrix<float> m (data, indices.size (), point_representation_->getNumberOfDimensions ());

    flann::SearchParams p;
    p.sorted = sorted_results_;
    p.eps = eps_;
    p.checks = checks_;
    // here: max_nn==0: take all neighbors. flann: max_nn==0: return no neighbors, only count them. max_nn==-1: return all neighbors
    p.max_neighbors = max_nn != 0 ? max_nn : -1;
    index_->radiusSearch (m, k_indices, k_sqr_distances, static_cast<float> (radius * radius), p);

    delete [] data;
  }
  if (!identity_mapping_)
  {
    for (size_t j = 0; j < k_indices.size (); ++j)
    {
      for (size_t i = 0; i < k_indices[j].size (); ++i)
      {
        int& neighbor_index = k_indices[j][i];
        neighbor_index = index_mapping_[neighbor_index];
      }
    }
  }
}
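// --- Added usage sketch (not from the original source) ---
// The public single-point radius search that ultimately relies on the bulk overload
// above. Assumes the older PCL interface where neighbor indices come back as
// std::vector<int>; header paths and the default FLANN index are assumptions.
#include <pcl/point_types.h>
#include <pcl/search/flann_search.h>
#include <vector>

void searchNeighbors(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &cloud)
{
    pcl::search::FlannSearch<pcl::PointXYZ> search;
    search.setInputCloud(cloud);

    pcl::PointXYZ query(0.0f, 0.0f, 0.0f);
    std::vector<int> indices;
    std::vector<float> sqr_distances;

    // max_nn = 0 is interpreted above as "return every neighbor within the radius"
    search.radiusSearch(query, 0.05, indices, sqr_distances, 0);
}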
/// Perform tracking std::vector<cv::DMatch> MatcherOpenCV::performTracking(cv::Mat prevImg, cv::Mat img, std::vector<cv::Point2f> &prevFeatures, std::vector<cv::Point2f> &features, std::vector<cv::KeyPoint>& prevKeyPoints, std::vector<cv::KeyPoint>& keyPoints, std::vector<double>& prevDetDists, std::vector<double>& detDists) { // Some needed variables std::vector<uchar> status; std::vector<float> err; cv::TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, matcherParameters.OpenCVParams.maxIter, matcherParameters.OpenCVParams.eps); // Setting OpenCV flags based on our parameters int trackingFlags = 0; if (matcherParameters.OpenCVParams.useInitialFlow > 0) trackingFlags = cv::OPTFLOW_USE_INITIAL_FLOW; if (matcherParameters.OpenCVParams.trackingErrorType > 0) trackingFlags |= cv::OPTFLOW_LK_GET_MIN_EIGENVALS; // Calculating the movement of features cv::calcOpticalFlowPyrLK(prevImg, img, prevFeatures, features, status, err, cv::Size(matcherParameters.OpenCVParams.winSize, matcherParameters.OpenCVParams.winSize), matcherParameters.OpenCVParams.maxLevels, termcrit, trackingFlags, matcherParameters.OpenCVParams.trackingMinEigThreshold); keyPoints = prevKeyPoints; //copy new positions to keyPoints for(std::vector<cv::Point2f>::size_type i = 0; i < features.size(); ++i){ keyPoints[i].pt = features[i]; } detDists = prevDetDists; // This parts removes additional features for which we observed an error above preset threshold int errSize = (int)err.size(); for (int i = 0; i < errSize; i++) { if (err[i] > matcherParameters.OpenCVParams.trackingErrorThreshold) status[i] = 0; } // Removing features if they are too close to each other - the feature to remove is based on an error from tracking std::set<int> featuresToRemove; for (std::vector<cv::Point2f>::size_type i = 0; i < features.size(); i++) { for (std::vector<cv::Point2f>::size_type j = i + 1; j < features.size(); j++) { if (cv::norm(features[i] - features[j]) < matcherParameters.OpenCVParams.minimalReprojDistanceNewTrackingFeatures) { if ( err[i] > err[j]) featuresToRemove.insert((int)i); else featuresToRemove.insert((int)j); } } } // Returning result in matching-compatible format int i = 0, j = 0; std::vector<cv::DMatch> matches; std::vector<cv::Point2f>::iterator itFeatures = features.begin(); std::vector<cv::KeyPoint>::iterator itKeyPoints = keyPoints.begin(); std::vector<double>::iterator itDetDists = detDists.begin(); std::vector<uchar>::iterator it = status.begin(); for (; it != status.end(); ++it, i++) { // Tracking succeed and the feature is not too close to feature with more precise tracking if (*it != 0 && featuresToRemove.find(i) == featuresToRemove.end()) { matches.push_back(cv::DMatch(i, j, 0)); j++; ++itFeatures; ++itKeyPoints; ++itDetDists; } // Tracking failed -- we remove those features else { itFeatures = features.erase(itFeatures); itKeyPoints = keyPoints.erase(itKeyPoints); itDetDists = detDists.erase(itDetDists); } } if (matcherParameters.verbose > 0) std::cout << "MatcherOpenCV::performTracking -- features tracked " << matches.size() << " (" << (float)matches.size() * 100.0 / (float)prevFeatures.size() << "%)" << std::endl; // Return result return matches; }
/* Export a graph (triangle navmesh graph) to a file
 * Requires:
 * @nodes  the list of all the nodes
 * @edges  the list of all the edges
 * @fname  the filename to export the graph to
 */
bool TriNavMeshBuilder::exportGraph(const std::vector<GNode *> &nodes,
                                    const std::vector<GEdge *> &edges,
                                    const Ogre::String &fname)
{
    std::ofstream out;
    out.open(fname.c_str());
    if(!out.is_open()){
        debug("Error trying to open the file %s\n", fname.c_str());
        return false;
    }

    // get all the vertices
    std::map<const sm::Vertex *, int> vertexs;
    int vertexCount = 0;
    std::vector<const sm::Vertex *> vertexList;
    for(int i = nodes.size()-1; i >= 0; --i){
        ASSERT(nodes[i]->getTriangle());
        const sm::Vertex *v1 = nodes[i]->getTriangle()->v1;
        const sm::Vertex *v2 = nodes[i]->getTriangle()->v2;
        const sm::Vertex *v3 = nodes[i]->getTriangle()->v3;
        if(vertexs.find(v1) == vertexs.end()) {vertexs[v1] = vertexCount++; vertexList.push_back(v1);}
        if(vertexs.find(v2) == vertexs.end()) {vertexs[v2] = vertexCount++; vertexList.push_back(v2);}
        if(vertexs.find(v3) == vertexs.end()) {vertexs[v3] = vertexCount++; vertexList.push_back(v3);}
    }
    ASSERT(vertexCount == static_cast<int>(vertexList.size()));

    // save the number of vertices, then the vertices themselves
    out << vertexCount << "\n";
    for(int i = 0; i < vertexCount; ++i){
        out << vertexList[i]->x << "\t" << vertexList[i]->y << "\n";
    }

    // save the number of nodes, then the nodes (building the node map on the way)
    std::map<const GNode *, int> nodesMap;
    int nodesCount = 0;
    out << nodes.size() << "\n";
    for(size_t i = 0; i < nodes.size(); ++i){
        if(nodesMap.find(nodes[i]) == nodesMap.end()){nodesMap[nodes[i]] = nodesCount++;}
        const Triangle *t = nodes[i]->getTriangle();
        ASSERT(t);
        // save the vertex indices of the node's triangle
        out << vertexs[t->v1] << "\t" << vertexs[t->v2] << "\t" << vertexs[t->v3] << "\n";
    }

    // save the number of edges, then the edges
    out << edges.size() << "\n";
    for(size_t i = 0; i < edges.size(); ++i){
        const GNode *n1 = edges[i]->getNode1();
        const GNode *n2 = edges[i]->getNode2();
        ASSERT(n1 && n2);
        // save the node indices and the edge weight
        out << nodesMap[n1] << "\t" << nodesMap[n2] << "\t" << edges[i]->getWeight() << "\n";
    }

    out.close();
    return true;
}
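// The exported file is plain text: a vertex count followed by tab-separated 2-D
// vertices, a node count followed by per-node vertex-index triples, and an edge
// count followed by (node1, node2, weight) rows. A minimal, hypothetical reader
// for that layout using plain structs only (no engine types); importGraph is an
// illustrative name, not part of the original builder.
#include <fstream>
#include <string>
#include <vector>

struct Vertex2D { float x, y; };
struct TriNode  { int v1, v2, v3; };
struct Edge     { int n1, n2; float weight; };

// Parses the format written by exportGraph; returns false on I/O failure.
bool importGraph(const std::string &fname, std::vector<Vertex2D> &verts,
                 std::vector<TriNode> &tris, std::vector<Edge> &edges)
{
    std::ifstream in(fname.c_str());
    if(!in.is_open()) return false;

    size_t count = 0;
    in >> count; verts.resize(count);
    for(size_t i = 0; i < count; ++i) in >> verts[i].x >> verts[i].y;

    in >> count; tris.resize(count);
    for(size_t i = 0; i < count; ++i) in >> tris[i].v1 >> tris[i].v2 >> tris[i].v3;

    in >> count; edges.resize(count);
    for(size_t i = 0; i < count; ++i) in >> edges[i].n1 >> edges[i].n2 >> edges[i].weight;

    return !in.fail();
}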
void Recombiner::recombine(const stdr_msgs::LadybugImages & raw_images,
                           std::vector<sensor_msgs::Image::Ptr> & bayer_images)
{
  ROS_ASSERT( raw_images.images.size() == 24 );
  bayer_images.resize(6);

#if !SAVE_INTERMEDIATE_IMAGES
#pragma omp parallel for
#else
  std::cout <<"timestamp: "
      <<std::setprecision(std::numeric_limits<double>::digits10)
      <<raw_images.header.stamp.toSec() <<std::endl;
  std::cout <<"Saving frame " <<frame_count_ <<std::endl;
#endif
  for( unsigned cam=0; cam<6; ++cam )
  {
    const unsigned c4 = cam * 4;

    if( !selector_[cam] )
      continue;

    const bool valid_camera_img = decompress4(raw_images, c4);

    // Prepare the output
    // Make sure the image is properly defined and allocated
    if( !valid_camera_img ) {
      bayer_images[cam].reset();
      continue;
    }

    if( ! bayer_images[cam] )
      bayer_images[cam].reset( new sensor_msgs::Image );
    sensor_msgs::Image & img = *(bayer_images[cam]);

    img.header = raw_images.header;
    std::stringstream ss;
    ss << "/ladybug/camera" <<cam;
    img.header.frame_id = ss.str();
    img.height = FULL_HEIGHT;
    img.width = FULL_WIDTH;
    img.is_bigendian = false;
    if( debayer_ ) {
      img.encoding = sensor_msgs::image_encodings::RGB8;
      img.step = img.width * 3;
    }
    else {
      img.encoding = sensor_msgs::image_encodings::BAYER_GRBG8;
      img.step = img.width;
    }
    img.data.resize(img.height * img.step);

    // select the output of the recombining stage
    std::vector<unsigned char> & data = debayer_ ? combined_images_[cam] : img.data;
    combine(c4, &(data[0]));

    if( debayer_ ) {
      // TODO: implement debayering with dc1394
      dc1394_bayer_decoding_8bit( &(combined_images_[cam][0]), &(img.data[0]),
                                  FULL_WIDTH, FULL_HEIGHT,
                                  DC1394_COLOR_FILTER_GRBG, debayer_alg_);

#if SAVE_INTERMEDIATE_IMAGES
      // save debayered image
      cv::Mat mrgb(FULL_HEIGHT, FULL_WIDTH, CV_8UC3, &(img.data[0]));
      cv::Mat mbgr(FULL_HEIGHT, FULL_WIDTH, CV_8UC3);
      cv::cvtColor(mrgb, mbgr, CV_RGB2BGR);
      const std::string name = (boost::format("frame%04d_final_%02d.bmp") % frame_count_ % (c4/4)).str();
      cv::imwrite(name, mbgr);
      cv::imshow("frame", mbgr);
      cv::waitKey();
#endif
    }
  }

  ++frame_count_;
}
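// The RGB branch above hands the recombined Bayer buffer to libdc1394's software
// debayering. A standalone sketch of that same call on a made-up buffer; the
// dimensions and the bilinear method are chosen arbitrarily for illustration.
#include <dc1394/conversions.h>
#include <stdint.h>
#include <vector>

int main()
{
  const uint32_t width = 640, height = 480;

  // Raw GRBG Bayer mosaic (one byte per pixel) and the RGB output (three bytes per pixel).
  std::vector<uint8_t> bayer(width * height, 0);
  std::vector<uint8_t> rgb(width * height * 3);

  // Same conversion as in Recombiner::recombine, here with the simple bilinear method.
  dc1394_bayer_decoding_8bit(&bayer[0], &rgb[0], width, height,
                             DC1394_COLOR_FILTER_GRBG,
                             DC1394_BAYER_METHOD_BILINEAR);
  return 0;
}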
TUint DeviceList::Count() const { return (TUint)iList.size(); }
/**
 * Function TransformRoundedEndsSegmentToPolygon
 * converts a segment with rounded ends to a polygon,
 * approximating the arcs with multiple straight lines
 * @param aCornerBuffer = a buffer to store the polygon
 * @param aStart = the segment start point coordinate
 * @param aEnd = the segment end point coordinate
 * @param aCircleToSegmentsCount = the number of segments to approximate a circle
 * @param aWidth = the segment width
 * Note: the polygon is inside the arc ends, so if you want to have the polygon
 * outside the circle, you should give aStart and aEnd calculated with a correction factor
 */
void TransformRoundedEndsSegmentToPolygon( std::vector <CPolyPt>& aCornerBuffer,
                                           wxPoint aStart, wxPoint aEnd,
                                           int aCircleToSegmentsCount,
                                           int aWidth )
{
    int     radius = aWidth / 2;
    wxPoint endp   = aEnd - aStart; // end point coordinate for the same segment starting at (0,0)
    wxPoint startp = aStart;
    wxPoint corner;
    int     seg_len;
    CPolyPt polypoint;

    // normalize the position in order to have endp.x >= 0
    if( endp.x < 0 )
    {
        endp   = aStart - aEnd;
        startp = aEnd;
    }

    int delta_angle = ArcTangente( endp.y, endp.x ); // delta_angle is in 0.1 degrees
    seg_len = (int) sqrt( ( (double) endp.y * endp.y ) + ( (double) endp.x * endp.x ) );

    int delta = 3600 / aCircleToSegmentsCount; // rotation angle in 0.1 degree

    // Compute the outline of the segment and create a polygon
    // add right rounded end:
    for( int ii = 0; ii < 1800; ii += delta )
    {
        corner = wxPoint( 0, radius );
        RotatePoint( &corner, ii );
        corner.x += seg_len;
        RotatePoint( &corner, -delta_angle );
        corner += startp;
        polypoint.x = corner.x;
        polypoint.y = corner.y;
        aCornerBuffer.push_back( polypoint );
    }

    // Finish arc:
    corner = wxPoint( seg_len, -radius );
    RotatePoint( &corner, -delta_angle );
    corner += startp;
    polypoint.x = corner.x;
    polypoint.y = corner.y;
    aCornerBuffer.push_back( polypoint );

    // add left rounded end:
    for( int ii = 0; ii < 1800; ii += delta )
    {
        corner = wxPoint( 0, -radius );
        RotatePoint( &corner, ii );
        RotatePoint( &corner, -delta_angle );
        corner += startp;
        polypoint.x = corner.x;
        polypoint.y = corner.y;
        aCornerBuffer.push_back( polypoint );
    }

    // Finish arc:
    corner = wxPoint( 0, radius );
    RotatePoint( &corner, -delta_angle );
    corner += startp;
    polypoint.x = corner.x;
    polypoint.y = corner.y;
    aCornerBuffer.push_back( polypoint );

    aCornerBuffer.back().end_contour = true;
}
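// The function above builds a "stadium" (capsule) outline: a half circle at each
// segment end joined by the straight sides. A simplified, engine-free sketch of the
// same idea for an axis-aligned segment, using std::cos/std::sin instead of KiCad's
// RotatePoint/ArcTangente; the Pt type and parameter values are made up.
#include <cmath>
#include <vector>

struct Pt { double x, y; };

// Rounded-ends outline of the horizontal segment (0,0)-(len,0) with the given
// width, using n segments per full circle.
std::vector<Pt> roundedSegmentOutline( double len, double width, int n )
{
    const double r  = width / 2.0;
    const double pi = 3.14159265358979323846;
    std::vector<Pt> poly;
    Pt p;

    // right half circle, from +90 deg down to -90 deg
    for( int i = 0; i <= n / 2; ++i )
    {
        double a = pi / 2.0 - 2.0 * pi * i / n;
        p.x = len + r * std::cos( a );
        p.y = r * std::sin( a );
        poly.push_back( p );
    }

    // left half circle, from -90 deg around to +90 deg; the straight top and
    // bottom edges come from joining consecutive cap endpoints.
    for( int i = 0; i <= n / 2; ++i )
    {
        double a = -pi / 2.0 - 2.0 * pi * i / n;
        p.x = r * std::cos( a );
        p.y = r * std::sin( a );
        poly.push_back( p );
    }

    return poly;
}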
bool calc::trace_by_hsv_to_rgb(
	const double h, const double s, const double v
	,const std::vector<calc::trace_by_hsv_params>& hsv_params
	,double& r, double& g, double& b
)
{
	/* Precomputation for speed, storage of intermediate values,
	   and the black/color decision function */
	class check_black_and_color {
	public:
		check_black_and_color(const double s, const double v)
			:one_minus_v_(1. - v)
			,s_mul_v_(s * v)
		{}
		inline bool is_black_side(
			const double slope_line_len
			,const double intercept
		) {
			if (this->s_mul_v_ == 0.) {
				/* zero saturation or zero value means the black part */
				return true;
			}
			if (slope_line_len == 0.) {
				/* zero means everything is the color part */
				return false;
			}
			if (((1. - slope_line_len) * this->s_mul_v_)
				< (this->one_minus_v_ - intercept) * slope_line_len) {
				return true; /* on the black side of the diagonal cut */
			}
			return false; /* otherwise on the color side */
		}
	private:
		double one_minus_v_, s_mul_v_;
	} chk_b_and_c(s, v);

	/* Tracing (binarization) */
	for (unsigned ii = 0; ii < hsv_params.size(); ++ii) {
		const trace_by_hsv_params& area = hsv_params.at(ii);

		/* skip disabled ranges and try the next one */
		if (area.enable_sw == false) {
			continue;
		}

		/* Black line */
		if (area.hue_min < 0. || area.hue_max < 0.) {
			/* outside the thickness range: next iteration */
			if (area.thickness < v) {
				continue;
			}
			/* inside the color range: next iteration */
			if ((0. < v) && (0. < s)
				&& (chk_b_and_c.is_black_side(
					area.slope_line_len, area.intercept) == false)) {
				continue;
			}
			/* when v is zero or s is zero, this is a black line */
		}
		/* Color line */
		else {
			/* outside the hue range: next iteration */
			if (area.hue_min < area.hue_max) {
				if ((h < area.hue_min) || (area.hue_max < h)) {
					continue;
				}
			} else if (area.hue_max < area.hue_min) {
				if ((area.hue_max < h) && (h < area.hue_min)) {
					continue;
				}
			} else if (area.hue_min == area.hue_max) {
				if (area.rotate360_sw == false) {
					if (area.hue_min != h) {
						/* with no rotation, only the exact min/max hue is in range */
						continue;
					}
				}
				/* a full rotation covers every hue */
			}
			/* outside the thickness range: next iteration */
			if (area.thickness < (1. - s)) {
				continue;
			}
			/* black (no value): next iteration */
			if (v <= 0.) {
				continue;
			}
			/* no saturation: next iteration */
			if (s <= 0.) {
				continue;
			}
			/* inside the black range: next iteration */
			if (chk_b_and_c.is_black_side(area.slope_line_len, area.intercept)) {
				continue;
			}
		}

		/* The color falls inside this enabled range, so trace it
		   (replace it with the target color) and return.
		   If ranges overlap, the entry that comes first in the loop wins. */
		r = area.target_r;
		g = area.target_g;
		b = area.target_b;
		return true;
	}

	/* No enabled range matched and nothing was binarized:
	   output white (the paper background color), RGB = (1,1,1) */
	r = 1.;
	g = 1.;
	b = 1.;
	return false;
}
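// A hypothetical call illustrating the interface. The field names below mirror the
// ones read in the function body (enable_sw, hue_min/hue_max, thickness,
// slope_line_len, intercept, rotate360_sw, target_r/g/b), but how
// calc::trace_by_hsv_params is normally constructed is an assumption: here it is
// simply assumed to be default-constructible and filled by member assignment.
#include <vector>

void trace_example()
{
	calc::trace_by_hsv_params black_line;      // hypothetical construction
	black_line.enable_sw      = true;
	black_line.hue_min        = -1.;           // a negative hue range selects the "black line" branch
	black_line.hue_max        = -1.;
	black_line.thickness      = 0.5;           // accept pixels with v <= 0.5
	black_line.slope_line_len = 0.;            // no diagonal cut
	black_line.intercept      = 0.;
	black_line.rotate360_sw   = false;
	black_line.target_r = black_line.target_g = black_line.target_b = 0.;

	std::vector<calc::trace_by_hsv_params> params(1, black_line);

	double r = 0., g = 0., b = 0.;
	// A dark, desaturated pixel (h=0, s=0, v=0.3) falls in the range and is traced to black.
	const bool traced = calc::trace_by_hsv_to_rgb(0., 0., 0.3, params, r, g, b);
	(void)traced;
}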
std::vector<double>::const_iterator data_end() const { return data_.end(); }
inline void save(
	Archive & ar,
	const STD::vector<bool, Allocator> &t,
	const unsigned int /* file_version */
){
	// record number of elements
	unsigned int count = t.size();
	ar << BOOST_SERIALIZATION_NVP(count);
	STD::vector<bool>::const_iterator it = t.begin();
	while(count-- > 0){
		bool tb = *it++;
		ar << boost::serialization::make_nvp("item", tb);
	}
}
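// A minimal usage sketch of serializing std::vector<bool> through Boost.Serialization's
// standard headers, which dispatch to vector<bool> save/load overloads like the ones in
// this file. The file name and choice of XML archive are arbitrary.
#include <fstream>
#include <vector>
#include <boost/archive/xml_oarchive.hpp>
#include <boost/archive/xml_iarchive.hpp>
#include <boost/serialization/vector.hpp>

int main()
{
	std::vector<bool> flags;
	flags.push_back(true);
	flags.push_back(false);
	flags.push_back(true);

	{
		std::ofstream ofs("flags.xml");
		boost::archive::xml_oarchive oa(ofs);
		oa << boost::serialization::make_nvp("flags", flags);   // writes the element count, then each "item"
	}

	std::vector<bool> restored;
	{
		std::ifstream ifs("flags.xml");
		boost::archive::xml_iarchive ia(ifs);
		ia >> boost::serialization::make_nvp("flags", restored); // reads them back in the same order
	}
	return 0;
}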