Example #1
DocumentInfo::DocumentInfo(const DocumentInfo &other) :
	m_extract(other.m_extract),
	m_score(other.m_score),
	m_indexId(other.m_indexId),
	m_docId(other.m_docId)
{
	copy(other.m_fields.begin(), other.m_fields.end(),
		inserter(m_fields, m_fields.begin()));
	copy(other.m_labels.begin(), other.m_labels.end(),
		inserter(m_labels, m_labels.begin()));
}
Example #2
/// Sets the documents to use for query expansion.
bool XapianEngine::setQueryExpansion(set<unsigned int> &relevantDocuments)
{
	copy(relevantDocuments.begin(), relevantDocuments.end(),
		inserter(m_relevantDocuments, m_relevantDocuments.begin()));

	return true;
}
Example #3
DocumentInfo& DocumentInfo::operator=(const DocumentInfo& other)
{
	if (this != &other)
	{
		m_fields.clear();
		copy(other.m_fields.begin(), other.m_fields.end(),
			inserter(m_fields, m_fields.begin()));
		m_extract = other.m_extract;
		m_score = other.m_score;
		m_labels.clear();
		copy(other.m_labels.begin(), other.m_labels.end(),
			inserter(m_labels, m_labels.begin()));
		m_indexId = other.m_indexId;
		m_docId = other.m_docId;
	}

	return *this;
}
Example #4
QueryResult AndQuery::eval(const TextQuery &text) const
{
    auto right = rhs.eval(text), left = lhs.eval(text);
    auto ret_lines = make_shared<set<line_no>>();
    // write the intersection of the two result sets into *ret_lines
    set_intersection(left.cbegin(), left.cend(), right.cbegin(), right.cend(),
        inserter(*ret_lines, ret_lines->begin()));
    return QueryResult(rep(), ret_lines, left.get_file());
}
Example #5
/// Sets the set of documents to expand from.
bool XapianEngine::setExpandSet(const set<string> &docsSet)
{
	copy(docsSet.begin(), docsSet.end(),
		inserter(m_expandDocuments, m_expandDocuments.begin()));
#ifdef DEBUG
	cout << "XapianEngine::setExpandSet: " << m_expandDocuments.size() << " documents" << endl;
#endif

	return true;
}
Example #6
int main() {
	// the original snippet omits the declaration of data; a list<int> is
	// assumed here so the example is self-contained
	list<int> data = {1, 2, 3, 4, 5};
	auto insert_point = data.begin();
	advance(insert_point, 2);
	istringstream iss("10 20 30");
	copy(istream_iterator<int>(iss), istream_iterator<int>(),
	     inserter(data, insert_point));
	copy(data.begin(), data.end(),
	     ostream_iterator<int>(cout, " "));
	cout << endl;
}
Example #7
DocumentInfo::DocumentInfo(const DocumentInfo &other) :
	m_title(other.m_title),
	m_location(other.m_location),
	m_type(other.m_type),
	m_language(other.m_language),
	m_timestamp(other.m_timestamp)
{
	copy(other.m_labels.begin(), other.m_labels.end(),
		inserter(m_labels, m_labels.begin()));
}
Example #8
int main()
{
	vector<int> ivec;
	istream_iterator<int> in(cin);
	istream_iterator<int> eof;
	ostream_iterator<int> out(cout);
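	// unique_copy drops only adjacent duplicates read from cin;
	// non-adjacent duplicates are kept and will still be present after the sort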
	unique_copy(in, eof, inserter(ivec, ivec.begin()));
	sort(ivec.begin(), ivec.end());
	for (auto i : ivec)
	{
		*out++ = i;
	}
	cout << endl;
	return 0;
}
Example #9
DocumentInfo& DocumentInfo::operator=(const DocumentInfo& other)
{
	if (this != &other)
	{
		m_title = other.m_title;
		m_location = other.m_location;
		m_type = other.m_type;
		m_language = other.m_language;
		m_timestamp = other.m_timestamp;
		m_labels.clear();
		copy(other.m_labels.begin(), other.m_labels.end(),
			inserter(m_labels, m_labels.begin()));
	}

	return *this;
}
Example #10
File: Query.cpp Project: hwliu/CPPExamples
// returns the intersection of its operands' result sets
QueryResult
AndQuery::eval(const TextQuery& text) const
{
    // virtual calls through the Query operands to get result sets for the operands
    auto left = lhs.eval(text), right = rhs.eval(text);

    // set to hold the intersection of left and right
    auto ret_lines = make_shared<set<line_no>>();

    // writes the intersection of two ranges to a destination iterator
    // the destination iterator in this call adds elements to ret_lines
    set_intersection(left.begin(), left.end(),
                     right.begin(), right.end(),
                     inserter(*ret_lines, ret_lines->begin()));
    return QueryResult(rep(), ret_lines, left.get_file());
}
Example #11
// returns intersection of its operands' result sets
set<TextQuery::line_no>
AndQuery::eval(const TextQuery& file) const
{
    // virtual calls through the Query handle to get result sets for the operands
    set<line_no> left = lhs.eval(file), 
                 right = rhs.eval(file);

    set<line_no> ret_lines;  // destination to hold results 

    // writes intersection of two ranges to a destination iterator
    // destination iterator in this call adds elements to ret_lines
    set_intersection(left.begin(), left.end(), 
                  right.begin(), right.end(),
                  inserter(ret_lines, ret_lines.begin()));
    return ret_lines;
}
Example #12
int main()
{
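    // print_vector / print_list are helper functions defined elsewhere in the
    // original source; presumably they print the elements of the given container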
    vector<int> iVec;
    for (int i = 0; i < 10; ++i)
        iVec.push_back(i);

    print_vector(iVec);

//    vector<int>::iterator itr = find();
    cout << "inserter: ";
    vector<int> vecInserter;
    replace_copy(iVec.begin(), iVec.end(), inserter(vecInserter, vecInserter.begin()), 9, 10);
    print_vector(vecInserter);

    cout << "back_inserter: ";
    vector<int> vecBack;
    replace_copy(iVec.begin(), iVec.end(), back_inserter(vecBack), 9, 10);
    print_vector(vecBack);

    cout << "front_inserter: ";
    list<int> listFront;
    replace_copy(iVec.begin(), iVec.end(), front_inserter(listFront), 9, 10);
    print_list(listFront);
}
Example #13
File: ex28.cpp Project: MisLink/CppPrimer
int main() {
  vector<int> vec{1, 2, 3, 4, 5, 6, 7, 8, 9};
  vector<int> vec1, vec2;
  list<int> lst;
  auto it1 = back_inserter(vec1);
  auto it2 = front_inserter(lst);
  auto it3 = inserter(vec2, vec2.begin());
  copy(vec.begin(), vec.end(), it1);
  for (auto& x : vec1) {
    cout << x << ' ';
  }
  cout << endl;
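  // note: front_inserter pushes each element to the front, so lst ends up
  // holding the elements in reverse order (9 8 ... 1)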
  copy(vec.begin(), vec.end(), it2);
  for (auto& x : lst) {
    cout << x << ' ';
  }
  cout << endl;
  copy(vec.begin(), vec.end(), it3);
  for (auto& x : vec2) {
    cout << x << ' ';
  }
  cout << endl;
  return 0;
}
Example #14
File: student.cpp Project: karepker/thesis
void Student::AddCoursesTaken(std::initializer_list<const Course*> courses) {
	copy(begin(courses), end(courses),
			inserter(courses_taken_, end(courses_taken_)));
}
Example #15
void View::signals_changed()
{
	using sigrok::Channel;

	vector< shared_ptr<TraceTreeItem> > new_top_level_items;

	const auto device = session_.device();
	if (!device)
		return;

	shared_ptr<sigrok::Device> sr_dev = device->device();
	assert(sr_dev);

	const vector< shared_ptr<Channel> > channels(
		sr_dev->channels());

	// Make a list of traces that are being added, and a list of traces
	// that are being removed
	const vector<shared_ptr<Trace>> prev_trace_list = list_by_type<Trace>();
	const set<shared_ptr<Trace>> prev_traces(
		prev_trace_list.begin(), prev_trace_list.end());

	const unordered_set< shared_ptr<Signal> > sigs(session_.signals());

	set< shared_ptr<Trace> > traces(sigs.begin(), sigs.end());

#ifdef ENABLE_DECODE
	const vector< shared_ptr<DecodeTrace> > decode_traces(
		session().get_decode_signals());
	traces.insert(decode_traces.begin(), decode_traces.end());
#endif

	set< shared_ptr<Trace> > add_traces;
	set_difference(traces.begin(), traces.end(),
		prev_traces.begin(), prev_traces.end(),
		inserter(add_traces, add_traces.begin()));

	set< shared_ptr<Trace> > remove_traces;
	set_difference(prev_traces.begin(), prev_traces.end(),
		traces.begin(), traces.end(),
		inserter(remove_traces, remove_traces.begin()));

	// Make a look-up table of sigrok Channels to pulseview Signals
	unordered_map<shared_ptr<sigrok::Channel>, shared_ptr<Signal> >
		signal_map;
	for (const shared_ptr<Signal> &sig : sigs)
		signal_map[sig->channel()] = sig;

	// Populate channel groups
	for (auto entry : sr_dev->channel_groups()) {
		const shared_ptr<sigrok::ChannelGroup> &group = entry.second;

		if (group->channels().size() <= 1)
			continue;

		// Find best trace group to add to
		TraceTreeItemOwner *owner = find_prevalent_trace_group(
			group, signal_map);

		// If there is no trace group, create one
		shared_ptr<TraceGroup> new_trace_group;
		if (!owner) {
			new_trace_group.reset(new TraceGroup());
			owner = new_trace_group.get();
		}

		// Extract traces for the trace group, removing them from
		// the add list
		const vector< shared_ptr<Trace> > new_traces_in_group =
			extract_new_traces_for_channels(group->channels(),
				signal_map, add_traces);

		// Add the traces to the group
		const pair<int, int> prev_v_extents = owner->v_extents();
		int offset = prev_v_extents.second - prev_v_extents.first;
		for (shared_ptr<Trace> trace : new_traces_in_group) {
			assert(trace);
			owner->add_child_item(trace);

			const pair<int, int> extents = trace->v_extents();
			if (trace->enabled())
				offset += -extents.first;
			trace->force_to_v_offset(offset);
			if (trace->enabled())
				offset += extents.second;
		}

		// If this is a new group, enqueue it in the new top level
		// items list
		if (!new_traces_in_group.empty() && new_trace_group)
			new_top_level_items.push_back(new_trace_group);
	}

	// Enqueue the remaining logic channels in a group
	vector< shared_ptr<Channel> > logic_channels;
	copy_if(channels.begin(), channels.end(), back_inserter(logic_channels),
		[](const shared_ptr<Channel>& c) {
			return c->type() == sigrok::ChannelType::LOGIC; });
	const vector< shared_ptr<Trace> > non_grouped_logic_signals =
		extract_new_traces_for_channels(logic_channels,
			signal_map, add_traces);
	const shared_ptr<TraceGroup> non_grouped_trace_group(
		make_shared<TraceGroup>());
	for (shared_ptr<Trace> trace : non_grouped_logic_signals)
		non_grouped_trace_group->add_child_item(trace);
	new_top_level_items.push_back(non_grouped_trace_group);

	// Enqueue the remaining channels as free ungrouped traces
	const vector< shared_ptr<Trace> > new_top_level_signals =
		extract_new_traces_for_channels(channels,
			signal_map, add_traces);
	new_top_level_items.insert(new_top_level_items.end(),
		new_top_level_signals.begin(), new_top_level_signals.end());

	// Enqueue any remaining traces i.e. decode traces
	new_top_level_items.insert(new_top_level_items.end(),
		add_traces.begin(), add_traces.end());

	// Remove any removed traces
	for (shared_ptr<Trace> trace : remove_traces) {
		TraceTreeItemOwner *const owner = trace->owner();
		assert(owner);
		owner->remove_child_item(trace);
	}

	// Add and position the pending top-level items
	for (auto item : new_top_level_items) {
		add_child_item(item);

		// Position the item after the last present item
		int offset = v_extents().second;
		const pair<int, int> extents = item->v_extents();
		if (item->enabled())
			offset += -extents.first;
		item->force_to_v_offset(offset);
		if (item->enabled())
			offset += extents.second;
	}

	update_layout();

	header_->update();
	viewport_->update();
}
Example #16
// 3 modes:
//   registering?	replicate *all*
//   burying?
//     healthy?		replicate selectively
//     degrading?	wipe
void *rereplicate(void *i) {
	bool slave_failed = *(bool *)i;
	slave_idx failed_slavid = *(slave_idx *)((bool *)i+1);
	free(i);
	pthread_detach(pthread_self());

	map<const char *, struct filinfo *> *files_local = new map<const char *, struct filinfo *>();
	bool actually_replicate = true;

	pthread_mutex_lock(slaves_lock);

	if(slave_failed) {
		copy_if(files->begin(), files->end(),
			inserter(*files_local, files_local->begin()),
			[failed_slavid](const pair<const char *, struct filinfo *> &it) {
				return it.second->holders->count(failed_slavid);
			});
		if(living_count < MIN_STOR_REDUN) actually_replicate = false; // All nodes are already identical, so replicating is pointless
	}
	else
		copy(files->begin(), files->end(), inserter(*files_local, files_local->begin()));

	pthread_mutex_unlock(slaves_lock);

	for(auto file_corr = files_local->begin(); file_corr != files_local->end(); ++file_corr) {
		slave_idx dest_slavid = -1;
		if(actually_replicate) {
			pthread_mutex_lock(file_corr->second->write_lock);

			pthread_mutex_lock(slaves_lock);
			if(slave_failed) {
				unordered_set<slave_idx> *holders = file_corr->second->holders;
				dest_slavid = bestslave([holders](slave_idx check){return holders->count(check);});
			}
			else
				dest_slavid = failed_slavid; // Propagate to the new node

			struct slavinfo *dest_slavif = (*slaves_info)[dest_slavid];
			pthread_mutex_unlock(slaves_lock);

			if(!slave_failed && !dest_slavif->alive) {
				// We're trying to mirror onto a brand new node that just died on us!
				// Our work here is done: a separate cleanup thread was spawned, so we defer to it.
				pthread_mutex_unlock(file_corr->second->write_lock);
				return NULL;
			}

			char *value = NULL;
			size_t vallen;
			// Our use of the same identifier for both newly-added and failed slaves is threadsafe because the thread that handles the "newly-added" case bails out as soon as it discovers its slave has been lost.
			getfile(file_corr->first, &value, &vallen, -failed_slavid); // Use the additive inverse of the failed slave ID as our unique queue identifier

			if(!putfile(dest_slavif, file_corr->first, value, vallen, -failed_slavid, true)) // We'll use that same unique ID to mark our place in line
				// TODO release the writelock, repeat this run of the for loop
				writelog(PRI_DBG, "Failed to put the file during cremation; case not handled!");
		}

		pthread_mutex_lock(files_lock);

		(*files)[file_corr->first]->holders->erase(failed_slavid);
		if(actually_replicate)
			(*files)[file_corr->first]->holders->insert(dest_slavid);

		pthread_mutex_unlock(files_lock);

		if(actually_replicate)
			pthread_mutex_unlock(file_corr->second->write_lock);
	}

	delete files_local;
	return NULL;
}
Example #17
/// Sets the document's labels.
void DocumentInfo::setLabels(const set<string> &labels)
{
	copy(labels.begin(), labels.end(),
		inserter(m_labels, m_labels.begin()));
}