//bool AttributeContainer::hasAttribute(const std::string &name)
//{
//	return m_attributes.exists(name);
//}
AttributeContainer::AttributeContainer(const AttributeContainer &container)
{
    std::vector<std::string> keys=container.keys();

    for(size_t i=0; i<keys.size(); ++i)
    {
        addAttribute(container.attribute(keys[i]));
    }
}
Example #2
File: SyncBuffer.cpp  Project: flinz/auryn
void SyncBuffer::pop(SpikeDelay * delay, NeuronID size)
{
	// clear spike and attribute containers for all time slices before refilling them
	for (NeuronID i = 1 ; i < MINDELAY+1 ; ++i ) {
		delay->get_spikes(i)->clear();
		delay->get_attributes(i)->clear();
	}


	for (int r = 0 ; r < mpicom->size() ; ++r ) {
		NeuronID numberOfSpikes = recv_buf[r*max_send_size]; // number of spikes remaining in this rank's section
		NeuronID * iter = &recv_buf[r*max_send_size+pop_offsets[r]]; // pointer to the first unread spike entry

		NeuronID temp  = (*iter - groupPopOffset);
		NeuronID spike = temp%size; // spike id within the current group (if the entry belongs to it)
		int t = temp/size; // time slice within MINDELAY; if t >= MINDELAY the entry belongs to the next group


		for ( int i = 0 ; i < MINDELAY ; ++i ) count[i] = 0;
		// while we are in the current group && have not read all entries
		while ( t < MINDELAY && numberOfSpikes ) {  
			delay->get_spikes(t+1)->push_back(spike);
			iter++;
			numberOfSpikes--;
			count[t]++; // store spike counts for each time-slice

			temp  = (*iter - groupPopOffset);
			spike = temp%size;
			t = temp/size;
		}


		// extract a total of count*get_num_attributes() attributes 
		if ( delay->get_num_attributes() ) {
			for ( NeuronID slice = 0 ; slice < MINDELAY ; ++slice ) {
				AttributeContainer * ac = delay->get_attributes(slice+1);
				for ( NeuronID k = 0 ; k < delay->get_num_attributes() ; ++k ) { // loop over attributes
					for ( NeuronID s = 0 ; s < count[slice] ; ++s ) { // loop over spikes
						AurynFloat * attrib;
						attrib = (AurynFloat*)(iter);
						iter++;
						ac->push_back(*attrib);
						// if ( mpicom->rank() == 0 )
						// 	cout << " reading attr " << " " << slice << " "  << k << " " << s << " " << scientific << *attrib << endl;
					}
				}
			}
		}

		recv_buf[r*max_send_size] = numberOfSpikes; // save remaining entries
		pop_offsets[r] = iter - &recv_buf[r*max_send_size]; // save offset in recv_buf section
	}

	groupPopOffset += size*MINDELAY;

}
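Both pop() above and the push() variants below move floating-point spike attributes through the same integer MPI buffer by reinterpreting the bit pattern in place (the (AurynFloat*) and (NeuronID*) casts). The round trip can be checked in isolation; the sketch below is illustrative only, uses std::memcpy instead of the pointer casts, and its variable names are made up rather than taken from auryn:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

int main()
{
	float attribute = 0.042f;            // e.g. a synaptic trace value (stands in for AurynFloat)
	std::vector<uint32_t> buffer;        // stands in for send_buf / recv_buf (integer entries)

	// push side: copy the float's bit pattern into an integer slot of the buffer
	uint32_t bits;
	std::memcpy(&bits, &attribute, sizeof(bits));
	buffer.push_back(bits);

	// pop side: copy the integer back into a float, recovering the original value bit-exactly
	float restored;
	std::memcpy(&restored, &buffer[0], sizeof(restored));
	assert(restored == attribute);
	return 0;
}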
Example #3
void SyncBuffer::push(SpikeDelay * delay, const NeuronID size)
{
	// DEBUG
	// std::cout << "Rank " << mpicom->rank() << "push\n";
	// delay->print();
	
	const SYNCBUFFER_DELTA_DATATYPE grid_size = (SYNCBUFFER_DELTA_DATATYPE)size*MINDELAY;

	SYNCBUFFER_DELTA_DATATYPE unrolled_last_pos = 0;
	// circular loop over different delay bins
	for (int slice = 0 ; slice < MINDELAY ; ++slice ) {
		SpikeContainer * sc = delay->get_spikes(slice+1);
		AttributeContainer * ac = delay->get_attributes(slice+1);

		// loop over all spikes in current delay time slice
		for (int i = 0 ; i < sc->size() ; ++i ) {
			NeuronID spike = sc->at(i);
			// compute unrolled position in current delay
			SYNCBUFFER_DELTA_DATATYPE unrolled_pos = (SYNCBUFFER_DELTA_DATATYPE)(spike) + (SYNCBUFFER_DELTA_DATATYPE)size*slice; 
			// compute vertical unrolled difference from last spike
			SYNCBUFFER_DELTA_DATATYPE spike_delta = unrolled_pos + carry_offset - unrolled_last_pos;
			// memorize current position in slice
			unrolled_last_pos = unrolled_pos; 
			// discard carry_offset since it's only added to the first spike_delta
			carry_offset = 0;

			// overflow management -- should only ever kick in for very large SpikingGroups with very sparse activity
			while ( spike_delta >= max_delta_size ) {
				send_buf.push_back( max_delta_size );
				spike_delta -= max_delta_size;
			}
		
			// storing the spike delta (or its remainder) to buffer
			send_buf.push_back(spike_delta);

			// append spike attributes here in buffer
			for ( int k = 0 ; k < delay->get_num_attributes() ; ++k ) { // loop over attributes
				// reinterpret the float attribute's bit pattern as a NeuronID so it fits into the integer send buffer
				NeuronID cast_attrib = *(NeuronID*)(&(ac->at(i*delay->get_num_attributes()+k)));
				send_buf.push_back(cast_attrib);
				// std::cout << "store " << std::scientific << ac->at(i*delay->get_num_attributes()+k) << " int " << cast_attrib << std::endl;
			}
		}
	}

	// save the carry_offset, i.e. the remaining unrolled distance to the end of the present group
	// (accumulated with += because several consecutive groups may contain no spike at all)
	carry_offset += grid_size-unrolled_last_pos;

}
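The delta scheme used by this push() can be exercised on its own. The sketch below is an illustration under assumed names (none of it is auryn code): ascending unrolled spike positions are stored as differences, any difference that does not fit is split into max_delta_size marker chunks, and the matching decode step (inferred from the encode logic above) accumulates deltas while treating a full max_delta_size entry as a carry without a spike:

#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
	const uint32_t max_delta_size = 16;             // deliberately small to force overflow markers
	std::vector<uint32_t> positions = {3, 7, 40};   // unrolled spike positions, ascending
	std::vector<uint32_t> buf;

	// encode: store differences, splitting any delta >= max_delta_size into marker chunks
	uint32_t last = 0;
	for (uint32_t pos : positions) {
		uint32_t delta = pos - last;
		while (delta >= max_delta_size) {
			buf.push_back(max_delta_size);
			delta -= max_delta_size;
		}
		buf.push_back(delta);
		last = pos;
	}

	// decode: accumulate deltas; a full max_delta_size entry only advances the position
	uint32_t pos = 0;
	for (uint32_t d : buf) {
		pos += d;
		if (d < max_delta_size)
			std::cout << "spike at " << pos << "\n";  // prints 3, 7, 40
	}
	return 0;
}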
Example #4
File: SyncBuffer.cpp  Project: flinz/auryn
void SyncBuffer::push(SpikeDelay * delay, NeuronID size)
{

	for (NeuronID i = 1 ; i < MINDELAY+1 ; ++i ) {
		SpikeContainer * sc = delay->get_spikes(i);

		// NeuronID s = (NeuronID) (sc->size());
		// send_buf[0] += s;

		count[i-1] = 0;
		for (SpikeContainer::const_iterator spike = sc->begin() ; 
			spike != sc->end() ; ++spike ) {
			NeuronID compressed = *spike + groupPushOffset1 + (i-1)*size;
			send_buf.push_back(compressed);
			count[i-1]++;
		}

		send_buf[0] += delay->get_spikes(i)->size(); // accumulate the total number of spikes in the first buffer entry
	}

	// transmit get_num_attributes() attributes for count spikes for all time slices
	if ( delay->get_num_attributes() ) {
		for (NeuronID i = 1 ; i < MINDELAY+1 ; ++i ) {
			AttributeContainer * ac = delay->get_attributes(i);
			for ( NeuronID k = 0 ; k < delay->get_num_attributes() ; ++k ) { // loop over attributes
				for ( NeuronID s = 0 ; s < count[i-1] ; ++s ) { // loop over spikes
					send_buf.push_back(*(NeuronID*)(&(ac->at(s+count[i-1]*k))));
					// if ( mpicom->rank() == 0 )
					// 	cout << " pushing attr " << " " << i << " " << k << " " << s << " " 
					// 		<< scientific << ac->at(s+count[i-1]*k) << endl;
				}
			}
		}
	}

	groupPushOffset1 += size*MINDELAY;
}
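The "compressed" id written here is undone by pop() above: after subtracting the group offset, the remainder modulo the group size gives the neuron id and the quotient gives the time slice. A tiny standalone check of that packing (the constants are illustrative, not taken from auryn):

#include <cassert>
#include <cstdint>

int main()
{
	const uint32_t size = 1000;           // neurons in the group
	const uint32_t group_offset = 5000;   // plays the role of groupPushOffset1 / groupPopOffset
	const uint32_t neuron = 42;           // spike id within the group
	const uint32_t slice = 3;             // delay time slice (0-based, i.e. i-1 in push)

	// push side: pack (slice, neuron) into a single integer
	const uint32_t compressed = neuron + group_offset + slice * size;

	// pop side: unpack again
	const uint32_t temp = compressed - group_offset;
	assert(temp % size == neuron);
	assert(temp / size == slice);
	return 0;
}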
Example #5
void foreach_attrib(AttributeContainer& attr_cont, std::vector<FunctorAttribThreaded*> funcs)
{
	unsigned int nbth = funcs.size();

	std::vector<unsigned int >* vid = new std::vector<unsigned int>[2*nbth];
	boost::thread** threads = new boost::thread*[nbth];

	for (unsigned int i = 0; i < 2*nbth; ++i)
		vid[i].reserve(SIZE_BUFFER_THREAD);

	// fill each vid buffer with up to SIZE_BUFFER_THREAD ids
	unsigned int id = attr_cont.begin();
	unsigned int nb = 0;
	unsigned int nbm = nbth*SIZE_BUFFER_THREAD;
	while ((id != attr_cont.end()) && (nb < nbm))
	{
		vid[nb%nbth].push_back(id);
		nb++;
		attr_cont.next(id);
	}


	boost::barrier sync1(nbth+1);
	boost::barrier sync2(nbth+1);
	bool finished=false;
	// launch worker threads
	for (unsigned int i = 0; i < nbth; ++i)
		threads[i] = new boost::thread(ThreadFunctionAttrib(funcs[i], vid[i],sync1,sync2, finished,1+i));

	while (id != attr_cont.end())
	{
		for (unsigned int i = nbth; i < 2*nbth; ++i)
			vid[i].clear();

		unsigned int nb = 0;
		while ((id != attr_cont.end()) && (nb < nbm))
		{
			vid[nbth + nb%nbth].push_back(id);
			nb++;
			attr_cont.next(id);
		}

		sync1.wait();
		for (unsigned int i = 0; i < nbth; ++i)
			vid[i].swap(vid[nbth+i]);
		sync2.wait();
	}

	sync1.wait();
	finished = true;
	sync2.wait();

	// wait for all threads to finish
	for (unsigned int i = 0; i < nbth; ++i)
	{
		threads[i]->join();
		delete threads[i];
	}
	delete[] threads;
	delete[] vid;
}
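The structure of foreach_attrib is a double-buffered producer/consumer scheme: the workers process the front halves vid[0..nbth-1] while the main thread refills the back halves vid[nbth..2*nbth-1], and the two barriers bracket the swap. Below is a stripped-down sketch of the same pattern, where plain std::thread and C++20 std::barrier stand in for boost and a trivial summing task stands in for the FunctorAttribThreaded calls; every name in it is illustrative, not the library's API:

#include <barrier>
#include <iostream>
#include <thread>
#include <vector>

int main()
{
	const unsigned nbth = 2;    // number of worker threads
	const unsigned chunk = 4;   // ids handed to each thread per round (stands in for SIZE_BUFFER_THREAD)
	std::vector<std::vector<unsigned> > vid(2 * nbth);
	std::barrier sync1(nbth + 1), sync2(nbth + 1);
	bool finished = false;
	long long sums[nbth] = {0, 0};

	auto worker = [&](unsigned t) {
		while (true) {
			for (unsigned id : vid[t]) sums[t] += id;   // "apply the functor" to the front buffer
			sync1.arrive_and_wait();                    // tell the producer the front buffer is consumed
			sync2.arrive_and_wait();                    // wait until the swap is done (or finished is set)
			if (finished) return;
		}
	};

	// pre-fill the front buffers with the first ids, then start the workers
	unsigned id = 0;
	const unsigned total = 100;
	for (unsigned n = 0; n < nbth * chunk && id < total; ++n, ++id) vid[n % nbth].push_back(id);
	std::vector<std::thread> threads;
	for (unsigned t = 0; t < nbth; ++t) threads.emplace_back(worker, t);

	// refill the back buffers while the workers chew on the front ones, then swap between the barriers
	while (id < total) {
		for (unsigned t = 0; t < nbth; ++t) vid[nbth + t].clear();
		for (unsigned n = 0; n < nbth * chunk && id < total; ++n, ++id) vid[nbth + (n % nbth)].push_back(id);
		sync1.arrive_and_wait();
		for (unsigned t = 0; t < nbth; ++t) vid[t].swap(vid[nbth + t]);
		sync2.arrive_and_wait();
	}

	// the last batch has been processed once the final sync1 completes; signal shutdown and join
	sync1.arrive_and_wait();
	finished = true;
	sync2.arrive_and_wait();
	for (std::thread &t : threads) t.join();

	std::cout << sums[0] + sums[1] << std::endl;        // 0 + 1 + ... + 99 = 4950
	return 0;
}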