Example #1
void ConfigChannel::readParamArray(cvg_int id, cvg_char *buffer, cvg_int length) {
	cvg_bool closeChannelOnError = true;
	try {
		ConfigHeader header;
		header.signature = PROTOCOL_CONFIG_SIGNATURE;
		header.info.mode = CONFIG_READ;
		header.info.paramId = id;
		header.info.dataLength = 0;
		ConfigHeader_hton(&header);
		sendBuffer(&header, sizeof(ConfigHeader), true);
		recvBuffer(&header, sizeof(ConfigHeader));
		ConfigHeader_ntoh(&header);
		if ((header.info.mode != CONFIG_WRITE && header.info.mode != CONFIG_NACK) || header.info.paramId != id) {
			throw cvgException("bad response");
		}
		if (header.info.dataLength > length) {
			throw cvgException("data too long");
		}
		if (header.info.mode == CONFIG_NACK) {
			closeChannelOnError = false;
			throw cvgException("bad parameter specification");
		}
		recvBuffer(buffer, header.info.dataLength);	// read only as many bytes as the response declared
	} catch(const cvgException &e) {
		if (closeChannelOnError) close();
		throw cvgException(cvgString("[ConfigChannel] Error reading parameter ID ") + id + ". Reason: " + e.getMessage());
	}
}
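The read path above implies a matching write path: the server answers a CONFIG_READ with a CONFIG_WRITE header plus payload, so a client-side write would send that same header shape with the payload attached. A hypothetical sketch only; the method name and the absence of an acknowledgement step are assumptions, while the header fields and helpers come from readParamArray:

void ConfigChannel::writeParamArray(cvg_int id, const cvg_char *buffer, cvg_int length) {
	ConfigHeader header;
	header.signature = PROTOCOL_CONFIG_SIGNATURE;
	header.info.mode = CONFIG_WRITE;	// mirror of the CONFIG_READ request above
	header.info.paramId = id;
	header.info.dataLength = length;
	ConfigHeader_hton(&header);		// same byte-order conversion as the read path
	sendBuffer(&header, sizeof(ConfigHeader), true);
	sendBuffer(buffer, length, true);	// payload follows the header
}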
Example #2
// Reads the data-length field from the communication channel. Returns the data size, or 0xFFFFFFFF on error.
static uint32 ReceiveLengthField(int Handle, StreamMethod Method, byte *LengthField, byte *LengthFieldSize, uint32 Timeout)
{
	uint32 Size = 0xFFFFFFFF;
	*LengthFieldSize = 0;
	byte b;
	if (recvByte(Handle, Method, &b, Timeout) > 0)
	{
		LengthField[0] = b;
		if (b < 0x80)
		{
			Size = b;
			*LengthFieldSize = 1;
		}
		else
		{
			byte datasize = b & 0x0F;	// low nibble = number of length bytes that follow
			uint32 data = 0;		// holds at most 4 length bytes
			if (datasize == 0 || datasize > sizeof(data))	// guard against a malformed field overflowing 'data'
				return Size;
			tBuffer buf;
			bufInit(&buf, (byte *)&data, datasize);
			if (recvBuffer(Handle, Method, buf.ptr, buf.dim, Timeout) > 0)
			{
				memcpy(LengthField + 1, &data, datasize);
				if (IsLittleEndian())
					memrev(&data, datasize);
				Size = data;
				*LengthFieldSize = datasize + 1;
			}
		}
	}
	return Size;
}
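For reference, a sketch of the encoder implied by the decoder above: sizes below 0x80 occupy a single byte, larger sizes get a prefix byte whose low nibble carries the byte count, followed by the size bytes in big-endian order. Only the format is taken from ReceiveLengthField; the function name and the exact prefix value (0x80 | count) are assumptions.

static int EncodeLengthField(uint32 Size, byte *LengthField)
{
	if (Size < 0x80)
	{
		LengthField[0] = (byte)Size;	// short form: the byte is the size itself
		return 1;
	}
	byte tmp[4];
	byte count = 0;
	uint32 s = Size;
	while (s > 0)	// collect the size bytes, least significant first
	{
		tmp[count++] = (byte)(s & 0xFF);
		s >>= 8;
	}
	LengthField[0] = (byte)(0x80 | count);	// low nibble = byte count, as read by ReceiveLengthField
	for (byte i = 0; i < count; i++)	// emit in big-endian order, matching the memrev() in the decoder
		LengthField[1 + i] = tmp[count - 1 - i];
	return count + 1;
}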
Example #3
int Publisher::exchangeBorders(int neighbors[], int numNeighbors, const PVLayerLoc * loc, int delay/*default 0*/) {
   // Code duplication with Communicator::exchange.  Consolidate?
   PVHalo const * halo = &loc->halo;
   if (halo->lt==0 && halo->rt==0 && halo->dn==0 && halo->up==0) { return PV_SUCCESS; }
   int status = PV_SUCCESS;

#ifdef PV_USE_MPI
   //Using local ranks and communicators for border exchange
   int icRank = comm->commRank();
   MPI_Comm mpiComm = comm->communicator();

   //Loop through batches
   for(int b = 0; b < loc->nbatch; b++){
      // don't send interior
      assert(numRequests == b * (comm->numberOfNeighbors()-1));
      for (int n = 1; n < NUM_NEIGHBORHOOD; n++) {
         if (neighbors[n] == icRank) continue;  // don't send interior to self
         pvdata_t * recvBuf = recvBuffer(b, delay) + comm->recvOffset(n, loc);
         // sendBuf = cube->data + Communicator::sendOffset(n, &cube->loc);
         pvdata_t * sendBuf = recvBuffer(b, delay) + comm->sendOffset(n, loc);


#ifdef DEBUG_OUTPUT
         size_t recvOff = comm->recvOffset(n, loc);
         size_t sendOff = comm->sendOffset(n, loc);
         pvInfo().printf("[%2d]: recv,send to %d, n=%d, delay=%d, recvOffset==%ld, sendOffset==%ld, send[0]==%f\n", comm->commRank(), neighbors[n], n, delay, recvOff, sendOff, sendBuf[0]);
         pvInfo().flush();
#endif //DEBUG_OUTPUT

         MPI_Irecv(recvBuf, 1, neighborDatatypes[n], neighbors[n], comm->getReverseTag(n), mpiComm,
                   &requests[numRequests++]);
         int sendStatus = MPI_Send( sendBuf, 1, neighborDatatypes[n], neighbors[n], comm->getTag(n), mpiComm);
         assert(sendStatus==MPI_SUCCESS);

      }
      assert(numRequests == (b+1) * (comm->numberOfNeighbors()-1));
   }

#endif // PV_USE_MPI

   return status;
}
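The Irecv-before-Send ordering above is what lets the blocking MPI_Send calls complete without deadlock: every rank has already posted its receives. A minimal standalone sketch of the same pattern outside the PetaVision API (the two-neighbor ring and all names are illustrative):

#include <mpi.h>
#include <cassert>

// Each rank posts both receives first, then sends; requests are completed
// with MPI_Waitall, mirroring the structure of exchangeBorders.
// Call between MPI_Init and MPI_Finalize.
void ringHaloExchange(double *halo, double *border, int leftNbr, int rightNbr, MPI_Comm comm)
{
   MPI_Request requests[2];
   MPI_Irecv(&halo[0], 1, MPI_DOUBLE, leftNbr,  0, comm, &requests[0]);
   MPI_Irecv(&halo[1], 1, MPI_DOUBLE, rightNbr, 1, comm, &requests[1]);
   int status = MPI_Send(&border[0], 1, MPI_DOUBLE, rightNbr, 0, comm);
   assert(status == MPI_SUCCESS);
   status = MPI_Send(&border[1], 1, MPI_DOUBLE, leftNbr, 1, comm);
   assert(status == MPI_SUCCESS);
   MPI_Waitall(2, requests, MPI_STATUSES_IGNORE);
}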
Example #4
void StructuredGrid2D::initParallel(const std::vector<int> &ownership, const std::vector<Label> &gids)
{
    if(ownership.size() != _cells.size() || gids.size() != _cells.size())
        throw Exception("StructuredGrid2D", "initParallel", "invalid partitioning.");

    _ownership = ownership;

    _sendBuffers.clear();
    _recvBuffers.clear();
    _sendBuffers.resize(_comm->nProcs());
    _recvBuffers.resize(_comm->nProcs());

    _globalToLocalIdMap.clear();
    _globalToLocalIdMap.reserve(gids.size());

    for(std::size_t i = 0; i < _cells.size(); ++i)
    {
        _cells[i].setgid(gids[i]);
        auto insert = _globalToLocalIdMap.emplace(_cells[i].gid(), _cells[i].lid());

        if(!insert.second)
            throw Exception("StructuredGrid2D", "initParallel", "duplicate global id.");

        if(_ownership[i] >= _comm->nProcs())
            throw Exception("StructuredGrid2D",
                            "initParallel",
                            "proc " + std::to_string(_ownership[i]) + " is greater than the max comm rank.");
        else if(_ownership[i] != _comm->rank())
        {
            _localCells.remove(_cells[i]);
            _recvBuffers[_ownership[i]].add(_cells[i]);
        }
    }

    std::vector<std::vector<Label>> recvOrders(_comm->nProcs());
    for(auto proc = 0; proc < _comm->nProcs(); ++proc)
    {
        for(const Cell &c: recvBuffer(proc))
            recvOrders[proc].emplace_back(c.gid());

        _comm->isend(proc, recvOrders[proc], _comm->rank());
    }

    std::vector<std::vector<Label>> sendOrders(_comm->nProcs());
    for(auto proc = 0; proc < _comm->nProcs(); ++proc)
    {
        sendOrders[proc].resize(_comm->probeSize<Label>(proc, proc));
        _comm->recv(proc, sendOrders[proc], proc);
    }

    _comm->waitAll();

    for(int proc = 0; proc < _comm->nProcs(); ++proc)
        for(Label gid: sendOrders[proc])
        {
            Label lid = _globalToLocalIdMap.at(gid);
            _sendBuffers[proc].add(_cells[lid]);
        }
}
Example #5
int recvByte(int Handle, StreamMethod Method, byte *Data, int TimeoutMsec)
{
	*Data = 0;
	byte buffer[1] = { 0 };
	int ret = recvBuffer(Handle, Method, buffer, sizeof(buffer), TimeoutMsec);
	if (ret == 1)
		*Data = buffer[0];
	return ret;
}
Example #6
template< typename T >
void test( const T & in )
{
   mpi::SendBuffer sendBuffer;
   sendBuffer << in;

   T out;
   mpi::RecvBuffer recvBuffer( sendBuffer );
   recvBuffer >> out;

   WALBERLA_CHECK_EQUAL( in, out );
}
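A sketch of how such a round-trip test might be driven, assuming waLBerla's buffer operators (and the WALBERLA_CHECK_EQUAL macro's headers) are available for each type used; the concrete values are illustrative:

#include <string>

int main()
{
   test( 42 );                          // built-in integral type
   test( 3.14 );                        // floating point
   test( std::string( "recvBuffer" ) ); // type whose serialization carries a length
   return 0;
}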
Example #7
int Publisher::publish(HyPerLayer* pub,
                       int neighbors[], int numNeighbors,
                       int borders[], int numBorders,
                       PVLayerCube* cube,
                       int delay/*default=0*/)
{
   //
   // Everyone publishes border region to neighbors even if no subscribers.
   // This means that everyone should wait as well.
   //

   size_t dataSize = cube->numItems * sizeof(pvdata_t);
   assert(dataSize == (store->size() * store->numberOfBuffers()));

   pvdata_t * sendBuf = cube->data;
   pvdata_t * recvBuf = recvBuffer(0); //Grab all of the buffer, allocated contiguously

   bool isSparse = store->isSparse();

   if (pub->getLastUpdateTime() >= pub->getParent()->simulationTime()) {
      // copy entire layer and let neighbors overwrite
      //Only need to exchange borders if layer was updated this timestep
      memcpy(recvBuf, sendBuf, dataSize);
      exchangeBorders(neighbors, numNeighbors, &cube->loc, 0);
      store->setLastUpdateTime(LOCAL/*bufferId*/, pub->getLastUpdateTime());

      //Updating active indices is done after MPI wait in HyPerCol
      //to avoid race condition because exchangeBorders mpi is async
   }
   else if (store->numberOfLevels()>1){
      // If there are delays, copy last level's data to this level.
      // TODO: we could use pointer indirection to cut down on the number of memcpy calls required, if this turns out to be an expensive step
      memcpy(recvBuf, recvBuffer(LOCAL/*bufferId*/,1), dataSize);
      store->setLastUpdateTime(LOCAL/*bufferId*/, pub->getLastUpdateTime());
   }

   return PV_SUCCESS;
}
Example #8
        /**
         @param qmessage Query message to send to the server list
         @return Query result. If the query failed or timed out, dns::message::result() will return no_result.
         */
        dns::message &
        query ( dns::message & qmessage )
        {
          // set a few defaults for a message
          qmessage.recursive(true);
          qmessage.action(dns::message::query);
          qmessage.opcode(dns::message::squery);

          // fixed message id; a production resolver would randomize this per query
          qmessage.id(0xaffe);

          qmessage.encode(reqBuffer);

          // in the event nothing gets resolved, answer with the original question
          responseMessage = qmessage;
          responseMessage.result(dns::message::no_result);

          // die if we don't get a response within death_timeout
          death_timer.expires_from_now(boost::posix_time::seconds(death_timeout));
          death_timer.async_wait(boost::bind(&resolve::request_timeout, this));

          for( vector< ip::udp::endpoint >::iterator iter = endpointList.begin(); iter != endpointList.end(); ++iter )
          {
            // we're waiting for N requests
            requestCount.inc();

            // setup the receive buffer
            shared_dns_buffer_t recvBuffer(new dns_buffer_t);
            socket.async_receive_from(boost::asio::buffer(recvBuffer.get()->get_array()), *iter, boost::bind(
                &resolve::handle_recv,
                this,
                recvBuffer,
                boost::asio::placeholders::error,
                boost::asio::placeholders::bytes_transferred));
          }

          // kick off the request
          send_packet();

          // run a blocking service
          ioservice.run();

          return responseMessage;
        }
Example #9
static void HandleClientConnection(int Socket)
{
	StreamMethod Method = smRecvSend;

	if (!Socket) return;

	byte Status = 0;
	byte cc = 0;
#ifdef TCP_DEBUG
	printf("Wait connection\n");
#endif
	int ret = recvByte(Socket, Method, &cc, ConnectionTimeoutMSec);
#ifdef TCP_DEBUG
	//printf("recvByte: %d. cc = %d \n", ret, cc);
#endif
	if (ret > 0 && cc == pccENQ)
	{
		byte ConnectionBuffer[2];
		ret = recvBuffer(Socket, Method, ConnectionBuffer, sizeof(ConnectionBuffer), ConnectionTimeoutMSec); // receive the connection parameters
		if (ret > 0)
		{
			PV = ConnectionBuffer[0];
			KSN = ConnectionBuffer[1];
			if (sendByte(Socket, Method, pccACK) < 0) goto lblKO; // acknowledge the connection and switch to message-wait mode
#ifdef TCP_DEBUG
			printf("Connected. PV: %d, KSN: %d\n", PV, KSN);
#endif
			int time1 = GetTickCount();
			uint32 size;
			byte *data = NULL;

			while (1) // run until a timeout or a disconnect request arrives
			{
				data = tcpReceiveMessage(Socket, &size, &cc, ReceiveTimeoutMSec);
#ifdef TCP_DEBUG
				if (cc != 0)
					printf("drmReceiveMessage. cc = %d, size = %lu\n", cc, size);
#endif
				if (cc == pccSTX) // data now holds the received payload
				{
					if (data)
					{
						Status = 3; // clean finish, at least one packet was received
						ParseClientMessage(Socket, data, size); // hand the client's message to the parser
						time1 = GetTickCount();
					}
					else
					{
						printf("Empty package from client\n");
					}
				}
				else if (cc == pccEOT)
				{
					Status = 2; // disconnect query
#ifdef TCP_DEBUG
					printf("Disconnect query received\n");
#endif
					if (sendByte(Socket, Method, pccEOT) < 0) goto lblKO;
					break; // exit from the cycle
				}

				if (data) // release the message buffer
				{
					free(data);
					data = NULL;
				}

				// check timeout
				if (GetTickCount() - time1 >= ClientTimeoutMSec)
				{
#ifdef TCP_DEBUG
					printf("Client timeout occurred\n");
#endif
					Status = 1; // timeout
					//sendByte(Socket, pccEOT);
					break;
				}

			}
			if (Status != 1)
			{
			  //printf("Send disconnect byte...");
			  if (sendByte(Socket, Method, pccEOT) < 0) goto lblKO; // disconnect
			  //printf("Done\n");
			}
		}
	}

lblKO:
#ifdef TCP_DEBUG
    printf("Closing client socket...");
#endif
	close(Socket); // close the client socket when we are done
	Socket = 0;
#ifdef TCP_DEBUG
	printf("Done\n");
	printf("Client disconnected\n");
#endif
}
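The server above implies a client-side handshake: send pccENQ, then the two connection bytes, then wait for pccACK. A hypothetical sketch of that counterpart; the function name, the sendBuffer signature, and the meaning of PV/KSN are assumptions, while the byte sequence itself is taken from HandleClientConnection:

static int ConnectToServer(int Socket, StreamMethod Method, byte PV, byte KSN)
{
	if (sendByte(Socket, Method, pccENQ) < 0) return -1;	// request a session
	byte ConnectionBuffer[2] = { PV, KSN };			// the two bytes the server stores on connect
	if (sendBuffer(Socket, Method, ConnectionBuffer, sizeof(ConnectionBuffer)) < 0) return -1;
	byte cc = 0;
	if (recvByte(Socket, Method, &cc, ConnectionTimeoutMSec) <= 0) return -1;
	return (cc == pccACK) ? 0 : -1;				// the server must acknowledge before messaging starts
}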
Example #10
/**
    @brief Receive a message from the server
    @param Handle - connection handle
    @param size - returns the size of the received data
    @param control - returns the control character
    @param TimeoutMsec - receive timeout in milliseconds
    @return data (you MUST free the memory after use), or NULL on failure
*/
byte *tcpReceiveMessage(int Handle, uint32 *size, byte *control, uint32 TimeoutMsec)
{
	StreamMethod Method = smRecvSend;
	byte *data;
	int ret = 0;
	byte cc = 0;
	byte LengthField[MAXLENGTHFIELDSIZE];
	byte LengthFieldSize;
	uint32 AlignedSize;
	byte AttemptsCount = 1;
	byte MaxAttemptsCount = 5;
lblWaitAgain:
	*size = 0;
	*control = 0;
	data = NULL;
	sleepms(100);
	ret = recvByte(Handle, Method, &cc, 0/*Timeout*/);
	//printf"recvByte: %d. byte %d\n", ret, cc);
	if (ret > 0)
	{
		*control = cc;
		switch (cc)
		{
			case pccSTX:
				*size = ReceiveLengthField(Handle, Method, LengthField, &LengthFieldSize, TimeoutMsec); // read the data-length field
				if (*size != 0xFFFFFFFF)
				{
					AlignedSize = GetAlignedSize(*size);
					data = malloc(AlignedSize); // allocate the receive buffer
					if (data == NULL) break; // out of memory: abandon this frame
					ret = recvBuffer(Handle, Method, data, AlignedSize, TimeoutMsec);
					#ifdef TCP_DEBUG
						// PrintHEX(data, *size);
					#endif
					//printf"comRecvBufLarge: %d\n", ret);
					if (ret > 0)
					{
						// ok, we have a data - calc CRC
						uint32 CRCBufferSize = LengthFieldSize + AlignedSize + 1; // length field + data + ETX
						byte *CRCBuffer = malloc(CRCBufferSize);
						memcpy(CRCBuffer, LengthField, LengthFieldSize); // copy the length field into the check buffer
						memcpy(CRCBuffer+LengthFieldSize, data, AlignedSize); // copy the data into the check buffer
						CRCBuffer[CRCBufferSize - 1] = pccETX; // last character = ETX
						uint64 CalculatedCRC = crcCalculate(CRCBuffer, CRCBufferSize);

						free(CRCBuffer);
						CRCBuffer = NULL;

						ret = recvByte(Handle, Method, &cc, TimeoutMsec); // receive ETX character
						//printf"comRecv: %d. byte %d\n", ret, cc);
						if (ret > 0 && cc == pccETX)
						{
							byte crcfield[crcGetFieldSize(CRCBufferSize)];
							ret = recvBuffer(Handle, Method, crcfield, sizeof(crcfield), TimeoutMsec); // receive the CRC field
							uint64 ReceivedCRC = crcExtract(crcfield, sizeof(crcfield));
							//printf"comRecvBuf: %d\n", ret);
							if (ret > 0)
							{
								if (ReceivedCRC == CalculatedCRC)	// check the data
									ret = sendByte(Handle, Method, pccACK); // good!!!
								else
								{
									if (AttemptsCount <= MaxAttemptsCount)
									{
										AttemptsCount++;
										ret = sendByte(Handle, Method, pccNAK);
										// and go back to wait for this packet again
										goto lblWaitAgain;
									}
									else
									{ free(data); data = NULL; }
								}
							}
							else
							{ free(data); data = NULL; }
						}
						else
						{ free(data); data = NULL; }
					}
					else
					{ free(data); data = NULL; }
				}
				break;
			case pccEOT: // query close connection
				data = NULL;
				break;
			case pccBEL:
				sendByte(Handle, Method, pccBEL);
				goto lblWaitAgain;
			default:
				data = NULL;
				break;
		}
	}
	else
	{
		*control = 0;
		data = NULL;
	}

	/*if (data)
		cphDecrypt(data, data, PCSessionKey, AlignedSize, ToServer); // decipher received data*/

	return data;
}
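Taken together with ReceiveLengthField (Example #2), the frame this receiver parses is: STX, length field, payload padded to GetAlignedSize(size), ETX, CRC field, where the CRC covers the length field, the padded payload, and the ETX byte. A small helper capturing just that verification step; the name is hypothetical, the helpers are the ones already used above:

static uint64 CalcFrameCRC(const byte *LengthField, byte LengthFieldSize, const byte *data, uint32 AlignedSize)
{
	uint32 CRCBufferSize = LengthFieldSize + AlignedSize + 1;	// length field + padded payload + ETX
	byte *CRCBuffer = (byte *)malloc(CRCBufferSize);
	if (CRCBuffer == NULL) return 0;
	memcpy(CRCBuffer, LengthField, LengthFieldSize);
	memcpy(CRCBuffer + LengthFieldSize, data, AlignedSize);
	CRCBuffer[CRCBufferSize - 1] = pccETX;		// the ETX terminator is included in the checksum
	uint64 CRC = crcCalculate(CRCBuffer, CRCBufferSize);
	free(CRCBuffer);
	return CRC;
}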
Example #11
void
HyperbolicSolver< Mesh, SolverType >::
updateGhostValues ( typename MeshPartitioner<Mesh>::GhostEntityDataMap_Type& ghostDataMap )
{

    // fill send buffer
    buffer_Type sendBuffer;

    // TODO: move this to a const reference
    typename MeshPartitioner<Mesh>::GhostEntityDataMap_Type::const_iterator procIt  = ghostDataMap.begin();
    typename MeshPartitioner<Mesh>::GhostEntityDataMap_Type::const_iterator procEnd = ghostDataMap.end();
    typename MeshPartitioner<Mesh>::GhostEntityDataContainer_Type::const_iterator dataIt;
    typename MeshPartitioner<Mesh>::GhostEntityDataContainer_Type::const_iterator dataEnd;
    for ( ; procIt != procEnd; ++procIt )
    {
        std::vector<Real> & valueList ( sendBuffer[ procIt->first ] ); // reference, so the push_back below fills the buffer

        dataEnd = procIt->second.end();
        for ( dataIt = procIt->second.begin(); dataIt != dataEnd; ++dataIt )
        {
            ID elementId ( M_FESpace.mesh()->faceElement ( dataIt->localFacetId, 0 ) );

            VectorElemental ghostValue  ( M_FESpace.refFE().nbDof(), 1 );
            extract_vec ( *M_uOld, ghostValue, M_FESpace.refFE(), M_FESpace.dof(), elementId, 0 );
            // TODO: this works only for P0
            valueList.push_back ( ghostValue[ 0 ] );
        }
    }

    // organize recvBuffer
    buffer_Type recvBuffer ( sendBuffer );

    // send data
    MPI_Status status;
    for ( Int proc = 0; proc < M_displayer.comm()->NumProc(); proc++ )
    {
        if ( proc != M_displayer.comm()->MyPID() )
        {
            // NOTE: both sides issue the blocking MPI_Send first; this relies on MPI eager buffering for small messages
            MPI_Send ( &sendBuffer[ proc ][ 0 ], sendBuffer[ proc ].size(), MPI_DOUBLE, proc, M_displayer.comm()->MyPID() + 1000 * proc, ( boost::dynamic_pointer_cast <Epetra_MpiComm> (M_displayer.comm() ) )->Comm() );
            MPI_Recv ( &recvBuffer[ proc ][ 0 ], recvBuffer[ proc ].size(), MPI_DOUBLE, proc, proc + 1000 * M_displayer.comm()->MyPID(), ( boost::dynamic_pointer_cast <Epetra_MpiComm> (M_displayer.comm() ) )->Comm(), &status );
        }

    }

    // store data in the M_ghostDataMap member
    for ( buffer_Type::const_iterator procIt = recvBuffer.begin(); procIt != recvBuffer.end(); ++procIt )
    {
        UInt count ( 0 );
        for ( ghostDataContainer_Type::const_iterator dataIt = procIt->second.begin(); dataIt != procIt->second.end(); ++dataIt, count++ )
        {
            ID ghostFaceId = ghostDataMap[ procIt->first ][ count ].localFacetId;
            M_ghostDataMap[ ghostFaceId ] = *dataIt;
        }
    }


    // DEBUG
    //    std::ofstream outf ( ( "hype." + boost::lexical_cast<std::string> ( M_displayer.comm()->MyPID() ) + ".out" ).c_str() );
    //    outf << M_uOld->epetraVector() << std::endl << std::endl;
    //
    //    for ( buffer_Type::const_iterator procIt = recvBuffer.begin(); procIt != recvBuffer.end(); ++procIt )
    //    {
    //        outf << "proc " << procIt->first << " size " << recvBuffer[ procIt->first ].size() << std::endl;
    //        UInt count ( 0 );
    //        for ( procData_Type::const_iterator dataIt = procIt->second.begin(); dataIt != procIt->second.end(); ++dataIt, count++ )
    //        {
    //            ID ghostFaceId = ghostDataMap[ procIt->first ][ count ].localFacetId;
    //            ID elementId ( M_FESpace.mesh()->faceElement( ghostFaceId, 0 ) );
    //            outf << "lid " << elementId << " " << "gid " << ghostDataMap[ procIt->first ][ count ].ghostElementLocalId << " " << *dataIt << std::endl;
    //        }
    //    }

} // updateGhostValues