TVerdict CRtpNoBind::doTestStepL()
/**
 * Opens an RTP socket on the sub-connection and sends a datagram without
 * ever binding the socket locally.
 * @return - TVerdict code: EPass if the unbound send completes with
 *           KErrNone, EFail otherwise.
 */
	{
	SetTestStepResult(EFail);

	if(InitSubConL() != KErrNone)
		{
		return TestStepResult();
		}

	const TInt openErr = iRtpSocket.Open(iSocketServ, KAfInet, KSockDatagram,
	                                     KProtocolRtp, iSubCon);
	if(openErr != KErrNone)
		{
		return TestStepResult();
		}

	iIpAddrDest1.SetPort(iDestPort1);

	// Send a small payload; the implicit bind performed by SendTo is what
	// this step exercises.
	TBuf8<1024> payload((TUint8*)"Hello World");
	TRequestStatus sendStatus;
	iRtpSocket.SendTo(payload, iIpAddrDest1, NULL, sendStatus);
	User::WaitForRequest(sendStatus);

	if(sendStatus.Int() == KErrNone)
		{
		SetTestStepResult(EPass);
		}

	return TestStepResult();
	}
TVerdict CRtcpSendRecv::doTestStepL()
/**
 * Sends a small RTCP packet, receives the reply, then sends an oversized
 * (2500 byte) packet on the same socket.
 * @return - TVerdict code: EPass if all three operations complete with
 *           KErrNone, EFail otherwise.
 */
	{
	SetTestStepResult(EFail);

	if(KErrNone == InitSocketsL())
		{
		// RTCP conventionally runs on the RTP port + 1.
		iIpAddrDest1.SetPort(iDestPort1+1);

		TBuf8<3000> sendBuf((TUint8*)"Hello World");
		TRequestStatus status;
		iRtcpSocket.SendTo(sendBuf, iIpAddrDest1, NULL, status);
		User::WaitForRequest(status);

		if(status.Int() == KErrNone)
			{
			/* Now do a Receive */
			sendBuf.FillZ();
			iRtcpSocket.RecvFrom(sendBuf,iIpAddrDest1,NULL,status);
			User::WaitForRequest(status);

			if(status.Int() == KErrNone)
				{
				// Grow the descriptor to 2500 bytes to exercise a large RTCP
				// send; bytes beyond the received/zeroed region are
				// unspecified, which is acceptable for this test.
				sendBuf.SetLength(2500);
				// Fix: reuse the outer request status instead of declaring a
				// second local TRequestStatus that shadowed it.
				iRtcpSocket.SendTo(sendBuf, iIpAddrDest1, NULL, status);
				User::WaitForRequest(status);

				if(status.Int() == KErrNone)
					{
					SetTestStepResult(EPass);
					}
				}
			}
		}

	return TestStepResult();
	}
int32_ Cx_ZmqBackend::ConsoleSendToWorker(COMMAND_LINE& command_line)
{
	// Builds an '@'-delimited message from the console command tokens,
	// presumably to forward it to a worker over ZMQ.
	// NOTE(review): this definition is truncated in the visible source; the
	// actual send and return logic follows outside this view.

	// Bail out with an error code if the ZMQ manager object is unavailable.
	GET_OBJECT_RET(ZmqMgr, iZmqMgr, LWDP_GET_OBJECT_ERROR);

	COMMAND_LINE::iterator iter;
	std::string sendBuf("");

	// Append every token followed by an '@' separator (note: this leaves a
	// trailing '@' after the last token).
	FOREACH_STL(iter, command_line)
	{
		sendBuf.append(*iter);
		sendBuf.append("@");
	}
void Sender::swipe() { // counter static int counterSwipe = counter.add("swipe"); // check available writes for (auto it = mpOutQueues->begin(); it != mpOutQueues->end(); ++it) { sendBuf(it->first, mpBufs[it->first]); } counter.inc(counterSwipe); }
void FileServer::loopResponse() { _responseRunning = true; while(!_responseEndThread) { _responseBufListMutex.lock(); size_t responseSize = _responseBufList.size(); _responseBufListMutex.unlock(); if(0 == responseSize) { usleep(500); /* error */ continue; } _responseBufListMutex.lock(); ResponseStruct responseBuf = _responseBufList.front(); _responseBufList.pop_front(); _responseBufListMutex.unlock(); //send response std::string responseString; runtime::FileSendComplete fileSendProtoComplete; fileSendProtoComplete.set_file_name(responseBuf.fileResponseProto.file_name()); fileSendProtoComplete.set_result(responseBuf.fileResponseProto.result()); fileSendProtoComplete.set_error_num(responseBuf.fileResponseProto.error_num()); fileSendProtoComplete.SerializeToString(&responseString); char dataBuf[1024] = {0}; struct ResponseHeaderStruct { char startFlag[12]; unsigned short protoNum; unsigned short protoBufLen; }; ResponseHeaderStruct responseHeader; strcpy(responseHeader.startFlag, PROTO_START); responseHeader.protoNum = PROTONUM::FILESENDCOMPLETE; responseHeader.protoBufLen = (unsigned short) responseString.size(); memcpy(dataBuf, &responseHeader, sizeof(responseHeader)); memcpy(dataBuf + sizeof(responseHeader), responseString.c_str(), responseString.size()); sendBuf(responseBuf.fd, dataBuf, sizeof(responseHeader) + responseString.size()); cocos2d::log("responseFile:%s,result:%d", fileSendProtoComplete.file_name().c_str(), fileSendProtoComplete.result()); } _responseRunning = false; }
TVerdict CRtcpConnect::doTestStepL()
/**
 * Connects the RTCP socket, verifies the remote name, performs a
 * send/receive round trip, then checks that a second Connect to a
 * different port fails.
 * @return - TVerdict code: EPass when every step behaves as expected,
 *           EFail otherwise.
 */
	{
	SetTestStepResult(EFail);

	if(InitSocketsL() != KErrNone)
		{
		return TestStepResult();
		}

	// RTCP conventionally runs on the RTP port + 1.
	iIpAddrDest1.SetPort(iDestPort1+1);

	TRequestStatus status;
	iRtcpSocket.Connect(iIpAddrDest1,status);
	User::WaitForRequest(status);
	if(status.Int() != KErrNone)
		{
		return TestStepResult();
		}

	// The connected socket must report the address we connected to.
	TInetAddr remoteAddr;
	iRtcpSocket.RemoteName(remoteAddr);
	if(!(remoteAddr == iIpAddrDest1))
		{
		return TestStepResult();
		}

	TBuf8<1024> ioBuf((TUint8*)"Hello World");
	iRtcpSocket.Send(ioBuf, NULL, status);
	User::WaitForRequest(status);
	if(status.Int() != KErrNone)
		{
		return TestStepResult();
		}

	/* Now do a Receive */
	ioBuf.FillZ();
	iRtcpSocket.Recv(ioBuf,NULL,status);
	User::WaitForRequest(status);
	if(status.Int() != KErrNone)
		{
		return TestStepResult();
		}

	/* Next time Connect shud fail */
	iIpAddrDest1.SetPort(iDestPort1+2);
	iRtcpSocket.Connect(iIpAddrDest1,status);
	User::WaitForRequest(status);
	if(status.Int() != KErrNone)
		{
		SetTestStepResult(EPass);
		}

	return TestStepResult();
	}
void testRing()
{
    // Endless manual stress test: interleaves random-sized writes of an
    // incrementing byte pattern with random-sized validated reads on a
    // TransferRingBuffer, printing buffer state at each step.
    TransferRingBuffer sendBuf(100);
    unsigned int charCounter = 0;
    unsigned int readCounter = 0;

    srand(time(NULL));

    for(;;)
    {
        void* region;
        int avail;

        printf("-----------------\n");
        sendBuf.printInfo();

        // Write a random chunk of the incrementing 0..255 pattern.
        if(sendBuf.startWrite(region, avail))
        {
            const int chunk = rand() % avail + 1;
            unsigned char* bytes = (unsigned char*)region;
            for(int i = 0; i < chunk; ++i)
            {
                bytes[i] = charCounter;
                charCounter = (charCounter + 1) & 0xff;
            }
            sendBuf.endWrite(chunk);
        }

        sendBuf.printInfo();

        // Read a random chunk back and validate its contents.
        if(sendBuf.startRead(region, avail))
        {
            const int chunk = rand() % avail + 1;
            checkBuffer1((unsigned char*)region, chunk, readCounter);
            checkBuffer2((unsigned char*)region, chunk);
            sendBuf.endRead(chunk);
            sendBuf.printInfo();
        }
    }
}
void fillFinishSirfSend(circular* buf, int port)
{
    // Finalize the SiRF frame held in the circular buffer, then
    // transmit it on the given port.
    fillFinishSirfCore(buf);
    sendBuf(port, buf);
}
void MonomeHost::sendMessage(uint8_t byte1, uint8_t byte2)
{
    // Pack the two-byte message and hand it to the raw buffer sender.
    uint8_t msg[2];
    msg[0] = byte1;
    msg[1] = byte2;
    sendBuf(msg, 2);
}
inline void DistMatrix<T,MD,STAR,Int>::PrintBase ( std::ostream& os, const std::string msg ) const { #ifndef RELEASE PushCallStack("[MD,* ]::PrintBase"); #endif const elem::Grid& g = this->Grid(); if( g.Rank() == 0 && msg != "" ) os << msg << std::endl; const Int height = this->Height(); const Int width = this->Width(); const Int localHeight = this->LocalHeight(); const Int lcm = g.LCM(); if( height == 0 || width == 0 || !g.InGrid() ) { #ifndef RELEASE PopCallStack(); #endif return; } std::vector<T> sendBuf(height*width,0); if( this->Participating() ) { const Int colShift = this->ColShift(); const T* thisLocalBuffer = this->LockedLocalBuffer(); const Int thisLDim = this->LocalLDim(); #ifdef HAVE_OPENMP #pragma omp parallel for #endif for( Int j=0; j<width; ++j ) { T* destCol = &sendBuf[colShift+j*height]; const T* sourceCol = &thisLocalBuffer[j*thisLDim]; for( Int iLocal=0; iLocal<localHeight; ++iLocal ) destCol[iLocal*lcm] = sourceCol[iLocal]; } } // If we are the root, allocate a receive buffer std::vector<T> recvBuf; if( g.Rank() == 0 ) recvBuf.resize( height*width ); // Sum the contributions and send to the root mpi::Reduce ( &sendBuf[0], &recvBuf[0], height*width, mpi::SUM, 0, g.Comm() ); if( g.Rank() == 0 ) { // Print the data for( Int i=0; i<height; ++i ) { for( Int j=0; j<width; ++j ) os << recvBuf[i+j*height] << " "; os << "\n"; } os << std::endl; } #ifndef RELEASE PopCallStack(); #endif }
tmp<Field<Type> > ggiGAMGInterface::fastReduce(const UList<Type>& ff) const
{
    // Algorithm
    // Local processor contains faceCells part of the zone and requires
    // zoneAddressing part.
    // For fast communications, each processor will send the faceCells and
    // zoneAddressing to the master. Master will assemble global zone
    // and send off messages to all processors containing only
    // the required data
    // HJ, 24/Jun/2011
    // NOTE(review): the tail of this function (the slave branch and the
    // final return) lies beyond the visible source.

    // Sanity check: the input field must match the interface size.
    if (ff.size() != this->size())
    {
        FatalErrorIn
        (
            "tmp<Field<Type> > ggiGAMGInterface::fastReduce"
            "("
            " const UList<Type>& ff"
            ") const"
        )   << "Wrong field size. ff: " << ff.size()
            << " interface: " << this->size()
            << abort(FatalError);
    }

    if (localParallel() || !Pstream::parRun())
    {
        // Field remains identical: no parallel communications required
        tmp<Field<Type> > tresult(new Field<Type>(ff));

        return tresult;
    }

    // Execute reduce if not already done
    if (!initReduce_)
    {
        initFastReduce();
    }

    if (Pstream::master())
    {
        // Master collects information and distributes data.
        Field<Type> expandField(zoneSize(), pTraits<Type>::zero);

        // Insert master processor
        const labelList& za = zoneAddressing();

        forAll (za, i)
        {
            expandField[za[i]] = ff[i];
        }

        // Master receives and inserts data from all processors for which
        // receiveAddr contains entries
        for (label procI = 1; procI < Pstream::nProcs(); procI++)
        {
            const labelList& curRAddr = receiveAddr_[procI];

            if (!curRAddr.empty())
            {
                Field<Type> receiveBuf(curRAddr.size());

                // Opt: reconsider mode of communication
                IPstream::read
                (
                    Pstream::blocking,
                    procI,
                    reinterpret_cast<char*>(receiveBuf.begin()),
                    receiveBuf.byteSize()
                );

                // Insert received information
                forAll (curRAddr, i)
                {
                    expandField[curRAddr[i]] = receiveBuf[i];
                }
            }
        }

        // Expanded field complete, send required data to other processors
        for (label procI = 1; procI < Pstream::nProcs(); procI++)
        {
            const labelList& curSAddr = sendAddr_[procI];

            if (!curSAddr.empty())
            {
                // Gather only the entries this processor actually needs.
                Field<Type> sendBuf(curSAddr.size());

                forAll (curSAddr, i)
                {
                    sendBuf[i] = expandField[curSAddr[i]];
                }

                // Opt: reconsider mode of communication
                OPstream::write
                (
                    Pstream::blocking,
                    procI,
                    reinterpret_cast<const char*>(sendBuf.begin()),
                    sendBuf.byteSize()
                );
            }
        }