void CSensorPublisher::HandleWaitSetConditions()
{
  DDS::ConditionSeq activeConditions;
  DDS::ReturnCode_t ret;

  // Set the timeout to 0, since a different mechanism drives our loop update
  // rate; this call only collects conditions that are already triggered.
  DDS::Duration_t timeout{ 0, 0 };

  // Poll for active conditions.
  ret = m_pWaitSet->wait( activeConditions, timeout );
  if( ret != DDS::RETCODE_OK )
  {
    // Usually RETCODE_TIMEOUT: no condition was active. Do nothing, as we
    // expect this to happen frequently with a zero timeout.
  }
  else
  {
    // Some condition was triggered, so we loop through the active conditions.
    for( int i = 0; i < activeConditions.length(); ++i )
    {
      try
      {
        // Call the appropriate handler for each active condition.
        // Warning: make sure a callback is registered for every status mask
        // enabled on the WaitSet; the try/catch reports any condition that
        // has no registered handler.
        m_waitSetHandlers.at( (DDS::Condition*)activeConditions[ i ] )( activeConditions[ i ] );
      }
      catch( const std::exception &e )
      {
        LOG( ERROR ) << "Exception handling WaitSet condition: " << e.what();
      }
    }
  }
}
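// --------------------------------------------------------------------------
// Sketch only (not part of the original source): one way the handler map
// consulted above might be populated. The names and types assumed here
// (m_waitSetHandlers as a std::map<DDS::Condition*,
// std::function<void(DDS::Condition*)>>, m_pWaitSet as the DDS::WaitSet, and
// the RegisterStatusHandler helper itself) are illustrative assumptions.
void CSensorPublisher::RegisterStatusHandler( DDS::Entity* pEntity,
                                              DDS::StatusMask mask,
                                              std::function<void(DDS::Condition*)> handler )
{
  // Enable only the statuses this handler is meant to service.
  DDS::StatusCondition* pCondition = pEntity->get_statuscondition();
  pCondition->set_enabled_statuses( mask );

  // Attach the condition to the WaitSet and remember its handler so that
  // HandleWaitSetConditions() can look it up when the condition triggers.
  m_pWaitSet->attach_condition( pCondition );
  m_waitSetHandlers[ pCondition ] = std::move( handler );
}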
void
DDS_WaitSet_i::convert_conditions (const DDSConditionSeq & dds_conditions,
                                   ::DDS::ConditionSeq & conditions)
{
  DDS4CCM_TRACE ("DDS_WaitSet_i::convert_conditions");

  conditions.length (dds_conditions.length ());
  for (DDS_Long i = 0; i < dds_conditions.length (); ++i)
    {
      DDSQueryCondition * dds_qc =
        dynamic_cast <DDSQueryCondition *> (dds_conditions[i]);
      if (dds_qc)
        {
          ::DDS::QueryCondition_var cond;
          ACE_NEW_THROW_EX (cond,
                            DDS_QueryCondition_i (dds_qc, ::DDS::DataReader::_nil ()),
                            ::CORBA::NO_MEMORY ());
          conditions[i] = ::DDS::QueryCondition::_duplicate (cond.in ());
        }
      else
        {
          DDSReadCondition * dds_rc =
            dynamic_cast <DDSReadCondition *> (dds_conditions[i]);
          if (dds_rc)
            {
              ::DDS::ReadCondition_var cond;
              ACE_NEW_THROW_EX (cond,
                                DDS_ReadCondition_i (dds_rc, ::DDS::DataReader::_nil ()),
                                ::CORBA::NO_MEMORY ());
              conditions[i] = ::DDS::ReadCondition::_duplicate (cond.in ());
            }
        }
    }
}
void
Publisher::run()
{
  DDS::Duration_t timeout = { DDS::DURATION_INFINITE_SEC, DDS::DURATION_INFINITE_NSEC };
  DDS::ConditionSeq conditions;
  DDS::PublicationMatchedStatus matches = { 0, 0, 0, 0, 0 };
  const int readers_per_publication = 2;
  unsigned int cummulative_count = 0;
  do {
    if( this->options_.verbose()) {
      ACE_DEBUG((LM_DEBUG,
        ACE_TEXT("(%P|%t) Publisher::run() - ")
        ACE_TEXT("%d of %d subscriptions attached, waiting for more.\n"),
        cummulative_count,
        this->publications_.size() * readers_per_publication
      ));
    }

    if( DDS::RETCODE_OK != this->waiter_->wait( conditions, timeout)) {
      ACE_ERROR((LM_ERROR,
        ACE_TEXT("(%P|%t) ERROR: Publisher::run() - ")
        ACE_TEXT("failed to synchronize at start of test.\n")
      ));
      throw BadSyncException();
    }

    for( unsigned long index = 0; index < conditions.length(); ++index) {
      DDS::StatusCondition_var condition
        = DDS::StatusCondition::_narrow( conditions[ index].in());
      DDS::Entity_var writer_entity = condition->get_entity();
      DDS::DataWriter_var writer = DDS::DataWriter::_narrow( writer_entity);
      if( !CORBA::is_nil( writer.in())) {
        DDS::StatusMask changes = writer->get_status_changes();
        if( changes & DDS::PUBLICATION_MATCHED_STATUS) {
          if (writer->get_publication_matched_status(matches) != ::DDS::RETCODE_OK) {
            ACE_ERROR ((LM_ERROR,
              "ERROR: failed to get publication matched status\n"));
            ACE_OS::exit (1);
          }
          cummulative_count += matches.current_count_change;
        }
      }
    }

    // We know that there are 2 subscriptions matched with each publication.
  } while( cummulative_count < (readers_per_publication * this->publications_.size()));

  // Kluge to bias the race between BuiltinTopic samples and application
  // samples towards the BuiltinTopics during association establishment.
  // ACE_OS::sleep( 2);

  if( this->options_.verbose()) {
    ACE_DEBUG((LM_DEBUG,
      ACE_TEXT("(%P|%t) Publisher::run() - ")
      ACE_TEXT("starting to publish samples with %d matched subscriptions.\n"),
      cummulative_count
    ));
  }

  for( unsigned int index = 0; index < this->publications_.size(); ++index) {
    this->publications_[ index]->start();
  }

  // Allow some traffic to occur before making any wait() calls.
  ACE_OS::sleep( 2);

  ::DDS::Duration_t delay = { 5, 0 };  // Wait for up to 5 seconds.
  if (this->options_.publisher()) {
    DDS::ReturnCode_t error = this->publisher_->wait_for_acknowledgments(delay);
    if (error != DDS::RETCODE_OK) {
      ACE_DEBUG((LM_DEBUG,
        ACE_TEXT("(%P|%t) ERROR: Publisher::run() - ")
        ACE_TEXT("publisher wait failed with code: %d.\n"),
        error));
      ++this->status_;
    }
  } else {
    for( unsigned int index = 0; index < this->publications_.size(); ++index) {
      // First wait on this writer.
      ::DDS::ReturnCode_t result = this->publications_[ index]->wait_for_acks( delay);
      if( result != ::DDS::RETCODE_OK) {
        ACE_DEBUG((LM_DEBUG,
          ACE_TEXT("(%P|%t) ERROR: Publisher::run() - ")
          ACE_TEXT("publication %d wait failed with code: %d.\n"),
          index,
          result
        ));
        ++this->status_;
      }
    }
  }

  // Signal the writers to terminate.
  for( unsigned int index = 0; index < this->publications_.size(); ++index) {
    this->publications_[ index]->stop();
  }

  // Additional wait() calls will be made by each thread during shutdown.
  // Separate loop so the termination messages can be handled concurrently.
  for( unsigned int index = 0; index < this->publications_.size(); ++index) {
    // Join and clean up.
    this->publications_[ index]->wait();
    ACE_DEBUG((LM_DEBUG,
      ACE_TEXT("(%P|%t) Publisher::run() - ")
      ACE_TEXT("publication %d stopping after sending %d messages.\n"),
      index,
      this->publications_[ index]->messages()
    ));
    this->status_ += this->publications_[ index]->status();
    delete this->publications_[ index];
  }
  this->publications_.clear();

  if( this->options_.verbose()) {
    ACE_DEBUG((LM_DEBUG,
      ACE_TEXT("(%P|%t) Publisher::run() - ")
      ACE_TEXT("finished publishing samples.\n")
    ));
  }
}
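// --------------------------------------------------------------------------
// Note (sketch, not in the original source): the wait loop above only sees
// PUBLICATION_MATCHED_STATUS if each writer's StatusCondition was attached
// to waiter_ beforehand, typically where the DataWriters are created.
// Assuming waiter_ is a DDS::WaitSet_var and writer is one of the created
// DataWriters, that setup looks roughly like:
//
//   DDS::StatusCondition_var status = writer->get_statuscondition();
//   status->set_enabled_statuses( DDS::PUBLICATION_MATCHED_STATUS);
//   if( this->waiter_->attach_condition( status.in()) != DDS::RETCODE_OK) {
//     ACE_ERROR((LM_ERROR,
//       ACE_TEXT("(%P|%t) ERROR: failed to attach status condition.\n")
//     ));
//   }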
int
ACE_TMAIN (int argc, ACE_TCHAR *argv[])
{
  try
    {
      DDS::DomainParticipantFactory_var dpf;
      DDS::DomainParticipant_var participant;

      dpf = TheParticipantFactoryWithArgs(argc, argv);
      participant = dpf->create_participant(11,
                                            PARTICIPANT_QOS_DEFAULT,
                                            DDS::DomainParticipantListener::_nil(),
                                            ::OpenDDS::DCPS::DEFAULT_STATUS_MASK);
      if (CORBA::is_nil (participant.in ())) {
        cerr << "create_participant failed." << endl;
        return 1;
      }

      Messenger::MessageTypeSupportImpl* mts_servant =
        new Messenger::MessageTypeSupportImpl;

      if (DDS::RETCODE_OK != mts_servant->register_type(participant.in (), "")) {
        cerr << "Failed to register the Messenger::MessageTypeSupport." << endl;
        exit(1);
      }

      CORBA::String_var type_name = mts_servant->get_type_name ();

      DDS::TopicQos topic_qos;
      participant->get_default_topic_qos(topic_qos);
      DDS::Topic_var topic =
        participant->create_topic("Movie Discussion List",
                                  type_name.in (),
                                  topic_qos,
                                  DDS::TopicListener::_nil(),
                                  ::OpenDDS::DCPS::DEFAULT_STATUS_MASK);
      if (CORBA::is_nil (topic.in ())) {
        cerr << "Failed to create_topic." << endl;
        exit(1);
      }

      // Create the subscriber and attach to the corresponding transport.
      DDS::Subscriber_var sub =
        participant->create_subscriber (SUBSCRIBER_QOS_DEFAULT,
                                        DDS::SubscriberListener::_nil(),
                                        ::OpenDDS::DCPS::DEFAULT_STATUS_MASK);
      if (CORBA::is_nil (sub.in ())) {
        cerr << "Failed to create_subscriber." << endl;
        exit(1);
      }

      // ----------------------------------------------
      {
        // Attempt to create a DataReader with intentionally incompatible QoS.
        DDS::DataReaderQos bogus_qos;
        sub->get_default_datareader_qos (bogus_qos);

        // Set up a 2 second recurring deadline.  The DataReader itself is
        // created successfully, but no association should form, since the
        // requested deadline period is less than the test-configured offered
        // deadline period.
        bogus_qos.deadline.period.sec = 2;
        bogus_qos.deadline.period.nanosec = 0;

        DDS::DataReader_var tmp_dr =
          sub->create_datareader (topic.in (),
                                  bogus_qos,
                                  DDS::DataReaderListener::_nil (),
                                  ::OpenDDS::DCPS::DEFAULT_STATUS_MASK);
        if (CORBA::is_nil (tmp_dr.in ())) {
          cerr << "ERROR: DataReader creation with bogus QoS failed." << endl;
          exit (1);
        }

        DDS::StatusCondition_var cond = tmp_dr->get_statuscondition();
        cond->set_enabled_statuses(DDS::REQUESTED_INCOMPATIBLE_QOS_STATUS);
        DDS::WaitSet_var ws = new DDS::WaitSet;
        ws->attach_condition(cond);
        DDS::Duration_t four_sec = {4, 0};
        DDS::ConditionSeq active;
        ws->wait(active, four_sec);

        // Check that the incompatible deadline was correctly flagged.
        if ((active.length() == 0) || (active[0] != cond)) {
          cerr << "ERROR: Failed to get requested incompatible qos status" << endl;
          exit (1);
        }

        DDS::RequestedIncompatibleQosStatus incompatible_status;
        if (tmp_dr->get_requested_incompatible_qos_status (incompatible_status)
            != ::DDS::RETCODE_OK) {
          cerr << "ERROR: Failed to get requested incompatible qos status" << endl;
          exit (1);
        }

        DDS::QosPolicyCountSeq const & policies = incompatible_status.policies;
        bool incompatible_deadline = false;
        CORBA::ULong const len = policies.length ();
        for (CORBA::ULong i = 0; i < len; ++i) {
          if (policies[i].policy_id == DDS::DEADLINE_QOS_POLICY_ID) {
            incompatible_deadline = true;
            break;
          }
        }

        if (!incompatible_deadline) {
          cerr << "ERROR: A DataReader/Writer association was created " << endl
               << "       despite use of deliberately incompatible deadline "
               << "QoS." << endl;
          exit (1);
        }
      }

      // ----------------------------------------------
      // Create the listener.
      DDS::DataReaderListener_var listener (new DataReaderListenerImpl);
      DataReaderListenerImpl* listener_servant =
        dynamic_cast<DataReaderListenerImpl*>(listener.in());

      if (CORBA::is_nil (listener.in ())) {
        cerr << "ERROR: listener is nil." << endl;
        exit(1);
      }

      DDS::DataReaderQos dr_qos; // Good QoS.
      sub->get_default_datareader_qos (dr_qos);

      assert (DEADLINE_PERIOD.sec > 1); // Requirement for the test.

      // First data reader will have a listener to test listener
      // callback on deadline expiration.
      DDS::DataReader_var dr1 =
        sub->create_datareader (topic.in (),
                                dr_qos,
                                listener.in (),
                                ::OpenDDS::DCPS::DEFAULT_STATUS_MASK);

      // Second data reader will not have a listener to test proper
      // handling of a nil listener in the deadline handling code.
      DDS::DataReader_var dr2 =
        sub->create_datareader (topic.in (),
                                dr_qos,
                                DDS::DataReaderListener::_nil (),
                                ::OpenDDS::DCPS::DEFAULT_STATUS_MASK);

      if (CORBA::is_nil (dr1.in ()) || CORBA::is_nil (dr2.in ())) {
        cerr << "ERROR: create_datareader failed." << endl;
        exit(1);
      }

      dr_qos.deadline.period.sec = DEADLINE_PERIOD.sec;
      dr_qos.deadline.period.nanosec = DEADLINE_PERIOD.nanosec;

      // Reset qos to have deadline. The watch dog now starts.
      if (dr1->set_qos (dr_qos) != ::DDS::RETCODE_OK
          || dr2->set_qos (dr_qos) != ::DDS::RETCODE_OK) {
        cerr << "ERROR: set deadline qos failed." << endl;
        exit(1);
      }

      Messenger::MessageDataReader_var message_dr1 =
        Messenger::MessageDataReader::_narrow(dr1.in());
      Messenger::MessageDataReader_var message_dr2 =
        Messenger::MessageDataReader::_narrow(dr2.in());

      int max_attempts = 10;
      int attempts = 0;

      // Synchronize with publisher. Wait until both associate with DataWriter.
      while (attempts < max_attempts) {
        ::DDS::SubscriptionMatchedStatus status1;
        ::DDS::SubscriptionMatchedStatus status2;
        if (dr1->get_subscription_matched_status (status1) == ::DDS::RETCODE_OK
            && dr2->get_subscription_matched_status (status2) == ::DDS::RETCODE_OK) {
          if (status1.total_count == 1 && status2.total_count == 1)
            break;
          ++attempts;
          ACE_OS::sleep (1);
        }
        else {
          cerr << "ERROR: Failed to get subscription matched status" << endl;
          exit (1);
        }
      }

      if (attempts >= max_attempts) {
        cerr << "ERROR: failed to make associations." << endl;
        exit (1);
      }

      // ----------------------------------------------

      ACE_DEBUG ((LM_DEBUG,
                  ACE_TEXT ("(%P|%t) Subscriber: sleep for %d milliseconds\n"),
                  SLEEP_DURATION.msec()));

      // Wait for deadline periods to expire.
      ACE_OS::sleep (SLEEP_DURATION);

      ACE_DEBUG ((LM_DEBUG,
                  ACE_TEXT ("(%P|%t) Subscriber: now verify missed ")
                  ACE_TEXT ("deadline status\n")));

      DDS::RequestedDeadlineMissedStatus deadline_status1;
      if (dr1->get_requested_deadline_missed_status(deadline_status1)
          != ::DDS::RETCODE_OK) {
        cerr << "ERROR: Failed to get requested deadline missed status" << endl;
        exit (1);
      }

      DDS::RequestedDeadlineMissedStatus deadline_status2;
      if (dr2->get_requested_deadline_missed_status(deadline_status2)
          != ::DDS::RETCODE_OK) {
        cerr << "ERROR: Failed to get requested deadline missed status" << endl;
        exit (1);
      }

      ACE_DEBUG ((LM_DEBUG,
                  ACE_TEXT ("(%P|%t) Subscriber: got missed ")
                  ACE_TEXT ("deadline status\n")));

      Messenger::Message message;
      message.subject_id = 99;
      ::DDS::InstanceHandle_t dr1_hd1 = message_dr1->lookup_instance (message);
      ::DDS::InstanceHandle_t dr2_hd1 = message_dr2->lookup_instance (message);
      message.subject_id = 100;
      ::DDS::InstanceHandle_t dr1_hd2 = message_dr1->lookup_instance (message);
      ::DDS::InstanceHandle_t dr2_hd2 = message_dr2->lookup_instance (message);

      if (deadline_status1.last_instance_handle != dr1_hd1
          && deadline_status1.last_instance_handle != dr1_hd2) {
        cerr << "ERROR: Expected DR1 last instance handle ("
             << dr1_hd1 << " or " << dr1_hd2 << ") did not occur ("
             << deadline_status1.last_instance_handle << ")" << endl;
        exit (1);
      }

      if (deadline_status2.last_instance_handle != dr2_hd1
          && deadline_status2.last_instance_handle != dr2_hd2) {
        cerr << "ERROR: Expected DR2 last instance handle ("
             << dr2_hd1 << " or " << dr2_hd2 << ") did not occur ("
             << deadline_status2.last_instance_handle << ")" << endl;
        exit (1);
      }

      // The reader deadline period is 5 seconds and the writer writes each
      // instance every 9 seconds, so after SLEEP_DURATION (11 seconds) the
      // number of missed deadlines should be 1 per instance.
      if (deadline_status1.total_count != NUM_INSTANCE
          || deadline_status2.total_count != NUM_INSTANCE) {
        cerr << "ERROR: Expected number of missed requested "
             << "deadlines (" << NUM_INSTANCE << ") did" << endl
             << "       not occur ("
             << deadline_status1.total_count << " and/or "
             << deadline_status2.total_count << ")." << endl;
        exit (1);
      }

      if (deadline_status1.total_count_change != NUM_INSTANCE
          || deadline_status2.total_count_change != NUM_INSTANCE) {
        cerr << "ERROR: Incorrect missed requested "
             << "deadline count change" << endl
             << "       (" << deadline_status1.total_count_change
             << " and/or " << deadline_status2.total_count_change
             << " instead of " << NUM_INSTANCE << ")." << endl;
        exit (1);
      }

      // From here the writers should continue writing all samples at a
      // .5 second interval, so no additional deadlines should be missed.
      ACE_Time_Value no_miss_period = num_messages * write_interval;

      ACE_DEBUG ((LM_DEBUG,
                  ACE_TEXT ("(%P|%t) Subscriber: sleep for %d msec\n"),
                  (SLEEP_DURATION + no_miss_period).msec()));

      // Wait for another set of deadline periods (5 + 11 secs).
      // During this period, the writers continue writing all samples at a
      // .5 second interval.
      ACE_OS::sleep (SLEEP_DURATION + no_miss_period);

      ACE_DEBUG ((LM_DEBUG,
                  ACE_TEXT ("(%P|%t) Subscriber: now verify missed ")
                  ACE_TEXT ("deadline status\n")));

      if ((dr1->get_requested_deadline_missed_status(deadline_status1)
           != ::DDS::RETCODE_OK)
          || (dr2->get_requested_deadline_missed_status(deadline_status2)
              != ::DDS::RETCODE_OK)) {
        cerr << "ERROR: failed to get requested deadline missed status" << endl;
        exit (1);
      }

      ACE_DEBUG ((LM_DEBUG,
                  ACE_TEXT ("(%P|%t) Subscriber: got missed ")
                  ACE_TEXT ("deadline status\n")));

      if (deadline_status1.last_instance_handle != dr1_hd1
          && deadline_status1.last_instance_handle != dr1_hd2) {
        cerr << "ERROR: Expected DR1 last instance handle ("
             << dr1_hd1 << " or " << dr1_hd2 << ") did not occur ("
             << deadline_status1.last_instance_handle << ")" << endl;
        exit (1);
      }

      if (deadline_status2.last_instance_handle != dr2_hd1
          && deadline_status2.last_instance_handle != dr2_hd2) {
        cerr << "ERROR: Expected DR2 last instance handle ("
             << dr2_hd1 << " or " << dr2_hd2 << ") did not occur ("
             << deadline_status2.last_instance_handle << ")" << endl;
        exit (1);
      }

      if (deadline_status1.total_count != 3 * NUM_INSTANCE
          || deadline_status2.total_count != 3 * NUM_INSTANCE) {
        cerr << "ERROR: Another expected number of missed requested "
             << "deadlines (" << 3 * NUM_INSTANCE << ")" << endl
             << "       did not occur ("
             << deadline_status1.total_count << " and/or "
             << deadline_status2.total_count << ")." << endl;
        exit (1);
      }

      if (deadline_status1.total_count_change != 2 * NUM_INSTANCE
          || deadline_status2.total_count_change != 2 * NUM_INSTANCE) {
        cerr << "ERROR: Incorrect missed requested "
             << "deadline count" << endl
             << "       change (" << deadline_status1.total_count_change
             << " and/or " << deadline_status2.total_count_change
             << " instead of " << 2 * NUM_INSTANCE << ")." << endl;
        exit (1);
      }

      int expected = 10;
      while (listener_servant->num_arrived() < expected) {
        ACE_OS::sleep (1);
      }

      if (!CORBA::is_nil (participant.in ())) {
        participant->delete_contained_entities();
      }
      if (!CORBA::is_nil (dpf.in ())) {
        dpf->delete_participant(participant.in ());
      }

      ACE_OS::sleep(2);

      TheServiceParticipant->shutdown ();
    }
  catch (CORBA::Exception& e)
    {
      cerr << "SUB: Exception caught in main ():" << endl << e << endl;
      return 1;
    }

  return 0;
}
//====================================================================
//== main
//====================================================================
int main(int argc, char * argv[])
{
  Miro::Log::init(argc, argv);
  Miro::Robot::init(argc, argv);

  kn::DdsEntitiesFactorySvcParameters * ddsParams =
    kn::DdsEntitiesFactorySvcParameters::instance();
  ddsParams->defaultLibrary = "RapidQosLibrary";

  kn::DdsSupport::init(argc, argv);

  ddsParams->participants[0].participantName = "RaftFileQueueReceiver";
  ddsParams->participants[0].profile = "RapidDefaultQos";

  if (parseArgs(argc, argv) != 0) {
    return 1;
  }

  kn::DdsEntitiesFactorySvc ddsEntities;
  ddsEntities.init(ddsParams);

  { // make sure our staging and destination directories exist
    FileReceiverParameters* params = FileReceiverParameters::instance();
    QDir dir;
    if( !dir.mkpath(params->dataDestinationDir.c_str()) ) {
      KN_ERROR("Failed to create data destination directory: %s",
               params->dataDestinationDir.c_str());
      return -1;
    }
    if( !dir.mkpath(params->dataStagingDir.c_str()) ) {
      KN_ERROR("Failed to create staging directory: %s",
               params->dataStagingDir.c_str());
      return -1;
    }
  }

  { // make sure that all dds readers/writers end their scope
    // before we finalize the ddsEntities factory
    FileQueueReceiverSamplePublisher statistics(rapid::FILEQUEUERECEIVER_SAMPLE_TOPIC,
                                                "",
                                                "RapidFileQueueReceiverSampleProfile");
    rapid::RapidHelper::initHeader(statistics.event().hdr);

    DDS::WaitSet waitset;
    DDS::ReturnCode_t retcode = DDS_RETCODE_OK;

    FileAnnounceHandler announceHandler;
    FileAnnounceSubscriber announceSubscriber(rapid::FILEANNOUNCE_TOPIC,
                                              Miro::RobotParameters::instance()->name,
                                              "RapidFileAnnounceProfile");
    FileAnnounce::DataReader& announceReader = announceSubscriber.dataReader();
    DDS::StatusCondition * announceCondition = announceReader.get_statuscondition();
    announceCondition->set_enabled_statuses(DDS_DATA_AVAILABLE_STATUS);
    retcode = waitset.attach_condition(announceCondition);
    if (retcode != DDS_RETCODE_OK) {
      KN_FATAL_OSTR("attach_condition error: " << kn::DdsSupport::getError(retcode));
      return -1;
    }

    FileSampleHandler sampleHandler;
    FileSampleSubscriber sampleSubscriber(rapid::FILEQUEUE_SAMPLE_TOPIC,
                                          "",
                                          "RapidFileQueueSampleProfile");
    FileQueueSample::DataReader& sampleReader = sampleSubscriber.dataReader();
    DDS::StatusCondition * sampleCondition = sampleReader.get_statuscondition();
    sampleCondition->set_enabled_statuses(DDS_DATA_AVAILABLE_STATUS);
    retcode = waitset.attach_condition(sampleCondition);
    if (retcode != DDS_RETCODE_OK) {
      KN_FATAL_OSTR("attach_condition error: " << kn::DdsSupport::getError(retcode));
      return -1;
    }

    DDS::ConditionSeq activeConditions;
    DDS::Duration_t const timeout = {1, 0}; // wait for 1 sec max
    int length;
    FileAnnounceSeq fileAnnounces;
    FileQueueSampleSeq samples;
    DDS::SampleInfoSeq infos;

    ACE_Time_Value lastReceiverSampleSend(0);

    Miro::ShutdownHandler shutdownHandler;
    while (!shutdownHandler.isShutdown()) { // main loop
      // some feedback for now...
      cout << "."
           << flush;

      // DDS event-loop processing.
      // The triggered condition(s) will be placed in activeConditions.
      retcode = waitset.wait(activeConditions, timeout);
      if (retcode != DDS_RETCODE_TIMEOUT) {
        if (retcode != DDS_RETCODE_OK) {
          KN_FATAL_OSTR("waitset error: " << kn::DdsSupport::getError(retcode));
          return -1;
        }

        for (int i = 0; i < activeConditions.length(); ++i) {
          // dds subscribers
          if (activeConditions[i] == announceCondition) {
            do {
              announceReader.take(fileAnnounces, infos, 1024,
                                  DDS::ANY_SAMPLE_STATE,
                                  DDS::ANY_VIEW_STATE,
                                  DDS::ANY_INSTANCE_STATE);
              length = fileAnnounces.length();
              for (DDS::Long j = 0; j < length; ++j) {
                if (infos[j].valid_data) {
                  announceHandler(&fileAnnounces[j]);
                }
                else {
                  KN_INFO("DDS FileAnnounce take !valid_data");
                }
              }
              ACE_OS::sleep(1);
              retcode = announceReader.return_loan(fileAnnounces, infos);
              if (retcode != DDS::RETCODE_OK) {
                KN_ERROR_OSTR("DDS FileAnnounce return loan error: "
                              << kn::DdsSupport::getError(retcode));
              }
            } while (length == 1024);
          }

          if (activeConditions[i] == sampleCondition) {
            do {
              sampleReader.take(samples, infos, 1024,
                                DDS::ANY_SAMPLE_STATE,
                                DDS::ANY_VIEW_STATE,
                                DDS::ANY_INSTANCE_STATE);
              length = samples.length();
              for (DDS::Long j = 0; j < length; ++j) {
                if (infos[j].valid_data) {
                  KN_INFO("FileQueueSample");
                  sampleHandler(&samples[j]);
                }
                else {
                  KN_INFO("DDS FileQueueSample take !valid_data");
                }
              }
              retcode = sampleReader.return_loan(samples, infos);
              if (retcode != DDS::RETCODE_OK) {
                KN_ERROR_OSTR("DDS FileQueueSample return loan error: "
                              << kn::DdsSupport::getError(retcode));
              }
            } while (length == 1024);
          }
        }
      }

      StringQueue& completedFiles = sampleHandler.getCompletedFiles();
      while (!completedFiles.empty()) {
        std::string uuid = completedFiles.pop();
        if (announceHandler.hasUuid(uuid)) {
          moveFile(uuid, announceHandler);
          sampleHandler.removeUuid(uuid);
          announceHandler.removeUuid(uuid);
        }
      }

      StringQueue& announcedFiles = announceHandler.getAnnouncedFiles();
      while (!announcedFiles.empty()) {
        std::string uuid = announcedFiles.pop();
        if (sampleHandler.hasUuid(uuid) && sampleHandler.isFileComplete(uuid)) {
          moveFile(uuid, announceHandler);
          sampleHandler.removeUuid(uuid);
          announceHandler.removeUuid(uuid);
        }
      }

      // send statistics
      ACE_Time_Value now = ACE_OS::gettimeofday();
      if (lastReceiverSampleSend + ACE_Time_Value(1) < now) {
        rapid::FileQueueReceiverSample& event = statistics.event();

        DDS::LivelinessChangedStatus livelinessStatus;
        sampleReader.get_liveliness_changed_status(livelinessStatus);
        // sanity checking
        if (livelinessStatus.alive_count > 1) {
          KN_WARN("Subscribed to more than one sample writer.");
        }

        DDS::DataReaderCacheStatus cacheStatus;
        sampleReader.get_datareader_cache_status(cacheStatus);

        rapid::RapidHelper::updateHeader(event.hdr);
        event.connected = livelinessStatus.alive_count > 0;
        event.bufferedChunks = cacheStatus.sample_count;
        event.processedChunks = sampleHandler.getNumTotalSamples();
        event.completedFiles = sampleHandler.getNumCompleted();

        statistics.sendEvent();
        lastReceiverSampleSend = now;
      }
    }

    // print statistics
    // XXX: We are double-counting some announce topics. I'm not sure why...
std::cout << "Statistics: " << std::endl << " Announced: " << announceHandler.getNumAnnounced() << std::endl << " Started: " << sampleHandler.getNumStarted() << std::endl << " Completed: " << sampleHandler.getNumCompleted() << std::endl << " Moved: " << g_numFilesMoved << std::endl; if (sampleHandler.getNumStarted() != sampleHandler.getNumCompleted()) { std::cout << " INCOMPLETE: " << sampleHandler.getNumStarted() - sampleHandler.getNumCompleted() << std::endl; } } ddsEntities.fini(); return 0; }
void
Publisher::run()
{
  DDS::Duration_t timeout = { DDS::DURATION_INFINITE_SEC, DDS::DURATION_INFINITE_NSEC };
  DDS::ConditionSeq conditions;
  DDS::PublicationMatchedStatus matches = { 0, 0, 0, 0, 0 };
  unsigned int cummulative_count = 0;
  do {
    if( this->options_.verbose()) {
      ACE_DEBUG((LM_DEBUG,
        ACE_TEXT("(%P|%t) Publisher::run() - ")
        ACE_TEXT("%d of %d subscriptions attached, waiting for more.\n"),
        cummulative_count,
        this->publications_.size()
      ));
    }

    if( DDS::RETCODE_OK != this->waiter_->wait( conditions, timeout)) {
      ACE_ERROR((LM_ERROR,
        ACE_TEXT("(%P|%t) ERROR: Publisher::run() - ")
        ACE_TEXT("failed to synchronize at start of test.\n")
      ));
      throw BadSyncException();
    }

    for( unsigned long index = 0; index < conditions.length(); ++index) {
      DDS::StatusCondition_var condition
        = DDS::StatusCondition::_narrow( conditions[ index].in());
      DDS::DataWriter_var writer = DDS::DataWriter::_narrow( condition->get_entity());
      if( !CORBA::is_nil( writer.in())) {
        DDS::StatusMask changes = writer->get_status_changes();
        if( changes & DDS::PUBLICATION_MATCHED_STATUS) {
          if (writer->get_publication_matched_status(matches) != ::DDS::RETCODE_OK) {
            ACE_ERROR ((LM_ERROR,
              "ERROR: failed to get publication matched status\n"));
            ACE_OS::exit (1);
          }
          cummulative_count += matches.current_count_change;
        }
      }
    }

  } while( cummulative_count < this->publications_.size());

  // Kluge to bias the race between BuiltinTopic samples and application
  // samples towards the BuiltinTopics during association establishment.
  ACE_OS::sleep( 2);

  if( this->options_.verbose()) {
    ACE_DEBUG((LM_DEBUG,
      ACE_TEXT("(%P|%t) Publisher::run() - ")
      ACE_TEXT("starting to publish samples with %d matched subscriptions.\n"),
      cummulative_count
    ));
  }

  for( PublicationMap::const_iterator current = this->publications_.begin();
       current != this->publications_.end();
       ++current) {
    current->second->start();
  }

  // Execute test for specified duration, or block until terminated externally.
  if( this->options_.duration() > 0) {
    ACE_Time_Value duration( this->options_.duration(), 0);
    ACE_OS::sleep( duration);
  } else {
    // Block the main thread, leaving the others working.
    ACE_Thread_Manager::instance()->wait();
  }

  // Signal the writers to terminate.
  for( PublicationMap::const_iterator current = this->publications_.begin();
       current != this->publications_.end();
       ++current) {
    current->second->stop();
  }

  // Separate loop so the termination messages can be handled concurrently.
  for( PublicationMap::const_iterator current = this->publications_.begin();
       current != this->publications_.end();
       ++current) {
    // Join and clean up.
    current->second->wait();
    ACE_DEBUG((LM_DEBUG,
      ACE_TEXT("(%P|%t) Publisher::run() - ")
      ACE_TEXT("publication %C stopping after sending %d messages.\n"),
      current->first.c_str(),
      current->second->messages()
    ));
    delete current->second;
  }
  this->publications_.clear();

  if( this->options_.verbose()) {
    ACE_DEBUG((LM_DEBUG,
      ACE_TEXT("(%P|%t) Publisher::run() - ")
      ACE_TEXT("finished publishing samples.\n")
    ));
  }
}
void
Publisher::run()
{
  DDS::Duration_t timeout = { DDS::DURATION_INFINITE_SEC, DDS::DURATION_INFINITE_NSEC };
  DDS::ConditionSeq conditions;
  DDS::PublicationMatchedStatus matches = { 0, 0, 0, 0, 0 };
  unsigned int cummulative_count = 0;
  do {
    if( this->options_.verbose()) {
      ACE_DEBUG((LM_DEBUG,
        ACE_TEXT("(%P|%t) Publisher::run() - ")
        ACE_TEXT("%d of 2 subscriptions attached, waiting for more.\n"),
        cummulative_count
      ));
    }

    if( DDS::RETCODE_OK != this->waiter_->wait( conditions, timeout)) {
      ACE_ERROR((LM_ERROR,
        ACE_TEXT("(%P|%t) ERROR: Publisher::run() - ")
        ACE_TEXT("failed to synchronize at start of test.\n")
      ));
      throw BadSyncException();
    }

    for( unsigned long index = 0; index < conditions.length(); ++index) {
      DDS::StatusCondition_var condition
        = DDS::StatusCondition::_narrow( conditions[ index].in());
      DDS::Entity_var writer_entity = condition->get_entity();
      DDS::DataWriter_var writer = DDS::DataWriter::_narrow( writer_entity);
      if( !CORBA::is_nil( writer.in())) {
        DDS::StatusMask changes = writer->get_status_changes();
        if( changes & DDS::PUBLICATION_MATCHED_STATUS) {
          if (writer->get_publication_matched_status(matches) != ::DDS::RETCODE_OK) {
            ACE_ERROR ((LM_ERROR,
              "ERROR: failed to get publication matched status\n"));
            ACE_OS::exit (1);
          }
          cummulative_count += matches.current_count_change;
        }
      }
    }

  } while( cummulative_count < 2);

  /// Kluge to ensure that the remote/subscriber side endpoints have
  /// been fully associated before starting to send.  This appears to be
  /// a race between the association creation and use and the BuiltIn
  /// Topic data becoming available.  There is no existing mechanism (nor
  /// should there be) to prevent an association from exchanging data
  /// prior to the remote endpoint information becoming available via the
  /// BuiltIn Topic publications.
  ACE_OS::sleep( 2);

  ACE_DEBUG((LM_DEBUG,
    ACE_TEXT("(%P|%t) Publisher::run() - ")
    ACE_TEXT("starting to publish samples.\n")
  ));

  Test::DataDataWriter_var writer0
    = Test::DataDataWriter::_narrow( this->writer_[0].in());
  Test::DataDataWriter_var writer1
    = Test::DataDataWriter::_narrow( this->writer_[1].in());

  Test::Data sample0;
  Test::Data sample1;
  sample0.key = 1;
  sample0.value = 0;
  // before_value is only meaningful for the high priority sample; low
  // priority samples arrive in order.
  sample0.before_value = 0;
  sample0.priority = false;
  // Add some extra baggage so the low priority writer eventually sees
  // backpressure (RETCODE_TIMEOUT) below.
  sample0.baggage.length(9999);

  if (options_.multipleInstances())
    sample1.key = 2;
  else
    sample1.key = 1;
  sample1.value = 0;
  // Will determine later which value this sample should be seen before.
  sample1.before_value = 0;
  sample1.priority = true;

  bool sent = false;
  for (unsigned long num_samples = 1;
       num_samples < (unsigned long)-1 && !sent;
       ++num_samples) {
    ++sample0.value;
    if (writer0->write( sample0, DDS::HANDLE_NIL) == DDS::RETCODE_TIMEOUT) {
      // Indicate that the high priority sample should arrive before the
      // indicated low priority sample.
      sample1.before_value = sample0.value - 1;
      while (writer1->write( sample1, DDS::HANDLE_NIL) == DDS::RETCODE_TIMEOUT) {
        ACE_ERROR((LM_ERROR,
          ACE_TEXT("(%P|%t) ERROR: Publisher::run() - ")
          ACE_TEXT("should not have backpressure for the second writer.\n")
        ));
      }
      sent = true;
    }
  }

  ACE_DEBUG((LM_DEBUG,
    ACE_TEXT("(%P|%t) Publisher::run() - ")
    ACE_TEXT("finished publishing %d samples.\n"),
    sample0.value
  ));

  // Make sure that the data has arrived.
  ::DDS::Duration_t shutdownDelay = {15, 0}; // Wait up to a total of 15
                                             // seconds to finish the test.
  if (this->options_.transportType() != Options::UDP) {
    writer0->wait_for_acknowledgments(shutdownDelay);
    writer1->wait_for_acknowledgments(shutdownDelay);
  } else {
    // Wait for acks won't work with UDP...
    ACE_OS::sleep(15);
  }
}