/// End-to-end publisher-side test of the rtps_udp transport.
///
/// Exercises three paths:
///   0. transport / GUID initialization and writer-reader association,
///   1. an RTPS DATA message hand-serialized and sent straight to the socket,
///   2a. control samples (register/dispose/unregister) via the OpenDDS transport,
///   2b. regular data samples via the OpenDDS transport (with a deliberate
///       sequence-number gap to test GAP generation).
///
/// @param host  remote host to send to (required, from -h)
/// @param port  remote port to send to (required, from -p)
/// @return 0 on success, 1 on any setup/serialization/send failure.
int DDS_TEST::test(ACE_TString host, u_short port)
{
  if (host.empty() || port == 0) {
    std::cerr << "ERROR: -h <host> and -p <port> options are required\n";
    return 1;
  }

  // 0. initialization

  ACE_INET_Addr remote_addr(port, host.c_str());
  TransportInst_rch inst =
    TheTransportRegistry->create_inst("my_rtps", "rtps_udp");

  RtpsUdpInst* rtps_inst = dynamic_cast<RtpsUdpInst*>(inst.in());
  if (!rtps_inst) {
    std::cerr << "ERROR: Failed to cast to RtpsUdpInst\n";
    return 1;
  }
  // Release datalinks immediately and heartbeat quickly so the test runs fast.
  rtps_inst->datalink_release_delay_ = 0;
  rtps_inst->heartbeat_period_ = ACE_Time_Value(0, 100*1000 /*microseconds*/);

  TransportConfig_rch cfg = TheTransportRegistry->create_config("cfg");
  cfg->instances_.push_back(inst);

  TheTransportRegistry->global_config(cfg);

  // Build the local (writer) GUID.
  RepoIdBuilder local;
  local.federationId(0x01234567);  // guidPrefix1
  local.participantId(0x89abcdef); // guidPrefix2
  local.entityKey(0x012345);
  local.entityKind(ENTITYKIND_USER_WRITER_WITH_KEY);
  OpenDDS::RTPS::GUID_t local_guid(local);
  const OpenDDS::RTPS::GuidPrefix_t& local_prefix = local_guid.guidPrefix;

  // Build the remote (reader) GUID.
  RepoIdBuilder remote; // these values must match what's in subscriber.cpp
  remote.federationId(0x01234567);  // guidPrefix1
  remote.participantId(0xefcdab89); // guidPrefix2
  remote.entityKey(0x452310);
  remote.entityKind(ENTITYKIND_USER_READER_WITH_KEY);

  // Serialize the remote locator (plus the "requires inline QoS" flag byte)
  // into a message block that becomes the association's transport BLOB.
  LocatorSeq locators;
  locators.length(1);
  locators[0].kind = (remote_addr.get_type() == AF_INET6)
                     ? LOCATOR_KIND_UDPv6 : LOCATOR_KIND_UDPv4;
  locators[0].port = port;
  address_to_bytes(locators[0].address, remote_addr);

  size_t size_locator = 0, padding_locator = 0;
  gen_find_size(locators, size_locator, padding_locator);
  ACE_Message_Block mb_locator(size_locator + padding_locator + 1);
  Serializer ser_loc(&mb_locator, ACE_CDR_BYTE_ORDER, Serializer::ALIGN_CDR);
  ser_loc << locators;
  ser_loc << ACE_OutputCDR::from_boolean(false); // requires inline QoS

  SimpleDataWriter sdw(local_guid);
  sdw.enable_transport(true /*reliable*/, false /*durable*/);

  AssociationData subscription;
  subscription.remote_id_ = remote;
  subscription.remote_reliable_ = false;
  subscription.remote_data_.length(1);
  subscription.remote_data_[0].transport_type = "rtps_udp";
  subscription.remote_data_[0].data.replace(
    static_cast<CORBA::ULong>(mb_locator.length()), &mb_locator);

  if (!sdw.init(subscription)) {
    std::cerr << "publisher TransportClient::associate() failed\n";
    return 1;
  }

  // 1. send by directly writing an RTPS Message to the socket

  const Header hdr = {
    {'R', 'T', 'P', 'S'}, PROTOCOLVERSION, VENDORID_OPENDDS,
    {local_prefix[0], local_prefix[1], local_prefix[2], local_prefix[3],
     local_prefix[4], local_prefix[5], local_prefix[6], local_prefix[7],
     local_prefix[8], local_prefix[9], local_prefix[10], local_prefix[11]}
  };

  const ACE_Time_Value now = ACE_OS::gettimeofday();
  log_time(now);
  const double conv = 4294.967296; // NTP fractional (2^-32) sec per microsec
  const InfoTimestampSubmessage it = {
    {INFO_TS, 1, 8},
    {static_cast<ACE_CDR::Long>(now.sec()),
     static_cast<ACE_CDR::ULong>(now.usec() * conv)}
  };

  DataSubmessage ds = {
    {DATA, 7, 20 + 24 + 12 + sizeof(text)}, 0, 16,
    ENTITYID_UNKNOWN, local_guid.entityId, {0, 1}, ParameterList()
  };

  TestMsg data;
  data.key = 0x09230923;
  data.value = text;

  // Key hash travels as inline QoS on the hand-built DATA submessage.
  ds.inlineQos.length(1);
  OpenDDS::RTPS::KeyHash_t hash;
  marshal_key_hash(data, hash);
  ds.inlineQos[0].key_hash(hash);

  const ACE_CDR::ULong encap = 0x00000100; // {CDR_LE, options} in BE format

  size_t size = 0, padding = 0;
  gen_find_size(hdr, size, padding);
  gen_find_size(it, size, padding);
  gen_find_size(ds, size, padding);
  find_size_ulong(size, padding);
  gen_find_size(data, size, padding);

  ACE_Message_Block msg(size + padding);
  Serializer ser(&msg, host_is_bigendian, Serializer::ALIGN_CDR);
  bool ok = (ser << hdr) && (ser << it) && (ser << ds)
         && (ser << encap) && (ser << data);
  if (!ok) {
    std::cerr << "ERROR: failed to serialize RTPS message\n";
    return 1;
  }

  ACE_INET_Addr local_addr;
  ACE_SOCK_Dgram sock(local_addr);
  ssize_t res = sock.send(msg.rd_ptr(), msg.length(), remote_addr);
  // FIX: ACE_SOCK_Dgram does not close its handle in the destructor; close
  // explicitly so the descriptor is not leaked for the rest of the test.
  sock.close();
  if (res < 0) {
    std::cerr << "ERROR: error in send()\n";
    return 1;
  } else {
    std::ostringstream oss;
    oss << "Sent " << res << " bytes.\n";
    // FIX: never pass dynamic text as the ACE_DEBUG format string; use %C.
    ACE_DEBUG((LM_INFO, ACE_TEXT("%C"), oss.str().c_str()));
  }

  // 2a. send control messages through the OpenDDS transport

  // Send an instance registration
  {
    TestMsg control_sample;
    control_sample.key = 0x04030201;

    DataSampleHeader dsh;
    dsh.message_id_ = INSTANCE_REGISTRATION;
    dsh.sequence_ = SequenceNumber::SEQUENCENUMBER_UNKNOWN();
    dsh.publication_id_ = local_guid;
    dsh.key_fields_only_ = true;

    // Calculate the data buffer length
    size = 0;
    padding = 0;
    OpenDDS::DCPS::KeyOnly<const TestMsg> ko_instance_data(control_sample);
    find_size_ulong(size, padding); // encap
    gen_find_size(ko_instance_data, size, padding);
    dsh.message_length_ = static_cast<ACE_UINT32>(size + padding);

    ACE_Message_Block* ir_mb =
      new ACE_Message_Block(DataSampleHeader::max_marshaled_size(),
                            ACE_Message_Block::MB_DATA,
                            new ACE_Message_Block(dsh.message_length_));
    *ir_mb << dsh;

    OpenDDS::DCPS::Serializer serializer(ir_mb->cont(), host_is_bigendian,
                                         Serializer::ALIGN_CDR);
    ok = (serializer << encap) && (serializer << ko_instance_data);
    if (!ok) {
      std::cerr << "ERROR: failed to serialize data for instance registration\n";
      return 1;
    }

    ::DDS_TEST::force_inline_qos(false); // No inline QoS
    sdw.send_control(dsh, ir_mb);

    // Send a dispose instance
    {
      dsh.message_id_ = DISPOSE_INSTANCE;
      ACE_Message_Block* di_mb =
        new ACE_Message_Block(DataSampleHeader::max_marshaled_size(),
                              ACE_Message_Block::MB_DATA,
                              new ACE_Message_Block(dsh.message_length_));
      *di_mb << dsh;
      OpenDDS::DCPS::Serializer serializer(di_mb->cont(), host_is_bigendian,
                                           Serializer::ALIGN_CDR);
      ok = (serializer << encap) && (serializer << ko_instance_data);
      if (!ok) {
        std::cerr << "ERROR: failed to serialize data for dispose instance\n";
        return 1;
      }
      ::DDS_TEST::force_inline_qos(true); // Inline QoS
      sdw.inline_qos_mode_ = SimpleDataWriter::PARTIAL_MOD_QOS;
      sdw.send_control(dsh, di_mb);
    }

    // Send an unregister instance
    {
      dsh.message_id_ = UNREGISTER_INSTANCE;
      ACE_Message_Block* ui_mb =
        new ACE_Message_Block(DataSampleHeader::max_marshaled_size(),
                              ACE_Message_Block::MB_DATA,
                              new ACE_Message_Block(dsh.message_length_));
      *ui_mb << dsh;
      OpenDDS::DCPS::Serializer serializer(ui_mb->cont(), host_is_bigendian,
                                           Serializer::ALIGN_CDR);
      ok = (serializer << encap) && (serializer << ko_instance_data);
      if (!ok) {
        std::cerr << "ERROR: failed to serialize data for unregister instance\n";
        return 1;
      }
      ::DDS_TEST::force_inline_qos(true); // Inline QoS
      sdw.inline_qos_mode_ = SimpleDataWriter::FULL_MOD_QOS;
      sdw.send_control(dsh, ui_mb);
    }

    // Send a dispose & unregister instance
    {
      dsh.message_id_ = DISPOSE_UNREGISTER_INSTANCE;
      ACE_Message_Block* ui_mb =
        new ACE_Message_Block(DataSampleHeader::max_marshaled_size(),
                              ACE_Message_Block::MB_DATA,
                              new ACE_Message_Block(dsh.message_length_));
      *ui_mb << dsh;
      OpenDDS::DCPS::Serializer serializer(ui_mb->cont(), host_is_bigendian,
                                           Serializer::ALIGN_CDR);
      ok = (serializer << encap) && (serializer << ko_instance_data);
      if (!ok) {
        std::cerr << "ERROR: failed to serialize data for dispose unregister instance\n";
        return 1;
      }
      ::DDS_TEST::force_inline_qos(true); // Inline QoS
      sdw.inline_qos_mode_ = SimpleDataWriter::FULL_MOD_QOS;
      sdw.send_control(dsh, ui_mb);
    }
  }

  // 2b. send sample data through the OpenDDS transport

  TransportSendElementAllocator alloc(2, sizeof(TransportSendElementAllocator));
  DataSampleElement elements[] = {
    DataSampleElement(local_guid, &sdw, 0, &alloc, 0), // Data Sample
    DataSampleElement(local_guid, &sdw, 0, &alloc, 0), // Data Sample (key=99 means end)
  };
  SendStateDataSampleList list;
  list.head_ = elements;
  list.size_ = sizeof(elements) / sizeof(elements[0]);
  list.tail_ = &elements[list.size() - 1];
  for (int i = 0; i < list.size() - 1; ++i) {
    DDS_TEST::set_next_send_sample(elements[i], &elements[i + 1]);
  }

  // Send a regular data sample
  int index = 0;
  DataSampleHeader& dsh = elements[index].header_;
  dsh.message_id_ = SAMPLE_DATA;
  dsh.publication_id_ = local_guid;
  dsh.sequence_ = 3; // test GAP generation
  const ACE_Time_Value tv = ACE_OS::gettimeofday();
  log_time(tv);
  DDS::Time_t st = time_value_to_time(tv);
  dsh.source_timestamp_sec_ = st.sec;
  dsh.source_timestamp_nanosec_ = st.nanosec;

  // Calculate the data buffer length
  size = 0;
  padding = 0;
  find_size_ulong(size, padding); // encap
  gen_find_size(data, size, padding);
  dsh.message_length_ = static_cast<ACE_UINT32>(size + padding);

  elements[index].sample_ =
    new ACE_Message_Block(DataSampleHeader::max_marshaled_size(),
                          ACE_Message_Block::MB_DATA,
                          new ACE_Message_Block(dsh.message_length_));
  *elements[index].sample_ << dsh;

  Serializer ser2(elements[index].sample_->cont(), host_is_bigendian,
                  Serializer::ALIGN_CDR);
  ok = (ser2 << encap) && (ser2 << data);
  if (!ok) {
    std::cerr << "ERROR: failed to serialize data for elements["
              << index << "]\n";
    return 1;
  }

  // Send a data sample with a key of 99 to terminate the subscriber
  index++;
  DataSampleHeader& dsh2 = elements[index].header_;
  dsh2.sequence_ = dsh.sequence_ + 1;
  dsh2.message_id_ = SAMPLE_DATA;
  // FIX: was "dsh.publication_id_ = local_guid;" — a typo that re-set the
  // first sample's header and left dsh2's publication id unset.
  dsh2.publication_id_ = local_guid;
  dsh2.key_fields_only_ = false;
  const ACE_Time_Value tv2 = ACE_OS::gettimeofday();
  log_time(tv2);
  DDS::Time_t st2 = time_value_to_time(tv2);
  dsh2.source_timestamp_sec_ = st2.sec;
  dsh2.source_timestamp_nanosec_ = st2.nanosec;

  data.key = 99;
  data.value = "";

  // Calculate the data buffer length
  size = 0;
  padding = 0;
  find_size_ulong(size, padding); // encap
  gen_find_size(data, size, padding);
  dsh2.message_length_ = static_cast<ACE_UINT32>(size + padding);

  elements[index].sample_ =
    new ACE_Message_Block(DataSampleHeader::max_marshaled_size(),
                          ACE_Message_Block::MB_DATA,
                          new ACE_Message_Block(dsh2.message_length_));
  *elements[index].sample_ << dsh2;

  Serializer ser3(elements[index].sample_->cont(), host_is_bigendian,
                  Serializer::ALIGN_CDR);
  ok = (ser3 << encap) && (ser3 << data.key) && (ser3 << data.value);
  if (!ok) {
    std::cerr << "ERROR: failed to serialize data for elements["
              << index << "]\n";
    return 1;
  }

  sdw.callbacks_expected_ = list.size();
  ::DDS_TEST::force_inline_qos(true); // Inline QoS
  sdw.send(list);

  while (sdw.callbacks_expected_) {
    ACE_OS::sleep(1);
  }

  // Allow enough time for a HEARTBEAT message to be generated
  ACE_OS::sleep(rtps_inst->heartbeat_period_ * 2.0);

  // 3. cleanup

  sdw.disassociate(subscription.remote_id_);

  TheServiceParticipant->shutdown();
  ACE_Thread_Manager::instance()->wait();
  return 0;
}
/// Cache (and, for PERSISTENT durability, write to disk) the samples in
/// @a the_data for the given domain/topic/type, honoring the
/// DURABILITY_SERVICE history/resource-limit settings in @a qos, and
/// schedule a cleanup timer if the QoS specifies a service_cleanup_delay.
///
/// @return true on success (including the trivial no-data / zero-depth
///         cases), false on allocation, bind, enqueue, file-write, or
///         timer-scheduling failure.
///
/// NOTE(review): appears to assume insert() is called at most once per
/// datawriter (see the per-datawriter directory comment below) — confirm
/// with callers before reusing elsewhere.
bool OpenDDS::DCPS::DataDurabilityCache::insert(
  DDS::DomainId_t domain_id,
  char const * topic_name,
  char const * type_name,
  SendStateDataSampleList & the_data,
  DDS::DurabilityServiceQosPolicy const & qos)
{
  if (the_data.size() == 0)
    return true; // Nothing to cache.

  // Apply DURABILITY_SERVICE QoS HISTORY and RESOURCE_LIMITS related
  // settings prior to data insertion into the cache.
  CORBA::Long const depth =
    get_instance_sample_list_depth(
      qos.history_kind,
      qos.history_depth,
      qos.max_samples_per_instance);

  // Iterator to first DataSampleElement to be copied.
  SendStateDataSampleList::iterator element(the_data.begin());

  if (depth < 0)
    return false; // Should never occur.

  else if (depth == 0)
    return true;  // Nothing else to do. Discard all data.

  else if (the_data.size() > depth) {
    // N.B. Dropping data samples does not take into account
    // those samples which are not actually persisted (i.e.
    // samples with the coherent_sample_ flag set). The spec
    // does not provide any guidance in this case, therefore
    // we opt for the simplest solution and assume that there
    // are no change sets when calculating the number of
    // samples to drop.

    // Drop "old" samples. Only keep the "depth" most recent
    // samples, i.e. those found at the tail end of the
    // SendStateDataSampleList.
    ssize_t const advance_amount = the_data.size() - depth;
    std::advance(element, advance_amount);
  }

  // -----------

  // Copy samples to the domain/topic/type-specific cache.

  key_type const key(domain_id,
                     topic_name,
                     type_name,
                     this->allocator_.get());
  SendStateDataSampleList::iterator the_end(the_data.end());
  sample_list_type * sample_list = 0;

  typedef DurabilityQueue<sample_data_type> data_queue_type;
  data_queue_type ** slot = 0;
  data_queue_type * samples = 0; // sample_list_type::value_type

  using OpenDDS::FileSystemStorage::Directory;
  using OpenDDS::FileSystemStorage::File;
  Directory::Ptr dir;
  std::vector<std::string> path;

  // Scope for the cache-mutation critical section: directory setup, slot
  // lookup/growth, queue allocation, and sample enqueue all happen with
  // this->lock_ held (released before the timer is scheduled below).
  {
    ACE_Allocator * const allocator = this->allocator_.get();

    ACE_GUARD_RETURN(ACE_SYNCH_MUTEX, guard, this->lock_, false);

    if (this->kind_ == DDS::PERSISTENT_DURABILITY_QOS) {
      try {
        dir = Directory::create(this->data_dir_.c_str());

        // Directory layout: <data_dir>/<domain_id>/<topic>/<type>/...
        std::ostringstream oss;
        oss << domain_id;
        path.push_back(oss.str());
        path.push_back(topic_name);
        path.push_back(type_name);
        dir = dir->get_dir(path);
        // dir is now the "type" directory, which is shared by all datawriters
        // of the domain/topic/type. We actually need a new directory per
        // datawriter and this assumes that insert() is called once per
        // datawriter, as is currently the case.
        dir = dir->create_next_dir();
        path.push_back(dir->name()); // for use by the Cleanup_Handler

      } catch (const std::exception& ex) {
        if (DCPS_debug_level > 0) {
          ACE_ERROR((LM_ERROR,
                     ACE_TEXT("(%P|%t) DataDurabilityCache::insert ")
                     ACE_TEXT("couldn't create directory for PERSISTENT ")
                     ACE_TEXT("data: %C\n"), ex.what()));
        }

        // Fall back to in-memory only: a nil dir disables all file writes
        // below, but the samples are still cached.
        dir = 0;
      }
    }

    if (this->samples_->find(key, sample_list, allocator) != 0) {
      // Create a new list (actually an ACE_Array_Base<>) with the
      // appropriate allocator passed to its constructor.
      ACE_NEW_MALLOC_RETURN(
        sample_list,
        static_cast<sample_list_type *>(
          allocator->malloc(sizeof(sample_list_type))),
        sample_list_type(1, static_cast<data_queue_type *>(0), allocator),
        false);

      if (this->samples_->bind(key, sample_list, allocator) != 0)
        return false;
    }

    data_queue_type ** const begin = &((*sample_list)[0]);
    data_queue_type ** const end = begin + sample_list->size();

    // Find an empty slot in the array. This is a linear search but
    // that should be fine for the common case, i.e. a small number of
    // DataWriters that push data into the cache.
    slot = std::find(begin, end, static_cast<data_queue_type *>(0));

    if (slot == end) {
      // No available slots. Grow the array accordingly.
      size_t const old_len = sample_list->size();
      sample_list->size(old_len + 1);

      // NOTE: size() may reallocate the underlying storage, so re-take the
      // base pointer before computing the new slot address.
      data_queue_type ** new_begin = &((*sample_list)[0]);
      slot = new_begin + old_len;
    }

    ACE_NEW_MALLOC_RETURN(
      samples,
      static_cast<data_queue_type *>(
        allocator->malloc(sizeof(data_queue_type))),
      data_queue_type(allocator),
      false);

    // Insert the samples in to the sample list.
    *slot = samples;

    if (!dir.is_nil()) {
      samples->fs_path_ = path;
    }

    for (SendStateDataSampleList::iterator i(element); i != the_end; ++i) {
      DataSampleElement& elem = *i;

      // N.B. Do not persist samples with coherent changes.
      // To verify, we check the DataSampleHeader for the
      // coherent_change_ flag. The DataSampleHeader will
      // always be the first message block in the chain.
      //
      // It should be noted that persisting coherent changes
      // is a non-trivial task, and should be handled when
      // finalizing persistence profile conformance.
      if (DataSampleHeader::test_flag(COHERENT_CHANGE_FLAG,
                                      elem.get_sample())) {
        continue; // skip coherent sample
      }

      sample_data_type sample(elem, allocator);

      if (samples->enqueue_tail(sample) != 0)
        return false;

      if (!dir.is_nil()) {
        try {
          // One file per sample: "<sec> <nanosec> <raw sample bytes>".
          File::Ptr f = dir->create_next_file();
          std::ofstream os;

          if (!f->write(os))
            return false;

          DDS::Time_t timestamp;
          const char * data;
          size_t len;
          sample.get_sample(data, len, timestamp);

          os << timestamp.sec << ' ' << timestamp.nanosec << ' ';
          os.write(data, len);

        } catch (const std::exception& ex) {
          // Best-effort: a failed file write is logged but does not fail
          // the insert (the in-memory copy above already succeeded).
          if (DCPS_debug_level > 0) {
            ACE_ERROR((LM_ERROR,
                       ACE_TEXT("(%P|%t) DataDurabilityCache::insert ")
                       ACE_TEXT("couldn't write sample for PERSISTENT ")
                       ACE_TEXT("data: %C\n"), ex.what()));
          }
        }
      }
    }
  }

  // -----------

  // Schedule cleanup timer.
  //FUTURE: The cleanup delay needs to be persisted (if QoS is persistent)
  ACE_Time_Value const cleanup_delay(
    duration_to_time_value(qos.service_cleanup_delay));

  if (cleanup_delay > ACE_Time_Value::zero) {
    if (OpenDDS::DCPS::DCPS_debug_level >= 4) {
      ACE_DEBUG((LM_DEBUG,
                 ACE_TEXT("OpenDDS (%P|%t) Scheduling durable data ")
                 ACE_TEXT("cleanup for\n")
                 ACE_TEXT("OpenDDS (%P|%t) (domain_id, topic, type) ")
                 ACE_TEXT("== (%d, %C, %C)\n"),
                 domain_id,
                 topic_name,
                 type_name));
    }

    Cleanup_Handler * const cleanup =
      new Cleanup_Handler(*sample_list,
                          slot - &(*sample_list)[0],
                          this->allocator_.get(),
                          path,
                          this->data_dir_);
    ACE_Event_Handler_var safe_cleanup(cleanup); // Transfer ownership

    long const tid =
      this->reactor_->schedule_timer(cleanup,
                                     0, // ACT
                                     cleanup_delay);

    if (tid == -1) {
      // Roll back: without a cleanup timer the queue would leak, so free
      // it and vacate the slot under the lock.
      ACE_GUARD_RETURN(ACE_SYNCH_MUTEX, guard, this->lock_, false);

      ACE_DES_FREE(samples,
                   this->allocator_->free,
                   DurabilityQueue<sample_data_type>);
      *slot = 0;

      return false;

    } else {
      {
        ACE_GUARD_RETURN(ACE_SYNCH_MUTEX, guard, this->lock_, false);
        this->cleanup_timer_ids_.push_back(tid);
      }

      // Let the handler deregister its own id when it fires/cancels.
      cleanup->timer_id(tid, &this->cleanup_timer_ids_);
    }
  }

  return true;
}