Example No. 1
static u_result
handlePublication(
    u_dataReader dataReader,
    c_long dataOffset,
    u_dataReader pDataReader,
    c_long pDataOffset)
{
    v_dataReaderSample sample;
    u_result result;
    v_state state;
    v_message msg;
    struct v_publicationInfo *data;
    in_writer writer;
    in_participant participant;

    sample = NULL;
    result = u_dataReaderTake(dataReader, takeOne, &sample);

    while(sample && (result == U_RESULT_OK)){
        state = v_readerSample(sample)->sampleState;
        msg   = v_dataReaderSampleMessage(sample);
        data  = (struct v_publicationInfo *)(C_DISPLACE(msg, dataOffset));
        os_mutexLock (&gluelock);
        if(v_stateTest(state, L_DISPOSED)){
          writer = in_writerLookup(&data->key);

          if(writer){
            in_writerFree(writer, NULL);
          } else {
            nn_log (LC_WARNING, "handlePublication: disposed writer not found\n");
            /*abort();*/
          }
        } else {
          participant = in_participantLookup(&(data->participant_key));

          if(!participant){
            result = handleParticipant(pDataReader, pDataOffset, 1);

            if(result == U_RESULT_OK){
              participant = in_participantLookup(&(data->participant_key));
            }
          }
          if(participant){
            in_writerNew(participant, data);
          } else {
            nn_log (LC_ERROR, "handlePublication: participant not found\n");
            result = U_RESULT_INTERNAL_ERROR;
            /*abort();*/
          }
        }
        os_mutexUnlock (&gluelock);
        c_free(sample);
        sample = NULL;
        result = u_dataReaderTake(dataReader, takeOne, &sample);
    }
    return result;
}
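
The take loop above relies on a helper action, takeOne, whose definition is not shown in this example. The sketch below is a hedged reconstruction based on the usual kernel reader-action convention (keep one sample for the caller, then stop the take); treat the signature and body as assumptions, not as the service's actual code.

static c_bool
takeOne(
    c_object o,
    c_voidp arg)
{
    v_dataReaderSample *sample = (v_dataReaderSample *) arg;

    /* Keep a reference for the caller (released later with c_free())
       and return FALSE so at most one sample is taken per call. */
    *sample = v_dataReaderSample (c_keep (o));
    return FALSE;
}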
Example No. 2
static u_result
handleGroup(
    v_service service,
    v_group group)
{
    v_networkReaderEntry entry;
    nn_log (LC_DISCOVERY, "Found new group '%s.%s'; adding networkReaderEntry...\n",
              v_entity(group->partition)->name,
              v_entity(group->topic)->name);

    entry = v_networkReaderEntryNew(
            vclientReader, group,
            v_publicGid(v_public(service)).systemId,
            1, 0);

    if (group->topic->qos->durability.kind >= V_DURABILITY_TRANSIENT)
    {
      /* For transient topics, DDSI readers are spontaneously
         generated to ensure data will indeed arrive -- FIXME:
         currently no provision is made to ensure no "early"
         publications are lost while DDSI discovery is still digesting
         these readers.

         For convenience, we use the regular DDS<->DDSI mapping to
         handle these fictitious readers, and we pretend these
         fictitious readers are owned by the DDSI service
         participant. That one has been created, and as luck has it,
         the participants are discovered before the groups are. So we
         just look it up. */
      v_builtinTopicKey pkey;
      in_participant p;
      nn_log (LC_DISCOVERY, "Group is transient - creating DDSI data reader...\n");

      os_mutexLock (&gluelock);
      pkey = u_entityGid ((u_entity) participant);
      if ((p = in_participantLookup (&pkey)) == NULL)
        nn_log (LC_ERROR, "handleGroup: participant lookup of self failed, transient data may not work\n");
      else
      {
        if (!in_fictitiousTransientReaderNew (p, group))
        {
          nn_log (LC_ERROR, "handleGroup: creation of fictitious transient data reader failed, transient data may not work\n");
        }
      }
      os_mutexUnlock (&gluelock);
    }

    v_networkReaderEntryNotifyConnected(entry, SERVICE_NAME);
    v_networkReaderRemoteActivityDetected(vclientReader);
    return U_RESULT_OK;
}
Example No. 3
static int open_tracing_file (void)
{
  if (config.tracingOutputFileName == NULL || *config.tracingOutputFileName == 0)
  {
    config.tracingOutputFile = NULL;
    return 1;
  }
  else if (os_strcasecmp (config.tracingOutputFileName, "stdout") == 0)
  {
    config.tracingOutputFile = stdout;
    return 1;
  }
  else if (os_strcasecmp (config.tracingOutputFileName, "stderr") == 0)
  {
    config.tracingOutputFile = stderr;
    return 1;
  }
  else if ((config.tracingOutputFile = fopen (config.tracingOutputFileName, config.tracingAppendToFile ? "a" : "w")) == NULL)
  {
    nn_log (LC_ERROR, "%s: cannot open for writing\n", config.tracingOutputFileName);
    return 0;
  }
  else
  {
    return 1;
  }
}
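
Because "stdout" and "stderr" map to streams that open_tracing_file did not fopen(), a matching cleanup routine must only fclose() a stream it opened itself. The helper below is a minimal sketch under that assumption; the name close_tracing_file is illustrative and not taken from the examples.

static void close_tracing_file (void)
{
  /* Only close files we opened ourselves; stdout/stderr stay open. */
  if (config.tracingOutputFile != NULL &&
      config.tracingOutputFile != stdout &&
      config.tracingOutputFile != stderr)
  {
    fclose (config.tracingOutputFile);
  }
  config.tracingOutputFile = NULL;
}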
Example No. 4
void nn_log_addrset (logcat_t tf, const char *prefix, const struct addrset *as)
{
  struct log_addrset_helper_arg arg;
  arg.tf = tf;
  nn_log (tf, "%s", prefix); /* treat prefix as data, not as a format string */
  addrset_forall ((struct addrset *) as, log_addrset_helper, &arg); /* drop const; the helper only reads the set */
}
Example No. 5
static void log_addrset_helper (const os_sockaddr_storage *n, void *varg)
{
  const struct log_addrset_helper_arg *arg = varg;
  char buf[INET6_ADDRSTRLEN_EXTENDED];
  if (config.enabled_logcats & arg->tf)
  {
    nn_log (arg->tf, " %s", sockaddr_to_string_with_port (buf, n));
  }
}
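
Examples 4 and 5 work as a pair: nn_log_addrset prints the prefix and then hands every address in the set to log_addrset_helper, which checks config.enabled_logcats before paying for the address-to-string conversion. A plausible call site, shown here purely as an illustration, would be:

/* Hypothetical caller: dump an address set under the discovery category. */
nn_log_addrset (LC_DISCOVERY, "  addresses:", as);
nn_log (LC_DISCOVERY, "\n");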
Example No. 6
static void*
in_discoveryClientWriterMonitor(
        UNUSED_ARG (void* arg))
{
    c_bool result, sendTo, more;
    v_message message;
    c_ulong sequenceNumber, priority;
    v_gid sender, receiver;
    c_time sendBefore;
    v_networkReaderEntry entry;
    os_time sleepTime;
    in_writer writer;
    os_boolean proceed = TRUE;
    v_networkReaderWaitResult nrwr;
    struct nn_xpack *xp;

    xp = nn_xpack_new ();
    if(vclientQueue){
        sleepTime.tv_sec = 0;
        sleepTime.tv_nsec = 50 * 1000 * 1000;

        while(!terminate && proceed){
            nrwr = v_networkReaderWait(vclientReader, queueId, &vclientQueue);

            if((nrwr & V_WAITRESULT_MSGWAITING) == V_WAITRESULT_MSGWAITING){
                more = TRUE;

                while(more){
                    result = v_networkQueueTakeFirst(
                        vclientQueue, &message, &entry, &sequenceNumber, &sender,
                        &sendTo, &receiver, &sendBefore, &priority, &more);

                    if(result){
                        os_mutexLock (&gluelock);
                        writer = in_writerLookup(&sender);
                        if (writer)
                          proceed = in_writerWrite(xp, writer, message);
                        os_mutexUnlock (&gluelock);
                        c_free (message);
                    }
                }
                nn_xpack_send (xp);
            } else if((nrwr & V_WAITRESULT_TRIGGERED) == V_WAITRESULT_TRIGGERED){
                nn_log (LC_TRACE, "clientReader triggered...\n");
                proceed = FALSE;
            }
        }
    }
    nn_xpack_free (xp);
    return NULL;
}
Example No. 7
static u_result create_builtin_readers (struct builtin_datareader_set *drset, u_participant p)
{
  v_subscriberQos sQos = NULL;
  v_readerQos rdQos = NULL;
  v_gid gid;
  c_value ps[1];

  drset->subscriber = NULL;
  drset->participant_dr = NULL;
  drset->subscription_dr = NULL;
  drset->publication_dr = NULL;

  if ((sQos = u_subscriberQosNew (NULL)) == NULL)
    goto fail;
  sQos->presentation.access_scope = V_PRESENTATION_TOPIC;
  if ((sQos->partition = os_strdup ("__BUILT-IN PARTITION__")) == NULL)
    goto fail;

  if ((rdQos = u_readerQosNew (NULL)) == NULL)
    goto fail;
  rdQos->durability.kind = V_DURABILITY_TRANSIENT;
  rdQos->reliability.kind = V_RELIABILITY_RELIABLE;
  rdQos->history.kind = V_HISTORY_KEEPLAST;
  rdQos->history.depth = 1;

  if ((drset->subscriber = u_subscriberNew (p, "DDSI2BuiltinSubscriber", sQos, TRUE)) == NULL)
    goto fail;
  gid = u_entityGid ((u_entity) drset->subscriber);
  ps[0].kind = V_ULONG;
  ps[0].is.ULong = gid.systemId;
  nn_log (LC_TRACE, "create_builtin_readers: systemId = %lx\n", (unsigned long) ps[0].is.ULong);

  if ((drset->participant_dr = u_subscriberCreateDataReader (drset->subscriber, "DCPSParticipantReader", "select * from DCPSParticipant where key.systemId = %0", ps, rdQos, TRUE)) == NULL)
    goto fail;
  if ((drset->subscription_dr = u_subscriberCreateDataReader (drset->subscriber, "DCPSSubscriptionReader", "select * from DCPSSubscription where key.systemId = %0", ps, rdQos, TRUE)) == NULL)
    goto fail;
  if ((drset->publication_dr = u_subscriberCreateDataReader (drset->subscriber, "DCPSPublicationReader", "select * from DCPSPublication where key.systemId = %0", ps, rdQos, TRUE)) == NULL)
    goto fail;
  u_readerQosFree (rdQos);
  u_subscriberQosFree (sQos);
  return U_RESULT_OK;

 fail:
  destroy_builtin_readers (drset);
  if (rdQos)
    u_readerQosFree (rdQos);
  if (sQos)
    u_subscriberQosFree (sQos);
  return U_RESULT_INTERNAL_ERROR;
}
Example No. 8
static char *find_template(nproxy_connection_t * conn, int code, const char *suggested) {
    char *file = NULL;
    char *tpl_dir = NULL;

    tpl_dir = conn->profile->template_dir;

    if (suggested != NULL) {
        if (!tpl_dir) {
            file = (char *) suggested;
            if ((apr_file_exists(conn->pool, file) == APR_STATUS_SUCCESS) && (apr_is_file(conn->pool, file) == APR_STATUS_SUCCESS)) {
                return file;
            }
        } else {
            file = apr_psprintf(conn->pool, "%s/%s", tpl_dir, suggested);
            if ((apr_file_exists(conn->pool, file) == APR_STATUS_SUCCESS) && (apr_is_file(conn->pool, file) == APR_STATUS_SUCCESS)) {
                return file;
            }
        }

        nn_log(NN_LOG_WARNING, "Cannot find template file (%s)", suggested);
    }

    if (tpl_dir == NULL) {
        file = apr_psprintf(conn->pool, "%d.html", code);
        if ((apr_file_exists(conn->pool, file) == APR_STATUS_SUCCESS) && (apr_is_file(conn->pool, file) == APR_STATUS_SUCCESS)) {
            return file;
        }

        file = "default.html";
        if ((apr_file_exists(conn->pool, file) == APR_STATUS_SUCCESS) && (apr_is_file(conn->pool, file) == APR_STATUS_SUCCESS)) {
            return file;
        }
    } else {
        file = apr_psprintf(conn->pool, "%s/%d.html", tpl_dir, code);
        if ((apr_file_exists(conn->pool, file) == APR_STATUS_SUCCESS) && (apr_is_file(conn->pool, file) == APR_STATUS_SUCCESS)) {
            return file;
        }

        file = apr_psprintf(conn->pool, "%s/default.html", tpl_dir);
        if ((apr_file_exists(conn->pool, file) == APR_STATUS_SUCCESS) && (apr_is_file(conn->pool, file) == APR_STATUS_SUCCESS)) {
            return file;
        }
    }

    return NULL;
}
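
find_template pairs naturally with parse_template from Example No. 11: the first resolves a template file for an HTTP status code, the second reads that file and expands its {variable} placeholders from conn->var_hash. A hypothetical glue function combining the two (the name build_error_page is illustrative) could look like this:

static char *build_error_page (nproxy_connection_t *conn, int code)
{
    char *tpl;

    /* Resolve the template path, then expand its placeholders. */
    tpl = find_template(conn, code, NULL);
    if (tpl == NULL) {
        return NULL;
    }
    return parse_template(conn, tpl);   /* pool-allocated; freed with conn->pool */
}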
Example No. 9
static int add_addresses_to_addrset_1 (struct addrset *as, const char *ip, int port_mode, const char *msgtag)
{
  char buf[INET6_ADDRSTRLEN_EXTENDED];
  os_sockaddr_storage addr;

  if (!os_sockaddrStringToAddress (ip, (os_sockaddr *) &addr, !config.useIpv6))
  {
    NN_WARNING2 ("%s: %s: not a valid address\n", msgtag, ip);
    return -1;
  }

  if (port_mode >= 0)
  {
    sockaddr_set_port (&addr, port_mode);
    nn_log (LC_CONFIG, "%s: add %s", msgtag, sockaddr_to_string_with_port (buf, &addr));
    add_to_addrset (as, &addr);
  }
  else
  {
    sockaddr_set_port (&addr, 0);
    nn_log (LC_CONFIG, "%s: add ", msgtag);
    if (!is_mcaddr (&addr))
    {
      int i;
      for (i = 0; i < 10; i++)
      {
        int port = config.port_base + config.port_dg * config.domainId + i * config.port_pg + config.port_d1;
        sockaddr_set_port (&addr, port);
        if (i == 0)
          nn_log (LC_CONFIG, "%s", sockaddr_to_string_with_port (buf, &addr));
        else
          nn_log (LC_CONFIG, ", :%d", port);
        add_to_addrset (as, &addr);
      }
    }
    else
    {
      int port = port_mode;
      if (port == -1)
        port = config.port_base + config.port_dg * config.domainId + config.port_d0;
      sockaddr_set_port (&addr, port);
      nn_log (LC_CONFIG, "%s", sockaddr_to_string_with_port (buf, &addr));
      add_to_addrset (as, &addr);
    }
  }

  nn_log (LC_CONFIG, "\n");
  return 0;
}
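
The _1 suffix suggests this function is the per-address worker behind a wrapper that splits a configuration string into individual addresses. The wrapper below is only a sketch: the tokenization is an assumption, and apart from add_addresses_to_addrset_1, os_strdup and os_free (all used elsewhere in these examples) plus strcspn from <string.h>, nothing here is taken from the original sources.

static int add_addresses_to_addrset (struct addrset *as, const char *addrs,
                                     int port_mode, const char *msgtag)
{
  char *copy = os_strdup (addrs);
  char *cursor = copy;
  int rc = 0;
  while (rc >= 0 && *cursor != 0)
  {
    /* A token runs up to the next comma or whitespace character. */
    char *tok = cursor;
    size_t len = strcspn (cursor, ", \t");
    cursor += len;
    if (*cursor != 0)
      *cursor++ = 0;
    if (len > 0)
      rc = add_addresses_to_addrset_1 (as, tok, port_mode, msgtag);
  }
  os_free (copy);
  return rc;
}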
Example No. 10
static void
in_discoveryWatchSpliced(
    v_serviceStateKind spliceDaemonState,
    UNUSED_ARG (c_voidp usrData))
{
    switch(spliceDaemonState){
    case STATE_TERMINATING:
    case STATE_TERMINATED:
    case STATE_DIED:
        nn_log (LC_INFO, "Splicedaemon is terminating and so am I...\n");
        terminate = TRUE;
        u_serviceChangeState(u_service(participant), STATE_TERMINATING);
        os_mutexLock (&gluelock);
        if (vclientReader)
          v_networkReaderTrigger(vclientReader, queueId);
        os_mutexUnlock (&gluelock);
        break;
    default:
        break;
    }
}
Example No. 11
static char *parse_template(nproxy_connection_t * conn, const char *file) {
    apr_file_t *f;
    char b[4096];
    apr_size_t blen;
    apr_status_t status;
    char *in_p = NULL;
    nn_buffer_t *bufferin;
    size_t out_size;
    char *out_p = NULL;
    nn_buffer_t *bufferout;

    bufferin = nn_buffer_init(conn->pool, 0, 4096);
    if (!bufferin) {
        return NULL;
    }

    bufferout = nn_buffer_init(conn->pool, 0, 4096);
    if (bufferout == NULL) {
        return NULL;
    }

    if (APR_STATUS_SUCCESS != apr_file_open(&f, file, APR_READ, 0, conn->pool)) {
        nn_log(NN_LOG_ERROR, "Cannot open template file %s", file);
        return NULL;
    }

    do {
        blen = sizeof(b);
        status = apr_file_read(f, b, &blen);
        if (blen > 0) {
            if (nn_buffer_append(bufferin, b, blen) != APR_STATUS_SUCCESS) {
                return NULL;
            }
        }
    } while (blen > 0);
    apr_file_close(f);

    if (status != APR_EOF) {
        /* An error occurred reading the file */
        nn_log(NN_LOG_ERROR, "Error reading template file %s", file);
        return NULL;
    }

    /* Now we have the full content of the file. We must start evaluating our substitutions. */
    nn_buffer_get_char(bufferin, &in_p);
    if (in_p == NULL) {
        return NULL;
    }

    if (conn->var_hash) {
        apr_hash_index_t *hi;

        for (hi = apr_hash_first(conn->pool, conn->var_hash); hi; hi = apr_hash_next(hi)) {
            apr_ssize_t vlen;
            const char *var = NULL;
            char *val = NULL;

            apr_hash_this(hi, (const void **) &var, &vlen, (void *) &val);

            if (!zstr(var) && !zstr(val)) {
                char *s1 = NULL;

                s1 = apr_psprintf(conn->pool, "{%s}", var);

                nn_buffer_get_char(bufferin, &in_p);
                if (in_p) {
                    nn_buffer_reset(bufferout);
                    do_subst(in_p, bufferout, s1, val);

                    out_size = nn_buffer_get_char(bufferout, &out_p);
                    if (out_size && out_p) {
                        nn_buffer_reset(bufferin);
                        nn_buffer_append(bufferin, out_p, out_size);
                    }
                }
            }

        }
    }

    /* We won't destroy the buffer, it will be deleted with the pool */
    nn_buffer_get_char(bufferin, &in_p);

    return in_p;
}
Example No. 12
int
main(
    int argc,
    char* argv[])
{
    int result = 0;
    v_participantQos participantQos;
    u_result uresult;
    os_boolean success;
    v_subscriberQos subscriberQos;
    c_time resolution;
    c_base base;
    v_kernel kernel;

    /* Necessary to initialize the user layer. Do this just once per process.*/
    mlv_init ();
    uresult = u_userInitialise();
    mlv_setforreal (1);

    if(uresult == U_RESULT_OK){
        /* Allocate default participant qos*/
        participantQos = u_participantQosNew(NULL);

        {
          os_mutexAttr mattr;
          os_mutexAttrInit (&mattr);
          mattr.scopeAttr = OS_SCOPE_PRIVATE;
          os_mutexInit (&gluelock, &mattr);
        }

        if(participantQos){
            if(argc > 1){
                SERVICE_NAME = argv[1];
            }
            if(argc > 2){
                SERVICE_URI = argv[2];
            }
            /*create participant*/
            participant = u_participant(u_serviceNew(
                                SERVICE_URI, 0, SERVICE_NAME,
                                NULL,
                                U_SERVICE_NETWORKING,
                                (v_qos)participantQos));

            if(participant){
                struct cfgst *cfgst;
                ddsi2_participant_gid = u_entityGid (u_entity (participant));

                /*Notify kernel that I am initializing. */
                u_serviceChangeState(u_service(participant),STATE_INITIALISING);

                /*Start monitoring the splicedaemon state. I need to terminate if he says so*/
                u_serviceWatchSpliceDaemon(
                        u_service(participant),
                        in_discoveryWatchSpliced,
                        &terminate);

                if ((cfgst = config_init (participant, SERVICE_NAME)) != NULL)
                {
                  unsigned rtps_flags = 0;
                  struct nn_servicelease *servicelease;

                  open_tracing_file ();
                  /* Dependencies between default values are not
                     handled automatically by the config processing
                     (yet) */
                  if (config.many_sockets_mode)
                  {
                    if (config.max_participants == 0)
                      config.max_participants = 100;
                  }
                  if (NN_STRICT_P)
                  {
                    /* Should not be sending invalid messages when strict */
                    config.respond_to_rti_init_zero_ack_with_invalid_heartbeat = 0;
                    config.acknack_numbits_emptyset = 1;
                  }
                  config_print_and_free_cfgst (cfgst);

                  servicelease = nn_servicelease_new (participant);
                  nn_servicelease_start_renewing (servicelease);

                  myNetworkId = getNetworkId ();

                  u_entityAction(u_entity(participant), resolveKernelService, NULL);
                  base = c_getBase(service);
                  kernel = v_object(service)->kernel;
                  rtps_init (base, kernel, config.domainId, config.participantIndex,
                             rtps_flags, config.networkAddressString, config.peers);

                  /* Initialize entity administration. */
                  success = in_entityAdminInit(participant);

                  if(success){
                    /*Create subscriber to receive client writers' messages.*/
                    subscriberQos = u_subscriberQosNew(NULL);
                    os_free(subscriberQos->partition);
                    subscriberQos->partition = NULL;

                    clientSubscriber = u_subscriberNew(
                            participant,
                            "clientSubscriber",
                            subscriberQos,
                            TRUE);

                    if(clientSubscriber){
                      /*Create networkReader to be able to receive client writers' messages.*/
                      clientReader = u_networkReaderNew(
                              clientSubscriber,
                              "clientReader",
                              NULL,
                              TRUE);

                      if(clientReader){
                        resolution.seconds = 0;
                        resolution.nanoseconds = 10 * 1000 * 1000; /*10 ms*/

                        /*Create network queue*/
                        uresult = u_networkReaderCreateQueue(
                                clientReader,
                                1000, 0, FALSE, FALSE,
                                resolution,
                                TRUE, &queueId,
                                "DDSi");

                        if(uresult == U_RESULT_OK){
                          struct builtin_datareader_set drset;
                          u_entityAction(u_entity(clientReader), resolveKernelReader, NULL);

                          uresult = create_builtin_readers (&drset, participant);
                          if (uresult == U_RESULT_OK)
                          {
                            u_serviceChangeState(u_service(participant),STATE_OPERATIONAL);
                            uresult = attachAndMonitor(participant, &drset);

                            if((uresult != U_RESULT_OK) &&
                               (uresult != U_RESULT_DETACHING))
                            {
                              nn_log (LC_ERROR, "Abnormal termination...\n");
                              result = -1;
                            } else {
                              nn_log (LC_INFO, "Deleting entities...\n");
                            }
                            destroy_builtin_readers (&drset);
                          } else {
                            nn_log (LC_FATAL, "Could not create subscription + readers for builtin topics.\n");
                            result = -1;
                          }
                          terminate = TRUE;
                          v_networkReaderTrigger(vclientReader, queueId);
                          os_threadWaitExit(clientWriterThread, NULL);
                        } else {
                          nn_log (LC_FATAL, "Could not create networkQueue.\n");
                          result = -1;
                        }
                        /*Clean up networkReader*/
                        os_mutexLock (&gluelock);
                        u_networkReaderFree(clientReader);
                        clientReader = NULL;
                        vclientReader = NULL;
                        os_mutexUnlock (&gluelock);
                      } else {
                        nn_log (LC_FATAL, "Could not create networkReader.\n");
                        result = -1;
                      }
                      /*Clean up subscriber*/
                      u_subscriberFree(clientSubscriber);
                    } else {
                      nn_log (LC_FATAL, "Could not create subscriber.\n");
                      result = -1;
                    }

                    /*Clean up entity administration*/
                    in_entityAdminDestroy();
                  } else {
                    nn_log (LC_FATAL, "Could not initialize entity adminstration.\n");
                    result = -1;
                  }

                  /* RTPS layer now defines types, cleanup before detaching */
                  rtps_term();

                  /*Notify kernel that I've terminated*/
                  u_serviceChangeState(u_service(participant),STATE_TERMINATED);
                  nn_servicelease_free (servicelease);
                  /*Delete participant*/
                  uresult = u_serviceFree(u_service(participant));

                  if(uresult != U_RESULT_OK){
                    nn_log (LC_FATAL, "Deletion of participant failed.\n");
                    result = -1;
                  }
                } else {
                    nn_log (LC_FATAL, "Initialization of configuration failed.\n");
                    result = -1;
                }
            } else {
                nn_log (LC_FATAL, "Could not create participant.\n");
                result = -1;
            }
            u_participantQosFree (participantQos);
        } else {
            nn_log (LC_FATAL, "Could not allocate participantQos.\n");
            result = -1;
        }
        os_mutexDestroy (&gluelock);
        /* Detach user layer */
        mlv_setforreal (0);
        uresult = u_userDetach();
        mlv_fini ();

        if(uresult != U_RESULT_OK){
            nn_log (LC_FATAL, "Detachment of user layer failed.\n");
            result = -1;
        }
    } else {
        nn_log (LC_FATAL, "Initialization of user layer failed.\n");
        result = -1;
    }
    nn_log (LC_INFO, "Finis.\n");

    /* Must be really late, or nn_log becomes unhappy -- but it should
       be before os_osExit (which appears to be called from
       u_userExit(), which is not called by u_userDetach but by an
       exit handler, it appears.) */
    config_fini ();
    return result;
}
Example No. 13
static u_result
startMonitoring(
    const u_participant participant,
    const u_waitset waitset,
    const struct builtin_datareader_set *drset)
{
    c_iter events, topics;
    u_waitsetEvent event;
    c_time timeout;
    os_uint32 reportInterval;
    v_gid participantGid, publicationGid, subscriptionGid, gid;
    u_result result;
    u_dataReader dataReader;
    u_topic topic;
    c_iter vgroups;
    v_group vgroup;
    v_duration duration;
    c_long participantOffset, publicationOffset, subscriptionOffset;
    os_threadAttr attr;
    os_result osr;

    /*Resolve unique identifications of readers*/
    participantGid  = u_entityGid((u_entity)drset->participant_dr);
    publicationGid  = u_entityGid((u_entity)drset->publication_dr);
    subscriptionGid = u_entityGid((u_entity)drset->subscription_dr);

    /*Resolve topics to find offsets in the data. The offsets are used later on*/
    duration.seconds = 0;
    duration.nanoseconds = 0;

    topics = u_participantFindTopic(participant, V_PARTICIPANTINFO_NAME, duration);
    topic  = c_iterTakeFirst(topics);

    if(topic){
        result = u_entityAction(u_entity(topic), resolveOffset, &participantOffset);
    } else {
        result = U_RESULT_INTERNAL_ERROR;
        nn_log (LC_FATAL, "Could not resolve participant info offset.\n");
    }
    c_iterFree(topics);

    if(result == U_RESULT_OK){
        topics = u_participantFindTopic(participant, V_PUBLICATIONINFO_NAME, duration);
        topic  = c_iterTakeFirst(topics);

        if(topic){
            result = u_entityAction(u_entity(topic), resolveOffset, &publicationOffset);
        } else {
            result = U_RESULT_INTERNAL_ERROR;
            nn_log (LC_FATAL, "Could not resolve publication info offset.\n");
        }
        c_iterFree(topics);
    }

    if(result == U_RESULT_OK){
        topics = u_participantFindTopic(participant, V_SUBSCRIPTIONINFO_NAME, duration);
        topic  = c_iterTakeFirst(topics);

        if(topic){
            result = u_entityAction(u_entity(topic), resolveOffset, &subscriptionOffset);
        } else {
            result = U_RESULT_INTERNAL_ERROR;
            nn_log (LC_FATAL, "Could not resolve subscription info offset.\n");
        }
        c_iterFree(topics);
    }

    if(result == U_RESULT_OK){
        timeout.seconds     = 0;
        timeout.nanoseconds = 100 * 1000 * 1000; /*100 ms*/

        nn_log (LC_TRACE, "Collecting initial entities...\n");
        result = handleParticipant(drset->participant_dr, participantOffset, 0);

        if(result == U_RESULT_OK){
            result = handlePublication(drset->publication_dr, publicationOffset,
                    drset->participant_dr, participantOffset);

            if(result == U_RESULT_OK){
                result = handleSubscription(drset->subscription_dr, subscriptionOffset,
                        drset->participant_dr, participantOffset);

                if(result == U_RESULT_OK){
                    vgroups = v_serviceTakeNewGroups(service);
                    vgroup = (v_group)c_iterTakeFirst(vgroups);

                    while(vgroup && result == U_RESULT_OK){
                        result = handleGroup(service, vgroup);
                        c_free(vgroup);
                        vgroup = (v_group)c_iterTakeFirst(vgroups);
                    }
                    c_iterFree(vgroups);

                    if(result == U_RESULT_OK){
                        nn_log (LC_TRACE, "Waiting for entities to be created/deleted...\n");
                    } else {
                        nn_log (LC_FATAL, "Could not collect initial groups...\n");
                    }
                } else {
                    nn_log (LC_FATAL, "Could not collect initial subscriptions...\n");
                }
            } else {
                nn_log (LC_FATAL, "Could not collect initial publications...\n");
            }
        } else {
            nn_log (LC_FATAL, "Could not collect initial participants...\n");
        }
    }

    osr = os_threadAttrInit(&attr);

    if(osr == os_resultSuccess){
        osr = os_threadCreate(&clientWriterThread,
                "clientWriterMonitor", &attr,
                in_discoveryClientWriterMonitor, NULL);

        if(osr != os_resultSuccess){
            result = U_RESULT_INTERNAL_ERROR;
        }
    } else {
        result = U_RESULT_INTERNAL_ERROR;
    }
    reportInterval = 0;

    while(result == U_RESULT_OK && !terminate){
        events = NULL;
        /*Wait for events to occur*/
        result = u_waitsetTimedWaitEvents(waitset, timeout, &events);

        if(result == U_RESULT_OK){
            event = (u_waitsetEvent)(c_iterTakeFirst(events));

            while(event){
                if(((event->events) & V_EVENT_DATA_AVAILABLE) ==
                    V_EVENT_DATA_AVAILABLE)
                {
                    if(event->entity){
                        dataReader = (u_dataReader)event->entity;
                        gid        = u_entityGid(event->entity);

                        if(v_gidCompare(gid, participantGid) == C_EQ){
                            result = handleParticipant(
                                    drset->participant_dr, participantOffset, 0);
                        } else if(v_gidCompare(gid, subscriptionGid) == C_EQ){
                            result = handleSubscription(
                                    drset->subscription_dr, subscriptionOffset,
                                    drset->participant_dr, participantOffset);
                        } else if(v_gidCompare(gid, publicationGid) == C_EQ){
                            result = handlePublication(
                                    drset->publication_dr, publicationOffset,
                                    drset->participant_dr, participantOffset);
                        } else {
                            nn_log (LC_FATAL,
                                    "This is impossible...at least in my understanding of the world.\n");
                            result = U_RESULT_INTERNAL_ERROR;
                        }
                    } else {
                        nn_log (LC_WARNING, "DATA_AVAILABLE (%d) but no entity.\n", event->events);
                    }
                } else if(((event->events) & V_EVENT_NEW_GROUP) ==
                    V_EVENT_NEW_GROUP)
                {
                    vgroups = v_serviceTakeNewGroups(service);
                    vgroup = (v_group)c_iterTakeFirst(vgroups);

                    while(vgroup && result == U_RESULT_OK){
                        result = handleGroup(service, vgroup);
                        c_free(vgroup);
                        vgroup = (v_group)c_iterTakeFirst(vgroups);
                    }
                    c_iterFree(vgroups);
                } else {
                    nn_log (LC_FATAL, "Received unexpected event %d.\n", event->events);
                    result = U_RESULT_INTERNAL_ERROR;
                }
                u_waitsetEventFree(event);
                event = (u_waitsetEvent)(c_iterTakeFirst(events));
            }
        } else if(result == U_RESULT_DETACHING){
            nn_log (LC_INFO, "Starting termination now...\n");
        } else if(result == U_RESULT_TIMEOUT){
            result = U_RESULT_OK;
        } else {
            nn_log (LC_FATAL, "Waitset wait failed.\n");
        }
        if(events){/* events may be null if waitset was deleted */
            c_iterFree(events);
        }
        reportInterval++;

        if(reportInterval >= 5){
            /*reportEntities();*/
            reportInterval = 0;
        }
    }
    return result;
}
Example No. 14
static u_result
attachAndMonitor(
    const u_participant participant,
    const struct builtin_datareader_set *drset)
{
    u_waitset waitset;
    u_dataReader dataReader;
    c_iter readers;
    u_result result;
    c_long i, length;

    result = U_RESULT_INTERNAL_ERROR;
    readers = NULL;
    length = 0;
    /*Create waitset.*/
    waitset = u_waitsetNew(participant);

    if(waitset){
        /*Set event mask of the waitset.*/
        result = u_waitsetSetEventMask(waitset, V_EVENT_DATA_AVAILABLE | V_EVENT_NEW_GROUP | V_EVENT_SERVICESTATE_CHANGED);

        if(result == U_RESULT_OK){
            result = u_dispatcherSetEventMask(
                    (u_dispatcher)participant, V_EVENT_NEW_GROUP | V_EVENT_SERVICESTATE_CHANGED);

            if(result == U_RESULT_OK){
                v_serviceFillNewGroups(service);
                result = u_waitsetAttach(
                        waitset, (u_entity)participant,
                        (u_entity)participant);

                if(result != U_RESULT_OK){
                    nn_log (LC_FATAL, "Could not attach datareader to waitset.\n");
                }
            } else {
                nn_log (LC_FATAL, "Could not set event mask of participant.");
            }

            if(result == U_RESULT_OK){
                readers     = c_iterNew(drset->participant_dr);
                readers     = c_iterInsert(readers, drset->publication_dr);
                readers     = c_iterInsert(readers, drset->subscription_dr);

                result     = U_RESULT_OK;
                length     = c_iterLength(readers);

                for(i=0; i<length && (result == U_RESULT_OK); i++){
                    dataReader = (u_dataReader)(c_iterObject(readers, i));

                    /*Set event mask of the datareader to trigger on available data.*/
                    result = u_dispatcherSetEventMask(
                                (u_dispatcher)dataReader, V_EVENT_DATA_AVAILABLE);

                    if(result == U_RESULT_OK){
                        /*Attach reader to the waitset.*/
                        result = u_waitsetAttach(
                                waitset, (u_entity)dataReader, (u_entity)dataReader);

                        if(result != U_RESULT_OK){
                            nn_log (LC_FATAL, "Could not attach datareader to waitset.\n");
                        }
                    } else {
                        nn_log (LC_FATAL, "Could not set event mask of datareader.\n");
                    }
                }
            }
        } else {
            nn_log (LC_FATAL, "Could not set event mask of waitset.\n");
        }


        if(result == U_RESULT_OK){
            /*Start monitoring the creation/deletion of entities.*/
            result = startMonitoring(participant, waitset, drset);
        }
        u_waitsetDetach(waitset, u_entity(participant));

        if(readers){
            /*Detach all datareaders from the waitset.*/
            for(i=0; i<length; i++){
                u_waitsetDetach(waitset, (u_entity)(c_iterObject(readers, i)));
            }
            c_iterFree(readers);
        }
        /*Delete the waitset.*/
        result = u_waitsetFree(waitset);

        if(result != U_RESULT_OK){
            nn_log (LC_FATAL, "Deletion of waitset failed.\n");
        }
    } else {
        nn_log (LC_FATAL, "Could not create waitset.\n");
    }

    return result;
}
Example No. 15
int find_own_ip (const char *requested_address)
{
  const char *sep = " ";
  char last_if_name[80] = "";
  int quality = -1;
  os_result res;
  int i;
  unsigned int nif;
  os_ifAttributes *ifs;
  int maxq_list[MAX_INTERFACES];
  int maxq_count = 0;
  int maxq_strlen = 0;
  int selected_idx = -1;
  char addrbuf[INET6_ADDRSTRLEN_EXTENDED];

  if ((ifs = os_malloc (MAX_INTERFACES * sizeof (*ifs))) == NULL)
  {
    NN_FATAL0 ("ddsi2: insufficient memory for enumerating network interfaces\n");
    return 0;
  }

  nn_log (LC_CONFIG, "interfaces:");

  if (config.useIpv6)
    res = os_sockQueryIPv6Interfaces (ifs, (os_uint32) MAX_INTERFACES, &nif);
  else
    res = os_sockQueryInterfaces (ifs, (os_uint32) MAX_INTERFACES, &nif);
  if (res != os_resultSuccess)
  {
    NN_ERROR1 ("os_sockQueryInterfaces: %d\n", (int) res);
    os_free (ifs);
    return 0;
  }

  gv.n_interfaces = 0;
  for (i = 0; i < (int) nif; i++, sep = ", ")
  {
    os_sockaddr_storage tmpip, tmpmask;
    char if_name[sizeof (last_if_name)];
    int q = 0;

    os_strncpy (if_name, ifs[i].name, sizeof (if_name) - 1);
    if_name[sizeof (if_name) - 1] = 0;

    if (strcmp (if_name, last_if_name))
      nn_log (LC_CONFIG, "%s%s", sep, if_name);
    os_strcpy (last_if_name, if_name);

    /* interface must be up */
    if ((ifs[i].flags & IFF_UP) == 0)
    {
      nn_log (LC_CONFIG, " (interface down)");
      continue;
    }

    tmpip = ifs[i].address;
    tmpmask = ifs[i].network_mask;
    sockaddr_to_string_no_port (addrbuf, &tmpip);
    nn_log (LC_CONFIG, " %s", addrbuf);

    if (ifs[i].flags & IFF_LOOPBACK)
    {
      /* Loopback device has the lowest priority of every interface
         available, because the other interfaces at least in principle
         allow communicating with other machines. */
      q += 0;
#if OS_SOCKET_HAS_IPV6
      if (!(tmpip.ss_family == AF_INET6 && IN6_IS_ADDR_LINKLOCAL (&((os_sockaddr_in6 *) &tmpip)->sin6_addr)))
        q += 1;
#endif
    }
    else
    {
#if OS_SOCKET_HAS_IPV6
      /* We accept link-local IPv6 addresses, but an interface with a
         link-local address will end up lower in the ordering than one
         with a global address.  When forced to use a link-local
         address, we restrict ourselves to operating on that one
         interface only and assume any advertised (incoming) link-local
         address belongs to that interface.  FIXME: this is wrong, and
         should be changed to tag addresses with the interface over
         which it was received.  But that means proper multi-homing
         support and has quite an impact in various places, not least of
         which is the abstraction layer. */
      if (!(tmpip.ss_family == AF_INET6 && IN6_IS_ADDR_LINKLOCAL (&((os_sockaddr_in6 *) &tmpip)->sin6_addr)))
        q += 5;
#endif

      /* We strongly prefer a multicast capable interface, if that's
         not available anything that's not point-to-point, or else we
         hope IP routing will take care of the issues. */
      if (ifs[i].flags & IFF_MULTICAST)
        q += 4;
      else if (!(ifs[i].flags & IFF_POINTOPOINT))
        q += 3;
      else
        q += 2;
    }

    nn_log (LC_CONFIG, "(q%d)", q);
    if (q == quality) {
      maxq_list[maxq_count] = gv.n_interfaces;
      maxq_strlen += 2 + strlen (if_name);
      maxq_count++;
    } else if (q > quality) {
      maxq_list[0] = gv.n_interfaces;
      maxq_strlen += 2 + strlen (if_name);
      maxq_count = 1;
      quality = q;
    }

    gv.interfaces[gv.n_interfaces].addr = tmpip;
    gv.interfaces[gv.n_interfaces].netmask = tmpmask;
    gv.interfaces[gv.n_interfaces].mc_capable = ((ifs[i].flags & IFF_MULTICAST) != 0);
    gv.interfaces[gv.n_interfaces].point_to_point = ((ifs[i].flags & IFF_POINTOPOINT) != 0);
    gv.interfaces[gv.n_interfaces].if_index = ifs[i].interfaceIndexNo;
    gv.interfaces[gv.n_interfaces].name = os_strdup (if_name);
    gv.n_interfaces++;
  }
  nn_log (LC_CONFIG, "\n");
  os_free (ifs);

  if (requested_address == NULL)
  {
    if (maxq_count > 1)
    {
      const int idx = maxq_list[0];
      char *names;
      sockaddr_to_string_no_port (addrbuf, &gv.interfaces[idx].addr);
      if ((names = os_malloc (maxq_strlen + 1)) == NULL)
        NN_WARNING2 ("using network interface %s (%s) out of multiple candidates\n",
                     gv.interfaces[idx].name, addrbuf);
      else
      {
        int p = 0;
        for (i = 0; i < maxq_count; i++)
          p += snprintf (names + p, maxq_strlen - p, ", %s", gv.interfaces[maxq_list[i]].name);
        NN_WARNING3 ("using network interface %s (%s) selected arbitrarily from: %s\n",
                     gv.interfaces[idx].name, addrbuf, names + 2);
        os_free (names);
      }
    }

    if (maxq_count > 0)
      selected_idx = maxq_list[0];
    else
      NN_ERROR0 ("failed to determine default own IP address\n");
  }
  else
  {
    os_sockaddr_storage req;
    if (!os_sockaddrStringToAddress (config.networkAddressString, (os_sockaddr *) &req, !config.useIpv6))
    {
      /* Presumably an interface name */
      for (i = 0; i < gv.n_interfaces; i++)
        if (strcmp (gv.interfaces[i].name, config.networkAddressString) == 0)
          break;
    }
    else
    {
      /* Try an exact match on the address */
      for (i = 0; i < gv.n_interfaces; i++)
        if (os_sockaddrIPAddressEqual ((os_sockaddr *) &gv.interfaces[i].addr, (os_sockaddr *) &req))
          break;
      if (i == gv.n_interfaces && !config.useIpv6)
      {
        /* Try matching on network portion only, where the network
           portion is based on the netmask of the interface under
           consideration */
        for (i = 0; i < gv.n_interfaces; i++)
        {
          os_sockaddr_storage req1 = req, ip1 = gv.interfaces[i].addr;
          assert (req1.ss_family == AF_INET);
          assert (ip1.ss_family == AF_INET);

          /* If the host portion of the requested address is non-zero,
             skip this interface */
          if (((os_sockaddr_in *) &req1)->sin_addr.s_addr &
              ~((os_sockaddr_in *) &gv.interfaces[i].netmask)->sin_addr.s_addr)
            continue;

          ((os_sockaddr_in *) &req1)->sin_addr.s_addr &=
            ((os_sockaddr_in *) &gv.interfaces[i].netmask)->sin_addr.s_addr;
          ((os_sockaddr_in *) &ip1)->sin_addr.s_addr &=
            ((os_sockaddr_in *) &gv.interfaces[i].netmask)->sin_addr.s_addr;
          if (os_sockaddrIPAddressEqual ((os_sockaddr *) &ip1, (os_sockaddr *) &req1))
            break;
        }
      }
    }

    if (i < gv.n_interfaces)
      selected_idx = i;
    else
      NN_ERROR1 ("%s: does not match an available interface\n", config.networkAddressString);
  }

  if (selected_idx < 0)
    return 0;
  else
  {
    gv.ownip = gv.interfaces[selected_idx].addr;
    sockaddr_set_port (&gv.ownip, 0);
    gv.selected_interface = selected_idx;
    gv.interfaceNo = gv.interfaces[selected_idx].if_index;
#if OS_SOCKET_HAS_IPV6
    if (config.useIpv6)
    {
      assert (gv.ownip.ss_family == AF_INET6);
      gv.ipv6_link_local =
        IN6_IS_ADDR_LINKLOCAL (&((os_sockaddr_in6 *) &gv.ownip)->sin6_addr) != 0;
    }
    else
    {
      gv.ipv6_link_local = 0;
    }
#endif
    nn_log (LC_CONFIG, "selected interface: %s (index %u)\n",
            gv.interfaces[selected_idx].name, (unsigned) gv.interfaceNo);
    return 1;
  }
}
Example No. 16
static void *lease_renewal_thread (struct nn_servicelease *sl)
{
  /* Do not check more often than once every 100ms (no particular
     reason why it has to be 100ms), regardless of the lease settings.
     Note: can't trust sl->self, may have been scheduled before the
     assignment. */
  const os_int64 min_progress_check_intv = 100 * T_MILLISECOND;
  struct thread_state1 *self = lookup_thread_state ();
  nn_mtime_t next_thread_cputime = { 0 };
  nn_mtime_t tlast = { 0 };
  int was_alive = 1;
  unsigned i;
  for (i = 0; i < thread_states.nthreads; i++)
  {
    sl->av_ary[i].alive = 1;
    sl->av_ary[i].wd = thread_states.ts[i].watchdog - 1;
  }
  os_mutexLock (&sl->lock);
  while (sl->keepgoing)
  {
    unsigned n_alive = 0;
    nn_mtime_t tnow = now_mt ();

    LOG_THREAD_CPUTIME (next_thread_cputime);

    TRACE (("servicelease: tnow %"PA_PRId64":", tnow.v));

    /* Check progress only if enough time has passed: there is no
       guarantee that os_cond_timedwait won't ever return early, and we
       do want to avoid spurious warnings. */
    if (tnow.v < tlast.v + min_progress_check_intv)
    {
      n_alive = thread_states.nthreads;
    }
    else
    {
      tlast = tnow;
      for (i = 0; i < thread_states.nthreads; i++)
      {
        if (thread_states.ts[i].state != THREAD_STATE_ALIVE)
          n_alive++;
        else
        {
          vtime_t vt = thread_states.ts[i].vtime;
          vtime_t wd = thread_states.ts[i].watchdog;
          int alive = vtime_asleep_p (vt) || vtime_asleep_p (wd) || vtime_gt (wd, sl->av_ary[i].wd);
          n_alive += (unsigned) alive;
          TRACE ((" %d(%s):%c:%u:%u->%u:", i, thread_states.ts[i].name, alive ? 'a' : 'd', vt, sl->av_ary[i].wd, wd));
          sl->av_ary[i].wd = wd;
          if (sl->av_ary[i].alive != alive)
          {
            const char *name = thread_states.ts[i].name;
            const char *msg;
            if (!alive)
              msg = "failed to make progress";
            else
              msg = "once again made progress";
            NN_WARNING2 ("thread %s %s\n", name ? name : "(anon)", msg);
            sl->av_ary[i].alive = (char) alive;
          }
        }
      }
    }

    /* Only renew the lease if all threads are alive, so that one
       thread blocking for a while but not too extremely long will
       cause warnings for that thread in the log file, but won't cause
       the DDSI2 service to be marked as dead. */
    if (n_alive == thread_states.nthreads)
    {
      TRACE ((": [%d] renewing\n", n_alive));
      /* FIXME: perhaps it would be nice to control automatic
         liveliness updates from here.
         FIXME: should terminate on failure of renew_cb() */
      sl->renew_cb (sl->renew_arg);
      was_alive = 1;
    }
    else
    {
      TRACE ((": [%d] NOT renewing\n", n_alive));
      if (was_alive)
        log_stack_traces ();
      was_alive = 0;
    }

#if SYSDEPS_HAVE_GETRUSAGE
    /* If getrusage() is available, use it to log CPU and memory
       statistics to the trace.  Getrusage() can't fail if the
       parameters are valid, and these are by the book.  Still we
       check. */
    if (config.enabled_logcats & LC_TIMING)
    {
      struct rusage u;
      if (getrusage (RUSAGE_SELF, &u) == 0)
      {
        nn_log (LC_TIMING,
                "rusage: utime %d.%06d stime %d.%06d maxrss %ld data %ld vcsw %ld ivcsw %ld\n",
                (int) u.ru_utime.tv_sec, (int) u.ru_utime.tv_usec,
                (int) u.ru_stime.tv_sec, (int) u.ru_stime.tv_usec,
                u.ru_maxrss, u.ru_idrss, u.ru_nvcsw, u.ru_nivcsw);
      }
    }
#endif

    os_condTimedWait (&sl->cond, &sl->lock, sl->sleepTime);

    /* We are never active in a way that matters for the garbage
       collection of old writers, &c. */
    thread_state_asleep (self);
  }
  os_mutexUnlock (&sl->lock);
  return NULL;
}