static void *sctp_intertask_interface(void *args_p)
{
    itti_mark_task_ready(TASK_SCTP);
    MSC_START_USE();

    while(1) {
        MessageDef *received_message_p;
        itti_receive_msg(TASK_SCTP, &received_message_p);

        switch (ITTI_MSG_ID(received_message_p)) {
        case SCTP_INIT_MSG: {
            SCTP_DEBUG("Received SCTP_INIT_MSG\n");

            /* We received a request to create a new SCTP listener */
            if (sctp_create_new_listener(&received_message_p->ittiMsg.sctpInit) < 0) {
                /* SCTP socket creation or bind failed... */
                SCTP_ERROR("Failed to create new SCTP listener\n");
            }
        }
        break;

        case SCTP_CLOSE_ASSOCIATION: {
            /* Association close is not handled yet */
        } break;

        case SCTP_DATA_REQ: {
            if (sctp_send_msg(SCTP_DATA_REQ(received_message_p).assocId,
                              SCTP_DATA_REQ(received_message_p).stream,
                              SCTP_DATA_REQ(received_message_p).buffer,
                              SCTP_DATA_REQ(received_message_p).bufLen) < 0) {
                SCTP_ERROR("Failed to send message over SCTP\n");
            }
        }
        break;

        case MESSAGE_TEST: {
            //                 int i = 10000;
            //                 while(i--);
        } break;

        case TERMINATE_MESSAGE: {
            itti_exit_task();
        }
        break;

        default: {
            SCTP_DEBUG("Unkwnon message ID %d:%s\n",
                       ITTI_MSG_ID(received_message_p),
                       ITTI_MSG_NAME(received_message_p));
        }
        break;
        }

        itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), received_message_p);
        received_message_p = NULL;
    }

    return NULL;
}
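
The loop above only consumes SCTP_DATA_REQ messages; the sending side lives in a peer task. Below is a minimal sketch, not taken from the source, of how such a request could be posted using the itti_alloc_new_message()/itti_send_msg_to_task() pair that appears in the NAS example further down. TASK_S1AP as the originating task, the helper name and the exact parameter types are assumptions.

/* Hypothetical sender-side helper: queue a payload for transmission on an
 * existing SCTP association handled by the loop above. */
static int sctp_post_data_req(int32_t assoc_id, uint16_t stream,
                              uint8_t *buffer, uint32_t length)
{
    /* Allocate an ITTI message owned by the originating task (assumed S1AP) */
    MessageDef *message_p = itti_alloc_new_message(TASK_S1AP, SCTP_DATA_REQ);

    if (message_p == NULL) {
        return -1;
    }

    /* Same fields the SCTP task reads back in its SCTP_DATA_REQ case */
    SCTP_DATA_REQ(message_p).assocId = assoc_id;
    SCTP_DATA_REQ(message_p).stream  = stream;
    SCTP_DATA_REQ(message_p).buffer  = buffer;
    SCTP_DATA_REQ(message_p).bufLen  = length;

    return itti_send_msg_to_task(TASK_SCTP, INSTANCE_DEFAULT, message_p);
}

Note that the receive loop frees only the MessageDef itself via itti_free(), so ownership of the payload buffer has to be agreed between sender and receiver.
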
static void *gtpv1u_thread(void *args)
{
  itti_mark_task_ready(TASK_GTPV1_U);
  MSC_START_USE();

  while(1) {
    /* Trying to fetch a message from the message queue.
     * If the queue is empty, this function will block till a
     * message is sent to the task.
     */
    MessageDef *received_message_p = NULL;
    itti_receive_msg(TASK_GTPV1_U, &received_message_p);
    DevAssert(received_message_p != NULL);


    switch (ITTI_MSG_ID(received_message_p)) {

    case TERMINATE_MESSAGE: {
      itti_exit_task();
    }
    break;

    // DATA COMING FROM UDP
    case UDP_DATA_IND: {
      udp_data_ind_t *udp_data_ind_p;
      udp_data_ind_p = &received_message_p->ittiMsg.udp_data_ind;
      nwGtpv1uProcessUdpReq(gtpv1u_sgw_data.gtpv1u_stack,
                             udp_data_ind_p->buffer,
                             udp_data_ind_p->buffer_length,
                             udp_data_ind_p->peer_port,
                             udp_data_ind_p->peer_address);
       //itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), udp_data_ind_p->buffer);
    }
    break;

    case TIMER_HAS_EXPIRED:
      nwGtpv1uProcessTimeout(&received_message_p->ittiMsg.timer_has_expired.arg);
      break;

    default: {
      GTPU_ERROR("Unkwnon message ID %d:%s\n",
                 ITTI_MSG_ID(received_message_p),
                 ITTI_MSG_NAME(received_message_p));
    }
    break;
    }

    itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), received_message_p);
    received_message_p = NULL;
  }

  return NULL;
}
static void *pdcp_thread_main(void* param)
{
  uint8_t eNB_flag = !UE_flag;

  LOG_I(PDCP,"This is pdcp_thread eNB_flag = %d\n",eNB_flag);
  MSC_START_USE();

  while (!oai_exit) {

    if (pthread_mutex_lock(&pdcp_mutex) != 0) {
      LOG_E(PDCP,"Error locking mutex.\n");
    } else {
      while (pdcp_instance_cnt < 0) {
        pthread_cond_wait(&pdcp_cond,&pdcp_mutex);
      }

      if (pthread_mutex_unlock(&pdcp_mutex) != 0) {
        LOG_E(PDCP,"Error unlocking mutex.\n");
      }
    }

    if (oai_exit) break;

    if (eNB_flag) {
      pdcp_run(PHY_vars_eNB_g[0]->frame, eNB_flag, PHY_vars_eNB_g[0]->Mod_id, 0);
      LOG_D(PDCP,"Calling pdcp_run (eNB) for frame %d\n",PHY_vars_eNB_g[0]->frame);
    } else  {
      pdcp_run(PHY_vars_UE_g[0]->frame, eNB_flag, 0, PHY_vars_UE_g[0]->Mod_id);
      LOG_D(PDCP,"Calling pdcp_run (UE) for frame %d\n",PHY_vars_UE_g[0]->frame);
    }

    if (pthread_mutex_lock(&pdcp_mutex) != 0) {
      LOG_E(PDCP,"Error locking mutex.\n");
    } else {
      pdcp_instance_cnt--;

      if (pthread_mutex_unlock(&pdcp_mutex) != 0) {
        LOG_E(PDCP,"Error unlocking mutex.\n");
      }
    }
  }

  return(NULL);
}
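
pdcp_thread_main above sleeps on pdcp_cond while pdcp_instance_cnt is negative, runs pdcp_run() once when woken, then decrements the counter again. The waking side therefore has to increment the counter under the same mutex and signal the condition. A minimal sketch of that producer side follows; the function name wakeup_pdcp_thread is hypothetical, while pdcp_mutex, pdcp_cond and pdcp_instance_cnt are the variables used above.

/* Hypothetical producer side of the pdcp_cond/pdcp_instance_cnt protocol:
 * bump the counter under the mutex and wake the sleeping PDCP thread. */
static int wakeup_pdcp_thread(void)
{
  if (pthread_mutex_lock(&pdcp_mutex) != 0) {
    LOG_E(PDCP,"Error locking mutex.\n");
    return -1;
  }

  pdcp_instance_cnt++;

  /* pdcp_thread_main waits while pdcp_instance_cnt < 0 */
  pthread_cond_signal(&pdcp_cond);

  if (pthread_mutex_unlock(&pdcp_mutex) != 0) {
    LOG_E(PDCP,"Error unlocking mutex.\n");
    return -1;
  }

  return 0;
}
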
void                                   *
s6a_thread (
  void *args)
{
  itti_mark_task_ready (TASK_S6A);
  MSC_START_USE ();

  while (1) {
    MessageDef                             *received_message_p = NULL;

    /*
     * Trying to fetch a message from the message queue.
     * If the queue is empty, this function will block till a
     * message is sent to the task.
     */
    itti_receive_msg (TASK_S6A, &received_message_p);
    DevAssert (received_message_p != NULL);

    switch (ITTI_MSG_ID (received_message_p)) {
    case S6A_UPDATE_LOCATION_REQ:{
        s6a_generate_update_location (&received_message_p->ittiMsg.s6a_update_location_req);
      }
      break;
    case S6A_AUTH_INFO_REQ:{
        s6a_generate_authentication_info_req (&received_message_p->ittiMsg.s6a_auth_info_req);
      }
      break;
    case TERMINATE_MESSAGE:{
        itti_exit_task ();
      }
      break;
    default:{
        S6A_DEBUG ("Unknown message ID %d: %s\n", ITTI_MSG_ID (received_message_p), ITTI_MSG_NAME (received_message_p));
      }
      break;
    }
    itti_free (ITTI_MSG_ORIGIN_ID (received_message_p), received_message_p);
    received_message_p = NULL;
  }
  return NULL;
}
//-----------------------------------------------------------------------------
static
void *pdcp_netlink_thread_fct(void *arg)
//-----------------------------------------------------------------------------
{
    int                            len             = 0;
    struct pdcp_netlink_element_s *new_data_p      = NULL;
    uint8_t                        pdcp_thread_read_state = 0;
    eNB_flag_t                     eNB_flag               = 0;
    module_id_t                    module_id              = 0;
    memset(nl_rx_buf, 0, NL_MAX_PAYLOAD);
    LOG_I(PDCP, "[NETLINK_THREAD] binding to fd  %d\n",nas_sock_fd);
    MSC_START_USE();
    while (1) {

        len = recvmsg(nas_sock_fd, &nas_msg_rx, 0);

        if (len == 0) {
            /* Other peer (kernel) has performed an orderly shutdown
             */
            LOG_E(PDCP, "[NETLINK_THREAD] Kernel module has closed the netlink\n");
            exit(EXIT_FAILURE);
        } else if (len < 0) {
            /* There was an error */
            LOG_E(PDCP, "[NETLINK_THREAD] An error occured while reading netlink (%d:%s)\n",
                  errno, strerror(errno));
            exit(EXIT_FAILURE);
        } else {
            /* Normal read.
             * NOTE: netlink messages can be assembled to form a multipart message
             */
            for (nas_nlh_rx = (struct nlmsghdr *) nl_rx_buf;
                    NLMSG_OK(nas_nlh_rx, (unsigned int)len);
                    nas_nlh_rx = NLMSG_NEXT (nas_nlh_rx, len)) {
                start_meas(&ip_pdcp_stats_tmp);

                /* There is no need to check for nlmsg_type because
                 * the header is not set in our drivers.
                 */
                if (pdcp_thread_read_state == 0) {
                    new_data_p = malloc(sizeof(struct pdcp_netlink_element_s));

                    if (nas_nlh_rx->nlmsg_len == sizeof (pdcp_data_req_header_t) + sizeof(struct nlmsghdr)) {
                        pdcp_thread_read_state = 1;
                        memcpy((void *)&new_data_p->pdcp_read_header, (void *)NLMSG_DATA(nas_nlh_rx), sizeof(pdcp_data_req_header_t));
                        LOG_I(PDCP, "[NETLINK_THREAD] RX pdcp_data_req_header_t inst %u, "
                              "rb_id %u data_size %d\n",
                              new_data_p->pdcp_read_header.inst,
                              new_data_p->pdcp_read_header.rb_id,
                              new_data_p->pdcp_read_header.data_size);
                    } else {
                        LOG_E(PDCP, "[NETLINK_THREAD] WRONG size %d should be sizeof "
                              "%d ((pdcp_data_req_header_t) + sizeof(struct nlmsghdr))\n",
                              nas_nlh_rx->nlmsg_len,
                              sizeof (pdcp_data_req_header_t) + sizeof(struct nlmsghdr));
                    }
                } else {
                    pdcp_thread_read_state = 0;

#ifdef OAI_EMU

                    // LG: new_data_p->pdcp_read_header.inst will contain in fact a module id
                    if (new_data_p->pdcp_read_header.inst >= oai_emulation.info.nb_enb_local) {
                        module_id = new_data_p->pdcp_read_header.inst
                                    - oai_emulation.info.nb_enb_local
                                    + oai_emulation.info.first_ue_local;
                        eNB_flag = 0;
                    } else {
                        module_id = new_data_p->pdcp_read_header.inst
                                    + oai_emulation.info.first_enb_local;
                        eNB_flag = 1;
                    }

#else
                    module_id = 0;
#endif
                    new_data_p->data = malloc(sizeof(uint8_t) * new_data_p->pdcp_read_header.data_size);
                    /* Copy the data */
                    memcpy(new_data_p->data, NLMSG_DATA(nas_nlh_rx), new_data_p->pdcp_read_header.data_size);

                    if (eNB_flag) {
                        if (pdcp_netlink_nb_element_enb[module_id]
                                > PDCP_QUEUE_NB_ELEMENTS) {
                            LOG_E(PDCP, "[NETLINK_THREAD][Mod %02x] We reached maximum number of elements in eNB pdcp queue (%d)\n",
                                  module_id, pdcp_netlink_nb_element_enb[module_id]);
                        }

                        LOG_I(PDCP,"[NETLINK_THREAD] IP->PDCP : En-queueing packet for eNB module id %d\n", module_id);

                        /* Enqueue the element in the right queue */
                        lfds611_queue_guaranteed_enqueue(pdcp_netlink_queue_enb[module_id], new_data_p);
                        stop_meas(&ip_pdcp_stats_tmp);
                        copy_meas(&eNB_pdcp_stats[module_id].pdcp_ip,&ip_pdcp_stats_tmp);
                    } else {
                        if (pdcp_netlink_nb_element_ue[module_id]
                                > PDCP_QUEUE_NB_ELEMENTS) {
                            LOG_E(PDCP, "[NETLINK_THREAD][Mod %02x] We reached maximum number of elements in UE pdcp queue (%d)\n",
                                  module_id, pdcp_netlink_nb_element_ue[module_id]);
                        }

                        LOG_I(PDCP,"[NETLINK_THREAD] IP->PDCP : En-queueing packet for UE module id  %d\n", module_id);

                        /* Enqueue the element in the right queue */
                        lfds611_queue_guaranteed_enqueue(pdcp_netlink_queue_ue[module_id], new_data_p);
                        stop_meas(&ip_pdcp_stats_tmp);
                        copy_meas(&UE_pdcp_stats[module_id].pdcp_ip,&ip_pdcp_stats_tmp);
                    }
                }
            }
        }
    }

    return NULL;
}
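
The netlink thread above is a pure producer: every IP packet ends up as a malloc'ed struct pdcp_netlink_element_s (header plus a separately malloc'ed data buffer) pushed onto a per-module lfds611 queue. A consumer on the PDCP side has to dequeue the element, process it and free both allocations. A minimal sketch for the eNB queue follows (the UE queue is symmetric); it assumes the lfds611_queue_dequeue() call of liblfds 6.1.1, which returns non-zero when an element was popped, and the helper name is hypothetical.

/* Hypothetical consumer side: drain one eNB queue filled by the netlink
 * thread above, releasing the element and its payload after use. */
static void pdcp_drain_netlink_queue_enb(module_id_t module_id)
{
    struct pdcp_netlink_element_s *element_p = NULL;

    while (lfds611_queue_dequeue(pdcp_netlink_queue_enb[module_id],
                                 (void **)&element_p) != 0) {
        /* ... hand element_p->pdcp_read_header and element_p->data to PDCP ... */

        free(element_p->data);
        free(element_p);
        element_p = NULL;
    }
}
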
void *s1ap_eNB_task(void *arg)
{
  MessageDef *received_msg = NULL;
  int         result;

  S1AP_DEBUG("Starting S1AP layer\n");

  s1ap_eNB_prepare_internal_data();

  itti_mark_task_ready(TASK_S1AP);
  MSC_START_USE();

  while (1) {
    itti_receive_msg(TASK_S1AP, &received_msg);

    switch (ITTI_MSG_ID(received_msg)) {
    case TERMINATE_MESSAGE:
      itti_exit_task();
      break;

    case S1AP_REGISTER_ENB_REQ: {
      /* Register a new eNB.
       * In virtual mode, eNBs are distinguished using the mod_id.
       * Each eNB has to send an S1AP_REGISTER_ENB message with its
       * own parameters.
       */
      s1ap_eNB_handle_register_eNB(ITTI_MESSAGE_GET_INSTANCE(received_msg),
                                   &S1AP_REGISTER_ENB_REQ(received_msg));
    }
    break;

    case SCTP_NEW_ASSOCIATION_RESP: {
      s1ap_eNB_handle_sctp_association_resp(ITTI_MESSAGE_GET_INSTANCE(received_msg),
                                            &received_msg->ittiMsg.sctp_new_association_resp);
    }
    break;

    case SCTP_DATA_IND: {
      s1ap_eNB_handle_sctp_data_ind(&received_msg->ittiMsg.sctp_data_ind);
    }
    break;

    case S1AP_NAS_FIRST_REQ: {
      s1ap_eNB_handle_nas_first_req(ITTI_MESSAGE_GET_INSTANCE(received_msg),
                                    &S1AP_NAS_FIRST_REQ(received_msg));
    }
    break;

    case S1AP_UPLINK_NAS: {
      s1ap_eNB_nas_uplink(ITTI_MESSAGE_GET_INSTANCE(received_msg),
                          &S1AP_UPLINK_NAS(received_msg));
    }
    break;

    case S1AP_UE_CAPABILITIES_IND: {
      s1ap_eNB_ue_capabilities(ITTI_MESSAGE_GET_INSTANCE(received_msg),
                               &S1AP_UE_CAPABILITIES_IND(received_msg));
    }
    break;

    case S1AP_INITIAL_CONTEXT_SETUP_RESP: {
      s1ap_eNB_initial_ctxt_resp(ITTI_MESSAGE_GET_INSTANCE(received_msg),
                                 &S1AP_INITIAL_CONTEXT_SETUP_RESP(received_msg));
    }
    break;

    case S1AP_E_RAB_SETUP_RESP: {
      s1ap_eNB_e_rab_setup_resp(ITTI_MESSAGE_GET_INSTANCE(received_msg),
                                &S1AP_E_RAB_SETUP_RESP(received_msg));
    }
    break;

    case S1AP_NAS_NON_DELIVERY_IND: {
      s1ap_eNB_nas_non_delivery_ind(ITTI_MESSAGE_GET_INSTANCE(received_msg),
                                    &S1AP_NAS_NON_DELIVERY_IND(received_msg));
    }
    break;

    case S1AP_UE_CONTEXT_RELEASE_COMPLETE: {
      s1ap_ue_context_release_complete(ITTI_MESSAGE_GET_INSTANCE(received_msg),
                                       &S1AP_UE_CONTEXT_RELEASE_COMPLETE(received_msg));
    }
    break;

    case S1AP_UE_CONTEXT_RELEASE_REQ: {
      s1ap_eNB_instance_t          *s1ap_eNB_instance_p = NULL;
      struct s1ap_eNB_ue_context_s *ue_context_p        = NULL;

      s1ap_ue_context_release_req(ITTI_MESSAGE_GET_INSTANCE(received_msg),
                                  &S1AP_UE_CONTEXT_RELEASE_REQ(received_msg));

      s1ap_eNB_instance_p = s1ap_eNB_get_instance(ITTI_MESSAGE_GET_INSTANCE(received_msg));
      DevAssert(s1ap_eNB_instance_p != NULL);

      if ((ue_context_p = s1ap_eNB_get_ue_context(s1ap_eNB_instance_p,
                          S1AP_UE_CONTEXT_RELEASE_REQ(received_msg).eNB_ue_s1ap_id)) == NULL) {
        /* The context for this eNB ue s1ap id doesn't exist in the map of eNB UEs */
        S1AP_ERROR("Failed to find ue context associated with eNB ue s1ap id: %u\n",
                   S1AP_UE_CONTEXT_RELEASE_REQ(received_msg).eNB_ue_s1ap_id);
      }
    }
    break;

    default:
      S1AP_ERROR("Received unhandled message: %d:%s\n",
                 ITTI_MSG_ID(received_msg), ITTI_MSG_NAME(received_msg));
      break;
    }

    result = itti_free (ITTI_MSG_ORIGIN_ID(received_msg), received_msg);
    AssertFatal (result == EXIT_SUCCESS, "Failed to free memory (%d)!\n", result);

    received_msg = NULL;
  }

  return NULL;
}
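
All of these receive loops share the same skeleton: mark the task ready, block in itti_receive_msg(), dispatch on ITTI_MSG_ID(), free the message, and leave only on TERMINATE_MESSAGE. They are handed to the ITTI layer at start-up; a minimal sketch is shown below, assuming the usual itti_create_task(task_id, start_routine, args_p) entry point of the OAI intertask interface, with start_s1ap_task() as a hypothetical wrapper.

/* Hypothetical start-up wrapper: register the S1AP receive loop above
 * as an ITTI task. itti_create_task() is assumed to return 0 on success. */
static int start_s1ap_task(void)
{
  if (itti_create_task(TASK_S1AP, s1ap_eNB_task, NULL) < 0) {
    S1AP_ERROR("Failed to create TASK_S1AP\n");
    return -1;
  }

  return 0;
}
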
static void *nas_intertask_interface(void *args_p)
{
  itti_mark_task_ready(TASK_NAS_MME);
  MSC_START_USE();

  while(1) {
    MessageDef *received_message_p;

next_message:
    itti_receive_msg(TASK_NAS_MME, &received_message_p);

    switch (ITTI_MSG_ID(received_message_p)) {
    case NAS_CONNECTION_ESTABLISHMENT_IND: {
#if defined(DISABLE_USE_NAS)
      MessageDef       *message_p;
      nas_attach_req_t *nas_req_p;
      s1ap_initial_ue_message_t *transparent;

      NAS_DEBUG("NAS abstraction: Generating NAS ATTACH REQ\n");

      message_p = itti_alloc_new_message(TASK_NAS_MME, NAS_ATTACH_REQ);

      nas_req_p = &message_p->ittiMsg.nas_attach_req;
      transparent = &message_p->ittiMsg.nas_attach_req.transparent;

      nas_req_p->initial = INITIAL_REQUEST;
      sprintf(nas_req_p->imsi, "%14llu", 20834123456789ULL);

      memcpy(transparent, &received_message_p->ittiMsg.nas_conn_est_ind.transparent,
             sizeof(s1ap_initial_ue_message_t));

      itti_send_msg_to_task(TASK_MME_APP, INSTANCE_DEFAULT, message_p);
#else
      nas_establish_ind_t *nas_est_ind_p;

      nas_est_ind_p = &received_message_p->ittiMsg.nas_conn_est_ind.nas;

      nas_proc_establish_ind(nas_est_ind_p->UEid,
                             nas_est_ind_p->tac,
                             nas_est_ind_p->initialNasMsg.data,
                             nas_est_ind_p->initialNasMsg.length);
#endif
    }
    break;

#if defined(DISABLE_USE_NAS)

    case NAS_ATTACH_ACCEPT: {
      itti_send_msg_to_task(TASK_S1AP, INSTANCE_DEFAULT, received_message_p);
      goto next_message;
    }
    break;

    case NAS_AUTHENTICATION_REQ: {
      MessageDef      *message_p;
      nas_auth_resp_t *nas_resp_p;

      NAS_DEBUG("NAS abstraction: Generating NAS AUTHENTICATION RESPONSE\n");

      message_p = itti_alloc_new_message(TASK_NAS_MME, NAS_AUTHENTICATION_RESP);

      nas_resp_p = &message_p->ittiMsg.nas_auth_resp;

      memcpy(nas_resp_p->imsi, received_message_p->ittiMsg.nas_auth_req.imsi, 16);

      itti_send_msg_to_task(TASK_MME_APP, INSTANCE_DEFAULT, message_p);
    }
    break;
#else

    case NAS_UPLINK_DATA_IND: {
      nas_proc_ul_transfer_ind(NAS_UL_DATA_IND(received_message_p).UEid,
                               NAS_UL_DATA_IND(received_message_p).nasMsg.data,
                               NAS_UL_DATA_IND(received_message_p).nasMsg.length);
    }
    break;

    case NAS_DOWNLINK_DATA_CNF: {
      nas_proc_dl_transfer_cnf(NAS_DL_DATA_CNF(received_message_p).UEid);
    } break;

    case NAS_DOWNLINK_DATA_REJ: {
      nas_proc_dl_transfer_rej(NAS_DL_DATA_REJ(received_message_p).UEid);
    } break;

    case NAS_AUTHENTICATION_PARAM_RSP: {
      nas_proc_auth_param_res(&NAS_AUTHENTICATION_PARAM_RSP(received_message_p));
    }
    break;

    case NAS_AUTHENTICATION_PARAM_FAIL: {
      nas_proc_auth_param_fail(&NAS_AUTHENTICATION_PARAM_FAIL(received_message_p));
    }
    break;
#endif

    case NAS_PDN_CONNECTIVITY_RSP: {
      nas_proc_pdn_connectivity_res(&NAS_PDN_CONNECTIVITY_RSP(received_message_p));
    }
    break;

    case NAS_PDN_CONNECTIVITY_FAIL: {
      nas_proc_pdn_connectivity_fail(&NAS_PDN_CONNECTIVITY_FAIL(received_message_p));
    }
    break;


    case TIMER_HAS_EXPIRED: {
#if !defined(DISABLE_USE_NAS)
      /* Call the NAS timer api */
      nas_timer_handle_signal_expiry(TIMER_HAS_EXPIRED(received_message_p).timer_id,
                                     TIMER_HAS_EXPIRED(received_message_p).arg);
#endif
    }
    break;

    case S1AP_ENB_DEREGISTERED_IND: {
#if !defined(DISABLE_USE_NAS)
      int i;

      for (i = 0; i < S1AP_ENB_DEREGISTERED_IND(received_message_p).nb_ue_to_deregister; i ++) {
        nas_proc_deregister_ue(S1AP_ENB_DEREGISTERED_IND(received_message_p).mme_ue_s1ap_id[i]);
      }

#endif
    }
    break;

    case S1AP_DEREGISTER_UE_REQ: {
#if !defined(DISABLE_USE_NAS)
      nas_proc_deregister_ue(S1AP_DEREGISTER_UE_REQ(received_message_p).mme_ue_s1ap_id);
#endif
    }
    break;

    case TERMINATE_MESSAGE: {
      itti_exit_task();
    }
    break;

    default: {
      NAS_DEBUG("Unkwnon message ID %d:%s from %s\n",
                ITTI_MSG_ID(received_message_p),
                ITTI_MSG_NAME(received_message_p),
                ITTI_MSG_ORIGIN_NAME(received_message_p));
    }
    break;
    }

    itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), received_message_p);
    received_message_p = NULL;
  }

  return NULL;
}
//-----------------------------------------------------------------------------
void *gtpv1u_eNB_task(void *args)
{
  int                       rc = 0;
  instance_t                instance;
  //const char               *msg_name_p;

  rc = gtpv1u_eNB_init();
  AssertFatal(rc == 0, "gtpv1u_eNB_init Failed");
  itti_mark_task_ready(TASK_GTPV1_U);
  MSC_START_USE();

  while(1) {
    /* Trying to fetch a message from the message queue.
     * If the queue is empty, this function will block till a
     * message is sent to the task.
     */
    MessageDef *received_message_p = NULL;
    itti_receive_msg(TASK_GTPV1_U, &received_message_p);
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_ENB_TASK, VCD_FUNCTION_IN);
    DevAssert(received_message_p != NULL);

    instance = ITTI_MSG_INSTANCE(received_message_p);
    //msg_name_p = ITTI_MSG_NAME(received_message_p);

    switch (ITTI_MSG_ID(received_message_p)) {

    case GTPV1U_ENB_DELETE_TUNNEL_REQ: {
      gtpv1u_delete_s1u_tunnel(instance, &received_message_p->ittiMsg.Gtpv1uDeleteTunnelReq);
    }
    break;

    // DATA COMING FROM UDP
    case UDP_DATA_IND: {
      udp_data_ind_t *udp_data_ind_p;
      udp_data_ind_p = &received_message_p->ittiMsg.udp_data_ind;
      nwGtpv1uProcessUdpReq(gtpv1u_data_g.gtpv1u_stack,
                            udp_data_ind_p->buffer,
                            udp_data_ind_p->buffer_length,
                            udp_data_ind_p->peer_port,
                            udp_data_ind_p->peer_address);
      //itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), udp_data_ind_p->buffer);
    }
    break;

    // DATA TO BE SENT TO UDP
    case GTPV1U_ENB_TUNNEL_DATA_REQ: {
      gtpv1u_enb_tunnel_data_req_t *data_req_p           = NULL;
      NwGtpv1uUlpApiT               stack_req;
      NwGtpv1uRcT                   rc                   = NW_GTPV1U_FAILURE;
      hashtable_rc_t                hash_rc              = HASH_TABLE_KEY_NOT_EXISTS;
      gtpv1u_ue_data_t             *gtpv1u_ue_data_p     = NULL;
      teid_t                        enb_s1u_teid         = 0;
      teid_t                        sgw_s1u_teid         = 0;

      VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_PROCESS_TUNNEL_DATA_REQ, VCD_FUNCTION_IN);
      data_req_p = &GTPV1U_ENB_TUNNEL_DATA_REQ(received_message_p);
      //ipv4_send_data(ipv4_data_p->sd, data_ind_p->buffer, data_ind_p->length);

#if defined(GTP_DUMP_SOCKET) && GTP_DUMP_SOCKET > 0
      gtpv1u_eNB_write_dump_socket(&data_req_p->buffer[data_req_p->offset],data_req_p->length);
#endif
      memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT));

      hash_rc = hashtable_get(gtpv1u_data_g.ue_mapping, (uint64_t)data_req_p->rnti, (void**)&gtpv1u_ue_data_p);

      if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS) {
        LOG_E(GTPU, "nwGtpv1uProcessUlpReq failed: while getting ue rnti %x in hashtable ue_mapping\n", data_req_p->rnti);
      } else {
        if ((data_req_p->rab_id >= GTPV1U_BEARER_OFFSET) && (data_req_p->rab_id <= max_val_DRB_Identity)) {
          enb_s1u_teid                        = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].teid_eNB;
          sgw_s1u_teid                        = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].teid_sgw;
          stack_req.apiType                   = NW_GTPV1U_ULP_API_SEND_TPDU;
          stack_req.apiInfo.sendtoInfo.teid   = sgw_s1u_teid;
          stack_req.apiInfo.sendtoInfo.ipAddr = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].sgw_ip_addr;

          rc = nwGtpv1uGpduMsgNew(
                 gtpv1u_data_g.gtpv1u_stack,
                 sgw_s1u_teid,
                 NW_FALSE,
                 gtpv1u_data_g.seq_num++,
                 data_req_p->buffer,
                 data_req_p->length,
                 data_req_p->offset,
                 &(stack_req.apiInfo.sendtoInfo.hMsg));

          if (rc != NW_GTPV1U_OK) {
            LOG_E(GTPU, "nwGtpv1uGpduMsgNew failed: 0x%x\n", rc);
            MSC_LOG_EVENT(MSC_GTPU_ENB,"0 Failed send G-PDU ltid %u rtid %u size %u",
            		enb_s1u_teid,sgw_s1u_teid,data_req_p->length);
            (void)enb_s1u_teid; /* avoid gcc warning "set but not used" */
          } else {
            rc = nwGtpv1uProcessUlpReq(gtpv1u_data_g.gtpv1u_stack, &stack_req);

            if (rc != NW_GTPV1U_OK) {
              LOG_E(GTPU, "nwGtpv1uProcessUlpReq failed: 0x%x\n", rc);
              MSC_LOG_EVENT(MSC_GTPU_ENB,"0 Failed send G-PDU ltid %u rtid %u size %u",
              		enb_s1u_teid,sgw_s1u_teid,data_req_p->length);
            } else {
            	  MSC_LOG_TX_MESSAGE(
            			  MSC_GTPU_ENB,
            			  MSC_GTPU_SGW,
            			  NULL,
            			  0,
            			  MSC_AS_TIME_FMT" G-PDU ltid %u rtid %u size %u",
            			  0,0,
            			  enb_s1u_teid,
            			  sgw_s1u_teid,
            			  data_req_p->length);

            }

            rc = nwGtpv1uMsgDelete(gtpv1u_data_g.gtpv1u_stack,
                                   stack_req.apiInfo.sendtoInfo.hMsg);

            if (rc != NW_GTPV1U_OK) {
              LOG_E(GTPU, "nwGtpv1uMsgDelete failed: 0x%x\n", rc);
            }
          }
        }
      }

      VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_PROCESS_TUNNEL_DATA_REQ, VCD_FUNCTION_OUT);
      /* Buffer still needed, do not free it */
      //itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), data_req_p->buffer);
    }
    break;

    case TERMINATE_MESSAGE: {
      if (gtpv1u_data_g.ue_mapping != NULL) {
        hashtable_destroy (gtpv1u_data_g.ue_mapping);
      }

      if (gtpv1u_data_g.teid_mapping != NULL) {
        hashtable_destroy (gtpv1u_data_g.teid_mapping);
      }

      itti_exit_task();
    }
    break;

    case TIMER_HAS_EXPIRED:
      nwGtpv1uProcessTimeout(&received_message_p->ittiMsg.timer_has_expired.arg);
      break;

    default: {
      LOG_E(GTPU, "Unkwnon message ID %d:%s\n",
            ITTI_MSG_ID(received_message_p),
            ITTI_MSG_NAME(received_message_p));
    }
    break;
    }

    rc = itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), received_message_p);
    AssertFatal(rc == EXIT_SUCCESS, "Failed to free memory (%d)!\n", rc);
    received_message_p = NULL;
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_ENB_TASK, VCD_FUNCTION_OUT);
  }

  return NULL;
}
void *sctp_receiver_thread(void *args_p)
{
    struct sctp_arg_s *sctp_arg_p;

    /* maximum file descriptor number */
    int fdmax, clientsock, i;
    /* master file descriptor list */
    fd_set master;
    /* temp file descriptor list for select() */
    fd_set read_fds;

    if ((sctp_arg_p = (struct sctp_arg_s *)args_p) == NULL) {
        pthread_exit(NULL);
    }

    /* clear the master and temp sets */
    FD_ZERO(&master);
    FD_ZERO(&read_fds);

    FD_SET(sctp_arg_p->sd, &master);
    fdmax = sctp_arg_p->sd; /* so far, it's this one*/
    MSC_START_USE();

    while(1) {

        memcpy(&read_fds, &master, sizeof(master));

        if (select(fdmax+1, &read_fds, NULL, NULL, NULL) == -1) {
            SCTP_ERROR("[%d] Select() error: %s",
                       sctp_arg_p->sd, strerror(errno));
            free(args_p);
            args_p = NULL;
            pthread_exit(NULL);
        }

        for (i = 0; i <= fdmax; i++) {
            if (FD_ISSET(i, &read_fds)) {
                if (i == sctp_arg_p->sd) {
                    /* There is data to read on listener socket. This means we have to accept
                     * the connection.
                     */
                    if ((clientsock = accept(sctp_arg_p->sd, NULL, NULL)) < 0) {
                        SCTP_ERROR("[%d] accept: %s:%d\n", sctp_arg_p->sd, strerror(errno), errno);
                        free(args_p);
                        args_p = NULL;
                        pthread_exit(NULL);
                    } else {
                        FD_SET(clientsock, &master); /* add to master set */

                        if(clientsock > fdmax) {
                            /* keep track of the maximum */
                            fdmax = clientsock;
                        }
                    }
                } else {
                    int ret;

                    /* Read from socket */
                    ret = sctp_read_from_socket(i, sctp_arg_p->ppid);

                    /* When the socket is disconnected we have to update
                     * the fd_set.
                     */
                    if (ret == SCTP_RC_DISCONNECT) {
                        /* Remove the socket from the FD set and update the max sd */
                        FD_CLR(i, &master);

                        if (i == fdmax) {
                            while (!FD_ISSET(fdmax, &master))
                                fdmax -= 1;
                        }
                    }
                }
            }
        }
    }

    free(args_p);
    args_p = NULL;
    return NULL;
}
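
sctp_receiver_thread takes ownership of a heap-allocated struct sctp_arg_s and frees it on every exit path, so the caller must allocate one argument block per thread. A minimal sketch of how the thread might be started is given below; it assumes sctp_arg_s exposes at least the sd and ppid fields used above, and sctp_start_receiver() is a hypothetical helper.

/* Hypothetical helper: spawn the receiver thread for an SCTP listen socket
 * that has already been created, bound and set to listen. */
static int sctp_start_receiver(int sd, uint32_t ppid)
{
    pthread_t receiver;
    struct sctp_arg_s *sctp_arg_p = calloc(1, sizeof(*sctp_arg_p));

    if (sctp_arg_p == NULL) {
        return -1;
    }

    sctp_arg_p->sd   = sd;
    sctp_arg_p->ppid = ppid;

    /* The thread frees sctp_arg_p itself when it exits */
    if (pthread_create(&receiver, NULL, &sctp_receiver_thread, sctp_arg_p) != 0) {
        free(sctp_arg_p);
        return -1;
    }

    return 0;
}
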