Example #1
0
static
NwSdpRcT nwSdpProcessGtpv1uStackReqCallback(NwGtpv1uUlpHandleT hUlp,
                           NwGtpv1uUlpApiT *pUlpApi)
{
  /*
   * ULP callback registered with the GTPv1-U stack.
   * Dispatches received T-PDUs to the SDP data path.
   *
   * @param hUlp    : Opaque ULP handle; actually a NwSdpT* instance.
   * @param pUlpApi : API request from the GTPv1-U stack.
   * @return NW_SDP_OK always (errors are logged/asserted internally).
   */
  NwSdpRcT rc;
  NwSdpT* thiz = (NwSdpT*)hUlp;

  switch(pUlpApi->apiType)
  {
    case NW_GTPV1U_ULP_API_RECV_TPDU:
      {
        NW_LOG(thiz, NW_LOG_LEVEL_DEBG, "Received T-PDU from GTPU Stack");
        rc = nwSdpProcessGtpuDataIndication(thiz, (NwSdpFlowContextT*)pUlpApi->apiInfo.recvMsgInfo.hUlpSession, pUlpApi->apiInfo.recvMsgInfo.hMsg);

        /* BUGFIX: release the received message only on this path.
         * The original code deleted recvMsgInfo.hMsg unconditionally after
         * the switch, which on the default (unhandled API) branch read a
         * union member that is not valid for that apiType. */
        rc = nwGtpv1uMsgDelete(thiz->hGtpv1uStack,  pUlpApi->apiInfo.recvMsgInfo.hMsg);
        NW_ASSERT( rc == NW_SDP_OK );
      }
      break;

    default:
      NW_LOG(thiz, NW_LOG_LEVEL_ERRO, "Received Unhandled API request from GTPU Stack");
      break;
  }

  return NW_SDP_OK;
}
Example #2
0
/**
 * Destructor
 *
 * Stops any running peer-response timer, releases the attached message
 * (if any) and returns the transaction object to the global free pool
 * (gpGtpv1uTrxnPool is a singly-linked free list).
 *
 * @param[in,out] pthiz : Pointer to pointer to Trxn object; set to NULL
 *                        on successful return.
 * @return NW_GTPV1U_OK on success, NW_GTPV1U_FAILURE on NULL input.
 */
NwGtpv1uRcT
nwGtpv1uTrxnDelete( NW_INOUT NwGtpv1uTrxnT **pthiz)
{
  NwGtpv1uRcT rc = NW_GTPV1U_OK;
  NwGtpv1uStackT *pStack;
  NwGtpv1uTrxnT *thiz;

  /* Robustness: fail gracefully instead of dereferencing NULL. */
  if((pthiz == NULL) || (*pthiz == NULL)) {
    return NW_GTPV1U_FAILURE;
  }

  thiz = *pthiz;
  pStack = thiz->pStack;

  if(thiz->hRspTmr) {
    rc = nwGtpv1uTrxnStopPeerRspTimer(thiz);
    NW_ASSERT(rc == NW_GTPV1U_OK);
  }

  if(thiz->pMsg) {
    rc = nwGtpv1uMsgDelete((NwGtpv1uStackHandleT)pStack,
                           (NwGtpv1uMsgHandleT)thiz->pMsg);
    NW_ASSERT(rc == NW_GTPV1U_OK);
  }

  /* Push the transaction back onto the free pool. */
  thiz->next = gpGtpv1uTrxnPool;
  gpGtpv1uTrxnPool = thiz;

  NW_LOG(pStack, NW_LOG_LEVEL_DEBG, "Purged transaction 0x%p", thiz);

  *pthiz = NULL;
  return rc;
}
NwGtpv1uRcT
nwMiniUlpTpduSend(NwMiniUlpEntityT* thiz, NwU8T* tpduBuf, NwU32T tpduLen , NwU16T fromPort)
{
  /*
   * Wrap a raw T-PDU into a G-PDU and push it through the GTPv1-U stack.
   * The local port is reused as the TEID (mini-ULP test convention).
   *
   * @param thiz     : Mini ULP entity owning the stack handle and peer address.
   * @param tpduBuf  : Payload buffer.
   * @param tpduLen  : Payload length in bytes.
   * @param fromPort : Source port, also used as TEID.
   * @return NW_GTPV1U_OK (failures trip NW_ASSERT).
   */
  NwGtpv1uRcT rc;
  NwGtpv1uUlpApiT           ulpReq;

  /*
   *  Send Message Request to GTPv1u Stack Instance
   */

  /* BUGFIX: zero the request first so unused union/API fields are in a
   * defined state — consistent with the other SEND_TPDU call sites in
   * this code base, which memset the request before filling it. */
  memset(&ulpReq, 0, sizeof(NwGtpv1uUlpApiT));

  ulpReq.apiType                        = NW_GTPV1U_ULP_API_SEND_TPDU;
  ulpReq.apiInfo.sendtoInfo.teid        = fromPort;
  ulpReq.apiInfo.sendtoInfo.ipAddr      = inet_addr(thiz->peerIpStr);

  rc = nwGtpv1uGpduMsgNew( thiz->hGtpv1uStack,
      fromPort,
      NW_FALSE,
      thiz->seqNum++,
      tpduBuf,
      tpduLen,
      &(ulpReq.apiInfo.sendtoInfo.hMsg));

  NW_ASSERT( rc == NW_GTPV1U_OK );

  rc = nwGtpv1uProcessUlpReq(thiz->hGtpv1uStack, &ulpReq);
  NW_ASSERT( rc == NW_GTPV1U_OK );

  /* The message is owned here; delete it once the stack has sent it. */
  rc = nwGtpv1uMsgDelete(thiz->hGtpv1uStack, (ulpReq.apiInfo.sendtoInfo.hMsg));
  NW_ASSERT( rc == NW_GTPV1U_OK );

  return NW_GTPV1U_OK;
}
Example #4
0
NwSdpRcT
nwSdpProcessIpv4DataIndication(NwSdpT* thiz, 
              NwSdpFlowContextT* pFlowContext,
              NwIpv4MsgHandleT hMsg)
{
  /*
   * Forward an incoming IPv4 PDU according to the egress end-point
   * flow type of the given flow context.
   */
  NwSdpRcT rc;

  switch(pFlowContext->egressEndPoint.flowType) {
    case NW_FLOW_TYPE_GTPU: {
      NwGtpv1uUlpApiT sendReq;

      /* An egress IPv4 address must be configured for GTP-U forwarding. */
      NW_ASSERT(pFlowContext->egressEndPoint.ipv4Addr != 0);
      NW_LOG(thiz, NW_LOG_LEVEL_DEBG, "Sending IP PDU over GTPU teid 0x%x to "NW_IPV4_ADDR, pFlowContext->egressEndPoint.flowKey.gtpuTeid, NW_IPV4_ADDR_FORMAT(htonl(pFlowContext->egressEndPoint.ipv4Addr)));

      sendReq.apiType                   = NW_GTPV1U_ULP_API_SEND_TPDU;
      sendReq.apiInfo.sendtoInfo.teid   = pFlowContext->egressEndPoint.flowKey.gtpuTeid;
      sendReq.apiInfo.sendtoInfo.ipAddr = pFlowContext->egressEndPoint.ipv4Addr;

      /* Wrap the IPv4 payload into a fresh G-PDU message. */
      rc = nwGtpv1uGpduMsgNew(thiz->hGtpv1uStack,
                              pFlowContext->egressEndPoint.flowKey.gtpuTeid, /* TEID                 */
                              NW_FALSE,                                     /* Seq Num Present Flag */
                              0,                                            /* seqNum               */
                              (NwU8T*) nwIpv4MsgGetBufHandle(thiz->hIpv4Stack, hMsg),
                              nwIpv4MsgGetLength(thiz->hIpv4Stack, hMsg),
                              (NwGtpv1uMsgHandleT*)&(sendReq.apiInfo.sendtoInfo.hMsg));
      NW_ASSERT( rc == NW_SDP_OK );

      /* Hand the request to the GTPv1-U stack, then release the message. */
      rc = nwGtpv1uProcessUlpReq(thiz->hGtpv1uStack, &sendReq);
      NW_ASSERT( rc == NW_SDP_OK );

      rc = nwGtpv1uMsgDelete(thiz->hGtpv1uStack, sendReq.apiInfo.sendtoInfo.hMsg);
      NW_ASSERT( rc == NW_SDP_OK );
      break;
    }

    case NW_FLOW_TYPE_GRE:
      NW_LOG(thiz, NW_LOG_LEVEL_DEBG, "Cannot send IP PDU over GRE! Not supported yet!");
      break;

    default:
      NW_LOG(thiz, NW_LOG_LEVEL_ERRO, "Unsupported egress flow end point type! Dropping IP PDU.");
      break;
  }

  return NW_SDP_OK;
}
NwGtpv1uRcT 
nwMiniUlpProcessStackReqCallback (NwGtpv1uUlpHandleT hUlp, 
                       NwGtpv1uUlpApiT *pUlpApi)
{
  /*
   * ULP callback for the mini test ULP: reflects received T-PDUs back
   * to the peer over a plain UDP socket (TEID doubles as peer port).
   *
   * @param hUlp    : Opaque handle; actually a NwMiniUlpEntityT*.
   * @param pUlpApi : API indication from the GTPv1-U stack.
   * @return NW_GTPV1U_OK always.
   */
  NwMiniUlpEntityT* thiz;
  NW_ASSERT(pUlpApi != NULL);

  thiz = (NwMiniUlpEntityT*) hUlp;

  switch(pUlpApi->apiType)
  {
    case NW_GTPV1U_ULP_API_RECV_TPDU:
      {
        struct sockaddr_in peerAddr;
        NwS32T bytesSent;
        NwU8T dataBuf[4096];
        NwU32T dataSize;
        NwGtpv1uRcT rc;
        NwU32T peerIpAddr = (inet_addr(thiz->peerIpStr));

        /* BUGFIX: never perform side effects inside NW_ASSERT — if the
         * assert macro compiles out, the TPDU would never be extracted
         * and the message would leak. Call first, then assert. */
        rc = nwGtpv1uMsgGetTpdu(pUlpApi->apiInfo.recvMsgInfo.hMsg, dataBuf, &dataSize);
        NW_ASSERT( NW_GTPV1U_OK == rc );

        NW_LOG(NW_LOG_LEVEL_DEBG, "Received TPDU from gtpv1u stack %u!", pUlpApi->apiInfo.recvMsgInfo.teid);

        peerAddr.sin_family       = AF_INET;
        peerAddr.sin_port         = htons(pUlpApi->apiInfo.recvMsgInfo.teid);
        peerAddr.sin_addr.s_addr  = (peerIpAddr);
        memset(peerAddr.sin_zero, '\0', sizeof (peerAddr.sin_zero));

        bytesSent = sendto (thiz->hSocket, dataBuf, dataSize, 0, (struct sockaddr *) &peerAddr, sizeof(peerAddr));

        if(bytesSent < 0)
        {
          NW_LOG(NW_LOG_LEVEL_ERRO, "%s", strerror(errno));
          NW_ASSERT(0);
        }
        else
        {
          /* peerIpAddr is in network byte order, hence lowest byte first. */
          NW_LOG(NW_LOG_LEVEL_DEBG, "Sent %u bytes to peer %u.%u.%u.%u", dataSize,
              (peerIpAddr & 0x000000ff),
              (peerIpAddr & 0x0000ff00) >> 8,
              (peerIpAddr & 0x00ff0000) >> 16,
              (peerIpAddr & 0xff000000) >> 24);
        }

        /* BUGFIX: same assert-side-effect hazard for the delete. */
        rc = nwGtpv1uMsgDelete(thiz->hGtpv1uStack, (pUlpApi->apiInfo.recvMsgInfo.hMsg));
        NW_ASSERT(rc == NW_GTPV1U_OK);

      }
      break;
    default:
      NW_LOG(NW_LOG_LEVEL_WARN, "Received undefined UlpApi from gtpv1u stack!");
      break;
  }
  return NW_GTPV1U_OK;
}
Example #6
0
//-----------------------------------------------------------------------------
/*
 * Main ITTI task loop for the eNB GTP-U layer.
 *
 * Initializes the GTP-U layer, then blocks on the TASK_GTPV1_U message
 * queue forever, dispatching on message ID:
 *   - GTPV1U_ENB_DELETE_TUNNEL_REQ : tear down an S1-U tunnel
 *   - UDP_DATA_IND                 : feed received UDP payload to the stack
 *   - GTPV1U_ENB_TUNNEL_DATA_REQ   : wrap uplink data in a G-PDU and send
 *   - TERMINATE_MESSAGE            : destroy hash tables and exit the task
 *   - TIMER_HAS_EXPIRED            : forward timeout to the stack
 *
 * @param args : unused ITTI task argument.
 * @return NULL (never reached in practice; the loop only exits via
 *         itti_exit_task()).
 */
void *gtpv1u_eNB_task(void *args)
{
  int                       rc = 0;
  instance_t                instance;
  //const char               *msg_name_p;

  rc = gtpv1u_eNB_init();
  AssertFatal(rc == 0, "gtpv1u_eNB_init Failed");
  itti_mark_task_ready(TASK_GTPV1_U);
  MSC_START_USE();

  while(1) {
    /* Trying to fetch a message from the message queue.
     * If the queue is empty, this function will block till a
     * message is sent to the task.
     */
    MessageDef *received_message_p = NULL;
    itti_receive_msg(TASK_GTPV1_U, &received_message_p);
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_ENB_TASK, VCD_FUNCTION_IN);
    DevAssert(received_message_p != NULL);

    instance = ITTI_MSG_INSTANCE(received_message_p);
    //msg_name_p = ITTI_MSG_NAME(received_message_p);

    switch (ITTI_MSG_ID(received_message_p)) {

    case GTPV1U_ENB_DELETE_TUNNEL_REQ: {
      gtpv1u_delete_s1u_tunnel(instance, &received_message_p->ittiMsg.Gtpv1uDeleteTunnelReq);
    }
    break;

    // DATA COMING FROM UDP
    case UDP_DATA_IND: {
      udp_data_ind_t *udp_data_ind_p;
      udp_data_ind_p = &received_message_p->ittiMsg.udp_data_ind;
      nwGtpv1uProcessUdpReq(gtpv1u_data_g.gtpv1u_stack,
                            udp_data_ind_p->buffer,
                            udp_data_ind_p->buffer_length,
                            udp_data_ind_p->peer_port,
                            udp_data_ind_p->peer_address);
      //itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), udp_data_ind_p->buffer);
    }
    break;

    // DATA TO BE SENT TO UDP
    case GTPV1U_ENB_TUNNEL_DATA_REQ: {
      gtpv1u_enb_tunnel_data_req_t *data_req_p           = NULL;
      NwGtpv1uUlpApiT               stack_req;
      /* NOTE(review): this inner 'rc' (NwGtpv1uRcT) shadows the outer
       * int 'rc' declared at function scope — intentional here, but easy
       * to misread. */
      NwGtpv1uRcT                   rc                   = NW_GTPV1U_FAILURE;
      hashtable_rc_t                hash_rc              = HASH_TABLE_KEY_NOT_EXISTS;
      gtpv1u_ue_data_t             *gtpv1u_ue_data_p     = NULL;
      teid_t                        enb_s1u_teid         = 0;
      teid_t                        sgw_s1u_teid         = 0;

      VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_PROCESS_TUNNEL_DATA_REQ, VCD_FUNCTION_IN);
      data_req_p = &GTPV1U_ENB_TUNNEL_DATA_REQ(received_message_p);
      //ipv4_send_data(ipv4_data_p->sd, data_ind_p->buffer, data_ind_p->length);

#if defined(GTP_DUMP_SOCKET) && GTP_DUMP_SOCKET > 0
      gtpv1u_eNB_write_dump_socket(&data_req_p->buffer[data_req_p->offset],data_req_p->length);
#endif
      memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT));

      /* Resolve the UE context (and its bearer TEIDs) from the RNTI. */
      hash_rc = hashtable_get(gtpv1u_data_g.ue_mapping, (uint64_t)data_req_p->rnti, (void**)&gtpv1u_ue_data_p);

      if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS) {
        LOG_E(GTPU, "nwGtpv1uProcessUlpReq failed: while getting ue rnti %x in hashtable ue_mapping\n", data_req_p->rnti);
      } else {
        if ((data_req_p->rab_id >= GTPV1U_BEARER_OFFSET) && (data_req_p->rab_id <= max_val_DRB_Identity)) {
          enb_s1u_teid                        = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].teid_eNB;
          sgw_s1u_teid                        = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].teid_sgw;
          stack_req.apiType                   = NW_GTPV1U_ULP_API_SEND_TPDU;
          stack_req.apiInfo.sendtoInfo.teid   = sgw_s1u_teid;
          stack_req.apiInfo.sendtoInfo.ipAddr = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].sgw_ip_addr;

          /* Wrap the uplink payload into a G-PDU addressed to the SGW. */
          rc = nwGtpv1uGpduMsgNew(
                 gtpv1u_data_g.gtpv1u_stack,
                 sgw_s1u_teid,
                 NW_FALSE,
                 gtpv1u_data_g.seq_num++,
                 data_req_p->buffer,
                 data_req_p->length,
                 data_req_p->offset,
                 &(stack_req.apiInfo.sendtoInfo.hMsg));

          if (rc != NW_GTPV1U_OK) {
            LOG_E(GTPU, "nwGtpv1uGpduMsgNew failed: 0x%x\n", rc);
            MSC_LOG_EVENT(MSC_GTPU_ENB,"0 Failed send G-PDU ltid %u rtid %u size %u",
            		enb_s1u_teid,sgw_s1u_teid,data_req_p->length);
            (void)enb_s1u_teid; /* avoid gcc warning "set but not used" */
          } else {
            rc = nwGtpv1uProcessUlpReq(gtpv1u_data_g.gtpv1u_stack, &stack_req);

            if (rc != NW_GTPV1U_OK) {
              LOG_E(GTPU, "nwGtpv1uProcessUlpReq failed: 0x%x\n", rc);
              MSC_LOG_EVENT(MSC_GTPU_ENB,"0 Failed send G-PDU ltid %u rtid %u size %u",
              		enb_s1u_teid,sgw_s1u_teid,data_req_p->length);
            } else {
            	  MSC_LOG_TX_MESSAGE(
            			  MSC_GTPU_ENB,
            			  MSC_GTPU_SGW,
            			  NULL,
            			  0,
            			  MSC_AS_TIME_FMT" G-PDU ltid %u rtid %u size %u",
            			  0,0,
            			  enb_s1u_teid,
            			  sgw_s1u_teid,
            			  data_req_p->length);

            }

            /* The stack has consumed (or failed on) the request; the
             * message wrapper is released in either case. */
            rc = nwGtpv1uMsgDelete(gtpv1u_data_g.gtpv1u_stack,
                                   stack_req.apiInfo.sendtoInfo.hMsg);

            if (rc != NW_GTPV1U_OK) {
              LOG_E(GTPU, "nwGtpv1uMsgDelete failed: 0x%x\n", rc);
            }
          }
        }
      }

      VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_PROCESS_TUNNEL_DATA_REQ, VCD_FUNCTION_OUT);
      /* Buffer still needed, do not free it */
      //itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), data_req_p->buffer);
    }
    break;

    case TERMINATE_MESSAGE: {
      if (gtpv1u_data_g.ue_mapping != NULL) {
        hashtable_destroy (gtpv1u_data_g.ue_mapping);
      }

      if (gtpv1u_data_g.teid_mapping != NULL) {
        hashtable_destroy (gtpv1u_data_g.teid_mapping);
      }

      itti_exit_task();
    }
    break;

    case TIMER_HAS_EXPIRED:
      nwGtpv1uProcessTimeout(&received_message_p->ittiMsg.timer_has_expired.arg);
      break;

    default: {
      LOG_E(GTPU, "Unkwnon message ID %d:%s\n",
            ITTI_MSG_ID(received_message_p),
            ITTI_MSG_NAME(received_message_p));
    }
    break;
    }

    /* Release the ITTI message itself; payload buffers are handled above. */
    rc = itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), received_message_p);
    AssertFatal(rc == EXIT_SUCCESS, "Failed to free memory (%d)!\n", rc);
    received_message_p = NULL;
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_ENB_TASK, VCD_FUNCTION_OUT);
  }

  return NULL;
}
Example #7
0
//-----------------------------------------------------------------------------
/*
 * Send an uplink user-plane PDU for a UE/bearer over GTP-U to the SGW.
 *
 * Looks up the UE context by RNTI, checks the bearer state, wraps the
 * buffer into a G-PDU and pushes it through the GTPv1-U stack.
 *
 * @param enb_module_idP : eNB module index.
 * @param ue_rntiP       : UE RNTI used as key into ue_mapping.
 * @param rab_idP        : Radio bearer id (>= GTPV1U_BEARER_OFFSET).
 * @param buffer_pP      : Payload buffer (not freed here).
 * @param buf_lenP       : Payload length in bytes.
 * @param buf_offsetP    : Offset of the payload inside buffer_pP.
 * @return 0 on success, -1 on any failure.
 */
int
gtpv1u_new_data_req(
  uint8_t  enb_module_idP,
  rnti_t   ue_rntiP,
  uint8_t  rab_idP,
  uint8_t *buffer_pP,
  uint32_t buf_lenP,
  uint32_t buf_offsetP
)
{

  NwGtpv1uUlpApiT          stack_req;
  NwGtpv1uRcT              rc            = NW_GTPV1U_FAILURE;
  struct gtpv1u_ue_data_s  ue;
  struct gtpv1u_ue_data_s *ue_inst_p     = NULL;
  struct gtpv1u_bearer_s  *bearer_p      = NULL;
  hashtable_rc_t           hash_rc       = HASH_TABLE_KEY_NOT_EXISTS; /* BUGFIX: removed stray ';;' */
  gtpv1u_data_t           *gtpv1u_data_p = NULL;

  memset(&ue, 0, sizeof(struct gtpv1u_ue_data_s));
  ue.ue_id = ue_rntiP;

  /* NOTE(review): enb_module_idP is unsigned, so '>= 0' is always true;
   * kept for symmetry but it cannot fire. */
  AssertFatal(enb_module_idP >=0, "Bad parameter enb module id %u\n", enb_module_idP);
  AssertFatal((rab_idP - GTPV1U_BEARER_OFFSET)< GTPV1U_MAX_BEARERS_ID, "Bad parameter rab id %u\n", rab_idP);
  AssertFatal((rab_idP - GTPV1U_BEARER_OFFSET) >= 0 , "Bad parameter rab id %u\n", rab_idP);

  gtpv1u_data_p = &gtpv1u_data_g;
  /* Check that UE context is present in ue map. */
  hash_rc = hashtable_get(gtpv1u_data_p->ue_mapping, (uint64_t)ue_rntiP, (void**)&ue_inst_p);

  if (hash_rc ==  HASH_TABLE_KEY_NOT_EXISTS ) {
    LOG_E(GTPU, "[UE %d] Trying to send data on non-existing UE context\n", ue_rntiP);
    return -1;
  }

  bearer_p = &ue_inst_p->bearers[rab_idP - GTPV1U_BEARER_OFFSET];

  /* Ensure the bearer in ready.
   * TODO: handle the cases where the bearer is in HANDOVER state.
   * In such case packets should be placed in FIFO.
   */
  if (bearer_p->state != BEARER_UP) {
    LOG_W(GTPU, "Trying to send data over bearer with state(%u) != BEARER_UP\n",
          bearer_p->state);
//#warning  LG: HACK WHILE WAITING FOR NAS, normally return -1

    if (bearer_p->state != BEARER_IN_CONFIG)
      return -1;
  }

  memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT));

  stack_req.apiType                   = NW_GTPV1U_ULP_API_SEND_TPDU;
  stack_req.apiInfo.sendtoInfo.teid   = bearer_p->teid_sgw;
  stack_req.apiInfo.sendtoInfo.ipAddr = bearer_p->sgw_ip_addr;

  LOG_D(GTPU, "TX TO TEID %u addr 0x%x\n",bearer_p->teid_sgw, bearer_p->sgw_ip_addr);
  rc = nwGtpv1uGpduMsgNew(gtpv1u_data_p->gtpv1u_stack,
                          bearer_p->teid_sgw,
                          NW_FALSE,
                          gtpv1u_data_p->seq_num++,
                          buffer_pP,
                          buf_lenP,
                          buf_offsetP,
                          &(stack_req.apiInfo.sendtoInfo.hMsg));

  if (rc != NW_GTPV1U_OK) {
    LOG_E(GTPU, "nwGtpv1uGpduMsgNew failed: 0x%x\n", rc);
    return -1;
  }

  rc = nwGtpv1uProcessUlpReq(gtpv1u_data_p->gtpv1u_stack,
                             &stack_req);

  if (rc != NW_GTPV1U_OK) {
    LOG_E(GTPU, "nwGtpv1uProcessUlpReq failed: 0x%x\n", rc);
    return -1;
  }

  /* Release the message wrapper once the stack has sent it. */
  rc = nwGtpv1uMsgDelete(gtpv1u_data_p->gtpv1u_stack,
                         stack_req.apiInfo.sendtoInfo.hMsg);

  if (rc != NW_GTPV1U_OK) {
    LOG_E(GTPU, "nwGtpv1uMsgDelete failed: 0x%x\n", rc);
    return -1;
  }

  LOG_D(GTPU, "%s() return code OK\n", __FUNCTION__);
  return 0;
}
Example #8
0
/*
 * Forward a T-PDU received over GTP-U according to the flow context's
 * egress end point: re-emit over IPv4, over another GTP-U tunnel, or
 * drop (GRE / unknown). Always returns NW_SDP_OK; internal failures
 * are asserted or logged.
 */
NwSdpRcT
nwSdpProcessGtpuDataIndication(NwSdpT* thiz, 
              NwSdpFlowContextT* pFlowContext,
              NwGtpv1uMsgHandleT hMsg)
{
  NwSdpRcT rc;

  /*
   * Send Message Request to GTPv1u Stack Instance
   */

  switch(pFlowContext->egressEndPoint.flowType)
  {
    case NW_FLOW_TYPE_IPv4:
      {
        NwIpv4UlpApiT           ulpReq;
        NwU8T*                  pIpv4Pdu;

        if(thiz->hIpv4Stack)
        {
          /* Send over IP*/
          /* Copy the GTP-U T-PDU payload into a new IPv4 message. */
          rc = nwIpv4MsgFromBufferNew(thiz->hIpv4Stack,  
              nwGtpv1uMsgGetTpduHandle(hMsg),
              nwGtpv1uMsgGetTpduLength(hMsg),
              &(ulpReq.apiInfo.sendtoInfo.hMsg));
          NW_ASSERT(NW_OK == rc);

#ifdef NW_SDP_RESPOND_ICMP_PING
          /* Hack: answer ICMP echo requests locally instead of routing.
           * Offset 20 is the first byte after a 20-byte IPv4 header, i.e.
           * the ICMP type field; 0x08 = echo request. NOTE(review): this
           * assumes IHL == 5 (no IP options) — TODO confirm. */
          pIpv4Pdu = nwIpv4MsgGetBufHandle(thiz->hIpv4Stack, ulpReq.apiInfo.sendtoInfo.hMsg);
          if(*(pIpv4Pdu + 20) == 0x08)
          {
#define NW_MAX_ICMP_PING_DATA_SIZE      (1024)
            NwU8T pingRspPdu[14 + NW_MAX_ICMP_PING_DATA_SIZE];
            NwU32T pingRspPduLen;

            /* Turn echo request (0x08) into echo reply (0x00) in place. */
            *(pIpv4Pdu + 20) = 0x00;

            /* Bytes 12-13 of the 14-byte prefix: presumably an Ethernet
             * EtherType 0x0800 (IPv4) — verify against the IPv4 stack's
             * expected framing. */
            pingRspPdu[12] = 0x08;
            pingRspPdu[13] = 0x00;

            /* Clamp the copied length to the response buffer capacity. */
            pingRspPduLen = (NW_MAX_ICMP_PING_DATA_SIZE > nwIpv4MsgGetLength(thiz->hIpv4Stack, ulpReq.apiInfo.sendtoInfo.hMsg) ? nwIpv4MsgGetLength(thiz->hIpv4Stack, ulpReq.apiInfo.sendtoInfo.hMsg) : NW_MAX_ICMP_PING_DATA_SIZE);
            memcpy(pingRspPdu + 14, pIpv4Pdu, pingRspPduLen);
            /* Swap IPv4 source (offset 12) and destination (offset 16)
             * addresses within the copied header. */
            memcpy(pingRspPdu + 14 + 16, pIpv4Pdu + 12, 4);
            memcpy(pingRspPdu + 14 + 12, pIpv4Pdu + 16, 4);
            /* TODO: Add ip-checksum */
            rc = nwSdpProcessIpv4DataInd(thiz, 0, pingRspPdu, pingRspPduLen + 14);
          }
          else
          {
#endif
            ulpReq.apiType          = NW_IPv4_ULP_API_SEND_TPDU;
            rc = nwIpv4ProcessUlpReq(thiz->hIpv4Stack, &ulpReq);
            NW_ASSERT( rc == NW_SDP_OK );
#ifdef NW_SDP_RESPOND_ICMP_PING
          }
#endif
          /* Release the IPv4 message in both branches. */
          rc = nwIpv4MsgDelete(thiz->hIpv4Stack, (ulpReq.apiInfo.sendtoInfo.hMsg));
          NW_ASSERT( rc == NW_SDP_OK );

        }
        else
        {
          NW_LOG(thiz, NW_LOG_LEVEL_ERRO, "Cannot send PDU over IPv4! IPv4 service does not exist on data plane.");
        }
      }
      break;

    case NW_FLOW_TYPE_GTPU:
      {
        /* Re-tunnel: clone the message and send it on the egress TEID. */
        NwGtpv1uUlpApiT           ulpReq;
        ulpReq.apiType                        = NW_GTPV1U_ULP_API_SEND_TPDU;
        ulpReq.apiInfo.sendtoInfo.teid        = pFlowContext->egressEndPoint.flowKey.gtpuTeid;
        ulpReq.apiInfo.sendtoInfo.ipAddr      = pFlowContext->egressEndPoint.ipv4Addr;

        if(thiz->hGtpv1uStack)
        {
          rc = nwGtpv1uMsgFromMsgNew( thiz->hGtpv1uStack,
              hMsg,
              &(ulpReq.apiInfo.sendtoInfo.hMsg));

          NW_ASSERT( rc == NW_SDP_OK );

          rc = nwGtpv1uProcessUlpReq(thiz->hGtpv1uStack, &ulpReq);
          NW_ASSERT( rc == NW_SDP_OK );

          rc = nwGtpv1uMsgDelete(thiz->hGtpv1uStack, (ulpReq.apiInfo.sendtoInfo.hMsg));
          NW_ASSERT( rc == NW_SDP_OK );
          NW_LOG(thiz, NW_LOG_LEVEL_DEBG, "Sending GTPU PDU over teid 0x%x "NW_IPV4_ADDR, ulpReq.apiInfo.sendtoInfo.teid, NW_IPV4_ADDR_FORMAT(ntohl(ulpReq.apiInfo.sendtoInfo.ipAddr)));
        }
        else
        {
          NW_LOG(thiz, NW_LOG_LEVEL_ERRO, "Cannot send PDU over GTPU! GTPU service does not exist on data plane.");
        }
      }
      break;
    case NW_FLOW_TYPE_GRE:
      {
        NW_LOG(thiz, NW_LOG_LEVEL_WARN, "Cannot send TPDU over GRE! Not supported yet!");
      }
      break;
    default:
      {
        NW_LOG(thiz, NW_LOG_LEVEL_ERRO, "Unsupported egress flow end point type! Dropping GTP TPDU.");
      }
      break;
  }

  return NW_SDP_OK;
}
/*
 * Handle a received GTP-U Echo Request: build and send an Echo Response
 * carrying the peer's sequence number and a zero Recovery IE.
 *
 * @param thiz      : Stack instance.
 * @param msgBuf    : Raw received Echo Request bytes.
 * @param msgBufLen : Length of msgBuf.
 * @param peerPort  : Peer UDP port to reply to.
 * @param peerIp    : Peer IPv4 address to reply to.
 * @return result of the final message delete (NW_GTPV1U_OK on success).
 */
static NwGtpv1uRcT
nwGtpv1uHandleEchoReq(NW_IN NwGtpv1uStackT *thiz,
                      NW_IN uint8_t *msgBuf,
                      NW_IN uint32_t msgBufLen,
                      NW_IN uint16_t peerPort,
                      NW_IN uint32_t peerIp)
{
  NwGtpv1uRcT           rc     = NW_GTPV1U_FAILURE;
  uint16_t                seqNum = 0;
  NwGtpv1uMsgHandleT    hMsg   = 0;
  int                   bufLen = 0;

  /* Sequence number sits after the 8-byte header when the S flag
   * (bit 0x02 of the first octet) is set, else after the 4 mandatory
   * header bytes. */
  seqNum = ntohs(*(uint16_t *) (msgBuf + (((*msgBuf) & 0x02) ? 8 : 4)));

  MSC_LOG_RX_MESSAGE(
    (thiz->stackType == GTPU_STACK_ENB) ? MSC_GTPU_ENB:MSC_GTPU_SGW,
    (thiz->stackType == GTPU_STACK_ENB) ? MSC_GTPU_SGW:MSC_GTPU_ENB,
    NULL,
    0,
    MSC_AS_TIME_FMT" ECHO-REQ seq %u size %u",
    0,0,seqNum, msgBufLen);
  /* Send Echo Response */

  rc = nwGtpv1uMsgNew( (NwGtpv1uStackHandleT)thiz,
                       NW_TRUE,         /* SeqNum flag    */
                       NW_FALSE,
                       NW_FALSE,
                       NW_GTP_ECHO_RSP,  /* Msg Type             */
                       0x00000000U,      /* TEID                 */
                       seqNum,           /* Seq Number           */
                       0,
                       0,
                       (&hMsg));
  /* BUGFIX: validate message creation BEFORE dereferencing hMsg. The
   * original asserted rc only after using ((NwGtpv1uMsgT*)hMsg)->msgLen,
   * which would dereference an invalid handle on failure. */
  NW_ASSERT(NW_GTPV1U_OK == rc);

  bufLen = sizeof(NwGtpv1uIeTv1T)+ ((NwGtpv1uMsgT*)hMsg)->msgLen;

  ((NwGtpv1uMsgT*)hMsg)->msgBuf = itti_malloc(
                                    TASK_GTPV1_U,
                                    TASK_UDP,
                                    bufLen);
  ((NwGtpv1uMsgT*)hMsg)->msgBufLen    = bufLen;

  /*
   * The Restart Counter value in the Recovery information element shall
   * not be used, i.e. it shall be set to zero by the sender and shall be
   * ignored by the receiver.
   */
  rc = nwGtpv1uMsgAddIeTV1(hMsg, NW_GTPV1U_IE_RECOVERY, 0x00);

#if defined(LOG_GTPU) && LOG_GTPU > 0
  GTPU_INFO("Sending NW_GTP_ECHO_RSP message to %x:%x with seq %u\n",
            peerIp,
            peerPort,
            seqNum);
#endif
  MSC_LOG_TX_MESSAGE(
    (thiz->stackType == GTPU_STACK_ENB) ? MSC_GTPU_ENB:MSC_GTPU_SGW,
    (thiz->stackType == GTPU_STACK_ENB) ? MSC_GTPU_SGW:MSC_GTPU_ENB,
    NULL,
    0,
    MSC_AS_TIME_FMT" ECHO-RSP seq %u",
    0,0,seqNum);
  /* NOTE(review): rc from nwGtpv1uCreateAndSendMsg is overwritten by the
   * delete below — send failures are silently dropped here. */
  rc = nwGtpv1uCreateAndSendMsg(
         thiz,
         peerIp,
         peerPort,
         (NwGtpv1uMsgT *) hMsg);

  rc = nwGtpv1uMsgDelete((NwGtpv1uStackHandleT)thiz, hMsg);
  NW_ASSERT(NW_GTPV1U_OK == rc);

  return rc;
}