/* Handle GTPV1U_CREATE_TUNNEL_REQ (SGW side): allocate a locally unique S1-U
 * TEID, create the tunnel endpoint in the nw-gtpv1-u stack, register the
 * TEID -> eNB-info mapping, and reply to the SPGW application task with
 * GTPV1U_CREATE_TUNNEL_RESP (status 0 on success, 0xFF on duplicate TEID or
 * insert failure).  Returns the itti_send_msg_to_task() result, or -1 if the
 * per-tunnel context cannot be allocated. */
static int gtpv1u_create_s1u_tunnel(Gtpv1uCreateTunnelReq *create_tunnel_reqP)
{
  /* Create a new nw-gtpv1-u stack req using API */
  NwGtpv1uUlpApiT          stack_req;
  NwGtpv1uRcT              rc;
  /* Local tunnel end-point identifier */
  uint32_t                 s1u_teid             = 0;
  gtpv1u_teid2enb_info_t  *gtpv1u_teid2enb_info = NULL;
  MessageDef              *message_p            = NULL;
  hashtable_rc_t           hash_rc;

  /* FIX: %u matches the unsigned TEID (the Tx log below already uses %u). */
  GTPU_DEBUG("Rx GTPV1U_CREATE_TUNNEL_REQ Context %u\n", create_tunnel_reqP->context_teid);
  memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT));
  stack_req.apiType = NW_GTPV1U_ULP_API_CREATE_TUNNEL_ENDPOINT;

  /* Retry until the stack accepts a locally unique TEID. */
  do {
    s1u_teid = gtpv1u_new_teid();
    GTPU_DEBUG("gtpv1u_create_s1u_tunnel() %u\n", s1u_teid);
    stack_req.apiInfo.createTunnelEndPointInfo.teid          = s1u_teid;
    stack_req.apiInfo.createTunnelEndPointInfo.hUlpSession   = 0;
    stack_req.apiInfo.createTunnelEndPointInfo.hStackSession = 0;
    rc = nwGtpv1uProcessUlpReq(gtpv1u_sgw_data.gtpv1u_stack, &stack_req);
    GTPU_DEBUG(".\n");
  } while (rc != NW_GTPV1U_OK);

  /* FIX: check the allocation before use (was dereferenced unchecked);
   * calloc also replaces the malloc+memset pair. */
  gtpv1u_teid2enb_info = calloc(1, sizeof(gtpv1u_teid2enb_info_t));

  if (gtpv1u_teid2enb_info == NULL) {
    GTPU_DEBUG("Failed to allocate gtpv1u_teid2enb_info\n");
    return -1;
  }

  gtpv1u_teid2enb_info->state = BEARER_IN_CONFIG;
  //#warning !!! hack because missing modify session request, so force enb address
  //  gtpv1u_teid2enb_info->enb_ip_addr.pdn_type = IPv4;
  //  gtpv1u_teid2enb_info->enb_ip_addr.address.ipv4_address[0] = 192;
  //  gtpv1u_teid2enb_info->enb_ip_addr.address.ipv4_address[1] = 168;
  //  gtpv1u_teid2enb_info->enb_ip_addr.address.ipv4_address[2] = 1;
  //  gtpv1u_teid2enb_info->enb_ip_addr.address.ipv4_address[3] = 2;

  message_p = itti_alloc_new_message(TASK_GTPV1_U, GTPV1U_CREATE_TUNNEL_RESP);
  message_p->ittiMsg.gtpv1uCreateTunnelResp.S1u_teid      = s1u_teid;
  message_p->ittiMsg.gtpv1uCreateTunnelResp.context_teid  = create_tunnel_reqP->context_teid;
  message_p->ittiMsg.gtpv1uCreateTunnelResp.eps_bearer_id = create_tunnel_reqP->eps_bearer_id;

  /* Register the TEID -> eNB context only if it is not already mapped. */
  hash_rc = hashtable_is_key_exists(gtpv1u_sgw_data.S1U_mapping, s1u_teid);

  if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS) {
    hash_rc = hashtable_insert(gtpv1u_sgw_data.S1U_mapping, s1u_teid, gtpv1u_teid2enb_info);
    /* FIX: the insert result was ignored — report a failed insert to the
     * SPGW application instead of claiming success. */
    message_p->ittiMsg.gtpv1uCreateTunnelResp.status = (hash_rc == HASH_TABLE_OK) ? 0 : 0xFF;
  } else {
    /* Duplicate TEID: signal failure and release the context that would
     * otherwise leak (FIX: was leaked on this path). */
    free(gtpv1u_teid2enb_info);
    gtpv1u_teid2enb_info = NULL;
    message_p->ittiMsg.gtpv1uCreateTunnelResp.status = 0xFF;
  }

  GTPU_DEBUG("Tx GTPV1U_CREATE_TUNNEL_RESP Context %u teid %u eps bearer id %u status %d\n",
             message_p->ittiMsg.gtpv1uCreateTunnelResp.context_teid,
             message_p->ittiMsg.gtpv1uCreateTunnelResp.S1u_teid,
             message_p->ittiMsg.gtpv1uCreateTunnelResp.eps_bearer_id,
             message_p->ittiMsg.gtpv1uCreateTunnelResp.status);
  return itti_send_msg_to_task(TASK_SPGW_APP, INSTANCE_DEFAULT, message_p);
}
/* Send one T-PDU through the GTPv1-U stack of this mini ULP entity.
 * In this test entity the local UDP port doubles as the tunnel TEID.
 * Asserts on any stack error; returns NW_GTPV1U_OK. */
NwGtpv1uRcT
nwMiniUlpTpduSend(NwMiniUlpEntityT* thiz, NwU8T* tpduBuf, NwU32T tpduLen, NwU16T fromPort)
{
  NwGtpv1uUlpApiT sendReq;
  NwGtpv1uRcT     rc;

  /* Fill in the SEND_TPDU request: destination is the configured peer. */
  sendReq.apiType                   = NW_GTPV1U_ULP_API_SEND_TPDU;
  sendReq.apiInfo.sendtoInfo.ipAddr = inet_addr(thiz->peerIpStr);
  sendReq.apiInfo.sendtoInfo.teid   = fromPort;

  /* Wrap the payload into a G-PDU (no sequence-number flag in the header;
   * the entity-local counter is still advanced per send). */
  rc = nwGtpv1uGpduMsgNew(thiz->hGtpv1uStack,
                          fromPort,
                          NW_FALSE,
                          thiz->seqNum++,
                          tpduBuf,
                          tpduLen,
                          &(sendReq.apiInfo.sendtoInfo.hMsg));
  NW_ASSERT(rc == NW_GTPV1U_OK);

  /* Hand the message to the stack, then release it. */
  rc = nwGtpv1uProcessUlpReq(thiz->hGtpv1uStack, &sendReq);
  NW_ASSERT(rc == NW_GTPV1U_OK);

  rc = nwGtpv1uMsgDelete(thiz->hGtpv1uStack, sendReq.apiInfo.sendtoInfo.hMsg);
  NW_ASSERT(rc == NW_GTPV1U_OK);

  return NW_GTPV1U_OK;
}
//----------------------------------------------------------------------------- int gtpv1u_initial_req( gtpv1u_data_t *gtpv1u_data_pP, teid_t teidP, tcp_udp_port_t portP, uint32_t address) { NwGtpv1uUlpApiT ulp_req; NwGtpv1uRcT rc = NW_GTPV1U_FAILURE; memset(&ulp_req, 0, sizeof(NwGtpv1uUlpApiT)); ulp_req.apiType = NW_GTPV1U_ULP_API_INITIAL_REQ; ulp_req.apiInfo.initialReqInfo.teid = teidP; ulp_req.apiInfo.initialReqInfo.peerPort = portP; ulp_req.apiInfo.initialReqInfo.peerIp = address; rc = nwGtpv1uProcessUlpReq(gtpv1u_data_pP->gtpv1u_stack, &ulp_req); if (rc == NW_GTPV1U_OK) { LOG_D(GTPU, "Successfully sent initial req for teid %u\n", teidP); } else { LOG_W(GTPU, "Could not send initial req for teid %u\n", teidP); } return (rc == NW_GTPV1U_OK) ? 0 : -1; }
/* Forward an IP PDU received from the IPv4 stack out of the egress end point
 * of the given flow context.  Only GTP-U egress is implemented: the PDU is
 * wrapped into a G-PDU and handed to the GTPv1-U stack.  GRE and unknown
 * flow types are logged and the PDU is dropped.  Always returns NW_SDP_OK. */
NwSdpRcT nwSdpProcessIpv4DataIndication(NwSdpT* thiz,
    NwSdpFlowContextT* pFlowContext,
    NwIpv4MsgHandleT hMsg)
{
  NwSdpRcT rc;

  /*
   * Send Message Request to GTPv1u Stack Instance
   */
  switch(pFlowContext->egressEndPoint.flowType) {
  case NW_FLOW_TYPE_GTPU: {
    NwGtpv1uUlpApiT ulpReq;
    /* The egress peer address must have been configured for this flow. */
    NW_ASSERT(pFlowContext->egressEndPoint.ipv4Addr != 0);
    NW_LOG(thiz, NW_LOG_LEVEL_DEBG, "Sending IP PDU over GTPU teid 0x%x to "NW_IPV4_ADDR,
           pFlowContext->egressEndPoint.flowKey.gtpuTeid,
           NW_IPV4_ADDR_FORMAT(htonl(pFlowContext->egressEndPoint.ipv4Addr)));
    ulpReq.apiType                   = NW_GTPV1U_ULP_API_SEND_TPDU;
    ulpReq.apiInfo.sendtoInfo.teid   = pFlowContext->egressEndPoint.flowKey.gtpuTeid;
    ulpReq.apiInfo.sendtoInfo.ipAddr = pFlowContext->egressEndPoint.ipv4Addr;
    /* Build the outgoing G-PDU around the buffer owned by the IPv4 stack. */
    rc = nwGtpv1uGpduMsgNew( thiz->hGtpv1uStack,
                             pFlowContext->egressEndPoint.flowKey.gtpuTeid,  /* TEID */
                             NW_FALSE,                                       /* Seq Num Present Flag */
                             0,                                              /* seqNum */
                             (NwU8T*) nwIpv4MsgGetBufHandle(thiz->hIpv4Stack, hMsg),
                             nwIpv4MsgGetLength(thiz->hIpv4Stack, hMsg),
                             (NwGtpv1uMsgHandleT*)&(ulpReq.apiInfo.sendtoInfo.hMsg));
    /* NOTE(review): rc comes from GTPv1-U calls but is compared to NW_SDP_OK —
     * this relies on the two result enums sharing the same OK value; confirm. */
    NW_ASSERT( rc == NW_SDP_OK );
    rc = nwGtpv1uProcessUlpReq(thiz->hGtpv1uStack, &ulpReq);
    NW_ASSERT( rc == NW_SDP_OK );
    /* Release the G-PDU message after the stack has consumed it. */
    rc = nwGtpv1uMsgDelete(thiz->hGtpv1uStack, (ulpReq.apiInfo.sendtoInfo.hMsg));
    NW_ASSERT( rc == NW_SDP_OK );
  }
  break;

  case NW_FLOW_TYPE_GRE: {
    NW_LOG(thiz, NW_LOG_LEVEL_DEBG, "Cannot send IP PDU over GRE! Not supported yet!");
  }
  break;

  default: {
    NW_LOG(thiz, NW_LOG_LEVEL_ERRO, "Unsupported egress flow end point type! Dropping IP PDU.");
  }
  break;
  }

  return NW_SDP_OK;
}
/* Destroy the GTPv1-U tunnel endpoint held by this mini ULP entity and clear
 * the stored stack-session handle.  Asserts on stack failure; returns
 * NW_GTPV1U_OK. */
NwGtpv1uRcT
nwMiniUlpDestroyConn(NwMiniUlpEntityT* thiz)
{
  NwGtpv1uUlpApiT destroyReq;
  NwGtpv1uRcT     rc;

  /* Ask the GTPv1-U stack to tear down the session owned by this entity. */
  destroyReq.apiType = NW_GTPV1U_ULP_API_DESTROY_TUNNEL_ENDPOINT;
  destroyReq.apiInfo.destroyTunnelEndPointInfo.hStackSessionHandle = thiz->hGtpv1uConn;

  rc = nwGtpv1uProcessUlpReq(thiz->hGtpv1uStack, &destroyReq);
  NW_ASSERT(rc == NW_GTPV1U_OK);

  /* The handle is no longer valid — forget it. */
  thiz->hGtpv1uConn = 0;

  return NW_GTPV1U_OK;
}
/* Build and send a GTP-U Echo Request (with a Recovery IE carrying this
 * entity's restart counter) to the given peer on port 2152, via an
 * INITIAL_REQ to the GTPv1-U stack.  Asserts on any stack failure; returns
 * NW_GTPV1U_OK. */
NwGtpv1uRcT nwMiniUlpSendEchoRequestToPeer(NwMiniUlpEntityT* thiz, NwU32T peerIp)
{
  NwGtpv1uRcT rc;
  NwGtpv1uUlpApiT ulpReq;

  /*
   * Send Message Request to Gtpv1u Stack Instance
   */
  ulpReq.apiType = NW_GTPV1U_ULP_API_INITIAL_REQ;
  ulpReq.apiInfo.initialReqInfo.hUlpTrxn = (NwGtpv1uUlpTrxnHandleT)thiz;  /* correlate the reply to this entity */
  ulpReq.apiInfo.initialReqInfo.teid = 0x00;      /* Echo uses TEID 0 */
  ulpReq.apiInfo.initialReqInfo.peerIp = (peerIp);
  ulpReq.apiInfo.initialReqInfo.peerPort = 2152;  /* standard GTP-U UDP port */

  /* Send Echo Request*/
  rc = nwGtpv1uMsgNew( thiz->hGtpv1uStack,
                       NW_TRUE,         /* SeqNum flag */
                       NW_FALSE,        /* no N-PDU number */
                       NW_FALSE,        /* no extension header */
                       NW_GTP_ECHO_REQ, /* Msg Type */
                       0x00000000UL,    /* TEID */
                       0x5678,          /* Seq Number — fixed test value */
                       0,
                       0,
                       (&ulpReq.hMsg));
  NW_ASSERT( rc == NW_GTPV1U_OK );

  /* Recovery IE is mandatory in Echo Request/Response. */
  rc = nwGtpv1uMsgAddIeTV1((ulpReq.hMsg), NW_GTPV1U_IE_RECOVERY, thiz->restartCounter);
  NW_ASSERT( rc == NW_GTPV1U_OK );

  rc = nwGtpv1uProcessUlpReq(thiz->hGtpv1uStack, &ulpReq);
  NW_ASSERT( rc == NW_GTPV1U_OK );

  return NW_GTPV1U_OK;
}
//----------------------------------------------------------------------------- void *gtpv1u_eNB_task(void *args) { int rc = 0; instance_t instance; //const char *msg_name_p; rc = gtpv1u_eNB_init(); AssertFatal(rc == 0, "gtpv1u_eNB_init Failed"); itti_mark_task_ready(TASK_GTPV1_U); MSC_START_USE(); while(1) { /* Trying to fetch a message from the message queue. * If the queue is empty, this function will block till a * message is sent to the task. */ MessageDef *received_message_p = NULL; itti_receive_msg(TASK_GTPV1_U, &received_message_p); VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_ENB_TASK, VCD_FUNCTION_IN); DevAssert(received_message_p != NULL); instance = ITTI_MSG_INSTANCE(received_message_p); //msg_name_p = ITTI_MSG_NAME(received_message_p); switch (ITTI_MSG_ID(received_message_p)) { case GTPV1U_ENB_DELETE_TUNNEL_REQ: { gtpv1u_delete_s1u_tunnel(instance, &received_message_p->ittiMsg.Gtpv1uDeleteTunnelReq); } break; // DATA COMING FROM UDP case UDP_DATA_IND: { udp_data_ind_t *udp_data_ind_p; udp_data_ind_p = &received_message_p->ittiMsg.udp_data_ind; nwGtpv1uProcessUdpReq(gtpv1u_data_g.gtpv1u_stack, udp_data_ind_p->buffer, udp_data_ind_p->buffer_length, udp_data_ind_p->peer_port, udp_data_ind_p->peer_address); //itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), udp_data_ind_p->buffer); } break; // DATA TO BE SENT TO UDP case GTPV1U_ENB_TUNNEL_DATA_REQ: { gtpv1u_enb_tunnel_data_req_t *data_req_p = NULL; NwGtpv1uUlpApiT stack_req; NwGtpv1uRcT rc = NW_GTPV1U_FAILURE; hashtable_rc_t hash_rc = HASH_TABLE_KEY_NOT_EXISTS; gtpv1u_ue_data_t *gtpv1u_ue_data_p = NULL; teid_t enb_s1u_teid = 0; teid_t sgw_s1u_teid = 0; VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_PROCESS_TUNNEL_DATA_REQ, VCD_FUNCTION_IN); data_req_p = >PV1U_ENB_TUNNEL_DATA_REQ(received_message_p); //ipv4_send_data(ipv4_data_p->sd, data_ind_p->buffer, data_ind_p->length); #if defined(GTP_DUMP_SOCKET) && GTP_DUMP_SOCKET > 0 
gtpv1u_eNB_write_dump_socket(&data_req_p->buffer[data_req_p->offset],data_req_p->length); #endif memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT)); hash_rc = hashtable_get(gtpv1u_data_g.ue_mapping, (uint64_t)data_req_p->rnti, (void**)>pv1u_ue_data_p); if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS) { LOG_E(GTPU, "nwGtpv1uProcessUlpReq failed: while getting ue rnti %x in hashtable ue_mapping\n", data_req_p->rnti); } else { if ((data_req_p->rab_id >= GTPV1U_BEARER_OFFSET) && (data_req_p->rab_id <= max_val_DRB_Identity)) { enb_s1u_teid = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].teid_eNB; sgw_s1u_teid = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].teid_sgw; stack_req.apiType = NW_GTPV1U_ULP_API_SEND_TPDU; stack_req.apiInfo.sendtoInfo.teid = sgw_s1u_teid; stack_req.apiInfo.sendtoInfo.ipAddr = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].sgw_ip_addr; rc = nwGtpv1uGpduMsgNew( gtpv1u_data_g.gtpv1u_stack, sgw_s1u_teid, NW_FALSE, gtpv1u_data_g.seq_num++, data_req_p->buffer, data_req_p->length, data_req_p->offset, &(stack_req.apiInfo.sendtoInfo.hMsg)); if (rc != NW_GTPV1U_OK) { LOG_E(GTPU, "nwGtpv1uGpduMsgNew failed: 0x%x\n", rc); MSC_LOG_EVENT(MSC_GTPU_ENB,"0 Failed send G-PDU ltid %u rtid %u size %u", enb_s1u_teid,sgw_s1u_teid,data_req_p->length); (void)enb_s1u_teid; /* avoid gcc warning "set but not used" */ } else { rc = nwGtpv1uProcessUlpReq(gtpv1u_data_g.gtpv1u_stack, &stack_req); if (rc != NW_GTPV1U_OK) { LOG_E(GTPU, "nwGtpv1uProcessUlpReq failed: 0x%x\n", rc); MSC_LOG_EVENT(MSC_GTPU_ENB,"0 Failed send G-PDU ltid %u rtid %u size %u", enb_s1u_teid,sgw_s1u_teid,data_req_p->length); } else { MSC_LOG_TX_MESSAGE( MSC_GTPU_ENB, MSC_GTPU_SGW, NULL, 0, MSC_AS_TIME_FMT" G-PDU ltid %u rtid %u size %u", 0,0, enb_s1u_teid, sgw_s1u_teid, data_req_p->length); } rc = nwGtpv1uMsgDelete(gtpv1u_data_g.gtpv1u_stack, stack_req.apiInfo.sendtoInfo.hMsg); if (rc != NW_GTPV1U_OK) { LOG_E(GTPU, "nwGtpv1uMsgDelete failed: 
0x%x\n", rc); } } } } VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_PROCESS_TUNNEL_DATA_REQ, VCD_FUNCTION_OUT); /* Buffer still needed, do not free it */ //itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), data_req_p->buffer); } break; case TERMINATE_MESSAGE: { if (gtpv1u_data_g.ue_mapping != NULL) { hashtable_destroy (gtpv1u_data_g.ue_mapping); } if (gtpv1u_data_g.teid_mapping != NULL) { hashtable_destroy (gtpv1u_data_g.teid_mapping); } itti_exit_task(); } break; case TIMER_HAS_EXPIRED: nwGtpv1uProcessTimeout(&received_message_p->ittiMsg.timer_has_expired.arg); break; default: { LOG_E(GTPU, "Unkwnon message ID %d:%s\n", ITTI_MSG_ID(received_message_p), ITTI_MSG_NAME(received_message_p)); } break; } rc = itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), received_message_p); AssertFatal(rc == EXIT_SUCCESS, "Failed to free memory (%d)!\n", rc); received_message_p = NULL; VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_ENB_TASK, VCD_FUNCTION_OUT); } return NULL; }
//----------------------------------------------------------------------------- static int gtpv1u_delete_s1u_tunnel( const instance_t instanceP, const gtpv1u_enb_delete_tunnel_req_t * const req_pP) { NwGtpv1uUlpApiT stack_req; NwGtpv1uRcT rc = NW_GTPV1U_FAILURE; MessageDef *message_p = NULL; gtpv1u_ue_data_t *gtpv1u_ue_data_p = NULL; hashtable_rc_t hash_rc = HASH_TABLE_KEY_NOT_EXISTS; teid_t teid_eNB = 0; int erab_index = 0; message_p = itti_alloc_new_message(TASK_GTPV1_U, GTPV1U_ENB_DELETE_TUNNEL_RESP); GTPV1U_ENB_DELETE_TUNNEL_RESP(message_p).rnti = req_pP->rnti; GTPV1U_ENB_DELETE_TUNNEL_RESP(message_p).status = 0; hash_rc = hashtable_get(gtpv1u_data_g.ue_mapping, req_pP->rnti, (void**)>pv1u_ue_data_p); if (hash_rc == HASH_TABLE_OK) { for (erab_index = 0; erab_index < req_pP->num_erab; erab_index++) { teid_eNB = gtpv1u_ue_data_p->bearers[req_pP->eps_bearer_id[erab_index] - GTPV1U_BEARER_OFFSET].teid_eNB; LOG_D(GTPU, "Rx GTPV1U_ENB_DELETE_TUNNEL user rnti %x eNB S1U teid %u eps bearer id %u\n", req_pP->rnti, teid_eNB, req_pP->eps_bearer_id[erab_index]); { memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT)); stack_req.apiType = NW_GTPV1U_ULP_API_DESTROY_TUNNEL_ENDPOINT; LOG_D(GTPU, "gtpv1u_delete_s1u_tunnel erab %u %u\n", req_pP->eps_bearer_id[erab_index], teid_eNB); stack_req.apiInfo.destroyTunnelEndPointInfo.hStackSessionHandle = gtpv1u_ue_data_p->bearers[req_pP->eps_bearer_id[erab_index] - GTPV1U_BEARER_OFFSET].teid_eNB_stack_session; rc = nwGtpv1uProcessUlpReq(gtpv1u_data_g.gtpv1u_stack, &stack_req); LOG_D(GTPU, ".\n"); } if (rc != NW_GTPV1U_OK) { GTPV1U_ENB_DELETE_TUNNEL_RESP(message_p).status |= 0xFF; LOG_E(GTPU, "NW_GTPV1U_ULP_API_DESTROY_TUNNEL_ENDPOINT failed"); } //----------------------- // PDCP->GTPV1U mapping //----------------------- gtpv1u_ue_data_p->bearers[req_pP->eps_bearer_id[erab_index] - GTPV1U_BEARER_OFFSET].state = BEARER_DOWN; gtpv1u_ue_data_p->bearers[req_pP->eps_bearer_id[erab_index] - GTPV1U_BEARER_OFFSET].teid_eNB = 0; 
gtpv1u_ue_data_p->bearers[req_pP->eps_bearer_id[erab_index] - GTPV1U_BEARER_OFFSET].teid_sgw = 0; gtpv1u_ue_data_p->bearers[req_pP->eps_bearer_id[erab_index] - GTPV1U_BEARER_OFFSET].sgw_ip_addr = 0; gtpv1u_ue_data_p->num_bearers -= 1; if (gtpv1u_ue_data_p->num_bearers == 0) { hash_rc = hashtable_remove(gtpv1u_data_g.ue_mapping, req_pP->rnti); LOG_D(GTPU, "Removed user rnti %x,no more bearers configured\n", req_pP->rnti); } //----------------------- // GTPV1U->PDCP mapping //----------------------- hash_rc = hashtable_remove(gtpv1u_data_g.teid_mapping, teid_eNB); if (hash_rc != HASH_TABLE_OK) { LOG_D(GTPU, "Removed user rnti %x , enb S1U teid %u not found\n", req_pP->rnti, teid_eNB); } } }// else silently do nothing LOG_D(GTPU, "Tx GTPV1U_ENB_DELETE_TUNNEL_RESP user rnti %x eNB S1U teid %u status %u\n", GTPV1U_ENB_DELETE_TUNNEL_RESP(message_p).rnti, GTPV1U_ENB_DELETE_TUNNEL_RESP(message_p).enb_S1u_teid, GTPV1U_ENB_DELETE_TUNNEL_RESP(message_p).status); MSC_LOG_TX_MESSAGE( MSC_GTPU_ENB, MSC_RRC_ENB, NULL,0, "0 GTPV1U_ENB_DELETE_TUNNEL_RESP rnti %x teid %x", GTPV1U_ENB_DELETE_TUNNEL_RESP(message_p).rnti, teid_eNB); return itti_send_msg_to_task(TASK_RRC_ENB, instanceP, message_p); }
//----------------------------------------------------------------------------- int gtpv1u_create_s1u_tunnel( const instance_t instanceP, const gtpv1u_enb_create_tunnel_req_t * const create_tunnel_req_pP, gtpv1u_enb_create_tunnel_resp_t * const create_tunnel_resp_pP ) { /* Create a new nw-gtpv1-u stack req using API */ NwGtpv1uUlpApiT stack_req; NwGtpv1uRcT rc = NW_GTPV1U_FAILURE; /* Local tunnel end-point identifier */ teid_t s1u_teid = 0; gtpv1u_teid_data_t *gtpv1u_teid_data_p = NULL; gtpv1u_ue_data_t *gtpv1u_ue_data_p = NULL; //MessageDef *message_p = NULL; hashtable_rc_t hash_rc = HASH_TABLE_KEY_NOT_EXISTS; int i; ebi_t eps_bearer_id = 0; // int ipv4_addr = 0; int ip_offset = 0; in_addr_t in_addr; int addrs_length_in_bytes= 0; MSC_LOG_RX_MESSAGE( MSC_GTPU_ENB, MSC_RRC_ENB, NULL,0, MSC_AS_TIME_FMT" CREATE_TUNNEL_REQ RNTI %"PRIx16" inst %u ntuns %u ebid %u sgw-s1u teid %u", 0,0,create_tunnel_req_pP->rnti, instanceP, create_tunnel_req_pP->num_tunnels, create_tunnel_req_pP->eps_bearer_id[0], create_tunnel_req_pP->sgw_S1u_teid[0]); create_tunnel_resp_pP->rnti = create_tunnel_req_pP->rnti; create_tunnel_resp_pP->status = 0; create_tunnel_resp_pP->num_tunnels = 0; for (i = 0; i < create_tunnel_req_pP->num_tunnels; i++) { ip_offset = 0; eps_bearer_id = create_tunnel_req_pP->eps_bearer_id[i]; LOG_D(GTPU, "Rx GTPV1U_ENB_CREATE_TUNNEL_REQ ue rnti %x eps bearer id %u\n", create_tunnel_req_pP->rnti, eps_bearer_id); memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT)); stack_req.apiType = NW_GTPV1U_ULP_API_CREATE_TUNNEL_ENDPOINT; do { s1u_teid = gtpv1u_new_teid(); LOG_D(GTPU, "gtpv1u_create_s1u_tunnel() 0x%x %u(dec)\n", s1u_teid, s1u_teid); stack_req.apiInfo.createTunnelEndPointInfo.teid = s1u_teid; stack_req.apiInfo.createTunnelEndPointInfo.hUlpSession = 0; stack_req.apiInfo.createTunnelEndPointInfo.hStackSession = 0; rc = nwGtpv1uProcessUlpReq(gtpv1u_data_g.gtpv1u_stack, &stack_req); LOG_D(GTPU, ".\n"); } while (rc != NW_GTPV1U_OK); //----------------------- // PDCP->GTPV1U 
mapping //----------------------- hash_rc = hashtable_get(gtpv1u_data_g.ue_mapping, create_tunnel_req_pP->rnti, (void **)>pv1u_ue_data_p); if ((hash_rc == HASH_TABLE_KEY_NOT_EXISTS) || (hash_rc == HASH_TABLE_OK)) { if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS) { gtpv1u_ue_data_p = calloc (1, sizeof(gtpv1u_ue_data_t)); hash_rc = hashtable_insert(gtpv1u_data_g.ue_mapping, create_tunnel_req_pP->rnti, gtpv1u_ue_data_p); AssertFatal(hash_rc == HASH_TABLE_OK, "Error inserting ue_mapping in GTPV1U hashtable"); } gtpv1u_ue_data_p->ue_id = create_tunnel_req_pP->rnti; gtpv1u_ue_data_p->instance_id = 0; // TO DO memcpy(&create_tunnel_resp_pP->enb_addr.buffer, >pv1u_data_g.enb_ip_address_for_S1u_S12_S4_up, sizeof (in_addr_t)); create_tunnel_resp_pP->enb_addr.length = sizeof (in_addr_t); addrs_length_in_bytes = create_tunnel_req_pP->sgw_addr[i].length / 8; AssertFatal((addrs_length_in_bytes == 4) || (addrs_length_in_bytes == 16) || (addrs_length_in_bytes == 20), "Bad transport layer address length %d (bits) %d (bytes)", create_tunnel_req_pP->sgw_addr[i].length, addrs_length_in_bytes); if ((addrs_length_in_bytes == 4) || (addrs_length_in_bytes == 20)) { in_addr = *((in_addr_t*)create_tunnel_req_pP->sgw_addr[i].buffer); ip_offset = 4; gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].sgw_ip_addr = in_addr; } if ((addrs_length_in_bytes == 16) || (addrs_length_in_bytes == 20)) { memcpy(gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].sgw_ip6_addr.s6_addr, &create_tunnel_req_pP->sgw_addr[i].buffer[ip_offset], 16); } gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].state = BEARER_IN_CONFIG; gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].teid_eNB = s1u_teid; gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].teid_eNB_stack_session = stack_req.apiInfo.createTunnelEndPointInfo.hStackSession; gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].teid_sgw = create_tunnel_req_pP->sgw_S1u_teid[i]; 
create_tunnel_resp_pP->enb_S1u_teid[i] = s1u_teid; } else { create_tunnel_resp_pP->enb_S1u_teid[i] = 0; create_tunnel_resp_pP->status = 0xFF; } create_tunnel_resp_pP->eps_bearer_id[i] = eps_bearer_id; create_tunnel_resp_pP->num_tunnels += 1; //----------------------- // GTPV1U->PDCP mapping //----------------------- hash_rc = hashtable_get(gtpv1u_data_g.teid_mapping, s1u_teid, (void**)>pv1u_teid_data_p); if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS) { gtpv1u_teid_data_p = calloc (1, sizeof(gtpv1u_teid_data_t)); gtpv1u_teid_data_p->enb_id = 0; // TO DO gtpv1u_teid_data_p->ue_id = create_tunnel_req_pP->rnti; gtpv1u_teid_data_p->eps_bearer_id = eps_bearer_id; hash_rc = hashtable_insert(gtpv1u_data_g.teid_mapping, s1u_teid, gtpv1u_teid_data_p); AssertFatal(hash_rc == HASH_TABLE_OK, "Error inserting teid mapping in GTPV1U hashtable"); } else { create_tunnel_resp_pP->enb_S1u_teid[i] = 0; create_tunnel_resp_pP->status = 0xFF; } } MSC_LOG_TX_MESSAGE( MSC_GTPU_ENB, MSC_RRC_ENB, NULL,0, "0 GTPV1U_ENB_CREATE_TUNNEL_RESP rnti %x teid %x", create_tunnel_resp_pP->rnti, s1u_teid); LOG_D(GTPU, "Tx GTPV1U_ENB_CREATE_TUNNEL_RESP ue rnti %x status %d\n", create_tunnel_req_pP->rnti, create_tunnel_resp_pP->status); return 0; }
//----------------------------------------------------------------------------- int gtpv1u_new_data_req( uint8_t enb_module_idP, rnti_t ue_rntiP, uint8_t rab_idP, uint8_t *buffer_pP, uint32_t buf_lenP, uint32_t buf_offsetP ) { NwGtpv1uUlpApiT stack_req; NwGtpv1uRcT rc = NW_GTPV1U_FAILURE; struct gtpv1u_ue_data_s ue; struct gtpv1u_ue_data_s *ue_inst_p = NULL; struct gtpv1u_bearer_s *bearer_p = NULL; hashtable_rc_t hash_rc = HASH_TABLE_KEY_NOT_EXISTS;; gtpv1u_data_t *gtpv1u_data_p = NULL; memset(&ue, 0, sizeof(struct gtpv1u_ue_data_s)); ue.ue_id = ue_rntiP; AssertFatal(enb_module_idP >=0, "Bad parameter enb module id %u\n", enb_module_idP); AssertFatal((rab_idP - GTPV1U_BEARER_OFFSET)< GTPV1U_MAX_BEARERS_ID, "Bad parameter rab id %u\n", rab_idP); AssertFatal((rab_idP - GTPV1U_BEARER_OFFSET) >= 0 , "Bad parameter rab id %u\n", rab_idP); gtpv1u_data_p = >pv1u_data_g; /* Check that UE context is present in ue map. */ hash_rc = hashtable_get(gtpv1u_data_p->ue_mapping, (uint64_t)ue_rntiP, (void**)&ue_inst_p); if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS ) { LOG_E(GTPU, "[UE %d] Trying to send data on non-existing UE context\n", ue_rntiP); return -1; } bearer_p = &ue_inst_p->bearers[rab_idP - GTPV1U_BEARER_OFFSET]; /* Ensure the bearer in ready. * TODO: handle the cases where the bearer is in HANDOVER state. * In such case packets should be placed in FIFO. 
*/ if (bearer_p->state != BEARER_UP) { LOG_W(GTPU, "Trying to send data over bearer with state(%u) != BEARER_UP\n", bearer_p->state); //#warning LG: HACK WHILE WAITING FOR NAS, normally return -1 if (bearer_p->state != BEARER_IN_CONFIG) return -1; } memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT)); stack_req.apiType = NW_GTPV1U_ULP_API_SEND_TPDU; stack_req.apiInfo.sendtoInfo.teid = bearer_p->teid_sgw; stack_req.apiInfo.sendtoInfo.ipAddr = bearer_p->sgw_ip_addr; LOG_D(GTPU, "TX TO TEID %u addr 0x%x\n",bearer_p->teid_sgw, bearer_p->sgw_ip_addr); rc = nwGtpv1uGpduMsgNew(gtpv1u_data_p->gtpv1u_stack, bearer_p->teid_sgw, NW_FALSE, gtpv1u_data_p->seq_num++, buffer_pP, buf_lenP, buf_offsetP, &(stack_req.apiInfo.sendtoInfo.hMsg)); if (rc != NW_GTPV1U_OK) { LOG_E(GTPU, "nwGtpv1uGpduMsgNew failed: 0x%x\n", rc); return -1; } rc = nwGtpv1uProcessUlpReq(gtpv1u_data_p->gtpv1u_stack, &stack_req); if (rc != NW_GTPV1U_OK) { LOG_E(GTPU, "nwGtpv1uProcessUlpReq failed: 0x%x\n", rc); return -1; } rc = nwGtpv1uMsgDelete(gtpv1u_data_p->gtpv1u_stack, stack_req.apiInfo.sendtoInfo.hMsg); if (rc != NW_GTPV1U_OK) { LOG_E(GTPU, "nwGtpv1uMsgDelete failed: 0x%x\n", rc); return -1; } LOG_D(GTPU, "%s() return code OK\n", __FUNCTION__); return 0; }
/* Forward a T-PDU received from the GTPv1-U stack out of the egress end point
 * of the given flow context: over IPv4 (with an optional compiled-in ICMP
 * echo-responder shortcut) or re-encapsulated over GTP-U.  GRE and unknown
 * flow types are logged and dropped.  Always returns NW_SDP_OK. */
NwSdpRcT nwSdpProcessGtpuDataIndication(NwSdpT* thiz,
    NwSdpFlowContextT* pFlowContext,
    NwGtpv1uMsgHandleT hMsg)
{
  NwSdpRcT rc;

  /*
   * Send Message Request to GTPv1u Stack Instance
   */
  switch(pFlowContext->egressEndPoint.flowType) {
  case NW_FLOW_TYPE_IPv4: {
    NwIpv4UlpApiT ulpReq;
    NwU8T* pIpv4Pdu;

    if(thiz->hIpv4Stack) {
      /* Send over IP*/
      /* Re-home the inner T-PDU into an IPv4-stack message. */
      rc = nwIpv4MsgFromBufferNew(thiz->hIpv4Stack,
                                  nwGtpv1uMsgGetTpduHandle(hMsg),
                                  nwGtpv1uMsgGetTpduLength(hMsg),
                                  &(ulpReq.apiInfo.sendtoInfo.hMsg));
      NW_ASSERT(NW_OK == rc);

#ifdef NW_SDP_RESPOND_ICMP_PING
      /* Test hack: answer ICMP echo requests locally instead of forwarding.
       * NOTE(review): byte offset 20 is treated as the ICMP type field —
       * assumes an IPv4 header without options; confirm. */
      pIpv4Pdu = nwIpv4MsgGetBufHandle(thiz->hIpv4Stack, ulpReq.apiInfo.sendtoInfo.hMsg);

      if(*(pIpv4Pdu + 20) == 0x08) {
#define NW_MAX_ICMP_PING_DATA_SIZE (1024)
        NwU8T pingRspPdu[14 + NW_MAX_ICMP_PING_DATA_SIZE];
        NwU32T pingRspPduLen;
        /* Turn echo-request into echo-reply in place (type 8 -> 0). */
        *(pIpv4Pdu + 20) = 0x00;
        /* Ethernet-style ethertype bytes for IPv4 in the 14-byte prefix. */
        pingRspPdu[12] = 0x08;
        pingRspPdu[13] = 0x00;
        /* Clamp the copied PDU to the response buffer capacity. */
        pingRspPduLen = (NW_MAX_ICMP_PING_DATA_SIZE > nwIpv4MsgGetLength(thiz->hIpv4Stack, ulpReq.apiInfo.sendtoInfo.hMsg) ?
                         nwIpv4MsgGetLength(thiz->hIpv4Stack, ulpReq.apiInfo.sendtoInfo.hMsg) : NW_MAX_ICMP_PING_DATA_SIZE);
        memcpy(pingRspPdu + 14, pIpv4Pdu, pingRspPduLen);
        /* Swap IPv4 source and destination addresses (offsets 12 and 16). */
        memcpy(pingRspPdu + 14 + 16, pIpv4Pdu + 12, 4);
        memcpy(pingRspPdu + 14 + 12, pIpv4Pdu + 16, 4);
        /* TODO: Add ip-checksum */
        rc = nwSdpProcessIpv4DataInd(thiz, 0, pingRspPdu, pingRspPduLen + 14);
      } else {
#endif
        ulpReq.apiType = NW_IPv4_ULP_API_SEND_TPDU;
        rc = nwIpv4ProcessUlpReq(thiz->hIpv4Stack, &ulpReq);
        NW_ASSERT( rc == NW_SDP_OK );
#ifdef NW_SDP_RESPOND_ICMP_PING
      }
#endif
      rc = nwIpv4MsgDelete(thiz->hIpv4Stack, (ulpReq.apiInfo.sendtoInfo.hMsg));
      NW_ASSERT( rc == NW_SDP_OK );
    } else {
      NW_LOG(thiz, NW_LOG_LEVEL_ERRO, "Cannot send PDU over IPv4! IPv4 service does not exist on data plane.");
    }
  }
  break;

  case NW_FLOW_TYPE_GTPU: {
    NwGtpv1uUlpApiT ulpReq;
    ulpReq.apiType = NW_GTPV1U_ULP_API_SEND_TPDU;
    ulpReq.apiInfo.sendtoInfo.teid = pFlowContext->egressEndPoint.flowKey.gtpuTeid;
    ulpReq.apiInfo.sendtoInfo.ipAddr = pFlowContext->egressEndPoint.ipv4Addr;

    if(thiz->hGtpv1uStack) {
      /* Clone the message for the egress tunnel, send, then release.
       * NOTE(review): rc from GTPv1-U calls is checked against NW_SDP_OK —
       * relies on the result enums sharing the same OK value; confirm. */
      rc = nwGtpv1uMsgFromMsgNew( thiz->hGtpv1uStack,
                                  hMsg,
                                  &(ulpReq.apiInfo.sendtoInfo.hMsg));
      NW_ASSERT( rc == NW_SDP_OK );
      rc = nwGtpv1uProcessUlpReq(thiz->hGtpv1uStack, &ulpReq);
      NW_ASSERT( rc == NW_SDP_OK );
      rc = nwGtpv1uMsgDelete(thiz->hGtpv1uStack, (ulpReq.apiInfo.sendtoInfo.hMsg));
      NW_ASSERT( rc == NW_SDP_OK );
      NW_LOG(thiz, NW_LOG_LEVEL_DEBG, "Sending GTPU PDU over teid 0x%x "NW_IPV4_ADDR,
             ulpReq.apiInfo.sendtoInfo.teid,
             NW_IPV4_ADDR_FORMAT(ntohl(ulpReq.apiInfo.sendtoInfo.ipAddr)));
    } else {
      NW_LOG(thiz, NW_LOG_LEVEL_ERRO, "Cannot send PDU over GTPU! GTPU service does not exist on data plane.");
    }
  }
  break;

  case NW_FLOW_TYPE_GRE: {
    NW_LOG(thiz, NW_LOG_LEVEL_WARN, "Cannot send TPDU over GRE! Not supported yet!");
  }
  break;

  default: {
    NW_LOG(thiz, NW_LOG_LEVEL_ERRO, "Unsupported egress flow end point type! Dropping GTP TPDU.");
  }
  break;
  }

  return NW_SDP_OK;
}
/* Destroy the lower-layer tunnel endpoint attached to a flow end point,
 * dispatching on the flow type (IPv4 / GTP-U / GRE / UDP).  A missing
 * service or handle is logged and treated as success.  Returns NW_SDP_OK,
 * or NW_SDP_FAILURE for an unsupported flow type. */
static NwSdpRcT
nwSdpDestroyFlowEndPoint( NW_IN NwSdpT* thiz,
                          NW_IN NwSdpFlowContextT* pFlowContext,
                          NW_IN NwSdpFlowEndPointT* pFlowEndPoint)
{
  NwSdpRcT rc = NW_SDP_OK;

  switch(pFlowEndPoint->flowType) {
  case NW_FLOW_TYPE_IPv4: {
    NwIpv4UlpApiT ulpReq;
    NW_LOG(thiz, NW_LOG_LEVEL_DEBG, "Destroying IPv4 tunnel endpoint with teid 0x%x",
           pFlowEndPoint->flowKey.ipv4Addr);

    if(thiz->hIpv4Stack && pFlowEndPoint->hTunnelEndPoint.ipv4) {
      ulpReq.apiType = NW_IPv4_ULP_API_DESTROY_TUNNEL_ENDPOINT;
      ulpReq.apiInfo.destroyTunnelEndPointInfo.hStackSessionHandle = (NwIpv4UlpSessionHandleT)pFlowEndPoint->hTunnelEndPoint.ipv4;
      rc = nwIpv4ProcessUlpReq(thiz->hIpv4Stack, &ulpReq);
    } else {
      NW_LOG(thiz, NW_LOG_LEVEL_ERRO, "IPv4 end point does not exist on data plane!");
      rc = NW_SDP_OK;
    }
  }
  break;

  case NW_FLOW_TYPE_GTPU: {
    NwGtpv1uUlpApiT ulpReq;
    NW_LOG(thiz, NW_LOG_LEVEL_INFO, "Destroying GTPU tunnel endpoint with teid 0x%x for handle 0x%x",
           pFlowEndPoint->flowKey.gtpuTeid,
           (NwGtpv1uUlpSessionHandleT)pFlowEndPoint->hTunnelEndPoint.gtpu);

    if(thiz->hGtpv1uStack && pFlowEndPoint->hTunnelEndPoint.gtpu) {
      ulpReq.apiType = NW_GTPV1U_ULP_API_DESTROY_TUNNEL_ENDPOINT;
      ulpReq.apiInfo.destroyTunnelEndPointInfo.hStackSessionHandle = (NwGtpv1uUlpSessionHandleT)pFlowEndPoint->hTunnelEndPoint.gtpu;
      rc = nwGtpv1uProcessUlpReq(thiz->hGtpv1uStack, &ulpReq);
    } else {
      NW_LOG(thiz, NW_LOG_LEVEL_ERRO, "GTPU tunnel end point does not exist on data plane!");
      rc = NW_SDP_OK;
    }
  }
  break;

  case NW_FLOW_TYPE_GRE: {
    NwGreUlpApiT ulpReq;
    NW_LOG(thiz, NW_LOG_LEVEL_NOTI, "Destroying GRE tunnel endpoint with key %d",
           pFlowEndPoint->flowKey.greKey);

    if(thiz->hGreStack && pFlowEndPoint->hTunnelEndPoint.gre) {
      ulpReq.apiType = NW_GRE_ULP_API_DESTROY_TUNNEL_ENDPOINT;
      /* NOTE(review): the GRE handle is cast to the GTPv1-U session-handle
       * type — looks like a copy-paste; confirm the types are equivalent. */
      ulpReq.apiInfo.destroyTunnelEndPointInfo.hStackSessionHandle = (NwGtpv1uUlpSessionHandleT)pFlowEndPoint->hTunnelEndPoint.gre;
      rc = nwGreProcessUlpReq(thiz->hGreStack, &ulpReq);
    } else {
      NW_LOG(thiz, NW_LOG_LEVEL_ERRO, "GRE tunnel end point does not exist on data plane!");
      rc = NW_SDP_OK;
    }
  }
  break;

  case NW_FLOW_TYPE_UDP: {
#ifdef NW_SDP_SUPPORT_UDP_FLOW_TYPE
    /*
     * Destroy local udp listening endpoint
     */
    NW_LOG(thiz, NW_LOG_LEVEL_NOTI, "Destroying UDP tunnel endpoint with port %d",
           pFlowEndPoint->flowKey.udp.port);
    event_del(&(pFlowContext->ev));
    /* NOTE(review): closes the '.gtpu' member for a UDP endpoint — presumably
     * hTunnelEndPoint is a union so this aliases the UDP fd; confirm. */
    close(pFlowEndPoint->hTunnelEndPoint.gtpu);
#else
    NW_LOG(thiz, NW_LOG_LEVEL_ERRO, "Flow type UDP not supported");
#endif
  }
  break;

  default: {
    NW_LOG(thiz, NW_LOG_LEVEL_NOTI, "Unsupported encapsulation type %u", pFlowEndPoint->flowType);
    return NW_SDP_FAILURE;
  }
  }

  return NW_SDP_OK;
}
/* Create the lower-layer tunnel endpoint for a flow end point, dispatching
 * on the flow type (IPv4 / GTP-U / GRE / UDP), and store the resulting
 * stack-session or socket handle in pFlowEndPoint->hTunnelEndPoint.
 * A missing service is logged and treated as success.  Returns NW_SDP_OK,
 * or NW_SDP_FAILURE for an unsupported flow type. */
static NwSdpRcT
nwSdpCreateFlowEndPoint( NW_IN NwSdpT* thiz,
                         NW_IN NwSdpFlowContextT* pFlowContext,
                         NW_IN NwSdpFlowEndPointT* pFlowEndPoint)
{
  NwSdpRcT rc = NW_SDP_OK;

  switch(pFlowEndPoint->flowType) {
  case NW_FLOW_TYPE_IPv4: {
    NwIpv4UlpApiT ulpReq;
    NW_LOG(thiz, NW_LOG_LEVEL_DEBG, "Creating IPv4 tunnel endpoint with teid "NW_IPV4_ADDR,
           NW_IPV4_ADDR_FORMAT(pFlowEndPoint->flowKey.ipv4Addr));

    if(thiz->hIpv4Stack) {
      ulpReq.apiType = NW_IPv4_ULP_API_CREATE_TUNNEL_ENDPOINT;
      ulpReq.apiInfo.createTunnelEndPointInfo.ipv4Addr = pFlowEndPoint->flowKey.ipv4Addr;
      /* The flow context is handed to the stack as the ULP session cookie. */
      ulpReq.apiInfo.createTunnelEndPointInfo.hUlpSession = (NwGtpv1uUlpSessionHandleT)pFlowContext;
      rc = nwIpv4ProcessUlpReq(thiz->hIpv4Stack, &ulpReq);
      pFlowEndPoint->hTunnelEndPoint.ipv4 = ulpReq.apiInfo.createTunnelEndPointInfo.hStackSession;
    } else {
      NW_LOG(thiz, NW_LOG_LEVEL_ERRO, "IPv4 service does not exist on data plane!");
      rc = NW_SDP_OK;
    }
  }
  break;

  case NW_FLOW_TYPE_GTPU: {
    NwGtpv1uUlpApiT ulpReq;
    NW_LOG(thiz, NW_LOG_LEVEL_DEBG, "Creating GTPU tunnel endpoint with teid 0x%x",
           pFlowEndPoint->flowKey.gtpuTeid);

    if(thiz->hGtpv1uStack) {
      ulpReq.apiType = NW_GTPV1U_ULP_API_CREATE_TUNNEL_ENDPOINT;
      ulpReq.apiInfo.createTunnelEndPointInfo.teid = pFlowEndPoint->flowKey.gtpuTeid;
      ulpReq.apiInfo.createTunnelEndPointInfo.hUlpSession = (NwGtpv1uUlpSessionHandleT)pFlowContext;
      rc = nwGtpv1uProcessUlpReq(thiz->hGtpv1uStack, &ulpReq);
      pFlowEndPoint->hTunnelEndPoint.gtpu = ulpReq.apiInfo.createTunnelEndPointInfo.hStackSession;
    } else {
      NW_LOG(thiz, NW_LOG_LEVEL_ERRO, "GTPU service does not exist on data plane!");
      rc = NW_SDP_OK;
    }
  }
  break;

  case NW_FLOW_TYPE_GRE: {
    NwGreUlpApiT ulpReq;
    NW_LOG(thiz, NW_LOG_LEVEL_DEBG, "Creating GRE tunnel endpoint with key %d",
           pFlowEndPoint->flowKey.greKey);

    if(thiz->hGreStack) {
      ulpReq.apiType = NW_GRE_ULP_API_CREATE_TUNNEL_ENDPOINT;
      ulpReq.apiInfo.createTunnelEndPointInfo.greKey = pFlowEndPoint->flowKey.greKey;
      /* NOTE(review): cast uses the GTPv1-U session-handle type for a GRE
       * session — looks like a copy-paste; confirm the types are equivalent. */
      ulpReq.apiInfo.createTunnelEndPointInfo.hUlpSession = (NwGtpv1uUlpSessionHandleT)pFlowContext;
      rc = nwGreProcessUlpReq(thiz->hGreStack, &ulpReq);
      pFlowEndPoint->hTunnelEndPoint.gre = ulpReq.apiInfo.createTunnelEndPointInfo.hStackSession;
    } else {
      NW_LOG(thiz, NW_LOG_LEVEL_ERRO, "GRE service does not exist on data plane!");
      rc = NW_SDP_OK;
    }
  }
  break;

  case NW_FLOW_TYPE_UDP: {
#ifdef NW_SDP_SUPPORT_UDP_FLOW_TYPE
    /*
     * Create local udp listening endpoint
     */
    struct sockaddr_in addr;
    int sd = socket(AF_INET, SOCK_DGRAM, 0);

    if (sd < 0) {
      NW_LOG(thiz, NW_LOG_LEVEL_ERRO, "%s", strerror(errno));
      NW_ASSERT(0);
    }

    addr.sin_family      = AF_INET;
    addr.sin_port        = htons(pFlowEndPoint->flowKey.udp.port);
    /* NOTE(review): address used as-is — presumably already in network byte
     * order; confirm against the callers that fill flowKey.udp.ipv4Addr. */
    addr.sin_addr.s_addr = pFlowEndPoint->flowKey.udp.ipv4Addr;
    memset(addr.sin_zero, '\0', sizeof (addr.sin_zero));

    /* Bind failure is logged but not fatal here. */
    if(bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
      NW_LOG(thiz, NW_LOG_LEVEL_ERRO, "bind - %s", strerror(errno));
    }

    NW_LOG(thiz, NW_LOG_LEVEL_NOTI, "Creating UDP tunnel endpoint with port %d",
           pFlowEndPoint->flowKey.udp.port);
    /* Register the socket with libevent for persistent read callbacks. */
    event_set(&(pFlowContext->ev), sd, EV_READ|EV_PERSIST, nwSdpUdpDataIndicationCallback, pFlowContext);
    event_add(&(pFlowContext->ev), NULL);
    pFlowEndPoint->hTunnelEndPoint.udp = sd;
#else
    NW_LOG(thiz, NW_LOG_LEVEL_ERRO, "Flow type UDP not supported");
#endif
  }
  break;

  default: {
    NW_LOG(thiz, NW_LOG_LEVEL_NOTI, "Unsupported encapsulation type %u", pFlowEndPoint->flowType);
    return NW_SDP_FAILURE;
  }
  }

  return NW_SDP_OK;
}
/* Set up one connection for the mini ULP test entity: create a GTPv1-U
 * tunnel endpoint whose TEID equals the local UDP port, open and bind a UDP
 * socket on localIpStr:localport registered with libevent for receive, and
 * open a second unbound UDP socket used for sending.  Asserts on failure;
 * returns NW_GTPV1U_OK. */
NwGtpv1uRcT nwMiniUlpCreateConn(NwMiniUlpEntityT* thiz, char* localIpStr, NwU16T localport, char* peerIpStr)
{
  NwGtpv1uRcT rc;
  int sd;
  struct sockaddr_in addr;
  NwGtpv1uUlpApiT ulpReq;

  /* NOTE(review): unbounded copy — assumes peerIpStr always fits
   * thiz->peerIpStr; confirm the buffer size in NwMiniUlpEntityT. */
  strcpy(thiz->peerIpStr, peerIpStr);

  /*
   * Create local tunnel endpoint
   */
  NW_LOG(NW_LOG_LEVEL_NOTI, "Creating tunnel endpoint with teid %d", localport);
  ulpReq.apiType = NW_GTPV1U_ULP_API_CREATE_TUNNEL_ENDPOINT;
  ulpReq.apiInfo.createTunnelEndPointInfo.teid = localport;  /* port doubles as TEID */
  ulpReq.apiInfo.createTunnelEndPointInfo.hUlpSession = (NwGtpv1uUlpSessionHandleT)thiz;
  rc = nwGtpv1uProcessUlpReq(thiz->hGtpv1uStack, &ulpReq);
  NW_ASSERT( rc == NW_GTPV1U_OK );
  /* Remember the stack session handle for later teardown. */
  thiz->hGtpv1uConn = ulpReq.apiInfo.createTunnelEndPointInfo.hStackSession;

  /*
   * Create local udp listening endpoint
   */
  sd = socket(AF_INET, SOCK_DGRAM, 0);

  if (sd < 0) {
    NW_LOG(NW_LOG_LEVEL_ERRO, "%s", strerror(errno));
    NW_ASSERT(0);
  }

  addr.sin_family       = AF_INET;
  addr.sin_port         = htons(localport);
  addr.sin_addr.s_addr  = inet_addr(localIpStr);
  memset(addr.sin_zero, '\0', sizeof (addr.sin_zero));

  if(bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
    NW_LOG(NW_LOG_LEVEL_ERRO, "%s", strerror(errno));
    NW_ASSERT(0);
  }

  /* Register for persistent read events; NOTE(review): ev[] and localPort[]
   * are indexed by the raw fd — assumes sd stays below the array bounds in
   * NwMiniUlpEntityT; confirm. */
  event_set(&(thiz->ev[sd]), sd, EV_READ|EV_PERSIST, nwMiniUlpDataIndicationCallbackData, thiz);
  event_add(&(thiz->ev[sd]), NULL);
  thiz->localPort[sd] = localport;

  /*
   * Create local udp for sendign data
   */
  sd = socket(AF_INET, SOCK_DGRAM, 0);

  if (sd < 0) {
    NW_LOG(NW_LOG_LEVEL_ERRO, "%s", strerror(errno));
    NW_ASSERT(0);
  }

  thiz->hSocket = sd;
  return NW_GTPV1U_OK;
}