// This function assigns pre-available RBS to each UE in specified sub-bands before scheduling is done void dlsch_scheduler_pre_processor (module_id_t Mod_id, frame_t frameP, sub_frame_t subframeP, uint8_t dl_pow_off[MAX_NUM_CCs][NUMBER_OF_UE_MAX], uint16_t pre_nb_available_rbs[MAX_NUM_CCs][NUMBER_OF_UE_MAX], int N_RBG[MAX_NUM_CCs], unsigned char rballoc_sub_UE[MAX_NUM_CCs][NUMBER_OF_UE_MAX][N_RBG_MAX], int *mbsfn_flag){ unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],harq_pid=0,harq_pid1=0,harq_pid2=0,round=0,round1=0,round2=0,total_ue_count; unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX]; int UE_id, UE_id2, i; uint16_t ii,j; uint16_t nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX]; uint16_t nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX]; uint16_t nb_rbs_required_remaining_1[MAX_NUM_CCs][NUMBER_OF_UE_MAX]; uint16_t i1,i2,i3,r1=0; uint16_t average_rbs_per_user[MAX_NUM_CCs]; rnti_t rnti,rnti1,rnti2; LTE_eNB_UE_stats *eNB_UE_stats1 = NULL; LTE_eNB_UE_stats *eNB_UE_stats2 = NULL; int min_rb_unit[MAX_NUM_CCs]; uint8_t CC_id; UE_list_t *UE_list = &eNB_mac_inst[Mod_id].UE_list; LTE_DL_FRAME_PARMS *frame_parms[MAX_NUM_CCs]; int rrc_status = RRC_IDLE; int transmission_mode = 0; for (CC_id=0;CC_id<MAX_NUM_CCs;CC_id++) { if (mbsfn_flag[CC_id]>0) // If this CC is allocated for MBSFN skip it here continue; frame_parms[CC_id] = mac_xface->get_lte_frame_parms(Mod_id,CC_id); min_rb_unit[CC_id]=get_min_rb_unit(Mod_id,CC_id); for (i=UE_list->head;i>=0;i=UE_list->next[i]) { UE_id = i; // Initialize scheduling information for all active UEs dlsch_scheduler_pre_processor_reset(UE_id, CC_id, N_RBG[CC_id], dl_pow_off, nb_rbs_required, pre_nb_available_rbs, nb_rbs_required_remaining, rballoc_sub_UE, rballoc_sub, MIMO_mode_indicator); } } // Store the DLSCH buffer for each logical channel store_dlsch_buffer (Mod_id,frameP,subframeP); // Calculate the number of RBs required by each UE on the basis of logical channel's buffer assign_rbs_required 
(Mod_id,frameP,subframeP,nb_rbs_required,min_rb_unit); // Sorts the user on the basis of dlsch logical channel buffer and CQI sort_UEs (Mod_id,frameP,subframeP); total_ue_count =0; // loop over all active UEs for (i=UE_list->head;i>=0;i=UE_list->next[i]) { rnti = UE_RNTI(Mod_id,i); if(rnti == 0) continue; UE_id = i; for (ii=0;ii<UE_num_active_CC(UE_list,UE_id);ii++) { CC_id = UE_list->ordered_CCids[ii][UE_id]; average_rbs_per_user[CC_id]=0; mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti,frameP,subframeP,&harq_pid,&round,0); if(round>0) nb_rbs_required[CC_id][UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid]; //nb_rbs_required_remaining[UE_id] = nb_rbs_required[UE_id]; if (nb_rbs_required[CC_id][UE_id] > 0) { total_ue_count = total_ue_count + 1; } // hypotetical assignement /* * If schedule is enabled and if the priority of the UEs is modified * The average rbs per logical channel per user will depend on the level of * priority. Concerning the hypothetical assignement, we should assign more * rbs to prioritized users. Maybe, we can do a mapping between the * average rbs per user and the level of priority or multiply the average rbs * per user by a coefficient which represents the degree of priority. 
*/ if (total_ue_count == 0) average_rbs_per_user[CC_id] = 0; else if( (min_rb_unit[CC_id] * total_ue_count) <= (frame_parms[CC_id]->N_RB_DL) ) average_rbs_per_user[CC_id] = (uint16_t) floor(frame_parms[CC_id]->N_RB_DL/total_ue_count); else average_rbs_per_user[CC_id] = min_rb_unit[CC_id]; } } // note: nb_rbs_required is assigned according to total_buffer_dl // extend nb_rbs_required to capture per LCID RB required for(i=UE_list->head;i>=0;i=UE_list->next[i]){ for (ii=0;ii<UE_num_active_CC(UE_list,i);ii++) { CC_id = UE_list->ordered_CCids[ii][i]; // control channel if (mac_get_rrc_status(Mod_id,1,i) < RRC_RECONFIGURED) nb_rbs_required_remaining_1[CC_id][i] = nb_rbs_required[CC_id][i]; else nb_rbs_required_remaining_1[CC_id][i] = cmin(average_rbs_per_user[CC_id],nb_rbs_required[CC_id][i]); } } //Allocation to UEs is done in 2 rounds, // 1st round: average number of RBs allocated to each UE // 2nd round: remaining RBs are allocated to high priority UEs for(r1=0;r1<2;r1++){ for(i=UE_list->head; i>=0;i=UE_list->next[i]) { for (ii=0;ii<UE_num_active_CC(UE_list,i);ii++) { CC_id = UE_list->ordered_CCids[ii][i]; if(r1 == 0) nb_rbs_required_remaining[CC_id][i] = nb_rbs_required_remaining_1[CC_id][i]; else // rb required based only on the buffer - rb allloctaed in the 1st round + extra reaming rb form the 1st round nb_rbs_required_remaining[CC_id][i] = nb_rbs_required[CC_id][i]-nb_rbs_required_remaining_1[CC_id][i]+nb_rbs_required_remaining[CC_id][i]; LOG_D(MAC,"round %d : nb_rbs_required_remaining[%d][%d]= %d (remaining_1 %d, required %d, pre_nb_available_rbs %d, N_RBG %d, rb_unit %d)\n", r1, CC_id, i, nb_rbs_required_remaining[CC_id][i], nb_rbs_required_remaining_1[CC_id][i], nb_rbs_required[CC_id][i], pre_nb_available_rbs[CC_id][i], N_RBG[CC_id], min_rb_unit[CC_id]); } } if (total_ue_count > 0 ) { for(i=UE_list->head; i>=0;i=UE_list->next[i]) { UE_id = i; for (ii=0;ii<UE_num_active_CC(UE_list,UE_id);ii++) { CC_id = UE_list->ordered_CCids[ii][UE_id]; rnti = 
UE_RNTI(Mod_id,UE_id); // LOG_D(MAC,"UE %d rnti 0x\n", UE_id, rnti ); if(rnti == 0) continue; transmission_mode = mac_xface->get_transmission_mode(Mod_id,CC_id,rnti); mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti,frameP,subframeP,&harq_pid,&round,0); rrc_status = mac_get_rrc_status(Mod_id,1,UE_id); /* 1st allocate for the retx */ // retransmission in data channels // control channel in the 1st transmission // data channel for all TM LOG_D(MAC,"calling dlsch_scheduler_pre_processor_allocate .. \n "); dlsch_scheduler_pre_processor_allocate (Mod_id, UE_id, CC_id, N_RBG[CC_id], transmission_mode, min_rb_unit[CC_id], frame_parms[CC_id]->N_RB_DL, dl_pow_off, nb_rbs_required, pre_nb_available_rbs, nb_rbs_required_remaining, rballoc_sub_UE, rballoc_sub, MIMO_mode_indicator); #ifdef TM5 // data chanel TM5: to be revisted if ((round == 0 ) && (transmission_mode == 5) && (dl_pow_off[CC_id][UE_id] != 1)){ for(j=0;j<N_RBG[CC_id];j+=2) { if( (((j == (N_RBG[CC_id]-1))&& (rballoc_sub[CC_id][j] == 0) && (rballoc_sub_UE[CC_id][UE_id][j] == 0)) || ((j < (N_RBG[CC_id]-1)) && (rballoc_sub[CC_id][j+1] == 0) && (rballoc_sub_UE[CC_id][UE_id][j+1] == 0)) ) && (nb_rbs_required_remaining[CC_id][UE_id]>0)){ for (ii = UE_list->next[i+1];ii >=0;ii=UE_list->next[ii]) { UE_id2 = ii; rnti2 = UE_RNTI(Mod_id,UE_id2); if(rnti2 == 0) continue; eNB_UE_stats2 = mac_xface->get_eNB_UE_stats(Mod_id,CC_id,rnti2); mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti2,frameP,subframeP,&harq_pid2,&round2,0); if ((mac_get_rrc_status(Mod_id,1,UE_id2) >= RRC_RECONFIGURED) && (round2==0) && (mac_xface->get_transmission_mode(Mod_id,CC_id,rnti2)==5) && (dl_pow_off[CC_id][UE_id2] != 1)) { if( (((j == (N_RBG[CC_id]-1)) && (rballoc_sub_UE[CC_id][UE_id2][j] == 0)) || ((j < (N_RBG[CC_id]-1)) && (rballoc_sub_UE[CC_id][UE_id2][j+1] == 0)) ) && (nb_rbs_required_remaining[CC_id][UE_id2]>0)){ if((((eNB_UE_stats2->DL_pmi_single^eNB_UE_stats1->DL_pmi_single)<<(14-j))&0xc000)== 0x4000){ //MU-MIMO only for 25 RBs 
configuration rballoc_sub[CC_id][j] = 1; rballoc_sub_UE[CC_id][UE_id][j] = 1; rballoc_sub_UE[CC_id][UE_id2][j] = 1; MIMO_mode_indicator[CC_id][j] = 0; if (j< N_RBG[CC_id]-1) { rballoc_sub[CC_id][j+1] = 1; rballoc_sub_UE[CC_id][UE_id][j+1] = 1; rballoc_sub_UE[CC_id][UE_id2][j+1] = 1; MIMO_mode_indicator[CC_id][j+1] = 0; } dl_pow_off[CC_id][UE_id] = 0; dl_pow_off[CC_id][UE_id2] = 0; if ((j == N_RBG[CC_id]-1) && ((PHY_vars_eNB_g[Mod_id][CC_id]->lte_frame_parms.N_RB_DL == 25) || (PHY_vars_eNB_g[Mod_id][CC_id]->lte_frame_parms.N_RB_DL == 50))){ nb_rbs_required_remaining[CC_id][UE_id] = nb_rbs_required_remaining[CC_id][UE_id] - min_rb_unit[CC_id]+1; pre_nb_available_rbs[CC_id][UE_id] = pre_nb_available_rbs[CC_id][UE_id] + min_rb_unit[CC_id]-1; nb_rbs_required_remaining[CC_id][UE_id2] = nb_rbs_required_remaining[CC_id][UE_id2] - min_rb_unit[CC_id]+1; pre_nb_available_rbs[CC_id][UE_id2] = pre_nb_available_rbs[CC_id][UE_id2] + min_rb_unit[CC_id]-1; } else { nb_rbs_required_remaining[CC_id][UE_id] = nb_rbs_required_remaining[CC_id][UE_id] - 4; pre_nb_available_rbs[CC_id][UE_id] = pre_nb_available_rbs[CC_id][UE_id] + 4; nb_rbs_required_remaining[CC_id][UE_id2] = nb_rbs_required_remaining[CC_id][UE_id2] - 4; pre_nb_available_rbs[CC_id][UE_id2] = pre_nb_available_rbs[CC_id][UE_id2] + 4; } break; } } } } } } } #endif } } } // total_ue_count } // end of for for r1 and r2 #ifdef TM5 // This has to be revisited!!!! 
for (CC_id=0;CC_id<MAX_NUM_CCs;CC_id++) { i1=0; i2=0; i3=0; for (j=0;j<N_RBG[CC_id];j++){ if(MIMO_mode_indicator[CC_id][j] == 2) i1 = i1+1; else if(MIMO_mode_indicator[CC_id][j] == 1) i2 = i2+1; else if(MIMO_mode_indicator[CC_id][j] == 0) i3 = i3+1; } if((i1 < N_RBG[CC_id]) && (i2>0) && (i3==0)) PHY_vars_eNB_g[Mod_id][CC_id]->check_for_SUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->check_for_SUMIMO_transmissions + 1; if(i3 == N_RBG[CC_id] && i1==0 && i2==0) PHY_vars_eNB_g[Mod_id][CC_id]->FULL_MUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->FULL_MUMIMO_transmissions + 1; if((i1 < N_RBG[CC_id]) && (i3 > 0)) PHY_vars_eNB_g[Mod_id][CC_id]->check_for_MUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->check_for_MUMIMO_transmissions + 1; PHY_vars_eNB_g[Mod_id][CC_id]->check_for_total_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->check_for_total_transmissions + 1; } #endif for(i=UE_list->head; i>=0;i=UE_list->next[i]) { UE_id = i; for (ii=0;ii<UE_num_active_CC(UE_list,UE_id);ii++) { CC_id = UE_list->ordered_CCids[ii][UE_id]; //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].dl_pow_off = dl_pow_off[UE_id]; LOG_D(MAC,"******************DL Scheduling Information for UE%d ************************\n",UE_id); LOG_D(MAC,"dl power offset UE%d = %d \n",UE_id,dl_pow_off[CC_id][UE_id]); LOG_D(MAC,"***********RB Alloc for every subband for UE%d ***********\n",UE_id); for(j=0;j<N_RBG[CC_id];j++){ //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].rballoc_sub[i] = rballoc_sub_UE[CC_id][UE_id][i]; LOG_D(MAC,"RB Alloc for UE%d and Subband%d = %d\n",UE_id,j,rballoc_sub_UE[CC_id][UE_id][j]); } //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].pre_nb_available_rbs = pre_nb_available_rbs[CC_id][UE_id]; LOG_D(MAC,"Total RBs allocated for UE%d = %d\n",UE_id,pre_nb_available_rbs[CC_id][UE_id]); } } }
// Formats UE-side MAC/RLC/PDCP statistics as human-readable text into `buffer`
// (procfs-style read callback). Returns the number of bytes written (`len`).
// NOTE(review): this chunk appears truncated/corrupted by extraction — the
// `#endif` below has no matching `#if` in view (the full file presumably
// selects between alternate signatures), and the closing braces at the end do
// not balance the opens. Kept byte-identical; verify against the full file.
int openair2_stats_read(char *buffer, char **my_buffer, off_t off, int length)
#endif
{
  int len = 0,fg,Overhead, Sign;
  unsigned int i,j,k,kk;   // NOTE(review): `k` is read below (Pdcp_stats_* index) but never initialized — confirm
  unsigned int ue_id, eNB_id;   // NOTE(review): loop below uses lowercase `enb_id`, not this `eNB_id` — confirm declaration elsewhere
  unsigned int Mod_id = 0,CH_index;   // NOTE(review): `CH_index` is used below but never assigned in this view
  // Output parameters for rlc_stat_req(): per-channel RLC TX/RX counters.
  unsigned int stat_tx_pdcp_sdu;
  unsigned int stat_tx_pdcp_bytes;
  unsigned int stat_tx_pdcp_sdu_discarded;
  unsigned int stat_tx_pdcp_bytes_discarded;
  unsigned int stat_tx_data_pdu;
  unsigned int stat_tx_data_bytes;
  unsigned int stat_tx_retransmit_pdu_by_status;
  unsigned int stat_tx_retransmit_bytes_by_status;
  unsigned int stat_tx_retransmit_pdu;
  unsigned int stat_tx_retransmit_bytes;
  unsigned int stat_tx_control_pdu;
  unsigned int stat_tx_control_bytes;
  unsigned int stat_rx_pdcp_sdu;
  unsigned int stat_rx_pdcp_bytes;
  unsigned int stat_rx_data_pdus_duplicate;
  unsigned int stat_rx_data_bytes_duplicate;
  unsigned int stat_rx_data_pdu;
  unsigned int stat_rx_data_bytes;
  unsigned int stat_rx_data_pdu_dropped;
  unsigned int stat_rx_data_bytes_dropped;
  unsigned int stat_rx_data_pdu_out_of_window;
  unsigned int stat_rx_data_bytes_out_of_window;
  unsigned int stat_rx_control_pdu;
  unsigned int stat_rx_control_bytes;
  unsigned int stat_timer_reordering_timed_out;
  unsigned int stat_timer_poll_retransmit_timed_out;
  unsigned int stat_timer_status_prohibit_timed_out;

  // UE part
  for (ue_id=0; ue_id<NUM_UE_INST; ue_id++) {
    // mod_id used for PDCP and RLC
    Mod_id = NB_eNB_INST + ue_id ;
    len+=sprintf(&buffer[len],"UE TTI: %d\n",UE_mac_inst[ue_id].frame);

    for (enb_id= 0; enb_id <NB_SIG_CNX_UE; enb_id++) {
      // NOTE(review): switch over a boolean (`... > RRC_CONNECTED`) with enum
      // case labels — all cases fall through to the immediate `break`, and the
      // `if` after it is unreachable inside the switch. Likely mangled from an
      // `#ifdef`-guarded region in the full file; preserved as-is.
      switch (mac_get_rrc_status(ue_id,0,enb_id) > RRC_CONNECTED) {
      case RRC_RECONFIGURED :
      case RRC_CONNECTED:
      case RRC_SI_RECEIVED:
      case RRC_IDLE:
        break;

        if (mac_get_rrc_status(ue_id,0,enb_id) > RRC_CONNECTED) {
          // if (UE_mac_inst[ue_id].Dcch_lchan[CH_index].Active==1) {
          // Wideband and per-subband SINR measurements for this eNB link.
          len+=sprintf(&buffer[len],"eNB %d: Wideband SINR %d dB---\n", CH_index,UE_mac_inst[Mod_id].Def_meas[CH_index].Wideband_sinr);
          len+=sprintf(&buffer[len],"CH %d: Subband SINR (dB) :", CH_index);

          for (fg=0; fg<NUMBER_OF_MEASUREMENT_SUBBANDS; fg++) {
            len+=sprintf(&buffer[len],"%d ",UE_mac_inst[Mod_id].Def_meas[CH_index].Sinr_meas[0][fg]);
          }

          len+=sprintf(&buffer[len],"\n");
          // MAC RX counters for the broadcast/common/dedicated control channels.
          len+=sprintf(&buffer[len],"BCCH %d, NB_RX_MAC = %d (%d errors)\n",
                       UE_mac_inst[Mod_id].Bcch_lchan[CH_index].Lchan_info.Lchan_id.Index,
                       UE_mac_inst[Mod_id].Bcch_lchan[CH_index].Lchan_info.NB_RX,
                       UE_mac_inst[Mod_id].Bcch_lchan[CH_index].Lchan_info.NB_RX_ERRORS);
          len+=sprintf(&buffer[len],"CCCH %d, NB_RX_MAC = %d (%d errors)\n",
                       UE_mac_inst[Mod_id].Ccch_lchan[CH_index].Lchan_info.Lchan_id.Index,
                       UE_mac_inst[Mod_id].Ccch_lchan[CH_index].Lchan_info.NB_RX,
                       UE_mac_inst[Mod_id].Ccch_lchan[CH_index].Lchan_info.NB_RX_ERRORS);
          len+=sprintf(&buffer[len],"LCHAN %d (DCCH), NB_TX_MAC = %d (%d bits/TTI, %d kbits/sec), NB_RX_MAC = %d (%d errors)\n",
                       UE_mac_inst[Mod_id].Dcch_lchan[CH_index].Lchan_info.Lchan_id.Index,
                       UE_mac_inst[Mod_id].Dcch_lchan[CH_index].Lchan_info.NB_TX,
                       UE_mac_inst[Mod_id].Dcch_lchan[CH_index].Lchan_info.output_rate,
                       (10*UE_mac_inst[Mod_id].Dcch_lchan[CH_index].Lchan_info.output_rate)>>5,
                       UE_mac_inst[Mod_id].Dcch_lchan[CH_index].Lchan_info.NB_RX,
                       UE_mac_inst[Mod_id].Dcch_lchan[CH_index].Lchan_info.NB_RX_ERRORS);

          // Per-radio-bearer (DTCH) statistics.
          for(i=1; i<NB_RAB_MAX; i++) {
            if (UE_mac_inst[Mod_id].Dtch_lchan[i][CH_index].Active==1) {
              // Layer-2 overhead = MAC output rate minus PDCP TX rate; Sign
              // preserves the direction after taking the absolute value.
              Overhead=UE_mac_inst[Mod_id].Dtch_lchan[i][CH_index].Lchan_info.output_rate - Pdcp_stats_tx_rate[k][CH_index][i];

              if(Overhead<0) {
                Overhead=-Overhead;
                Sign=-1;
              } else {
                Sign=1;
              }

              len+=sprintf(&buffer[len],"[PDCP]LCHAN %d: NB_TX = %d ,Tx_rate =(%d bits/TTI ,%d Kbits/s), NB_RX = %d ,Rx_rate =(%d bits/TTI ,%d Kbits/s) , LAYER2 TX OVERHEAD: %d Kbits/s\n",
                           UE_mac_inst[Mod_id].Dtch_lchan[i][CH_index].Lchan_info.Lchan_id.Index,
                           Pdcp_stats_tx[k][CH_index][i],
                           Pdcp_stats_tx_rate[k][CH_index][i],
                           (10*Pdcp_stats_tx_rate[k][CH_index][i])>>5,
                           Pdcp_stats_rx[k][CH_index][i],
                           Pdcp_stats_rx_rate[k][CH_index][i],
                           (10*Pdcp_stats_rx_rate[k][CH_index][i])>>5,
                           Sign*(10*Overhead)>>5);

              // Query the RLC layer for this bearer's counters.
              int status = rlc_stat_req (k,
                                         UE_mac_inst[Mod_id].Dtch_lchan[i][CH_index].Lchan_info.Lchan_id.Index,
                                         &stat_tx_pdcp_sdu,
                                         &stat_tx_pdcp_bytes,
                                         &stat_tx_pdcp_sdu_discarded,
                                         &stat_tx_pdcp_bytes_discarded,
                                         &stat_tx_data_pdu,
                                         &stat_tx_data_bytes,
                                         &stat_tx_retransmit_pdu_by_status,
                                         &stat_tx_retransmit_bytes_by_status,
                                         &stat_tx_retransmit_pdu,
                                         &stat_tx_retransmit_bytes,
                                         &stat_tx_control_pdu,
                                         &stat_tx_control_bytes,
                                         &stat_rx_pdcp_sdu,
                                         &stat_rx_pdcp_bytes,
                                         &stat_rx_data_pdus_duplicate,
                                         &stat_rx_data_bytes_duplicate,
                                         &stat_rx_data_pdu,
                                         &stat_rx_data_bytes,
                                         &stat_rx_data_pdu_dropped,
                                         &stat_rx_data_bytes_dropped,
                                         &stat_rx_data_pdu_out_of_window,
                                         &stat_rx_data_bytes_out_of_window,
                                         &stat_rx_control_pdu,
                                         &stat_rx_control_bytes,
                                         &stat_timer_reordering_timed_out,
                                         &stat_timer_poll_retransmit_timed_out,
                                         &stat_timer_status_prohibit_timed_out) ;

              if (status == RLC_OP_STATUS_OK) {
                // NOTE(review): these print `tx_pdcp_sdu`, `rx_sdu`, etc. —
                // names not declared here; the `stat_*` locals filled above are
                // presumably intended. Preserved as-is; confirm in full file.
                len+=sprintf(&buffer[len],"RLC LCHAN %d, NB_SDU_TO_TX = %d\tNB_SDU_DISC %d\tNB_RX_SDU %d\n",
                             UE_mac_inst[Mod_id].Dtch_lchan[i][CH_index].Lchan_info.Lchan_id.Index,
                             tx_pdcp_sdu,
                             tx_pdcp_sdu_discarded,
                             rx_sdu);
                len+=sprintf(&buffer[len],"RLC LCHAN %d, NB_TB_TX_DATA = %d\tNB_TB_TX_CONTROL %d\tNB_TX_TB_RETRANS %d",
                             UE_mac_inst[Mod_id].Dtch_lchan[i][CH_index].Lchan_info.Lchan_id.Index,
                             tx_data_pdu,
                             tx_control_pdu,
                             tx_retransmit_pdu);
                len+=sprintf(&buffer[len],"\tRLC LCHAN %d, NB_TX_TB_RETRANS_BY_STATUS = %d\tNB_TX_TB_RETRANS_PADD %d\n",
                             UE_mac_inst[Mod_id].Dtch_lchan[i][CH_index].Lchan_info.Lchan_id.Index,
                             tx_retransmit_pdu_by_status,
                             tx_retransmit_pdu_unblock);
                len+=sprintf(&buffer[len],"RLC LCHAN %d, NB_RX_DATA = %d\tNB_RX_TB_OUT_WIN %d\tNB_RX_TB_CORRUPT %d\n",
                             UE_mac_inst[Mod_id].Dtch_lchan[i][CH_index].Lchan_info.Lchan_id.Index,
                             rx_data_pdu,
                             rx_data_pdu_out_of_window,
                             rx_error_pdu);
              }

              // MAC-level counters for this bearer, then per-TB breakdowns.
              len+=sprintf(&buffer[len],"[MAC]: LCHAN %d, NB_TX_MAC = %d (%d bits/TTI, %d kbits/s), NB_RX_MAC = %d (%d errors)\n",
                           UE_mac_inst[Mod_id].Dtch_lchan[i][CH_index].Lchan_info.Lchan_id.Index,
                           UE_mac_inst[Mod_id].Dtch_lchan[i][CH_index].Lchan_info.NB_TX,
                           UE_mac_inst[Mod_id].Dtch_lchan[i][CH_index].Lchan_info.output_rate,
                           (10*UE_mac_inst[Mod_id].Dtch_lchan[i][CH_index].Lchan_info.output_rate)>>5,
                           UE_mac_inst[Mod_id].Dtch_lchan[i][CH_index].Lchan_info.NB_RX,
                           UE_mac_inst[Mod_id].Dtch_lchan[i][CH_index].Lchan_info.NB_RX_ERRORS);
              len+=sprintf(&buffer[len]," TX per TB: ");

              for(kk=0; kk<MAX_NUMBER_TB_PER_LCHAN/2; kk++) {
                len+=sprintf(&buffer[len],"%d . ",UE_mac_inst[Mod_id].Dtch_lchan[i][CH_index].Lchan_info.NB_TX_TB[kk]);
              }

              len+=sprintf(&buffer[len],"\n");
              len+=sprintf(&buffer[len]," RXerr per TB: ");

              for(kk=0; kk<MAX_NUMBER_TB_PER_LCHAN/2; kk++)
                len+=sprintf(&buffer[len],"%d/%d . ",UE_mac_inst[Mod_id].Dtch_lchan[i][CH_index].Lchan_info.NB_RX_ERRORS_TB[kk],
                             UE_mac_inst[Mod_id].Dtch_lchan[i][CH_index].Lchan_info.NB_RX_TB[kk]);

              len+=sprintf(&buffer[len],"\n");
            }
          }
        }
      }
    }
  }
#endif //PHY_EMUL_ONE_MACHINE
}