/**
Returns a dmatrix in which every element of m has been raised to the power e.

\param m dmatrix input
\param e floating point exponent
\return dmatrix with the same row and column bounds as m
*/
dmatrix pow(const dmatrix& m, const double e)
{
  const int rlo = m.rowmin();
  const int rhi = m.rowmax();

  // Record the column bounds of each row so the result matches m's ragged shape.
  ivector collo(rlo, rhi);
  ivector colhi(rlo, rhi);
  for (int r = rlo; r <= rhi; ++r)
  {
    collo(r) = m(r).indexmin();
    colhi(r) = m(r).indexmax();
  }

  dmatrix result(rlo, rhi, collo, colhi);
  for (int r = rlo; r <= rhi; ++r)
  {
    // Delegate to the dvector overload for the row-wise power.
    result(r) = pow(m(r), e);
  }
  return result;
}
/**
Returns a dmatrix in which every element of m has been squared.

\param m dmatrix input
\return dmatrix with the same row and column bounds as m
*/
dmatrix sqr(const dmatrix& m)
{
  const int rlo = m.rowmin();
  const int rhi = m.rowmax();

  // Capture per-row column bounds so the result matches m's ragged shape.
  ivector collo(rlo, rhi);
  ivector colhi(rlo, rhi);
  for (int r = rlo; r <= rhi; ++r)
  {
    collo(r) = m(r).indexmin();
    colhi(r) = m(r).indexmax();
  }

  dmatrix result(rlo, rhi, collo, colhi);
  for (int r = rlo; r <= rhi; ++r)
  {
    // Row-wise squaring via the dvector overload.
    result(r) = sqr(m(r));
  }
  return result;
}
/**
Element-wise division of two dmatrix objects.

\param m numerator dmatrix
\param m2 denominator dmatrix; each row m2(i) is indexed with m's row range,
          so it must cover the same bounds (assumption carried over from the
          original implementation)
\return dmatrix whose (i,j) entry is m(i,j) / m2(i,j)
*/
dmatrix elem_div(const dmatrix& m, const dmatrix& m2)
{
  const int rlo = m.rowmin();
  const int rhi = m.rowmax();

  // Copy the per-row column bounds so the result mirrors m's shape.
  ivector collo(rlo, rhi);
  ivector colhi(rlo, rhi);
  for (int r = rlo; r <= rhi; ++r)
  {
    collo(r) = m(r).indexmin();
    colhi(r) = m(r).indexmax();
  }

  dmatrix result(rlo, rhi, collo, colhi);
  for (int r = rlo; r <= rhi; ++r)
  {
    // Row-wise element division via the dvector overload.
    result(r) = elem_div(m(r), m2(r));
  }
  return result;
}
/**
Returns a dmatrix holding the natural logarithm of every element of m.

\param m dmatrix input
\return dmatrix with the same row and column bounds as m
*/
dmatrix log(const dmatrix& m)
{
  const int rlo = m.rowmin();
  const int rhi = m.rowmax();

  // Capture per-row column bounds so the result matches m's ragged shape.
  ivector collo(rlo, rhi);
  ivector colhi(rlo, rhi);
  for (int r = rlo; r <= rhi; ++r)
  {
    collo(r) = m(r).indexmin();
    colhi(r) = m(r).indexmax();
  }

  dmatrix result(rlo, rhi, collo, colhi);
  for (int r = rlo; r <= rhi; ++r)
  {
    // Row-wise logarithm via the dvector overload.
    result(r) = log(m(r));
  }
  return result;
}
/* Extract the subcarriers of one ULSCH allocation from the full received
 * frequency-domain grid (rxdataF) into a compact, contiguous per-symbol
 * buffer (rxdataF_ext), for every receive antenna.
 *
 * The allocation may wrap around the DC subcarrier: nb_rb1 counts the
 * half-RB (12-subcarrier) chunks before the wrap, nb_rb2 the chunks after
 * it.  All pointer arithmetic is in units of int with a *2 factor
 * (interleaved I/Q pairs, as the indexing below shows).
 *
 * \param rxdataF      per-antenna received frequency-domain grid
 * \param rxdataF_ext  per-antenna output buffer, indexed per symbol
 * \param first_rb     first RB of the allocation
 * \param nb_rb        number of RBs in the allocation
 * \param l            symbol index within the slot
 * \param Ns           slot index (parity selects first/second slot of the subframe)
 * \param frame_parms  cell configuration (bandwidth, CP, antenna count, ...)
 */
void ulsch_extract_rbs_single(int **rxdataF, int **rxdataF_ext, unsigned int first_rb, unsigned int nb_rb, unsigned char l, unsigned char Ns, LTE_DL_FRAME_PARMS *frame_parms)
{
  unsigned short nb_rb1,nb_rb2;
  unsigned char aarx;
  int *rxF,*rxF_ext;

  //unsigned char symbol = l+Ns*frame_parms->symbols_per_tti/2;
  // 7-Ncp symbols per slot; odd Ns selects the second slot of the subframe.
  unsigned char symbol = l+((7-frame_parms->Ncp)*(Ns&1)); ///symbol within sub-frame

  for (aarx=0;aarx<frame_parms->nb_antennas_rx;aarx++) {
    // NOTE: cmin/cmax here are the scalar min/max helpers/macros of this
    // codebase, not the matrix routines elsewhere in the file.
    nb_rb1 = cmin(cmax((int)(frame_parms->N_RB_UL) - (int)(2*first_rb),(int)0),(int)(2*nb_rb));    // 2 times no. RBs before the DC
    nb_rb2 = 2*nb_rb - nb_rb1;    // 2 times no. RBs after the DC

#ifdef DEBUG_ULSCH
    msg("ulsch_extract_rbs_single: 2*nb_rb1 = %d, 2*nb_rb2 = %d\n",nb_rb1,nb_rb2);
#endif

    // Destination: start of this symbol's slice in the compact output buffer.
    rxF_ext = &rxdataF_ext[aarx][(symbol*frame_parms->N_RB_UL*12)*2];

    if (nb_rb1) {
      // Part (or all) of the allocation lies before the DC wrap.
      rxF = &rxdataF[aarx][(first_rb*12 + frame_parms->first_carrier_offset + symbol*frame_parms->ofdm_symbol_size)*2];
      memcpy(rxF_ext, rxF, nb_rb1*12*sizeof(int));
      rxF_ext += nb_rb1*12;

      if (nb_rb2)  {
        // Remainder wraps past DC; OFDMA_ULSCH skips the DC subcarrier itself
        // (the "+1" offset) -- presumably SC-FDMA includes it; confirm.
#ifdef OFDMA_ULSCH
        rxF = &rxdataF[aarx][(1 + symbol*frame_parms->ofdm_symbol_size)*2];
#else
        rxF = &rxdataF[aarx][(symbol*frame_parms->ofdm_symbol_size)*2];
#endif
        memcpy(rxF_ext, rxF, nb_rb2*12*sizeof(int));
        rxF_ext += nb_rb2*12;
      }
    } else { //there is only data in the second half
#ifdef OFDMA_ULSCH
      rxF = &rxdataF[aarx][(1 + 6*(2*first_rb - frame_parms->N_RB_UL) + symbol*frame_parms->ofdm_symbol_size)*2];
#else
      rxF = &rxdataF[aarx][(6*(2*first_rb - frame_parms->N_RB_UL) + symbol*frame_parms->ofdm_symbol_size)*2];
#endif
      memcpy(rxF_ext, rxF, nb_rb2*12*sizeof(int));
      rxF_ext += nb_rb2*12;
    }
  }

  // Clear MMX state after SIMD usage in this translation unit.
  _mm_empty();
  _m_empty();
}
// One augmentation step of min-cost max-flow: run Dijkstra with Johnson
// potentials (pot[]) from s, then push flow along the shortest s->t path.
//
// Returns {pushed flow, flow * path cost}; {0,0} when t is unreachable.
// Relies on project macros/helpers visible elsewhere: MP (make_pair),
// AA/BB (.first/.second), SZ (.size()), and cmin(a,b) which here updates
// a in place with min(a,b) -- presumably a "chmin" macro; confirm at its
// definition site.
//
// NOTE(review): the pot[i] += dist[i] update also runs for nodes left at
// COST_INF (unreachable) -- confirm callers only rely on potentials of
// reachable nodes, or that COST_INF does not overflow T when accumulated.
std::pair<LL,T>dijkstra(int s,int t,LL FLOW_BOUND){
  std::vector<int>used(V,0);
  std::vector<T>dist(V,COST_INF);
  // path[v] = {parent vertex, index of the edge used in adj[parent]}
  std::vector<PII>path(V,MP(-1,-1));
  // max-heap of (-reduced distance, vertex) emulates a min-heap
  std::priority_queue<std::pair<T,int> >Q;
  dist[s]=0;
  Q.push(MP(0,s));
  while(!Q.empty()){
    int x=Q.top().BB;
    Q.pop();
    if(used[x])continue; // stale queue entry
    used[x]=1;
    for(int i=0;i<adj[x].SZ;i++)if(adj[x][i].cap>0){
      edge e=adj[x][i];
      int y=e.to;
      // reduced edge cost; non-negative when potentials are valid
      T d=dist[x]+e.cost+pot[x]-pot[y];
      if(d<dist[y]&&!used[y]){
        dist[y]=d;
        path[y]=MP(x,i);
        Q.push(MP(-d,y)); // negate: priority_queue is a max-heap
      }
    }
  }
  // fold the new shortest distances into the potentials
  for(int i=0;i<V;i++)
    pot[i]+=dist[i];
  if(dist[t]==COST_INF)
    return MP(0,0);
  // walk t->s once to find the bottleneck capacity and true path cost
  LL f=FLOW_BOUND;
  T sum=0;
  int x=t;
  while(x!=s){
    int y=path[x].AA;
    int id=path[x].BB;
    sum+=adj[y][id].cost;
    cmin(f,adj[y][id].cap); // f = min(f, residual capacity)
    x=y;
  }
  // second walk: apply the augmentation to forward and reverse edges
  x=t;
  while(x!=s){
    int y=path[x].AA;
    int id=path[x].BB;
    adj[y][id].cap-=f;
    int id2=adj[y][id].rev;
    adj[x][id2].cap+=f;
    x=y;
  }
  return MP(f,f*sum);
}
// For every active UL UE and each of its component carriers, pre-assign the
// highest feasible MCS (capped by the configured target and lowered until the
// estimated TX power fits the reported power headroom), then grow the RB
// allocation through rb_table until the TBS covers the UE's UL buffer or a
// bandwidth/power/table limit is hit.  Results are stored in the UE template
// (pre_assigned_mcs_ul, pre_allocated_rb_table_index_ul,
// pre_allocated_nb_rb_ul); UEs with an empty buffer get index -1 / 0 RBs.
//
// \param module_idP eNB module instance
// \param frameP     current frame
// \param subframeP  current subframe
// \param first_rb   per-CC index of the first schedulable UL RB
void assign_max_mcs_min_rb(module_id_t module_idP,int frameP, sub_frame_t subframeP, uint16_t *first_rb)
{
  int i;
  uint16_t n,UE_id;
  uint8_t CC_id;
  rnti_t rnti = -1;
  // start from the configured target MCS, hard-capped at 16
  int mcs=cmin(16,openair_daq_vars.target_ue_ul_mcs);
  int rb_table_index=0,tbs,tx_power;
  UE_list_t *UE_list = &eNB_mac_inst[module_idP].UE_list;
  UE_TEMPLATE *UE_template;
  LTE_DL_FRAME_PARMS *frame_parms;

  // NOTE(review): mcs and rb_table_index are NOT reset per UE/CC, so each
  // iteration continues from the previous UE's values -- confirm this
  // carry-over is intended.
  for (i=UE_list->head_ul;i>=0;i=UE_list->next_ul[i]) {
    rnti = UE_RNTI(module_idP,i);
    if (rnti==0)
      continue;
    UE_id = i;
    for (n=0;n<UE_list->numactiveULCCs[UE_id];n++) {
      // This is the actual CC_id in the list
      CC_id = UE_list->ordered_ULCCids[n][UE_id];
      frame_parms=mac_xface->get_lte_frame_parms(module_idP,CC_id);
      UE_template = &UE_list->UE_template[CC_id][UE_id];
      // if this UE has UL traffic
      if (UE_template->ul_total_buffer > 0 ) {
        tbs = mac_xface->get_TBS_UL(mcs,1);  // fixme: set use_srs flag
        tx_power= mac_xface->estimate_ue_tx_power(tbs,rb_table[rb_table_index],0,frame_parms->Ncp,0);
        // lower the MCS (floor 3) until the estimated TX power fits the PHR
        while (((UE_template->phr_info - tx_power) < 0 ) && (mcs > 3)){
          // LOG_I(MAC,"UE_template->phr_info %d tx_power %d mcs %d\n", UE_template->phr_info,tx_power, mcs);
          mcs--;
          tbs = mac_xface->get_TBS_UL(mcs,rb_table[rb_table_index]);
          tx_power = mac_xface->estimate_ue_tx_power(tbs,rb_table[rb_table_index],0,frame_parms->Ncp,0); // fixme: set use_srs
        }
        // grow the RB allocation until the TBS covers the UL buffer, within
        // the bandwidth, power-headroom, and rb_table-size limits
        while ((tbs < UE_template->ul_total_buffer) &&
               (rb_table[rb_table_index]<(frame_parms->N_RB_UL-first_rb[CC_id])) &&
               ((UE_template->phr_info - tx_power) > 0) &&
               (rb_table_index < 33 )){
          // LOG_I(MAC,"tbs %d ul buffer %d rb table %d max ul rb %d\n", tbs, UE_template->ul_total_buffer, rb_table[rb_table_index], frame_parms->N_RB_UL-first_rb[CC_id]);
          rb_table_index++;
          tbs = mac_xface->get_TBS_UL(mcs,rb_table[rb_table_index]);
          tx_power = mac_xface->estimate_ue_tx_power(tbs,rb_table[rb_table_index],0,frame_parms->Ncp,0);
        }
        UE_template->ue_tx_power = tx_power;
        // step back if the last increment overshot the available bandwidth
        if (rb_table[rb_table_index]>(frame_parms->N_RB_UL-first_rb[CC_id]-1)) {
          rb_table_index--;
        }
        // 1 or 2 PRB with cqi enabled does not work well!
        if (rb_table[rb_table_index]<3)
          rb_table_index=2; //3PRB
        UE_template->pre_assigned_mcs_ul=mcs;
        UE_template->pre_allocated_rb_table_index_ul=rb_table_index;
        UE_template->pre_allocated_nb_rb_ul= rb_table[rb_table_index];
        LOG_D(MAC,"[eNB %d] frame %d subframe %d: for UE %d CC %d: pre-assigned mcs %d, pre-allocated rb_table[%d]=%d RBs (phr %d, tx power %d)\n",
              module_idP, frameP, subframeP, UE_id, CC_id,
              UE_template->pre_assigned_mcs_ul,
              UE_template->pre_allocated_rb_table_index_ul,
              UE_template->pre_allocated_nb_rb_ul,
              UE_template->phr_info,tx_power);
      } else {
        // no UL traffic: mark the UE as having no pre-allocation
        UE_template->pre_allocated_rb_table_index_ul=-1;
        UE_template->pre_allocated_nb_rb_ul=0;
      }
    }
  }
}
void ulsch_scheduler_pre_processor(module_id_t module_idP, int frameP, sub_frame_t subframeP, uint16_t *first_rb, uint8_t aggregation, uint32_t *nCCE){ int16_t i; uint16_t UE_id,n,r; uint8_t CC_id, round, harq_pid; uint16_t nb_allocated_rbs[MAX_NUM_CCs][NUMBER_OF_UE_MAX],total_allocated_rbs[MAX_NUM_CCs],average_rbs_per_user[MAX_NUM_CCs]; int16_t total_remaining_rbs[MAX_NUM_CCs]; uint16_t max_num_ue_to_be_scheduled=0,total_ue_count=0; rnti_t rnti= -1; uint32_t nCCE_to_be_used[CC_id]; UE_list_t *UE_list = &eNB_mac_inst[module_idP].UE_list; UE_TEMPLATE *UE_template; LTE_DL_FRAME_PARMS *frame_parms; // LOG_I(MAC,"store ulsch buffers\n"); // convert BSR to bytes for comparison with tbs store_ulsch_buffer(module_idP,frameP, subframeP); //LOG_I(MAC,"assign max mcs min rb\n"); // maximize MCS and then allocate required RB according to the buffer occupancy with the limit of max available UL RB assign_max_mcs_min_rb(module_idP,frameP, subframeP, first_rb); //LOG_I(MAC,"sort ue \n"); // sort ues sort_ue_ul (module_idP,frameP, subframeP); // we need to distribute RBs among UEs // step1: reset the vars for (CC_id=0;CC_id<MAX_NUM_CCs;CC_id++) { nCCE_to_be_used[CC_id]= nCCE[CC_id]; total_allocated_rbs[CC_id]=0; total_remaining_rbs[CC_id]=0; average_rbs_per_user[CC_id]=0; for (i=UE_list->head_ul;i>=0;i=UE_list->next_ul[i]) { nb_allocated_rbs[CC_id][i]=0; } } //LOG_I(MAC,"step2 \n"); // step 2: calculate the average rb per UE total_ue_count =0; max_num_ue_to_be_scheduled=0; for (i=UE_list->head_ul;i>=0;i=UE_list->next_ul[i]) { rnti = UE_RNTI(module_idP,i); if (rnti==0) continue; UE_id = i; for (n=0;n<UE_list->numactiveULCCs[UE_id];n++) { // This is the actual CC_id in the list CC_id = UE_list->ordered_ULCCids[n][UE_id]; UE_template = &UE_list->UE_template[CC_id][UE_id]; average_rbs_per_user[CC_id]=0; frame_parms = mac_xface->get_lte_frame_parms(module_idP,CC_id); if (UE_template->pre_allocated_nb_rb_ul > 0) { total_ue_count+=1; } if((mac_xface->get_nCCE_max(module_idP,CC_id) - 
nCCE_to_be_used[CC_id]) > (1<<aggregation)){ nCCE_to_be_used[CC_id] = nCCE_to_be_used[CC_id] + (1<<aggregation); max_num_ue_to_be_scheduled+=1; } if (total_ue_count == 0) average_rbs_per_user[CC_id] = 0; else if (total_ue_count == 1 ) // increase the available RBs, special case, average_rbs_per_user[CC_id] = frame_parms->N_RB_UL-first_rb[CC_id]+1; else if( (total_ue_count <= (frame_parms->N_RB_DL-first_rb[CC_id])) && (total_ue_count <= max_num_ue_to_be_scheduled)) average_rbs_per_user[CC_id] = (uint16_t) floor((frame_parms->N_RB_UL-first_rb[CC_id])/total_ue_count); else if (max_num_ue_to_be_scheduled > 0 ) average_rbs_per_user[CC_id] = (uint16_t) floor((frame_parms->N_RB_UL-first_rb[CC_id])/max_num_ue_to_be_scheduled); else { average_rbs_per_user[CC_id]=1; LOG_W(MAC,"[eNB %d] frame %d subframe %d: UE %d CC %d: can't get average rb per user (should not be here)\n", module_idP,frameP,subframeP,UE_id,CC_id); } } } LOG_D(MAC,"[eNB %d] Frame %d subframe %d: total ue %d, max num ue to be scheduled %d\n", module_idP, frameP, subframeP,total_ue_count, max_num_ue_to_be_scheduled); //LOG_D(MAC,"step3\n"); // step 3: assigne RBS for (i=UE_list->head_ul;i>=0;i=UE_list->next_ul[i]) { rnti = UE_RNTI(module_idP,i); if (rnti==0) continue; UE_id = i; for (n=0;n<UE_list->numactiveULCCs[UE_id];n++) { // This is the actual CC_id in the list CC_id = UE_list->ordered_ULCCids[n][UE_id]; mac_xface->get_ue_active_harq_pid(module_idP,CC_id,rnti,frameP,subframeP,&harq_pid,&round,1); if(round>0) nb_allocated_rbs[CC_id][UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb_ul[harq_pid]; else nb_allocated_rbs[CC_id][UE_id] = cmin(UE_template->pre_allocated_nb_rb_ul, average_rbs_per_user[CC_id]); total_allocated_rbs[CC_id]+= nb_allocated_rbs[CC_id][UE_id]; } } // step 4: assigne the remaining RBs and set the pre_allocated rbs accordingly for(r=0;r<2;r++){ for (i=UE_list->head_ul;i>=0;i=UE_list->next_ul[i]) { rnti = UE_RNTI(module_idP,i); if (rnti==0) continue; UE_id = i; for 
(n=0;n<UE_list->numactiveULCCs[UE_id];n++) { // This is the actual CC_id in the list CC_id = UE_list->ordered_ULCCids[n][UE_id]; UE_template = &UE_list->UE_template[CC_id][UE_id]; frame_parms = mac_xface->get_lte_frame_parms(module_idP,CC_id); total_remaining_rbs[CC_id]=frame_parms->N_RB_UL - first_rb[CC_id] - total_allocated_rbs[CC_id]; if (total_ue_count == 1 ) total_remaining_rbs[CC_id]+=1; if ( r == 0 ) { while ( (UE_template->pre_allocated_nb_rb_ul > 0 ) && (nb_allocated_rbs[CC_id][UE_id] < UE_template->pre_allocated_nb_rb_ul) && (total_remaining_rbs[CC_id] > 0)){ nb_allocated_rbs[CC_id][UE_id] = cmin(nb_allocated_rbs[CC_id][UE_id]+1,UE_template->pre_allocated_nb_rb_ul); total_remaining_rbs[CC_id]--; total_allocated_rbs[CC_id]++; } } else { UE_template->pre_allocated_nb_rb_ul= nb_allocated_rbs[CC_id][UE_id]; LOG_D(MAC,"******************UL Scheduling Information for UE%d CC_id %d ************************\n",UE_id, CC_id); LOG_D(MAC,"[eNB %d] total RB allocated for UE%d CC_id %d = %d\n", module_idP, UE_id, CC_id, UE_template->pre_allocated_nb_rb_ul); } } } } for (CC_id=0;CC_id<MAX_NUM_CCs;CC_id++) { frame_parms= mac_xface->get_lte_frame_parms(module_idP,CC_id); if (total_allocated_rbs[CC_id]>0) LOG_D(MAC,"[eNB %d] total RB allocated for all UEs = %d/%d\n", module_idP, total_allocated_rbs[CC_id], frame_parms->N_RB_UL - first_rb[CC_id]); } }
// This function assigns pre-available RBS to each UE in specified sub-bands before scheduling is done
//
// Flow: reset per-UE scheduling state, store DLSCH buffers, compute required
// RBs per UE (assign_rbs_required), sort UEs, then allocate RBGs in two
// rounds (average share, then remaining RBs).  The TM5 sections additionally
// try to pair two TM5 UEs on the same RBG pair for MU-MIMO.
void dlsch_scheduler_pre_processor (module_id_t Mod_id,
                                    frame_t frameP,
                                    sub_frame_t subframeP,
                                    uint8_t dl_pow_off[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
                                    uint16_t pre_nb_available_rbs[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
                                    int N_RBG[MAX_NUM_CCs],
                                    unsigned char rballoc_sub_UE[MAX_NUM_CCs][NUMBER_OF_UE_MAX][N_RBG_MAX],
                                    int *mbsfn_flag)
{
  unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],harq_pid=0,harq_pid1=0,harq_pid2=0,round=0,round1=0,round2=0,total_ue_count;
  unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX];
  int UE_id, UE_id2, i;
  uint16_t ii,j;
  uint16_t nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
  uint16_t nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
  uint16_t nb_rbs_required_remaining_1[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
  uint16_t i1,i2,i3,r1=0;
  uint16_t average_rbs_per_user[MAX_NUM_CCs];
  rnti_t rnti,rnti1,rnti2;
  // NOTE(review): eNB_UE_stats1 is never assigned in this function but is
  // dereferenced in the TM5 PMI comparison below -- NULL dereference when
  // TM5 is compiled in; confirm against upstream, where it is fetched for
  // the first UE of the candidate pair.
  LTE_eNB_UE_stats *eNB_UE_stats1 = NULL;
  LTE_eNB_UE_stats *eNB_UE_stats2 = NULL;
  int min_rb_unit[MAX_NUM_CCs];
  uint8_t CC_id;
  UE_list_t *UE_list = &eNB_mac_inst[Mod_id].UE_list;
  LTE_DL_FRAME_PARMS *frame_parms[MAX_NUM_CCs];
  int rrc_status = RRC_IDLE;
  int transmission_mode = 0;

  for (CC_id=0;CC_id<MAX_NUM_CCs;CC_id++) {
    if (mbsfn_flag[CC_id]>0)  // If this CC is allocated for MBSFN skip it here
      continue;
    frame_parms[CC_id] = mac_xface->get_lte_frame_parms(Mod_id,CC_id);
    min_rb_unit[CC_id]=get_min_rb_unit(Mod_id,CC_id);
    for (i=UE_list->head;i>=0;i=UE_list->next[i]) {
      UE_id = i;
      // Initialize scheduling information for all active UEs
      dlsch_scheduler_pre_processor_reset(UE_id, CC_id, N_RBG[CC_id], dl_pow_off, nb_rbs_required, pre_nb_available_rbs, nb_rbs_required_remaining, rballoc_sub_UE, rballoc_sub, MIMO_mode_indicator);
    }
  }

  // Store the DLSCH buffer for each logical channel
  store_dlsch_buffer (Mod_id,frameP,subframeP);

  // Calculate the number of RBs required by each UE on the basis of logical channel's buffer
  assign_rbs_required (Mod_id,frameP,subframeP,nb_rbs_required,min_rb_unit);

  // Sorts the user on the basis of dlsch logical channel buffer and CQI
  sort_UEs (Mod_id,frameP,subframeP);

  total_ue_count =0;
  // loop over all active UEs
  for (i=UE_list->head;i>=0;i=UE_list->next[i]) {
    rnti = UE_RNTI(Mod_id,i);
    if(rnti == 0)
      continue;
    UE_id = i;
    for (ii=0;ii<UE_num_active_CC(UE_list,UE_id);ii++) {
      CC_id = UE_list->ordered_CCids[ii][UE_id];
      average_rbs_per_user[CC_id]=0;
      mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti,frameP,subframeP,&harq_pid,&round,0);
      if(round>0) // retransmission: reuse the RB count of the initial transmission
        nb_rbs_required[CC_id][UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid];
      //nb_rbs_required_remaining[UE_id] = nb_rbs_required[UE_id];
      if (nb_rbs_required[CC_id][UE_id] > 0) {
        total_ue_count = total_ue_count + 1;
      }
      // hypotetical assignement
      /*
       * If schedule is enabled and if the priority of the UEs is modified
       * The average rbs per logical channel per user will depend on the level of
       * priority. Concerning the hypothetical assignement, we should assign more
       * rbs to prioritized users. Maybe, we can do a mapping between the
       * average rbs per user and the level of priority or multiply the average rbs
       * per user by a coefficient which represents the degree of priority.
       */
      if (total_ue_count == 0)
        average_rbs_per_user[CC_id] = 0;
      else if( (min_rb_unit[CC_id] * total_ue_count) <= (frame_parms[CC_id]->N_RB_DL) )
        average_rbs_per_user[CC_id] = (uint16_t) floor(frame_parms[CC_id]->N_RB_DL/total_ue_count);
      else
        average_rbs_per_user[CC_id] = min_rb_unit[CC_id]; // more UEs than RBs: hand out the minimum unit
    }
  }

  // note: nb_rbs_required is assigned according to total_buffer_dl
  // extend nb_rbs_required to capture per LCID RB required
  for(i=UE_list->head;i>=0;i=UE_list->next[i]){
    for (ii=0;ii<UE_num_active_CC(UE_list,i);ii++) {
      CC_id = UE_list->ordered_CCids[ii][i];
      // control channel
      if (mac_get_rrc_status(Mod_id,1,i) < RRC_RECONFIGURED)
        nb_rbs_required_remaining_1[CC_id][i] = nb_rbs_required[CC_id][i];
      else
        nb_rbs_required_remaining_1[CC_id][i] = cmin(average_rbs_per_user[CC_id],nb_rbs_required[CC_id][i]);
    }
  }

  //Allocation to UEs is done in 2 rounds,
  // 1st round: average number of RBs allocated to each UE
  // 2nd round: remaining RBs are allocated to high priority UEs
  for(r1=0;r1<2;r1++){
    for(i=UE_list->head; i>=0;i=UE_list->next[i]) {
      for (ii=0;ii<UE_num_active_CC(UE_list,i);ii++) {
        CC_id = UE_list->ordered_CCids[ii][i];
        if(r1 == 0)
          nb_rbs_required_remaining[CC_id][i] = nb_rbs_required_remaining_1[CC_id][i];
        else // rb required based only on the buffer - rb allloctaed in the 1st round + extra reaming rb form the 1st round
          nb_rbs_required_remaining[CC_id][i] = nb_rbs_required[CC_id][i]-nb_rbs_required_remaining_1[CC_id][i]+nb_rbs_required_remaining[CC_id][i];
        LOG_D(MAC,"round %d : nb_rbs_required_remaining[%d][%d]= %d (remaining_1 %d, required %d, pre_nb_available_rbs %d, N_RBG %d, rb_unit %d)\n",
              r1, CC_id, i,
              nb_rbs_required_remaining[CC_id][i],
              nb_rbs_required_remaining_1[CC_id][i],
              nb_rbs_required[CC_id][i],
              pre_nb_available_rbs[CC_id][i],
              N_RBG[CC_id],
              min_rb_unit[CC_id]);
      }
    }
    if (total_ue_count > 0 ) {
      for(i=UE_list->head; i>=0;i=UE_list->next[i]) {
        UE_id = i;
        for (ii=0;ii<UE_num_active_CC(UE_list,UE_id);ii++) {
          CC_id = UE_list->ordered_CCids[ii][UE_id];
          rnti = UE_RNTI(Mod_id,UE_id);
          // LOG_D(MAC,"UE %d rnti 0x\n", UE_id, rnti );
          if(rnti == 0)
            continue;
          transmission_mode = mac_xface->get_transmission_mode(Mod_id,CC_id,rnti);
          mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti,frameP,subframeP,&harq_pid,&round,0);
          rrc_status = mac_get_rrc_status(Mod_id,1,UE_id);
          /* 1st allocate for the retx */
          // retransmission in data channels
          // control channel in the 1st transmission
          // data channel for all TM
          LOG_D(MAC,"calling dlsch_scheduler_pre_processor_allocate .. \n ");
          dlsch_scheduler_pre_processor_allocate (Mod_id, UE_id, CC_id, N_RBG[CC_id], transmission_mode, min_rb_unit[CC_id], frame_parms[CC_id]->N_RB_DL, dl_pow_off, nb_rbs_required, pre_nb_available_rbs, nb_rbs_required_remaining, rballoc_sub_UE, rballoc_sub, MIMO_mode_indicator);
#ifdef TM5
          // data chanel TM5: to be revisted
          // Try to pair this TM5 UE with a second TM5 UE on a free RBG pair
          // for MU-MIMO (orthogonal PMIs, checked via DL_pmi_single below).
          if ((round == 0 ) && (transmission_mode == 5) && (dl_pow_off[CC_id][UE_id] != 1)){
            for(j=0;j<N_RBG[CC_id];j+=2) {
              if( (((j == (N_RBG[CC_id]-1))&& (rballoc_sub[CC_id][j] == 0) && (rballoc_sub_UE[CC_id][UE_id][j] == 0)) ||
                   ((j < (N_RBG[CC_id]-1)) && (rballoc_sub[CC_id][j+1] == 0) && (rballoc_sub_UE[CC_id][UE_id][j+1] == 0)) ) &&
                  (nb_rbs_required_remaining[CC_id][UE_id]>0)){
                for (ii = UE_list->next[i+1];ii >=0;ii=UE_list->next[ii]) {
                  UE_id2 = ii;
                  rnti2 = UE_RNTI(Mod_id,UE_id2);
                  if(rnti2 == 0)
                    continue;
                  eNB_UE_stats2 = mac_xface->get_eNB_UE_stats(Mod_id,CC_id,rnti2);
                  mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti2,frameP,subframeP,&harq_pid2,&round2,0);
                  if ((mac_get_rrc_status(Mod_id,1,UE_id2) >= RRC_RECONFIGURED) &&
                      (round2==0) &&
                      (mac_xface->get_transmission_mode(Mod_id,CC_id,rnti2)==5) &&
                      (dl_pow_off[CC_id][UE_id2] != 1)) {
                    if( (((j == (N_RBG[CC_id]-1)) && (rballoc_sub_UE[CC_id][UE_id2][j] == 0)) ||
                         ((j < (N_RBG[CC_id]-1)) && (rballoc_sub_UE[CC_id][UE_id2][j+1] == 0)) ) &&
                        (nb_rbs_required_remaining[CC_id][UE_id2]>0)){
                      // see NOTE(review) above: eNB_UE_stats1 is NULL here
                      if((((eNB_UE_stats2->DL_pmi_single^eNB_UE_stats1->DL_pmi_single)<<(14-j))&0xc000)== 0x4000){ //MU-MIMO only for 25 RBs configuration
                        // mark the RBG (pair) as used by both UEs in MU-MIMO mode
                        rballoc_sub[CC_id][j] = 1;
                        rballoc_sub_UE[CC_id][UE_id][j] = 1;
                        rballoc_sub_UE[CC_id][UE_id2][j] = 1;
                        MIMO_mode_indicator[CC_id][j] = 0;
                        if (j< N_RBG[CC_id]-1) {
                          rballoc_sub[CC_id][j+1] = 1;
                          rballoc_sub_UE[CC_id][UE_id][j+1] = 1;
                          rballoc_sub_UE[CC_id][UE_id2][j+1] = 1;
                          MIMO_mode_indicator[CC_id][j+1] = 0;
                        }
                        dl_pow_off[CC_id][UE_id] = 0;
                        dl_pow_off[CC_id][UE_id2] = 0;
                        // last RBG of 25/50-RB bandwidths is smaller than a full unit
                        if ((j == N_RBG[CC_id]-1) &&
                            ((PHY_vars_eNB_g[Mod_id][CC_id]->lte_frame_parms.N_RB_DL == 25) ||
                             (PHY_vars_eNB_g[Mod_id][CC_id]->lte_frame_parms.N_RB_DL == 50))){
                          nb_rbs_required_remaining[CC_id][UE_id] = nb_rbs_required_remaining[CC_id][UE_id] - min_rb_unit[CC_id]+1;
                          pre_nb_available_rbs[CC_id][UE_id] = pre_nb_available_rbs[CC_id][UE_id] + min_rb_unit[CC_id]-1;
                          nb_rbs_required_remaining[CC_id][UE_id2] = nb_rbs_required_remaining[CC_id][UE_id2] - min_rb_unit[CC_id]+1;
                          pre_nb_available_rbs[CC_id][UE_id2] = pre_nb_available_rbs[CC_id][UE_id2] + min_rb_unit[CC_id]-1;
                        } else {
                          nb_rbs_required_remaining[CC_id][UE_id] = nb_rbs_required_remaining[CC_id][UE_id] - 4;
                          pre_nb_available_rbs[CC_id][UE_id] = pre_nb_available_rbs[CC_id][UE_id] + 4;
                          nb_rbs_required_remaining[CC_id][UE_id2] = nb_rbs_required_remaining[CC_id][UE_id2] - 4;
                          pre_nb_available_rbs[CC_id][UE_id2] = pre_nb_available_rbs[CC_id][UE_id2] + 4;
                        }
                        break;
                      }
                    }
                  }
                }
              }
            }
          }
#endif
        }
      }
    } // total_ue_count
  } // end of for for r1 and r2

#ifdef TM5
  // This has to be revisited!!!!
  // Count per-CC RBG usage by MIMO mode and update the transmission counters.
  for (CC_id=0;CC_id<MAX_NUM_CCs;CC_id++) {
    i1=0;
    i2=0;
    i3=0;
    for (j=0;j<N_RBG[CC_id];j++){
      if(MIMO_mode_indicator[CC_id][j] == 2)
        i1 = i1+1;
      else if(MIMO_mode_indicator[CC_id][j] == 1)
        i2 = i2+1;
      else if(MIMO_mode_indicator[CC_id][j] == 0)
        i3 = i3+1;
    }
    if((i1 < N_RBG[CC_id]) && (i2>0) && (i3==0))
      PHY_vars_eNB_g[Mod_id][CC_id]->check_for_SUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->check_for_SUMIMO_transmissions + 1;
    if(i3 == N_RBG[CC_id] && i1==0 && i2==0)
      PHY_vars_eNB_g[Mod_id][CC_id]->FULL_MUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->FULL_MUMIMO_transmissions + 1;
    if((i1 < N_RBG[CC_id]) && (i3 > 0))
      PHY_vars_eNB_g[Mod_id][CC_id]->check_for_MUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->check_for_MUMIMO_transmissions + 1;
    PHY_vars_eNB_g[Mod_id][CC_id]->check_for_total_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->check_for_total_transmissions + 1;
  }
#endif

  // Final per-UE debug dump of the allocation result.
  for(i=UE_list->head; i>=0;i=UE_list->next[i]) {
    UE_id = i;
    for (ii=0;ii<UE_num_active_CC(UE_list,UE_id);ii++) {
      CC_id = UE_list->ordered_CCids[ii][UE_id];
      //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].dl_pow_off = dl_pow_off[UE_id];
      LOG_D(MAC,"******************DL Scheduling Information for UE%d ************************\n",UE_id);
      LOG_D(MAC,"dl power offset UE%d = %d \n",UE_id,dl_pow_off[CC_id][UE_id]);
      LOG_D(MAC,"***********RB Alloc for every subband for UE%d ***********\n",UE_id);
      for(j=0;j<N_RBG[CC_id];j++){
        //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].rballoc_sub[i] = rballoc_sub_UE[CC_id][UE_id][i];
        LOG_D(MAC,"RB Alloc for UE%d and Subband%d = %d\n",UE_id,j,rballoc_sub_UE[CC_id][UE_id][j]);
      }
      //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].pre_nb_available_rbs = pre_nb_available_rbs[CC_id][UE_id];
      LOG_D(MAC,"Total RBs allocated for UE%d = %d\n",UE_id,pre_nb_available_rbs[CC_id][UE_id]);
    }
  }
}
// This function returns the estimated number of RBs required by each UE for downlink scheduling
//
// For every active UE: refresh the CQI-derived MCS on each component carrier
// (capped by the configured target), sort the UE's CCs by ascending MCS, and
// then -- if the UE has DL data buffered -- grow nb_rbs_required in
// min_rb_unit steps until the resulting TBS covers the buffer or the CC
// bandwidth is exhausted.
void assign_rbs_required (module_id_t Mod_id,
                          frame_t frameP,
                          sub_frame_t subframe,
                          uint16_t nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
                          int min_rb_unit[MAX_NUM_CCs])
{
  rnti_t rnti;
  uint16_t TBS = 0;
  LTE_eNB_UE_stats *eNB_UE_stats[MAX_NUM_CCs];
  int UE_id,n,i,j,CC_id,pCCid,tmp;
  UE_list_t *UE_list = &eNB_mac_inst[Mod_id].UE_list;
  UE_TEMPLATE *UE_template;
  LTE_DL_FRAME_PARMS *frame_parms[MAX_NUM_CCs];

  // clear rb allocations across all CC_ids
  for (UE_id=UE_list->head;UE_id>=0;UE_id=UE_list->next[UE_id]){
    pCCid = UE_PCCID(Mod_id,UE_id);
    rnti = UE_list->UE_template[pCCid][UE_id].rnti;

    //update CQI information across component carriers
    for (n=0;n<UE_list->numactiveCCs[UE_id];n++) {
      CC_id = UE_list->ordered_CCids[n][UE_id];
      frame_parms[CC_id] = mac_xface->get_lte_frame_parms(Mod_id,CC_id);
      eNB_UE_stats[CC_id] = mac_xface->get_eNB_UE_stats(Mod_id,CC_id,rnti);
      /*
      DevCheck(((eNB_UE_stats[CC_id]->DL_cqi[0] < MIN_CQI_VALUE) || (eNB_UE_stats[CC_id]->DL_cqi[0] > MAX_CQI_VALUE)), eNB_UE_stats[CC_id]->DL_cqi[0], MIN_CQI_VALUE, MAX_CQI_VALUE);
      */
      // map reported CQI to MCS, then cap at the configured target MCS
      eNB_UE_stats[CC_id]->dlsch_mcs1=cqi_to_mcs[eNB_UE_stats[CC_id]->DL_cqi[0]];
      eNB_UE_stats[CC_id]->dlsch_mcs1 = cmin(eNB_UE_stats[CC_id]->dlsch_mcs1,openair_daq_vars.target_ue_dl_mcs);
    }

    // provide the list of CCs sorted according to MCS
    // (simple bubble-style sort over the small per-UE CC list)
    for (i=0;i<UE_list->numactiveCCs[UE_id];i++) {
      for (j=i+1;j<UE_list->numactiveCCs[UE_id];j++) {
        if (eNB_UE_stats[UE_list->ordered_CCids[i][UE_id]]->dlsch_mcs1 > eNB_UE_stats[UE_list->ordered_CCids[j][UE_id]]->dlsch_mcs1) {
          tmp = UE_list->ordered_CCids[i][UE_id];
          UE_list->ordered_CCids[i][UE_id] = UE_list->ordered_CCids[j][UE_id];
          UE_list->ordered_CCids[j][UE_id] = tmp;
        }
      }
    }

    /*
    if ((mac_get_rrc_status(Mod_id,1,UE_id) < RRC_RECONFIGURED)){ // If we still don't have a default radio bearer
      nb_rbs_required[pCCid][UE_id] = PHY_vars_eNB_g[Mod_id][pCCid]->lte_frame_parms.N_RB_DL;
      continue;
    }
    */
    /* NN --> RK
     * check the index of UE_template"
     */
    // if (UE_list->UE_template[UE_id]->dl_buffer_total> 0) {
    if (UE_list->UE_template[pCCid][UE_id].dl_buffer_total> 0) {
      LOG_D(MAC,"[preprocessor] assign RB for UE %d\n",UE_id);
      for (i=0;i<UE_list->numactiveCCs[UE_id];i++) {
        CC_id = UE_list->ordered_CCids[i][UE_id];
        if (eNB_UE_stats[CC_id]->dlsch_mcs1==0)
          nb_rbs_required[CC_id][UE_id] = 4;  // don't let the TBS get too small
        else
          nb_rbs_required[CC_id][UE_id] = min_rb_unit[CC_id];
        TBS = mac_xface->get_TBS_DL(eNB_UE_stats[CC_id]->dlsch_mcs1,nb_rbs_required[CC_id][UE_id]);
        LOG_D(MAC,"[preprocessor] start RB assignement for UE %d CC_id %d dl buffer %d (RB unit %d, MCS %d, TBS %d) \n",
              UE_id, CC_id, UE_list->UE_template[pCCid][UE_id].dl_buffer_total, nb_rbs_required[CC_id][UE_id],eNB_UE_stats[CC_id]->dlsch_mcs1,TBS);
        /* calculating required number of RBs for each UE */
        while (TBS < UE_list->UE_template[pCCid][UE_id].dl_buffer_total) {
          nb_rbs_required[CC_id][UE_id] += min_rb_unit[CC_id];
          // clamp at the CC bandwidth and stop growing
          if (nb_rbs_required[CC_id][UE_id] > frame_parms[CC_id]->N_RB_DL) {
            TBS = mac_xface->get_TBS_DL(eNB_UE_stats[CC_id]->dlsch_mcs1,frame_parms[CC_id]->N_RB_DL);
            nb_rbs_required[CC_id][UE_id] = frame_parms[CC_id]->N_RB_DL;
            break;
          }
          TBS = mac_xface->get_TBS_DL(eNB_UE_stats[CC_id]->dlsch_mcs1,nb_rbs_required[CC_id][UE_id]);
        } // end of while
        LOG_D(MAC,"[eNB %d] Frame %d: UE %d on CC %d: RB unit %d, nb_required RB %d (TBS %d, mcs %d)\n", Mod_id, frameP,UE_id, CC_id, min_rb_unit[CC_id], nb_rbs_required[CC_id][UE_id], TBS, eNB_UE_stats[CC_id]->dlsch_mcs1);
      }
    }
  }
}
void CellConservativeLinear::interp (const FArrayBox& crse, int crse_comp, FArrayBox& fine, int fine_comp, int ncomp, const Box& fine_region, const IntVect& ratio, const Geometry& crse_geom, const Geometry& fine_geom, Array<BCRec>& bcr, int actual_comp, int actual_state) { BL_PROFILE("CellConservativeLinear::interp()"); BL_ASSERT(bcr.size() >= ncomp); // // Make box which is intersection of fine_region and domain of fine. // Box target_fine_region = fine_region & fine.box(); // // crse_bx is coarsening of target_fine_region, grown by 1. // Box crse_bx = CoarseBox(target_fine_region,ratio); // // Slopes are needed only on coarsening of target_fine_region. // Box cslope_bx(crse_bx); cslope_bx.grow(-1); // // Make a refinement of cslope_bx // Box fine_version_of_cslope_bx = BoxLib::refine(cslope_bx,ratio); // // Get coarse and fine edge-centered volume coordinates. // Array<Real> fvc[BL_SPACEDIM]; Array<Real> cvc[BL_SPACEDIM]; int dir; for (dir = 0; dir < BL_SPACEDIM; dir++) { fine_geom.GetEdgeVolCoord(fvc[dir],fine_version_of_cslope_bx,dir); crse_geom.GetEdgeVolCoord(cvc[dir],crse_bx,dir); } // // alloc tmp space for slope calc. 
// // In ucc_slopes and lcc_slopes , there is a slight abuse of // the number of compenents argument // --> there is a slope for each component in each coordinate // direction // FArrayBox ucc_slopes(cslope_bx,ncomp*BL_SPACEDIM); FArrayBox lcc_slopes(cslope_bx,ncomp*BL_SPACEDIM); FArrayBox slope_factors(cslope_bx,BL_SPACEDIM); FArrayBox cmax(cslope_bx,ncomp); FArrayBox cmin(cslope_bx,ncomp); FArrayBox alpha(cslope_bx,ncomp); Real* fdat = fine.dataPtr(fine_comp); const Real* cdat = crse.dataPtr(crse_comp); Real* ucc_xsldat = ucc_slopes.dataPtr(0); Real* lcc_xsldat = lcc_slopes.dataPtr(0); Real* xslfac_dat = slope_factors.dataPtr(0); #if (BL_SPACEDIM>=2) Real* ucc_ysldat = ucc_slopes.dataPtr(ncomp); Real* lcc_ysldat = lcc_slopes.dataPtr(ncomp); Real* yslfac_dat = slope_factors.dataPtr(1); #endif #if (BL_SPACEDIM==3) Real* ucc_zsldat = ucc_slopes.dataPtr(2*ncomp); Real* lcc_zsldat = lcc_slopes.dataPtr(2*ncomp); Real* zslfac_dat = slope_factors.dataPtr(2); #endif const int* flo = fine.loVect(); const int* fhi = fine.hiVect(); const int* clo = crse.loVect(); const int* chi = crse.hiVect(); const int* fblo = target_fine_region.loVect(); const int* fbhi = target_fine_region.hiVect(); const int* csbhi = cslope_bx.hiVect(); const int* csblo = cslope_bx.loVect(); int lin_limit = (do_linear_limiting ? 1 : 0); const int* cvcblo = crse_bx.loVect(); const int* fvcblo = fine_version_of_cslope_bx.loVect(); int slope_flag = 1; int cvcbhi[BL_SPACEDIM]; int fvcbhi[BL_SPACEDIM]; for (dir=0; dir<BL_SPACEDIM; dir++) { cvcbhi[dir] = cvcblo[dir] + cvc[dir].size() - 1; fvcbhi[dir] = fvcblo[dir] + fvc[dir].size() - 1; } D_TERM(Real* voffx = new Real[fvc[0].size()]; ,
int main ( int argc , char** argv ) { int mtrx [ RW ][ CL ] , i , j , m , i1 , j1 , s , c ; int rslt [ CL ] ; int* rsli , * rslj , rsi , rsj ; rsi = rsj = 0 ; // copy matrix for ( i = 0 ; i < RW ; i++ ) for ( j = 0 ; j < CL ; j++ ) mtrx [ i ][ j ] = matrix [ i ][ j ] ; // print init matrix printf ( "Init\n" ) ; pmtr ( mtrx , RW , CL ) ; printf ( "\n\n" ) ; // Step 1. Find minimal elem in row and // vichitaem from all elements of this row l5: s = 1 ; for ( i = 0 ; i < RW ; i++ ) { m = rmin ( mtrx , i , CL ) ; for ( j = 0 ; j < CL ; j++ ) mtrx [ i ][ j ] -= m ; } #if ( DBG == 1 ) // print matrix printf ( "Step 1\n" ) ; pmtr ( mtrx , RW , CL ) ; printf ( "\n\n" ) ; #endif l0: for ( j = 0 ; j < CL ; j++ ) rslt [ j ] = -1 ; // build result array for ( j = 0 ; j < CL ; j++ ) { for ( i = 0 ; i < RW ; i++ ) { if ( ( mtrx [ i ][ j ] == 0 ) && ( rslt [ j ] == -1 ) && ( inar ( rslt , i , CL ) == -1 ) ) { j1 = inrw ( mtrx , i , 0 , j , CL ) ; if ( j1 == -1 ) { rslt [ j ] = i ; break ; } if ( incl ( mtrx , j , 0 , i , RW ) == -1 ) { rslt [ j ] = i ; break; } l1: if ( incl ( mtrx , j1 , 0 , i , RW ) == -1 ) { rslt [ j1 ] = i ; break; } j1 = inrw ( mtrx , i , 0 , j1 , CL ) ; if ( j1 == -1 ) { rslt [ j ] = i ; break; } goto l1 ; } } } // check if task solved j1 = inar ( rslt , -1 , CL ) ; if ( j1 == -1 ) goto l99 ; else if ( s == 2 ) goto l2 ; // Step 2. Use step for columns. s++ ; for ( j = j1 ; j < CL ; j++ ) { if ( rslt [ j ] != -1 ) continue ; m = cmin ( mtrx , j , RW ) ; for ( i = 0 ; i < RW ; i++ ) mtrx [ i ][ j ] -= m ; } #if ( DBG == 1 ) printf ( "Step 2\n" ) ; pmtr ( mtrx , RW , CL ) ; printf ( "\n\n" ) ; #endif goto l0; l2: // Step 3. 
Find unused rows c = 0 ; for ( i = 0 ; i < RW ; i++ ) if ( inar ( rslt , i , CL ) == -1 ) { rsi++ ; c++ ; if ( ( rsi - 1 ) == 0 ) { rsli = ( int* ) malloc ( rsi * sizeof ( *rsli ) ) ; *rsli = i ; } else { rsli = realloc ( rsli , rsi * sizeof ( *rsli ) ) ; *( rsli + ( rsi - 1 ) ) = i ; } } if ( c == 0 ) goto l4 ; l3: // in all unused rows find columns' numbers with zero c = 0 ; for ( i = 0 ; i < rsi ; i++ ) { j1 = inrw ( mtrx , *( rsli + i ) , 0 , -1 , CL ) ; while ( j1 != -1 ) { if ( !inpn ( rslj , j1 , rsj ) ) { rsj++ ; c++ ; if ( ( rsj - 1 ) == 0 ) { rslj = ( int* ) malloc ( rsj * sizeof ( *rslj ) ) ; *rslj = j1 ; } else { rslj = realloc ( rslj , rsj * sizeof ( *rslj ) ) ; *( rslj + ( rsj - 1 ) ) = j1 ; } } j1 = inrw ( mtrx , *( rsli + i ) , 0 , j1 , CL ) ; } } if ( c == 0 ) goto l4 ; // in all unused columns find rows' numbers with zero c = 0 ; for ( j = 0 ; j < rsj ; j++ ) { i1 = incl ( mtrx , *( rslj + j ) , 0 , -1 , RW ) ; while ( i1 != -1 ) { if ( !inpn ( rsli , i1 , rsi ) ) { rsi++ ; c++ ; rsli = realloc ( rsli , rsi * sizeof ( *rsli ) ) ; *( rsli + ( rsi - 1 ) ) = i1 ; } i1 = incl ( mtrx , *( rslj + j ) , 0 , i1 , RW ) ; } } if ( c ) goto l3 ; l4: // Step 4. Vicherkivaem otmechennie stolbci // i neotmechennie stroki // for i == rsli , j != rslj m = -1 ; for ( i = 0 ; i < RW ; i++ ) { if ( !inpn ( rsli , i , rsi ) ) continue ; for ( j = 0 ; j < CL ; j++ ) { if ( inpn ( rslj , j , rsj ) ) continue ; m = ( mtrx [ i ][ j ] < m || m < 0 ) ? 
mtrx [ i ][ j ] : m ; } } for ( i = 0 ; i < RW ; i++ ) { for ( j = 0 ; j < CL ; j++ ) { if ( inpn ( rsli , i , rsi ) && !inpn ( rslj , j , rsj ) ) mtrx [ i ][ j ] -= m ; if ( !inpn ( rsli , i , rsi ) && inpn ( rslj , j , rsj ) ) mtrx [ i ][ j ] += m ; } } free ( rsli ) ; free ( rslj ) ; rsi = rsj = 0 ; #if ( DBG == 1 ) printf ( "Step 4\n" ) ; pmtr ( mtrx , RW , CL ) ; printf ( "\n\n" ) ; #endif goto l5 ; l99: for ( j = 0 ; j < CL ; j++ ) printf ( "For task: %d use worker:%d\n" , j , rslt [ j ] ) ; printf ( "\n" ) ; return 0 ; }