Scheduler fix (#200)

* scheduler still not working with RBGs. The reservation of RBGs is not sufficient for the RAR allocation (see the sketch after this list).

* now in the scheduler, we allocate space not only for pending data but also for headers and CE

* can't connect. going to check if it is an issue of the next branch

* cleaned up the interface

* removed obsolete function

* minor fix

* function name change
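
For context on the RBG bookkeeping this diff switches to: downlink type-0 allocations are granted in resource block groups (RBGs) of size P, which depends on the cell bandwidth (the patch reads it via srslte_ra_type0_P()). A minimal, self-contained sketch of the PRB-to-RBG rounding, assuming the TS 36.213 group sizes; the helper names are illustrative, not the srsenb API:

    #include <cstdint>
    #include <cstdio>

    // RBG size P per TS 36.213 Table 7.1.6.1-1 (the patch obtains it via srslte_ra_type0_P()).
    static uint32_t rbg_size(uint32_t nof_prb) {
      if (nof_prb <= 10) return 1;
      if (nof_prb <= 26) return 2;
      if (nof_prb <= 63) return 3;
      return 4;
    }

    // Ceiling division used by sched_ue::prb_to_rbg(): a partial group still costs a full RBG.
    static uint32_t prb_to_rbg(uint32_t nof_prb, uint32_t P) {
      return (nof_prb + P - 1) / P; // same result as ceil((float)nof_prb / P)
    }

    int main() {
      uint32_t P = rbg_size(25);                                       // 25-PRB cell -> P = 2
      std::printf("3 PRBs -> %u RBGs\n", (unsigned)prb_to_rbg(3, P));  // prints "3 PRBs -> 2 RBGs"
      return 0;
    }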
Francisco Paisana 2018-05-21 15:40:11 +01:00 committed by Ismael Gomez
parent a87e7b3e43
commit c689343d81
5 changed files with 86 additions and 58 deletions

View File

@@ -34,7 +34,7 @@ namespace srsenb {
class dl_metric_rr : public sched::metric_dl
{
public:
void new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t start_rb, uint32_t nof_rb, uint32_t nof_ctrl_symbols, uint32_t tti);
void new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t start_rbg, uint32_t nof_rbg, uint32_t nof_ctrl_symbols, uint32_t tti);
dl_harq_proc* get_user_allocation(sched_ue *user);
private:
@@ -49,14 +49,14 @@ private:
uint32_t count_rbg(uint32_t mask);
uint32_t calc_rbg_mask(bool mask[25]);
bool used_rb[MAX_RBG];
bool used_rbg[MAX_RBG];
uint32_t current_tti;
uint32_t total_rb;
uint32_t used_rb_mask;
uint32_t total_rbg;
uint32_t used_rbg_mask;
uint32_t nof_ctrl_symbols;
uint32_t available_rb;
uint32_t available_rbg;
};
class ul_metric_rr : public sched::metric_ul

View File

@@ -93,11 +93,15 @@ public:
* Functions used by scheduler metric objects
*******************************************************/
uint32_t get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols);
uint32_t get_required_prb_ul(uint32_t req_bytes);
uint32_t prb_to_rbg(uint32_t nof_prb);
uint32_t rgb_to_prb(uint32_t nof_rbg);
uint32_t get_pending_dl_new_data(uint32_t tti);
uint32_t get_pending_ul_new_data(uint32_t tti);
uint32_t get_pending_dl_new_data_total(uint32_t tti);
dl_harq_proc *get_pending_dl_harq(uint32_t tti);
dl_harq_proc *get_empty_dl_harq();
@@ -147,11 +151,10 @@ private:
static bool bearer_is_ul(ue_bearer_t *lch);
static bool bearer_is_dl(ue_bearer_t *lch);
bool is_first_dl_tx();
sched_interface::ue_cfg_t cfg;
srslte_cell_t cell;
srslte::log* log_h;
@@ -175,7 +178,8 @@ private:
uint32_t max_mcs_dl;
uint32_t max_mcs_ul;
int fixed_mcs_ul;
int fixed_mcs_dl;
uint32_t P;
int next_tpc_pusch;
int next_tpc_pucch;

View File

@@ -610,7 +610,7 @@ int sched::dl_sched_rar(dl_sched_rar_t rar[MAX_RAR_LIST])
int nof_rar_elems = 0;
for (uint32_t i=0;i<SCHED_MAX_PENDING_RAR;i++)
{
if (pending_rar[i].buf_rar > 0 && avail_rbg >= rar_n_rb)
if (pending_rar[i].buf_rar > 0 && avail_rbg >= (uint32_t)ceil((float)rar_n_rb/P))
{
/* Check if we are still within the RAR window, otherwise discard it */
if (current_tti <= (pending_rar[i].rar_tti + cfg.prach_rar_window + 3)%10240 && current_tti >= pending_rar[i].rar_tti + 3)
@@ -664,8 +664,8 @@ int sched::dl_sched_rar(dl_sched_rar_t rar[MAX_RAR_LIST])
if (generate_format1a(start_rbg*P, rar_n_rb, buf_rar, 0, &rar[nof_rar_elems].dci) >= 0) {
rar[nof_rar_elems].tbs = buf_rar;
nof_rar_elems++;
avail_rbg -= rar_n_rb;
start_rbg += rar_n_rb;
avail_rbg -= (uint32_t)ceil((float)rar_n_rb/P);
start_rbg += (uint32_t)ceil((float)rar_n_rb/P);
} else {
Error("SCHED: Allocating Format1A grant\n");
}
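
The change above converts the RAR reservation to RBG units: rar_n_rb is a PRB count while avail_rbg and start_rbg count resource block groups, so a RAR grant costs ceil(rar_n_rb/P) groups rather than rar_n_rb, which the old lines subtracted directly. A simplified sketch of that accounting, with hypothetical names:

    #include <cstdint>

    // Cost of a rar_n_rb-PRB grant in RBGs, mirroring (uint32_t)ceil((float)rar_n_rb/P) above.
    static uint32_t rar_rbg_cost(uint32_t rar_n_rb, uint32_t P) {
      return (rar_n_rb + P - 1) / P;
    }

    // Hypothetical budget update: with P = 3 and rar_n_rb = 3, the RAR now consumes 1 RBG.
    static bool reserve_rar(uint32_t &avail_rbg, uint32_t &start_rbg, uint32_t rar_n_rb, uint32_t P) {
      uint32_t cost = rar_rbg_cost(rar_n_rb, P);
      if (avail_rbg < cost) {
        return false;      // not enough groups left in this TTI
      }
      avail_rbg -= cost;   // shrink the remaining RBG budget
      start_rbg += cost;   // advance the next allocation start, in RBG units
      return true;
    }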

View File

@@ -47,9 +47,9 @@ uint32_t dl_metric_rr::calc_rbg_mask(bool mask[MAX_RBG])
{
// Build RBG bitmask
uint32_t rbg_bitmask = 0;
for (uint32_t n=0;n<total_rb;n++) {
for (uint32_t n=0;n<total_rbg;n++) {
if (mask[n]) {
rbg_bitmask |= (1<<(total_rb-1-n));
rbg_bitmask |= (1<<(total_rbg-1-n));
}
}
return rbg_bitmask;
@@ -73,21 +73,21 @@ uint32_t dl_metric_rr::get_required_rbg(sched_ue *user, uint32_t tti)
return count_rbg(h->get_rbgmask());
}
uint32_t pending_data = user->get_pending_dl_new_data(current_tti);
return user->get_required_prb_dl(pending_data, nof_ctrl_symbols);
return user->prb_to_rbg(user->get_required_prb_dl(pending_data, nof_ctrl_symbols));
}
void dl_metric_rr::new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t start_rb, uint32_t nof_rb, uint32_t nof_ctrl_symbols_, uint32_t tti)
void dl_metric_rr::new_tti(std::map<uint16_t,sched_ue> &ue_db, uint32_t start_rbg, uint32_t nof_rbg, uint32_t nof_ctrl_symbols_, uint32_t tti)
{
total_rb = start_rb+nof_rb;
for (uint32_t i=0;i<total_rb;i++) {
if (i<start_rb) {
used_rb[i] = true;
total_rbg = start_rbg+nof_rbg;
for (uint32_t i=0;i<total_rbg;i++) {
if (i<start_rbg) {
used_rbg[i] = true;
} else {
used_rb[i] = false;
used_rbg[i] = false;
}
}
available_rb = nof_rb;
used_rb_mask = calc_rbg_mask(used_rb);
available_rbg = nof_rbg;
used_rbg_mask = calc_rbg_mask(used_rbg);
current_tti = tti;
nof_ctrl_symbols = nof_ctrl_symbols_;
@@ -111,8 +111,8 @@ bool dl_metric_rr::new_allocation(uint32_t nof_rbg, uint32_t *rbgmask) {
bool mask_bit[MAX_RBG];
bzero(mask_bit, sizeof(bool)*MAX_RBG);
for (uint32_t i=0;i<total_rb && nof_rbg > 0;i++) {
if (used_rb[i]) {
for (uint32_t i=0;i<total_rbg && nof_rbg > 0;i++) {
if (used_rbg[i]) {
mask_bit[i] = false;
} else {
mask_bit[i] = true;
@@ -126,24 +126,24 @@ bool dl_metric_rr::new_allocation(uint32_t nof_rbg, uint32_t *rbgmask) {
}
void dl_metric_rr::update_allocation(uint32_t new_mask) {
used_rb_mask |= new_mask;
for (uint32_t n=0;n<total_rb;n++) {
if (used_rb_mask & (1<<(total_rb-1-n))) {
used_rb[n] = true;
used_rbg_mask |= new_mask;
for (uint32_t n=0;n<total_rbg;n++) {
if (used_rbg_mask & (1<<(total_rbg-1-n))) {
used_rbg[n] = true;
} else {
used_rb[n] = false;
used_rbg[n] = false;
}
}
}
bool dl_metric_rr::allocation_is_valid(uint32_t mask)
{
return (mask & used_rb_mask);
return (mask & used_rbg_mask);
}
dl_harq_proc* dl_metric_rr::apply_user_allocation(sched_ue *user) {
uint32_t pending_data = user->get_pending_dl_new_data(current_tti);
dl_harq_proc *h = user->get_pending_dl_harq(current_tti);
uint32_t req_bytes = user->get_pending_dl_new_data_total(current_tti);
// Schedule retx if we have space
#if ASYNC_DL_SCHED
@@ -160,7 +160,7 @@ dl_harq_proc* dl_metric_rr::apply_user_allocation(sched_ue *user) {
// If not, try to find another mask in the current tti
uint32_t nof_rbg = count_rbg(retx_mask);
if (nof_rbg < available_rb) {
if (nof_rbg < available_rbg) {
if (new_allocation(nof_rbg, &retx_mask)) {
update_allocation(retx_mask);
h->set_rbgmask(retx_mask);
@@ -176,10 +176,10 @@ dl_harq_proc* dl_metric_rr::apply_user_allocation(sched_ue *user) {
if (h && h->is_empty()) {
#endif
// Allocate resources based on pending data
if (pending_data) {
uint32_t pending_rb = user->get_required_prb_dl(pending_data, nof_ctrl_symbols);
if (req_bytes) {
uint32_t pending_rbg = user->prb_to_rbg(user->get_required_prb_dl(req_bytes, nof_ctrl_symbols));
uint32_t newtx_mask = 0;
new_allocation(pending_rb, &newtx_mask);
new_allocation(pending_rbg, &newtx_mask);
if (newtx_mask) {
update_allocation(newtx_mask);
h->set_rbgmask(newtx_mask);

View File

@@ -71,6 +71,7 @@ void sched_ue::set_cfg(uint16_t rnti_, sched_interface::ue_cfg_t *cfg_, sched_in
rnti = rnti_;
log_h = log_h_;
memcpy(&cell, &cell_cfg->cell, sizeof(srslte_cell_t));
P = srslte_ra_type0_P(cell.nof_prb);
max_mcs_dl = 28;
max_mcs_ul = 28;
@@ -710,6 +711,22 @@ uint32_t sched_ue::get_pending_dl_new_data(uint32_t tti)
return pending_data;
}
/// Use this function in the dl-metric to get the bytes to be scheduled. It accounts for the UE data,
/// the RAR resources, and headers
/// \param tti
/// \return number of bytes to be allocated
uint32_t sched_ue::get_pending_dl_new_data_total(uint32_t tti)
{
uint32_t req_bytes = get_pending_dl_new_data(tti);
if(req_bytes>0) {
req_bytes += (req_bytes < 128) ? 2 : 3; // consider the header
if(is_first_dl_tx()) {
req_bytes += 6; // count for RAR
}
}
return req_bytes;
}
uint32_t sched_ue::get_pending_ul_new_data(uint32_t tti)
{
uint32_t pending_data = 0;
@@ -746,32 +763,39 @@ uint32_t sched_ue::get_pending_ul_old_data()
return pending_data;
}
uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols)
uint32_t sched_ue::prb_to_rbg(uint32_t nof_prb)
{
int mcs = 0;
uint32_t nbytes = 0;
uint32_t n = 0;
if (req_bytes == 0) {
return 0;
}
uint32_t nof_re = 0;
int tbs = 0;
for (n=1;n<=cell.nof_prb && nbytes < req_bytes;n++) {
nof_re = srslte_ra_dl_approx_nof_re(cell, n, nof_ctrl_symbols);
if (fixed_mcs_dl < 0) {
tbs = alloc_tbs_dl(n, nof_re, 0, &mcs);
return (uint32_t) ceil((float) nof_prb / P);
}
uint32_t sched_ue::rgb_to_prb(uint32_t nof_rbg)
{
return P*nof_rbg;
}
uint32_t sched_ue::get_required_prb_dl(uint32_t req_bytes, uint32_t nof_ctrl_symbols)
{
int mcs = 0;
uint32_t nof_re = 0;
int tbs = 0;
uint32_t nbytes = 0;
uint32_t n;
for (n=0; n < cell.nof_prb && nbytes < req_bytes; ++n) {
nof_re = srslte_ra_dl_approx_nof_re(cell, n+1, nof_ctrl_symbols);
if(fixed_mcs_dl < 0) {
tbs = alloc_tbs_dl(n+1, nof_re, 0, &mcs);
} else {
tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(fixed_mcs_dl), n)/8;
tbs = srslte_ra_tbs_from_idx(srslte_ra_tbs_idx_from_mcs(fixed_mcs_dl), n+1)/8;
}
if (tbs > 0) {
nbytes = tbs;
} else if (tbs < 0) {
return 0;
}
}
return n;
}
uint32_t sched_ue::get_required_prb_ul(uint32_t req_bytes)
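
A quick worked example of the new sched_ue::get_pending_dl_new_data_total() accounting from the hunk above: with 100 pending bytes, the header adds 2 bytes (3 once the payload reaches 128 bytes), and the first DL transmission adds a further 6, giving 102 or, on the first transmission, 108. A standalone restatement (not the class method itself):

    #include <cstdint>

    // Mirrors the header/CE accounting added in this commit.
    static uint32_t pending_dl_new_data_total(uint32_t pending_bytes, bool first_dl_tx) {
      uint32_t req_bytes = pending_bytes;
      if (req_bytes > 0) {
        req_bytes += (req_bytes < 128) ? 2 : 3; // MAC (sub)header
        if (first_dl_tx) {
          req_bytes += 6;                       // extra bytes reserved on the first DL tx ("count for RAR" in the code comment)
        }
      }
      return req_bytes;
    }

    // pending_dl_new_data_total(100, false) == 102
    // pending_dl_new_data_total(100, true)  == 108
    // pending_dl_new_data_total(200, false) == 203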