paging: Optimize retrieving number of requests per paging group

This patch caches the count of initial paging requests per paging
group. This count is needed to estimate T3113 when a new incoming
paging request arrives and has to be inserted into the queue.
With this cache there is no need to traverse the whole initial_req_list
every time a new incoming paging request is received, potentially
saving a lot of iteration, and hence CPU time, when the queue is long.

Related: SYS#6200
Change-Id: I6994127827d120a0b4dd3de51e1ddde39f2fe531
Pau Espin 2022-11-29 12:37:53 +01:00
parent d681b897b6
commit 867e73e96b
2 changed files with 17 additions and 20 deletions
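For orientation, the idea of the patch in isolation: keep one counter per paging group, updated on every insert and remove, so the number of queued initial requests for a group can be read in O(1) instead of walking the list. The sketch below is a hypothetical standalone illustration (names such as pgroup_cache are invented here), not the osmo-bsc code itself, which keeps the counters inside struct gsm_bts_paging_state as shown in the diff that follows.

/* Hypothetical, self-contained sketch of a per-paging-group counter cache.
 * All names here are invented for illustration. */
#include <assert.h>
#include <string.h>

#define NUM_PGROUPS (9 * 9)   /* MAX_PAGING_BLOCKS_CCCH * MAX_BS_PA_MFRMS */

struct pgroup_cache {
	unsigned int total;                  /* all queued initial requests */
	unsigned int per_group[NUM_PGROUPS]; /* queued requests per paging group */
};

static void pgroup_cache_init(struct pgroup_cache *c)
{
	memset(c, 0, sizeof(*c));
}

/* O(1) bookkeeping whenever a request enters the initial queue... */
static void pgroup_cache_add(struct pgroup_cache *c, unsigned int pgroup)
{
	assert(pgroup < NUM_PGROUPS);
	c->total++;
	c->per_group[pgroup]++;
}

/* ...and whenever it leaves the queue (sent, expired or discarded). */
static void pgroup_cache_del(struct pgroup_cache *c, unsigned int pgroup)
{
	assert(pgroup < NUM_PGROUPS && c->per_group[pgroup] > 0);
	c->total--;
	c->per_group[pgroup]--;
}

/* O(1) lookup replacing the former O(n) walk over the list: */
static unsigned int pgroup_cache_count(const struct pgroup_cache *c,
				       unsigned int pgroup)
{
	assert(pgroup < NUM_PGROUPS);
	return c->per_group[pgroup];
}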

include/osmocom/bsc/paging.h

@@ -61,6 +61,9 @@ enum bsc_paging_reason {
*/
#define PAGING_THRESHOLD_X3113_DEFAULT_SEC 60
#define MAX_PAGING_BLOCKS_CCCH 9
#define MAX_BS_PA_MFRMS 9
struct bsc_paging_params {
enum bsc_paging_reason reason;
struct bsc_msc_data *msc;
@@ -117,6 +120,9 @@ struct gsm_bts_paging_state {
/* Number of requests in pending_requests_len */
unsigned int retrans_req_list_len;
/* Number of requests in initial_req_list, indexed by pgroup. */
unsigned int initial_req_pgroup_counts[MAX_PAGING_BLOCKS_CCCH * MAX_BS_PA_MFRMS];
struct gsm_bts *bts;
struct osmo_timer_list work_timer;

src/osmo-bsc/paging.c

@@ -88,10 +88,12 @@ static void paging_remove_request(struct gsm_paging_request *req)
osmo_timer_del(&req->T3113);
llist_del(&req->entry);
if (req->attempts == 0)
if (req->attempts == 0) {
bts_pag_st->initial_req_list_len--;
else
bts_pag_st->initial_req_pgroup_counts[req->pgroup]--;
} else {
bts_pag_st->retrans_req_list_len--;
}
osmo_stat_item_dec(osmo_stat_item_group_get_item(bts->bts_statg, BTS_STAT_PAGING_REQ_QUEUE_LENGTH), 1);
bsc_subscr_remove_active_paging_request(req->bsub, req);
talloc_free(req);
@@ -220,6 +222,7 @@ static void paging_req_timeout_retrans(struct gsm_paging_request *request, const
if (request->attempts == 0) {
/* req is removed from initial_req_list and inserted into retrans_req_list, update list lengths: */
bts_pag_st->initial_req_list_len--;
bts_pag_st->initial_req_pgroup_counts[request->pgroup]--;
bts_pag_st->retrans_req_list_len++;
}
llist_del(&request->entry);
@@ -503,9 +506,8 @@ static int _paging_request(const struct bsc_paging_params *params, struct gsm_bt
struct gsm_paging_request *req;
unsigned int t3113_timeout_s;
unsigned int x3113_s = osmo_tdef_get(bts->network->T_defs, -3113, OSMO_TDEF_S, -1);
unsigned int reqs_before = 0, reqs_before_same_pgroup = 0;
uint8_t pgroup = gsm0502_calc_paging_group(&bts->si_common.chan_desc,
str_to_imsi(params->bsub->imsi));
uint8_t pgroup;
unsigned int reqs_before, reqs_before_same_pgroup;
rate_ctr_inc(rate_ctr_group_get_ctr(bts->bts_ctrs, BTS_CTR_PAGING_ATTEMPTED));
@@ -530,21 +532,9 @@ static int _paging_request(const struct bsc_paging_params *params, struct gsm_bt
paging_remove_request(first_retrans_req);
}
/* The incoming new req will be stored in initial_req_list giving higher prio
* to it over retransmissions. This avoids new subscribers being paged to
* be delayed if the paging queue is full due to a lot of retranmissions.
* Retranmissions usually mean MS are not reachable/available, so the
* rationale here is to prioritize new subs which may be available.
*
* Count initial reqs already stored in initial_req_list, since those
* will be scheduled for transmission before current incoming req and
* need to be taken into account when calculating T3113 for it.
*/
llist_for_each_entry(req, &bts_entry->initial_req_list, entry) {
reqs_before++;
if (req->pgroup == pgroup)
reqs_before_same_pgroup++;
}
pgroup = gsm0502_calc_paging_group(&bts->si_common.chan_desc, str_to_imsi(params->bsub->imsi));
reqs_before = bts_entry->initial_req_list_len;
reqs_before_same_pgroup = bts_entry->initial_req_pgroup_counts[pgroup];
LOG_PAGING_BTS(params, bts, DPAG, LOGL_DEBUG, "Start paging\n");
req = talloc_zero(tall_paging_ctx, struct gsm_paging_request);
@@ -559,6 +549,7 @@ static int _paging_request(const struct bsc_paging_params *params, struct gsm_bt
bsc_subscr_add_active_paging_request(req->bsub, req);
bts_entry->initial_req_list_len++;
bts_entry->initial_req_pgroup_counts[req->pgroup]++;
osmo_stat_item_inc(osmo_stat_item_group_get_item(bts->bts_statg, BTS_STAT_PAGING_REQ_QUEUE_LENGTH), 1);
llist_add_tail(&req->entry, &bts_entry->initial_req_list);
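The comment kept in the hunk above explains why the counts matter: initial requests already queued for the same paging group are transmitted before the new one, so they feed into its T3113 timeout. As a rough, hypothetical illustration of such an estimate (not the actual osmo-bsc calculation), assuming each queued same-group request adds roughly one paging interval of delay:

/* Hypothetical illustration only: estimate T3113 for a newly queued request
 * from the number of same-group requests already ahead of it in the queue. */
static unsigned int estimate_t3113_s(unsigned int base_t3113_s,
				     unsigned int reqs_before_same_pgroup,
				     unsigned int pgroup_interval_ms)
{
	/* Each queued request of the same paging group occupies one paging
	 * occasion of that group, delaying our first transmission by roughly
	 * one group interval. */
	unsigned int queue_delay_ms = reqs_before_same_pgroup * pgroup_interval_ms;

	return base_t3113_s + (queue_delay_ms + 999) / 1000; /* round up to seconds */
}

With the cached counters, reqs_before_same_pgroup is a single array read instead of a traversal of initial_req_list, which is the point of this patch.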