Introduce support for libosmo-mgcp-client MGW pooling
Large RAN installations may benefit from distributing the RTP voice stream load over multiple media gateways. libosmo-mgcp-client supports MGW pooling since version 1.8.0 (more than one year ago). OsmoBSC has already been making use of it since then (see osmo-bsc.git 8d22e6870637ed6d392a8a77aeaebc51b23a8a50); let's use this feature in osmo-hnbgw too. This commit is also part of a series of patches cleaning up libosmo-mgcp-client and slowly getting rid of the old non-mgw-pooled VTY configuration, in order to keep only 1 way to configure libosmo-mgcp-client through VTY. Related: SYS#5091 Related: SYS#5987 Change-Id: I371dc773b58788ee21037dc25d77f556c89c6b61
This commit is contained in:
parent
bef2c345df
commit
e62af4d46a
|
@ -11,11 +11,11 @@ hnbgw
|
|||
iuh
|
||||
local-ip 0.0.0.0
|
||||
hnbap-allow-tmsi 1
|
||||
mgcp
|
||||
mgw remote-ip 127.0.0.1
|
||||
mgw local-port 2729
|
||||
mgw remote-port 2427
|
||||
mgw reset-endpoint rtpbridge/*
|
||||
mgw 0
|
||||
remote-ip 127.0.0.1
|
||||
local-port 2729
|
||||
remote-port 2427
|
||||
reset-endpoint rtpbridge/*
|
||||
pfcp
|
||||
remote-addr 127.0.0.2
|
||||
local-addr 127.0.0.1
|
||||
|
|
|
@ -11,9 +11,9 @@ hnbgw
|
|||
iuh
|
||||
local-ip 0.0.0.0
|
||||
hnbap-allow-tmsi 1
|
||||
mgcp
|
||||
mgw remote-ip 127.0.0.1
|
||||
mgw local-port 2729
|
||||
mgw remote-port 2427
|
||||
mgw reset-endpoint rtpbridge/*
|
||||
mgw 0
|
||||
remote-ip 127.0.0.1
|
||||
local-port 2729
|
||||
remote-port 2427
|
||||
reset-endpoint rtpbridge/*
|
||||
|
||||
|
|
|
@ -136,10 +136,10 @@ An example configuration for OsmoHNBGW's MGCP client:
|
|||
|
||||
----
|
||||
hnbgw
|
||||
mgcp
|
||||
mgw remote-ip 127.0.0.1
|
||||
mgw remote-port 2427
|
||||
mgw reset-endpoint rtpbridge/*
|
||||
mgw 0
|
||||
remote-ip 127.0.0.1
|
||||
remote-port 2427
|
||||
reset-endpoint rtpbridge/*
|
||||
----
|
||||
|
||||
==== Configure co-located User Plane Function
|
||||
|
|
|
@ -10,6 +10,8 @@
|
|||
#define DEBUG
|
||||
#include <osmocom/core/logging.h>
|
||||
|
||||
#include <osmocom/mgcp_client/mgcp_client.h>
|
||||
#include <osmocom/mgcp_client/mgcp_client_pool.h>
|
||||
|
||||
enum {
|
||||
DMAIN,
|
||||
|
@ -160,7 +162,9 @@ struct hnb_gw {
|
|||
struct osmo_sccp_addr iucs_remote_addr;
|
||||
struct osmo_sccp_addr iups_remote_addr;
|
||||
} sccp;
|
||||
struct mgcp_client *mgcp_client;
|
||||
/* MGW pool, also includes the single MGCP client as fallback if no
|
||||
* pool is configured. */
|
||||
struct mgcp_client_pool *mgw_pool;
|
||||
|
||||
struct {
|
||||
struct osmo_pfcp_endpoint *ep;
|
||||
|
|
|
@ -8,6 +8,7 @@ enum osmo_iuh_vty_node {
|
|||
IUCS_NODE,
|
||||
IUPS_NODE,
|
||||
MGCP_NODE,
|
||||
MGW_NODE,
|
||||
PFCP_NODE,
|
||||
};
|
||||
|
||||
|
|
|
@ -104,6 +104,7 @@ static struct hnb_gw *hnb_gw_create(void *ctx)
|
|||
|
||||
context_map_init(gw);
|
||||
|
||||
gw->mgw_pool = mgcp_client_pool_alloc(gw);
|
||||
gw->config.mgcp_client = talloc_zero(tall_hnb_ctx, struct mgcp_client_conf);
|
||||
mgcp_client_conf_init(gw->config.mgcp_client);
|
||||
|
||||
|
@ -673,6 +674,41 @@ static int hnb_ctrl_node_lookup(void *data, vector vline, int *node_type, void *
|
|||
return 1;
|
||||
}
|
||||
|
||||
static int hnbgw_mgw_setup(void)
|
||||
{
|
||||
struct mgcp_client *mgcp_client_single;
|
||||
unsigned int pool_members_initalized;
|
||||
|
||||
/* Initialize MGW pool. This initalizes and connects all MGCP clients that are currently configured in
|
||||
* the pool. Adding additional MGCP clients to the pool is possible but the user has to configure and
|
||||
* (re)connect them manually from the VTY. */
|
||||
pool_members_initalized = mgcp_client_pool_connect(g_hnb_gw->mgw_pool);
|
||||
if (pool_members_initalized) {
|
||||
LOGP(DMGW, LOGL_NOTICE,
|
||||
"MGW pool with %u pool members configured, (ignoring MGW configuration in VTY node 'mgcp').\n",
|
||||
pool_members_initalized);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Initialize and connect a single MGCP client. This MGCP client will appear as the one and only pool
|
||||
* member if there is no MGW pool configured. */
|
||||
LOGP(DMGW, LOGL_NOTICE, "No MGW pool configured, using MGW configuration in VTY node 'mgcp'\n");
|
||||
mgcp_client_single = mgcp_client_init(tall_hnb_ctx, g_hnb_gw->config.mgcp_client);
|
||||
if (!mgcp_client_single) {
|
||||
LOGP(DMGW, LOGL_ERROR, "MGW (single) client initalization failed\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (mgcp_client_connect(mgcp_client_single)) {
|
||||
LOGP(DMGW, LOGL_ERROR, "MGW (single) connect failed at (%s:%u)\n",
|
||||
g_hnb_gw->config.mgcp_client->remote_addr,
|
||||
g_hnb_gw->config.mgcp_client->remote_port);
|
||||
return -EINVAL;
|
||||
}
|
||||
mgcp_client_pool_register_single(g_hnb_gw->mgw_pool, mgcp_client_single);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
struct osmo_stream_srv_link *srv;
|
||||
|
@ -784,17 +820,8 @@ int main(int argc, char **argv)
|
|||
g_hnb_gw->iuh = srv;
|
||||
|
||||
/* Initialize and connect MGCP client. */
|
||||
g_hnb_gw->mgcp_client = mgcp_client_init(tall_hnb_ctx, g_hnb_gw->config.mgcp_client);
|
||||
if (!g_hnb_gw->mgcp_client) {
|
||||
LOGP(DMGW, LOGL_ERROR, "MGW client initalization failed\n");
|
||||
if (hnbgw_mgw_setup() != 0)
|
||||
return -EINVAL;
|
||||
}
|
||||
if (mgcp_client_connect(g_hnb_gw->mgcp_client)) {
|
||||
LOGP(DMGW, LOGL_ERROR, "MGW connect failed at (%s:%u)\n",
|
||||
g_hnb_gw->config.mgcp_client->remote_addr,
|
||||
g_hnb_gw->config.mgcp_client->remote_port);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
#if ENABLE_PFCP
|
||||
/* If UPF is configured, set up PFCP socket and send Association Setup Request to UPF */
|
||||
|
|
|
@ -464,14 +464,6 @@ static int config_write_hnbgw_iups(struct vty *vty)
|
|||
return CMD_SUCCESS;
|
||||
}
|
||||
|
||||
static int config_write_hnbgw_mgcp(struct vty *vty)
|
||||
{
|
||||
vty_out(vty, " mgcp%s", VTY_NEWLINE);
|
||||
mgcp_client_config_write(vty, " ");
|
||||
|
||||
return CMD_SUCCESS;
|
||||
}
|
||||
|
||||
#if ENABLE_PFCP
|
||||
static int config_write_hnbgw_pfcp(struct vty *vty)
|
||||
{
|
||||
|
@ -521,7 +513,11 @@ void hnbgw_vty_init(struct hnb_gw *gw, void *tall_ctx)
|
|||
install_element_ve(&show_talloc_cmd);
|
||||
|
||||
install_element(HNBGW_NODE, &cfg_hnbgw_mgcp_cmd);
|
||||
install_node(&mgcp_node, config_write_hnbgw_mgcp);
|
||||
/* Deprecated: Old MGCP config without pooling support in MSC node: */
|
||||
install_node(&mgcp_node, NULL);
|
||||
mgcp_client_vty_init(tall_hnb_ctx, MGCP_NODE, g_hnb_gw->config.mgcp_client);
|
||||
|
||||
mgcp_client_pool_vty_init(HNBGW_NODE, MGW_NODE, " ", g_hnb_gw->mgw_pool);
|
||||
|
||||
#if ENABLE_PFCP
|
||||
install_node(&pfcp_node, config_write_hnbgw_pfcp);
|
||||
|
@ -531,6 +527,5 @@ void hnbgw_vty_init(struct hnb_gw *gw, void *tall_ctx)
|
|||
install_element(PFCP_NODE, &cfg_pfcp_remote_addr_cmd);
|
||||
#endif
|
||||
|
||||
mgcp_client_vty_init(tall_hnb_ctx, MGCP_NODE, g_hnb_gw->config.mgcp_client);
|
||||
osmo_tdef_vty_groups_init(HNBGW_NODE, hnbgw_tdef_group);
|
||||
}
|
||||
|
|
|
@ -141,6 +141,7 @@ static void mgw_fsm_crcx_hnb_onenter(struct osmo_fsm_inst *fi, uint32_t prev_sta
|
|||
RANAP_RAB_AssignmentRequestIEs_t *ies;
|
||||
const char *epname;
|
||||
struct mgcp_conn_peer mgw_info;
|
||||
struct mgcp_client *mgcp_client;
|
||||
int rc;
|
||||
|
||||
LOGPFSML(fi, LOGL_DEBUG, "RAB-AssignmentRequest received, creating HNB side call-leg on MGW...\n");
|
||||
|
@ -172,9 +173,16 @@ static void mgw_fsm_crcx_hnb_onenter(struct osmo_fsm_inst *fi, uint32_t prev_sta
|
|||
mgw_info.codecs[0] = CODEC_IUFP;
|
||||
mgw_info.codecs_len = 1;
|
||||
|
||||
epname = mgcp_client_rtpbridge_wildcard(map->hnb_ctx->gw->mgcp_client);
|
||||
mgcp_client = mgcp_client_pool_get(map->hnb_ctx->gw->mgw_pool);
|
||||
if (!mgcp_client) {
|
||||
LOGPFSML(fi, LOGL_ERROR,
|
||||
"cannot ensure MGW endpoint -- no MGW configured, check configuration!\n");
|
||||
osmo_fsm_inst_state_chg(fi, MGW_ST_FAILURE, 0, 0);
|
||||
return;
|
||||
}
|
||||
epname = mgcp_client_rtpbridge_wildcard(mgcp_client);
|
||||
mgw_fsm_priv->mgcpc_ep =
|
||||
osmo_mgcpc_ep_alloc(fi, MGW_EV_MGCP_TERM, map->hnb_ctx->gw->mgcp_client, mgw_fsm_T_defs, fi->id, "%s", epname);
|
||||
osmo_mgcpc_ep_alloc(fi, MGW_EV_MGCP_TERM, mgcp_client, mgw_fsm_T_defs, fi->id, "%s", epname);
|
||||
mgw_fsm_priv->mgcpc_ep_ci_hnb = osmo_mgcpc_ep_ci_add(mgw_fsm_priv->mgcpc_ep, "to-HNB");
|
||||
|
||||
osmo_mgcpc_ep_ci_request(mgw_fsm_priv->mgcpc_ep_ci_hnb, MGCP_VERB_CRCX, &mgw_info, fi, MGW_EV_MGCP_OK,
|
||||
|
@ -488,9 +496,13 @@ static void mgw_fsm_failure_onenter(struct osmo_fsm_inst *fi, uint32_t prev_stat
|
|||
static void mgw_fsm_allstate_action(struct osmo_fsm_inst *fi, uint32_t event, void *data)
|
||||
{
|
||||
struct mgw_fsm_priv *mgw_fsm_priv = fi->priv;
|
||||
struct mgcp_client *mgcp_client;
|
||||
|
||||
switch (event) {
|
||||
case MGW_EV_MGCP_TERM:
|
||||
/* Put MGCP client back into MGW pool */
|
||||
mgcp_client = osmo_mgcpc_ep_client(mgw_fsm_priv->mgcpc_ep);
|
||||
mgcp_client_pool_put(mgcp_client);
|
||||
mgw_fsm_priv->mgcpc_ep = NULL;
|
||||
LOGPFSML(fi, LOGL_ERROR, "Media gateway failed\n");
|
||||
osmo_fsm_inst_state_chg(fi, MGW_ST_FAILURE, 0, 0);
|
||||
|
@ -547,6 +559,10 @@ static void mgw_fsm_pre_term(struct osmo_fsm_inst *fi, enum osmo_fsm_term_cause
|
|||
struct hnbgw_context_map *map = mgw_fsm_priv->map;
|
||||
|
||||
if (mgw_fsm_priv->mgcpc_ep) {
|
||||
/* Put MGCP client back into MGW pool */
|
||||
struct mgcp_client *mgcp_client = osmo_mgcpc_ep_client(mgw_fsm_priv->mgcpc_ep);
|
||||
mgcp_client_pool_put(mgcp_client);
|
||||
|
||||
osmo_mgcpc_ep_clear(mgw_fsm_priv->mgcpc_ep);
|
||||
mgw_fsm_priv->mgcpc_ep = NULL;
|
||||
}
|
||||
|
|
|
@ -11,6 +11,7 @@ AM_CFLAGS = \
|
|||
$(LIBOSMOVTY_CFLAGS) \
|
||||
$(LIBOSMORANAP_CFLAGS) \
|
||||
$(LIBOSMOSIGTRAN_CFLAGS) \
|
||||
$(LIBOSMOMGCPCLIENT_CFLAGS) \
|
||||
$(COVERAGE_CFLAGS) \
|
||||
$(NULL)
|
||||
|
||||
|
@ -32,6 +33,7 @@ ranap_rab_ass_test_LDADD = \
|
|||
$(LIBOSMOVTY_LIBS) \
|
||||
$(LIBOSMORANAP_LIBS) \
|
||||
$(LIBOSMOSIGTRAN_LIBS) \
|
||||
$(LIBOSMOMGCPCLIENT_LIBS) \
|
||||
$(COVERAGE_LDFLAGS) \
|
||||
$(top_builddir)/src/osmo-hnbgw/ranap_rab_ass.o \
|
||||
$(NULL)
|
||||
|
|
Loading…
Reference in New Issue