libosmo-mgcp-client: extend the mgcp_client for MGW pooling

At the moment, the MGCP client only supports one MGW per application.
Depending on the requirements of the application, a single MGW might not
offer the performance needed. Let's add support for an MGCP client pool
that is backward compatible with existing applications.

Change-Id: Icaaba0e470e916eefddfee750b83f5f65291a6b0
Related: SYS#5091
Philipp Maier 2021-07-26 13:20:05 +02:00
parent 3d2b76fd95
commit 3f4a4cb49c
14 changed files with 592 additions and 27 deletions

View File

@@ -6,6 +6,7 @@ nobase_include_HEADERS = \
osmocom/mgcp_client/mgcp_client.h \
osmocom/mgcp_client/mgcp_client_endpoint_fsm.h \
osmocom/mgcp_client/mgcp_client_fsm.h \
osmocom/mgcp_client/mgcp_client_pool.h \
osmocom/mgcp/mgcp.h \
osmocom/mgcp/mgcp_common.h \
osmocom/mgcp/osmux.h \

View File

@@ -4,6 +4,7 @@ BUILT_SOURCES = \
noinst_HEADERS = \
mgcp_client_internal.h \
mgcp_client_pool_internal.h \
$(NULL)
mgcp_common.h: $(top_srcdir)/include/osmocom/mgcp/mgcp_common.h

View File

@@ -136,6 +136,8 @@ struct mgcp_client_conf *mgcp_client_conf_actual(struct mgcp_client *mgcp);
struct mgcp_client *mgcp_client_init(void *ctx,
struct mgcp_client_conf *conf);
int mgcp_client_connect(struct mgcp_client *mgcp);
int mgcp_client_connect2(struct mgcp_client *mgcp, unsigned int retry_n_ports);
void mgcp_client_disconnect(struct mgcp_client *mgcp);
const char *mgcp_client_remote_addr_str(struct mgcp_client *mgcp);
uint16_t mgcp_client_remote_port(struct mgcp_client *mgcp);

View File

@@ -47,6 +47,7 @@ void osmo_mgcpc_ep_clear(struct osmo_mgcpc_ep *ep);
const char *osmo_mgcpc_ep_name(const struct osmo_mgcpc_ep *ep);
const char *osmo_mgcpc_ep_ci_name(const struct osmo_mgcpc_ep_ci *ci);
const char *osmo_mgcpc_ep_ci_id(const struct osmo_mgcpc_ep_ci *ci);
struct mgcp_client *osmo_mgcpc_ep_client(const struct osmo_mgcpc_ep *ep);
extern const struct value_string osmo_mgcp_verb_names[];
static inline const char *osmo_mgcp_verb_name(enum mgcp_verb val)

View File

@@ -69,5 +69,6 @@ int mgcp_conn_modify(struct osmo_fsm_inst *fi, uint32_t parent_evt, struct mgcp_
void mgcp_conn_delete(struct osmo_fsm_inst *fi);
const char *mgcp_conn_get_ci(struct osmo_fsm_inst *fi);
struct mgcp_client *mgcp_conn_get_client(struct osmo_fsm_inst *fi);
const char *osmo_mgcpc_conn_peer_name(const struct mgcp_conn_peer *info);

View File

@@ -16,6 +16,7 @@ struct mgcp_client {
mgcp_trans_id_t next_trans_id;
struct llist_head responses_pending;
struct llist_head inuse_endpoints;
struct mgcp_client_pool *pool;
};
struct mgcp_inuse_endpoint {

View File

@@ -0,0 +1,11 @@
#pragma once
struct mgcp_client;
struct mgcp_client_pool;
struct mgcp_client_pool *mgcp_client_pool_alloc(void *talloc_ctx);
void mgcp_client_pool_vty_init(int parent_node, int mgw_node, const char *indent, struct mgcp_client_pool *pool);
unsigned int mgcp_client_pool_connect(struct mgcp_client_pool *pool);
void mgcp_client_pool_register_single(struct mgcp_client_pool *pool, struct mgcp_client *mgcp_client);
struct mgcp_client *mgcp_client_pool_get(struct mgcp_client_pool *pool);
void mgcp_client_pool_put(struct mgcp_client *mgcp_client);
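
For illustration, a minimal sketch of how an application could wire up this API at startup; the talloc context, node identifiers, indent string and function names are placeholders, not part of this patch:

#include <osmocom/core/logging.h>
#include <osmocom/core/utils.h>
#include <osmocom/mgcp_client/mgcp_client.h>
#include <osmocom/mgcp_client/mgcp_client_pool.h>

/* Hypothetical application glue: allocate the pool once, hook it into the
 * VTY below the application's own config node, and connect all configured
 * MGWs after the config file has been parsed. */
static struct mgcp_client_pool *g_mgw_pool;

void example_mgw_pool_setup(void *tall_ctx, int parent_node, int mgw_node)
{
        g_mgw_pool = mgcp_client_pool_alloc(tall_ctx);
        OSMO_ASSERT(g_mgw_pool);
        /* Installs "mgw <0-255>" (and "no mgw <0-255>") under parent_node;
         * the indent string must match the indentation used by that node. */
        mgcp_client_pool_vty_init(parent_node, mgw_node, " ", g_mgw_pool);
}

void example_mgw_pool_start(void)
{
        /* mgcp_client_pool_connect() returns the number of successfully
         * connected pool members. */
        if (mgcp_client_pool_connect(g_mgw_pool) == 0)
                LOGP(DLMGCP, LOGL_ERROR, "MGW pool: no MGW connected\n");
}
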

View File

@@ -0,0 +1,43 @@
#pragma once
/* Struct to handle a member of a pool of MGWs. */
struct mgcp_client_pool_member {
struct llist_head list;
/* Reference number assigned via VTY. This number is used to manage the pool from the VTY and to identify the
* member in the log. */
unsigned int nr;
/* MGCP client configuration. This is not the running configuration; when mgcp_client_init() is executed, a
* copy of this config is created. */
struct mgcp_client_conf conf;
/* MGCP client descriptor, will be automatically allocated when mgcp_client_pool_connect() is called. (the MGCP
* client is connected when this pointer is populated) */
struct mgcp_client *client;
/* A pool member may be set as 'blocked' from the VTY; this means that the pool member may still work and serve
* ongoing calls, but it won't be picked from the pool anymore. */
bool blocked;
/* Reference counter to count how often this pool member is currently picked. */
unsigned int refcount;
};
/* Struct to handle a pool of MGWs. (Use _pool functions) */
struct mgcp_client_pool {
/* A pointer to a 'single' mgcp client. This is a non-pooled MGCP client that is configured using
* mgcp_client_vty_init() and actively registered by the API user using mgcp_client_pool_register_single() */
struct mgcp_client *mgcp_client_single;
/* A list that manages the pool members (see above) */
struct llist_head pool;
/* String to use for indentation when writing the configuration file to the VTY. This field is populated by
* mgcp_client_pool_vty_init() */
char *vty_indent;
/* VTY node specification used with this pool. This field is populated by mgcp_client_pool_vty_init() */
struct cmd_node *vty_node;
};

View File

@@ -32,6 +32,7 @@ libosmo_mgcp_client_la_SOURCES = \
mgcp_client_vty.c \
mgcp_client_fsm.c \
mgcp_client_endpoint_fsm.c \
mgcp_client_pool.c \
$(NULL)
libosmo_mgcp_client_la_LDFLAGS = $(AM_LDFLAGS) -version-info $(MGCP_CLIENT_LIBVERSION)

View File

@@ -875,8 +875,9 @@ static const char *_mgcp_client_name_append_domain(const struct mgcp_client *mgc
/*! Initialize client connection (opens socket)
* \param[in,out] mgcp MGCP client descriptor.
* \param[in] retry_n_ports number of consecutive local ports that should be used to retry on failure.
* \returns 0 on success, -EINVAL on error. */
int mgcp_client_connect(struct mgcp_client *mgcp)
int mgcp_client_connect2(struct mgcp_client *mgcp, unsigned int retry_n_ports)
{
struct osmo_wqueue *wq;
int rc;
@@ -895,7 +896,7 @@ int mgcp_client_connect(struct mgcp_client *mgcp)
osmo_fd_setup(&wq->bfd, -1, OSMO_FD_READ, osmo_wqueue_bfd_cb, mgcp, 0);
rc = init_socket(mgcp, 99);
rc = init_socket(mgcp, retry_n_ports);
if (rc < 0) {
LOGP(DLMGCP, LOGL_FATAL,
"Failed to initialize socket %s:%u -> %s:%u for MGCP GW: %s\n",
@@ -921,6 +922,35 @@ error_close_fd:
return rc;
}
/*! Initialize client connection (opens socket)
* \param[in,out] mgcp MGCP client descriptor.
* \returns 0 on success, -EINVAL on error. */
int mgcp_client_connect(struct mgcp_client *mgcp)
{
return mgcp_client_connect2(mgcp, 99);
}
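
For callers that prefer to fail immediately instead of probing up to 99 fallback ports, the new variant can be used directly; a minimal sketch, assuming retry_n_ports=0 disables the local-port fallback (the wrapper name is illustrative):

#include <osmocom/mgcp_client/mgcp_client.h>

/* Illustrative only: connect without local-port fallback, i.e. give up right
 * away if the configured local port cannot be bound. The pool code below uses
 * the same retry_n_ports=0 setting. */
static int example_connect_no_retry(struct mgcp_client *mgcp)
{
        return mgcp_client_connect2(mgcp, 0);
}
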
/*! Terminate client connection (closes socket).
* \param[in,out] mgcp MGCP client descriptor. */
void mgcp_client_disconnect(struct mgcp_client *mgcp)
{
struct osmo_wqueue *wq;
if (!mgcp) {
LOGP(DLMGCP, LOGL_FATAL, "MGCPGW client not initialized properly\n");
return;
}
wq = &mgcp->wq;
osmo_wqueue_clear(wq);
LOGP(DLMGCP, LOGL_INFO, "MGCP GW connection: %s -- closed!\n", osmo_sock_get_name2(wq->bfd.fd));
close(wq->bfd.fd);
wq->bfd.fd = -1;
if (osmo_fd_is_registered(&wq->bfd))
osmo_fd_unregister(&wq->bfd);
}
/*! Get the IP address of the associated MGW as string.
* \param[in] mgcp MGCP client descriptor.
* \returns a pointer to the address string. */

View File

@@ -216,6 +216,13 @@ const char *osmo_mgcpc_ep_ci_id(const struct osmo_mgcpc_ep_ci *ci)
return ci->mgcp_ci_str;
}
struct mgcp_client *osmo_mgcpc_ep_client(const struct osmo_mgcpc_ep *ep)
{
if (!ep)
return NULL;
return ep->mgcp_client;
}
static struct value_string osmo_mgcpc_ep_fsm_event_names[33] = {};
static char osmo_mgcpc_ep_fsm_event_name_bufs[32][32] = {};

View File

@@ -252,6 +252,18 @@ const char *mgcp_conn_get_ci(struct osmo_fsm_inst *fi)
return mgcp_ctx->conn_id;
}
/* Get the mgcp_client that is used with this mgcp_client_fsm instance */
struct mgcp_client *mgcp_conn_get_client(struct osmo_fsm_inst *fi)
{
struct mgcp_ctx *mgcp_ctx;
if (!fi)
return NULL;
mgcp_ctx = fi->priv;
return mgcp_ctx->mgcp;
}
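
These getters make it possible to hand an MGCP client back to the pool once an FSM-managed connection is released; a minimal sketch (the release hook itself is hypothetical):

#include <osmocom/mgcp_client/mgcp_client_fsm.h>
#include <osmocom/mgcp_client/mgcp_client_pool.h>

/* Hypothetical release path: fetch the client before the FSM instance is
 * terminated, then drop the pool reference that was taken at call setup. */
static void example_call_release(struct osmo_fsm_inst *fi)
{
        struct mgcp_client *mgcp = mgcp_conn_get_client(fi);

        mgcp_conn_delete(fi);
        mgcp_client_pool_put(mgcp);
}
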
static void mgw_crcx_resp_cb(struct mgcp_response *r, void *priv)
{
struct osmo_fsm_inst *fi = priv;

View File

@@ -0,0 +1,186 @@
/* (C) 2021 by sysmocom s.f.m.c. GmbH <info@sysmocom.de>
* All Rights Reserved
*
* Author: Philipp Maier
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
#include <osmocom/mgcp_client/mgcp_client.h>
#include <osmocom/mgcp_client/mgcp_client_internal.h>
#include <osmocom/mgcp_client/mgcp_client_pool_internal.h>
#include <osmocom/mgcp_client/mgcp_client_pool.h>
#include <stddef.h>
/*! Allocate MGCP client pool. This is called once on startup and before the pool is used with
* mgcp_client_pool_vty_init(). Since the pool is linked with the VTY, it must exist throughout the entire runtime.
* \param[in] talloc_ctx talloc context. */
struct mgcp_client_pool *mgcp_client_pool_alloc(void *talloc_ctx)
{
struct mgcp_client_pool *pool;
pool = talloc_zero(talloc_ctx, struct mgcp_client_pool);
if (!pool)
return NULL;
INIT_LLIST_HEAD(&pool->pool);
return pool;
}
/*! Initialize and connect an MGCP client pool.
* \param[in,out] pool MGCP client pool descriptor.
* \returns number of successfully initialized pool members. */
unsigned int mgcp_client_pool_connect(struct mgcp_client_pool *pool)
{
struct mgcp_client_pool_member *pool_member;
unsigned int pool_members_initialized = 0;
llist_for_each_entry(pool_member, &pool->pool, list) {
/* Initialize client */
pool_member->client = mgcp_client_init(pool_member, &pool_member->conf);
if (!pool_member->client) {
LOGP(DLMGCP, LOGL_ERROR, "MGW %u initialization failed\n", pool_member->nr);
continue;
}
/* Set backpointer so that we can detect later that this MGCP client is managed
* by this pool. */
pool_member->client->pool = pool;
/* Connect client */
if (mgcp_client_connect2(pool_member->client, 0)) {
LOGP(DLMGCP, LOGL_ERROR, "MGW %u connect failed at (%s:%u)\n",
pool_member->nr, pool_member->conf.remote_addr, pool_member->conf.remote_port);
talloc_free(pool_member->client);
pool_member->client = NULL;
continue;
}
pool_members_initialized++;
}
return pool_members_initialized;
}
/*! Register a single mgcp_client instance with the pool.
* \param[out] pool MGCP client pool descriptor.
* \param[in] mgcp MGCP client descriptor. */
void mgcp_client_pool_register_single(struct mgcp_client_pool *pool, struct mgcp_client *mgcp_client)
{
/*! Some applications still support the non-pooled MGW VTY configuration variant provided by
* mgcp_client_vty_init(). If this is the case, the mgcp_client instance created by mgcp_client_init()
* can be registered here so that it will appear as if it were part of the pool. When the user actively
* configures MGW pool members, the MGCP client registered here will be ignored. (The registration of
* multiple single mgcp_client instances is not possible.) */
pool->mgcp_client_single = mgcp_client;
}
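
A sketch of this backward-compatible path (the function name is illustrative): an application that still configures a single MGW via mgcp_client_vty_init() can bring it up as before and register it, so that the rest of the code can use the _pool API unchanged.

#include <talloc.h>
#include <osmocom/mgcp_client/mgcp_client.h>
#include <osmocom/mgcp_client/mgcp_client_pool.h>

/* Hypothetical: keep the old single-MGW configuration working while all
 * call handling code only talks to the pool API. */
int example_register_legacy_mgw(struct mgcp_client_pool *pool, void *tall_ctx,
                                struct mgcp_client_conf *conf)
{
        struct mgcp_client *mgcp = mgcp_client_init(tall_ctx, conf);

        if (!mgcp)
                return -1;
        if (mgcp_client_connect(mgcp) < 0) {
                talloc_free(mgcp);
                return -1;
        }
        mgcp_client_pool_register_single(pool, mgcp);
        return 0;
}
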
/* Not every pool member may have a functional MGCP client; we run through the whole pool and pick the suitable
* pool member (has a client, is not blocked) with the lowest load. */
static struct mgcp_client_pool_member *mgcp_client_pool_pick(struct mgcp_client_pool *pool)
{
struct mgcp_client_pool_member *pool_member;
struct mgcp_client_pool_member *pool_member_picked = NULL;
unsigned int n_pool_members = llist_count(&pool->pool);
llist_for_each_entry(pool_member, &pool->pool, list) {
if (pool_member->blocked == false && pool_member->client) {
if (!pool_member_picked)
pool_member_picked = pool_member;
else if (pool_member_picked->refcount > pool_member->refcount)
pool_member_picked = pool_member;
} else {
LOGP(DLMGCP, LOGL_DEBUG, "MGW pool has %u members -- MGW %u is unusable\n", n_pool_members,
pool_member->nr);
}
}
if (pool_member_picked) {
LOGP(DLMGCP, LOGL_DEBUG, "MGW pool has %u members -- using MGW %u (active calls: %u)\n",
n_pool_members, pool_member_picked->nr, pool_member_picked->refcount);
return pool_member_picked;
}
LOGP(DLMGCP, LOGL_ERROR,
"MGW pool has %u members, but no functional MGW pool member found -- check configuration!\n",
n_pool_members);
return NULL;
}
/*! Get an MGCP client from the pool (increment reference counter).
* \param[in,out] pool MGCP client pool descriptor.
* \returns MGCP client descriptor, NULL if no member was found (empty pool). */
struct mgcp_client *mgcp_client_pool_get(struct mgcp_client_pool *pool)
{
struct mgcp_client_pool_member *pool_member;
/*! When an MGCP client is taken from the pool, it is still available for other calls. In fact, only a reference
* counter is incremented to keep track of how many references to a specific MGCP client are currently used
* by the application code. */
/* When the pool is empty, return a single MGCP client if it is registered. */
if (llist_empty(&pool->pool) && pool->mgcp_client_single) {
LOGP(DLMGCP, LOGL_DEBUG, "MGW pool is empty -- using (single) MGW\n");
return pool->mgcp_client_single;
}
/* Abort when the pool is empty */
if (llist_empty(&pool->pool)) {
LOGP(DLMGCP, LOGL_ERROR, "MGW pool is empty -- no MGW available!\n");
return NULL;
}
/* Pick a suitable pool member */
pool_member = mgcp_client_pool_pick(pool);
if (pool_member) {
pool_member->refcount++;
return pool_member->client;
}
return NULL;
}
/*! Put an MGCP client back into the pool (decrement reference counter).
* \param[in] mgcp_client MGCP client descriptor.
*
* This function automatically detects to which pool the mgcp_client belongs. If the mgcp_client does
* not belong to a pool at all, the function call will have no effect. */
void mgcp_client_pool_put(struct mgcp_client *mgcp_client)
{
struct mgcp_client_pool_member *pool_member;
struct mgcp_client_pool *pool;
if (!mgcp_client)
return;
if (mgcp_client->pool)
pool = mgcp_client->pool;
else
return;
llist_for_each_entry(pool_member, &pool->pool, list) {
if (pool_member->client == mgcp_client) {
if (pool_member->refcount == 0) {
LOGP(DLMGCP, LOGL_ERROR, "MGW %u has invalid refcount\n", pool_member->nr);
return;
}
pool_member->refcount--;
}
}
}
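
A minimal sketch of the intended per-call pattern (struct and function names are hypothetical): take a reference when the call is set up, keep the pointer for the lifetime of the call, and return it when the call ends.

#include <osmocom/mgcp_client/mgcp_client.h>
#include <osmocom/mgcp_client/mgcp_client_pool.h>

/* Hypothetical per-call context of an application that uses the pool. */
struct example_call {
        struct mgcp_client *mgcp;
        /* ... endpoint/connection state ... */
};

static int example_call_start(struct example_call *call, struct mgcp_client_pool *pool)
{
        /* Picks the unblocked, connected MGW with the fewest active references. */
        call->mgcp = mgcp_client_pool_get(pool);
        if (!call->mgcp)
                return -1; /* no MGW configured, or none of them is usable */
        /* ... CRCX/MDCX towards call->mgcp ... */
        return 0;
}

static void example_call_end(struct example_call *call)
{
        /* Decrements the pool member's refcount; a no-op for NULL or
         * non-pooled (single) clients. */
        mgcp_client_pool_put(call->mgcp);
        call->mgcp = NULL;
}
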

View File

@@ -24,17 +24,36 @@
#include <stdlib.h>
#include <talloc.h>
#include <osmocom/vty/vty.h>
#include <osmocom/vty/command.h>
#include <osmocom/vty/misc.h>
#include <osmocom/core/utils.h>
#include <osmocom/mgcp_client/mgcp_client.h>
#include <osmocom/mgcp_client/mgcp_client_internal.h>
#include <osmocom/mgcp_client/mgcp_client_pool_internal.h>
#define MGW_STR MGCP_CLIENT_MGW_STR
void *global_mgcp_client_ctx = NULL;
struct mgcp_client_conf *global_mgcp_client_conf = NULL;
/* Only common (non-pooled) VTY commands will use this talloc context. All
* pooled VTY commands will use the pool (global_mgcp_client_pool) as
* talloc context. */
static void *global_mgcp_client_ctx = NULL;
/* MGCP Client configuration used with mgcp_client_vty_init(). (This pointer
* points to user provided memory, so it cannot be used as talloc context.) */
static struct mgcp_client_conf *global_mgcp_client_conf = NULL;
/* Pointer to the MGCP pool that is managed by mgcp_client_pool_vty_init() */
static struct mgcp_client_pool *global_mgcp_client_pool = NULL;
struct mgcp_client_conf *get_mgcp_client_config(struct vty *vty)
{
if (global_mgcp_client_pool && vty->node == global_mgcp_client_pool->vty_node->node)
return vty->index;
else
return global_mgcp_client_conf;
}
DEFUN(cfg_mgw_local_ip, cfg_mgw_local_ip_cmd,
"mgw local-ip " VTY_IPV46_CMD,
@@ -42,8 +61,10 @@ DEFUN(cfg_mgw_local_ip, cfg_mgw_local_ip_cmd,
"local bind IPv4 address\n"
"local bind IPv6 address\n")
{
struct mgcp_client_conf *conf = get_mgcp_client_config(vty);
osmo_talloc_replace_string(global_mgcp_client_ctx,
(char**)&global_mgcp_client_conf->local_addr,
(char **)&conf->local_addr,
argv[0]);
return CMD_SUCCESS;
}
@@ -57,7 +78,9 @@ DEFUN(cfg_mgw_local_port, cfg_mgw_local_port_cmd,
MGW_STR "local port to connect to MGW from\n"
"local bind port\n")
{
global_mgcp_client_conf->local_port = atoi(argv[0]);
struct mgcp_client_conf *conf = get_mgcp_client_config(vty);
conf->local_port = atoi(argv[0]);
return CMD_SUCCESS;
}
ALIAS_DEPRECATED(cfg_mgw_local_port, cfg_mgcpgw_local_port_cmd,
@@ -71,9 +94,10 @@ DEFUN(cfg_mgw_remote_ip, cfg_mgw_remote_ip_cmd,
"remote IPv4 address\n"
"remote IPv6 address\n")
{
struct mgcp_client_conf *conf = get_mgcp_client_config(vty);
osmo_talloc_replace_string(global_mgcp_client_ctx,
(char**)&global_mgcp_client_conf->remote_addr,
argv[0]);
(char **)&conf->remote_addr, argv[0]);
return CMD_SUCCESS;
}
ALIAS_DEPRECATED(cfg_mgw_remote_ip, cfg_mgcpgw_remote_ip_cmd,
@@ -86,7 +110,9 @@ DEFUN(cfg_mgw_remote_port, cfg_mgw_remote_port_cmd,
MGW_STR "remote port to reach the MGW at\n"
"remote port\n")
{
global_mgcp_client_conf->remote_port = atoi(argv[0]);
struct mgcp_client_conf *conf = get_mgcp_client_config(vty);
conf->remote_port = atoi(argv[0]);
return CMD_SUCCESS;
}
ALIAS_DEPRECATED(cfg_mgw_remote_port, cfg_mgcpgw_remote_port_cmd,
@@ -136,11 +162,12 @@ DEFUN(cfg_mgw_endpoint_domain_name,
MGW_STR "Set the domain name to send in MGCP messages, e.g. the part 'foo' in 'rtpbridge/*@foo'.\n"
"Domain name, should be alphanumeric.\n")
{
if (osmo_strlcpy(global_mgcp_client_conf->endpoint_domain_name, argv[0],
sizeof(global_mgcp_client_conf->endpoint_domain_name))
>= sizeof(global_mgcp_client_conf->endpoint_domain_name)) {
struct mgcp_client_conf *conf = get_mgcp_client_config(vty);
if (osmo_strlcpy(conf->endpoint_domain_name, argv[0], sizeof(conf->endpoint_domain_name))
>= sizeof(conf->endpoint_domain_name)) {
vty_out(vty, "%% Error: 'mgw endpoint-domain' name too long, max length is %zu: '%s'%s",
sizeof(global_mgcp_client_conf->endpoint_domain_name) - 1, argv[0], VTY_NEWLINE);
sizeof(conf->endpoint_domain_name) - 1, argv[0], VTY_NEWLINE);
return CMD_WARNING;
}
return CMD_SUCCESS;
@@ -155,9 +182,10 @@ DEFUN(cfg_mgw_reset_ep_name,
{
int rc;
struct reset_ep *reset_ep;
struct mgcp_client_conf *conf = get_mgcp_client_config(vty);
/* stop when the address is already in the list */
llist_for_each_entry(reset_ep, &global_mgcp_client_conf->reset_epnames, list) {
llist_for_each_entry(reset_ep, &conf->reset_epnames, list) {
if (strcmp(argv[0], reset_ep->name) == 0) {
vty_out(vty, "%% duplicate endpoint name configured ('%s')%s", argv[0], VTY_NEWLINE);
return CMD_WARNING;
@@ -182,7 +210,7 @@ DEFUN(cfg_mgw_reset_ep_name,
return CMD_WARNING;
}
llist_add_tail(&reset_ep->list, &global_mgcp_client_conf->reset_epnames);
llist_add_tail(&reset_ep->list, &conf->reset_epnames);
return CMD_SUCCESS;
}
@@ -194,8 +222,9 @@ DEFUN(cfg_mgw_no_reset_ep_name,
"Endpoint name, e.g. 'rtpbridge/*' or 'ds/e1-0/s-3/su16-4'.\n")
{
struct reset_ep *reset_ep;
struct mgcp_client_conf *conf = get_mgcp_client_config(vty);
llist_for_each_entry(reset_ep, &global_mgcp_client_conf->reset_epnames, list) {
llist_for_each_entry(reset_ep, &conf->reset_epnames, list) {
if (strcmp(argv[0], reset_ep->name) == 0) {
llist_del(&reset_ep->list);
talloc_free(reset_ep);
@@ -207,44 +236,48 @@ DEFUN(cfg_mgw_no_reset_ep_name,
return CMD_WARNING;
}
int mgcp_client_config_write(struct vty *vty, const char *indent)
static int config_write(struct vty *vty, const char *indent, struct mgcp_client_conf *conf)
{
const char *addr;
int port;
struct reset_ep *reset_ep;
addr = global_mgcp_client_conf->local_addr;
addr = conf->local_addr;
if (addr)
vty_out(vty, "%smgw local-ip %s%s", indent, addr,
VTY_NEWLINE);
port = global_mgcp_client_conf->local_port;
port = conf->local_port;
if (port >= 0)
vty_out(vty, "%smgw local-port %u%s", indent,
(uint16_t)port, VTY_NEWLINE);
addr = global_mgcp_client_conf->remote_addr;
addr = conf->remote_addr;
if (addr)
vty_out(vty, "%smgw remote-ip %s%s", indent, addr,
VTY_NEWLINE);
port = global_mgcp_client_conf->remote_port;
port = conf->remote_port;
if (port >= 0)
vty_out(vty, "%smgw remote-port %u%s", indent,
(uint16_t)port, VTY_NEWLINE);
if (global_mgcp_client_conf->endpoint_domain_name[0])
if (conf->endpoint_domain_name[0])
vty_out(vty, "%smgw endpoint-domain %s%s", indent,
global_mgcp_client_conf->endpoint_domain_name, VTY_NEWLINE);
conf->endpoint_domain_name, VTY_NEWLINE);
llist_for_each_entry(reset_ep, &global_mgcp_client_conf->reset_epnames, list)
llist_for_each_entry(reset_ep, &conf->reset_epnames, list)
vty_out(vty, "%smgw reset-endpoint %s%s", indent, reset_ep->name, VTY_NEWLINE);
return CMD_SUCCESS;
}
void mgcp_client_vty_init(void *talloc_ctx, int node, struct mgcp_client_conf *conf)
int mgcp_client_config_write(struct vty *vty, const char *indent)
{
return config_write(vty, indent, global_mgcp_client_conf);
}
static void vty_init_common(void *talloc_ctx, int node)
{
global_mgcp_client_ctx = talloc_ctx;
global_mgcp_client_conf = conf;
install_lib_element(node, &cfg_mgw_local_ip_cmd);
install_lib_element(node, &cfg_mgw_local_port_cmd);
@@ -256,6 +289,13 @@ void mgcp_client_vty_init(void *talloc_ctx, int node, struct mgcp_client_conf *c
install_lib_element(node, &cfg_mgw_reset_ep_name_cmd);
install_lib_element(node, &cfg_mgw_no_reset_ep_name_cmd);
osmo_fsm_vty_add_cmds();
}
void mgcp_client_vty_init(void *talloc_ctx, int node, struct mgcp_client_conf *conf)
{
global_mgcp_client_conf = conf;
/* deprecated 'mgcpgw' commands */
install_lib_element(node, &cfg_mgcpgw_local_ip_cmd);
install_lib_element(node, &cfg_mgcpgw_local_port_cmd);
@@ -264,5 +304,233 @@ void mgcp_client_vty_init(void *talloc_ctx, int node, struct mgcp_client_conf *c
install_lib_element(node, &cfg_mgcpgw_endpoint_range_cmd);
install_lib_element(node, &cfg_mgcpgw_rtp_bts_base_port_cmd);
osmo_fsm_vty_add_cmds();
vty_init_common(talloc_ctx, node);
}
static int config_write_pool(struct vty *vty)
{
struct mgcp_client_pool *pool = global_mgcp_client_pool;
struct mgcp_client_pool_member *pool_member;
unsigned int indent_buf_len = strlen(pool->vty_indent) + 1 + 1;
char *indent = talloc_zero_size(vty, indent_buf_len);
snprintf(indent, indent_buf_len, "%s ", pool->vty_indent);
llist_for_each_entry(pool_member, &pool->pool, list) {
vty_out(vty, "%smgw %u%s", pool->vty_indent, pool_member->nr, VTY_NEWLINE);
config_write(vty, indent, &pool_member->conf);
}
talloc_free(indent);
return CMD_SUCCESS;
}
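
For illustration, a written-out configuration could look roughly like this; the 'msc' parent node and the addresses are made up, since the parent node and indent string are chosen by the application via mgcp_client_pool_vty_init(). Note that each pool member reuses the plain "mgw ..." options of the non-pooled config:

msc
 mgw 0
  mgw remote-ip 10.9.1.1
  mgw remote-port 2427
 mgw 1
  mgw remote-ip 10.9.1.2
  mgw remote-port 2427
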
/* Lookup the selected MGCP client config by its reference number */
static struct mgcp_client_pool_member *pool_member_by_nr(unsigned int nr)
{
struct mgcp_client_pool_member *pool_member = NULL;
struct mgcp_client_pool_member *pool_member_tmp;
llist_for_each_entry(pool_member_tmp, &global_mgcp_client_pool->pool, list) {
if (pool_member_tmp->nr == nr) {
pool_member = pool_member_tmp;
break;
}
}
return pool_member;
}
DEFUN_ATTR(cfg_mgw,
cfg_mgw_cmd, "mgw <0-255>", "Select an MGCP client config to set up\n" "reference number", CMD_ATTR_IMMEDIATE)
{
int nr = atoi(argv[0]);
struct mgcp_client_pool_member *pool_member;
pool_member = pool_member_by_nr(nr);
if (!pool_member) {
pool_member = talloc_zero(global_mgcp_client_pool, struct mgcp_client_pool_member);
OSMO_ASSERT(pool_member);
mgcp_client_conf_init(&pool_member->conf);
pool_member->nr = nr;
llist_add_tail(&pool_member->list, &global_mgcp_client_pool->pool);
}
vty->index = &pool_member->conf;
vty->index_sub = NULL;
vty->node = global_mgcp_client_pool->vty_node->node;
return CMD_SUCCESS;
}
DEFUN_ATTR(cfg_no_mgw,
cfg_no_mgw_cmd,
"no mgw <0-255>", "Select a MGCP client config to remove\n" "reference number", CMD_ATTR_IMMEDIATE)
{
int nr = atoi(argv[0]);
struct mgcp_client_pool_member *pool_member;
pool_member = pool_member_by_nr(nr);
if (!pool_member) {
vty_out(vty, "%% no such MGCP client configured ('%s')%s", argv[0], VTY_NEWLINE);
return CMD_WARNING;
}
/* Make sure that there are no ongoing calls */
if (pool_member->refcount > 0) {
vty_out(vty, "%% MGCP client (MGW %u) is still serving ongoing calls -- can't remove it now!%s",
pool_member->nr, VTY_NEWLINE);
return CMD_WARNING;
}
llist_del(&pool_member->list);
if (pool_member->client) {
mgcp_client_disconnect(pool_member->client);
talloc_free(pool_member->client);
}
talloc_free(pool_member);
return CMD_SUCCESS;
}
DEFUN_ATTR(mgw_reconnect, mgw_reconnect_cmd,
"mgw <0-255> reconnect",
MGW_STR "reconfigure and reconnect MGCP client\n", CMD_ATTR_IMMEDIATE)
{
int nr = atoi(argv[0]);
struct mgcp_client_pool_member *pool_member = NULL;
pool_member = pool_member_by_nr(nr);
if (!pool_member) {
vty_out(vty, "%% no such MGCP client configured ('%s')%s", argv[0], VTY_NEWLINE);
return CMD_WARNING;
}
/* Make sure that there are no ongoing calls */
if (pool_member->refcount > 0) {
vty_out(vty, "%% MGCP client (MGW %u) is still serving ongoing calls -- can't reconnect it now!%s",
pool_member->nr, VTY_NEWLINE);
return CMD_WARNING;
}
/* Get rid of a possibly existing old MGCP client instance first */
if (pool_member->client) {
mgcp_client_disconnect(pool_member->client);
talloc_free(pool_member->client);
}
/* Create a new MGCP client instance with the current config */
pool_member->client = mgcp_client_init(pool_member, &pool_member->conf);
if (!pool_member->client) {
LOGP(DLMGCP, LOGL_ERROR, "(manual) MGW %u initialization failed\n", pool_member->nr);
vty_out(vty, "%% MGCP client initialization failed ('%s')%s", argv[0], VTY_NEWLINE);
return CMD_WARNING;
}
/* Set backpointer so that we can detect later that this MGCP client is managed by this pool. */
pool_member->client->pool = global_mgcp_client_pool;
/* Connect client */
if (mgcp_client_connect(pool_member->client)) {
LOGP(DLMGCP, LOGL_ERROR, "(manual) MGW %u connect failed at (%s:%u)\n",
pool_member->nr, pool_member->conf.remote_addr, pool_member->conf.remote_port);
talloc_free(pool_member->client);
pool_member->client = NULL;
vty_out(vty, "%% MGCP client connect failed ('%s')%s", argv[0], VTY_NEWLINE);
return CMD_WARNING;
}
return CMD_SUCCESS;
}
DEFUN_ATTR(mgw_block, mgw_block_cmd,
"mgw <0-255> block",
MGW_STR "block MGCP client so that it won't be used for new calls\n", CMD_ATTR_IMMEDIATE)
{
int nr = atoi(argv[0]);
struct mgcp_client_pool_member *pool_member = NULL;
pool_member = pool_member_by_nr(nr);
if (!pool_member) {
vty_out(vty, "%% no such MGCP client configured ('%s')%s", argv[0], VTY_NEWLINE);
return CMD_WARNING;
}
pool_member->blocked = true;
return CMD_SUCCESS;
}
DEFUN_ATTR(mgw_unblock, mgw_unblock_cmd,
"mgw <0-255> unblock",
MGW_STR "unblock MGCP client so that it will be available for new calls\n", CMD_ATTR_IMMEDIATE)
{
int nr = atoi(argv[0]);
struct mgcp_client_pool_member *pool_member = NULL;
pool_member = pool_member_by_nr(nr);
if (!pool_member) {
vty_out(vty, "%% no such MGCP client configured ('%s')%s", argv[0], VTY_NEWLINE);
return CMD_WARNING;
}
pool_member->blocked = false;
return CMD_SUCCESS;
}
DEFUN(mgw_show, mgw_show_cmd, "show mgw-pool", SHOW_STR "Display information about the MGW-Pool\n")
{
vty_out(vty, "%% MGW-Pool:%s", VTY_NEWLINE);
struct mgcp_client_pool_member *pool_member;
if (llist_empty(&global_mgcp_client_pool->pool) && global_mgcp_client_pool->mgcp_client_single) {
vty_out(vty, "%% (pool is empty, single MGCP client will be used)%s", VTY_NEWLINE);
return CMD_SUCCESS;
} else if (llist_empty(&global_mgcp_client_pool->pool)) {
vty_out(vty, "%% (pool is empty)%s", VTY_NEWLINE);
return CMD_SUCCESS;
}
llist_for_each_entry(pool_member, &global_mgcp_client_pool->pool, list) {
vty_out(vty, "%% MGW %u%s", pool_member->nr, VTY_NEWLINE);
vty_out(vty, "%% mgcp-client: %s%s", pool_member->client ? "connected" : "disconnected",
VTY_NEWLINE);
vty_out(vty, "%% service: %s%s", pool_member->blocked ? "blocked" : "unblocked", VTY_NEWLINE);
vty_out(vty, "%% ongoing calls: %u%s", pool_member->refcount, VTY_NEWLINE);
}
return CMD_SUCCESS;
}
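
For illustration, the resulting VTY output could look roughly like this (prompt, numbers and exact spacing are illustrative):

OsmoMSC> show mgw-pool
% MGW-Pool:
% MGW 0
%  mgcp-client: connected
%  service: unblocked
%  ongoing calls: 2
% MGW 1
%  mgcp-client: disconnected
%  service: blocked
%  ongoing calls: 0
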
/*! Set up MGCP client VTY (pooled)
* (called once at startup by the application process).
* \param[in] parent_node identifier of the parent node on which the mgw node appears.
* \param[in] mgw_node identifier that should be used with the newly installed MGW node.
* \param[in] indent indentation string to match the indentation in the VTY config
* \param[in] pool user provided memory to store the configured MGCP client (MGW) pool. */
void mgcp_client_pool_vty_init(int parent_node, int mgw_node, const char *indent, struct mgcp_client_pool *pool)
{
/* Never allow this function to be called twice on the same pool */
OSMO_ASSERT(!pool->vty_indent);
OSMO_ASSERT(!pool->vty_node);
pool->vty_indent = talloc_strdup(pool, indent);
OSMO_ASSERT(pool->vty_indent);
pool->vty_node = talloc_zero(pool, struct cmd_node);
OSMO_ASSERT(pool->vty_node);
pool->vty_node->node = mgw_node;
pool->vty_node->vtysh = 1;
pool->vty_node->prompt = talloc_strdup(pool->vty_node, "%s(config-mgw)# ");
install_lib_element(parent_node, &cfg_mgw_cmd);
install_lib_element(parent_node, &cfg_no_mgw_cmd);
install_node(pool->vty_node, config_write_pool);
vty_init_common(pool, mgw_node);
install_lib_element(ENABLE_NODE, &mgw_reconnect_cmd);
install_lib_element(ENABLE_NODE, &mgw_block_cmd);
install_lib_element(ENABLE_NODE, &mgw_unblock_cmd);
install_lib_element_ve(&mgw_show_cmd);
global_mgcp_client_pool = pool;
}