2017-03-28 12:30:28 +00:00
|
|
|
# osmo_gsm_tester: specifics for running an osmo-nitb
|
|
|
|
#
|
|
|
|
# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
|
|
|
|
#
|
|
|
|
# Author: Neels Hofmeyr <neels@hofmeyr.de>
|
|
|
|
#
|
|
|
|
# This program is free software: you can redistribute it and/or modify
|
2017-06-03 07:51:45 +00:00
|
|
|
# it under the terms of the GNU General Public License as
|
2017-03-28 12:30:28 +00:00
|
|
|
# published by the Free Software Foundation, either version 3 of the
|
|
|
|
# License, or (at your option) any later version.
|
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
2017-06-03 07:51:45 +00:00
|
|
|
# GNU General Public License for more details.
|
2017-03-28 12:30:28 +00:00
|
|
|
#
|
2017-06-03 07:51:45 +00:00
|
|
|
# You should have received a copy of the GNU General Public License
|
2017-03-28 12:30:28 +00:00
|
|
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
|
|
|
import os
|
|
|
|
import re
|
2017-05-22 18:16:03 +00:00
|
|
|
import pprint
|
2017-03-28 12:30:28 +00:00
|
|
|
|
2017-05-30 13:33:57 +00:00
|
|
|
from . import log, util, config, template, process, osmo_ctrl, pcap_recorder, smsc
|
2017-03-28 12:30:28 +00:00
|
|
|
|
|
|
|
class OsmoNitb(log.Origin):
    """Run and manage one osmo-nitb instance for a test run.

    Handles config file generation (via template rendering), process launch
    with a pcap recorder on the NITB's network interface, and subscriber
    management through the NITB's CTRL interface (see OsmoNitbCtrl).
    """
    # Class-level defaults; all are (re)assigned per instance.
    suite_run = None     # the suite_run that owns this NITB (run dirs, config, resources)
    ip_address = None    # dict-like with at least an 'addr' key -- TODO confirm full schema at caller
    run_dir = None       # util.Dir created under the test run dir at start()
    config_file = None   # path of the rendered osmo-nitb.cfg, set in configure()
    process = None       # process.Process handle of the launched binary
    bts = None           # list of BTS objects attached via bts_add()
    smsc = None          # smsc.Smsc bound to this NITB's address, port 2775 (SMPP)
    encryption = None    # optional A5 encryption setting, applied at configure() time

    def __init__(self, suite_run, ip_address):
        # log.Origin magically finds the parent Origin on the stack; the name
        # includes the address so log output distinguishes multiple NITBs.
        super().__init__(log.C_RUN, 'osmo-nitb_%s' % ip_address.get('addr'))
        self.suite_run = suite_run
        self.ip_address = ip_address
        self.bts = []
        self.smsc = smsc.Smsc((ip_address.get('addr'), 2775))

    def start(self):
        """Configure and launch the osmo-nitb binary from the trial's inst dir.

        Raises RuntimeError if the binary or its lib/ dir is missing.
        The process is registered with suite_run so it is stopped on teardown.
        """
        self.log('Starting osmo-nitb')
        self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
        self.configure()
        inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmo-nitb')))
        binary = inst.child('bin', 'osmo-nitb')
        # Fail early with a clear message rather than an obscure launch error.
        if not os.path.isfile(binary):
            raise RuntimeError('Binary missing: %r' % binary)
        lib = inst.child('lib')
        if not os.path.isdir(lib):
            raise RuntimeError('No lib/ in %r' % inst)

        # Record all traffic to/from this NITB's address, except SSH (port 22),
        # on the local interface that carries the configured address.
        iface = util.ip_to_iface(self.addr())
        pcap_recorder.PcapRecorder(self.suite_run, self.run_dir.new_dir('pcap'), iface,
                                   'host %s and port not 22' % self.addr())

        # The binary is built against libs shipped in the trial's inst dir.
        env = { 'LD_LIBRARY_PATH': util.prepend_library_path(lib) }

        self.dbg(run_dir=self.run_dir, binary=binary, env=env)
        self.process = process.Process(self.name(), self.run_dir,
                                       (binary, '-c',
                                        os.path.abspath(self.config_file)),
                                       env=env)
        self.suite_run.remember_to_stop(self.process)
        self.process.launch()

    def configure(self):
        """Render osmo-nitb.cfg into the run dir from layered config values.

        Overlay order matters: later overlays win. Defaults < suite config
        < ip_address < BTS list < SMSC config < runtime parameters.
        """
        self.config_file = self.run_dir.new_file('osmo-nitb.cfg')
        self.dbg(config_file=self.config_file)

        values = dict(nitb=config.get_defaults('nitb'))
        config.overlay(values, self.suite_run.config())
        config.overlay(values, dict(nitb=dict(ip_address=self.ip_address)))

        # Collect per-BTS config from all BTS attached via bts_add().
        bts_list = []
        for bts in self.bts:
            bts_list.append(bts.conf_for_bsc())
        config.overlay(values, dict(nitb=dict(net=dict(bts_list=bts_list))))
        config.overlay(values, self.smsc.get_config())

        # runtime parameters:
        if self.encryption is not None:
            config.overlay(values, dict(nitb=dict(net=dict(encryption=self.encryption))))

        # Keep the final merged values so mcc()/mnc() can read them back.
        self.config = values

        self.dbg('NITB CONFIG:\n' + pprint.pformat(values))

        with open(self.config_file, 'w') as f:
            r = template.render('osmo-nitb.cfg', values)
            self.dbg(r)
            f.write(r)

    def addr(self):
        """Return the IP address this NITB is bound to."""
        return self.ip_address.get('addr')

    def bts_add(self, bts):
        """Attach a BTS to this NITB; must happen before configure()/start()."""
        self.bts.append(bts)
        bts.set_bsc(self)

    def set_encryption(self, val):
        """Set the encryption value overlaid into the net config at configure() time."""
        self.encryption = val

    def mcc(self):
        # Only valid after configure() has populated self.config.
        return self.config['nitb']['net']['mcc']

    def mnc(self):
        # Only valid after configure() has populated self.config.
        return self.config['nitb']['net']['mnc']

    def mcc_mnc(self):
        """Return (mcc, mnc) tuple, e.g. for modem network registration."""
        return (self.mcc(), self.mnc())

    def subscriber_add(self, modem, msisdn=None):
        """Register the modem's IMSI/ki as a subscriber via the CTRL interface.

        If no msisdn is given, the next free one is taken from the resource pool.
        """
        if msisdn is None:
            msisdn = self.suite_run.resources_pool.next_msisdn(modem)
        modem.set_msisdn(msisdn)
        self.log('Add subscriber', msisdn=msisdn, imsi=modem.imsi())
        OsmoNitbCtrl(self).subscriber_add(modem.imsi(), msisdn, modem.ki())

    def subscriber_attached(self, *modems):
        """Return True when all given modems' IMSIs are attached to this NITB."""
        return self.imsi_attached(*[m.imsi() for m in modems])

    def imsi_attached(self, *imsis):
        """Return True when every given IMSI appears in the active-subscriber list."""
        attached = self.imsi_list_attached()
        self.dbg('attached:', attached)
        # substring check against the CTRL answer string -- see imsi_list_attached()
        return all([(imsi in attached) for imsi in imsis])

    def imsi_list_attached(self):
        """Query the list of active subscribers over the CTRL interface."""
        return OsmoNitbCtrl(self).subscriber_list_active()

    def running(self):
        """Return True while the launched process has not terminated.

        NOTE(review): assumes start() was called (self.process is set).
        """
        return not self.process.terminated()
|
|
|
|
|
|
|
|
|
|
|
|
class OsmoNitbCtrl(log.Origin):
    """Client for the osmo-nitb CTRL interface (subscriber management).

    Opens a fresh CTRL connection per operation via ctrl(); used by
    OsmoNitb.subscriber_add() and OsmoNitb.imsi_list_attached().
    """
    PORT = 4249
    SUBSCR_MODIFY_VAR = 'subscriber-modify-v1'
    # Raw string: '\d' in a non-raw literal is an invalid escape sequence
    # (DeprecationWarning since Python 3.6, an error in newer versions).
    SUBSCR_MODIFY_REPLY_RE = re.compile(r"SET_REPLY (\d+) %s OK" % SUBSCR_MODIFY_VAR)
    SUBSCR_LIST_ACTIVE_VAR = 'subscriber-list-active-v1'

    def __init__(self, nitb):
        # nitb: the OsmoNitb instance whose CTRL port we talk to.
        self.nitb = nitb
        super().__init__(log.C_BUS, 'CTRL(%s:%d)' % (self.nitb.addr(), OsmoNitbCtrl.PORT))

    def ctrl(self):
        """Return a new OsmoCtrl connection (context manager) to the NITB."""
        return osmo_ctrl.OsmoCtrl(self.nitb.addr(), OsmoNitbCtrl.PORT)

    def subscriber_add(self, imsi, msisdn, ki=None, algo=None):
        """Create/modify a subscriber via the CTRL SET command.

        If a ki is given without an algorithm, comp128v1 is assumed.
        Raises RuntimeError when the NITB does not acknowledge with SET_REPLY OK.
        """
        if ki and not algo:
            algo = 'comp128v1'

        # CTRL value format: 'imsi,msisdn[,algo,ki]'
        if algo:
            value = '%s,%s,%s,%s' % (imsi, msisdn, algo, ki)
        else:
            value = '%s,%s' % (imsi, msisdn)

        with self.ctrl() as ctrl:
            ctrl.do_set(OsmoNitbCtrl.SUBSCR_MODIFY_VAR, value)
            data = ctrl.receive()
            (answer, data) = ctrl.remove_ipa_ctrl_header(data)
            answer_str = answer.decode('utf-8')
            res = OsmoNitbCtrl.SUBSCR_MODIFY_REPLY_RE.match(answer_str)
            if not res:
                raise RuntimeError('Cannot create subscriber %r (answer=%r)' % (imsi, answer_str))
            self.dbg('Created subscriber', imsi=imsi, msisdn=msisdn)

    def subscriber_list_active(self):
        """Fetch the active-subscriber list via the CTRL GET command.

        Returns the answer payload as a single space-joined string (callers
        do substring matching on IMSIs against it).
        """
        aslist_str = ""
        with self.ctrl() as ctrl:
            ctrl.do_get(OsmoNitbCtrl.SUBSCR_LIST_ACTIVE_VAR)
            # This is legacy code from the old osmo-gsm-tester.
            # looks like this doesn't work for long data.
            # NOTE(review): each loop iteration overwrites aslist_str rather
            # than accumulating; only the last CTRL message survives. Kept
            # as-is since the legacy comment suggests this is known behavior.
            data = ctrl.receive()
            while (len(data) > 0):
                (answer, data) = ctrl.remove_ipa_ctrl_header(data)
                answer_str = answer.decode('utf-8')
                answer_str = answer_str.replace('\n', ' ')
                aslist_str = answer_str
            return aslist_str
|
|
|
|
|
|
|
|
# vim: expandtab tabstop=4 shiftwidth=4
|