# osmo_gsm_tester: manage resources
#
# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
#
# Author: Neels Hofmeyr <neels@hofmeyr.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
|
2017-03-28 12:30:28 +00:00
|
|
|
import copy
|
|
|
|
import atexit
|
|
|
|
import pprint
|
2017-03-28 10:16:58 +00:00
|
|
|
|
2020-05-04 16:58:53 +00:00
|
|
|
from . import log
|
|
|
|
from . import config
|
|
|
|
from . import util
|
|
|
|
from . import schema
|
2017-03-28 10:16:58 +00:00
|
|
|
|
2020-05-04 16:58:53 +00:00
|
|
|
from .util import is_dict, is_list
|
2017-03-28 10:16:58 +00:00
|
|
|
|
2017-03-28 12:30:28 +00:00
|
|
|
HASH_KEY = '_hash'
|
|
|
|
RESERVED_KEY = '_reserved_by'
|
|
|
|
USED_KEY = '_used'
|
2017-03-28 10:16:58 +00:00
|
|
|
|
2017-03-28 12:30:28 +00:00
|
|
|
RESERVED_RESOURCES_FILE = 'reserved_resources.state'
|
2017-03-28 10:16:58 +00:00
|
|
|
|
2017-05-18 16:35:32 +00:00
|
|
|
R_IP_ADDRESS = 'ip_address'
|
2020-02-11 16:41:13 +00:00
|
|
|
R_RUN_NODE = 'run_node'
|
2017-03-28 12:30:28 +00:00
|
|
|
R_BTS = 'bts'
|
|
|
|
R_ARFCN = 'arfcn'
|
|
|
|
R_MODEM = 'modem'
|
2018-05-17 14:59:58 +00:00
|
|
|
R_OSMOCON = 'osmocon_phone'
|
2020-02-11 16:45:26 +00:00
|
|
|
R_ENB = 'enb'
|
2017-09-10 14:33:10 +00:00
|
|
|
|
2017-03-28 12:30:28 +00:00
|
|
|
class ResourcesPool(log.Origin):
|
|
|
|
_remember_to_free = None
|
|
|
|
_registered_exit_handler = False
|
|
|
|
|
|
|
|
def __init__(self):
|
2020-03-11 19:11:08 +00:00
|
|
|
self.config_path = config.get_config_file(config.RESOURCES_CONF)
|
2017-03-28 12:30:28 +00:00
|
|
|
self.state_dir = config.get_state_dir()
|
fix and refactor logging: drop 'with', simplify
With the recent fix of the junit report related issues, another issue arose:
the 'with log.Origin' was changed to disallow __enter__ing an object twice to
fix problems, now still code would fail because it tries to do 'with' on the
same object twice. The only reason is to ensure that logging is associated with
a given object. Instead of complicating even more, implement differently.
Refactor logging to simplify use: drop the 'with Origin' style completely, and
instead use the python stack to determine which objects are created by which,
and which object to associate a log statement with.
The new way: we rely on the convention that each class instance has a local
'self' referencing the object instance. If we need to find an origin as a new
object's parent, or to associate a log message with, we traverse each stack
frame, fetching the first local 'self' object that is a log.Origin class
instance.
How to use:
Simply call log.log() anywhere, and it finds an Origin object to log for, from
the stack. Alternatively call self.log() for any Origin() object to skip the
lookup.
Create classes as child class of log.Origin and make sure to call
super().__init__(category, name). This constructor will magically find a parent
Origin on the stack.
When an exception happens, we first escalate the exception up through call
scopes to where ever it is handled by log.log_exn(). This then finds an Origin
object in the traceback's stack frames, no need to nest in 'with' scopes.
Hence the 'with log.Origin' now "happens implicitly", we can write pure natural
python code, no more hassles with scope ordering.
Furthermore, any frame can place additional logging information in a frame by
calling log.ctx(). This is automatically inserted in the ancestry associated
with a log statement / exception.
Change-Id: I5f9b53150f2bb6fa9d63ce27f0806f0ca6a45e90
2017-06-09 23:18:27 +00:00
|
|
|
super().__init__(log.C_CNF, conf=self.config_path, state=self.state_dir.path)
|
2017-03-28 12:30:28 +00:00
|
|
|
self.read_conf()
|
|
|
|
|
|
|
|
def read_conf(self):
|
2020-05-04 10:05:05 +00:00
|
|
|
self.all_resources = Resources(config.read(self.config_path, schema.get_resources_schema()))
|
2017-03-28 12:30:28 +00:00
|
|
|
self.all_resources.set_hashes()
|
|
|
|
|
2018-08-21 12:58:29 +00:00
|
|
|
def reserve(self, origin, want, modifiers):
|
2017-03-28 12:30:28 +00:00
|
|
|
'''
|
|
|
|
attempt to reserve the resources specified in the dict 'want' for
|
|
|
|
'origin'. Obtain a lock on the resources lock dir, verify that all
|
|
|
|
wanted resources are available, and if yes mark them as reserved.
|
|
|
|
|
|
|
|
On success, return a reservation object which can be used to release
|
|
|
|
the reservation. The reservation will be freed automatically on program
|
|
|
|
exit, if not yet done manually.
|
|
|
|
|
|
|
|
'origin' should be an Origin() instance.
|
|
|
|
|
2018-08-21 12:58:29 +00:00
|
|
|
'want' is a dict matching RESOURCES_SCHEMA, used to specify what to
|
|
|
|
reserve.
|
|
|
|
|
|
|
|
'modifiers' is a dict matching RESOURCES_SCHEMA, it is overlaid on top
|
|
|
|
of 'want'.
|
2017-03-28 12:30:28 +00:00
|
|
|
|
2017-09-10 14:33:10 +00:00
|
|
|
If an entry has no attribute set, any of the resources may be
|
2017-03-28 12:30:28 +00:00
|
|
|
reserved without further limitations.
|
|
|
|
|
|
|
|
ResourcesPool may also be selected with narrowed down constraints.
|
2017-05-18 16:35:32 +00:00
|
|
|
This would reserve one IP address, two modems, one BTS of type
|
2017-05-18 17:22:12 +00:00
|
|
|
sysmo and one of type trx, plus 2 ARFCNs in the 1800 band:
|
2017-03-28 12:30:28 +00:00
|
|
|
|
|
|
|
{
|
2017-05-18 16:35:32 +00:00
|
|
|
'ip_address': [ {} ],
|
2017-05-18 17:22:12 +00:00
|
|
|
'bts': [ { 'type': 'sysmo' }, { 'type': 'trx' } ],
|
2017-09-10 14:33:10 +00:00
|
|
|
'arfcn': [ { 'band': 'GSM-1800' }, { 'band': 'GSM-1800' } ],
|
|
|
|
'modem': [ {}, {} ],
|
2017-03-28 12:30:28 +00:00
|
|
|
}
|
|
|
|
'''
|
2020-05-04 10:05:05 +00:00
|
|
|
schema.validate(want, schema.get_resources_schema())
|
|
|
|
schema.validate(modifiers, schema.get_resources_schema())
|
2017-03-28 12:30:28 +00:00
|
|
|
|
|
|
|
origin_id = origin.origin_id()
|
|
|
|
|
|
|
|
with self.state_dir.lock(origin_id):
|
|
|
|
rrfile_path = self.state_dir.mk_parentdir(RESERVED_RESOURCES_FILE)
|
|
|
|
reserved = Resources(config.read(rrfile_path, if_missing_return={}))
|
2017-05-06 23:16:07 +00:00
|
|
|
to_be_reserved = self.all_resources.without(reserved).find(origin, want)
|
2017-03-28 12:30:28 +00:00
|
|
|
|
|
|
|
to_be_reserved.mark_reserved_by(origin_id)
|
|
|
|
|
|
|
|
reserved.add(to_be_reserved)
|
|
|
|
config.write(rrfile_path, reserved)
|
|
|
|
|
|
|
|
self.remember_to_free(to_be_reserved)
|
2018-08-21 12:58:29 +00:00
|
|
|
return ReservedResources(self, origin, to_be_reserved, modifiers)
|
2017-03-28 12:30:28 +00:00
|
|
|
|
|
|
|
def free(self, origin, to_be_freed):
|
fix and refactor logging: drop 'with', simplify
With the recent fix of the junit report related issues, another issue arose:
the 'with log.Origin' was changed to disallow __enter__ing an object twice to
fix problems, now still code would fail because it tries to do 'with' on the
same object twice. The only reason is to ensure that logging is associated with
a given object. Instead of complicating even more, implement differently.
Refactor logging to simplify use: drop the 'with Origin' style completely, and
instead use the python stack to determine which objects are created by which,
and which object to associate a log statement with.
The new way: we rely on the convention that each class instance has a local
'self' referencing the object instance. If we need to find an origin as a new
object's parent, or to associate a log message with, we traverse each stack
frame, fetching the first local 'self' object that is a log.Origin class
instance.
How to use:
Simply call log.log() anywhere, and it finds an Origin object to log for, from
the stack. Alternatively call self.log() for any Origin() object to skip the
lookup.
Create classes as child class of log.Origin and make sure to call
super().__init__(category, name). This constructor will magically find a parent
Origin on the stack.
When an exception happens, we first escalate the exception up through call
scopes to where ever it is handled by log.log_exn(). This then finds an Origin
object in the traceback's stack frames, no need to nest in 'with' scopes.
Hence the 'with log.Origin' now "happens implicitly", we can write pure natural
python code, no more hassles with scope ordering.
Furthermore, any frame can place additional logging information in a frame by
calling log.ctx(). This is automatically inserted in the ancestry associated
with a log statement / exception.
Change-Id: I5f9b53150f2bb6fa9d63ce27f0806f0ca6a45e90
2017-06-09 23:18:27 +00:00
|
|
|
log.ctx(origin)
|
2017-03-28 12:30:28 +00:00
|
|
|
with self.state_dir.lock(origin.origin_id()):
|
|
|
|
rrfile_path = self.state_dir.mk_parentdir(RESERVED_RESOURCES_FILE)
|
|
|
|
reserved = Resources(config.read(rrfile_path, if_missing_return={}))
|
|
|
|
reserved.drop(to_be_freed)
|
|
|
|
config.write(rrfile_path, reserved)
|
|
|
|
self.forget_freed(to_be_freed)
|
|
|
|
|
|
|
|
def register_exit_handler(self):
|
|
|
|
if self._registered_exit_handler:
|
|
|
|
return
|
|
|
|
atexit.register(self.clean_up_registered_resources)
|
|
|
|
self._registered_exit_handler = True
|
|
|
|
|
|
|
|
def unregister_exit_handler(self):
|
|
|
|
if not self._registered_exit_handler:
|
|
|
|
return
|
|
|
|
atexit.unregister(self.clean_up_registered_resources)
|
|
|
|
self._registered_exit_handler = False
|
|
|
|
|
|
|
|
def clean_up_registered_resources(self):
|
|
|
|
if not self._remember_to_free:
|
|
|
|
return
|
|
|
|
self.free(log.Origin('atexit.clean_up_registered_resources()'),
|
|
|
|
self._remember_to_free)
|
|
|
|
|
|
|
|
def remember_to_free(self, to_be_reserved):
|
|
|
|
self.register_exit_handler()
|
|
|
|
if not self._remember_to_free:
|
|
|
|
self._remember_to_free = Resources()
|
|
|
|
self._remember_to_free.add(to_be_reserved)
|
|
|
|
|
|
|
|
def forget_freed(self, freed):
|
|
|
|
if freed is self._remember_to_free:
|
|
|
|
self._remember_to_free.clear()
|
|
|
|
else:
|
|
|
|
self._remember_to_free.drop(freed)
|
|
|
|
if not self._remember_to_free:
|
|
|
|
self.unregister_exit_handler()
|
|
|
|
|
2017-11-06 17:09:09 +00:00
|
|
|
def next_persistent_value(self, token, first_val, validate_func, inc_func, origin):
|
2017-03-28 12:30:28 +00:00
|
|
|
origin_id = origin.origin_id()
|
|
|
|
|
|
|
|
with self.state_dir.lock(origin_id):
|
2017-11-06 17:09:09 +00:00
|
|
|
token_path = self.state_dir.child('last_used_%s.state' % token)
|
|
|
|
log.ctx(token_path)
|
|
|
|
last_value = first_val
|
|
|
|
if os.path.exists(token_path):
|
|
|
|
if not os.path.isfile(token_path):
|
|
|
|
raise RuntimeError('path should be a file but is not: %r' % token_path)
|
|
|
|
with open(token_path, 'r') as f:
|
|
|
|
last_value = f.read().strip()
|
|
|
|
validate_func(last_value)
|
|
|
|
|
|
|
|
next_value = inc_func(last_value)
|
|
|
|
with open(token_path, 'w') as f:
|
|
|
|
f.write(next_value)
|
|
|
|
return next_value
|
|
|
|
|
|
|
|
def next_msisdn(self, origin):
|
|
|
|
return self.next_persistent_value('msisdn', '1000', schema.msisdn, util.msisdn_inc, origin)
|
2017-03-28 12:30:28 +00:00
|
|
|
|
2017-11-06 17:40:23 +00:00
|
|
|
def next_lac(self, origin):
|
2017-11-28 11:56:35 +00:00
|
|
|
# LAC=0 has special meaning (MS detached), avoid it
|
|
|
|
return self.next_persistent_value('lac', '1', schema.uint16, lambda x: str(((int(x)+1) % pow(2,16)) or 1), origin)
|
2017-03-28 12:30:28 +00:00
|
|
|
|
2017-11-28 14:50:02 +00:00
|
|
|
def next_rac(self, origin):
|
|
|
|
return self.next_persistent_value('rac', '1', schema.uint8, lambda x: str((int(x)+1) % pow(2,8) or 1), origin)
|
|
|
|
|
2017-11-07 10:13:20 +00:00
|
|
|
def next_cellid(self, origin):
|
|
|
|
return self.next_persistent_value('cellid', '1', schema.uint16, lambda x: str((int(x)+1) % pow(2,16)), origin)
|
|
|
|
|
2017-11-28 14:50:02 +00:00
|
|
|
def next_bvci(self, origin):
|
|
|
|
# BVCI=0 and =1 are reserved, avoid them.
|
|
|
|
return self.next_persistent_value('bvci', '2', schema.uint16, lambda x: str(int(x)+1) if int(x) < pow(2,16) - 1 else '2', origin)
|
|
|
|
|
2017-09-14 15:35:03 +00:00
|
|
|
class NoResourceExn(log.Error):
|
2017-03-28 12:30:28 +00:00
|
|
|
pass
|
|
|
|
|
|
|
|
class Resources(dict):
|
|
|
|
|
|
|
|
def __init__(self, all_resources={}, do_copy=True):
|
|
|
|
if do_copy:
|
|
|
|
all_resources = copy.deepcopy(all_resources)
|
|
|
|
self.update(all_resources)
|
|
|
|
|
|
|
|
def drop(self, reserved, fail_if_not_found=True):
|
|
|
|
# protect from modifying reserved because we're the same object
|
|
|
|
if reserved is self:
|
|
|
|
raise RuntimeError('Refusing to drop a list of resources from itself.'
|
|
|
|
' This is probably a bug where a list of Resources()'
|
|
|
|
' should have been copied but is passed as-is.'
|
|
|
|
' use Resources.clear() instead.')
|
|
|
|
|
|
|
|
for key, reserved_list in reserved.items():
|
|
|
|
my_list = self.get(key) or []
|
|
|
|
|
|
|
|
if my_list is reserved_list:
|
|
|
|
self.pop(key)
|
|
|
|
continue
|
|
|
|
|
|
|
|
for reserved_item in reserved_list:
|
|
|
|
found = False
|
|
|
|
reserved_hash = reserved_item.get(HASH_KEY)
|
|
|
|
if not reserved_hash:
|
|
|
|
raise RuntimeError('Resources.drop() only works with hashed items')
|
|
|
|
|
|
|
|
for i in range(len(my_list)):
|
|
|
|
my_item = my_list[i]
|
|
|
|
my_hash = my_item.get(HASH_KEY)
|
|
|
|
if not my_hash:
|
|
|
|
raise RuntimeError('Resources.drop() only works with hashed items')
|
|
|
|
if my_hash == reserved_hash:
|
|
|
|
found = True
|
|
|
|
my_list.pop(i)
|
|
|
|
break
|
|
|
|
|
|
|
|
if fail_if_not_found and not found:
|
|
|
|
raise RuntimeError('Asked to drop resource from a pool, but the'
|
|
|
|
' resource was not found: %s = %r' % (key, reserved_item))
|
|
|
|
|
|
|
|
if not my_list:
|
|
|
|
self.pop(key)
|
|
|
|
return self
|
|
|
|
|
|
|
|
def without(self, reserved):
|
|
|
|
return Resources(self).drop(reserved)
|
|
|
|
|
2017-05-06 23:16:07 +00:00
|
|
|
def find(self, for_origin, want, skip_if_marked=None, do_copy=True, raise_if_missing=True, log_label='Reserving'):
|
2017-05-06 21:18:23 +00:00
|
|
|
'''
|
|
|
|
Pass a dict of resource requirements, e.g.:
|
|
|
|
want = {
|
|
|
|
'bts': [ {'type': 'osmo-bts-sysmo',}, {} ],
|
2017-09-10 14:33:10 +00:00
|
|
|
'modem': [ {}, {}, {} ]
|
2017-05-06 21:18:23 +00:00
|
|
|
}
|
|
|
|
This function tries to find a combination from the available resources that
|
2017-09-10 14:33:10 +00:00
|
|
|
matches these requirements. The return value is a dict (wrapped in a Resources class)
|
2017-05-06 21:18:23 +00:00
|
|
|
that contains the matching resources in the order of 'want' dict: in above
|
|
|
|
example, the returned dict would have a 'bts' list with the first item being
|
|
|
|
a sysmoBTS, the second item being any other available BTS.
|
|
|
|
|
|
|
|
If skip_if_marked is passed, any resource that contains this key is skipped.
|
|
|
|
E.g. if a BTS has the USED_KEY set like
|
|
|
|
reserved_resources = { 'bts' : {..., '_used': True} }
|
|
|
|
then this may be skipped by passing skip_if_marked='_used'
|
|
|
|
(or rather skip_if_marked=USED_KEY).
|
|
|
|
|
|
|
|
If do_copy is True, the returned dict is a deep copy and does not share
|
|
|
|
lists with any other Resources dict.
|
|
|
|
|
|
|
|
If raise_if_missing is False, this will return an empty item for any
|
|
|
|
resource that had no match, instead of immediately raising an exception.
|
2017-09-10 14:33:10 +00:00
|
|
|
|
|
|
|
This function expects input dictionaries whose contents have already
|
|
|
|
been replicated based on its the 'times' attributes. See
|
|
|
|
config.replicate_times() for more details.
|
2017-05-06 21:18:23 +00:00
|
|
|
'''
|
2017-03-28 12:30:28 +00:00
|
|
|
matches = {}
|
2017-04-12 00:42:02 +00:00
|
|
|
for key, want_list in sorted(want.items()): # sorted for deterministic test results
|
2017-05-06 23:16:07 +00:00
|
|
|
# here we have a resource of a given type, e.g. 'bts', with a list
|
|
|
|
# containing as many BTSes as the caller wants to reserve/use. Each
|
|
|
|
# list item contains specifics for the particular BTS.
|
2017-05-28 23:36:21 +00:00
|
|
|
my_list = self.get(key, [])
|
2017-03-28 12:30:28 +00:00
|
|
|
|
2017-06-06 17:47:40 +00:00
|
|
|
if log_label:
|
|
|
|
for_origin.log(log_label, len(want_list), 'x', key, '(candidates: %d)'%len(my_list))
|
2017-03-28 12:30:28 +00:00
|
|
|
|
|
|
|
# Try to avoid a less constrained item snatching away a resource
|
|
|
|
# from a more detailed constrained requirement.
|
|
|
|
|
2017-05-06 21:18:23 +00:00
|
|
|
# first record all matches, so that each requested item has a list
|
|
|
|
# of all available resources that match it. Some resources may
|
|
|
|
# appear for multiple requested items. Store matching indexes.
|
2017-03-28 12:30:28 +00:00
|
|
|
all_matches = []
|
|
|
|
for want_item in want_list:
|
|
|
|
item_match_list = []
|
|
|
|
for i in range(len(my_list)):
|
|
|
|
my_item = my_list[i]
|
|
|
|
if skip_if_marked and my_item.get(skip_if_marked):
|
|
|
|
continue
|
2017-09-10 14:33:10 +00:00
|
|
|
if item_matches(my_item, want_item):
|
2017-03-28 12:30:28 +00:00
|
|
|
item_match_list.append(i)
|
|
|
|
if not item_match_list:
|
2017-05-06 21:20:33 +00:00
|
|
|
if raise_if_missing:
|
|
|
|
raise NoResourceExn('No matching resource available for %s = %r'
|
|
|
|
% (key, want_item))
|
|
|
|
else:
|
|
|
|
# this one failed... see below
|
|
|
|
all_matches = []
|
|
|
|
break
|
|
|
|
|
2017-03-28 12:30:28 +00:00
|
|
|
all_matches.append( item_match_list )
|
|
|
|
|
|
|
|
if not all_matches:
|
2017-05-06 21:20:33 +00:00
|
|
|
# ...this one failed. Makes no sense to solve resource
|
|
|
|
# allocations, return an empty list for this key to mark
|
|
|
|
# failure.
|
|
|
|
matches[key] = []
|
|
|
|
continue
|
2017-03-28 12:30:28 +00:00
|
|
|
|
|
|
|
# figure out who gets what
|
2017-06-06 17:47:40 +00:00
|
|
|
try:
|
|
|
|
solution = solve(all_matches)
|
|
|
|
except NotSolvable:
|
|
|
|
# instead of a cryptic error message, raise an exception that
|
|
|
|
# conveys meaning to the user.
|
|
|
|
raise NoResourceExn('Could not resolve request to reserve resources: '
|
|
|
|
'%d x %s with requirements: %r' % (len(want_list), key, want_list))
|
2017-03-28 12:30:28 +00:00
|
|
|
picked = [ my_list[i] for i in solution if i is not None ]
|
2017-05-06 23:16:07 +00:00
|
|
|
for_origin.dbg('Picked', config.tostr(picked))
|
2017-03-28 12:30:28 +00:00
|
|
|
matches[key] = picked
|
|
|
|
|
|
|
|
return Resources(matches, do_copy=do_copy)
|
|
|
|
|
|
|
|
def set_hashes(self):
|
|
|
|
for key, item_list in self.items():
|
|
|
|
for item in item_list:
|
|
|
|
item[HASH_KEY] = util.hash_obj(item, HASH_KEY, RESERVED_KEY, USED_KEY)
|
|
|
|
|
|
|
|
def add(self, more):
|
|
|
|
if more is self:
|
|
|
|
raise RuntimeError('adding a list of resources to itself?')
|
2020-05-04 10:05:05 +00:00
|
|
|
schema.add(self, copy.deepcopy(more))
|
2017-03-28 12:30:28 +00:00
|
|
|
|
|
|
|
def mark_reserved_by(self, origin_id):
|
|
|
|
for key, item_list in self.items():
|
|
|
|
for item in item_list:
|
|
|
|
item[RESERVED_KEY] = origin_id
|
|
|
|
|
|
|
|
|
2017-06-06 17:47:40 +00:00
|
|
|
class NotSolvable(Exception):
|
|
|
|
pass
|
|
|
|
|
2017-03-28 12:30:28 +00:00
|
|
|
def solve(all_matches):
|
|
|
|
'''
|
|
|
|
all_matches shall be a list of index-lists.
|
|
|
|
all_matches[i] is the list of indexes that item i can use.
|
|
|
|
Return a solution so that each i gets a different index.
|
|
|
|
solve([ [0, 1, 2],
|
|
|
|
[0],
|
|
|
|
[0, 2] ]) == [1, 0, 2]
|
|
|
|
'''
|
|
|
|
|
|
|
|
def all_differ(l):
|
|
|
|
return len(set(l)) == len(l)
|
|
|
|
|
|
|
|
def search_in_permutations(fixed=[]):
|
|
|
|
idx = len(fixed)
|
|
|
|
for i in range(len(all_matches[idx])):
|
|
|
|
val = all_matches[idx][i]
|
|
|
|
# don't add a val that's already in the list
|
|
|
|
if val in fixed:
|
|
|
|
continue
|
|
|
|
l = list(fixed)
|
|
|
|
l.append(val)
|
|
|
|
if len(l) == len(all_matches):
|
|
|
|
# found a solution
|
|
|
|
return l
|
|
|
|
# not at the end yet, add next digit
|
|
|
|
r = search_in_permutations(l)
|
|
|
|
if r:
|
|
|
|
# nested search_in_permutations() call found a solution
|
|
|
|
return r
|
|
|
|
# this entire branch yielded no solution
|
|
|
|
return None
|
|
|
|
|
|
|
|
if not all_matches:
|
|
|
|
raise RuntimeError('Cannot solve: no candidates')
|
|
|
|
|
|
|
|
solution = search_in_permutations()
|
|
|
|
if not solution:
|
2017-06-06 17:47:40 +00:00
|
|
|
raise NotSolvable('The requested resource requirements are not solvable %r'
|
|
|
|
% all_matches)
|
2017-03-28 12:30:28 +00:00
|
|
|
return solution
|
|
|
|
|
|
|
|
|
|
|
|
def contains_hash(list_of_dicts, a_hash):
|
|
|
|
for d in list_of_dicts:
|
|
|
|
if d.get(HASH_KEY) == a_hash:
|
|
|
|
return True
|
|
|
|
return False
|
|
|
|
|
|
|
|
def item_matches(item, wanted_item, ignore_keys=None):
|
|
|
|
if is_dict(wanted_item):
|
|
|
|
# match up two dicts
|
|
|
|
if not isinstance(item, dict):
|
|
|
|
return False
|
|
|
|
for key, wanted_val in wanted_item.items():
|
|
|
|
if ignore_keys and key in ignore_keys:
|
|
|
|
continue
|
|
|
|
if not item_matches(item.get(key), wanted_val, ignore_keys=ignore_keys):
|
|
|
|
return False
|
|
|
|
return True
|
|
|
|
|
|
|
|
if is_list(wanted_item):
|
resource: Fix list comparison in item_matches
In following commits, cipher attribute containing a list of supported
ciphers is introdued in osmo-gsm-tester. While developing the feature,
it was found that resources containing lists are not being handled
correctly.
Previous implementation regarding lists in several ways:
- In the list coe path, both item and wanted_item are expected to be
lists. Python doesn't support to check for sublists using the "in"
operand.
- want_item basically contains the scenario dic, and item is each of the
items available as resource of a given type. Thus, we want to check that
item supports the subset of features requested by the scenario, ie. that
the list in the scenario is a subset of the ones in the3 item and not
the opposite.
The following failing scenario is going to be checked during "make check"
when the cipher attribute is added: BTS supporting cipher a50 and a51,
and scenario requesting a50 succeeds. If this commit is remove, the test
no longer passes, and it fails with:
osmo_gsm_tester.resource.NoResourceExn: No matching resource available for
bts = {'type': 'osmo-bts-sysmo', 'ciphers': ['a5 1']}
Change-Id: I27b372aa5906feac2843f24f5cdd0d9578d44b4d
2017-08-28 11:29:28 +00:00
|
|
|
if not is_list(item):
|
2017-03-28 12:30:28 +00:00
|
|
|
return False
|
2017-09-14 13:33:15 +00:00
|
|
|
# Validate that all elements in both lists are of the same type:
|
|
|
|
t = util.list_validate_same_elem_type(wanted_item + item)
|
|
|
|
if t is None:
|
|
|
|
return True # both lists are empty, return
|
|
|
|
# For lists of complex objects, we expect them to be sorted lists:
|
|
|
|
if t in (dict, list, tuple):
|
|
|
|
for i in range(max(len(wanted_item), len(item))):
|
|
|
|
log.ctx(idx=i)
|
|
|
|
subitem = item[i] if i < len(item) else util.empty_instance_type(t)
|
|
|
|
wanted_subitem = wanted_item[i] if i < len(wanted_item) else util.empty_instance_type(t)
|
|
|
|
if not item_matches(subitem, wanted_subitem, ignore_keys=ignore_keys):
|
|
|
|
return False
|
|
|
|
else: # for lists of basic elements, we handle them as unsorted sets:
|
|
|
|
for val in wanted_item:
|
|
|
|
if val not in item:
|
|
|
|
return False
|
2017-03-28 12:30:28 +00:00
|
|
|
return True
|
|
|
|
|
|
|
|
return item == wanted_item
|
|
|
|
|
|
|
|
|
|
|
|
class ReservedResources(log.Origin):
|
|
|
|
'''
|
|
|
|
After all resources have been figured out, this is the API that a test case
|
|
|
|
gets to interact with resources. From those resources that have been
|
|
|
|
reserved for it, it can pick some to mark them as currently in use.
|
|
|
|
Functions like nitb() provide a resource by automatically picking its
|
|
|
|
dependencies from so far unused (but reserved) resource.
|
|
|
|
'''
|
|
|
|
|
2018-08-21 12:58:29 +00:00
|
|
|
def __init__(self, resources_pool, origin, reserved, modifiers):
|
2017-03-28 12:30:28 +00:00
|
|
|
self.resources_pool = resources_pool
|
|
|
|
self.origin = origin
|
2018-08-21 12:58:29 +00:00
|
|
|
self.reserved_original = reserved
|
|
|
|
self.reserved = copy.deepcopy(self.reserved_original)
|
|
|
|
config.overlay(self.reserved, modifiers)
|
2017-03-28 12:30:28 +00:00
|
|
|
|
|
|
|
def __repr__(self):
|
|
|
|
return 'resources(%s)=%s' % (self.origin.name(), pprint.pformat(self.reserved))
|
|
|
|
|
|
|
|
def get(self, kind, specifics=None):
|
|
|
|
if specifics is None:
|
|
|
|
specifics = {}
|
|
|
|
self.dbg('requesting use of', kind, specifics=specifics)
|
|
|
|
want = { kind: [specifics] }
|
2017-05-06 23:16:07 +00:00
|
|
|
available_dict = self.reserved.find(self.origin, want, skip_if_marked=USED_KEY,
|
|
|
|
do_copy=False, raise_if_missing=False,
|
|
|
|
log_label='Using')
|
2017-03-28 12:30:28 +00:00
|
|
|
available = available_dict.get(kind)
|
|
|
|
self.dbg(available=len(available))
|
|
|
|
if not available:
|
2017-06-06 17:47:40 +00:00
|
|
|
# cook up a detailed error message for the current situation
|
|
|
|
kind_reserved = self.reserved.get(kind, [])
|
|
|
|
used_count = len([r for r in kind_reserved if USED_KEY in r])
|
|
|
|
matching = self.reserved.find(self.origin, want, raise_if_missing=False, log_label=None).get(kind, [])
|
|
|
|
if not matching:
|
|
|
|
msg = 'none of the reserved resources matches requirements %r' % specifics
|
|
|
|
elif not (used_count < len(kind_reserved)):
|
|
|
|
msg = 'suite.conf reserved only %d x %r.' % (len(kind_reserved), kind)
|
|
|
|
else:
|
|
|
|
msg = ('No unused resource left that matches the requirements;'
|
|
|
|
' Of reserved %d x %r, %d match the requirements, but all are already in use;'
|
|
|
|
' Requirements: %r'
|
|
|
|
% (len(kind_reserved), kind, len(matching), specifics))
|
|
|
|
raise NoResourceExn('When trying to use instance nr %d of %r: %s' % (used_count + 1, kind, msg))
|
|
|
|
|
2017-03-28 12:30:28 +00:00
|
|
|
pick = available[0]
|
|
|
|
self.dbg(using=pick)
|
|
|
|
assert not pick.get(USED_KEY)
|
|
|
|
pick[USED_KEY] = True
|
|
|
|
return copy.deepcopy(pick)
|
|
|
|
|
|
|
|
def put(self, item):
|
|
|
|
if not item.get(USED_KEY):
|
|
|
|
raise RuntimeError('Can only put() a resource that is used: %r' % item)
|
|
|
|
hash_to_put = item.get(HASH_KEY)
|
|
|
|
if not hash_to_put:
|
|
|
|
raise RuntimeError('Can only put() a resource that has a hash marker: %r' % item)
|
|
|
|
for key, item_list in self.reserved.items():
|
|
|
|
my_list = self.get(key)
|
|
|
|
for my_item in my_list:
|
|
|
|
if hash_to_put == my_item.get(HASH_KEY):
|
|
|
|
my_item.pop(USED_KEY)
|
|
|
|
|
|
|
|
def put_all(self):
|
2017-06-13 16:07:57 +00:00
|
|
|
if not self.reserved:
|
|
|
|
return
|
2017-03-28 12:30:28 +00:00
|
|
|
for key, item_list in self.reserved.items():
|
2017-06-13 16:07:57 +00:00
|
|
|
for item in item_list:
|
|
|
|
item.pop(USED_KEY, None)
|
2017-03-28 12:30:28 +00:00
|
|
|
|
|
|
|
def free(self):
|
2018-08-21 12:58:29 +00:00
|
|
|
if self.reserved_original:
|
|
|
|
self.resources_pool.free(self.origin, self.reserved_original)
|
|
|
|
self.reserved_original = None
|
2017-03-28 10:16:58 +00:00
|
|
|
|
2017-05-22 18:02:41 +00:00
|
|
|
def counts(self):
|
|
|
|
counts = {}
|
|
|
|
for key in self.reserved.keys():
|
|
|
|
counts[key] = self.count(key)
|
|
|
|
return counts
|
|
|
|
|
|
|
|
def count(self, key):
|
|
|
|
return len(self.reserved.get(key) or [])
|
2017-03-28 10:16:58 +00:00
|
|
|
|
|
|
|
# vim: expandtab tabstop=4 shiftwidth=4
|