tools/check_*.py: Allow most of them to run under Windows

Martin Mathieson 2023-06-11 20:49:33 +00:00
parent 014d17b471
commit 04c99663ff
8 changed files with 51 additions and 40 deletions

@@ -23,7 +23,6 @@
#include <epan/expert.h>
#include <epan/prefs.h>
#include <wsutil/wmem/wmem_tree.h>
void proto_register_udpcp(void);

@@ -81,25 +81,35 @@ if args.file_list:
# Tools that should be run on selected files.
# Boolean arg is for whether build-dir is needed in order to run it.
# 3rd is Windows support.
tools = [
('tools/delete_includes.py --folder .', True),
('tools/check_spelling.py', False),
('tools/check_tfs.py', False),
('tools/check_typed_item_calls.py --all-checks', False),
('tools/check_static.py', True),
('tools/check_dissector_urls.py', False),
('tools/check_val_to_str.py', False),
('tools/cppcheck/cppcheck.sh', False),
('tools/checkhf.pl', False),
('tools/checkAPIs.pl', False),
('tools/fix-encoding-args.pl', False),
('tools/checkfiltername.pl', False),
('tools/delete_includes.py --folder .', True, True),
('tools/check_spelling.py', False, True),
('tools/check_tfs.py', False, True),
('tools/check_typed_item_calls.py --all-checks', False, True),
('tools/check_static.py', True, False),
('tools/check_dissector_urls.py', False, True),
('tools/check_val_to_str.py', False, True),
('tools/cppcheck/cppcheck.sh', False, True),
('tools/checkhf.pl', False, True),
('tools/checkAPIs.pl', False, True),
('tools/fix-encoding-args.pl', False, True),
('tools/checkfiltername.pl', False, True)
]
def run_check(tool, dissectors, python):
# Create command-line with all dissectors included
command = tool[0]
command = ''
# Don't trust shebang on windows.
if sys.platform.startswith('win'):
if python:
command += 'python.exe '
else:
command += 'perl.exe '
command += tool[0]
if tool[1]:
command += ' --build-folder ' + args.build_folder
@@ -116,5 +126,8 @@ def run_check(tool, dissectors, python):
for tool in tools:
if should_exit:
exit(1)
if not tool[1] or (tool[1] and args.build_folder):
if ((not sys.platform.startswith('win') or tool[2]) and # Supported on this platform?
(not tool[1] or (tool[1] and args.build_folder))): # Have --build-folder if needed?
# Run it.
run_check(tool, dissectors, tool[0].find('.py') != -1)
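
The platform handling introduced above can be illustrated with a small, self-contained sketch: each tool carries a third flag for Windows support, and because Windows ignores the shebang line, the command is prefixed with python.exe or perl.exe explicitly. The tool entries, file list and simplified run_check() below are hypothetical, not the script's exact code.

    import subprocess
    import sys

    # Hypothetical (command, needs_build_folder, runs_on_windows) entries,
    # mirroring the 3-tuple layout introduced above.
    tools = [
        ('tools/check_spelling.py', False, True),
        ('tools/checkAPIs.pl',      False, True),
        ('tools/check_static.py',   True,  False),
    ]

    def run_check(tool, files, build_folder=None):
        command = ''
        # Windows does not honour the shebang, so name the interpreter explicitly.
        if sys.platform.startswith('win'):
            command += 'python.exe ' if tool[0].endswith('.py') else 'perl.exe '
        command += tool[0]
        if tool[1] and build_folder:
            command += ' --build-folder ' + build_folder
        command += ' ' + ' '.join(files)
        return subprocess.call(command, shell=True)

    files = ['epan/dissectors/packet-udpcp.c']
    for tool in tools:
        # Run only tools supported on this platform whose build-folder needs are met.
        if (not sys.platform.startswith('win') or tool[2]) and not tool[1]:
            run_check(tool, files)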

@@ -122,7 +122,7 @@ files = []
all_urls = set()
def find_links_in_file(filename):
with open(filename, 'r') as f:
with open(filename, 'r', encoding="utf8") as f:
for line_number, line in enumerate(f, start=1):
# TODO: not matching
# https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol

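The recurring encoding="utf8" additions throughout this commit address Windows' locale-dependent default encoding (commonly cp1252), which can raise UnicodeDecodeError on UTF-8 dissector sources. A minimal sketch of the pattern, using a hypothetical file path:

    # Without an explicit encoding, open() falls back to locale.getpreferredencoding(),
    # which is usually cp1252 on Windows and can choke on UTF-8 source files.
    with open('epan/dissectors/packet-example.c', 'r', encoding='utf8') as f:
        for line_number, line in enumerate(f, start=1):
            if 'http' in line:
                print(line_number, line.strip())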
@@ -69,7 +69,7 @@ class File:
self.code_file = extension in {'.c', '.cpp'}
with open(file, 'r') as f:
with open(file, 'r', encoding="utf8") as f:
contents = f.read()
if self.code_file:
@@ -88,7 +88,7 @@ class File:
# Add a string found in this file.
def add(self, value):
self.values.append(value)
self.values.append(value.encode('utf-8'))
# Whole word is not recognised, but is it 2 words concatenated (without camelcase) ?
def checkMultiWords(self, word):
@@ -156,6 +156,8 @@ class File:
if should_exit:
exit(1)
v = str(v)
# Ignore includes.
if v.endswith('.h'):
continue
@@ -277,7 +279,7 @@ def removeHexSpecifiers(code_string):
# Create a File object that knows about all of the strings in the given file.
def findStrings(filename):
with open(filename, 'r') as f:
with open(filename, 'r', encoding="utf8") as f:
contents = f.read()
# Remove comments & embedded quotes so as not to trip up RE.
@@ -293,7 +295,7 @@ def findStrings(filename):
if file.code_file:
contents = removeComments(contents)
# Code so only checking strings.
matches = re.finditer(r'\"([^\"]*)\"', contents)
matches = re.finditer(r'\"([^\"]*)\"', contents)
for m in matches:
file.add(m.group(1))
else:
@@ -314,7 +316,7 @@ def isGeneratedFile(filename):
return True
# Open file
f_read = open(os.path.join(filename), 'r')
f_read = open(os.path.join(filename), 'r', encoding="utf8")
for line_no,line in enumerate(f_read):
# The comment to say that its generated is near the top, so give up once
# get a few lines down.
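
A condensed sketch of the value handling above (a hypothetical class shape; the real File class in this script does considerably more): strings found in a source file are stored as UTF-8 bytes, and include-file names such as 'foo.h' are skipped before spell-checking.

    class FileStrings:
        def __init__(self, filename):
            self.filename = filename
            self.values = []

        def add(self, value):
            # Store as UTF-8 bytes, as in the change above.
            self.values.append(value.encode('utf-8'))

        def check(self):
            for v in self.values:
                v = v.decode('utf-8')          # sketch: decode for clarity
                # Ignore includes.
                if v.endswith('.h'):
                    continue
                print('would spell-check:', v)

    fs = FileStrings('packet-example.c')       # hypothetical file
    fs.add('confguration.h')                   # skipped: include-file name
    fs.add('confguration error')               # would be spell-checked
    fs.check()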

@@ -277,7 +277,7 @@ def removeComments(code_string):
def findTFS(filename):
tfs_found = {}
with open(filename, 'r') as f:
with open(filename, 'r', encoding="utf8") as f:
contents = f.read()
# Example: const true_false_string tfs_true_false = { "True", "False" };
@@ -305,7 +305,7 @@ def findValueStrings(filename):
# { 0, NULL }
#};
with open(filename, 'r') as f:
with open(filename, 'r', encoding="utf8") as f:
contents = f.read()
# Remove comments so as not to trip up RE.
@@ -323,7 +323,7 @@ def findValueStrings(filename):
def find_items(filename, macros, check_mask=False, mask_exact_width=False, check_label=False, check_consecutive=False):
is_generated = isGeneratedFile(filename)
items = {}
with open(filename, 'r') as f:
with open(filename, 'r', encoding="utf8") as f:
contents = f.read()
# Remove comments so as not to trip up RE.
contents = removeComments(contents)
@@ -342,7 +342,7 @@ def find_items(filename, macros, check_mask=False, mask_exact_width=False, check
def find_macros(filename):
macros = {}
with open(filename, 'r') as f:
with open(filename, 'r', encoding="utf8") as f:
contents = f.read()
# Remove comments so as not to trip up RE.
contents = removeComments(contents)

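The true_false_string lookup that findTFS() performs can be sketched with a simplified regular expression; the real script's pattern and bookkeeping differ.

    import re

    def find_tfs_strings(contents):
        # Match e.g.: const true_false_string tfs_true_false = { "True", "False" };
        pattern = r'const\s+true_false_string\s+([a-zA-Z0-9_]+)\s*=\s*\{\s*"([^"]*)"\s*,\s*"([^"]*)"\s*\}'
        found = {}
        for m in re.finditer(pattern, contents):
            found[m.group(1)] = (m.group(2), m.group(3))
        return found

    example = 'const true_false_string tfs_true_false = { "True", "False" };'
    print(find_tfs_strings(example))           # {'tfs_true_false': ('True', 'False')}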
@@ -124,7 +124,7 @@ class APICheck:
self.file = file
self.calls = []
with open(file, 'r') as f:
with open(file, 'r', encoding="utf8") as f:
contents = f.read()
lines = contents.splitlines()
total_lines = len(lines)
@@ -232,7 +232,7 @@ class ProtoTreeAddItemCheck(APICheck):
def find_calls(self, file, macros):
self.file = file
self.calls = []
with open(file, 'r') as f:
with open(file, 'r', encoding="utf8") as f:
contents = f.read()
lines = contents.splitlines()
@@ -771,7 +771,7 @@ class CombinedCallsCheck:
self.all_calls.sort(key=lambda x:x.line_number)
def check_consecutive_item_calls(self):
lines = open(self.file, 'r').read().splitlines()
lines = open(self.file, 'r', encoding="utf8").read().splitlines()
prev = None
for call in self.all_calls:
@@ -897,7 +897,7 @@ def removeComments(code_string):
# Test for whether the given file was automatically generated.
def isGeneratedFile(filename):
# Open file
f_read = open(os.path.join(filename), 'r')
f_read = open(os.path.join(filename), 'r', encoding="utf8")
lines_tested = 0
for line in f_read:
# The comment to say that its generated is near the top, so give up once
@@ -926,7 +926,7 @@ def isGeneratedFile(filename):
def find_macros(filename):
macros = {}
with open(filename, 'r') as f:
with open(filename, 'r', encoding="utf8") as f:
contents = f.read()
# Remove comments so as not to trip up RE.
contents = removeComments(contents)
@@ -942,7 +942,7 @@ def find_macros(filename):
def find_items(filename, macros, check_mask=False, mask_exact_width=False, check_label=False, check_consecutive=False):
is_generated = isGeneratedFile(filename)
items = {}
with open(filename, 'r') as f:
with open(filename, 'r', encoding="utf8") as f:
contents = f.read()
# Remove comments so as not to trip up RE.
contents = removeComments(contents)
@@ -969,7 +969,7 @@ def find_items(filename, macros, check_mask=False, mask_exact_width=False, check
# TODO: return items (rather than local checks) from here so can be checked against list of calls for given filename
def find_field_arrays(filename, all_fields, all_hf):
global warnings_found
with open(filename, 'r') as f:
with open(filename, 'r', encoding="utf8") as f:
contents = f.read()
# Remove comments so as not to trip up RE.
contents = removeComments(contents)
@@ -1027,7 +1027,7 @@ def find_field_arrays(filename, all_fields, all_hf):
def find_item_declarations(filename):
items = set()
with open(filename, 'r') as f:
with open(filename, 'r', encoding="utf8") as f:
lines = f.read().splitlines()
p = re.compile(r'^static int (hf_[a-zA-Z0-9_]*)\s*\=\s*-1;')
for line in lines:
@@ -1038,7 +1038,7 @@ def find_item_declarations(filename):
def find_item_extern_declarations(filename):
items = set()
with open(filename, 'r') as f:
with open(filename, 'r', encoding="utf8") as f:
lines = f.read().splitlines()
p = re.compile(r'^\s*(hf_[a-zA-Z0-9_]*)\s*\=\s*proto_registrar_get_id_byname\s*\(')
for line in lines:

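A short usage sketch for the hf_ declaration patterns shown above; the input lines and hf_example_length are hypothetical samples, not taken from a real dissector.

    import re

    lines = [
        'static int hf_example_length = -1;',
        '    hf_example_external = proto_registrar_get_id_byname("ip.len");',
    ]

    decl_re   = re.compile(r'^static int (hf_[a-zA-Z0-9_]*)\s*\=\s*-1;')
    extern_re = re.compile(r'^\s*(hf_[a-zA-Z0-9_]*)\s*\=\s*proto_registrar_get_id_byname\s*\(')

    items = set()
    for line in lines:
        m = decl_re.match(line) or extern_re.match(line)
        if m:
            items.add(m.group(1))
    print(sorted(items))                       # ['hf_example_external', 'hf_example_length']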
@@ -33,7 +33,7 @@ signal.signal(signal.SIGINT, signal_handler)
# Test for whether the given file was automatically generated.
def isGeneratedFile(filename):
# Open file
f_read = open(os.path.join(filename), 'r')
f_read = open(os.path.join(filename), 'r', encoding="utf8")
lines_tested = 0
for line in f_read:
# The comment to say that its generated is near the top, so give up once
@@ -106,7 +106,7 @@ def checkFile(filename):
print(filename, 'does not exist!')
return
with open(filename, 'r') as f:
with open(filename, 'r', encoding="utf8") as f:
contents = f.read()
# Remove comments so as not to trip up RE.

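The isGeneratedFile() idea shared by several of these scripts, now reading with an explicit UTF-8 encoding, can be sketched compactly. The marker strings tested below are assumptions for illustration, not the scripts' exact checks.

    def is_generated_file(filename):
        # Generated sources announce themselves near the top, so only scan a few lines.
        with open(filename, 'r', encoding='utf8') as f:
            for line_no, line in enumerate(f):
                if line_no >= 10:
                    return False
                if 'Generated automatically' in line or 'Autogenerated' in line:
                    return True
        return False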
@@ -55,7 +55,7 @@ class bcolors:
# scan whole epan/dissectors folder.
parser = argparse.ArgumentParser(description='Check calls in dissectors')
# required
parser.add_argument('--build-folder', action='store',
parser.add_argument('--build-folder', action='store', required=True,
help='specify individual dissector file to test')
parser.add_argument('--file', action='append',
help='specify individual dissector file to test')
@@ -73,10 +73,7 @@ args = parser.parse_args()
test_folder = os.path.join(os.getcwd(), args.folder)
#run_folder = args.build_folder
# Work out wireshark folder based upon CWD. Assume run in wireshark folder
wireshark_root = os.getcwd()
# Usually only building one module, so no -j benefit?
make_command = ['cmake', '--build', args.build_folder]
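
check_static.py now insists on --build-folder and drives a CMake build before checking symbols. A minimal sketch of that invocation; the '--target epan' restriction is a hypothetical illustration, not taken from the script.

    import subprocess

    build_folder = 'build'                           # value passed as --build-folder
    # Usually only building one module, so no -j benefit.
    make_command = ['cmake', '--build', build_folder]
    make_command += ['--target', 'epan']             # hypothetical: build a single target
    subprocess.run(make_command, check=True)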