From 99c31f09214da46166da28c477070a44d3b83367 Mon Sep 17 00:00:00 2001
From: Harald Welte
Date: Mon, 23 Mar 2015 20:26:19 +0100
Subject: [PATCH] import empty gr-adsbtx skeleton

---
 gr-adsbtx/CMakeLists.txt                      |  160 +
 gr-adsbtx/apps/CMakeLists.txt                 |   25 +
 .../Modules/CMakeParseArgumentsCopy.cmake     |  138 +
 gr-adsbtx/cmake/Modules/FindCppUnit.cmake     |   36 +
 .../cmake/Modules/FindGnuradioRuntime.cmake   |   36 +
 gr-adsbtx/cmake/Modules/GrMiscUtils.cmake     |  210 +
 gr-adsbtx/cmake/Modules/GrPlatform.cmake      |   46 +
 gr-adsbtx/cmake/Modules/GrPython.cmake        |  227 +
 gr-adsbtx/cmake/Modules/GrSwig.cmake          |  229 +
 gr-adsbtx/cmake/Modules/GrTest.cmake          |  133 +
 gr-adsbtx/cmake/Modules/UseSWIG.cmake         |  304 +
 gr-adsbtx/cmake/Modules/adsbtxConfig.cmake    |   30 +
 gr-adsbtx/cmake/cmake_uninstall.cmake.in      |   32 +
 gr-adsbtx/docs/CMakeLists.txt                 |   35 +
 gr-adsbtx/docs/README.adsbtx                  |   11 +
 gr-adsbtx/docs/doxygen/CMakeLists.txt         |   52 +
 gr-adsbtx/docs/doxygen/Doxyfile.in            | 1922 ++++
 gr-adsbtx/docs/doxygen/Doxyfile.swig_doc.in   | 1890 ++++
 gr-adsbtx/docs/doxygen/doxyxml/__init__.py    |   82 +
 gr-adsbtx/docs/doxygen/doxyxml/__init__.pyc   |  Bin 0 -> 2208 bytes
 gr-adsbtx/docs/doxygen/doxyxml/base.py        |  219 +
 gr-adsbtx/docs/doxygen/doxyxml/base.pyc       |  Bin 0 -> 7979 bytes
 gr-adsbtx/docs/doxygen/doxyxml/doxyindex.py   |  237 +
 gr-adsbtx/docs/doxygen/doxyxml/doxyindex.pyc  |  Bin 0 -> 9889 bytes
 .../doxygen/doxyxml/generated/__init__.py     |    7 +
 .../doxygen/doxyxml/generated/__init__.pyc    |  Bin 0 -> 418 bytes
 .../doxygen/doxyxml/generated/compound.py     |  503 +
 .../doxygen/doxyxml/generated/compound.pyc    |  Bin 0 -> 34242 bytes
 .../doxyxml/generated/compoundsuper.py        | 8342 +++++++++++++++++
 .../doxyxml/generated/compoundsuper.pyc       |  Bin 0 -> 479090 bytes
 .../docs/doxygen/doxyxml/generated/index.py   |   77 +
 .../docs/doxygen/doxyxml/generated/index.pyc  |  Bin 0 -> 3112 bytes
 .../doxygen/doxyxml/generated/indexsuper.py   |  523 ++
 .../doxygen/doxyxml/generated/indexsuper.pyc  |  Bin 0 -> 27746 bytes
 gr-adsbtx/docs/doxygen/doxyxml/text.py        |   56 +
 gr-adsbtx/docs/doxygen/doxyxml/text.pyc       |  Bin 0 -> 1461 bytes
 gr-adsbtx/docs/doxygen/other/group_defs.dox   |    7 +
 gr-adsbtx/docs/doxygen/other/main_page.dox    |   10 +
 gr-adsbtx/docs/doxygen/swig_doc.py            |  255 +
 gr-adsbtx/docs/doxygen/swig_doc.pyc           |  Bin 0 -> 7519 bytes
 gr-adsbtx/examples/README                     |    4 +
 gr-adsbtx/grc/CMakeLists.txt                  |   22 +
 gr-adsbtx/include/adsbtx/CMakeLists.txt       |   26 +
 gr-adsbtx/include/adsbtx/api.h                |   33 +
 gr-adsbtx/lib/CMakeLists.txt                  |   78 +
 gr-adsbtx/lib/qa_adsbtx.cc                    |   36 +
 gr-adsbtx/lib/qa_adsbtx.h                     |   38 +
 gr-adsbtx/lib/test_adsbtx.cc                  |   48 +
 gr-adsbtx/python/CMakeLists.txt               |   43 +
 gr-adsbtx/python/__init__.py                  |   34 +
 gr-adsbtx/python/__init__.pyc                 |  Bin 0 -> 359 bytes
 gr-adsbtx/python/build_utils.py               |  226 +
 gr-adsbtx/python/build_utils.pyc              |  Bin 0 -> 7834 bytes
 gr-adsbtx/python/build_utils_codes.py         |   52 +
 gr-adsbtx/python/build_utils_codes.pyc        |  Bin 0 -> 1513 bytes
 gr-adsbtx/swig/CMakeLists.txt                 |   65 +
 gr-adsbtx/swig/adsbtx_swig.i                  |   13 +
 57 files changed, 16552 insertions(+)
 create mode 100644 gr-adsbtx/CMakeLists.txt
 create mode 100644 gr-adsbtx/apps/CMakeLists.txt
 create mode 100644 gr-adsbtx/cmake/Modules/CMakeParseArgumentsCopy.cmake
 create mode 100644 gr-adsbtx/cmake/Modules/FindCppUnit.cmake
 create mode 100644 gr-adsbtx/cmake/Modules/FindGnuradioRuntime.cmake
 create mode 100644 gr-adsbtx/cmake/Modules/GrMiscUtils.cmake
 create mode 100644 gr-adsbtx/cmake/Modules/GrPlatform.cmake
 create mode 100644 gr-adsbtx/cmake/Modules/GrPython.cmake
 create mode 100644 gr-adsbtx/cmake/Modules/GrSwig.cmake
 create mode 100644 gr-adsbtx/cmake/Modules/GrTest.cmake
 create mode 100644 gr-adsbtx/cmake/Modules/UseSWIG.cmake
 create mode 100644 gr-adsbtx/cmake/Modules/adsbtxConfig.cmake
 create mode 100644 gr-adsbtx/cmake/cmake_uninstall.cmake.in
 create mode 100644 gr-adsbtx/docs/CMakeLists.txt
 create mode 100644 gr-adsbtx/docs/README.adsbtx
 create mode 100644 gr-adsbtx/docs/doxygen/CMakeLists.txt
 create mode 100644 gr-adsbtx/docs/doxygen/Doxyfile.in
 create mode 100644 gr-adsbtx/docs/doxygen/Doxyfile.swig_doc.in
 create mode 100644 gr-adsbtx/docs/doxygen/doxyxml/__init__.py
 create mode 100644 gr-adsbtx/docs/doxygen/doxyxml/__init__.pyc
 create mode 100644 gr-adsbtx/docs/doxygen/doxyxml/base.py
 create mode 100644 gr-adsbtx/docs/doxygen/doxyxml/base.pyc
 create mode 100644 gr-adsbtx/docs/doxygen/doxyxml/doxyindex.py
 create mode 100644 gr-adsbtx/docs/doxygen/doxyxml/doxyindex.pyc
 create mode 100644 gr-adsbtx/docs/doxygen/doxyxml/generated/__init__.py
 create mode 100644 gr-adsbtx/docs/doxygen/doxyxml/generated/__init__.pyc
 create mode 100644 gr-adsbtx/docs/doxygen/doxyxml/generated/compound.py
 create mode 100644 gr-adsbtx/docs/doxygen/doxyxml/generated/compound.pyc
 create mode 100644 gr-adsbtx/docs/doxygen/doxyxml/generated/compoundsuper.py
 create mode 100644 gr-adsbtx/docs/doxygen/doxyxml/generated/compoundsuper.pyc
 create mode 100644 gr-adsbtx/docs/doxygen/doxyxml/generated/index.py
 create mode 100644 gr-adsbtx/docs/doxygen/doxyxml/generated/index.pyc
 create mode 100644 gr-adsbtx/docs/doxygen/doxyxml/generated/indexsuper.py
 create mode 100644 gr-adsbtx/docs/doxygen/doxyxml/generated/indexsuper.pyc
 create mode 100644 gr-adsbtx/docs/doxygen/doxyxml/text.py
 create mode 100644 gr-adsbtx/docs/doxygen/doxyxml/text.pyc
 create mode 100644 gr-adsbtx/docs/doxygen/other/group_defs.dox
 create mode 100644 gr-adsbtx/docs/doxygen/other/main_page.dox
 create mode 100644 gr-adsbtx/docs/doxygen/swig_doc.py
 create mode 100644 gr-adsbtx/docs/doxygen/swig_doc.pyc
 create mode 100644 gr-adsbtx/examples/README
 create mode 100644 gr-adsbtx/grc/CMakeLists.txt
 create mode 100644 gr-adsbtx/include/adsbtx/CMakeLists.txt
 create mode 100644 gr-adsbtx/include/adsbtx/api.h
 create mode 100644 gr-adsbtx/lib/CMakeLists.txt
 create mode 100644 gr-adsbtx/lib/qa_adsbtx.cc
 create mode 100644 gr-adsbtx/lib/qa_adsbtx.h
 create mode 100644 gr-adsbtx/lib/test_adsbtx.cc
 create mode 100644 gr-adsbtx/python/CMakeLists.txt
 create mode 100644 gr-adsbtx/python/__init__.py
 create mode 100644 gr-adsbtx/python/__init__.pyc
 create mode 100644 gr-adsbtx/python/build_utils.py
 create mode 100644 gr-adsbtx/python/build_utils.pyc
 create mode 100644 gr-adsbtx/python/build_utils_codes.py
 create mode 100644 gr-adsbtx/python/build_utils_codes.pyc
 create mode 100644 gr-adsbtx/swig/CMakeLists.txt
 create mode 100644 gr-adsbtx/swig/adsbtx_swig.i

diff --git a/gr-adsbtx/CMakeLists.txt b/gr-adsbtx/CMakeLists.txt
new file mode 100644
index 0000000..45326e8
--- /dev/null
+++ b/gr-adsbtx/CMakeLists.txt
@@ -0,0 +1,160 @@
+# Copyright 2011,2012 Free Software Foundation, Inc.
+#
+# This file is part of GNU Radio
+#
+# GNU Radio is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GNU Radio is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. + + +######################################################################## +# Project setup +######################################################################## +cmake_minimum_required(VERSION 2.6) +project(gr-adsbtx CXX C) +enable_testing() + +#select the release build type by default to get optimization flags +if(NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE "Release") + message(STATUS "Build type not specified: defaulting to release.") +endif(NOT CMAKE_BUILD_TYPE) +set(CMAKE_BUILD_TYPE ${CMAKE_BUILD_TYPE} CACHE STRING "") + +list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/Modules) + +######################################################################## +# Compiler specific setup +######################################################################## +if(CMAKE_COMPILER_IS_GNUCXX AND NOT WIN32) + #http://gcc.gnu.org/wiki/Visibility + add_definitions(-fvisibility=hidden) +endif() + +######################################################################## +# Find boost +######################################################################## +if(UNIX AND EXISTS "/usr/lib64") + list(APPEND BOOST_LIBRARYDIR "/usr/lib64") #fedora 64-bit fix +endif(UNIX AND EXISTS "/usr/lib64") +set(Boost_ADDITIONAL_VERSIONS + "1.35.0" "1.35" "1.36.0" "1.36" "1.37.0" "1.37" "1.38.0" "1.38" "1.39.0" "1.39" + "1.40.0" "1.40" "1.41.0" "1.41" "1.42.0" "1.42" "1.43.0" "1.43" "1.44.0" "1.44" + "1.45.0" "1.45" "1.46.0" "1.46" "1.47.0" "1.47" "1.48.0" "1.48" "1.49.0" "1.49" + "1.50.0" "1.50" "1.51.0" "1.51" "1.52.0" "1.52" "1.53.0" "1.53" "1.54.0" "1.54" + "1.55.0" "1.55" "1.56.0" "1.56" "1.57.0" "1.57" "1.58.0" "1.58" "1.59.0" "1.59" + "1.60.0" "1.60" "1.61.0" "1.61" "1.62.0" "1.62" "1.63.0" "1.63" "1.64.0" "1.64" + "1.65.0" "1.65" "1.66.0" "1.66" "1.67.0" "1.67" "1.68.0" "1.68" "1.69.0" "1.69" +) +find_package(Boost "1.35" COMPONENTS filesystem system) + +if(NOT Boost_FOUND) + message(FATAL_ERROR "Boost required to compile adsbtx") +endif() + +######################################################################## +# Install directories +######################################################################## +include(GrPlatform) #define LIB_SUFFIX +set(GR_RUNTIME_DIR bin) +set(GR_LIBRARY_DIR lib${LIB_SUFFIX}) +set(GR_INCLUDE_DIR include/adsbtx) +set(GR_DATA_DIR share) +set(GR_PKG_DATA_DIR ${GR_DATA_DIR}/${CMAKE_PROJECT_NAME}) +set(GR_DOC_DIR ${GR_DATA_DIR}/doc) +set(GR_PKG_DOC_DIR ${GR_DOC_DIR}/${CMAKE_PROJECT_NAME}) +set(GR_CONF_DIR etc) +set(GR_PKG_CONF_DIR ${GR_CONF_DIR}/${CMAKE_PROJECT_NAME}/conf.d) +set(GR_LIBEXEC_DIR libexec) +set(GR_PKG_LIBEXEC_DIR ${GR_LIBEXEC_DIR}/${CMAKE_PROJECT_NAME}) +set(GRC_BLOCKS_DIR ${GR_PKG_DATA_DIR}/grc/blocks) + +######################################################################## +# Find gnuradio build dependencies +######################################################################## +find_package(CppUnit) +find_package(Doxygen) + +# Search for GNU Radio and its components and versions. Add any +# components required to the list of GR_REQUIRED_COMPONENTS (in all +# caps such as FILTER or FFT) and change the version to the minimum +# API compatible version required. 
+set(GR_REQUIRED_COMPONENTS RUNTIME) +find_package(Gnuradio "3.7.2" REQUIRED) + +if(NOT CPPUNIT_FOUND) + message(FATAL_ERROR "CppUnit required to compile adsbtx") +endif() + +######################################################################## +# Setup doxygen option +######################################################################## +if(DOXYGEN_FOUND) + option(ENABLE_DOXYGEN "Build docs using Doxygen" ON) +else(DOXYGEN_FOUND) + option(ENABLE_DOXYGEN "Build docs using Doxygen" OFF) +endif(DOXYGEN_FOUND) + +######################################################################## +# Setup the include and linker paths +######################################################################## +include_directories( + ${CMAKE_SOURCE_DIR}/lib + ${CMAKE_SOURCE_DIR}/include + ${CMAKE_BINARY_DIR}/lib + ${CMAKE_BINARY_DIR}/include + ${Boost_INCLUDE_DIRS} + ${CPPUNIT_INCLUDE_DIRS} + ${GNURADIO_ALL_INCLUDE_DIRS} +) + +link_directories( + ${Boost_LIBRARY_DIRS} + ${CPPUNIT_LIBRARY_DIRS} + ${GNURADIO_RUNTIME_LIBRARY_DIRS} +) + +# Set component parameters +set(GR_ADSBTX_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/include CACHE INTERNAL "" FORCE) +set(GR_ADSBTX_SWIG_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/swig CACHE INTERNAL "" FORCE) + +######################################################################## +# Create uninstall target +######################################################################## +configure_file( + ${CMAKE_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake +@ONLY) + +add_custom_target(uninstall + ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake +) + +######################################################################## +# Add subdirectories +######################################################################## +add_subdirectory(include/adsbtx) +add_subdirectory(lib) +add_subdirectory(swig) +add_subdirectory(python) +add_subdirectory(grc) +add_subdirectory(apps) +add_subdirectory(docs) + +######################################################################## +# Install cmake search helper for this library +######################################################################## +install(FILES cmake/Modules/adsbtxConfig.cmake + DESTINATION lib/cmake/adsbtx +) diff --git a/gr-adsbtx/apps/CMakeLists.txt b/gr-adsbtx/apps/CMakeLists.txt new file mode 100644 index 0000000..c837d77 --- /dev/null +++ b/gr-adsbtx/apps/CMakeLists.txt @@ -0,0 +1,25 @@ +# Copyright 2011 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. 
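In the apps/CMakeLists.txt just below, GR_PYTHON_INSTALL is invoked with an empty PROGRAMS list, since this commit only imports the skeleton. A minimal sketch of how an application script would later be hooked in; the script name tx_adsb_example.py is hypothetical and not part of this patch:

    include(GrPython)

    GR_PYTHON_INSTALL(
        PROGRAMS
        tx_adsb_example.py    # hypothetical transmitter script, not in this skeleton
        DESTINATION bin
    )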
+
+include(GrPython)
+
+GR_PYTHON_INSTALL(
+    PROGRAMS
+    DESTINATION bin
+)
diff --git a/gr-adsbtx/cmake/Modules/CMakeParseArgumentsCopy.cmake b/gr-adsbtx/cmake/Modules/CMakeParseArgumentsCopy.cmake
new file mode 100644
index 0000000..7ce4c49
--- /dev/null
+++ b/gr-adsbtx/cmake/Modules/CMakeParseArgumentsCopy.cmake
@@ -0,0 +1,138 @@
+# CMAKE_PARSE_ARGUMENTS(<prefix> <options> <one_value_keywords> <multi_value_keywords> args...)
+#
+# CMAKE_PARSE_ARGUMENTS() is intended to be used in macros or functions for
+# parsing the arguments given to that macro or function.
+# It processes the arguments and defines a set of variables which hold the
+# values of the respective options.
+#
+# The <options> argument contains all options for the respective macro,
+# i.e. keywords which can be used when calling the macro without any value
+# following, like e.g. the OPTIONAL keyword of the install() command.
+#
+# The <one_value_keywords> argument contains all keywords for this macro
+# which are followed by one value, like e.g. the DESTINATION keyword of the
+# install() command.
+#
+# The <multi_value_keywords> argument contains all keywords for this macro
+# which can be followed by more than one value, like e.g. the TARGETS or
+# FILES keywords of the install() command.
+#
+# When done, CMAKE_PARSE_ARGUMENTS() will have defined for each of the
+# keywords listed in <options>, <one_value_keywords> and <multi_value_keywords>
+# a variable composed of the given <prefix>
+# followed by "_" and the name of the respective keyword.
+# These variables will then hold the respective value from the argument list.
+# For the <options> keywords this will be TRUE or FALSE.
+#
+# All remaining arguments are collected in a variable
+# <prefix>_UNPARSED_ARGUMENTS, this can be checked afterwards to see whether
+# your macro was called with unrecognized parameters.
+#
+# As an example here a my_install() macro, which takes similar arguments as the
+# real install() command:
+#
+# function(MY_INSTALL)
+#   set(options OPTIONAL FAST)
+#   set(oneValueArgs DESTINATION RENAME)
+#   set(multiValueArgs TARGETS CONFIGURATIONS)
+#   cmake_parse_arguments(MY_INSTALL "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
+#   ...
+#
+# Assume my_install() has been called like this:
+#   my_install(TARGETS foo bar DESTINATION bin OPTIONAL blub)
+#
+# After the cmake_parse_arguments() call the macro will have set the following
+# variables:
+#   MY_INSTALL_OPTIONAL = TRUE
+#   MY_INSTALL_FAST = FALSE (this option was not used when calling my_install())
+#   MY_INSTALL_DESTINATION = "bin"
+#   MY_INSTALL_RENAME = "" (was not used)
+#   MY_INSTALL_TARGETS = "foo;bar"
+#   MY_INSTALL_CONFIGURATIONS = "" (was not used)
+#   MY_INSTALL_UNPARSED_ARGUMENTS = "blub" (no value expected after "OPTIONAL")
+#
+# You can then continue and process these variables.
+#
+# Keywords terminate lists of values, e.g. if directly after a one_value_keyword
+# another recognized keyword follows, this is interpreted as the beginning of
+# the new option.
+# E.g. my_install(TARGETS foo DESTINATION OPTIONAL) would result in
+# MY_INSTALL_DESTINATION set to "OPTIONAL", but MY_INSTALL_DESTINATION would
+# be empty and MY_INSTALL_OPTIONAL would be set to TRUE therefore.
+
+#=============================================================================
+# Copyright 2010 Alexander Neundorf
+#
+# Distributed under the OSI-approved BSD License (the "License");
+# see accompanying file Copyright.txt for details.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#============================================================================= +# (To distribute this file outside of CMake, substitute the full +# License text for the above reference.) + + +if(__CMAKE_PARSE_ARGUMENTS_INCLUDED) + return() +endif() +set(__CMAKE_PARSE_ARGUMENTS_INCLUDED TRUE) + + +function(CMAKE_PARSE_ARGUMENTS prefix _optionNames _singleArgNames _multiArgNames) + # first set all result variables to empty/FALSE + foreach(arg_name ${_singleArgNames} ${_multiArgNames}) + set(${prefix}_${arg_name}) + endforeach(arg_name) + + foreach(option ${_optionNames}) + set(${prefix}_${option} FALSE) + endforeach(option) + + set(${prefix}_UNPARSED_ARGUMENTS) + + set(insideValues FALSE) + set(currentArgName) + + # now iterate over all arguments and fill the result variables + foreach(currentArg ${ARGN}) + list(FIND _optionNames "${currentArg}" optionIndex) # ... then this marks the end of the arguments belonging to this keyword + list(FIND _singleArgNames "${currentArg}" singleArgIndex) # ... then this marks the end of the arguments belonging to this keyword + list(FIND _multiArgNames "${currentArg}" multiArgIndex) # ... then this marks the end of the arguments belonging to this keyword + + if(${optionIndex} EQUAL -1 AND ${singleArgIndex} EQUAL -1 AND ${multiArgIndex} EQUAL -1) + if(insideValues) + if("${insideValues}" STREQUAL "SINGLE") + set(${prefix}_${currentArgName} ${currentArg}) + set(insideValues FALSE) + elseif("${insideValues}" STREQUAL "MULTI") + list(APPEND ${prefix}_${currentArgName} ${currentArg}) + endif() + else(insideValues) + list(APPEND ${prefix}_UNPARSED_ARGUMENTS ${currentArg}) + endif(insideValues) + else() + if(NOT ${optionIndex} EQUAL -1) + set(${prefix}_${currentArg} TRUE) + set(insideValues FALSE) + elseif(NOT ${singleArgIndex} EQUAL -1) + set(currentArgName ${currentArg}) + set(${prefix}_${currentArgName}) + set(insideValues "SINGLE") + elseif(NOT ${multiArgIndex} EQUAL -1) + set(currentArgName ${currentArg}) + set(${prefix}_${currentArgName}) + set(insideValues "MULTI") + endif() + endif() + + endforeach(currentArg) + + # propagate the result variables to the caller: + foreach(arg_name ${_singleArgNames} ${_multiArgNames} ${_optionNames}) + set(${prefix}_${arg_name} ${${prefix}_${arg_name}} PARENT_SCOPE) + endforeach(arg_name) + set(${prefix}_UNPARSED_ARGUMENTS ${${prefix}_UNPARSED_ARGUMENTS} PARENT_SCOPE) + +endfunction(CMAKE_PARSE_ARGUMENTS _options _singleArgs _multiArgs) diff --git a/gr-adsbtx/cmake/Modules/FindCppUnit.cmake b/gr-adsbtx/cmake/Modules/FindCppUnit.cmake new file mode 100644 index 0000000..9af308f --- /dev/null +++ b/gr-adsbtx/cmake/Modules/FindCppUnit.cmake @@ -0,0 +1,36 @@ +# http://www.cmake.org/pipermail/cmake/2006-October/011446.html +# Modified to use pkg config and use standard var names + +# +# Find the CppUnit includes and library +# +# This module defines +# CPPUNIT_INCLUDE_DIR, where to find tiff.h, etc. +# CPPUNIT_LIBRARIES, the libraries to link against to use CppUnit. +# CPPUNIT_FOUND, If false, do not try to use CppUnit. 
+ +INCLUDE(FindPkgConfig) +PKG_CHECK_MODULES(PC_CPPUNIT "cppunit") + +FIND_PATH(CPPUNIT_INCLUDE_DIRS + NAMES cppunit/TestCase.h + HINTS ${PC_CPPUNIT_INCLUDE_DIR} + PATHS + /usr/local/include + /usr/include +) + +FIND_LIBRARY(CPPUNIT_LIBRARIES + NAMES cppunit + HINTS ${PC_CPPUNIT_LIBDIR} + PATHS + ${CPPUNIT_INCLUDE_DIRS}/../lib + /usr/local/lib + /usr/lib +) + +LIST(APPEND CPPUNIT_LIBRARIES ${CMAKE_DL_LIBS}) + +INCLUDE(FindPackageHandleStandardArgs) +FIND_PACKAGE_HANDLE_STANDARD_ARGS(CPPUNIT DEFAULT_MSG CPPUNIT_LIBRARIES CPPUNIT_INCLUDE_DIRS) +MARK_AS_ADVANCED(CPPUNIT_LIBRARIES CPPUNIT_INCLUDE_DIRS) diff --git a/gr-adsbtx/cmake/Modules/FindGnuradioRuntime.cmake b/gr-adsbtx/cmake/Modules/FindGnuradioRuntime.cmake new file mode 100644 index 0000000..afed684 --- /dev/null +++ b/gr-adsbtx/cmake/Modules/FindGnuradioRuntime.cmake @@ -0,0 +1,36 @@ +INCLUDE(FindPkgConfig) +PKG_CHECK_MODULES(PC_GNURADIO_RUNTIME gnuradio-runtime) + +if(PC_GNURADIO_RUNTIME_FOUND) + # look for include files + FIND_PATH( + GNURADIO_RUNTIME_INCLUDE_DIRS + NAMES gnuradio/top_block.h + HINTS $ENV{GNURADIO_RUNTIME_DIR}/include + ${PC_GNURADIO_RUNTIME_INCLUDE_DIRS} + ${CMAKE_INSTALL_PREFIX}/include + PATHS /usr/local/include + /usr/include + ) + + # look for libs + FIND_LIBRARY( + GNURADIO_RUNTIME_LIBRARIES + NAMES gnuradio-runtime + HINTS $ENV{GNURADIO_RUNTIME_DIR}/lib + ${PC_GNURADIO_RUNTIME_LIBDIR} + ${CMAKE_INSTALL_PREFIX}/lib/ + ${CMAKE_INSTALL_PREFIX}/lib64/ + PATHS /usr/local/lib + /usr/local/lib64 + /usr/lib + /usr/lib64 + ) + + set(GNURADIO_RUNTIME_FOUND ${PC_GNURADIO_RUNTIME_FOUND}) +endif(PC_GNURADIO_RUNTIME_FOUND) + +INCLUDE(FindPackageHandleStandardArgs) +# do not check GNURADIO_RUNTIME_INCLUDE_DIRS, is not set when default include path us used. +FIND_PACKAGE_HANDLE_STANDARD_ARGS(GNURADIO_RUNTIME DEFAULT_MSG GNURADIO_RUNTIME_LIBRARIES) +MARK_AS_ADVANCED(GNURADIO_RUNTIME_LIBRARIES GNURADIO_RUNTIME_INCLUDE_DIRS) diff --git a/gr-adsbtx/cmake/Modules/GrMiscUtils.cmake b/gr-adsbtx/cmake/Modules/GrMiscUtils.cmake new file mode 100644 index 0000000..9331d5d --- /dev/null +++ b/gr-adsbtx/cmake/Modules/GrMiscUtils.cmake @@ -0,0 +1,210 @@ +# Copyright 2010-2011 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. + +if(DEFINED __INCLUDED_GR_MISC_UTILS_CMAKE) + return() +endif() +set(__INCLUDED_GR_MISC_UTILS_CMAKE TRUE) + +######################################################################## +# Set global variable macro. +# Used for subdirectories to export settings. +# Example: include and library paths. 
+######################################################################## +function(GR_SET_GLOBAL var) + set(${var} ${ARGN} CACHE INTERNAL "" FORCE) +endfunction(GR_SET_GLOBAL) + +######################################################################## +# Set the pre-processor definition if the condition is true. +# - def the pre-processor definition to set and condition name +######################################################################## +function(GR_ADD_COND_DEF def) + if(${def}) + add_definitions(-D${def}) + endif(${def}) +endfunction(GR_ADD_COND_DEF) + +######################################################################## +# Check for a header and conditionally set a compile define. +# - hdr the relative path to the header file +# - def the pre-processor definition to set +######################################################################## +function(GR_CHECK_HDR_N_DEF hdr def) + include(CheckIncludeFileCXX) + CHECK_INCLUDE_FILE_CXX(${hdr} ${def}) + GR_ADD_COND_DEF(${def}) +endfunction(GR_CHECK_HDR_N_DEF) + +######################################################################## +# Include subdirectory macro. +# Sets the CMake directory variables, +# includes the subdirectory CMakeLists.txt, +# resets the CMake directory variables. +# +# This macro includes subdirectories rather than adding them +# so that the subdirectory can affect variables in the level above. +# This provides a work-around for the lack of convenience libraries. +# This way a subdirectory can append to the list of library sources. +######################################################################## +macro(GR_INCLUDE_SUBDIRECTORY subdir) + #insert the current directories on the front of the list + list(INSERT _cmake_source_dirs 0 ${CMAKE_CURRENT_SOURCE_DIR}) + list(INSERT _cmake_binary_dirs 0 ${CMAKE_CURRENT_BINARY_DIR}) + + #set the current directories to the names of the subdirs + set(CMAKE_CURRENT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/${subdir}) + set(CMAKE_CURRENT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/${subdir}) + + #include the subdirectory CMakeLists to run it + file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + include(${CMAKE_CURRENT_SOURCE_DIR}/CMakeLists.txt) + + #reset the value of the current directories + list(GET _cmake_source_dirs 0 CMAKE_CURRENT_SOURCE_DIR) + list(GET _cmake_binary_dirs 0 CMAKE_CURRENT_BINARY_DIR) + + #pop the subdir names of the front of the list + list(REMOVE_AT _cmake_source_dirs 0) + list(REMOVE_AT _cmake_binary_dirs 0) +endmacro(GR_INCLUDE_SUBDIRECTORY) + +######################################################################## +# Check if a compiler flag works and conditionally set a compile define. +# - flag the compiler flag to check for +# - have the variable to set with result +######################################################################## +macro(GR_ADD_CXX_COMPILER_FLAG_IF_AVAILABLE flag have) + include(CheckCXXCompilerFlag) + CHECK_CXX_COMPILER_FLAG(${flag} ${have}) + if(${have}) + add_definitions(${flag}) + endif(${have}) +endmacro(GR_ADD_CXX_COMPILER_FLAG_IF_AVAILABLE) + +######################################################################## +# Generates the .la libtool file +# This appears to generate libtool files that cannot be used by auto*. +# Usage GR_LIBTOOL(TARGET [target] DESTINATION [dest]) +# Notice: there is not COMPONENT option, these will not get distributed. 
+######################################################################## +function(GR_LIBTOOL) + if(NOT DEFINED GENERATE_LIBTOOL) + set(GENERATE_LIBTOOL OFF) #disabled by default + endif() + + if(GENERATE_LIBTOOL) + include(CMakeParseArgumentsCopy) + CMAKE_PARSE_ARGUMENTS(GR_LIBTOOL "" "TARGET;DESTINATION" "" ${ARGN}) + + find_program(LIBTOOL libtool) + if(LIBTOOL) + include(CMakeMacroLibtoolFile) + CREATE_LIBTOOL_FILE(${GR_LIBTOOL_TARGET} /${GR_LIBTOOL_DESTINATION}) + endif(LIBTOOL) + endif(GENERATE_LIBTOOL) + +endfunction(GR_LIBTOOL) + +######################################################################## +# Do standard things to the library target +# - set target properties +# - make install rules +# Also handle gnuradio custom naming conventions w/ extras mode. +######################################################################## +function(GR_LIBRARY_FOO target) + #parse the arguments for component names + include(CMakeParseArgumentsCopy) + CMAKE_PARSE_ARGUMENTS(GR_LIBRARY "" "RUNTIME_COMPONENT;DEVEL_COMPONENT" "" ${ARGN}) + + #set additional target properties + set_target_properties(${target} PROPERTIES SOVERSION ${LIBVER}) + + #install the generated files like so... + install(TARGETS ${target} + LIBRARY DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} # .so/.dylib file + ARCHIVE DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_DEVEL_COMPONENT} # .lib file + RUNTIME DESTINATION ${GR_RUNTIME_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} # .dll file + ) + + #extras mode enabled automatically on linux + if(NOT DEFINED LIBRARY_EXTRAS) + set(LIBRARY_EXTRAS ${LINUX}) + endif() + + #special extras mode to enable alternative naming conventions + if(LIBRARY_EXTRAS) + + #create .la file before changing props + GR_LIBTOOL(TARGET ${target} DESTINATION ${GR_LIBRARY_DIR}) + + #give the library a special name with ultra-zero soversion + set_target_properties(${target} PROPERTIES LIBRARY_OUTPUT_NAME ${target}-${LIBVER} SOVERSION "0.0.0") + set(target_name lib${target}-${LIBVER}.so.0.0.0) + + #custom command to generate symlinks + add_custom_command( + TARGET ${target} + POST_BUILD + COMMAND ${CMAKE_COMMAND} -E create_symlink ${target_name} ${CMAKE_CURRENT_BINARY_DIR}/lib${target}.so + COMMAND ${CMAKE_COMMAND} -E create_symlink ${target_name} ${CMAKE_CURRENT_BINARY_DIR}/lib${target}-${LIBVER}.so.0 + COMMAND ${CMAKE_COMMAND} -E touch ${target_name} #so the symlinks point to something valid so cmake 2.6 will install + ) + + #and install the extra symlinks + install( + FILES + ${CMAKE_CURRENT_BINARY_DIR}/lib${target}.so + ${CMAKE_CURRENT_BINARY_DIR}/lib${target}-${LIBVER}.so.0 + DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} + ) + + endif(LIBRARY_EXTRAS) +endfunction(GR_LIBRARY_FOO) + +######################################################################## +# Create a dummy custom command that depends on other targets. +# Usage: +# GR_GEN_TARGET_DEPS(unique_name target_deps ...) +# ADD_CUSTOM_COMMAND( ${target_deps}) +# +# Custom command cant depend on targets, but can depend on executables, +# and executables can depend on targets. 
So this is the process: +######################################################################## +function(GR_GEN_TARGET_DEPS name var) + file( + WRITE ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp.in + "int main(void){return 0;}\n" + ) + execute_process( + COMMAND ${CMAKE_COMMAND} -E copy_if_different + ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp.in + ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp + ) + add_executable(${name} ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp) + if(ARGN) + add_dependencies(${name} ${ARGN}) + endif(ARGN) + + if(CMAKE_CROSSCOMPILING) + set(${var} "DEPENDS;${name}" PARENT_SCOPE) #cant call command when cross + else() + set(${var} "DEPENDS;${name};COMMAND;${name}" PARENT_SCOPE) + endif() +endfunction(GR_GEN_TARGET_DEPS) diff --git a/gr-adsbtx/cmake/Modules/GrPlatform.cmake b/gr-adsbtx/cmake/Modules/GrPlatform.cmake new file mode 100644 index 0000000..a2e4f3b --- /dev/null +++ b/gr-adsbtx/cmake/Modules/GrPlatform.cmake @@ -0,0 +1,46 @@ +# Copyright 2011 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. + +if(DEFINED __INCLUDED_GR_PLATFORM_CMAKE) + return() +endif() +set(__INCLUDED_GR_PLATFORM_CMAKE TRUE) + +######################################################################## +# Setup additional defines for OS types +######################################################################## +if(CMAKE_SYSTEM_NAME STREQUAL "Linux") + set(LINUX TRUE) +endif() + +if(LINUX AND EXISTS "/etc/debian_version") + set(DEBIAN TRUE) +endif() + +if(LINUX AND EXISTS "/etc/redhat-release") + set(REDHAT TRUE) +endif() + +######################################################################## +# when the library suffix should be 64 (applies to redhat linux family) +######################################################################## +if(NOT DEFINED LIB_SUFFIX AND REDHAT AND CMAKE_SYSTEM_PROCESSOR MATCHES "64$") + set(LIB_SUFFIX 64) +endif() +set(LIB_SUFFIX ${LIB_SUFFIX} CACHE STRING "lib directory suffix") diff --git a/gr-adsbtx/cmake/Modules/GrPython.cmake b/gr-adsbtx/cmake/Modules/GrPython.cmake new file mode 100644 index 0000000..68ca58e --- /dev/null +++ b/gr-adsbtx/cmake/Modules/GrPython.cmake @@ -0,0 +1,227 @@ +# Copyright 2010-2011 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. + +if(DEFINED __INCLUDED_GR_PYTHON_CMAKE) + return() +endif() +set(__INCLUDED_GR_PYTHON_CMAKE TRUE) + +######################################################################## +# Setup the python interpreter: +# This allows the user to specify a specific interpreter, +# or finds the interpreter via the built-in cmake module. +######################################################################## +#this allows the user to override PYTHON_EXECUTABLE +if(PYTHON_EXECUTABLE) + + set(PYTHONINTERP_FOUND TRUE) + +#otherwise if not set, try to automatically find it +else(PYTHON_EXECUTABLE) + + #use the built-in find script + find_package(PythonInterp 2) + + #and if that fails use the find program routine + if(NOT PYTHONINTERP_FOUND) + find_program(PYTHON_EXECUTABLE NAMES python python2 python2.7 python2.6 python2.5) + if(PYTHON_EXECUTABLE) + set(PYTHONINTERP_FOUND TRUE) + endif(PYTHON_EXECUTABLE) + endif(NOT PYTHONINTERP_FOUND) + +endif(PYTHON_EXECUTABLE) + +#make the path to the executable appear in the cmake gui +set(PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE} CACHE FILEPATH "python interpreter") + +#make sure we can use -B with python (introduced in 2.6) +if(PYTHON_EXECUTABLE) + execute_process( + COMMAND ${PYTHON_EXECUTABLE} -B -c "" + OUTPUT_QUIET ERROR_QUIET + RESULT_VARIABLE PYTHON_HAS_DASH_B_RESULT + ) + if(PYTHON_HAS_DASH_B_RESULT EQUAL 0) + set(PYTHON_DASH_B "-B") + endif() +endif(PYTHON_EXECUTABLE) + +######################################################################## +# Check for the existence of a python module: +# - desc a string description of the check +# - mod the name of the module to import +# - cmd an additional command to run +# - have the result variable to set +######################################################################## +macro(GR_PYTHON_CHECK_MODULE desc mod cmd have) + message(STATUS "") + message(STATUS "Python checking for ${desc}") + execute_process( + COMMAND ${PYTHON_EXECUTABLE} -c " +######################################### +try: import ${mod} +except: exit(-1) +try: assert ${cmd} +except: exit(-1) +#########################################" + RESULT_VARIABLE ${have} + ) + if(${have} EQUAL 0) + message(STATUS "Python checking for ${desc} - found") + set(${have} TRUE) + else(${have} EQUAL 0) + message(STATUS "Python checking for ${desc} - not found") + set(${have} FALSE) + endif(${have} EQUAL 0) +endmacro(GR_PYTHON_CHECK_MODULE) + +######################################################################## +# Sets the python installation directory GR_PYTHON_DIR +######################################################################## +execute_process(COMMAND ${PYTHON_EXECUTABLE} -c " +from distutils import sysconfig +print sysconfig.get_python_lib(plat_specific=True, prefix='') +" OUTPUT_VARIABLE GR_PYTHON_DIR OUTPUT_STRIP_TRAILING_WHITESPACE +) +file(TO_CMAKE_PATH ${GR_PYTHON_DIR} GR_PYTHON_DIR) + +######################################################################## +# Create an always-built target with a unique name +# Usage: GR_UNIQUE_TARGET( ) +######################################################################## +function(GR_UNIQUE_TARGET desc) + file(RELATIVE_PATH reldir ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import re, hashlib +unique = 
hashlib.md5('${reldir}${ARGN}').hexdigest()[:5] +print(re.sub('\\W', '_', '${desc} ${reldir} ' + unique))" + OUTPUT_VARIABLE _target OUTPUT_STRIP_TRAILING_WHITESPACE) + add_custom_target(${_target} ALL DEPENDS ${ARGN}) +endfunction(GR_UNIQUE_TARGET) + +######################################################################## +# Install python sources (also builds and installs byte-compiled python) +######################################################################## +function(GR_PYTHON_INSTALL) + include(CMakeParseArgumentsCopy) + CMAKE_PARSE_ARGUMENTS(GR_PYTHON_INSTALL "" "DESTINATION;COMPONENT" "FILES;PROGRAMS" ${ARGN}) + + #################################################################### + if(GR_PYTHON_INSTALL_FILES) + #################################################################### + install(${ARGN}) #installs regular python files + + #create a list of all generated files + unset(pysrcfiles) + unset(pycfiles) + unset(pyofiles) + foreach(pyfile ${GR_PYTHON_INSTALL_FILES}) + get_filename_component(pyfile ${pyfile} ABSOLUTE) + list(APPEND pysrcfiles ${pyfile}) + + #determine if this file is in the source or binary directory + file(RELATIVE_PATH source_rel_path ${CMAKE_CURRENT_SOURCE_DIR} ${pyfile}) + string(LENGTH "${source_rel_path}" source_rel_path_len) + file(RELATIVE_PATH binary_rel_path ${CMAKE_CURRENT_BINARY_DIR} ${pyfile}) + string(LENGTH "${binary_rel_path}" binary_rel_path_len) + + #and set the generated path appropriately + if(${source_rel_path_len} GREATER ${binary_rel_path_len}) + set(pygenfile ${CMAKE_CURRENT_BINARY_DIR}/${binary_rel_path}) + else() + set(pygenfile ${CMAKE_CURRENT_BINARY_DIR}/${source_rel_path}) + endif() + list(APPEND pycfiles ${pygenfile}c) + list(APPEND pyofiles ${pygenfile}o) + + #ensure generation path exists + get_filename_component(pygen_path ${pygenfile} PATH) + file(MAKE_DIRECTORY ${pygen_path}) + + endforeach(pyfile) + + #the command to generate the pyc files + add_custom_command( + DEPENDS ${pysrcfiles} OUTPUT ${pycfiles} + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_BINARY_DIR}/python_compile_helper.py ${pysrcfiles} ${pycfiles} + ) + + #the command to generate the pyo files + add_custom_command( + DEPENDS ${pysrcfiles} OUTPUT ${pyofiles} + COMMAND ${PYTHON_EXECUTABLE} -O ${CMAKE_BINARY_DIR}/python_compile_helper.py ${pysrcfiles} ${pyofiles} + ) + + #create install rule and add generated files to target list + set(python_install_gen_targets ${pycfiles} ${pyofiles}) + install(FILES ${python_install_gen_targets} + DESTINATION ${GR_PYTHON_INSTALL_DESTINATION} + COMPONENT ${GR_PYTHON_INSTALL_COMPONENT} + ) + + + #################################################################### + elseif(GR_PYTHON_INSTALL_PROGRAMS) + #################################################################### + file(TO_NATIVE_PATH ${PYTHON_EXECUTABLE} pyexe_native) + + foreach(pyfile ${GR_PYTHON_INSTALL_PROGRAMS}) + get_filename_component(pyfile_name ${pyfile} NAME) + get_filename_component(pyfile ${pyfile} ABSOLUTE) + string(REPLACE "${CMAKE_SOURCE_DIR}" "${CMAKE_BINARY_DIR}" pyexefile "${pyfile}.exe") + list(APPEND python_install_gen_targets ${pyexefile}) + + get_filename_component(pyexefile_path ${pyexefile} PATH) + file(MAKE_DIRECTORY ${pyexefile_path}) + + add_custom_command( + OUTPUT ${pyexefile} DEPENDS ${pyfile} + COMMAND ${PYTHON_EXECUTABLE} -c + \"open('${pyexefile}', 'w').write('\#!${pyexe_native}\\n'+open('${pyfile}').read())\" + COMMENT "Shebangin ${pyfile_name}" + ) + + #on windows, python files need an extension to execute + get_filename_component(pyfile_ext 
${pyfile} EXT) + if(WIN32 AND NOT pyfile_ext) + set(pyfile_name "${pyfile_name}.py") + endif() + + install(PROGRAMS ${pyexefile} RENAME ${pyfile_name} + DESTINATION ${GR_PYTHON_INSTALL_DESTINATION} + COMPONENT ${GR_PYTHON_INSTALL_COMPONENT} + ) + endforeach(pyfile) + + endif() + + GR_UNIQUE_TARGET("pygen" ${python_install_gen_targets}) + +endfunction(GR_PYTHON_INSTALL) + +######################################################################## +# Write the python helper script that generates byte code files +######################################################################## +file(WRITE ${CMAKE_BINARY_DIR}/python_compile_helper.py " +import sys, py_compile +files = sys.argv[1:] +srcs, gens = files[:len(files)/2], files[len(files)/2:] +for src, gen in zip(srcs, gens): + py_compile.compile(file=src, cfile=gen, doraise=True) +") diff --git a/gr-adsbtx/cmake/Modules/GrSwig.cmake b/gr-adsbtx/cmake/Modules/GrSwig.cmake new file mode 100644 index 0000000..569667b --- /dev/null +++ b/gr-adsbtx/cmake/Modules/GrSwig.cmake @@ -0,0 +1,229 @@ +# Copyright 2010-2011 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. + +if(DEFINED __INCLUDED_GR_SWIG_CMAKE) + return() +endif() +set(__INCLUDED_GR_SWIG_CMAKE TRUE) + +include(GrPython) + +######################################################################## +# Builds a swig documentation file to be generated into python docstrings +# Usage: GR_SWIG_MAKE_DOCS(output_file input_path input_path....) 
+# +# Set the following variable to specify extra dependent targets: +# - GR_SWIG_DOCS_SOURCE_DEPS +# - GR_SWIG_DOCS_TARGET_DEPS +######################################################################## +function(GR_SWIG_MAKE_DOCS output_file) + find_package(Doxygen) + if(DOXYGEN_FOUND) + + #setup the input files variable list, quote formated + set(input_files) + unset(INPUT_PATHS) + foreach(input_path ${ARGN}) + if (IS_DIRECTORY ${input_path}) #when input path is a directory + file(GLOB input_path_h_files ${input_path}/*.h) + else() #otherwise its just a file, no glob + set(input_path_h_files ${input_path}) + endif() + list(APPEND input_files ${input_path_h_files}) + set(INPUT_PATHS "${INPUT_PATHS} \"${input_path}\"") + endforeach(input_path) + + #determine the output directory + get_filename_component(name ${output_file} NAME_WE) + get_filename_component(OUTPUT_DIRECTORY ${output_file} PATH) + set(OUTPUT_DIRECTORY ${OUTPUT_DIRECTORY}/${name}_swig_docs) + make_directory(${OUTPUT_DIRECTORY}) + + #generate the Doxyfile used by doxygen + configure_file( + ${CMAKE_SOURCE_DIR}/docs/doxygen/Doxyfile.swig_doc.in + ${OUTPUT_DIRECTORY}/Doxyfile + @ONLY) + + #Create a dummy custom command that depends on other targets + include(GrMiscUtils) + GR_GEN_TARGET_DEPS(_${name}_tag tag_deps ${GR_SWIG_DOCS_TARGET_DEPS}) + + #call doxygen on the Doxyfile + input headers + add_custom_command( + OUTPUT ${OUTPUT_DIRECTORY}/xml/index.xml + DEPENDS ${input_files} ${GR_SWIG_DOCS_SOURCE_DEPS} ${tag_deps} + COMMAND ${DOXYGEN_EXECUTABLE} ${OUTPUT_DIRECTORY}/Doxyfile + COMMENT "Generating doxygen xml for ${name} docs" + ) + + #call the swig_doc script on the xml files + add_custom_command( + OUTPUT ${output_file} + DEPENDS ${input_files} ${OUTPUT_DIRECTORY}/xml/index.xml + COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} + ${CMAKE_SOURCE_DIR}/docs/doxygen/swig_doc.py + ${OUTPUT_DIRECTORY}/xml + ${output_file} + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/docs/doxygen + ) + + else(DOXYGEN_FOUND) + file(WRITE ${output_file} "\n") #no doxygen -> empty file + endif(DOXYGEN_FOUND) +endfunction(GR_SWIG_MAKE_DOCS) + +######################################################################## +# Build a swig target for the common gnuradio use case. Usage: +# GR_SWIG_MAKE(target ifile ifile ifile...) 
+# +# Set the following variables before calling: +# - GR_SWIG_FLAGS +# - GR_SWIG_INCLUDE_DIRS +# - GR_SWIG_LIBRARIES +# - GR_SWIG_SOURCE_DEPS +# - GR_SWIG_TARGET_DEPS +# - GR_SWIG_DOC_FILE +# - GR_SWIG_DOC_DIRS +######################################################################## +macro(GR_SWIG_MAKE name) + set(ifiles ${ARGN}) + + #do swig doc generation if specified + if (GR_SWIG_DOC_FILE) + set(GR_SWIG_DOCS_SOURCE_DEPS ${GR_SWIG_SOURCE_DEPS}) + set(GR_SWIG_DOCS_TAREGT_DEPS ${GR_SWIG_TARGET_DEPS}) + GR_SWIG_MAKE_DOCS(${GR_SWIG_DOC_FILE} ${GR_SWIG_DOC_DIRS}) + list(APPEND GR_SWIG_SOURCE_DEPS ${GR_SWIG_DOC_FILE}) + endif() + + #append additional include directories + find_package(PythonLibs 2) + list(APPEND GR_SWIG_INCLUDE_DIRS ${PYTHON_INCLUDE_PATH}) #deprecated name (now dirs) + list(APPEND GR_SWIG_INCLUDE_DIRS ${PYTHON_INCLUDE_DIRS}) + list(APPEND GR_SWIG_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}) + list(APPEND GR_SWIG_INCLUDE_DIRS ${CMAKE_CURRENT_BINARY_DIR}) + + #determine include dependencies for swig file + execute_process( + COMMAND ${PYTHON_EXECUTABLE} + ${CMAKE_BINARY_DIR}/get_swig_deps.py + "${ifiles}" "${GR_SWIG_INCLUDE_DIRS}" + OUTPUT_STRIP_TRAILING_WHITESPACE + OUTPUT_VARIABLE SWIG_MODULE_${name}_EXTRA_DEPS + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + ) + + #Create a dummy custom command that depends on other targets + include(GrMiscUtils) + GR_GEN_TARGET_DEPS(_${name}_swig_tag tag_deps ${GR_SWIG_TARGET_DEPS}) + set(tag_file ${CMAKE_CURRENT_BINARY_DIR}/${name}.tag) + add_custom_command( + OUTPUT ${tag_file} + DEPENDS ${GR_SWIG_SOURCE_DEPS} ${tag_deps} + COMMAND ${CMAKE_COMMAND} -E touch ${tag_file} + ) + + #append the specified include directories + include_directories(${GR_SWIG_INCLUDE_DIRS}) + list(APPEND SWIG_MODULE_${name}_EXTRA_DEPS ${tag_file}) + + #setup the swig flags with flags and include directories + set(CMAKE_SWIG_FLAGS -fvirtual -modern -keyword -w511 -module ${name} ${GR_SWIG_FLAGS}) + foreach(dir ${GR_SWIG_INCLUDE_DIRS}) + list(APPEND CMAKE_SWIG_FLAGS "-I${dir}") + endforeach(dir) + + #set the C++ property on the swig .i file so it builds + set_source_files_properties(${ifiles} PROPERTIES CPLUSPLUS ON) + + #setup the actual swig library target to be built + include(UseSWIG) + SWIG_ADD_MODULE(${name} python ${ifiles}) + SWIG_LINK_LIBRARIES(${name} ${PYTHON_LIBRARIES} ${GR_SWIG_LIBRARIES}) + +endmacro(GR_SWIG_MAKE) + +######################################################################## +# Install swig targets generated by GR_SWIG_MAKE. Usage: +# GR_SWIG_INSTALL( +# TARGETS target target target... +# [DESTINATION destination] +# [COMPONENT component] +# ) +######################################################################## +macro(GR_SWIG_INSTALL) + + include(CMakeParseArgumentsCopy) + CMAKE_PARSE_ARGUMENTS(GR_SWIG_INSTALL "" "DESTINATION;COMPONENT" "TARGETS" ${ARGN}) + + foreach(name ${GR_SWIG_INSTALL_TARGETS}) + install(TARGETS ${SWIG_MODULE_${name}_REAL_NAME} + DESTINATION ${GR_SWIG_INSTALL_DESTINATION} + COMPONENT ${GR_SWIG_INSTALL_COMPONENT} + ) + + include(GrPython) + GR_PYTHON_INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/${name}.py + DESTINATION ${GR_SWIG_INSTALL_DESTINATION} + COMPONENT ${GR_SWIG_INSTALL_COMPONENT} + ) + + GR_LIBTOOL( + TARGET ${SWIG_MODULE_${name}_REAL_NAME} + DESTINATION ${GR_SWIG_INSTALL_DESTINATION} + ) + + endforeach(name) + +endmacro(GR_SWIG_INSTALL) + +######################################################################## +# Generate a python file that can determine swig dependencies. 
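The GR_SWIG_MAKE and GR_SWIG_INSTALL macros above are what this module's swig/CMakeLists.txt is expected to call once blocks exist. A hedged sketch of typical usage, following the usual gr_modtool layout; the gnuradio-adsbtx library target name and the adsbtx_swig_doc.i doc file are assumptions, not confirmed by this patch (only adsbtx_swig.i is in the file list):

    set(GR_SWIG_INCLUDE_DIRS ${GR_ADSBTX_INCLUDE_DIRS} ${GNURADIO_ALL_INCLUDE_DIRS})
    set(GR_SWIG_LIBRARIES gnuradio-adsbtx)               # assumed library target name
    set(GR_SWIG_DOC_FILE ${CMAKE_CURRENT_BINARY_DIR}/adsbtx_swig_doc.i)
    set(GR_SWIG_DOC_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/../include)

    GR_SWIG_MAKE(adsbtx_swig adsbtx_swig.i)

    GR_SWIG_INSTALL(
        TARGETS adsbtx_swig
        DESTINATION ${GR_PYTHON_DIR}/adsbtx
    )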
+# Used by the make macro above to determine extra dependencies. +# When you build C++, CMake figures out the header dependencies. +# This code essentially performs that logic for swig includes. +######################################################################## +file(WRITE ${CMAKE_BINARY_DIR}/get_swig_deps.py " + +import os, sys, re + +include_matcher = re.compile('[#|%]include\\s*[<|\"](.*)[>|\"]') +include_dirs = sys.argv[2].split(';') + +def get_swig_incs(file_path): + file_contents = open(file_path, 'r').read() + return include_matcher.findall(file_contents, re.MULTILINE) + +def get_swig_deps(file_path, level): + deps = [file_path] + if level == 0: return deps + for inc_file in get_swig_incs(file_path): + for inc_dir in include_dirs: + inc_path = os.path.join(inc_dir, inc_file) + if not os.path.exists(inc_path): continue + deps.extend(get_swig_deps(inc_path, level-1)) + return deps + +if __name__ == '__main__': + ifiles = sys.argv[1].split(';') + deps = sum([get_swig_deps(ifile, 3) for ifile in ifiles], []) + #sys.stderr.write(';'.join(set(deps)) + '\\n\\n') + print(';'.join(set(deps))) +") diff --git a/gr-adsbtx/cmake/Modules/GrTest.cmake b/gr-adsbtx/cmake/Modules/GrTest.cmake new file mode 100644 index 0000000..6174c03 --- /dev/null +++ b/gr-adsbtx/cmake/Modules/GrTest.cmake @@ -0,0 +1,133 @@ +# Copyright 2010-2011 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. + +if(DEFINED __INCLUDED_GR_TEST_CMAKE) + return() +endif() +set(__INCLUDED_GR_TEST_CMAKE TRUE) + +######################################################################## +# Add a unit test and setup the environment for a unit test. +# Takes the same arguments as the ADD_TEST function. +# +# Before calling set the following variables: +# GR_TEST_TARGET_DEPS - built targets for the library path +# GR_TEST_LIBRARY_DIRS - directories for the library path +# GR_TEST_PYTHON_DIRS - directories for the python path +######################################################################## +function(GR_ADD_TEST test_name) + + if(WIN32) + #Ensure that the build exe also appears in the PATH. + list(APPEND GR_TEST_TARGET_DEPS ${ARGN}) + + #In the land of windows, all libraries must be in the PATH. + #Since the dependent libraries are not yet installed, + #we must manually set them in the PATH to run tests. + #The following appends the path of a target dependency. + foreach(target ${GR_TEST_TARGET_DEPS}) + get_target_property(location ${target} LOCATION) + if(location) + get_filename_component(path ${location} PATH) + string(REGEX REPLACE "\\$\\(.*\\)" ${CMAKE_BUILD_TYPE} path ${path}) + list(APPEND GR_TEST_LIBRARY_DIRS ${path}) + endif(location) + endforeach(target) + + #SWIG generates the python library files into a subdirectory. + #Therefore, we must append this subdirectory into PYTHONPATH. 
+ #Only do this for the python directories matching the following: + foreach(pydir ${GR_TEST_PYTHON_DIRS}) + get_filename_component(name ${pydir} NAME) + if(name MATCHES "^(swig|lib|src)$") + list(APPEND GR_TEST_PYTHON_DIRS ${pydir}/${CMAKE_BUILD_TYPE}) + endif() + endforeach(pydir) + endif(WIN32) + + file(TO_NATIVE_PATH ${CMAKE_CURRENT_SOURCE_DIR} srcdir) + file(TO_NATIVE_PATH "${GR_TEST_LIBRARY_DIRS}" libpath) #ok to use on dir list? + file(TO_NATIVE_PATH "${GR_TEST_PYTHON_DIRS}" pypath) #ok to use on dir list? + + set(environs "GR_DONT_LOAD_PREFS=1" "srcdir=${srcdir}") + + #http://www.cmake.org/pipermail/cmake/2009-May/029464.html + #Replaced this add test + set environs code with the shell script generation. + #Its nicer to be able to manually run the shell script to diagnose problems. + #ADD_TEST(${ARGV}) + #SET_TESTS_PROPERTIES(${test_name} PROPERTIES ENVIRONMENT "${environs}") + + if(UNIX) + set(binpath "${CMAKE_CURRENT_BINARY_DIR}:$PATH") + #set both LD and DYLD paths to cover multiple UNIX OS library paths + list(APPEND libpath "$LD_LIBRARY_PATH" "$DYLD_LIBRARY_PATH") + list(APPEND pypath "$PYTHONPATH") + + #replace list separator with the path separator + string(REPLACE ";" ":" libpath "${libpath}") + string(REPLACE ";" ":" pypath "${pypath}") + list(APPEND environs "PATH=${binpath}" "LD_LIBRARY_PATH=${libpath}" "DYLD_LIBRARY_PATH=${libpath}" "PYTHONPATH=${pypath}") + + #generate a bat file that sets the environment and runs the test + find_program(SHELL sh) + set(sh_file ${CMAKE_CURRENT_BINARY_DIR}/${test_name}_test.sh) + file(WRITE ${sh_file} "#!${SHELL}\n") + #each line sets an environment variable + foreach(environ ${environs}) + file(APPEND ${sh_file} "export ${environ}\n") + endforeach(environ) + #load the command to run with its arguments + foreach(arg ${ARGN}) + file(APPEND ${sh_file} "${arg} ") + endforeach(arg) + file(APPEND ${sh_file} "\n") + + #make the shell file executable + execute_process(COMMAND chmod +x ${sh_file}) + + add_test(${test_name} ${SHELL} ${sh_file}) + + endif(UNIX) + + if(WIN32) + list(APPEND libpath ${DLL_PATHS} "%PATH%") + list(APPEND pypath "%PYTHONPATH%") + + #replace list separator with the path separator (escaped) + string(REPLACE ";" "\\;" libpath "${libpath}") + string(REPLACE ";" "\\;" pypath "${pypath}") + list(APPEND environs "PATH=${libpath}" "PYTHONPATH=${pypath}") + + #generate a bat file that sets the environment and runs the test + set(bat_file ${CMAKE_CURRENT_BINARY_DIR}/${test_name}_test.bat) + file(WRITE ${bat_file} "@echo off\n") + #each line sets an environment variable + foreach(environ ${environs}) + file(APPEND ${bat_file} "SET ${environ}\n") + endforeach(environ) + #load the command to run with its arguments + foreach(arg ${ARGN}) + file(APPEND ${bat_file} "${arg} ") + endforeach(arg) + file(APPEND ${bat_file} "\n") + + add_test(${test_name} ${bat_file}) + endif(WIN32) + +endfunction(GR_ADD_TEST) diff --git a/gr-adsbtx/cmake/Modules/UseSWIG.cmake b/gr-adsbtx/cmake/Modules/UseSWIG.cmake new file mode 100644 index 0000000..c0f1728 --- /dev/null +++ b/gr-adsbtx/cmake/Modules/UseSWIG.cmake @@ -0,0 +1,304 @@ +# - SWIG module for CMake +# Defines the following macros: +# SWIG_ADD_MODULE(name language [ files ]) +# - Define swig module with given name and specified language +# SWIG_LINK_LIBRARIES(name [ libraries ]) +# - Link libraries to swig module +# All other macros are for internal use only. +# To get the actual name of the swig module, +# use: ${SWIG_MODULE_${name}_REAL_NAME}. 
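The GR_ADD_TEST helper above wraps add_test() and sets up the library and Python paths for uninstalled code. A hedged sketch of how a Python QA test would typically be registered from python/CMakeLists.txt; qa_example.py and the gnuradio-adsbtx target name are hypothetical, not part of this skeleton:

    include(GrTest)

    set(GR_TEST_TARGET_DEPS gnuradio-adsbtx)    # assumed library target
    set(GR_TEST_PYTHON_DIRS ${CMAKE_BINARY_DIR}/swig)

    GR_ADD_TEST(qa_example ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_example.py)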
+# Set Source files properties such as CPLUSPLUS and SWIG_FLAGS to specify +# special behavior of SWIG. Also global CMAKE_SWIG_FLAGS can be used to add +# special flags to all swig calls. +# Another special variable is CMAKE_SWIG_OUTDIR, it allows one to specify +# where to write all the swig generated module (swig -outdir option) +# The name-specific variable SWIG_MODULE__EXTRA_DEPS may be used +# to specify extra dependencies for the generated modules. +# If the source file generated by swig need some special flag you can use +# set_source_files_properties( ${swig_generated_file_fullname} +# PROPERTIES COMPILE_FLAGS "-bla") + + +#============================================================================= +# Copyright 2004-2009 Kitware, Inc. +# Copyright 2009 Mathieu Malaterre +# +# Distributed under the OSI-approved BSD License (the "License"); +# see accompanying file Copyright.txt for details. +# +# This software is distributed WITHOUT ANY WARRANTY; without even the +# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the License for more information. +#============================================================================= +# (To distribute this file outside of CMake, substitute the full +# License text for the above reference.) + +set(SWIG_CXX_EXTENSION "cxx") +set(SWIG_EXTRA_LIBRARIES "") + +set(SWIG_PYTHON_EXTRA_FILE_EXTENSION "py") + +# +# For given swig module initialize variables associated with it +# +macro(SWIG_MODULE_INITIALIZE name language) + string(TOUPPER "${language}" swig_uppercase_language) + string(TOLOWER "${language}" swig_lowercase_language) + set(SWIG_MODULE_${name}_LANGUAGE "${swig_uppercase_language}") + set(SWIG_MODULE_${name}_SWIG_LANGUAGE_FLAG "${swig_lowercase_language}") + + set(SWIG_MODULE_${name}_REAL_NAME "${name}") + if("${SWIG_MODULE_${name}_LANGUAGE}" STREQUAL "UNKNOWN") + message(FATAL_ERROR "SWIG Error: Language \"${language}\" not found") + elseif("${SWIG_MODULE_${name}_LANGUAGE}" STREQUAL "PYTHON") + # when swig is used without the -interface it will produce in the module.py + # a 'import _modulename' statement, which implies having a corresponding + # _modulename.so (*NIX), _modulename.pyd (Win32). + set(SWIG_MODULE_${name}_REAL_NAME "_${name}") + elseif("${SWIG_MODULE_${name}_LANGUAGE}" STREQUAL "PERL") + set(SWIG_MODULE_${name}_EXTRA_FLAGS "-shadow") + endif() +endmacro() + +# +# For a given language, input file, and output file, determine extra files that +# will be generated. This is internal swig macro. 
+# + +macro(SWIG_GET_EXTRA_OUTPUT_FILES language outfiles generatedpath infile) + set(${outfiles} "") + get_source_file_property(SWIG_GET_EXTRA_OUTPUT_FILES_module_basename + ${infile} SWIG_MODULE_NAME) + if(SWIG_GET_EXTRA_OUTPUT_FILES_module_basename STREQUAL "NOTFOUND") + get_filename_component(SWIG_GET_EXTRA_OUTPUT_FILES_module_basename "${infile}" NAME_WE) + endif() + foreach(it ${SWIG_${language}_EXTRA_FILE_EXTENSION}) + set(${outfiles} ${${outfiles}} + "${generatedpath}/${SWIG_GET_EXTRA_OUTPUT_FILES_module_basename}.${it}") + endforeach() +endmacro() + +# +# Take swig (*.i) file and add proper custom commands for it +# +macro(SWIG_ADD_SOURCE_TO_MODULE name outfiles infile) + set(swig_full_infile ${infile}) + get_filename_component(swig_source_file_path "${infile}" PATH) + get_filename_component(swig_source_file_name_we "${infile}" NAME_WE) + get_source_file_property(swig_source_file_generated ${infile} GENERATED) + get_source_file_property(swig_source_file_cplusplus ${infile} CPLUSPLUS) + get_source_file_property(swig_source_file_flags ${infile} SWIG_FLAGS) + if("${swig_source_file_flags}" STREQUAL "NOTFOUND") + set(swig_source_file_flags "") + endif() + set(swig_source_file_fullname "${infile}") + if(${swig_source_file_path} MATCHES "^${CMAKE_CURRENT_SOURCE_DIR}") + string(REGEX REPLACE + "^${CMAKE_CURRENT_SOURCE_DIR}" "" + swig_source_file_relative_path + "${swig_source_file_path}") + else() + if(${swig_source_file_path} MATCHES "^${CMAKE_CURRENT_BINARY_DIR}") + string(REGEX REPLACE + "^${CMAKE_CURRENT_BINARY_DIR}" "" + swig_source_file_relative_path + "${swig_source_file_path}") + set(swig_source_file_generated 1) + else() + set(swig_source_file_relative_path "${swig_source_file_path}") + if(swig_source_file_generated) + set(swig_source_file_fullname "${CMAKE_CURRENT_BINARY_DIR}/${infile}") + else() + set(swig_source_file_fullname "${CMAKE_CURRENT_SOURCE_DIR}/${infile}") + endif() + endif() + endif() + + set(swig_generated_file_fullname + "${CMAKE_CURRENT_BINARY_DIR}") + if(swig_source_file_relative_path) + set(swig_generated_file_fullname + "${swig_generated_file_fullname}/${swig_source_file_relative_path}") + endif() + # If CMAKE_SWIG_OUTDIR was specified then pass it to -outdir + if(CMAKE_SWIG_OUTDIR) + set(swig_outdir ${CMAKE_SWIG_OUTDIR}) + else() + set(swig_outdir ${CMAKE_CURRENT_BINARY_DIR}) + endif() + SWIG_GET_EXTRA_OUTPUT_FILES(${SWIG_MODULE_${name}_LANGUAGE} + swig_extra_generated_files + "${swig_outdir}" + "${infile}") + set(swig_generated_file_fullname + "${swig_generated_file_fullname}/${swig_source_file_name_we}") + # add the language into the name of the file (i.e. 
TCL_wrap) + # this allows for the same .i file to be wrapped into different languages + set(swig_generated_file_fullname + "${swig_generated_file_fullname}${SWIG_MODULE_${name}_LANGUAGE}_wrap") + + if(swig_source_file_cplusplus) + set(swig_generated_file_fullname + "${swig_generated_file_fullname}.${SWIG_CXX_EXTENSION}") + else() + set(swig_generated_file_fullname + "${swig_generated_file_fullname}.c") + endif() + + # Shut up some warnings from poor SWIG code generation that we + # can do nothing about, when this flag is available + include(CheckCXXCompilerFlag) + check_cxx_compiler_flag("-Wno-unused-but-set-variable" HAVE_WNO_UNUSED_BUT_SET_VARIABLE) + if(HAVE_WNO_UNUSED_BUT_SET_VARIABLE) + set_source_files_properties(${swig_generated_file_fullname} + PROPERTIES COMPILE_FLAGS "-Wno-unused-but-set-variable") + endif(HAVE_WNO_UNUSED_BUT_SET_VARIABLE) + + get_directory_property(cmake_include_directories INCLUDE_DIRECTORIES) + set(swig_include_dirs) + foreach(it ${cmake_include_directories}) + set(swig_include_dirs ${swig_include_dirs} "-I${it}") + endforeach() + + set(swig_special_flags) + # default is c, so add c++ flag if it is c++ + if(swig_source_file_cplusplus) + set(swig_special_flags ${swig_special_flags} "-c++") + endif() + set(swig_extra_flags) + if(SWIG_MODULE_${name}_EXTRA_FLAGS) + set(swig_extra_flags ${swig_extra_flags} ${SWIG_MODULE_${name}_EXTRA_FLAGS}) + endif() + + # hack to work around CMake bug in add_custom_command with multiple OUTPUT files + + file(RELATIVE_PATH reldir ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + execute_process( + COMMAND ${PYTHON_EXECUTABLE} -c "import re, hashlib +unique = hashlib.md5('${reldir}${ARGN}').hexdigest()[:5] +print(re.sub('\\W', '_', '${name} ${reldir} ' + unique))" + OUTPUT_VARIABLE _target OUTPUT_STRIP_TRAILING_WHITESPACE + ) + + file( + WRITE ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp.in + "int main(void){return 0;}\n" + ) + + # create dummy dependencies + add_custom_command( + OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp + COMMAND ${CMAKE_COMMAND} -E copy + ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp.in + ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp + DEPENDS "${swig_source_file_fullname}" ${SWIG_MODULE_${name}_EXTRA_DEPS} + COMMENT "" + ) + + # create the dummy target + add_executable(${_target} ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp) + + # add a custom command to the dummy target + add_custom_command( + TARGET ${_target} + # Let's create the ${swig_outdir} at execution time, in case dir contains $(OutDir) + COMMAND ${CMAKE_COMMAND} -E make_directory ${swig_outdir} + COMMAND "${SWIG_EXECUTABLE}" + ARGS "-${SWIG_MODULE_${name}_SWIG_LANGUAGE_FLAG}" + ${swig_source_file_flags} + ${CMAKE_SWIG_FLAGS} + -outdir ${swig_outdir} + ${swig_special_flags} + ${swig_extra_flags} + ${swig_include_dirs} + -o "${swig_generated_file_fullname}" + "${swig_source_file_fullname}" + COMMENT "Swig source" + ) + + #add dummy independent dependencies from the _target to each file + #that will be generated by the SWIG command above + + set(${outfiles} "${swig_generated_file_fullname}" ${swig_extra_generated_files}) + + foreach(swig_gen_file ${${outfiles}}) + add_custom_command( + OUTPUT ${swig_gen_file} + COMMAND "" + DEPENDS ${_target} + COMMENT "" + ) + endforeach() + + set_source_files_properties( + ${outfiles} PROPERTIES GENERATED 1 + ) + +endmacro() + +# +# Create Swig module +# +macro(SWIG_ADD_MODULE name language) + SWIG_MODULE_INITIALIZE(${name} ${language}) + set(swig_dot_i_sources) + set(swig_other_sources) + foreach(it ${ARGN}) + 
if(${it} MATCHES ".*\\.i$") + set(swig_dot_i_sources ${swig_dot_i_sources} "${it}") + else() + set(swig_other_sources ${swig_other_sources} "${it}") + endif() + endforeach() + + set(swig_generated_sources) + foreach(it ${swig_dot_i_sources}) + SWIG_ADD_SOURCE_TO_MODULE(${name} swig_generated_source ${it}) + set(swig_generated_sources ${swig_generated_sources} "${swig_generated_source}") + endforeach() + get_directory_property(swig_extra_clean_files ADDITIONAL_MAKE_CLEAN_FILES) + set_directory_properties(PROPERTIES + ADDITIONAL_MAKE_CLEAN_FILES "${swig_extra_clean_files};${swig_generated_sources}") + add_library(${SWIG_MODULE_${name}_REAL_NAME} + MODULE + ${swig_generated_sources} + ${swig_other_sources}) + string(TOLOWER "${language}" swig_lowercase_language) + if ("${swig_lowercase_language}" STREQUAL "java") + if (APPLE) + # In java you want: + # System.loadLibrary("LIBRARY"); + # then JNI will look for a library whose name is platform dependent, namely + # MacOS : libLIBRARY.jnilib + # Windows: LIBRARY.dll + # Linux : libLIBRARY.so + set_target_properties (${SWIG_MODULE_${name}_REAL_NAME} PROPERTIES SUFFIX ".jnilib") + endif () + endif () + if ("${swig_lowercase_language}" STREQUAL "python") + # this is only needed for the python case where a _modulename.so is generated + set_target_properties(${SWIG_MODULE_${name}_REAL_NAME} PROPERTIES PREFIX "") + # Python extension modules on Windows must have the extension ".pyd" + # instead of ".dll" as of Python 2.5. Older python versions do support + # this suffix. + # http://docs.python.org/whatsnew/ports.html#SECTION0001510000000000000000 + # + # Windows: .dll is no longer supported as a filename extension for extension modules. + # .pyd is now the only filename extension that will be searched for. + # + if(WIN32 AND NOT CYGWIN) + set_target_properties(${SWIG_MODULE_${name}_REAL_NAME} PROPERTIES SUFFIX ".pyd") + endif() + endif () +endmacro() + +# +# Like TARGET_LINK_LIBRARIES but for swig modules +# +macro(SWIG_LINK_LIBRARIES name) + if(SWIG_MODULE_${name}_REAL_NAME) + target_link_libraries(${SWIG_MODULE_${name}_REAL_NAME} ${ARGN}) + else() + message(SEND_ERROR "Cannot find Swig library \"${name}\".") + endif() +endmacro() diff --git a/gr-adsbtx/cmake/Modules/adsbtxConfig.cmake b/gr-adsbtx/cmake/Modules/adsbtxConfig.cmake new file mode 100644 index 0000000..122fbb3 --- /dev/null +++ b/gr-adsbtx/cmake/Modules/adsbtxConfig.cmake @@ -0,0 +1,30 @@ +INCLUDE(FindPkgConfig) +PKG_CHECK_MODULES(PC_ADSBTX adsbtx) + +FIND_PATH( + ADSBTX_INCLUDE_DIRS + NAMES adsbtx/api.h + HINTS $ENV{ADSBTX_DIR}/include + ${PC_ADSBTX_INCLUDEDIR} + PATHS ${CMAKE_INSTALL_PREFIX}/include + /usr/local/include + /usr/include +) + +FIND_LIBRARY( + ADSBTX_LIBRARIES + NAMES gnuradio-adsbtx + HINTS $ENV{ADSBTX_DIR}/lib + ${PC_ADSBTX_LIBDIR} + PATHS ${CMAKE_INSTALL_PREFIX}/lib + ${CMAKE_INSTALL_PREFIX}/lib64 + /usr/local/lib + /usr/local/lib64 + /usr/lib + /usr/lib64 +) + +INCLUDE(FindPackageHandleStandardArgs) +FIND_PACKAGE_HANDLE_STANDARD_ARGS(ADSBTX DEFAULT_MSG ADSBTX_LIBRARIES ADSBTX_INCLUDE_DIRS) +MARK_AS_ADVANCED(ADSBTX_LIBRARIES ADSBTX_INCLUDE_DIRS) + diff --git a/gr-adsbtx/cmake/cmake_uninstall.cmake.in b/gr-adsbtx/cmake/cmake_uninstall.cmake.in new file mode 100644 index 0000000..9ae1ae4 --- /dev/null +++ b/gr-adsbtx/cmake/cmake_uninstall.cmake.in @@ -0,0 +1,32 @@ +# http://www.vtk.org/Wiki/CMake_FAQ#Can_I_do_.22make_uninstall.22_with_CMake.3F + +IF(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") + MESSAGE(FATAL_ERROR "Cannot find install manifest: 
\"@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt\"") +ENDIF(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") + +FILE(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files) +STRING(REGEX REPLACE "\n" ";" files "${files}") +FOREACH(file ${files}) + MESSAGE(STATUS "Uninstalling \"$ENV{DESTDIR}${file}\"") + IF(EXISTS "$ENV{DESTDIR}${file}") + EXEC_PROGRAM( + "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\"" + OUTPUT_VARIABLE rm_out + RETURN_VALUE rm_retval + ) + IF(NOT "${rm_retval}" STREQUAL 0) + MESSAGE(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"") + ENDIF(NOT "${rm_retval}" STREQUAL 0) + ELSEIF(IS_SYMLINK "$ENV{DESTDIR}${file}") + EXEC_PROGRAM( + "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\"" + OUTPUT_VARIABLE rm_out + RETURN_VALUE rm_retval + ) + IF(NOT "${rm_retval}" STREQUAL 0) + MESSAGE(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"") + ENDIF(NOT "${rm_retval}" STREQUAL 0) + ELSE(EXISTS "$ENV{DESTDIR}${file}") + MESSAGE(STATUS "File \"$ENV{DESTDIR}${file}\" does not exist.") + ENDIF(EXISTS "$ENV{DESTDIR}${file}") +ENDFOREACH(file) diff --git a/gr-adsbtx/docs/CMakeLists.txt b/gr-adsbtx/docs/CMakeLists.txt new file mode 100644 index 0000000..f16fbf6 --- /dev/null +++ b/gr-adsbtx/docs/CMakeLists.txt @@ -0,0 +1,35 @@ +# Copyright 2011 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. + +######################################################################## +# Setup dependencies +######################################################################## +find_package(Doxygen) + +######################################################################## +# Begin conditional configuration +######################################################################## +if(ENABLE_DOXYGEN) + +######################################################################## +# Add subdirectories +######################################################################## +add_subdirectory(doxygen) + +endif(ENABLE_DOXYGEN) diff --git a/gr-adsbtx/docs/README.adsbtx b/gr-adsbtx/docs/README.adsbtx new file mode 100644 index 0000000..e37d10d --- /dev/null +++ b/gr-adsbtx/docs/README.adsbtx @@ -0,0 +1,11 @@ +This is the adsbtx-write-a-block package meant as a guide to building +out-of-tree packages. To use the adsbtx blocks, the Python namespaces +is in 'adsbtx', which is imported as: + + import adsbtx + +See the Doxygen documentation for details about the blocks available +in this package. 
A quick listing of the details can be found in Python +after importing by using: + + help(adsbtx) diff --git a/gr-adsbtx/docs/doxygen/CMakeLists.txt b/gr-adsbtx/docs/doxygen/CMakeLists.txt new file mode 100644 index 0000000..1b44799 --- /dev/null +++ b/gr-adsbtx/docs/doxygen/CMakeLists.txt @@ -0,0 +1,52 @@ +# Copyright 2011 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. + +######################################################################## +# Create the doxygen configuration file +######################################################################## +file(TO_NATIVE_PATH ${CMAKE_SOURCE_DIR} top_srcdir) +file(TO_NATIVE_PATH ${CMAKE_BINARY_DIR} top_builddir) +file(TO_NATIVE_PATH ${CMAKE_SOURCE_DIR} abs_top_srcdir) +file(TO_NATIVE_PATH ${CMAKE_BINARY_DIR} abs_top_builddir) + +set(HAVE_DOT ${DOXYGEN_DOT_FOUND}) +set(enable_html_docs YES) +set(enable_latex_docs NO) +set(enable_xml_docs YES) + +configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in + ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile +@ONLY) + +set(BUILT_DIRS ${CMAKE_CURRENT_BINARY_DIR}/xml ${CMAKE_CURRENT_BINARY_DIR}/html) + +######################################################################## +# Make and install doxygen docs +######################################################################## +add_custom_command( + OUTPUT ${BUILT_DIRS} + COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + COMMENT "Generating documentation with doxygen" +) + +add_custom_target(doxygen_target ALL DEPENDS ${BUILT_DIRS}) + +install(DIRECTORY ${BUILT_DIRS} DESTINATION ${GR_PKG_DOC_DIR}) diff --git a/gr-adsbtx/docs/doxygen/Doxyfile.in b/gr-adsbtx/docs/doxygen/Doxyfile.in new file mode 100644 index 0000000..c4e81ce --- /dev/null +++ b/gr-adsbtx/docs/doxygen/Doxyfile.in @@ -0,0 +1,1922 @@ +# Doxyfile 1.8.4 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed +# in front of the TAG it is preceding . +# All text after a hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (" "). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. 
Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# http://www.gnu.org/software/libiconv for the list of possible encodings. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or sequence of words) that should +# identify the project. Note that if you do not use Doxywizard you need +# to put quotes around the project name if it contains spaces. + +PROJECT_NAME = "GNU Radio's ADSBTX Package" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. +# This could be handy for archiving the generated documentation or +# if some version control system is used. + +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer +# a quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify an logo or icon that is +# included in the documentation. The maximum height of the logo should not +# exceed 55 pixels and the maximum width should not exceed 200 pixels. +# Doxygen will copy the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location +# where doxygen was started. If left blank the current directory will be used. + +OUTPUT_DIRECTORY = + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create +# 4096 sub-directories (in 2 levels) under the output directory of each output +# format and will distribute the generated files over these directories. +# Enabling this option can be useful when feeding doxygen a huge amount of +# source files, where putting all generated files in the same directory would +# otherwise cause performance problems for the file system. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# The default language is English, other supported languages are: +# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, +# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, +# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English +# messages), Korean, Korean-en, Latvian, Lithuanian, Norwegian, Macedonian, +# Persian, Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, +# Slovak, Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. 
Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. +# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = YES + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. + +FULL_PATH_NAMES = NO + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. Note that you specify absolute paths here, but also +# relative paths, which will be relative from the directory where doxygen is +# started. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful if your file system +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like regular Qt-style comments +# (thus requiring an explicit @brief command for a brief description.) + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then Doxygen will +# interpret the first line (until the first dot) of a Qt-style +# comment as the brief description. If set to NO, the comments +# will behave just like regular Qt-style comments (thus requiring +# an explicit \brief command for a brief description.) + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. 
Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = YES + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding +# "class=itcl::class" will allow you to use the command class in the +# itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java +# sources only. Doxygen will then generate output that is more tailored for +# Java. For instance, namespaces will be presented as packages, qualified +# scopes will look different, etc. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources only. Doxygen will then generate output that is more tailored for +# Fortran. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for +# VHDL. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, +# and language is one of the parsers supported by doxygen: IDL, Java, +# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, +# C++. For instance to make doxygen treat .inc files as Fortran files (default +# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note +# that for custom extensions you also need to set FILE_PATTERNS otherwise the +# files are not read by doxygen. + +EXTENSION_MAPPING = + +# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all +# comments according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you +# can mix doxygen, HTML, and XML commands with Markdown formatting. 
+# Disable only in case of backward compatibilities issues. + +MARKDOWN_SUPPORT = YES + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by by putting a % sign in front of the word +# or globally by setting AUTOLINK_SUPPORT to NO. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. +# func(std::string) {}). This also makes the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = YES + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. +# Doxygen will parse them like normal C++ but will assume all classes use public +# instead of private inheritance when no explicit protection keyword is present. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES (the +# default) will make doxygen replace the get and set methods by a property in +# the documentation. This will only work if the methods are indeed getting or +# setting a simple type. If this is not the case, or you want to show the +# methods anyway, you should set this option to NO. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and +# unions are shown inside the group in which they are included (e.g. using +# @ingroup) instead of on a separate page (for HTML and Man pages) or +# section (for LaTeX and RTF). + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and +# unions with only public data fields or simple typedef fields will be shown +# inline in the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO (the default), structs, classes, and unions are shown on a separate +# page (for HTML and Man pages) or section (for LaTeX and RTF). + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum +# is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. 
When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically +# be useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can +# be an expensive process and often the same symbol appear multiple times in +# the code, doxygen keeps a cache of pre-resolved symbols. If the cache is too +# small doxygen will become slower. If the cache is too large, memory is wasted. +# The cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid +# range is 0..9, the default is 0, corresponding to a cache size of 2^16 = 65536 +# symbols. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal +# scope will be included in the documentation. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. +# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base +# name of the file that contains the anonymous namespace. By default +# anonymous namespaces are hidden. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. 
+ +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen +# will list include files with double quotes in the documentation +# rather than with sharp brackets. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen +# will sort the (brief and detailed) documentation of class members so that +# constructors and destructors are listed first. If set to NO (the default) +# the constructors will appear in the respective orders defined by +# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. +# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO +# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the +# hierarchy of group names into alphabetical order. If set to NO (the default) +# the group names will appear in their defined order. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. 
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to +# do proper type resolution of all parameters of a function it will reject a +# match between the prototype and the implementation of a member function even +# if there is only one candidate or it is obvious which candidate to choose +# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen +# will still accept a match between prototype and implementation in such cases. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = NO + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = NO + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = NO + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. + +GENERATE_DEPRECATEDLIST= NO + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if section-label ... \endif +# and \cond section-label ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or macro consists of for it to appear in +# the documentation. If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and macros in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. +# This will remove the Files entry from the Quick Index and from the +# Folder Tree View (if specified). The default is YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the +# Namespaces page. +# This will remove the Namespaces entry from the Quick Index +# and from the Folder Tree View (if specified). The default is YES. + +SHOW_NAMESPACES = NO + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command , where is the value of +# the FILE_VERSION_FILTER tag, and is the name of an input file +# provided by doxygen. Whatever the program writes to standard output +# is used as the file version. See the manual for examples. 
+ +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. +# You can optionally specify a file name after the option, if omitted +# DoxygenLayout.xml will be used as the name of the layout file. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files +# containing the references data. This must be a list of .bib files. The +# .bib extension is automatically appended if omitted. Using this command +# requires the bibtex tool to be installed. See also +# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style +# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this +# feature you need bibtex and perl available in the search path. Do not use +# file names with spaces, bibtex cannot handle them. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = YES + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. + +WARN_IF_UNDOCUMENTED = YES + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. + +WARN_IF_DOC_ERROR = YES + +# The WARN_NO_PARAMDOC option can be enabled to get warnings for +# functions that are documented, but have no documentation for their parameters +# or return value. If set to NO (the default) doxygen will only warn about +# wrong or incomplete parameter documentation, but not about the absence of +# documentation. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. Optionally the format may contain +# $version, which will be replaced by the version of the file (if it could +# be obtained via FILE_VERSION_FILTER) + +WARN_FORMAT = "$file:$line: $text " + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". 
Separate the files or directories +# with spaces. + +INPUT = @top_srcdir@ \ + @top_builddir@ + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is +# also the default input encoding. Doxygen uses libiconv (or the iconv built +# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for +# the list of possible encodings. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh +# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py +# *.f90 *.f *.for *.vhd *.vhdl + +FILE_PATTERNS = *.h \ + *.dox + +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = @abs_top_builddir@/docs/doxygen/html \ + @abs_top_builddir@/docs/doxygen/xml \ + @abs_top_builddir@/docs/doxygen/other/doxypy.py \ + @abs_top_builddir@/_CPack_Packages \ + @abs_top_srcdir@/cmake + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS = */.deps/* \ + */.libs/* \ + */.svn/* \ + */CVS/* \ + */__init__.py \ + */qa_*.cc \ + */qa_*.h \ + */qa_*.py + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test + +EXCLUDE_SYMBOLS = ad9862 \ + numpy \ + *swig* \ + *Swig* \ + *my_top_block* \ + *my_graph* \ + *app_top_block* \ + *am_rx_graph* \ + *_queue_watcher_thread* \ + *parse* \ + *MyFrame* \ + *MyApp* \ + *PyObject* \ + *wfm_rx_block* \ + *_sptr* \ + *debug* \ + *wfm_rx_sca_block* \ + *tv_rx_block* \ + *wxapt_rx_block* \ + *example_signal* + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. 
+ +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain image that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command , where +# is the value of the INPUT_FILTER tag, and is the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. +# If FILTER_PATTERNS is specified, this tag will be ignored. +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. +# Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. +# The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further +# info on how filters are used. If FILTER_PATTERNS is empty or if +# non of the patterns match the file name, INPUT_FILTER is applied. + +FILTER_PATTERNS = *.py=@top_srcdir@/doc/doxygen/other/doxypy.py + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) +# and it is also possible to disable source filtering for a specific pattern +# using *.ext= (so without naming a filter). This option only has effect when +# FILTER_SOURCE_FILES is enabled. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MD_FILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources. +# Note: To get rid of all source code in the generated output, make sure also +# VERBATIM_HEADERS is set to NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. Normal C, C++ and Fortran comments will always remain visible. 
+ +STRIP_CODE_COMMENTS = NO + +# If the REFERENCED_BY_RELATION tag is set to YES +# then for each documented function all documented +# functions referencing it will be listed. + +REFERENCED_BY_RELATION = YES + +# If the REFERENCES_RELATION tag is set to YES +# then for each documented function all documented entities +# called/used by that function will be listed. + +REFERENCES_RELATION = YES + +# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) +# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from +# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will +# link to the source code. +# Otherwise they will link to the documentation. + +REFERENCES_LINK_SOURCE = YES + +# If the USE_HTAGS tag is set to YES then the references to source code +# will point to the HTML generated by the htags(1) tool instead of doxygen +# built-in source browser. The htags tool is part of GNU's global source +# tagging system (see http://www.gnu.org/software/global/global.html). You +# will need version 4.8.6 or higher. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for +# which an include is specified. Set to NO to disable this. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project +# contains a lot of classes, structs, unions or interfaces. + +ALPHABETICAL_INDEX = YES + +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# in which this list will be split (can be a number in the range [1..20]) + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = @enable_html_docs@ + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. Note that when using a custom header you are responsible +# for the proper inclusion of any scripts and style sheets that doxygen +# needs, which is dependent on the configuration options used. 
+# It is advised to generate a default header using "doxygen -w html +# header.html footer.html stylesheet.css YourConfigFile" and then modify +# that header. Note that the header is subject to change so you typically +# have to redo this when upgrading to a newer version of doxygen or when +# changing the value of configuration settings such as GENERATE_TREEVIEW! + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If left blank doxygen will +# generate a default style sheet. Note that it is recommended to use +# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this +# tag will in the future become obsolete. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional +# user-defined cascading style sheet that is included after the standard +# style sheets created by doxygen. Using this option one can overrule +# certain style aspects. This is preferred over using HTML_STYLESHEET +# since it does not replace the standard style sheet and is therefor more +# robust against future updates. Doxygen will copy the style sheet file to +# the output directory. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that +# the files will be copied as-is; there are no commands or markers available. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. +# Doxygen will adjust the colors in the style sheet and background images +# according to this color. Hue is specified as an angle on a colorwheel, +# see http://en.wikipedia.org/wiki/Hue for more information. +# For instance the value 0 represents red, 60 is yellow, 120 is green, +# 180 is cyan, 240 is blue, 300 purple, and 360 is red again. +# The allowed range is 0 to 359. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of +# the colors in the HTML output. For a value of 0 the output will use +# grayscales only. A value of 255 will produce the most vivid colors. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to +# the luminance component of the colors in the HTML output. Values below +# 100 gradually make the output lighter, whereas values above 100 make +# the output darker. The value divided by 100 is the actual gamma applied, +# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, +# and 100 does not change the gamma. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting +# this to NO can help when comparing the output of multiple runs. 
+ +HTML_TIMESTAMP = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of +# entries shown in the various tree structured indices initially; the user +# can expand and collapse entries dynamically later on. Doxygen will expand +# the tree to such a level that at most the specified number of entries are +# visible (unless a fully collapsed tree already exceeds this amount). +# So setting the number of entries 1 will produce a full collapsed tree by +# default. 0 is a special value representing an infinite number of entries +# and will result in a full expanded tree by default. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files +# will be generated that can be used as input for Apple's Xcode 3 +# integrated development environment, introduced with OSX 10.5 (Leopard). +# To create a documentation set, doxygen will generate a Makefile in the +# HTML output directory. Running make will produce the docset in that +# directory and running "make install" will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find +# it at startup. +# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. + +GENERATE_DOCSET = NO + +# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the +# feed. A documentation feed provides an umbrella under which multiple +# documentation sets from a single provider (such as a company or product suite) +# can be grouped. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that +# should uniquely identify the documentation set bundle. This should be a +# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen +# will append .docset to the name. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely +# identify the documentation publisher. This should be a reverse domain-name +# style string, e.g. com.mycompany.MyDocSet.documentation. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output directory. + +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. + +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). 
+ +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING +# is used to encode HtmlHelp index (hhk), content (hhc) and project file +# content. + +CHM_INDEX_ENCODING = + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. + +TOC_EXPAND = YES + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated +# that can be used as input for Qt's qhelpgenerator to generate a +# Qt Compressed Help (.qch) of the generated HTML documentation. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can +# be used to specify the file name of the resulting .qch file. +# The path specified is relative to the HTML output folder. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#namespace + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#virtual-folders + +QHP_VIRTUAL_FOLDER = doc + +# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to +# add. For more information please see +# http://doc.trolltech.com/qthelpproject.html#custom-filters + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see +# +# Qt Help Project / Custom Filters. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's +# filter section matches. +# +# Qt Help Project / Filter Attributes. + +QHP_SECT_FILTER_ATTRS = + +# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can +# be used to specify the location of Qt's qhelpgenerator. +# If non-empty doxygen will try to run qhelpgenerator on the generated +# .qhp file. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files +# will be generated, which together with the HTML files, form an Eclipse help +# plugin. To install this plugin and make it available under the help contents +# menu in Eclipse, the contents of the directory containing the HTML and XML +# files needs to be copied into the plugins directory of eclipse. The name of +# the directory within the plugins directory should be the same as +# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before +# the help appears. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have +# this name. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) +# at top of each HTML page. The value NO (the default) enables the index and +# the value YES disables it. Since the tabs have the same information as the +# navigation tree you can set this option to NO if you already set +# GENERATE_TREEVIEW to YES. 
+ +DISABLE_INDEX = YES + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. +# If the tag value is set to YES, a side panel will be generated +# containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). +# Windows users are probably better off using the HTML help feature. +# Since the tree basically has the same information as the tab index you +# could consider to set DISABLE_INDEX to NO when enabling this option. + +GENERATE_TREEVIEW = YES + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values +# (range [0,1..20]) that doxygen will group on one line in the generated HTML +# documentation. Note that a value of 0 will completely suppress the enum +# values from appearing in the overview section. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. + +TREEVIEW_WIDTH = 180 + +# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open +# links to external symbols imported via tag files in a separate window. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of Latex formulas included +# as images in the HTML documentation. The default is 10. Note that +# when you change the font size after a successful doxygen run you need +# to manually remove any form_*.png images from the HTML output directory +# to force them to be regenerated. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are +# not supported properly for IE 6.0, but are supported on all modern browsers. +# Note that when changing this option you need to delete any form_*.png files +# in the HTML output before the changes have effect. + +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax +# (see http://www.mathjax.org) which uses client side Javascript for the +# rendering instead of using prerendered bitmaps. Use this if you do not +# have LaTeX installed or if you want to formulas look prettier in the HTML +# output. When enabled you may also need to install MathJax separately and +# configure the path to it using the MATHJAX_RELPATH option. + +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and +# SVG. The default value is HTML-CSS, which is slower, but has the best +# compatibility. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the +# HTML output directory using the MATHJAX_RELPATH option. The destination +# directory should contain the MathJax.js script. For instance, if the mathjax +# directory is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to +# the MathJax Content Delivery Network so you can quickly see the result without +# installing MathJax. +# However, it is strongly recommended to install a local +# copy of MathJax from http://www.mathjax.org before deployment. 
+ +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest + +# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension +# names that should be enabled during MathJax rendering. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript +# pieces of code that will be used on startup of the MathJax code. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box +# for the HTML output. The underlying search engine uses javascript +# and DHTML and should work on any modern browser. Note that when using +# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets +# (GENERATE_DOCSET) there is already a search function so this one should +# typically be disabled. For large projects the javascript based search engine +# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. + +SEARCHENGINE = NO + +# When the SERVER_BASED_SEARCH tag is enabled the search engine will be +# implemented using a web server instead of a web client using Javascript. +# There are two flavours of web server based search depending on the +# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for +# searching and an index file used by the script. When EXTERNAL_SEARCH is +# enabled the indexing and searching needs to be provided by external tools. +# See the manual for details. + +SERVER_BASED_SEARCH = NO + +# When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP +# script for searching. Instead the search results are written to an XML file +# which needs to be processed by an external indexer. Doxygen will invoke an +# external search engine pointed to by the SEARCHENGINE_URL option to obtain +# the search results. Doxygen ships with an example indexer (doxyindexer) and +# search engine (doxysearch.cgi) which are based on the open source search +# engine library Xapian. See the manual for configuration details. + +EXTERNAL_SEARCH = NO + +# The SEARCHENGINE_URL should point to a search engine hosted by a web server +# which will returned the search results when EXTERNAL_SEARCH is enabled. +# Doxygen ships with an example search engine (doxysearch) which is based on +# the open source search engine library Xapian. See the manual for configuration +# details. + +SEARCHENGINE_URL = + +# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed +# search data is written to a file for indexing by an external tool. With the +# SEARCHDATA_FILE tag the name of this file can be specified. + +SEARCHDATA_FILE = searchdata.xml + +# When SERVER_BASED_SEARCH AND EXTERNAL_SEARCH are both enabled the +# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is +# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple +# projects and redirect the results back to the right project. + +EXTERNAL_SEARCH_ID = + +# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen +# projects other than the one defined by this configuration file, but that are +# all added to the same external search index. Each project needs to have a +# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id +# of to a relative location where the documentation can be found. +# The format is: EXTRA_SEARCH_MAPPINGS = id1=loc1 id2=loc2 ... 
+ +EXTRA_SEARCH_MAPPINGS = + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. + +GENERATE_LATEX = @enable_latex_docs@ + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. +# Note that when enabling USE_PDFLATEX this option is only used for +# generating bitmaps for formulas in the HTML output, but not in the +# Makefile that is written to the output directory. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, letter, legal and +# executive. If left blank a4 will be used. + +PAPER_TYPE = letter + +# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. + +EXTRA_PACKAGES = + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for +# the generated latex document. The footer should contain everything after +# the last chapter. If it is left blank doxygen will generate a +# standard footer. Notice: only use this tag if you know what you are doing! + +LATEX_FOOTER = + +# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images +# or other source files which should be copied to the LaTeX output directory. +# Note that the files will be copied as-is; there are no commands or markers +# available. + +LATEX_EXTRA_FILES = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). The pdf file will +# contain links (just like the HTML output) instead of page references +# This makes the output suitable for online browsing using a pdf viewer. + +PDF_HYPERLINKS = YES + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = NO + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. +# command to the generated LaTeX files. This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. 
+ +LATEX_BATCHMODE = NO + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output. + +LATEX_HIDE_INDICES = NO + +# If LATEX_SOURCE_CODE is set to YES then doxygen will include +# source code with syntax highlighting in the LaTeX output. +# Note that which sources are shown also depends on other settings +# such as SOURCE_BROWSER. + +LATEX_SOURCE_CODE = NO + +# The LATEX_BIB_STYLE tag can be used to specify the style to use for the +# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See +# http://en.wikipedia.org/wiki/BibTeX for more info. + +LATEX_BIB_STYLE = plain + +#--------------------------------------------------------------------------- +# configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output +# The RTF output is optimized for Word 97 and may not look very pretty with +# other RTF readers or editors. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load style sheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. + +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = NO + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. + +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. 
+ +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. + +GENERATE_XML = @enable_xml_docs@ + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. + +XML_OUTPUT = xml + +# The XML_SCHEMA tag can be used to specify an XML schema, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_SCHEMA = + +# The XML_DTD tag can be used to specify an XML DTD, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_DTD = + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. + +XML_PROGRAMLISTING = NO + +#--------------------------------------------------------------------------- +# configuration options related to the DOCBOOK output +#--------------------------------------------------------------------------- + +# If the GENERATE_DOCBOOK tag is set to YES Doxygen will generate DOCBOOK files +# that can be used to generate PDF. + +GENERATE_DOCBOOK = NO + +# The DOCBOOK_OUTPUT tag is used to specify where the DOCBOOK pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in +# front of it. If left blank docbook will be used as the default path. + +DOCBOOK_OUTPUT = docbook + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. + +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. +# This is useful +# if you want to understand what is going on. +# On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. 
+ +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. +# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. + +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. + +MACRO_EXPANSION = NO + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_DEFINED tags. + +EXPAND_ONLY_PREDEF = NO + +# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files +# pointed to by INCLUDE_PATH will be searched when a #include is found. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. + +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. + +PREDEFINED = + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition that +# overrules the definition found in the source code. + +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all references to function-like macros +# that are alone on a line, have an all uppercase name, and do not end with a +# semicolon, because these will confuse the parser if not removed. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES option can be used to specify one or more tagfiles. For each +# tag file the location of the external documentation should be added. The +# format of a tag file without this location is as follows: +# +# TAGFILES = file1 file2 ... 
+# Adding location for the tag files is done as follows: +# +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths +# or URLs. Note that each tag file must have a unique name (where the name does +# NOT include the path). If a tag file is not located in the directory in which +# doxygen is run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# a tag file that is based on the input files it reads. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes +# will be listed. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will +# be listed. + +EXTERNAL_GROUPS = YES + +# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed +# in the related pages index. If set to NO, only the current project's +# pages will be listed. + +EXTERNAL_PAGES = YES + +# The PERL_PATH should be the absolute path and name of the perl script +# interpreter (i.e. the result of `which perl'). + +PERL_PATH = /usr/bin/perl + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will +# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base +# or super classes. Setting the tag to NO turns the diagrams off. Note that +# this option also works with HAVE_DOT disabled, but it is recommended to +# install and use dot, since it yields more powerful graphs. + +CLASS_DIAGRAMS = YES + +# You can define message sequence charts within doxygen comments using the \msc +# command. Doxygen will then run the mscgen tool (see +# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the +# documentation. The MSCGEN_PATH tag allows you to specify the directory where +# the mscgen tool resides. If left empty the tool is assumed to be found in the +# default search path. + +MSCGEN_PATH = + +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented +# or is not a class. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. The other options in this section +# have no effect if this option is set to NO (the default) + +HAVE_DOT = @HAVE_DOT@ + +# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is +# allowed to run in parallel. When set to 0 (the default) doxygen will +# base this on the number of processors available in the system. You can set it +# explicitly to a value larger than 0 to get control over the balance +# between CPU load and processing speed. + +DOT_NUM_THREADS = 0 + +# By default doxygen will use the Helvetica font for all dot files that +# doxygen generates. When you want a differently looking font you can specify +# the font name using DOT_FONTNAME. 
You need to make sure dot is able to find +# the font, which can be done by putting it in a standard location or by setting +# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the +# directory containing the font. + +DOT_FONTNAME = Helvetica + +# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. +# The default size is 10pt. + +DOT_FONTSIZE = 10 + +# By default doxygen will tell dot to use the Helvetica font. +# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to +# set the path where dot can find it. + +DOT_FONTPATH = + +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force the +# CLASS_DIAGRAMS tag to NO. + +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect implementation dependencies (inheritance, containment, and +# class references variables) of the class with other documented classes. + +COLLABORATION_GRAPH = NO + +# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for groups, showing the direct groups dependencies + +GROUP_GRAPHS = YES + +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. + +UML_LOOK = NO + +# If the UML_LOOK tag is enabled, the fields and methods are shown inside +# the class node. If there are many fields or methods and many nodes the +# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS +# threshold limits the number of items for each type to make the size more +# manageable. Set this to 0 for no limit. Note that the threshold may be +# exceeded by 50% before the limit is enforced. + +UML_LIMIT_NUM_FIELDS = 10 + +# If set to YES, the inheritance and collaboration graphs will show the +# relations between templates and their instances. + +TEMPLATE_RELATIONS = NO + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT +# tags are set to YES then doxygen will generate a graph for each documented +# file showing the direct and indirect include dependencies of the file with +# other documented files. + +INCLUDE_GRAPH = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and +# HAVE_DOT tags are set to YES then doxygen will generate a graph for each +# documented header file showing the documented files that directly or +# indirectly include this file. + +INCLUDED_BY_GRAPH = YES + +# If the CALL_GRAPH and HAVE_DOT options are set to YES then +# doxygen will generate a call dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable call graphs +# for selected functions only using the \callgraph command. + +CALL_GRAPH = NO + +# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then +# doxygen will generate a caller dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable caller +# graphs for selected functions only using the \callergraph command. 
+ +CALLER_GRAPH = NO + +# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen +# will generate a graphical hierarchy of all classes instead of a textual one. + +GRAPHICAL_HIERARCHY = YES + +# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES +# then doxygen will show the dependencies a directory has on other directories +# in a graphical way. The dependency relations are determined by the #include +# relations between the files in the directories. + +DIRECTORY_GRAPH = YES + +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# generated by dot. Possible values are svg, png, jpg, or gif. +# If left blank png will be used. If you choose svg you need to set +# HTML_FILE_EXTENSION to xhtml in order to make the SVG files +# visible in IE 9+ (other browsers do not have this requirement). + +DOT_IMAGE_FORMAT = png + +# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to +# enable generation of interactive SVG images that allow zooming and panning. +# Note that this requires a modern browser other than Internet Explorer. +# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you +# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files +# visible. Older versions of IE do not have SVG support. + +INTERACTIVE_SVG = NO + +# The tag DOT_PATH can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found in the path. + +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the +# \dotfile command). + +DOTFILE_DIRS = + +# The MSCFILE_DIRS tag can be used to specify one or more directories that +# contain msc files that are included in the documentation (see the +# \mscfile command). + +MSCFILE_DIRS = + +# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of +# nodes that will be shown in the graph. If the number of nodes in a graph +# becomes larger than this value, doxygen will truncate the graph, which is +# visualized by representing a node as a red box. Note that doxygen if the +# number of direct children of the root node in a graph is already larger than +# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note +# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. + +DOT_GRAPH_MAX_NODES = 50 + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the +# graphs generated by dot. A depth value of 3 means that only nodes reachable +# from the root by following a path via at most 3 edges will be shown. Nodes +# that lay further from the root node will be omitted. Note that setting this +# option to 1 or 2 may greatly reduce the computation time needed for large +# code bases. Also note that the size of a graph can be further restricted by +# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. + +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is disabled by default, because dot on Windows does not +# seem to support this out of the box. Warning: Depending on the platform used, +# enabling this option may lead to badly anti-aliased labels on the edges of +# a graph (i.e. they become hard to read). + +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). 
This +# makes dot run faster, but since only newer versions of dot (>1.8.10) +# support this, this feature is disabled by default. + +DOT_MULTI_TARGETS = YES + +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and +# arrows in the dot generated graphs. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate +# the various graphs. + +DOT_CLEANUP = YES diff --git a/gr-adsbtx/docs/doxygen/Doxyfile.swig_doc.in b/gr-adsbtx/docs/doxygen/Doxyfile.swig_doc.in new file mode 100644 index 0000000..57736d7 --- /dev/null +++ b/gr-adsbtx/docs/doxygen/Doxyfile.swig_doc.in @@ -0,0 +1,1890 @@ +# Doxyfile 1.8.4 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed +# in front of the TAG it is preceding . +# All text after a hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (" "). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# http://www.gnu.org/software/libiconv for the list of possible encodings. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or sequence of words) that should +# identify the project. Note that if you do not use Doxywizard you need +# to put quotes around the project name if it contains spaces. + +PROJECT_NAME = @CPACK_PACKAGE_NAME@ + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. +# This could be handy for archiving the generated documentation or +# if some version control system is used. + +PROJECT_NUMBER = @CPACK_PACKAGE_VERSION@ + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer +# a quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify an logo or icon that is +# included in the documentation. The maximum height of the logo should not +# exceed 55 pixels and the maximum width should not exceed 200 pixels. +# Doxygen will copy the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location +# where doxygen was started. If left blank the current directory will be used. + +OUTPUT_DIRECTORY = @OUTPUT_DIRECTORY@ + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create +# 4096 sub-directories (in 2 levels) under the output directory of each output +# format and will distribute the generated files over these directories. 
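Note on the @...@ placeholders: values such as @enable_html_docs@, @enable_latex_docs@, @enable_xml_docs@, @HAVE_DOT@, @OUTPUT_DIRECTORY@, @CPACK_PACKAGE_NAME@ and @CPACK_PACKAGE_VERSION@ seen in these two Doxyfile templates are not literal Doxygen settings; they are placeholders that the build system fills in when it generates the actual Doxyfile. A minimal sketch of how such a template is typically expanded with CMake's configure_file(); the variable values below are illustrative assumptions, not taken from this patch:

    # Illustrative values only; in practice these come from option() flags
    # and find_package() results elsewhere in the build tree.
    set(enable_html_docs  YES)
    set(enable_latex_docs NO)
    set(enable_xml_docs   YES)
    set(HAVE_DOT          NO)

    # configure_file() copies the template and replaces every @VAR@ with the
    # value of the CMake variable VAR; @ONLY leaves ${...} expressions alone.
    configure_file(
        ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in
        ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
    @ONLY)
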
+# Enabling this option can be useful when feeding doxygen a huge amount of +# source files, where putting all generated files in the same directory would +# otherwise cause performance problems for the file system. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# The default language is English, other supported languages are: +# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, +# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, +# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English +# messages), Korean, Korean-en, Latvian, Lithuanian, Norwegian, Macedonian, +# Persian, Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, +# Slovak, Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. +# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. + +FULL_PATH_NAMES = YES + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. Note that you specify absolute paths here, but also +# relative paths, which will be relative from the directory where doxygen is +# started. 
+ +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful if your file system +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like regular Qt-style comments +# (thus requiring an explicit @brief command for a brief description.) + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then Doxygen will +# interpret the first line (until the first dot) of a Qt-style +# comment as the brief description. If set to NO, the comments +# will behave just like regular Qt-style comments (thus requiring +# an explicit \brief command for a brief description.) + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding +# "class=itcl::class" will allow you to use the command class in the +# itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java +# sources only. Doxygen will then generate output that is more tailored for +# Java. 
For instance, namespaces will be presented as packages, qualified +# scopes will look different, etc. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources only. Doxygen will then generate output that is more tailored for +# Fortran. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for +# VHDL. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, +# and language is one of the parsers supported by doxygen: IDL, Java, +# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, +# C++. For instance to make doxygen treat .inc files as Fortran files (default +# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note +# that for custom extensions you also need to set FILE_PATTERNS otherwise the +# files are not read by doxygen. + +EXTENSION_MAPPING = + +# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all +# comments according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you +# can mix doxygen, HTML, and XML commands with Markdown formatting. +# Disable only in case of backward compatibilities issues. + +MARKDOWN_SUPPORT = YES + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by by putting a % sign in front of the word +# or globally by setting AUTOLINK_SUPPORT to NO. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. +# func(std::string) {}). This also makes the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = YES + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. +# Doxygen will parse them like normal C++ but will assume all classes use public +# instead of private inheritance when no explicit protection keyword is present. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES (the +# default) will make doxygen replace the get and set methods by a property in +# the documentation. This will only work if the methods are indeed getting or +# setting a simple type. If this is not the case, or you want to show the +# methods anyway, you should set this option to NO. 
+ +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and +# unions are shown inside the group in which they are included (e.g. using +# @ingroup) instead of on a separate page (for HTML and Man pages) or +# section (for LaTeX and RTF). + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and +# unions with only public data fields or simple typedef fields will be shown +# inline in the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO (the default), structs, classes, and unions are shown on a separate +# page (for HTML and Man pages) or section (for LaTeX and RTF). + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum +# is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically +# be useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can +# be an expensive process and often the same symbol appear multiple times in +# the code, doxygen keeps a cache of pre-resolved symbols. If the cache is too +# small doxygen will become slower. If the cache is too large, memory is wasted. +# The cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid +# range is 0..9, the default is 0, corresponding to a cache size of 2^16 = 65536 +# symbols. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal +# scope will be included in the documentation. 
+ +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. +# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base +# name of the file that contains the anonymous namespace. By default +# anonymous namespaces are hidden. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen +# will list include files with double quotes in the documentation +# rather than with sharp brackets. 
+ +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen +# will sort the (brief and detailed) documentation of class members so that +# constructors and destructors are listed first. If set to NO (the default) +# the constructors will appear in the respective orders defined by +# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. +# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO +# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the +# hierarchy of group names into alphabetical order. If set to NO (the default) +# the group names will appear in their defined order. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to +# do proper type resolution of all parameters of a function it will reject a +# match between the prototype and the implementation of a member function even +# if there is only one candidate or it is obvious which candidate to choose +# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen +# will still accept a match between prototype and implementation in such cases. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if section-label ... \endif +# and \cond section-label ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or macro consists of for it to appear in +# the documentation. 
If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and macros in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. +# This will remove the Files entry from the Quick Index and from the +# Folder Tree View (if specified). The default is YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the +# Namespaces page. +# This will remove the Namespaces entry from the Quick Index +# and from the Folder Tree View (if specified). The default is YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command <command> <input-file>, where <command> is the value of +# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file +# provided by doxygen. Whatever the program writes to standard output +# is used as the file version. See the manual for examples. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. +# You can optionally specify a file name after the option, if omitted +# DoxygenLayout.xml will be used as the name of the layout file. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files +# containing the references data. This must be a list of .bib files. The +# .bib extension is automatically appended if omitted. Using this command +# requires the bibtex tool to be installed. See also +# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style +# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this +# feature you need bibtex and perl available in the search path. Do not use +# file names with spaces, bibtex cannot handle them. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = YES + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled.
+ +WARN_IF_UNDOCUMENTED = YES + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. + +WARN_IF_DOC_ERROR = YES + +# The WARN_NO_PARAMDOC option can be enabled to get warnings for +# functions that are documented, but have no documentation for their parameters +# or return value. If set to NO (the default) doxygen will only warn about +# wrong or incomplete parameter documentation, but not about the absence of +# documentation. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. Optionally the format may contain +# $version, which will be replaced by the version of the file (if it could +# be obtained via FILE_VERSION_FILTER) + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = @INPUT_PATHS@ + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is +# also the default input encoding. Doxygen uses libiconv (or the iconv built +# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for +# the list of possible encodings. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh +# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py +# *.f90 *.f *.for *.vhd *.vhdl + +FILE_PATTERNS = *.h + +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. 
+ +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain images that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command <filter> <input-file>, where <filter> +# is the value of the INPUT_FILTER tag, and <input-file> is the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. +# If FILTER_PATTERNS is specified, this tag will be ignored. +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. +# Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. +# The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further +# info on how filters are used. If FILTER_PATTERNS is empty or if +# none of the patterns match the file name, INPUT_FILTER is applied. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) +# and it is also possible to disable source filtering for a specific pattern +# using *.ext= (so without naming a filter). This option only has effect when +# FILTER_SOURCE_FILES is enabled.
+ +FILTER_SOURCE_PATTERNS = + +# If the USE_MD_FILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources. +# Note: To get rid of all source code in the generated output, make sure also +# VERBATIM_HEADERS is set to NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. Normal C, C++ and Fortran comments will always remain visible. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES +# then for each documented function all documented +# functions referencing it will be listed. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES +# then for each documented function all documented entities +# called/used by that function will be listed. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) +# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from +# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will +# link to the source code. +# Otherwise they will link to the documentation. + +REFERENCES_LINK_SOURCE = YES + +# If the USE_HTAGS tag is set to YES then the references to source code +# will point to the HTML generated by the htags(1) tool instead of doxygen +# built-in source browser. The htags tool is part of GNU's global source +# tagging system (see http://www.gnu.org/software/global/global.html). You +# will need version 4.8.6 or higher. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for +# which an include is specified. Set to NO to disable this. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project +# contains a lot of classes, structs, unions or interfaces. + +ALPHABETICAL_INDEX = NO + +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# in which this list will be split (can be a number in the range [1..20]) + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. 
+ +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = NO + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. Note that when using a custom header you are responsible +# for the proper inclusion of any scripts and style sheets that doxygen +# needs, which is dependent on the configuration options used. +# It is advised to generate a default header using "doxygen -w html +# header.html footer.html stylesheet.css YourConfigFile" and then modify +# that header. Note that the header is subject to change so you typically +# have to redo this when upgrading to a newer version of doxygen or when +# changing the value of configuration settings such as GENERATE_TREEVIEW! + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If left blank doxygen will +# generate a default style sheet. Note that it is recommended to use +# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this +# tag will in the future become obsolete. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional +# user-defined cascading style sheet that is included after the standard +# style sheets created by doxygen. Using this option one can overrule +# certain style aspects. This is preferred over using HTML_STYLESHEET +# since it does not replace the standard style sheet and is therefor more +# robust against future updates. Doxygen will copy the style sheet file to +# the output directory. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that +# the files will be copied as-is; there are no commands or markers available. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. +# Doxygen will adjust the colors in the style sheet and background images +# according to this color. Hue is specified as an angle on a colorwheel, +# see http://en.wikipedia.org/wiki/Hue for more information. 
+# For instance the value 0 represents red, 60 is yellow, 120 is green, +# 180 is cyan, 240 is blue, 300 purple, and 360 is red again. +# The allowed range is 0 to 359. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of +# the colors in the HTML output. For a value of 0 the output will use +# grayscales only. A value of 255 will produce the most vivid colors. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to +# the luminance component of the colors in the HTML output. Values below +# 100 gradually make the output lighter, whereas values above 100 make +# the output darker. The value divided by 100 is the actual gamma applied, +# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, +# and 100 does not change the gamma. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting +# this to NO can help when comparing the output of multiple runs. + +HTML_TIMESTAMP = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of +# entries shown in the various tree structured indices initially; the user +# can expand and collapse entries dynamically later on. Doxygen will expand +# the tree to such a level that at most the specified number of entries are +# visible (unless a fully collapsed tree already exceeds this amount). +# So setting the number of entries 1 will produce a full collapsed tree by +# default. 0 is a special value representing an infinite number of entries +# and will result in a full expanded tree by default. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files +# will be generated that can be used as input for Apple's Xcode 3 +# integrated development environment, introduced with OSX 10.5 (Leopard). +# To create a documentation set, doxygen will generate a Makefile in the +# HTML output directory. Running make will produce the docset in that +# directory and running "make install" will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find +# it at startup. +# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. + +GENERATE_DOCSET = NO + +# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the +# feed. A documentation feed provides an umbrella under which multiple +# documentation sets from a single provider (such as a company or product suite) +# can be grouped. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that +# should uniquely identify the documentation set bundle. This should be a +# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen +# will append .docset to the name. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely +# identify the documentation publisher. This should be a reverse domain-name +# style string, e.g. com.mycompany.MyDocSet.documentation. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. 
+ +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output directory. + +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. + +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). + +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING +# is used to encode HtmlHelp index (hhk), content (hhc) and project file +# content. + +CHM_INDEX_ENCODING = + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated +# that can be used as input for Qt's qhelpgenerator to generate a +# Qt Compressed Help (.qch) of the generated HTML documentation. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can +# be used to specify the file name of the resulting .qch file. +# The path specified is relative to the HTML output folder. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#namespace + +QHP_NAMESPACE = + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#virtual-folders + +QHP_VIRTUAL_FOLDER = doc + +# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to +# add. For more information please see +# http://doc.trolltech.com/qthelpproject.html#custom-filters + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see +# +# Qt Help Project / Custom Filters. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's +# filter section matches. +# +# Qt Help Project / Filter Attributes. + +QHP_SECT_FILTER_ATTRS = + +# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can +# be used to specify the location of Qt's qhelpgenerator. +# If non-empty doxygen will try to run qhelpgenerator on the generated +# .qhp file. 
+ +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files +# will be generated, which together with the HTML files, form an Eclipse help +# plugin. To install this plugin and make it available under the help contents +# menu in Eclipse, the contents of the directory containing the HTML and XML +# files needs to be copied into the plugins directory of eclipse. The name of +# the directory within the plugins directory should be the same as +# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before +# the help appears. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have +# this name. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) +# at top of each HTML page. The value NO (the default) enables the index and +# the value YES disables it. Since the tabs have the same information as the +# navigation tree you can set this option to NO if you already set +# GENERATE_TREEVIEW to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. +# If the tag value is set to YES, a side panel will be generated +# containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). +# Windows users are probably better off using the HTML help feature. +# Since the tree basically has the same information as the tab index you +# could consider to set DISABLE_INDEX to NO when enabling this option. + +GENERATE_TREEVIEW = NO + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values +# (range [0,1..20]) that doxygen will group on one line in the generated HTML +# documentation. Note that a value of 0 will completely suppress the enum +# values from appearing in the overview section. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. + +TREEVIEW_WIDTH = 250 + +# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open +# links to external symbols imported via tag files in a separate window. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of Latex formulas included +# as images in the HTML documentation. The default is 10. Note that +# when you change the font size after a successful doxygen run you need +# to manually remove any form_*.png images from the HTML output directory +# to force them to be regenerated. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are +# not supported properly for IE 6.0, but are supported on all modern browsers. +# Note that when changing this option you need to delete any form_*.png files +# in the HTML output before the changes have effect. + +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax +# (see http://www.mathjax.org) which uses client side Javascript for the +# rendering instead of using prerendered bitmaps. 
Use this if you do not +# have LaTeX installed or if you want to formulas look prettier in the HTML +# output. When enabled you may also need to install MathJax separately and +# configure the path to it using the MATHJAX_RELPATH option. + +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and +# SVG. The default value is HTML-CSS, which is slower, but has the best +# compatibility. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the +# HTML output directory using the MATHJAX_RELPATH option. The destination +# directory should contain the MathJax.js script. For instance, if the mathjax +# directory is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to +# the MathJax Content Delivery Network so you can quickly see the result without +# installing MathJax. +# However, it is strongly recommended to install a local +# copy of MathJax from http://www.mathjax.org before deployment. + +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest + +# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension +# names that should be enabled during MathJax rendering. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript +# pieces of code that will be used on startup of the MathJax code. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box +# for the HTML output. The underlying search engine uses javascript +# and DHTML and should work on any modern browser. Note that when using +# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets +# (GENERATE_DOCSET) there is already a search function so this one should +# typically be disabled. For large projects the javascript based search engine +# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. + +SEARCHENGINE = YES + +# When the SERVER_BASED_SEARCH tag is enabled the search engine will be +# implemented using a web server instead of a web client using Javascript. +# There are two flavours of web server based search depending on the +# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for +# searching and an index file used by the script. When EXTERNAL_SEARCH is +# enabled the indexing and searching needs to be provided by external tools. +# See the manual for details. + +SERVER_BASED_SEARCH = NO + +# When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP +# script for searching. Instead the search results are written to an XML file +# which needs to be processed by an external indexer. Doxygen will invoke an +# external search engine pointed to by the SEARCHENGINE_URL option to obtain +# the search results. Doxygen ships with an example indexer (doxyindexer) and +# search engine (doxysearch.cgi) which are based on the open source search +# engine library Xapian. See the manual for configuration details. + +EXTERNAL_SEARCH = NO + +# The SEARCHENGINE_URL should point to a search engine hosted by a web server +# which will returned the search results when EXTERNAL_SEARCH is enabled. +# Doxygen ships with an example search engine (doxysearch) which is based on +# the open source search engine library Xapian. See the manual for configuration +# details. 
+ +SEARCHENGINE_URL = + +# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed +# search data is written to a file for indexing by an external tool. With the +# SEARCHDATA_FILE tag the name of this file can be specified. + +SEARCHDATA_FILE = searchdata.xml + +# When SERVER_BASED_SEARCH AND EXTERNAL_SEARCH are both enabled the +# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is +# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple +# projects and redirect the results back to the right project. + +EXTERNAL_SEARCH_ID = + +# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen +# projects other than the one defined by this configuration file, but that are +# all added to the same external search index. Each project needs to have a +# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id +# of to a relative location where the documentation can be found. +# The format is: EXTRA_SEARCH_MAPPINGS = id1=loc1 id2=loc2 ... + +EXTRA_SEARCH_MAPPINGS = + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. + +GENERATE_LATEX = NO + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. +# Note that when enabling USE_PDFLATEX this option is only used for +# generating bitmaps for formulas in the HTML output, but not in the +# Makefile that is written to the output directory. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, letter, legal and +# executive. If left blank a4 will be used. + +PAPER_TYPE = a4wide + +# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. + +EXTRA_PACKAGES = + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for +# the generated latex document. The footer should contain everything after +# the last chapter. If it is left blank doxygen will generate a +# standard footer. Notice: only use this tag if you know what you are doing! 
+ +LATEX_FOOTER = + +# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images +# or other source files which should be copied to the LaTeX output directory. +# Note that the files will be copied as-is; there are no commands or markers +# available. + +LATEX_EXTRA_FILES = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). The pdf file will +# contain links (just like the HTML output) instead of page references +# This makes the output suitable for online browsing using a pdf viewer. + +PDF_HYPERLINKS = YES + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = YES + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. +# command to the generated LaTeX files. This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. + +LATEX_BATCHMODE = NO + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output. + +LATEX_HIDE_INDICES = NO + +# If LATEX_SOURCE_CODE is set to YES then doxygen will include +# source code with syntax highlighting in the LaTeX output. +# Note that which sources are shown also depends on other settings +# such as SOURCE_BROWSER. + +LATEX_SOURCE_CODE = NO + +# The LATEX_BIB_STYLE tag can be used to specify the style to use for the +# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See +# http://en.wikipedia.org/wiki/BibTeX for more info. + +LATEX_BIB_STYLE = plain + +#--------------------------------------------------------------------------- +# configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output +# The RTF output is optimized for Word 97 and may not look very pretty with +# other RTF readers or editors. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load style sheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. 
+ +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = NO + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. + +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. + +GENERATE_XML = YES + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. + +XML_OUTPUT = xml + +# The XML_SCHEMA tag can be used to specify an XML schema, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_SCHEMA = + +# The XML_DTD tag can be used to specify an XML DTD, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_DTD = + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. + +XML_PROGRAMLISTING = YES + +#--------------------------------------------------------------------------- +# configuration options related to the DOCBOOK output +#--------------------------------------------------------------------------- + +# If the GENERATE_DOCBOOK tag is set to YES Doxygen will generate DOCBOOK files +# that can be used to generate PDF. + +GENERATE_DOCBOOK = NO + +# The DOCBOOK_OUTPUT tag is used to specify where the DOCBOOK pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in +# front of it. If left blank docbook will be used as the default path. + +DOCBOOK_OUTPUT = docbook + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. 
+ +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. +# This is useful +# if you want to understand what is going on. +# On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. +# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. + +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. + +MACRO_EXPANSION = YES + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_DEFINED tags. + +EXPAND_ONLY_PREDEF = NO + +# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files +# pointed to by INCLUDE_PATH will be searched when a #include is found. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. + +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. 
+ +PREDEFINED = + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition that +# overrules the definition found in the source code. + +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all references to function-like macros +# that are alone on a line, have an all uppercase name, and do not end with a +# semicolon, because these will confuse the parser if not removed. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES option can be used to specify one or more tagfiles. For each +# tag file the location of the external documentation should be added. The +# format of a tag file without this location is as follows: +# +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths +# or URLs. Note that each tag file must have a unique name (where the name does +# NOT include the path). If a tag file is not located in the directory in which +# doxygen is run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# a tag file that is based on the input files it reads. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes +# will be listed. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will +# be listed. + +EXTERNAL_GROUPS = YES + +# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed +# in the related pages index. If set to NO, only the current project's +# pages will be listed. + +EXTERNAL_PAGES = YES + +# The PERL_PATH should be the absolute path and name of the perl script +# interpreter (i.e. the result of `which perl'). + +PERL_PATH = /usr/bin/perl + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will +# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base +# or super classes. Setting the tag to NO turns the diagrams off. Note that +# this option also works with HAVE_DOT disabled, but it is recommended to +# install and use dot, since it yields more powerful graphs. + +CLASS_DIAGRAMS = YES + +# You can define message sequence charts within doxygen comments using the \msc +# command. Doxygen will then run the mscgen tool (see +# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the +# documentation. The MSCGEN_PATH tag allows you to specify the directory where +# the mscgen tool resides. If left empty the tool is assumed to be found in the +# default search path. 
+ +MSCGEN_PATH = + +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented +# or is not a class. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. The other options in this section +# have no effect if this option is set to NO (the default) + +HAVE_DOT = NO + +# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is +# allowed to run in parallel. When set to 0 (the default) doxygen will +# base this on the number of processors available in the system. You can set it +# explicitly to a value larger than 0 to get control over the balance +# between CPU load and processing speed. + +DOT_NUM_THREADS = 0 + +# By default doxygen will use the Helvetica font for all dot files that +# doxygen generates. When you want a differently looking font you can specify +# the font name using DOT_FONTNAME. You need to make sure dot is able to find +# the font, which can be done by putting it in a standard location or by setting +# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the +# directory containing the font. + +DOT_FONTNAME = Helvetica + +# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. +# The default size is 10pt. + +DOT_FONTSIZE = 10 + +# By default doxygen will tell dot to use the Helvetica font. +# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to +# set the path where dot can find it. + +DOT_FONTPATH = + +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force the +# CLASS_DIAGRAMS tag to NO. + +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect implementation dependencies (inheritance, containment, and +# class references variables) of the class with other documented classes. + +COLLABORATION_GRAPH = YES + +# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for groups, showing the direct groups dependencies + +GROUP_GRAPHS = YES + +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. + +UML_LOOK = NO + +# If the UML_LOOK tag is enabled, the fields and methods are shown inside +# the class node. If there are many fields or methods and many nodes the +# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS +# threshold limits the number of items for each type to make the size more +# manageable. Set this to 0 for no limit. Note that the threshold may be +# exceeded by 50% before the limit is enforced. + +UML_LIMIT_NUM_FIELDS = 10 + +# If set to YES, the inheritance and collaboration graphs will show the +# relations between templates and their instances. + +TEMPLATE_RELATIONS = NO + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT +# tags are set to YES then doxygen will generate a graph for each documented +# file showing the direct and indirect include dependencies of the file with +# other documented files. 
+ +INCLUDE_GRAPH = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and +# HAVE_DOT tags are set to YES then doxygen will generate a graph for each +# documented header file showing the documented files that directly or +# indirectly include this file. + +INCLUDED_BY_GRAPH = YES + +# If the CALL_GRAPH and HAVE_DOT options are set to YES then +# doxygen will generate a call dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable call graphs +# for selected functions only using the \callgraph command. + +CALL_GRAPH = NO + +# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then +# doxygen will generate a caller dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable caller +# graphs for selected functions only using the \callergraph command. + +CALLER_GRAPH = NO + +# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen +# will generate a graphical hierarchy of all classes instead of a textual one. + +GRAPHICAL_HIERARCHY = YES + +# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES +# then doxygen will show the dependencies a directory has on other directories +# in a graphical way. The dependency relations are determined by the #include +# relations between the files in the directories. + +DIRECTORY_GRAPH = YES + +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# generated by dot. Possible values are svg, png, jpg, or gif. +# If left blank png will be used. If you choose svg you need to set +# HTML_FILE_EXTENSION to xhtml in order to make the SVG files +# visible in IE 9+ (other browsers do not have this requirement). + +DOT_IMAGE_FORMAT = png + +# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to +# enable generation of interactive SVG images that allow zooming and panning. +# Note that this requires a modern browser other than Internet Explorer. +# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you +# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files +# visible. Older versions of IE do not have SVG support. + +INTERACTIVE_SVG = NO + +# The tag DOT_PATH can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found in the path. + +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the +# \dotfile command). + +DOTFILE_DIRS = + +# The MSCFILE_DIRS tag can be used to specify one or more directories that +# contain msc files that are included in the documentation (see the +# \mscfile command). + +MSCFILE_DIRS = + +# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of +# nodes that will be shown in the graph. If the number of nodes in a graph +# becomes larger than this value, doxygen will truncate the graph, which is +# visualized by representing a node as a red box. Note that doxygen if the +# number of direct children of the root node in a graph is already larger than +# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note +# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. 
+ +DOT_GRAPH_MAX_NODES = 50 + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the +# graphs generated by dot. A depth value of 3 means that only nodes reachable +# from the root by following a path via at most 3 edges will be shown. Nodes +# that lay further from the root node will be omitted. Note that setting this +# option to 1 or 2 may greatly reduce the computation time needed for large +# code bases. Also note that the size of a graph can be further restricted by +# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. + +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is disabled by default, because dot on Windows does not +# seem to support this out of the box. Warning: Depending on the platform used, +# enabling this option may lead to badly anti-aliased labels on the edges of +# a graph (i.e. they become hard to read). + +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). This +# makes dot run faster, but since only newer versions of dot (>1.8.10) +# support this, this feature is disabled by default. + +DOT_MULTI_TARGETS = YES + +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and +# arrows in the dot generated graphs. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate +# the various graphs. + +DOT_CLEANUP = YES diff --git a/gr-adsbtx/docs/doxygen/doxyxml/__init__.py b/gr-adsbtx/docs/doxygen/doxyxml/__init__.py new file mode 100644 index 0000000..5cd0b3c --- /dev/null +++ b/gr-adsbtx/docs/doxygen/doxyxml/__init__.py @@ -0,0 +1,82 @@ +# +# Copyright 2010 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. +# +""" +Python interface to contents of doxygen xml documentation. + +Example use: +See the contents of the example folder for the C++ and +doxygen-generated xml used in this example. + +>>> # Parse the doxygen docs. +>>> import os +>>> this_dir = os.path.dirname(globals()['__file__']) +>>> xml_path = this_dir + "/example/xml/" +>>> di = DoxyIndex(xml_path) + +Get a list of all top-level objects. + +>>> print([mem.name() for mem in di.members()]) +[u'Aadvark', u'aadvarky_enough', u'main'] + +Get all functions. + +>>> print([mem.name() for mem in di.in_category(DoxyFunction)]) +[u'aadvarky_enough', u'main'] + +Check if an object is present. + +>>> di.has_member(u'Aadvark') +True +>>> di.has_member(u'Fish') +False + +Get an item by name and check its properties. 
+ +>>> aad = di.get_member(u'Aadvark') +>>> print(aad.brief_description) +Models the mammal Aadvark. +>>> print(aad.detailed_description) +Sadly the model is incomplete and cannot capture all aspects of an aadvark yet. + +This line is uninformative and is only to test line breaks in the comments. +>>> [mem.name() for mem in aad.members()] +[u'aadvarkness', u'print', u'Aadvark', u'get_aadvarkness'] +>>> aad.get_member(u'print').brief_description +u'Outputs the vital aadvark statistics.' + +""" + +from doxyindex import DoxyIndex, DoxyFunction, DoxyParam, DoxyClass, DoxyFile, DoxyNamespace, DoxyGroup, DoxyFriend, DoxyOther + +def _test(): + import os + this_dir = os.path.dirname(globals()['__file__']) + xml_path = this_dir + "/example/xml/" + di = DoxyIndex(xml_path) + # Get the Aadvark class + aad = di.get_member('Aadvark') + aad.brief_description + import doctest + return doctest.testmod() + +if __name__ == "__main__": + _test() + diff --git a/gr-adsbtx/docs/doxygen/doxyxml/__init__.pyc b/gr-adsbtx/docs/doxygen/doxyxml/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..331ed04090bb4c91358d6e8fc3cbdfa4f33cc9a6 GIT binary patch literal 2208 zcmb_dOK%%D5T4b;kF|rgryw^XJ!l2j(!EHGxUL-pO`TxvlMG`)?NSouU2;KkT??>J z>5nV=cX}<*&TuKYM%to>3Rr4}oaZ;gVfVLZ@h`tempO!A2mjyTXC*ELzy#L_iVl9-!jE43=>_gFm zn+_NOVh=t6L<-IK;Ud{0m%sJ^;*Tr0(27argqiVNFsE6rkyFkxJ!6HwTg`=HcU6gW z-c)$Noz!X+$ESC^s!PEdE53~{1nMrt6J_E>5Sr<-5QfdP@nR{iMhF60uo5@mS+2DHUwJ_C;WKnBl;DEgxhfKzJK7Q#X3yCt{BSP;~A?}j3 z;xLZi3dcArrNvlgjF%H7>Oz#(w{69%ikIxTMZWF1N8Lg=j;&NYRlndxxoWk^5P>39uIcKy5Ik2(JC^g> zH3qk^Zrs+?B))qT3$U}5Kp#FgO3RVp*tP!&p-4b0rS|1)Q2G=C9L&*=RxpPz{b;r_+KTjeTB)| zlUlUO!_Tpgo56?1)eQncv*cDf1V0#LmIje^67T*Xj^!WtOLlRvIa)Ti((MOs-N~C{ zoFly2a3>|V*6pl?v5f9nV9v40Y>mTlBmCCrrgrgK7+Xdao5K$ns__$^#%veA6a4HP z7nrv~O^(1u^O(78>04#e-4ify@ z#hxKGZmb#G1vg?mSB>F?)MK>nv@XYUbEw2m$c^cRz{S61{-1_;mPsXDmW}F_Bg8U_ z>=X(6B`y%{M=zo{+V6Z3Z6h6!qyd{{=LtdS)5$V4o@KuL&u<(_6|SBck3C~2c9M+~Z#KKlD%mx#u@PEr#40S=#QTF^-lb`v(B8xV37`ZLs5AtLH-@B1NGfS_ENv0X zN;)@|&YAd%;Dj>fO#(F0oHw6(<^+;XEm^1~T@!cs@$RD8N4aF;u9{~|yr{}qeX^tp zFv7B$XX@sPiO;IrIZd?8VRz4)ctyes5}uRrIn~dre$m7i)M3@c|B#1~b$Y~oc_ z)=Yd!mFG=-Sru$#O_di+{JbhxP0(9^0RWaA6kDUCLE49&#`sb#!k21r_~HZ9&3Xex zNpTov(J%=^sV@wNc|01R>3XRv3(H|PN4D6s2QaVA{cKc3ahh+Uf0^fl&Hds=mVAQN zCT$0Wj7h&Rs7;#i#?f&==Xr(WasIYfF5zc8?~VEg-%f`2lj6T!dO%wW=1`o{LfaVn z^LVBg`e9VqG~1VX|KBW>MDf~Wp?xCEhXCKD{KqItW1c|OUR`OJC!QotT0LL2OzJ9g zb){Wb=FJmOnK2JKrr2fuu%k~pCcDha(?;F1TMvs4m;%Et^246EV0)d)E~&G3qk&CI zt~@*frb%29?l2*5l#63=7)NE~jC3gLu;m%PjMIJ@))EdHrddeC-_MIp>6bh1dhxiRV3Ss&CVT zP*KwyqS+)ldid!H3VRD<`(Mv&8D(V(T&v)-SL8!K@&R2M`v?dJN#TM7(Sl?6`TfJB zUvBuqYVe4A6bmTlCI*OF((W>yx7R(EVi7BwSLJP(Epx;(A3WFl!=+ zvT1Gs51`&t2y=KiBO;$1L{?B}pR_t{R$YVVI6CKona+IY0ny&UV`($STXx&ybmQ&4 zjmG0^w9Zt&at1u0}Jpca=(XNvr{^U)rL=uS;gC*%J8af+v^x>3vQU@rN5VE zu^-h=nirKH`mbKMuWmSONUC7WP+4SR#fbeVbMz}#?ZMgOsE8HaL{Bg)kMiE6&0|r^F7hw%WO{ElZF{wV?jrCy2-OgNr9BRWDb|i9%~vz-%Y|$ihUVN ze=%MYcwdBzE`tacYCC?$O&Nrpn*uxFTgP`URnap=G+^-PC(iT4Kc_=D3z##_`h!uN z*c^4`Yo;;g7OqLgLbt@Cr`HEyQ_Zt6S4J$yPX-Jw9!U_R3SxUuHc zX@8GZb^YqUcfo-B=_4Fv>zvS!eCCX4o<#+Yz7{D=IVv*v>o7VxV&>wqNuNg{9DMWf zSWXU?-eaayjz`?^eNI#L=NlfnyP*ro{Efv&NgR?Us7_oz8v(?{C4EPH?mQDx#HI|{n3*L*37oo{EYU9J&I8*rwIvzXA z2B6X)4AvdS5aD|}*+tW$8o-UnI$k%P=K+u2RKDRLrCkJEK=4Ht$}kpCo3OwHpwls5 zX$EO_rI}x2@+whlB5hcpuh$WdzjyhbY{A{>iqm5ad#)}87zSk|0nZMn8R3W>Zxnum zU&{u4?|)q^iQ_aEOA@Gp&u>wEcD;N_*Acj6hEJt$`D7qRYco zoGn`HPm*Yw$KRtlZr$q{kO&n6-q5L&1ky5xpa_(>qed1j(m-Hl^uDbK4MG4>Fm1+{ 
z9dn5D3-|@?hqn#>--##r7GqdR4dd8d;u88%61z(pzNl}AvaH$o!wE-;A*ZWRKmMg+ico5d6E5K#={(z=@;&O#(dS^994Sh-8^3O}ZC z97Sm{bM7yhV&n20*Y33gVO#chkXCHMpn0yfgxq^s2W4DF(2eLNV>UTJ1F5XjGNDI^ zL#ZfmiRT^99IujesXm=_@H(6~JK1YE*er-*`lJ}%LwIKOz4w76yWlchj*7nH!wym1 zmicYq2F+eVQ%6Mn1wnr*z|USfNc`*8-S2Uwxvd~94}#ax4oCp*+JT#q9+YYZU*iir z6`%3Mr@%aQVkfdsFx)|V5(#P z)J~fCAo!4lltjll#qB5}{26mthBy-SzuUxrj4R{yp5jtv&APh&3)kx)p9(BAfxCRe zl+r;CSb3SgRy7gzc}eSzLDB^{7v4KLSu4(yDP7j897ubU|J=rlQ{14q_OF=3(iM%; zovydN{vsNazrzGCvnealYH1*DwX3Mso~OAc4<9S0!|DCF43c z2Xq_diUHGRm-^gd?W-)_V(|?YAFvQ4OgMw@upmqm#!zimhk7dY8Xn7t)NZUc`EThQ z?oJl*bUUqfqch)GTy8D5I*@ldOJ|qnJ1zWM?sQPjby`Y~6@bDJe=`fi;F}mrQS2zb zU(&WgxEtch!fqt|%c`hSVQ>65*+7g+$lbM5z@?qeu63}y<^DqW7U@rkU%Rp7Ej3me HE6slbl*V-K literal 0 HcmV?d00001 diff --git a/gr-adsbtx/docs/doxygen/doxyxml/doxyindex.py b/gr-adsbtx/docs/doxygen/doxyxml/doxyindex.py new file mode 100644 index 0000000..0132ab8 --- /dev/null +++ b/gr-adsbtx/docs/doxygen/doxyxml/doxyindex.py @@ -0,0 +1,237 @@ +# +# Copyright 2010 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. +# +""" +Classes providing more user-friendly interfaces to the doxygen xml +docs than the generated classes provide. +""" + +import os + +from generated import index +from base import Base +from text import description + +class DoxyIndex(Base): + """ + Parses a doxygen xml directory. + """ + + __module__ = "gnuradio.utils.doxyxml" + + def _parse(self): + if self._parsed: + return + super(DoxyIndex, self)._parse() + self._root = index.parse(os.path.join(self._xml_path, 'index.xml')) + for mem in self._root.compound: + converted = self.convert_mem(mem) + # For files we want the contents to be accessible directly + # from the parent rather than having to go through the file + # object. 
+ if self.get_cls(mem) == DoxyFile: + if mem.name.endswith('.h'): + self._members += converted.members() + self._members.append(converted) + else: + self._members.append(converted) + + +def generate_swig_doc_i(self): + """ + %feature("docstring") gr_make_align_on_samplenumbers_ss::align_state " + Wraps the C++: gr_align_on_samplenumbers_ss::align_state"; + """ + pass + + +class DoxyCompMem(Base): + + + kind = None + + def __init__(self, *args, **kwargs): + super(DoxyCompMem, self).__init__(*args, **kwargs) + + @classmethod + def can_parse(cls, obj): + return obj.kind == cls.kind + + def set_descriptions(self, parse_data): + bd = description(getattr(parse_data, 'briefdescription', None)) + dd = description(getattr(parse_data, 'detaileddescription', None)) + self._data['brief_description'] = bd + self._data['detailed_description'] = dd + +class DoxyCompound(DoxyCompMem): + pass + +class DoxyMember(DoxyCompMem): + pass + + +class DoxyFunction(DoxyMember): + + __module__ = "gnuradio.utils.doxyxml" + + kind = 'function' + + def _parse(self): + if self._parsed: + return + super(DoxyFunction, self)._parse() + self.set_descriptions(self._parse_data) + self._data['params'] = [] + prms = self._parse_data.param + for prm in prms: + self._data['params'].append(DoxyParam(prm)) + + brief_description = property(lambda self: self.data()['brief_description']) + detailed_description = property(lambda self: self.data()['detailed_description']) + params = property(lambda self: self.data()['params']) + +Base.mem_classes.append(DoxyFunction) + + +class DoxyParam(DoxyMember): + + __module__ = "gnuradio.utils.doxyxml" + + def _parse(self): + if self._parsed: + return + super(DoxyParam, self)._parse() + self.set_descriptions(self._parse_data) + self._data['declname'] = self._parse_data.declname + + brief_description = property(lambda self: self.data()['brief_description']) + detailed_description = property(lambda self: self.data()['detailed_description']) + declname = property(lambda self: self.data()['declname']) + +class DoxyClass(DoxyCompound): + + __module__ = "gnuradio.utils.doxyxml" + + kind = 'class' + + def _parse(self): + if self._parsed: + return + super(DoxyClass, self)._parse() + self.retrieve_data() + if self._error: + return + self.set_descriptions(self._retrieved_data.compounddef) + # Sectiondef.kind tells about whether private or public. + # We just ignore this for now. 
+ self.process_memberdefs() + + brief_description = property(lambda self: self.data()['brief_description']) + detailed_description = property(lambda self: self.data()['detailed_description']) + +Base.mem_classes.append(DoxyClass) + + +class DoxyFile(DoxyCompound): + + __module__ = "gnuradio.utils.doxyxml" + + kind = 'file' + + def _parse(self): + if self._parsed: + return + super(DoxyFile, self)._parse() + self.retrieve_data() + self.set_descriptions(self._retrieved_data.compounddef) + if self._error: + return + self.process_memberdefs() + + brief_description = property(lambda self: self.data()['brief_description']) + detailed_description = property(lambda self: self.data()['detailed_description']) + +Base.mem_classes.append(DoxyFile) + + +class DoxyNamespace(DoxyCompound): + + __module__ = "gnuradio.utils.doxyxml" + + kind = 'namespace' + +Base.mem_classes.append(DoxyNamespace) + + +class DoxyGroup(DoxyCompound): + + __module__ = "gnuradio.utils.doxyxml" + + kind = 'group' + + def _parse(self): + if self._parsed: + return + super(DoxyGroup, self)._parse() + self.retrieve_data() + if self._error: + return + cdef = self._retrieved_data.compounddef + self._data['title'] = description(cdef.title) + # Process inner groups + grps = cdef.innergroup + for grp in grps: + converted = DoxyGroup.from_refid(grp.refid, top=self.top) + self._members.append(converted) + # Process inner classes + klasses = cdef.innerclass + for kls in klasses: + converted = DoxyClass.from_refid(kls.refid, top=self.top) + self._members.append(converted) + # Process normal members + self.process_memberdefs() + + title = property(lambda self: self.data()['title']) + + +Base.mem_classes.append(DoxyGroup) + + +class DoxyFriend(DoxyMember): + + __module__ = "gnuradio.utils.doxyxml" + + kind = 'friend' + +Base.mem_classes.append(DoxyFriend) + + +class DoxyOther(Base): + + __module__ = "gnuradio.utils.doxyxml" + + kinds = set(['variable', 'struct', 'union', 'define', 'typedef', 'enum', 'dir', 'page']) + + @classmethod + def can_parse(cls, obj): + return obj.kind in cls.kinds + +Base.mem_classes.append(DoxyOther) + diff --git a/gr-adsbtx/docs/doxygen/doxyxml/doxyindex.pyc b/gr-adsbtx/docs/doxygen/doxyxml/doxyindex.pyc new file mode 100644 index 0000000000000000000000000000000000000000..982bb2e7a0d67021bb6e74f55869ebd9deb6bf0c GIT binary patch literal 9889 zcmd5?O>i7X74F$TNh`_v-`H`SY=kK`m`H>aggC}vOR+0~Y@87lgQ{G^Xs5N>)$Y!E zde)YzEQ%@_j!*@coH)ZJSGaJ-N2)k+<;0cZN)>$HYt8IhuE;jl3d!!?{_oeX-}~O{ z?#cdZviRuV&JSzq;4h2c?;)5!J4yvgts{0+kWnp1ts{Z|S=Gv@c2=!tmBV~awQ{PR zSL=D3&#P8JwMW$Y2=WD`^J){VM$|*4_Lcha`lt#ORi(5<+RoOK&yG%>U=`&D`_hbk&d-sTU}rg zS^H|*sx;85WLss_TIDrvXd6*+rR5u=O}QI|_ZmT?Q!lr}NSAv?M{jOM4c!S^yX8hF z*3qV4L%TRE$6LA_ggd)+-6`+1Tg4!(p?u5lNDWGLMFBKdA!C$okIZ|;R zQKJ*+9h=P|d)+tM=Eji=w5dgnZrlhv@fh-}nDGO4NF|fwTt_e$5NU`HR*pDBnPfUc zEkQC9K?JkXzcS%;P5>LF9GG z%0PD0^tw87$rmg#sdACBn=LKD6s1A~UMSV|<1I-vL&z8B@h}BOL@}kqi^4FDN0F+9 z?QYoX1Tp2J7IyCGDE8XAEhN-+?A2OE8d64XG+J7Q?f7jirI2BBzkyLX7W-}J$VlHL zO?v%q7Y$|A(%tCRX3W)aB2srSu?{N8D2dEXq?UUoS~gpLq?Z$US#F0x9EPpsdh}*T z-$!nlN`gqVMt-qY`L)dL?z$x6D+nTa${BNV&YW`*VJf%a%sLBBDN`Ze>@qwq&-3CU zN<0rU^jgfyrg)gl^JKt2t;bI=nNvnEq`Z(VWC|Hk4XOvj3hPKQqW@wsfZ_Tjm*a`LeAtF%YJ_C_~+j z{4ViFmsc)by38@2t^4A8(!FX~M1o!Fe4k^-xx4WkhEAZ-gE-b@lN#@$ShPyzMY<`$ zD-idObi0Ns`6K79Bba4G2RJ9j7Haws2$v(qgU3iit=9_)@{JJr$bZ=t?iRzx2!fa~ zL`m?47zJcOI-`Dxt;4Wug2PqO2A9g{5@Qb}uhD75o@eXGF;N#ksvFV!?fXm)hl}p3 znC=FGWtmeWYmy-1HjM(pVS>j1$Yd?tVIg%JDktJ7>P_q*U&4*%aEP|x;dd-net@Yg z^%KTaP~taEN7u& z_6$U?Gx0Zdj=K2JqH$g^n@h4d{UDo9vI5aGpGUZxg1VatG2GrPLJnbt9d{5>=a2F| 
zS)Y?udGY+}whJtU1I$1rP7)*ZHg_sS&(%BS#MhK2=isjb#jM*1G;;3VI`9$6lB&Qy z)Y)&F3N_m$ZPseM%@o%sS|a*mVNev`y@VP82D^9x^iBsBOU-iN5$A ztG1EYpCA}QX5xiS0~;m~#E^0axNhP886+(C_r=dSXC0lYOHZzUv=RJc$6>k{RQM|` zzr7LoS0N6{AXwY~Y$TJTLbCf_ni`%*zR51$-Lv{eeU{L5+906LXrex7hH034|R#U$CjLiB$oMIl(RFIA% zf>r^YQwuwk_dwTLJPFy2$)Uso4_Ds@9`{?!US&kcu|hsX(I$xW5Dek44#(j>5L?A?^Ts0#FE zJc~9k2)Hvf2X{^=H75c+zrfUw`7cF#o0LuYd@2Fx*CZR^l>02dNyt2`*{L(&KCa#b z$zf?WrS@kCsb<6ePr&-qdp&uDrv2T-bwVmx|wvxy&!RJ;Cxh@KMXpqrNV4k(G3}SVCPl|3vgcZiKxS z!Rd~d%77gEAcq%eUYww4oEIfK@A7VB_0C&rFUxm7KGi~4gyiN7ASCz8{XQcendAnq(ZSQMa9XmPQP_qTwArxlf0A;4z#emqQG>o%!_lIk1~-JmK@lU%3#jqrznkoIi*M1BVbHP1>c=v0hM6$o_y} zZ?EkIMJc@xz9C;2q!{tyP{_sF&lXM^2S3Hkf8#=*z`>94Hrc=P=KZGN;58%$aZt;L z3xEt<9^WAG^hFOSEP%i}!X_o!e;f#WFZ@A*-~-vAJhJ9upE z)nY-1UWXr7un_9kz*`Mu|?DoTfsBSbyBeqBH9(allYh4j%)P=q2U6Y}NF z*k=gpnENF}ecN=ba(59!{{!LRt?iH&$fEEZx|f;Pm83El^~vI|Bhf^%b^Gt zGnJP(@p(q0h{SLvZ^VK9a)QFNf$w5ms)lSqtaoC0Zj(N z?f7&7-$vlQN*jS=xfHY@EbcLFw<~6V((97MA+kX3NUzvW6|NH74H;j^Ok}1V{L7Ti djg6LOOW9JcG(Vj!O_df9&mj~LUOV_L{uc;RKk)zn literal 0 HcmV?d00001 diff --git a/gr-adsbtx/docs/doxygen/doxyxml/generated/__init__.py b/gr-adsbtx/docs/doxygen/doxyxml/generated/__init__.py new file mode 100644 index 0000000..3982397 --- /dev/null +++ b/gr-adsbtx/docs/doxygen/doxyxml/generated/__init__.py @@ -0,0 +1,7 @@ +""" +Contains generated files produced by generateDS.py. + +These do the real work of parsing the doxygen xml files but the +resultant classes are not very friendly to navigate so the rest of the +doxyxml module processes them further. +""" diff --git a/gr-adsbtx/docs/doxygen/doxyxml/generated/__init__.pyc b/gr-adsbtx/docs/doxygen/doxyxml/generated/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..683a7d039eb4d1c9a1c12fd88bd0794046a94afa GIT binary patch literal 418 zcmYLFK~4iP4BWy26p26h*bB`E5UK=cgw&q6MCr!6jYyNq_O{(0@(_N&l>^RFN~DbA zj50Iv_1B&G_@1rX=DcQFm%7;~gr<2@oHeWYRlWOEFi$0mkC~yzjN}CabTJ{ro=O;6 zZ8zhEdtNQ}s4yA*=Cyu$Fh)_@3+c|yoV;D9tjM-g*KBS?Yl53$!+z<+Km)u_r;gDSq$qzP-{7AG|&chC)_l@{h@ zUdJFe3eJVnIycv2^AG&0tsR&wZ#9VR^FThtVpUj5Nw%l^j7ROPK4*pLV^dXsdl{W| WF6JoC>6qkJMIU~nJyvhl-TVNpz=8w- literal 0 HcmV?d00001 diff --git a/gr-adsbtx/docs/doxygen/doxyxml/generated/compound.py b/gr-adsbtx/docs/doxygen/doxyxml/generated/compound.py new file mode 100644 index 0000000..1522ac2 --- /dev/null +++ b/gr-adsbtx/docs/doxygen/doxyxml/generated/compound.py @@ -0,0 +1,503 @@ +#!/usr/bin/env python + +""" +Generated Mon Feb 9 19:08:05 2009 by generateDS.py. 
+""" + +from string import lower as str_lower +from xml.dom import minidom +from xml.dom import Node + +import sys + +import compoundsuper as supermod +from compoundsuper import MixedContainer + + +class DoxygenTypeSub(supermod.DoxygenType): + def __init__(self, version=None, compounddef=None): + supermod.DoxygenType.__init__(self, version, compounddef) + + def find(self, details): + + return self.compounddef.find(details) + +supermod.DoxygenType.subclass = DoxygenTypeSub +# end class DoxygenTypeSub + + +class compounddefTypeSub(supermod.compounddefType): + def __init__(self, kind=None, prot=None, id=None, compoundname='', title='', basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None): + supermod.compounddefType.__init__(self, kind, prot, id, compoundname, title, basecompoundref, derivedcompoundref, includes, includedby, incdepgraph, invincdepgraph, innerdir, innerfile, innerclass, innernamespace, innerpage, innergroup, templateparamlist, sectiondef, briefdescription, detaileddescription, inheritancegraph, collaborationgraph, programlisting, location, listofallmembers) + + def find(self, details): + + if self.id == details.refid: + return self + + for sectiondef in self.sectiondef: + result = sectiondef.find(details) + if result: + return result + + +supermod.compounddefType.subclass = compounddefTypeSub +# end class compounddefTypeSub + + +class listofallmembersTypeSub(supermod.listofallmembersType): + def __init__(self, member=None): + supermod.listofallmembersType.__init__(self, member) +supermod.listofallmembersType.subclass = listofallmembersTypeSub +# end class listofallmembersTypeSub + + +class memberRefTypeSub(supermod.memberRefType): + def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope='', name=''): + supermod.memberRefType.__init__(self, virt, prot, refid, ambiguityscope, scope, name) +supermod.memberRefType.subclass = memberRefTypeSub +# end class memberRefTypeSub + + +class compoundRefTypeSub(supermod.compoundRefType): + def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): + supermod.compoundRefType.__init__(self, mixedclass_, content_) +supermod.compoundRefType.subclass = compoundRefTypeSub +# end class compoundRefTypeSub + + +class reimplementTypeSub(supermod.reimplementType): + def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None): + supermod.reimplementType.__init__(self, mixedclass_, content_) +supermod.reimplementType.subclass = reimplementTypeSub +# end class reimplementTypeSub + + +class incTypeSub(supermod.incType): + def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None): + supermod.incType.__init__(self, mixedclass_, content_) +supermod.incType.subclass = incTypeSub +# end class incTypeSub + + +class refTypeSub(supermod.refType): + def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): + supermod.refType.__init__(self, mixedclass_, content_) +supermod.refType.subclass = refTypeSub +# end class refTypeSub + + + +class refTextTypeSub(supermod.refTextType): + def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, 
content_=None): + supermod.refTextType.__init__(self, mixedclass_, content_) + +supermod.refTextType.subclass = refTextTypeSub +# end class refTextTypeSub + +class sectiondefTypeSub(supermod.sectiondefType): + + + def __init__(self, kind=None, header='', description=None, memberdef=None): + supermod.sectiondefType.__init__(self, kind, header, description, memberdef) + + def find(self, details): + + for memberdef in self.memberdef: + if memberdef.id == details.refid: + return memberdef + + return None + + +supermod.sectiondefType.subclass = sectiondefTypeSub +# end class sectiondefTypeSub + + +class memberdefTypeSub(supermod.memberdefType): + def __init__(self, initonly=None, kind=None, volatile=None, const=None, raise_=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition='', argsstring='', name='', read='', write='', bitfield='', reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None): + supermod.memberdefType.__init__(self, initonly, kind, volatile, const, raise_, virt, readable, prot, explicit, new, final, writable, add, static, remove, sealed, mutable, gettable, inline, settable, id, templateparamlist, type_, definition, argsstring, name, read, write, bitfield, reimplements, reimplementedby, param, enumvalue, initializer, exceptions, briefdescription, detaileddescription, inbodydescription, location, references, referencedby) +supermod.memberdefType.subclass = memberdefTypeSub +# end class memberdefTypeSub + + +class descriptionTypeSub(supermod.descriptionType): + def __init__(self, title='', para=None, sect1=None, internal=None, mixedclass_=None, content_=None): + supermod.descriptionType.__init__(self, mixedclass_, content_) +supermod.descriptionType.subclass = descriptionTypeSub +# end class descriptionTypeSub + + +class enumvalueTypeSub(supermod.enumvalueType): + def __init__(self, prot=None, id=None, name='', initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None): + supermod.enumvalueType.__init__(self, mixedclass_, content_) +supermod.enumvalueType.subclass = enumvalueTypeSub +# end class enumvalueTypeSub + + +class templateparamlistTypeSub(supermod.templateparamlistType): + def __init__(self, param=None): + supermod.templateparamlistType.__init__(self, param) +supermod.templateparamlistType.subclass = templateparamlistTypeSub +# end class templateparamlistTypeSub + + +class paramTypeSub(supermod.paramType): + def __init__(self, type_=None, declname='', defname='', array='', defval=None, briefdescription=None): + supermod.paramType.__init__(self, type_, declname, defname, array, defval, briefdescription) +supermod.paramType.subclass = paramTypeSub +# end class paramTypeSub + + +class linkedTextTypeSub(supermod.linkedTextType): + def __init__(self, ref=None, mixedclass_=None, content_=None): + supermod.linkedTextType.__init__(self, mixedclass_, content_) +supermod.linkedTextType.subclass = linkedTextTypeSub +# end class linkedTextTypeSub + + +class graphTypeSub(supermod.graphType): + def __init__(self, node=None): + supermod.graphType.__init__(self, node) +supermod.graphType.subclass = graphTypeSub +# end class graphTypeSub + + +class nodeTypeSub(supermod.nodeType): + 
def __init__(self, id=None, label='', link=None, childnode=None): + supermod.nodeType.__init__(self, id, label, link, childnode) +supermod.nodeType.subclass = nodeTypeSub +# end class nodeTypeSub + + +class childnodeTypeSub(supermod.childnodeType): + def __init__(self, relation=None, refid=None, edgelabel=None): + supermod.childnodeType.__init__(self, relation, refid, edgelabel) +supermod.childnodeType.subclass = childnodeTypeSub +# end class childnodeTypeSub + + +class linkTypeSub(supermod.linkType): + def __init__(self, refid=None, external=None, valueOf_=''): + supermod.linkType.__init__(self, refid, external) +supermod.linkType.subclass = linkTypeSub +# end class linkTypeSub + + +class listingTypeSub(supermod.listingType): + def __init__(self, codeline=None): + supermod.listingType.__init__(self, codeline) +supermod.listingType.subclass = listingTypeSub +# end class listingTypeSub + + +class codelineTypeSub(supermod.codelineType): + def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None): + supermod.codelineType.__init__(self, external, lineno, refkind, refid, highlight) +supermod.codelineType.subclass = codelineTypeSub +# end class codelineTypeSub + + +class highlightTypeSub(supermod.highlightType): + def __init__(self, class_=None, sp=None, ref=None, mixedclass_=None, content_=None): + supermod.highlightType.__init__(self, mixedclass_, content_) +supermod.highlightType.subclass = highlightTypeSub +# end class highlightTypeSub + + +class referenceTypeSub(supermod.referenceType): + def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None): + supermod.referenceType.__init__(self, mixedclass_, content_) +supermod.referenceType.subclass = referenceTypeSub +# end class referenceTypeSub + + +class locationTypeSub(supermod.locationType): + def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''): + supermod.locationType.__init__(self, bodystart, line, bodyend, bodyfile, file) +supermod.locationType.subclass = locationTypeSub +# end class locationTypeSub + + +class docSect1TypeSub(supermod.docSect1Type): + def __init__(self, id=None, title='', para=None, sect2=None, internal=None, mixedclass_=None, content_=None): + supermod.docSect1Type.__init__(self, mixedclass_, content_) +supermod.docSect1Type.subclass = docSect1TypeSub +# end class docSect1TypeSub + + +class docSect2TypeSub(supermod.docSect2Type): + def __init__(self, id=None, title='', para=None, sect3=None, internal=None, mixedclass_=None, content_=None): + supermod.docSect2Type.__init__(self, mixedclass_, content_) +supermod.docSect2Type.subclass = docSect2TypeSub +# end class docSect2TypeSub + + +class docSect3TypeSub(supermod.docSect3Type): + def __init__(self, id=None, title='', para=None, sect4=None, internal=None, mixedclass_=None, content_=None): + supermod.docSect3Type.__init__(self, mixedclass_, content_) +supermod.docSect3Type.subclass = docSect3TypeSub +# end class docSect3TypeSub + + +class docSect4TypeSub(supermod.docSect4Type): + def __init__(self, id=None, title='', para=None, internal=None, mixedclass_=None, content_=None): + supermod.docSect4Type.__init__(self, mixedclass_, content_) +supermod.docSect4Type.subclass = docSect4TypeSub +# end class docSect4TypeSub + + +class docInternalTypeSub(supermod.docInternalType): + def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None): + supermod.docInternalType.__init__(self, mixedclass_, content_) 
+supermod.docInternalType.subclass = docInternalTypeSub +# end class docInternalTypeSub + + +class docInternalS1TypeSub(supermod.docInternalS1Type): + def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None): + supermod.docInternalS1Type.__init__(self, mixedclass_, content_) +supermod.docInternalS1Type.subclass = docInternalS1TypeSub +# end class docInternalS1TypeSub + + +class docInternalS2TypeSub(supermod.docInternalS2Type): + def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): + supermod.docInternalS2Type.__init__(self, mixedclass_, content_) +supermod.docInternalS2Type.subclass = docInternalS2TypeSub +# end class docInternalS2TypeSub + + +class docInternalS3TypeSub(supermod.docInternalS3Type): + def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): + supermod.docInternalS3Type.__init__(self, mixedclass_, content_) +supermod.docInternalS3Type.subclass = docInternalS3TypeSub +# end class docInternalS3TypeSub + + +class docInternalS4TypeSub(supermod.docInternalS4Type): + def __init__(self, para=None, mixedclass_=None, content_=None): + supermod.docInternalS4Type.__init__(self, mixedclass_, content_) +supermod.docInternalS4Type.subclass = docInternalS4TypeSub +# end class docInternalS4TypeSub + + +class docURLLinkSub(supermod.docURLLink): + def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None): + supermod.docURLLink.__init__(self, mixedclass_, content_) +supermod.docURLLink.subclass = docURLLinkSub +# end class docURLLinkSub + + +class docAnchorTypeSub(supermod.docAnchorType): + def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): + supermod.docAnchorType.__init__(self, mixedclass_, content_) +supermod.docAnchorType.subclass = docAnchorTypeSub +# end class docAnchorTypeSub + + +class docFormulaTypeSub(supermod.docFormulaType): + def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): + supermod.docFormulaType.__init__(self, mixedclass_, content_) +supermod.docFormulaType.subclass = docFormulaTypeSub +# end class docFormulaTypeSub + + +class docIndexEntryTypeSub(supermod.docIndexEntryType): + def __init__(self, primaryie='', secondaryie=''): + supermod.docIndexEntryType.__init__(self, primaryie, secondaryie) +supermod.docIndexEntryType.subclass = docIndexEntryTypeSub +# end class docIndexEntryTypeSub + + +class docListTypeSub(supermod.docListType): + def __init__(self, listitem=None): + supermod.docListType.__init__(self, listitem) +supermod.docListType.subclass = docListTypeSub +# end class docListTypeSub + + +class docListItemTypeSub(supermod.docListItemType): + def __init__(self, para=None): + supermod.docListItemType.__init__(self, para) +supermod.docListItemType.subclass = docListItemTypeSub +# end class docListItemTypeSub + + +class docSimpleSectTypeSub(supermod.docSimpleSectType): + def __init__(self, kind=None, title=None, para=None): + supermod.docSimpleSectType.__init__(self, kind, title, para) +supermod.docSimpleSectType.subclass = docSimpleSectTypeSub +# end class docSimpleSectTypeSub + + +class docVarListEntryTypeSub(supermod.docVarListEntryType): + def __init__(self, term=None): + supermod.docVarListEntryType.__init__(self, term) +supermod.docVarListEntryType.subclass = docVarListEntryTypeSub +# end class docVarListEntryTypeSub + + +class docRefTextTypeSub(supermod.docRefTextType): + def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None): + supermod.docRefTextType.__init__(self, mixedclass_, content_) 
+supermod.docRefTextType.subclass = docRefTextTypeSub +# end class docRefTextTypeSub + + +class docTableTypeSub(supermod.docTableType): + def __init__(self, rows=None, cols=None, row=None, caption=None): + supermod.docTableType.__init__(self, rows, cols, row, caption) +supermod.docTableType.subclass = docTableTypeSub +# end class docTableTypeSub + + +class docRowTypeSub(supermod.docRowType): + def __init__(self, entry=None): + supermod.docRowType.__init__(self, entry) +supermod.docRowType.subclass = docRowTypeSub +# end class docRowTypeSub + + +class docEntryTypeSub(supermod.docEntryType): + def __init__(self, thead=None, para=None): + supermod.docEntryType.__init__(self, thead, para) +supermod.docEntryType.subclass = docEntryTypeSub +# end class docEntryTypeSub + + +class docHeadingTypeSub(supermod.docHeadingType): + def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None): + supermod.docHeadingType.__init__(self, mixedclass_, content_) +supermod.docHeadingType.subclass = docHeadingTypeSub +# end class docHeadingTypeSub + + +class docImageTypeSub(supermod.docImageType): + def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None): + supermod.docImageType.__init__(self, mixedclass_, content_) +supermod.docImageType.subclass = docImageTypeSub +# end class docImageTypeSub + + +class docDotFileTypeSub(supermod.docDotFileType): + def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None): + supermod.docDotFileType.__init__(self, mixedclass_, content_) +supermod.docDotFileType.subclass = docDotFileTypeSub +# end class docDotFileTypeSub + + +class docTocItemTypeSub(supermod.docTocItemType): + def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): + supermod.docTocItemType.__init__(self, mixedclass_, content_) +supermod.docTocItemType.subclass = docTocItemTypeSub +# end class docTocItemTypeSub + + +class docTocListTypeSub(supermod.docTocListType): + def __init__(self, tocitem=None): + supermod.docTocListType.__init__(self, tocitem) +supermod.docTocListType.subclass = docTocListTypeSub +# end class docTocListTypeSub + + +class docLanguageTypeSub(supermod.docLanguageType): + def __init__(self, langid=None, para=None): + supermod.docLanguageType.__init__(self, langid, para) +supermod.docLanguageType.subclass = docLanguageTypeSub +# end class docLanguageTypeSub + + +class docParamListTypeSub(supermod.docParamListType): + def __init__(self, kind=None, parameteritem=None): + supermod.docParamListType.__init__(self, kind, parameteritem) +supermod.docParamListType.subclass = docParamListTypeSub +# end class docParamListTypeSub + + +class docParamListItemSub(supermod.docParamListItem): + def __init__(self, parameternamelist=None, parameterdescription=None): + supermod.docParamListItem.__init__(self, parameternamelist, parameterdescription) +supermod.docParamListItem.subclass = docParamListItemSub +# end class docParamListItemSub + + +class docParamNameListSub(supermod.docParamNameList): + def __init__(self, parametername=None): + supermod.docParamNameList.__init__(self, parametername) +supermod.docParamNameList.subclass = docParamNameListSub +# end class docParamNameListSub + + +class docParamNameSub(supermod.docParamName): + def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None): + supermod.docParamName.__init__(self, mixedclass_, content_) +supermod.docParamName.subclass = docParamNameSub +# end class docParamNameSub + + +class 
docXRefSectTypeSub(supermod.docXRefSectType): + def __init__(self, id=None, xreftitle=None, xrefdescription=None): + supermod.docXRefSectType.__init__(self, id, xreftitle, xrefdescription) +supermod.docXRefSectType.subclass = docXRefSectTypeSub +# end class docXRefSectTypeSub + + +class docCopyTypeSub(supermod.docCopyType): + def __init__(self, link=None, para=None, sect1=None, internal=None): + supermod.docCopyType.__init__(self, link, para, sect1, internal) +supermod.docCopyType.subclass = docCopyTypeSub +# end class docCopyTypeSub + + +class docCharTypeSub(supermod.docCharType): + def __init__(self, char=None, valueOf_=''): + supermod.docCharType.__init__(self, char) +supermod.docCharType.subclass = docCharTypeSub +# end class docCharTypeSub + +class docParaTypeSub(supermod.docParaType): + def __init__(self, char=None, valueOf_=''): + supermod.docParaType.__init__(self, char) + + self.parameterlist = [] + self.simplesects = [] + self.content = [] + + def buildChildren(self, child_, nodeName_): + supermod.docParaType.buildChildren(self, child_, nodeName_) + + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == "ref": + obj_ = supermod.docRefTextType.factory() + obj_.build(child_) + self.content.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'parameterlist': + obj_ = supermod.docParamListType.factory() + obj_.build(child_) + self.parameterlist.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'simplesect': + obj_ = supermod.docSimpleSectType.factory() + obj_.build(child_) + self.simplesects.append(obj_) + + +supermod.docParaType.subclass = docParaTypeSub +# end class docParaTypeSub + + + +def parse(inFilename): + doc = minidom.parse(inFilename) + rootNode = doc.documentElement + rootObj = supermod.DoxygenType.factory() + rootObj.build(rootNode) + return rootObj + + diff --git a/gr-adsbtx/docs/doxygen/doxyxml/generated/compound.pyc b/gr-adsbtx/docs/doxygen/doxyxml/generated/compound.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f08ae867b51f66c631065b961b8561bf9f2b5c4c GIT binary patch literal 34242 zcmd5_3vgW3c|NO^WLd}$Y-4O=i~wVV0oxb{Y=?*O8)^%BZ46NiVzqlDtyt|YyH~PA zm#5rjl9s&MPSTQ@(srg4S~4Uh2~EChpQbkgVd{pX&u zS691=4d~vr&;51o-EYtR@A=OEpXZ*Neq%}ZSN=P7Jgb6#Y5d)Tqk8^grE*FQK&Di# zO%+mVyiExatAt3X#{{)3CN_nT0kot=zOIS09q{|lj<4) zt#Y8XN+STYMnERjbpl%LKo=;D0MI%CnN-&cXpIA1s5AmV>jh*|-5{WK4s?;y2moym zkV&;yKJ^Yl^%4PXaG*<-MgZs%0hv@c3aHnCE>ju-pp61DscsU`B@T4C z(g*-;5|Bx?Pe2^#s)K zK*y9u0LT-NN%gpZ?r@+xl|}&QxPVNmCj{g<&|OL+0CYk?Ce^zIblid7r8ELScMHg* zIv}7E4)kuN5dazxkV*BVfbMpndz3~1=%j#5s`mG&s#5~; z9cWr<1c0UlWKw-lK$8yiGfE=>^q_!Dsx<*kInYB&BLGwrkV*An0X^tIk0^}*(8B^U zsXi*8ngcziGy*`63dp4TxPTsZpc$nR0D4?NkMM(m(<;}Yj`cmt2GVs9-G_8hS2DiN z^&Tx1dk^ZN-rilk*X-K8ZRhT7@95pWZQHKiq3Pa{0JZHJYUS`O5^52J1+E>a=PD~F2(85{FKh^D;51r9{2WnX@6;I8uvIpUDn4Y zhGw40GZnpcq&QK@O9&fz@N3{(?rLz2E4|)3KxPXTqlMKM4&!pTl6X$h9 z_lLILtG&_4lzz1^HdfjO)QcpIQzMB5x)NJf*(< zrJ|O{_xSy~jtiLCyzRL-s*H4|mZg1Wy`=+eM~m+T_;@eBFbWYA-!!`(4AMCX;_l+v zMo{|Ku}cG!IBm^<+M&QhGv1%adK^+3h7g%+3$?VWwX0f(s&%T`B30{BwZ*E|t!n3} zT92wNQMIM2woKKQtJ=A$wnEibs@f`5J5SYCtJ?Xhwno*~s@gg=bHN;&@`TG}xNRhx zgt|T|=XFsjDt$kSphU`*lJB!D=5u}z{@CxbG>F6T^L|15Om-+!)%N3x9u`J&x{{yN zx$tG)BVWuGCUUwer71Yc4NZ&5;xwnrBb7{fRQyrCI2pcVz7A)V%U67KE0GpXhVxK@ zMN`hQg-o>?d>J27*J?SF)pdM1GZMm&R7w+NpXJHdGtkDDuG16tmhqlPRc$LT0E0XOFMf9~GI6 
z6S;4`I3jjeC}jmCH>N_R;Y^`0uE&Sq<@%8RLc6pLxJB5dv2@U>OCdglh;eD~XK?HH zx2XyWBAofuX8E8^?Z4+D^$@&$n;Jve7O(dZB576Wr9AdPuevK&_x?=YqYIM4g<_P? zd6)5;xEA@8CBS1@ln?w)#45TvQScW!JE+3NII6t4YFm+NZ(E*Pmg-CyKj|&ORgYDu zM{|hTBeJYrh$HZbccuOFaTYBxQMX9T7Pq(xk-#mYVn>ypqAx>DPK2w=9Pvy$;=N|t znz$S$-AE7PcbLBMJ0#K+?1d(hRrNJEf^yy8C*`UEeRV}7wlEEctF@+@|n*<{+zbBv^g5I#pkvg0?i_ndpnSHh;A0M$xLBF-#$Dj z75g~*8d5b6O4XJ{&8dt2U>ua+Y%^KGStn}kEPOXvZ34F3aFraFc?CT3hSNo2FKkQ9kW zp+IYpNVjmrY7>x%HfJl57^o8=vF>LRFv8_1~W`g&2gkOh^7sv zusdzuaB6Og^Vgm1H-xl`)|=8eqI&Ff11FUuo`4E>|{zh?ByC4+$ z>?=}T!PsW9efGmpXNZc?vvj7qc(|qlNSy zl}x^>2cxATO_dR0K8}2gYMNw+^8RpM7t9E8sN+zTapBMf%|Tw~mW;2; zaI7v)j7v8$82rV}Gll$v@QhSIPi3`?u~t2Lb?j z@z$!0Si4i)yAPO!9x$E6En1`cWIKDpYJ=*NqYI%Ni`JiwT1c*8)C6hSm!a2AdRJ1`=SrotWCTbT!JPTbT`EgL~|_I*;SNgHZY_Xxq_&0 zEz*!|thS4EO_=}>k|8dB&QOv}Ts7^Q(%a6`&QQ%HO~YUnW1IsJr{j$==Bz*7@rKj5 zcWW3k>QFUfwE@R#5T7$!73I1OAmN%>8gTt~5AVxlO?29<-twgihSpKU;&8w}kNdO+ zXRHd|XSD%mZ1GCX7v-su>xIX4Ei&VM=XJfT_Qe`$Df2hDnvUwz1f70S2YesXO7X${7wpvi}kVa)p_2(8fVC9D{NLPyHD4n3}Xugn( zi_xc{Qfn}J8k+o;)dq~R-RiO;N-GP!(0Vq`lG!hGvMe;DbEbWsL4jGyMBOFHu4oPc zl!|7Cn?|d1Be({J<1o&T6 zdeyz>Xnth0fMdbe^6_zB!qFPG$PUbxtTsVK=(6H%B$P$q3B|cMk`HK%tKUso*AR1+YYVnPGGwF6=={J zv_A<={=#Y#cxt zix$Vt%P18x9>ln;zh@a_B(}!XU*m?YLH$$EE z|MnX^tLd<25qme|?woA#9)NHYE!({+uf;mS@(;LUYtZ{NH2Hh0&1`zNw=BJ)*%{J% zZ36Uu$7X2a_4TvR!|E6^nW*UvvvTz+6tPyR|AjlYMwR+WX!2vL z4JhQ$QYg=k&1p8pO(Li6<|x#V#%F+xm&P{yRK7K6d;!m%vf4DDaZd9nX&OVVqKGyA zZtOHJwi%bWL;WoX`e=w-mFZ zrOKJKN*A2Nph(pO;U(N?yVV0s28I6!)(S*9WanKtWM?wq(5$3<`*t(ogcoej z-ri8g8=E;3@+Qj|ur|A%JqwdFq+qwrnZzyb%g|<*)doBTV=~OBTag(_aK)t(0asRq z2wyVE5eHAV*qlk6Cr*>^vD(x<<1yKSf)iFzrs(3ts?Mg5zy|6*9M!X5co(?F*$Nrn z>fHqX2+woVRB>$MMQ8(JC%YG)45L`hgp0pPcf6nfyX1lJLW-HQzNhPO|A(Oh8_6j{} zrF5z)W|=J&s?v|g6z zsa8Qx>Ldm^D>m~Ir;^LE9vm!8`sM2qF=%uNAV1UzTaj~;$(dv5B`MJ24vY`&`fb;qCo{2U+dHk zhK3X|q}3a!>}nS9IrdzC0MR(QeZOq8Dskdo!xR6K)ds``!&!ImO#rr(hwUsr=7L$| zS(-$sGnbFi1@z49bY0BaeHk6fl@8 z%Oc&?&=y&7*yeBAyiAr~|`RX?2L!g0l>goW?=hvo@m= zr|n0e?HQ{LXbY+!ab(Q0FlCJzpJy^W+I+BP{x#Y`-_Yq|sG}C90mC1^;T1Dxx7h@K z&H4$OZ;7+`W8}p1RvWMwv}o?e6hSLY7Y2nYtd8NbK3OPRuaM9w{M5Dn!Xlo<>{4H} z`ItDX|AXxKjMWCL22J{XrShDmF>ls1-`dPYu+YLP6tPe5Ji!R{HX#D>O1tRrOrxf%8BR58ne$R zBKw67rd1GWw~-F6c(ZR}r2U#bbOKVXszR#Fik-79^8C*+_FAUeh;H!a`2GDna%kpb_y^l=3ZZrgnmFAM(0+ z6Bgx6xy&sK*zh}WWBq+am7md9Cz*UdW;C-Dj{$9ttJ%}aB zYdgEDen<_?wM-pwFu~s#5c%R^yd)J*o3jE3v_S4_-{Sq+x8Hm(?-7 zdL=5cg;}}Si5q>5iqjDB`p!jJoQPGDcCpHx%wd@`gcZOT9=&Mqx~hvTifj z7vHf|_TAbj!mJXO#E{Tit#M!L`+FJ!7EK9-77dhd< zzMHI!zyc>JY?V^U??)Mvg2R*BhwyfJ{9C|WTLZ;j$5G{)00n{<3(0-77tv#I5bEKj ztbCSW+jb(QbmDwaUJfisyvBA5-U}?bV@!>9RmD~xG)KFki@br?WDh?!c|^)23#xcK zFkiIku|wdoJM3+v*h+CN1#2H~7X{h$_}VM)Mv6TYH&c*nZ!g6Giv1L~QXHn}r#MPc zrYKQNP@rTe?-a!(#Wckf#Um8l$pgqK1J~Y#b+o!P4W8_FH(Gt z;nV>x8Sjq_W>3X3*v;N4Bud0WPDQx?pLLT top?P6{C-SFsG+uoC&=ksRYYsKs7wY@zZ{|`ubBe?(o literal 0 HcmV?d00001 diff --git a/gr-adsbtx/docs/doxygen/doxyxml/generated/compoundsuper.py b/gr-adsbtx/docs/doxygen/doxyxml/generated/compoundsuper.py new file mode 100644 index 0000000..6255dda --- /dev/null +++ b/gr-adsbtx/docs/doxygen/doxyxml/generated/compoundsuper.py @@ -0,0 +1,8342 @@ +#!/usr/bin/env python + +# +# Generated Thu Jun 11 18:44:25 2009 by generateDS.py. +# + +import sys +import getopt +from string import lower as str_lower +from xml.dom import minidom +from xml.dom import Node + +# +# User methods +# +# Calls to the methods in these classes are generated by generateDS.py. +# You can replace these methods by re-implementing the following class +# in a module named generatedssuper.py. 
+ +try: + from generatedssuper import GeneratedsSuper +except ImportError, exp: + + class GeneratedsSuper: + def format_string(self, input_data, input_name=''): + return input_data + def format_integer(self, input_data, input_name=''): + return '%d' % input_data + def format_float(self, input_data, input_name=''): + return '%f' % input_data + def format_double(self, input_data, input_name=''): + return '%e' % input_data + def format_boolean(self, input_data, input_name=''): + return '%s' % input_data + + +# +# If you have installed IPython you can uncomment and use the following. +# IPython is available from http://ipython.scipy.org/. +# + +## from IPython.Shell import IPShellEmbed +## args = '' +## ipshell = IPShellEmbed(args, +## banner = 'Dropping into IPython', +## exit_msg = 'Leaving Interpreter, back to program.') + +# Then use the following line where and when you want to drop into the +# IPython shell: +# ipshell(' -- Entering ipshell.\nHit Ctrl-D to exit') + +# +# Globals +# + +ExternalEncoding = 'ascii' + +# +# Support/utility functions. +# + +def showIndent(outfile, level): + for idx in range(level): + outfile.write(' ') + +def quote_xml(inStr): + s1 = (isinstance(inStr, basestring) and inStr or + '%s' % inStr) + s1 = s1.replace('&', '&') + s1 = s1.replace('<', '<') + s1 = s1.replace('>', '>') + return s1 + +def quote_attrib(inStr): + s1 = (isinstance(inStr, basestring) and inStr or + '%s' % inStr) + s1 = s1.replace('&', '&') + s1 = s1.replace('<', '<') + s1 = s1.replace('>', '>') + if '"' in s1: + if "'" in s1: + s1 = '"%s"' % s1.replace('"', """) + else: + s1 = "'%s'" % s1 + else: + s1 = '"%s"' % s1 + return s1 + +def quote_python(inStr): + s1 = inStr + if s1.find("'") == -1: + if s1.find('\n') == -1: + return "'%s'" % s1 + else: + return "'''%s'''" % s1 + else: + if s1.find('"') != -1: + s1 = s1.replace('"', '\\"') + if s1.find('\n') == -1: + return '"%s"' % s1 + else: + return '"""%s"""' % s1 + + +class MixedContainer: + # Constants for category: + CategoryNone = 0 + CategoryText = 1 + CategorySimple = 2 + CategoryComplex = 3 + # Constants for content_type: + TypeNone = 0 + TypeText = 1 + TypeString = 2 + TypeInteger = 3 + TypeFloat = 4 + TypeDecimal = 5 + TypeDouble = 6 + TypeBoolean = 7 + def __init__(self, category, content_type, name, value): + self.category = category + self.content_type = content_type + self.name = name + self.value = value + def getCategory(self): + return self.category + def getContenttype(self, content_type): + return self.content_type + def getValue(self): + return self.value + def getName(self): + return self.name + def export(self, outfile, level, name, namespace): + if self.category == MixedContainer.CategoryText: + outfile.write(self.value) + elif self.category == MixedContainer.CategorySimple: + self.exportSimple(outfile, level, name) + else: # category == MixedContainer.CategoryComplex + self.value.export(outfile, level, namespace,name) + def exportSimple(self, outfile, level, name): + if self.content_type == MixedContainer.TypeString: + outfile.write('<%s>%s' % (self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeInteger or \ + self.content_type == MixedContainer.TypeBoolean: + outfile.write('<%s>%d' % (self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeFloat or \ + self.content_type == MixedContainer.TypeDecimal: + outfile.write('<%s>%f' % (self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeDouble: + outfile.write('<%s>%g' % (self.name, self.value, 
self.name)) + def exportLiteral(self, outfile, level, name): + if self.category == MixedContainer.CategoryText: + showIndent(outfile, level) + outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \ + (self.category, self.content_type, self.name, self.value)) + elif self.category == MixedContainer.CategorySimple: + showIndent(outfile, level) + outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \ + (self.category, self.content_type, self.name, self.value)) + else: # category == MixedContainer.CategoryComplex + showIndent(outfile, level) + outfile.write('MixedContainer(%d, %d, "%s",\n' % \ + (self.category, self.content_type, self.name,)) + self.value.exportLiteral(outfile, level + 1) + showIndent(outfile, level) + outfile.write(')\n') + + +class _MemberSpec(object): + def __init__(self, name='', data_type='', container=0): + self.name = name + self.data_type = data_type + self.container = container + def set_name(self, name): self.name = name + def get_name(self): return self.name + def set_data_type(self, data_type): self.data_type = data_type + def get_data_type(self): return self.data_type + def set_container(self, container): self.container = container + def get_container(self): return self.container + + +# +# Data representation classes. +# + +class DoxygenType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, version=None, compounddef=None): + self.version = version + self.compounddef = compounddef + def factory(*args_, **kwargs_): + if DoxygenType.subclass: + return DoxygenType.subclass(*args_, **kwargs_) + else: + return DoxygenType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_compounddef(self): return self.compounddef + def set_compounddef(self, compounddef): self.compounddef = compounddef + def get_version(self): return self.version + def set_version(self, version): self.version = version + def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='DoxygenType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'): + outfile.write(' version=%s' % (quote_attrib(self.version), )) + def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'): + if self.compounddef: + self.compounddef.export(outfile, level, namespace_, name_='compounddef') + def hasContent_(self): + if ( + self.compounddef is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='DoxygenType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.version is not None: + showIndent(outfile, level) + outfile.write('version = "%s",\n' % (self.version,)) + def exportLiteralChildren(self, outfile, level, name_): + if self.compounddef: + showIndent(outfile, level) + outfile.write('compounddef=model_.compounddefType(\n') + self.compounddef.exportLiteral(outfile, level, name_='compounddef') + showIndent(outfile, level) + outfile.write('),\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ 
in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('version'): + self.version = attrs.get('version').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'compounddef': + obj_ = compounddefType.factory() + obj_.build(child_) + self.set_compounddef(obj_) +# end class DoxygenType + + +class compounddefType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, kind=None, prot=None, id=None, compoundname=None, title=None, basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None): + self.kind = kind + self.prot = prot + self.id = id + self.compoundname = compoundname + self.title = title + if basecompoundref is None: + self.basecompoundref = [] + else: + self.basecompoundref = basecompoundref + if derivedcompoundref is None: + self.derivedcompoundref = [] + else: + self.derivedcompoundref = derivedcompoundref + if includes is None: + self.includes = [] + else: + self.includes = includes + if includedby is None: + self.includedby = [] + else: + self.includedby = includedby + self.incdepgraph = incdepgraph + self.invincdepgraph = invincdepgraph + if innerdir is None: + self.innerdir = [] + else: + self.innerdir = innerdir + if innerfile is None: + self.innerfile = [] + else: + self.innerfile = innerfile + if innerclass is None: + self.innerclass = [] + else: + self.innerclass = innerclass + if innernamespace is None: + self.innernamespace = [] + else: + self.innernamespace = innernamespace + if innerpage is None: + self.innerpage = [] + else: + self.innerpage = innerpage + if innergroup is None: + self.innergroup = [] + else: + self.innergroup = innergroup + self.templateparamlist = templateparamlist + if sectiondef is None: + self.sectiondef = [] + else: + self.sectiondef = sectiondef + self.briefdescription = briefdescription + self.detaileddescription = detaileddescription + self.inheritancegraph = inheritancegraph + self.collaborationgraph = collaborationgraph + self.programlisting = programlisting + self.location = location + self.listofallmembers = listofallmembers + def factory(*args_, **kwargs_): + if compounddefType.subclass: + return compounddefType.subclass(*args_, **kwargs_) + else: + return compounddefType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_compoundname(self): return self.compoundname + def set_compoundname(self, compoundname): self.compoundname = compoundname + def get_title(self): return self.title + def set_title(self, title): self.title = title + def get_basecompoundref(self): return self.basecompoundref + def set_basecompoundref(self, basecompoundref): self.basecompoundref = basecompoundref + def add_basecompoundref(self, value): self.basecompoundref.append(value) + def insert_basecompoundref(self, index, value): self.basecompoundref[index] = value + def get_derivedcompoundref(self): return self.derivedcompoundref + def set_derivedcompoundref(self, derivedcompoundref): self.derivedcompoundref = derivedcompoundref + def add_derivedcompoundref(self, value): self.derivedcompoundref.append(value) + def 
insert_derivedcompoundref(self, index, value): self.derivedcompoundref[index] = value + def get_includes(self): return self.includes + def set_includes(self, includes): self.includes = includes + def add_includes(self, value): self.includes.append(value) + def insert_includes(self, index, value): self.includes[index] = value + def get_includedby(self): return self.includedby + def set_includedby(self, includedby): self.includedby = includedby + def add_includedby(self, value): self.includedby.append(value) + def insert_includedby(self, index, value): self.includedby[index] = value + def get_incdepgraph(self): return self.incdepgraph + def set_incdepgraph(self, incdepgraph): self.incdepgraph = incdepgraph + def get_invincdepgraph(self): return self.invincdepgraph + def set_invincdepgraph(self, invincdepgraph): self.invincdepgraph = invincdepgraph + def get_innerdir(self): return self.innerdir + def set_innerdir(self, innerdir): self.innerdir = innerdir + def add_innerdir(self, value): self.innerdir.append(value) + def insert_innerdir(self, index, value): self.innerdir[index] = value + def get_innerfile(self): return self.innerfile + def set_innerfile(self, innerfile): self.innerfile = innerfile + def add_innerfile(self, value): self.innerfile.append(value) + def insert_innerfile(self, index, value): self.innerfile[index] = value + def get_innerclass(self): return self.innerclass + def set_innerclass(self, innerclass): self.innerclass = innerclass + def add_innerclass(self, value): self.innerclass.append(value) + def insert_innerclass(self, index, value): self.innerclass[index] = value + def get_innernamespace(self): return self.innernamespace + def set_innernamespace(self, innernamespace): self.innernamespace = innernamespace + def add_innernamespace(self, value): self.innernamespace.append(value) + def insert_innernamespace(self, index, value): self.innernamespace[index] = value + def get_innerpage(self): return self.innerpage + def set_innerpage(self, innerpage): self.innerpage = innerpage + def add_innerpage(self, value): self.innerpage.append(value) + def insert_innerpage(self, index, value): self.innerpage[index] = value + def get_innergroup(self): return self.innergroup + def set_innergroup(self, innergroup): self.innergroup = innergroup + def add_innergroup(self, value): self.innergroup.append(value) + def insert_innergroup(self, index, value): self.innergroup[index] = value + def get_templateparamlist(self): return self.templateparamlist + def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist + def get_sectiondef(self): return self.sectiondef + def set_sectiondef(self, sectiondef): self.sectiondef = sectiondef + def add_sectiondef(self, value): self.sectiondef.append(value) + def insert_sectiondef(self, index, value): self.sectiondef[index] = value + def get_briefdescription(self): return self.briefdescription + def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription + def get_detaileddescription(self): return self.detaileddescription + def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription + def get_inheritancegraph(self): return self.inheritancegraph + def set_inheritancegraph(self, inheritancegraph): self.inheritancegraph = inheritancegraph + def get_collaborationgraph(self): return self.collaborationgraph + def set_collaborationgraph(self, collaborationgraph): self.collaborationgraph = collaborationgraph + def get_programlisting(self): return 
self.programlisting + def set_programlisting(self, programlisting): self.programlisting = programlisting + def get_location(self): return self.location + def set_location(self, location): self.location = location + def get_listofallmembers(self): return self.listofallmembers + def set_listofallmembers(self, listofallmembers): self.listofallmembers = listofallmembers + def get_kind(self): return self.kind + def set_kind(self, kind): self.kind = kind + def get_prot(self): return self.prot + def set_prot(self, prot): self.prot = prot + def get_id(self): return self.id + def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='compounddefType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='compounddefType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='compounddefType'): + if self.kind is not None: + outfile.write(' kind=%s' % (quote_attrib(self.kind), )) + if self.prot is not None: + outfile.write(' prot=%s' % (quote_attrib(self.prot), )) + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='compounddefType'): + if self.compoundname is not None: + showIndent(outfile, level) + outfile.write('<%scompoundname>%s\n' % (namespace_, self.format_string(quote_xml(self.compoundname).encode(ExternalEncoding), input_name='compoundname'), namespace_)) + if self.title is not None: + showIndent(outfile, level) + outfile.write('<%stitle>%s\n' % (namespace_, self.format_string(quote_xml(self.title).encode(ExternalEncoding), input_name='title'), namespace_)) + for basecompoundref_ in self.basecompoundref: + basecompoundref_.export(outfile, level, namespace_, name_='basecompoundref') + for derivedcompoundref_ in self.derivedcompoundref: + derivedcompoundref_.export(outfile, level, namespace_, name_='derivedcompoundref') + for includes_ in self.includes: + includes_.export(outfile, level, namespace_, name_='includes') + for includedby_ in self.includedby: + includedby_.export(outfile, level, namespace_, name_='includedby') + if self.incdepgraph: + self.incdepgraph.export(outfile, level, namespace_, name_='incdepgraph') + if self.invincdepgraph: + self.invincdepgraph.export(outfile, level, namespace_, name_='invincdepgraph') + for innerdir_ in self.innerdir: + innerdir_.export(outfile, level, namespace_, name_='innerdir') + for innerfile_ in self.innerfile: + innerfile_.export(outfile, level, namespace_, name_='innerfile') + for innerclass_ in self.innerclass: + innerclass_.export(outfile, level, namespace_, name_='innerclass') + for innernamespace_ in self.innernamespace: + innernamespace_.export(outfile, level, namespace_, name_='innernamespace') + for innerpage_ in self.innerpage: + innerpage_.export(outfile, level, namespace_, name_='innerpage') + for innergroup_ in self.innergroup: + innergroup_.export(outfile, level, namespace_, name_='innergroup') + if self.templateparamlist: + self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist') + for sectiondef_ in self.sectiondef: + sectiondef_.export(outfile, level, namespace_, 
name_='sectiondef') + if self.briefdescription: + self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') + if self.detaileddescription: + self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription') + if self.inheritancegraph: + self.inheritancegraph.export(outfile, level, namespace_, name_='inheritancegraph') + if self.collaborationgraph: + self.collaborationgraph.export(outfile, level, namespace_, name_='collaborationgraph') + if self.programlisting: + self.programlisting.export(outfile, level, namespace_, name_='programlisting') + if self.location: + self.location.export(outfile, level, namespace_, name_='location') + if self.listofallmembers: + self.listofallmembers.export(outfile, level, namespace_, name_='listofallmembers') + def hasContent_(self): + if ( + self.compoundname is not None or + self.title is not None or + self.basecompoundref is not None or + self.derivedcompoundref is not None or + self.includes is not None or + self.includedby is not None or + self.incdepgraph is not None or + self.invincdepgraph is not None or + self.innerdir is not None or + self.innerfile is not None or + self.innerclass is not None or + self.innernamespace is not None or + self.innerpage is not None or + self.innergroup is not None or + self.templateparamlist is not None or + self.sectiondef is not None or + self.briefdescription is not None or + self.detaileddescription is not None or + self.inheritancegraph is not None or + self.collaborationgraph is not None or + self.programlisting is not None or + self.location is not None or + self.listofallmembers is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='compounddefType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.kind is not None: + showIndent(outfile, level) + outfile.write('kind = "%s",\n' % (self.kind,)) + if self.prot is not None: + showIndent(outfile, level) + outfile.write('prot = "%s",\n' % (self.prot,)) + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('compoundname=%s,\n' % quote_python(self.compoundname).encode(ExternalEncoding)) + if self.title: + showIndent(outfile, level) + outfile.write('title=model_.xsd_string(\n') + self.title.exportLiteral(outfile, level, name_='title') + showIndent(outfile, level) + outfile.write('),\n') + showIndent(outfile, level) + outfile.write('basecompoundref=[\n') + level += 1 + for basecompoundref in self.basecompoundref: + showIndent(outfile, level) + outfile.write('model_.basecompoundref(\n') + basecompoundref.exportLiteral(outfile, level, name_='basecompoundref') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('derivedcompoundref=[\n') + level += 1 + for derivedcompoundref in self.derivedcompoundref: + showIndent(outfile, level) + outfile.write('model_.derivedcompoundref(\n') + derivedcompoundref.exportLiteral(outfile, level, name_='derivedcompoundref') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('includes=[\n') + level += 1 + for includes in 
self.includes: + showIndent(outfile, level) + outfile.write('model_.includes(\n') + includes.exportLiteral(outfile, level, name_='includes') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('includedby=[\n') + level += 1 + for includedby in self.includedby: + showIndent(outfile, level) + outfile.write('model_.includedby(\n') + includedby.exportLiteral(outfile, level, name_='includedby') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + if self.incdepgraph: + showIndent(outfile, level) + outfile.write('incdepgraph=model_.graphType(\n') + self.incdepgraph.exportLiteral(outfile, level, name_='incdepgraph') + showIndent(outfile, level) + outfile.write('),\n') + if self.invincdepgraph: + showIndent(outfile, level) + outfile.write('invincdepgraph=model_.graphType(\n') + self.invincdepgraph.exportLiteral(outfile, level, name_='invincdepgraph') + showIndent(outfile, level) + outfile.write('),\n') + showIndent(outfile, level) + outfile.write('innerdir=[\n') + level += 1 + for innerdir in self.innerdir: + showIndent(outfile, level) + outfile.write('model_.innerdir(\n') + innerdir.exportLiteral(outfile, level, name_='innerdir') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('innerfile=[\n') + level += 1 + for innerfile in self.innerfile: + showIndent(outfile, level) + outfile.write('model_.innerfile(\n') + innerfile.exportLiteral(outfile, level, name_='innerfile') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('innerclass=[\n') + level += 1 + for innerclass in self.innerclass: + showIndent(outfile, level) + outfile.write('model_.innerclass(\n') + innerclass.exportLiteral(outfile, level, name_='innerclass') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('innernamespace=[\n') + level += 1 + for innernamespace in self.innernamespace: + showIndent(outfile, level) + outfile.write('model_.innernamespace(\n') + innernamespace.exportLiteral(outfile, level, name_='innernamespace') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('innerpage=[\n') + level += 1 + for innerpage in self.innerpage: + showIndent(outfile, level) + outfile.write('model_.innerpage(\n') + innerpage.exportLiteral(outfile, level, name_='innerpage') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('innergroup=[\n') + level += 1 + for innergroup in self.innergroup: + showIndent(outfile, level) + outfile.write('model_.innergroup(\n') + innergroup.exportLiteral(outfile, level, name_='innergroup') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + if self.templateparamlist: + showIndent(outfile, level) + outfile.write('templateparamlist=model_.templateparamlistType(\n') + self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist') + showIndent(outfile, level) + outfile.write('),\n') + 
showIndent(outfile, level) + outfile.write('sectiondef=[\n') + level += 1 + for sectiondef in self.sectiondef: + showIndent(outfile, level) + outfile.write('model_.sectiondef(\n') + sectiondef.exportLiteral(outfile, level, name_='sectiondef') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + if self.briefdescription: + showIndent(outfile, level) + outfile.write('briefdescription=model_.descriptionType(\n') + self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') + showIndent(outfile, level) + outfile.write('),\n') + if self.detaileddescription: + showIndent(outfile, level) + outfile.write('detaileddescription=model_.descriptionType(\n') + self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription') + showIndent(outfile, level) + outfile.write('),\n') + if self.inheritancegraph: + showIndent(outfile, level) + outfile.write('inheritancegraph=model_.graphType(\n') + self.inheritancegraph.exportLiteral(outfile, level, name_='inheritancegraph') + showIndent(outfile, level) + outfile.write('),\n') + if self.collaborationgraph: + showIndent(outfile, level) + outfile.write('collaborationgraph=model_.graphType(\n') + self.collaborationgraph.exportLiteral(outfile, level, name_='collaborationgraph') + showIndent(outfile, level) + outfile.write('),\n') + if self.programlisting: + showIndent(outfile, level) + outfile.write('programlisting=model_.listingType(\n') + self.programlisting.exportLiteral(outfile, level, name_='programlisting') + showIndent(outfile, level) + outfile.write('),\n') + if self.location: + showIndent(outfile, level) + outfile.write('location=model_.locationType(\n') + self.location.exportLiteral(outfile, level, name_='location') + showIndent(outfile, level) + outfile.write('),\n') + if self.listofallmembers: + showIndent(outfile, level) + outfile.write('listofallmembers=model_.listofallmembersType(\n') + self.listofallmembers.exportLiteral(outfile, level, name_='listofallmembers') + showIndent(outfile, level) + outfile.write('),\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('kind'): + self.kind = attrs.get('kind').value + if attrs.get('prot'): + self.prot = attrs.get('prot').value + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'compoundname': + compoundname_ = '' + for text__content_ in child_.childNodes: + compoundname_ += text__content_.nodeValue + self.compoundname = compoundname_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'title': + obj_ = docTitleType.factory() + obj_.build(child_) + self.set_title(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'basecompoundref': + obj_ = compoundRefType.factory() + obj_.build(child_) + self.basecompoundref.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'derivedcompoundref': + obj_ = compoundRefType.factory() + obj_.build(child_) + self.derivedcompoundref.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'includes': + obj_ = incType.factory() + obj_.build(child_) + self.includes.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'includedby': + obj_ = incType.factory() + 
obj_.build(child_) + self.includedby.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'incdepgraph': + obj_ = graphType.factory() + obj_.build(child_) + self.set_incdepgraph(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'invincdepgraph': + obj_ = graphType.factory() + obj_.build(child_) + self.set_invincdepgraph(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'innerdir': + obj_ = refType.factory() + obj_.build(child_) + self.innerdir.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'innerfile': + obj_ = refType.factory() + obj_.build(child_) + self.innerfile.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'innerclass': + obj_ = refType.factory() + obj_.build(child_) + self.innerclass.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'innernamespace': + obj_ = refType.factory() + obj_.build(child_) + self.innernamespace.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'innerpage': + obj_ = refType.factory() + obj_.build(child_) + self.innerpage.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'innergroup': + obj_ = refType.factory() + obj_.build(child_) + self.innergroup.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'templateparamlist': + obj_ = templateparamlistType.factory() + obj_.build(child_) + self.set_templateparamlist(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sectiondef': + obj_ = sectiondefType.factory() + obj_.build(child_) + self.sectiondef.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'briefdescription': + obj_ = descriptionType.factory() + obj_.build(child_) + self.set_briefdescription(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'detaileddescription': + obj_ = descriptionType.factory() + obj_.build(child_) + self.set_detaileddescription(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'inheritancegraph': + obj_ = graphType.factory() + obj_.build(child_) + self.set_inheritancegraph(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'collaborationgraph': + obj_ = graphType.factory() + obj_.build(child_) + self.set_collaborationgraph(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'programlisting': + obj_ = listingType.factory() + obj_.build(child_) + self.set_programlisting(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'location': + obj_ = locationType.factory() + obj_.build(child_) + self.set_location(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'listofallmembers': + obj_ = listofallmembersType.factory() + obj_.build(child_) + self.set_listofallmembers(obj_) +# end class compounddefType + + +class listofallmembersType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, member=None): + if member is None: + self.member = [] + else: + self.member = member + def factory(*args_, **kwargs_): + if listofallmembersType.subclass: + return listofallmembersType.subclass(*args_, **kwargs_) + else: + return listofallmembersType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_member(self): return self.member + def set_member(self, member): self.member = member + def add_member(self, value): self.member.append(value) + def insert_member(self, index, value): self.member[index] = value + def export(self, 
outfile, level, namespace_='', name_='listofallmembersType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='listofallmembersType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='listofallmembersType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='listofallmembersType'): + for member_ in self.member: + member_.export(outfile, level, namespace_, name_='member') + def hasContent_(self): + if ( + self.member is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='listofallmembersType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('member=[\n') + level += 1 + for member in self.member: + showIndent(outfile, level) + outfile.write('model_.member(\n') + member.exportLiteral(outfile, level, name_='member') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'member': + obj_ = memberRefType.factory() + obj_.build(child_) + self.member.append(obj_) +# end class listofallmembersType + + +class memberRefType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope=None, name=None): + self.virt = virt + self.prot = prot + self.refid = refid + self.ambiguityscope = ambiguityscope + self.scope = scope + self.name = name + def factory(*args_, **kwargs_): + if memberRefType.subclass: + return memberRefType.subclass(*args_, **kwargs_) + else: + return memberRefType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_scope(self): return self.scope + def set_scope(self, scope): self.scope = scope + def get_name(self): return self.name + def set_name(self, name): self.name = name + def get_virt(self): return self.virt + def set_virt(self, virt): self.virt = virt + def get_prot(self): return self.prot + def set_prot(self, prot): self.prot = prot + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def get_ambiguityscope(self): return self.ambiguityscope + def set_ambiguityscope(self, ambiguityscope): self.ambiguityscope = ambiguityscope + def export(self, outfile, level, namespace_='', name_='memberRefType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='memberRefType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + 
else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='memberRefType'): + if self.virt is not None: + outfile.write(' virt=%s' % (quote_attrib(self.virt), )) + if self.prot is not None: + outfile.write(' prot=%s' % (quote_attrib(self.prot), )) + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + if self.ambiguityscope is not None: + outfile.write(' ambiguityscope=%s' % (self.format_string(quote_attrib(self.ambiguityscope).encode(ExternalEncoding), input_name='ambiguityscope'), )) + def exportChildren(self, outfile, level, namespace_='', name_='memberRefType'): + if self.scope is not None: + showIndent(outfile, level) + outfile.write('<%sscope>%s\n' % (namespace_, self.format_string(quote_xml(self.scope).encode(ExternalEncoding), input_name='scope'), namespace_)) + if self.name is not None: + showIndent(outfile, level) + outfile.write('<%sname>%s\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) + def hasContent_(self): + if ( + self.scope is not None or + self.name is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='memberRefType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.virt is not None: + showIndent(outfile, level) + outfile.write('virt = "%s",\n' % (self.virt,)) + if self.prot is not None: + showIndent(outfile, level) + outfile.write('prot = "%s",\n' % (self.prot,)) + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + if self.ambiguityscope is not None: + showIndent(outfile, level) + outfile.write('ambiguityscope = %s,\n' % (self.ambiguityscope,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('scope=%s,\n' % quote_python(self.scope).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('virt'): + self.virt = attrs.get('virt').value + if attrs.get('prot'): + self.prot = attrs.get('prot').value + if attrs.get('refid'): + self.refid = attrs.get('refid').value + if attrs.get('ambiguityscope'): + self.ambiguityscope = attrs.get('ambiguityscope').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'scope': + scope_ = '' + for text__content_ in child_.childNodes: + scope_ += text__content_.nodeValue + self.scope = scope_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'name': + name_ = '' + for text__content_ in child_.childNodes: + name_ += text__content_.nodeValue + self.name = name_ +# end class memberRefType + + +class scope(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if scope.subclass: + return scope.subclass(*args_, **kwargs_) + else: + return scope(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): 
return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='scope', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='scope') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='scope'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='scope'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='scope'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class scope + + +class name(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if name.subclass: + return name.subclass(*args_, **kwargs_) + else: + return name(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='name', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='name') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='name'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='name'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='name'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, 
level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class name + + +class compoundRefType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): + self.virt = virt + self.prot = prot + self.refid = refid + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if compoundRefType.subclass: + return compoundRefType.subclass(*args_, **kwargs_) + else: + return compoundRefType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_virt(self): return self.virt + def set_virt(self, virt): self.virt = virt + def get_prot(self): return self.prot + def set_prot(self, prot): self.prot = prot + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='compoundRefType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='compoundRefType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='compoundRefType'): + if self.virt is not None: + outfile.write(' virt=%s' % (quote_attrib(self.virt), )) + if self.prot is not None: + outfile.write(' prot=%s' % (quote_attrib(self.prot), )) + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='compoundRefType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='compoundRefType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.virt is not None: + showIndent(outfile, level) + outfile.write('virt = "%s",\n' % (self.virt,)) + if self.prot is not None: + showIndent(outfile, level) + outfile.write('prot = "%s",\n' % (self.prot,)) + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + 
attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('virt'): + self.virt = attrs.get('virt').value + if attrs.get('prot'): + self.prot = attrs.get('prot').value + if attrs.get('refid'): + self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class compoundRefType + + +class reimplementType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None): + self.refid = refid + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if reimplementType.subclass: + return reimplementType.subclass(*args_, **kwargs_) + else: + return reimplementType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='reimplementType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='reimplementType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='reimplementType'): + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='reimplementType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='reimplementType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('refid'): + self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == 
Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class reimplementType + + +class incType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None): + self.local = local + self.refid = refid + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if incType.subclass: + return incType.subclass(*args_, **kwargs_) + else: + return incType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_local(self): return self.local + def set_local(self, local): self.local = local + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='incType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='incType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='incType'): + if self.local is not None: + outfile.write(' local=%s' % (quote_attrib(self.local), )) + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='incType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='incType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.local is not None: + showIndent(outfile, level) + outfile.write('local = "%s",\n' % (self.local,)) + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('local'): + self.local = attrs.get('local').value + if attrs.get('refid'): + self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == 
Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class incType + + +class refType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): + self.prot = prot + self.refid = refid + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if refType.subclass: + return refType.subclass(*args_, **kwargs_) + else: + return refType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_prot(self): return self.prot + def set_prot(self, prot): self.prot = prot + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='refType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='refType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='refType'): + if self.prot is not None: + outfile.write(' prot=%s' % (quote_attrib(self.prot), )) + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='refType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='refType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.prot is not None: + showIndent(outfile, level) + outfile.write('prot = "%s",\n' % (self.prot,)) + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('prot'): + self.prot = attrs.get('prot').value + if attrs.get('refid'): + self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class refType + + +class 
refTextType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None): + self.refid = refid + self.kindref = kindref + self.external = external + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if refTextType.subclass: + return refTextType.subclass(*args_, **kwargs_) + else: + return refTextType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def get_kindref(self): return self.kindref + def set_kindref(self, kindref): self.kindref = kindref + def get_external(self): return self.external + def set_external(self, external): self.external = external + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='refTextType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='refTextType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='refTextType'): + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + if self.kindref is not None: + outfile.write(' kindref=%s' % (quote_attrib(self.kindref), )) + if self.external is not None: + outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) + def exportChildren(self, outfile, level, namespace_='', name_='refTextType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='refTextType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + if self.kindref is not None: + showIndent(outfile, level) + outfile.write('kindref = "%s",\n' % (self.kindref,)) + if self.external is not None: + showIndent(outfile, level) + outfile.write('external = %s,\n' % (self.external,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('refid'): + self.refid = attrs.get('refid').value + if attrs.get('kindref'): + self.kindref = attrs.get('kindref').value + if attrs.get('external'): + self.external = attrs.get('external').value + def buildChildren(self, child_, nodeName_): 
+ if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class refTextType + + +class sectiondefType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, kind=None, header=None, description=None, memberdef=None): + self.kind = kind + self.header = header + self.description = description + if memberdef is None: + self.memberdef = [] + else: + self.memberdef = memberdef + def factory(*args_, **kwargs_): + if sectiondefType.subclass: + return sectiondefType.subclass(*args_, **kwargs_) + else: + return sectiondefType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_header(self): return self.header + def set_header(self, header): self.header = header + def get_description(self): return self.description + def set_description(self, description): self.description = description + def get_memberdef(self): return self.memberdef + def set_memberdef(self, memberdef): self.memberdef = memberdef + def add_memberdef(self, value): self.memberdef.append(value) + def insert_memberdef(self, index, value): self.memberdef[index] = value + def get_kind(self): return self.kind + def set_kind(self, kind): self.kind = kind + def export(self, outfile, level, namespace_='', name_='sectiondefType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='sectiondefType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='sectiondefType'): + if self.kind is not None: + outfile.write(' kind=%s' % (quote_attrib(self.kind), )) + def exportChildren(self, outfile, level, namespace_='', name_='sectiondefType'): + if self.header is not None: + showIndent(outfile, level) + outfile.write('<%sheader>%s\n' % (namespace_, self.format_string(quote_xml(self.header).encode(ExternalEncoding), input_name='header'), namespace_)) + if self.description: + self.description.export(outfile, level, namespace_, name_='description') + for memberdef_ in self.memberdef: + memberdef_.export(outfile, level, namespace_, name_='memberdef') + def hasContent_(self): + if ( + self.header is not None or + self.description is not None or + self.memberdef is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='sectiondefType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.kind is not None: + showIndent(outfile, level) + outfile.write('kind = "%s",\n' % (self.kind,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('header=%s,\n' % quote_python(self.header).encode(ExternalEncoding)) + if self.description: + showIndent(outfile, level) + outfile.write('description=model_.descriptionType(\n') + self.description.exportLiteral(outfile, level, name_='description') + showIndent(outfile, level) + outfile.write('),\n') + 
showIndent(outfile, level) + outfile.write('memberdef=[\n') + level += 1 + for memberdef in self.memberdef: + showIndent(outfile, level) + outfile.write('model_.memberdef(\n') + memberdef.exportLiteral(outfile, level, name_='memberdef') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('kind'): + self.kind = attrs.get('kind').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'header': + header_ = '' + for text__content_ in child_.childNodes: + header_ += text__content_.nodeValue + self.header = header_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'description': + obj_ = descriptionType.factory() + obj_.build(child_) + self.set_description(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'memberdef': + obj_ = memberdefType.factory() + obj_.build(child_) + self.memberdef.append(obj_) +# end class sectiondefType + + +class memberdefType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, initonly=None, kind=None, volatile=None, const=None, raisexx=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition=None, argsstring=None, name=None, read=None, write=None, bitfield=None, reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None): + self.initonly = initonly + self.kind = kind + self.volatile = volatile + self.const = const + self.raisexx = raisexx + self.virt = virt + self.readable = readable + self.prot = prot + self.explicit = explicit + self.new = new + self.final = final + self.writable = writable + self.add = add + self.static = static + self.remove = remove + self.sealed = sealed + self.mutable = mutable + self.gettable = gettable + self.inline = inline + self.settable = settable + self.id = id + self.templateparamlist = templateparamlist + self.type_ = type_ + self.definition = definition + self.argsstring = argsstring + self.name = name + self.read = read + self.write = write + self.bitfield = bitfield + if reimplements is None: + self.reimplements = [] + else: + self.reimplements = reimplements + if reimplementedby is None: + self.reimplementedby = [] + else: + self.reimplementedby = reimplementedby + if param is None: + self.param = [] + else: + self.param = param + if enumvalue is None: + self.enumvalue = [] + else: + self.enumvalue = enumvalue + self.initializer = initializer + self.exceptions = exceptions + self.briefdescription = briefdescription + self.detaileddescription = detaileddescription + self.inbodydescription = inbodydescription + self.location = location + if references is None: + self.references = [] + else: + self.references = references + if referencedby is None: + self.referencedby = [] + else: + self.referencedby = referencedby + def factory(*args_, **kwargs_): + if memberdefType.subclass: + return memberdefType.subclass(*args_, **kwargs_) + else: + 
return memberdefType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_templateparamlist(self): return self.templateparamlist + def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist + def get_type(self): return self.type_ + def set_type(self, type_): self.type_ = type_ + def get_definition(self): return self.definition + def set_definition(self, definition): self.definition = definition + def get_argsstring(self): return self.argsstring + def set_argsstring(self, argsstring): self.argsstring = argsstring + def get_name(self): return self.name + def set_name(self, name): self.name = name + def get_read(self): return self.read + def set_read(self, read): self.read = read + def get_write(self): return self.write + def set_write(self, write): self.write = write + def get_bitfield(self): return self.bitfield + def set_bitfield(self, bitfield): self.bitfield = bitfield + def get_reimplements(self): return self.reimplements + def set_reimplements(self, reimplements): self.reimplements = reimplements + def add_reimplements(self, value): self.reimplements.append(value) + def insert_reimplements(self, index, value): self.reimplements[index] = value + def get_reimplementedby(self): return self.reimplementedby + def set_reimplementedby(self, reimplementedby): self.reimplementedby = reimplementedby + def add_reimplementedby(self, value): self.reimplementedby.append(value) + def insert_reimplementedby(self, index, value): self.reimplementedby[index] = value + def get_param(self): return self.param + def set_param(self, param): self.param = param + def add_param(self, value): self.param.append(value) + def insert_param(self, index, value): self.param[index] = value + def get_enumvalue(self): return self.enumvalue + def set_enumvalue(self, enumvalue): self.enumvalue = enumvalue + def add_enumvalue(self, value): self.enumvalue.append(value) + def insert_enumvalue(self, index, value): self.enumvalue[index] = value + def get_initializer(self): return self.initializer + def set_initializer(self, initializer): self.initializer = initializer + def get_exceptions(self): return self.exceptions + def set_exceptions(self, exceptions): self.exceptions = exceptions + def get_briefdescription(self): return self.briefdescription + def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription + def get_detaileddescription(self): return self.detaileddescription + def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription + def get_inbodydescription(self): return self.inbodydescription + def set_inbodydescription(self, inbodydescription): self.inbodydescription = inbodydescription + def get_location(self): return self.location + def set_location(self, location): self.location = location + def get_references(self): return self.references + def set_references(self, references): self.references = references + def add_references(self, value): self.references.append(value) + def insert_references(self, index, value): self.references[index] = value + def get_referencedby(self): return self.referencedby + def set_referencedby(self, referencedby): self.referencedby = referencedby + def add_referencedby(self, value): self.referencedby.append(value) + def insert_referencedby(self, index, value): self.referencedby[index] = value + def get_initonly(self): return self.initonly + def set_initonly(self, initonly): self.initonly = initonly + def get_kind(self): return self.kind + def set_kind(self, kind): 
self.kind = kind + def get_volatile(self): return self.volatile + def set_volatile(self, volatile): self.volatile = volatile + def get_const(self): return self.const + def set_const(self, const): self.const = const + def get_raise(self): return self.raisexx + def set_raise(self, raisexx): self.raisexx = raisexx + def get_virt(self): return self.virt + def set_virt(self, virt): self.virt = virt + def get_readable(self): return self.readable + def set_readable(self, readable): self.readable = readable + def get_prot(self): return self.prot + def set_prot(self, prot): self.prot = prot + def get_explicit(self): return self.explicit + def set_explicit(self, explicit): self.explicit = explicit + def get_new(self): return self.new + def set_new(self, new): self.new = new + def get_final(self): return self.final + def set_final(self, final): self.final = final + def get_writable(self): return self.writable + def set_writable(self, writable): self.writable = writable + def get_add(self): return self.add + def set_add(self, add): self.add = add + def get_static(self): return self.static + def set_static(self, static): self.static = static + def get_remove(self): return self.remove + def set_remove(self, remove): self.remove = remove + def get_sealed(self): return self.sealed + def set_sealed(self, sealed): self.sealed = sealed + def get_mutable(self): return self.mutable + def set_mutable(self, mutable): self.mutable = mutable + def get_gettable(self): return self.gettable + def set_gettable(self, gettable): self.gettable = gettable + def get_inline(self): return self.inline + def set_inline(self, inline): self.inline = inline + def get_settable(self): return self.settable + def set_settable(self, settable): self.settable = settable + def get_id(self): return self.id + def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='memberdefType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='memberdefType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='memberdefType'): + if self.initonly is not None: + outfile.write(' initonly=%s' % (quote_attrib(self.initonly), )) + if self.kind is not None: + outfile.write(' kind=%s' % (quote_attrib(self.kind), )) + if self.volatile is not None: + outfile.write(' volatile=%s' % (quote_attrib(self.volatile), )) + if self.const is not None: + outfile.write(' const=%s' % (quote_attrib(self.const), )) + if self.raisexx is not None: + outfile.write(' raise=%s' % (quote_attrib(self.raisexx), )) + if self.virt is not None: + outfile.write(' virt=%s' % (quote_attrib(self.virt), )) + if self.readable is not None: + outfile.write(' readable=%s' % (quote_attrib(self.readable), )) + if self.prot is not None: + outfile.write(' prot=%s' % (quote_attrib(self.prot), )) + if self.explicit is not None: + outfile.write(' explicit=%s' % (quote_attrib(self.explicit), )) + if self.new is not None: + outfile.write(' new=%s' % (quote_attrib(self.new), )) + if self.final is not None: + outfile.write(' final=%s' % (quote_attrib(self.final), )) + if self.writable is not None: + outfile.write(' writable=%s' % (quote_attrib(self.writable), )) + if self.add is not None: + outfile.write(' add=%s' % 
(quote_attrib(self.add), )) + if self.static is not None: + outfile.write(' static=%s' % (quote_attrib(self.static), )) + if self.remove is not None: + outfile.write(' remove=%s' % (quote_attrib(self.remove), )) + if self.sealed is not None: + outfile.write(' sealed=%s' % (quote_attrib(self.sealed), )) + if self.mutable is not None: + outfile.write(' mutable=%s' % (quote_attrib(self.mutable), )) + if self.gettable is not None: + outfile.write(' gettable=%s' % (quote_attrib(self.gettable), )) + if self.inline is not None: + outfile.write(' inline=%s' % (quote_attrib(self.inline), )) + if self.settable is not None: + outfile.write(' settable=%s' % (quote_attrib(self.settable), )) + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='memberdefType'): + if self.templateparamlist: + self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist') + if self.type_: + self.type_.export(outfile, level, namespace_, name_='type') + if self.definition is not None: + showIndent(outfile, level) + outfile.write('<%sdefinition>%s\n' % (namespace_, self.format_string(quote_xml(self.definition).encode(ExternalEncoding), input_name='definition'), namespace_)) + if self.argsstring is not None: + showIndent(outfile, level) + outfile.write('<%sargsstring>%s\n' % (namespace_, self.format_string(quote_xml(self.argsstring).encode(ExternalEncoding), input_name='argsstring'), namespace_)) + if self.name is not None: + showIndent(outfile, level) + outfile.write('<%sname>%s\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) + if self.read is not None: + showIndent(outfile, level) + outfile.write('<%sread>%s\n' % (namespace_, self.format_string(quote_xml(self.read).encode(ExternalEncoding), input_name='read'), namespace_)) + if self.write is not None: + showIndent(outfile, level) + outfile.write('<%swrite>%s\n' % (namespace_, self.format_string(quote_xml(self.write).encode(ExternalEncoding), input_name='write'), namespace_)) + if self.bitfield is not None: + showIndent(outfile, level) + outfile.write('<%sbitfield>%s\n' % (namespace_, self.format_string(quote_xml(self.bitfield).encode(ExternalEncoding), input_name='bitfield'), namespace_)) + for reimplements_ in self.reimplements: + reimplements_.export(outfile, level, namespace_, name_='reimplements') + for reimplementedby_ in self.reimplementedby: + reimplementedby_.export(outfile, level, namespace_, name_='reimplementedby') + for param_ in self.param: + param_.export(outfile, level, namespace_, name_='param') + for enumvalue_ in self.enumvalue: + enumvalue_.export(outfile, level, namespace_, name_='enumvalue') + if self.initializer: + self.initializer.export(outfile, level, namespace_, name_='initializer') + if self.exceptions: + self.exceptions.export(outfile, level, namespace_, name_='exceptions') + if self.briefdescription: + self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') + if self.detaileddescription: + self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription') + if self.inbodydescription: + self.inbodydescription.export(outfile, level, namespace_, name_='inbodydescription') + if self.location: + self.location.export(outfile, level, namespace_, name_='location', ) + for references_ in self.references: + references_.export(outfile, level, namespace_, 
name_='references') + for referencedby_ in self.referencedby: + referencedby_.export(outfile, level, namespace_, name_='referencedby') + def hasContent_(self): + if ( + self.templateparamlist is not None or + self.type_ is not None or + self.definition is not None or + self.argsstring is not None or + self.name is not None or + self.read is not None or + self.write is not None or + self.bitfield is not None or + self.reimplements is not None or + self.reimplementedby is not None or + self.param is not None or + self.enumvalue is not None or + self.initializer is not None or + self.exceptions is not None or + self.briefdescription is not None or + self.detaileddescription is not None or + self.inbodydescription is not None or + self.location is not None or + self.references is not None or + self.referencedby is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='memberdefType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.initonly is not None: + showIndent(outfile, level) + outfile.write('initonly = "%s",\n' % (self.initonly,)) + if self.kind is not None: + showIndent(outfile, level) + outfile.write('kind = "%s",\n' % (self.kind,)) + if self.volatile is not None: + showIndent(outfile, level) + outfile.write('volatile = "%s",\n' % (self.volatile,)) + if self.const is not None: + showIndent(outfile, level) + outfile.write('const = "%s",\n' % (self.const,)) + if self.raisexx is not None: + showIndent(outfile, level) + outfile.write('raisexx = "%s",\n' % (self.raisexx,)) + if self.virt is not None: + showIndent(outfile, level) + outfile.write('virt = "%s",\n' % (self.virt,)) + if self.readable is not None: + showIndent(outfile, level) + outfile.write('readable = "%s",\n' % (self.readable,)) + if self.prot is not None: + showIndent(outfile, level) + outfile.write('prot = "%s",\n' % (self.prot,)) + if self.explicit is not None: + showIndent(outfile, level) + outfile.write('explicit = "%s",\n' % (self.explicit,)) + if self.new is not None: + showIndent(outfile, level) + outfile.write('new = "%s",\n' % (self.new,)) + if self.final is not None: + showIndent(outfile, level) + outfile.write('final = "%s",\n' % (self.final,)) + if self.writable is not None: + showIndent(outfile, level) + outfile.write('writable = "%s",\n' % (self.writable,)) + if self.add is not None: + showIndent(outfile, level) + outfile.write('add = "%s",\n' % (self.add,)) + if self.static is not None: + showIndent(outfile, level) + outfile.write('static = "%s",\n' % (self.static,)) + if self.remove is not None: + showIndent(outfile, level) + outfile.write('remove = "%s",\n' % (self.remove,)) + if self.sealed is not None: + showIndent(outfile, level) + outfile.write('sealed = "%s",\n' % (self.sealed,)) + if self.mutable is not None: + showIndent(outfile, level) + outfile.write('mutable = "%s",\n' % (self.mutable,)) + if self.gettable is not None: + showIndent(outfile, level) + outfile.write('gettable = "%s",\n' % (self.gettable,)) + if self.inline is not None: + showIndent(outfile, level) + outfile.write('inline = "%s",\n' % (self.inline,)) + if self.settable is not None: + showIndent(outfile, level) + outfile.write('settable = "%s",\n' % (self.settable,)) + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + if 
self.templateparamlist: + showIndent(outfile, level) + outfile.write('templateparamlist=model_.templateparamlistType(\n') + self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist') + showIndent(outfile, level) + outfile.write('),\n') + if self.type_: + showIndent(outfile, level) + outfile.write('type_=model_.linkedTextType(\n') + self.type_.exportLiteral(outfile, level, name_='type') + showIndent(outfile, level) + outfile.write('),\n') + showIndent(outfile, level) + outfile.write('definition=%s,\n' % quote_python(self.definition).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('argsstring=%s,\n' % quote_python(self.argsstring).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('read=%s,\n' % quote_python(self.read).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('write=%s,\n' % quote_python(self.write).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('bitfield=%s,\n' % quote_python(self.bitfield).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('reimplements=[\n') + level += 1 + for reimplements in self.reimplements: + showIndent(outfile, level) + outfile.write('model_.reimplements(\n') + reimplements.exportLiteral(outfile, level, name_='reimplements') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('reimplementedby=[\n') + level += 1 + for reimplementedby in self.reimplementedby: + showIndent(outfile, level) + outfile.write('model_.reimplementedby(\n') + reimplementedby.exportLiteral(outfile, level, name_='reimplementedby') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('param=[\n') + level += 1 + for param in self.param: + showIndent(outfile, level) + outfile.write('model_.param(\n') + param.exportLiteral(outfile, level, name_='param') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('enumvalue=[\n') + level += 1 + for enumvalue in self.enumvalue: + showIndent(outfile, level) + outfile.write('model_.enumvalue(\n') + enumvalue.exportLiteral(outfile, level, name_='enumvalue') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + if self.initializer: + showIndent(outfile, level) + outfile.write('initializer=model_.linkedTextType(\n') + self.initializer.exportLiteral(outfile, level, name_='initializer') + showIndent(outfile, level) + outfile.write('),\n') + if self.exceptions: + showIndent(outfile, level) + outfile.write('exceptions=model_.linkedTextType(\n') + self.exceptions.exportLiteral(outfile, level, name_='exceptions') + showIndent(outfile, level) + outfile.write('),\n') + if self.briefdescription: + showIndent(outfile, level) + outfile.write('briefdescription=model_.descriptionType(\n') + self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') + showIndent(outfile, level) + outfile.write('),\n') + if self.detaileddescription: + showIndent(outfile, level) + outfile.write('detaileddescription=model_.descriptionType(\n') + self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription') + 
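+ # close the nested descriptionType literal opened for detaileddescription above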
showIndent(outfile, level) + outfile.write('),\n') + if self.inbodydescription: + showIndent(outfile, level) + outfile.write('inbodydescription=model_.descriptionType(\n') + self.inbodydescription.exportLiteral(outfile, level, name_='inbodydescription') + showIndent(outfile, level) + outfile.write('),\n') + if self.location: + showIndent(outfile, level) + outfile.write('location=model_.locationType(\n') + self.location.exportLiteral(outfile, level, name_='location') + showIndent(outfile, level) + outfile.write('),\n') + showIndent(outfile, level) + outfile.write('references=[\n') + level += 1 + for references in self.references: + showIndent(outfile, level) + outfile.write('model_.references(\n') + references.exportLiteral(outfile, level, name_='references') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('referencedby=[\n') + level += 1 + for referencedby in self.referencedby: + showIndent(outfile, level) + outfile.write('model_.referencedby(\n') + referencedby.exportLiteral(outfile, level, name_='referencedby') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('initonly'): + self.initonly = attrs.get('initonly').value + if attrs.get('kind'): + self.kind = attrs.get('kind').value + if attrs.get('volatile'): + self.volatile = attrs.get('volatile').value + if attrs.get('const'): + self.const = attrs.get('const').value + if attrs.get('raise'): + self.raisexx = attrs.get('raise').value + if attrs.get('virt'): + self.virt = attrs.get('virt').value + if attrs.get('readable'): + self.readable = attrs.get('readable').value + if attrs.get('prot'): + self.prot = attrs.get('prot').value + if attrs.get('explicit'): + self.explicit = attrs.get('explicit').value + if attrs.get('new'): + self.new = attrs.get('new').value + if attrs.get('final'): + self.final = attrs.get('final').value + if attrs.get('writable'): + self.writable = attrs.get('writable').value + if attrs.get('add'): + self.add = attrs.get('add').value + if attrs.get('static'): + self.static = attrs.get('static').value + if attrs.get('remove'): + self.remove = attrs.get('remove').value + if attrs.get('sealed'): + self.sealed = attrs.get('sealed').value + if attrs.get('mutable'): + self.mutable = attrs.get('mutable').value + if attrs.get('gettable'): + self.gettable = attrs.get('gettable').value + if attrs.get('inline'): + self.inline = attrs.get('inline').value + if attrs.get('settable'): + self.settable = attrs.get('settable').value + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'templateparamlist': + obj_ = templateparamlistType.factory() + obj_.build(child_) + self.set_templateparamlist(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'type': + obj_ = linkedTextType.factory() + obj_.build(child_) + self.set_type(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'definition': + definition_ = '' + for text__content_ in child_.childNodes: + definition_ += text__content_.nodeValue + self.definition = definition_ + elif child_.nodeType == 
Node.ELEMENT_NODE and \ + nodeName_ == 'argsstring': + argsstring_ = '' + for text__content_ in child_.childNodes: + argsstring_ += text__content_.nodeValue + self.argsstring = argsstring_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'name': + name_ = '' + for text__content_ in child_.childNodes: + name_ += text__content_.nodeValue + self.name = name_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'read': + read_ = '' + for text__content_ in child_.childNodes: + read_ += text__content_.nodeValue + self.read = read_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'write': + write_ = '' + for text__content_ in child_.childNodes: + write_ += text__content_.nodeValue + self.write = write_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'bitfield': + bitfield_ = '' + for text__content_ in child_.childNodes: + bitfield_ += text__content_.nodeValue + self.bitfield = bitfield_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'reimplements': + obj_ = reimplementType.factory() + obj_.build(child_) + self.reimplements.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'reimplementedby': + obj_ = reimplementType.factory() + obj_.build(child_) + self.reimplementedby.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'param': + obj_ = paramType.factory() + obj_.build(child_) + self.param.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'enumvalue': + obj_ = enumvalueType.factory() + obj_.build(child_) + self.enumvalue.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'initializer': + obj_ = linkedTextType.factory() + obj_.build(child_) + self.set_initializer(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'exceptions': + obj_ = linkedTextType.factory() + obj_.build(child_) + self.set_exceptions(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'briefdescription': + obj_ = descriptionType.factory() + obj_.build(child_) + self.set_briefdescription(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'detaileddescription': + obj_ = descriptionType.factory() + obj_.build(child_) + self.set_detaileddescription(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'inbodydescription': + obj_ = descriptionType.factory() + obj_.build(child_) + self.set_inbodydescription(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'location': + obj_ = locationType.factory() + obj_.build(child_) + self.set_location(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'references': + obj_ = referenceType.factory() + obj_.build(child_) + self.references.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'referencedby': + obj_ = referenceType.factory() + obj_.build(child_) + self.referencedby.append(obj_) +# end class memberdefType + + +class definition(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if definition.subclass: + return definition.subclass(*args_, **kwargs_) + else: + return definition(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='definition', namespacedef_=''): + showIndent(outfile, level) + 
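+ # write the opening tag for this element; attributes and any children follow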
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='definition') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='definition'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='definition'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='definition'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class definition + + +class argsstring(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if argsstring.subclass: + return argsstring.subclass(*args_, **kwargs_) + else: + return argsstring(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='argsstring', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='argsstring') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='argsstring'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='argsstring'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='argsstring'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def 
build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class argsstring + + +class read(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if read.subclass: + return read.subclass(*args_, **kwargs_) + else: + return read(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='read', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='read') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='read'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='read'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='read'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class read + + +class write(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if write.subclass: + return write.subclass(*args_, **kwargs_) + else: + return write(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='write', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='write') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + 
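+ # after the children are exported, the closing tag for the element follows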
outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='write'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='write'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='write'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class write + + +class bitfield(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if bitfield.subclass: + return bitfield.subclass(*args_, **kwargs_) + else: + return bitfield(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='bitfield', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='bitfield') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='bitfield'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='bitfield'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='bitfield'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if 
child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class bitfield + + +class descriptionType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, title=None, para=None, sect1=None, internal=None, mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if descriptionType.subclass: + return descriptionType.subclass(*args_, **kwargs_) + else: + return descriptionType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_title(self): return self.title + def set_title(self, title): self.title = title + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_sect1(self): return self.sect1 + def set_sect1(self, sect1): self.sect1 = sect1 + def add_sect1(self, value): self.sect1.append(value) + def insert_sect1(self, index, value): self.sect1[index] = value + def get_internal(self): return self.internal + def set_internal(self, internal): self.internal = internal + def export(self, outfile, level, namespace_='', name_='descriptionType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='descriptionType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='descriptionType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='descriptionType'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.title is not None or + self.para is not None or + self.sect1 is not None or + self.internal is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='descriptionType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, 
attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'title': + childobj_ = docTitleType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'title', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sect1': + childobj_ = docSect1Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'sect1', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'internal': + childobj_ = docInternalType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'internal', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class descriptionType + + +class enumvalueType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, prot=None, id=None, name=None, initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None): + self.prot = prot + self.id = id + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if enumvalueType.subclass: + return enumvalueType.subclass(*args_, **kwargs_) + else: + return enumvalueType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_name(self): return self.name + def set_name(self, name): self.name = name + def get_initializer(self): return self.initializer + def set_initializer(self, initializer): self.initializer = initializer + def get_briefdescription(self): return self.briefdescription + def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription + def get_detaileddescription(self): return self.detaileddescription + def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription + def get_prot(self): return self.prot + def set_prot(self, prot): self.prot = prot + def get_id(self): return self.id + def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='enumvalueType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='enumvalueType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='enumvalueType'): + if self.prot is not None: + outfile.write(' prot=%s' % (quote_attrib(self.prot), )) + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='enumvalueType'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, 
namespace_) + def hasContent_(self): + if ( + self.name is not None or + self.initializer is not None or + self.briefdescription is not None or + self.detaileddescription is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='enumvalueType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.prot is not None: + showIndent(outfile, level) + outfile.write('prot = "%s",\n' % (self.prot,)) + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('prot'): + self.prot = attrs.get('prot').value + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'name': + value_ = [] + for text_ in child_.childNodes: + value_.append(text_.nodeValue) + valuestr_ = ''.join(value_) + obj_ = self.mixedclass_(MixedContainer.CategorySimple, + MixedContainer.TypeString, 'name', valuestr_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'initializer': + childobj_ = linkedTextType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'initializer', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'briefdescription': + childobj_ = descriptionType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'briefdescription', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'detaileddescription': + childobj_ = descriptionType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'detaileddescription', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class enumvalueType + + +class templateparamlistType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, param=None): + if param is None: + self.param = [] + else: + self.param = param + def factory(*args_, **kwargs_): + if 
templateparamlistType.subclass: + return templateparamlistType.subclass(*args_, **kwargs_) + else: + return templateparamlistType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_param(self): return self.param + def set_param(self, param): self.param = param + def add_param(self, value): self.param.append(value) + def insert_param(self, index, value): self.param[index] = value + def export(self, outfile, level, namespace_='', name_='templateparamlistType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='templateparamlistType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='templateparamlistType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='templateparamlistType'): + for param_ in self.param: + param_.export(outfile, level, namespace_, name_='param') + def hasContent_(self): + if ( + self.param is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='templateparamlistType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('param=[\n') + level += 1 + for param in self.param: + showIndent(outfile, level) + outfile.write('model_.param(\n') + param.exportLiteral(outfile, level, name_='param') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'param': + obj_ = paramType.factory() + obj_.build(child_) + self.param.append(obj_) +# end class templateparamlistType + + +class paramType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, type_=None, declname=None, defname=None, array=None, defval=None, briefdescription=None): + self.type_ = type_ + self.declname = declname + self.defname = defname + self.array = array + self.defval = defval + self.briefdescription = briefdescription + def factory(*args_, **kwargs_): + if paramType.subclass: + return paramType.subclass(*args_, **kwargs_) + else: + return paramType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_type(self): return self.type_ + def set_type(self, type_): self.type_ = type_ + def get_declname(self): return self.declname + def set_declname(self, declname): self.declname = declname + def get_defname(self): return self.defname + def set_defname(self, defname): self.defname = defname + def get_array(self): return self.array + def set_array(self, array): self.array = array + def get_defval(self): return self.defval + def set_defval(self, defval): self.defval = defval + def get_briefdescription(self): return self.briefdescription + def set_briefdescription(self, briefdescription): 
self.briefdescription = briefdescription + def export(self, outfile, level, namespace_='', name_='paramType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='paramType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='paramType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='paramType'): + if self.type_: + self.type_.export(outfile, level, namespace_, name_='type') + if self.declname is not None: + showIndent(outfile, level) + outfile.write('<%sdeclname>%s\n' % (namespace_, self.format_string(quote_xml(self.declname).encode(ExternalEncoding), input_name='declname'), namespace_)) + if self.defname is not None: + showIndent(outfile, level) + outfile.write('<%sdefname>%s\n' % (namespace_, self.format_string(quote_xml(self.defname).encode(ExternalEncoding), input_name='defname'), namespace_)) + if self.array is not None: + showIndent(outfile, level) + outfile.write('<%sarray>%s\n' % (namespace_, self.format_string(quote_xml(self.array).encode(ExternalEncoding), input_name='array'), namespace_)) + if self.defval: + self.defval.export(outfile, level, namespace_, name_='defval') + if self.briefdescription: + self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') + def hasContent_(self): + if ( + self.type_ is not None or + self.declname is not None or + self.defname is not None or + self.array is not None or + self.defval is not None or + self.briefdescription is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='paramType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + if self.type_: + showIndent(outfile, level) + outfile.write('type_=model_.linkedTextType(\n') + self.type_.exportLiteral(outfile, level, name_='type') + showIndent(outfile, level) + outfile.write('),\n') + showIndent(outfile, level) + outfile.write('declname=%s,\n' % quote_python(self.declname).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('defname=%s,\n' % quote_python(self.defname).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('array=%s,\n' % quote_python(self.array).encode(ExternalEncoding)) + if self.defval: + showIndent(outfile, level) + outfile.write('defval=model_.linkedTextType(\n') + self.defval.exportLiteral(outfile, level, name_='defval') + showIndent(outfile, level) + outfile.write('),\n') + if self.briefdescription: + showIndent(outfile, level) + outfile.write('briefdescription=model_.descriptionType(\n') + self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') + showIndent(outfile, level) + outfile.write('),\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'type': + 
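+ # a 'type' child is mixed content (text plus optional 'ref' links), so parse it as linkedTextType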
obj_ = linkedTextType.factory() + obj_.build(child_) + self.set_type(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'declname': + declname_ = '' + for text__content_ in child_.childNodes: + declname_ += text__content_.nodeValue + self.declname = declname_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'defname': + defname_ = '' + for text__content_ in child_.childNodes: + defname_ += text__content_.nodeValue + self.defname = defname_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'array': + array_ = '' + for text__content_ in child_.childNodes: + array_ += text__content_.nodeValue + self.array = array_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'defval': + obj_ = linkedTextType.factory() + obj_.build(child_) + self.set_defval(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'briefdescription': + obj_ = descriptionType.factory() + obj_.build(child_) + self.set_briefdescription(obj_) +# end class paramType + + +class declname(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if declname.subclass: + return declname.subclass(*args_, **kwargs_) + else: + return declname(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='declname', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='declname') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='declname'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='declname'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='declname'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class declname + + +class defname(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if defname.subclass: + return defname.subclass(*args_, **kwargs_) + else: + 
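+ # no subclass registered for defname, so return a plain defname instance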
return defname(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='defname', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='defname') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='defname'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='defname'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='defname'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class defname + + +class array(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if array.subclass: + return array.subclass(*args_, **kwargs_) + else: + return array(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='array', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='array') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='array'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='array'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='array'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, 
name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class array + + +class linkedTextType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, ref=None, mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if linkedTextType.subclass: + return linkedTextType.subclass(*args_, **kwargs_) + else: + return linkedTextType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_ref(self): return self.ref + def set_ref(self, ref): self.ref = ref + def add_ref(self, value): self.ref.append(value) + def insert_ref(self, index, value): self.ref[index] = value + def export(self, outfile, level, namespace_='', name_='linkedTextType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='linkedTextType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='linkedTextType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='linkedTextType'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.ref is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='linkedTextType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'ref': + childobj_ = docRefTextType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'ref', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class linkedTextType + + +class graphType(GeneratedsSuper): + subclass = None + superclass 
= None + def __init__(self, node=None): + if node is None: + self.node = [] + else: + self.node = node + def factory(*args_, **kwargs_): + if graphType.subclass: + return graphType.subclass(*args_, **kwargs_) + else: + return graphType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_node(self): return self.node + def set_node(self, node): self.node = node + def add_node(self, value): self.node.append(value) + def insert_node(self, index, value): self.node[index] = value + def export(self, outfile, level, namespace_='', name_='graphType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='graphType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='graphType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='graphType'): + for node_ in self.node: + node_.export(outfile, level, namespace_, name_='node') + def hasContent_(self): + if ( + self.node is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='graphType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('node=[\n') + level += 1 + for node in self.node: + showIndent(outfile, level) + outfile.write('model_.node(\n') + node.exportLiteral(outfile, level, name_='node') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'node': + obj_ = nodeType.factory() + obj_.build(child_) + self.node.append(obj_) +# end class graphType + + +class nodeType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, id=None, label=None, link=None, childnode=None): + self.id = id + self.label = label + self.link = link + if childnode is None: + self.childnode = [] + else: + self.childnode = childnode + def factory(*args_, **kwargs_): + if nodeType.subclass: + return nodeType.subclass(*args_, **kwargs_) + else: + return nodeType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_label(self): return self.label + def set_label(self, label): self.label = label + def get_link(self): return self.link + def set_link(self, link): self.link = link + def get_childnode(self): return self.childnode + def set_childnode(self, childnode): self.childnode = childnode + def add_childnode(self, value): self.childnode.append(value) + def insert_childnode(self, index, value): self.childnode[index] = value + def get_id(self): return self.id + def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='nodeType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, 
namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='nodeType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='nodeType'): + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='nodeType'): + if self.label is not None: + showIndent(outfile, level) + outfile.write('<%slabel>%s\n' % (namespace_, self.format_string(quote_xml(self.label).encode(ExternalEncoding), input_name='label'), namespace_)) + if self.link: + self.link.export(outfile, level, namespace_, name_='link') + for childnode_ in self.childnode: + childnode_.export(outfile, level, namespace_, name_='childnode') + def hasContent_(self): + if ( + self.label is not None or + self.link is not None or + self.childnode is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='nodeType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('label=%s,\n' % quote_python(self.label).encode(ExternalEncoding)) + if self.link: + showIndent(outfile, level) + outfile.write('link=model_.linkType(\n') + self.link.exportLiteral(outfile, level, name_='link') + showIndent(outfile, level) + outfile.write('),\n') + showIndent(outfile, level) + outfile.write('childnode=[\n') + level += 1 + for childnode in self.childnode: + showIndent(outfile, level) + outfile.write('model_.childnode(\n') + childnode.exportLiteral(outfile, level, name_='childnode') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'label': + label_ = '' + for text__content_ in child_.childNodes: + label_ += text__content_.nodeValue + self.label = label_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'link': + obj_ = linkType.factory() + obj_.build(child_) + self.set_link(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'childnode': + obj_ = childnodeType.factory() + obj_.build(child_) + self.childnode.append(obj_) +# end class nodeType + + +class label(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if label.subclass: + return label.subclass(*args_, **kwargs_) + else: + return label(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', 
name_='label', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='label') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='label'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='label'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='label'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class label + + +class childnodeType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, relation=None, refid=None, edgelabel=None): + self.relation = relation + self.refid = refid + if edgelabel is None: + self.edgelabel = [] + else: + self.edgelabel = edgelabel + def factory(*args_, **kwargs_): + if childnodeType.subclass: + return childnodeType.subclass(*args_, **kwargs_) + else: + return childnodeType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_edgelabel(self): return self.edgelabel + def set_edgelabel(self, edgelabel): self.edgelabel = edgelabel + def add_edgelabel(self, value): self.edgelabel.append(value) + def insert_edgelabel(self, index, value): self.edgelabel[index] = value + def get_relation(self): return self.relation + def set_relation(self, relation): self.relation = relation + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def export(self, outfile, level, namespace_='', name_='childnodeType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='childnodeType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='childnodeType'): + if self.relation is not None: + outfile.write(' relation=%s' % (quote_attrib(self.relation), )) + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, 
outfile, level, namespace_='', name_='childnodeType'): + for edgelabel_ in self.edgelabel: + showIndent(outfile, level) + outfile.write('<%sedgelabel>%s\n' % (namespace_, self.format_string(quote_xml(edgelabel_).encode(ExternalEncoding), input_name='edgelabel'), namespace_)) + def hasContent_(self): + if ( + self.edgelabel is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='childnodeType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.relation is not None: + showIndent(outfile, level) + outfile.write('relation = "%s",\n' % (self.relation,)) + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('edgelabel=[\n') + level += 1 + for edgelabel in self.edgelabel: + showIndent(outfile, level) + outfile.write('%s,\n' % quote_python(edgelabel).encode(ExternalEncoding)) + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('relation'): + self.relation = attrs.get('relation').value + if attrs.get('refid'): + self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'edgelabel': + edgelabel_ = '' + for text__content_ in child_.childNodes: + edgelabel_ += text__content_.nodeValue + self.edgelabel.append(edgelabel_) +# end class childnodeType + + +class edgelabel(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if edgelabel.subclass: + return edgelabel.subclass(*args_, **kwargs_) + else: + return edgelabel(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='edgelabel', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='edgelabel') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='edgelabel'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='edgelabel'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='edgelabel'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, 
outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class edgelabel + + +class linkType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, refid=None, external=None, valueOf_=''): + self.refid = refid + self.external = external + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if linkType.subclass: + return linkType.subclass(*args_, **kwargs_) + else: + return linkType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def get_external(self): return self.external + def set_external(self, external): self.external = external + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='linkType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='linkType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='linkType'): + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + if self.external is not None: + outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) + def exportChildren(self, outfile, level, namespace_='', name_='linkType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='linkType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + if self.external is not None: + showIndent(outfile, level) + outfile.write('external = %s,\n' % (self.external,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('refid'): + self.refid = attrs.get('refid').value + if attrs.get('external'): + 
self.external = attrs.get('external').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class linkType + + +class listingType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, codeline=None): + if codeline is None: + self.codeline = [] + else: + self.codeline = codeline + def factory(*args_, **kwargs_): + if listingType.subclass: + return listingType.subclass(*args_, **kwargs_) + else: + return listingType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_codeline(self): return self.codeline + def set_codeline(self, codeline): self.codeline = codeline + def add_codeline(self, value): self.codeline.append(value) + def insert_codeline(self, index, value): self.codeline[index] = value + def export(self, outfile, level, namespace_='', name_='listingType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='listingType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='listingType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='listingType'): + for codeline_ in self.codeline: + codeline_.export(outfile, level, namespace_, name_='codeline') + def hasContent_(self): + if ( + self.codeline is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='listingType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('codeline=[\n') + level += 1 + for codeline in self.codeline: + showIndent(outfile, level) + outfile.write('model_.codeline(\n') + codeline.exportLiteral(outfile, level, name_='codeline') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'codeline': + obj_ = codelineType.factory() + obj_.build(child_) + self.codeline.append(obj_) +# end class listingType + + +class codelineType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None): + self.external = external + self.lineno = lineno + self.refkind = refkind + self.refid = refid + if highlight is None: + self.highlight = [] + else: + self.highlight = highlight + def factory(*args_, **kwargs_): + if codelineType.subclass: + return codelineType.subclass(*args_, **kwargs_) + else: + return codelineType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_highlight(self): return self.highlight + def set_highlight(self, highlight): 
self.highlight = highlight + def add_highlight(self, value): self.highlight.append(value) + def insert_highlight(self, index, value): self.highlight[index] = value + def get_external(self): return self.external + def set_external(self, external): self.external = external + def get_lineno(self): return self.lineno + def set_lineno(self, lineno): self.lineno = lineno + def get_refkind(self): return self.refkind + def set_refkind(self, refkind): self.refkind = refkind + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def export(self, outfile, level, namespace_='', name_='codelineType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='codelineType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='codelineType'): + if self.external is not None: + outfile.write(' external=%s' % (quote_attrib(self.external), )) + if self.lineno is not None: + outfile.write(' lineno="%s"' % self.format_integer(self.lineno, input_name='lineno')) + if self.refkind is not None: + outfile.write(' refkind=%s' % (quote_attrib(self.refkind), )) + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='codelineType'): + for highlight_ in self.highlight: + highlight_.export(outfile, level, namespace_, name_='highlight') + def hasContent_(self): + if ( + self.highlight is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='codelineType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.external is not None: + showIndent(outfile, level) + outfile.write('external = "%s",\n' % (self.external,)) + if self.lineno is not None: + showIndent(outfile, level) + outfile.write('lineno = %s,\n' % (self.lineno,)) + if self.refkind is not None: + showIndent(outfile, level) + outfile.write('refkind = "%s",\n' % (self.refkind,)) + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('highlight=[\n') + level += 1 + for highlight in self.highlight: + showIndent(outfile, level) + outfile.write('model_.highlight(\n') + highlight.exportLiteral(outfile, level, name_='highlight') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('external'): + self.external = attrs.get('external').value + if attrs.get('lineno'): + try: + self.lineno = int(attrs.get('lineno').value) + except ValueError, exp: + raise ValueError('Bad integer attribute (lineno): %s' % exp) + if attrs.get('refkind'): + self.refkind = 
attrs.get('refkind').value + if attrs.get('refid'): + self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'highlight': + obj_ = highlightType.factory() + obj_.build(child_) + self.highlight.append(obj_) +# end class codelineType + + +class highlightType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, classxx=None, sp=None, ref=None, mixedclass_=None, content_=None): + self.classxx = classxx + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if highlightType.subclass: + return highlightType.subclass(*args_, **kwargs_) + else: + return highlightType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_sp(self): return self.sp + def set_sp(self, sp): self.sp = sp + def add_sp(self, value): self.sp.append(value) + def insert_sp(self, index, value): self.sp[index] = value + def get_ref(self): return self.ref + def set_ref(self, ref): self.ref = ref + def add_ref(self, value): self.ref.append(value) + def insert_ref(self, index, value): self.ref[index] = value + def get_class(self): return self.classxx + def set_class(self, classxx): self.classxx = classxx + def export(self, outfile, level, namespace_='', name_='highlightType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='highlightType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='highlightType'): + if self.classxx is not None: + outfile.write(' class=%s' % (quote_attrib(self.classxx), )) + def exportChildren(self, outfile, level, namespace_='', name_='highlightType'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.sp is not None or + self.ref is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='highlightType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.classxx is not None: + showIndent(outfile, level) + outfile.write('classxx = "%s",\n' % (self.classxx,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('class'): + self.classxx = attrs.get('class').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sp': + value_ = [] + for text_ in child_.childNodes: + value_.append(text_.nodeValue) + 
valuestr_ = ''.join(value_) + obj_ = self.mixedclass_(MixedContainer.CategorySimple, + MixedContainer.TypeString, 'sp', valuestr_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'ref': + childobj_ = docRefTextType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'ref', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class highlightType + + +class sp(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if sp.subclass: + return sp.subclass(*args_, **kwargs_) + else: + return sp(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='sp', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='sp') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='sp'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='sp'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='sp'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class sp + + +class referenceType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None): + self.endline = endline + self.startline = startline + self.refid = refid + self.compoundref = compoundref + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if referenceType.subclass: + return referenceType.subclass(*args_, **kwargs_) + else: + return referenceType(*args_, **kwargs_) + factory = staticmethod(factory) + 
def get_endline(self): return self.endline + def set_endline(self, endline): self.endline = endline + def get_startline(self): return self.startline + def set_startline(self, startline): self.startline = startline + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def get_compoundref(self): return self.compoundref + def set_compoundref(self, compoundref): self.compoundref = compoundref + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='referenceType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='referenceType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='referenceType'): + if self.endline is not None: + outfile.write(' endline="%s"' % self.format_integer(self.endline, input_name='endline')) + if self.startline is not None: + outfile.write(' startline="%s"' % self.format_integer(self.startline, input_name='startline')) + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + if self.compoundref is not None: + outfile.write(' compoundref=%s' % (self.format_string(quote_attrib(self.compoundref).encode(ExternalEncoding), input_name='compoundref'), )) + def exportChildren(self, outfile, level, namespace_='', name_='referenceType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='referenceType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.endline is not None: + showIndent(outfile, level) + outfile.write('endline = %s,\n' % (self.endline,)) + if self.startline is not None: + showIndent(outfile, level) + outfile.write('startline = %s,\n' % (self.startline,)) + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + if self.compoundref is not None: + showIndent(outfile, level) + outfile.write('compoundref = %s,\n' % (self.compoundref,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('endline'): + try: + self.endline = int(attrs.get('endline').value) + except ValueError, exp: + raise ValueError('Bad integer attribute (endline): %s' % exp) + if attrs.get('startline'): + try: + self.startline = int(attrs.get('startline').value) + except ValueError, exp: + raise ValueError('Bad integer attribute (startline): %s' % exp) + if attrs.get('refid'): + self.refid = attrs.get('refid').value + if 
attrs.get('compoundref'): + self.compoundref = attrs.get('compoundref').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class referenceType + + +class locationType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''): + self.bodystart = bodystart + self.line = line + self.bodyend = bodyend + self.bodyfile = bodyfile + self.file = file + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if locationType.subclass: + return locationType.subclass(*args_, **kwargs_) + else: + return locationType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_bodystart(self): return self.bodystart + def set_bodystart(self, bodystart): self.bodystart = bodystart + def get_line(self): return self.line + def set_line(self, line): self.line = line + def get_bodyend(self): return self.bodyend + def set_bodyend(self, bodyend): self.bodyend = bodyend + def get_bodyfile(self): return self.bodyfile + def set_bodyfile(self, bodyfile): self.bodyfile = bodyfile + def get_file(self): return self.file + def set_file(self, file): self.file = file + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='locationType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='locationType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='locationType'): + if self.bodystart is not None: + outfile.write(' bodystart="%s"' % self.format_integer(self.bodystart, input_name='bodystart')) + if self.line is not None: + outfile.write(' line="%s"' % self.format_integer(self.line, input_name='line')) + if self.bodyend is not None: + outfile.write(' bodyend="%s"' % self.format_integer(self.bodyend, input_name='bodyend')) + if self.bodyfile is not None: + outfile.write(' bodyfile=%s' % (self.format_string(quote_attrib(self.bodyfile).encode(ExternalEncoding), input_name='bodyfile'), )) + if self.file is not None: + outfile.write(' file=%s' % (self.format_string(quote_attrib(self.file).encode(ExternalEncoding), input_name='file'), )) + def exportChildren(self, outfile, level, namespace_='', name_='locationType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='locationType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.bodystart is not None: + 
showIndent(outfile, level) + outfile.write('bodystart = %s,\n' % (self.bodystart,)) + if self.line is not None: + showIndent(outfile, level) + outfile.write('line = %s,\n' % (self.line,)) + if self.bodyend is not None: + showIndent(outfile, level) + outfile.write('bodyend = %s,\n' % (self.bodyend,)) + if self.bodyfile is not None: + showIndent(outfile, level) + outfile.write('bodyfile = %s,\n' % (self.bodyfile,)) + if self.file is not None: + showIndent(outfile, level) + outfile.write('file = %s,\n' % (self.file,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('bodystart'): + try: + self.bodystart = int(attrs.get('bodystart').value) + except ValueError, exp: + raise ValueError('Bad integer attribute (bodystart): %s' % exp) + if attrs.get('line'): + try: + self.line = int(attrs.get('line').value) + except ValueError, exp: + raise ValueError('Bad integer attribute (line): %s' % exp) + if attrs.get('bodyend'): + try: + self.bodyend = int(attrs.get('bodyend').value) + except ValueError, exp: + raise ValueError('Bad integer attribute (bodyend): %s' % exp) + if attrs.get('bodyfile'): + self.bodyfile = attrs.get('bodyfile').value + if attrs.get('file'): + self.file = attrs.get('file').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class locationType + + +class docSect1Type(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, id=None, title=None, para=None, sect2=None, internal=None, mixedclass_=None, content_=None): + self.id = id + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docSect1Type.subclass: + return docSect1Type.subclass(*args_, **kwargs_) + else: + return docSect1Type(*args_, **kwargs_) + factory = staticmethod(factory) + def get_title(self): return self.title + def set_title(self, title): self.title = title + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_sect2(self): return self.sect2 + def set_sect2(self, sect2): self.sect2 = sect2 + def add_sect2(self, value): self.sect2.append(value) + def insert_sect2(self, index, value): self.sect2[index] = value + def get_internal(self): return self.internal + def set_internal(self, internal): self.internal = internal + def get_id(self): return self.id + def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='docSect1Type', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docSect1Type') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', 
name_='docSect1Type'): + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docSect1Type'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.title is not None or + self.para is not None or + self.sect2 is not None or + self.internal is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docSect1Type'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'title': + childobj_ = docTitleType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'title', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sect2': + childobj_ = docSect2Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'sect2', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'internal': + childobj_ = docInternalS1Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'internal', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class docSect1Type + + +class docSect2Type(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, id=None, title=None, para=None, sect3=None, internal=None, mixedclass_=None, content_=None): + self.id = id + if 
mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docSect2Type.subclass: + return docSect2Type.subclass(*args_, **kwargs_) + else: + return docSect2Type(*args_, **kwargs_) + factory = staticmethod(factory) + def get_title(self): return self.title + def set_title(self, title): self.title = title + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_sect3(self): return self.sect3 + def set_sect3(self, sect3): self.sect3 = sect3 + def add_sect3(self, value): self.sect3.append(value) + def insert_sect3(self, index, value): self.sect3[index] = value + def get_internal(self): return self.internal + def set_internal(self, internal): self.internal = internal + def get_id(self): return self.id + def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='docSect2Type', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docSect2Type') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docSect2Type'): + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docSect2Type'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.title is not None or + self.para is not None or + self.sect3 is not None or + self.internal is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docSect2Type'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if 
child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'title': + childobj_ = docTitleType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'title', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sect3': + childobj_ = docSect3Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'sect3', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'internal': + childobj_ = docInternalS2Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'internal', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class docSect2Type + + +class docSect3Type(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, id=None, title=None, para=None, sect4=None, internal=None, mixedclass_=None, content_=None): + self.id = id + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docSect3Type.subclass: + return docSect3Type.subclass(*args_, **kwargs_) + else: + return docSect3Type(*args_, **kwargs_) + factory = staticmethod(factory) + def get_title(self): return self.title + def set_title(self, title): self.title = title + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_sect4(self): return self.sect4 + def set_sect4(self, sect4): self.sect4 = sect4 + def add_sect4(self, value): self.sect4.append(value) + def insert_sect4(self, index, value): self.sect4[index] = value + def get_internal(self): return self.internal + def set_internal(self, internal): self.internal = internal + def get_id(self): return self.id + def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='docSect3Type', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docSect3Type') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docSect3Type'): + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docSect3Type'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.title is not None or + self.para is not None or + self.sect4 is not None or + self.internal is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, 
level, name_='docSect3Type'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'title': + childobj_ = docTitleType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'title', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sect4': + childobj_ = docSect4Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'sect4', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'internal': + childobj_ = docInternalS3Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'internal', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class docSect3Type + + +class docSect4Type(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, id=None, title=None, para=None, internal=None, mixedclass_=None, content_=None): + self.id = id + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docSect4Type.subclass: + return docSect4Type.subclass(*args_, **kwargs_) + else: + return docSect4Type(*args_, **kwargs_) + factory = staticmethod(factory) + def get_title(self): return self.title + def set_title(self, title): self.title = title + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, 
value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_internal(self): return self.internal + def set_internal(self, internal): self.internal = internal + def get_id(self): return self.id + def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='docSect4Type', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docSect4Type') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docSect4Type'): + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docSect4Type'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.title is not None or + self.para is not None or + self.internal is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docSect4Type'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'title': + childobj_ = docTitleType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'title', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'internal': + childobj_ = docInternalS4Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'internal', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end 
class docSect4Type + + +class docInternalType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docInternalType.subclass: + return docInternalType.subclass(*args_, **kwargs_) + else: + return docInternalType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_sect1(self): return self.sect1 + def set_sect1(self, sect1): self.sect1 = sect1 + def add_sect1(self, value): self.sect1.append(value) + def insert_sect1(self, index, value): self.sect1[index] = value + def export(self, outfile, level, namespace_='', name_='docInternalType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docInternalType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docInternalType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docInternalType'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.para is not None or + self.sect1 is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docInternalType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sect1': + childobj_ = docSect1Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'sect1', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class docInternalType + + +class 
docInternalS1Type(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docInternalS1Type.subclass: + return docInternalS1Type.subclass(*args_, **kwargs_) + else: + return docInternalS1Type(*args_, **kwargs_) + factory = staticmethod(factory) + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_sect2(self): return self.sect2 + def set_sect2(self, sect2): self.sect2 = sect2 + def add_sect2(self, value): self.sect2.append(value) + def insert_sect2(self, index, value): self.sect2[index] = value + def export(self, outfile, level, namespace_='', name_='docInternalS1Type', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docInternalS1Type') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS1Type'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docInternalS1Type'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.para is not None or + self.sect2 is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docInternalS1Type'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sect2': + childobj_ = docSect2Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'sect2', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class docInternalS1Type + + +class docInternalS2Type(GeneratedsSuper): + 
subclass = None + superclass = None + def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docInternalS2Type.subclass: + return docInternalS2Type.subclass(*args_, **kwargs_) + else: + return docInternalS2Type(*args_, **kwargs_) + factory = staticmethod(factory) + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_sect3(self): return self.sect3 + def set_sect3(self, sect3): self.sect3 = sect3 + def add_sect3(self, value): self.sect3.append(value) + def insert_sect3(self, index, value): self.sect3[index] = value + def export(self, outfile, level, namespace_='', name_='docInternalS2Type', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docInternalS2Type') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS2Type'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docInternalS2Type'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.para is not None or + self.sect3 is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docInternalS2Type'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sect3': + childobj_ = docSect3Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'sect3', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class docInternalS2Type + + +class docInternalS3Type(GeneratedsSuper): + subclass = None + superclass = None + def 
__init__(self, para=None, sect3=None, mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docInternalS3Type.subclass: + return docInternalS3Type.subclass(*args_, **kwargs_) + else: + return docInternalS3Type(*args_, **kwargs_) + factory = staticmethod(factory) + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_sect3(self): return self.sect3 + def set_sect3(self, sect3): self.sect3 = sect3 + def add_sect3(self, value): self.sect3.append(value) + def insert_sect3(self, index, value): self.sect3[index] = value + def export(self, outfile, level, namespace_='', name_='docInternalS3Type', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docInternalS3Type') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS3Type'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docInternalS3Type'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.para is not None or + self.sect3 is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docInternalS3Type'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sect3': + childobj_ = docSect4Type.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'sect3', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class docInternalS3Type + + +class docInternalS4Type(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, para=None, mixedclass_=None, 
content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docInternalS4Type.subclass: + return docInternalS4Type.subclass(*args_, **kwargs_) + else: + return docInternalS4Type(*args_, **kwargs_) + factory = staticmethod(factory) + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def export(self, outfile, level, namespace_='', name_='docInternalS4Type', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docInternalS4Type') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS4Type'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docInternalS4Type'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.para is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docInternalS4Type'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + childobj_ = docParaType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'para', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class docInternalS4Type + + +class docTitleType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_='', mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docTitleType.subclass: + return docTitleType.subclass(*args_, **kwargs_) + else: + return docTitleType(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docTitleType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docTitleType') 
+ outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docTitleType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docTitleType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docTitleType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docTitleType + + +class docParaType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_='', mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docParaType.subclass: + return docParaType.subclass(*args_, **kwargs_) + else: + return docParaType(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docParaType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docParaType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docParaType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docParaType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docParaType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + 
outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docParaType + + +class docMarkupType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_='', mixedclass_=None, content_=None): + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docMarkupType.subclass: + return docMarkupType.subclass(*args_, **kwargs_) + else: + return docMarkupType(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docMarkupType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docMarkupType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docMarkupType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docMarkupType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docMarkupType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docMarkupType + + +class docURLLink(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None): + self.url = url + if mixedclass_ is None: + 
self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docURLLink.subclass: + return docURLLink.subclass(*args_, **kwargs_) + else: + return docURLLink(*args_, **kwargs_) + factory = staticmethod(factory) + def get_url(self): return self.url + def set_url(self, url): self.url = url + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docURLLink', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docURLLink') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docURLLink'): + if self.url is not None: + outfile.write(' url=%s' % (self.format_string(quote_attrib(self.url).encode(ExternalEncoding), input_name='url'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docURLLink'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docURLLink'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.url is not None: + showIndent(outfile, level) + outfile.write('url = %s,\n' % (self.url,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('url'): + self.url = attrs.get('url').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docURLLink + + +class docAnchorType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): + self.id = id + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docAnchorType.subclass: + return docAnchorType.subclass(*args_, **kwargs_) + else: + return docAnchorType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_id(self): return self.id + def set_id(self, id): self.id = id + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', 
name_='docAnchorType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docAnchorType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docAnchorType'): + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docAnchorType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docAnchorType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docAnchorType + + +class docFormulaType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): + self.id = id + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docFormulaType.subclass: + return docFormulaType.subclass(*args_, **kwargs_) + else: + return docFormulaType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_id(self): return self.id + def set_id(self, id): self.id = id + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docFormulaType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docFormulaType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docFormulaType'): + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def 
exportChildren(self, outfile, level, namespace_='', name_='docFormulaType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docFormulaType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docFormulaType + + +class docIndexEntryType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, primaryie=None, secondaryie=None): + self.primaryie = primaryie + self.secondaryie = secondaryie + def factory(*args_, **kwargs_): + if docIndexEntryType.subclass: + return docIndexEntryType.subclass(*args_, **kwargs_) + else: + return docIndexEntryType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_primaryie(self): return self.primaryie + def set_primaryie(self, primaryie): self.primaryie = primaryie + def get_secondaryie(self): return self.secondaryie + def set_secondaryie(self, secondaryie): self.secondaryie = secondaryie + def export(self, outfile, level, namespace_='', name_='docIndexEntryType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docIndexEntryType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docIndexEntryType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docIndexEntryType'): + if self.primaryie is not None: + showIndent(outfile, level) + outfile.write('<%sprimaryie>%s\n' % (namespace_, self.format_string(quote_xml(self.primaryie).encode(ExternalEncoding), input_name='primaryie'), namespace_)) + if self.secondaryie is not None: + showIndent(outfile, level) + outfile.write('<%ssecondaryie>%s\n' % (namespace_, self.format_string(quote_xml(self.secondaryie).encode(ExternalEncoding), input_name='secondaryie'), namespace_)) + def hasContent_(self): + if ( + self.primaryie is not None or + self.secondaryie is not None + ): + return True + else: + return False + def 
exportLiteral(self, outfile, level, name_='docIndexEntryType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('primaryie=%s,\n' % quote_python(self.primaryie).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('secondaryie=%s,\n' % quote_python(self.secondaryie).encode(ExternalEncoding)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'primaryie': + primaryie_ = '' + for text__content_ in child_.childNodes: + primaryie_ += text__content_.nodeValue + self.primaryie = primaryie_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'secondaryie': + secondaryie_ = '' + for text__content_ in child_.childNodes: + secondaryie_ += text__content_.nodeValue + self.secondaryie = secondaryie_ +# end class docIndexEntryType + + +class docListType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, listitem=None): + if listitem is None: + self.listitem = [] + else: + self.listitem = listitem + def factory(*args_, **kwargs_): + if docListType.subclass: + return docListType.subclass(*args_, **kwargs_) + else: + return docListType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_listitem(self): return self.listitem + def set_listitem(self, listitem): self.listitem = listitem + def add_listitem(self, value): self.listitem.append(value) + def insert_listitem(self, index, value): self.listitem[index] = value + def export(self, outfile, level, namespace_='', name_='docListType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docListType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docListType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docListType'): + for listitem_ in self.listitem: + listitem_.export(outfile, level, namespace_, name_='listitem') + def hasContent_(self): + if ( + self.listitem is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docListType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('listitem=[\n') + level += 1 + for listitem in self.listitem: + showIndent(outfile, level) + outfile.write('model_.listitem(\n') + listitem.exportLiteral(outfile, level, name_='listitem') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + 
self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'listitem': + obj_ = docListItemType.factory() + obj_.build(child_) + self.listitem.append(obj_) +# end class docListType + + +class docListItemType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, para=None): + if para is None: + self.para = [] + else: + self.para = para + def factory(*args_, **kwargs_): + if docListItemType.subclass: + return docListItemType.subclass(*args_, **kwargs_) + else: + return docListItemType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def export(self, outfile, level, namespace_='', name_='docListItemType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docListItemType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docListItemType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docListItemType'): + for para_ in self.para: + para_.export(outfile, level, namespace_, name_='para') + def hasContent_(self): + if ( + self.para is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docListItemType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('para=[\n') + level += 1 + for para in self.para: + showIndent(outfile, level) + outfile.write('model_.para(\n') + para.exportLiteral(outfile, level, name_='para') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + obj_ = docParaType.factory() + obj_.build(child_) + self.para.append(obj_) +# end class docListItemType + + +class docSimpleSectType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, kind=None, title=None, para=None): + self.kind = kind + self.title = title + if para is None: + self.para = [] + else: + self.para = para + def factory(*args_, **kwargs_): + if docSimpleSectType.subclass: + return docSimpleSectType.subclass(*args_, **kwargs_) + else: + return docSimpleSectType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_title(self): return self.title + def set_title(self, title): self.title = title + def get_para(self): return 
self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_kind(self): return self.kind + def set_kind(self, kind): self.kind = kind + def export(self, outfile, level, namespace_='', name_='docSimpleSectType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docSimpleSectType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docSimpleSectType'): + if self.kind is not None: + outfile.write(' kind=%s' % (quote_attrib(self.kind), )) + def exportChildren(self, outfile, level, namespace_='', name_='docSimpleSectType'): + if self.title: + self.title.export(outfile, level, namespace_, name_='title') + for para_ in self.para: + para_.export(outfile, level, namespace_, name_='para') + def hasContent_(self): + if ( + self.title is not None or + self.para is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docSimpleSectType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.kind is not None: + showIndent(outfile, level) + outfile.write('kind = "%s",\n' % (self.kind,)) + def exportLiteralChildren(self, outfile, level, name_): + if self.title: + showIndent(outfile, level) + outfile.write('title=model_.docTitleType(\n') + self.title.exportLiteral(outfile, level, name_='title') + showIndent(outfile, level) + outfile.write('),\n') + showIndent(outfile, level) + outfile.write('para=[\n') + level += 1 + for para in self.para: + showIndent(outfile, level) + outfile.write('model_.para(\n') + para.exportLiteral(outfile, level, name_='para') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('kind'): + self.kind = attrs.get('kind').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'title': + obj_ = docTitleType.factory() + obj_.build(child_) + self.set_title(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + obj_ = docParaType.factory() + obj_.build(child_) + self.para.append(obj_) +# end class docSimpleSectType + + +class docVarListEntryType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, term=None): + self.term = term + def factory(*args_, **kwargs_): + if docVarListEntryType.subclass: + return docVarListEntryType.subclass(*args_, **kwargs_) + else: + return docVarListEntryType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_term(self): return self.term + def set_term(self, term): self.term = term + def export(self, outfile, level, namespace_='', name_='docVarListEntryType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, 
namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docVarListEntryType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docVarListEntryType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docVarListEntryType'): + if self.term: + self.term.export(outfile, level, namespace_, name_='term', ) + def hasContent_(self): + if ( + self.term is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docVarListEntryType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + if self.term: + showIndent(outfile, level) + outfile.write('term=model_.docTitleType(\n') + self.term.exportLiteral(outfile, level, name_='term') + showIndent(outfile, level) + outfile.write('),\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'term': + obj_ = docTitleType.factory() + obj_.build(child_) + self.set_term(obj_) +# end class docVarListEntryType + + +class docVariableListType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if docVariableListType.subclass: + return docVariableListType.subclass(*args_, **kwargs_) + else: + return docVariableListType(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docVariableListType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docVariableListType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docVariableListType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docVariableListType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docVariableListType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % 
(self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docVariableListType + + +class docRefTextType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None): + self.refid = refid + self.kindref = kindref + self.external = external + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docRefTextType.subclass: + return docRefTextType.subclass(*args_, **kwargs_) + else: + return docRefTextType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def get_kindref(self): return self.kindref + def set_kindref(self, kindref): self.kindref = kindref + def get_external(self): return self.external + def set_external(self, external): self.external = external + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docRefTextType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docRefTextType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docRefTextType'): + if self.refid is not None: + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + if self.kindref is not None: + outfile.write(' kindref=%s' % (quote_attrib(self.kindref), )) + if self.external is not None: + outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docRefTextType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docRefTextType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + if self.kindref is not None: + showIndent(outfile, level) + outfile.write('kindref = "%s",\n' % (self.kindref,)) + if self.external is not None: + showIndent(outfile, level) + outfile.write('external = %s,\n' % (self.external,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, 
level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('refid'): + self.refid = attrs.get('refid').value + if attrs.get('kindref'): + self.kindref = attrs.get('kindref').value + if attrs.get('external'): + self.external = attrs.get('external').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docRefTextType + + +class docTableType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, rows=None, cols=None, row=None, caption=None): + self.rows = rows + self.cols = cols + if row is None: + self.row = [] + else: + self.row = row + self.caption = caption + def factory(*args_, **kwargs_): + if docTableType.subclass: + return docTableType.subclass(*args_, **kwargs_) + else: + return docTableType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_row(self): return self.row + def set_row(self, row): self.row = row + def add_row(self, value): self.row.append(value) + def insert_row(self, index, value): self.row[index] = value + def get_caption(self): return self.caption + def set_caption(self, caption): self.caption = caption + def get_rows(self): return self.rows + def set_rows(self, rows): self.rows = rows + def get_cols(self): return self.cols + def set_cols(self, cols): self.cols = cols + def export(self, outfile, level, namespace_='', name_='docTableType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docTableType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docTableType'): + if self.rows is not None: + outfile.write(' rows="%s"' % self.format_integer(self.rows, input_name='rows')) + if self.cols is not None: + outfile.write(' cols="%s"' % self.format_integer(self.cols, input_name='cols')) + def exportChildren(self, outfile, level, namespace_='', name_='docTableType'): + for row_ in self.row: + row_.export(outfile, level, namespace_, name_='row') + if self.caption: + self.caption.export(outfile, level, namespace_, name_='caption') + def hasContent_(self): + if ( + self.row is not None or + self.caption is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docTableType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.rows is not None: + showIndent(outfile, level) + outfile.write('rows = %s,\n' % (self.rows,)) + if self.cols is not None: + showIndent(outfile, level) + outfile.write('cols = %s,\n' % (self.cols,)) + def exportLiteralChildren(self, 
outfile, level, name_): + showIndent(outfile, level) + outfile.write('row=[\n') + level += 1 + for row in self.row: + showIndent(outfile, level) + outfile.write('model_.row(\n') + row.exportLiteral(outfile, level, name_='row') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + if self.caption: + showIndent(outfile, level) + outfile.write('caption=model_.docCaptionType(\n') + self.caption.exportLiteral(outfile, level, name_='caption') + showIndent(outfile, level) + outfile.write('),\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('rows'): + try: + self.rows = int(attrs.get('rows').value) + except ValueError, exp: + raise ValueError('Bad integer attribute (rows): %s' % exp) + if attrs.get('cols'): + try: + self.cols = int(attrs.get('cols').value) + except ValueError, exp: + raise ValueError('Bad integer attribute (cols): %s' % exp) + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'row': + obj_ = docRowType.factory() + obj_.build(child_) + self.row.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'caption': + obj_ = docCaptionType.factory() + obj_.build(child_) + self.set_caption(obj_) +# end class docTableType + + +class docRowType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, entry=None): + if entry is None: + self.entry = [] + else: + self.entry = entry + def factory(*args_, **kwargs_): + if docRowType.subclass: + return docRowType.subclass(*args_, **kwargs_) + else: + return docRowType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_entry(self): return self.entry + def set_entry(self, entry): self.entry = entry + def add_entry(self, value): self.entry.append(value) + def insert_entry(self, index, value): self.entry[index] = value + def export(self, outfile, level, namespace_='', name_='docRowType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docRowType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docRowType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docRowType'): + for entry_ in self.entry: + entry_.export(outfile, level, namespace_, name_='entry') + def hasContent_(self): + if ( + self.entry is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docRowType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('entry=[\n') + level += 1 + for entry in self.entry: + showIndent(outfile, level) + outfile.write('model_.entry(\n') + entry.exportLiteral(outfile, level, name_='entry') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + 
outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'entry': + obj_ = docEntryType.factory() + obj_.build(child_) + self.entry.append(obj_) +# end class docRowType + + +class docEntryType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, thead=None, para=None): + self.thead = thead + if para is None: + self.para = [] + else: + self.para = para + def factory(*args_, **kwargs_): + if docEntryType.subclass: + return docEntryType.subclass(*args_, **kwargs_) + else: + return docEntryType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_thead(self): return self.thead + def set_thead(self, thead): self.thead = thead + def export(self, outfile, level, namespace_='', name_='docEntryType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docEntryType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docEntryType'): + if self.thead is not None: + outfile.write(' thead=%s' % (quote_attrib(self.thead), )) + def exportChildren(self, outfile, level, namespace_='', name_='docEntryType'): + for para_ in self.para: + para_.export(outfile, level, namespace_, name_='para') + def hasContent_(self): + if ( + self.para is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docEntryType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.thead is not None: + showIndent(outfile, level) + outfile.write('thead = "%s",\n' % (self.thead,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('para=[\n') + level += 1 + for para in self.para: + showIndent(outfile, level) + outfile.write('model_.para(\n') + para.exportLiteral(outfile, level, name_='para') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('thead'): + self.thead = attrs.get('thead').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + obj_ = docParaType.factory() + obj_.build(child_) + self.para.append(obj_) +# end class docEntryType + + +class docCaptionType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_='', mixedclass_=None, content_=None): + if mixedclass_ is 
None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docCaptionType.subclass: + return docCaptionType.subclass(*args_, **kwargs_) + else: + return docCaptionType(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docCaptionType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docCaptionType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docCaptionType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docCaptionType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docCaptionType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docCaptionType + + +class docHeadingType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None): + self.level = level + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docHeadingType.subclass: + return docHeadingType.subclass(*args_, **kwargs_) + else: + return docHeadingType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_level(self): return self.level + def set_level(self, level): self.level = level + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docHeadingType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docHeadingType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) 
+ outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docHeadingType'): + if self.level is not None: + outfile.write(' level="%s"' % self.format_integer(self.level, input_name='level')) + def exportChildren(self, outfile, level, namespace_='', name_='docHeadingType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docHeadingType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.level is not None: + showIndent(outfile, level) + outfile.write('level = %s,\n' % (self.level,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('level'): + try: + self.level = int(attrs.get('level').value) + except ValueError, exp: + raise ValueError('Bad integer attribute (level): %s' % exp) + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docHeadingType + + +class docImageType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None): + self.width = width + self.type_ = type_ + self.name = name + self.height = height + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docImageType.subclass: + return docImageType.subclass(*args_, **kwargs_) + else: + return docImageType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_width(self): return self.width + def set_width(self, width): self.width = width + def get_type(self): return self.type_ + def set_type(self, type_): self.type_ = type_ + def get_name(self): return self.name + def set_name(self, name): self.name = name + def get_height(self): return self.height + def set_height(self, height): self.height = height + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docImageType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docImageType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, 
namespace_='', name_='docImageType'): + if self.width is not None: + outfile.write(' width=%s' % (self.format_string(quote_attrib(self.width).encode(ExternalEncoding), input_name='width'), )) + if self.type_ is not None: + outfile.write(' type=%s' % (quote_attrib(self.type_), )) + if self.name is not None: + outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + if self.height is not None: + outfile.write(' height=%s' % (self.format_string(quote_attrib(self.height).encode(ExternalEncoding), input_name='height'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docImageType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docImageType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.width is not None: + showIndent(outfile, level) + outfile.write('width = %s,\n' % (self.width,)) + if self.type_ is not None: + showIndent(outfile, level) + outfile.write('type_ = "%s",\n' % (self.type_,)) + if self.name is not None: + showIndent(outfile, level) + outfile.write('name = %s,\n' % (self.name,)) + if self.height is not None: + showIndent(outfile, level) + outfile.write('height = %s,\n' % (self.height,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('width'): + self.width = attrs.get('width').value + if attrs.get('type'): + self.type_ = attrs.get('type').value + if attrs.get('name'): + self.name = attrs.get('name').value + if attrs.get('height'): + self.height = attrs.get('height').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docImageType + + +class docDotFileType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None): + self.name = name + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docDotFileType.subclass: + return docDotFileType.subclass(*args_, **kwargs_) + else: + return docDotFileType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_name(self): return self.name + def set_name(self, name): self.name = name + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', 
name_='docDotFileType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docDotFileType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docDotFileType'): + if self.name is not None: + outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docDotFileType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docDotFileType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.name is not None: + showIndent(outfile, level) + outfile.write('name = %s,\n' % (self.name,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('name'): + self.name = attrs.get('name').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docDotFileType + + +class docTocItemType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): + self.id = id + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docTocItemType.subclass: + return docTocItemType.subclass(*args_, **kwargs_) + else: + return docTocItemType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_id(self): return self.id + def set_id(self, id): self.id = id + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docTocItemType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docTocItemType') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docTocItemType'): + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), 
input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docTocItemType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docTocItemType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docTocItemType + + +class docTocListType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, tocitem=None): + if tocitem is None: + self.tocitem = [] + else: + self.tocitem = tocitem + def factory(*args_, **kwargs_): + if docTocListType.subclass: + return docTocListType.subclass(*args_, **kwargs_) + else: + return docTocListType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_tocitem(self): return self.tocitem + def set_tocitem(self, tocitem): self.tocitem = tocitem + def add_tocitem(self, value): self.tocitem.append(value) + def insert_tocitem(self, index, value): self.tocitem[index] = value + def export(self, outfile, level, namespace_='', name_='docTocListType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docTocListType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docTocListType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docTocListType'): + for tocitem_ in self.tocitem: + tocitem_.export(outfile, level, namespace_, name_='tocitem') + def hasContent_(self): + if ( + self.tocitem is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docTocListType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('tocitem=[\n') + level += 1 + for tocitem 
in self.tocitem: + showIndent(outfile, level) + outfile.write('model_.tocitem(\n') + tocitem.exportLiteral(outfile, level, name_='tocitem') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'tocitem': + obj_ = docTocItemType.factory() + obj_.build(child_) + self.tocitem.append(obj_) +# end class docTocListType + + +class docLanguageType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, langid=None, para=None): + self.langid = langid + if para is None: + self.para = [] + else: + self.para = para + def factory(*args_, **kwargs_): + if docLanguageType.subclass: + return docLanguageType.subclass(*args_, **kwargs_) + else: + return docLanguageType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_langid(self): return self.langid + def set_langid(self, langid): self.langid = langid + def export(self, outfile, level, namespace_='', name_='docLanguageType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docLanguageType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docLanguageType'): + if self.langid is not None: + outfile.write(' langid=%s' % (self.format_string(quote_attrib(self.langid).encode(ExternalEncoding), input_name='langid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docLanguageType'): + for para_ in self.para: + para_.export(outfile, level, namespace_, name_='para') + def hasContent_(self): + if ( + self.para is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docLanguageType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.langid is not None: + showIndent(outfile, level) + outfile.write('langid = %s,\n' % (self.langid,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('para=[\n') + level += 1 + for para in self.para: + showIndent(outfile, level) + outfile.write('model_.para(\n') + para.exportLiteral(outfile, level, name_='para') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('langid'): + self.langid = attrs.get('langid').value + def buildChildren(self, child_, 
nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + obj_ = docParaType.factory() + obj_.build(child_) + self.para.append(obj_) +# end class docLanguageType + + +class docParamListType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, kind=None, parameteritem=None): + self.kind = kind + if parameteritem is None: + self.parameteritem = [] + else: + self.parameteritem = parameteritem + def factory(*args_, **kwargs_): + if docParamListType.subclass: + return docParamListType.subclass(*args_, **kwargs_) + else: + return docParamListType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_parameteritem(self): return self.parameteritem + def set_parameteritem(self, parameteritem): self.parameteritem = parameteritem + def add_parameteritem(self, value): self.parameteritem.append(value) + def insert_parameteritem(self, index, value): self.parameteritem[index] = value + def get_kind(self): return self.kind + def set_kind(self, kind): self.kind = kind + def export(self, outfile, level, namespace_='', name_='docParamListType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docParamListType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docParamListType'): + if self.kind is not None: + outfile.write(' kind=%s' % (quote_attrib(self.kind), )) + def exportChildren(self, outfile, level, namespace_='', name_='docParamListType'): + for parameteritem_ in self.parameteritem: + parameteritem_.export(outfile, level, namespace_, name_='parameteritem') + def hasContent_(self): + if ( + self.parameteritem is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docParamListType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.kind is not None: + showIndent(outfile, level) + outfile.write('kind = "%s",\n' % (self.kind,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('parameteritem=[\n') + level += 1 + for parameteritem in self.parameteritem: + showIndent(outfile, level) + outfile.write('model_.parameteritem(\n') + parameteritem.exportLiteral(outfile, level, name_='parameteritem') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('kind'): + self.kind = attrs.get('kind').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'parameteritem': + obj_ = docParamListItem.factory() + obj_.build(child_) + self.parameteritem.append(obj_) +# end class docParamListType + + +class docParamListItem(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, parameternamelist=None, parameterdescription=None): + if parameternamelist is None: + 
self.parameternamelist = [] + else: + self.parameternamelist = parameternamelist + self.parameterdescription = parameterdescription + def factory(*args_, **kwargs_): + if docParamListItem.subclass: + return docParamListItem.subclass(*args_, **kwargs_) + else: + return docParamListItem(*args_, **kwargs_) + factory = staticmethod(factory) + def get_parameternamelist(self): return self.parameternamelist + def set_parameternamelist(self, parameternamelist): self.parameternamelist = parameternamelist + def add_parameternamelist(self, value): self.parameternamelist.append(value) + def insert_parameternamelist(self, index, value): self.parameternamelist[index] = value + def get_parameterdescription(self): return self.parameterdescription + def set_parameterdescription(self, parameterdescription): self.parameterdescription = parameterdescription + def export(self, outfile, level, namespace_='', name_='docParamListItem', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docParamListItem') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docParamListItem'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docParamListItem'): + for parameternamelist_ in self.parameternamelist: + parameternamelist_.export(outfile, level, namespace_, name_='parameternamelist') + if self.parameterdescription: + self.parameterdescription.export(outfile, level, namespace_, name_='parameterdescription', ) + def hasContent_(self): + if ( + self.parameternamelist is not None or + self.parameterdescription is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docParamListItem'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('parameternamelist=[\n') + level += 1 + for parameternamelist in self.parameternamelist: + showIndent(outfile, level) + outfile.write('model_.parameternamelist(\n') + parameternamelist.exportLiteral(outfile, level, name_='parameternamelist') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + if self.parameterdescription: + showIndent(outfile, level) + outfile.write('parameterdescription=model_.descriptionType(\n') + self.parameterdescription.exportLiteral(outfile, level, name_='parameterdescription') + showIndent(outfile, level) + outfile.write('),\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'parameternamelist': + obj_ = docParamNameList.factory() + obj_.build(child_) + self.parameternamelist.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'parameterdescription': + obj_ = descriptionType.factory() + 
obj_.build(child_) + self.set_parameterdescription(obj_) +# end class docParamListItem + + +class docParamNameList(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, parametername=None): + if parametername is None: + self.parametername = [] + else: + self.parametername = parametername + def factory(*args_, **kwargs_): + if docParamNameList.subclass: + return docParamNameList.subclass(*args_, **kwargs_) + else: + return docParamNameList(*args_, **kwargs_) + factory = staticmethod(factory) + def get_parametername(self): return self.parametername + def set_parametername(self, parametername): self.parametername = parametername + def add_parametername(self, value): self.parametername.append(value) + def insert_parametername(self, index, value): self.parametername[index] = value + def export(self, outfile, level, namespace_='', name_='docParamNameList', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docParamNameList') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docParamNameList'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docParamNameList'): + for parametername_ in self.parametername: + parametername_.export(outfile, level, namespace_, name_='parametername') + def hasContent_(self): + if ( + self.parametername is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docParamNameList'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('parametername=[\n') + level += 1 + for parametername in self.parametername: + showIndent(outfile, level) + outfile.write('model_.parametername(\n') + parametername.exportLiteral(outfile, level, name_='parametername') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'parametername': + obj_ = docParamName.factory() + obj_.build(child_) + self.parametername.append(obj_) +# end class docParamNameList + + +class docParamName(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None): + self.direction = direction + if mixedclass_ is None: + self.mixedclass_ = MixedContainer + else: + self.mixedclass_ = mixedclass_ + if content_ is None: + self.content_ = [] + else: + self.content_ = content_ + def factory(*args_, **kwargs_): + if docParamName.subclass: + return docParamName.subclass(*args_, **kwargs_) + else: + return docParamName(*args_, **kwargs_) + factory = staticmethod(factory) + def get_ref(self): return self.ref + def set_ref(self, ref): self.ref = ref + 
def get_direction(self): return self.direction + def set_direction(self, direction): self.direction = direction + def export(self, outfile, level, namespace_='', name_='docParamName', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docParamName') + outfile.write('>') + self.exportChildren(outfile, level + 1, namespace_, name_) + outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docParamName'): + if self.direction is not None: + outfile.write(' direction=%s' % (quote_attrib(self.direction), )) + def exportChildren(self, outfile, level, namespace_='', name_='docParamName'): + for item_ in self.content_: + item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): + if ( + self.ref is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docParamName'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.direction is not None: + showIndent(outfile, level) + outfile.write('direction = "%s",\n' % (self.direction,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('content_ = [\n') + for item_ in self.content_: + item_.exportLiteral(outfile, level, name_) + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('direction'): + self.direction = attrs.get('direction').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'ref': + childobj_ = docRefTextType.factory() + childobj_.build(child_) + obj_ = self.mixedclass_(MixedContainer.CategoryComplex, + MixedContainer.TypeNone, 'ref', childobj_) + self.content_.append(obj_) + elif child_.nodeType == Node.TEXT_NODE: + obj_ = self.mixedclass_(MixedContainer.CategoryText, + MixedContainer.TypeNone, '', child_.nodeValue) + self.content_.append(obj_) +# end class docParamName + + +class docXRefSectType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, id=None, xreftitle=None, xrefdescription=None): + self.id = id + if xreftitle is None: + self.xreftitle = [] + else: + self.xreftitle = xreftitle + self.xrefdescription = xrefdescription + def factory(*args_, **kwargs_): + if docXRefSectType.subclass: + return docXRefSectType.subclass(*args_, **kwargs_) + else: + return docXRefSectType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_xreftitle(self): return self.xreftitle + def set_xreftitle(self, xreftitle): self.xreftitle = xreftitle + def add_xreftitle(self, value): self.xreftitle.append(value) + def insert_xreftitle(self, index, value): self.xreftitle[index] = value + def get_xrefdescription(self): return self.xrefdescription + def set_xrefdescription(self, xrefdescription): self.xrefdescription = xrefdescription + def get_id(self): return self.id + def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='docXRefSectType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, 
namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docXRefSectType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docXRefSectType'): + if self.id is not None: + outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docXRefSectType'): + for xreftitle_ in self.xreftitle: + showIndent(outfile, level) + outfile.write('<%sxreftitle>%s\n' % (namespace_, self.format_string(quote_xml(xreftitle_).encode(ExternalEncoding), input_name='xreftitle'), namespace_)) + if self.xrefdescription: + self.xrefdescription.export(outfile, level, namespace_, name_='xrefdescription', ) + def hasContent_(self): + if ( + self.xreftitle is not None or + self.xrefdescription is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docXRefSectType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.id is not None: + showIndent(outfile, level) + outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('xreftitle=[\n') + level += 1 + for xreftitle in self.xreftitle: + showIndent(outfile, level) + outfile.write('%s,\n' % quote_python(xreftitle).encode(ExternalEncoding)) + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + if self.xrefdescription: + showIndent(outfile, level) + outfile.write('xrefdescription=model_.descriptionType(\n') + self.xrefdescription.exportLiteral(outfile, level, name_='xrefdescription') + showIndent(outfile, level) + outfile.write('),\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('id'): + self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'xreftitle': + xreftitle_ = '' + for text__content_ in child_.childNodes: + xreftitle_ += text__content_.nodeValue + self.xreftitle.append(xreftitle_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'xrefdescription': + obj_ = descriptionType.factory() + obj_.build(child_) + self.set_xrefdescription(obj_) +# end class docXRefSectType + + +class docCopyType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, link=None, para=None, sect1=None, internal=None): + self.link = link + if para is None: + self.para = [] + else: + self.para = para + if sect1 is None: + self.sect1 = [] + else: + self.sect1 = sect1 + self.internal = internal + def factory(*args_, **kwargs_): + if docCopyType.subclass: + return docCopyType.subclass(*args_, **kwargs_) + else: + return docCopyType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_para(self): return self.para + def set_para(self, para): self.para = para + def add_para(self, value): self.para.append(value) + def insert_para(self, index, value): self.para[index] = value + def get_sect1(self): 
return self.sect1 + def set_sect1(self, sect1): self.sect1 = sect1 + def add_sect1(self, value): self.sect1.append(value) + def insert_sect1(self, index, value): self.sect1[index] = value + def get_internal(self): return self.internal + def set_internal(self, internal): self.internal = internal + def get_link(self): return self.link + def set_link(self, link): self.link = link + def export(self, outfile, level, namespace_='', name_='docCopyType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docCopyType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docCopyType'): + if self.link is not None: + outfile.write(' link=%s' % (self.format_string(quote_attrib(self.link).encode(ExternalEncoding), input_name='link'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docCopyType'): + for para_ in self.para: + para_.export(outfile, level, namespace_, name_='para') + for sect1_ in self.sect1: + sect1_.export(outfile, level, namespace_, name_='sect1') + if self.internal: + self.internal.export(outfile, level, namespace_, name_='internal') + def hasContent_(self): + if ( + self.para is not None or + self.sect1 is not None or + self.internal is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docCopyType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.link is not None: + showIndent(outfile, level) + outfile.write('link = %s,\n' % (self.link,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('para=[\n') + level += 1 + for para in self.para: + showIndent(outfile, level) + outfile.write('model_.para(\n') + para.exportLiteral(outfile, level, name_='para') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + showIndent(outfile, level) + outfile.write('sect1=[\n') + level += 1 + for sect1 in self.sect1: + showIndent(outfile, level) + outfile.write('model_.sect1(\n') + sect1.exportLiteral(outfile, level, name_='sect1') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + if self.internal: + showIndent(outfile, level) + outfile.write('internal=model_.docInternalType(\n') + self.internal.exportLiteral(outfile, level, name_='internal') + showIndent(outfile, level) + outfile.write('),\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('link'): + self.link = attrs.get('link').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'para': + obj_ = docParaType.factory() + obj_.build(child_) + self.para.append(obj_) + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'sect1': + obj_ = docSect1Type.factory() + obj_.build(child_) + self.sect1.append(obj_) + elif 
child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'internal': + obj_ = docInternalType.factory() + obj_.build(child_) + self.set_internal(obj_) +# end class docCopyType + + +class docCharType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, char=None, valueOf_=''): + self.char = char + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if docCharType.subclass: + return docCharType.subclass(*args_, **kwargs_) + else: + return docCharType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_char(self): return self.char + def set_char(self, char): self.char = char + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docCharType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docCharType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docCharType'): + if self.char is not None: + outfile.write(' char=%s' % (quote_attrib(self.char), )) + def exportChildren(self, outfile, level, namespace_='', name_='docCharType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docCharType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.char is not None: + showIndent(outfile, level) + outfile.write('char = "%s",\n' % (self.char,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('char'): + self.char = attrs.get('char').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docCharType + + +class docEmptyType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, valueOf_=''): + self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): + if docEmptyType.subclass: + return docEmptyType.subclass(*args_, **kwargs_) + else: + return docEmptyType(*args_, **kwargs_) + factory = staticmethod(factory) + def getValueOf_(self): return self.valueOf_ + def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docEmptyType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='docEmptyType') + if 
self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docEmptyType'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='docEmptyType'): + if self.valueOf_.find('![CDATA')>-1: + value=quote_xml('%s' % self.valueOf_) + value=value.replace('![CDATA','') + outfile.write(value) + else: + outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): + if ( + self.valueOf_ is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='docEmptyType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + pass + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + self.valueOf_ = '' + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + pass + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.TEXT_NODE: + self.valueOf_ += child_.nodeValue + elif child_.nodeType == Node.CDATA_SECTION_NODE: + self.valueOf_ += '![CDATA['+child_.nodeValue+']]' +# end class docEmptyType + + +USAGE_TEXT = """ +Usage: python .py [ -s ] +Options: + -s Use the SAX parser, not the minidom parser. +""" + +def usage(): + print USAGE_TEXT + sys.exit(1) + + +def parse(inFileName): + doc = minidom.parse(inFileName) + rootNode = doc.documentElement + rootObj = DoxygenType.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. + doc = None + sys.stdout.write('\n') + rootObj.export(sys.stdout, 0, name_="doxygen", + namespacedef_='') + return rootObj + + +def parseString(inString): + doc = minidom.parseString(inString) + rootNode = doc.documentElement + rootObj = DoxygenType.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. + doc = None + sys.stdout.write('\n') + rootObj.export(sys.stdout, 0, name_="doxygen", + namespacedef_='') + return rootObj + + +def parseLiteral(inFileName): + doc = minidom.parse(inFileName) + rootNode = doc.documentElement + rootObj = DoxygenType.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. 
+ doc = None + sys.stdout.write('from compound import *\n\n') + sys.stdout.write('rootObj = doxygen(\n') + rootObj.exportLiteral(sys.stdout, 0, name_="doxygen") + sys.stdout.write(')\n') + return rootObj + + +def main(): + args = sys.argv[1:] + if len(args) == 1: + parse(args[0]) + else: + usage() + + +if __name__ == '__main__': + main() + #import pdb + #pdb.run('main()') + diff --git a/gr-adsbtx/docs/doxygen/doxyxml/generated/compoundsuper.pyc b/gr-adsbtx/docs/doxygen/doxyxml/generated/compoundsuper.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb8b73e346795915de34504837253dc63f33a5a7 GIT binary patch literal 479090 zcmeFa34mP1buL^z`;7K|L7&JaeH8+kZfZc zI|S!B#If@nJ8@#1cu(x)IeAW;{kQWRui4(qAM+=_ll;lc_WyBS^5XydzPeTS_A)cw z5`9||65N`q>T~O!I#qS5&Z$$UTK{QA&+q(uX6KNJ{V+Nt4+<4R3Xr{V1r@oHXWyF^>XlG-DnKyUgzCChTVR?inWRQDmkGrztYa zgwqw7ZNeEUU?!6)V2%lAne1G%A1KfEx${go$7H*eJJ;s|#d$7wzRz7?!UZmOq0e1t z!bL83vCmy(!X++usn1<(!V6sPGM~G|gcrKpi+t`<6E1hTy*~E>6RvQ%D}C-V6RvW( z7yH}`O}N_SuJO4SnQ*Pk?en?IO}NhGuJ^gUCcMPuZt%G)On9lwz0BvXG~q^~c5z-1R2B#pT}Wb1yOBZ7z3<&)s0ctuA+)&%M-y+glT<#M-_a+k#y4-y}casT+ zTyE%dZ#H4pnn@xDYJau503TTOV_bh$s_b9bBYKfBxy`P@Aw{7IMl zVW0b$2|wa;f6C|XHQ`UY+>iR)$4&S%F861B?tlqD=5k;0xlfqz<1Y8-eD0tLf8Olg zXTnb~Yxj@|KdDG)!cQrZHQ}cf*>A$nD1s4l(1f2ghg(ekTb;Q+F-)H@6nk<67(=9vduP zcq(;>dJVf-Gg>0iox-uB`-Zd2Po)xt<~Bs4JMF{h$_|dMh-=ZuUR~@$FfhPTIxwJN zJ1~IJcXSxZfWHOZ3^=@lX$;u5wo-`LSmOq@Sl(iMr$7WcGSge^e`2PCg`r$-h+~(1 zgpWoxkvqPEoD75eM8<5}JI_4bV%}uTA+ShgWsb}+yTC2o$R`);#Xm6um7O0PJ&;xG z$$YMuwakw68arCtpBuJ64QG#Khl~7hE<7I7q;cd>`xFk2J$ct?m>n%{#+SvKO=ezZ zh^*NWLxgw~8KeS2)EZON0>;N7)qw9Av+!xLzeAWS@P8{I;v7sA?WH*#6dOIvjz0E{ zh-Vg1zAjJUEn%8zncoqnIzUj)f~yAve4Jk)vb9>;UGTE&MKfzsW4f^knAhKTmabAyNG& z5ork3A*7>2AMP;u_fk}k;%}q0jyRjKivl~Eh%I~MgB05F0~4b?5meOXJNn4oTwQ;~1I9SSnvzQt( zSjHqLgj8CP?PXhVu!u#$zPphwu)fJgoSvDUS<*72rKidudLKYBSfY`kH3N7GMhw?{ zTSa+fV)Xb-NONr>{0c&Q13!5^wlZ!vLabcLCS1AFjxx6M{@iGI2DBGe zbO*;q$-@fs5tvK|hTVcpSIe{({$;Gl>FvJU@ocztY_vF-!)kemp!`Q-XvDsZh+iIa zVQiPgEm{mix29z8T42?t$8FXz>k9BGB5A~Q;m_a?( zqa3KmtilXRvFXa0ONH4O@B)|S(J!iLC^pXNBY za%6`?O!6U=SPe%Ix&^xD03w>=29Ru34wC4G_(|)3w{mB>n(~JrR~#72KUL%i83Oq* zp9~bA8qaD;&*gw-zhi^LN3(tWs$GMvW^4_EwG4;>pGL0(qE!mSrQ*QAaePm`+hn@+ zpO`QS$VZeRy-;@yBv*0%6cBrfFse){h;z+^mBiMF+)pFvR`YB}K2u>$a$DZ1c2E*o z4W~Ir7g4vJgMH&I$Z^;^dQXMDx`?Px>{Fq6mqq+x_T6Vs0cCdWl)DK#ENI=2ad#qt zF-hhgb!_zLsAFRi#+=VtXtm}Hjrv1voM1Aewax)8ZcqC0&a2sk{=74Lyco>k?`&ByWcSmK+{ic<%H%KE2_TUP9ShpnI%=t5t% ztR-%N>!TL13u`n20WkR^Y%}{?%|bQGR}$}auu%$+C2~5|OwYNJ#LXuv2~c8g#Gp$h zeKe8Nt&;FH{0ippI!0BTfVnRTJ4NVCZJFpjTZ;+mf)xsRE2+|7AdlO$y0Ce5VbezZ ziPK?OdVg8^fYN=ED>bR8e{o&26Wj)ieQ0rP?_#g8U5vo|^)9O(DZ2C)E8nW?@b}xP zc<{e&%MRs628ShP@Yih;>HFBSc48~-fT?u3%kl9S@I>pIxtTQ*DdNy~J`NdmzJEg` zZZJFi?=cWTxeRFG+Fz{)=}pT5aErLrG4|2aoo0Vq)E|E?QIvX7{(Yxf)L_*YZ67Yh z!Hous%OS}zz#P4F5wWjU70c5S{f)h(blKOpI^599hl^PouG`R4pqMckV2_qvn$gzv zXgwXXx!^Lq94ufU{-*xvyJ4^qKSls|bU>$??3@%}?fx)E=$b171hk#Xx2cKe%Q>SP;H z79J5PS{Q*G(xDuQgUo?6VkM0(iVB<(gEj<2*m&09g#k)`*w_Nr$X8-jyPSj6}O(LCGG%Ja$ zMz13@917W@6sn)V_X_-I%>s(hY|3I;2!GNeTpzL7ITFNHlJ#!z_!a*pv3(!@l&=7Z z=pId-oyg#%6ORMYS-7T4arRrR^<%6*?Xj+p?6=~*1lg5DR%>%;GcnXnh@BxHgCE z9_!CKtSxW#)1jTGZo{v^76$A-!8Qil8QhK_W`Ygw;sJI1AH!4NMBR~@+h$cB3^fsL zvm2zGjw>gk-^V-AL^M}(4nj;g&`RNdV@^WZg=r@fO*`36la+#!+Az#ln1ZAVV?^38 zMx+p9L^?4j#HhZ+G=<3x`Z3L2REwcxD9UUS$}uF9)?|1K`Y|ZnWKhP^pfR)hF(793 zW6+q*77oyu%~3=evw5gel|3xR5+H+7jl^7C^on)Z>Wv z1a~sH3qjn-4W8+K1s{`EmNT7Nk?6MKxu3sq#e^LN>|W)UL=iADRQElopWSMR5@i(* 
z?%2eOPfyuBF{pGiaJ&~_-St9J&x>o8;tD6;B1`J1jhR>8lpJb2F|6PZ^7V;XNLmUF`;Y?HbI0BC95{Vw$L`XxCvCc_^P!yfAU`7<}ZtrX24P=wyV&c-5JrRxtF8;x}Euw_o~r=zvxidRG_3U}RCCDoVcI9Ld^*rq%~~BD+`ByK!@m zRRnrjyEq#_R<6EHyLLy5*cT=(4-OV0b@YIAjTUI@!Q60|&yLE1sZ5*8 z(Yn#+f%~Vz{3PZ69zI{7uE{_QUz%Bhok#w~)8hAc+VHD-!U{y#dXgZa>$A-b!o%2F z?IHTJTU#AmZ`93hUR|*BnG_c+MUz@pb?d|Yl|<>o)9CRK?7%m^g~x3oaKh!o0iY^V zC%|f==k`Srbl$j!qa#Djg&sZ{X&_tiLcc0t=WGpfvQ?4$45;!$z~wu5;t8IU7ayXk zN^^7ERvHa@xZC9C8CW^&fG*5#^b)w=3goPrcIc#gVNcnaKWew?ZXX;jSmVLecDI6^ zK;Vz@*onMlR!du~G56h=b)F5ne9?9l2JHzHEf zE3Rd)u#|C%UhsU&7O-s}@dP(8EL$KajkrSZ%~m_wV|x9ROq9|=QWo`p zzUOf82K=hFKDOVx{l4w}I|usj-?m+nQrR#M?gM0SkO2kq1e@H?yaNa{Cy(trluG9b zhVaGzg~xAJ(}HbAroU`o$Fldki5>@;EajyJuo^DbO7({cW-&Jei=TsIVbG6URWo>i zu?HDk!GLSGfU6lzu3~8cd3He7VyVLx7IJZaQmldc6RJ3V8Bbvi0=t1TtF^m*I+P0b zzq!+9wIHW+dV6VP?AvpxcJ(Fr|<$s3dRD@Q9L4L8A5P29Bz3H|A*3T z>FFxnP6A2UqA~K?nWIJOHXOXP>NcFUoGZQCVjkP%w{W6zz9Kk8xj+$|n_Q>}jEWa2 zg42a$a)^hWDd?5u2EzIk7F|DQiBs6jNky_Iz@11aJ?cpCU}V=I2pJ> zkt=xwlfYM*@G?bkyHSx_dCZbuzs-c36xqTfmdx2|!p(|o9*_w8rRoh_&i&Hu?r_Nml*Bo+2lXP3mf@{Hs$Lu@+o4RHA={}4hSfug;H22Y zir{eAE=Bfm$CI|7LmZxb{1oy3Vg0~W-Bd=8P?Hb_%MoayAy##n{B{FY&CCun3c~{# zL?HeqsG}{weh1Jg02oC97$MZ)7U1m;pi2O-$^`&IsK+h9&pH4cti~tsKL8NIMjH`? z(-c8C-M#WB?v?3!1xDL|L^#6%{JR5~Apq!L0Dur0b^Fl^Gf}-}3Sg!H5W+Ck0&I2w zvji|p00^Oew*UhUV735e3jiV1@)qD32QWtfa|D2Jt|ADbuD6h{ILLW|oF@Q;^A$m; zQwReH7b*wgB1I4`Rs^B!(gqMNRSx(~tjcF^&~$qkhRTYJ=f{dV7LW@ypSkIqDjqGg zin-!&R+yJ^q6C7o5jKPx(XyqF`#o!GL-pBwQ-vl(F4c^S)EexJv@GOJp z7+_^+f|Cs1%HVAb-p=582Jb+i6^VidE=$(oLml8$g(ncP1`KvNf|XiJH&$!qA%CsR zt;}&yZh6DMTFa0@Vg+-$TSis&}$%=vlIA!8@CQRW0KH4TM;jCTi>r264MB)CdQMnpiniC-*BoEc~V`--{X<5mE2^NDU*91K1p(y z&7Jpp+#S*RcWT0`1_ZU;T}h-GPIy*>gfFYceG^w>KHk8*b4WUBbR?=565U93Ao@}# z4UUgzN5e#q;_yv_b!uiG48!t@_4|r(Zg7vr2Lx3*!UMj^Om zZ7(sFT9uaIYte|Qtkn)*D@1AZo^4jTcvtfJyn91^hs6c;~w?;sILmd)K?N8XgKv*3rqcEE#Nw#PJ3ADBvWpH zcy(kI%TIWu>m$8Tk4dj&8l;a`M;8CViSe&xGLTldS(8D$IKfk zIc@^wL^rip;M`IH9(ReP2j0cmcmz&PfVl&``)9t}#SF&bC_%ZQ>1WGAjbEyIC` zH%*#mW$fB0stha=PkCviCTP}pn2nnFrBPIw1HLkd)HEQtPnGh3h{Xk9t-ZyA9_{+( z2O%Gi2qjUUhEL70xZr9)gJ153RM}9eWk_hZuvs%iEH1d(z*6s&$AlW8S!1F^TyV8H z>}!KaO@o5_wkZ#ac+Cp5{SN!EN4>s*p$fzULrL~d>BAehp)4B(KUMfzj7(gMS_X%9 z(VI0m*e-s)@HN791gAVe)ELbgAPZ2PHl9HtqZBE8L*7c1K z;U13 zN|sWp%`smaHy-%6wlTq~#9Q0JORQOHdvxVXDb?mlUmHYf8Whj64g}Udqt+jdUW9Ot zqrhEZe0bbrU*FhJ9pbT}B)-w`vB8RPHC0iOr+h^asi_&BBf11muNfR~f)#6I?fC|e zc7600@-h9DL{%D2f5)5R8oY7h8q~s@h)nExsN^(mkrj1Qt)Uf=;;+7U7d@gfLrSWB!cC-?ah%PTdZw1wpMjjgm+;yd78- zwnLSwJnO6Cnt^{OYYN(;r`!~7Jd~UDKj)FJuN_o{xE+*4ts34AZakFN19q5oG9{h` zNVyScVaD-Ld5q$uN58&iP$lAKP%@2XU`;q2Dr@pqUlT-XVoW@bk#a*gC1B~nmA84+ z>uUs6AZ`RD(`W?Nf{jpGi?{n))Y<^gr{4f>lAL)g9OnN2LAidkTcKE-; z<6kS$b@qtB=?MyM4k$m4^-hm`eZ*H4V&W^A2Jzh-P+pIB`Fhma2;=GZ{4RWwa{!Mk zzTgqAkMKe}CcKhq5T3Y`;Sl$Cd)#X!_L1fhn@2+5;}Ncp*g`udwvuTQo4Egw$Gui! 
[... base85-encoded GIT binary patch data (compiled .pyc content) omitted ...]
z^0~G0uJYEg@!j$Qv3qoEU**F7s}B%Sr*=VqS^soD{&!uiQm*f$Fiso|MHi{eIn>|W`*9T#18 z$?j_}x#3!=DfII@Yc{&592%=SN{XoF@Orjez z@dkWwzZ|tXnB&WXs5~8*gmS|6I2 zMKduhJ?`#PW}e1fHV(t+Y#4{hAhcH|d#Yo=1}pX4h&(~X8Lf&74o~bqFmZ5vq&%j- z)q45t{{GrV-HiLZG)(c3+?*S#Zdp(IXZ2?T)O~l>ZHX0l(9nI7mMgvujS0IKzI;r! z>_hwRDr?v=4WoU?hTVEOY*gqLUsuwj!!7!A%7hlx1aXR1{Zdp1o~rZPx=YpPz)*EX zyzxZ%<@hSB@$S{@M?f`rQJ6~10|T^Sjs?RWs_wbN9BE%=JXGxC^ldZ3_`cm_wHvna zRJMO;Z2VWtsvU+M(*1_6W4p~Kckkxj?%kWAAxinJyxdM4sK>0=L!2Mle|W84-Mw$( zHI=GatLB6)nv5gpibP<^yj=pb8l+wssnrf1s8l1Wst(h*!kzSzYIUL-{ZzlPf69P- z+4SL5Uh_4{rz)t?8Lbk{^@&8_Bas{+{F*-uex4|CeMvr~g3i zndHtVc@fErNz9ZkAondEc_+DVlKXe${)ps{NggNpZIb`PUFP8EuQ~mnBwr`_Tas^c zGW@XL9XalAk59IrOv0Z6SF9$qPyTj$hcG`g~4bM6#9SB_tP; zTt+fTat+DPlUzsgSw8tna-$^UBzs7zBnL^f*`*XoWGF?iBY7jqn@HYDLKsTNm5lKA z6Jh&nOEROi(I0Y3Zw~z$RvP_hlFyK!uEOP34sFVefCfot^a vpx(b-Pmk*PLH+tF9S3}uv+D+S>*|hy%D_FkbNNb1+I?&F-_ztlsq6m(l*T2} literal 0 HcmV?d00001 diff --git a/gr-adsbtx/docs/doxygen/doxyxml/generated/index.py b/gr-adsbtx/docs/doxygen/doxyxml/generated/index.py new file mode 100644 index 0000000..7a70e14 --- /dev/null +++ b/gr-adsbtx/docs/doxygen/doxyxml/generated/index.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python + +""" +Generated Mon Feb 9 19:08:05 2009 by generateDS.py. +""" + +from xml.dom import minidom + +import os +import sys +import compound + +import indexsuper as supermod + +class DoxygenTypeSub(supermod.DoxygenType): + def __init__(self, version=None, compound=None): + supermod.DoxygenType.__init__(self, version, compound) + + def find_compounds_and_members(self, details): + """ + Returns a list of all compounds and their members which match details + """ + + results = [] + for compound in self.compound: + members = compound.find_members(details) + if members: + results.append([compound, members]) + else: + if details.match(compound): + results.append([compound, []]) + + return results + +supermod.DoxygenType.subclass = DoxygenTypeSub +# end class DoxygenTypeSub + + +class CompoundTypeSub(supermod.CompoundType): + def __init__(self, kind=None, refid=None, name='', member=None): + supermod.CompoundType.__init__(self, kind, refid, name, member) + + def find_members(self, details): + """ + Returns a list of all members which match details + """ + + results = [] + + for member in self.member: + if details.match(member): + results.append(member) + + return results + +supermod.CompoundType.subclass = CompoundTypeSub +# end class CompoundTypeSub + + +class MemberTypeSub(supermod.MemberType): + + def __init__(self, kind=None, refid=None, name=''): + supermod.MemberType.__init__(self, kind, refid, name) + +supermod.MemberType.subclass = MemberTypeSub +# end class MemberTypeSub + + +def parse(inFilename): + + doc = minidom.parse(inFilename) + rootNode = doc.documentElement + rootObj = supermod.DoxygenType.factory() + rootObj.build(rootNode) + + return rootObj + diff --git a/gr-adsbtx/docs/doxygen/doxyxml/generated/index.pyc b/gr-adsbtx/docs/doxygen/doxyxml/generated/index.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5591c8c4cd0d8b728396fef16f05df037d8b725d GIT binary patch literal 3112 zcmcIm-ESL35TE<7lQ>S3LRv@>(!Btd5OF{PHC3sAe(+QyZK_sSLZ`EDot(K3bGxRo zlsq|q8BhE_yduGG=FWE728kCtH*>q$+1uIQ&TnSDzjj)G{_VUNQvLJr{tjaPfQpIw z&<@2er4D5-^<8qf?@{Ve-;*)4ed_y*c7ytjMY~D;4T^oz8#Lae5hgb1ifB&sbAO98 z9vKK*6gQ>4Ek+wlqc&+gFxnN!Sf;yp6Njl90y+>KRCMo;OOh%tD~diU@!|#YpW+Ghm+Z1EBOb%j)VDj11d}M zBrdX^y@k$`;(P}47qf{znGW7S@COW=07}CXfe#1*%_|@%ps|NOsgn-lJ!Hk=LmorS 
zJt#{wc4+L<1<{2=7cR{`fKS*I`_q%Dc9YGg6J2ISY3fosnK-yoL#5s+;w-Idu~tmQAc1NWjYLP3ua-6fSIP;Wng+mt|crKGitSwbwwaE zXBT34@usuuS?)cqga2(R^C<|GKSe`VR0e(VAS+fpM&?7qm6Rid<`09qU^UNX;Zqh? z2WY&@>MDHajd!@Mm>v=|*ZmW;t_2CN_bY-*BB<1(Bo<&9a*3}h)>g=_iPpTpby~Aq zoYG|D-=a~|`e;e(PlVRsDroI7cdv7`rt+IC!{bu&x<7bDp(^&u;cNVNr3v6N3s zaS~NbRWVsBaGO}=bBJL-z#x|yv|n>aabzmymb%S~5mh{6<&w14aG>vDmCqnGOYV4f z2fgQ#ZQkYxyCodqL{W1DwNA<%ZVFd&O{fc<;&d_O87z#pgvZBd+{lZ${~x4ayjJd1 zln{L61n*!}-DULw)C#UOAjOM5g4ChktK#T}lUsSQu0uP38xI$*G~FSmI(CnM05+%B@9t^=aIoIjV0}ZJToCKuHBV7gr80ItoIPZ6;A^v?Nem0H&GF z?bB5AOEQ;Ynn~RnMMGPZGnq4(CTT1vNjXUJXGy9hS+P03BPF9O3fn7C`*=a_{4^M^ z5M0AKeuQ -- Entering ipshell.\nHit Ctrl-D to exit') + +# +# Globals +# + +ExternalEncoding = 'ascii' + +# +# Support/utility functions. +# + +def showIndent(outfile, level): + for idx in range(level): + outfile.write(' ') + +def quote_xml(inStr): + s1 = (isinstance(inStr, basestring) and inStr or + '%s' % inStr) + s1 = s1.replace('&', '&') + s1 = s1.replace('<', '<') + s1 = s1.replace('>', '>') + return s1 + +def quote_attrib(inStr): + s1 = (isinstance(inStr, basestring) and inStr or + '%s' % inStr) + s1 = s1.replace('&', '&') + s1 = s1.replace('<', '<') + s1 = s1.replace('>', '>') + if '"' in s1: + if "'" in s1: + s1 = '"%s"' % s1.replace('"', """) + else: + s1 = "'%s'" % s1 + else: + s1 = '"%s"' % s1 + return s1 + +def quote_python(inStr): + s1 = inStr + if s1.find("'") == -1: + if s1.find('\n') == -1: + return "'%s'" % s1 + else: + return "'''%s'''" % s1 + else: + if s1.find('"') != -1: + s1 = s1.replace('"', '\\"') + if s1.find('\n') == -1: + return '"%s"' % s1 + else: + return '"""%s"""' % s1 + + +class MixedContainer: + # Constants for category: + CategoryNone = 0 + CategoryText = 1 + CategorySimple = 2 + CategoryComplex = 3 + # Constants for content_type: + TypeNone = 0 + TypeText = 1 + TypeString = 2 + TypeInteger = 3 + TypeFloat = 4 + TypeDecimal = 5 + TypeDouble = 6 + TypeBoolean = 7 + def __init__(self, category, content_type, name, value): + self.category = category + self.content_type = content_type + self.name = name + self.value = value + def getCategory(self): + return self.category + def getContenttype(self, content_type): + return self.content_type + def getValue(self): + return self.value + def getName(self): + return self.name + def export(self, outfile, level, name, namespace): + if self.category == MixedContainer.CategoryText: + outfile.write(self.value) + elif self.category == MixedContainer.CategorySimple: + self.exportSimple(outfile, level, name) + else: # category == MixedContainer.CategoryComplex + self.value.export(outfile, level, namespace,name) + def exportSimple(self, outfile, level, name): + if self.content_type == MixedContainer.TypeString: + outfile.write('<%s>%s' % (self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeInteger or \ + self.content_type == MixedContainer.TypeBoolean: + outfile.write('<%s>%d' % (self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeFloat or \ + self.content_type == MixedContainer.TypeDecimal: + outfile.write('<%s>%f' % (self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeDouble: + outfile.write('<%s>%g' % (self.name, self.value, self.name)) + def exportLiteral(self, outfile, level, name): + if self.category == MixedContainer.CategoryText: + showIndent(outfile, level) + outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \ + (self.category, self.content_type, self.name, self.value)) + elif self.category == 
MixedContainer.CategorySimple: + showIndent(outfile, level) + outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \ + (self.category, self.content_type, self.name, self.value)) + else: # category == MixedContainer.CategoryComplex + showIndent(outfile, level) + outfile.write('MixedContainer(%d, %d, "%s",\n' % \ + (self.category, self.content_type, self.name,)) + self.value.exportLiteral(outfile, level + 1) + showIndent(outfile, level) + outfile.write(')\n') + + +class _MemberSpec(object): + def __init__(self, name='', data_type='', container=0): + self.name = name + self.data_type = data_type + self.container = container + def set_name(self, name): self.name = name + def get_name(self): return self.name + def set_data_type(self, data_type): self.data_type = data_type + def get_data_type(self): return self.data_type + def set_container(self, container): self.container = container + def get_container(self): return self.container + + +# +# Data representation classes. +# + +class DoxygenType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, version=None, compound=None): + self.version = version + if compound is None: + self.compound = [] + else: + self.compound = compound + def factory(*args_, **kwargs_): + if DoxygenType.subclass: + return DoxygenType.subclass(*args_, **kwargs_) + else: + return DoxygenType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_compound(self): return self.compound + def set_compound(self, compound): self.compound = compound + def add_compound(self, value): self.compound.append(value) + def insert_compound(self, index, value): self.compound[index] = value + def get_version(self): return self.version + def set_version(self, version): self.version = version + def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='DoxygenType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'): + outfile.write(' version=%s' % (self.format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), )) + def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'): + for compound_ in self.compound: + compound_.export(outfile, level, namespace_, name_='compound') + def hasContent_(self): + if ( + self.compound is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='DoxygenType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.version is not None: + showIndent(outfile, level) + outfile.write('version = %s,\n' % (self.version,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('compound=[\n') + level += 1 + for compound in self.compound: + showIndent(outfile, level) + outfile.write('model_.compound(\n') + compound.exportLiteral(outfile, level, name_='compound') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = 
node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('version'): + self.version = attrs.get('version').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'compound': + obj_ = CompoundType.factory() + obj_.build(child_) + self.compound.append(obj_) +# end class DoxygenType + + +class CompoundType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, kind=None, refid=None, name=None, member=None): + self.kind = kind + self.refid = refid + self.name = name + if member is None: + self.member = [] + else: + self.member = member + def factory(*args_, **kwargs_): + if CompoundType.subclass: + return CompoundType.subclass(*args_, **kwargs_) + else: + return CompoundType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_name(self): return self.name + def set_name(self, name): self.name = name + def get_member(self): return self.member + def set_member(self, member): self.member = member + def add_member(self, value): self.member.append(value) + def insert_member(self, index, value): self.member[index] = value + def get_kind(self): return self.kind + def set_kind(self, kind): self.kind = kind + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def export(self, outfile, level, namespace_='', name_='CompoundType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='CompoundType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='CompoundType'): + outfile.write(' kind=%s' % (quote_attrib(self.kind), )) + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='CompoundType'): + if self.name is not None: + showIndent(outfile, level) + outfile.write('<%sname>%s\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) + for member_ in self.member: + member_.export(outfile, level, namespace_, name_='member') + def hasContent_(self): + if ( + self.name is not None or + self.member is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='CompoundType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.kind is not None: + showIndent(outfile, level) + outfile.write('kind = "%s",\n' % (self.kind,)) + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) + showIndent(outfile, level) + outfile.write('member=[\n') + level += 1 + for member in self.member: + showIndent(outfile, level) + outfile.write('model_.member(\n') + member.exportLiteral(outfile, level, 
name_='member') + showIndent(outfile, level) + outfile.write('),\n') + level -= 1 + showIndent(outfile, level) + outfile.write('],\n') + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if attrs.get('kind'): + self.kind = attrs.get('kind').value + if attrs.get('refid'): + self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'name': + name_ = '' + for text__content_ in child_.childNodes: + name_ += text__content_.nodeValue + self.name = name_ + elif child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'member': + obj_ = MemberType.factory() + obj_.build(child_) + self.member.append(obj_) +# end class CompoundType + + +class MemberType(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, kind=None, refid=None, name=None): + self.kind = kind + self.refid = refid + self.name = name + def factory(*args_, **kwargs_): + if MemberType.subclass: + return MemberType.subclass(*args_, **kwargs_) + else: + return MemberType(*args_, **kwargs_) + factory = staticmethod(factory) + def get_name(self): return self.name + def set_name(self, name): self.name = name + def get_kind(self): return self.kind + def set_kind(self, kind): self.kind = kind + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + def export(self, outfile, level, namespace_='', name_='MemberType', namespacedef_=''): + showIndent(outfile, level) + outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) + self.exportAttributes(outfile, level, namespace_, name_='MemberType') + if self.hasContent_(): + outfile.write('>\n') + self.exportChildren(outfile, level + 1, namespace_, name_) + showIndent(outfile, level) + outfile.write('\n' % (namespace_, name_)) + else: + outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='MemberType'): + outfile.write(' kind=%s' % (quote_attrib(self.kind), )) + outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='MemberType'): + if self.name is not None: + showIndent(outfile, level) + outfile.write('<%sname>%s\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) + def hasContent_(self): + if ( + self.name is not None + ): + return True + else: + return False + def exportLiteral(self, outfile, level, name_='MemberType'): + level += 1 + self.exportLiteralAttributes(outfile, level, name_) + if self.hasContent_(): + self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): + if self.kind is not None: + showIndent(outfile, level) + outfile.write('kind = "%s",\n' % (self.kind,)) + if self.refid is not None: + showIndent(outfile, level) + outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): + showIndent(outfile, level) + outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) + def build(self, node_): + attrs = node_.attributes + self.buildAttributes(attrs) + for child_ in node_.childNodes: + nodeName_ = child_.nodeName.split(':')[-1] + self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): + if 
attrs.get('kind'): + self.kind = attrs.get('kind').value + if attrs.get('refid'): + self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): + if child_.nodeType == Node.ELEMENT_NODE and \ + nodeName_ == 'name': + name_ = '' + for text__content_ in child_.childNodes: + name_ += text__content_.nodeValue + self.name = name_ +# end class MemberType + + +USAGE_TEXT = """ +Usage: python .py [ -s ] +Options: + -s Use the SAX parser, not the minidom parser. +""" + +def usage(): + print USAGE_TEXT + sys.exit(1) + + +def parse(inFileName): + doc = minidom.parse(inFileName) + rootNode = doc.documentElement + rootObj = DoxygenType.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. + doc = None + sys.stdout.write('\n') + rootObj.export(sys.stdout, 0, name_="doxygenindex", + namespacedef_='') + return rootObj + + +def parseString(inString): + doc = minidom.parseString(inString) + rootNode = doc.documentElement + rootObj = DoxygenType.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. + doc = None + sys.stdout.write('\n') + rootObj.export(sys.stdout, 0, name_="doxygenindex", + namespacedef_='') + return rootObj + + +def parseLiteral(inFileName): + doc = minidom.parse(inFileName) + rootNode = doc.documentElement + rootObj = DoxygenType.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. + doc = None + sys.stdout.write('from index import *\n\n') + sys.stdout.write('rootObj = doxygenindex(\n') + rootObj.exportLiteral(sys.stdout, 0, name_="doxygenindex") + sys.stdout.write(')\n') + return rootObj + + +def main(): + args = sys.argv[1:] + if len(args) == 1: + parse(args[0]) + else: + usage() + + + + +if __name__ == '__main__': + main() + #import pdb + #pdb.run('main()') + diff --git a/gr-adsbtx/docs/doxygen/doxyxml/generated/indexsuper.pyc b/gr-adsbtx/docs/doxygen/doxyxml/generated/indexsuper.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63d6f1c5efb679b2201bbb0da3931c5f4b549ce4 GIT binary patch literal 27746 zcmeHQTaaAUbv?IxUfq%yJx2(HX$d5au#oJS7z{$77Z?l!+z|!_aJ$v_&ZwoH?or=6 zFmkNeM6w;@N1o+Sp;E5=xGHuk52x~zRH{;ypFEt$PkvmfN-C+!Pd@yS&p4H=wf23@ zXhw`M-2#_KI%m!~_jS(R`|Q2X+UMRW|M#ZqU;o%U({#ztFn&LZ6wP_g1^Dl|wsSLC z*>f|V+Doooa`R<3Gvqv;S9a}TH$UQLMyx)h`ly>3b>5}zx@6e3$J~6y%~YKCLTA4l z3ajoy)%C7AZ)O9o8gapgTrldcI(NmnmvCCgb1w#CZf2tkDr}zF;=+nrw`Q&A;Xm?)^dsZDqb-vYU1>JdT7(&BTHwdjZj@sj4C+s!iFo>oX7s8%j zMq3j}KPBWRkfKw_LUh@|W`_sD!Vh`ZhPk-6Tzo$z(kjs)Figly^3T-ed?rFk4qNphss3Bsj_ zbUD0mDc*$J&USnAjkq4gy;fav9%IWHRys~XN23YY^J(Zuv5Tc>pDz91i&Pj>kv ziBC@Te+0MSDai<0oj9Bed$*(IhKDeF6Nj)dAHrq7h0--}9u%Y~=q{dZ zhj+f45kxnU1qCTOi|G_LI(zaVOj45LF=X{Rr)9mad0MYyx-GU*^?A&%FynmoH!vet zZA2(osZX<53hq`sB;wOYyp1LMC)L@Anyprola->uq^ZoFE$yM+!fd|cxl^xhbC*l* z^Uj@zLR6D?ag%!?aL`^h)v2>;_)%5&?ln4dp-Qj!T5)Kqjl}9M#F~<9y9K)xbA#b+7URI%V&^Xm3PoxSw=- zNNor9MF+%SpT^d|jTvp0xajX@l@_4?>?5;`eT^8#|Ll{fM;vUCgB!h#-mcQ7QgxN- z^DUf;`I5{D`36uWm@9PbPKxYuhV#Y;Trec6Ph;|s`qQ0f((^{Bhj?+>_2$)xYGF>n zK_nwr-0@BFRd+I`PfQ0?S)4mlehs;N$%xahnvp-^)I4y2oH%g6&#cuedvm7ojJvpKRqV33>H-ZVb!y7AYDDgK-G40vYt)(zH+3m!Q7X0BR zPvs9Op}G1N3Yh~;*Vmlh5?3)$b~AEx;o`D=l#H3d1VVd-Jex&b5$Si~Vg; zqqSs1$b}ZwXp7UTMJ?E5m5FCb$#CvKCLo)26lB)wsFu}&Dr69c^D6o$-An2*i7X@*297C|j+%#jc3mqJjimp*rh5dx;bcPS%c!RQIyuPnPF*IU zO<#%ws-mgC)aImOzfC465???|+Vzyu@6>fkhUm_&_n+Eh*6UAGUf*~Z{iQe$QaUK4 zpz;(Z+Lc0gNvt$Um?6c9FvH%$ywm3FlZDVUKR-ls;&qM}P8!xpPpf4_3Ie(SZe1DY z?NaNIJ3pEpfRey5Lv+&lLSkyZ_;;fu6_JYZnQ$rgx9~W5Dh-?-nr_W6z#FDQ36~bS 
zy*O#02D65OBH7AYY-2{s$MmtNbfN`1dZ_eO#{G}t0bfGWQo_N9+lDkzYEnHikp&|E z7)01~NlyL;67mxxfSQq(Jr6st@@&~nh$BBJTsK0FM6VQDM#ZCFO7A4yz1E#@mb4jr zFs3_wr_fT-o$xdqg{n`bf|p#=C-gkK6L2_)(vX*2XQNnSEO17sOi7ightuaX-c ze~gGicJx%(Y|S^?T1)WgDJ|D0$yqaF%cZaX^7aut`RhoDE3?(xuXRJ7XOhRsBFXz7 z6mo9a#Q&7T3X>MULm!9&%EYl-WfSe1t*D|WCd zzd2WM{$X6~?_eg)CgIFv&_99$f)GuKJp~M)*J$5FY5D{@^jk>LZe-M6o8SrEmHsZV zcAK{ot&?2l_*~`rwCc+g`Bb+P+Q2nFBFd>EKiQ=sUy|R&>BCdAiVVQVc+2~keVp0- z%pPF&^UMx1yNB6_nH^$wFEgrLpL4$ScE@ zQu1%KvO%h({&YBhHtbC=gv};ypdadx6G+js$ifU**QyYUhpSjQNW`z=5Y#cibQA=l zuzM>9(_jdmsfO?4i8m(P>gY`%c-G}V zNxN)nG&6fe3gimr1rc6Yl@S+?x=>Ifp)J@YGz!;ZLC_WpLbq5D zz{P?vE*1oH0pNmlBDw{$2D}Spv;gD6vw(V86-XDV3dQq6U|%=~U>EQ-z+VP-0sc}j zp77Tebp}y(1>$EASl_0Woiu=S!mf?T(m%`W09{DuE{wU}9yh0{UoW{1yi56@DC20V z31|fsLU=9gMJ<@UAv9#}OcQ=icd-)~OwHAe|4C*~BFiPBpBC^h@T3gQ0tQ_Y+o>0}Ea_0Pi{YF~9EBQ&B zJV?BfzzgR>qDcQ9)ps)$)a_h$RK>0O>)f#KoC-KOVvB~t=yZeLW}7fG(QkwZfi^l>bKL3lRNIN)~{ zF036nRyA}sgF)iWILRQ7;|iQE#t6!jmggE#5*?@u0Jf7(o@=#(Uf9u2u@x|zkhl!r zt@|(FE^_j-wjiACzY!7u@sD_RL;%nMA-T=lg~%^|BbBlc8%054K%9cC)B2ul^Bnda zB#cG-5O?l@2?^6Z5Bm%Xo~3&dqjKui zJKbdflf$H5K21b4PLqq(WCKnaS50XM=7LF2P+5y_tI3t04O&QWYU>6dN!T9i{sl>) zbmwiheFHQv?6ru%-42u#q30JWuD6Y{GY#C+g7-Xm${-jG@(I-Cpq}Y1TJ-g?Mmw?{ zpM!~6`V6=KE0TF%o-OnI^mwXKiNx_>PhG+Mwx7$T!G$uL~fq54#uaIS)XJSG(^u)|Iv|RWT30oS0IoZ|sl^+x5fgvB_$# z#3G6S0~GxIYRa+9$V$-}8m_g0vgI5b&~mhi{{09kx)m8s<0fox+pOOc$hV*dMsm)w zkC`$!gNhl*Y+og)N&+w4uLC7)x;*4AgEeI`2o@7kZWePF7)p7EZIFXQo?fgKje+)< zJ*d4+wgJ70$JsV6c_VbC&o096$W?`0jV65&ZkLa=0PdhPm2PQ?7TT>?&WEndbVx>j zbDVHVT_rv)l10{C>lUj{?%01zqdusoKgM8_fOQ4D*z8RhKgbg%O{@>2kU1c4X_i>H)e8xzYqZaefTaM>WIP_aSg zQTZnmo5ZO0lN<*m?Fx47nczXh64e5E8`1U$Qya_aKs&4rt(n*mi}U$lEvQdFb^7Vk zQ)lW^&zw4K(X<3Y(rN6L-(@Cy{v2hzwG$u@wj2qlJka|8hSpW!W@W#%)ZBJiJQ_GxDH z0Ocq8^ps@3rRMyztWwz+rO0h{GZ!qC`h~g?6#FwI;c6BO93QC+Zv+yR|J%A@yfj{} zjBXsR3{^H&Mk-~Wi(U3wKU5VbkfJJR8v^u%6}nZJEOIC~yd9MNckU|o5x&>R>{UGU zoi-ry-6-I>fMS8(0*XZ$EdXQz*8-5WDqvYui?A%t0Wu5pwt&n6V&)Sus2wWcttY)2 zHG{XFcB@JR6+1airR0fJWw``pRCva9l8D4dyqGE$TY zC}c5}He6v6Q$Rb_b+q@FxX1B;TGnCqBTXk!jz*DT@<6*U>#F3MCk|${C|f~H(B|8o ztiLI7*GFLWnx~qRF_6h=W-jac-nF~FJtMV&7xZikBBFk?%f76AJ`7I4zE468gQ&Q1 z;I3>KWjsAik{GU1O!Y!&AnVs);vzdQF)mEAG?+1P_9bw*4dd=?7$~eu@TuPis88e^ zQt)Yo_GP^#^V1}vUstcF&$+y)iW~G*uUIrN>G=Mv0AV01qCJCGR>TqL z_F8bKMwNpgLPQCxNxZUhzj(c#p$gSu@$plE4 zx0jI2G}dm#*azjhKp**0pJiJE*#Iq#;E9JhOimS78utbo`7lgG>aWRxb558Wme0)> zcmS!dc1sA&>5LA5d{>WVETA7nX7|<|&EQqG-p7P)uR5VkKMwI*Ef(%hkjgS1Iu+5U zq7Fm=L@xkTwY-)hOk*OFDk;7vaB^YRNkQH`;+{oso+NP%5a3+8O*87+@W>Bw>S`g0 zHKNa=(`x4%63;M4xfo6vf!13yoRVx&D7+&EO^9vlYlTxROmguv7EUR_&mXmb;MN4*?e+>e}8VHzzDa)t_ZH$Lt)l7BjdxMJirk8{-51JTt=F z{nu86+CNJwnsHh?C_;cbbIl-yy|sfPnUluTrqZi4kCNC)Ig6c0Bbkeps}|};i=IdY z=|H~xAc^`<7%~j%GYv|Qd$&T*Xj?6v7%i&R# z?ei&sWfiT19={(46-PBJci43hg^>%K-|%hSu&lQY-=>#~_tLH)E@K-)^ic!q1saBm zJCCneXg`gUzrNu{1ZQzP& zkSJzvGvSpJr_D_}}LD?~qbM5YW+hC6<%6lEgxk(%B zIcg^AC24{?QNYB7K&%>~338yhj5BkUrzuwOS2LAo2XJnG{Z$G%zRQvaAu66&%Ym_{ zj8XmsJLs?C@|zc3d80V68`vh#Md%Ey3wy`)5^ zNmKTH2QJHgyqR@)y=0Yx#FJ%#`|lfl5s&&>*2|Qn{Qb^kUE$2BiVORz8t6~Okk-9^oZUP|_{2@ZB$xNi13kxAg z?>U4VWb2c4pu*Z!X6u+I(5nI}HOA_4EO=FTVu0v8QQx;oTCR)?^=+amGnr}18`$1X zkHq$veU_yLvOzj1FX4IL&W1DCUG48LZ1+2)0tL?f4BLG#zc4K>e~-jn72BnTVv8=ZU7FK68L-x%rLY@70wVHU!_em6vo%V0g+s8j}Xy%`i zT0cWG5#Pzb-Y)q0&vV69Kr?CV%Sf76+$S0gUSNQDZKAux6j1tsQ-~(ZieC$M9DKYA@IBjcTvfj@zeD)r(Zmy*Ega|k@iT3ORYE;HI@3b*I7HU7fHB( zBH2yd58Eq|tYi*3POp* zwA{9E0|sV)r9c5Gxtjfj>I3PAAAD#R);WMkW3>L;(Ijh@@bgtEhtXIY~K{$yv*jSHntTfB>mOdUU$Bx ziB`jBWAU-r+C9~(-aSl29lJS-({ZveSH)N%MEH~=dFgX^{|{x3S& zL|uPtNvaiw;V}NplF1Owt8{zzr-UNC4W~aoqK{uZA!$9`>p^D854@1B9hHXYs1JM4 
z?BB)gZr+aCAFum`%~&4*GetvRL!n6j%Pf(=MruxO|2LVv#f&h2PHUC=r20kHqxQ#M zj*a56(S6AH!r*59tCYR%*cP|5HZp>j;>Hk6-MsD5i7gXnCfXBcMoRd6<|h2RrBdDK F{XckCdPM*L literal 0 HcmV?d00001 diff --git a/gr-adsbtx/docs/doxygen/doxyxml/text.py b/gr-adsbtx/docs/doxygen/doxyxml/text.py new file mode 100644 index 0000000..629edd1 --- /dev/null +++ b/gr-adsbtx/docs/doxygen/doxyxml/text.py @@ -0,0 +1,56 @@ +# +# Copyright 2010 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. +# +""" +Utilities for extracting text from generated classes. +""" + +def is_string(txt): + if isinstance(txt, str): + return True + try: + if isinstance(txt, unicode): + return True + except NameError: + pass + return False + +def description(obj): + if obj is None: + return None + return description_bit(obj).strip() + +def description_bit(obj): + if hasattr(obj, 'content'): + contents = [description_bit(item) for item in obj.content] + result = ''.join(contents) + elif hasattr(obj, 'content_'): + contents = [description_bit(item) for item in obj.content_] + result = ''.join(contents) + elif hasattr(obj, 'value'): + result = description_bit(obj.value) + elif is_string(obj): + return obj + else: + raise StandardError('Expecting a string or something with content, content_ or value attribute') + # If this bit is a paragraph then add one some line breaks. + if hasattr(obj, 'name') and obj.name == 'para': + result += "\n\n" + return result diff --git a/gr-adsbtx/docs/doxygen/doxyxml/text.pyc b/gr-adsbtx/docs/doxygen/doxyxml/text.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed94e9e952a50ff02dcd14ca2ddc16748c0c6b62 GIT binary patch literal 1461 zcmb_c&2G~`5T3P@G&QaI!=b8FAq#{QBxsJkP=qK&;*x_?1VTXM+S|mbV~5=}O{G>s z6!9uN0;GB~zW-JS3C%u*|6>e$7a}Ek4XNle zfTGkB;8E%e@F}fPw^iruI}pJOm*ttuv<;6+6Y7aGapJO~A3EUh$dse7uM2Hrr_(UW zV{5hD4idhT2OHYMuumWy(HW$o3y;iAavpV0H|APjwD&~&vRI>@1ucmXXWNxEZ99mKZug7I#A#NxM`h~DGH>_IgF>HR)=taBLQGDP1o=-!d7D$S zJwBCNGuwk#NZ2N<+f}|pt*DyvA)9I$=rUIx)+ARU01|~#9XH9WGUy^}P+ce*dUS#L zEE62Hgxo0$?N%^LwM|Smc3D~U4l^ggvIpb8!Ip=^>kszE|99WP)N-9jEzN`E((sTV ze*#Pi0WvZn_NXsm4;4LqyH4kdsc?ohMejU%ze(p{sYgSf&X58{=ROr5=O(7?`7$?A zyVAs)t4!?9P1MATuW~EVus*+Ii(F%td-IJ!7t~5+`4fDcmZ~9#fAI3KN$CO|BO*-E zPfglkSFz7W*@TJ0&7>@xE*uOX5lx7hq9-N$5a*S)&!OI*jPygi7itu2*fGzX%Ji99xX#3v8%FmSY}1U z+=`gL5jU3O9bI%@8k5qoD7%bNR E0D#;^8~^|S literal 0 HcmV?d00001 diff --git a/gr-adsbtx/docs/doxygen/other/group_defs.dox b/gr-adsbtx/docs/doxygen/other/group_defs.dox new file mode 100644 index 0000000..722e637 --- /dev/null +++ b/gr-adsbtx/docs/doxygen/other/group_defs.dox @@ -0,0 +1,7 @@ +/*! + * \defgroup block GNU Radio ADSBTX C++ Signal Processing Blocks + * \brief All C++ blocks that can be used from the ADSBTX GNU Radio + * module are listed here or in the subcategories below. 
+ * + */ + diff --git a/gr-adsbtx/docs/doxygen/other/main_page.dox b/gr-adsbtx/docs/doxygen/other/main_page.dox new file mode 100644 index 0000000..39899e6 --- /dev/null +++ b/gr-adsbtx/docs/doxygen/other/main_page.dox @@ -0,0 +1,10 @@ +/*! \mainpage + +Welcome to the GNU Radio ADSBTX Block + +This is the intro page for the Doxygen manual generated for the ADSBTX +block (docs/doxygen/other/main_page.dox). Edit it to add more detailed +documentation about the new GNU Radio modules contained in this +project. + +*/ diff --git a/gr-adsbtx/docs/doxygen/swig_doc.py b/gr-adsbtx/docs/doxygen/swig_doc.py new file mode 100644 index 0000000..4e1ce2e --- /dev/null +++ b/gr-adsbtx/docs/doxygen/swig_doc.py @@ -0,0 +1,255 @@ +# +# Copyright 2010,2011 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. +# +""" +Creates the swig_doc.i SWIG interface file. +Execute using: python swig_doc.py xml_path outputfilename + +The file instructs SWIG to transfer the doxygen comments into the +python docstrings. + +""" + +import sys + +try: + from doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile, base +except ImportError: + from gnuradio.doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile, base + + +def py_name(name): + bits = name.split('_') + return '_'.join(bits[1:]) + +def make_name(name): + bits = name.split('_') + return bits[0] + '_make_' + '_'.join(bits[1:]) + + +class Block(object): + """ + Checks if doxyxml produced objects correspond to a gnuradio block. + """ + + @classmethod + def includes(cls, item): + if not isinstance(item, DoxyClass): + return False + # Check for a parsing error. + if item.error(): + return False + return item.has_member(make_name(item.name()), DoxyFriend) + + +def utoascii(text): + """ + Convert unicode text into ascii and escape quotes. + """ + if text is None: + return '' + out = text.encode('ascii', 'replace') + out = out.replace('"', '\\"') + return out + + +def combine_descriptions(obj): + """ + Combines the brief and detailed descriptions of an object together. + """ + description = [] + bd = obj.brief_description.strip() + dd = obj.detailed_description.strip() + if bd: + description.append(bd) + if dd: + description.append(dd) + return utoascii('\n\n'.join(description)).strip() + + +entry_templ = '%feature("docstring") {name} "{docstring}"' +def make_entry(obj, name=None, templ="{description}", description=None): + """ + Create a docstring entry for a swig interface file. + + obj - a doxyxml object from which documentation will be extracted. + name - the name of the C object (defaults to obj.name()) + templ - an optional template for the docstring containing only one + variable named 'description'. 
+ description - if this optional variable is set then it's value is + used as the description instead of extracting it from obj. + """ + if name is None: + name=obj.name() + if "operator " in name: + return '' + if description is None: + description = combine_descriptions(obj) + docstring = templ.format(description=description) + if not docstring: + return '' + return entry_templ.format( + name=name, + docstring=docstring, + ) + + +def make_func_entry(func, name=None, description=None, params=None): + """ + Create a function docstring entry for a swig interface file. + + func - a doxyxml object from which documentation will be extracted. + name - the name of the C object (defaults to func.name()) + description - if this optional variable is set then it's value is + used as the description instead of extracting it from func. + params - a parameter list that overrides using func.params. + """ + if params is None: + params = func.params + params = [prm.declname for prm in params] + if params: + sig = "Params: (%s)" % ", ".join(params) + else: + sig = "Params: (NONE)" + templ = "{description}\n\n" + sig + return make_entry(func, name=name, templ=utoascii(templ), + description=description) + + +def make_class_entry(klass, description=None): + """ + Create a class docstring for a swig interface file. + """ + output = [] + output.append(make_entry(klass, description=description)) + for func in klass.in_category(DoxyFunction): + name = klass.name() + '::' + func.name() + output.append(make_func_entry(func, name=name)) + return "\n\n".join(output) + + +def make_block_entry(di, block): + """ + Create class and function docstrings of a gnuradio block for a + swig interface file. + """ + descriptions = [] + # Get the documentation associated with the class. + class_desc = combine_descriptions(block) + if class_desc: + descriptions.append(class_desc) + # Get the documentation associated with the make function + make_func = di.get_member(make_name(block.name()), DoxyFunction) + make_func_desc = combine_descriptions(make_func) + if make_func_desc: + descriptions.append(make_func_desc) + # Get the documentation associated with the file + try: + block_file = di.get_member(block.name() + ".h", DoxyFile) + file_desc = combine_descriptions(block_file) + if file_desc: + descriptions.append(file_desc) + except base.Base.NoSuchMember: + # Don't worry if we can't find a matching file. + pass + # And join them all together to make a super duper description. + super_description = "\n\n".join(descriptions) + # Associate the combined description with the class and + # the make function. + output = [] + output.append(make_class_entry(block, description=super_description)) + creator = block.get_member(block.name(), DoxyFunction) + output.append(make_func_entry(make_func, description=super_description, + params=creator.params)) + return "\n\n".join(output) + + +def make_swig_interface_file(di, swigdocfilename, custom_output=None): + + output = [""" +/* + * This file was automatically generated using swig_doc.py. + * + * Any changes to it will be lost next time it is regenerated. + */ +"""] + + if custom_output is not None: + output.append(custom_output) + + # Create docstrings for the blocks. 
+ blocks = di.in_category(Block) + make_funcs = set([]) + for block in blocks: + try: + make_func = di.get_member(make_name(block.name()), DoxyFunction) + make_funcs.add(make_func.name()) + output.append(make_block_entry(di, block)) + except block.ParsingError: + print('Parsing error for block %s' % block.name()) + + # Create docstrings for functions + # Don't include the make functions since they have already been dealt with. + funcs = [f for f in di.in_category(DoxyFunction) if f.name() not in make_funcs] + for f in funcs: + try: + output.append(make_func_entry(f)) + except f.ParsingError: + print('Parsing error for function %s' % f.name()) + + # Create docstrings for classes + block_names = [block.name() for block in blocks] + klasses = [k for k in di.in_category(DoxyClass) if k.name() not in block_names] + for k in klasses: + try: + output.append(make_class_entry(k)) + except k.ParsingError: + print('Parsing error for class %s' % k.name()) + + # Docstrings are not created for anything that is not a function or a class. + # If this excludes anything important please add it here. + + output = "\n\n".join(output) + + swig_doc = file(swigdocfilename, 'w') + swig_doc.write(output) + swig_doc.close() + +if __name__ == "__main__": + # Parse command line options and set up doxyxml. + err_msg = "Execute using: python swig_doc.py xml_path outputfilename" + if len(sys.argv) != 3: + raise StandardError(err_msg) + xml_path = sys.argv[1] + swigdocfilename = sys.argv[2] + di = DoxyIndex(xml_path) + + # gnuradio.gr.msq_queue.insert_tail and delete_head create errors unless docstrings are defined! + # This is presumably a bug in SWIG. + #msg_q = di.get_member(u'gr_msg_queue', DoxyClass) + #insert_tail = msg_q.get_member(u'insert_tail', DoxyFunction) + #delete_head = msg_q.get_member(u'delete_head', DoxyFunction) + output = [] + #output.append(make_func_entry(insert_tail, name='gr_py_msg_queue__insert_tail')) + #output.append(make_func_entry(delete_head, name='gr_py_msg_queue__delete_head')) + custom_output = "\n\n".join(output) + + # Generate the docstrings interface file. 
+ make_swig_interface_file(di, swigdocfilename, custom_output=custom_output) diff --git a/gr-adsbtx/docs/doxygen/swig_doc.pyc b/gr-adsbtx/docs/doxygen/swig_doc.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e7cd19b16f3c5b645a93818627ae1c90b56147e GIT binary patch literal 7519 zcmc&(-ESOM6~A|Pz4orxvEw9mk|xgJl4jEn8`=tJD3r$OM<_LATq$8iS&e6|cgNnH z+031-ZPgZ19T3k5@q*NM{sUe>NWAdezkvUNS1S1Z&YfLv+6q<30w;5QKhHh)obx;9 z-0Si`=IVd@r+zQ_~(s`}HaHLdzHsx_nZgXA?;F*UVUSJ^L>)|%9?y#CdeCt5v>D(W#9nN`u0 zR2nL(Dl?~cKxvviCrbUabxuVyGCZ%Mnp754R9CI@WIn=TlnwP5JYSGHn)p8_Jr|{Z zPCdpMFR5r=mM=-?f^;q`b6G{_)m}+uztPd6YF$z0sxn&bKBZ2;#|5=lR@v*?TvO%> zdp_1GQN@JlqPo($K5AS-V?~*=GS@k=bwhPX|sx&o{%pB}R!$6VfO@_Vl=2p(Syk$^kG64P(Vtla{vGq@=~z zV<6P#K@u9DBO-=3gPt`~Gg+2qGOruhcHi{3P3F&Go>LH+bJj@~!=4r6jC0dJTeXqf zI0<{h$k>-rx3mmZn%9+5Lszs|m_V~F2Hb87%k4HSe%Lc;(z@DoSg6mU9un-pQa6qK zOK6|6E44EKlz$Fn*YSykRm;`V7)g}X21mYw0!TuH*_%-rLI}J9ktIF?V1mBjIH*KK zj|xN5xM!=$BiUC`IFn104^5VP!z2#V$apzCoAZc)4dd7gU}(mM!N7QrhAG^*Gcg!b zp>vrga>~r02Tv}JY1LB2KU>`tXC>O*OcNszHVJp~uVKQehvL3D4+yt@D%3NOr>J+B zr@|-wowI=o^}mRJB3=5Zv2^KdI!Yuco)}_1L9-;zt3)ZYSyo4gvv)+wQU_ER{EiS} z-y8Kp6|NT&po0@+p$q>VnZg6Si!BRv^ta=r2$tK3B|D;l$m9V6N94hd!z>;!q*yQI zVBs0y4LUGOlR5o}`0I5Sx6XsQ&}vUC7nm~64f2Z^AMNnO6gu2sFhD@_S5b9tma=l1 z+TImROcJHmwjx2kK__Do8p;d(%whET|l8KRm7cnUDXTnS^DXlJ4il;nQ5+$ z!}{uN?<2bLV{i4Nap&=>rF;5l;xNY{kIEp#Zv&q_Kmk}4z*I<#swlfH%U~=VLl?G!$y6|-Q~>ucaJJ4; zB&+fQgo+|v@sQ1Ahu%(_p_d_=IDR6#l7)yQ-V3tMg%}rEyqzrVd;8ru?2_V;sZl^P z^Y-Ij&)YU00Gb71ZX#!4lpyFZsgo*A^xIP(hp^CG*mxcZk9W8OBWhd>CYLu#MD@dqm!?2_@cg z+)nghTLjp=8v{V2y~hO5i3rA%Bwn1~vLM|XBBiuX>^HR7C2$$-BxOcK6ChacjHE@D z&L)lX|7G^V zl-T_s$b!BVsYunpJ9)j>5^q87rAS1w7%aQnpC=35ngTbxRlO%f?s(0c_O?X1=RM1m zY23g0(b-~+XA-P&`?lC&hAz8^Le(39+BHCIsZ`Z<{ge(Fgc-RQ z#RY1Is4%fXBm!o@GQt&LAwlazYXCwY{7?W`;&ITv_rWWIHRM4ILd4oKkmef81f?Sc zK)GOsS&q=7b@c!M#4SP)hWi6qvEM@>w%{HG5QZe9o`BbX2w!ydqB_z0ojaldsUy>& zQ}Lf+L4Ao|jFWbVfYM2`L%+-sg53WW3%Vbd{gbUdX7xf@EWy1j_%CrHmz=E@nW~5u z<>XF{zeb;>bu)*#qL=hlecdULmku6H6!;4KlT#q_Z}>83h86GFeSwrTCHJEG~9qAUUqYAO02_SY|nDs){Q=Q1B?+BWvn7aWtyFiJ1T!EDXPr%R=Q3525hqT)Za)*;9 z)|jc_t@_bhszMU8@?8BVd9DULMnsq1dK)ty;(GvAu#{eaSOP063TOkc+#c9+56JzL zJxr@URmU}T07h$=y1}W3%f*3VJ%Y!AgU`SLs5lN@1E{sbpQB3-J_84zISwd;Map2z zA~$r{t8x8Uw^jl zJ?p*C8-+YT@8fC^;K>2`c^n2kTv+hl%xoXG26@^Xh#Bf?}Xh$8=OHi}%1?7qRrLg&B` z;0k|6&*pM1dKt?luSw#79x^+66c_&{#aTExUZ#XeAwRy* zix2)Icct@JGc`E7g)(nkbGoco^aW%P3wpWovb2|3FW=A;t*Lpug3f8-9PgdDUANno z68f+I0uxim^7n|CrZNhCKS90uGzyW5n_%WQ_?+@^t zEOr8jA_*$~P_Z^5!AW9nE`gKBaxqAMl_SrwSYshKX=aGd-XwIs&1Q?m5(;4r0BZMb zN7#iyi6I7A=R+~8Ej)WiK^8fFsQb|$rTq6ug*GX%&)pwH0_Zy9Jd*pq>K>}Ue+_kc z;6~6xpnDOTeX(4t)TWl^YqPa;RlRt1ajDU$eZBEg1J{E~xRcM}Ut_vjo-h9!G^@Z? literal 0 HcmV?d00001 diff --git a/gr-adsbtx/examples/README b/gr-adsbtx/examples/README new file mode 100644 index 0000000..c012bdf --- /dev/null +++ b/gr-adsbtx/examples/README @@ -0,0 +1,4 @@ +It is considered good practice to add examples in here to demonstrate the +functionality of your OOT module. Python scripts, GRC flow graphs or other +code can go here. + diff --git a/gr-adsbtx/grc/CMakeLists.txt b/gr-adsbtx/grc/CMakeLists.txt new file mode 100644 index 0000000..d776de7 --- /dev/null +++ b/gr-adsbtx/grc/CMakeLists.txt @@ -0,0 +1,22 @@ +# Copyright 2011 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. 
+# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. + +install(FILES + DESTINATION share/gnuradio/grc/blocks +) diff --git a/gr-adsbtx/include/adsbtx/CMakeLists.txt b/gr-adsbtx/include/adsbtx/CMakeLists.txt new file mode 100644 index 0000000..3cbf09e --- /dev/null +++ b/gr-adsbtx/include/adsbtx/CMakeLists.txt @@ -0,0 +1,26 @@ +# Copyright 2011,2012 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. + +######################################################################## +# Install public header files +######################################################################## +install(FILES + api.h + DESTINATION include/adsbtx +) diff --git a/gr-adsbtx/include/adsbtx/api.h b/gr-adsbtx/include/adsbtx/api.h new file mode 100644 index 0000000..081047a --- /dev/null +++ b/gr-adsbtx/include/adsbtx/api.h @@ -0,0 +1,33 @@ +/* + * Copyright 2011 Free Software Foundation, Inc. + * + * This file is part of GNU Radio + * + * GNU Radio is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3, or (at your option) + * any later version. + * + * GNU Radio is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU Radio; see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, + * Boston, MA 02110-1301, USA. + */ + +#ifndef INCLUDED_ADSBTX_API_H +#define INCLUDED_ADSBTX_API_H + +#include + +#ifdef gnuradio_adsbtx_EXPORTS +# define ADSBTX_API __GR_ATTR_EXPORT +#else +# define ADSBTX_API __GR_ATTR_IMPORT +#endif + +#endif /* INCLUDED_ADSBTX_API_H */ diff --git a/gr-adsbtx/lib/CMakeLists.txt b/gr-adsbtx/lib/CMakeLists.txt new file mode 100644 index 0000000..92f39e2 --- /dev/null +++ b/gr-adsbtx/lib/CMakeLists.txt @@ -0,0 +1,78 @@ +# Copyright 2011,2012 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. 
+# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. + +######################################################################## +# Setup library +######################################################################## +include(GrPlatform) #define LIB_SUFFIX + +include_directories(${Boost_INCLUDE_DIR}) +link_directories(${Boost_LIBRARY_DIRS}) + +list(APPEND adsbtx_sources +) + +set(adsbtx_sources "${adsbtx_sources}" PARENT_SCOPE) +if(NOT adsbtx_sources) + MESSAGE(STATUS "No C++ sources... skipping lib/") + return() +endif(NOT adsbtx_sources) + +add_library(gnuradio-adsbtx SHARED ${adsbtx_sources}) +target_link_libraries(gnuradio-adsbtx ${Boost_LIBRARIES} ${GNURADIO_ALL_LIBRARIES}) +set_target_properties(gnuradio-adsbtx PROPERTIES DEFINE_SYMBOL "gnuradio_adsbtx_EXPORTS") + +if(APPLE) + set_target_properties(gnuradio-adsbtx PROPERTIES + INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib" + ) +endif(APPLE) + +######################################################################## +# Install built library files +######################################################################## +install(TARGETS gnuradio-adsbtx + LIBRARY DESTINATION lib${LIB_SUFFIX} # .so/.dylib file + ARCHIVE DESTINATION lib${LIB_SUFFIX} # .lib file + RUNTIME DESTINATION bin # .dll file +) + +######################################################################## +# Build and register unit test +######################################################################## +include(GrTest) + +include_directories(${CPPUNIT_INCLUDE_DIRS}) + +list(APPEND test_adsbtx_sources + ${CMAKE_CURRENT_SOURCE_DIR}/test_adsbtx.cc + ${CMAKE_CURRENT_SOURCE_DIR}/qa_adsbtx.cc +) + +add_executable(test-adsbtx ${test_adsbtx_sources}) + +target_link_libraries( + test-adsbtx + ${GNURADIO_RUNTIME_LIBRARIES} + ${Boost_LIBRARIES} + ${CPPUNIT_LIBRARIES} + gnuradio-adsbtx +) + +GR_ADD_TEST(test_adsbtx test-adsbtx) diff --git a/gr-adsbtx/lib/qa_adsbtx.cc b/gr-adsbtx/lib/qa_adsbtx.cc new file mode 100644 index 0000000..80e4ced --- /dev/null +++ b/gr-adsbtx/lib/qa_adsbtx.cc @@ -0,0 +1,36 @@ +/* + * Copyright 2012 Free Software Foundation, Inc. + * + * This file is part of GNU Radio + * + * GNU Radio is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3, or (at your option) + * any later version. + * + * GNU Radio is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU Radio; see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, + * Boston, MA 02110-1301, USA. + */ + +/* + * This class gathers together all the test cases for the gr-filter + * directory into a single test suite. As you create new test cases, + * add them here. 
+ */ + +#include "qa_adsbtx.h" + +CppUnit::TestSuite * +qa_adsbtx::suite() +{ + CppUnit::TestSuite *s = new CppUnit::TestSuite("adsbtx"); + + return s; +} diff --git a/gr-adsbtx/lib/qa_adsbtx.h b/gr-adsbtx/lib/qa_adsbtx.h new file mode 100644 index 0000000..97ac3b4 --- /dev/null +++ b/gr-adsbtx/lib/qa_adsbtx.h @@ -0,0 +1,38 @@ +/* -*- c++ -*- */ +/* + * Copyright 2012 Free Software Foundation, Inc. + * + * This file is part of GNU Radio + * + * GNU Radio is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3, or (at your option) + * any later version. + * + * GNU Radio is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU Radio; see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, + * Boston, MA 02110-1301, USA. + */ + +#ifndef _QA_ADSBTX_H_ +#define _QA_ADSBTX_H_ + +#include +#include + +//! collect all the tests for the gr-filter directory + +class __GR_ATTR_EXPORT qa_adsbtx +{ + public: + //! return suite of tests for all of gr-filter directory + static CppUnit::TestSuite *suite(); +}; + +#endif /* _QA_ADSBTX_H_ */ diff --git a/gr-adsbtx/lib/test_adsbtx.cc b/gr-adsbtx/lib/test_adsbtx.cc new file mode 100644 index 0000000..0988876 --- /dev/null +++ b/gr-adsbtx/lib/test_adsbtx.cc @@ -0,0 +1,48 @@ +/* -*- c++ -*- */ +/* + * Copyright 2012 Free Software Foundation, Inc. + * + * This file is part of GNU Radio + * + * GNU Radio is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3, or (at your option) + * any later version. + * + * GNU Radio is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU Radio; see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, + * Boston, MA 02110-1301, USA. + */ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include +#include + +#include +#include "qa_adsbtx.h" +#include +#include + +int +main (int argc, char **argv) +{ + CppUnit::TextTestRunner runner; + std::ofstream xmlfile(get_unittest_path("adsbtx.xml").c_str()); + CppUnit::XmlOutputter *xmlout = new CppUnit::XmlOutputter(&runner.result(), xmlfile); + + runner.addTest(qa_adsbtx::suite()); + runner.setOutputter(xmlout); + + bool was_successful = runner.run("", false); + + return was_successful ? 0 : 1; +} diff --git a/gr-adsbtx/python/CMakeLists.txt b/gr-adsbtx/python/CMakeLists.txt new file mode 100644 index 0000000..5d91bd5 --- /dev/null +++ b/gr-adsbtx/python/CMakeLists.txt @@ -0,0 +1,43 @@ +# Copyright 2011 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. 
+# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. + +######################################################################## +# Include python install macros +######################################################################## +include(GrPython) +if(NOT PYTHONINTERP_FOUND) + return() +endif() + +######################################################################## +# Install python sources +######################################################################## +GR_PYTHON_INSTALL( + FILES + __init__.py + DESTINATION ${GR_PYTHON_DIR}/adsbtx +) + +######################################################################## +# Handle the unit tests +######################################################################## +include(GrTest) + +set(GR_TEST_TARGET_DEPS gnuradio-adsbtx) +set(GR_TEST_PYTHON_DIRS ${CMAKE_BINARY_DIR}/swig) diff --git a/gr-adsbtx/python/__init__.py b/gr-adsbtx/python/__init__.py new file mode 100644 index 0000000..8a49ac1 --- /dev/null +++ b/gr-adsbtx/python/__init__.py @@ -0,0 +1,34 @@ +# +# Copyright 2008,2009 Free Software Foundation, Inc. +# +# This application is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# This application is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# + +# The presence of this file turns this directory into a Python package + +''' +This is the GNU Radio ADSBTX module. Place your Python package +description here (python/__init__.py). +''' + +# import swig generated symbols into the adsbtx namespace +try: + # this might fail if the module is python-only + from adsbtx_swig import * +except ImportError: + pass + +# import any pure python here +# diff --git a/gr-adsbtx/python/__init__.pyc b/gr-adsbtx/python/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d22b20cc07ada175ee6817c20dbc7a0277654d12 GIT binary patch literal 359 zcmZXPPfNrw5XC3$`iICac=MQ>RkXQ!5K(avFU#UqT)c$X#5Qa?Zg2^o_w2+&=1 literal 0 HcmV?d00001 diff --git a/gr-adsbtx/python/build_utils.py b/gr-adsbtx/python/build_utils.py new file mode 100644 index 0000000..cf58a97 --- /dev/null +++ b/gr-adsbtx/python/build_utils.py @@ -0,0 +1,226 @@ +# +# Copyright 2004,2009,2012 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. 
+# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. +# + +"""Misc utilities used at build time +""" + +import re, os, os.path +from build_utils_codes import * + + +# set srcdir to the directory that contains Makefile.am +try: + srcdir = os.environ['srcdir'] +except KeyError, e: + srcdir = "." +srcdir = srcdir + '/' + +# set do_makefile to either true or false dependeing on the environment +try: + if os.environ['do_makefile'] == '0': + do_makefile = False + else: + do_makefile = True +except KeyError, e: + do_makefile = False + +# set do_sources to either true or false dependeing on the environment +try: + if os.environ['do_sources'] == '0': + do_sources = False + else: + do_sources = True +except KeyError, e: + do_sources = True + +name_dict = {} + +def log_output_name (name): + (base, ext) = os.path.splitext (name) + ext = ext[1:] # drop the leading '.' + + entry = name_dict.setdefault (ext, []) + entry.append (name) + +def open_and_log_name (name, dir): + global do_sources + if do_sources: + f = open (name, dir) + else: + f = None + log_output_name (name) + return f + +def expand_template (d, template_filename, extra = ""): + '''Given a dictionary D and a TEMPLATE_FILENAME, expand template into output file + ''' + global do_sources + output_extension = extract_extension (template_filename) + template = open_src (template_filename, 'r') + output_name = d['NAME'] + extra + '.' + output_extension + log_output_name (output_name) + if do_sources: + output = open (output_name, 'w') + do_substitution (d, template, output) + output.close () + template.close () + +def output_glue (dirname): + output_makefile_fragment () + output_ifile_include (dirname) + +def output_makefile_fragment (): + global do_makefile + if not do_makefile: + return +# overwrite the source, which must be writable; this should have been +# checked for beforehand in the top-level Makefile.gen.gen . + f = open (os.path.join (os.environ.get('gendir', os.environ.get('srcdir', '.')), 'Makefile.gen'), 'w') + f.write ('#\n# This file is machine generated. All edits will be overwritten\n#\n') + output_subfrag (f, 'h') + output_subfrag (f, 'i') + output_subfrag (f, 'cc') + f.close () + +def output_ifile_include (dirname): + global do_sources + if do_sources: + f = open ('%s_generated.i' % (dirname,), 'w') + f.write ('//\n// This file is machine generated. All edits will be overwritten\n//\n') + files = name_dict.setdefault ('i', []) + files.sort () + f.write ('%{\n') + for file in files: + f.write ('#include <%s>\n' % (file[0:-1] + 'h',)) + f.write ('%}\n\n') + for file in files: + f.write ('%%include <%s>\n' % (file,)) + +def output_subfrag (f, ext): + files = name_dict.setdefault (ext, []) + files.sort () + f.write ("GENERATED_%s =" % (ext.upper ())) + for file in files: + f.write (" \\\n\t%s" % (file,)) + f.write ("\n\n") + +def extract_extension (template_name): + # template name is something like: GrFIRfilterXXX.h.t + # we return everything between the penultimate . 
and .t + mo = re.search (r'\.([a-z]+)\.t$', template_name) + if not mo: + raise ValueError, "Incorrectly formed template_name '%s'" % (template_name,) + return mo.group (1) + +def open_src (name, mode): + global srcdir + return open (os.path.join (srcdir, name), mode) + +def do_substitution (d, in_file, out_file): + def repl (match_obj): + key = match_obj.group (1) + # print key + return d[key] + + inp = in_file.read () + out = re.sub (r"@([a-zA-Z0-9_]+)@", repl, inp) + out_file.write (out) + + + +copyright = '''/* -*- c++ -*- */ +/* + * Copyright 2003,2004 Free Software Foundation, Inc. + * + * This file is part of GNU Radio + * + * GNU Radio is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3, or (at your option) + * any later version. + * + * GNU Radio is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU Radio; see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, + * Boston, MA 02110-1301, USA. + */ +''' + +def is_complex (code3): + if i_code (code3) == 'c' or o_code (code3) == 'c': + return '1' + else: + return '0' + + +def standard_dict (name, code3, package='gr'): + d = {} + d['NAME'] = name + d['NAME_IMPL'] = name+'_impl' + d['GUARD_NAME'] = 'INCLUDED_%s_%s_H' % (package.upper(), name.upper()) + d['GUARD_NAME_IMPL'] = 'INCLUDED_%s_%s_IMPL_H' % (package.upper(), name.upper()) + d['BASE_NAME'] = re.sub ('^' + package + '_', '', name) + d['SPTR_NAME'] = '%s_sptr' % name + d['WARNING'] = 'WARNING: this file is machine generated. Edits will be overwritten' + d['COPYRIGHT'] = copyright + d['TYPE'] = i_type (code3) + d['I_TYPE'] = i_type (code3) + d['O_TYPE'] = o_type (code3) + d['TAP_TYPE'] = tap_type (code3) + d['IS_COMPLEX'] = is_complex (code3) + return d + + +def standard_dict2 (name, code3, package): + d = {} + d['NAME'] = name + d['BASE_NAME'] = name + d['GUARD_NAME'] = 'INCLUDED_%s_%s_H' % (package.upper(), name.upper()) + d['WARNING'] = 'WARNING: this file is machine generated. Edits will be overwritten' + d['COPYRIGHT'] = copyright + d['TYPE'] = i_type (code3) + d['I_TYPE'] = i_type (code3) + d['O_TYPE'] = o_type (code3) + d['TAP_TYPE'] = tap_type (code3) + d['IS_COMPLEX'] = is_complex (code3) + return d + +def standard_impl_dict2 (name, code3, package): + d = {} + d['NAME'] = name + d['IMPL_NAME'] = name + d['BASE_NAME'] = name.rstrip("impl").rstrip("_") + d['GUARD_NAME'] = 'INCLUDED_%s_%s_H' % (package.upper(), name.upper()) + d['WARNING'] = 'WARNING: this file is machine generated. 
Edits will be overwritten' + d['COPYRIGHT'] = copyright + d['FIR_TYPE'] = "fir_filter_" + code3 + d['CFIR_TYPE'] = "fir_filter_" + code3[0:2] + 'c' + d['TYPE'] = i_type (code3) + d['I_TYPE'] = i_type (code3) + d['O_TYPE'] = o_type (code3) + d['TAP_TYPE'] = tap_type (code3) + d['IS_COMPLEX'] = is_complex (code3) + return d diff --git a/gr-adsbtx/python/build_utils.pyc b/gr-adsbtx/python/build_utils.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba1dab0238cb08a1d8cee4fcde6deab64e3560c9 GIT binary patch literal 7834 zcmc&(O?MO58NMU=OM@|BK8y)%E&&GxY-AwmM{v^!V}nmYxFb2lgk&<-jO{V9Mx8qX z+`y(u+cZC*KcMUEy6B?op3_Bl{SDoA)6-qgX`ko4k}T)obb(K6d9Lo~d++Opf-4MfB0v8&QgmGf{!IpLtfK+{nlWfvb>o|5LI3ej^~D4$W`q;$Tf!ZT7iOQ~0juTdFqptu*Cx^8E2BhKQ;=(dSM9b|f~ z9XCRq#oLh^{~LdmA{3`G7MCDiEKciToJz+PDNETQbccz*9XyWK<3=RCvzcpqP0~*5 zk*Sly0G>Rac|2wTMW)o2qqcJDxl%yLqnvu-s26Ao99~B)%d4#cwl^>X!+G@_J%gPE z4j@%6=0K?ubAuvJn6aIhjJvghY%?1{#k9a^^eoE?Xf%WE$PeRsCWkap7DnqqyO9Zl zL8}!t!y;D;2~L*ET7a26KFJW+Q8P<-il`_l%nj5^ZIhPFW{^gujb=Ly!Z;~yCt;Q( zjnYOs(~O>?Rch^In@O`I!uF{Ia|K;Tu)dLO_(?l!wKJa^{{UTv=U2{Y$IXp9bu!08 z^yD~=2IQEYrj{Vb=Z**}N0dT5R)$u-4&+!Bg;s<=PsBw|6YwldR+47q9Yar%8+eCF zoVBjb*AI|iZo`fJpc(pPoE-lcorc&^xglp9kL!RtrRqGzFdiOv9*_A91%}8kK$92t z#ibA@zzhzZQr zApWLwcN%axRl%C6;t;pvCs9)enuv;%W{~daTN*r}tF~}&`EI$k;4d!SU8t1rEllg^ zS&L&?wB2e1S)}7;mT0STP4jUz{=|k-s}Zd7sCDUkS}9Tp8D$3?ra?XP@f|e{b{3?I zL}8_(MbWS~&J{!#tkksEOcrMV9p)3o^+sYMOOz%Q3hD0Ye4a=wKmgATv!j^Rf#}|y zyNkpj^nqB+%C-*>J>TuF^Z~l53Y;40^vJ<<5G3DgP&3dC9M%HCgO|TjY?*O@`uk9RY zqtT8&K-177lr!wqYYJ{M2Pe5d0pF#jhKeU_jXX7}d2%Cr_gEX&PrR z4EMY%ma{2k3}%7(dXcB`C^@kXk0SF%P>9WKC9q0%Ma?I13PZ`Gz7bisPr6YyhMtZh zU{b_{_g!>}?mc4b;FRuN1UY0*%Fy)a7YFilz_k4cvdIXYxoG{5s6gxKALufmU+7`o zQK#SysY|pao1?pFbv=NcuIn>B{9Tk90C_3>Reb* z6WoSGf3~U~bQZg^%d;uF_)Op8LtNZj@mnNQfD!GFXs8$pc*^*@dX6nB?x4R^a!aLG z#RZ6XGazQD+^Nr8+3|eGhV|T(dCx@(rc6y+SCizHFQ2xQ+wu0JlOTO-Zf0| z1ZA{Yft5+p%m&`KI4B;7ewEI{<}0inAaFK-tL)ngl?4y6 z@|Hhk^p6bTtRK0BDPwp<*DX?1yAt)@W|LBuFxE!emRjLbt{yeyy~E;rC=O7j*Fmn0 zN1T=tX-+_rr)`MgQHdv!B(pawNo$2Y^D~#}bYYMk)BFXpHOXWsSP@QDSh0e6bfx%F zF!S?|FTeHZN_N3af$CDTo}_71&l)>=JxRBbymy13WKsI!l(}eAVhUKCUK-g1J_^$M zrewY!1PCDuX$n|Uwi{{EZb>$sN#57VP=ugR*iH^mj=YH=uyoSf{5V4Oahu+D5h*e} z%j4r;91g_8giSmZzzKPFqqSgzp?}wtdnGpIT0n|6m!&k2fghxi@r1v`BQRAtNmL15 zZbc}VGKyb|4+ac=1+H<40Xvd=AW^lf5e(>uJ>?bhRJd9&+euYJdPS0|h_fd*e2(KW z6DS}MPy=!JUikMf1xk~9E=Ce-c|o!I-z%e^3228nIHPV_4|T$@sVj1gq|3V$gaI6CT^W8k0xO`Zc1Wm z^Icq(=r%oySy+jiE%`=xws;lhx%gFC^1cHizagO!6kum?)H&&xcX8CxB|UR#M%OQ2 zmhVf5P?ubNNzW&(oiyIq%=ER{+3V9N-qwq06zOWRo;}4qQ7JzBje4al%yI~ z5U=lWC-mV2QL3}e2yP_ZHr$F0lD{p9Rzokh*BWtM-;L|IY(zRRy2T!|3E#6Obhu(a z)^6x1#(1icnDMHjuTN_nqKH=xIA^Mp76tGYj}tU^G;c<|xm}6uVRo0N5LYNU?Ix~1 zsDLCm>Sls?v9aE6Op`DKpdT#N?%ZFg>2hUNKPY=%xl&ubfyua!V>IGLj%>uZoW)=a z3QB`!wgZ8X0&ZgScd(>F4Zav)v8{+@9DB$E_=15`IWn6PcN@{%lE4b zNKdO!3y6}yw+Iw&96A9W!YB*)Mga4<1J|vqKl5V6DCU|B(!u3B=Nz(h z#svu+7_pW0oI1tvL5w3G-*Cn-|w2m*&dV3chkcNncP=|6&n zcE(ZEa0H1nTyg9h?Qygbx_aZs(Z-Q_?GtDZ_q9)|IgqIqnVnX=NtBX@__RKGQO7qX z{t{lH$jxV|GJkjFmgI*>5&b(N=i4h~@0QPrhI!vN=5Q@0G8x>RC=>-FpGfI@45G5( z&2n|Y5*tPrHZiR%m67Ul&9fsnF#^|7fw8;`yZH9I>B4^3Qi)?7GO_{f&s(~ErzT;r zwz|9^SY7ga)%&)}&{Hcf+lGtAQq`Zo4@MV0EON~IJ`09WkGK-z^b`_>gl6n#JFTuM zNR{idpe6m@Jv3}4%{0)v%Q1?h71SRG8KhVK70$mI3!dYm#V zM;7NxmcF>;iNk_opq(6#bq9s@eBHtQ;Ys%+t}A6G+p;e1|A|z&dv%=_MJ=A|+Fx!?%YJwkk{?}tg<_pQ4YE?Fe_J&3LoD?a02EVzjWo3ozt*Z?d?=;vx&WbMHC}-p#$UEO<~) z1Wg~{)melr9-|QdzgxU=Bp3nBIowOyjp#jwWV483D0jhe2lCD_yvc{@=ks_?P{_G# g_1AMlxx#RvFgjKkC>$<~6-MwGD4b*O=y+l5KflJMUH||9 literal 0 HcmV?d00001 diff --git a/gr-adsbtx/python/build_utils_codes.py 
b/gr-adsbtx/python/build_utils_codes.py new file mode 100644 index 0000000..9ea96ba --- /dev/null +++ b/gr-adsbtx/python/build_utils_codes.py @@ -0,0 +1,52 @@ +# +# Copyright 2004 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. +# + +def i_code (code3): + return code3[0] + +def o_code (code3): + if len (code3) >= 2: + return code3[1] + else: + return code3[0] + +def tap_code (code3): + if len (code3) >= 3: + return code3[2] + else: + return code3[0] + +def i_type (code3): + return char_to_type[i_code (code3)] + +def o_type (code3): + return char_to_type[o_code (code3)] + +def tap_type (code3): + return char_to_type[tap_code (code3)] + + +char_to_type = {} +char_to_type['s'] = 'short' +char_to_type['i'] = 'int' +char_to_type['f'] = 'float' +char_to_type['c'] = 'gr_complex' +char_to_type['b'] = 'unsigned char' diff --git a/gr-adsbtx/python/build_utils_codes.pyc b/gr-adsbtx/python/build_utils_codes.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79529fd5a7ff81735e1067a6c2e3815fd7603711 GIT binary patch literal 1513 zcmcIkO^?$s5FIydwk_M`vdr#d+tR?W576JChcV2J z(wHwZ&MvTtNT!A>;|QkowtCgi(GlLlxFddcUEz)G<<4D>^N_rskCCMjv zBBRS}h@}h1K#tsdDXN0mud62Gpg`X%Bs!oPuWIHDN^xQynDO8Mipai~#i+dqsyw0` z7Dim}RM2DKzX>8Ot)eq7#^3N*a7w5pCVHV26;A<66v`_4RlMF+1${@P-q#g(dWwyH zud9C}U9jv6u01l*J|+CN{)Z?F|A<6?76Wto{1;Wd8^YGN5ho7;k@<2>KXU55to^zl zU&blCn-&#ywX3Vo`brsQnCzP1Hce%puu63PVv(J|p_QViP*7pPlP%OiMtVlf+~GfF ahCj#s)z#H&y_{$!$s|Qf(1xj*?%NOO&NGkz literal 0 HcmV?d00001 diff --git a/gr-adsbtx/swig/CMakeLists.txt b/gr-adsbtx/swig/CMakeLists.txt new file mode 100644 index 0000000..c2e741d --- /dev/null +++ b/gr-adsbtx/swig/CMakeLists.txt @@ -0,0 +1,65 @@ +# Copyright 2011 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# GNU Radio is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# GNU Radio is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Radio; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. + +######################################################################## +# Check if there is C++ code at all +######################################################################## +if(NOT adsbtx_sources) + MESSAGE(STATUS "No C++ sources... 
skipping swig/") + return() +endif(NOT adsbtx_sources) + +######################################################################## +# Include swig generation macros +######################################################################## +find_package(SWIG) +find_package(PythonLibs 2) +if(NOT SWIG_FOUND OR NOT PYTHONLIBS_FOUND) + return() +endif() +include(GrSwig) +include(GrPython) + +######################################################################## +# Setup swig generation +######################################################################## +foreach(incdir ${GNURADIO_RUNTIME_INCLUDE_DIRS}) + list(APPEND GR_SWIG_INCLUDE_DIRS ${incdir}/gnuradio/swig) +endforeach(incdir) + +set(GR_SWIG_LIBRARIES gnuradio-adsbtx) +set(GR_SWIG_DOC_FILE ${CMAKE_CURRENT_BINARY_DIR}/adsbtx_swig_doc.i) +set(GR_SWIG_DOC_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/../include) + +GR_SWIG_MAKE(adsbtx_swig adsbtx_swig.i) + +######################################################################## +# Install the build swig module +######################################################################## +GR_SWIG_INSTALL(TARGETS adsbtx_swig DESTINATION ${GR_PYTHON_DIR}/adsbtx) + +######################################################################## +# Install swig .i files for development +######################################################################## +install( + FILES + adsbtx_swig.i + ${CMAKE_CURRENT_BINARY_DIR}/adsbtx_swig_doc.i + DESTINATION ${GR_INCLUDE_DIR}/adsbtx/swig +) diff --git a/gr-adsbtx/swig/adsbtx_swig.i b/gr-adsbtx/swig/adsbtx_swig.i new file mode 100644 index 0000000..88aa939 --- /dev/null +++ b/gr-adsbtx/swig/adsbtx_swig.i @@ -0,0 +1,13 @@ +/* -*- c++ -*- */ + +#define ADSBTX_API + +%include "gnuradio.i" // the common stuff + +//load generated python docstrings +%include "adsbtx_swig_doc.i" + +%{ +%} + +