From f0c6e6c61f1192402f1c075a306de976f01d3a26 Mon Sep 17 00:00:00 2001
From: Ferdinand Stehle
Date: Wed, 8 Mar 2023 14:14:27 +0100
Subject: [PATCH 01/13] adapt 3.10 template

---
 .gitignore | 2 +
 CMakeLists.txt | 106 +-
 LICENSE | 674 -----
 MANIFEST.md | 1 +
 README.md | 4 +-
 apps/CMakeLists.txt | 17 +-
 cmake/Modules/FindCppUnit.cmake | 39 -
 cmake/Modules/FindGnuradioRuntime.cmake | 36 -
 cmake/Modules/GrMiscUtils.cmake | 528 ----
 cmake/Modules/GrPlatform.cmake | 62 -
 cmake/Modules/GrPython.cmake | 241 --
 cmake/Modules/GrSwig.cmake | 256 --
 cmake/Modules/GrTest.cmake | 143 -
 cmake/Modules/UseSWIG.cmake | 304 --
 ...fig.cmake => gnuradio-verilogConfig.cmake} | 18 +-
 cmake/Modules/targetConfig.cmake.in | 14 +-
 docs/CMakeLists.txt | 17 +-
 docs/doxygen/CMakeLists.txt | 18 +-
 docs/doxygen/Doxyfile.in | 41 +-
 docs/doxygen/Doxyfile.swig_doc.in | 1878 ------------
 docs/doxygen/doxyxml/__init__.py | 18 +-
 docs/doxygen/doxyxml/__init__.pyc | Bin 2208 -> 0 bytes
 docs/doxygen/doxyxml/base.py | 20 +-
 docs/doxygen/doxyxml/base.pyc | Bin 7979 -> 0 bytes
 docs/doxygen/doxyxml/doxyindex.py | 47 +-
 docs/doxygen/doxyxml/doxyindex.pyc | Bin 12590 -> 0 bytes
 docs/doxygen/doxyxml/generated/__init__.py | 1 -
 docs/doxygen/doxyxml/generated/__init__.pyc | Bin 418 -> 0 bytes
 docs/doxygen/doxyxml/generated/compound.py | 147 +-
 docs/doxygen/doxyxml/generated/compound.pyc | Bin 34242 -> 0 bytes
 .../doxyxml/generated/compoundsuper.py | 2540 ++++++++++++-----
 .../doxyxml/generated/compoundsuper.pyc | Bin 479090 -> 0 bytes
 docs/doxygen/doxyxml/generated/index.py | 7 +-
 docs/doxygen/doxyxml/generated/index.pyc | Bin 3112 -> 0 bytes
 docs/doxygen/doxyxml/generated/indexsuper.py | 123 +-
 docs/doxygen/doxyxml/generated/indexsuper.pyc | Bin 27746 -> 0 bytes
 docs/doxygen/doxyxml/text.py | 21 +-
 docs/doxygen/doxyxml/text.pyc | Bin 1461 -> 0 bytes
 docs/doxygen/other/doxypy.py | 446 +++
 docs/doxygen/other/group_defs.dox | 1 -
 docs/doxygen/pydoc_macros.h | 19 +
 docs/doxygen/swig_doc.pyc | Bin 9589 -> 0 bytes
 docs/doxygen/{swig_doc.py => update_pydoc.py} | 271 +-
 grc/CMakeLists.txt | 17 +-
 include/{ => gnuradio}/verilog/CMakeLists.txt | 0
 include/{ => gnuradio}/verilog/Shared_lib.h | 0
 include/{ => gnuradio}/verilog/Shell_cmd.h | 0
 include/{ => gnuradio}/verilog/api.h | 0
 include/{ => gnuradio}/verilog/constants.h | 0
 .../verilog/gr_verilog_iotype.h | 0
 .../{ => gnuradio}/verilog/verilog_axi_bb.h | 0
 .../{ => gnuradio}/verilog/verilog_axi_cc.h | 0
 .../{ => gnuradio}/verilog/verilog_axi_ff.h | 0
 .../{ => gnuradio}/verilog/verilog_axi_ii.h | 0
 .../{ => gnuradio}/verilog/verilog_axi_ss.h | 0
 lib/CMakeLists.txt | 46 +-
 lib/qa_verilog.cc | 36 -
 lib/qa_verilog.h | 38 -
 lib/test_verilog.cc | 48 -
 python/CMakeLists.txt | 48 -
 python/__init__.py | 31 -
 python/__init__.pyc | Bin 361 -> 0 bytes
 python/build_utils.py | 226 --
 python/build_utils.pyc | Bin 7834 -> 0 bytes
 python/build_utils_codes.py | 52 -
 python/build_utils_codes.pyc | Bin 1513 -> 0 bytes
 python/qa_verilog_axi_bb.py | 73 -
 python/qa_verilog_axi_cc.py | 41 -
 python/qa_verilog_axi_ff.py | 82 -
 python/qa_verilog_axi_ii.py | 73 -
 python/qa_verilog_axi_ss.py | 73 -
 python/verilog/.gitignore | 5 +
 python/verilog/CMakeLists.txt | 41 +
 python/verilog/__init__.py | 23 +
 python/verilog/bindings/CMakeLists.txt | 47 +
 python/verilog/bindings/README.md | 0
 python/verilog/bindings/bind_oot_file.py | 58 +
 python/verilog/bindings/docstrings/README.md | 1 +
 python/verilog/bindings/header_utils.py | 80 +
 python/verilog/bindings/python_bindings.cc | 53 +
 swig/CMakeLists.txt | 65 -
 swig/verilog_swig.i | 28 -
 82 files changed, 3126 insertions(+), 6219 deletions(-)
 delete mode 100644 LICENSE
 delete mode 100644 cmake/Modules/FindCppUnit.cmake
 delete mode 100644 cmake/Modules/FindGnuradioRuntime.cmake
 delete mode 100644 cmake/Modules/GrMiscUtils.cmake
 delete mode 100644 cmake/Modules/GrPlatform.cmake
 delete mode 100644 cmake/Modules/GrPython.cmake
 delete mode 100644 cmake/Modules/GrSwig.cmake
 delete mode 100644 cmake/Modules/GrTest.cmake
 delete mode 100644 cmake/Modules/UseSWIG.cmake
 rename cmake/Modules/{verilogConfig.cmake => gnuradio-verilogConfig.cmake} (56%)
 delete mode 100644 docs/doxygen/Doxyfile.swig_doc.in
 delete mode 100644 docs/doxygen/doxyxml/__init__.pyc
 delete mode 100644 docs/doxygen/doxyxml/base.pyc
 delete mode 100644 docs/doxygen/doxyxml/doxyindex.pyc
 delete mode 100644 docs/doxygen/doxyxml/generated/__init__.pyc
 delete mode 100644 docs/doxygen/doxyxml/generated/compound.pyc
 delete mode 100644 docs/doxygen/doxyxml/generated/compoundsuper.pyc
 delete mode 100644 docs/doxygen/doxyxml/generated/index.pyc
 delete mode 100644 docs/doxygen/doxyxml/generated/indexsuper.pyc
 delete mode 100644 docs/doxygen/doxyxml/text.pyc
 create mode 100644 docs/doxygen/other/doxypy.py
 create mode 100644 docs/doxygen/pydoc_macros.h
 delete mode 100644 docs/doxygen/swig_doc.pyc
 rename docs/doxygen/{swig_doc.py => update_pydoc.py} (51%)
 rename include/{ => gnuradio}/verilog/CMakeLists.txt (100%)
 rename include/{ => gnuradio}/verilog/Shared_lib.h (100%)
 rename include/{ => gnuradio}/verilog/Shell_cmd.h (100%)
 rename include/{ => gnuradio}/verilog/api.h (100%)
 rename include/{ => gnuradio}/verilog/constants.h (100%)
 rename include/{ => gnuradio}/verilog/gr_verilog_iotype.h (100%)
 rename include/{ => gnuradio}/verilog/verilog_axi_bb.h (100%)
 rename include/{ => gnuradio}/verilog/verilog_axi_cc.h (100%)
 rename include/{ => gnuradio}/verilog/verilog_axi_ff.h (100%)
 rename include/{ => gnuradio}/verilog/verilog_axi_ss.h (100%)
 delete mode 100644 lib/qa_verilog.cc
 delete mode 100644 lib/qa_verilog.h
 delete mode 100644 lib/test_verilog.cc
 delete mode 100644 python/CMakeLists.txt
 delete mode 100644 python/__init__.py
 delete mode 100644 python/__init__.pyc
 delete mode 100644 python/build_utils.py
 delete mode 100644 python/build_utils.pyc
 delete mode 100644 python/build_utils_codes.py
 delete mode 100644 python/build_utils_codes.pyc
 delete mode 100755 python/qa_verilog_axi_bb.py
 delete mode 100755 python/qa_verilog_axi_cc.py
 delete mode 100755 python/qa_verilog_axi_ff.py
 delete mode 100755 python/qa_verilog_axi_ii.py
 delete mode 100755 python/qa_verilog_axi_ss.py
 create mode 100644 python/verilog/.gitignore
 create mode 100644 python/verilog/CMakeLists.txt
 create mode 100644 python/verilog/__init__.py
 create mode 100644 python/verilog/bindings/CMakeLists.txt
 create mode 100644 python/verilog/bindings/README.md
 create mode 100644 python/verilog/bindings/bind_oot_file.py
 create mode 100644 python/verilog/bindings/docstrings/README.md
 create mode 100644 python/verilog/bindings/header_utils.py
 create mode 100644 python/verilog/bindings/python_bindings.cc
 delete mode 100644 swig/CMakeLists.txt
 delete mode 100644 swig/verilog_swig.i

diff --git a/.gitignore b/.gitignore
index 40a538b..3d23046 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,4 @@
 #
 build/
+*.pyc
+obj_dir
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c0c4bbe..c783ea5 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,21 +1,10 @@
-# Copyright 2011,2012,2014,2016 Free Software Foundation, Inc.
+# Copyright 2011-2020 Free Software Foundation, Inc.
 #
-# This file is part of GNU Radio
+# This file was generated by gr_modtool, a tool from the GNU Radio framework
+# This file is a part of gr-verilog
 #
-# GNU Radio is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 3, or (at your option)
-# any later version.
+# SPDX-License-Identifier: GPL-3.0-or-later
 #
-# GNU Radio is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with GNU Radio; see the file COPYING. If not, write to
-# the Free Software Foundation, Inc., 51 Franklin Street,
-# Boston, MA 02110-1301, USA.

 ########################################################################
 # Project setup
@@ -39,63 +28,35 @@ set(CMAKE_BUILD_TYPE ${CMAKE_BUILD_TYPE} CACHE STRING "")
 # Make sure our local CMake Modules path comes first
 list(INSERT CMAKE_MODULE_PATH 0 ${CMAKE_SOURCE_DIR}/cmake/Modules)

+# Find gnuradio to get access to the cmake modules
+find_package(Gnuradio "3.10" REQUIRED)

 # Set the version information here
 set(VERSION_MAJOR 1)
-set(VERSION_API 0)
-set(VERSION_ABI 0)
-set(VERSION_PATCH git)
+set(VERSION_API 0)
+set(VERSION_ABI 0)
+set(VERSION_PATCH 0)

-# Set cmake policies.
-# This will suppress developer warnings during the cmake process that can occur
-# if a newer cmake version than the minimum is used.
+cmake_policy(SET CMP0011 NEW)

-if(POLICY CMP0026)
- cmake_policy(SET CMP0026 OLD)
-endif()
-if(POLICY CMP0043)
- cmake_policy(SET CMP0043 OLD)
-endif()
-if(POLICY CMP0045)
- cmake_policy(SET CMP0045 OLD)
-endif()
-if(POLICY CMP0046)
- cmake_policy(SET CMP0046 OLD)
-endif()
+# Enable generation of compile_commands.json for code completion engines
+set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

 ########################################################################
-# Compiler specific setup
+# Minimum Version Requirements
 ########################################################################
-if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT WIN32)
- #http://gcc.gnu.org/wiki/Visibility
- add_definitions(-fvisibility=hidden)
-endif()
+
+include(GrMinReq)

 ########################################################################
-# Find boost
+# Compiler settings
 ########################################################################
-if(UNIX AND EXISTS "/usr/lib64")
- list(APPEND BOOST_LIBRARYDIR "/usr/lib64") #fedora 64-bit fix
-endif(UNIX AND EXISTS "/usr/lib64")
-set(Boost_ADDITIONAL_VERSIONS
- "1.35.0" "1.35" "1.36.0" "1.36" "1.37.0" "1.37" "1.38.0" "1.38" "1.39.0" "1.39"
- "1.40.0" "1.40" "1.41.0" "1.41" "1.42.0" "1.42" "1.43.0" "1.43" "1.44.0" "1.44"
- "1.45.0" "1.45" "1.46.0" "1.46" "1.47.0" "1.47" "1.48.0" "1.48" "1.49.0" "1.49"
- "1.50.0" "1.50" "1.51.0" "1.51" "1.52.0" "1.52" "1.53.0" "1.53" "1.54.0" "1.54"
- "1.55.0" "1.55" "1.56.0" "1.56" "1.57.0" "1.57" "1.58.0" "1.58" "1.59.0" "1.59"
- "1.60.0" "1.60" "1.61.0" "1.61" "1.62.0" "1.62" "1.63.0" "1.63" "1.64.0" "1.64"
- "1.65.0" "1.65" "1.66.0" "1.66" "1.67.0" "1.67" "1.68.0" "1.68" "1.69.0" "1.69"
-)
-find_package(Boost "1.35" COMPONENTS filesystem system)
-if(NOT Boost_FOUND)
- message(FATAL_ERROR "Boost required to compile verilog")
-endif()
+include(GrCompilerSettings)
 ########################################################################
 # Install directories
 ########################################################################
-find_package(Gnuradio "3.8" REQUIRED)
 include(GrVersion)
 include(GrPlatform) #define LIB_SUFFIX
@@ -104,8 +65,8 @@ if(NOT CMAKE_MODULES_DIR)
 set(CMAKE_MODULES_DIR lib${LIB_SUFFIX}/cmake)
 endif(NOT CMAKE_MODULES_DIR)

-set(GR_INCLUDE_DIR include/verilog)
-set(GR_CMAKE_DIR ${CMAKE_MODULES_DIR}/${CMAKE_PROJECT_NAME})
+set(GR_INCLUDE_DIR include/gnuradio/verilog)
+set(GR_CMAKE_DIR ${CMAKE_MODULES_DIR}/gnuradio-verilog)
 set(GR_PKG_DATA_DIR ${GR_DATA_DIR}/${CMAKE_PROJECT_NAME})
 set(GR_PKG_DOC_DIR ${GR_DOC_DIR}/${CMAKE_PROJECT_NAME})
 set(GR_PKG_CONF_DIR ${GR_CONF_DIR}/${CMAKE_PROJECT_NAME}/conf.d)
@@ -134,12 +95,8 @@ endif(APPLE)
 ########################################################################
 # Find gnuradio build dependencies
 ########################################################################
-find_package(CppUnit)
 find_package(Doxygen)

-if(NOT CPPUNIT_FOUND)
- message(FATAL_ERROR "CppUnit required to compile verilog")
-endif()

 ########################################################################
 # Setup doxygen option
@@ -165,19 +122,32 @@ add_custom_target(uninstall
 ########################################################################
 # Add subdirectories
 ########################################################################
-add_subdirectory(include/verilog)
+add_subdirectory(include/gnuradio/verilog)
 add_subdirectory(lib)
 add_subdirectory(apps)
 add_subdirectory(docs)
-add_subdirectory(swig)
-add_subdirectory(python)
-add_subdirectory(grc)
+# NOTE: manually update below to use GRC to generate C++ flowgraphs w/o python
+if(ENABLE_PYTHON)
+ message(STATUS "PYTHON and GRC components are enabled")
+ add_subdirectory(python/verilog)
+ add_subdirectory(grc)
 add_subdirectory(templates)
+else(ENABLE_PYTHON)
+ message(STATUS "PYTHON and GRC components are disabled")
+endif(ENABLE_PYTHON)

 ########################################################################
 # Install cmake search helper for this library
 ########################################################################
-install(FILES cmake/Modules/verilogConfig.cmake
- DESTINATION ${CMAKE_MODULES_DIR}/${CMAKE_PROJECT_NAME}
+install(FILES cmake/Modules/gnuradio-verilogConfig.cmake
+ DESTINATION ${GR_CMAKE_DIR}
 )
+
+include(CMakePackageConfigHelpers)
+configure_package_config_file(
+ ${PROJECT_SOURCE_DIR}/cmake/Modules/targetConfig.cmake.in
+ ${CMAKE_CURRENT_BINARY_DIR}/cmake/Modules/${target}Config.cmake
+ INSTALL_DESTINATION ${GR_CMAKE_DIR}
+)
+
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index f288702..0000000
--- a/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
-[ full text of the GNU General Public License v3 (674 lines) deleted; the verbatim license body is omitted here ]
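The two install rules at the end of the CMakeLists.txt hunk above (the hand-written gnuradio-verilogConfig.cmake plus the package config generated from targetConfig.cmake.in) are what let other CMake projects locate this module after `make install`. A minimal downstream sketch, assuming the usual gr_modtool export namespace `gnuradio::` and target name `gnuradio-verilog`; the consumer project and file names below are hypothetical and not part of this patch:

    # hypothetical consumer CMakeLists.txt -- not part of this patch
    cmake_minimum_required(VERSION 3.16)
    project(verilog_consumer CXX)
    # picks up the installed gnuradio-verilogConfig.cmake
    find_package(gnuradio-verilog CONFIG REQUIRED)
    add_executable(my_flowgraph main.cc)
    target_link_libraries(my_flowgraph PRIVATE gnuradio::gnuradio-verilog)
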
diff --git a/MANIFEST.md b/MANIFEST.md index 1b947d9..973d619 100644 --- a/MANIFEST.md +++ b/MANIFEST.md @@ -7,6 +7,7 @@ author: copyright_owner: - Copyright Owner 1 license: +gr_supported_version: # Put a comma separated list of supported GR versions here #repo: # Put the URL of the repository here, or leave blank for default #website: # If you have a separate project website, put it here #icon: # Put a URL to a square image here that will be used as an icon on CGRAN diff --git a/README.md b/README.md index a01fd52..810d0fc 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ First you need to install the dependencies (see below). Then, you need to download this repository ```bash -$ git clone https://github.com/B0WEN-HU/gr-verilog.git +$ git clone https://github.com/gnuradio/gr-verilog.git ``` After this, gr-verilog should be installed as any other GNU Radio out-of-tree module. @@ -88,4 +88,4 @@ The `Complex` type of `Verilog AXI`, `verilog_axi_cc`, is not the block that is ## Future Work Add more examples. -Bring verilog_general_xx into the module. \ No newline at end of file +Bring verilog_general_xx into the module. diff --git a/apps/CMakeLists.txt b/apps/CMakeLists.txt index c837d77..c241798 100644 --- a/apps/CMakeLists.txt +++ b/apps/CMakeLists.txt @@ -1,21 +1,10 @@ # Copyright 2011 Free Software Foundation, Inc. # -# This file is part of GNU Radio +# This file was generated by gr_modtool, a tool from the GNU Radio framework +# This file is a part of gr-verilog # -# GNU Radio is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. +# SPDX-License-Identifier: GPL-3.0-or-later # -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. include(GrPython) diff --git a/cmake/Modules/FindCppUnit.cmake b/cmake/Modules/FindCppUnit.cmake deleted file mode 100644 index f93ade3..0000000 --- a/cmake/Modules/FindCppUnit.cmake +++ /dev/null @@ -1,39 +0,0 @@ -# http://www.cmake.org/pipermail/cmake/2006-October/011446.html -# Modified to use pkg config and use standard var names - -# -# Find the CppUnit includes and library -# -# This module defines -# CPPUNIT_INCLUDE_DIR, where to find tiff.h, etc. -# CPPUNIT_LIBRARIES, the libraries to link against to use CppUnit. -# CPPUNIT_FOUND, If false, do not try to use CppUnit. 
- -INCLUDE(FindPkgConfig) -PKG_CHECK_MODULES(PC_CPPUNIT "cppunit") - -FIND_PATH(CPPUNIT_INCLUDE_DIRS - NAMES cppunit/TestCase.h - HINTS ${PC_CPPUNIT_INCLUDE_DIR} - ${CMAKE_INSTALL_PREFIX}/include - PATHS - /usr/local/include - /usr/include -) - -FIND_LIBRARY(CPPUNIT_LIBRARIES - NAMES cppunit - HINTS ${PC_CPPUNIT_LIBDIR} - ${CMAKE_INSTALL_PREFIX}/lib - ${CMAKE_INSTALL_PREFIX}/lib64 - PATHS - ${CPPUNIT_INCLUDE_DIRS}/../lib - /usr/local/lib - /usr/lib -) - -LIST(APPEND CPPUNIT_LIBRARIES ${CMAKE_DL_LIBS}) - -INCLUDE(FindPackageHandleStandardArgs) -FIND_PACKAGE_HANDLE_STANDARD_ARGS(CPPUNIT DEFAULT_MSG CPPUNIT_LIBRARIES CPPUNIT_INCLUDE_DIRS) -MARK_AS_ADVANCED(CPPUNIT_LIBRARIES CPPUNIT_INCLUDE_DIRS) diff --git a/cmake/Modules/FindGnuradioRuntime.cmake b/cmake/Modules/FindGnuradioRuntime.cmake deleted file mode 100644 index afed684..0000000 --- a/cmake/Modules/FindGnuradioRuntime.cmake +++ /dev/null @@ -1,36 +0,0 @@ -INCLUDE(FindPkgConfig) -PKG_CHECK_MODULES(PC_GNURADIO_RUNTIME gnuradio-runtime) - -if(PC_GNURADIO_RUNTIME_FOUND) - # look for include files - FIND_PATH( - GNURADIO_RUNTIME_INCLUDE_DIRS - NAMES gnuradio/top_block.h - HINTS $ENV{GNURADIO_RUNTIME_DIR}/include - ${PC_GNURADIO_RUNTIME_INCLUDE_DIRS} - ${CMAKE_INSTALL_PREFIX}/include - PATHS /usr/local/include - /usr/include - ) - - # look for libs - FIND_LIBRARY( - GNURADIO_RUNTIME_LIBRARIES - NAMES gnuradio-runtime - HINTS $ENV{GNURADIO_RUNTIME_DIR}/lib - ${PC_GNURADIO_RUNTIME_LIBDIR} - ${CMAKE_INSTALL_PREFIX}/lib/ - ${CMAKE_INSTALL_PREFIX}/lib64/ - PATHS /usr/local/lib - /usr/local/lib64 - /usr/lib - /usr/lib64 - ) - - set(GNURADIO_RUNTIME_FOUND ${PC_GNURADIO_RUNTIME_FOUND}) -endif(PC_GNURADIO_RUNTIME_FOUND) - -INCLUDE(FindPackageHandleStandardArgs) -# do not check GNURADIO_RUNTIME_INCLUDE_DIRS, is not set when default include path us used. -FIND_PACKAGE_HANDLE_STANDARD_ARGS(GNURADIO_RUNTIME DEFAULT_MSG GNURADIO_RUNTIME_LIBRARIES) -MARK_AS_ADVANCED(GNURADIO_RUNTIME_LIBRARIES GNURADIO_RUNTIME_INCLUDE_DIRS) diff --git a/cmake/Modules/GrMiscUtils.cmake b/cmake/Modules/GrMiscUtils.cmake deleted file mode 100644 index 5bad57c..0000000 --- a/cmake/Modules/GrMiscUtils.cmake +++ /dev/null @@ -1,528 +0,0 @@ -# Copyright 2010-2011,2014 Free Software Foundation, Inc. -# -# This file is part of GNU Radio -# -# GNU Radio is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. -# -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. - -if(DEFINED __INCLUDED_GR_MISC_UTILS_CMAKE) - return() -endif() -set(__INCLUDED_GR_MISC_UTILS_CMAKE TRUE) - -######################################################################## -# Set global variable macro. -# Used for subdirectories to export settings. -# Example: include and library paths. 
-######################################################################## -function(GR_SET_GLOBAL var) - set(${var} ${ARGN} CACHE INTERNAL "" FORCE) -endfunction(GR_SET_GLOBAL) - -######################################################################## -# Set the pre-processor definition if the condition is true. -# - def the pre-processor definition to set and condition name -######################################################################## -function(GR_ADD_COND_DEF def) - if(${def}) - add_definitions(-D${def}) - endif(${def}) -endfunction(GR_ADD_COND_DEF) - -######################################################################## -# Check for a header and conditionally set a compile define. -# - hdr the relative path to the header file -# - def the pre-processor definition to set -######################################################################## -function(GR_CHECK_HDR_N_DEF hdr def) - include(CheckIncludeFileCXX) - CHECK_INCLUDE_FILE_CXX(${hdr} ${def}) - GR_ADD_COND_DEF(${def}) -endfunction(GR_CHECK_HDR_N_DEF) - -######################################################################## -# Include subdirectory macro. -# Sets the CMake directory variables, -# includes the subdirectory CMakeLists.txt, -# resets the CMake directory variables. -# -# This macro includes subdirectories rather than adding them -# so that the subdirectory can affect variables in the level above. -# This provides a work-around for the lack of convenience libraries. -# This way a subdirectory can append to the list of library sources. -######################################################################## -macro(GR_INCLUDE_SUBDIRECTORY subdir) - #insert the current directories on the front of the list - list(INSERT _cmake_source_dirs 0 ${CMAKE_CURRENT_SOURCE_DIR}) - list(INSERT _cmake_binary_dirs 0 ${CMAKE_CURRENT_BINARY_DIR}) - - #set the current directories to the names of the subdirs - set(CMAKE_CURRENT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/${subdir}) - set(CMAKE_CURRENT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/${subdir}) - - #include the subdirectory CMakeLists to run it - file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) - include(${CMAKE_CURRENT_SOURCE_DIR}/CMakeLists.txt) - - #reset the value of the current directories - list(GET _cmake_source_dirs 0 CMAKE_CURRENT_SOURCE_DIR) - list(GET _cmake_binary_dirs 0 CMAKE_CURRENT_BINARY_DIR) - - #pop the subdir names of the front of the list - list(REMOVE_AT _cmake_source_dirs 0) - list(REMOVE_AT _cmake_binary_dirs 0) -endmacro(GR_INCLUDE_SUBDIRECTORY) - -######################################################################## -# Check if a compiler flag works and conditionally set a compile define. -# - flag the compiler flag to check for -# - have the variable to set with result -######################################################################## -macro(GR_ADD_CXX_COMPILER_FLAG_IF_AVAILABLE flag have) - include(CheckCXXCompilerFlag) - CHECK_CXX_COMPILER_FLAG(${flag} ${have}) - if(${have}) - if(${CMAKE_VERSION} VERSION_GREATER "2.8.4") - STRING(FIND "${CMAKE_CXX_FLAGS}" "${flag}" flag_dup) - if(${flag_dup} EQUAL -1) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}") - endif(${flag_dup} EQUAL -1) - endif(${CMAKE_VERSION} VERSION_GREATER "2.8.4") - endif(${have}) -endmacro(GR_ADD_CXX_COMPILER_FLAG_IF_AVAILABLE) - -######################################################################## -# Generates the .la libtool file -# This appears to generate libtool files that cannot be used by auto*. 
-# Usage GR_LIBTOOL(TARGET [target] DESTINATION [dest]) -# Notice: there is not COMPONENT option, these will not get distributed. -######################################################################## -function(GR_LIBTOOL) - if(NOT DEFINED GENERATE_LIBTOOL) - set(GENERATE_LIBTOOL OFF) #disabled by default - endif() - - if(GENERATE_LIBTOOL) - include(CMakeParseArgumentsCopy) - CMAKE_PARSE_ARGUMENTS(GR_LIBTOOL "" "TARGET;DESTINATION" "" ${ARGN}) - - find_program(LIBTOOL libtool) - if(LIBTOOL) - include(CMakeMacroLibtoolFile) - CREATE_LIBTOOL_FILE(${GR_LIBTOOL_TARGET} /${GR_LIBTOOL_DESTINATION}) - endif(LIBTOOL) - endif(GENERATE_LIBTOOL) - -endfunction(GR_LIBTOOL) - -######################################################################## -# Do standard things to the library target -# - set target properties -# - make install rules -# Also handle gnuradio custom naming conventions w/ extras mode. -######################################################################## -function(GR_LIBRARY_FOO target) - #parse the arguments for component names - include(CMakeParseArgumentsCopy) - CMAKE_PARSE_ARGUMENTS(GR_LIBRARY "" "RUNTIME_COMPONENT;DEVEL_COMPONENT" "" ${ARGN}) - - #set additional target properties - set_target_properties(${target} PROPERTIES SOVERSION ${LIBVER}) - - #install the generated files like so... - install(TARGETS ${target} - LIBRARY DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} # .so/.dylib file - ARCHIVE DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_DEVEL_COMPONENT} # .lib file - RUNTIME DESTINATION ${GR_RUNTIME_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} # .dll file - ) - - #extras mode enabled automatically on linux - if(NOT DEFINED LIBRARY_EXTRAS) - set(LIBRARY_EXTRAS ${LINUX}) - endif() - - #special extras mode to enable alternative naming conventions - if(LIBRARY_EXTRAS) - - #create .la file before changing props - GR_LIBTOOL(TARGET ${target} DESTINATION ${GR_LIBRARY_DIR}) - - #give the library a special name with ultra-zero soversion - set_target_properties(${target} PROPERTIES OUTPUT_NAME ${target}-${LIBVER} SOVERSION "0.0.0") - set(target_name lib${target}-${LIBVER}.so.0.0.0) - - #custom command to generate symlinks - add_custom_command( - TARGET ${target} - POST_BUILD - COMMAND ${CMAKE_COMMAND} -E create_symlink ${target_name} ${CMAKE_CURRENT_BINARY_DIR}/lib${target}.so - COMMAND ${CMAKE_COMMAND} -E create_symlink ${target_name} ${CMAKE_CURRENT_BINARY_DIR}/lib${target}-${LIBVER}.so.0 - COMMAND ${CMAKE_COMMAND} -E touch ${target_name} #so the symlinks point to something valid so cmake 2.6 will install - ) - - #and install the extra symlinks - install( - FILES - ${CMAKE_CURRENT_BINARY_DIR}/lib${target}.so - ${CMAKE_CURRENT_BINARY_DIR}/lib${target}-${LIBVER}.so.0 - DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} - ) - - endif(LIBRARY_EXTRAS) -endfunction(GR_LIBRARY_FOO) - -######################################################################## -# Create a dummy custom command that depends on other targets. -# Usage: -# GR_GEN_TARGET_DEPS(unique_name target_deps ...) -# ADD_CUSTOM_COMMAND( ${target_deps}) -# -# Custom command cant depend on targets, but can depend on executables, -# and executables can depend on targets. 
So this is the process: -######################################################################## -function(GR_GEN_TARGET_DEPS name var) - file( - WRITE ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp.in - "int main(void){return 0;}\n" - ) - execute_process( - COMMAND ${CMAKE_COMMAND} -E copy_if_different - ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp.in - ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp - ) - add_executable(${name} ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp) - if(ARGN) - add_dependencies(${name} ${ARGN}) - endif(ARGN) - - if(CMAKE_CROSSCOMPILING) - set(${var} "DEPENDS;${name}" PARENT_SCOPE) #cant call command when cross - else() - set(${var} "DEPENDS;${name};COMMAND;${name}" PARENT_SCOPE) - endif() -endfunction(GR_GEN_TARGET_DEPS) - -######################################################################## -# Control use of gr_logger -# Usage: -# GR_LOGGING() -# -# Will set ENABLE_GR_LOG to 1 by default. -# Can manually set with -DENABLE_GR_LOG=0|1 -######################################################################## -function(GR_LOGGING) - find_package(Log4cpp) - - OPTION(ENABLE_GR_LOG "Use gr_logger" ON) - if(ENABLE_GR_LOG) - # If gr_logger is enabled, make it usable - add_definitions( -DENABLE_GR_LOG ) - - # also test LOG4CPP; if we have it, use this version of the logger - # otherwise, default to the stdout/stderr model. - if(LOG4CPP_FOUND) - SET(HAVE_LOG4CPP True CACHE INTERNAL "" FORCE) - add_definitions( -DHAVE_LOG4CPP ) - else(not LOG4CPP_FOUND) - SET(HAVE_LOG4CPP False CACHE INTERNAL "" FORCE) - SET(LOG4CPP_INCLUDE_DIRS "" CACHE INTERNAL "" FORCE) - SET(LOG4CPP_LIBRARY_DIRS "" CACHE INTERNAL "" FORCE) - SET(LOG4CPP_LIBRARIES "" CACHE INTERNAL "" FORCE) - endif(LOG4CPP_FOUND) - - SET(ENABLE_GR_LOG ${ENABLE_GR_LOG} CACHE INTERNAL "" FORCE) - - else(ENABLE_GR_LOG) - SET(HAVE_LOG4CPP False CACHE INTERNAL "" FORCE) - SET(LOG4CPP_INCLUDE_DIRS "" CACHE INTERNAL "" FORCE) - SET(LOG4CPP_LIBRARY_DIRS "" CACHE INTERNAL "" FORCE) - SET(LOG4CPP_LIBRARIES "" CACHE INTERNAL "" FORCE) - endif(ENABLE_GR_LOG) - - message(STATUS "ENABLE_GR_LOG set to ${ENABLE_GR_LOG}.") - message(STATUS "HAVE_LOG4CPP set to ${HAVE_LOG4CPP}.") - message(STATUS "LOG4CPP_LIBRARIES set to ${LOG4CPP_LIBRARIES}.") - -endfunction(GR_LOGGING) - -######################################################################## -# Run GRCC to compile .grc files into .py files. -# -# Usage: GRCC(filename, directory) -# - filenames: List of file name of .grc file -# - directory: directory of built .py file - usually in -# ${CMAKE_CURRENT_BINARY_DIR} -# - Sets PYFILES: output converted GRC file names to Python files. -######################################################################## -function(GRCC) - # Extract directory from list of args, remove it for the list of filenames. - list(GET ARGV -1 directory) - list(REMOVE_AT ARGV -1) - set(filenames ${ARGV}) - file(MAKE_DIRECTORY ${directory}) - - SET(GRCC_COMMAND ${CMAKE_SOURCE_DIR}/gr-utils/python/grcc) - - # GRCC uses some stuff in grc and gnuradio-runtime, so we force - # the known paths here - list(APPEND PYTHONPATHS - ${CMAKE_SOURCE_DIR} - ${CMAKE_SOURCE_DIR}/gnuradio-runtime/python - ${CMAKE_SOURCE_DIR}/gnuradio-runtime/lib/swig - ${CMAKE_BINARY_DIR}/gnuradio-runtime/lib/swig - ) - - if(WIN32) - #SWIG generates the python library files into a subdirectory. - #Therefore, we must append this subdirectory into PYTHONPATH. 
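# Hedged sketch of the GR_GEN_TARGET_DEPS helper defined above: it builds a
# dummy executable that depends on the listed targets and returns a
# "DEPENDS;...[;COMMAND;...]" fragment that can be spliced into a custom
# command, working around the fact that custom commands cannot depend on
# targets directly. Target and file names are hypothetical.
include(GrMiscUtils)
GR_GEN_TARGET_DEPS(example_codegen_tag example_codegen_deps gnuradio-example)
add_custom_command(
    OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/generated_block.cc
    ${example_codegen_deps}   # expands to DEPENDS (and, natively, COMMAND) example_codegen_tag
    COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/gen_block.py  # hypothetical generator
)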
- #Only do this for the python directories matching the following: - foreach(pydir ${PYTHONPATHS}) - get_filename_component(name ${pydir} NAME) - if(name MATCHES "^(swig|lib|src)$") - list(APPEND PYTHONPATHS ${pydir}/${CMAKE_BUILD_TYPE}) - endif() - endforeach(pydir) - endif(WIN32) - - file(TO_NATIVE_PATH "${PYTHONPATHS}" pypath) - - if(UNIX) - list(APPEND pypath "$PYTHONPATH") - string(REPLACE ";" ":" pypath "${pypath}") - set(ENV{PYTHONPATH} ${pypath}) - endif(UNIX) - - if(WIN32) - list(APPEND pypath "%PYTHONPATH%") - string(REPLACE ";" "\\;" pypath "${pypath}") - #list(APPEND environs "PYTHONPATH=${pypath}") - set(ENV{PYTHONPATH} ${pypath}) - endif(WIN32) - - foreach(f ${filenames}) - execute_process( - COMMAND ${GRCC_COMMAND} -d ${directory} ${f} - ) - string(REPLACE ".grc" ".py" pyfile "${f}") - string(REPLACE "${CMAKE_CURRENT_SOURCE_DIR}" "${CMAKE_CURRENT_BINARY_DIR}" pyfile "${pyfile}") - list(APPEND pyfiles ${pyfile}) - endforeach(f) - - set(PYFILES ${pyfiles} PARENT_SCOPE) -endfunction(GRCC) - -######################################################################## -# Check if HAVE_PTHREAD_SETSCHEDPARAM and HAVE_SCHED_SETSCHEDULER -# should be defined -######################################################################## -macro(GR_CHECK_LINUX_SCHED_AVAIL) -set(CMAKE_REQUIRED_LIBRARIES -lpthread) - CHECK_CXX_SOURCE_COMPILES(" - #include - int main(){ - pthread_t pthread; - pthread_setschedparam(pthread, 0, 0); - return 0; - } " HAVE_PTHREAD_SETSCHEDPARAM - ) - GR_ADD_COND_DEF(HAVE_PTHREAD_SETSCHEDPARAM) - - CHECK_CXX_SOURCE_COMPILES(" - #include - int main(){ - pid_t pid; - sched_setscheduler(pid, 0, 0); - return 0; - } " HAVE_SCHED_SETSCHEDULER - ) - GR_ADD_COND_DEF(HAVE_SCHED_SETSCHEDULER) -endmacro(GR_CHECK_LINUX_SCHED_AVAIL) - -######################################################################## -# Macros to generate source and header files from template -######################################################################## -macro(GR_EXPAND_X_H component root) - - include(GrPython) - - file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py -"#!${PYTHON_EXECUTABLE} - -import sys, os, re -sys.path.append('${GR_RUNTIME_PYTHONPATH}') -sys.path.append('${CMAKE_SOURCE_DIR}/python') -os.environ['srcdir'] = '${CMAKE_CURRENT_SOURCE_DIR}' -os.chdir('${CMAKE_CURRENT_BINARY_DIR}') - -if __name__ == '__main__': - import build_utils - root, inp = sys.argv[1:3] - for sig in sys.argv[3:]: - name = re.sub ('X+', sig, root) - d = build_utils.standard_dict2(name, sig, '${component}') - build_utils.expand_template(d, inp) -") - - #make a list of all the generated headers - unset(expanded_files_h) - foreach(sig ${ARGN}) - string(REGEX REPLACE "X+" ${sig} name ${root}) - list(APPEND expanded_files_h ${CMAKE_CURRENT_BINARY_DIR}/${name}.h) - endforeach(sig) - unset(name) - - #create a command to generate the headers - add_custom_command( - OUTPUT ${expanded_files_h} - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}.h.t - COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} - ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py - ${root} ${root}.h.t ${ARGN} - ) - - #install rules for the generated headers - list(APPEND generated_includes ${expanded_files_h}) - -endmacro(GR_EXPAND_X_H) - -macro(GR_EXPAND_X_CC_H component root) - - include(GrPython) - - file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py -"#!${PYTHON_EXECUTABLE} - -import sys, os, re -sys.path.append('${GR_RUNTIME_PYTHONPATH}') -sys.path.append('${CMAKE_SOURCE_DIR}/python') -os.environ['srcdir'] = '${CMAKE_CURRENT_SOURCE_DIR}' 
-os.chdir('${CMAKE_CURRENT_BINARY_DIR}') - -if __name__ == '__main__': - import build_utils - root, inp = sys.argv[1:3] - for sig in sys.argv[3:]: - name = re.sub ('X+', sig, root) - d = build_utils.standard_impl_dict2(name, sig, '${component}') - build_utils.expand_template(d, inp) -") - - #make a list of all the generated files - unset(expanded_files_cc) - unset(expanded_files_h) - foreach(sig ${ARGN}) - string(REGEX REPLACE "X+" ${sig} name ${root}) - list(APPEND expanded_files_cc ${CMAKE_CURRENT_BINARY_DIR}/${name}.cc) - list(APPEND expanded_files_h ${CMAKE_CURRENT_BINARY_DIR}/${name}.h) - endforeach(sig) - unset(name) - - #create a command to generate the source files - add_custom_command( - OUTPUT ${expanded_files_cc} - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}.cc.t - COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} - ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py - ${root} ${root}.cc.t ${ARGN} - ) - - #create a command to generate the header files - add_custom_command( - OUTPUT ${expanded_files_h} - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}.h.t - COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} - ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py - ${root} ${root}.h.t ${ARGN} - ) - - #make source files depends on headers to force generation - set_source_files_properties(${expanded_files_cc} - PROPERTIES OBJECT_DEPENDS "${expanded_files_h}" - ) - - #install rules for the generated files - list(APPEND generated_sources ${expanded_files_cc}) - list(APPEND generated_headers ${expanded_files_h}) - -endmacro(GR_EXPAND_X_CC_H) - -macro(GR_EXPAND_X_CC_H_IMPL component root) - - include(GrPython) - - file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py -"#!${PYTHON_EXECUTABLE} - -import sys, os, re -sys.path.append('${GR_RUNTIME_PYTHONPATH}') -sys.path.append('${CMAKE_SOURCE_DIR}/python') -os.environ['srcdir'] = '${CMAKE_CURRENT_SOURCE_DIR}' -os.chdir('${CMAKE_CURRENT_BINARY_DIR}') - -if __name__ == '__main__': - import build_utils - root, inp = sys.argv[1:3] - for sig in sys.argv[3:]: - name = re.sub ('X+', sig, root) - d = build_utils.standard_dict(name, sig, '${component}') - build_utils.expand_template(d, inp, '_impl') -") - - #make a list of all the generated files - unset(expanded_files_cc_impl) - unset(expanded_files_h_impl) - unset(expanded_files_h) - foreach(sig ${ARGN}) - string(REGEX REPLACE "X+" ${sig} name ${root}) - list(APPEND expanded_files_cc_impl ${CMAKE_CURRENT_BINARY_DIR}/${name}_impl.cc) - list(APPEND expanded_files_h_impl ${CMAKE_CURRENT_BINARY_DIR}/${name}_impl.h) - list(APPEND expanded_files_h ${CMAKE_CURRENT_BINARY_DIR}/../include/gnuradio/${component}/${name}.h) - endforeach(sig) - unset(name) - - #create a command to generate the _impl.cc files - add_custom_command( - OUTPUT ${expanded_files_cc_impl} - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}_impl.cc.t - COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} - ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py - ${root} ${root}_impl.cc.t ${ARGN} - ) - - #create a command to generate the _impl.h files - add_custom_command( - OUTPUT ${expanded_files_h_impl} - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${root}_impl.h.t - COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} - ${CMAKE_CURRENT_BINARY_DIR}/generate_helper.py - ${root} ${root}_impl.h.t ${ARGN} - ) - - #make _impl.cc source files depend on _impl.h to force generation - set_source_files_properties(${expanded_files_cc_impl} - PROPERTIES OBJECT_DEPENDS "${expanded_files_h_impl}" - ) - - #make _impl.h source files depend on headers to force generation - 
set_source_files_properties(${expanded_files_h_impl} - PROPERTIES OBJECT_DEPENDS "${expanded_files_h}" - ) - - #install rules for the generated files - list(APPEND generated_sources ${expanded_files_cc_impl}) - list(APPEND generated_headers ${expanded_files_h_impl}) - -endmacro(GR_EXPAND_X_CC_H_IMPL) diff --git a/cmake/Modules/GrPlatform.cmake b/cmake/Modules/GrPlatform.cmake deleted file mode 100644 index 00a53d0..0000000 --- a/cmake/Modules/GrPlatform.cmake +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2011 Free Software Foundation, Inc. -# -# This file is part of GNU Radio -# -# GNU Radio is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. -# -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. - -if(DEFINED __INCLUDED_GR_PLATFORM_CMAKE) - return() -endif() -set(__INCLUDED_GR_PLATFORM_CMAKE TRUE) - -######################################################################## -# Setup additional defines for OS types -######################################################################## -if(CMAKE_SYSTEM_NAME STREQUAL "Linux") - set(LINUX TRUE) -endif() - -if(NOT CMAKE_CROSSCOMPILING AND LINUX AND EXISTS "/etc/debian_version") - set(DEBIAN TRUE) -endif() - -if(NOT CMAKE_CROSSCOMPILING AND LINUX AND EXISTS "/etc/redhat-release") - set(REDHAT TRUE) -endif() - -if(NOT CMAKE_CROSSCOMPILING AND LINUX AND EXISTS "/etc/slackware-version") - set(SLACKWARE TRUE) -endif() - -######################################################################## -# when the library suffix should be 64 (applies to redhat linux family) -######################################################################## -if (REDHAT OR SLACKWARE) - set(LIB64_CONVENTION TRUE) -endif() - -if(NOT DEFINED LIB_SUFFIX AND LIB64_CONVENTION AND CMAKE_SYSTEM_PROCESSOR MATCHES "64$") - set(LIB_SUFFIX 64) -endif() - -######################################################################## -# Detect /lib versus /lib64 -######################################################################## -if (CMAKE_INSTALL_LIBDIR MATCHES lib64) - set(LIB_SUFFIX 64) -endif() - -set(LIB_SUFFIX ${LIB_SUFFIX} CACHE STRING "lib directory suffix") diff --git a/cmake/Modules/GrPython.cmake b/cmake/Modules/GrPython.cmake deleted file mode 100644 index 06e061e..0000000 --- a/cmake/Modules/GrPython.cmake +++ /dev/null @@ -1,241 +0,0 @@ -# Copyright 2010-2011 Free Software Foundation, Inc. -# -# This file is part of GNU Radio -# -# GNU Radio is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. -# -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
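# Hedged sketch of the GR_EXPAND_X_* template macros defined above: a root
# name containing an X placeholder plus a list of type signatures expands
# one file set per signature from the corresponding .t templates. The root
# name and signatures below are hypothetical.
include(GrMiscUtils)
GR_EXPAND_X_CC_H_IMPL(example add_XX ii ff cc)
# expected result: add_ii_impl.{h,cc}, add_ff_impl.{h,cc} and add_cc_impl.{h,cc}
# generated from add_XX_impl.cc.t / add_XX_impl.h.t (the matching public
# headers would come from GR_EXPAND_X_H)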
-# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. - -if(DEFINED __INCLUDED_GR_PYTHON_CMAKE) - return() -endif() -set(__INCLUDED_GR_PYTHON_CMAKE TRUE) - -######################################################################## -# Setup the python interpreter: -# This allows the user to specify a specific interpreter, -# or finds the interpreter via the built-in cmake module. -######################################################################## -#this allows the user to override PYTHON_EXECUTABLE -if(PYTHON_EXECUTABLE) - - set(PYTHONINTERP_FOUND TRUE) - -#otherwise if not set, try to automatically find it -else(PYTHON_EXECUTABLE) - - #use the built-in find script - find_package(PythonInterp 2) - - #and if that fails use the find program routine - if(NOT PYTHONINTERP_FOUND) - find_program(PYTHON_EXECUTABLE NAMES python python2 python2.7 python2.6 python2.5) - if(PYTHON_EXECUTABLE) - set(PYTHONINTERP_FOUND TRUE) - endif(PYTHON_EXECUTABLE) - endif(NOT PYTHONINTERP_FOUND) - -endif(PYTHON_EXECUTABLE) - -if (CMAKE_CROSSCOMPILING) - set(QA_PYTHON_EXECUTABLE "/usr/bin/python") -else (CMAKE_CROSSCOMPILING) - set(QA_PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE}) -endif(CMAKE_CROSSCOMPILING) - -#make the path to the executable appear in the cmake gui -set(PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE} CACHE FILEPATH "python interpreter") -set(QA_PYTHON_EXECUTABLE ${QA_PYTHON_EXECUTABLE} CACHE FILEPATH "python interpreter for QA tests") - -#make sure we can use -B with python (introduced in 2.6) -if(PYTHON_EXECUTABLE) - execute_process( - COMMAND ${PYTHON_EXECUTABLE} -B -c "" - OUTPUT_QUIET ERROR_QUIET - RESULT_VARIABLE PYTHON_HAS_DASH_B_RESULT - ) - if(PYTHON_HAS_DASH_B_RESULT EQUAL 0) - set(PYTHON_DASH_B "-B") - endif() -endif(PYTHON_EXECUTABLE) - -######################################################################## -# Check for the existence of a python module: -# - desc a string description of the check -# - mod the name of the module to import -# - cmd an additional command to run -# - have the result variable to set -######################################################################## -macro(GR_PYTHON_CHECK_MODULE desc mod cmd have) - message(STATUS "") - message(STATUS "Python checking for ${desc}") - execute_process( - COMMAND ${PYTHON_EXECUTABLE} -c " -######################################### -try: - import ${mod} - assert ${cmd} -except ImportError, AssertionError: exit(-1) -except: pass -#########################################" - RESULT_VARIABLE ${have} - ) - if(${have} EQUAL 0) - message(STATUS "Python checking for ${desc} - found") - set(${have} TRUE) - else(${have} EQUAL 0) - message(STATUS "Python checking for ${desc} - not found") - set(${have} FALSE) - endif(${have} EQUAL 0) -endmacro(GR_PYTHON_CHECK_MODULE) - -######################################################################## -# Sets the python installation directory GR_PYTHON_DIR -######################################################################## -if(NOT DEFINED GR_PYTHON_DIR) -execute_process(COMMAND ${PYTHON_EXECUTABLE} -c " -from distutils import sysconfig -print sysconfig.get_python_lib(plat_specific=True, prefix='') -" OUTPUT_VARIABLE GR_PYTHON_DIR OUTPUT_STRIP_TRAILING_WHITESPACE -) -endif() -file(TO_CMAKE_PATH ${GR_PYTHON_DIR} GR_PYTHON_DIR) - -######################################################################## -# Create an always-built 
target with a unique name -# Usage: GR_UNIQUE_TARGET( ) -######################################################################## -function(GR_UNIQUE_TARGET desc) - file(RELATIVE_PATH reldir ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR}) - execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import re, hashlib -unique = hashlib.md5('${reldir}${ARGN}').hexdigest()[:5] -print(re.sub('\\W', '_', '${desc} ${reldir} ' + unique))" - OUTPUT_VARIABLE _target OUTPUT_STRIP_TRAILING_WHITESPACE) - add_custom_target(${_target} ALL DEPENDS ${ARGN}) -endfunction(GR_UNIQUE_TARGET) - -######################################################################## -# Install python sources (also builds and installs byte-compiled python) -######################################################################## -function(GR_PYTHON_INSTALL) - include(CMakeParseArgumentsCopy) - CMAKE_PARSE_ARGUMENTS(GR_PYTHON_INSTALL "" "DESTINATION;COMPONENT" "FILES;PROGRAMS" ${ARGN}) - - #################################################################### - if(GR_PYTHON_INSTALL_FILES) - #################################################################### - install(${ARGN}) #installs regular python files - - #create a list of all generated files - unset(pysrcfiles) - unset(pycfiles) - unset(pyofiles) - foreach(pyfile ${GR_PYTHON_INSTALL_FILES}) - get_filename_component(pyfile ${pyfile} ABSOLUTE) - list(APPEND pysrcfiles ${pyfile}) - - #determine if this file is in the source or binary directory - file(RELATIVE_PATH source_rel_path ${CMAKE_CURRENT_SOURCE_DIR} ${pyfile}) - string(LENGTH "${source_rel_path}" source_rel_path_len) - file(RELATIVE_PATH binary_rel_path ${CMAKE_CURRENT_BINARY_DIR} ${pyfile}) - string(LENGTH "${binary_rel_path}" binary_rel_path_len) - - #and set the generated path appropriately - if(${source_rel_path_len} GREATER ${binary_rel_path_len}) - set(pygenfile ${CMAKE_CURRENT_BINARY_DIR}/${binary_rel_path}) - else() - set(pygenfile ${CMAKE_CURRENT_BINARY_DIR}/${source_rel_path}) - endif() - list(APPEND pycfiles ${pygenfile}c) - list(APPEND pyofiles ${pygenfile}o) - - #ensure generation path exists - get_filename_component(pygen_path ${pygenfile} PATH) - file(MAKE_DIRECTORY ${pygen_path}) - - endforeach(pyfile) - - #the command to generate the pyc files - add_custom_command( - DEPENDS ${pysrcfiles} OUTPUT ${pycfiles} - COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_BINARY_DIR}/python_compile_helper.py ${pysrcfiles} ${pycfiles} - ) - - #the command to generate the pyo files - add_custom_command( - DEPENDS ${pysrcfiles} OUTPUT ${pyofiles} - COMMAND ${PYTHON_EXECUTABLE} -O ${CMAKE_BINARY_DIR}/python_compile_helper.py ${pysrcfiles} ${pyofiles} - ) - - #create install rule and add generated files to target list - set(python_install_gen_targets ${pycfiles} ${pyofiles}) - install(FILES ${python_install_gen_targets} - DESTINATION ${GR_PYTHON_INSTALL_DESTINATION} - COMPONENT ${GR_PYTHON_INSTALL_COMPONENT} - ) - - #################################################################### - elseif(GR_PYTHON_INSTALL_PROGRAMS) - #################################################################### - file(TO_NATIVE_PATH ${PYTHON_EXECUTABLE} pyexe_native) - - if (CMAKE_CROSSCOMPILING) - set(pyexe_native "/usr/bin/env python") - endif() - - foreach(pyfile ${GR_PYTHON_INSTALL_PROGRAMS}) - get_filename_component(pyfile_name ${pyfile} NAME) - get_filename_component(pyfile ${pyfile} ABSOLUTE) - string(REPLACE "${CMAKE_SOURCE_DIR}" "${CMAKE_BINARY_DIR}" pyexefile "${pyfile}.exe") - list(APPEND python_install_gen_targets ${pyexefile}) - - 
get_filename_component(pyexefile_path ${pyexefile} PATH) - file(MAKE_DIRECTORY ${pyexefile_path}) - - add_custom_command( - OUTPUT ${pyexefile} DEPENDS ${pyfile} - COMMAND ${PYTHON_EXECUTABLE} -c - "import re; R=re.compile('^\#!.*$\\n',flags=re.MULTILINE); open('${pyexefile}','w').write('\#!${pyexe_native}\\n'+R.sub('',open('${pyfile}','r').read()))" - COMMENT "Shebangin ${pyfile_name}" - VERBATIM - ) - - #on windows, python files need an extension to execute - get_filename_component(pyfile_ext ${pyfile} EXT) - if(WIN32 AND NOT pyfile_ext) - set(pyfile_name "${pyfile_name}.py") - endif() - - install(PROGRAMS ${pyexefile} RENAME ${pyfile_name} - DESTINATION ${GR_PYTHON_INSTALL_DESTINATION} - COMPONENT ${GR_PYTHON_INSTALL_COMPONENT} - ) - endforeach(pyfile) - - endif() - - GR_UNIQUE_TARGET("pygen" ${python_install_gen_targets}) - -endfunction(GR_PYTHON_INSTALL) - -######################################################################## -# Write the python helper script that generates byte code files -######################################################################## -file(WRITE ${CMAKE_BINARY_DIR}/python_compile_helper.py " -import sys, py_compile -files = sys.argv[1:] -srcs, gens = files[:len(files)/2], files[len(files)/2:] -for src, gen in zip(srcs, gens): - py_compile.compile(file=src, cfile=gen, doraise=True) -") diff --git a/cmake/Modules/GrSwig.cmake b/cmake/Modules/GrSwig.cmake deleted file mode 100644 index 33f37d2..0000000 --- a/cmake/Modules/GrSwig.cmake +++ /dev/null @@ -1,256 +0,0 @@ -# Copyright 2010-2011 Free Software Foundation, Inc. -# -# This file is part of GNU Radio -# -# GNU Radio is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. -# -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. - -if(DEFINED __INCLUDED_GR_SWIG_CMAKE) - return() -endif() -set(__INCLUDED_GR_SWIG_CMAKE TRUE) - -include(GrPython) - -######################################################################## -# Builds a swig documentation file to be generated into python docstrings -# Usage: GR_SWIG_MAKE_DOCS(output_file input_path input_path....) 
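# Minimal illustration of the GR_PYTHON_INSTALL function defined above. The
# FILES form installs the listed sources and also byte-compiles and installs
# the matching .pyc/.pyo files; the package path below is hypothetical.
include(GrPython)
GR_PYTHON_INSTALL(
    FILES __init__.py example_block.py
    DESTINATION ${GR_PYTHON_DIR}/example
)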
-# -# Set the following variable to specify extra dependent targets: -# - GR_SWIG_DOCS_SOURCE_DEPS -# - GR_SWIG_DOCS_TARGET_DEPS -######################################################################## -function(GR_SWIG_MAKE_DOCS output_file) - if(ENABLE_DOXYGEN) - - #setup the input files variable list, quote formated - set(input_files) - unset(INPUT_PATHS) - foreach(input_path ${ARGN}) - if(IS_DIRECTORY ${input_path}) #when input path is a directory - file(GLOB input_path_h_files ${input_path}/*.h) - else() #otherwise its just a file, no glob - set(input_path_h_files ${input_path}) - endif() - list(APPEND input_files ${input_path_h_files}) - set(INPUT_PATHS "${INPUT_PATHS} \"${input_path}\"") - endforeach(input_path) - - #determine the output directory - get_filename_component(name ${output_file} NAME_WE) - get_filename_component(OUTPUT_DIRECTORY ${output_file} PATH) - set(OUTPUT_DIRECTORY ${OUTPUT_DIRECTORY}/${name}_swig_docs) - make_directory(${OUTPUT_DIRECTORY}) - - #generate the Doxyfile used by doxygen - configure_file( - ${CMAKE_SOURCE_DIR}/docs/doxygen/Doxyfile.swig_doc.in - ${OUTPUT_DIRECTORY}/Doxyfile - @ONLY) - - #Create a dummy custom command that depends on other targets - include(GrMiscUtils) - GR_GEN_TARGET_DEPS(_${name}_tag tag_deps ${GR_SWIG_DOCS_TARGET_DEPS}) - - #call doxygen on the Doxyfile + input headers - add_custom_command( - OUTPUT ${OUTPUT_DIRECTORY}/xml/index.xml - DEPENDS ${input_files} ${GR_SWIG_DOCS_SOURCE_DEPS} ${tag_deps} - COMMAND ${DOXYGEN_EXECUTABLE} ${OUTPUT_DIRECTORY}/Doxyfile - COMMENT "Generating doxygen xml for ${name} docs" - ) - - #call the swig_doc script on the xml files - add_custom_command( - OUTPUT ${output_file} - DEPENDS ${input_files} ${stamp-file} ${OUTPUT_DIRECTORY}/xml/index.xml - COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} - ${CMAKE_SOURCE_DIR}/docs/doxygen/swig_doc.py - ${OUTPUT_DIRECTORY}/xml - ${output_file} - COMMENT "Generating python docstrings for ${name}" - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/docs/doxygen - ) - - else(ENABLE_DOXYGEN) - file(WRITE ${output_file} "\n") #no doxygen -> empty file - endif(ENABLE_DOXYGEN) -endfunction(GR_SWIG_MAKE_DOCS) - -######################################################################## -# Build a swig target for the common gnuradio use case. Usage: -# GR_SWIG_MAKE(target ifile ifile ifile...) -# -# Set the following variables before calling: -# - GR_SWIG_FLAGS -# - GR_SWIG_INCLUDE_DIRS -# - GR_SWIG_LIBRARIES -# - GR_SWIG_SOURCE_DEPS -# - GR_SWIG_TARGET_DEPS -# - GR_SWIG_DOC_FILE -# - GR_SWIG_DOC_DIRS -######################################################################## -macro(GR_SWIG_MAKE name) - set(ifiles ${ARGN}) - - # Shimming this in here to take care of a SWIG bug with handling - # vector and vector (on 32-bit machines) and - # vector (on 64-bit machines). Use this to test - # the size of size_t, then set SIZE_T_32 if it's a 32-bit machine - # or not if it's 64-bit. The logic in gr_type.i handles the rest. 
- INCLUDE(CheckTypeSize) - CHECK_TYPE_SIZE("size_t" SIZEOF_SIZE_T) - CHECK_TYPE_SIZE("unsigned int" SIZEOF_UINT) - if(${SIZEOF_SIZE_T} EQUAL ${SIZEOF_UINT}) - list(APPEND GR_SWIG_FLAGS -DSIZE_T_32) - endif(${SIZEOF_SIZE_T} EQUAL ${SIZEOF_UINT}) - - #do swig doc generation if specified - if(GR_SWIG_DOC_FILE) - set(GR_SWIG_DOCS_SOURCE_DEPS ${GR_SWIG_SOURCE_DEPS}) - list(APPEND GR_SWIG_DOCS_TARGET_DEPS ${GR_SWIG_TARGET_DEPS}) - GR_SWIG_MAKE_DOCS(${GR_SWIG_DOC_FILE} ${GR_SWIG_DOC_DIRS}) - add_custom_target(${name}_swig_doc DEPENDS ${GR_SWIG_DOC_FILE}) - list(APPEND GR_SWIG_TARGET_DEPS ${name}_swig_doc ${GR_RUNTIME_SWIG_DOC_FILE}) - endif() - - #append additional include directories - find_package(PythonLibs 2) - list(APPEND GR_SWIG_INCLUDE_DIRS ${PYTHON_INCLUDE_PATH}) #deprecated name (now dirs) - list(APPEND GR_SWIG_INCLUDE_DIRS ${PYTHON_INCLUDE_DIRS}) - - #prepend local swig directories - list(INSERT GR_SWIG_INCLUDE_DIRS 0 ${CMAKE_CURRENT_SOURCE_DIR}) - list(INSERT GR_SWIG_INCLUDE_DIRS 0 ${CMAKE_CURRENT_BINARY_DIR}) - - #determine include dependencies for swig file - execute_process( - COMMAND ${PYTHON_EXECUTABLE} - ${CMAKE_BINARY_DIR}/get_swig_deps.py - "${ifiles}" "${GR_SWIG_INCLUDE_DIRS}" - OUTPUT_STRIP_TRAILING_WHITESPACE - OUTPUT_VARIABLE SWIG_MODULE_${name}_EXTRA_DEPS - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} - ) - - #Create a dummy custom command that depends on other targets - include(GrMiscUtils) - GR_GEN_TARGET_DEPS(_${name}_swig_tag tag_deps ${GR_SWIG_TARGET_DEPS}) - set(tag_file ${CMAKE_CURRENT_BINARY_DIR}/${name}.tag) - add_custom_command( - OUTPUT ${tag_file} - DEPENDS ${GR_SWIG_SOURCE_DEPS} ${tag_deps} - COMMAND ${CMAKE_COMMAND} -E touch ${tag_file} - ) - - #append the specified include directories - include_directories(${GR_SWIG_INCLUDE_DIRS}) - list(APPEND SWIG_MODULE_${name}_EXTRA_DEPS ${tag_file}) - - #setup the swig flags with flags and include directories - set(CMAKE_SWIG_FLAGS -fvirtual -modern -keyword -w511 -module ${name} ${GR_SWIG_FLAGS}) - foreach(dir ${GR_SWIG_INCLUDE_DIRS}) - list(APPEND CMAKE_SWIG_FLAGS "-I${dir}") - endforeach(dir) - - #set the C++ property on the swig .i file so it builds - set_source_files_properties(${ifiles} PROPERTIES CPLUSPLUS ON) - - #setup the actual swig library target to be built - include(UseSWIG) - SWIG_ADD_MODULE(${name} python ${ifiles}) - if(APPLE) - set(PYTHON_LINK_OPTIONS "-undefined dynamic_lookup") - else() - set(PYTHON_LINK_OPTIONS ${PYTHON_LIBRARIES}) - endif(APPLE) - SWIG_LINK_LIBRARIES(${name} ${PYTHON_LINK_OPTIONS} ${GR_SWIG_LIBRARIES}) - if(${name} STREQUAL "runtime_swig") - SET_TARGET_PROPERTIES(${SWIG_MODULE_runtime_swig_REAL_NAME} PROPERTIES DEFINE_SYMBOL "gnuradio_runtime_EXPORTS") - endif(${name} STREQUAL "runtime_swig") - -endmacro(GR_SWIG_MAKE) - -######################################################################## -# Install swig targets generated by GR_SWIG_MAKE. Usage: -# GR_SWIG_INSTALL( -# TARGETS target target target... 
-# [DESTINATION destination] -# [COMPONENT component] -# ) -######################################################################## -macro(GR_SWIG_INSTALL) - - include(CMakeParseArgumentsCopy) - CMAKE_PARSE_ARGUMENTS(GR_SWIG_INSTALL "" "DESTINATION;COMPONENT" "TARGETS" ${ARGN}) - - foreach(name ${GR_SWIG_INSTALL_TARGETS}) - install(TARGETS ${SWIG_MODULE_${name}_REAL_NAME} - DESTINATION ${GR_SWIG_INSTALL_DESTINATION} - COMPONENT ${GR_SWIG_INSTALL_COMPONENT} - ) - - include(GrPython) - GR_PYTHON_INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/${name}.py - DESTINATION ${GR_SWIG_INSTALL_DESTINATION} - COMPONENT ${GR_SWIG_INSTALL_COMPONENT} - ) - - GR_LIBTOOL( - TARGET ${SWIG_MODULE_${name}_REAL_NAME} - DESTINATION ${GR_SWIG_INSTALL_DESTINATION} - ) - - endforeach(name) - -endmacro(GR_SWIG_INSTALL) - -######################################################################## -# Generate a python file that can determine swig dependencies. -# Used by the make macro above to determine extra dependencies. -# When you build C++, CMake figures out the header dependencies. -# This code essentially performs that logic for swig includes. -######################################################################## -file(WRITE ${CMAKE_BINARY_DIR}/get_swig_deps.py " - -import os, sys, re - -i_include_matcher = re.compile('%(include|import)\\s*[<|\"](.*)[>|\"]') -h_include_matcher = re.compile('#(include)\\s*[<|\"](.*)[>|\"]') -include_dirs = sys.argv[2].split(';') - -def get_swig_incs(file_path): - if file_path.endswith('.i'): matcher = i_include_matcher - else: matcher = h_include_matcher - file_contents = open(file_path, 'r').read() - return matcher.findall(file_contents, re.MULTILINE) - -def get_swig_deps(file_path, level): - deps = [file_path] - if level == 0: return deps - for keyword, inc_file in get_swig_incs(file_path): - for inc_dir in include_dirs: - inc_path = os.path.join(inc_dir, inc_file) - if not os.path.exists(inc_path): continue - deps.extend(get_swig_deps(inc_path, level-1)) - break #found, we dont search in lower prio inc dirs - return deps - -if __name__ == '__main__': - ifiles = sys.argv[1].split(';') - deps = sum([get_swig_deps(ifile, 3) for ifile in ifiles], []) - #sys.stderr.write(';'.join(set(deps)) + '\\n\\n') - print(';'.join(set(deps))) -") diff --git a/cmake/Modules/GrTest.cmake b/cmake/Modules/GrTest.cmake deleted file mode 100644 index 62caab4..0000000 --- a/cmake/Modules/GrTest.cmake +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2010-2011 Free Software Foundation, Inc. -# -# This file is part of GNU Radio -# -# GNU Radio is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. -# -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. - -if(DEFINED __INCLUDED_GR_TEST_CMAKE) - return() -endif() -set(__INCLUDED_GR_TEST_CMAKE TRUE) - -######################################################################## -# Add a unit test and setup the environment for a unit test. -# Takes the same arguments as the ADD_TEST function. 
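# Hedged sketch of the GR_SWIG_MAKE / GR_SWIG_INSTALL pair defined above,
# roughly as an out-of-tree module's swig/CMakeLists.txt would call them
# before the move to pybind11. Names and paths are illustrative only.
include(GrSwig)
set(GR_SWIG_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/../include ${GNURADIO_RUNTIME_INCLUDE_DIRS})
set(GR_SWIG_LIBRARIES gnuradio-example)
set(GR_SWIG_DOC_FILE ${CMAKE_CURRENT_BINARY_DIR}/example_swig_doc.i)
set(GR_SWIG_DOC_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/../include)
GR_SWIG_MAKE(example_swig example_swig.i)
GR_SWIG_INSTALL(TARGETS example_swig DESTINATION ${GR_PYTHON_DIR}/example)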
-# -# Before calling set the following variables: -# GR_TEST_TARGET_DEPS - built targets for the library path -# GR_TEST_LIBRARY_DIRS - directories for the library path -# GR_TEST_PYTHON_DIRS - directories for the python path -# GR_TEST_ENVIRONS - other environment key/value pairs -######################################################################## -function(GR_ADD_TEST test_name) - - #Ensure that the build exe also appears in the PATH. - list(APPEND GR_TEST_TARGET_DEPS ${ARGN}) - - #In the land of windows, all libraries must be in the PATH. - #Since the dependent libraries are not yet installed, - #we must manually set them in the PATH to run tests. - #The following appends the path of a target dependency. - foreach(target ${GR_TEST_TARGET_DEPS}) - get_target_property(location ${target} LOCATION) - if(location) - get_filename_component(path ${location} PATH) - string(REGEX REPLACE "\\$\\(.*\\)" ${CMAKE_BUILD_TYPE} path ${path}) - list(APPEND GR_TEST_LIBRARY_DIRS ${path}) - endif(location) - endforeach(target) - - if(WIN32) - #SWIG generates the python library files into a subdirectory. - #Therefore, we must append this subdirectory into PYTHONPATH. - #Only do this for the python directories matching the following: - foreach(pydir ${GR_TEST_PYTHON_DIRS}) - get_filename_component(name ${pydir} NAME) - if(name MATCHES "^(swig|lib|src)$") - list(APPEND GR_TEST_PYTHON_DIRS ${pydir}/${CMAKE_BUILD_TYPE}) - endif() - endforeach(pydir) - endif(WIN32) - - file(TO_NATIVE_PATH ${CMAKE_CURRENT_SOURCE_DIR} srcdir) - file(TO_NATIVE_PATH "${GR_TEST_LIBRARY_DIRS}" libpath) #ok to use on dir list? - file(TO_NATIVE_PATH "${GR_TEST_PYTHON_DIRS}" pypath) #ok to use on dir list? - - set(environs "VOLK_GENERIC=1" "GR_DONT_LOAD_PREFS=1" "srcdir=${srcdir}") - list(APPEND environs ${GR_TEST_ENVIRONS}) - - #http://www.cmake.org/pipermail/cmake/2009-May/029464.html - #Replaced this add test + set environs code with the shell script generation. - #Its nicer to be able to manually run the shell script to diagnose problems. 
- #ADD_TEST(${ARGV}) - #SET_TESTS_PROPERTIES(${test_name} PROPERTIES ENVIRONMENT "${environs}") - - if(UNIX) - set(LD_PATH_VAR "LD_LIBRARY_PATH") - if(APPLE) - set(LD_PATH_VAR "DYLD_LIBRARY_PATH") - endif() - - set(binpath "${CMAKE_CURRENT_BINARY_DIR}:$PATH") - list(APPEND libpath "$${LD_PATH_VAR}") - list(APPEND pypath "$PYTHONPATH") - - #replace list separator with the path separator - string(REPLACE ";" ":" libpath "${libpath}") - string(REPLACE ";" ":" pypath "${pypath}") - list(APPEND environs "PATH=${binpath}" "${LD_PATH_VAR}=${libpath}" "PYTHONPATH=${pypath}") - - #generate a bat file that sets the environment and runs the test - if (CMAKE_CROSSCOMPILING) - set(SHELL "/bin/sh") - else(CMAKE_CROSSCOMPILING) - find_program(SHELL sh) - endif(CMAKE_CROSSCOMPILING) - set(sh_file ${CMAKE_CURRENT_BINARY_DIR}/${test_name}_test.sh) - file(WRITE ${sh_file} "#!${SHELL}\n") - #each line sets an environment variable - foreach(environ ${environs}) - file(APPEND ${sh_file} "export ${environ}\n") - endforeach(environ) - #load the command to run with its arguments - foreach(arg ${ARGN}) - file(APPEND ${sh_file} "${arg} ") - endforeach(arg) - file(APPEND ${sh_file} "\n") - - #make the shell file executable - execute_process(COMMAND chmod +x ${sh_file}) - - add_test(${test_name} ${SHELL} ${sh_file}) - - endif(UNIX) - - if(WIN32) - list(APPEND libpath ${DLL_PATHS} "%PATH%") - list(APPEND pypath "%PYTHONPATH%") - - #replace list separator with the path separator (escaped) - string(REPLACE ";" "\\;" libpath "${libpath}") - string(REPLACE ";" "\\;" pypath "${pypath}") - list(APPEND environs "PATH=${libpath}" "PYTHONPATH=${pypath}") - - #generate a bat file that sets the environment and runs the test - set(bat_file ${CMAKE_CURRENT_BINARY_DIR}/${test_name}_test.bat) - file(WRITE ${bat_file} "@echo off\n") - #each line sets an environment variable - foreach(environ ${environs}) - file(APPEND ${bat_file} "SET ${environ}\n") - endforeach(environ) - #load the command to run with its arguments - foreach(arg ${ARGN}) - file(APPEND ${bat_file} "${arg} ") - endforeach(arg) - file(APPEND ${bat_file} "\n") - - add_test(${test_name} ${bat_file}) - endif(WIN32) - -endfunction(GR_ADD_TEST) diff --git a/cmake/Modules/UseSWIG.cmake b/cmake/Modules/UseSWIG.cmake deleted file mode 100644 index c0f1728..0000000 --- a/cmake/Modules/UseSWIG.cmake +++ /dev/null @@ -1,304 +0,0 @@ -# - SWIG module for CMake -# Defines the following macros: -# SWIG_ADD_MODULE(name language [ files ]) -# - Define swig module with given name and specified language -# SWIG_LINK_LIBRARIES(name [ libraries ]) -# - Link libraries to swig module -# All other macros are for internal use only. -# To get the actual name of the swig module, -# use: ${SWIG_MODULE_${name}_REAL_NAME}. -# Set Source files properties such as CPLUSPLUS and SWIG_FLAGS to specify -# special behavior of SWIG. Also global CMAKE_SWIG_FLAGS can be used to add -# special flags to all swig calls. -# Another special variable is CMAKE_SWIG_OUTDIR, it allows one to specify -# where to write all the swig generated module (swig -outdir option) -# The name-specific variable SWIG_MODULE__EXTRA_DEPS may be used -# to specify extra dependencies for the generated modules. -# If the source file generated by swig need some special flag you can use -# set_source_files_properties( ${swig_generated_file_fullname} -# PROPERTIES COMPILE_FLAGS "-bla") - - -#============================================================================= -# Copyright 2004-2009 Kitware, Inc. 
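# Illustrative use of the GR_ADD_TEST function defined above for a Python QA
# test; the generated shell (or bat) wrapper sets the library and Python
# paths before running the test. The test name below is hypothetical.
include(GrTest)
set(GR_TEST_TARGET_DEPS gnuradio-example)
set(GR_TEST_PYTHON_DIRS ${CMAKE_BINARY_DIR}/swig ${CMAKE_SOURCE_DIR}/python)
GR_ADD_TEST(qa_example ${QA_PYTHON_EXECUTABLE} -B ${CMAKE_CURRENT_SOURCE_DIR}/qa_example.py)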
-# Copyright 2009 Mathieu Malaterre -# -# Distributed under the OSI-approved BSD License (the "License"); -# see accompanying file Copyright.txt for details. -# -# This software is distributed WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the License for more information. -#============================================================================= -# (To distribute this file outside of CMake, substitute the full -# License text for the above reference.) - -set(SWIG_CXX_EXTENSION "cxx") -set(SWIG_EXTRA_LIBRARIES "") - -set(SWIG_PYTHON_EXTRA_FILE_EXTENSION "py") - -# -# For given swig module initialize variables associated with it -# -macro(SWIG_MODULE_INITIALIZE name language) - string(TOUPPER "${language}" swig_uppercase_language) - string(TOLOWER "${language}" swig_lowercase_language) - set(SWIG_MODULE_${name}_LANGUAGE "${swig_uppercase_language}") - set(SWIG_MODULE_${name}_SWIG_LANGUAGE_FLAG "${swig_lowercase_language}") - - set(SWIG_MODULE_${name}_REAL_NAME "${name}") - if("${SWIG_MODULE_${name}_LANGUAGE}" STREQUAL "UNKNOWN") - message(FATAL_ERROR "SWIG Error: Language \"${language}\" not found") - elseif("${SWIG_MODULE_${name}_LANGUAGE}" STREQUAL "PYTHON") - # when swig is used without the -interface it will produce in the module.py - # a 'import _modulename' statement, which implies having a corresponding - # _modulename.so (*NIX), _modulename.pyd (Win32). - set(SWIG_MODULE_${name}_REAL_NAME "_${name}") - elseif("${SWIG_MODULE_${name}_LANGUAGE}" STREQUAL "PERL") - set(SWIG_MODULE_${name}_EXTRA_FLAGS "-shadow") - endif() -endmacro() - -# -# For a given language, input file, and output file, determine extra files that -# will be generated. This is internal swig macro. 
-# - -macro(SWIG_GET_EXTRA_OUTPUT_FILES language outfiles generatedpath infile) - set(${outfiles} "") - get_source_file_property(SWIG_GET_EXTRA_OUTPUT_FILES_module_basename - ${infile} SWIG_MODULE_NAME) - if(SWIG_GET_EXTRA_OUTPUT_FILES_module_basename STREQUAL "NOTFOUND") - get_filename_component(SWIG_GET_EXTRA_OUTPUT_FILES_module_basename "${infile}" NAME_WE) - endif() - foreach(it ${SWIG_${language}_EXTRA_FILE_EXTENSION}) - set(${outfiles} ${${outfiles}} - "${generatedpath}/${SWIG_GET_EXTRA_OUTPUT_FILES_module_basename}.${it}") - endforeach() -endmacro() - -# -# Take swig (*.i) file and add proper custom commands for it -# -macro(SWIG_ADD_SOURCE_TO_MODULE name outfiles infile) - set(swig_full_infile ${infile}) - get_filename_component(swig_source_file_path "${infile}" PATH) - get_filename_component(swig_source_file_name_we "${infile}" NAME_WE) - get_source_file_property(swig_source_file_generated ${infile} GENERATED) - get_source_file_property(swig_source_file_cplusplus ${infile} CPLUSPLUS) - get_source_file_property(swig_source_file_flags ${infile} SWIG_FLAGS) - if("${swig_source_file_flags}" STREQUAL "NOTFOUND") - set(swig_source_file_flags "") - endif() - set(swig_source_file_fullname "${infile}") - if(${swig_source_file_path} MATCHES "^${CMAKE_CURRENT_SOURCE_DIR}") - string(REGEX REPLACE - "^${CMAKE_CURRENT_SOURCE_DIR}" "" - swig_source_file_relative_path - "${swig_source_file_path}") - else() - if(${swig_source_file_path} MATCHES "^${CMAKE_CURRENT_BINARY_DIR}") - string(REGEX REPLACE - "^${CMAKE_CURRENT_BINARY_DIR}" "" - swig_source_file_relative_path - "${swig_source_file_path}") - set(swig_source_file_generated 1) - else() - set(swig_source_file_relative_path "${swig_source_file_path}") - if(swig_source_file_generated) - set(swig_source_file_fullname "${CMAKE_CURRENT_BINARY_DIR}/${infile}") - else() - set(swig_source_file_fullname "${CMAKE_CURRENT_SOURCE_DIR}/${infile}") - endif() - endif() - endif() - - set(swig_generated_file_fullname - "${CMAKE_CURRENT_BINARY_DIR}") - if(swig_source_file_relative_path) - set(swig_generated_file_fullname - "${swig_generated_file_fullname}/${swig_source_file_relative_path}") - endif() - # If CMAKE_SWIG_OUTDIR was specified then pass it to -outdir - if(CMAKE_SWIG_OUTDIR) - set(swig_outdir ${CMAKE_SWIG_OUTDIR}) - else() - set(swig_outdir ${CMAKE_CURRENT_BINARY_DIR}) - endif() - SWIG_GET_EXTRA_OUTPUT_FILES(${SWIG_MODULE_${name}_LANGUAGE} - swig_extra_generated_files - "${swig_outdir}" - "${infile}") - set(swig_generated_file_fullname - "${swig_generated_file_fullname}/${swig_source_file_name_we}") - # add the language into the name of the file (i.e. 
TCL_wrap) - # this allows for the same .i file to be wrapped into different languages - set(swig_generated_file_fullname - "${swig_generated_file_fullname}${SWIG_MODULE_${name}_LANGUAGE}_wrap") - - if(swig_source_file_cplusplus) - set(swig_generated_file_fullname - "${swig_generated_file_fullname}.${SWIG_CXX_EXTENSION}") - else() - set(swig_generated_file_fullname - "${swig_generated_file_fullname}.c") - endif() - - # Shut up some warnings from poor SWIG code generation that we - # can do nothing about, when this flag is available - include(CheckCXXCompilerFlag) - check_cxx_compiler_flag("-Wno-unused-but-set-variable" HAVE_WNO_UNUSED_BUT_SET_VARIABLE) - if(HAVE_WNO_UNUSED_BUT_SET_VARIABLE) - set_source_files_properties(${swig_generated_file_fullname} - PROPERTIES COMPILE_FLAGS "-Wno-unused-but-set-variable") - endif(HAVE_WNO_UNUSED_BUT_SET_VARIABLE) - - get_directory_property(cmake_include_directories INCLUDE_DIRECTORIES) - set(swig_include_dirs) - foreach(it ${cmake_include_directories}) - set(swig_include_dirs ${swig_include_dirs} "-I${it}") - endforeach() - - set(swig_special_flags) - # default is c, so add c++ flag if it is c++ - if(swig_source_file_cplusplus) - set(swig_special_flags ${swig_special_flags} "-c++") - endif() - set(swig_extra_flags) - if(SWIG_MODULE_${name}_EXTRA_FLAGS) - set(swig_extra_flags ${swig_extra_flags} ${SWIG_MODULE_${name}_EXTRA_FLAGS}) - endif() - - # hack to work around CMake bug in add_custom_command with multiple OUTPUT files - - file(RELATIVE_PATH reldir ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR}) - execute_process( - COMMAND ${PYTHON_EXECUTABLE} -c "import re, hashlib -unique = hashlib.md5('${reldir}${ARGN}').hexdigest()[:5] -print(re.sub('\\W', '_', '${name} ${reldir} ' + unique))" - OUTPUT_VARIABLE _target OUTPUT_STRIP_TRAILING_WHITESPACE - ) - - file( - WRITE ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp.in - "int main(void){return 0;}\n" - ) - - # create dummy dependencies - add_custom_command( - OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp - COMMAND ${CMAKE_COMMAND} -E copy - ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp.in - ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp - DEPENDS "${swig_source_file_fullname}" ${SWIG_MODULE_${name}_EXTRA_DEPS} - COMMENT "" - ) - - # create the dummy target - add_executable(${_target} ${CMAKE_CURRENT_BINARY_DIR}/${_target}.cpp) - - # add a custom command to the dummy target - add_custom_command( - TARGET ${_target} - # Let's create the ${swig_outdir} at execution time, in case dir contains $(OutDir) - COMMAND ${CMAKE_COMMAND} -E make_directory ${swig_outdir} - COMMAND "${SWIG_EXECUTABLE}" - ARGS "-${SWIG_MODULE_${name}_SWIG_LANGUAGE_FLAG}" - ${swig_source_file_flags} - ${CMAKE_SWIG_FLAGS} - -outdir ${swig_outdir} - ${swig_special_flags} - ${swig_extra_flags} - ${swig_include_dirs} - -o "${swig_generated_file_fullname}" - "${swig_source_file_fullname}" - COMMENT "Swig source" - ) - - #add dummy independent dependencies from the _target to each file - #that will be generated by the SWIG command above - - set(${outfiles} "${swig_generated_file_fullname}" ${swig_extra_generated_files}) - - foreach(swig_gen_file ${${outfiles}}) - add_custom_command( - OUTPUT ${swig_gen_file} - COMMAND "" - DEPENDS ${_target} - COMMENT "" - ) - endforeach() - - set_source_files_properties( - ${outfiles} PROPERTIES GENERATED 1 - ) - -endmacro() - -# -# Create Swig module -# -macro(SWIG_ADD_MODULE name language) - SWIG_MODULE_INITIALIZE(${name} ${language}) - set(swig_dot_i_sources) - set(swig_other_sources) - foreach(it ${ARGN}) - 
if(${it} MATCHES ".*\\.i$") - set(swig_dot_i_sources ${swig_dot_i_sources} "${it}") - else() - set(swig_other_sources ${swig_other_sources} "${it}") - endif() - endforeach() - - set(swig_generated_sources) - foreach(it ${swig_dot_i_sources}) - SWIG_ADD_SOURCE_TO_MODULE(${name} swig_generated_source ${it}) - set(swig_generated_sources ${swig_generated_sources} "${swig_generated_source}") - endforeach() - get_directory_property(swig_extra_clean_files ADDITIONAL_MAKE_CLEAN_FILES) - set_directory_properties(PROPERTIES - ADDITIONAL_MAKE_CLEAN_FILES "${swig_extra_clean_files};${swig_generated_sources}") - add_library(${SWIG_MODULE_${name}_REAL_NAME} - MODULE - ${swig_generated_sources} - ${swig_other_sources}) - string(TOLOWER "${language}" swig_lowercase_language) - if ("${swig_lowercase_language}" STREQUAL "java") - if (APPLE) - # In java you want: - # System.loadLibrary("LIBRARY"); - # then JNI will look for a library whose name is platform dependent, namely - # MacOS : libLIBRARY.jnilib - # Windows: LIBRARY.dll - # Linux : libLIBRARY.so - set_target_properties (${SWIG_MODULE_${name}_REAL_NAME} PROPERTIES SUFFIX ".jnilib") - endif () - endif () - if ("${swig_lowercase_language}" STREQUAL "python") - # this is only needed for the python case where a _modulename.so is generated - set_target_properties(${SWIG_MODULE_${name}_REAL_NAME} PROPERTIES PREFIX "") - # Python extension modules on Windows must have the extension ".pyd" - # instead of ".dll" as of Python 2.5. Older python versions do support - # this suffix. - # http://docs.python.org/whatsnew/ports.html#SECTION0001510000000000000000 - # - # Windows: .dll is no longer supported as a filename extension for extension modules. - # .pyd is now the only filename extension that will be searched for. - # - if(WIN32 AND NOT CYGWIN) - set_target_properties(${SWIG_MODULE_${name}_REAL_NAME} PROPERTIES SUFFIX ".pyd") - endif() - endif () -endmacro() - -# -# Like TARGET_LINK_LIBRARIES but for swig modules -# -macro(SWIG_LINK_LIBRARIES name) - if(SWIG_MODULE_${name}_REAL_NAME) - target_link_libraries(${SWIG_MODULE_${name}_REAL_NAME} ${ARGN}) - else() - message(SEND_ERROR "Cannot find Swig library \"${name}\".") - endif() -endmacro() diff --git a/cmake/Modules/verilogConfig.cmake b/cmake/Modules/gnuradio-verilogConfig.cmake similarity index 56% rename from cmake/Modules/verilogConfig.cmake rename to cmake/Modules/gnuradio-verilogConfig.cmake index fca291a..fc11e46 100644 --- a/cmake/Modules/verilogConfig.cmake +++ b/cmake/Modules/gnuradio-verilogConfig.cmake @@ -1,9 +1,10 @@ -INCLUDE(FindPkgConfig) -PKG_CHECK_MODULES(PC_VERILOG verilog) +find_package(PkgConfig) + +PKG_CHECK_MODULES(PC_GR_VERILOG gnuradio-verilog) FIND_PATH( - VERILOG_INCLUDE_DIRS - NAMES verilog/api.h + GR_VERILOG_INCLUDE_DIRS + NAMES gnuradio/verilog/api.h HINTS $ENV{VERILOG_DIR}/include ${PC_VERILOG_INCLUDEDIR} PATHS ${CMAKE_INSTALL_PREFIX}/include @@ -12,7 +13,7 @@ FIND_PATH( ) FIND_LIBRARY( - VERILOG_LIBRARIES + GR_VERILOG_LIBRARIES NAMES gnuradio-verilog HINTS $ENV{VERILOG_DIR}/lib ${PC_VERILOG_LIBDIR} @@ -24,9 +25,8 @@ FIND_LIBRARY( /usr/lib64 ) -include("${CMAKE_CURRENT_LIST_DIR}/verilogTarget.cmake") +include("${CMAKE_CURRENT_LIST_DIR}/gnuradio-verilogTarget.cmake") INCLUDE(FindPackageHandleStandardArgs) -FIND_PACKAGE_HANDLE_STANDARD_ARGS(VERILOG DEFAULT_MSG VERILOG_LIBRARIES VERILOG_INCLUDE_DIRS) -MARK_AS_ADVANCED(VERILOG_LIBRARIES VERILOG_INCLUDE_DIRS) - +FIND_PACKAGE_HANDLE_STANDARD_ARGS(GR_VERILOG DEFAULT_MSG GR_VERILOG_LIBRARIES GR_VERILOG_INCLUDE_DIRS) 
+MARK_AS_ADVANCED(GR_VERILOG_LIBRARIES GR_VERILOG_INCLUDE_DIRS) diff --git a/cmake/Modules/targetConfig.cmake.in b/cmake/Modules/targetConfig.cmake.in index 79e4a28..4a1fb31 100644 --- a/cmake/Modules/targetConfig.cmake.in +++ b/cmake/Modules/targetConfig.cmake.in @@ -2,20 +2,8 @@ # # This file is part of GNU Radio # -# GNU Radio is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. +# SPDX-License-Identifier: GPL-3.0-or-later # -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. include(CMakeFindDependencyMacro) diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt index f16fbf6..ba13138 100644 --- a/docs/CMakeLists.txt +++ b/docs/CMakeLists.txt @@ -1,21 +1,10 @@ # Copyright 2011 Free Software Foundation, Inc. # -# This file is part of GNU Radio +# This file was generated by gr_modtool, a tool from the GNU Radio framework +# This file is a part of gr-verilog # -# GNU Radio is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. +# SPDX-License-Identifier: GPL-3.0-or-later # -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. ######################################################################## # Setup dependencies diff --git a/docs/doxygen/CMakeLists.txt b/docs/doxygen/CMakeLists.txt index 1b44799..543c82e 100644 --- a/docs/doxygen/CMakeLists.txt +++ b/docs/doxygen/CMakeLists.txt @@ -1,21 +1,10 @@ # Copyright 2011 Free Software Foundation, Inc. # -# This file is part of GNU Radio +# This file was generated by gr_modtool, a tool from the GNU Radio framework +# This file is a part of gr-verilog # -# GNU Radio is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. +# SPDX-License-Identifier: GPL-3.0-or-later # -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. 
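# The renamed gnuradio-verilogConfig.cmake earlier in this patch exports
# GR_VERILOG_* rather than VERILOG_* variables. A downstream project might
# consume it roughly like this (the consuming target name is hypothetical,
# and linking the target exported by gnuradio-verilogTarget.cmake is an
# equally valid route):
find_package(gnuradio-verilog REQUIRED)
include_directories(${GR_VERILOG_INCLUDE_DIRS})
target_link_libraries(my_flowgraph_app ${GR_VERILOG_LIBRARIES})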
######################################################################## # Create the doxygen configuration file @@ -28,6 +17,7 @@ file(TO_NATIVE_PATH ${CMAKE_BINARY_DIR} abs_top_builddir) set(HAVE_DOT ${DOXYGEN_DOT_FOUND}) set(enable_html_docs YES) set(enable_latex_docs NO) +set(enable_mathjax NO) set(enable_xml_docs YES) configure_file( diff --git a/docs/doxygen/Doxyfile.in b/docs/doxygen/Doxyfile.in index a3350a4..8e47b79 100644 --- a/docs/doxygen/Doxyfile.in +++ b/docs/doxygen/Doxyfile.in @@ -199,13 +199,6 @@ TAB_SIZE = 8 ALIASES = -# This tag can be used to specify a number of word-keyword mappings (TCL only). -# A mapping has the form "name=value". For example adding -# "class=itcl::class" will allow you to use the command class in the -# itcl::class meaning. - -TCL_SUBST = - # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list @@ -723,8 +716,6 @@ EXCLUDE_PATTERNS = */.deps/* \ EXCLUDE_SYMBOLS = ad9862 \ numpy \ - *swig* \ - *Swig* \ *my_top_block* \ *my_graph* \ *app_top_block* \ @@ -790,7 +781,7 @@ INPUT_FILTER = # info on how filters are used. If FILTER_PATTERNS is empty or if # non of the patterns match the file name, INPUT_FILTER is applied. -FILTER_PATTERNS = *.py="@top_srcdir@"/doc/doxygen/other/doxypy.py +FILTER_PATTERNS = *.py=@top_srcdir@/docs/doxygen/other/doxypy.py # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source @@ -879,12 +870,6 @@ VERBATIM_HEADERS = YES ALPHABETICAL_INDEX = YES -# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then -# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns -# in which this list will be split (can be a number in the range [1..20]) - -COLS_IN_ALPHA_INDEX = 5 - # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that @@ -1220,14 +1205,14 @@ FORMULA_TRANSPARENT = YES # output. When enabled you may also need to install MathJax separately and # configure the path to it using the MATHJAX_RELPATH option. -USE_MATHJAX = NO +USE_MATHJAX = @enable_mathjax@ # When MathJax is enabled you can set the default output format to be used for # the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and # SVG. The default value is HTML-CSS, which is slower, but has the best # compatibility. -MATHJAX_FORMAT = HTML-CSS +MATHJAX_FORMAT = SVG # When MathJax is enabled you need to specify the location relative to the # HTML output directory using the MATHJAX_RELPATH option. The destination @@ -1239,12 +1224,12 @@ MATHJAX_FORMAT = HTML-CSS # However, it is strongly recommended to install a local # copy of MathJax from http://www.mathjax.org before deployment. -MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest +MATHJAX_RELPATH = @MATHJAX2_PATH@ # The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension # names that should be enabled during MathJax rendering. -MATHJAX_EXTENSIONS = +MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols # The MATHJAX_CODEFILE tag can be used to specify a file with javascript # pieces of code that will be used on startup of the MathJax code. 
@@ -1680,11 +1665,6 @@ EXTERNAL_GROUPS = YES EXTERNAL_PAGES = YES -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of `which perl'). - -PERL_PATH = /usr/bin/perl - #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- @@ -1697,15 +1677,6 @@ PERL_PATH = /usr/bin/perl CLASS_DIAGRAMS = YES -# You can define message sequence charts within doxygen comments using the \msc -# command. Doxygen will then run the mscgen tool (see -# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the -# documentation. The MSCGEN_PATH tag allows you to specify the directory where -# the mscgen tool resides. If left empty the tool is assumed to be found in the -# default search path. - -MSCGEN_PATH = - # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. @@ -1834,7 +1805,7 @@ DIRECTORY_GRAPH = YES # HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible in IE 9+ (other browsers do not have this requirement). -DOT_IMAGE_FORMAT = png +DOT_IMAGE_FORMAT = svg # If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to # enable generation of interactive SVG images that allow zooming and panning. diff --git a/docs/doxygen/Doxyfile.swig_doc.in b/docs/doxygen/Doxyfile.swig_doc.in deleted file mode 100644 index cbe06d6..0000000 --- a/docs/doxygen/Doxyfile.swig_doc.in +++ /dev/null @@ -1,1878 +0,0 @@ -# Doxyfile 1.8.4 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project. -# -# All text after a double hash (##) is considered a comment and is placed -# in front of the TAG it is preceding . -# All text after a hash (#) is considered a comment and will be ignored. -# The format is: -# TAG = value [value, ...] -# For lists items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (" "). - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the config file -# that follow. The default is UTF-8 which is also the encoding used for all -# text before the first occurrence of this tag. Doxygen uses libiconv (or the -# iconv built into libc) for the transcoding. See -# http://www.gnu.org/software/libiconv for the list of possible encodings. - -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or sequence of words) that should -# identify the project. Note that if you do not use Doxywizard you need -# to put quotes around the project name if it contains spaces. - -PROJECT_NAME = @CPACK_PACKAGE_NAME@ - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. -# This could be handy for archiving the generated documentation or -# if some version control system is used. - -PROJECT_NUMBER = @CPACK_PACKAGE_VERSION@ - -# Using the PROJECT_BRIEF tag one can provide an optional one line description -# for a project that appears at the top of each page and should give viewer -# a quick idea about the purpose of the project. Keep the description short. 
- -PROJECT_BRIEF = - -# With the PROJECT_LOGO tag one can specify an logo or icon that is -# included in the documentation. The maximum height of the logo should not -# exceed 55 pixels and the maximum width should not exceed 200 pixels. -# Doxygen will copy the logo to the output directory. - -PROJECT_LOGO = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) -# base path where the generated documentation will be put. -# If a relative path is entered, it will be relative to the location -# where doxygen was started. If left blank the current directory will be used. - -OUTPUT_DIRECTORY = "@OUTPUT_DIRECTORY@" - -# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create -# 4096 sub-directories (in 2 levels) under the output directory of each output -# format and will distribute the generated files over these directories. -# Enabling this option can be useful when feeding doxygen a huge amount of -# source files, where putting all generated files in the same directory would -# otherwise cause performance problems for the file system. - -CREATE_SUBDIRS = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# The default language is English, other supported languages are: -# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, -# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, -# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English -# messages), Korean, Korean-en, Latvian, Lithuanian, Norwegian, Macedonian, -# Persian, Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, -# Slovak, Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. - -OUTPUT_LANGUAGE = English - -# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will -# include brief member descriptions after the members that are listed in -# the file and class documentation (similar to JavaDoc). -# Set to NO to disable this. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend -# the brief description of a member or function before the detailed description. -# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator -# that is used to form the text in various listings. Each string -# in this list, if found as the leading text of the brief description, will be -# stripped from the text and the result after processing the whole list, is -# used as the annotated text. Otherwise, the brief description is used as-is. -# If left blank, the following values are used ("$name" is automatically -# replaced with the name of the entity): "The $name class" "The $name widget" -# "The $name file" "is" "provides" "specifies" "contains" -# "represents" "a" "an" "the" - -ABBREVIATE_BRIEF = - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# Doxygen will generate a detailed section even if there is only a brief -# description. - -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. 
Constructors, destructors and assignment -# operators of the base classes will not be shown. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full -# path before files name in the file list and in the header files. If set -# to NO the shortest path that makes the file name unique will be used. - -FULL_PATH_NAMES = NO - -# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag -# can be used to strip a user-defined part of the path. Stripping is -# only done if one of the specified strings matches the left-hand part of -# the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the -# path to strip. Note that you specify absolute paths here, but also -# relative paths, which will be relative from the directory where doxygen is -# started. - -STRIP_FROM_PATH = - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of -# the path mentioned in the documentation of a class, which tells -# the reader which header file to include in order to use a class. -# If left blank only the name of the header file containing the class -# definition is used. Otherwise one should specify the include paths that -# are normally passed to the compiler using the -I flag. - -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter -# (but less readable) file names. This can be useful if your file system -# doesn't support long names like on DOS, Mac, or CD-ROM. - -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen -# will interpret the first line (until the first dot) of a JavaDoc-style -# comment as the brief description. If set to NO, the JavaDoc -# comments will behave just like regular Qt-style comments -# (thus requiring an explicit @brief command for a brief description.) - -JAVADOC_AUTOBRIEF = NO - -# If the QT_AUTOBRIEF tag is set to YES then Doxygen will -# interpret the first line (until the first dot) of a Qt-style -# comment as the brief description. If set to NO, the comments -# will behave just like regular Qt-style comments (thus requiring -# an explicit \brief command for a brief description.) - -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen -# treat a multi-line C++ special comment block (i.e. a block of //! or /// -# comments) as a brief description. This used to be the default behaviour. -# The new default is to treat a multi-line C++ comment block as a detailed -# description. Set this tag to YES if you prefer the old behaviour instead. - -MULTILINE_CPP_IS_BRIEF = NO - -# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented -# member inherits the documentation from any documented member that it -# re-implements. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce -# a new page for each member. If set to NO, the documentation of a member will -# be part of the file/class/namespace that contains it. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. -# Doxygen uses this value to replace tabs by spaces in code fragments. - -TAB_SIZE = 8 - -# This tag can be used to specify a number of aliases that acts -# as commands in the documentation. An alias has the form "name=value". 
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to -# put the command \sideeffect (or @sideeffect) in the documentation, which -# will result in a user-defined paragraph with heading "Side Effects:". -# You can put \n's in the value part of an alias to insert newlines. - -ALIASES = - -# This tag can be used to specify a number of word-keyword mappings (TCL only). -# A mapping has the form "name=value". For example adding -# "class=itcl::class" will allow you to use the command class in the -# itcl::class meaning. - -TCL_SUBST = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C -# sources only. Doxygen will then generate output that is more tailored for C. -# For instance, some of the names that are used will be different. The list -# of all members will be omitted, etc. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java -# sources only. Doxygen will then generate output that is more tailored for -# Java. For instance, namespaces will be presented as packages, qualified -# scopes will look different, etc. - -OPTIMIZE_OUTPUT_JAVA = NO - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources only. Doxygen will then generate output that is more tailored for -# Fortran. - -OPTIMIZE_FOR_FORTRAN = NO - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for -# VHDL. - -OPTIMIZE_OUTPUT_VHDL = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given -# extension. Doxygen has a built-in mapping, but you can override or extend it -# using this tag. The format is ext=language, where ext is a file extension, -# and language is one of the parsers supported by doxygen: IDL, Java, -# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, -# C++. For instance to make doxygen treat .inc files as Fortran files (default -# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note -# that for custom extensions you also need to set FILE_PATTERNS otherwise the -# files are not read by doxygen. - -EXTENSION_MAPPING = - -# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all -# comments according to the Markdown format, which allows for more readable -# documentation. See http://daringfireball.net/projects/markdown/ for details. -# The output of markdown processing is further processed by doxygen, so you -# can mix doxygen, HTML, and XML commands with Markdown formatting. -# Disable only in case of backward compatibilities issues. - -MARKDOWN_SUPPORT = YES - -# When enabled doxygen tries to link words that correspond to documented -# classes, or namespaces to their corresponding documentation. Such a link can -# be prevented in individual cases by by putting a % sign in front of the word -# or globally by setting AUTOLINK_SUPPORT to NO. - -AUTOLINK_SUPPORT = YES - -# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should -# set this tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. -# func(std::string) {}). This also makes the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. 
- -BUILTIN_STL_SUPPORT = YES - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. -# Doxygen will parse them like normal C++ but will assume all classes use public -# instead of private inheritance when no explicit protection keyword is present. - -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate -# getter and setter methods for a property. Setting this option to YES (the -# default) will make doxygen replace the get and set methods by a property in -# the documentation. This will only work if the methods are indeed getting or -# setting a simple type. If this is not the case, or you want to show the -# methods anyway, you should set this option to NO. - -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES, then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. - -DISTRIBUTE_GROUP_DOC = NO - -# Set the SUBGROUPING tag to YES (the default) to allow class member groups of -# the same type (for instance a group of public functions) to be put as a -# subgroup of that type (e.g. under the Public Functions section). Set it to -# NO to prevent subgrouping. Alternatively, this can be done per class using -# the \nosubgrouping command. - -SUBGROUPING = YES - -# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and -# unions are shown inside the group in which they are included (e.g. using -# @ingroup) instead of on a separate page (for HTML and Man pages) or -# section (for LaTeX and RTF). - -INLINE_GROUPED_CLASSES = NO - -# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and -# unions with only public data fields or simple typedef fields will be shown -# inline in the documentation of the scope in which they are defined (i.e. file, -# namespace, or group documentation), provided this scope is documented. If set -# to NO (the default), structs, classes, and unions are shown on a separate -# page (for HTML and Man pages) or section (for LaTeX and RTF). - -INLINE_SIMPLE_STRUCTS = NO - -# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum -# is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically -# be useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. - -TYPEDEF_HIDES_STRUCT = NO - -# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This -# cache is used to resolve symbols given their name and scope. Since this can -# be an expensive process and often the same symbol appear multiple times in -# the code, doxygen keeps a cache of pre-resolved symbols. If the cache is too -# small doxygen will become slower. If the cache is too large, memory is wasted. -# The cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid -# range is 0..9, the default is 0, corresponding to a cache size of 2^16 = 65536 -# symbols. 
- -LOOKUP_CACHE_SIZE = 0 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in -# documentation are documented, even if no documentation was available. -# Private class members and static file members will be hidden unless -# the EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES - -EXTRACT_ALL = YES - -# If the EXTRACT_PRIVATE tag is set to YES all private members of a class -# will be included in the documentation. - -EXTRACT_PRIVATE = NO - -# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal -# scope will be included in the documentation. - -EXTRACT_PACKAGE = NO - -# If the EXTRACT_STATIC tag is set to YES all static members of a file -# will be included in the documentation. - -EXTRACT_STATIC = NO - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) -# defined locally in source files will be included in the documentation. -# If set to NO only classes defined in header files are included. - -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. When set to YES local -# methods, which are defined in the implementation section but not in -# the interface are included in the documentation. -# If set to NO (the default) only methods in the interface are included. - -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base -# name of the file that contains the anonymous namespace. By default -# anonymous namespaces are hidden. - -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all -# undocumented members of documented classes, files or namespaces. -# If set to NO (the default) these members will be included in the -# various overviews, but no documentation section is generated. -# This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. -# If set to NO (the default) these classes will be included in the various -# overviews. This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all -# friend (class|struct|union) declarations. -# If set to NO (the default) these declarations will be included in the -# documentation. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any -# documentation blocks found inside the body of a function. -# If set to NO (the default) these blocks will be appended to the -# function's detailed documentation block. - -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation -# that is typed after a \internal command is included. If the tag is set -# to NO (the default) then the documentation will be excluded. -# Set it to YES to include the internal documentation. - -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate -# file names in lower-case letters. If set to YES upper-case letters are also -# allowed. 
This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. - -CASE_SENSE_NAMES = YES - -# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen -# will show members with their full class and namespace scopes in the -# documentation. If set to YES the scope will be hidden. - -HIDE_SCOPE_NAMES = NO - -# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen -# will put a list of the files that are included by a file in the documentation -# of that file. - -SHOW_INCLUDE_FILES = YES - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen -# will list include files with double quotes in the documentation -# rather than with sharp brackets. - -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] -# is inserted in the documentation for inline members. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen -# will sort the (detailed) documentation of file and class members -# alphabetically by member name. If set to NO the members will appear in -# declaration order. - -SORT_MEMBER_DOCS = YES - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the -# brief documentation of file, namespace and class members alphabetically -# by member name. If set to NO (the default) the members will appear in -# declaration order. - -SORT_BRIEF_DOCS = NO - -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen -# will sort the (brief and detailed) documentation of class members so that -# constructors and destructors are listed first. If set to NO (the default) -# the constructors will appear in the respective orders defined by -# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. -# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO -# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. - -SORT_MEMBERS_CTORS_1ST = NO - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the -# hierarchy of group names into alphabetical order. If set to NO (the default) -# the group names will appear in their defined order. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be -# sorted by fully-qualified names, including namespaces. If set to -# NO (the default), the class list will be sorted only by class name, -# not including the namespace part. -# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the -# alphabetical list. - -SORT_BY_SCOPE_NAME = NO - -# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to -# do proper type resolution of all parameters of a function it will reject a -# match between the prototype and the implementation of a member function even -# if there is only one candidate or it is obvious which candidate to choose -# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen -# will still accept a match between prototype and implementation in such cases. - -STRICT_PROTO_MATCHING = NO - -# The GENERATE_TODOLIST tag can be used to enable (YES) or -# disable (NO) the todo list. This list is created by putting \todo -# commands in the documentation. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or -# disable (NO) the test list. 
This list is created by putting \test -# commands in the documentation. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or -# disable (NO) the bug list. This list is created by putting \bug -# commands in the documentation. - -GENERATE_BUGLIST = YES - -# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or -# disable (NO) the deprecated list. This list is created by putting -# \deprecated commands in the documentation. - -GENERATE_DEPRECATEDLIST= YES - -# The ENABLED_SECTIONS tag can be used to enable conditional -# documentation sections, marked by \if section-label ... \endif -# and \cond section-label ... \endcond blocks. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines -# the initial value of a variable or macro consists of for it to appear in -# the documentation. If the initializer consists of more lines than specified -# here it will be hidden. Use a value of 0 to hide initializers completely. -# The appearance of the initializer of individual variables and macros in the -# documentation can be controlled using \showinitializer or \hideinitializer -# command in the documentation regardless of this setting. - -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated -# at the bottom of the documentation of classes and structs. If set to YES the -# list will mention the files that were used to generate the documentation. - -SHOW_USED_FILES = YES - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. -# This will remove the Files entry from the Quick Index and from the -# Folder Tree View (if specified). The default is YES. - -SHOW_FILES = YES - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the -# Namespaces page. -# This will remove the Namespaces entry from the Quick Index -# and from the Folder Tree View (if specified). The default is YES. - -SHOW_NAMESPACES = YES - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). Doxygen will invoke the program by executing (via -# popen()) the command , where is the value of -# the FILE_VERSION_FILTER tag, and is the name of an input file -# provided by doxygen. Whatever the program writes to standard output -# is used as the file version. See the manual for examples. - -FILE_VERSION_FILTER = - -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed -# by doxygen. The layout file controls the global structure of the generated -# output files in an output format independent way. To create the layout file -# that represents doxygen's defaults, run doxygen with the -l option. -# You can optionally specify a file name after the option, if omitted -# DoxygenLayout.xml will be used as the name of the layout file. - -LAYOUT_FILE = - -# The CITE_BIB_FILES tag can be used to specify one or more bib files -# containing the references data. This must be a list of .bib files. The -# .bib extension is automatically appended if omitted. Using this command -# requires the bibtex tool to be installed. See also -# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style -# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this -# feature you need bibtex and perl available in the search path. Do not use -# file names with spaces, bibtex cannot handle them. 
- -CITE_BIB_FILES = - -#--------------------------------------------------------------------------- -# configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated -# by doxygen. Possible values are YES and NO. If left blank NO is used. - -QUIET = YES - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated by doxygen. Possible values are YES and NO. If left blank -# NO is used. - -WARNINGS = YES - -# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings -# for undocumented members. If EXTRACT_ALL is set to YES then this flag will -# automatically be disabled. - -WARN_IF_UNDOCUMENTED = YES - -# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some -# parameters in a documented function, or documenting parameters that -# don't exist or using markup commands wrongly. - -WARN_IF_DOC_ERROR = YES - -# The WARN_NO_PARAMDOC option can be enabled to get warnings for -# functions that are documented, but have no documentation for their parameters -# or return value. If set to NO (the default) doxygen will only warn about -# wrong or incomplete parameter documentation, but not about the absence of -# documentation. - -WARN_NO_PARAMDOC = NO - -# The WARN_FORMAT tag determines the format of the warning messages that -# doxygen can produce. The string should contain the $file, $line, and $text -# tags, which will be replaced by the file and line number from which the -# warning originated and the warning text. Optionally the format may contain -# $version, which will be replaced by the version of the file (if it could -# be obtained via FILE_VERSION_FILTER) - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning -# and error messages should be written. If left blank the output is written -# to stderr. - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag can be used to specify the files and/or directories that contain -# documented source files. You may enter file names like "myfile.cpp" or -# directories like "/usr/src/myproject". Separate the files or directories -# with spaces. - -INPUT = @INPUT_PATHS@ - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is -# also the default input encoding. Doxygen uses libiconv (or the iconv built -# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for -# the list of possible encodings. - -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank the following patterns are tested: -# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh -# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py -# *.f90 *.f *.for *.vhd *.vhdl - -FILE_PATTERNS = *.h - -# The RECURSIVE tag can be used to turn specify whether or not subdirectories -# should be searched for input files as well. 
Possible values are YES and NO. -# If left blank NO is used. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should be -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. -# Note that relative paths are relative to the directory from which doxygen is -# run. - -EXCLUDE = - -# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or -# directories that are symbolic links (a Unix file system feature) are excluded -# from the input. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. Note that the wildcards are matched -# against the file with absolute path, so to exclude all test directories -# for example use the pattern */test/* - -EXCLUDE_PATTERNS = - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test - -EXCLUDE_SYMBOLS = - -# The EXAMPLE_PATH tag can be used to specify one or more files or -# directories that contain example code fragments that are included (see -# the \include command). - -EXAMPLE_PATH = - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank all files are included. - -EXAMPLE_PATTERNS = - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude -# commands irrespective of the value of the RECURSIVE tag. -# Possible values are YES and NO. If left blank NO is used. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or -# directories that contain image that are included in the documentation (see -# the \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command , where -# is the value of the INPUT_FILTER tag, and is the name of an -# input file. Doxygen will then use the output that the filter program writes -# to standard output. -# If FILTER_PATTERNS is specified, this tag will be ignored. -# Note that the filter must not add or remove lines; it is applied before the -# code is scanned, but not when the output code is generated. If lines are added -# or removed, the anchors will not be placed correctly. - -INPUT_FILTER = - -# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. -# Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. -# The filters are a list of the form: -# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further -# info on how filters are used. If FILTER_PATTERNS is empty or if -# non of the patterns match the file name, INPUT_FILTER is applied. 
- -FILTER_PATTERNS = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will be used to filter the input files when producing source -# files to browse (i.e. when SOURCE_BROWSER is set to YES). - -FILTER_SOURCE_FILES = NO - -# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file -# pattern. A pattern will override the setting for FILTER_PATTERN (if any) -# and it is also possible to disable source filtering for a specific pattern -# using *.ext= (so without naming a filter). This option only has effect when -# FILTER_SOURCE_FILES is enabled. - -FILTER_SOURCE_PATTERNS = - -# If the USE_MD_FILE_AS_MAINPAGE tag refers to the name of a markdown file that -# is part of the input, its contents will be placed on the main page -# (index.html). This can be useful if you have a project on for instance GitHub -# and want reuse the introduction page also for the doxygen output. - -USE_MDFILE_AS_MAINPAGE = - -#--------------------------------------------------------------------------- -# configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will -# be generated. Documented entities will be cross-referenced with these sources. -# Note: To get rid of all source code in the generated output, make sure also -# VERBATIM_HEADERS is set to NO. - -SOURCE_BROWSER = NO - -# Setting the INLINE_SOURCES tag to YES will include the body -# of functions and classes directly in the documentation. - -INLINE_SOURCES = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct -# doxygen to hide any special comment blocks from generated source code -# fragments. Normal C, C++ and Fortran comments will always remain visible. - -STRIP_CODE_COMMENTS = YES - -# If the REFERENCED_BY_RELATION tag is set to YES -# then for each documented function all documented -# functions referencing it will be listed. - -REFERENCED_BY_RELATION = NO - -# If the REFERENCES_RELATION tag is set to YES -# then for each documented function all documented entities -# called/used by that function will be listed. - -REFERENCES_RELATION = NO - -# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) -# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from -# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will -# link to the source code. -# Otherwise they will link to the documentation. - -REFERENCES_LINK_SOURCE = YES - -# If the USE_HTAGS tag is set to YES then the references to source code -# will point to the HTML generated by the htags(1) tool instead of doxygen -# built-in source browser. The htags tool is part of GNU's global source -# tagging system (see http://www.gnu.org/software/global/global.html). You -# will need version 4.8.6 or higher. - -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen -# will generate a verbatim copy of the header file for each class for -# which an include is specified. Set to NO to disable this. - -VERBATIM_HEADERS = YES - -#--------------------------------------------------------------------------- -# configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index -# of all compounds will be generated. 
Enable this if the project -# contains a lot of classes, structs, unions or interfaces. - -ALPHABETICAL_INDEX = NO - -# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then -# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns -# in which this list will be split (can be a number in the range [1..20]) - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all -# classes will be put under the same header in the alphabetical index. -# The IGNORE_PREFIX tag can be used to specify one or more prefixes that -# should be ignored while generating the index headers. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES (the default) Doxygen will -# generate HTML output. - -GENERATE_HTML = NO - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `html' will be used as the default path. - -HTML_OUTPUT = html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for -# each generated HTML page (for example: .htm,.php,.asp). If it is left blank -# doxygen will generate files with .html extension. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a personal HTML header for -# each generated HTML page. If it is left blank doxygen will generate a -# standard header. Note that when using a custom header you are responsible -# for the proper inclusion of any scripts and style sheets that doxygen -# needs, which is dependent on the configuration options used. -# It is advised to generate a default header using "doxygen -w html -# header.html footer.html stylesheet.css YourConfigFile" and then modify -# that header. Note that the header is subject to change so you typically -# have to redo this when upgrading to a newer version of doxygen or when -# changing the value of configuration settings such as GENERATE_TREEVIEW! - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a personal HTML footer for -# each generated HTML page. If it is left blank doxygen will generate a -# standard footer. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading -# style sheet that is used by each HTML page. It can be used to -# fine-tune the look of the HTML output. If left blank doxygen will -# generate a default style sheet. Note that it is recommended to use -# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this -# tag will in the future become obsolete. - -HTML_STYLESHEET = - -# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional -# user-defined cascading style sheet that is included after the standard -# style sheets created by doxygen. Using this option one can overrule -# certain style aspects. This is preferred over using HTML_STYLESHEET -# since it does not replace the standard style sheet and is therefore more -# robust against future updates. Doxygen will copy the style sheet file to -# the output directory. - -HTML_EXTRA_STYLESHEET = - -# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or -# other source files which should be copied to the HTML output directory. Note -# that these files will be copied to the base HTML output directory. 
Use the -# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these -# files. In the HTML_STYLESHEET file, use the file name only. Also note that -# the files will be copied as-is; there are no commands or markers available. - -HTML_EXTRA_FILES = - -# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. -# Doxygen will adjust the colors in the style sheet and background images -# according to this color. Hue is specified as an angle on a colorwheel, -# see http://en.wikipedia.org/wiki/Hue for more information. -# For instance the value 0 represents red, 60 is yellow, 120 is green, -# 180 is cyan, 240 is blue, 300 purple, and 360 is red again. -# The allowed range is 0 to 359. - -HTML_COLORSTYLE_HUE = 220 - -# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of -# the colors in the HTML output. For a value of 0 the output will use -# grayscales only. A value of 255 will produce the most vivid colors. - -HTML_COLORSTYLE_SAT = 100 - -# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to -# the luminance component of the colors in the HTML output. Values below -# 100 gradually make the output lighter, whereas values above 100 make -# the output darker. The value divided by 100 is the actual gamma applied, -# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, -# and 100 does not change the gamma. - -HTML_COLORSTYLE_GAMMA = 80 - -# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML -# page will contain the date and time when the page was generated. Setting -# this to NO can help when comparing the output of multiple runs. - -HTML_TIMESTAMP = NO - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. - -HTML_DYNAMIC_SECTIONS = NO - -# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of -# entries shown in the various tree structured indices initially; the user -# can expand and collapse entries dynamically later on. Doxygen will expand -# the tree to such a level that at most the specified number of entries are -# visible (unless a fully collapsed tree already exceeds this amount). -# So setting the number of entries 1 will produce a full collapsed tree by -# default. 0 is a special value representing an infinite number of entries -# and will result in a full expanded tree by default. - -HTML_INDEX_NUM_ENTRIES = 100 - -# If the GENERATE_DOCSET tag is set to YES, additional index files -# will be generated that can be used as input for Apple's Xcode 3 -# integrated development environment, introduced with OSX 10.5 (Leopard). -# To create a documentation set, doxygen will generate a Makefile in the -# HTML output directory. Running make will produce the docset in that -# directory and running "make install" will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find -# it at startup. -# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html -# for more information. - -GENERATE_DOCSET = NO - -# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the -# feed. A documentation feed provides an umbrella under which multiple -# documentation sets from a single provider (such as a company or product suite) -# can be grouped. 
- -DOCSET_FEEDNAME = "Doxygen generated docs" - -# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that -# should uniquely identify the documentation set bundle. This should be a -# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen -# will append .docset to the name. - -DOCSET_BUNDLE_ID = org.doxygen.Project - -# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely -# identify the documentation publisher. This should be a reverse domain-name -# style string, e.g. com.mycompany.MyDocSet.documentation. - -DOCSET_PUBLISHER_ID = org.doxygen.Publisher - -# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. - -DOCSET_PUBLISHER_NAME = Publisher - -# If the GENERATE_HTMLHELP tag is set to YES, additional index files -# will be generated that can be used as input for tools like the -# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) -# of the generated HTML documentation. - -GENERATE_HTMLHELP = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can -# be used to specify the file name of the resulting .chm file. You -# can add a path in front of the file if the result should not be -# written to the html output directory. - -CHM_FILE = - -# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can -# be used to specify the location (absolute path including file name) of -# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run -# the HTML help compiler on the generated index.hhp. - -HHC_LOCATION = - -# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag -# controls if a separate .chi index file is generated (YES) or that -# it should be included in the master .chm file (NO). - -GENERATE_CHI = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING -# is used to encode HtmlHelp index (hhk), content (hhc) and project file -# content. - -CHM_INDEX_ENCODING = - -# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag -# controls whether a binary table of contents is generated (YES) or a -# normal table of contents (NO) in the .chm file. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members -# to the contents of the HTML help documentation and to the tree view. - -TOC_EXPAND = NO - -# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and -# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated -# that can be used as input for Qt's qhelpgenerator to generate a -# Qt Compressed Help (.qch) of the generated HTML documentation. - -GENERATE_QHP = NO - -# If the QHG_LOCATION tag is specified, the QCH_FILE tag can -# be used to specify the file name of the resulting .qch file. -# The path specified is relative to the HTML output folder. - -QCH_FILE = - -# The QHP_NAMESPACE tag specifies the namespace to use when generating -# Qt Help Project output. For more information please see -# http://doc.trolltech.com/qthelpproject.html#namespace - -QHP_NAMESPACE = - -# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating -# Qt Help Project output. For more information please see -# http://doc.trolltech.com/qthelpproject.html#virtual-folders - -QHP_VIRTUAL_FOLDER = doc - -# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to -# add. 
For more information please see -# http://doc.trolltech.com/qthelpproject.html#custom-filters - -QHP_CUST_FILTER_NAME = - -# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the -# custom filter to add. For more information please see -# -# Qt Help Project / Custom Filters. - -QHP_CUST_FILTER_ATTRS = - -# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this -# project's -# filter section matches. -# -# Qt Help Project / Filter Attributes. - -QHP_SECT_FILTER_ATTRS = - -# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can -# be used to specify the location of Qt's qhelpgenerator. -# If non-empty doxygen will try to run qhelpgenerator on the generated -# .qhp file. - -QHG_LOCATION = - -# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files -# will be generated, which together with the HTML files, form an Eclipse help -# plugin. To install this plugin and make it available under the help contents -# menu in Eclipse, the contents of the directory containing the HTML and XML -# files needs to be copied into the plugins directory of eclipse. The name of -# the directory within the plugins directory should be the same as -# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before -# the help appears. - -GENERATE_ECLIPSEHELP = NO - -# A unique identifier for the eclipse help plugin. When installing the plugin -# the directory name containing the HTML and XML files should also have -# this name. - -ECLIPSE_DOC_ID = org.doxygen.Project - -# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) -# at top of each HTML page. The value NO (the default) enables the index and -# the value YES disables it. Since the tabs have the same information as the -# navigation tree you can set this option to NO if you already set -# GENERATE_TREEVIEW to YES. - -DISABLE_INDEX = NO - -# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. -# If the tag value is set to YES, a side panel will be generated -# containing a tree-like index structure (just like the one that -# is generated for HTML Help). For this to work a browser that supports -# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). -# Windows users are probably better off using the HTML help feature. -# Since the tree basically has the same information as the tab index you -# could consider to set DISABLE_INDEX to NO when enabling this option. - -GENERATE_TREEVIEW = NO - -# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values -# (range [0,1..20]) that doxygen will group on one line in the generated HTML -# documentation. Note that a value of 0 will completely suppress the enum -# values from appearing in the overview section. - -ENUM_VALUES_PER_LINE = 4 - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be -# used to set the initial width (in pixels) of the frame in which the tree -# is shown. - -TREEVIEW_WIDTH = 250 - -# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open -# links to external symbols imported via tag files in a separate window. - -EXT_LINKS_IN_WINDOW = NO - -# Use this tag to change the font size of Latex formulas included -# as images in the HTML documentation. The default is 10. 
Note that -# when you change the font size after a successful doxygen run you need -# to manually remove any form_*.png images from the HTML output directory -# to force them to be regenerated. - -FORMULA_FONTSIZE = 10 - -# Use the FORMULA_TRANPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. Transparent PNGs are -# not supported properly for IE 6.0, but are supported on all modern browsers. -# Note that when changing this option you need to delete any form_*.png files -# in the HTML output before the changes have effect. - -FORMULA_TRANSPARENT = YES - -# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax -# (see http://www.mathjax.org) which uses client side Javascript for the -# rendering instead of using prerendered bitmaps. Use this if you do not -# have LaTeX installed or if you want to formulas look prettier in the HTML -# output. When enabled you may also need to install MathJax separately and -# configure the path to it using the MATHJAX_RELPATH option. - -USE_MATHJAX = NO - -# When MathJax is enabled you can set the default output format to be used for -# the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and -# SVG. The default value is HTML-CSS, which is slower, but has the best -# compatibility. - -MATHJAX_FORMAT = HTML-CSS - -# When MathJax is enabled you need to specify the location relative to the -# HTML output directory using the MATHJAX_RELPATH option. The destination -# directory should contain the MathJax.js script. For instance, if the mathjax -# directory is located at the same level as the HTML output directory, then -# MATHJAX_RELPATH should be ../mathjax. The default value points to -# the MathJax Content Delivery Network so you can quickly see the result without -# installing MathJax. -# However, it is strongly recommended to install a local -# copy of MathJax from http://www.mathjax.org before deployment. - -MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest - -# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension -# names that should be enabled during MathJax rendering. - -MATHJAX_EXTENSIONS = - -# The MATHJAX_CODEFILE tag can be used to specify a file with javascript -# pieces of code that will be used on startup of the MathJax code. - -MATHJAX_CODEFILE = - -# When the SEARCHENGINE tag is enabled doxygen will generate a search box -# for the HTML output. The underlying search engine uses javascript -# and DHTML and should work on any modern browser. Note that when using -# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets -# (GENERATE_DOCSET) there is already a search function so this one should -# typically be disabled. For large projects the javascript based search engine -# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. - -SEARCHENGINE = YES - -# When the SERVER_BASED_SEARCH tag is enabled the search engine will be -# implemented using a web server instead of a web client using Javascript. -# There are two flavours of web server based search depending on the -# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for -# searching and an index file used by the script. When EXTERNAL_SEARCH is -# enabled the indexing and searching needs to be provided by external tools. -# See the manual for details. - -SERVER_BASED_SEARCH = NO - -# When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP -# script for searching. 
Instead the search results are written to an XML file -# which needs to be processed by an external indexer. Doxygen will invoke an -# external search engine pointed to by the SEARCHENGINE_URL option to obtain -# the search results. Doxygen ships with an example indexer (doxyindexer) and -# search engine (doxysearch.cgi) which are based on the open source search -# engine library Xapian. See the manual for configuration details. - -EXTERNAL_SEARCH = NO - -# The SEARCHENGINE_URL should point to a search engine hosted by a web server -# which will returned the search results when EXTERNAL_SEARCH is enabled. -# Doxygen ships with an example search engine (doxysearch) which is based on -# the open source search engine library Xapian. See the manual for configuration -# details. - -SEARCHENGINE_URL = - -# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed -# search data is written to a file for indexing by an external tool. With the -# SEARCHDATA_FILE tag the name of this file can be specified. - -SEARCHDATA_FILE = searchdata.xml - -# When SERVER_BASED_SEARCH AND EXTERNAL_SEARCH are both enabled the -# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is -# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple -# projects and redirect the results back to the right project. - -EXTERNAL_SEARCH_ID = - -# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen -# projects other than the one defined by this configuration file, but that are -# all added to the same external search index. Each project needs to have a -# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id -# of to a relative location where the documentation can be found. -# The format is: EXTRA_SEARCH_MAPPINGS = id1=loc1 id2=loc2 ... - -EXTRA_SEARCH_MAPPINGS = - -#--------------------------------------------------------------------------- -# configuration options related to the LaTeX output -#--------------------------------------------------------------------------- - -# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will -# generate Latex output. - -GENERATE_LATEX = NO - -# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `latex' will be used as the default path. - -LATEX_OUTPUT = latex - -# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be -# invoked. If left blank `latex' will be used as the default command name. -# Note that when enabling USE_PDFLATEX this option is only used for -# generating bitmaps for formulas in the HTML output, but not in the -# Makefile that is written to the output directory. - -LATEX_CMD_NAME = latex - -# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to -# generate index for LaTeX. If left blank `makeindex' will be used as the -# default command name. - -MAKEINDEX_CMD_NAME = makeindex - -# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact -# LaTeX documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_LATEX = NO - -# The PAPER_TYPE tag can be used to set the paper type that is used -# by the printer. Possible values are: a4, letter, legal and -# executive. If left blank a4 will be used. 
- -PAPER_TYPE = a4wide - -# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX -# packages that should be included in the LaTeX output. - -EXTRA_PACKAGES = - -# The LATEX_HEADER tag can be used to specify a personal LaTeX header for -# the generated latex document. The header should contain everything until -# the first chapter. If it is left blank doxygen will generate a -# standard header. Notice: only use this tag if you know what you are doing! - -LATEX_HEADER = - -# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for -# the generated latex document. The footer should contain everything after -# the last chapter. If it is left blank doxygen will generate a -# standard footer. Notice: only use this tag if you know what you are doing! - -LATEX_FOOTER = - -# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images -# or other source files which should be copied to the LaTeX output directory. -# Note that the files will be copied as-is; there are no commands or markers -# available. - -LATEX_EXTRA_FILES = - -# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated -# is prepared for conversion to pdf (using ps2pdf). The pdf file will -# contain links (just like the HTML output) instead of page references -# This makes the output suitable for online browsing using a pdf viewer. - -PDF_HYPERLINKS = YES - -# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of -# plain latex in the generated Makefile. Set this option to YES to get a -# higher quality PDF documentation. - -USE_PDFLATEX = YES - -# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. -# command to the generated LaTeX files. This will instruct LaTeX to keep -# running if errors occur, instead of asking the user for help. -# This option is also used when generating formulas in HTML. - -LATEX_BATCHMODE = NO - -# If LATEX_HIDE_INDICES is set to YES then doxygen will not -# include the index chapters (such as File Index, Compound Index, etc.) -# in the output. - -LATEX_HIDE_INDICES = NO - -# If LATEX_SOURCE_CODE is set to YES then doxygen will include -# source code with syntax highlighting in the LaTeX output. -# Note that which sources are shown also depends on other settings -# such as SOURCE_BROWSER. - -LATEX_SOURCE_CODE = NO - -# The LATEX_BIB_STYLE tag can be used to specify the style to use for the -# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See -# http://en.wikipedia.org/wiki/BibTeX for more info. - -LATEX_BIB_STYLE = plain - -#--------------------------------------------------------------------------- -# configuration options related to the RTF output -#--------------------------------------------------------------------------- - -# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output -# The RTF output is optimized for Word 97 and may not look very pretty with -# other RTF readers or editors. - -GENERATE_RTF = NO - -# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `rtf' will be used as the default path. - -RTF_OUTPUT = rtf - -# If the COMPACT_RTF tag is set to YES Doxygen generates more compact -# RTF documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_RTF = NO - -# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated -# will contain hyperlink fields. 
The RTF file will -# contain links (just like the HTML output) instead of page references. -# This makes the output suitable for online browsing using WORD or other -# programs which support those fields. -# Note: wordpad (write) and others do not support links. - -RTF_HYPERLINKS = NO - -# Load style sheet definitions from file. Syntax is similar to doxygen's -# config file, i.e. a series of assignments. You only have to provide -# replacements, missing definitions are set to their default value. - -RTF_STYLESHEET_FILE = - -# Set optional variables used in the generation of an rtf document. -# Syntax is similar to doxygen's config file. - -RTF_EXTENSIONS_FILE = - -#--------------------------------------------------------------------------- -# configuration options related to the man page output -#--------------------------------------------------------------------------- - -# If the GENERATE_MAN tag is set to YES (the default) Doxygen will -# generate man pages - -GENERATE_MAN = NO - -# The MAN_OUTPUT tag is used to specify where the man pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `man' will be used as the default path. - -MAN_OUTPUT = man - -# The MAN_EXTENSION tag determines the extension that is added to -# the generated man pages (default is the subroutine's section .3) - -MAN_EXTENSION = .3 - -# If the MAN_LINKS tag is set to YES and Doxygen generates man output, -# then it will generate one additional man file for each entity -# documented in the real man page(s). These additional files -# only source the real man page, but without them the man command -# would be unable to find the correct page. The default is NO. - -MAN_LINKS = NO - -#--------------------------------------------------------------------------- -# configuration options related to the XML output -#--------------------------------------------------------------------------- - -# If the GENERATE_XML tag is set to YES Doxygen will -# generate an XML file that captures the structure of -# the code including all documentation. - -GENERATE_XML = YES - -# The XML_OUTPUT tag is used to specify where the XML pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `xml' will be used as the default path. - -XML_OUTPUT = xml - -# If the XML_PROGRAMLISTING tag is set to YES Doxygen will -# dump the program listings (including syntax highlighting -# and cross-referencing information) to the XML output. Note that -# enabling this will significantly increase the size of the XML output. - -XML_PROGRAMLISTING = YES - -#--------------------------------------------------------------------------- -# configuration options related to the DOCBOOK output -#--------------------------------------------------------------------------- - -# If the GENERATE_DOCBOOK tag is set to YES Doxygen will generate DOCBOOK files -# that can be used to generate PDF. - -GENERATE_DOCBOOK = NO - -# The DOCBOOK_OUTPUT tag is used to specify where the DOCBOOK pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in -# front of it. If left blank docbook will be used as the default path. 
- -DOCBOOK_OUTPUT = docbook - -#--------------------------------------------------------------------------- -# configuration options for the AutoGen Definitions output -#--------------------------------------------------------------------------- - -# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will -# generate an AutoGen Definitions (see autogen.sf.net) file -# that captures the structure of the code including all -# documentation. Note that this feature is still experimental -# and incomplete at the moment. - -GENERATE_AUTOGEN_DEF = NO - -#--------------------------------------------------------------------------- -# configuration options related to the Perl module output -#--------------------------------------------------------------------------- - -# If the GENERATE_PERLMOD tag is set to YES Doxygen will -# generate a Perl module file that captures the structure of -# the code including all documentation. Note that this -# feature is still experimental and incomplete at the -# moment. - -GENERATE_PERLMOD = NO - -# If the PERLMOD_LATEX tag is set to YES Doxygen will generate -# the necessary Makefile rules, Perl scripts and LaTeX code to be able -# to generate PDF and DVI output from the Perl module output. - -PERLMOD_LATEX = NO - -# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be -# nicely formatted so it can be parsed by a human reader. -# This is useful -# if you want to understand what is going on. -# On the other hand, if this -# tag is set to NO the size of the Perl module output will be much smaller -# and Perl will parse it just the same. - -PERLMOD_PRETTY = YES - -# The names of the make variables in the generated doxyrules.make file -# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. -# This is useful so different doxyrules.make files included by the same -# Makefile don't overwrite each other's variables. - -PERLMOD_MAKEVAR_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the preprocessor -#--------------------------------------------------------------------------- - -# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will -# evaluate all C-preprocessor directives found in the sources and include -# files. - -ENABLE_PREPROCESSING = YES - -# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro -# names in the source code. If set to NO (the default) only conditional -# compilation will be performed. Macro expansion can be done in a controlled -# way by setting EXPAND_ONLY_PREDEF to YES. - -MACRO_EXPANSION = YES - -# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES -# then the macro expansion is limited to the macros specified with the -# PREDEFINED and EXPAND_AS_DEFINED tags. - -EXPAND_ONLY_PREDEF = NO - -# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files -# pointed to by INCLUDE_PATH will be searched when a #include is found. - -SEARCH_INCLUDES = YES - -# The INCLUDE_PATH tag can be used to specify one or more directories that -# contain include files that are not input files but should be processed by -# the preprocessor. - -INCLUDE_PATH = - -# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard -# patterns (like *.h and *.hpp) to filter out the header-files in the -# directories. If left blank, the patterns specified with FILE_PATTERNS will -# be used. 
- -INCLUDE_FILE_PATTERNS = - -# The PREDEFINED tag can be used to specify one or more macro names that -# are defined before the preprocessor is started (similar to the -D option of -# gcc). The argument of the tag is a list of macros of the form: name -# or name=definition (no spaces). If the definition and the = are -# omitted =1 is assumed. To prevent a macro definition from being -# undefined via #undef or recursively expanded use the := operator -# instead of the = operator. - -PREDEFINED = - -# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then -# this tag can be used to specify a list of macro names that should be expanded. -# The macro definition that is found in the sources will be used. -# Use the PREDEFINED tag if you want to use a different macro definition that -# overrules the definition found in the source code. - -EXPAND_AS_DEFINED = - -# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then -# doxygen's preprocessor will remove all references to function-like macros -# that are alone on a line, have an all uppercase name, and do not end with a -# semicolon, because these will confuse the parser if not removed. - -SKIP_FUNCTION_MACROS = YES - -#--------------------------------------------------------------------------- -# Configuration::additions related to external references -#--------------------------------------------------------------------------- - -# The TAGFILES option can be used to specify one or more tagfiles. For each -# tag file the location of the external documentation should be added. The -# format of a tag file without this location is as follows: -# -# TAGFILES = file1 file2 ... -# Adding location for the tag files is done as follows: -# -# TAGFILES = file1=loc1 "file2 = loc2" ... -# where "loc1" and "loc2" can be relative or absolute paths -# or URLs. Note that each tag file must have a unique name (where the name does -# NOT include the path). If a tag file is not located in the directory in which -# doxygen is run, you must also specify the path to the tagfile here. - -TAGFILES = - -# When a file name is specified after GENERATE_TAGFILE, doxygen will create -# a tag file that is based on the input files it reads. - -GENERATE_TAGFILE = - -# If the ALLEXTERNALS tag is set to YES all external classes will be listed -# in the class index. If set to NO only the inherited external classes -# will be listed. - -ALLEXTERNALS = NO - -# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed -# in the modules index. If set to NO, only the current project's groups will -# be listed. - -EXTERNAL_GROUPS = YES - -# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed -# in the related pages index. If set to NO, only the current project's -# pages will be listed. - -EXTERNAL_PAGES = YES - -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of `which perl'). - -PERL_PATH = /usr/bin/perl - -#--------------------------------------------------------------------------- -# Configuration options related to the dot tool -#--------------------------------------------------------------------------- - -# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will -# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base -# or super classes. Setting the tag to NO turns the diagrams off. 
Note that -# this option also works with HAVE_DOT disabled, but it is recommended to -# install and use dot, since it yields more powerful graphs. - -CLASS_DIAGRAMS = YES - -# You can define message sequence charts within doxygen comments using the \msc -# command. Doxygen will then run the mscgen tool (see -# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the -# documentation. The MSCGEN_PATH tag allows you to specify the directory where -# the mscgen tool resides. If left empty the tool is assumed to be found in the -# default search path. - -MSCGEN_PATH = - -# If set to YES, the inheritance and collaboration graphs will hide -# inheritance and usage relations if the target is undocumented -# or is not a class. - -HIDE_UNDOC_RELATIONS = YES - -# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is -# available from the path. This tool is part of Graphviz, a graph visualization -# toolkit from AT&T and Lucent Bell Labs. The other options in this section -# have no effect if this option is set to NO (the default) - -HAVE_DOT = NO - -# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is -# allowed to run in parallel. When set to 0 (the default) doxygen will -# base this on the number of processors available in the system. You can set it -# explicitly to a value larger than 0 to get control over the balance -# between CPU load and processing speed. - -DOT_NUM_THREADS = 0 - -# By default doxygen will use the Helvetica font for all dot files that -# doxygen generates. When you want a differently looking font you can specify -# the font name using DOT_FONTNAME. You need to make sure dot is able to find -# the font, which can be done by putting it in a standard location or by setting -# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the -# directory containing the font. - -DOT_FONTNAME = Helvetica - -# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. -# The default size is 10pt. - -DOT_FONTSIZE = 10 - -# By default doxygen will tell dot to use the Helvetica font. -# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to -# set the path where dot can find it. - -DOT_FONTPATH = - -# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect inheritance relations. Setting this tag to YES will force the -# CLASS_DIAGRAMS tag to NO. - -CLASS_GRAPH = YES - -# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect implementation dependencies (inheritance, containment, and -# class references variables) of the class with other documented classes. - -COLLABORATION_GRAPH = YES - -# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for groups, showing the direct groups dependencies - -GROUP_GRAPHS = YES - -# If the UML_LOOK tag is set to YES doxygen will generate inheritance and -# collaboration diagrams in a style similar to the OMG's Unified Modeling -# Language. - -UML_LOOK = NO - -# If the UML_LOOK tag is enabled, the fields and methods are shown inside -# the class node. If there are many fields or methods and many nodes the -# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS -# threshold limits the number of items for each type to make the size more -# manageable. Set this to 0 for no limit. 
Note that the threshold may be -# exceeded by 50% before the limit is enforced. - -UML_LIMIT_NUM_FIELDS = 10 - -# If set to YES, the inheritance and collaboration graphs will show the -# relations between templates and their instances. - -TEMPLATE_RELATIONS = NO - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT -# tags are set to YES then doxygen will generate a graph for each documented -# file showing the direct and indirect include dependencies of the file with -# other documented files. - -INCLUDE_GRAPH = YES - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and -# HAVE_DOT tags are set to YES then doxygen will generate a graph for each -# documented header file showing the documented files that directly or -# indirectly include this file. - -INCLUDED_BY_GRAPH = YES - -# If the CALL_GRAPH and HAVE_DOT options are set to YES then -# doxygen will generate a call dependency graph for every global function -# or class method. Note that enabling this option will significantly increase -# the time of a run. So in most cases it will be better to enable call graphs -# for selected functions only using the \callgraph command. - -CALL_GRAPH = NO - -# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then -# doxygen will generate a caller dependency graph for every global function -# or class method. Note that enabling this option will significantly increase -# the time of a run. So in most cases it will be better to enable caller -# graphs for selected functions only using the \callergraph command. - -CALLER_GRAPH = NO - -# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen -# will generate a graphical hierarchy of all classes instead of a textual one. - -GRAPHICAL_HIERARCHY = YES - -# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES -# then doxygen will show the dependencies a directory has on other directories -# in a graphical way. The dependency relations are determined by the #include -# relations between the files in the directories. - -DIRECTORY_GRAPH = YES - -# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images -# generated by dot. Possible values are svg, png, jpg, or gif. -# If left blank png will be used. If you choose svg you need to set -# HTML_FILE_EXTENSION to xhtml in order to make the SVG files -# visible in IE 9+ (other browsers do not have this requirement). - -DOT_IMAGE_FORMAT = png - -# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to -# enable generation of interactive SVG images that allow zooming and panning. -# Note that this requires a modern browser other than Internet Explorer. -# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you -# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files -# visible. Older versions of IE do not have SVG support. - -INTERACTIVE_SVG = NO - -# The tag DOT_PATH can be used to specify the path where the dot tool can be -# found. If left blank, it is assumed the dot tool can be found in the path. - -DOT_PATH = - -# The DOTFILE_DIRS tag can be used to specify one or more directories that -# contain dot files that are included in the documentation (see the -# \dotfile command). - -DOTFILE_DIRS = - -# The MSCFILE_DIRS tag can be used to specify one or more directories that -# contain msc files that are included in the documentation (see the -# \mscfile command). 
- -MSCFILE_DIRS = - -# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of -# nodes that will be shown in the graph. If the number of nodes in a graph -# becomes larger than this value, doxygen will truncate the graph, which is -# visualized by representing a node as a red box. Note that doxygen if the -# number of direct children of the root node in a graph is already larger than -# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note -# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. - -DOT_GRAPH_MAX_NODES = 50 - -# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the -# graphs generated by dot. A depth value of 3 means that only nodes reachable -# from the root by following a path via at most 3 edges will be shown. Nodes -# that lay further from the root node will be omitted. Note that setting this -# option to 1 or 2 may greatly reduce the computation time needed for large -# code bases. Also note that the size of a graph can be further restricted by -# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. - -MAX_DOT_GRAPH_DEPTH = 0 - -# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent -# background. This is disabled by default, because dot on Windows does not -# seem to support this out of the box. Warning: Depending on the platform used, -# enabling this option may lead to badly anti-aliased labels on the edges of -# a graph (i.e. they become hard to read). - -DOT_TRANSPARENT = NO - -# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output -# files in one run (i.e. multiple -o and -T options on the command line). This -# makes dot run faster, but since only newer versions of dot (>1.8.10) -# support this, this feature is disabled by default. - -DOT_MULTI_TARGETS = YES - -# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will -# generate a legend page explaining the meaning of the various boxes and -# arrows in the dot generated graphs. - -GENERATE_LEGEND = YES - -# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will -# remove the intermediate dot files that are used to generate -# the various graphs. - -DOT_CLEANUP = YES diff --git a/docs/doxygen/doxyxml/__init__.py b/docs/doxygen/doxyxml/__init__.py index 0690874..381c2c3 100644 --- a/docs/doxygen/doxyxml/__init__.py +++ b/docs/doxygen/doxyxml/__init__.py @@ -4,20 +4,8 @@ # This file was generated by gr_modtool, a tool from the GNU Radio framework # This file is a part of gr-verilog # -# GNU Radio is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. +# SPDX-License-Identifier: GPL-3.0-or-later # -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. # """ Python interface to contents of doxygen xml documentation. @@ -64,10 +52,10 @@ u'Outputs the vital aadvark statistics.' 
""" -from __future__ import unicode_literals from .doxyindex import DoxyIndex, DoxyFunction, DoxyParam, DoxyClass, DoxyFile, DoxyNamespace, DoxyGroup, DoxyFriend, DoxyOther + def _test(): import os this_dir = os.path.dirname(globals()['__file__']) @@ -79,6 +67,6 @@ def _test(): import doctest return doctest.testmod() + if __name__ == "__main__": _test() - diff --git a/docs/doxygen/doxyxml/__init__.pyc b/docs/doxygen/doxyxml/__init__.pyc deleted file mode 100644 index 8bc3177984e81430be622eb4cb5d6f6eb59182e8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2208 zcmb_d&2Aev5T4bKf7TA#o`T$n^q>`7OZOr%;<|PaGj{K>tYHb|T)^ntu71FSmxHGD` zTa1uXyb{U0)KgyCWOzBqvY9MJmJP0kUJFyrNEWp=1`gQsamZwh=i}#IvXChAEh6+r z72-BoD-PrMwQ!8HQd*2<#&}sGuJvIlZbZrSbSZL&0ec&@LF|*uN>n2s*w90SEahLw z5mHlOFi?!{vKbun;)a{+!B?yq@RnX>Lg{9{@Ou@PYH$?-K)16-Q1UG$1=>d zStXR&SV!A5i$gp~~5f*zUWQz$cxXs$6l81DL@dwk00P16rGieut zVSI+w5+N__SBC&iSBxN|K4N+69GkW_x)#Psu}&Fd#xkMjxp05ynYya20!ogiMv7Th z2%8&O6LQ1&dtHdq`nIijRq>J?x5&3W_n=z{$FY@)$LbfnC|9jE86r@m$~9db7lP+X zX~%M2yT;%a){Wbmn#6aHVgYux5(wmrZ{8lCfA{vy`RP&ofi6fXl_0B)k_zuvh4=HO zHH@2Dk#o(Qz+Uu{QzQ5_#o%MYK!<_NW%E^6*pa<7V)#adm@0&@8!;4#5uwnWaHwoy5C8h-3K&{*qlBY>t-At#tc= zTX*v280QGDHrz?ct#vzVVJxG25}0#rGF#(t+z7umx~W~f7RHuQ#pduGhHCtTr!m{b z?*u|pdsi5 z!qD_|7@R4dgGb<@lI{!K`{e$#7IaPx=xHJ#b2ms5T zl7j@lcClwjjT>vmcEODp&sAf1A@vxoJFUy{+#D+LBXVPUA#m|;ng6FDo@G)=mt~`R z=-A|1(i?_yQqmN^x|Iw55%$&a2Mu-hT8a D?QnyJ diff --git a/docs/doxygen/doxyxml/base.py b/docs/doxygen/doxyxml/base.py index 47caa35..fab7c14 100644 --- a/docs/doxygen/doxyxml/base.py +++ b/docs/doxygen/doxyxml/base.py @@ -4,20 +4,8 @@ # This file was generated by gr_modtool, a tool from the GNU Radio framework # This file is a part of gr-verilog # -# GNU Radio is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. +# SPDX-License-Identifier: GPL-3.0-or-later # -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. # """ A base class is created. @@ -25,8 +13,6 @@ Classes based upon this are used to make more user-friendly interfaces to the doxygen xml docs than the generated classes provide. """ -from __future__ import print_function -from __future__ import unicode_literals import os import pdb @@ -97,8 +83,8 @@ def get_cls(self, mem): for cls in self.mem_classes: if cls.can_parse(mem): return cls - raise Exception(("Did not find a class for object '%s'." \ - % (mem.get_name()))) + raise Exception(("Did not find a class for object '%s'." 
+ % (mem.get_name()))) def convert_mem(self, mem): try: diff --git a/docs/doxygen/doxyxml/base.pyc b/docs/doxygen/doxyxml/base.pyc deleted file mode 100644 index 60ee156bbdff958e1441667dce4c8019e80c6cd7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 7979 zcmd5>O>-Pq6@5K38fiwdCEKxV#c|T4>_k8;QB*1}QpPy3^1+u9dQznz%5=4-TT;t2 z-EzMk#{wG)lpQRoVnG!Pb}U#>uwuoQ1sfLp0{#I%0M0qDduCiv1x1ajh|_Q0yZ!oo zoO|!N_dPZLey01k-~ImMzN!9e;`a_7`!R~xm|c{fi5q6%nO#&|yL=FJ|YZS$!yC&qkxcY#Co*5^6HFHv;g^6y7B@%w|wT0gaZ zzeu7oi8s34yA&l>?_+;-lxKcOdewq8j=t1HSb0;Zo>=kK}#e-u%&B~%5KnWyJX$TT;3`vuaRMO^H z+9H;fbZ#u2Gw~I{31!Ti1Zbi;Z$9>M9HP4uMQI)g$ zWJwcXgk?3))XfzWpH;VWnrNBB?w&XCii8&=JSX9Es-IW=qKPl4!>Wm&Q{|G0FRF6c z#H*^TnfQ__&ztzND%i-HDleG$c~!2Optt@404zHwwnj&Tv=2Rv@ugaXFV*1i#RsUH z^#+QP;xNpjVG@Q?Ul68#qvL?i^9sk~{B5sX!q0Zz8}$#qnGEkI#eci>fVLFOp*W?5 zwlVZ)@k}rD!>F)nwlDMkzgZ}W;_`$U)z0lrK54^foHJb|jcy3#ODJV~0gdcJCz z)K%u{O1rMin|cA1r@jk;&I9u^%i1%_SZhdpt@_BxeaQfKc* z1Dljwd3Xd&lei?@VM5+07sui-j>^ax=}^{T%QJi#r~NXlB^)$Nvyg_rpBKlPz-VwqKF@`NYU(5bl$cKL91G+T!5fBcN!UYMU1;_C7 z`-e%t-0+3j_Bz)ZtgtwbLb&T&X+DbM5G{3*BKiXxTw-Seu2_*`1+zz}h-l zElV3Rtt;MQ<2=wl;|XmV>KN_Xi4dR^qfVNJI3hARzu1y4#&x^`{!k5cp;xG*vvdKS zKpmAXT2o_upYvS3K)hSp*Ex5>n z*t)m0x7mZcj8O&d#DTs6k);(Fp!=$~=$-eLfbxphCq?n0`^lzBxTGqfe9|z*^@>nn z)I1(q@de?6$}0 z#@l-fjmOu>i-(BT_twaqV?Tul7T{0heh<55r*sah4WAgZinl+O;Z@tV*D=->+%U^a ze=p5qKdPNHFDgIuU%hT$-Ei2DRKb{`vdF}W5&Kc*=vS=TgR{p`5i7cho?uqW4a5l( z^e~JJYUU8uwv0vEdswffry3Z_vbW&19iFs#K6mk0i{kjB3QrgZK$=FBID_#TQ=YnD zinqxvaAJojeGA>@=z5Td19uSCTIlqkYl_TxPm#F8|K58)^UTK+!?Ra0`*vQTK8(u# z0W)sq-xUq6GpGerRKUK@_e6&;vpI238fI{f1s&DtCeI!v1yb&kIb1S(tZ5K^Hwi;2 z_GK*n#duBNeGx9Y3?f{p?f4ltWe|353haPy9pAZBMb8w`fWf1mIL{ORoDShEV9qq_ z4@PlfbJUfunZ}q~xF#73-4ctQUTeUQr&jn9n|NF?qevfo1_RPk>y>dudO%r zrgCw<-qykt2R|xCNx%$Wd6YbH=F4;$m{|K0%;Xm9e2&QR(PDp!iX*TSYy(cm29l0> zQ~Zc&ARO@V2NH^};Zs}pZdV=%^2SG?IQB*+sVIrk$!|T})OUz`_;gyjLxHBje5C(z zW6i14{sF7%`qh8$f&ur_M>xvXIiVl<%o)=>iwYclEmD|rRAlhiVRUrF%*ADsK952; z_~zrWoE$E_$4sXjkGSFcoTlo}H#~I5^$CnbkmxODn+%4@+^L*d>Bd;@A~~4>9=Vqm z!!XNTG91V@=&REiE|tNVb((Tn);P9d%OYRLA-z-j5AwfW|Y16eD{AKVOv;q*B>dQ`Qny=M)h7*1apDdBjGU3QVkc zgEsTshIxnz3p|2Lp4(!JH{XQ& z!%#_Hf^VIa6%cEs{uZ|rrTFYHPqW~Ql+z+$MFAtF8aNipsuf0PU(Nw_9VX%ncv#Z# z8tQ&pT8$33v2OGN>5bT|`V{?VtG}BV@Mm}|oeuhIEJKGEycZiULX&OO#)q|Wrt&3p zJa(21K&3$#tUHV$!uNKvi>5_2fE$r@yly( zDEu0~mJRye|GHQb$7wE>Bv1vP-=O;Jdij#BBXG$O`v|<=@C(`x?-=~Q6HoFj#;}qa#<9D^CG?{tc9%4KQQr{9iMxHl6)}S~o8%CO z16siwXa!_sS-HE{ac3G_U=k7BDg;!G2!P=>iznP6q8P@dbvHkpg-DLF^x-J6a+ly0 zeoW&yiqc}{++Q-q#^pJ#-D?NJw(M^qt=NP?^IU5Qx%aXT%D9Z68_`R~Y;u4GQdy^E zLXQxKQc>U%&pVzuUM1;LeLCsjbvSQ!ve$61SrEnaNin>K@XYFa?*mD8!DYA{6@ABt z9iqA|^V`4;n!SXkj)?dRg8o#1pS^UD_}8tw-{VSiTR~VJ1h1nVkO17Z12-c*DAf$U z!WVcdKI4f`fqCl0PI}`E%y?$65GGi)Mam%m2tgH@U&2{v6`J6soU~9AoBWRF8N!b4 ztEvELPzoaEl-Y~j6bD~J_tPW{T0M@S{Rh>R>Yum|8Zs{x|8@1WbGLdfnl+?RFH3~L zRLB0Qoiy=5@F5E+iH>uM+fhXLGv=@iaU|-0w~7B4SH|l-#ih!cb#?t0uGc|66@-ls`Y9i|MlGYuAqziB^ymxZ4R-7kOx~x?>koG43xq}y{xIuC4UonTJ zD;lLcU2l8+MKmUVhY4P0Q&ytY(m>p5S5ecgzAm4JNqLaR!4DoAWRd+P;FL+dMfoA9?OW-Zmc%> zZ|NNFP8RWWJFRx3Gv8TUZY{Sukas#uXP4$XE&N;VbWqN9T1t-ljN> z>?pop(zZdk8{)~rZY2E6s;E+7Z~QmeK#WSr-L+G|rJc^Mb+EkU{z7=0^rytH-B|LL K8Y_*J=05>nqIGxx diff --git a/docs/doxygen/doxyxml/doxyindex.py b/docs/doxygen/doxyxml/doxyindex.py index ba0d2b2..1e734cb 100644 --- a/docs/doxygen/doxyxml/doxyindex.py +++ b/docs/doxygen/doxyxml/doxyindex.py @@ -4,27 +4,13 @@ # This file was generated by gr_modtool, a tool from the GNU Radio framework # This file is a part of gr-verilog # -# GNU Radio is free software; you can redistribute it and/or modify 
-# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. +# SPDX-License-Identifier: GPL-3.0-or-later # -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. # """ Classes providing more user-friendly interfaces to the doxygen xml docs than the generated classes provide. """ -from __future__ import absolute_import -from __future__ import unicode_literals import os @@ -32,6 +18,7 @@ from .base import Base from .text import description + class DoxyIndex(Base): """ Parses a doxygen xml directory. @@ -60,17 +47,8 @@ def _parse(self): self._members.append(converted) -def generate_swig_doc_i(self): - """ - %feature("docstring") gr_make_align_on_samplenumbers_ss::align_state " - Wraps the C++: gr_align_on_samplenumbers_ss::align_state"; - """ - pass - - class DoxyCompMem(Base): - kind = None def __init__(self, *args, **kwargs): @@ -106,9 +84,11 @@ def set_parameters(self, data): class DoxyCompound(DoxyCompMem): pass + class DoxyMember(DoxyCompMem): pass + class DoxyFunction(DoxyMember): __module__ = "gnuradio.utils.doxyxml" @@ -129,9 +109,11 @@ def _parse(self): self._data['params'].append(DoxyParam(prm)) brief_description = property(lambda self: self.data()['brief_description']) - detailed_description = property(lambda self: self.data()['detailed_description']) + detailed_description = property( + lambda self: self.data()['detailed_description']) params = property(lambda self: self.data()['params']) + Base.mem_classes.append(DoxyFunction) @@ -156,9 +138,11 @@ def description(self): return '\n\n'.join(descriptions) brief_description = property(lambda self: self.data()['brief_description']) - detailed_description = property(lambda self: self.data()['detailed_description']) + detailed_description = property( + lambda self: self.data()['detailed_description']) name = property(lambda self: self.data()['declname']) + class DoxyParameterItem(DoxyMember): """A different representation of a parameter in Doxygen.""" @@ -200,9 +184,11 @@ def _parse(self): self.process_memberdefs() brief_description = property(lambda self: self.data()['brief_description']) - detailed_description = property(lambda self: self.data()['detailed_description']) + detailed_description = property( + lambda self: self.data()['detailed_description']) params = property(lambda self: self.data()['params']) + Base.mem_classes.append(DoxyClass) @@ -223,7 +209,9 @@ def _parse(self): self.process_memberdefs() brief_description = property(lambda self: self.data()['brief_description']) - detailed_description = property(lambda self: self.data()['detailed_description']) + detailed_description = property( + lambda self: self.data()['detailed_description']) + Base.mem_classes.append(DoxyFile) @@ -244,6 +232,7 @@ def _parse(self): return self.process_memberdefs() + Base.mem_classes.append(DoxyNamespace) @@ -287,6 +276,7 @@ class DoxyFriend(DoxyMember): kind = 'friend' + Base.mem_classes.append(DoxyFriend) @@ -301,4 +291,5 @@ class DoxyOther(Base): def can_parse(cls, obj): return obj.kind in cls.kinds + 
 Base.mem_classes.append(DoxyOther)
diff --git a/docs/doxygen/doxyxml/doxyindex.pyc b/docs/doxygen/doxyxml/doxyindex.pyc
deleted file mode 100644
index 145f113618255b64b74489c29dcd067666ea8f18..0000000000000000000000000000000000000000
GIT binary patch
diff --git a/docs/doxygen/doxyxml/generated/__init__.py b/docs/doxygen/doxyxml/generated/__init__.py
index 23095c1..3982397 100644
--- a/docs/doxygen/doxyxml/generated/__init__.py
+++ b/docs/doxygen/doxyxml/generated/__init__.py
@@ -5,4 +5,3 @@
 resultant classes are not very friendly to navigate so the rest of the
 doxyxml module processes them further.
 """
-from __future__ import unicode_literals
diff --git a/docs/doxygen/doxyxml/generated/__init__.pyc b/docs/doxygen/doxyxml/generated/__init__.pyc
deleted file mode 100644
index daace25e84c94146986cf20a3159b863bd6a74fb..0000000000000000000000000000000000000000
GIT binary patch
diff --git a/docs/doxygen/doxyxml/generated/compoundsuper.py b/docs/doxygen/doxyxml/generated/compoundsuper.py
index 6e984e1..40f548a 100644
--- a/docs/doxygen/doxyxml/generated/compoundsuper.py
+++ b/docs/doxygen/doxyxml/generated/compoundsuper.py
@@ -4,17 +4,12 @@
 #
 # Generated Thu Jun 11 18:44:25 2009 by generateDS.py.
 #
-from __future__ import print_function
-from __future__ import unicode_literals

 import sys
 from xml.dom import minidom
 from xml.dom import Node
-import six
-
-
 #
 # User methods
 #
@@ -29,12 +24,16 @@ class GeneratedsSuper(object):
     def format_string(self, input_data, input_name=''):
         return input_data
+
     def format_integer(self, input_data, input_name=''):
         return '%d' % input_data
+
     def format_float(self, input_data, input_name=''):
         return '%f' % input_data
+
     def format_double(self, input_data, input_name=''):
         return '%e' % input_data
+
     def format_boolean(self, input_data, input_name=''):
         return '%s' % input_data
@@ -46,9 +45,9 @@ def format_boolean(self, input_data, input_name=''):
 ## from IPython.Shell import IPShellEmbed
 ## args = ''
-## ipshell = IPShellEmbed(args,
+# ipshell = IPShellEmbed(args,
 ## banner = 'Dropping into IPython',
-## exit_msg = 'Leaving Interpreter, back to program.')
+# exit_msg = 'Leaving Interpreter, back to program.')

 # Then use the following line where and when you want to drop into the
 # IPython shell:
@@ -64,20 +63,23 @@ def format_boolean(self, input_data, input_name=''):
 # Support/utility functions.
 #

+
 def showIndent(outfile, level):
     for idx in range(level):
         outfile.write('    ')

+
 def quote_xml(inStr):
-    s1 = (isinstance(inStr, six.string_types) and inStr or
+    s1 = (isinstance(inStr, str) and inStr or
           '%s' % inStr)
     s1 = s1.replace('&', '&amp;')
     s1 = s1.replace('<', '&lt;')
     s1 = s1.replace('>', '&gt;')
     return s1

+
 def quote_attrib(inStr):
-    s1 = (isinstance(inStr, six.string_types) and inStr or
+    s1 = (isinstance(inStr, str) and inStr or
           '%s' % inStr)
     s1 = s1.replace('&', '&amp;')
     s1 = s1.replace('<', '&lt;')
@@ -91,6 +93,7 @@ def quote_attrib(inStr):
         s1 = '"%s"' % s1
     return s1

+
 def quote_python(inStr):
     s1 = inStr
     if s1.find("'") == -1:
@@ -122,26 +125,33 @@ class MixedContainer(object):
     TypeDecimal = 5
     TypeDouble = 6
     TypeBoolean = 7
+
     def __init__(self, category, content_type, name, value):
         self.category = category
         self.content_type = content_type
         self.name = name
         self.value = value
+
     def getCategory(self):
         return self.category
+
     def getContenttype(self, content_type):
         return self.content_type
+
     def getValue(self):
         return self.value
+
     def getName(self):
         return self.name
+
     def export(self, outfile, level, name, namespace):
         if self.category == MixedContainer.CategoryText:
             outfile.write(self.value)
         elif self.category == MixedContainer.CategorySimple:
             self.exportSimple(outfile, level, name)
         else:    # category == MixedContainer.CategoryComplex
-            self.value.export(outfile, level, namespace,name)
+            self.value.export(outfile, level, namespace, name)
+
     def exportSimple(self, outfile, level, name):
         if self.content_type == MixedContainer.TypeString:
             outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
@@ -153,19 +163,20 @@ def exportSimple(self, outfile, level, name):
             outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
         elif self.content_type == MixedContainer.TypeDouble:
             outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
+
     def exportLiteral(self, outfile, level, name):
         if self.category == MixedContainer.CategoryText:
             showIndent(outfile, level)
-            outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
-                (self.category, self.content_type, self.name, self.value))
+            outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' %
+                          (self.category, self.content_type, self.name, self.value))
         elif self.category == MixedContainer.CategorySimple:
             showIndent(outfile, level)
-            outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \
-                (self.category, self.content_type, self.name, self.value))
+            outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' %
+                          (self.category, self.content_type, self.name, self.value))
         else:    # category == MixedContainer.CategoryComplex
             showIndent(outfile, level)
-            outfile.write('MixedContainer(%d, %d, "%s",\n' % \
-                (self.category, self.content_type, self.name,))
+            outfile.write('MixedContainer(%d, %d, "%s",\n' %
+                          (self.category, self.content_type, self.name,))
             self.value.exportLiteral(outfile, level + 1)
             showIndent(outfile, level)
             outfile.write(')\n')
@@ -176,6 +187,7 @@ def __init__(self, name='', data_type='', container=0):
         self.name = name
         self.data_type = data_type
         self.container = container
+
     def set_name(self, name): self.name = name
     def get_name(self): return self.name
     def set_data_type(self, data_type): self.data_type = data_type
@@ -191,9 +203,11 @@ def get_container(self): return self.container
 class DoxygenType(GeneratedsSuper):
     subclass = None
     superclass = None
+
     def __init__(self, version=None, compounddef=None):
         self.version = version
         self.compounddef = compounddef
+
     def factory(*args_, **kwargs_):
         if DoxygenType.subclass:
             return DoxygenType.subclass(*args_, **kwargs_)
@@ -204,6 +218,7 @@ def get_compounddef(self): return self.compounddef
     def set_compounddef(self, compounddef): self.compounddef = compounddef
     def get_version(self): return self.version
     def set_version(self, version): self.version = version
+
     def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''):
         showIndent(outfile, level)
         outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, ))
@@ -215,27 +230,34 @@ def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacede
             outfile.write('</%s%s>\n' % (namespace_, name_))
         else:
             outfile.write(' />\n')
+
     def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'):
         outfile.write(' version=%s' % (quote_attrib(self.version), ))
+
     def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'):
         if self.compounddef:
-            self.compounddef.export(outfile, level, namespace_, name_='compounddef')
+            self.compounddef.export(
+                outfile, level, namespace_, name_='compounddef')
+
     def hasContent_(self):
         if (
             self.compounddef is not None
-            ):
+        ):
             return True
         else:
             return False
+
     def exportLiteral(self, outfile, level, name_='DoxygenType'):
         level += 1
         self.exportLiteralAttributes(outfile, level, name_)
         if self.hasContent_():
             self.exportLiteralChildren(outfile, level, name_)
+
     def exportLiteralAttributes(self, outfile, level, name_):
         if self.version is not None:
             showIndent(outfile, level)
             outfile.write('version = "%s",\n' % (self.version,))
+
     def exportLiteralChildren(self, outfile, level, name_):
         if self.compounddef:
             showIndent(outfile, level)
@@ -243,18 +265,21 @@ def exportLiteralChildren(self, outfile, level, name_):
             self.compounddef.exportLiteral(outfile, level, name_='compounddef')
             showIndent(outfile, level)
             outfile.write('),\n')
+
     def build(self, node_):
         attrs = node_.attributes
         self.buildAttributes(attrs)
         for child_ in node_.childNodes:
             nodeName_ = child_.nodeName.split(':')[-1]
             self.buildChildren(child_, nodeName_)
+
     def buildAttributes(self, attrs):
         if attrs.get('version'):
             self.version = attrs.get('version').value
+
     def buildChildren(self, child_, nodeName_):
         if child_.nodeType == Node.ELEMENT_NODE and \
-            nodeName_ == 'compounddef':
+                nodeName_ == 'compounddef':
             obj_ = compounddefType.factory()
             obj_.build(child_)
             self.set_compounddef(obj_)
@@ -264,6 +289,7 @@ def buildChildren(self, child_,
nodeName_): class compounddefType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, kind=None, prot=None, id=None, compoundname=None, title=None, basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None): self.kind = kind self.prot = prot @@ -324,6 +350,7 @@ def __init__(self, kind=None, prot=None, id=None, compoundname=None, title=None, self.programlisting = programlisting self.location = location self.listofallmembers = listofallmembers + def factory(*args_, **kwargs_): if compounddefType.subclass: return compounddefType.subclass(*args_, **kwargs_) @@ -335,13 +362,23 @@ def set_compoundname(self, compoundname): self.compoundname = compoundname def get_title(self): return self.title def set_title(self, title): self.title = title def get_basecompoundref(self): return self.basecompoundref - def set_basecompoundref(self, basecompoundref): self.basecompoundref = basecompoundref + def set_basecompoundref( + self, basecompoundref): self.basecompoundref = basecompoundref + def add_basecompoundref(self, value): self.basecompoundref.append(value) - def insert_basecompoundref(self, index, value): self.basecompoundref[index] = value + def insert_basecompoundref( + self, index, value): self.basecompoundref[index] = value + def get_derivedcompoundref(self): return self.derivedcompoundref - def set_derivedcompoundref(self, derivedcompoundref): self.derivedcompoundref = derivedcompoundref - def add_derivedcompoundref(self, value): self.derivedcompoundref.append(value) - def insert_derivedcompoundref(self, index, value): self.derivedcompoundref[index] = value + + def set_derivedcompoundref( + self, derivedcompoundref): self.derivedcompoundref = derivedcompoundref + + def add_derivedcompoundref( + self, value): self.derivedcompoundref.append(value) + def insert_derivedcompoundref( + self, index, value): self.derivedcompoundref[index] = value + def get_includes(self): return self.includes def set_includes(self, includes): self.includes = includes def add_includes(self, value): self.includes.append(value) @@ -353,7 +390,9 @@ def insert_includedby(self, index, value): self.includedby[index] = value def get_incdepgraph(self): return self.incdepgraph def set_incdepgraph(self, incdepgraph): self.incdepgraph = incdepgraph def get_invincdepgraph(self): return self.invincdepgraph - def set_invincdepgraph(self, invincdepgraph): self.invincdepgraph = invincdepgraph + def set_invincdepgraph( + self, invincdepgraph): self.invincdepgraph = invincdepgraph + def get_innerdir(self): return self.innerdir def set_innerdir(self, innerdir): self.innerdir = innerdir def add_innerdir(self, value): self.innerdir.append(value) @@ -367,9 +406,13 @@ def set_innerclass(self, innerclass): self.innerclass = innerclass def add_innerclass(self, value): self.innerclass.append(value) def insert_innerclass(self, index, value): self.innerclass[index] = value def get_innernamespace(self): return self.innernamespace - def set_innernamespace(self, innernamespace): self.innernamespace = innernamespace + def set_innernamespace( + self, innernamespace): self.innernamespace = innernamespace + def add_innernamespace(self, value): self.innernamespace.append(value) - def 
insert_innernamespace(self, index, value): self.innernamespace[index] = value + def insert_innernamespace( + self, index, value): self.innernamespace[index] = value + def get_innerpage(self): return self.innerpage def set_innerpage(self, innerpage): self.innerpage = innerpage def add_innerpage(self, value): self.innerpage.append(value) @@ -379,35 +422,51 @@ def set_innergroup(self, innergroup): self.innergroup = innergroup def add_innergroup(self, value): self.innergroup.append(value) def insert_innergroup(self, index, value): self.innergroup[index] = value def get_templateparamlist(self): return self.templateparamlist - def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist + def set_templateparamlist( + self, templateparamlist): self.templateparamlist = templateparamlist + def get_sectiondef(self): return self.sectiondef def set_sectiondef(self, sectiondef): self.sectiondef = sectiondef def add_sectiondef(self, value): self.sectiondef.append(value) def insert_sectiondef(self, index, value): self.sectiondef[index] = value def get_briefdescription(self): return self.briefdescription - def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription + def set_briefdescription( + self, briefdescription): self.briefdescription = briefdescription + def get_detaileddescription(self): return self.detaileddescription - def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription + def set_detaileddescription( + self, detaileddescription): self.detaileddescription = detaileddescription + def get_inheritancegraph(self): return self.inheritancegraph - def set_inheritancegraph(self, inheritancegraph): self.inheritancegraph = inheritancegraph + def set_inheritancegraph( + self, inheritancegraph): self.inheritancegraph = inheritancegraph + def get_collaborationgraph(self): return self.collaborationgraph - def set_collaborationgraph(self, collaborationgraph): self.collaborationgraph = collaborationgraph + def set_collaborationgraph( + self, collaborationgraph): self.collaborationgraph = collaborationgraph + def get_programlisting(self): return self.programlisting - def set_programlisting(self, programlisting): self.programlisting = programlisting + def set_programlisting( + self, programlisting): self.programlisting = programlisting + def get_location(self): return self.location def set_location(self, location): self.location = location def get_listofallmembers(self): return self.listofallmembers - def set_listofallmembers(self, listofallmembers): self.listofallmembers = listofallmembers + def set_listofallmembers( + self, listofallmembers): self.listofallmembers = listofallmembers + def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def get_prot(self): return self.prot def set_prot(self, prot): self.prot = prot def get_id(self): return self.id def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='compounddefType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='compounddefType') + self.exportAttributes(outfile, level, namespace_, + name_='compounddefType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) @@ -415,32 +474,41 @@ def export(self, outfile, level, namespace_='', name_='compounddefType', namespa outfile.write('\n' % (namespace_, name_)) else: 
outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='compounddefType'): if self.kind is not None: outfile.write(' kind=%s' % (quote_attrib(self.kind), )) if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + outfile.write(' id=%s' % (self.format_string(quote_attrib( + self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='compounddefType'): if self.compoundname is not None: showIndent(outfile, level) - outfile.write('<%scompoundname>%s\n' % (namespace_, self.format_string(quote_xml(self.compoundname).encode(ExternalEncoding), input_name='compoundname'), namespace_)) + outfile.write('<%scompoundname>%s\n' % (namespace_, self.format_string( + quote_xml(self.compoundname).encode(ExternalEncoding), input_name='compoundname'), namespace_)) if self.title is not None: showIndent(outfile, level) - outfile.write('<%stitle>%s\n' % (namespace_, self.format_string(quote_xml(self.title).encode(ExternalEncoding), input_name='title'), namespace_)) + outfile.write('<%stitle>%s\n' % (namespace_, self.format_string( + quote_xml(self.title).encode(ExternalEncoding), input_name='title'), namespace_)) for basecompoundref_ in self.basecompoundref: - basecompoundref_.export(outfile, level, namespace_, name_='basecompoundref') + basecompoundref_.export( + outfile, level, namespace_, name_='basecompoundref') for derivedcompoundref_ in self.derivedcompoundref: - derivedcompoundref_.export(outfile, level, namespace_, name_='derivedcompoundref') + derivedcompoundref_.export( + outfile, level, namespace_, name_='derivedcompoundref') for includes_ in self.includes: includes_.export(outfile, level, namespace_, name_='includes') for includedby_ in self.includedby: includedby_.export(outfile, level, namespace_, name_='includedby') if self.incdepgraph: - self.incdepgraph.export(outfile, level, namespace_, name_='incdepgraph') + self.incdepgraph.export( + outfile, level, namespace_, name_='incdepgraph') if self.invincdepgraph: - self.invincdepgraph.export(outfile, level, namespace_, name_='invincdepgraph') + self.invincdepgraph.export( + outfile, level, namespace_, name_='invincdepgraph') for innerdir_ in self.innerdir: innerdir_.export(outfile, level, namespace_, name_='innerdir') for innerfile_ in self.innerfile: @@ -448,29 +516,38 @@ def exportChildren(self, outfile, level, namespace_='', name_='compounddefType') for innerclass_ in self.innerclass: innerclass_.export(outfile, level, namespace_, name_='innerclass') for innernamespace_ in self.innernamespace: - innernamespace_.export(outfile, level, namespace_, name_='innernamespace') + innernamespace_.export( + outfile, level, namespace_, name_='innernamespace') for innerpage_ in self.innerpage: innerpage_.export(outfile, level, namespace_, name_='innerpage') for innergroup_ in self.innergroup: innergroup_.export(outfile, level, namespace_, name_='innergroup') if self.templateparamlist: - self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist') + self.templateparamlist.export( + outfile, level, namespace_, name_='templateparamlist') for sectiondef_ in self.sectiondef: sectiondef_.export(outfile, level, namespace_, name_='sectiondef') if self.briefdescription: - self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') + self.briefdescription.export( + 
outfile, level, namespace_, name_='briefdescription') if self.detaileddescription: - self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription') + self.detaileddescription.export( + outfile, level, namespace_, name_='detaileddescription') if self.inheritancegraph: - self.inheritancegraph.export(outfile, level, namespace_, name_='inheritancegraph') + self.inheritancegraph.export( + outfile, level, namespace_, name_='inheritancegraph') if self.collaborationgraph: - self.collaborationgraph.export(outfile, level, namespace_, name_='collaborationgraph') + self.collaborationgraph.export( + outfile, level, namespace_, name_='collaborationgraph') if self.programlisting: - self.programlisting.export(outfile, level, namespace_, name_='programlisting') + self.programlisting.export( + outfile, level, namespace_, name_='programlisting') if self.location: self.location.export(outfile, level, namespace_, name_='location') if self.listofallmembers: - self.listofallmembers.export(outfile, level, namespace_, name_='listofallmembers') + self.listofallmembers.export( + outfile, level, namespace_, name_='listofallmembers') + def hasContent_(self): if ( self.compoundname is not None or @@ -496,15 +573,17 @@ def hasContent_(self): self.programlisting is not None or self.location is not None or self.listofallmembers is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='compounddefType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.kind is not None: showIndent(outfile, level) @@ -515,9 +594,11 @@ def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) - outfile.write('compoundname=%s,\n' % quote_python(self.compoundname).encode(ExternalEncoding)) + outfile.write('compoundname=%s,\n' % quote_python( + self.compoundname).encode(ExternalEncoding)) if self.title: showIndent(outfile, level) outfile.write('title=model_.xsd_string(\n') @@ -530,7 +611,8 @@ def exportLiteralChildren(self, outfile, level, name_): for basecompoundref in self.basecompoundref: showIndent(outfile, level) outfile.write('model_.basecompoundref(\n') - basecompoundref.exportLiteral(outfile, level, name_='basecompoundref') + basecompoundref.exportLiteral( + outfile, level, name_='basecompoundref') showIndent(outfile, level) outfile.write('),\n') level -= 1 @@ -542,7 +624,8 @@ def exportLiteralChildren(self, outfile, level, name_): for derivedcompoundref in self.derivedcompoundref: showIndent(outfile, level) outfile.write('model_.derivedcompoundref(\n') - derivedcompoundref.exportLiteral(outfile, level, name_='derivedcompoundref') + derivedcompoundref.exportLiteral( + outfile, level, name_='derivedcompoundref') showIndent(outfile, level) outfile.write('),\n') level -= 1 @@ -581,7 +664,8 @@ def exportLiteralChildren(self, outfile, level, name_): if self.invincdepgraph: showIndent(outfile, level) outfile.write('invincdepgraph=model_.graphType(\n') - self.invincdepgraph.exportLiteral(outfile, level, name_='invincdepgraph') + self.invincdepgraph.exportLiteral( + outfile, level, name_='invincdepgraph') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) @@ -626,7 +710,8 @@ def exportLiteralChildren(self, outfile, level, 
name_): for innernamespace in self.innernamespace: showIndent(outfile, level) outfile.write('model_.innernamespace(\n') - innernamespace.exportLiteral(outfile, level, name_='innernamespace') + innernamespace.exportLiteral( + outfile, level, name_='innernamespace') showIndent(outfile, level) outfile.write('),\n') level -= 1 @@ -659,7 +744,8 @@ def exportLiteralChildren(self, outfile, level, name_): if self.templateparamlist: showIndent(outfile, level) outfile.write('templateparamlist=model_.templateparamlistType(\n') - self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist') + self.templateparamlist.exportLiteral( + outfile, level, name_='templateparamlist') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) @@ -677,31 +763,36 @@ def exportLiteralChildren(self, outfile, level, name_): if self.briefdescription: showIndent(outfile, level) outfile.write('briefdescription=model_.descriptionType(\n') - self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') + self.briefdescription.exportLiteral( + outfile, level, name_='briefdescription') showIndent(outfile, level) outfile.write('),\n') if self.detaileddescription: showIndent(outfile, level) outfile.write('detaileddescription=model_.descriptionType(\n') - self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription') + self.detaileddescription.exportLiteral( + outfile, level, name_='detaileddescription') showIndent(outfile, level) outfile.write('),\n') if self.inheritancegraph: showIndent(outfile, level) outfile.write('inheritancegraph=model_.graphType(\n') - self.inheritancegraph.exportLiteral(outfile, level, name_='inheritancegraph') + self.inheritancegraph.exportLiteral( + outfile, level, name_='inheritancegraph') showIndent(outfile, level) outfile.write('),\n') if self.collaborationgraph: showIndent(outfile, level) outfile.write('collaborationgraph=model_.graphType(\n') - self.collaborationgraph.exportLiteral(outfile, level, name_='collaborationgraph') + self.collaborationgraph.exportLiteral( + outfile, level, name_='collaborationgraph') showIndent(outfile, level) outfile.write('),\n') if self.programlisting: showIndent(outfile, level) outfile.write('programlisting=model_.listingType(\n') - self.programlisting.exportLiteral(outfile, level, name_='programlisting') + self.programlisting.exportLiteral( + outfile, level, name_='programlisting') showIndent(outfile, level) outfile.write('),\n') if self.location: @@ -713,15 +804,18 @@ def exportLiteralChildren(self, outfile, level, name_): if self.listofallmembers: showIndent(outfile, level) outfile.write('listofallmembers=model_.listofallmembersType(\n') - self.listofallmembers.exportLiteral(outfile, level, name_='listofallmembers') + self.listofallmembers.exportLiteral( + outfile, level, name_='listofallmembers') showIndent(outfile, level) outfile.write('),\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value @@ -729,120 +823,121 @@ def buildAttributes(self, attrs): self.prot = attrs.get('prot').value if attrs.get('id'): self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'compoundname': + nodeName_ == 'compoundname': compoundname_ = '' for text__content_ in child_.childNodes: 
compoundname_ += text__content_.nodeValue self.compoundname = compoundname_ elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': + nodeName_ == 'title': obj_ = docTitleType.factory() obj_.build(child_) self.set_title(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'basecompoundref': + nodeName_ == 'basecompoundref': obj_ = compoundRefType.factory() obj_.build(child_) self.basecompoundref.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'derivedcompoundref': + nodeName_ == 'derivedcompoundref': obj_ = compoundRefType.factory() obj_.build(child_) self.derivedcompoundref.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'includes': + nodeName_ == 'includes': obj_ = incType.factory() obj_.build(child_) self.includes.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'includedby': + nodeName_ == 'includedby': obj_ = incType.factory() obj_.build(child_) self.includedby.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'incdepgraph': + nodeName_ == 'incdepgraph': obj_ = graphType.factory() obj_.build(child_) self.set_incdepgraph(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'invincdepgraph': + nodeName_ == 'invincdepgraph': obj_ = graphType.factory() obj_.build(child_) self.set_invincdepgraph(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'innerdir': + nodeName_ == 'innerdir': obj_ = refType.factory() obj_.build(child_) self.innerdir.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'innerfile': + nodeName_ == 'innerfile': obj_ = refType.factory() obj_.build(child_) self.innerfile.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'innerclass': + nodeName_ == 'innerclass': obj_ = refType.factory() obj_.build(child_) self.innerclass.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'innernamespace': + nodeName_ == 'innernamespace': obj_ = refType.factory() obj_.build(child_) self.innernamespace.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'innerpage': + nodeName_ == 'innerpage': obj_ = refType.factory() obj_.build(child_) self.innerpage.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'innergroup': + nodeName_ == 'innergroup': obj_ = refType.factory() obj_.build(child_) self.innergroup.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'templateparamlist': + nodeName_ == 'templateparamlist': obj_ = templateparamlistType.factory() obj_.build(child_) self.set_templateparamlist(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sectiondef': + nodeName_ == 'sectiondef': obj_ = sectiondefType.factory() obj_.build(child_) self.sectiondef.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'briefdescription': + nodeName_ == 'briefdescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_briefdescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'detaileddescription': + nodeName_ == 'detaileddescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_detaileddescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'inheritancegraph': + nodeName_ == 'inheritancegraph': obj_ = graphType.factory() obj_.build(child_) self.set_inheritancegraph(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'collaborationgraph': + nodeName_ 
== 'collaborationgraph': obj_ = graphType.factory() obj_.build(child_) self.set_collaborationgraph(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'programlisting': + nodeName_ == 'programlisting': obj_ = listingType.factory() obj_.build(child_) self.set_programlisting(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'location': + nodeName_ == 'location': obj_ = locationType.factory() obj_.build(child_) self.set_location(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'listofallmembers': + nodeName_ == 'listofallmembers': obj_ = listofallmembersType.factory() obj_.build(child_) self.set_listofallmembers(obj_) @@ -852,11 +947,13 @@ def buildChildren(self, child_, nodeName_): class listofallmembersType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, member=None): if member is None: self.member = [] else: self.member = member + def factory(*args_, **kwargs_): if listofallmembersType.subclass: return listofallmembersType.subclass(*args_, **kwargs_) @@ -867,10 +964,12 @@ def get_member(self): return self.member def set_member(self, member): self.member = member def add_member(self, value): self.member.append(value) def insert_member(self, index, value): self.member[index] = value + def export(self, outfile, level, namespace_='', name_='listofallmembersType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='listofallmembersType') + self.exportAttributes(outfile, level, namespace_, + name_='listofallmembersType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) @@ -878,25 +977,31 @@ def export(self, outfile, level, namespace_='', name_='listofallmembersType', na outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='listofallmembersType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='listofallmembersType'): for member_ in self.member: member_.export(outfile, level, namespace_, name_='member') + def hasContent_(self): if ( self.member is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='listofallmembersType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('member=[\n') @@ -910,17 +1015,20 @@ def exportLiteralChildren(self, outfile, level, name_): level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'member': + nodeName_ == 'member': obj_ = memberRefType.factory() obj_.build(child_) self.member.append(obj_) @@ -930,6 +1038,7 @@ def buildChildren(self, child_, nodeName_): class memberRefType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope=None, name=None): self.virt = virt self.prot = prot @@ -937,6 +1046,7 @@ def 
__init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope= self.ambiguityscope = ambiguityscope self.scope = scope self.name = name + def factory(*args_, **kwargs_): if memberRefType.subclass: return memberRefType.subclass(*args_, **kwargs_) @@ -954,11 +1064,15 @@ def set_prot(self, prot): self.prot = prot def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def get_ambiguityscope(self): return self.ambiguityscope - def set_ambiguityscope(self, ambiguityscope): self.ambiguityscope = ambiguityscope + + def set_ambiguityscope( + self, ambiguityscope): self.ambiguityscope = ambiguityscope + def export(self, outfile, level, namespace_='', name_='memberRefType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='memberRefType') + self.exportAttributes(outfile, level, namespace_, + name_='memberRefType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) @@ -966,35 +1080,44 @@ def export(self, outfile, level, namespace_='', name_='memberRefType', namespace outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='memberRefType'): if self.virt is not None: outfile.write(' virt=%s' % (quote_attrib(self.virt), )) if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + outfile.write(' refid=%s' % (self.format_string(quote_attrib( + self.refid).encode(ExternalEncoding), input_name='refid'), )) if self.ambiguityscope is not None: - outfile.write(' ambiguityscope=%s' % (self.format_string(quote_attrib(self.ambiguityscope).encode(ExternalEncoding), input_name='ambiguityscope'), )) + outfile.write(' ambiguityscope=%s' % (self.format_string(quote_attrib( + self.ambiguityscope).encode(ExternalEncoding), input_name='ambiguityscope'), )) + def exportChildren(self, outfile, level, namespace_='', name_='memberRefType'): if self.scope is not None: showIndent(outfile, level) - outfile.write('<%sscope>%s\n' % (namespace_, self.format_string(quote_xml(self.scope).encode(ExternalEncoding), input_name='scope'), namespace_)) + outfile.write('<%sscope>%s\n' % (namespace_, self.format_string( + quote_xml(self.scope).encode(ExternalEncoding), input_name='scope'), namespace_)) if self.name is not None: showIndent(outfile, level) - outfile.write('<%sname>%s\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) + outfile.write('<%sname>%s\n' % (namespace_, self.format_string( + quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) + def hasContent_(self): if ( self.scope is not None or self.name is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='memberRefType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.virt is not None: showIndent(outfile, level) @@ -1008,17 +1131,22 @@ def exportLiteralAttributes(self, outfile, level, name_): if self.ambiguityscope is not None: showIndent(outfile, level) outfile.write('ambiguityscope = %s,\n' % (self.ambiguityscope,)) 
+ def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) - outfile.write('scope=%s,\n' % quote_python(self.scope).encode(ExternalEncoding)) + outfile.write('scope=%s,\n' % quote_python( + self.scope).encode(ExternalEncoding)) showIndent(outfile, level) - outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) + outfile.write('name=%s,\n' % quote_python( + self.name).encode(ExternalEncoding)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('virt'): self.virt = attrs.get('virt').value @@ -1028,15 +1156,16 @@ def buildAttributes(self, attrs): self.refid = attrs.get('refid').value if attrs.get('ambiguityscope'): self.ambiguityscope = attrs.get('ambiguityscope').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'scope': + nodeName_ == 'scope': scope_ = '' for text__content_ in child_.childNodes: scope_ += text__content_.nodeValue self.scope = scope_ elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'name': + nodeName_ == 'name': name_ = '' for text__content_ in child_.childNodes: name_ += text__content_.nodeValue @@ -1047,8 +1176,10 @@ def buildChildren(self, child_, nodeName_): class scope(GeneratedsSuper): subclass = None superclass = None + def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): if scope.subclass: return scope.subclass(*args_, **kwargs_) @@ -1057,6 +1188,7 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='scope', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -1068,33 +1200,40 @@ def export(self, outfile, level, namespace_='', name_='scope', namespacedef_='') outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='scope'): pass + def exportChildren(self, outfile, level, namespace_='', name_='scope'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='scope'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -1102,21 +1241,25 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == 
Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class scope class name(GeneratedsSuper): subclass = None superclass = None + def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): if name.subclass: return name.subclass(*args_, **kwargs_) @@ -1125,6 +1268,7 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='name', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -1136,33 +1280,40 @@ def export(self, outfile, level, namespace_='', name_='name', namespacedef_=''): outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='name'): pass + def exportChildren(self, outfile, level, namespace_='', name_='name'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='name'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -1170,19 +1321,22 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class name class compoundRefType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): self.virt = virt self.prot = prot @@ -1195,6 +1349,7 @@ def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=No self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if compoundRefType.subclass: return compoundRefType.subclass(*args_, **kwargs_) @@ -1209,40 +1364,48 @@ def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='compoundRefType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='compoundRefType') + self.exportAttributes(outfile, level, namespace_, + name_='compoundRefType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) 
outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='compoundRefType'): if self.virt is not None: outfile.write(' virt=%s' % (quote_attrib(self.virt), )) if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + outfile.write(' refid=%s' % (self.format_string(quote_attrib( + self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='compoundRefType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='compoundRefType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.virt is not None: showIndent(outfile, level) @@ -1253,9 +1416,11 @@ def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -1263,6 +1428,7 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('virt'): self.virt = attrs.get('virt').value @@ -1270,21 +1436,23 @@ def buildAttributes(self, attrs): self.prot = attrs.get('prot').value if attrs.get('refid'): self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class compoundRefType class reimplementType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None): self.refid = refid if mixedclass_ is None: @@ -1295,6 +1463,7 @@ def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None): self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if reimplementType.subclass: return reimplementType.subclass(*args_, **kwargs_) @@ -1305,43 +1474,53 @@ def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='reimplementType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, 
namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='reimplementType') + self.exportAttributes(outfile, level, namespace_, + name_='reimplementType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='reimplementType'): if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + outfile.write(' refid=%s' % (self.format_string(quote_attrib( + self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='reimplementType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='reimplementType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -1349,24 +1528,27 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('refid'): self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class reimplementType class incType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None): self.local = local self.refid = refid @@ -1378,6 +1560,7 @@ def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, conten self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if incType.subclass: return incType.subclass(*args_, **kwargs_) @@ -1390,6 +1573,7 @@ def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='incType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -1397,31 +1581,37 @@ def export(self, outfile, level, namespace_='', name_='incType', namespacedef_=' outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % 
(namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='incType'): if self.local is not None: outfile.write(' local=%s' % (quote_attrib(self.local), )) if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + outfile.write(' refid=%s' % (self.format_string(quote_attrib( + self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='incType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='incType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.local is not None: showIndent(outfile, level) @@ -1429,9 +1619,11 @@ def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -1439,26 +1631,29 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('local'): self.local = attrs.get('local').value if attrs.get('refid'): self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class incType class refType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): self.prot = prot self.refid = refid @@ -1470,6 +1665,7 @@ def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if refType.subclass: return refType.subclass(*args_, **kwargs_) @@ -1482,6 +1678,7 @@ def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='refType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -1489,31 +1686,37 @@ def export(self, outfile, level, namespace_='', name_='refType', namespacedef_=' outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def 
exportAttributes(self, outfile, level, namespace_='', name_='refType'): if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + outfile.write(' refid=%s' % (self.format_string(quote_attrib( + self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='refType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='refType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.prot is not None: showIndent(outfile, level) @@ -1521,9 +1724,11 @@ def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -1531,26 +1736,29 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('prot'): self.prot = attrs.get('prot').value if attrs.get('refid'): self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class refType class refTextType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None): self.refid = refid self.kindref = kindref @@ -1563,6 +1771,7 @@ def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedcl self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if refTextType.subclass: return refTextType.subclass(*args_, **kwargs_) @@ -1577,6 +1786,7 @@ def get_external(self): return self.external def set_external(self, external): self.external = external def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='refTextType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -1584,33 +1794,40 @@ def export(self, outfile, level, namespace_='', name_='refTextType', namespacede outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % 
(namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='refTextType'): if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + outfile.write(' refid=%s' % (self.format_string(quote_attrib( + self.refid).encode(ExternalEncoding), input_name='refid'), )) if self.kindref is not None: outfile.write(' kindref=%s' % (quote_attrib(self.kindref), )) if self.external is not None: - outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) + outfile.write(' external=%s' % (self.format_string(quote_attrib( + self.external).encode(ExternalEncoding), input_name='external'), )) + def exportChildren(self, outfile, level, namespace_='', name_='refTextType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='refTextType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) @@ -1621,9 +1838,11 @@ def exportLiteralAttributes(self, outfile, level, name_): if self.external is not None: showIndent(outfile, level) outfile.write('external = %s,\n' % (self.external,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -1631,6 +1850,7 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('refid'): self.refid = attrs.get('refid').value @@ -1638,21 +1858,23 @@ def buildAttributes(self, attrs): self.kindref = attrs.get('kindref').value if attrs.get('external'): self.external = attrs.get('external').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class refTextType class sectiondefType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, kind=None, header=None, description=None, memberdef=None): self.kind = kind self.header = header @@ -1661,6 +1883,7 @@ def __init__(self, kind=None, header=None, description=None, memberdef=None): self.memberdef = [] else: self.memberdef = memberdef + def factory(*args_, **kwargs_): if sectiondefType.subclass: return sectiondefType.subclass(*args_, **kwargs_) @@ -1677,10 +1900,12 @@ def add_memberdef(self, value): self.memberdef.append(value) def insert_memberdef(self, index, value): self.memberdef[index] = value def 
get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind + def export(self, outfile, level, namespace_='', name_='sectiondefType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='sectiondefType') + self.exportAttributes(outfile, level, namespace_, + name_='sectiondefType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) @@ -1688,38 +1913,47 @@ def export(self, outfile, level, namespace_='', name_='sectiondefType', namespac outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='sectiondefType'): if self.kind is not None: outfile.write(' kind=%s' % (quote_attrib(self.kind), )) + def exportChildren(self, outfile, level, namespace_='', name_='sectiondefType'): if self.header is not None: showIndent(outfile, level) - outfile.write('<%sheader>%s\n' % (namespace_, self.format_string(quote_xml(self.header).encode(ExternalEncoding), input_name='header'), namespace_)) + outfile.write('<%sheader>%s\n' % (namespace_, self.format_string( + quote_xml(self.header).encode(ExternalEncoding), input_name='header'), namespace_)) if self.description: - self.description.export(outfile, level, namespace_, name_='description') + self.description.export( + outfile, level, namespace_, name_='description') for memberdef_ in self.memberdef: memberdef_.export(outfile, level, namespace_, name_='memberdef') + def hasContent_(self): if ( self.header is not None or self.description is not None or self.memberdef is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='sectiondefType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.kind is not None: showIndent(outfile, level) outfile.write('kind = "%s",\n' % (self.kind,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) - outfile.write('header=%s,\n' % quote_python(self.header).encode(ExternalEncoding)) + outfile.write('header=%s,\n' % quote_python( + self.header).encode(ExternalEncoding)) if self.description: showIndent(outfile, level) outfile.write('description=model_.descriptionType(\n') @@ -1738,29 +1972,32 @@ def exportLiteralChildren(self, outfile, level, name_): level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'header': + nodeName_ == 'header': header_ = '' for text__content_ in child_.childNodes: header_ += text__content_.nodeValue self.header = header_ elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'description': + nodeName_ == 'description': obj_ = descriptionType.factory() obj_.build(child_) self.set_description(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'memberdef': + nodeName_ == 'memberdef': obj_ = memberdefType.factory() obj_.build(child_) self.memberdef.append(obj_) @@ -1770,6 +2007,7 @@ def buildChildren(self, 
child_, nodeName_): class memberdefType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, initonly=None, kind=None, volatile=None, const=None, raisexx=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition=None, argsstring=None, name=None, read=None, write=None, bitfield=None, reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None): self.initonly = initonly self.kind = kind @@ -1830,6 +2068,7 @@ def __init__(self, initonly=None, kind=None, volatile=None, const=None, raisexx= self.referencedby = [] else: self.referencedby = referencedby + def factory(*args_, **kwargs_): if memberdefType.subclass: return memberdefType.subclass(*args_, **kwargs_) @@ -1837,7 +2076,9 @@ def factory(*args_, **kwargs_): return memberdefType(*args_, **kwargs_) factory = staticmethod(factory) def get_templateparamlist(self): return self.templateparamlist - def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist + def set_templateparamlist( + self, templateparamlist): self.templateparamlist = templateparamlist + def get_type(self): return self.type_ def set_type(self, type_): self.type_ = type_ def get_definition(self): return self.definition @@ -1855,11 +2096,17 @@ def set_bitfield(self, bitfield): self.bitfield = bitfield def get_reimplements(self): return self.reimplements def set_reimplements(self, reimplements): self.reimplements = reimplements def add_reimplements(self, value): self.reimplements.append(value) - def insert_reimplements(self, index, value): self.reimplements[index] = value + def insert_reimplements( + self, index, value): self.reimplements[index] = value + def get_reimplementedby(self): return self.reimplementedby - def set_reimplementedby(self, reimplementedby): self.reimplementedby = reimplementedby + def set_reimplementedby( + self, reimplementedby): self.reimplementedby = reimplementedby + def add_reimplementedby(self, value): self.reimplementedby.append(value) - def insert_reimplementedby(self, index, value): self.reimplementedby[index] = value + def insert_reimplementedby( + self, index, value): self.reimplementedby[index] = value + def get_param(self): return self.param def set_param(self, param): self.param = param def add_param(self, value): self.param.append(value) @@ -1873,11 +2120,17 @@ def set_initializer(self, initializer): self.initializer = initializer def get_exceptions(self): return self.exceptions def set_exceptions(self, exceptions): self.exceptions = exceptions def get_briefdescription(self): return self.briefdescription - def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription + def set_briefdescription( + self, briefdescription): self.briefdescription = briefdescription + def get_detaileddescription(self): return self.detaileddescription - def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription + def set_detaileddescription( + self, detaileddescription): self.detaileddescription = detaileddescription + def get_inbodydescription(self): return self.inbodydescription - def set_inbodydescription(self, inbodydescription): self.inbodydescription = inbodydescription + def 
set_inbodydescription( + self, inbodydescription): self.inbodydescription = inbodydescription + def get_location(self): return self.location def set_location(self, location): self.location = location def get_references(self): return self.references @@ -1887,7 +2140,9 @@ def insert_references(self, index, value): self.references[index] = value def get_referencedby(self): return self.referencedby def set_referencedby(self, referencedby): self.referencedby = referencedby def add_referencedby(self, value): self.referencedby.append(value) - def insert_referencedby(self, index, value): self.referencedby[index] = value + def insert_referencedby( + self, index, value): self.referencedby[index] = value + def get_initonly(self): return self.initonly def set_initonly(self, initonly): self.initonly = initonly def get_kind(self): return self.kind @@ -1930,10 +2185,12 @@ def get_settable(self): return self.settable def set_settable(self, settable): self.settable = settable def get_id(self): return self.id def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='memberdefType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='memberdefType') + self.exportAttributes(outfile, level, namespace_, + name_='memberdefType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) @@ -1941,6 +2198,7 @@ def export(self, outfile, level, namespace_='', name_='memberdefType', namespace outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='memberdefType'): if self.initonly is not None: outfile.write(' initonly=%s' % (quote_attrib(self.initonly), )) @@ -1983,54 +2241,73 @@ def exportAttributes(self, outfile, level, namespace_='', name_='memberdefType') if self.settable is not None: outfile.write(' settable=%s' % (quote_attrib(self.settable), )) if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + outfile.write(' id=%s' % (self.format_string(quote_attrib( + self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='memberdefType'): if self.templateparamlist: - self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist') + self.templateparamlist.export( + outfile, level, namespace_, name_='templateparamlist') if self.type_: self.type_.export(outfile, level, namespace_, name_='type') if self.definition is not None: showIndent(outfile, level) - outfile.write('<%sdefinition>%s\n' % (namespace_, self.format_string(quote_xml(self.definition).encode(ExternalEncoding), input_name='definition'), namespace_)) + outfile.write('<%sdefinition>%s\n' % (namespace_, self.format_string( + quote_xml(self.definition).encode(ExternalEncoding), input_name='definition'), namespace_)) if self.argsstring is not None: showIndent(outfile, level) - outfile.write('<%sargsstring>%s\n' % (namespace_, self.format_string(quote_xml(self.argsstring).encode(ExternalEncoding), input_name='argsstring'), namespace_)) + outfile.write('<%sargsstring>%s\n' % (namespace_, self.format_string( + quote_xml(self.argsstring).encode(ExternalEncoding), input_name='argsstring'), namespace_)) if self.name is not None: showIndent(outfile, level) - outfile.write('<%sname>%s\n' % (namespace_, 
self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) + outfile.write('<%sname>%s\n' % (namespace_, self.format_string( + quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) if self.read is not None: showIndent(outfile, level) - outfile.write('<%sread>%s\n' % (namespace_, self.format_string(quote_xml(self.read).encode(ExternalEncoding), input_name='read'), namespace_)) + outfile.write('<%sread>%s\n' % (namespace_, self.format_string( + quote_xml(self.read).encode(ExternalEncoding), input_name='read'), namespace_)) if self.write is not None: showIndent(outfile, level) - outfile.write('<%swrite>%s\n' % (namespace_, self.format_string(quote_xml(self.write).encode(ExternalEncoding), input_name='write'), namespace_)) + outfile.write('<%swrite>%s\n' % (namespace_, self.format_string( + quote_xml(self.write).encode(ExternalEncoding), input_name='write'), namespace_)) if self.bitfield is not None: showIndent(outfile, level) - outfile.write('<%sbitfield>%s\n' % (namespace_, self.format_string(quote_xml(self.bitfield).encode(ExternalEncoding), input_name='bitfield'), namespace_)) + outfile.write('<%sbitfield>%s\n' % (namespace_, self.format_string( + quote_xml(self.bitfield).encode(ExternalEncoding), input_name='bitfield'), namespace_)) for reimplements_ in self.reimplements: - reimplements_.export(outfile, level, namespace_, name_='reimplements') + reimplements_.export( + outfile, level, namespace_, name_='reimplements') for reimplementedby_ in self.reimplementedby: - reimplementedby_.export(outfile, level, namespace_, name_='reimplementedby') + reimplementedby_.export( + outfile, level, namespace_, name_='reimplementedby') for param_ in self.param: param_.export(outfile, level, namespace_, name_='param') for enumvalue_ in self.enumvalue: enumvalue_.export(outfile, level, namespace_, name_='enumvalue') if self.initializer: - self.initializer.export(outfile, level, namespace_, name_='initializer') + self.initializer.export( + outfile, level, namespace_, name_='initializer') if self.exceptions: - self.exceptions.export(outfile, level, namespace_, name_='exceptions') + self.exceptions.export( + outfile, level, namespace_, name_='exceptions') if self.briefdescription: - self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') + self.briefdescription.export( + outfile, level, namespace_, name_='briefdescription') if self.detaileddescription: - self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription') + self.detaileddescription.export( + outfile, level, namespace_, name_='detaileddescription') if self.inbodydescription: - self.inbodydescription.export(outfile, level, namespace_, name_='inbodydescription') + self.inbodydescription.export( + outfile, level, namespace_, name_='inbodydescription') if self.location: - self.location.export(outfile, level, namespace_, name_='location', ) + self.location.export( + outfile, level, namespace_, name_='location', ) for references_ in self.references: references_.export(outfile, level, namespace_, name_='references') for referencedby_ in self.referencedby: - referencedby_.export(outfile, level, namespace_, name_='referencedby') + referencedby_.export( + outfile, level, namespace_, name_='referencedby') + def hasContent_(self): if ( self.templateparamlist is not None or @@ -2053,15 +2330,17 @@ def hasContent_(self): self.location is not None or self.references is not None or self.referencedby is not None - ): + ): return True else: return 
False + def exportLiteral(self, outfile, level, name_='memberdefType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.initonly is not None: showIndent(outfile, level) @@ -2126,11 +2405,13 @@ def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): if self.templateparamlist: showIndent(outfile, level) outfile.write('templateparamlist=model_.templateparamlistType(\n') - self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist') + self.templateparamlist.exportLiteral( + outfile, level, name_='templateparamlist') showIndent(outfile, level) outfile.write('),\n') if self.type_: @@ -2140,17 +2421,23 @@ def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) - outfile.write('definition=%s,\n' % quote_python(self.definition).encode(ExternalEncoding)) + outfile.write('definition=%s,\n' % quote_python( + self.definition).encode(ExternalEncoding)) showIndent(outfile, level) - outfile.write('argsstring=%s,\n' % quote_python(self.argsstring).encode(ExternalEncoding)) + outfile.write('argsstring=%s,\n' % quote_python( + self.argsstring).encode(ExternalEncoding)) showIndent(outfile, level) - outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) + outfile.write('name=%s,\n' % quote_python( + self.name).encode(ExternalEncoding)) showIndent(outfile, level) - outfile.write('read=%s,\n' % quote_python(self.read).encode(ExternalEncoding)) + outfile.write('read=%s,\n' % quote_python( + self.read).encode(ExternalEncoding)) showIndent(outfile, level) - outfile.write('write=%s,\n' % quote_python(self.write).encode(ExternalEncoding)) + outfile.write('write=%s,\n' % quote_python( + self.write).encode(ExternalEncoding)) showIndent(outfile, level) - outfile.write('bitfield=%s,\n' % quote_python(self.bitfield).encode(ExternalEncoding)) + outfile.write('bitfield=%s,\n' % quote_python( + self.bitfield).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('reimplements=[\n') level += 1 @@ -2169,7 +2456,8 @@ def exportLiteralChildren(self, outfile, level, name_): for reimplementedby in self.reimplementedby: showIndent(outfile, level) outfile.write('model_.reimplementedby(\n') - reimplementedby.exportLiteral(outfile, level, name_='reimplementedby') + reimplementedby.exportLiteral( + outfile, level, name_='reimplementedby') showIndent(outfile, level) outfile.write('),\n') level -= 1 @@ -2214,19 +2502,22 @@ def exportLiteralChildren(self, outfile, level, name_): if self.briefdescription: showIndent(outfile, level) outfile.write('briefdescription=model_.descriptionType(\n') - self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') + self.briefdescription.exportLiteral( + outfile, level, name_='briefdescription') showIndent(outfile, level) outfile.write('),\n') if self.detaileddescription: showIndent(outfile, level) outfile.write('detaileddescription=model_.descriptionType(\n') - self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription') + self.detaileddescription.exportLiteral( + outfile, level, name_='detaileddescription') showIndent(outfile, level) outfile.write('),\n') if self.inbodydescription: showIndent(outfile, level) 
outfile.write('inbodydescription=model_.descriptionType(\n') - self.inbodydescription.exportLiteral(outfile, level, name_='inbodydescription') + self.inbodydescription.exportLiteral( + outfile, level, name_='inbodydescription') showIndent(outfile, level) outfile.write('),\n') if self.location: @@ -2259,12 +2550,14 @@ def exportLiteralChildren(self, outfile, level, name_): level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('initonly'): self.initonly = attrs.get('initonly').value @@ -2308,110 +2601,111 @@ def buildAttributes(self, attrs): self.settable = attrs.get('settable').value if attrs.get('id'): self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'templateparamlist': + nodeName_ == 'templateparamlist': obj_ = templateparamlistType.factory() obj_.build(child_) self.set_templateparamlist(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'type': + nodeName_ == 'type': obj_ = linkedTextType.factory() obj_.build(child_) self.set_type(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'definition': + nodeName_ == 'definition': definition_ = '' for text__content_ in child_.childNodes: definition_ += text__content_.nodeValue self.definition = definition_ elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'argsstring': + nodeName_ == 'argsstring': argsstring_ = '' for text__content_ in child_.childNodes: argsstring_ += text__content_.nodeValue self.argsstring = argsstring_ elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'name': + nodeName_ == 'name': name_ = '' for text__content_ in child_.childNodes: name_ += text__content_.nodeValue self.name = name_ elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'read': + nodeName_ == 'read': read_ = '' for text__content_ in child_.childNodes: read_ += text__content_.nodeValue self.read = read_ elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'write': + nodeName_ == 'write': write_ = '' for text__content_ in child_.childNodes: write_ += text__content_.nodeValue self.write = write_ elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'bitfield': + nodeName_ == 'bitfield': bitfield_ = '' for text__content_ in child_.childNodes: bitfield_ += text__content_.nodeValue self.bitfield = bitfield_ elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'reimplements': + nodeName_ == 'reimplements': obj_ = reimplementType.factory() obj_.build(child_) self.reimplements.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'reimplementedby': + nodeName_ == 'reimplementedby': obj_ = reimplementType.factory() obj_.build(child_) self.reimplementedby.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'param': + nodeName_ == 'param': obj_ = paramType.factory() obj_.build(child_) self.param.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'enumvalue': + nodeName_ == 'enumvalue': obj_ = enumvalueType.factory() obj_.build(child_) self.enumvalue.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'initializer': + nodeName_ == 'initializer': obj_ = linkedTextType.factory() obj_.build(child_) self.set_initializer(obj_) elif child_.nodeType == 
Node.ELEMENT_NODE and \ - nodeName_ == 'exceptions': + nodeName_ == 'exceptions': obj_ = linkedTextType.factory() obj_.build(child_) self.set_exceptions(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'briefdescription': + nodeName_ == 'briefdescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_briefdescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'detaileddescription': + nodeName_ == 'detaileddescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_detaileddescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'inbodydescription': + nodeName_ == 'inbodydescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_inbodydescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'location': + nodeName_ == 'location': obj_ = locationType.factory() obj_.build(child_) self.set_location(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'references': + nodeName_ == 'references': obj_ = referenceType.factory() obj_.build(child_) self.references.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'referencedby': + nodeName_ == 'referencedby': obj_ = referenceType.factory() obj_.build(child_) self.referencedby.append(obj_) @@ -2421,8 +2715,10 @@ def buildChildren(self, child_, nodeName_): class definition(GeneratedsSuper): subclass = None superclass = None + def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): if definition.subclass: return definition.subclass(*args_, **kwargs_) @@ -2431,6 +2727,7 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='definition', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -2442,33 +2739,40 @@ def export(self, outfile, level, namespace_='', name_='definition', namespacedef outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='definition'): pass + def exportChildren(self, outfile, level, namespace_='', name_='definition'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='definition'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -2476,21 +2780,25 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif 
child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class definition class argsstring(GeneratedsSuper): subclass = None superclass = None + def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): if argsstring.subclass: return argsstring.subclass(*args_, **kwargs_) @@ -2499,6 +2807,7 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='argsstring', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -2510,33 +2819,40 @@ def export(self, outfile, level, namespace_='', name_='argsstring', namespacedef outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='argsstring'): pass + def exportChildren(self, outfile, level, namespace_='', name_='argsstring'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='argsstring'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -2544,21 +2860,25 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class argsstring class read(GeneratedsSuper): subclass = None superclass = None + def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): if read.subclass: return read.subclass(*args_, **kwargs_) @@ -2567,6 +2887,7 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='read', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -2578,33 +2899,40 @@ def export(self, outfile, level, namespace_='', name_='read', namespacedef_=''): outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='read'): pass + def exportChildren(self, outfile, level, namespace_='', name_='read'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if 
self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='read'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -2612,21 +2940,25 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class read class write(GeneratedsSuper): subclass = None superclass = None + def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): if write.subclass: return write.subclass(*args_, **kwargs_) @@ -2635,6 +2967,7 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='write', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -2646,33 +2979,40 @@ def export(self, outfile, level, namespace_='', name_='write', namespacedef_='') outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='write'): pass + def exportChildren(self, outfile, level, namespace_='', name_='write'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='write'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -2680,21 +3020,25 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class write 
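The single-value wrapper classes in this generated module (definition, argsstring, read, write, bitfield, and those that follow) all share one shape: they hold a valueOf_ string, rebuild it from TEXT and CDATA DOM children in buildChildren, and re-emit it in exportChildren with the '![CDATA' opening marker stripped before quoting. The sketch below condenses that shared pattern into a single self-contained class; the name SimpleTextElement, the use of xml.sax.saxutils.escape in place of the module's quote_xml helper, and the __main__ demo are illustrative assumptions, not part of compoundsuper.py or of this patch.

# Illustrative sketch only -- condenses the pattern shared by the generated
# single-value classes (read, write, bitfield, ...); not the generated code itself.
from xml.dom import minidom, Node
from xml.sax.saxutils import escape


class SimpleTextElement(object):
    """Hypothetical stand-in for one generated single-value wrapper class."""

    def __init__(self, valueOf_=''):
        self.valueOf_ = valueOf_

    def build(self, node_):
        # Concatenate text and CDATA children, tagging CDATA the same way the
        # generated buildChildren does so exportChildren can unwrap it later.
        for child_ in node_.childNodes:
            if child_.nodeType == Node.TEXT_NODE:
                self.valueOf_ += child_.nodeValue
            elif child_.nodeType == Node.CDATA_SECTION_NODE:
                self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]'

    def exportChildren(self, outfile):
        # Strip the '![CDATA' opening marker before writing; otherwise just
        # XML-escape the stored value (escape() stands in for quote_xml here).
        if self.valueOf_.find('![CDATA') > -1:
            outfile.write(escape(self.valueOf_).replace('![CDATA', ''))
        else:
            outfile.write(escape(self.valueOf_))


if __name__ == '__main__':
    import io
    dom = minidom.parseString('<bitfield>3 : 4</bitfield>')
    elem = SimpleTextElement()
    elem.build(dom.documentElement)
    buf = io.StringIO()
    elem.exportChildren(buf)
    print(buf.getvalue())  # -> 3 : 4

The richer classes touched by the same hunks (descriptionType, enumvalueType, linkedTextType) differ from this sketch only in that they collect mixed element/text content into self.content_ through the MixedContainer helper instead of concatenating everything into valueOf_.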
class bitfield(GeneratedsSuper): subclass = None superclass = None + def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): if bitfield.subclass: return bitfield.subclass(*args_, **kwargs_) @@ -2703,6 +3047,7 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='bitfield', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -2714,33 +3059,40 @@ def export(self, outfile, level, namespace_='', name_='bitfield', namespacedef_= outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='bitfield'): pass + def exportChildren(self, outfile, level, namespace_='', name_='bitfield'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='bitfield'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -2748,19 +3100,22 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class bitfield class descriptionType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, title=None, para=None, sect1=None, internal=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer @@ -2770,6 +3125,7 @@ def __init__(self, title=None, para=None, sect1=None, internal=None, mixedclass_ self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if descriptionType.subclass: return descriptionType.subclass(*args_, **kwargs_) @@ -2788,35 +3144,43 @@ def add_sect1(self, value): self.sect1.append(value) def insert_sect1(self, index, value): self.sect1[index] = value def get_internal(self): return self.internal def set_internal(self, internal): self.internal = internal + def export(self, outfile, level, namespace_='', name_='descriptionType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='descriptionType') + self.exportAttributes(outfile, level, namespace_, + name_='descriptionType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def 
exportAttributes(self, outfile, level, namespace_='', name_='descriptionType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='descriptionType'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): if ( self.title is not None or self.para is not None or self.sect1 is not None or self.internal is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='descriptionType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') @@ -2842,46 +3206,49 @@ def exportLiteralChildren(self, outfile, level, name_): item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': + nodeName_ == 'title': childobj_ = docTitleType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'title', childobj_) + MixedContainer.TypeNone, 'title', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': + nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) + MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect1': + nodeName_ == 'sect1': childobj_ = docSect1Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect1', childobj_) + MixedContainer.TypeNone, 'sect1', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'internal': + nodeName_ == 'internal': childobj_ = docInternalType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'internal', childobj_) + MixedContainer.TypeNone, 'internal', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class descriptionType @@ -2889,6 +3256,7 @@ def buildChildren(self, child_, nodeName_): class enumvalueType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, prot=None, id=None, name=None, initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None): self.prot = prot self.id = id @@ -2900,6 +3268,7 @@ def __init__(self, prot=None, id=None, name=None, initializer=None, briefdescrip self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if enumvalueType.subclass: return enumvalueType.subclass(*args_, **kwargs_) @@ -2911,43 +3280,55 @@ def set_name(self, name): self.name = name def get_initializer(self): return self.initializer def 
set_initializer(self, initializer): self.initializer = initializer def get_briefdescription(self): return self.briefdescription - def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription + def set_briefdescription( + self, briefdescription): self.briefdescription = briefdescription + def get_detaileddescription(self): return self.detaileddescription - def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription + def set_detaileddescription( + self, detaileddescription): self.detaileddescription = detaileddescription + def get_prot(self): return self.prot def set_prot(self, prot): self.prot = prot def get_id(self): return self.id def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='enumvalueType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='enumvalueType') + self.exportAttributes(outfile, level, namespace_, + name_='enumvalueType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='enumvalueType'): if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + outfile.write(' id=%s' % (self.format_string(quote_attrib( + self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='enumvalueType'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): if ( self.name is not None or self.initializer is not None or self.briefdescription is not None or self.detaileddescription is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='enumvalueType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.prot is not None: showIndent(outfile, level) @@ -2955,6 +3336,7 @@ def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') @@ -2980,51 +3362,54 @@ def exportLiteralChildren(self, outfile, level, name_): item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('prot'): self.prot = attrs.get('prot').value if attrs.get('id'): self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'name': + nodeName_ == 'name': value_ = [] for text_ in child_.childNodes: value_.append(text_.nodeValue) valuestr_ = ''.join(value_) obj_ = self.mixedclass_(MixedContainer.CategorySimple, - MixedContainer.TypeString, 'name', valuestr_) + MixedContainer.TypeString, 'name', valuestr_) self.content_.append(obj_) elif child_.nodeType == 
Node.ELEMENT_NODE and \ - nodeName_ == 'initializer': + nodeName_ == 'initializer': childobj_ = linkedTextType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'initializer', childobj_) + MixedContainer.TypeNone, 'initializer', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'briefdescription': + nodeName_ == 'briefdescription': childobj_ = descriptionType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'briefdescription', childobj_) + MixedContainer.TypeNone, 'briefdescription', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'detaileddescription': + nodeName_ == 'detaileddescription': childobj_ = descriptionType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'detaileddescription', childobj_) + MixedContainer.TypeNone, 'detaileddescription', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class enumvalueType @@ -3032,11 +3417,13 @@ def buildChildren(self, child_, nodeName_): class templateparamlistType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, param=None): if param is None: self.param = [] else: self.param = param + def factory(*args_, **kwargs_): if templateparamlistType.subclass: return templateparamlistType.subclass(*args_, **kwargs_) @@ -3047,10 +3434,12 @@ def get_param(self): return self.param def set_param(self, param): self.param = param def add_param(self, value): self.param.append(value) def insert_param(self, index, value): self.param[index] = value + def export(self, outfile, level, namespace_='', name_='templateparamlistType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='templateparamlistType') + self.exportAttributes(outfile, level, namespace_, + name_='templateparamlistType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) @@ -3058,25 +3447,31 @@ def export(self, outfile, level, namespace_='', name_='templateparamlistType', n outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='templateparamlistType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='templateparamlistType'): for param_ in self.param: param_.export(outfile, level, namespace_, name_='param') + def hasContent_(self): if ( self.param is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='templateparamlistType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('param=[\n') @@ -3090,17 +3485,20 @@ def exportLiteralChildren(self, outfile, level, name_): level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for 
child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'param': + nodeName_ == 'param': obj_ = paramType.factory() obj_.build(child_) self.param.append(obj_) @@ -3110,6 +3508,7 @@ def buildChildren(self, child_, nodeName_): class paramType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, type_=None, declname=None, defname=None, array=None, defval=None, briefdescription=None): self.type_ = type_ self.declname = declname @@ -3117,6 +3516,7 @@ def __init__(self, type_=None, declname=None, defname=None, array=None, defval=N self.array = array self.defval = defval self.briefdescription = briefdescription + def factory(*args_, **kwargs_): if paramType.subclass: return paramType.subclass(*args_, **kwargs_) @@ -3134,7 +3534,10 @@ def set_array(self, array): self.array = array def get_defval(self): return self.defval def set_defval(self, defval): self.defval = defval def get_briefdescription(self): return self.briefdescription - def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription + + def set_briefdescription( + self, briefdescription): self.briefdescription = briefdescription + def export(self, outfile, level, namespace_='', name_='paramType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -3146,24 +3549,31 @@ def export(self, outfile, level, namespace_='', name_='paramType', namespacedef_ outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='paramType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='paramType'): if self.type_: self.type_.export(outfile, level, namespace_, name_='type') if self.declname is not None: showIndent(outfile, level) - outfile.write('<%sdeclname>%s\n' % (namespace_, self.format_string(quote_xml(self.declname).encode(ExternalEncoding), input_name='declname'), namespace_)) + outfile.write('<%sdeclname>%s\n' % (namespace_, self.format_string( + quote_xml(self.declname).encode(ExternalEncoding), input_name='declname'), namespace_)) if self.defname is not None: showIndent(outfile, level) - outfile.write('<%sdefname>%s\n' % (namespace_, self.format_string(quote_xml(self.defname).encode(ExternalEncoding), input_name='defname'), namespace_)) + outfile.write('<%sdefname>%s\n' % (namespace_, self.format_string( + quote_xml(self.defname).encode(ExternalEncoding), input_name='defname'), namespace_)) if self.array is not None: showIndent(outfile, level) - outfile.write('<%sarray>%s\n' % (namespace_, self.format_string(quote_xml(self.array).encode(ExternalEncoding), input_name='array'), namespace_)) + outfile.write('<%sarray>%s\n' % (namespace_, self.format_string( + quote_xml(self.array).encode(ExternalEncoding), input_name='array'), namespace_)) if self.defval: self.defval.export(outfile, level, namespace_, name_='defval') if self.briefdescription: - self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') + self.briefdescription.export( + outfile, level, namespace_, name_='briefdescription') + def hasContent_(self): if ( self.type_ is not None or @@ -3172,17 +3582,20 @@ def hasContent_(self): self.array is not None or self.defval is not None or self.briefdescription is not None - ): + ): return True else: return False + def 
exportLiteral(self, outfile, level, name_='paramType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): if self.type_: showIndent(outfile, level) @@ -3191,11 +3604,14 @@ def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) - outfile.write('declname=%s,\n' % quote_python(self.declname).encode(ExternalEncoding)) + outfile.write('declname=%s,\n' % quote_python( + self.declname).encode(ExternalEncoding)) showIndent(outfile, level) - outfile.write('defname=%s,\n' % quote_python(self.defname).encode(ExternalEncoding)) + outfile.write('defname=%s,\n' % quote_python( + self.defname).encode(ExternalEncoding)) showIndent(outfile, level) - outfile.write('array=%s,\n' % quote_python(self.array).encode(ExternalEncoding)) + outfile.write('array=%s,\n' % quote_python( + self.array).encode(ExternalEncoding)) if self.defval: showIndent(outfile, level) outfile.write('defval=model_.linkedTextType(\n') @@ -3205,48 +3621,52 @@ def exportLiteralChildren(self, outfile, level, name_): if self.briefdescription: showIndent(outfile, level) outfile.write('briefdescription=model_.descriptionType(\n') - self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') + self.briefdescription.exportLiteral( + outfile, level, name_='briefdescription') showIndent(outfile, level) outfile.write('),\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'type': + nodeName_ == 'type': obj_ = linkedTextType.factory() obj_.build(child_) self.set_type(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'declname': + nodeName_ == 'declname': declname_ = '' for text__content_ in child_.childNodes: declname_ += text__content_.nodeValue self.declname = declname_ elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'defname': + nodeName_ == 'defname': defname_ = '' for text__content_ in child_.childNodes: defname_ += text__content_.nodeValue self.defname = defname_ elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'array': + nodeName_ == 'array': array_ = '' for text__content_ in child_.childNodes: array_ += text__content_.nodeValue self.array = array_ elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'defval': + nodeName_ == 'defval': obj_ = linkedTextType.factory() obj_.build(child_) self.set_defval(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'briefdescription': + nodeName_ == 'briefdescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_briefdescription(obj_) @@ -3256,8 +3676,10 @@ def buildChildren(self, child_, nodeName_): class declname(GeneratedsSuper): subclass = None superclass = None + def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): if declname.subclass: return declname.subclass(*args_, **kwargs_) @@ -3266,6 +3688,7 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, 
outfile, level, namespace_='', name_='declname', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -3277,33 +3700,40 @@ def export(self, outfile, level, namespace_='', name_='declname', namespacedef_= outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='declname'): pass + def exportChildren(self, outfile, level, namespace_='', name_='declname'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='declname'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -3311,21 +3741,25 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class declname class defname(GeneratedsSuper): subclass = None superclass = None + def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): if defname.subclass: return defname.subclass(*args_, **kwargs_) @@ -3334,6 +3768,7 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='defname', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -3345,33 +3780,40 @@ def export(self, outfile, level, namespace_='', name_='defname', namespacedef_=' outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='defname'): pass + def exportChildren(self, outfile, level, namespace_='', name_='defname'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='defname'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): 
showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -3379,21 +3821,25 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class defname class array(GeneratedsSuper): subclass = None superclass = None + def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): if array.subclass: return array.subclass(*args_, **kwargs_) @@ -3402,6 +3848,7 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='array', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -3413,33 +3860,40 @@ def export(self, outfile, level, namespace_='', name_='array', namespacedef_='') outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='array'): pass + def exportChildren(self, outfile, level, namespace_='', name_='array'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='array'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -3447,19 +3901,22 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class array class linkedTextType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, ref=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer @@ -3469,6 +3926,7 @@ def __init__(self, ref=None, mixedclass_=None, content_=None): self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if linkedTextType.subclass: return linkedTextType.subclass(*args_, **kwargs_) @@ -3479,32 +3937,40 @@ def get_ref(self): return self.ref def set_ref(self, ref): self.ref = ref def add_ref(self, value): self.ref.append(value) def 
insert_ref(self, index, value): self.ref[index] = value + def export(self, outfile, level, namespace_='', name_='linkedTextType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='linkedTextType') + self.exportAttributes(outfile, level, namespace_, + name_='linkedTextType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='linkedTextType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='linkedTextType'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): if ( self.ref is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='linkedTextType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') @@ -3512,25 +3978,28 @@ def exportLiteralChildren(self, outfile, level, name_): item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'ref': + nodeName_ == 'ref': childobj_ = docRefTextType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'ref', childobj_) + MixedContainer.TypeNone, 'ref', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class linkedTextType @@ -3538,11 +4007,13 @@ def buildChildren(self, child_, nodeName_): class graphType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, node=None): if node is None: self.node = [] else: self.node = node + def factory(*args_, **kwargs_): if graphType.subclass: return graphType.subclass(*args_, **kwargs_) @@ -3553,6 +4024,7 @@ def get_node(self): return self.node def set_node(self, node): self.node = node def add_node(self, value): self.node.append(value) def insert_node(self, index, value): self.node[index] = value + def export(self, outfile, level, namespace_='', name_='graphType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -3564,25 +4036,31 @@ def export(self, outfile, level, namespace_='', name_='graphType', namespacedef_ outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='graphType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='graphType'): for node_ in self.node: node_.export(outfile, level, namespace_, name_='node') + def hasContent_(self): if ( self.node is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, 
name_='graphType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('node=[\n') @@ -3596,17 +4074,20 @@ def exportLiteralChildren(self, outfile, level, name_): level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'node': + nodeName_ == 'node': obj_ = nodeType.factory() obj_.build(child_) self.node.append(obj_) @@ -3616,6 +4097,7 @@ def buildChildren(self, child_, nodeName_): class nodeType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, id=None, label=None, link=None, childnode=None): self.id = id self.label = label @@ -3624,6 +4106,7 @@ def __init__(self, id=None, label=None, link=None, childnode=None): self.childnode = [] else: self.childnode = childnode + def factory(*args_, **kwargs_): if nodeType.subclass: return nodeType.subclass(*args_, **kwargs_) @@ -3640,6 +4123,7 @@ def add_childnode(self, value): self.childnode.append(value) def insert_childnode(self, index, value): self.childnode[index] = value def get_id(self): return self.id def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='nodeType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -3651,38 +4135,47 @@ def export(self, outfile, level, namespace_='', name_='nodeType', namespacedef_= outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='nodeType'): if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + outfile.write(' id=%s' % (self.format_string(quote_attrib( + self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='nodeType'): if self.label is not None: showIndent(outfile, level) - outfile.write('<%slabel>%s\n' % (namespace_, self.format_string(quote_xml(self.label).encode(ExternalEncoding), input_name='label'), namespace_)) + outfile.write('<%slabel>%s\n' % (namespace_, self.format_string( + quote_xml(self.label).encode(ExternalEncoding), input_name='label'), namespace_)) if self.link: self.link.export(outfile, level, namespace_, name_='link') for childnode_ in self.childnode: childnode_.export(outfile, level, namespace_, name_='childnode') + def hasContent_(self): if ( self.label is not None or self.link is not None or self.childnode is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='nodeType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) - outfile.write('label=%s,\n' % 
quote_python(self.label).encode(ExternalEncoding)) + outfile.write('label=%s,\n' % quote_python( + self.label).encode(ExternalEncoding)) if self.link: showIndent(outfile, level) outfile.write('link=model_.linkType(\n') @@ -3701,29 +4194,32 @@ def exportLiteralChildren(self, outfile, level, name_): level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'label': + nodeName_ == 'label': label_ = '' for text__content_ in child_.childNodes: label_ += text__content_.nodeValue self.label = label_ elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'link': + nodeName_ == 'link': obj_ = linkType.factory() obj_.build(child_) self.set_link(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'childnode': + nodeName_ == 'childnode': obj_ = childnodeType.factory() obj_.build(child_) self.childnode.append(obj_) @@ -3733,8 +4229,10 @@ def buildChildren(self, child_, nodeName_): class label(GeneratedsSuper): subclass = None superclass = None + def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): if label.subclass: return label.subclass(*args_, **kwargs_) @@ -3743,6 +4241,7 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='label', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -3754,33 +4253,40 @@ def export(self, outfile, level, namespace_='', name_='label', namespacedef_='') outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='label'): pass + def exportChildren(self, outfile, level, namespace_='', name_='label'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='label'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -3788,19 +4294,22 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + 
child_.nodeValue + ']]' # end class label class childnodeType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, relation=None, refid=None, edgelabel=None): self.relation = relation self.refid = refid @@ -3808,6 +4317,7 @@ def __init__(self, relation=None, refid=None, edgelabel=None): self.edgelabel = [] else: self.edgelabel = edgelabel + def factory(*args_, **kwargs_): if childnodeType.subclass: return childnodeType.subclass(*args_, **kwargs_) @@ -3822,10 +4332,12 @@ def get_relation(self): return self.relation def set_relation(self, relation): self.relation = relation def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid + def export(self, outfile, level, namespace_='', name_='childnodeType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='childnodeType') + self.exportAttributes(outfile, level, namespace_, + name_='childnodeType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) @@ -3833,27 +4345,34 @@ def export(self, outfile, level, namespace_='', name_='childnodeType', namespace outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='childnodeType'): if self.relation is not None: outfile.write(' relation=%s' % (quote_attrib(self.relation), )) if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + outfile.write(' refid=%s' % (self.format_string(quote_attrib( + self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='childnodeType'): for edgelabel_ in self.edgelabel: showIndent(outfile, level) - outfile.write('<%sedgelabel>%s\n' % (namespace_, self.format_string(quote_xml(edgelabel_).encode(ExternalEncoding), input_name='edgelabel'), namespace_)) + outfile.write('<%sedgelabel>%s\n' % (namespace_, self.format_string( + quote_xml(edgelabel_).encode(ExternalEncoding), input_name='edgelabel'), namespace_)) + def hasContent_(self): if ( self.edgelabel is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='childnodeType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.relation is not None: showIndent(outfile, level) @@ -3861,30 +4380,35 @@ def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('edgelabel=[\n') level += 1 for edgelabel in self.edgelabel: showIndent(outfile, level) - outfile.write('%s,\n' % quote_python(edgelabel).encode(ExternalEncoding)) + outfile.write('%s,\n' % quote_python( + edgelabel).encode(ExternalEncoding)) level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('relation'): self.relation = attrs.get('relation').value if attrs.get('refid'): self.refid = 
attrs.get('refid').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'edgelabel': + nodeName_ == 'edgelabel': edgelabel_ = '' for text__content_ in child_.childNodes: edgelabel_ += text__content_.nodeValue @@ -3895,8 +4419,10 @@ def buildChildren(self, child_, nodeName_): class edgelabel(GeneratedsSuper): subclass = None superclass = None + def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): if edgelabel.subclass: return edgelabel.subclass(*args_, **kwargs_) @@ -3905,6 +4431,7 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='edgelabel', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -3916,33 +4443,40 @@ def export(self, outfile, level, namespace_='', name_='edgelabel', namespacedef_ outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='edgelabel'): pass + def exportChildren(self, outfile, level, namespace_='', name_='edgelabel'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='edgelabel'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -3950,23 +4484,27 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class edgelabel class linkType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, refid=None, external=None, valueOf_=''): self.refid = refid self.external = external self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): if linkType.subclass: return linkType.subclass(*args_, **kwargs_) @@ -3979,6 +4517,7 @@ def get_external(self): return self.external def set_external(self, external): self.external = external def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='linkType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -3990,31 +4529,38 @@ def export(self, outfile, level, namespace_='', name_='linkType', namespacedef_= outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def 
exportAttributes(self, outfile, level, namespace_='', name_='linkType'): if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + outfile.write(' refid=%s' % (self.format_string(quote_attrib( + self.refid).encode(ExternalEncoding), input_name='refid'), )) if self.external is not None: - outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) + outfile.write(' external=%s' % (self.format_string(quote_attrib( + self.external).encode(ExternalEncoding), input_name='external'), )) + def exportChildren(self, outfile, level, namespace_='', name_='linkType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='linkType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) @@ -4022,9 +4568,11 @@ def exportLiteralAttributes(self, outfile, level, name_): if self.external is not None: showIndent(outfile, level) outfile.write('external = %s,\n' % (self.external,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -4032,27 +4580,31 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('refid'): self.refid = attrs.get('refid').value if attrs.get('external'): self.external = attrs.get('external').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class linkType class listingType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, codeline=None): if codeline is None: self.codeline = [] else: self.codeline = codeline + def factory(*args_, **kwargs_): if listingType.subclass: return listingType.subclass(*args_, **kwargs_) @@ -4063,6 +4615,7 @@ def get_codeline(self): return self.codeline def set_codeline(self, codeline): self.codeline = codeline def add_codeline(self, value): self.codeline.append(value) def insert_codeline(self, index, value): self.codeline[index] = value + def export(self, outfile, level, namespace_='', name_='listingType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -4074,25 +4627,31 @@ def export(self, outfile, level, namespace_='', name_='listingType', namespacede outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='listingType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='listingType'): for 
codeline_ in self.codeline: codeline_.export(outfile, level, namespace_, name_='codeline') + def hasContent_(self): if ( self.codeline is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='listingType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('codeline=[\n') @@ -4106,17 +4665,20 @@ def exportLiteralChildren(self, outfile, level, name_): level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'codeline': + nodeName_ == 'codeline': obj_ = codelineType.factory() obj_.build(child_) self.codeline.append(obj_) @@ -4126,6 +4688,7 @@ def buildChildren(self, child_, nodeName_): class codelineType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None): self.external = external self.lineno = lineno @@ -4135,6 +4698,7 @@ def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlig self.highlight = [] else: self.highlight = highlight + def factory(*args_, **kwargs_): if codelineType.subclass: return codelineType.subclass(*args_, **kwargs_) @@ -4153,6 +4717,7 @@ def get_refkind(self): return self.refkind def set_refkind(self, refkind): self.refkind = refkind def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid + def export(self, outfile, level, namespace_='', name_='codelineType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -4164,30 +4729,37 @@ def export(self, outfile, level, namespace_='', name_='codelineType', namespaced outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='codelineType'): if self.external is not None: outfile.write(' external=%s' % (quote_attrib(self.external), )) if self.lineno is not None: - outfile.write(' lineno="%s"' % self.format_integer(self.lineno, input_name='lineno')) + outfile.write(' lineno="%s"' % self.format_integer( + self.lineno, input_name='lineno')) if self.refkind is not None: outfile.write(' refkind=%s' % (quote_attrib(self.refkind), )) if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + outfile.write(' refid=%s' % (self.format_string(quote_attrib( + self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='codelineType'): for highlight_ in self.highlight: highlight_.export(outfile, level, namespace_, name_='highlight') + def hasContent_(self): if ( self.highlight is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='codelineType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, 
name_): if self.external is not None: showIndent(outfile, level) @@ -4201,6 +4773,7 @@ def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('highlight=[\n') @@ -4214,12 +4787,14 @@ def exportLiteralChildren(self, outfile, level, name_): level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('external'): self.external = attrs.get('external').value @@ -4232,9 +4807,10 @@ def buildAttributes(self, attrs): self.refkind = attrs.get('refkind').value if attrs.get('refid'): self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'highlight': + nodeName_ == 'highlight': obj_ = highlightType.factory() obj_.build(child_) self.highlight.append(obj_) @@ -4244,6 +4820,7 @@ def buildChildren(self, child_, nodeName_): class highlightType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, classxx=None, sp=None, ref=None, mixedclass_=None, content_=None): self.classxx = classxx if mixedclass_ is None: @@ -4254,6 +4831,7 @@ def __init__(self, classxx=None, sp=None, ref=None, mixedclass_=None, content_=N self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if highlightType.subclass: return highlightType.subclass(*args_, **kwargs_) @@ -4270,36 +4848,44 @@ def add_ref(self, value): self.ref.append(value) def insert_ref(self, index, value): self.ref[index] = value def get_class(self): return self.classxx def set_class(self, classxx): self.classxx = classxx + def export(self, outfile, level, namespace_='', name_='highlightType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='highlightType') + self.exportAttributes(outfile, level, namespace_, + name_='highlightType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='highlightType'): if self.classxx is not None: outfile.write(' class=%s' % (quote_attrib(self.classxx), )) + def exportChildren(self, outfile, level, namespace_='', name_='highlightType'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): if ( self.sp is not None or self.ref is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='highlightType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.classxx is not None: showIndent(outfile, level) outfile.write('classxx = "%s",\n' % (self.classxx,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') @@ -4313,35 +4899,38 @@ def exportLiteralChildren(self, outfile, level, name_): item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = 
node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('class'): self.classxx = attrs.get('class').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sp': + nodeName_ == 'sp': value_ = [] for text_ in child_.childNodes: value_.append(text_.nodeValue) valuestr_ = ''.join(value_) obj_ = self.mixedclass_(MixedContainer.CategorySimple, - MixedContainer.TypeString, 'sp', valuestr_) + MixedContainer.TypeString, 'sp', valuestr_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'ref': + nodeName_ == 'ref': childobj_ = docRefTextType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'ref', childobj_) + MixedContainer.TypeNone, 'ref', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class highlightType @@ -4349,8 +4938,10 @@ def buildChildren(self, child_, nodeName_): class sp(GeneratedsSuper): subclass = None superclass = None + def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): if sp.subclass: return sp.subclass(*args_, **kwargs_) @@ -4359,6 +4950,7 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='sp', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -4370,33 +4962,40 @@ def export(self, outfile, level, namespace_='', name_='sp', namespacedef_=''): outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='sp'): pass + def exportChildren(self, outfile, level, namespace_='', name_='sp'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='sp'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -4404,19 +5003,22 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + 
']]' # end class sp class referenceType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None): self.endline = endline self.startline = startline @@ -4430,6 +5032,7 @@ def __init__(self, endline=None, startline=None, refid=None, compoundref=None, v self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if referenceType.subclass: return referenceType.subclass(*args_, **kwargs_) @@ -4446,42 +5049,53 @@ def get_compoundref(self): return self.compoundref def set_compoundref(self, compoundref): self.compoundref = compoundref def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='referenceType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='referenceType') + self.exportAttributes(outfile, level, namespace_, + name_='referenceType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='referenceType'): if self.endline is not None: - outfile.write(' endline="%s"' % self.format_integer(self.endline, input_name='endline')) + outfile.write(' endline="%s"' % self.format_integer( + self.endline, input_name='endline')) if self.startline is not None: - outfile.write(' startline="%s"' % self.format_integer(self.startline, input_name='startline')) + outfile.write(' startline="%s"' % self.format_integer( + self.startline, input_name='startline')) if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + outfile.write(' refid=%s' % (self.format_string(quote_attrib( + self.refid).encode(ExternalEncoding), input_name='refid'), )) if self.compoundref is not None: - outfile.write(' compoundref=%s' % (self.format_string(quote_attrib(self.compoundref).encode(ExternalEncoding), input_name='compoundref'), )) + outfile.write(' compoundref=%s' % (self.format_string(quote_attrib( + self.compoundref).encode(ExternalEncoding), input_name='compoundref'), )) + def exportChildren(self, outfile, level, namespace_='', name_='referenceType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='referenceType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.endline is not None: showIndent(outfile, level) @@ -4495,9 +5109,11 @@ def exportLiteralAttributes(self, outfile, level, name_): if self.compoundref is not None: showIndent(outfile, level) outfile.write('compoundref = %s,\n' % (self.compoundref,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = 
node_.attributes self.buildAttributes(attrs) @@ -4505,6 +5121,7 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('endline'): try: @@ -4520,21 +5137,23 @@ def buildAttributes(self, attrs): self.refid = attrs.get('refid').value if attrs.get('compoundref'): self.compoundref = attrs.get('compoundref').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class referenceType class locationType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''): self.bodystart = bodystart self.line = line @@ -4542,6 +5161,7 @@ def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file= self.bodyfile = bodyfile self.file = file self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): if locationType.subclass: return locationType.subclass(*args_, **kwargs_) @@ -4560,6 +5180,7 @@ def get_file(self): return self.file def set_file(self, file): self.file = file def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='locationType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -4571,37 +5192,47 @@ def export(self, outfile, level, namespace_='', name_='locationType', namespaced outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='locationType'): if self.bodystart is not None: - outfile.write(' bodystart="%s"' % self.format_integer(self.bodystart, input_name='bodystart')) + outfile.write(' bodystart="%s"' % self.format_integer( + self.bodystart, input_name='bodystart')) if self.line is not None: - outfile.write(' line="%s"' % self.format_integer(self.line, input_name='line')) + outfile.write(' line="%s"' % self.format_integer( + self.line, input_name='line')) if self.bodyend is not None: - outfile.write(' bodyend="%s"' % self.format_integer(self.bodyend, input_name='bodyend')) + outfile.write(' bodyend="%s"' % self.format_integer( + self.bodyend, input_name='bodyend')) if self.bodyfile is not None: - outfile.write(' bodyfile=%s' % (self.format_string(quote_attrib(self.bodyfile).encode(ExternalEncoding), input_name='bodyfile'), )) + outfile.write(' bodyfile=%s' % (self.format_string(quote_attrib( + self.bodyfile).encode(ExternalEncoding), input_name='bodyfile'), )) if self.file is not None: - outfile.write(' file=%s' % (self.format_string(quote_attrib(self.file).encode(ExternalEncoding), input_name='file'), )) + outfile.write(' file=%s' % (self.format_string(quote_attrib( + self.file).encode(ExternalEncoding), input_name='file'), )) + def exportChildren(self, outfile, level, namespace_='', name_='locationType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + 
value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='locationType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.bodystart is not None: showIndent(outfile, level) @@ -4618,9 +5249,11 @@ def exportLiteralAttributes(self, outfile, level, name_): if self.file is not None: showIndent(outfile, level) outfile.write('file = %s,\n' % (self.file,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -4628,6 +5261,7 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('bodystart'): try: @@ -4648,17 +5282,19 @@ def buildAttributes(self, attrs): self.bodyfile = attrs.get('bodyfile').value if attrs.get('file'): self.file = attrs.get('file').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class locationType class docSect1Type(GeneratedsSuper): subclass = None superclass = None + def __init__(self, id=None, title=None, para=None, sect2=None, internal=None, mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: @@ -4669,6 +5305,7 @@ def __init__(self, id=None, title=None, para=None, sect2=None, internal=None, mi self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docSect1Type.subclass: return docSect1Type.subclass(*args_, **kwargs_) @@ -4689,6 +5326,7 @@ def get_internal(self): return self.internal def set_internal(self, internal): self.internal = internal def get_id(self): return self.id def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='docSect1Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -4696,31 +5334,38 @@ def export(self, outfile, level, namespace_='', name_='docSect1Type', namespaced outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docSect1Type'): if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + outfile.write(' id=%s' % (self.format_string(quote_attrib( + self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docSect1Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): if ( self.title is not None or self.para is not None or self.sect2 is not None or self.internal is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docSect1Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if 
self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') @@ -4746,47 +5391,50 @@ def exportLiteralChildren(self, outfile, level, name_): item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': + nodeName_ == 'title': childobj_ = docTitleType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'title', childobj_) + MixedContainer.TypeNone, 'title', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': + nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) + MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect2': + nodeName_ == 'sect2': childobj_ = docSect2Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect2', childobj_) + MixedContainer.TypeNone, 'sect2', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'internal': + nodeName_ == 'internal': childobj_ = docInternalS1Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'internal', childobj_) + MixedContainer.TypeNone, 'internal', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docSect1Type @@ -4794,6 +5442,7 @@ def buildChildren(self, child_, nodeName_): class docSect2Type(GeneratedsSuper): subclass = None superclass = None + def __init__(self, id=None, title=None, para=None, sect3=None, internal=None, mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: @@ -4804,6 +5453,7 @@ def __init__(self, id=None, title=None, para=None, sect3=None, internal=None, mi self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docSect2Type.subclass: return docSect2Type.subclass(*args_, **kwargs_) @@ -4824,6 +5474,7 @@ def get_internal(self): return self.internal def set_internal(self, internal): self.internal = internal def get_id(self): return self.id def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='docSect2Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -4831,31 +5482,38 @@ def export(self, outfile, level, namespace_='', name_='docSect2Type', namespaced outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % 
(namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docSect2Type'): if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + outfile.write(' id=%s' % (self.format_string(quote_attrib( + self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docSect2Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): if ( self.title is not None or self.para is not None or self.sect3 is not None or self.internal is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docSect2Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') @@ -4881,47 +5539,50 @@ def exportLiteralChildren(self, outfile, level, name_): item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': + nodeName_ == 'title': childobj_ = docTitleType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'title', childobj_) + MixedContainer.TypeNone, 'title', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': + nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) + MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect3': + nodeName_ == 'sect3': childobj_ = docSect3Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect3', childobj_) + MixedContainer.TypeNone, 'sect3', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'internal': + nodeName_ == 'internal': childobj_ = docInternalS2Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'internal', childobj_) + MixedContainer.TypeNone, 'internal', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docSect2Type @@ -4929,6 +5590,7 @@ def buildChildren(self, child_, nodeName_): class docSect3Type(GeneratedsSuper): subclass = None superclass = None + def __init__(self, id=None, title=None, para=None, sect4=None, internal=None, mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: @@ -4939,6 +5601,7 
@@ def __init__(self, id=None, title=None, para=None, sect4=None, internal=None, mi self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docSect3Type.subclass: return docSect3Type.subclass(*args_, **kwargs_) @@ -4959,6 +5622,7 @@ def get_internal(self): return self.internal def set_internal(self, internal): self.internal = internal def get_id(self): return self.id def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='docSect3Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -4966,31 +5630,38 @@ def export(self, outfile, level, namespace_='', name_='docSect3Type', namespaced outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docSect3Type'): if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + outfile.write(' id=%s' % (self.format_string(quote_attrib( + self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docSect3Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): if ( self.title is not None or self.para is not None or self.sect4 is not None or self.internal is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docSect3Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') @@ -5016,47 +5687,50 @@ def exportLiteralChildren(self, outfile, level, name_): item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': + nodeName_ == 'title': childobj_ = docTitleType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'title', childobj_) + MixedContainer.TypeNone, 'title', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': + nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) + MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect4': + nodeName_ == 'sect4': childobj_ = docSect4Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect4', childobj_) + MixedContainer.TypeNone, 'sect4', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'internal': 
+ nodeName_ == 'internal': childobj_ = docInternalS3Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'internal', childobj_) + MixedContainer.TypeNone, 'internal', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docSect3Type @@ -5064,6 +5738,7 @@ def buildChildren(self, child_, nodeName_): class docSect4Type(GeneratedsSuper): subclass = None superclass = None + def __init__(self, id=None, title=None, para=None, internal=None, mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: @@ -5074,6 +5749,7 @@ def __init__(self, id=None, title=None, para=None, internal=None, mixedclass_=No self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docSect4Type.subclass: return docSect4Type.subclass(*args_, **kwargs_) @@ -5090,6 +5766,7 @@ def get_internal(self): return self.internal def set_internal(self, internal): self.internal = internal def get_id(self): return self.id def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='docSect4Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -5097,30 +5774,37 @@ def export(self, outfile, level, namespace_='', name_='docSect4Type', namespaced outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docSect4Type'): if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + outfile.write(' id=%s' % (self.format_string(quote_attrib( + self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docSect4Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): if ( self.title is not None or self.para is not None or self.internal is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docSect4Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') @@ -5140,40 +5824,43 @@ def exportLiteralChildren(self, outfile, level, name_): item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': + nodeName_ == 'title': childobj_ = docTitleType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'title', childobj_) + 
MixedContainer.TypeNone, 'title', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': + nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) + MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'internal': + nodeName_ == 'internal': childobj_ = docInternalS4Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'internal', childobj_) + MixedContainer.TypeNone, 'internal', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docSect4Type @@ -5181,6 +5868,7 @@ def buildChildren(self, child_, nodeName_): class docInternalType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer @@ -5190,6 +5878,7 @@ def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None): self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docInternalType.subclass: return docInternalType.subclass(*args_, **kwargs_) @@ -5204,33 +5893,41 @@ def get_sect1(self): return self.sect1 def set_sect1(self, sect1): self.sect1 = sect1 def add_sect1(self, value): self.sect1.append(value) def insert_sect1(self, index, value): self.sect1[index] = value + def export(self, outfile, level, namespace_='', name_='docInternalType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docInternalType') + self.exportAttributes(outfile, level, namespace_, + name_='docInternalType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docInternalType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='docInternalType'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): if ( self.para is not None or self.sect1 is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docInternalType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') @@ -5244,32 +5941,35 @@ def exportLiteralChildren(self, outfile, level, name_): item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': + nodeName_ == 'para': childobj_ = 
docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) + MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect1': + nodeName_ == 'sect1': childobj_ = docSect1Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect1', childobj_) + MixedContainer.TypeNone, 'sect1', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docInternalType @@ -5277,6 +5977,7 @@ def buildChildren(self, child_, nodeName_): class docInternalS1Type(GeneratedsSuper): subclass = None superclass = None + def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer @@ -5286,6 +5987,7 @@ def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None): self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docInternalS1Type.subclass: return docInternalS1Type.subclass(*args_, **kwargs_) @@ -5300,33 +6002,41 @@ def get_sect2(self): return self.sect2 def set_sect2(self, sect2): self.sect2 = sect2 def add_sect2(self, value): self.sect2.append(value) def insert_sect2(self, index, value): self.sect2[index] = value + def export(self, outfile, level, namespace_='', name_='docInternalS1Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docInternalS1Type') + self.exportAttributes(outfile, level, namespace_, + name_='docInternalS1Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS1Type'): pass + def exportChildren(self, outfile, level, namespace_='', name_='docInternalS1Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): if ( self.para is not None or self.sect2 is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docInternalS1Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') @@ -5340,32 +6050,35 @@ def exportLiteralChildren(self, outfile, level, name_): item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': + nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) + MixedContainer.TypeNone, 'para', childobj_) 
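A hedged sketch (illustration only, not part of the patch hunks above): the buildChildren() methods in this generated file all follow the same mixed-content pattern — walk the DOM children in document order and record each one, element or text, as a container entry so exportChildren() can later replay them in the original order. SimpleMixedItem and build_mixed below are simplified stand-ins for the module's MixedContainer machinery, not its real API.

from xml.dom import minidom, Node

class SimpleMixedItem:
    """One entry of mixed content: either a nested element or a text run."""
    def __init__(self, name, value):
        self.name = name      # element name, or '' for plain text
        self.value = value    # DOM element (stand-in for a built child object) or the text itself

def build_mixed(node):
    """Collect child elements and text nodes of `node` in document order."""
    content = []
    for child in node.childNodes:
        if child.nodeType == Node.ELEMENT_NODE:
            # Same local-name handling as the generated code: drop any namespace prefix.
            name = child.nodeName.split(':')[-1]
            content.append(SimpleMixedItem(name, child))
        elif child.nodeType == Node.TEXT_NODE:
            content.append(SimpleMixedItem('', child.nodeValue))
    return content

if __name__ == '__main__':
    doc = minidom.parseString('<para>see <ref refid="x">foo</ref> for details</para>')
    for item in build_mixed(doc.documentElement):
        if item.name:
            print('element', item.name, item.value.toxml())
        else:
            print('text   ', repr(item.value))

Keeping elements and interleaved text in one ordered list is what lets the generated export methods reproduce a Doxygen paragraph verbatim instead of losing the prose between inline tags.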
self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect2': + nodeName_ == 'sect2': childobj_ = docSect2Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect2', childobj_) + MixedContainer.TypeNone, 'sect2', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docInternalS1Type @@ -5373,6 +6086,7 @@ def buildChildren(self, child_, nodeName_): class docInternalS2Type(GeneratedsSuper): subclass = None superclass = None + def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer @@ -5382,6 +6096,7 @@ def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docInternalS2Type.subclass: return docInternalS2Type.subclass(*args_, **kwargs_) @@ -5396,33 +6111,41 @@ def get_sect3(self): return self.sect3 def set_sect3(self, sect3): self.sect3 = sect3 def add_sect3(self, value): self.sect3.append(value) def insert_sect3(self, index, value): self.sect3[index] = value + def export(self, outfile, level, namespace_='', name_='docInternalS2Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docInternalS2Type') + self.exportAttributes(outfile, level, namespace_, + name_='docInternalS2Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS2Type'): pass + def exportChildren(self, outfile, level, namespace_='', name_='docInternalS2Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): if ( self.para is not None or self.sect3 is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docInternalS2Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') @@ -5436,32 +6159,35 @@ def exportLiteralChildren(self, outfile, level, name_): item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': + nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) + MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect3': + nodeName_ == 'sect3': childobj_ = docSect3Type.factory() childobj_.build(child_) obj_ = 
self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect3', childobj_) + MixedContainer.TypeNone, 'sect3', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docInternalS2Type @@ -5469,6 +6195,7 @@ def buildChildren(self, child_, nodeName_): class docInternalS3Type(GeneratedsSuper): subclass = None superclass = None + def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer @@ -5478,6 +6205,7 @@ def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docInternalS3Type.subclass: return docInternalS3Type.subclass(*args_, **kwargs_) @@ -5492,33 +6220,41 @@ def get_sect3(self): return self.sect3 def set_sect3(self, sect3): self.sect3 = sect3 def add_sect3(self, value): self.sect3.append(value) def insert_sect3(self, index, value): self.sect3[index] = value + def export(self, outfile, level, namespace_='', name_='docInternalS3Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docInternalS3Type') + self.exportAttributes(outfile, level, namespace_, + name_='docInternalS3Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS3Type'): pass + def exportChildren(self, outfile, level, namespace_='', name_='docInternalS3Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): if ( self.para is not None or self.sect3 is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docInternalS3Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') @@ -5532,32 +6268,35 @@ def exportLiteralChildren(self, outfile, level, name_): item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': + nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) + MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect3': + nodeName_ == 'sect3': childobj_ = docSect4Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'sect3', childobj_) + MixedContainer.TypeNone, 'sect3', childobj_) self.content_.append(obj_) elif child_.nodeType 
== Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docInternalS3Type @@ -5565,6 +6304,7 @@ def buildChildren(self, child_, nodeName_): class docInternalS4Type(GeneratedsSuper): subclass = None superclass = None + def __init__(self, para=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer @@ -5574,6 +6314,7 @@ def __init__(self, para=None, mixedclass_=None, content_=None): self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docInternalS4Type.subclass: return docInternalS4Type.subclass(*args_, **kwargs_) @@ -5584,32 +6325,40 @@ def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value + def export(self, outfile, level, namespace_='', name_='docInternalS4Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docInternalS4Type') + self.exportAttributes(outfile, level, namespace_, + name_='docInternalS4Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS4Type'): pass + def exportChildren(self, outfile, level, namespace_='', name_='docInternalS4Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): if ( self.para is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docInternalS4Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') @@ -5617,25 +6366,28 @@ def exportLiteralChildren(self, outfile, level, name_): item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': + nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'para', childobj_) + MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docInternalS4Type @@ -5643,6 +6395,7 @@ def buildChildren(self, child_, nodeName_): class docTitleType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, valueOf_='', mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer @@ -5652,6 +6405,7 @@ def __init__(self, valueOf_='', mixedclass_=None, content_=None): 
self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docTitleType.subclass: return docTitleType.subclass(*args_, **kwargs_) @@ -5660,6 +6414,7 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docTitleType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -5667,33 +6422,40 @@ def export(self, outfile, level, namespace_='', name_='docTitleType', namespaced outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docTitleType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='docTitleType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docTitleType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -5701,23 +6463,26 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class docTitleType class docParaType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, valueOf_='', mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer @@ -5727,6 +6492,7 @@ def __init__(self, valueOf_='', mixedclass_=None, content_=None): self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docParaType.subclass: return docParaType.subclass(*args_, **kwargs_) @@ -5735,6 +6501,7 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docParaType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -5742,33 +6509,40 @@ def export(self, outfile, level, namespace_='', name_='docParaType', namespacede outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % 
(namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docParaType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='docParaType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docParaType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -5776,23 +6550,26 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class docParaType class docMarkupType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, valueOf_='', mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer @@ -5802,6 +6579,7 @@ def __init__(self, valueOf_='', mixedclass_=None, content_=None): self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docMarkupType.subclass: return docMarkupType.subclass(*args_, **kwargs_) @@ -5810,40 +6588,49 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docMarkupType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docMarkupType') + self.exportAttributes(outfile, level, namespace_, + name_='docMarkupType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docMarkupType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='docMarkupType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docMarkupType'): level += 1 
self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -5851,23 +6638,26 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class docMarkupType class docURLLink(GeneratedsSuper): subclass = None superclass = None + def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None): self.url = url if mixedclass_ is None: @@ -5878,6 +6668,7 @@ def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None): self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docURLLink.subclass: return docURLLink.subclass(*args_, **kwargs_) @@ -5888,6 +6679,7 @@ def get_url(self): return self.url def set_url(self, url): self.url = url def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docURLLink', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -5895,36 +6687,44 @@ def export(self, outfile, level, namespace_='', name_='docURLLink', namespacedef outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docURLLink'): if self.url is not None: - outfile.write(' url=%s' % (self.format_string(quote_attrib(self.url).encode(ExternalEncoding), input_name='url'), )) + outfile.write(' url=%s' % (self.format_string(quote_attrib( + self.url).encode(ExternalEncoding), input_name='url'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docURLLink'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docURLLink'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.url is not None: showIndent(outfile, level) outfile.write('url = %s,\n' % (self.url,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes 
self.buildAttributes(attrs) @@ -5932,24 +6732,27 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('url'): self.url = attrs.get('url').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class docURLLink class docAnchorType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: @@ -5960,6 +6763,7 @@ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docAnchorType.subclass: return docAnchorType.subclass(*args_, **kwargs_) @@ -5970,43 +6774,53 @@ def get_id(self): return self.id def set_id(self, id): self.id = id def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docAnchorType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docAnchorType') + self.exportAttributes(outfile, level, namespace_, + name_='docAnchorType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docAnchorType'): if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + outfile.write(' id=%s' % (self.format_string(quote_attrib( + self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docAnchorType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docAnchorType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -6014,24 +6828,27 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value + def 
buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class docAnchorType class docFormulaType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: @@ -6042,6 +6859,7 @@ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docFormulaType.subclass: return docFormulaType.subclass(*args_, **kwargs_) @@ -6052,43 +6870,53 @@ def get_id(self): return self.id def set_id(self, id): self.id = id def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docFormulaType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docFormulaType') + self.exportAttributes(outfile, level, namespace_, + name_='docFormulaType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docFormulaType'): if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + outfile.write(' id=%s' % (self.format_string(quote_attrib( + self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docFormulaType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docFormulaType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -6096,27 +6924,31 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == 
Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class docFormulaType class docIndexEntryType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, primaryie=None, secondaryie=None): self.primaryie = primaryie self.secondaryie = secondaryie + def factory(*args_, **kwargs_): if docIndexEntryType.subclass: return docIndexEntryType.subclass(*args_, **kwargs_) @@ -6127,10 +6959,12 @@ def get_primaryie(self): return self.primaryie def set_primaryie(self, primaryie): self.primaryie = primaryie def get_secondaryie(self): return self.secondaryie def set_secondaryie(self, secondaryie): self.secondaryie = secondaryie + def export(self, outfile, level, namespace_='', name_='docIndexEntryType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docIndexEntryType') + self.exportAttributes(outfile, level, namespace_, + name_='docIndexEntryType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) @@ -6138,52 +6972,65 @@ def export(self, outfile, level, namespace_='', name_='docIndexEntryType', names outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docIndexEntryType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='docIndexEntryType'): if self.primaryie is not None: showIndent(outfile, level) - outfile.write('<%sprimaryie>%s\n' % (namespace_, self.format_string(quote_xml(self.primaryie).encode(ExternalEncoding), input_name='primaryie'), namespace_)) + outfile.write('<%sprimaryie>%s\n' % (namespace_, self.format_string( + quote_xml(self.primaryie).encode(ExternalEncoding), input_name='primaryie'), namespace_)) if self.secondaryie is not None: showIndent(outfile, level) - outfile.write('<%ssecondaryie>%s\n' % (namespace_, self.format_string(quote_xml(self.secondaryie).encode(ExternalEncoding), input_name='secondaryie'), namespace_)) + outfile.write('<%ssecondaryie>%s\n' % (namespace_, self.format_string( + quote_xml(self.secondaryie).encode(ExternalEncoding), input_name='secondaryie'), namespace_)) + def hasContent_(self): if ( self.primaryie is not None or self.secondaryie is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docIndexEntryType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) - outfile.write('primaryie=%s,\n' % quote_python(self.primaryie).encode(ExternalEncoding)) + outfile.write('primaryie=%s,\n' % quote_python( + self.primaryie).encode(ExternalEncoding)) showIndent(outfile, level) - outfile.write('secondaryie=%s,\n' % quote_python(self.secondaryie).encode(ExternalEncoding)) + outfile.write('secondaryie=%s,\n' % quote_python( + self.secondaryie).encode(ExternalEncoding)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if 
child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'primaryie': + nodeName_ == 'primaryie': primaryie_ = '' for text__content_ in child_.childNodes: primaryie_ += text__content_.nodeValue self.primaryie = primaryie_ elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'secondaryie': + nodeName_ == 'secondaryie': secondaryie_ = '' for text__content_ in child_.childNodes: secondaryie_ += text__content_.nodeValue @@ -6194,11 +7041,13 @@ def buildChildren(self, child_, nodeName_): class docListType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, listitem=None): if listitem is None: self.listitem = [] else: self.listitem = listitem + def factory(*args_, **kwargs_): if docListType.subclass: return docListType.subclass(*args_, **kwargs_) @@ -6209,6 +7058,7 @@ def get_listitem(self): return self.listitem def set_listitem(self, listitem): self.listitem = listitem def add_listitem(self, value): self.listitem.append(value) def insert_listitem(self, index, value): self.listitem[index] = value + def export(self, outfile, level, namespace_='', name_='docListType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -6220,25 +7070,31 @@ def export(self, outfile, level, namespace_='', name_='docListType', namespacede outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docListType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='docListType'): for listitem_ in self.listitem: listitem_.export(outfile, level, namespace_, name_='listitem') + def hasContent_(self): if ( self.listitem is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docListType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('listitem=[\n') @@ -6252,17 +7108,20 @@ def exportLiteralChildren(self, outfile, level, name_): level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'listitem': + nodeName_ == 'listitem': obj_ = docListItemType.factory() obj_.build(child_) self.listitem.append(obj_) @@ -6272,11 +7131,13 @@ def buildChildren(self, child_, nodeName_): class docListItemType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, para=None): if para is None: self.para = [] else: self.para = para + def factory(*args_, **kwargs_): if docListItemType.subclass: return docListItemType.subclass(*args_, **kwargs_) @@ -6287,10 +7148,12 @@ def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value + def export(self, outfile, level, namespace_='', name_='docListItemType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, 
name_='docListItemType') + self.exportAttributes(outfile, level, namespace_, + name_='docListItemType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) @@ -6298,25 +7161,31 @@ def export(self, outfile, level, namespace_='', name_='docListItemType', namespa outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docListItemType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='docListItemType'): for para_ in self.para: para_.export(outfile, level, namespace_, name_='para') + def hasContent_(self): if ( self.para is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docListItemType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('para=[\n') @@ -6330,17 +7199,20 @@ def exportLiteralChildren(self, outfile, level, name_): level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': + nodeName_ == 'para': obj_ = docParaType.factory() obj_.build(child_) self.para.append(obj_) @@ -6350,6 +7222,7 @@ def buildChildren(self, child_, nodeName_): class docSimpleSectType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, kind=None, title=None, para=None): self.kind = kind self.title = title @@ -6357,6 +7230,7 @@ def __init__(self, kind=None, title=None, para=None): self.para = [] else: self.para = para + def factory(*args_, **kwargs_): if docSimpleSectType.subclass: return docSimpleSectType.subclass(*args_, **kwargs_) @@ -6371,10 +7245,12 @@ def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind + def export(self, outfile, level, namespace_='', name_='docSimpleSectType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docSimpleSectType') + self.exportAttributes(outfile, level, namespace_, + name_='docSimpleSectType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) @@ -6382,31 +7258,37 @@ def export(self, outfile, level, namespace_='', name_='docSimpleSectType', names outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docSimpleSectType'): if self.kind is not None: outfile.write(' kind=%s' % (quote_attrib(self.kind), )) + def exportChildren(self, outfile, level, namespace_='', name_='docSimpleSectType'): if self.title: self.title.export(outfile, level, namespace_, name_='title') for para_ in self.para: para_.export(outfile, level, namespace_, name_='para') + def hasContent_(self): if ( self.title is not None or self.para is not None - ): + ): return True else: return False + def 
exportLiteral(self, outfile, level, name_='docSimpleSectType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.kind is not None: showIndent(outfile, level) outfile.write('kind = "%s",\n' % (self.kind,)) + def exportLiteralChildren(self, outfile, level, name_): if self.title: showIndent(outfile, level) @@ -6426,23 +7308,26 @@ def exportLiteralChildren(self, outfile, level, name_): level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'title': + nodeName_ == 'title': obj_ = docTitleType.factory() obj_.build(child_) self.set_title(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': + nodeName_ == 'para': obj_ = docParaType.factory() obj_.build(child_) self.para.append(obj_) @@ -6452,8 +7337,10 @@ def buildChildren(self, child_, nodeName_): class docVarListEntryType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, term=None): self.term = term + def factory(*args_, **kwargs_): if docVarListEntryType.subclass: return docVarListEntryType.subclass(*args_, **kwargs_) @@ -6462,10 +7349,12 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def get_term(self): return self.term def set_term(self, term): self.term = term + def export(self, outfile, level, namespace_='', name_='docVarListEntryType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docVarListEntryType') + self.exportAttributes(outfile, level, namespace_, + name_='docVarListEntryType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) @@ -6473,25 +7362,31 @@ def export(self, outfile, level, namespace_='', name_='docVarListEntryType', nam outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docVarListEntryType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='docVarListEntryType'): if self.term: self.term.export(outfile, level, namespace_, name_='term', ) + def hasContent_(self): if ( self.term is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docVarListEntryType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): if self.term: showIndent(outfile, level) @@ -6499,17 +7394,20 @@ def exportLiteralChildren(self, outfile, level, name_): self.term.exportLiteral(outfile, level, name_='term') showIndent(outfile, level) outfile.write('),\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, 
nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'term': + nodeName_ == 'term': obj_ = docTitleType.factory() obj_.build(child_) self.set_term(obj_) @@ -6519,8 +7417,10 @@ def buildChildren(self, child_, nodeName_): class docVariableListType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): if docVariableListType.subclass: return docVariableListType.subclass(*args_, **kwargs_) @@ -6529,10 +7429,12 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docVariableListType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docVariableListType') + self.exportAttributes(outfile, level, namespace_, + name_='docVariableListType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) @@ -6540,33 +7442,40 @@ def export(self, outfile, level, namespace_='', name_='docVariableListType', nam outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docVariableListType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='docVariableListType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docVariableListType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -6574,19 +7483,22 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class docVariableListType class docRefTextType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None): self.refid = refid self.kindref = kindref @@ -6599,6 +7511,7 @@ def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedcl self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docRefTextType.subclass: return docRefTextType.subclass(*args_, **kwargs_) @@ -6613,40 +7526,49 @@ def get_external(self): return self.external def set_external(self, external): self.external = external def getValueOf_(self): 
return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docRefTextType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docRefTextType') + self.exportAttributes(outfile, level, namespace_, + name_='docRefTextType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docRefTextType'): if self.refid is not None: - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + outfile.write(' refid=%s' % (self.format_string(quote_attrib( + self.refid).encode(ExternalEncoding), input_name='refid'), )) if self.kindref is not None: outfile.write(' kindref=%s' % (quote_attrib(self.kindref), )) if self.external is not None: - outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) + outfile.write(' external=%s' % (self.format_string(quote_attrib( + self.external).encode(ExternalEncoding), input_name='external'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docRefTextType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docRefTextType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) @@ -6657,9 +7579,11 @@ def exportLiteralAttributes(self, outfile, level, name_): if self.external is not None: showIndent(outfile, level) outfile.write('external = %s,\n' % (self.external,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -6667,6 +7591,7 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('refid'): self.refid = attrs.get('refid').value @@ -6674,21 +7599,23 @@ def buildAttributes(self, attrs): self.kindref = attrs.get('kindref').value if attrs.get('external'): self.external = attrs.get('external').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class docRefTextType class docTableType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, 
rows=None, cols=None, row=None, caption=None): self.rows = rows self.cols = cols @@ -6697,6 +7624,7 @@ def __init__(self, rows=None, cols=None, row=None, caption=None): else: self.row = row self.caption = caption + def factory(*args_, **kwargs_): if docTableType.subclass: return docTableType.subclass(*args_, **kwargs_) @@ -6713,6 +7641,7 @@ def get_rows(self): return self.rows def set_rows(self, rows): self.rows = rows def get_cols(self): return self.cols def set_cols(self, cols): self.cols = cols + def export(self, outfile, level, namespace_='', name_='docTableType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -6724,29 +7653,36 @@ def export(self, outfile, level, namespace_='', name_='docTableType', namespaced outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docTableType'): if self.rows is not None: - outfile.write(' rows="%s"' % self.format_integer(self.rows, input_name='rows')) + outfile.write(' rows="%s"' % self.format_integer( + self.rows, input_name='rows')) if self.cols is not None: - outfile.write(' cols="%s"' % self.format_integer(self.cols, input_name='cols')) + outfile.write(' cols="%s"' % self.format_integer( + self.cols, input_name='cols')) + def exportChildren(self, outfile, level, namespace_='', name_='docTableType'): for row_ in self.row: row_.export(outfile, level, namespace_, name_='row') if self.caption: self.caption.export(outfile, level, namespace_, name_='caption') + def hasContent_(self): if ( self.row is not None or self.caption is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docTableType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.rows is not None: showIndent(outfile, level) @@ -6754,6 +7690,7 @@ def exportLiteralAttributes(self, outfile, level, name_): if self.cols is not None: showIndent(outfile, level) outfile.write('cols = %s,\n' % (self.cols,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('row=[\n') @@ -6773,12 +7710,14 @@ def exportLiteralChildren(self, outfile, level, name_): self.caption.exportLiteral(outfile, level, name_='caption') showIndent(outfile, level) outfile.write('),\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('rows'): try: @@ -6790,14 +7729,15 @@ def buildAttributes(self, attrs): self.cols = int(attrs.get('cols').value) except ValueError as exp: raise ValueError('Bad integer attribute (cols): %s' % exp) + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'row': + nodeName_ == 'row': obj_ = docRowType.factory() obj_.build(child_) self.row.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'caption': + nodeName_ == 'caption': obj_ = docCaptionType.factory() obj_.build(child_) self.set_caption(obj_) @@ -6807,11 +7747,13 @@ def buildChildren(self, child_, nodeName_): class docRowType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, entry=None): if entry is None: self.entry = [] else: self.entry = entry + def factory(*args_, 
**kwargs_): if docRowType.subclass: return docRowType.subclass(*args_, **kwargs_) @@ -6822,6 +7764,7 @@ def get_entry(self): return self.entry def set_entry(self, entry): self.entry = entry def add_entry(self, value): self.entry.append(value) def insert_entry(self, index, value): self.entry[index] = value + def export(self, outfile, level, namespace_='', name_='docRowType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -6833,25 +7776,31 @@ def export(self, outfile, level, namespace_='', name_='docRowType', namespacedef outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docRowType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='docRowType'): for entry_ in self.entry: entry_.export(outfile, level, namespace_, name_='entry') + def hasContent_(self): if ( self.entry is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docRowType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('entry=[\n') @@ -6865,17 +7814,20 @@ def exportLiteralChildren(self, outfile, level, name_): level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'entry': + nodeName_ == 'entry': obj_ = docEntryType.factory() obj_.build(child_) self.entry.append(obj_) @@ -6885,12 +7837,14 @@ def buildChildren(self, child_, nodeName_): class docEntryType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, thead=None, para=None): self.thead = thead if para is None: self.para = [] else: self.para = para + def factory(*args_, **kwargs_): if docEntryType.subclass: return docEntryType.subclass(*args_, **kwargs_) @@ -6903,6 +7857,7 @@ def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_thead(self): return self.thead def set_thead(self, thead): self.thead = thead + def export(self, outfile, level, namespace_='', name_='docEntryType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -6914,28 +7869,34 @@ def export(self, outfile, level, namespace_='', name_='docEntryType', namespaced outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docEntryType'): if self.thead is not None: outfile.write(' thead=%s' % (quote_attrib(self.thead), )) + def exportChildren(self, outfile, level, namespace_='', name_='docEntryType'): for para_ in self.para: para_.export(outfile, level, namespace_, name_='para') + def hasContent_(self): if ( self.para is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docEntryType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def 
exportLiteralAttributes(self, outfile, level, name_): if self.thead is not None: showIndent(outfile, level) outfile.write('thead = "%s",\n' % (self.thead,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('para=[\n') @@ -6949,18 +7910,21 @@ def exportLiteralChildren(self, outfile, level, name_): level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('thead'): self.thead = attrs.get('thead').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': + nodeName_ == 'para': obj_ = docParaType.factory() obj_.build(child_) self.para.append(obj_) @@ -6970,6 +7934,7 @@ def buildChildren(self, child_, nodeName_): class docCaptionType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, valueOf_='', mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer @@ -6979,6 +7944,7 @@ def __init__(self, valueOf_='', mixedclass_=None, content_=None): self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docCaptionType.subclass: return docCaptionType.subclass(*args_, **kwargs_) @@ -6987,40 +7953,49 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docCaptionType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docCaptionType') + self.exportAttributes(outfile, level, namespace_, + name_='docCaptionType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docCaptionType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='docCaptionType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docCaptionType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -7028,23 +8003,26 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, 
'', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class docCaptionType class docHeadingType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None): self.level = level if mixedclass_ is None: @@ -7055,6 +8033,7 @@ def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None): self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docHeadingType.subclass: return docHeadingType.subclass(*args_, **kwargs_) @@ -7065,43 +8044,53 @@ def get_level(self): return self.level def set_level(self, level): self.level = level def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docHeadingType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docHeadingType') + self.exportAttributes(outfile, level, namespace_, + name_='docHeadingType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docHeadingType'): if self.level is not None: - outfile.write(' level="%s"' % self.format_integer(self.level, input_name='level')) + outfile.write(' level="%s"' % self.format_integer( + self.level, input_name='level')) + def exportChildren(self, outfile, level, namespace_='', name_='docHeadingType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docHeadingType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.level is not None: showIndent(outfile, level) outfile.write('level = %s,\n' % (self.level,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -7109,27 +8098,30 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('level'): try: self.level = int(attrs.get('level').value) except ValueError as exp: raise ValueError('Bad integer attribute (level): %s' % exp) + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += 
'![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class docHeadingType class docImageType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None): self.width = width self.type_ = type_ @@ -7143,6 +8135,7 @@ def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docImageType.subclass: return docImageType.subclass(*args_, **kwargs_) @@ -7159,6 +8152,7 @@ def get_height(self): return self.height def set_height(self, height): self.height = height def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docImageType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -7166,35 +8160,43 @@ def export(self, outfile, level, namespace_='', name_='docImageType', namespaced outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docImageType'): if self.width is not None: - outfile.write(' width=%s' % (self.format_string(quote_attrib(self.width).encode(ExternalEncoding), input_name='width'), )) + outfile.write(' width=%s' % (self.format_string(quote_attrib( + self.width).encode(ExternalEncoding), input_name='width'), )) if self.type_ is not None: outfile.write(' type=%s' % (quote_attrib(self.type_), )) if self.name is not None: - outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + outfile.write(' name=%s' % (self.format_string(quote_attrib( + self.name).encode(ExternalEncoding), input_name='name'), )) if self.height is not None: - outfile.write(' height=%s' % (self.format_string(quote_attrib(self.height).encode(ExternalEncoding), input_name='height'), )) + outfile.write(' height=%s' % (self.format_string(quote_attrib( + self.height).encode(ExternalEncoding), input_name='height'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docImageType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docImageType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.width is not None: showIndent(outfile, level) @@ -7208,9 +8210,11 @@ def exportLiteralAttributes(self, outfile, level, name_): if self.height is not None: showIndent(outfile, level) outfile.write('height = %s,\n' % (self.height,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -7218,6 +8222,7 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = 
child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('width'): self.width = attrs.get('width').value @@ -7227,21 +8232,23 @@ def buildAttributes(self, attrs): self.name = attrs.get('name').value if attrs.get('height'): self.height = attrs.get('height').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class docImageType class docDotFileType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None): self.name = name if mixedclass_ is None: @@ -7252,6 +8259,7 @@ def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None): self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docDotFileType.subclass: return docDotFileType.subclass(*args_, **kwargs_) @@ -7262,43 +8270,53 @@ def get_name(self): return self.name def set_name(self, name): self.name = name def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docDotFileType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docDotFileType') + self.exportAttributes(outfile, level, namespace_, + name_='docDotFileType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docDotFileType'): if self.name is not None: - outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + outfile.write(' name=%s' % (self.format_string(quote_attrib( + self.name).encode(ExternalEncoding), input_name='name'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docDotFileType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docDotFileType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.name is not None: showIndent(outfile, level) outfile.write('name = %s,\n' % (self.name,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -7306,24 +8324,27 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def 
buildAttributes(self, attrs): if attrs.get('name'): self.name = attrs.get('name').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class docDotFileType class docTocItemType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: @@ -7334,6 +8355,7 @@ def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docTocItemType.subclass: return docTocItemType.subclass(*args_, **kwargs_) @@ -7344,43 +8366,53 @@ def get_id(self): return self.id def set_id(self, id): self.id = id def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docTocItemType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docTocItemType') + self.exportAttributes(outfile, level, namespace_, + name_='docTocItemType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docTocItemType'): if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + outfile.write(' id=%s' % (self.format_string(quote_attrib( + self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docTocItemType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docTocItemType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -7388,29 +8420,33 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + 
MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class docTocItemType class docTocListType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, tocitem=None): if tocitem is None: self.tocitem = [] else: self.tocitem = tocitem + def factory(*args_, **kwargs_): if docTocListType.subclass: return docTocListType.subclass(*args_, **kwargs_) @@ -7421,10 +8457,12 @@ def get_tocitem(self): return self.tocitem def set_tocitem(self, tocitem): self.tocitem = tocitem def add_tocitem(self, value): self.tocitem.append(value) def insert_tocitem(self, index, value): self.tocitem[index] = value + def export(self, outfile, level, namespace_='', name_='docTocListType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docTocListType') + self.exportAttributes(outfile, level, namespace_, + name_='docTocListType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) @@ -7432,25 +8470,31 @@ def export(self, outfile, level, namespace_='', name_='docTocListType', namespac outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docTocListType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='docTocListType'): for tocitem_ in self.tocitem: tocitem_.export(outfile, level, namespace_, name_='tocitem') + def hasContent_(self): if ( self.tocitem is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docTocListType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('tocitem=[\n') @@ -7464,17 +8508,20 @@ def exportLiteralChildren(self, outfile, level, name_): level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'tocitem': + nodeName_ == 'tocitem': obj_ = docTocItemType.factory() obj_.build(child_) self.tocitem.append(obj_) @@ -7484,12 +8531,14 @@ def buildChildren(self, child_, nodeName_): class docLanguageType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, langid=None, para=None): self.langid = langid if para is None: self.para = [] else: self.para = para + def factory(*args_, **kwargs_): if docLanguageType.subclass: return docLanguageType.subclass(*args_, **kwargs_) @@ -7502,10 +8551,12 @@ def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_langid(self): return self.langid def set_langid(self, langid): self.langid = langid + def export(self, outfile, level, namespace_='', name_='docLanguageType', namespacedef_=''): showIndent(outfile, level) 
outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docLanguageType') + self.exportAttributes(outfile, level, namespace_, + name_='docLanguageType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) @@ -7513,28 +8564,35 @@ def export(self, outfile, level, namespace_='', name_='docLanguageType', namespa outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docLanguageType'): if self.langid is not None: - outfile.write(' langid=%s' % (self.format_string(quote_attrib(self.langid).encode(ExternalEncoding), input_name='langid'), )) + outfile.write(' langid=%s' % (self.format_string(quote_attrib( + self.langid).encode(ExternalEncoding), input_name='langid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docLanguageType'): for para_ in self.para: para_.export(outfile, level, namespace_, name_='para') + def hasContent_(self): if ( self.para is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docLanguageType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.langid is not None: showIndent(outfile, level) outfile.write('langid = %s,\n' % (self.langid,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('para=[\n') @@ -7548,18 +8606,21 @@ def exportLiteralChildren(self, outfile, level, name_): level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('langid'): self.langid = attrs.get('langid').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': + nodeName_ == 'para': obj_ = docParaType.factory() obj_.build(child_) self.para.append(obj_) @@ -7569,12 +8630,14 @@ def buildChildren(self, child_, nodeName_): class docParamListType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, kind=None, parameteritem=None): self.kind = kind if parameteritem is None: self.parameteritem = [] else: self.parameteritem = parameteritem + def factory(*args_, **kwargs_): if docParamListType.subclass: return docParamListType.subclass(*args_, **kwargs_) @@ -7582,15 +8645,21 @@ def factory(*args_, **kwargs_): return docParamListType(*args_, **kwargs_) factory = staticmethod(factory) def get_parameteritem(self): return self.parameteritem - def set_parameteritem(self, parameteritem): self.parameteritem = parameteritem + def set_parameteritem( + self, parameteritem): self.parameteritem = parameteritem + def add_parameteritem(self, value): self.parameteritem.append(value) - def insert_parameteritem(self, index, value): self.parameteritem[index] = value + def insert_parameteritem( + self, index, value): self.parameteritem[index] = value + def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind + def export(self, outfile, level, namespace_='', name_='docParamListType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - 
self.exportAttributes(outfile, level, namespace_, name_='docParamListType') + self.exportAttributes(outfile, level, namespace_, + name_='docParamListType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) @@ -7598,28 +8667,35 @@ def export(self, outfile, level, namespace_='', name_='docParamListType', namesp outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docParamListType'): if self.kind is not None: outfile.write(' kind=%s' % (quote_attrib(self.kind), )) + def exportChildren(self, outfile, level, namespace_='', name_='docParamListType'): for parameteritem_ in self.parameteritem: - parameteritem_.export(outfile, level, namespace_, name_='parameteritem') + parameteritem_.export( + outfile, level, namespace_, name_='parameteritem') + def hasContent_(self): if ( self.parameteritem is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docParamListType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.kind is not None: showIndent(outfile, level) outfile.write('kind = "%s",\n' % (self.kind,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('parameteritem=[\n') @@ -7633,18 +8709,21 @@ def exportLiteralChildren(self, outfile, level, name_): level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'parameteritem': + nodeName_ == 'parameteritem': obj_ = docParamListItem.factory() obj_.build(child_) self.parameteritem.append(obj_) @@ -7654,12 +8733,14 @@ def buildChildren(self, child_, nodeName_): class docParamListItem(GeneratedsSuper): subclass = None superclass = None + def __init__(self, parameternamelist=None, parameterdescription=None): if parameternamelist is None: self.parameternamelist = [] else: self.parameternamelist = parameternamelist self.parameterdescription = parameterdescription + def factory(*args_, **kwargs_): if docParamListItem.subclass: return docParamListItem.subclass(*args_, **kwargs_) @@ -7667,15 +8748,25 @@ def factory(*args_, **kwargs_): return docParamListItem(*args_, **kwargs_) factory = staticmethod(factory) def get_parameternamelist(self): return self.parameternamelist - def set_parameternamelist(self, parameternamelist): self.parameternamelist = parameternamelist - def add_parameternamelist(self, value): self.parameternamelist.append(value) - def insert_parameternamelist(self, index, value): self.parameternamelist[index] = value + + def set_parameternamelist( + self, parameternamelist): self.parameternamelist = parameternamelist + + def add_parameternamelist( + self, value): self.parameternamelist.append(value) + def insert_parameternamelist( + self, index, value): self.parameternamelist[index] = value + def get_parameterdescription(self): return self.parameterdescription - def set_parameterdescription(self, parameterdescription): self.parameterdescription = parameterdescription + + def 
set_parameterdescription( + self, parameterdescription): self.parameterdescription = parameterdescription + def export(self, outfile, level, namespace_='', name_='docParamListItem', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docParamListItem') + self.exportAttributes(outfile, level, namespace_, + name_='docParamListItem') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) @@ -7683,28 +8774,36 @@ def export(self, outfile, level, namespace_='', name_='docParamListItem', namesp outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docParamListItem'): pass + def exportChildren(self, outfile, level, namespace_='', name_='docParamListItem'): for parameternamelist_ in self.parameternamelist: - parameternamelist_.export(outfile, level, namespace_, name_='parameternamelist') + parameternamelist_.export( + outfile, level, namespace_, name_='parameternamelist') if self.parameterdescription: - self.parameterdescription.export(outfile, level, namespace_, name_='parameterdescription', ) + self.parameterdescription.export( + outfile, level, namespace_, name_='parameterdescription', ) + def hasContent_(self): if ( self.parameternamelist is not None or self.parameterdescription is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docParamListItem'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('parameternamelist=[\n') @@ -7712,7 +8811,8 @@ def exportLiteralChildren(self, outfile, level, name_): for parameternamelist in self.parameternamelist: showIndent(outfile, level) outfile.write('model_.parameternamelist(\n') - parameternamelist.exportLiteral(outfile, level, name_='parameternamelist') + parameternamelist.exportLiteral( + outfile, level, name_='parameternamelist') showIndent(outfile, level) outfile.write('),\n') level -= 1 @@ -7721,25 +8821,29 @@ def exportLiteralChildren(self, outfile, level, name_): if self.parameterdescription: showIndent(outfile, level) outfile.write('parameterdescription=model_.descriptionType(\n') - self.parameterdescription.exportLiteral(outfile, level, name_='parameterdescription') + self.parameterdescription.exportLiteral( + outfile, level, name_='parameterdescription') showIndent(outfile, level) outfile.write('),\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'parameternamelist': + nodeName_ == 'parameternamelist': obj_ = docParamNameList.factory() obj_.build(child_) self.parameternamelist.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'parameterdescription': + nodeName_ == 'parameterdescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_parameterdescription(obj_) @@ -7749,11 +8853,13 @@ def buildChildren(self, child_, nodeName_): class docParamNameList(GeneratedsSuper): subclass = 
None superclass = None + def __init__(self, parametername=None): if parametername is None: self.parametername = [] else: self.parametername = parametername + def factory(*args_, **kwargs_): if docParamNameList.subclass: return docParamNameList.subclass(*args_, **kwargs_) @@ -7761,13 +8867,19 @@ def factory(*args_, **kwargs_): return docParamNameList(*args_, **kwargs_) factory = staticmethod(factory) def get_parametername(self): return self.parametername - def set_parametername(self, parametername): self.parametername = parametername + def set_parametername( + self, parametername): self.parametername = parametername + def add_parametername(self, value): self.parametername.append(value) - def insert_parametername(self, index, value): self.parametername[index] = value + + def insert_parametername( + self, index, value): self.parametername[index] = value + def export(self, outfile, level, namespace_='', name_='docParamNameList', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docParamNameList') + self.exportAttributes(outfile, level, namespace_, + name_='docParamNameList') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) @@ -7775,25 +8887,32 @@ def export(self, outfile, level, namespace_='', name_='docParamNameList', namesp outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docParamNameList'): pass + def exportChildren(self, outfile, level, namespace_='', name_='docParamNameList'): for parametername_ in self.parametername: - parametername_.export(outfile, level, namespace_, name_='parametername') + parametername_.export( + outfile, level, namespace_, name_='parametername') + def hasContent_(self): if ( self.parametername is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docParamNameList'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('parametername=[\n') @@ -7807,17 +8926,20 @@ def exportLiteralChildren(self, outfile, level, name_): level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'parametername': + nodeName_ == 'parametername': obj_ = docParamName.factory() obj_.build(child_) self.parametername.append(obj_) @@ -7827,6 +8949,7 @@ def buildChildren(self, child_, nodeName_): class docParamName(GeneratedsSuper): subclass = None superclass = None + def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None): self.direction = direction if mixedclass_ is None: @@ -7837,6 +8960,7 @@ def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None): self.content_ = [] else: self.content_ = content_ + def factory(*args_, **kwargs_): if docParamName.subclass: return docParamName.subclass(*args_, **kwargs_) @@ -7847,6 +8971,7 @@ def get_ref(self): return self.ref def 
set_ref(self, ref): self.ref = ref def get_direction(self): return self.direction def set_direction(self, direction): self.direction = direction + def export(self, outfile, level, namespace_='', name_='docParamName', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -7854,28 +8979,34 @@ def export(self, outfile, level, namespace_='', name_='docParamName', namespaced outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) + def exportAttributes(self, outfile, level, namespace_='', name_='docParamName'): if self.direction is not None: outfile.write(' direction=%s' % (quote_attrib(self.direction), )) + def exportChildren(self, outfile, level, namespace_='', name_='docParamName'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) + def hasContent_(self): if ( self.ref is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docParamName'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.direction is not None: showIndent(outfile, level) outfile.write('direction = "%s",\n' % (self.direction,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') @@ -7883,26 +9014,29 @@ def exportLiteralChildren(self, outfile, level, name_): item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('direction'): self.direction = attrs.get('direction').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'ref': + nodeName_ == 'ref': childobj_ = docRefTextType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, - MixedContainer.TypeNone, 'ref', childobj_) + MixedContainer.TypeNone, 'ref', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, - MixedContainer.TypeNone, '', child_.nodeValue) + MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docParamName @@ -7910,6 +9044,7 @@ def buildChildren(self, child_, nodeName_): class docXRefSectType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, id=None, xreftitle=None, xrefdescription=None): self.id = id if xreftitle is None: @@ -7917,6 +9052,7 @@ def __init__(self, id=None, xreftitle=None, xrefdescription=None): else: self.xreftitle = xreftitle self.xrefdescription = xrefdescription + def factory(*args_, **kwargs_): if docXRefSectType.subclass: return docXRefSectType.subclass(*args_, **kwargs_) @@ -7928,13 +9064,17 @@ def set_xreftitle(self, xreftitle): self.xreftitle = xreftitle def add_xreftitle(self, value): self.xreftitle.append(value) def insert_xreftitle(self, index, value): self.xreftitle[index] = value def get_xrefdescription(self): return self.xrefdescription - def set_xrefdescription(self, xrefdescription): self.xrefdescription = xrefdescription + def set_xrefdescription( + self, xrefdescription): self.xrefdescription = xrefdescription + def 
get_id(self): return self.id def set_id(self, id): self.id = id + def export(self, outfile, level, namespace_='', name_='docXRefSectType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) - self.exportAttributes(outfile, level, namespace_, name_='docXRefSectType') + self.exportAttributes(outfile, level, namespace_, + name_='docXRefSectType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) @@ -7942,66 +9082,80 @@ def export(self, outfile, level, namespace_='', name_='docXRefSectType', namespa outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docXRefSectType'): if self.id is not None: - outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) + outfile.write(' id=%s' % (self.format_string(quote_attrib( + self.id).encode(ExternalEncoding), input_name='id'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docXRefSectType'): for xreftitle_ in self.xreftitle: showIndent(outfile, level) - outfile.write('<%sxreftitle>%s\n' % (namespace_, self.format_string(quote_xml(xreftitle_).encode(ExternalEncoding), input_name='xreftitle'), namespace_)) + outfile.write('<%sxreftitle>%s\n' % (namespace_, self.format_string( + quote_xml(xreftitle_).encode(ExternalEncoding), input_name='xreftitle'), namespace_)) if self.xrefdescription: - self.xrefdescription.export(outfile, level, namespace_, name_='xrefdescription', ) + self.xrefdescription.export( + outfile, level, namespace_, name_='xrefdescription', ) + def hasContent_(self): if ( self.xreftitle is not None or self.xrefdescription is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docXRefSectType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('xreftitle=[\n') level += 1 for xreftitle in self.xreftitle: showIndent(outfile, level) - outfile.write('%s,\n' % quote_python(xreftitle).encode(ExternalEncoding)) + outfile.write('%s,\n' % quote_python( + xreftitle).encode(ExternalEncoding)) level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.xrefdescription: showIndent(outfile, level) outfile.write('xrefdescription=model_.descriptionType(\n') - self.xrefdescription.exportLiteral(outfile, level, name_='xrefdescription') + self.xrefdescription.exportLiteral( + outfile, level, name_='xrefdescription') showIndent(outfile, level) outfile.write('),\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'xreftitle': + nodeName_ == 'xreftitle': xreftitle_ = '' for text__content_ in child_.childNodes: xreftitle_ += text__content_.nodeValue self.xreftitle.append(xreftitle_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'xrefdescription': + nodeName_ == 
'xrefdescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_xrefdescription(obj_) @@ -8011,6 +9165,7 @@ def buildChildren(self, child_, nodeName_): class docCopyType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, link=None, para=None, sect1=None, internal=None): self.link = link if para is None: @@ -8022,6 +9177,7 @@ def __init__(self, link=None, para=None, sect1=None, internal=None): else: self.sect1 = sect1 self.internal = internal + def factory(*args_, **kwargs_): if docCopyType.subclass: return docCopyType.subclass(*args_, **kwargs_) @@ -8040,6 +9196,7 @@ def get_internal(self): return self.internal def set_internal(self, internal): self.internal = internal def get_link(self): return self.link def set_link(self, link): self.link = link + def export(self, outfile, level, namespace_='', name_='docCopyType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -8051,9 +9208,12 @@ def export(self, outfile, level, namespace_='', name_='docCopyType', namespacede outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docCopyType'): if self.link is not None: - outfile.write(' link=%s' % (self.format_string(quote_attrib(self.link).encode(ExternalEncoding), input_name='link'), )) + outfile.write(' link=%s' % (self.format_string(quote_attrib( + self.link).encode(ExternalEncoding), input_name='link'), )) + def exportChildren(self, outfile, level, namespace_='', name_='docCopyType'): for para_ in self.para: para_.export(outfile, level, namespace_, name_='para') @@ -8061,24 +9221,28 @@ def exportChildren(self, outfile, level, namespace_='', name_='docCopyType'): sect1_.export(outfile, level, namespace_, name_='sect1') if self.internal: self.internal.export(outfile, level, namespace_, name_='internal') + def hasContent_(self): if ( self.para is not None or self.sect1 is not None or self.internal is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docCopyType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.link is not None: showIndent(outfile, level) outfile.write('link = %s,\n' % (self.link,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('para=[\n') @@ -8110,28 +9274,31 @@ def exportLiteralChildren(self, outfile, level, name_): self.internal.exportLiteral(outfile, level, name_='internal') showIndent(outfile, level) outfile.write('),\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('link'): self.link = attrs.get('link').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'para': + nodeName_ == 'para': obj_ = docParaType.factory() obj_.build(child_) self.para.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'sect1': + nodeName_ == 'sect1': obj_ = docSect1Type.factory() obj_.build(child_) self.sect1.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'internal': + nodeName_ == 'internal': obj_ = docInternalType.factory() obj_.build(child_) 
self.set_internal(obj_) @@ -8141,9 +9308,11 @@ def buildChildren(self, child_, nodeName_): class docCharType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, char=None, valueOf_=''): self.char = char self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): if docCharType.subclass: return docCharType.subclass(*args_, **kwargs_) @@ -8154,6 +9323,7 @@ def get_char(self): return self.char def set_char(self, char): self.char = char def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docCharType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -8165,36 +9335,43 @@ def export(self, outfile, level, namespace_='', name_='docCharType', namespacede outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='docCharType'): if self.char is not None: outfile.write(' char=%s' % (quote_attrib(self.char), )) + def exportChildren(self, outfile, level, namespace_='', name_='docCharType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docCharType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.char is not None: showIndent(outfile, level) outfile.write('char = "%s",\n' % (self.char,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -8202,22 +9379,26 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('char'): self.char = attrs.get('char').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class docCharType class docEmptyType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ + def factory(*args_, **kwargs_): if docEmptyType.subclass: return docEmptyType.subclass(*args_, **kwargs_) @@ -8226,6 +9407,7 @@ def factory(*args_, **kwargs_): factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ + def export(self, outfile, level, namespace_='', name_='docEmptyType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -8237,33 +9419,40 @@ def export(self, outfile, level, namespace_='', name_='docEmptyType', namespaced outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', 
name_='docEmptyType'): pass + def exportChildren(self, outfile, level, namespace_='', name_='docEmptyType'): - if self.valueOf_.find('![CDATA')>-1: - value=quote_xml('%s' % self.valueOf_) - value=value.replace('![CDATA','') + if self.valueOf_.find('![CDATA') > -1: + value = quote_xml('%s' % self.valueOf_) + value = value.replace('![CDATA', '') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) + def hasContent_(self): if ( self.valueOf_ is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='docEmptyType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): pass + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) @@ -8271,13 +9460,15 @@ def build(self, node_): for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): pass + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: - self.valueOf_ += '![CDATA['+child_.nodeValue+']]' + self.valueOf_ += '![CDATA[' + child_.nodeValue + ']]' # end class docEmptyType @@ -8287,6 +9478,7 @@ def buildChildren(self, child_, nodeName_): -s Use the SAX parser, not the minidom parser. """ + def usage(): print(USAGE_TEXT) sys.exit(1) @@ -8301,7 +9493,7 @@ def parse(inFileName): doc = None sys.stdout.write('\n') rootObj.export(sys.stdout, 0, name_="doxygen", - namespacedef_='') + namespacedef_='') return rootObj @@ -8314,7 +9506,7 @@ def parseString(inString): doc = None sys.stdout.write('\n') rootObj.export(sys.stdout, 0, name_="doxygen", - namespacedef_='') + namespacedef_='') return rootObj @@ -8343,4 +9535,4 @@ def main(): if __name__ == '__main__': main() #import pdb - #pdb.run('main()') + # pdb.run('main()') diff --git a/docs/doxygen/doxyxml/generated/compoundsuper.pyc b/docs/doxygen/doxyxml/generated/compoundsuper.pyc deleted file mode 100644 index a715daa040e2e35521020491a2f8fa1c76ad45f1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 479090 zcmeFa34m3{btYV|_ipxm5fTjuXb=+GSA--M?P^IEBqR`OHGLoOpf}!oAY@y%B(N>p zvZKfuM{(?oV<%25C*Bh~87Je!*?&9Zc+EDGKh`9_ll;kK`~NtTnRveMt6O#NTe|x_ z(7O*1NVmGG-nn&8ovJ!j=hUfFt^YKm=f8jUbB_#}=wCbj-ioL2N1eum_&;NYjoIzv z8M8a1{1!9ZVn$lc?lzMlT&o#wHzOTpcZW^4Dcxyycbd#o^Yu!*8SXM8-DY>U$?O_k zW7@Jk=5UY6pEQ}>)9_Y@*^h!c%}HZU81pE=MlY8Zo+P6@19}89z|xFaGD~s zOgLSU*(RK!0%kI)0_K=-mdVaF`+@RopF7Wlb4<2dxpRFkP@Ly-=lk3RCS2fh7y8_V zCS2rl7yH~rCS2ljm-^hrCcMDqF7vrdOn9Noy~yV-HQ{oX+v{^LFyRW9yVB<_GvO+i zd$G^G(1fd9?i!zakqOtj+&-VX+=S~~?s}iwYr;!h?gpQ`!i1N)+{=9KN)v8$xtIIg zRVKW`UT4BbUG8q5d%X$wxZKBl?hPi~>vA9Wxi^|{z~w&Sb8j-?pv&Fob2piA z$mNDU_hu7jUG9FLyV-;XT<$@idy5HkF87enz14(=UGA{Yz0HIpE_c-DZZYAQ%N_T* zTTOVx<>r0vHWL2vQh;agqq+k9@n3E%E=pZB@+(%9LL6`dzK6keX|D((Okk8#?!k=`x zANIMAneZbn_osaBUK9Sb%l)X&ecXgU<8pu2=MI?gV=ngWJF=tM{9rK~7IqvR z&*psYuV6>_udA2~W!%no4g*~0I#_nXXuOiyO6{_|947ZTNf z5|M^b9YQ)f^x+PZe=kM#DE>A|>xi=%yC|@uiP*A7K1iV*KQJ-c6G267zN3%a&9z>z z5Q(_)f<;WSK5V-{qfaR`of>wf!nFcfxw^2jz$tPK>2#Cw)~+tBRT~GKfrF(CIE$$v zgJn!|LP(_r*#OayonI$bVT6(GsqW1w5gC!anS~GyBV8n3E zw^fu!CPt6Xgf!PC!tcNkVgu|Lz2CugIJj1mA5kV=ON=b69od#;CBBnQtCDQHFWeE5 
zqnR380-5?(TGc7rGwOY^cCW_7^5kujv?NuEb?&qXQg81Dty@STnEsy5Tct)NdpVFw z)^qe&`HB>#J`PKUK2?kzuEb86+)%5&DPD8@Et%4&8joDUQBW__K|c) z2SaK&Y}mhvVn|WJD^wYvnAp;P_@juge`lkMl4q^cW4ye9klZI9XJ=&zvJ7tpw z&qr>a%?0gL{(hVGSQuF3*?YeSA_m;CHEQ1(hormGxhdTcpf(%UX}?ZZ8#rMo0pg(-wJ#h;8Gt~^~vBnPVx!gZ&8OnHtw>eyVX~XRf9%eId zcQDJ*Ct5Cj->Yx*(q9;EgPL+k%UHHLoTKBc@{r2zwuaer4zC1>Lq_5>b>xi0)hjR$ z*D*a^$RI^UcusDRGS@Ls0mXxKSQSJp4ONw4i^0t~<>1Y;ZEKMJ50`w@d$G*GyN5l) z1pSWQomVFPH{mF!8}blAw-mMt89n=W0%cIK4(#V<9lMIvsm8|UbZUjWT*~7JP1D`QuTkeX2Fd8y$f1e7hsU(tNTYIWpX-dR zy_L$|hpc1ORcTc1C7z4o#2cdDrT9IHcTxO4#R)xJ2+O)yAKJu7)S-DQ+v#AcgSqtT zIK`k%W>mg^< z6&YOmEUX-T%4>}07P^lAfMC8dg;^&}DdM2}Vv*~8Uxe*Q2JBTS&|ob&cBt-6wkew! zt?_y{=ozAYs)t&QXv0s~js*UzQ}~-jI((*Jq-)A^tdy8BJ}reg!8dVBKMO`T z%ag!XnBHmcKEGV@rTjENv@5TEa zWE%?le5oI}PwKv){C%3Vph{Kz&Dc6Hpn}cg$UN-H`0AJ_LXOt>0 zw)d1`M%XHvVNKA<3KF+%d8zWFzE3e9YVOmw!hIGw)m4eH!rbaK;vW0QyW<~+fvgSSCzRA~5DY-gWCy*D~GRV#9H$rvY zJ!K%or)eOs@T;`vCb{_65Gy);HbjP&1?j~^&DbfG!cO4pp#G!zCBb?5yhu5|S z{8BjYcmsN}uRyq5+(EzgffB|D_Nbkqz&c8%Sx4>uX~rL zF~3;Oy!jVvW|-~8<{IihZ60}|%1MQ~*>hcNO*=F`ad2#Om;5^kZj+eRJgfBTUd(Ff zmXfJOo8EMVcg4&0aDlagr-*P00}YzZhM!>>Q$@>oe`S1Z-{@F%q%wYVa{S0d@yK`zu-XO{x=rqKreJSMo(-e_m91m2jf z_6izi2L2k(55ZR5z2Y0OS@5|zv*2@QGYcj*!T&lf$k^-rnqX;s9c)-8|Km^c*ZSoG zu-f|y!~zw4b)a;uemwS|toY`Mdj?ArPo4I{ulj`{n%GzU!llz+DCsr@c@}(9E&ap8Nl` zEPtHF@y^ajt*JtHO$WT+uj%(|-am7AHtn3y{FXDy(!0YE%r$T-^P+kt9^#JGMmBpL zk72b4kUd@o$u1XfcY^5TYol(Z$|1ABTsgAig+#L+ZbBbYxjU_T(|A5*bQv6SfcT@Zk z#d|3Jn8FzT6G3SV&u0TO3YQB&>NSEkj$ziHH!x?`y->5kdF9|4xfo`U7oFpOkS!fl zeV2-{;N!p2KK?4brjfw+Xz=k5sehK1#`;TR_6NFK%bH%h)hi5FNCTu$jAF=M{g4Jo z9{pbBb4AI3Rvw4+`kmZ-!8d`pSH~$@V$D5n)_dAziTyp=brP0XBVYXy zM7k}iw7eUZ>T~Y~Tt3zzq|+~$|0sdkbH&2dFGI@(v*T4lU+^N=;<+bxMYDk>nQ%pNn*~@4 zVCyZ%$*r~KW&EhgXQ!QShN~#NFSuvNjW_JNVf_30XMh#*1;K4w{DZIU-W`H#@4h|` z<|3ktHtMdR>ck1$K^JCmT!!nDwBewZH6p&mHb0by6=eO}VJZw?OZaNiKjq@51z zpQ2r!I~KVE`xih2q}pM+#lHpam^Hu>I1p;g8WzTg%nrYLxbH(y`DBPPj8SC0GF`VImH zTuJ}|Ejh*aq!<+gg^l)>lDPKCPAu`q^0ABKxb}Q|IGCNY;P$Y=TyT32^ZU5Am*Rek z5f!;OR>FSUBHVuk9O@7bQI^S*PrmfVCFCF$6$puz3e03DuFmpOyp}P=aAKUnznLR= z$dUu_cIc2TDCzS|>+@tfyD#W{)w|dBkR&t&LbHNg-A%ao+mqX%4$psUMFnfa?Q!e* zR@iD6&u90<6Iz(6GzG_g;sY>ucm3mB8`NnSLr`dhz02O=g-l-~`r1E%w7}g~mDflM zFdV`ppq_3fLOF7KgdjYh*Xjtbzez(ks#UQwcu%rc_qu#wFgSwu4)!ayILafCRzVe9pA__|C?+o_vE zMhKQLvn;3X;1(S%*8$<}(nr5_{U_q_R z*U>J|9gBQjz5ztQ?K*5-zB$JlkS#fSb$D80cr=)n7Qnjv$uynzcGu-Em8Z~FWyPwh zIR4NBV-x;+eD*Ecmw>~-*E(cfenpPpA#@Iizy#l2mv0?8a`@Tz>Q+ zYk{r;X@R>f+;us^>%XxUYI0r9uBZ`Fj{iYI-E}!jav`GFb@}zV5lvqp=rXEL(OypX zwl&J#b7)uRtjli#T=1W&W$6gBRsDjl5@q%j{j4w?(+am1Ogg9upPRt!O%Ds#UJflU zq;>fZ(r=x=F5gKrApP#TJlzHqvFPAvxq^pv`HSd*1-LHXO}o15a?Lg%B1ovDwGXVK7&B8MjI<*(EIBf8c9AffF#svlD!2avnvE>jb$UQyvg#CkcU%cVwc z?YgbTn?1O=%bPv8wac45xV6iPeTnav`+5jSud?Wf^|X-#Nz%jRCRNx7qm9Q(k$tYW zSK5(?4A|a9CZ{4f5ydw=BT11hhZEgkM@%`N=-0%NhPT&EjO`y-@@ub+ERSRR+K>YK zk&M^1CjB)8)mBZwPtwAZ&;-0sfEOE{#bNM>ha*z>DWDcr%vB*^sG;Sc5b!Cl@uHx0 zq09_A!EglnYa4@pwjjYPQUBWkEA=iGuC>w7a07C?qTjn2dq+oU#!sisV00#jSp*wn z0xM8Uq??%*1m9@qk6TLtWpA$LKqT$l3Gj=w$8+H#_c(Yfh!}9kc9dr0S91dqx9 z@Ib_Kx;__&B_H49sh0ru>zp_*7p9(V*u8Rk-&DGz-V5v~AGQJY>Kh@&>d29a$+3w^ zHW1v?Ftoh$tPy@9=L(-OvPeG^m(=)PeeKu4U&4vm3B9)Nwt`){m5HhwHv%K}eqrNx zKpc`qhs2He!`wiDW~U64cj`ju-fRcKev2w)Zxh%*TigiAi9}3Osl}{>91S03Gj#L^ z#Zyy|8o8Y^<|b~+edi3nTiF;T8#%y^vOC7F(1KFbc4niu-N&^DD9C2$xCliEvUZ+$ zH~n~yzMTa-n3|-J-KnLqOMOO?6qOA&;Pi1YJ zQxJJ~fI1;8qKoJXhju{$11WqSu=Vx1YG4euDT_HLDvFE@I~o{Z|3YKf&lXV3Kr^Xe zfVpidm;nKZ3Wj!kF>MC}Avt!a?metC>R^m`9%+XThT!M14u*F8b<&QYgfWk1v?qsK zv{3f3(7J`>FY`Ba9*Y)Cs%URIGbiEKXa;;Kr^D&jnQ3oU9ZrZQ5n>qE%So7VJu?s9 
zH_Kim&BMjauZVTupz%!dr>w|v^V+V+`6t?Ad0~+&a{d)WFlBVu){+0njYjAaS%Loa zXndP0_4F|FIwv8#gt;zFNvIYkDG9lrgid7u&6bhSgKTEC5w{c}6M6;spY#?62&RDL z%0bx8NI3WHE<@5ybdm~bCGf@*CUPXhlv5AUVO45XNHrM=Z2~(X6`~Abjya>gT zei&|Wkt(r2&1}gCwbsg1GBEn>I;tq+#)41HxsDB~Qn4Y=&HZ00J=%AwZZ(hU6N(3^ zIFYI-9;EzhR8cU<5Fv7f!$azdffoCtCQ1}K0UiM zdX^tt1n(Q` zThZm{)WJ0I)43{f!|Agdj#aVBK{%Qzdw6eDQ87vF(BAvU4q5F(fN#6!IM(aP{_#Vj zp00d<`cU*S2=n6qMW@H=dTj9CXbVKAB#V!!)x{bj`GGA?cd0q4QR$Q8Vb8=}33t^Hm9^9L;)4sZ|Nd z802wjTc;c_<0L&L)}0E0sB8tQDB__!@s)^eU!34)B3^*@>3zc%#WswODp5_yie}>V zf|OctWQXNMOmWc#6rZLbaaqo3fJU^-UGWC6NNY+;r{zTjIHL{5+i$8e1#fo~_81XD z`fbJjR86=GejVJWsL0M~E@A_ZNphqVk6PU)0mg(%p3s8ik+>ock?>~SHL3;4k|a@G z>@qndf2{M92+cM3s=Jib?$|5iP^2_qI7vQdF)mI%7wT~qJ1dw6;IdMzE!mr^cWJfxxu zO2m9$S1|!O zl1~m(I)#W@qVjb6l3>@ds+8mD28$XUBM%^0UZ!lod4S9Jcdyh;VAxm|ePP z?P&19_EI?`5h$<-!8M|XUj#DXN7JkTy}D*pJX4iCAylquH0XtrQW`aR3I?jIp~gq$ z@ko}h(CmsX1*Vves`lc8zL9<>SyQjT!4aaMf6}SrDhXcoEevo%VSouD=6tF+4;+`5 zmMTA9_rc&I8g4nz9}}6T8L>R4UrXpN;jfy8#0hbcs}G20syLGnw0ry>gEb&-`2sxc z>H#0;mXlv=9)LKpTtfhk{jTyXX^ixjp04lIdv8cB=Yaqt9rmy@&kfn!a6vV2=yFnqnc7MiH*vzb^hX&?V89`czQ_&Y%f;uukW+J|bovb(p8*k+R34Ib z#$|CTta}B^iy=m{0+Hf{;?((^Ze3%_$;A$1@@r&FNhiOj2>S>#;^H-PfdGaNC($z58l@yM~lUr zkZC8R+ED7_%(NpJF6L~q2O0SpTz_Y{LK3*YD@k~2{2FDkRF<`rN4kF)%EMvIgO8km zhrXcJV00hX9;bjy*gq`cOgW$6&+r?Mr+f$2WeyRvAC`X1ME3En2UIxgnHgY6iB_yj z@Q*+b8w5sh3C?FdtIclq1l`oMbMPmHiRe=lpQZpY(O*$~mf~|1pQrdFFJ+}YpY2o{ z^QH4Z)Rn3PJJp(jyz6j2r`;Bd#7&2m7m^O}zte9l2xia$G6LL*ux85! zT!~Zlda`!GTiT_Bdn?U|ITye{dq8WjyZOw7iBjvVy%9>Xx6uQ^cVI!qO@Alt^4zh= zonPMtA_kx&xg@TZU%C}N46+&d12@!abR%6WWHv=T_3j*N02mI08nXtVsHcSqHjPft zXZ9YNPJ3bQ7AD}0TyW#N`OKP#oBm#KnD|6>L_V_*$gD<@OVmUuig%Q>k*4%c)fEq(gyr};|829@HE1tX~d8a z6yO?PN!UX}58!QgqDvd|m9#O*B+OrPj9H!N+y&g;W+%F|G5?S@1{zNeHwH(M&@)MA zco@K+iglu!L`uV}G~uHEm;w!XB!_{<29fgY935q#XXC*w(fzGfRT_SrN)!0Mn!=wT zoWC<6ooS<3q(>0L*HSt@;(LU9KtwwZI7R+ByU=E&(7IhMhC4UY5+f`|> zJhVYiCbPsDiy@%7U(89R=6)IZ@p>u8(FsqqHQOVtTS!{VZ_s(OXf4xD{2_H>U{DBT z^1BMDn$#YjA^1T`=Z}2jsue8)5CVqP!vR1<6GZ zBvcK@oe*0y_>H@tk1<=8>M&dqHy!s#FpS6Uq_#*;_a)Um$*@bq1N7+&!&STsw{5Pb z^H%J$n4QOdi=JSaxgrY_9nb%#&wV9_7dcsU$iz+Rb#_rWO zR?{8Hgr2zRZIYp(p5A(w9$-rD2Fy$XWUW&A@X=#M2-W%_ENVG@tN3o~xILMffKM+1l% zM*KBqpuo`N%zN(6oLAqSO?&lOKAawHA>hcvk1@K%l85NG6d5T8soh8MIEC+X$OWJb zDu8{|aEFWj5!vutsueOgNBB_2D~Y=heBg$+tOptv(vd`wSX{lCnkb_bp)3rZe@CeO z9Z)|`@nwp?r}zrRKTsgKcvUu^z1&zteG;kbay2LpklA0)%S|3`OIGv^_RGVqe{e`1 zZcE&ypwj;!f$ciF9&SQHu^w(ugOnL_rLwz&YOjvlbW|@Z2Bz`Eju@&;>|Q8=3Edzw8unZ+Ut6_2@|dMaJz*bSb!dGyJ(l^ zjz#X_b`OXcfRZ}P!|lEtYk(ziAk>&OFhADUU`U*iRy)ypTcm-%1P#wi_8xk0?6!R* z+z1+;ut_UZ<9I6OOKo%RW7T#)O$UmuhucYKARPmT9p&M6C`a%RItN5xg712`)%S3l z0BM1{EnE*bgx4!+=thx46CQ3~)cqs6)&C%&?K-NDs~8I&ZfD!WO{L6%av0P9-rpK~ zxP@e+H5o`l;!$NF4T(pUv?U}Sl|$N6yP3%YEksC^hg*n{r~rt1t?pDgWPdirObk~P zCs7V;ikuiuDL)O+~m(@Zr-QHj!nQ+PETJb+!QR)t$jOxk|_k%XwZE<^@|VvmbQ5<@#haPY*!@mTBy!<}Rjf z0DGH`Q&{UmGHrEHdpO^=IqYV$5>~*6o+)JAj9%{q;OeV$UF+BAjD5;$40sE5#%00$ zXbLmFHgvGJPzcXvYU@hI!+<3LXfC+QdI z_5Y22Sb+MtU!+}(B?Vhs)PdICnhmsh=m!Pmt?&)FoQWbA`92{f;uvB9_Y$ z>(7W7I}s-R`Rr}P0_P<(9+Y#jrQ_Q-2THdd!6AGD((OG_k-nQI#+PBq06m8X6ej;nzocxpA z0qTU}be8EZ!h{??3+RGiuKG)Zo=rGKky5&gFoFMbjp09AtqOXC#+zz^yKS3lk+Geb zA|9HbPus%enH)P*_qK$v`Ce)fSuVEa#<>Z|9l#?&<%td@Vr&;Nz9$8DnwTPP%NLTi zgbj%1+l>C?aH|%Ix{lT@Bz4^v={y!~ogFsqA87_Ws#GU)0a5wmyaDXlOY;UK+xFPy zs_9g_0>~RM6z2_KgbhcmVxx=$nu+kfS@zNm>Xw9W6}Oe{YhVFU>6G*Jx}9y`NcTaZ#rRuV|FePuOyd!4KIM4Go(A>Om#?hgE}S(b4<5wj`YJsf6MMpjK2b_@T>ICE(A zTe)dJg@=oGl>uWAg7;DLuCl|b6~f;}^r4kmC!b^@->INydRsFP?Hqu#h3;^$r z5by=6pK#kEh*e4}PXTs7pJP-v!w&TaR$mlNrBU6|23*>>0nI3&0K9DqD0%}0RN9!! 
z(#G`ar{vh7`s^AgcL9&ILjxta6Ae__n9I|~K;y~b#>_7_$E_)#5Oi`FP!X=& z-=lVw%3MNbpX&aWF9$F)JvD_nK`eh~LN2pFu|A{@UKD4&*3@-H3UY#C{?3kO1J131 zuzjktR-@U_$9b;>{go;7%_5u`w_t>`5Dqa4Eg)Q#!aO&O8}ba}J}m;{fFD8>8vLtM z@S8{Wh%21k(MH6eenR)tAOLPwdB27w=!Cg1q!#)HnEZ$?Z%1nt>z=V(<+fB)^>W6nv5^(|m&m)WkRnV&Wr56hsVs8!)<0 z9J!8trXdS76NpuY1W05ZH9cUWMr`{VBf22p3lCZ$`n@E;=qQ1U&d_rjdjBzhG?&f z^qXa{pTbE0s_s)^3&qm>Q(m25%L!ZT6F!)3v9Ge03Cu9|A2w@X_A z@G>`hhF(&4@=9M*i01HKf$>1rn_YvKI(F;mk&&v3;}1PBHsQZb7`!Gy;*hY3YBCqP z5snD2FjxGjD#4H>j|+)eGvro|ZwsLtUsihPd<$weLgA{Rir*j(wx_Rj$FY}{`X-l@ z4i1$nKU69!C3#tA65?bDMreGbT+$tPA`6B$I1p*&%~MYC9n;&E5)m@K&2gJ6ADVI+ z-`1Z7j%0`r2l34U-$tEp-#fZtd}4C!fw9Vly_1ub@%tZ~9NVxZFvW|Xxk1~;QZ@M& zOLXIU%6?oy4-eY(0&_mgFSirOc=D@Eoi{%@RqhvK^w|4xy)l+R;}BkSpP zAm+s`b&)=Z^o(!#BH-GLM2!gt4H6IwgMR>IXvg%9XF#xyOGW#?Jy^zTpORLj#cQ3 zH>a)8AZ*Hx2p=kz=%YW^C*P6+3hgF`(LJLb(cos}?o+*>)#M(%Uoii2W0;#EbaVXI z0BZ(6qqGoV_b&whTT}QG^5(B`J}ZeE(cIn!5|2J0;D03rUMEdaHdIwCW!tp~c5ZRt zZ7IYFdGpuko{_{2cSa&U)q|}jaRU!WH}G#yfDa}w3!pH|sewh(JgrzQ2H%SHHv~6b0eyios)f2GEPB7k;N|?W;{RL#Wl9T7t1;ABm6GfIG}GU z+)8a`Rz7%_xFk`Y%1MR?*&%a*4>E{ zb_q1|>X*;-YIh2qJ8a|A$PwvYCnkydd{34|3f7m)B4u_pvl*f=CUvhs`vBdO?Ymhi zJu2o1{tI=lZKq6LyYyJlr%L4wIeYGRXI<+b*fn)&>0!;yV z!a@NO$gvGmc0ZEsaQyrF7kK%L!Jl^>+(&IM#r+f`Dspq2I-?YgQS75Yn_38s@7ISu z0}|_)A&>|;JoulYNbbDXv;ow`djQnsv6873nJT?uEZyAnW!||<>w)`AeUOFLY`b$63 zUpjCm_e06qcUJg2iWw|=m2*!0@7$F8&RKr#dwZvC*?Awowf5!{w?4QwF`5 z4Wa0_`1Q9beuv^PzrItoLTGJ7U;7fY=C75L{8?vfCOJcAt9|7Rv8wEBO?i0B69AI+ zS~Dh$9wg^piav@Z6lgoFK8RR@do3F+<%fO>MAmt_Rf^@){|&;f6pXRqVyKbfVpac& zUKuS0WVGnB(W2Kx2h#ly;@hsHn`ui(C^pm9f6#}bH8EIs2i0C3m_<$@`nDGiH|rhk z%Cz;Lv?~aMbBbisrWnS(X9--PYpx3ZcE&L7OJELz8DuBN4%HV-rY&fBA^AKnq2E}e zC7L7Vwe=i5ss2dZJxExv=Fx1s4o~Go;cTT>yt-#4&4@V{q6D-Dw5GjoWM5&T)scNy z(E~vRd9FpIRpnfs1G>h2F$YIr*jZ36wo}!#o_2ZeSmcht7k~&BT+93JURUanOj}!MI_-tIPuHlR z@(2s&T8#>;Te-XRl#U103Tx$5wc>@c7RXw#YIT~ZRuX%QRVx?AsujLH9PC1wegz{; zl<8MI%%lI+bqE zTowbky$hv46zB&X$3!|F{Nr zRjC+Q zDh<~8b_i&*|DlD94)#C17xXvuKTLb!d+LQCsUXA-mMTxP{z1NHLEmC|SaOV%3(_no zB1Jw3hPTu3(1GNk;L{Nv(yw*|t$8TK1=K^okvwEcTDE;j#(-jz{{cnD7_>e^@2Cxh z!CkWV2Y|nmyk@{pr>}7!jVZ6;ebXXp3<&|jYiC0&WaJ`fAoe&R)rM*^7lvP%i)&K! zF=>Q*bJ9Br`pn@RW5dF_-SyNtabykN6l8Y`^%wBNrzt)|;b(1#caiDqir43lKu}t# zO705rHHfj%&D)m?^bs*16f!5^RC*{I;0sE-KS{g1xLV}e{T~JqFokK_{a#)39Qp-S zrf~13-AH*VW#5PDhTaG+eSP`R8$6Zr@8ZIIu;npk&iRNQDNjx=GTr4Pdf1AZXl%MioI0ta1;!Qg&~H8-6KU%vN?$@Xdm_+$L>%R zj6O%f*cnH`und@sf^kPa3I_6Isq)y=*ZN}i+9i7JW9e((mc3SzW)@uIvHWW-KUO}K zz!}8Jpy161*RW$~<$K9(KGk<=OBP%MtC_JaGE`e<7eJsDdPz#oWkh zX>LYFYD!RV&`F@3%o+G5PhZZEt%*g%Ltwo?V!_2WEG6vai<5r{;a~ff!@Q+WO9FoA zFJ0TSXQ(u>HTSV)_QZXCa+}sA;m);M)2t1*Yl(yU4Tytz)Kl*T_w2awhFv$@V@Hfms6Zcv4R4YVVzdAlGC#&I>HLaVt*Tmxl$MkR=6iEPpoi> zF(dgaZ`r}vD!1HLxfRhj^%%(YKX}G=9o3C`t48OnNd`plk1lP8NiL)pP;d{KaakPP$t$Vv8$UGWJE#P2Uw{gub`>+aJK{uX~*Pj5pF3#0i z*s#-q8L1U9Eu^Ui(gwUYZGbpfQ)V_sM8RlBTmtP_&i^z881yB_4%NLMPm?`dLAdS=2#Vg;J!kplmzq8}pkZ1VzX%YAa zeDEE!`ae&>Zx-e7`GQf7sF|Rgndto~&~wANLCbB?4q3}*3UwqXA8W%iM4BHt&QWvb~O`qzN zEpG#wQoAnc1f8$3!?hagv4?y z$x~cPTW#(*wxpa`n9JpmXF(QbiOHEMcu6^zXTgh{%B5yxVp=}U+j!#-4ItcBb|$y^ zG`?*h&9gwLUgcRp0Nt!=15mg71$WZ;Ecn!vkzCF}2%ya>bwAtu=?Ij8gu zYays}Db`V}r#O$|2^3GHcoN0=6dNcy!mf)39SqR0IY6mQp2y;fH>ez5I6!RZ@RiWMn?`t<*m#3Agsl$}?PEG>=om&Uv< zS6!=mE)JCBia=!$MGqA@I4^NUYxAY9DCa`IE80gn;EHx!hOL(}+J>!{+Ho1SUTVi> z*m^lNjXZBwz6Nc&R7H8?b-(UZDU+@eJt%8KxU$-r9o8nN%HW_x5BUrZdg>e}dRSi? 
zUF)}<#|~3on@b}vMlVC>X6L1winR?yuhb`R09<;+p&P^pGhYSFz>+7BRHhpm z4KB9_O7p4CT24l<(l@rJFsrkhvgShm1ta%O`s9uj&;+sk^}IhLat&^VT%YQ}mLvDg zg89bAFrO_DSY7LXi?ot4gT9?^7!HBgfVExuGorWX<3EtXkDQep#yLw&3D%(rqU>#> zHP&+@nnwarzm^(HS&o(?|Ca^)O)2o8G&u}B9yRv6OAkBkZ`IN}iiS zJU0?I+!=}cRPAXsmP34k1o$6p416=R4$$qQbvMMhVwljMmo_9Jc>bDhW+i%~p4;0% z^a@g&rHi2V9rh@Zc1NP_`@C{yeJh~A-3K0gI?Zgg*OGrISwTG(p3kA77!-;%-% zQj=pQ}N|Ym%+sAvkSPE+H7-i$kc?|JTHZeJ391s@JDOVW*Ki&sP_u} zus{=<+?BPwUP6-}RLk=eQ{CoM5rhdSK9yxDf9fSW`iy)X&{a8k)nP<)S%#)iYt?20Z0#*s4K_Tq!SIS4-B3&AL8!ayO#VZt~n0_V>&yvs8(x^T?;wUUk zj7D0D+K8iZpwUH_!{rqqm#`qHU301A6XDvU_9-da#L+cSXGkS)f-~E;61p~h3tTIg zR8mHTVwg2MIbTgvnW8MgFes+5rmu-L1=`4VZ0?U|v4cwAvY*6c3L|jBNnA8hN%+(K z@GWLDhhiZDi}I<+tYNABgwm$SeO12>;dLl)Sni&9S!{dFzVlIkr^g#NKkD=>&3;EJ zE6u3&zjMyC?>xQCtQ^w7JiV;FFip2d3yFr`nVxRu4gQXHM+1jr-b9_1xYDqOY~pUiGoGAqnyG*9~bf0Deg~6gvRCZuoc#=-lYu z;AV91Q?;d|NFgD@8#}6Ocs`^-E z@8sC%bO(8CwO<5(sIeBEt@cY<%iCzHwLrm?&{kXRLtro+BToaCUZM7%2b@N^k)xSbiU0dze>i)fzv8@kGJXqN~I(}s9;UlAy zM~-OTKBaxJD^?4+i^EpChNh+x46E9%t+s2cl|W^^WF}uhBB$S0dsSUqtu46o(^h*f z!{s@`#k{iBUaTd2bP2^%C@!VwFk3Av>EC8fJ4IXVWqQvD{lmYmt=8LP*LgSYBH34a zb-Qe}&xSR$)>aE~x7AkrJlfbfw%XeO7e>%Q`)Xg1!rZLR*+#fxbfuilU+5_n* z>e^~W@spsVYpbnXe)J*RY99h0U0W^T7lO}K*H(LK=0t3@4?|R4TWxD>wf_&uT^zRB z*VEKgf?-wLwbgcQwOYSgFPX_#t+CZ!UDsA?3-0{1)&2s*6%G30ZYrQS@U)Ggi9WaH?FlI#pC;Ww{Mek#&#aA(tHwS&@)+ zQIU`p16h@P3}hKd&dcjh&wM;6SNT6Wlmtv6VoVMnA3z zZXBW?J5o{-7>S9+aV6>oHwK8HP(=%s+uQHuoww_=KfjPIY!?fH_d$%UHN8TsZ8g1q zls0yb>GjiqtFGxJ)9Ytan4480x5@PSS1F*dg5-7#n~;gL32p}bA@^C|GnQg)jxPhUs@O{ki`#`27m-A>EX7gLy*RUWx-Bc6I6A2u+!EA zRh=H}v^7Ch^TAHfbg)x}jZNdsu1JENDqgD4yKuD;*U`41|{^()Fvp<`G)wgN2n2&uW=yL z4l}N>SbQ(@1gMLXOEwHz8V4&Kbg(^u@(x(^JHJ8u}LG$O;$s5%}ZOHrhEXHG4#Vg2g z&mf_OdrT>ygb4+6QuPuQlMYx|Sr=D?3h};ItEKuOJIOVjH$+fgTTLW4(!wBE&nc3q z)w2S=Ku2&kP~8l;`s!RwBnHsNMk^sBTSxhgg%tz+;is$dr7Ma3g+>U^NAAad@jfIh+CtnI?w;ol%Q# za5KvGsb0`>%KjC>d|P9f^~=itmTjBgN9muby6TEq@3K$ud{p}g;hFblWi%w zog(5_9W5WhJN1pbXaL-!w7Aly-9x+SmQuN%k{y9AObzNbWkRby$)`!$w8}=(t?Wdz zp6k|BvIqjLxm!Eap}m+kT8E56eNn5^_ob*P50zI-iXWlxIc6NA+z{A%Hbx5@o=#bo z8=e3IiE)^>|Gp}db$R=IS9y;jaxr&yuwMEwUBlGXNH5*hMbEB_{vnV!WF$^Y@HOMe z%mm&sJzdCjNtUkvk{gfE@09VlUJw_fPM)sjZH+oPujXBDHLn!eshao9M9u4cPX|`a zqUMEv4F_AZr$)}VtW;5};Uzg$)0wQC`FDUPF6UK`^HO>xuK$3M85@{?V7>CC1)tp-_-x4ukBgUJ2)VHZO`Cx>1%tUCu{fvt^d*6 z+jVsHwev_{>x96?JGK8j1l3A?EtJ<*eeIiQVdv;;-v+o0(>X>HYp1^U?Tum9FSBa3 z0KHv7fSil)9Vz^XD9N!y^$yV2{%Q)m(Abo|7FJWRzILNN`B)05PUfJmHMl$yBov?O ztmWFC3-!%kYYek~nU%85)Z3-6C8OrAr|>6K&EMuI+lc1&cIa!#1o|5Z@IhZ|879%y z^4HCHM3u?9YkJJ9GN3Lg?dvU_!vY^cT*g*wcCpDickYSavh(W7W*zeRa+l?Ea_cTB ztqrd^(WmF-nxWgYkZ@%#bpX2sY#eXY4$JN32iri}x%anb8*-#33RQ`grz``^6O*Zv)}vPq3BNMFk$GGbL0v9yU; zBFtxfE$fxnfufVp*Iuq?N5Cxqy82qFTB^Qx+q0OXX$I7k{g~->lFP^S6|yseeGp*O;=xgYWmt|g2WEe*FGmV9+CZ~jK{9N zwpo3xr^eHxuXP5Xd8x164y_cZudSCbcpm%LBKm*lB=xmV)3YNQe*V|h*LL-_qZ_=xYxEF2l5g^tA^Y!`zHktL^&QLn-`-D9N!y^$yV29!`Ou z=la@-6j0PB$zd$d7!SdELvC+_zV=9Cn46(&i*4=H*B(vbPpF!|%~7`T&h72e*B(oN z5BgfmFrB8a_4L=l`q~P8(8>DRhiCxYW0C1=f0%YJaDDB|Xk%Aji@x?pQ&e=8zV;_` z!?UZeEk*QYZo6|k^|h~}Yr6W{Q`6V}97yaSeeD}^;}O}ftFJvUSQ?#kJcQGzuXWyx zd8w~`Bec@h*N#>8PL7Rk9XWFN=#d8}Myn4V9dnd>ME^TM(MkB%UZZD6H2nOJd2zdr z>c>=!h4{6r9KTkjLbEDBZ1`WnYZXoQ)PmQJ_OpX5m^B^SI_jtu7 z$GU-OwM#MtrcEQ))&!>2fYlAn1*YxOm%p~&u})=n?N&(R)Fc0G$bM(5vx2&?BhODFY}+(3k`rmTSptTyRs zNK?{@o;2!4*;_4-Z!)d+ly2O;&Ra42vS1yG%5kE%R6e$;bVt1x=xy4oX_994>)=3| z{|NpPPP|;(ZiI^x)d(j6K4qTPD^`1kd13u7^QF@>Yz-A8RN#r+f`Dsmb(y`dCo zkyVQJQ4ngdkR2`i^`U*2CqXS8 zTIax-vjnyD*b6fWl)7bwic^YbIWJW=|3=T9qo&o{8cpy@emkY$mGjt<4oUa-K)^+6 
zWC&EbYD%EWI6!5;161}qK;@TpE1bgrAdKxgs(++Hyn=*6E3J@GDK4e`_ELlBen^$GWm-M0jp08Xq09g`2&&E@UoTs?VJZxIjBcKRAO6 z1qWS#PkD_2Z(&55e!;vhfjOeLM?iLRcsxC#3-v@l2|ku$+8hhcB-+6mfc4oJUS3{b zUMHz@eF&4`AY=h zgMFpS9u~ydsH`9(l;FdXipsmcl8&=0nkqB{g)0ge=m>dv)*b-PgzBwR#}hs4LNvC|K7=Og=$A zr;DOG$d4Gfg>ao0=|dN0By_IQO03dO;?<=|DkV4od^@3NE64Qs%!*X<&uHz+g}q^bYnbqXjunK$8E_RoiuR6$WGzKD|yG=Dfu_ZS2Z8 zYHJb+J890%A{u6ko7J$OwQJPMBy9$Gs9zHUv}B-+8+gI~|aaA#@{ zewPM_a=O}s=yo9?Bt&FsWYwRG7HbdQm66bb(jNRV?doa|N~QOMhyf3=A(_X{D5p-A zk`BXK9O-Vf#vO@kZL_)GqmO?WxMS7;FdU0Sd+?Vuo%VLM2PdsP_&7N1DDA;#a|GYj z9?VO3gU{1Ni$!~YaQu22yits(3GD&9hNL}sQFO~K;}Zu{)d32y?K-NTR52EID%6CI z&~qVrgf=VmIeJ8sn-!LLdx7>SXtRRe9CjxtR)k)dMvqWEA3Y+Rp7!VwVfzAY!&v>^U3(DvMiceE~Mn%rgICIm!onH&{)!wPEEm_`OSN7^2;xvI?}3~TeGz=_grA~{F&jP(@8b+K*>s-%%y&-kM7?g3&AuU{DWhRyXnus)R;Ps;NER@Y3B~qn4`Zq9gX%u!Q&r#PEyM$e{RIGc>s53~4aJf7)x) zyCUqh=^JFPO>g2lbHl{Q{v(wst~1c|b{$>)*J;;(MHQ%UX62V}Y10OsZH%ARD!;Zd zs@kdix`{TPgz}4iDOULvp@6;xaP@btS}%huT*q^zPkD`baSKiKpVBvOO<_iZl^mWY z=a;%`I0Y0;C5Hi>x9YCj5||@~Qpc&gJpMaa-Sq;{-!63*Y-_{#XsYe?5fcGuCl9g*~HjMyJtNJ4qK({@;u(l6rL%&d@U2;3srXgNJcPSWtVzoUix;u zoK89o$}U!!5zDsDPihCID)I#N0O;Gw-;ZI{sDMQ&@oz2>{N>(x+E zYqee1iCEjJ?Rp(;>>O>^{{UQ3XD4a9ej$Z9(RA^59%{SZkOGQWksR||+x3eH%n?JW zM?`peFN!#`3;Bc{MyFQW;Ojp~bd3s)IyFNxIEk11*3y7bj z>2uI_vEM=3uIEjRjvfDjiOI@CQxsls#_c+)A5kH#GKrXKwl`{Tqtaq4U6P``Yloxm zsma0=b~`EyQ%J|8EKDIClQuSn9gf=A7_wfgOgA=$D=klpjUJtY1+Evjxc+5(8pA$D zH9FgI#hsJbvW`~95AUr!G(I-DOlQ@xks}kMKF?*!eYWn7o};1=uf~f7$ConxG*}+d z1viZ*qQ9$NqC$?^@}sS+iz{-dRfAux7UIF3p42osx}K)A(--S;T6PlJDf*yTUo87e zvJo*Qk*@-~P(N2ym0=hBv167`c}|!x`SJEw7M^yY~h=9-YUN4WLfL9OPr05I(kO%o#se2zbZ_JvM;vr)~2GF&BLmIyIrch_&OZWGaE=R;w~C=A#U zBXo0ah{m_)uMCB0A3-7FshT}q5d$FP>H0La&rq1lKoMcf3S?8G3@e0g8AJb0hle#C zJHD@3z3{^A8zQcjYM(Ssa!GnRN`)7h7(%d)_me+>D+@#sAola zac2}COi|HU!MM)^Zm3fHsZV8cm`Fv74d%uWr$pPy;TC2~s*vBW?!?v;$2AdOnRdUv zvD0$@u#Lni1(xY)m_fIM)kOJtC@?5UA;t$sXqdFhTYPw!b$ zn$Y8>m+Xaa^b33a!Z-SbOQye2f`bx|@xmpgV=vbWElKQInr;Y%B!|N|^1$(&>Y*m$ zEJg5$=pe;?6pvGQD)N0aIt;aa)E-bVNgt-ec~A5$t}!v@#`B&+hT@1m`b2t3hp1|m zM9HP1tF!bstIsj9_zldx+bMQX+(>bS-X0`s1c4w{KZ6?Pf(V8X?WbU)Ye>PTN-^As z)xj5$OfJ$F!`{Uey;JrsE?d#FqJMC}iN%+YSlr4PDY$MKuTIuiZGBMh8mp_?l3q(0 zzdKT|B}A2y!$WwpE@;G!h6r5eCK0&Gj=&XmRr}W@0v8WkW}O^R?U1lHJ~?(cq{^}Y z_k+5l5V}XJ^;GDd(OGhsErx=d*Q>v$sMmU~+R;|M*3+`K%_(oVJD^wv3w#d+OF|CM z0(AOfj#0J@x+$zsQi}O@2LHCk@Sm+#g_RAmjAIdmB+X!aIE(|D;iQq_)NNr%6frE^ zmc65+XHHgSPuoST5$A3PE!zt>VLtRbkp#wV&{m8@T` zPudbxUJTKNtMa0C3rXemY&x&=RbDsK40x25l#Fae5umr$lXd2?T=|Vk|M3}xcF11X(RvL=q_n2yXv`&2i>(qMB zSSeIywW&p{)fuu|%ldJmr(9P%WTjBqi(G%_>H1E+_f9i{U_DL-_I3BfW{1b}KLZ=K@My`kIuqql>Qp#i+pqq0lrJJ!FNYQgb zDn$)I)uri)$FVF7OG!GftLSPcVm8IQhXY;1*mAkwQTW(894i8zr=={)faq54+fU&U z^BUE1EN&k^pad#5FjXb%XB0bF4CkoUd7e-a-AwU(id!h=v04WS_%a%Ok#HTsPXFKTk%BFx1LJ)eR>PXsLt z^n(uPW%RH`uhXu>ff(AV!#R<)ZB9BIcYv4R$T5XX$8XZ-ejIS=_c`iq+C@HZ7R*1< z80KblHGtkOT}|{Bef%d=_?ZinV~6Uq>t~F29%+MqCi-Oo|H=gTppY^1Whf_yoAGvC zs3-A?BiRnrDLzQFZXqdTUPb41zCz}8G=sUTB?_60wV6{ip^$-NAKhEoTbwfH=fD86 z&M7Nn+{QcgqP8ky{yXilM6k$}F>eA9tXVot8S^W^9S_Ncp^QPp%b-pbF?b2biWsgZ zN*D&vY)TjpvYC}I+){|tcdwf9c6v)kR|#_pp`CsYBzBMz=H0m=iUe7V62^25L#t83 zL?ms`X7{+eUbX9IjzeiYH8`vc8(uD zdT4Ct*hmAHns4j2vEWj3PU&vh!c4bGkvOUY%BcWYCpBou0Lm&M$huaHEet1wQ9m@ zSJswB&vxb9(&!pj*2!yXi=2~6sRt@EqgO`PsuC+^RIw#q8XAm*!5!z4#2sg9#ytI+ znCbiUhUh4TR#>q!xd)klKorgWTG*_DTIWOAxFLQ#S@1@5`E zTlHpNOyTbpxXEFhGaJq5*JMQVNW1iAf1LuKP&I!Id;`jc#}~{tKB`atO$um2)%-P} zGg7v}%_!Tada&he<74{9mlBwRQj(cs;d+VDx`m{a{98J27NsPzjY;q-RRW`uNQ9fw zR@;u{XHmvX9Db51xPF>7)*%}Xj!zH@KUjAmQ#&_Dun=c@tm`O|hFmXXjlNT`d;_%5 z?kV{l;T1*%u2Z44l`T_PcdgHvY~rzh8cr^a{L^GV$3N}L*guW$#{OxRGBft+(<}6I 
zuplB11(r$=*6dM?ZWE~%f-oG^{t1N9!|by;zpj(oCHgT>J8b z;tFYV6`1ub@Q7%ji#ays+R|6vME~)FIDHa`TUrN+NW@fr0v07gOma z{HohpE3 zFLUiyiv1L?qOj(gC&Fr&)&{iiqxJyBVHJgRz=%F{0UdC*Dq$-qUVZ@I)V7cBysKQc~Nw9CfxrsYz`eP8AMKr&4nVrMA@EdTPkJX=%EbS??@Ow-nrhN?=H} zt%_9JQfjM`Q)){$&01$}4qpn~p?PwwlHd2q7V1KX92&h6B8RDz%Fz7jQYybtuWYMQ z`D)q~#I-pom5qErcqU-e4|5}<$Y9frfnyd$LWZ4aTEPCS1oogU zf&><>wusg(ByG_)IuB8~v%_dTmu4^y#LkfZInou{r8RJVNXytz^Hki zo(Bpo^fS++$xI`xF-Blr9N=Jw4ll$)qt&FDHxRmImSgVHlkM6-cnj?@=U(JKYdb;2 zfO?|CeAey+?syDBmmG{kljCkx;xTyowIrhOogS6LRitw!rJ2+Z(0ihO2nr^TfT`+) z_G+F`BHRr=2@H`}B3M2Et6+HOUJ%znSpbhT+Q6UbhAYEC;DdPE zu;PCpH*k6d0-&XQ{P{YmU8tYbdOvP1FP14ne?puzX-o3Ko4(9xwm{#-eZbS!75RA9 zp_vC6EA*z<;XBoih{nw4l{T{FR+_p|O^f};dXkm)s{Bs9eg|O6R@dWu?>{ug61#pN zrg!OmV`3;Zt844Y9u0w*LM-u`0OP%0>ncWQh%2rD<9bBvWR>aaI$TLt*WvWEudcnc znB&@LiAHwL@y4e@M9USxEUzET4Qalqp0$U?DTY*f5fqHFDYOgT5Z%J5j+VCzuJBUY zbrQ>KBOgv%*z($GG#>_BaMK~A(=QagEP*+qw-@e8n$|6(mGsN$x6WTl{{+o|U6r!v zJO}_5njTgqNb&M(3q}?N{y5lIs_Y>=jVzksS0cSCj->V_%HXN{G=ZE@9Lw~I<`6~o z2v=l;a1?c0Jl9*+QQVN4m+_;fot>6hhO5{NFSuvNjW_JNVf_30XMhzrnpM-b_y=Fx zy*t;6oydTUYUFeuCF7ogk9a@kL6<*Pw^l#9{8BKURc1rSf?RmEU1d^<8&{@ z{S+fAatcdfzZ7XHP>S|ZU|cN(hx_%RH-p1WpCR+id=K6HjvT>5mK=1!4jonEtko{e zpG*h#1t%Y3DfjHrzLVfmsnH<hnfw(ChB{#|2R|sMB1Yq++{yL=Zd%(gsw)e7;7<@Rm6()tAy91mXF-R7ZGKo~Id> z>H~359R=4>P@NB|Js`*bkY2XysQzyi@w&XhZmxJ|m(qVN^|r1im+2w>mu%l!rfc0V zt9RJVC40A)=DH$#cQAa**e$EKuE^Le%F~@H(`{YhO1iBpoSyb=U14z#Rydd&Jizv< z!gM0`sY-5dS74|TE(Pnsfl}qFlFaE`RP=LsrG&ucDwXLh+^{Rz1*T}`xaol-TPLn&2@J`)PmQ-P>*Dl0>;>n$gx zvcCSI6lOq44v$1ehlY&ioGH*UR0>A!YJKv6LMie1tWKjKKYj^pmRg6!Of8C(;|?2j$r<13NuJej+v0_MFlq&tB(9w3h3OB zYj88<`m_k-B87cCfjP)9@ZjVyh9A*|2J61&V^c{64oaffQC?4a9Vuf&E+-zMvTmzQ zgJ&iRi>jCQ&NG+17faKV-UYi>w$j|GyXIY*yH9}ruxKK^9e(8N>$tf5v0x7z8j(l) zxMU&}w(J$m_1;)S5(}3qP0Nr;N7dD;gIx&aa&CVYxA$9e(raFlyhbXyusZclf|}dY zEOP%dx!p2Y_SKLzF8@_NpZ{vE69?6n4VAHd<7_Jn+2OvcOVwpuXWNK=(=W!#hOy(a z_D}3R#I0sJJRO8H%WvoyLQ1#`sAU8TxD7h&%fxW& z)8XOP)xZi9r50|RM+5t}SzjzD4eVEGm!;@M?qc;V5D|0~L*>;9tuev?}@z-z*aR&4i;b)O2>l_mM7 z^p%~S$A--g%{8S;O>^VL>@wA^R>=vH@XYwepiug*eh5q6Y(~s@B&cbd0+wxf-=@dH zLYHlL>zy_;l+*hTm1YB(L|E>0UG(=s1s&4WMZZ*=47G)9?)%Al$ zMh%tI9iZgrj+QTP*|Rp7p>sv~T#ZeR5qK$d3dlz18&nB+%J#0^WX@;#6tPq$5h><` zFkxMyW}i(Z8niVV&T8Lsn78yD7>b{hCVgnC;(eJF@2^j8li(Y^uvYRfig(?vPENV0 zRg!hJ@5ti0U2WFB4{4ngZQ=*^NJZNy9;ZNO=v{3=L2unW7zwO&AGHT44y!1nvqtox zm2}ohRl@$ZHJShuMZJD$-lvY0c7*0ALMs!Y=7>-sM6Ai}elzdY_^b|p2X;!o1&_P) z&PY7&l6`IbUDdy$e??H~4-KNy?{}IoHaFvXH^uD^a^J3_3R!3v;&ykgH?^shV4%2W z>Hpf@)>lNlwnqhqcCm$hZQ8{qLx4;GDy7UVwuQbbL6Gk)G8qM9-SONGLc*O-63$IJ1Op>xLZXb z!x0t!`5Eto(t-k?aeBI(25+ZHH|u@vavFRxE%dTO?VCfOH~OMj6{yLBJ$WLP%Yd40 zn4=EgPz&;y$@`Son4ns>a5d;`$rI?8HwGP3tOkxG8pMr|_C+Ht8A68JW8gJ_oHYaJ zix?7a2H6+YjIC)i7`@4{Lv_231ZJU@s2S4|FBJHxk6^d$Y)9rfn0lkqDla0z5Zl8|)0( zJ}na2z^rV6@a+1fr)}!%EjU+6k;M2Th&6#w2C)$0h(6$~&imhb~a7=AGQBXdXm8gz#? zx7Y-e{pwrVX-4~b`~V}lqxSWwtHe>3W+56yGx;? z_1al*XVeROObZv`{R!d9~MvS{aZZPI&Mhu%T5{6`E)GIlvzWD+UFD4`I zJwKA|hN-6V{kq`V=TCBwJeyR( z>{Bn(?{YC{e!)k?ItxIC=1o)xM5w%#^)*-@wfwhrq8#r`Ae?QwGr<{RgUv(4mh4QB zSBHIiuzw!gFeUeTwkg4LShgwQK7BXZOL0HNh>D!zO=G)l`w@s0Q7=pYtOnE>ZSPbi zpe?ol-BEhz{5Z^~a2YO2641!hV33XHj$?O}`ZQG^94b|ImdZkQS!Y6`u#K?wNV%jt z?nF)thNWN%dTi=zgFz^L?GnBAvGlcX%U&y~E_>}`|EIlk0kf>C?)XDWViJuKi5X246*VGCWMUGM zjB%X#CgyX#XdGYPWHO^e;*8PwCg$PD{D1#_Uia3mS9jfWtE#Kv?AqsX&)I7~)?WL+ z)?Ul6Rh7ohFf5nV%A#P~&`5b~yk6N;sg{T8_3GFy2kVvcW?1Q27f4%d>~p#&VX}TQ zD2uIs`ia<7P(oS$`+uC8&r*Sb&sNC&R5uD4C1|4Vq)`F6ko9`xw|HIGrZksoq{J*j z^FE+VXopNAb-2NjgUOXxx7syvq8OOr-esyJTJOg)OX&e!+lvpB|aU{LnTp}Pgc2S?cHIgD2i?( z*+ued5^cHCW+;*&l3PgNijjumg5FxfqW5Gf=Xt7vpp!a&bt|2e^-^ort%!XtuCmX? 
zHPNVkKdhhnAMAgdj^+5x40AZNlyOaPH*1hO48Jm7uilx%y!Wam3t?QSfgYgAnLA7K z9_fc3(NP7^J2TqmqyTaQX!a-&2M4+9dWQ{6KMz&{J2w& zGyed3EIAzIR^5L}MXYwiHd%EeLzuhL>^b?5XLCy-hIfd$6LG_LN!n+%VOa1}xiLur zkws}wm-u02TF(ay4GeU>QvG2cmZ!WFwPPI`qjtWL;C2k5X56lMW0r>*a9j>JYHX^A zM+#}Ln^cTX(_SYiGZhlIjK1yPcb=kqRH{Su%E)=c6Z;QL92_619XwE}ZaEN~bbgLX z1i%R1O&7c`kT2y1DKj1em3h>IbWoZ3f{|hd#k&VXzh+D|LqDM4f_^Sx$=U?|c(~Yn zi!uhmca&W5rp8~%8aab@1}F@ABpv)>2A&FT3%(e9vw@> z2o2Fizykx|tV}WN0jc+Xh(1$aj9qxH$b&ZxZamlG#xok%Z-yx$|7fypI%;22BKQ>J z;p1#PtW-F%Y;pC!%!Xx`tL;K^45DY6+;#>hA=;D~{CCQ2XYk)Ce;~mzh`f9R#~`xu z&4qcEOAxsS!5@fBg5?-Qd0mmsu)ZbtAmLQLL3Emrh@od>MJ$uxTu=-}?x0|K%i&33 zo@Ek@%Eh28BTCf}M$;B$VzkD~JCTcG5Uvm^AdTXLf@zpu!6&^l(XTW@EsqmF?jXqi)&cWL`fr>Q-zZ;hv2A^+y({#^iMz zZ(9jH7I}_xylpKN5hU9typ0*#qx^#AQ9f&1L5uZFM}sx6BaijejX^+&vJsz}Zgn2a z+mQASnVrZG3x>D&0X+<*X9&^@Wa^dGOmLn@@$_(>Ihx+rZsEgV!2m6`4Cn2so;zMS zY=;WZq!Ooq^BBeE2G3R|z+}O0t}1`I!3fS95m83VLPN{@1`N!SC>$jhpx4(jVO2m$ zG58LU(piwou*Sq}vPoInus{!r7Fb{ip!*7yC%|4FUkd~OJ16TE8UDvmn>GH&qCH|2 z?4_iaWYGaNd~`d>0!9K6?!SgWe7499NZ`7ac_c74-|AT>cLK+(=5kL$8&=O?bZ^M#Tc8BwRXd6EA_d{C@!r(M@Z1c zr3VK{(5Jkn-DSop=jBP61G`Aj5&n9jy+@hWO5-^g5ouU6g5HkviqP zA{)bDA17(LgOe`oR)E)xWZDUxwbo##JNydi^C-@)fdKk$DYF8iv@Z6ti%BY`jiCN@ zdVQ32p(W*F@F$Lzy&#C6}T{kpTS@zY+cmplpzBW0=U367mjsd?yj!4eT4|ja4$Uw#YtjyH2=0o zRaJFIfrd9j<6!^|v`dF9H1<{QsOE8iy@F}k`A8y}zCAH9S;oqJX z9yvBSJmOFiX}H~*EqS?Zpsm>Iw)5Es(t=WD$>D-dYX{RwT<5ovGDi%q$&S755hYW| z-*(I}tIc8U!*$FN7BQ~#4yr#0uPtz$6kS7i8TvU>#vD9z>G8x{{~Eoc>;aYYvgHxo zt2?>b#l|d{yR_W=P&{<>Zt93KTKmOhN4;>tLu(m4QcbiQJbG`|PnQ&U^kL{)29HXm z-=`v0)03LPqaI!JK${oK8hD+VP~t$ViTNxbyP8j=eIv80p|oj*ENyJ+amWvBcNv>H z%%C5CMjf`qrszZ|=Ce~Ld9#%2{JDG@K>*-mgP!4Qrz$&zZzd1R;Bv_;$}v3}{X_z_ z?caldfXk&^tmg`k6zXm1KT)H>>L>BGlp4gO_4%NRwTz+BHg3pKa>Z}O`%9XtHHJ2) z4SDDWqB_Z4iEspHb6C-N9M$}d&kv*>1KeVSYvIjHrHXStr zo@lg9AK4?0@*!Yt)m%(-4lj0h0Mi7Z2}x7I6yif@Tfp4X5Fg=68sa0I&U=WD0LM{P z9IE~xuLu>$!EwQ+N9dd#63gT-#@Te5M(y6pUOwQv#;DzQh2WGz>UJea-6|t>tNgI< zCT2jCz#9TzGd5D+8@&+r`7Ev>WbUnu?b%yzX(hz(!f3sNo}T_LdJU%?g7m&hHwNzp z(hL8!$Aah}L|&=yScMl|MP9fRVYd#1UwT(>E{J=hV7@>!BtM}Cg_U*|$(@MqwoyQ=?bYEaby-`YnrOGS zWW~wph+b0KlC{v~Rnk#zYI__N0ehHQTk=12^Ddhv#2jzd`;hrnunm%;x6RSq`Bs2G z^sTS9i~%3QIIWx%10HBG#DF&kP6DKO>$F(!?sV(4W2nG@lSf!H1@@aLfmZ2Cj{Bxc zpe7uo^LwpNJXsBotbr~XjeT=E45#G)b2rCD^qj(iLY2SRL&Kj*g+TAJ1@mF=WIl^J zY^fzm))4MU52}(Iv_w;<>4IjVF`?9JEXdX6D`&9e2#6otjNHe8IM0`qO$uGx~e}dLnkn!YSoHZDm zyz}wob!Yb0*@x8OqB}`I`66JmXd5rMh~%~0_yv+hp?6{#{3R9w&r{)p-su@@^LnQQ z!jE-OeX$N|t#wd+iBhMd(b1wxG5^tBr(uKvzd=i}n4u5-N#*|ZSA{8ee0sjuX)ShGqO z?(#w@(V>PaRaz`yZZ>~oT2N{#Ib6_bRXv@=Z+9kTju=Xd4!`yIU&Q$BO;rEP_$^B9 z(9dH);PCNVy&$pb282k_O@@*s?#*c~Of6HvnF|EJ9Hs`tB61nCObzU9c2@|8#y!Aj z){B=EW;qUB%a~=UbdZXGT}+Kx&WnXN`%H3ZU%>2;JpwCAvAxcNd9O{o#!+eD{W{2B zhVG8Y3E>;4!>~`k5;7tgNnk62$A; ziQy~imHo?p3{-O-$ZhsxP^DAZ%Hni#t^64Hkq#kV@1z&C7x9uepUxs)4?*K$AYQaf z!JrI~^TQNZf9HyLxwzS6=~G@~X52pa1<{xEjgO>dPS7s?dVH}sziYuDUXP>&rKXa@ z1zoU+*GH2wM+~J!hj@AXFJi>&V^n{q5HHwPd2A%L`17bX-bHRv;9Z}f{+98su%)y^ zc-J3ieRxUXU7vz3Pwhv!jox2S5e48~7$J|1B$jN9>L_DG3kdD{uW83PD$%abLw?}7 z%kB&g)1AS8qYm4Jc6lD}VfbWdm#<|4e4$oh|2^w4%V?KcWkFhk{Vk0&4`>&wOfTCO z2inB~;vJBFSZLSF$7=NkvF@HM{$SARyNNxD84!R~on=OSVt7n&Vc4k-_#OSC z9w-FgJxgEvHq9QOoB0my`#RCl(Ovl8&uEhy=kDRzA}4MD9xiOJXB{mkn``6V@MB7@ zOLIl2TypoXLVJ>n&+gi|`2Re0@sATxcNwh0*4t6Dx-@bc#s*Y$D&Vh485$W$NcoqP z6l96yFr0-IjPwM78j-R!M~8clTf-uVTjPE`UzibwwGm_EwMsQ@m48i03885jo|B=j zkEoD!^D~ilZ+FT2D#gBnrxR`7Sy3A8DGekcRYbn-D3u|rc3<|C?(OO7qDGV}m8$Qw z22VfFg0Scn{)HmBZwj$o?(Hpgwff{rJ6Gu~DI~qWH@-G`+bV~2adopI8}zzt>lw8( zYGrM}jHqX*TS}p6|Bc4mAj+l*O`9UZy?QLy8Tr)$G0j@}^tf3{*=AB^K@X_sDs$-U 
zctBme>y*;na-Xw;AU;_qQ(scQR42Q=zjVE6WPlBZM$g!vSqSgj%-j}~+tgkYMpUQ! z#7t{^aQc;{6(s=W7LVXs1VRcj@9?*Ocz= zZTTC`3dG{-WqydK0c}lPp^2tziYb8SZ{Je7K78jz`cA8|Oqvkbe;<$Qm+b5hQMy>y z?VlK_?AyI1zO}hO;~O&tef3bAKIp62k3Q0Nh2Ty4#)-5UN*^YKweqsM-hT&_9;>rf zpcG?OwSH@mDR#0szv#UII?bQI+tj0d%1xH z@6zkXp3=X-b6>5~i2ZQU%_Ku4w~!1g$xTOM;-#oUGD@1t&ob)8bg#ppH?a0B~0 z3vpXLMBb_!Ia|!MO^Y>K+LASF7PoayZX#a~ZEqmCheZ8-K}Tyv-u)&r;+d-dV6dc^ ztSgR5J?(8+qrfX6Qfs#(wRYFn>Cusi;g=7M?>X4)DUzPGO-Jo(O4P@}aE^226{X?} z$NB$~L!lfQjB}q}u23th?DkO(9~I?QZe5fLx=|Ds#aRhgp zVyK?TM9U1}I(UnS(|LB1qn0;BZ-ksHmFtwt#g{=HLJQQ*0qk)Tc)5<$QwS%2NT0iu z;!@hlc;d8XGA^!boZ=SOfj*!zUp94_k5ggPIW#|n)fz%Ya~GlLoq}~7eEagW{B-!_ z*sgqSMl8qm&QHSVy`LPNrH~HuA=TU!Y2kG;t1XXkm||ChsgJIK+u2$6*!gj&xZp_F( zLaff@bsVeP1w9s%j&ggh5h`M}`*nZUM!9_%rjv6p%bPFE%l<`Xy1)e*Y`U`XzkqDI z#?sD_*>oXhrkwI&ov;iDz)@a1WL$4yVuA3g14GrJ6bPhAVjz&~2@1#noe2f>Af6rt zNzyiVQ+z zsLeWBGwb|_g|ko0BWDgcYWL04gLw*y#w6JzB{y}MJu z;sa237{H>NKtn#T`63#l4vK{pSwq)&7v zJyQ5iL60T~i?RlSem5nEk((Ubm7f`lbW>ylzECW(PIbq2Weo(qJ0*xdpByF#0X*O< zi4N7n0!CFVJ{f&ppL|bRP&#*VxS-Rbk}hsG9(<}71UB174*S>o#)C9a(H95Xp7Ac8L~40j~>?31CKmX2=@k?_%v;}3_Kng@c20@u|2?}#Z#{lvhASGh*oBcxrl5eBC-LIsO$q7L6@8~i? z=n4U*-4|4X`LJ}PZ*>%q{0FEz3_y~0C>C$2BXl+Zl1ab$9yI9YTtP|GKtl%InNN9* zyH@)^7!%R}N+yJSe`-Qz15A2sc$&$Gf|eqNiiv6fCKIB5kP^ixPLA!$&kRnwN%%_vvUr37gH6OA_mpqY*>q%Vmbi9DgRg#ti! zJtJM2QIoVf5rNn!dJp*`5nrZqJ ztTISUjEBHTTR^pSuBF0P!sm(uEqkcI)hYw~n4hj~7sCl>h1+-tBew_48qu?_RC`8T zKjE}`8ld)$dT(h*u8(9=BZ8+ZBBDJywZD0FN@w@Fu65nL-KnV{?Pxkl*rM9yt+0XO zU?s=Vn_)OYsNEc=jLCVlLTH8zb$tO zXHt5XnWA|?id{!$kG7J^?)s6&@!;nQ859i5O?0l)m6>WQ_FC^QUfXrz^0w?lcOKfm z@4P*g@k({5UKu%Wcw+y7iG$-KwSxyL)h!3^w9VVuROl4A=(KzW_8It0MY%#c0Oonm z$#It+ox@?$DewWE1jErI-1Nb6E!{^Ogbpg2eZB$OzQE-`Zf9%$#0A5+$1&1wLK*Jl@!t(!}`$kXb#v|sVf0Y zH>SD4&8)FzZNvGSq9}Sh$pa+6Me+_3NL|R#DT|J;Bd8;SO=H8^y>3O{YHbzo?%UY6 zvadUOr+!Bz`X3#6n~r5qewGddj!Klz7Bi}ZOjRJjBuF(L&)DjwL1pU6T^T7)g%p%FhgTx+(Zc5IXKB zNB=6Mk5S#Nz|Nmf2nyJm89E#xIZWD8!PcN|DPe07jmKhUar^n(1sSlcQVVPicxz7B znwbd0EXK?-hG=#&SJOU5eQ_w+ZmI%7>WIDlnOH+&okYWKfgVd3NBIuoF)CuU8+K7) zVdJ^6Nxwv>pglb=JgiK8QD9+a3RM4~J+?PMRw-Q7%Jh!mpFmX%sb;7uEMPOt$-@rJ zsiomGFlNLfMa=m|fe+ArEMljEo9t4ER;6{_s@EzLFo| z<_0Y@Ax+Et>gH|Rwq5sY##l2fMAVLCr{|!?Qw|dHa9J3Zb8pNVIRSzOH_yon;P0r8 zqu(WYH_3ZQtex9IrCdkC*jgwXcaer)2GM7#P_y#_7`<1&8P?xKX2{I7SB#&Cd44v`*4PhPcsh9=3-_RhS29AHJwu_ShA`$k&o#@ zfc!FJp}o0i5PN5e2BGMLx2`5e;jMD4FX>LUL%kl5Qt$ScZe8OjAR2Gum*&26jE#~$ z<{`Y&KVF->LKC>;!KY32me@)hItIh?7uq%xqo4_Viax6eJO?=Sb9y?@{T#_wk}SID zYVK~~+7n80^3W*PY@GBMj}&U5^aa`uAS$y^Iz|>s!Aj})t~r8KEE6NvUD5m17o&$r zEDWU*@`3`X5b@ta1)M4R14wmzf8M*?hE+Mwa;s#O)RTm&xFM>H?huO7I^5`!dY^_M zj^zd%szRwkRVdU$_+NmP0!qwxw2J%mMFpu)+)-r|o}#}rvc{<*ROoYxP!X=Ib-;>a zlT(GNQ2dH~s0#gRz2jFXw#Ko3+wnkcyfzoDLXkJPazbh3cxO%|e#ItHo*JdYkIVwj zSB7=j?BZ9j{L_;Pnpf zBAdDi_(>=1p4RKTvE5Tj%#TuHG!4!QaWq%r;o^(}H|lF3j+8pbG@ne%P9IMWHw2BA za_}5qcDYSKHY|!_vvtS-cWff#pHebr+XPGz#qy-+i~8h`(}E^-&EJ{p+r`c5+o$?r zyV;QA$p3Rv<^b(74J};F$D8DHA3V zjrVyAM#AgMW4Hy3K;A9WZBBDJ2Z113MvkK|llzL2+?oj@I^{30_Y~D40%-$=OnF^z zZ&c>cE90uWc-K_~57x>T?wJL$_72S;=qkRYww+4bAB+3m8x&>>%A@x@6}@?T4CrVxy`-Q^`JJoN^@~WEuEn*JfnspBRlU8En%S`(!)Vc zMpGQb<_w*3oAAD@P5iJIdDDa4j%k#BvpA#2RWw$>VTen*Rl@3UGA*Rs_H2=q@9MMP z9PCOn7S62K(aphYp_CRX9rc@ok58+jd2?`&s>h>rULbMC$QYgu&!{C(E|YQKV(GBS ztwl4o#X#GvqwSM6Ncc(AM_8+^1HXlNCH&sC@eN&Q1!PYOt$d-!3E_F4KXqtj+{0QW zt1W{I}Nz|W09+<0oX3>&PM;u2 z@!qf$7XDZagW0VQ{Vb%WA`wg$Zo^-*m-j0pIYn#uOa*X5P6coS@{V>2ACglJJ=P=k zve5{?8Hu>a`MPQLA@N5lcpGxSQpP9mLw=9n+hh3uJ%;=>{6&K1}j3$wx>YA%Sc!ZoOJu$mln=5U7%E`zZ3;R{lgVu z|I!Qy`wyqm0O&NY0no3c1W=&soDx7P4qhutH#ze&rI8byIk_})qBBoNQSdXWrF6}y 
zmI#ajNm>I`vbeYkZft^^$SDF{v*-yAY)c|x_c#)EqCk)o!mCvAnI?fIioT%c2~wKl zJiU+8_bbV*F#Sl=UH!D}lki#k#ubcJ(8WgUS1nz*Ms4-zIKo}jkLo=g)gW98l|d?< z(=KX<@KR$H2be|=>2o(yTzz$}N&^>{HVgBzPkD`{|Mn>j9PexA)McKHp1|Fo$t~wZ zfMUDo?JoZZRp&RQ<)_0Z$9CmwGpYt$(fp(XssV>s+m#lc&YB!9Jb+gK91?eQ+>YQw z1>?}ar%%2*EhybIIb6_baeo&#>)Afl3)DlJ^o$A>StNb^o<)@1# zhwFUCp6!a}C!OfoA5r0l(!%RxRz2I3D-XNKh47C@^toHof+oG1zpmwJd$x<4^=zN& zhi&iKA61!$(=yMtauHPuW*cADCr8qP&aHA?+^lkaT14f3MP;reWe(~qWT2(1zJj`? zq`n%Z@qkb}r@mUPkC*q2jo+3ku-tvEz#_LO6j--YkG-zPb-^nJ<>xSG{3T_k-2|a! zmo-%Anovr`4}%JyQo38@6DSqT@HZaU?xGB8Iqo*|K2&1btB&1;z{W9KrZgFa;I#nL*sPRxcm6uw8 z;2;;Wu$7nEbA1{X!s%&P_G+)rbI}~`5X7E$S_^fdW~`S=Yo?>hsb7G8W1%Rgj&k3% z_fZ*ek~Z(y0HuOgH7KYuE|n@KIB#7KcHTjd4M>keC1L~ zVb(&kf|g6A?4MH0WwU{<+rKCd>+ifs4mMi7H7`RoA?uc#RH`>K9=0lz*TSvs z8W2!_Y^;l0t9-vE68`3|qiR}q@H<-_-6QF|B&eVLO#@w%x3?djj?RLiSLvCK&eyV~ zPelz}NT1rR4?PB{snkcoIH54+u>VUL@tFo4-IX~V-IX(K(~(`0gguznxO@ROz3J_K zQ}7P__RN(UShN|btXaSJsJKgR zV~-gCXi@b0-0*KdPLB0@ZuT7Ikv$~)l@!vl!}`!CXxX5%PrS=yZichm;AWd{!G)uz zkyt*wlAIWK$$mEi(G#44c^gEIKw^z(Mxcceb8!TQ6@djs;4H3)2v8tmmA#PFek_VF zrNUk)BI8XzYg(!O8q|NQ`ugOWxVP_=2KC?Gp=yKrkN&kyNA0haXmF$cYjRqnp4eqQ zns)zmTOGEtR9RK3c>2Ckb}T)+V7Q{K zKhn^xO-}oLD&<|!{YhJ=UrGHZg`E_FLH#F%pf&*ML-`Fr(gW&BZugHAb{Jb5G#Q3< zaHOmQtui7?qO~I<$}^oCPimzitVJ~ztxyliMP++Tcf&P3&~}6k_1b4MPB_0RwU0vz z1FBP-a{da+kYi9BuJ0aJP0tFRr45Qj5H$saJqUD(FH&quJ$dehTcv#~*QKB=a5-at&FQ)~i#*)Kx|Fo6t;%1fX(*i0v zTBS07B`I^n&|11kEz~U~r1q<{+YCr8^MQNc^Oez*A<0wYd(JLs;3H)QV(NXy6N+UU zG-(UT`^*zm3v@T^97rqTZrunKA^22cf@EpQ2)8ND&hAoOi+Y>2sG6{*jqTY2x+>6A z_H)6)O;;pZNSp{)B!?%P3%bTnvRp2s+@_Zyw5#rScq2wwxSzUn`^6Vsd(qfa`sbc` zM$M4b*7%06?b;RA>$@(D4QDiDqtmE#OPbjDM6^N94z3j_3kxiS4PK`YeSgDokw98y4 za0v#D7}|HRa`h-aZB9F*cA7Rp@Wv>mJjnHYDM)A?Apa(P{HKslDS*=~Vm@gxgnBUNQRZWB$FGJNg3>7A#%?EMzDNsr@ss zmNP|hfRfhrb>AvOM?HP3_1R%G##gNZPQVET!}h6*wK2M zjw`hVvwF;Zf^KFH9O+AXSgC*`*>wv+3RhMc7YaC%9uSBm)JPyw@EjxfNEd}2>05d1 zDA2!TP@IyKIij_fEzT5A zo*d&MR^_C>9h_cE!pdugRp9kJq9Y`2G>m_)zELiV2BgtxOv#{gyAG2l)|r@U^Q)Xt zmdUiQ)Qw`@**?1*H;XtIb8R&F5Tt+_OeKa-sR=~Yh*ShTbSXR4Pxf7dHquAFz~e& z%l&DmMmk*w*6gpK#BqnGqjRKl;qr=oa)9$;RnXN`THtQUiv2iUp7lQPMX^ptc-;u0 zr-`(%PCuqQS*L4_-W{*eyZcz9ch^{i^j}vS+EclpeBjRd-ih(@)@z2UwMz9u1%@y0 zET3B|?<#K{8{aK25W7dm_Ej$Izxn_Xb!r#%m-SEgTXwAMF?DYMew=A2}Yr&4HQeX zu~DD|S$79KOj;}1t!iI@bXxi7@u^F7Jr^882XD|+h^)Ni2s&I9m?kiG#0I_Af^~B{ z(QIwfN4LIItyaFHQmv`MUUJ%Vw>;;x@=LX>(XAl8>qTHR_VwYI9R_YU+Ybbc&m_7r z6K}u=_sg;SCHpG;i7w+kuOQh=V#UxOsky?ogZjwC!Mc@VMz)L$yYa@xFI7h(gqa&p zJlkdl)rpDv;KWG9h2rGuTW-x7vwiIt{i%w4OIb}AR!QpwA;?ceH+~~d=vuX?8Z`P7 z(bBRdX;Zi4j(T-$d{6XeJUCSYM*o4k2Dn&4hNfGs+}L>h^FQZ-Pm%mbC51H8r}d$U zSu_*F(&O$vW#(zzW#cf6&W3TA3_^QlvZp!*Y_L+#jmQ&JoYAVd;PAx$0}}_wN6KUR zTdkMR?(eT{)XlihOT!co$<4W;>X!ASe^!4sK;3s|-IiE^2MygPX}RLt(3r4$;mgNl z%RaR4uCj(5(=ghHY}l=r!$yT}@pUCVI^3c^r%Y&3O%SJO)h|VL;Hf<-Dlx4h&US z#2Zh9UyiTB8t-1cegss57loRiOtlDANA>D82I=0(wQ5e-qRBXdu1Exy%-ba}t3m3Oky`EGfl4)^s_HO}E8Iyhsa7Yd(NFao`=<=Z zmrWl|8EUP5vq z$z>#iB-fDqJjrz=pXHOUBsWSjPO^ujN^+1yn_WtgM21rII+8b%youzkB!r=KT*(M; zKM}USwj?uJ8~q`t^ybj7VWrW3Cix5r>WYkoJJ-JH)_r)Z7HbD3Os>+GYhO?zxA7YB z`me92>o{$i+f-h;Qbx$D`_}cX-}thDV+XDnc&hYNX>ee0;NZZ;`fqd~8n{XSjq02` zZ}wI0Ts-jlf#1+`_s3U~-@IDq*XjBl17phFF>u$wFZ;iP12@P24Gvu8|02CR8n{N! v4(k2e_4KHoAJng}(s96dIlFFPx32COs0`erJD0DNq}{hx|2<6}l)C;upa>>h diff --git a/docs/doxygen/doxyxml/generated/index.py b/docs/doxygen/doxyxml/generated/index.py index 0c63512..7ffbdf1 100644 --- a/docs/doxygen/doxyxml/generated/index.py +++ b/docs/doxygen/doxyxml/generated/index.py @@ -3,8 +3,6 @@ """ Generated Mon Feb 9 19:08:05 2009 by generateDS.py. 
""" -from __future__ import absolute_import -from __future__ import unicode_literals from xml.dom import minidom @@ -14,6 +12,7 @@ from . import indexsuper as supermod + class DoxygenTypeSub(supermod.DoxygenType): def __init__(self, version=None, compound=None): supermod.DoxygenType.__init__(self, version, compound) @@ -34,6 +33,7 @@ def find_compounds_and_members(self, details): return results + supermod.DoxygenType.subclass = DoxygenTypeSub # end class DoxygenTypeSub @@ -55,6 +55,7 @@ def find_members(self, details): return results + supermod.CompoundType.subclass = CompoundTypeSub # end class CompoundTypeSub @@ -64,6 +65,7 @@ class MemberTypeSub(supermod.MemberType): def __init__(self, kind=None, refid=None, name=''): supermod.MemberType.__init__(self, kind, refid, name) + supermod.MemberType.subclass = MemberTypeSub # end class MemberTypeSub @@ -76,4 +78,3 @@ def parse(inFilename): rootObj.build(rootNode) return rootObj - diff --git a/docs/doxygen/doxyxml/generated/index.pyc b/docs/doxygen/doxyxml/generated/index.pyc deleted file mode 100644 index f3e0c90d7b363dd2ee65eb8bc7d21beed6cdf2e8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3112 zcmcImZEqVz5T1LnlQ>S3LRv@>(tQCeA>x1pYN}EJec@A$v_TXsq0`y7PR`tmxn0v( zNgfM@2;cG?DsFLrL`cC)j$v(L^mv)9*3#$}X*@965u+9l^mp0Glg>8psBlWOW=oFY@4-9t6R0@age`qx%nzJ`X-SIyw#pv*5J0dUA3&nH{#0fAH%#m~S_r zvLsL9BJ0^(=sYRTXE6VMHqj^3!7B)UgJBauX?P;=0YRX71q1~&_RuGF(qX)ZtXO== zV~Dv2Wr@ZPja|ARx^U>irMU<237cYndNS2+viWqP%dChkZ}ys3G8l&N*@j_d&MO$5 zj$}8#>Cz-cE@s0bn-tSL?!bUVndfMAr>5+hv#8YF(|lS+aZ+@7rJ_i?r{yQPeu-W; zE`|n5;E?xuhT#R?-6W6oIp9>h`X0o%P~;5x)qpJbouhL~bm`K0#QPp&KR^FKmmXbs zj0SL+J9O#Og-_+zj0xj|QJ+qMk3-I{du%k83Fw#TE{yQul@aV-V5Gv*yuiAxp#CXs zr)6$}C`c1ygJKj!X&Nj5!vFI)uxC0cgG^@wU>LkSONM7b7Fp<6+bBuR@@pO5oZ{fI zZKx5z2Fj!zdynhjf1S#F3PR-%(a;r@L0>$`iWQHM`Ot7Bs+m={3=WNTT5Qu%+mkGNcKy;%ZejXZ9!Em z<O!YDT?}~!3!^RJ@je@&<^0n!-XqNcgU%Z-Qz4G zFyBX`f*F++1iO@aiUNvqYf)Z(8aHT;>RVOYrd&BtQo+u}m4l0pf{w|p$ta7$_6pQKUXVNA z4aO@3*Km&SVV$RtiixOTTVUdj+hO{N6JZG7P_)<~+-?qW3#@=(cnbskFnLzjW)4re z^vKO@g@%%g', '>') return s1 + def quote_attrib(inStr): - s1 = (isinstance(inStr, six.string_types) and inStr or + s1 = (isinstance(inStr, str) and inStr or '%s' % inStr) s1 = s1.replace('&', '&') s1 = s1.replace('<', '<') @@ -90,6 +93,7 @@ def quote_attrib(inStr): s1 = '"%s"' % s1 return s1 + def quote_python(inStr): s1 = inStr if s1.find("'") == -1: @@ -121,26 +125,33 @@ class MixedContainer(object): TypeDecimal = 5 TypeDouble = 6 TypeBoolean = 7 + def __init__(self, category, content_type, name, value): self.category = category self.content_type = content_type self.name = name self.value = value + def getCategory(self): return self.category + def getContenttype(self, content_type): return self.content_type + def getValue(self): return self.value + def getName(self): return self.name + def export(self, outfile, level, name, namespace): if self.category == MixedContainer.CategoryText: outfile.write(self.value) elif self.category == MixedContainer.CategorySimple: self.exportSimple(outfile, level, name) else: # category == MixedContainer.CategoryComplex - self.value.export(outfile, level, namespace,name) + self.value.export(outfile, level, namespace, name) + def exportSimple(self, outfile, level, name): if self.content_type == MixedContainer.TypeString: outfile.write('<%s>%s' % (self.name, self.value, self.name)) @@ -152,19 +163,20 @@ def exportSimple(self, outfile, level, name): outfile.write('<%s>%f' % (self.name, self.value, self.name)) elif self.content_type == 
MixedContainer.TypeDouble: outfile.write('<%s>%g' % (self.name, self.value, self.name)) + def exportLiteral(self, outfile, level, name): if self.category == MixedContainer.CategoryText: showIndent(outfile, level) - outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) + outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % + (self.category, self.content_type, self.name, self.value)) elif self.category == MixedContainer.CategorySimple: showIndent(outfile, level) - outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) + outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % + (self.category, self.content_type, self.name, self.value)) else: # category == MixedContainer.CategoryComplex showIndent(outfile, level) - outfile.write('MixedContainer(%d, %d, "%s",\n' % \ - (self.category, self.content_type, self.name,)) + outfile.write('MixedContainer(%d, %d, "%s",\n' % + (self.category, self.content_type, self.name,)) self.value.exportLiteral(outfile, level + 1) showIndent(outfile, level) outfile.write(')\n') @@ -175,6 +187,7 @@ def __init__(self, name='', data_type='', container=0): self.name = name self.data_type = data_type self.container = container + def set_name(self, name): self.name = name def get_name(self): return self.name def set_data_type(self, data_type): self.data_type = data_type @@ -190,12 +203,14 @@ def get_container(self): return self.container class DoxygenType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, version=None, compound=None): self.version = version if compound is None: self.compound = [] else: self.compound = compound + def factory(*args_, **kwargs_): if DoxygenType.subclass: return DoxygenType.subclass(*args_, **kwargs_) @@ -208,6 +223,7 @@ def add_compound(self, value): self.compound.append(value) def insert_compound(self, index, value): self.compound[index] = value def get_version(self): return self.version def set_version(self, version): self.version = version + def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -219,27 +235,34 @@ def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacede outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'): - outfile.write(' version=%s' % (self.format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), )) + outfile.write(' version=%s' % (self.format_string(quote_attrib( + self.version).encode(ExternalEncoding), input_name='version'), )) + def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'): for compound_ in self.compound: compound_.export(outfile, level, namespace_, name_='compound') + def hasContent_(self): if ( self.compound is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='DoxygenType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.version is not None: showIndent(outfile, level) outfile.write('version = %s,\n' % (self.version,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('compound=[\n') @@ -253,18 +276,21 
@@ def exportLiteralChildren(self, outfile, level, name_): level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('version'): self.version = attrs.get('version').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'compound': + nodeName_ == 'compound': obj_ = CompoundType.factory() obj_.build(child_) self.compound.append(obj_) @@ -274,6 +300,7 @@ def buildChildren(self, child_, nodeName_): class CompoundType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, kind=None, refid=None, name=None, member=None): self.kind = kind self.refid = refid @@ -282,6 +309,7 @@ def __init__(self, kind=None, refid=None, name=None, member=None): self.member = [] else: self.member = member + def factory(*args_, **kwargs_): if CompoundType.subclass: return CompoundType.subclass(*args_, **kwargs_) @@ -298,6 +326,7 @@ def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid + def export(self, outfile, level, namespace_='', name_='CompoundType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -309,28 +338,35 @@ def export(self, outfile, level, namespace_='', name_='CompoundType', namespaced outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='CompoundType'): outfile.write(' kind=%s' % (quote_attrib(self.kind), )) - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + outfile.write(' refid=%s' % (self.format_string(quote_attrib( + self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='CompoundType'): if self.name is not None: showIndent(outfile, level) - outfile.write('<%sname>%s\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) + outfile.write('<%sname>%s\n' % (namespace_, self.format_string( + quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) for member_ in self.member: member_.export(outfile, level, namespace_, name_='member') + def hasContent_(self): if ( self.name is not None or self.member is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='CompoundType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.kind is not None: showIndent(outfile, level) @@ -338,9 +374,11 @@ def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) - outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) + outfile.write('name=%s,\n' % quote_python( + self.name).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('member=[\n') level += 1 @@ -353,26 +391,29 @@ def exportLiteralChildren(self, 
outfile, level, name_): level -= 1 showIndent(outfile, level) outfile.write('],\n') + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value if attrs.get('refid'): self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'name': + nodeName_ == 'name': name_ = '' for text__content_ in child_.childNodes: name_ += text__content_.nodeValue self.name = name_ elif child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'member': + nodeName_ == 'member': obj_ = MemberType.factory() obj_.build(child_) self.member.append(obj_) @@ -382,10 +423,12 @@ def buildChildren(self, child_, nodeName_): class MemberType(GeneratedsSuper): subclass = None superclass = None + def __init__(self, kind=None, refid=None, name=None): self.kind = kind self.refid = refid self.name = name + def factory(*args_, **kwargs_): if MemberType.subclass: return MemberType.subclass(*args_, **kwargs_) @@ -398,6 +441,7 @@ def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid + def export(self, outfile, level, namespace_='', name_='MemberType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) @@ -409,25 +453,32 @@ def export(self, outfile, level, namespace_='', name_='MemberType', namespacedef outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') + def exportAttributes(self, outfile, level, namespace_='', name_='MemberType'): outfile.write(' kind=%s' % (quote_attrib(self.kind), )) - outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + outfile.write(' refid=%s' % (self.format_string(quote_attrib( + self.refid).encode(ExternalEncoding), input_name='refid'), )) + def exportChildren(self, outfile, level, namespace_='', name_='MemberType'): if self.name is not None: showIndent(outfile, level) - outfile.write('<%sname>%s\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) + outfile.write('<%sname>%s\n' % (namespace_, self.format_string( + quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) + def hasContent_(self): if ( self.name is not None - ): + ): return True else: return False + def exportLiteral(self, outfile, level, name_='MemberType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) + def exportLiteralAttributes(self, outfile, level, name_): if self.kind is not None: showIndent(outfile, level) @@ -435,23 +486,28 @@ def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) + def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) - outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) + outfile.write('name=%s,\n' % quote_python( + self.name).encode(ExternalEncoding)) + def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] 
self.buildChildren(child_, nodeName_) + def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value if attrs.get('refid'): self.refid = attrs.get('refid').value + def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ - nodeName_ == 'name': + nodeName_ == 'name': name_ = '' for text__content_ in child_.childNodes: name_ += text__content_.nodeValue @@ -465,6 +521,7 @@ def buildChildren(self, child_, nodeName_): -s Use the SAX parser, not the minidom parser. """ + def usage(): print(USAGE_TEXT) sys.exit(1) @@ -479,7 +536,7 @@ def parse(inFileName): doc = None sys.stdout.write('\n') rootObj.export(sys.stdout, 0, name_="doxygenindex", - namespacedef_='') + namespacedef_='') return rootObj @@ -492,7 +549,7 @@ def parseString(inString): doc = None sys.stdout.write('\n') rootObj.export(sys.stdout, 0, name_="doxygenindex", - namespacedef_='') + namespacedef_='') return rootObj @@ -518,9 +575,7 @@ def main(): usage() - - if __name__ == '__main__': main() #import pdb - #pdb.run('main()') + # pdb.run('main()') diff --git a/docs/doxygen/doxyxml/generated/indexsuper.pyc b/docs/doxygen/doxyxml/generated/indexsuper.pyc deleted file mode 100644 index ad401d204226a839c076776b7afe29d75923873c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 27746 zcmeHQTW}o5b?w>3YnPM6Osm$MM0L=%bfwZ{K3h@{?4mQkAbH&f_OPu2dzJr1Fywf8;YsCFh(w zFMxmq%G#ymGRSUp_q@9Ay?yWPb9)Bm|Jqdj_aFT6*``Z=hVlDxq-f4_F2H}!wVj*E z%AT9?)LwG!lAABPnIY%#ys~Q#yZI3}Gh+22)kodTsPisw*CoTQJ?7>sZl>bA7d!ji zP*`;rtFCv=c{3Yu)rbo|;DS+i&AF@2y@b;`o;w?ixtWbFsIYlvlMAXU+~R@_DvY~e zqY9f{fTMT1;1*VOgDox?ci~nyi!N`@TDQ4iiwi4i-I}$ci`&xH?OE#%7wkw|C$iQF z7wmL1J6&)qn`Ulx!7decx!^VxZgauyD%|dZ-74&M;T{)db)%jMZ6?EsVVF(RV-5|8uIBHLZov_!4!yuYoTnKx9 z8Es7@{gjX&M~Y4&3(;ZYpy#fEo27)CESFU-t9?l2A(e*{_VJt%=TA?L;6^c$Yw|%% z?Dq+zh<*F4=cd)SsY%q_Bu|KWAPU>FaTSGDXJIj}2aUKV&VMb~xxZBVcrt(*t;DqH#~&dn>d7x`4DE?-9~)JI~hPCER?Q+^PnI_L3i<7 zJG}Fqj3By+EGS6PIZUUp(b>HEhRx za4T5K3DPH8h%?do?rTp&&voLXc&xOr^R{_S>gALYqQXmPAPJCT$goB`SYFsIE$7_C z6)C$5Fbnu!<{5blY{KyJ7MSeOyk&ZlQjuuL2={kxe?;xLzcIh?i0Vhwwf*hbszJwohWW}nxRH^t`UW%sAJN(7cR6L%`oRkt1}(c z<~Scpp?BU6uj*~pzl`J*9+CWaQ0NQE;Tl1dFOIn0uhT(F%3q?7gfe+JMmGsOBwH{2 zHeDqgV5L({&Po_QJxPt0vd*7CCFh*KlU0s~jPo_`R0H3{)xF9O=#;(tqP-EV;eOKT z5w#uI7ab6TeHvT;HfFS0;-bHsRa$`lvyaR&_BCP{|8viv9&xZm4sP@|db>)SO4U`S z&o^)?=1Vds0Q8r`P0t&l9^%Di*PB-(s)acP z2a$|iamP2vSKY~&J~16oWpVCI`8DM7B_mG1YDWHuQ}e(9a^k=NKeJY!+?z9vXYAce z{_Wjs^NUQLh5K}~nFY7)e78f@6>Uf6yb(;e9o|@JLy5mxifPYIx0b@-M7I++TJVRP zJe5DBgy!luQOF!%y1wT0mbi+6vYU~k3m2E|qh!nsCJ@>yx0A6f8$Nz){l)@BYm^zRImXqpgHH8kbgqTuTI_F& z8m%Q8LN2tZMq8X#Eo#9gt4usgN``X>G6C7Fqad?ZN42aLR3U>nBo9CJ&3Rwh_$F2k zbKTzMnA4*P^1+PMDF!j~&Cq4;QLvg*n zgojD4ombI6>0VNgNn|0hFmOzvbkscLv+G)+Xe9mbHQgil4JQ&pUqLnX*U3Sqx9c(q zZTeCiP!&!6r8XxO`)x8ok@y^1(yphJe!H$qGDLTFz5mo6vtECK^7{Ha=r6^2kkUaR z1(l~T(XJM{OJb!-!VD=+gc9!=7DdA3izoBw(*}CPY5lk@UnMs@ z{sa+)?C8m`*_v;(wU*$~lUlA%lCx&UmP=p%ve1 z3OToI;(yFxg-MHFqJuBi!HF?3w6l%nnDyZEsGA*1gz@(Zw~`3;zVS|P)mZgH6X4~2 zNT-7J<$@L~=s84mCwsrzP)wT@gpDGy+{2sP7u;K8qSI>m!F#I_*Aj1aVpSfNtk}V- z{N`N2`G;_^zk``Hn}joyLH{rg2tqU^_HkeUy+->cO4CQtq2EM`b|a(q+5}JNuJm__ zwcEU%Xr1IT$LA`~r&V93$fvrU&<3vY5m8PR`H3zS`I7uDP9L6{Rb&7@%v(Od>?6z` zWcCoVpJ#TE**(lY$n0KbA7Vze>vJyn96~F?uV5udp3C5ZYsc&Avs{e!HxH%4a4M1jnza?t&pv(mOCb0)WcnDw6IOOp)c zC>qRArtYgKtd0(l)|6E^^m&*6DC_c#5&UsnS0K2mXc`Q`Gu7~YJn_baTOGX#1kbws zgS5-0Ml-W#_UIUn`p213YW$PTPBD7|S+4K;Db_x5`rk#0XpKj_twRQ3FqgqOY4J?j z-0Ka_zu=n0;B1#|K@P(LKuMwYYe24GUJ&7hRT**Ns0#%(654`YLZfgk76fgvAasib 
z0bDEy<6=QD7XU6;C!$+0YrwluMhh@5JPW9oRe^M&s!%*H1onk<0CoXS1N>!R7vL`i z;|YIlQD+ckS0H`{f%R=_*+~OPC+zx&Ed8_04$y^k?&6s1?QwIO`t_3Qz`K+WiZYI- znt)bNA%s`MUetow8$v_o&NSiYbQe2;!PH#c_)jr=23al<{j`98jwfYk7BJ|N*iL=H zoV2*#uK5?N#yT7@#Zu2R=3{b-m$)HM4<>ar}Z+80zJr(s^ERl>2 zM7-qd(}bp-|Le6oPkh}hD5MO+MEqi^IW-UCF0# z@*wd_0xz5ki6Z@5RNu)|P`7j0Q5Co9uXDq`b1Ec#|GTvBT4_m5HYwR@k`R3l4b-m7 zz!pU6PzMA9oNU+?@eSA>or#06wtzUUmOYPi3MB-ve~X8?<-h6Vh1>#FP?Uq*o z!mjp6pgDTvEMmHX7B{7LLmWPxs$=NyluQY*xqVS>UnIHKMh*>8(8tjP2I1L6ja14)Y!n5F0dWelPV0NJ&2!jy zkT4eQL)^IsCL~PvJnTCZlcaNoaJ2!ZwM6zF-4|)cm>dnGlprTE#NWe=Q$mV1>@>08 zPh2>)1jwn=XrE#eP|{U#^p|-x2|J0@*9qmH#RGnXlm>-p)<`lg#Y8p&Jxli_M&;xy zce*P8CWlGAe42=8oFCW41`vz!U*lQ7iyB#PgLeDQ&TyGm?XBxPt1@C$CltC~W%WQU*d%m^ChDbizINV6vAg)sxUuj566gxeC4amq}%9e9%K+Dl4`uAg`=vHJjjhnE&ZL@w)Am4%-7|A)y zK4!|`3@TGEE81*|EPL9m#Ra|yO~vJL1}JkGXp$s3_7eQptcN3JU5YBcGKaJzh@1#kzYsdP(Aw9szFaz1orrb9CN zo8yE_>MHSZku0+ATDMqra>xE#8udXv{T~cA30POai_PAI@q;{J(!}~83Yo(KGWQNF ztI`5vPY?i;qGmB~gc7P9tnl--6tZUC+H`9hrT#k?vv^7gwlT4+zSk0goFdEje!(DdaxK zP2SWbCwU$iG~@9xv}yL}y$K;k2Ay(Th9}_NjaCy|!_IdD|5@JRIc7dHDgyrnW}je2 z4^V!hPftnqTWZcf$10VLQHtDFH*>*Ksb8oYL9stY60T;k!10mF@J1j}`M<3j#!KVn z%IL=7%1~ufWu#K}x!7f|^+Q#094V@Twjn@ISfN{m$s&h>!`ngGf9I}YAK|-=%wEGo z-);jU-;DyE3n&)oEudJG(E>mga4i5?s{)oqwFt}N93ZnmZwtsQAZ9)hgW912-g?rj zQ8Re!X{TClCwjXJG|ARb*=&6rt(bk(J17`Riz+6jG!YDUV0i9_Q#^M_c&>~?T}M+q zmxPy#d=Y?+G-EHEZ3VU&QT{WRA@d5`78Xgbq0wJrc7WMsWVyKYOVSSFDO))A2wkPS z72j=)mtyI2;u;|LaaK>WiqzjVAeF){4v!SSK_Hi07$A71u+N?BE4wNmhQgT%C?iFg zfI=2iX~PvJF$J_!T}OL=iF*tWsAU~yKhkspJ^IyCLQ0Ob$q>KrUe}&nJai6$t)XxAnR^Q zWBz`7I;+z;omIt!{iQQ|&WMA0PLj#;SiFJ7pT>BDVICaa4Tlc7J5Q^*R=agH`+V0!|_@ z)t#?Ra&*?-r2YVpW*pI~WIcfn637hY000Co-XA2NI27j*%ssklu;U&aE^;gSKr#Um z=ItdUGmW)dG4?^ZF3?AQ)Ti0jKsG>2BY5Ib4wF;GmBzh+Mm`J^k@{BYpt@kmZ+pA7!(~m>^R*QwZ6Qr_?hfYQG zsi*@H0MQEoRV}Zj2-BE|q)LkKX`Eb`byAQwkGSX1n`cN|0|YpiZqtmqK0NXxoVr>_ zVvXpt=(O7ThQu?>Q7(p4Mxgc945uVp6bkQ%K@(!z`dZ->3zJ;>l!a5uZmn?27`3sj zooKwKWWuOCVZaa{VVkM7EVM#&KMAeakuwMhq|_R$;}B1QT1n;%`rRAti=p&PLYZi*~a*QKhKOX zcmK5&q4rOcie{YF4vG+<&RjD{VQ=lANam#Rw5jwe&7&lCQqE#0(n#iF<*J3c(V{0( zK{}8xKS-kf6NU_f`b>k;5Cx+cVffRiknT-qdZUN2?3NP*m8JO zW&3;zU|B`$pvND?LB&xG%N=$dL}BCt=QsS8ZdlgahTo!>i}%v5A}(VaLiAAs>IE8x zio1ZXSZF_uzLQ0Gi|TsH=CL$4r@p;HcnKv%X8ayYl+P?Qr`LdWl^~MS0)$t@Xm}?> zZAcWex0&$DiPPq$7%xKAvF*^_koTcw2f#qiu8U+KU*NL1%?RACEr{OaF&yfS)nYjQ zA`cv7Mv&HjmDzD-$C!PF*)K2y&7FUR*)KBt9J9|ed!5;PHG;zx<}IYn&!J!uoE>8; zBRFX+2a%jI;yYuA&Ll71*tBu(^h$e`>P$+>=ay=|~Z6y@Cx!2S1)K95IzHS1+cQvQB>vaWFERK8BS$=IDNEAmp!+bA)l$v)lY<5c2vz>Z+K*H%Z+gQ8$5*3H}fv)nq2p&4q;! 
zq<0-c4zl&hI#6NlDzkOW)96(Jl^SDpITpMsJTX9Yo~ZA)NLsFp4E1fIDl?gB${X0; zPLIU)n0=b12C_jqC@g3Dc>hiICk2oe_95v*F zJzEV@^uNd(AoX|6K>YA8a&<(^(D1{xcYePOYWRLa2(MxEpW*25LBSA0d}`zRJ>KQ< zA?@)Vt(1ujMg%abFGP*G@Zp+$eztbxxke9fY9Ga45vaXf`%qMSrFNv%;fJ8>{Pg$H z>az}+j8x45^~>BeeH{;osPeyB<{sh`ZXBI*kh z@#IqmquUEJ7CMs=D7-XBW%0uFu_sT}&zw4YMz3!~mm}?w4wqVSE@~?EX|J<(VlR?# z|3I>vx*xVzB3a2Ca>^wJW|0)Y{>e9$5oN$DSl7R@Vd2X_7$0_X;OmEo5hT<=5CYKx zYqdQdwr4|;$`W^bOzVsGQ_)U5^vGjiS@sg{-un;Tx3~5f-iF2@+pwYc84&Lp%h zyR7I$tMddzN$)37nc=S5r`PLtW4^+qFK4jw?70g$JNv{*pSy1UkxaBT>p0$uz#Aj@ zTH0m=kw)KFEcM^OSXO1@1{@F@laEfm$rGPsrhv(E?)hKifi28(zKx-(uh8?GmA}km zzryTSk@cTPzlNv$>pFNuBP7#Eh4?N_yL=iZdkc!wFxxl9H!rjKs*P>M2}yr-w%47n zX`&yns zLTMXGVDdG|ACsNi(G#w=^C`C(pC`bpQL zP62&DGwxz|wKK~1X0GD)(5BOR-`ZZimIgGuL8^)*!54vEl_hD19i!@3(5e-?3=6*) z)ORV@ljiZ6!v7^{IN!K}*y?R5Nju`tlHgdqumx^X7)qvZv2nP&5GSfvpO)n<6d}f+ zmZ$EV5Vs3YbX@ye~Bd$*htOE?f(X|Z!#mypVL~UKB<0@^@#nk zm!qS2Z1ey!zA(6%|0-o~JGRB`tc{G|rMNK!Q#WsWd}7PQnThtqnUNAcpScPDZmCo^ GdjAjQKYGsq diff --git a/docs/doxygen/doxyxml/text.py b/docs/doxygen/doxyxml/text.py index 87efd20..96c5648 100644 --- a/docs/doxygen/doxyxml/text.py +++ b/docs/doxygen/doxyxml/text.py @@ -4,25 +4,13 @@ # This file was generated by gr_modtool, a tool from the GNU Radio framework # This file is a part of gr-verilog # -# GNU Radio is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. +# SPDX-License-Identifier: GPL-3.0-or-later # -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. # """ Utilities for extracting text from generated classes. """ -from __future__ import unicode_literals + def is_string(txt): if isinstance(txt, str): @@ -34,11 +22,13 @@ def is_string(txt): pass return False + def description(obj): if obj is None: return None return description_bit(obj).strip() + def description_bit(obj): if hasattr(obj, 'content'): contents = [description_bit(item) for item in obj.content] @@ -51,7 +41,8 @@ def description_bit(obj): elif is_string(obj): return obj else: - raise Exception('Expecting a string or something with content, content_ or value attribute') + raise Exception( + 'Expecting a string or something with content, content_ or value attribute') # If this bit is a paragraph then add one some line breaks. 
if hasattr(obj, 'name') and obj.name == 'para': result += "\n\n" diff --git a/docs/doxygen/doxyxml/text.pyc b/docs/doxygen/doxyxml/text.pyc deleted file mode 100644 index 72bd36fe4ad9d704f784ba2432ccffb40ab6c090..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1461 zcmb_c-D(p-6h5t1cOX)tSCEQ6 z0Vqm60Uo8k0H4wtbz61bz6}w)a9N(YOxy6VG@+h26DKY!`k@034^24=`?}C3b~+7{ zJhoQbtsvn$d9a~f4Eq?u5uHLRI`_!jAm>r{WPPsnMSEAYFN;;Gxf&e>WOhg?RVdnb zbvVX0JFNyA3m`I^71qT?qMeVabq1%1o|{U$1%Rr^k}}n95x5hN^qw)Lfgz%∾Gk zlap@ZTF{aRakgDq)3$@y=yt!TOq^zAdsL>bEc15X+%NPoX6>|0EX3plNs#|!l(#uG zTjLYCHM2c*d*7O<=%W}&;_+pS^fwgr=@Dh;h(%bY*M;FM~Dbh z^iz|z*;VZGQ8rFJkJt!fhw2|xP76;8bb)|zY+}ol3j%RUd>X`-w@1s+N9?L>9hO-U zF}EV-Z^Vt|ct;nVm&PWQu8LNnX>oZYrh**IoF4rfnnfmwjJ8#N_3bA9GOz6>1b5jj zwRR!bn1q$(}5{_M1nLY(q`W|qRAK=xfa{X`}So)*kEe4DA EKaomCrT_o{ diff --git a/docs/doxygen/other/doxypy.py b/docs/doxygen/other/doxypy.py new file mode 100644 index 0000000..28b1664 --- /dev/null +++ b/docs/doxygen/other/doxypy.py @@ -0,0 +1,446 @@ +#!/usr/bin/env python + + +__applicationName__ = "doxypy" +__blurb__ = """ +doxypy is an input filter for Doxygen. It preprocesses python +files so that docstrings of classes and functions are reformatted +into Doxygen-conform documentation blocks. +""" + +__doc__ = __blurb__ + \ + """ +In order to make Doxygen preprocess files through doxypy, simply +add the following lines to your Doxyfile: + FILTER_SOURCE_FILES = YES + INPUT_FILTER = "python /path/to/doxypy.py" +""" + +__version__ = "0.4.2" +__date__ = "5th December 2008" +__website__ = "http://code.foosel.org/doxypy" + +__author__ = ( + "Philippe 'demod' Neumann (doxypy at demod dot org)", + "Gina 'foosel' Haeussge (gina at foosel dot net)" +) + +__licenseName__ = "GPL v2" +__license__ = """This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . +""" + +import sys +import re + +from argparse import ArgumentParser + + +class FSM(object): + """Implements a finite state machine. + + Transitions are given as 4-tuples, consisting of an origin state, a target + state, a condition for the transition (given as a reference to a function + which gets called with a given piece of input) and a pointer to a function + to be called upon the execution of the given transition. + """ + + """ + @var transitions holds the transitions + @var current_state holds the current state + @var current_input holds the current input + @var current_transition hold the currently active transition + """ + + def __init__(self, start_state=None, transitions=[]): + self.transitions = transitions + self.current_state = start_state + self.current_input = None + self.current_transition = None + + def setStartState(self, state): + self.current_state = state + + def addTransition(self, from_state, to_state, condition, callback): + self.transitions.append([from_state, to_state, condition, callback]) + + def makeTransition(self, input): + """ Makes a transition based on the given input. 
+ + @param input input to parse by the FSM + """ + for transition in self.transitions: + [from_state, to_state, condition, callback] = transition + if from_state == self.current_state: + match = condition(input) + if match: + self.current_state = to_state + self.current_input = input + self.current_transition = transition + if args.debug: + print("# FSM: executing (%s -> %s) for line '%s'" % + (from_state, to_state, input), file=sys.stderr) + callback(match) + return + + +class Doxypy(object): + def __init__(self): + string_prefixes = "[uU]?[rR]?" + + self.start_single_comment_re = re.compile( + r"^\s*%s(''')" % string_prefixes) + self.end_single_comment_re = re.compile(r"(''')\s*$") + + self.start_double_comment_re = re.compile( + r'^\s*%s(""")' % string_prefixes) + self.end_double_comment_re = re.compile(r'(""")\s*$') + + self.single_comment_re = re.compile( + r"^\s*%s(''').*(''')\s*$" % string_prefixes) + self.double_comment_re = re.compile( + r'^\s*%s(""").*(""")\s*$' % string_prefixes) + + self.defclass_re = re.compile(r"^(\s*)(def .+:|class .+:)") + self.empty_re = re.compile(r"^\s*$") + self.hashline_re = re.compile(r"^\s*#.*$") + self.importline_re = re.compile(r"^\s*(import |from .+ import)") + + self.multiline_defclass_start_re = re.compile( + r"^(\s*)(def|class)(\s.*)?$") + self.multiline_defclass_end_re = re.compile(r":\s*$") + + # Transition list format + # ["FROM", "TO", condition, action] + transitions = [ + # FILEHEAD + + # single line comments + ["FILEHEAD", "FILEHEAD", self.single_comment_re.search, + self.appendCommentLine], + ["FILEHEAD", "FILEHEAD", self.double_comment_re.search, + self.appendCommentLine], + + # multiline comments + ["FILEHEAD", "FILEHEAD_COMMENT_SINGLE", + self.start_single_comment_re.search, self.appendCommentLine], + ["FILEHEAD_COMMENT_SINGLE", "FILEHEAD", + self.end_single_comment_re.search, self.appendCommentLine], + ["FILEHEAD_COMMENT_SINGLE", "FILEHEAD_COMMENT_SINGLE", + self.catchall, self.appendCommentLine], + ["FILEHEAD", "FILEHEAD_COMMENT_DOUBLE", + self.start_double_comment_re.search, self.appendCommentLine], + ["FILEHEAD_COMMENT_DOUBLE", "FILEHEAD", + self.end_double_comment_re.search, self.appendCommentLine], + ["FILEHEAD_COMMENT_DOUBLE", "FILEHEAD_COMMENT_DOUBLE", + self.catchall, self.appendCommentLine], + + # other lines + ["FILEHEAD", "FILEHEAD", self.empty_re.search, self.appendFileheadLine], + ["FILEHEAD", "FILEHEAD", self.hashline_re.search, self.appendFileheadLine], + ["FILEHEAD", "FILEHEAD", self.importline_re.search, + self.appendFileheadLine], + ["FILEHEAD", "DEFCLASS", self.defclass_re.search, self.resetCommentSearch], + ["FILEHEAD", "DEFCLASS_MULTI", + self.multiline_defclass_start_re.search, self.resetCommentSearch], + ["FILEHEAD", "DEFCLASS_BODY", self.catchall, self.appendFileheadLine], + + # DEFCLASS + + # single line comments + ["DEFCLASS", "DEFCLASS_BODY", + self.single_comment_re.search, self.appendCommentLine], + ["DEFCLASS", "DEFCLASS_BODY", + self.double_comment_re.search, self.appendCommentLine], + + # multiline comments + ["DEFCLASS", "COMMENT_SINGLE", + self.start_single_comment_re.search, self.appendCommentLine], + ["COMMENT_SINGLE", "DEFCLASS_BODY", + self.end_single_comment_re.search, self.appendCommentLine], + ["COMMENT_SINGLE", "COMMENT_SINGLE", + self.catchall, self.appendCommentLine], + ["DEFCLASS", "COMMENT_DOUBLE", + self.start_double_comment_re.search, self.appendCommentLine], + ["COMMENT_DOUBLE", "DEFCLASS_BODY", + self.end_double_comment_re.search, self.appendCommentLine], + ["COMMENT_DOUBLE", 
"COMMENT_DOUBLE", + self.catchall, self.appendCommentLine], + + # other lines + ["DEFCLASS", "DEFCLASS", self.empty_re.search, self.appendDefclassLine], + ["DEFCLASS", "DEFCLASS", self.defclass_re.search, self.resetCommentSearch], + ["DEFCLASS", "DEFCLASS_MULTI", + self.multiline_defclass_start_re.search, self.resetCommentSearch], + ["DEFCLASS", "DEFCLASS_BODY", self.catchall, self.stopCommentSearch], + + # DEFCLASS_BODY + + ["DEFCLASS_BODY", "DEFCLASS", + self.defclass_re.search, self.startCommentSearch], + ["DEFCLASS_BODY", "DEFCLASS_MULTI", + self.multiline_defclass_start_re.search, self.startCommentSearch], + ["DEFCLASS_BODY", "DEFCLASS_BODY", self.catchall, self.appendNormalLine], + + # DEFCLASS_MULTI + ["DEFCLASS_MULTI", "DEFCLASS", + self.multiline_defclass_end_re.search, self.appendDefclassLine], + ["DEFCLASS_MULTI", "DEFCLASS_MULTI", + self.catchall, self.appendDefclassLine], + ] + + self.fsm = FSM("FILEHEAD", transitions) + self.outstream = sys.stdout + + self.output = [] + self.comment = [] + self.filehead = [] + self.defclass = [] + self.indent = "" + + def __closeComment(self): + """Appends any open comment block and triggering block to the output.""" + + if args.autobrief: + if len(self.comment) == 1 \ + or (len(self.comment) > 2 and self.comment[1].strip() == ''): + self.comment[0] = self.__docstringSummaryToBrief( + self.comment[0]) + + if self.comment: + block = self.makeCommentBlock() + self.output.extend(block) + + if self.defclass: + self.output.extend(self.defclass) + + def __docstringSummaryToBrief(self, line): + """Adds \\brief to the docstrings summary line. + + A \\brief is prepended, provided no other doxygen command is at the + start of the line. + """ + stripped = line.strip() + if stripped and not stripped[0] in ('@', '\\'): + return "\\brief " + line + else: + return line + + def __flushBuffer(self): + """Flushes the current outputbuffer to the outstream.""" + if self.output: + try: + if args.debug: + print("# OUTPUT: ", self.output, file=sys.stderr) + print("\n".join(self.output), file=self.outstream) + self.outstream.flush() + except IOError: + # Fix for FS#33. Catches "broken pipe" when doxygen closes + # stdout prematurely upon usage of INPUT_FILTER, INLINE_SOURCES + # and FILTER_SOURCE_FILES. + pass + self.output = [] + + def catchall(self, input): + """The catchall-condition, always returns true.""" + return True + + def resetCommentSearch(self, match): + """Restarts a new comment search for a different triggering line. + + Closes the current commentblock and starts a new comment search. + """ + if args.debug: + print("# CALLBACK: resetCommentSearch", file=sys.stderr) + self.__closeComment() + self.startCommentSearch(match) + + def startCommentSearch(self, match): + """Starts a new comment search. + + Saves the triggering line, resets the current comment and saves + the current indentation. + """ + if args.debug: + print("# CALLBACK: startCommentSearch", file=sys.stderr) + self.defclass = [self.fsm.current_input] + self.comment = [] + self.indent = match.group(1) + + def stopCommentSearch(self, match): + """Stops a comment search. + + Closes the current commentblock, resets the triggering line and + appends the current line to the output. + """ + if args.debug: + print("# CALLBACK: stopCommentSearch", file=sys.stderr) + self.__closeComment() + + self.defclass = [] + self.output.append(self.fsm.current_input) + + def appendFileheadLine(self, match): + """Appends a line in the FILEHEAD state. 
+ + Closes the open comment block, resets it and appends the current line. + """ + if args.debug: + print("# CALLBACK: appendFileheadLine", file=sys.stderr) + self.__closeComment() + self.comment = [] + self.output.append(self.fsm.current_input) + + def appendCommentLine(self, match): + """Appends a comment line. + + The comment delimiter is removed from multiline start and ends as + well as singleline comments. + """ + if args.debug: + print("# CALLBACK: appendCommentLine", file=sys.stderr) + (from_state, to_state, condition, callback) = self.fsm.current_transition + + # single line comment + if (from_state == "DEFCLASS" and to_state == "DEFCLASS_BODY") \ + or (from_state == "FILEHEAD" and to_state == "FILEHEAD"): + # remove comment delimiter from begin and end of the line + activeCommentDelim = match.group(1) + line = self.fsm.current_input + self.comment.append(line[line.find( + activeCommentDelim) + len(activeCommentDelim):line.rfind(activeCommentDelim)]) + + if (to_state == "DEFCLASS_BODY"): + self.__closeComment() + self.defclass = [] + # multiline start + elif from_state == "DEFCLASS" or from_state == "FILEHEAD": + # remove comment delimiter from begin of the line + activeCommentDelim = match.group(1) + line = self.fsm.current_input + self.comment.append( + line[line.find(activeCommentDelim) + len(activeCommentDelim):]) + # multiline end + elif to_state == "DEFCLASS_BODY" or to_state == "FILEHEAD": + # remove comment delimiter from end of the line + activeCommentDelim = match.group(1) + line = self.fsm.current_input + self.comment.append(line[0:line.rfind(activeCommentDelim)]) + if (to_state == "DEFCLASS_BODY"): + self.__closeComment() + self.defclass = [] + # in multiline comment + else: + # just append the comment line + self.comment.append(self.fsm.current_input) + + def appendNormalLine(self, match): + """Appends a line to the output.""" + if args.debug: + print("# CALLBACK: appendNormalLine", file=sys.stderr) + self.output.append(self.fsm.current_input) + + def appendDefclassLine(self, match): + """Appends a line to the triggering block.""" + if args.debug: + print("# CALLBACK: appendDefclassLine", file=sys.stderr) + self.defclass.append(self.fsm.current_input) + + def makeCommentBlock(self): + """Indents the current comment block with respect to the current + indentation level. + + @returns a list of indented comment lines + """ + doxyStart = "##" + commentLines = self.comment + + commentLines = ["%s# %s" % (self.indent, x) for x in commentLines] + l = [self.indent + doxyStart] + l.extend(commentLines) + + return l + + def parse(self, input): + """Parses a python file given as input string and returns the doxygen- + compatible representation. + + @param input the python code to parse + @returns the modified python code + """ + lines = input.split("\n") + + for line in lines: + self.fsm.makeTransition(line) + + if self.fsm.current_state == "DEFCLASS": + self.__closeComment() + + return "\n".join(self.output) + + def parseFile(self, filename): + """Parses a python file given as input string and returns the doxygen- + compatible representation. + + @param input the python code to parse + @returns the modified python code + """ + f = open(filename, 'r') + + for line in f: + self.parseLine(line.rstrip('\r\n')) + if self.fsm.current_state == "DEFCLASS": + self.__closeComment() + self.__flushBuffer() + f.close() + + def parseLine(self, line): + """Parse one line of python and flush the resulting output to the + outstream. 
+ + @param line the python code line to parse + """ + self.fsm.makeTransition(line) + self.__flushBuffer() + + +def argParse(): + """Parses commandline args.""" + parser = ArgumentParser(prog=__applicationName__) + + parser.add_argument("--version", action="version", + version="%(prog)s " + __version__ + ) + parser.add_argument("--autobrief", action="store_true", + help="use the docstring summary line as \\brief description" + ) + parser.add_argument("--debug", action="store_true", + help="enable debug output on stderr" + ) + parser.add_argument("filename", metavar="FILENAME") + + return parser.parse_args() + + +def main(): + """Starts the parser on the file given by the filename as the first + argument on the commandline. + """ + global args + args = argParse() + fsm = Doxypy() + fsm.parseFile(args.filename) + + +if __name__ == "__main__": + main() diff --git a/docs/doxygen/other/group_defs.dox b/docs/doxygen/other/group_defs.dox index 340318f..94aff62 100644 --- a/docs/doxygen/other/group_defs.dox +++ b/docs/doxygen/other/group_defs.dox @@ -4,4 +4,3 @@ * module are listed here or in the subcategories below. * */ - diff --git a/docs/doxygen/pydoc_macros.h b/docs/doxygen/pydoc_macros.h new file mode 100644 index 0000000..fb3954b --- /dev/null +++ b/docs/doxygen/pydoc_macros.h @@ -0,0 +1,19 @@ +#ifndef PYDOC_MACROS_H +#define PYDOC_MACROS_H + +#define __EXPAND(x) x +#define __COUNT(_1, _2, _3, _4, _5, _6, _7, COUNT, ...) COUNT +#define __VA_SIZE(...) __EXPAND(__COUNT(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1)) +#define __CAT1(a, b) a##b +#define __CAT2(a, b) __CAT1(a, b) +#define __DOC1(n1) __doc_##n1 +#define __DOC2(n1, n2) __doc_##n1##_##n2 +#define __DOC3(n1, n2, n3) __doc_##n1##_##n2##_##n3 +#define __DOC4(n1, n2, n3, n4) __doc_##n1##_##n2##_##n3##_##n4 +#define __DOC5(n1, n2, n3, n4, n5) __doc_##n1##_##n2##_##n3##_##n4##_##n5 +#define __DOC6(n1, n2, n3, n4, n5, n6) __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6 +#define __DOC7(n1, n2, n3, n4, n5, n6, n7) \ + __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6##_##n7 +#define DOC(...) 
__EXPAND(__EXPAND(__CAT2(__DOC, __VA_SIZE(__VA_ARGS__)))(__VA_ARGS__)) + +#endif // PYDOC_MACROS_H
diff --git a/docs/doxygen/swig_doc.pyc b/docs/doxygen/swig_doc.pyc
deleted file mode 100644
index 6c03427d8147862c13700bc69408acaf31e6ae94..0000000000000000000000000000000000000000
Binary files a/docs/doxygen/swig_doc.pyc and /dev/null differ
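The DOC(...) helper in pydoc_macros.h above counts its arguments and pastes them into a __doc_* identifier, so a call such as DOC(gr, verilog, verilog_axi_ii) (a hypothetical symbol for this module) selects __DOC3 and resolves to __doc_gr_verilog_verilog_axi_ii. Those __doc_* strings are filled in by the update_pydoc.py script whose rename follows. A minimal sketch of its 'scrape' phase, assuming the Doxygen XML has already been generated and that the script's directory is importable; the same step is normally run as python update_pydoc.py scrape --xml_path <xml dir> --json_path <json file>.

# Hedged sketch of the 'scrape' phase; paths are illustrative, and in the real
# build the CMake rules invoke the script on the command line instead.
import json
from doxyxml import DoxyIndex
from update_pydoc import get_docstrings_dict    # assumes docs/doxygen/ is on sys.path

di = DoxyIndex('build/docs/doxygen/xml/')       # Doxygen XML output directory (assumed)
docstrings = get_docstrings_dict(di)            # {qualified C++ name: docstring}
with open('build/docstrings.json', 'w') as fp:  # plays the role of --json_path
    json.dump(docstrings, fp)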
diff --git a/docs/doxygen/swig_doc.py b/docs/doxygen/update_pydoc.py similarity index 51% rename from docs/doxygen/swig_doc.py rename to docs/doxygen/update_pydoc.py index 6b74c01..b65e168 100644 --- a/docs/doxygen/swig_doc.py +++ b/docs/doxygen/update_pydoc.py @@ -1,42 +1,38 @@ # # Copyright 2010-2012 Free Software Foundation, Inc. # -# This file is part of GNU Radio +# This file was generated by gr_modtool, a tool from the GNU Radio framework +# This file is a part of gnuradio # -# GNU Radio is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. +# SPDX-License-Identifier: GPL-3.0-or-later # -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. # """ -Creates the swig_doc.i SWIG interface file. -Execute using: python swig_doc.py xml_path outputfilename +Updates the *pydoc_h files for a module +Execute using: python update_pydoc.py xml_path outputfilename -The file instructs SWIG to transfer the doxygen comments into the +The file instructs Pybind11 to transfer the doxygen comments into the python docstrings. """ -from __future__ import unicode_literals -import sys, time +import os +import sys +import time +import glob +import re +import json +from argparse import ArgumentParser from doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile from doxyxml import DoxyOther, base + def py_name(name): bits = name.split('_') return '_'.join(bits[1:]) + def make_name(name): bits = name.split('_') return bits[0] + '_make_' + '_'.join(bits[1:]) @@ -61,6 +57,7 @@ def includes(cls, item): is_a_block = di.has_member(friendname, DoxyFunction) return is_a_block + class Block2(object): """ Checks if doxyxml produced objects correspond to a new style @@ -74,7 +71,8 @@ def includes(cls, item): # Check for a parsing error.
if item.error(): return False - is_a_block2 = item.has_member('make', DoxyFunction) and item.has_member('sptr', DoxyOther) + is_a_block2 = item.has_member( + 'make', DoxyFunction) and item.has_member('sptr', DoxyOther) return is_a_block2 @@ -86,6 +84,7 @@ def utoascii(text): return '' out = text.encode('ascii', 'replace') # swig will require us to replace blackslash with 4 backslashes + # TODO: evaluate what this should be for pybind11 out = out.replace(b'\\', b'\\\\\\\\') out = out.replace(b'"', b'\\"').decode('ascii') return str(out) @@ -104,6 +103,7 @@ def combine_descriptions(obj): description.append(dd) return utoascii('\n\n'.join(description)).strip() + def format_params(parameteritems): output = ['Args:'] template = ' {0} : {1}' @@ -111,10 +111,13 @@ def format_params(parameteritems): output.append(template.format(pi.name, pi.description)) return '\n'.join(output) + entry_templ = '%feature("docstring") {name} "{docstring}"' + + def make_entry(obj, name=None, templ="{description}", description=None, params=[]): """ - Create a docstring entry for a swig interface file. + Create a docstring key/value pair, where the key is the object name. obj - a doxyxml object from which documentation will be extracted. name - the name of the C object (defaults to obj.name()) @@ -124,7 +127,9 @@ def make_entry(obj, name=None, templ="{description}", description=None, params=[ used as the description instead of extracting it from obj. """ if name is None: - name=obj.name() + name = obj.name() + if hasattr(obj, '_parse_data') and hasattr(obj._parse_data, 'definition'): + name = obj._parse_data.definition.split(' ')[-1] if "operator " in name: return '' if description is None: @@ -133,56 +138,28 @@ def make_entry(obj, name=None, templ="{description}", description=None, params=[ description += '\n\n' description += utoascii(format_params(params)) docstring = templ.format(description=description) - if not docstring: - return '' - return entry_templ.format( - name=name, - docstring=docstring, - ) - - -def make_func_entry(func, name=None, description=None, params=None): - """ - Create a function docstring entry for a swig interface file. - func - a doxyxml object from which documentation will be extracted. - name - the name of the C object (defaults to func.name()) - description - if this optional variable is set then it's value is - used as the description instead of extracting it from func. - params - a parameter list that overrides using func.params. - """ - #if params is None: - # params = func.params - #params = [prm.declname for prm in params] - #if params: - # sig = "Params: (%s)" % ", ".join(params) - #else: - # sig = "Params: (NONE)" - #templ = "{description}\n\n" + sig - #return make_entry(func, name=name, templ=utoascii(templ), - # description=description) - return make_entry(func, name=name, description=description, params=params) + return {name: docstring} def make_class_entry(klass, description=None, ignored_methods=[], params=None): """ - Create a class docstring for a swig interface file. + Create a class docstring key/value pair. 
""" if params is None: params = klass.params - output = [] - output.append(make_entry(klass, description=description, params=params)) + output = {} + output.update(make_entry(klass, description=description, params=params)) for func in klass.in_category(DoxyFunction): if func.name() not in ignored_methods: name = klass.name() + '::' + func.name() - output.append(make_func_entry(func, name=name)) - return "\n\n".join(output) + output.update(make_entry(func, name=name)) + return output def make_block_entry(di, block): """ - Create class and function docstrings of a gnuradio block for a - swig interface file. + Create class and function docstrings of a gnuradio block """ descriptions = [] # Get the documentation associated with the class. @@ -207,48 +184,42 @@ def make_block_entry(di, block): super_description = "\n\n".join(descriptions) # Associate the combined description with the class and # the make function. - output = [] - output.append(make_class_entry(block, description=super_description)) - output.append(make_func_entry(make_func, description=super_description, - params=block.params)) - return "\n\n".join(output) + output = {} + output.update(make_class_entry(block, description=super_description)) + output.update(make_entry(make_func, description=super_description, + params=block.params)) + return output + def make_block2_entry(di, block): """ - Create class and function docstrings of a new style gnuradio block for a - swig interface file. + Create class and function docstrings of a new style gnuradio block """ - descriptions = [] # For new style blocks all the relevant documentation should be # associated with the 'make' method. class_description = combine_descriptions(block) make_func = block.get_member('make', DoxyFunction) make_description = combine_descriptions(make_func) - description = class_description + "\n\nConstructor Specific Documentation:\n\n" + make_description + description = class_description + \ + "\n\nConstructor Specific Documentation:\n\n" + make_description # Associate the combined description with the class and # the make function. - output = [] - output.append(make_class_entry( - block, description=description, - ignored_methods=['make'], params=make_func.params)) + output = {} + output.update(make_class_entry( + block, description=description, + ignored_methods=['make'], params=make_func.params)) makename = block.name() + '::make' - output.append(make_func_entry( - make_func, name=makename, description=description, - params=make_func.params)) - return "\n\n".join(output) + output.update(make_entry( + make_func, name=makename, description=description, + params=make_func.params)) + return output -def make_swig_interface_file(di, swigdocfilename, custom_output=None): - output = [""" -/* - * This file was automatically generated using swig_doc.py. - * - * Any changes to it will be lost next time it is regenerated. - */ -"""] +def get_docstrings_dict(di, custom_output=None): - if custom_output is not None: - output.append(custom_output) + output = {} + if custom_output: + output.update(custom_output) # Create docstrings for the blocks. blocks = di.in_category(Block) @@ -261,21 +232,23 @@ def make_swig_interface_file(di, swigdocfilename, custom_output=None): # Don't want to risk writing to output twice. 
if make_func.name() not in make_funcs: make_funcs.add(make_func.name()) - output.append(make_block_entry(di, block)) + output.update(make_block_entry(di, block)) except block.ParsingError: - sys.stderr.write('Parsing error for block {0}\n'.format(block.name())) + sys.stderr.write( + 'Parsing error for block {0}\n'.format(block.name())) raise for block in blocks2: try: make_func = block.get_member('make', DoxyFunction) - make_func_name = block.name() +'::make' + make_func_name = block.name() + '::make' # Don't want to risk writing to output twice. if make_func_name not in make_funcs: make_funcs.add(make_func_name) - output.append(make_block2_entry(di, block)) + output.update(make_block2_entry(di, block)) except block.ParsingError: - sys.stderr.write('Parsing error for block {0}\n'.format(block.name())) + sys.stderr.write( + 'Parsing error for block {0}\n'.format(block.name())) raise # Create docstrings for functions @@ -284,9 +257,10 @@ def make_swig_interface_file(di, swigdocfilename, custom_output=None): if f.name() not in make_funcs and not f.name().startswith('std::')] for f in funcs: try: - output.append(make_func_entry(f)) + output.update(make_entry(f)) except f.ParsingError: - sys.stderr.write('Parsing error for function {0}\n'.format(f.name())) + sys.stderr.write( + 'Parsing error for function {0}\n'.format(f.name())) # Create docstrings for classes block_names = [block.name() for block in blocks] @@ -295,37 +269,104 @@ def make_swig_interface_file(di, swigdocfilename, custom_output=None): if k.name() not in block_names and not k.name().startswith('std::')] for k in klasses: try: - output.append(make_class_entry(k)) + output.update(make_class_entry(k)) except k.ParsingError: sys.stderr.write('Parsing error for class {0}\n'.format(k.name())) # Docstrings are not created for anything that is not a function or a class. # If this excludes anything important please add it here. 
- output = "\n\n".join(output) + return output + + +def sub_docstring_in_pydoc_h(pydoc_files, docstrings_dict, output_dir, filter_str=None): + if filter_str: + docstrings_dict = { + k: v for k, v in docstrings_dict.items() if k.startswith(filter_str)} + + with open(os.path.join(output_dir, 'docstring_status'), 'w') as status_file: + + for pydoc_file in pydoc_files: + if filter_str: + filter_str2 = "::".join((filter_str, os.path.split( + pydoc_file)[-1].split('_pydoc_template.h')[0])) + docstrings_dict2 = { + k: v for k, v in docstrings_dict.items() if k.startswith(filter_str2)} + else: + docstrings_dict2 = docstrings_dict + + file_in = open(pydoc_file, 'r').read() + for key, value in docstrings_dict2.items(): + file_in_tmp = file_in + try: + doc_key = key.split("::") + # if 'gr' in doc_key: + # doc_key.remove('gr') + doc_key = '_'.join(doc_key) + regexp = r'(__doc_{} =\sR\"doc\()[^)]*(\)doc\")'.format( + doc_key) + regexp = re.compile(regexp, re.MULTILINE) + + (file_in, nsubs) = regexp.subn( + r'\1' + value + r'\2', file_in, count=1) + if nsubs == 1: + status_file.write("PASS: " + pydoc_file + "\n") + except KeyboardInterrupt: + raise KeyboardInterrupt + except: # be permissive, TODO log, but just leave the docstring blank + status_file.write("FAIL: " + pydoc_file + "\n") + file_in = file_in_tmp + + output_pathname = os.path.join(output_dir, os.path.basename( + pydoc_file).replace('_template.h', '.h')) + with open(output_pathname, 'w') as file_out: + file_out.write(file_in) + + +def copy_docstring_templates(pydoc_files, output_dir): + with open(os.path.join(output_dir, 'docstring_status'), 'w') as status_file: + for pydoc_file in pydoc_files: + file_in = open(pydoc_file, 'r').read() + output_pathname = os.path.join(output_dir, os.path.basename( + pydoc_file).replace('_template.h', '.h')) + with open(output_pathname, 'w') as file_out: + file_out.write(file_in) + status_file.write("DONE") + + +def argParse(): + """Parses commandline args.""" + desc = 'Scrape the doxygen generated xml for docstrings to insert into python bindings' + parser = ArgumentParser(description=desc) + + parser.add_argument("function", help="Operation to perform on docstrings", choices=[ + "scrape", "sub", "copy"]) + + parser.add_argument("--xml_path") + parser.add_argument("--bindings_dir") + parser.add_argument("--output_dir") + parser.add_argument("--json_path") + parser.add_argument("--filter", default=None) + + return parser.parse_args() - swig_doc = open(swigdocfilename, 'w') - swig_doc.write(output) - swig_doc.close() if __name__ == "__main__": # Parse command line options and set up doxyxml. - err_msg = "Execute using: python swig_doc.py xml_path outputfilename" - if len(sys.argv) != 3: - raise Exception(err_msg) - xml_path = sys.argv[1] - swigdocfilename = sys.argv[2] - di = DoxyIndex(xml_path) - - # gnuradio.gr.msq_queue.insert_tail and delete_head create errors unless docstrings are defined! - # This is presumably a bug in SWIG. - #msg_q = di.get_member(u'gr_msg_queue', DoxyClass) - #insert_tail = msg_q.get_member(u'insert_tail', DoxyFunction) - #delete_head = msg_q.get_member(u'delete_head', DoxyFunction) - output = [] - #output.append(make_func_entry(insert_tail, name='gr_py_msg_queue__insert_tail')) - #output.append(make_func_entry(delete_head, name='gr_py_msg_queue__delete_head')) - custom_output = "\n\n".join(output) - - # Generate the docstrings interface file. 
- make_swig_interface_file(di, swigdocfilename, custom_output=custom_output) + args = argParse() + if args.function.lower() == 'scrape': + di = DoxyIndex(args.xml_path) + docstrings_dict = get_docstrings_dict(di) + with open(args.json_path, 'w') as fp: + json.dump(docstrings_dict, fp) + elif args.function.lower() == 'sub': + with open(args.json_path, 'r') as fp: + docstrings_dict = json.load(fp) + pydoc_files = glob.glob(os.path.join( + args.bindings_dir, '*_pydoc_template.h')) + sub_docstring_in_pydoc_h( + pydoc_files, docstrings_dict, args.output_dir, args.filter) + elif args.function.lower() == 'copy': + pydoc_files = glob.glob(os.path.join( + args.bindings_dir, '*_pydoc_template.h')) + copy_docstring_templates(pydoc_files, args.output_dir) diff --git a/grc/CMakeLists.txt b/grc/CMakeLists.txt index f011b98..62fde76 100644 --- a/grc/CMakeLists.txt +++ b/grc/CMakeLists.txt @@ -1,21 +1,10 @@ # Copyright 2011 Free Software Foundation, Inc. # -# This file is part of GNU Radio +# This file was generated by gr_modtool, a tool from the GNU Radio framework +# This file is a part of gr-verilog # -# GNU Radio is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. +# SPDX-License-Identifier: GPL-3.0-or-later # -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. 
install(FILES grverilog_verilog_axi_xx.block.yml DESTINATION share/gnuradio/grc/blocks diff --git a/include/verilog/CMakeLists.txt b/include/gnuradio/verilog/CMakeLists.txt similarity index 100% rename from include/verilog/CMakeLists.txt rename to include/gnuradio/verilog/CMakeLists.txt diff --git a/include/verilog/Shared_lib.h b/include/gnuradio/verilog/Shared_lib.h similarity index 100% rename from include/verilog/Shared_lib.h rename to include/gnuradio/verilog/Shared_lib.h diff --git a/include/verilog/Shell_cmd.h b/include/gnuradio/verilog/Shell_cmd.h similarity index 100% rename from include/verilog/Shell_cmd.h rename to include/gnuradio/verilog/Shell_cmd.h diff --git a/include/verilog/api.h b/include/gnuradio/verilog/api.h similarity index 100% rename from include/verilog/api.h rename to include/gnuradio/verilog/api.h diff --git a/include/verilog/constants.h b/include/gnuradio/verilog/constants.h similarity index 100% rename from include/verilog/constants.h rename to include/gnuradio/verilog/constants.h diff --git a/include/verilog/gr_verilog_iotype.h b/include/gnuradio/verilog/gr_verilog_iotype.h similarity index 100% rename from include/verilog/gr_verilog_iotype.h rename to include/gnuradio/verilog/gr_verilog_iotype.h diff --git a/include/verilog/verilog_axi_bb.h b/include/gnuradio/verilog/verilog_axi_bb.h similarity index 100% rename from include/verilog/verilog_axi_bb.h rename to include/gnuradio/verilog/verilog_axi_bb.h diff --git a/include/verilog/verilog_axi_cc.h b/include/gnuradio/verilog/verilog_axi_cc.h similarity index 100% rename from include/verilog/verilog_axi_cc.h rename to include/gnuradio/verilog/verilog_axi_cc.h diff --git a/include/verilog/verilog_axi_ff.h b/include/gnuradio/verilog/verilog_axi_ff.h similarity index 100% rename from include/verilog/verilog_axi_ff.h rename to include/gnuradio/verilog/verilog_axi_ff.h diff --git a/include/verilog/verilog_axi_ii.h b/include/gnuradio/verilog/verilog_axi_ii.h similarity index 100% rename from include/verilog/verilog_axi_ii.h rename to include/gnuradio/verilog/verilog_axi_ii.h diff --git a/include/verilog/verilog_axi_ss.h b/include/gnuradio/verilog/verilog_axi_ss.h similarity index 100% rename from include/verilog/verilog_axi_ss.h rename to include/gnuradio/verilog/verilog_axi_ss.h diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt index 4893d81..ea451b2 100644 --- a/lib/CMakeLists.txt +++ b/lib/CMakeLists.txt @@ -1,30 +1,16 @@ # Copyright 2011,2012,2016,2018,2019 Free Software Foundation, Inc. # -# This file is part of GNU Radio +# This file was generated by gr_modtool, a tool from the GNU Radio framework +# This file is a part of gr-verilog # -# GNU Radio is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. +# SPDX-License-Identifier: GPL-3.0-or-later # -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. 
######################################################################## # Setup library ######################################################################## include(GrPlatform) #define LIB_SUFFIX -include_directories(${Boost_INCLUDE_DIR}) -link_directories(${Boost_LIBRARY_DIRS}) - list(APPEND verilog_sources ${CMAKE_CURRENT_BINARY_DIR}/constants.cc verilog_axi_ii_impl.cc @@ -44,7 +30,7 @@ if(NOT verilog_sources) endif(NOT verilog_sources) add_library(gnuradio-verilog SHARED ${verilog_sources}) -target_link_libraries(gnuradio-verilog ${Boost_LIBRARIES} gnuradio::gnuradio-runtime) +target_link_libraries(gnuradio-verilog gnuradio::gnuradio-runtime) target_include_directories(gnuradio-verilog PUBLIC $ PUBLIC $ @@ -69,30 +55,6 @@ GR_LIBRARY_FOO(gnuradio-verilog RUNTIME_COMPONENT "verilog_runtime" DEVEL_COMPON message(STATUS "Using install prefix: ${CMAKE_INSTALL_PREFIX}") message(STATUS "Building for version: ${VERSION} / ${LIBVER}") -######################################################################## -# Build and register unit test -######################################################################## -include(GrTest) - -include_directories(${CPPUNIT_INCLUDE_DIRS}) - -list(APPEND test_verilog_sources - ${CMAKE_CURRENT_SOURCE_DIR}/test_verilog.cc - ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog.cc -) - -add_executable(test-verilog ${test_verilog_sources}) - -target_link_libraries( - test-verilog - ${GNURADIO_RUNTIME_LIBRARIES} - ${Boost_LIBRARIES} - ${CPPUNIT_LIBRARIES} - gnuradio-verilog -) - -GR_ADD_TEST(test_verilog test-verilog) - ######################################################################## # Configure templates diff --git a/lib/qa_verilog.cc b/lib/qa_verilog.cc deleted file mode 100644 index d86c3de..0000000 --- a/lib/qa_verilog.cc +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2012 Free Software Foundation, Inc. - * - * This file is part of GNU Radio - * - * GNU Radio is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 3, or (at your option) - * any later version. - * - * GNU Radio is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with GNU Radio; see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, - * Boston, MA 02110-1301, USA. - */ - -/* - * This class gathers together all the test cases for the gr-filter - * directory into a single test suite. As you create new test cases, - * add them here. - */ - -#include "qa_verilog.h" - -CppUnit::TestSuite * -qa_verilog::suite() -{ - CppUnit::TestSuite *s = new CppUnit::TestSuite("verilog"); - - return s; -} diff --git a/lib/qa_verilog.h b/lib/qa_verilog.h deleted file mode 100644 index ce27264..0000000 --- a/lib/qa_verilog.h +++ /dev/null @@ -1,38 +0,0 @@ -/* -*- c++ -*- */ -/* - * Copyright 2012 Free Software Foundation, Inc. - * - * This file is part of GNU Radio - * - * GNU Radio is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 3, or (at your option) - * any later version. 
- * - * GNU Radio is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with GNU Radio; see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, - * Boston, MA 02110-1301, USA. - */ - -#ifndef _QA_VERILOG_H_ -#define _QA_VERILOG_H_ - -#include -#include - -//! collect all the tests for the gr-filter directory - -class __GR_ATTR_EXPORT qa_verilog -{ - public: - //! return suite of tests for all of gr-filter directory - static CppUnit::TestSuite *suite(); -}; - -#endif /* _QA_VERILOG_H_ */ diff --git a/lib/test_verilog.cc b/lib/test_verilog.cc deleted file mode 100644 index 638bda9..0000000 --- a/lib/test_verilog.cc +++ /dev/null @@ -1,48 +0,0 @@ -/* -*- c++ -*- */ -/* - * Copyright 2012 Free Software Foundation, Inc. - * - * This file is part of GNU Radio - * - * GNU Radio is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 3, or (at your option) - * any later version. - * - * GNU Radio is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with GNU Radio; see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, - * Boston, MA 02110-1301, USA. - */ - -#ifdef HAVE_CONFIG_H -#include "config.h" -#endif - -#include -#include - -#include -#include "qa_verilog.h" -#include -#include - -int -main (int argc, char **argv) -{ - CppUnit::TextTestRunner runner; - std::ofstream xmlfile(get_unittest_path("verilog.xml").c_str()); - CppUnit::XmlOutputter *xmlout = new CppUnit::XmlOutputter(&runner.result(), xmlfile); - - runner.addTest(qa_verilog::suite()); - runner.setOutputter(xmlout); - - bool was_successful = runner.run("", false); - - return was_successful ? 0 : 1; -} diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt deleted file mode 100644 index 7401fbe..0000000 --- a/python/CMakeLists.txt +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2011 Free Software Foundation, Inc. -# -# This file is part of GNU Radio -# -# GNU Radio is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. -# -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. 
- -######################################################################## -# Include python install macros -######################################################################## -include(GrPython) -if(NOT PYTHONINTERP_FOUND) - return() -endif() - -######################################################################## -# Install python sources -######################################################################## -GR_PYTHON_INSTALL( - FILES - __init__.py - DESTINATION ${GR_PYTHON_DIR}/verilog -) - -######################################################################## -# Handle the unit tests -######################################################################## -include(GrTest) - -set(GR_TEST_TARGET_DEPS gnuradio-verilog) -set(GR_TEST_PYTHON_DIRS ${CMAKE_BINARY_DIR}/swig) -GR_ADD_TEST(qa_verilog_axi_ii ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_ii.py) -GR_ADD_TEST(qa_verilog_axi_ff ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_ff.py) -GR_ADD_TEST(qa_verilog_axi_ss ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_ss.py) -GR_ADD_TEST(qa_verilog_axi_cc ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_cc.py) -GR_ADD_TEST(qa_verilog_axi_bb ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_ss.py) diff --git a/python/__init__.py b/python/__init__.py deleted file mode 100644 index cdbeadb..0000000 --- a/python/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# -# Copyright 2008,2009 Free Software Foundation, Inc. -# -# This application is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. -# -# This application is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# - -# The presence of this file turns this directory into a Python package - -''' -This is the GNU Radio VERILOG module. Place your Python package -description here (python/__init__.py). -''' -from __future__ import unicode_literals - -# import swig generated symbols into the verilog namespace -from .verilog_swig import * - -# import any pure python here -# diff --git a/python/__init__.pyc b/python/__init__.pyc deleted file mode 100644 index ffbfa26842c3512609fa533c2a14bc248f779608..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 361 zcmZWk%TB{E5L_qap`cdc#?dzdg80e-Aud%_36-i8sJK{BY!a)f9jzUzd>4EYf4~R8 z29P+gTF=gmyz)5y7$)zpZ>s`+Ylr4F9e+lk0G(pVFu%$n{bOUx%#%IL+=!|5!Id%i~?rE-OoAtC;v z(bTS#erw8bK*n^_I1FP%hbc+;1AjqMv>t`8a@3-%(BQ|7Tq8MJo diff --git a/python/build_utils.py b/python/build_utils.py deleted file mode 100644 index cf58a97..0000000 --- a/python/build_utils.py +++ /dev/null @@ -1,226 +0,0 @@ -# -# Copyright 2004,2009,2012 Free Software Foundation, Inc. -# -# This file is part of GNU Radio -# -# GNU Radio is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. 
-# -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. -# - -"""Misc utilities used at build time -""" - -import re, os, os.path -from build_utils_codes import * - - -# set srcdir to the directory that contains Makefile.am -try: - srcdir = os.environ['srcdir'] -except KeyError, e: - srcdir = "." -srcdir = srcdir + '/' - -# set do_makefile to either true or false dependeing on the environment -try: - if os.environ['do_makefile'] == '0': - do_makefile = False - else: - do_makefile = True -except KeyError, e: - do_makefile = False - -# set do_sources to either true or false dependeing on the environment -try: - if os.environ['do_sources'] == '0': - do_sources = False - else: - do_sources = True -except KeyError, e: - do_sources = True - -name_dict = {} - -def log_output_name (name): - (base, ext) = os.path.splitext (name) - ext = ext[1:] # drop the leading '.' - - entry = name_dict.setdefault (ext, []) - entry.append (name) - -def open_and_log_name (name, dir): - global do_sources - if do_sources: - f = open (name, dir) - else: - f = None - log_output_name (name) - return f - -def expand_template (d, template_filename, extra = ""): - '''Given a dictionary D and a TEMPLATE_FILENAME, expand template into output file - ''' - global do_sources - output_extension = extract_extension (template_filename) - template = open_src (template_filename, 'r') - output_name = d['NAME'] + extra + '.' + output_extension - log_output_name (output_name) - if do_sources: - output = open (output_name, 'w') - do_substitution (d, template, output) - output.close () - template.close () - -def output_glue (dirname): - output_makefile_fragment () - output_ifile_include (dirname) - -def output_makefile_fragment (): - global do_makefile - if not do_makefile: - return -# overwrite the source, which must be writable; this should have been -# checked for beforehand in the top-level Makefile.gen.gen . - f = open (os.path.join (os.environ.get('gendir', os.environ.get('srcdir', '.')), 'Makefile.gen'), 'w') - f.write ('#\n# This file is machine generated. All edits will be overwritten\n#\n') - output_subfrag (f, 'h') - output_subfrag (f, 'i') - output_subfrag (f, 'cc') - f.close () - -def output_ifile_include (dirname): - global do_sources - if do_sources: - f = open ('%s_generated.i' % (dirname,), 'w') - f.write ('//\n// This file is machine generated. All edits will be overwritten\n//\n') - files = name_dict.setdefault ('i', []) - files.sort () - f.write ('%{\n') - for file in files: - f.write ('#include <%s>\n' % (file[0:-1] + 'h',)) - f.write ('%}\n\n') - for file in files: - f.write ('%%include <%s>\n' % (file,)) - -def output_subfrag (f, ext): - files = name_dict.setdefault (ext, []) - files.sort () - f.write ("GENERATED_%s =" % (ext.upper ())) - for file in files: - f.write (" \\\n\t%s" % (file,)) - f.write ("\n\n") - -def extract_extension (template_name): - # template name is something like: GrFIRfilterXXX.h.t - # we return everything between the penultimate . 
and .t - mo = re.search (r'\.([a-z]+)\.t$', template_name) - if not mo: - raise ValueError, "Incorrectly formed template_name '%s'" % (template_name,) - return mo.group (1) - -def open_src (name, mode): - global srcdir - return open (os.path.join (srcdir, name), mode) - -def do_substitution (d, in_file, out_file): - def repl (match_obj): - key = match_obj.group (1) - # print key - return d[key] - - inp = in_file.read () - out = re.sub (r"@([a-zA-Z0-9_]+)@", repl, inp) - out_file.write (out) - - - -copyright = '''/* -*- c++ -*- */ -/* - * Copyright 2003,2004 Free Software Foundation, Inc. - * - * This file is part of GNU Radio - * - * GNU Radio is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 3, or (at your option) - * any later version. - * - * GNU Radio is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with GNU Radio; see the file COPYING. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, - * Boston, MA 02110-1301, USA. - */ -''' - -def is_complex (code3): - if i_code (code3) == 'c' or o_code (code3) == 'c': - return '1' - else: - return '0' - - -def standard_dict (name, code3, package='gr'): - d = {} - d['NAME'] = name - d['NAME_IMPL'] = name+'_impl' - d['GUARD_NAME'] = 'INCLUDED_%s_%s_H' % (package.upper(), name.upper()) - d['GUARD_NAME_IMPL'] = 'INCLUDED_%s_%s_IMPL_H' % (package.upper(), name.upper()) - d['BASE_NAME'] = re.sub ('^' + package + '_', '', name) - d['SPTR_NAME'] = '%s_sptr' % name - d['WARNING'] = 'WARNING: this file is machine generated. Edits will be overwritten' - d['COPYRIGHT'] = copyright - d['TYPE'] = i_type (code3) - d['I_TYPE'] = i_type (code3) - d['O_TYPE'] = o_type (code3) - d['TAP_TYPE'] = tap_type (code3) - d['IS_COMPLEX'] = is_complex (code3) - return d - - -def standard_dict2 (name, code3, package): - d = {} - d['NAME'] = name - d['BASE_NAME'] = name - d['GUARD_NAME'] = 'INCLUDED_%s_%s_H' % (package.upper(), name.upper()) - d['WARNING'] = 'WARNING: this file is machine generated. Edits will be overwritten' - d['COPYRIGHT'] = copyright - d['TYPE'] = i_type (code3) - d['I_TYPE'] = i_type (code3) - d['O_TYPE'] = o_type (code3) - d['TAP_TYPE'] = tap_type (code3) - d['IS_COMPLEX'] = is_complex (code3) - return d - -def standard_impl_dict2 (name, code3, package): - d = {} - d['NAME'] = name - d['IMPL_NAME'] = name - d['BASE_NAME'] = name.rstrip("impl").rstrip("_") - d['GUARD_NAME'] = 'INCLUDED_%s_%s_H' % (package.upper(), name.upper()) - d['WARNING'] = 'WARNING: this file is machine generated. 
Edits will be overwritten' - d['COPYRIGHT'] = copyright - d['FIR_TYPE'] = "fir_filter_" + code3 - d['CFIR_TYPE'] = "fir_filter_" + code3[0:2] + 'c' - d['TYPE'] = i_type (code3) - d['I_TYPE'] = i_type (code3) - d['O_TYPE'] = o_type (code3) - d['TAP_TYPE'] = tap_type (code3) - d['IS_COMPLEX'] = is_complex (code3) - return d diff --git a/python/build_utils.pyc b/python/build_utils.pyc deleted file mode 100644 index 68dff470a2f347976c8a576966bfa81ecfdfea93..0000000000000000000000000000000000000000 GIT binary patch [base85-encoded contents of the deleted build_utils.pyc omitted] diff --git a/python/build_utils_codes.py b/python/build_utils_codes.py deleted file mode
100644 index 9ea96ba..0000000 --- a/python/build_utils_codes.py +++ /dev/null @@ -1,52 +0,0 @@ -# -# Copyright 2004 Free Software Foundation, Inc. -# -# This file is part of GNU Radio -# -# GNU Radio is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. -# -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. -# - -def i_code (code3): - return code3[0] - -def o_code (code3): - if len (code3) >= 2: - return code3[1] - else: - return code3[0] - -def tap_code (code3): - if len (code3) >= 3: - return code3[2] - else: - return code3[0] - -def i_type (code3): - return char_to_type[i_code (code3)] - -def o_type (code3): - return char_to_type[o_code (code3)] - -def tap_type (code3): - return char_to_type[tap_code (code3)] - - -char_to_type = {} -char_to_type['s'] = 'short' -char_to_type['i'] = 'int' -char_to_type['f'] = 'float' -char_to_type['c'] = 'gr_complex' -char_to_type['b'] = 'unsigned char' diff --git a/python/build_utils_codes.pyc b/python/build_utils_codes.pyc deleted file mode 100644 index f9a9434c39e1f733c4a34a96c1ea05ee7f981a12..0000000000000000000000000000000000000000 GIT binary patch [base85-encoded contents of the deleted build_utils_codes.pyc omitted] -# -# This is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. -# - -from gnuradio import gr, gr_unittest -from gnuradio import blocks -import verilog_swig as verilog -import os - -class qa_verilog_axi_ss (gr_unittest.TestCase): - - def setUp (self): - self.tb = gr.top_block () - - def tearDown (self): - self.tb = None - - def test_001_t (self): - # set up fg - src_data = (1, 3, 5, 9, 10, 12, 17, 19, 21, 12, 45, 29) - expected_result = (1, 3, 5, 9, 10, 12, 17, 19, 21, 12, 45, 29) - src = blocks.vector_source_s(src_data) - path = os.path.dirname(__file__) if len(os.path.dirname(__file__)) != 0 else '.'
- vl = verilog.verilog_axi_bb(path + "/testcases/passthru/saxi_passthru.v", True, 1.0, "", 0, 0) - dst = blocks.vector_sink_s() - - self.tb.connect(src, vl) - self.tb.connect(vl, dst) - self.tb.run() - # check data - result_data = dst.data() - print (expected_result) - print (result_data) - self.assertFloatTuplesAlmostEqual(expected_result, result_data, 12) - - def test_002_t (self): - # set up fg - src_data = (1, 3, 5, 9, 10, 12, 17, 19, 21) - expected_result = (2, 6, 10, 18, 20, 24, 34, 38, 42) - src = blocks.vector_source_s(src_data) - path = os.path.dirname(__file__) if len(os.path.dirname(__file__)) != 0 else '.' - vl = verilog.verilog_axi_bb(path + "/testcases/double/double_axi.v", True, 1.0, "", 0, 0) - dst = blocks.vector_sink_s() - - self.tb.connect(src, vl) - self.tb.connect(vl, dst) - self.tb.run() - # check data - result_data = dst.data() - print (expected_result) - print (result_data) - self.assertFloatTuplesAlmostEqual(expected_result, result_data, 9) - - -if __name__ == '__main__': - gr_unittest.run(qa_verilog_axi_ss, "qa_verilog_axi_ss.xml") diff --git a/python/qa_verilog_axi_cc.py b/python/qa_verilog_axi_cc.py deleted file mode 100755 index 9c27f38..0000000 --- a/python/qa_verilog_axi_cc.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright 2019 <+YOU OR YOUR COMPANY+>. -# -# This is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. -# - -from gnuradio import gr, gr_unittest -from gnuradio import blocks -import verilog_swig as verilog - -class qa_verilog_axi_cc (gr_unittest.TestCase): - - def setUp (self): - self.tb = gr.top_block () - - def tearDown (self): - self.tb = None - - def test_001_t (self): - # set up fg - self.tb.run () - # check data - - -if __name__ == '__main__': - gr_unittest.run(qa_verilog_axi_cc, "qa_verilog_axi_cc.xml") diff --git a/python/qa_verilog_axi_ff.py b/python/qa_verilog_axi_ff.py deleted file mode 100755 index fce0904..0000000 --- a/python/qa_verilog_axi_ff.py +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright 2019 <+YOU OR YOUR COMPANY+>. -# -# This is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. 
-# - -from gnuradio import gr, gr_unittest -from gnuradio import blocks -import verilog_swig as verilog -import os - -class qa_verilog_axi_ff (gr_unittest.TestCase): - - def setUp (self): - self.tb = gr.top_block () - - def tearDown (self): - self.tb = None - - def test_001_t (self): - # set up fg - src_data = (1.2, 3.8, 5.7, 9.4, 10.2, 12.4, 17.5, 19.2, 21.4, 12.3, 45.5, 29.3) - expected_result = (1.2, 3.8, 5.7, 9.4, 10.2, 12.4, 17.5, 19.2, 21.4, 12.3, 45.5, 29.3) - src = blocks.vector_source_f(src_data) - path = os.path.dirname(__file__) if len(os.path.dirname(__file__)) != 0 else '.' - vl = verilog.verilog_axi_ff(path + "/testcases/passthru/saxi_passthru.v", True, 1.0, "", 0, 0) - dst = blocks.vector_sink_f() - - self.tb.connect(src, vl) - self.tb.connect(vl, dst) - self.tb.run() - # check data - - - result_data = dst.data() - print (expected_result) - print (result_data) - def round_f(x): - return round(x, 3) - round_result_data = tuple(map(round_f, result_data)) - print (round_result_data) - self.assertFloatTuplesAlmostEqual(expected_result, round_result_data, 12) - - def test_002_t (self): - # set up fg - src_data = (1.2, 3.8, 5.7, 9.4, 10.2, 12.4, 17.5, 19.2, 21.4) - expected_result = (2.4, 7.6, 11.4, 18.8, 20.4, 24.8, 35.0, 38.4, 42.8) - src = blocks.vector_source_f(src_data) - path = os.path.dirname(__file__) if len(os.path.dirname(__file__)) != 0 else '.' - vl = verilog.verilog_axi_ff(path + "/testcases/double/double_axi.v", True, 1.0, "", 0, 0) - dst = blocks.vector_sink_f() - - self.tb.connect(src, vl) - self.tb.connect(vl, dst) - self.tb.run() - # check data - result_data = dst.data() - print (expected_result) - print (result_data) - def round_f(x): - return round(x, 3) - round_result_data = tuple(map(round_f, result_data)) - print (round_result_data) - self.assertFloatTuplesAlmostEqual(expected_result, round_result_data, 9) - -if __name__ == '__main__': - gr_unittest.run(qa_verilog_axi_ff, "qa_verilog_axi_ff.xml") diff --git a/python/qa_verilog_axi_ii.py b/python/qa_verilog_axi_ii.py deleted file mode 100755 index b18c928..0000000 --- a/python/qa_verilog_axi_ii.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright 2019 <+YOU OR YOUR COMPANY+>. -# -# This is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. -# - -from gnuradio import gr, gr_unittest -from gnuradio import blocks -import verilog_swig as verilog -import os - -class qa_verilog_axi_ii (gr_unittest.TestCase): - - def setUp (self): - self.tb = gr.top_block () - - def tearDown (self): - self.tb = None - - def test_001_t (self): - # set up fg - src_data = (1, 3, 5, 9, 10, 12, 17, 19, 21, 12, 45, 29) - expected_result = (1, 3, 5, 9, 10, 12, 17, 19, 21, 12, 45, 29) - src = blocks.vector_source_i(src_data) - path = os.path.dirname(__file__) if len(os.path.dirname(__file__)) != 0 else '.' 
- vl = verilog.verilog_axi_ii(path + "/testcases/passthru/saxi_passthru.v", True, 1.0, "", 0, 0) - dst = blocks.vector_sink_i() - - self.tb.connect(src, vl) - self.tb.connect(vl, dst) - self.tb.run() - # check data - result_data = dst.data() - print (expected_result) - print (result_data) - self.assertFloatTuplesAlmostEqual(expected_result, result_data, 12) - - def test_002_t (self): - # set up fg - src_data = (1, 3, 5, 9, 10, 12, 17, 19, 21) - expected_result = (2, 6, 10, 18, 20, 24, 34, 38, 42) - src = blocks.vector_source_i(src_data) - path = os.path.dirname(__file__) if len(os.path.dirname(__file__)) != 0 else '.' - vl = verilog.verilog_axi_ii(path + "/testcases/double/double_axi.v", True, 1.0, "", 0, 0) - dst = blocks.vector_sink_i() - - self.tb.connect(src, vl) - self.tb.connect(vl, dst) - self.tb.run() - # check data - result_data = dst.data() - print (expected_result) - print (result_data) - self.assertFloatTuplesAlmostEqual(expected_result, result_data, 9) - - -if __name__ == '__main__': - gr_unittest.run(qa_verilog_axi_ii, "qa_verilog_axi_ii.xml") diff --git a/python/qa_verilog_axi_ss.py b/python/qa_verilog_axi_ss.py deleted file mode 100755 index 8ef667c..0000000 --- a/python/qa_verilog_axi_ss.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright 2019 <+YOU OR YOUR COMPANY+>. -# -# This is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. -# - -from gnuradio import gr, gr_unittest -from gnuradio import blocks -import verilog_swig as verilog -import os - -class qa_verilog_axi_ss (gr_unittest.TestCase): - - def setUp (self): - self.tb = gr.top_block () - - def tearDown (self): - self.tb = None - - def test_001_t (self): - # set up fg - src_data = (1, 3, 5, 9, 10, 12, 17, 19, 21, 12, 45, 29) - expected_result = (1, 3, 5, 9, 10, 12, 17, 19, 21, 12, 45, 29) - src = blocks.vector_source_s(src_data) - path = os.path.dirname(__file__) if len(os.path.dirname(__file__)) != 0 else '.' - vl = verilog.verilog_axi_ss(path + "/testcases/passthru/saxi_passthru.v", True, 1.0, "", 0, 0) - dst = blocks.vector_sink_s() - - self.tb.connect(src, vl) - self.tb.connect(vl, dst) - self.tb.run() - # check data - result_data = dst.data() - print (expected_result) - print (result_data) - self.assertFloatTuplesAlmostEqual(expected_result, result_data, 12) - - def test_002_t (self): - # set up fg - src_data = (1, 3, 5, 9, 10, 12, 17, 19, 21) - expected_result = (2, 6, 10, 18, 20, 24, 34, 38, 42) - src = blocks.vector_source_s(src_data) - path = os.path.dirname(__file__) if len(os.path.dirname(__file__)) != 0 else '.' 
- vl = verilog.verilog_axi_ss(path + "/testcases/double/double_axi.v", True, 1.0, "", 0, 0) - dst = blocks.vector_sink_s() - - self.tb.connect(src, vl) - self.tb.connect(vl, dst) - self.tb.run() - # check data - result_data = dst.data() - print (expected_result) - print (result_data) - self.assertFloatTuplesAlmostEqual(expected_result, result_data, 9) - - -if __name__ == '__main__': - gr_unittest.run(qa_verilog_axi_ss, "qa_verilog_axi_ss.xml") diff --git a/python/verilog/.gitignore b/python/verilog/.gitignore new file mode 100644 index 0000000..85c92e8 --- /dev/null +++ b/python/verilog/.gitignore @@ -0,0 +1,5 @@ +*~ +*.pyc +*.pyo +build*/ +examples/grc/*.py diff --git a/python/verilog/CMakeLists.txt b/python/verilog/CMakeLists.txt new file mode 100644 index 0000000..ea29eb7 --- /dev/null +++ b/python/verilog/CMakeLists.txt @@ -0,0 +1,41 @@ +# Copyright 2011 Free Software Foundation, Inc. +# +# This file was generated by gr_modtool, a tool from the GNU Radio framework +# This file is a part of gr-verilog +# +# SPDX-License-Identifier: GPL-3.0-or-later +# + +######################################################################## +# Include python install macros +######################################################################## +include(GrPython) +if(NOT PYTHONINTERP_FOUND) + return() +endif() + +add_subdirectory(bindings) + +######################################################################## +# Install python sources +######################################################################## +GR_PYTHON_INSTALL( + FILES + __init__.py + DESTINATION ${GR_PYTHON_DIR}/gnuradio/verilog +) + +######################################################################## +# Handle the unit tests +######################################################################## +include(GrTest) + +set(GR_TEST_TARGET_DEPS gnuradio-verilog) + +# Create a package directory that tests can import. It includes everything +# from `python/`. +add_custom_target( + copy_module_for_tests ALL + COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_BINARY_DIR}/test_modules/gnuradio/verilog/ +) diff --git a/python/verilog/__init__.py b/python/verilog/__init__.py new file mode 100644 index 0000000..89f4275 --- /dev/null +++ b/python/verilog/__init__.py @@ -0,0 +1,23 @@ +# +# Copyright 2008,2009 Free Software Foundation, Inc. +# +# SPDX-License-Identifier: GPL-3.0-or-later +# + +# The presence of this file turns this directory into a Python package + +''' +This is the GNU Radio VERILOG module. Place your Python package +description here (python/__init__.py). +''' +import os + +# import pybind11 generated symbols into the verilog namespace +try: + # this might fail if the module is python-only + from .verilog_python import * +except ModuleNotFoundError: + pass + +# import any pure python here +# diff --git a/python/verilog/bindings/CMakeLists.txt b/python/verilog/bindings/CMakeLists.txt new file mode 100644 index 0000000..61c7d02 --- /dev/null +++ b/python/verilog/bindings/CMakeLists.txt @@ -0,0 +1,47 @@ +# Copyright 2020 Free Software Foundation, Inc. +# +# This file is part of GNU Radio +# +# SPDX-License-Identifier: GPL-3.0-or-later +# + +######################################################################## +# Check if there is C++ code at all +######################################################################## +if(NOT verilog_sources) + MESSAGE(STATUS "No C++ sources... 
skipping python bindings") + return() +endif(NOT verilog_sources) + +######################################################################## +# Check for pygccxml +######################################################################## +GR_PYTHON_CHECK_MODULE_RAW( + "pygccxml" + "import pygccxml" + PYGCCXML_FOUND + ) + +include(GrPybind) + +######################################################################## +# Python Bindings +######################################################################## + +list(APPEND verilog_python_files + python_bindings.cc) + +GR_PYBIND_MAKE_OOT(verilog + ../../.. + gr::verilog + "${verilog_python_files}") + +# copy in bindings .so file for use in QA test module +add_custom_target( + copy_bindings_for_tests ALL + COMMAND + ${CMAKE_COMMAND} -E copy "${CMAKE_CURRENT_BINARY_DIR}/*.so" + ${CMAKE_BINARY_DIR}/test_modules/gnuradio/verilog/ + DEPENDS verilog_python) + +install(TARGETS verilog_python DESTINATION ${GR_PYTHON_DIR}/gnuradio/verilog COMPONENT pythonapi) diff --git a/python/verilog/bindings/README.md b/python/verilog/bindings/README.md new file mode 100644 index 0000000..e69de29 diff --git a/python/verilog/bindings/bind_oot_file.py b/python/verilog/bindings/bind_oot_file.py new file mode 100644 index 0000000..5bc3ff6 --- /dev/null +++ b/python/verilog/bindings/bind_oot_file.py @@ -0,0 +1,58 @@ +import warnings +import argparse +import os +from gnuradio.bindtool import BindingGenerator +import pathlib +import sys +import tempfile + +parser = argparse.ArgumentParser(description='Bind a GR Out of Tree Block') +parser.add_argument('--module', type=str, + help='Name of gr module containing file to bind (e.g. fft digital analog)') + +parser.add_argument('--output_dir', default=tempfile.gettempdir(), + help='Output directory of generated bindings') +parser.add_argument('--prefix', help='Prefix of Installed GNU Radio') +parser.add_argument('--src', help='Directory of gnuradio source tree', + default=os.path.dirname(os.path.abspath(__file__)) + '/../../..') + +parser.add_argument( + '--filename', help="File to be parsed") + +parser.add_argument( + '--defines', help='Set additional defines for precompiler', default=(), nargs='*') +parser.add_argument( + '--include', help='Additional Include Dirs, separated', default=(), nargs='*') + +parser.add_argument( + '--status', help='Location of output file for general status (used during cmake)', default=None +) +parser.add_argument( + '--flag_automatic', default='0' +) +parser.add_argument( + '--flag_pygccxml', default='0' +) + +args = parser.parse_args() + +prefix = args.prefix +output_dir = args.output_dir +defines = tuple(','.join(args.defines).split(',')) +includes = ','.join(args.include) +name = args.module + +namespace = ['gr', name] +prefix_include_root = name + + +with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=DeprecationWarning) + + bg = BindingGenerator(prefix, namespace, + prefix_include_root, output_dir, define_symbols=defines, addl_includes=includes, + catch_exceptions=False, write_json_output=False, status_output=args.status, + flag_automatic=True if args.flag_automatic.lower() in [ + '1', 'true'] else False, + flag_pygccxml=True if args.flag_pygccxml.lower() in ['1', 'true'] else False) + bg.gen_file_binding(args.filename) diff --git a/python/verilog/bindings/docstrings/README.md b/python/verilog/bindings/docstrings/README.md new file mode 100644 index 0000000..a506c22 --- /dev/null +++ b/python/verilog/bindings/docstrings/README.md @@ -0,0 +1 @@ +This directory stores 
templates for docstrings that are scraped from the include header files for each block diff --git a/python/verilog/bindings/header_utils.py b/python/verilog/bindings/header_utils.py new file mode 100644 index 0000000..7c26fe0 --- /dev/null +++ b/python/verilog/bindings/header_utils.py @@ -0,0 +1,80 @@ +# Utilities for reading values in header files + +from argparse import ArgumentParser +import re + + +class PybindHeaderParser: + def __init__(self, pathname): + with open(pathname, 'r') as f: + self.file_txt = f.read() + + def get_flag_automatic(self): + # p = re.compile(r'BINDTOOL_GEN_AUTOMATIC\(([^\s])\)') + # m = p.search(self.file_txt) + m = re.search(r'BINDTOOL_GEN_AUTOMATIC\(([^\s])\)', self.file_txt) + if (m and m.group(1) == '1'): + return True + else: + return False + + def get_flag_pygccxml(self): + # p = re.compile(r'BINDTOOL_USE_PYGCCXML\(([^\s])\)') + # m = p.search(self.file_txt) + m = re.search(r'BINDTOOL_USE_PYGCCXML\(([^\s])\)', self.file_txt) + if (m and m.group(1) == '1'): + return True + else: + return False + + def get_header_filename(self): + # p = re.compile(r'BINDTOOL_HEADER_FILE\(([^\s]*)\)') + # m = p.search(self.file_txt) + m = re.search(r'BINDTOOL_HEADER_FILE\(([^\s]*)\)', self.file_txt) + if (m): + return m.group(1) + else: + return None + + def get_header_file_hash(self): + # p = re.compile(r'BINDTOOL_HEADER_FILE_HASH\(([^\s]*)\)') + # m = p.search(self.file_txt) + m = re.search(r'BINDTOOL_HEADER_FILE_HASH\(([^\s]*)\)', self.file_txt) + if (m): + return m.group(1) + else: + return None + + def get_flags(self): + return f'{self.get_flag_automatic()};{self.get_flag_pygccxml()};{self.get_header_filename()};{self.get_header_file_hash()};' + + +def argParse(): + """Parses commandline args.""" + desc = 'Reads the parameters from the comment block in the pybind files' + parser = ArgumentParser(description=desc) + + parser.add_argument("function", help="Operation to perform on comment block of pybind file", choices=[ + "flag_auto", "flag_pygccxml", "header_filename", "header_file_hash", "all"]) + parser.add_argument( + "pathname", help="Pathname of pybind c++ file to read, e.g. blockname_python.cc") + + return parser.parse_args() + + +if __name__ == "__main__": + # Parse command line options and set up doxyxml. + args = argParse() + + pbhp = PybindHeaderParser(args.pathname) + + if args.function == "flag_auto": + print(pbhp.get_flag_automatic()) + elif args.function == "flag_pygccxml": + print(pbhp.get_flag_pygccxml()) + elif args.function == "header_filename": + print(pbhp.get_header_filename()) + elif args.function == "header_file_hash": + print(pbhp.get_header_file_hash()) + elif args.function == "all": + print(pbhp.get_flags()) diff --git a/python/verilog/bindings/python_bindings.cc b/python/verilog/bindings/python_bindings.cc new file mode 100644 index 0000000..df5b39a --- /dev/null +++ b/python/verilog/bindings/python_bindings.cc @@ -0,0 +1,53 @@ +/* + * Copyright 2020 Free Software Foundation, Inc. 
+ * + * This file is part of GNU Radio + * + * SPDX-License-Identifier: GPL-3.0-or-later + * + */ + +#include + +#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION +#include + +namespace py = pybind11; + +// Headers for binding functions +/**************************************/ +// The following comment block is used for +// gr_modtool to insert function prototypes +// Please do not delete +/**************************************/ +// BINDING_FUNCTION_PROTOTYPES( +// ) END BINDING_FUNCTION_PROTOTYPES + + +// We need this hack because import_array() returns NULL +// for newer Python versions. +// This function is also necessary because it ensures access to the C API +// and removes a warning. +void* init_numpy() +{ + import_array(); + return NULL; +} + +PYBIND11_MODULE(verilog_python, m) +{ + // Initialize the numpy C API + // (otherwise we will see segmentation faults) + init_numpy(); + + // Allow access to base block methods + py::module::import("gnuradio.gr"); + + /**************************************/ + // The following comment block is used for + // gr_modtool to insert binding function calls + // Please do not delete + /**************************************/ + // BINDING_FUNCTION_CALLS( + // ) END BINDING_FUNCTION_CALLS +} diff --git a/swig/CMakeLists.txt b/swig/CMakeLists.txt deleted file mode 100644 index c4c2909..0000000 --- a/swig/CMakeLists.txt +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2011 Free Software Foundation, Inc. -# -# This file is part of GNU Radio -# -# GNU Radio is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3, or (at your option) -# any later version. -# -# GNU Radio is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Radio; see the file COPYING. If not, write to -# the Free Software Foundation, Inc., 51 Franklin Street, -# Boston, MA 02110-1301, USA. - -######################################################################## -# Check if there is C++ code at all -######################################################################## -if(NOT verilog_sources) - MESSAGE(STATUS "No C++ sources... 
skipping swig/") - return() -endif(NOT verilog_sources) - -######################################################################## -# Include swig generation macros -######################################################################## -find_package(SWIG) -find_package(PythonLibs) -if(NOT SWIG_FOUND OR NOT PYTHONLIBS_FOUND) - return() -endif() -include(GrSwig) -include(GrPython) - -######################################################################## -# Setup swig generation -######################################################################## -set(GR_SWIG_INCLUDE_DIRS $) -set(GR_SWIG_TARGET_DEPS gnuradio::runtime_swig) - -set(GR_SWIG_LIBRARIES gnuradio-verilog) - -set(GR_SWIG_DOC_FILE ${CMAKE_CURRENT_BINARY_DIR}/verilog_swig_doc.i) -set(GR_SWIG_DOC_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/../include) - -GR_SWIG_MAKE(verilog_swig verilog_swig.i) - -######################################################################## -# Install the build swig module -######################################################################## -GR_SWIG_INSTALL(TARGETS verilog_swig DESTINATION ${GR_PYTHON_DIR}/verilog) - -######################################################################## -# Install swig .i files for development -######################################################################## -install( - FILES - verilog_swig.i - ${CMAKE_CURRENT_BINARY_DIR}/verilog_swig_doc.i - DESTINATION ${GR_INCLUDE_DIR}/verilog/swig -) diff --git a/swig/verilog_swig.i b/swig/verilog_swig.i deleted file mode 100644 index d086329..0000000 --- a/swig/verilog_swig.i +++ /dev/null @@ -1,28 +0,0 @@ -/* -*- c++ -*- */ - -#define VERILOG_API - -%include "gnuradio.i" // the common stuff - -//load generated python docstrings -%include "verilog_swig_doc.i" - -%{ -#include "verilog/verilog_axi_ii.h" -#include "verilog/verilog_axi_ff.h" -#include "verilog/verilog_axi_ss.h" -#include "verilog/verilog_axi_bb.h" -#include "verilog/verilog_axi_cc.h" -%} - - -%include "verilog/verilog_axi_ii.h" -GR_SWIG_BLOCK_MAGIC2(verilog, verilog_axi_ii); -%include "verilog/verilog_axi_ff.h" -GR_SWIG_BLOCK_MAGIC2(verilog, verilog_axi_ff); -%include "verilog/verilog_axi_ss.h" -GR_SWIG_BLOCK_MAGIC2(verilog, verilog_axi_ss); -%include "verilog/verilog_axi_bb.h" -GR_SWIG_BLOCK_MAGIC2(verilog, verilog_axi_bb); -%include "verilog/verilog_axi_cc.h" -GR_SWIG_BLOCK_MAGIC2(verilog, verilog_axi_cc); From f8ac34dd70084cc1baa7e0bf6dbf6ac29fc620c4 Mon Sep 17 00:00:00 2001 From: Ferdinand Stehle Date: Wed, 8 Mar 2023 15:20:28 +0100 Subject: [PATCH 02/13] adapt some paths --- grc/grverilog_verilog_axi_xx.block.yml | 2 +- include/gnuradio/verilog/constants.h | 2 +- include/gnuradio/verilog/verilog_axi_bb.h | 2 +- include/gnuradio/verilog/verilog_axi_cc.h | 2 +- include/gnuradio/verilog/verilog_axi_ff.h | 2 +- include/gnuradio/verilog/verilog_axi_ii.h | 2 +- include/gnuradio/verilog/verilog_axi_ss.h | 2 +- lib/Shared_lib.cpp | 2 +- lib/Shell_cmd.cpp | 2 +- lib/constants.cc.in | 2 +- lib/verilog_axi_bb_impl.cc | 6 +++--- lib/verilog_axi_bb_impl.h | 4 ++-- lib/verilog_axi_cc_impl.cc | 6 +++--- lib/verilog_axi_cc_impl.h | 4 ++-- lib/verilog_axi_ff_impl.cc | 6 +++--- lib/verilog_axi_ff_impl.h | 4 ++-- lib/verilog_axi_ii_impl.cc | 6 +++--- lib/verilog_axi_ii_impl.h | 4 ++-- lib/verilog_axi_ss_impl.cc | 6 +++--- lib/verilog_axi_ss_impl.h | 4 ++-- 20 files changed, 35 insertions(+), 35 deletions(-) diff --git a/grc/grverilog_verilog_axi_xx.block.yml b/grc/grverilog_verilog_axi_xx.block.yml index b53c389..af8702e 100644 --- 
a/grc/grverilog_verilog_axi_xx.block.yml +++ b/grc/grverilog_verilog_axi_xx.block.yml @@ -3,7 +3,7 @@ label: Verilog_AXI category: '[GrVerilog]' templates: - imports: import verilog + imports: from gnuradio import verilog make: verilog.verilog_axi_${type.fcn}(${file}, ${overwrite}, ${IO_ratio}, ${verilator_options}, ${module_flag}, ${skip_output_items}) diff --git a/include/gnuradio/verilog/constants.h b/include/gnuradio/verilog/constants.h index 8989101..0082854 100644 --- a/include/gnuradio/verilog/constants.h +++ b/include/gnuradio/verilog/constants.h @@ -22,7 +22,7 @@ #ifndef INCLUDED_GR_VERILOG_CONSTANTS_H #define INCLUDED_GR_VERILOG_CONSTANTS_H -#include +#include #include namespace gr { diff --git a/include/gnuradio/verilog/verilog_axi_bb.h b/include/gnuradio/verilog/verilog_axi_bb.h index 22d9203..864f00a 100644 --- a/include/gnuradio/verilog/verilog_axi_bb.h +++ b/include/gnuradio/verilog/verilog_axi_bb.h @@ -22,7 +22,7 @@ #ifndef INCLUDED_VERILOG_VERILOG_AXI_BB_H #define INCLUDED_VERILOG_VERILOG_AXI_BB_H -#include +#include #include namespace gr { diff --git a/include/gnuradio/verilog/verilog_axi_cc.h b/include/gnuradio/verilog/verilog_axi_cc.h index ef26fd5..13a69e9 100644 --- a/include/gnuradio/verilog/verilog_axi_cc.h +++ b/include/gnuradio/verilog/verilog_axi_cc.h @@ -22,7 +22,7 @@ #ifndef INCLUDED_VERILOG_VERILOG_AXI_CC_H #define INCLUDED_VERILOG_VERILOG_AXI_CC_H -#include +#include #include namespace gr { diff --git a/include/gnuradio/verilog/verilog_axi_ff.h b/include/gnuradio/verilog/verilog_axi_ff.h index 275a695..f6195c6 100644 --- a/include/gnuradio/verilog/verilog_axi_ff.h +++ b/include/gnuradio/verilog/verilog_axi_ff.h @@ -22,7 +22,7 @@ #ifndef INCLUDED_VERILOG_VERILOG_AXI_FF_H #define INCLUDED_VERILOG_VERILOG_AXI_FF_H -#include +#include #include namespace gr { diff --git a/include/gnuradio/verilog/verilog_axi_ii.h b/include/gnuradio/verilog/verilog_axi_ii.h index 68d0998..6244ef9 100644 --- a/include/gnuradio/verilog/verilog_axi_ii.h +++ b/include/gnuradio/verilog/verilog_axi_ii.h @@ -22,7 +22,7 @@ #ifndef INCLUDED_VERILOG_VERILOG_AXI_II_H #define INCLUDED_VERILOG_VERILOG_AXI_II_H -#include +#include #include namespace gr { diff --git a/include/gnuradio/verilog/verilog_axi_ss.h b/include/gnuradio/verilog/verilog_axi_ss.h index 7e259f2..2e9fe16 100644 --- a/include/gnuradio/verilog/verilog_axi_ss.h +++ b/include/gnuradio/verilog/verilog_axi_ss.h @@ -22,7 +22,7 @@ #ifndef INCLUDED_VERILOG_VERILOG_AXI_SS_H #define INCLUDED_VERILOG_VERILOG_AXI_SS_H -#include +#include #include namespace gr { diff --git a/lib/Shared_lib.cpp b/lib/Shared_lib.cpp index dd64e27..6c55e7a 100644 --- a/lib/Shared_lib.cpp +++ b/lib/Shared_lib.cpp @@ -22,7 +22,7 @@ #include #include #include -#include "verilog/Shared_lib.h" +#include "gnuradio/verilog/Shared_lib.h" #define SLASH '/' #define _EXIT_FAILURE -1 diff --git a/lib/Shell_cmd.cpp b/lib/Shell_cmd.cpp index 28aa3e9..27f5b4f 100644 --- a/lib/Shell_cmd.cpp +++ b/lib/Shell_cmd.cpp @@ -23,7 +23,7 @@ #include #include #include -#include "verilog/Shell_cmd.h" +#include "gnuradio/verilog/Shell_cmd.h" #define BUFFER_SIZE 1024 #define _EXIT_FAILURE -1 diff --git a/lib/constants.cc.in b/lib/constants.cc.in index 98d8374..3af46fa 100644 --- a/lib/constants.cc.in +++ b/lib/constants.cc.in @@ -18,7 +18,7 @@ * Boston, MA 02110-1301, USA. 
*/ -#include "verilog/constants.h" +#include "gnuradio/verilog/constants.h" #include namespace gr { diff --git a/lib/verilog_axi_bb_impl.cc b/lib/verilog_axi_bb_impl.cc index e3e6a98..3372e4a 100644 --- a/lib/verilog_axi_bb_impl.cc +++ b/lib/verilog_axi_bb_impl.cc @@ -30,10 +30,10 @@ #include -#include "verilog/constants.h" +#include "gnuradio/verilog/constants.h" -#include "verilog/Shell_cmd.h" -#include "verilog/Shared_lib.h" +#include "gnuradio/verilog/Shell_cmd.h" +#include "gnuradio/verilog/Shared_lib.h" #define AXI_MODULE_CL_MAKEFILE "axi_module_cl.mk" #define CPP_TEMPLATE_NAME "axi_module.cpp" diff --git a/lib/verilog_axi_bb_impl.h b/lib/verilog_axi_bb_impl.h index 0cda82e..add1b69 100644 --- a/lib/verilog_axi_bb_impl.h +++ b/lib/verilog_axi_bb_impl.h @@ -21,10 +21,10 @@ #ifndef INCLUDED_VERILOG_VERILOG_AXI_SS_IMPL_H #define INCLUDED_VERILOG_VERILOG_AXI_SS_IMPL_H -#include +#include #include -#include "verilog/Shared_lib.h" +#include "gnuradio/verilog/Shared_lib.h" #define SLASH "/" diff --git a/lib/verilog_axi_cc_impl.cc b/lib/verilog_axi_cc_impl.cc index 009d9b4..48f445d 100644 --- a/lib/verilog_axi_cc_impl.cc +++ b/lib/verilog_axi_cc_impl.cc @@ -30,10 +30,10 @@ #include -#include "verilog/constants.h" +#include "gnuradio/verilog/constants.h" -#include "verilog/Shell_cmd.h" -#include "verilog/Shared_lib.h" +#include "gnuradio/verilog/Shell_cmd.h" +#include "gnuradio/verilog/Shared_lib.h" #define AXI_MODULE_CL_MAKEFILE "axi_module_cl.mk" #define CPP_TEMPLATE_NAME "axi_module.cpp" diff --git a/lib/verilog_axi_cc_impl.h b/lib/verilog_axi_cc_impl.h index 0de610f..f96496c 100644 --- a/lib/verilog_axi_cc_impl.h +++ b/lib/verilog_axi_cc_impl.h @@ -21,10 +21,10 @@ #ifndef INCLUDED_VERILOG_VERILOG_AXI_CC_IMPL_H #define INCLUDED_VERILOG_VERILOG_AXI_CC_IMPL_H -#include +#include #include -#include "verilog/Shared_lib.h" +#include "gnuradio/verilog/Shared_lib.h" #define SLASH "/" diff --git a/lib/verilog_axi_ff_impl.cc b/lib/verilog_axi_ff_impl.cc index 43c04cc..bd5e8c8 100644 --- a/lib/verilog_axi_ff_impl.cc +++ b/lib/verilog_axi_ff_impl.cc @@ -30,10 +30,10 @@ #include -#include "verilog/constants.h" +#include "gnuradio/verilog/constants.h" -#include "verilog/Shell_cmd.h" -#include "verilog/Shared_lib.h" +#include "gnuradio/verilog/Shell_cmd.h" +#include "gnuradio/verilog/Shared_lib.h" #define AXI_MODULE_CL_MAKEFILE "axi_module_cl.mk" #define CPP_TEMPLATE_NAME "axi_module.cpp" diff --git a/lib/verilog_axi_ff_impl.h b/lib/verilog_axi_ff_impl.h index 33f8fae..673db90 100644 --- a/lib/verilog_axi_ff_impl.h +++ b/lib/verilog_axi_ff_impl.h @@ -21,10 +21,10 @@ #ifndef INCLUDED_VERILOG_VERILOG_AXI_FF_IMPL_H #define INCLUDED_VERILOG_VERILOG_AXI_FF_IMPL_H -#include +#include #include -#include "verilog/Shared_lib.h" +#include "gnuradio/verilog/Shared_lib.h" #define SLASH "/" diff --git a/lib/verilog_axi_ii_impl.cc b/lib/verilog_axi_ii_impl.cc index 070b25d..d339640 100644 --- a/lib/verilog_axi_ii_impl.cc +++ b/lib/verilog_axi_ii_impl.cc @@ -30,10 +30,10 @@ #include -#include "verilog/constants.h" +#include "gnuradio/verilog/constants.h" -#include "verilog/Shell_cmd.h" -#include "verilog/Shared_lib.h" +#include "gnuradio/verilog/Shell_cmd.h" +#include "gnuradio/verilog/Shared_lib.h" #define AXI_MODULE_CL_MAKEFILE "axi_module_cl.mk" #define CPP_TEMPLATE_NAME "axi_module.cpp" diff --git a/lib/verilog_axi_ii_impl.h b/lib/verilog_axi_ii_impl.h index 13e0c4a..bb9091c 100644 --- a/lib/verilog_axi_ii_impl.h +++ b/lib/verilog_axi_ii_impl.h @@ -21,10 +21,10 @@ #ifndef INCLUDED_VERILOG_VERILOG_AXI_II_IMPL_H 
#define INCLUDED_VERILOG_VERILOG_AXI_II_IMPL_H -#include +#include #include -#include "verilog/Shared_lib.h" +#include "gnuradio/verilog/Shared_lib.h" #define SLASH "/" diff --git a/lib/verilog_axi_ss_impl.cc b/lib/verilog_axi_ss_impl.cc index 309bf89..460d21a 100644 --- a/lib/verilog_axi_ss_impl.cc +++ b/lib/verilog_axi_ss_impl.cc @@ -30,10 +30,10 @@ #include -#include "verilog/constants.h" +#include "gnuradio/verilog/constants.h" -#include "verilog/Shell_cmd.h" -#include "verilog/Shared_lib.h" +#include "gnuradio/verilog/Shell_cmd.h" +#include "gnuradio/verilog/Shared_lib.h" #define AXI_MODULE_CL_MAKEFILE "axi_module_cl.mk" #define CPP_TEMPLATE_NAME "axi_module.cpp" diff --git a/lib/verilog_axi_ss_impl.h b/lib/verilog_axi_ss_impl.h index 69d7aac..c5dc35c 100644 --- a/lib/verilog_axi_ss_impl.h +++ b/lib/verilog_axi_ss_impl.h @@ -21,10 +21,10 @@ #ifndef INCLUDED_VERILOG_VERILOG_AXI_SS_IMPL_H #define INCLUDED_VERILOG_VERILOG_AXI_SS_IMPL_H -#include +#include #include -#include "verilog/Shared_lib.h" +#include "gnuradio/verilog/Shared_lib.h" #define SLASH "/" From 61dac0ce16fb22211997a8026de9e63c452ab3d9 Mon Sep 17 00:00:00 2001 From: Ferdinand Stehle Date: Wed, 8 Mar 2023 16:14:11 +0100 Subject: [PATCH 03/13] use std shared ptr --- include/gnuradio/verilog/verilog_axi_bb.h | 2 +- include/gnuradio/verilog/verilog_axi_cc.h | 2 +- include/gnuradio/verilog/verilog_axi_ff.h | 2 +- include/gnuradio/verilog/verilog_axi_ii.h | 2 +- include/gnuradio/verilog/verilog_axi_ss.h | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/include/gnuradio/verilog/verilog_axi_bb.h b/include/gnuradio/verilog/verilog_axi_bb.h index 864f00a..e325409 100644 --- a/include/gnuradio/verilog/verilog_axi_bb.h +++ b/include/gnuradio/verilog/verilog_axi_bb.h @@ -36,7 +36,7 @@ namespace gr { class VERILOG_API verilog_axi_bb : virtual public gr::block { public: - typedef boost::shared_ptr sptr; + typedef std::shared_ptr sptr; /*! * \brief Return a shared_ptr to a new instance of verilog::verilog_axi_bb. diff --git a/include/gnuradio/verilog/verilog_axi_cc.h b/include/gnuradio/verilog/verilog_axi_cc.h index 13a69e9..81930ec 100644 --- a/include/gnuradio/verilog/verilog_axi_cc.h +++ b/include/gnuradio/verilog/verilog_axi_cc.h @@ -36,7 +36,7 @@ namespace gr { class VERILOG_API verilog_axi_cc : virtual public gr::block { public: - typedef boost::shared_ptr sptr; + typedef std::shared_ptr sptr; /*! * \brief Return a shared_ptr to a new instance of verilog::verilog_axi_cc. diff --git a/include/gnuradio/verilog/verilog_axi_ff.h b/include/gnuradio/verilog/verilog_axi_ff.h index f6195c6..8baaee0 100644 --- a/include/gnuradio/verilog/verilog_axi_ff.h +++ b/include/gnuradio/verilog/verilog_axi_ff.h @@ -36,7 +36,7 @@ namespace gr { class VERILOG_API verilog_axi_ff : virtual public gr::block { public: - typedef boost::shared_ptr sptr; + typedef std::shared_ptr sptr; /*! * \brief Return a shared_ptr to a new instance of verilog::verilog_axi_ff. diff --git a/include/gnuradio/verilog/verilog_axi_ii.h b/include/gnuradio/verilog/verilog_axi_ii.h index 6244ef9..7b79953 100644 --- a/include/gnuradio/verilog/verilog_axi_ii.h +++ b/include/gnuradio/verilog/verilog_axi_ii.h @@ -36,7 +36,7 @@ namespace gr { class VERILOG_API verilog_axi_ii : virtual public gr::block { public: - typedef boost::shared_ptr sptr; + typedef std::shared_ptr sptr; /*! * \brief Return a shared_ptr to a new instance of verilog::verilog_axi_ii. 
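The typedef change shown in this patch reflects the GNU Radio 3.9+/3.10 convention: block smart pointers are std::shared_ptr rather than the boost::shared_ptr used in 3.8-era out-of-tree headers, and the pybind11 bindings added later in this series rely on that holder type. The following is only an illustrative sketch of the resulting header pattern, not part of the patch; "my_block" is a placeholder name and the module's API export macro (VERILOG_API) is omitted for brevity.

    // Minimal sketch of a GNU Radio 3.10-style OOT block header
    // (hypothetical block name "my_block").
    #include <gnuradio/block.h>
    #include <memory>

    namespace gr {
    namespace verilog {

    class my_block : virtual public gr::block
    {
    public:
        // 3.8-era headers declared boost::shared_ptr<my_block>;
        // 3.9+ switched to the standard library type.
        typedef std::shared_ptr<my_block> sptr;

        // Factory returning the shared_ptr, as in the verilog_axi_* headers above.
        static sptr make();
    };

    } // namespace verilog
    } // namespace gr
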
diff --git a/include/gnuradio/verilog/verilog_axi_ss.h b/include/gnuradio/verilog/verilog_axi_ss.h index 2e9fe16..0b82b8c 100644 --- a/include/gnuradio/verilog/verilog_axi_ss.h +++ b/include/gnuradio/verilog/verilog_axi_ss.h @@ -36,7 +36,7 @@ namespace gr { class VERILOG_API verilog_axi_ss : virtual public gr::block { public: - typedef boost::shared_ptr sptr; + typedef std::shared_ptr sptr; /*! * \brief Return a shared_ptr to a new instance of verilog::verilog_axi_ss. From d94a7956eddc5c4bf14948eb86efe00d7af0a2c3 Mon Sep 17 00:00:00 2001 From: Ferdinand Stehle Date: Wed, 8 Mar 2023 16:15:53 +0100 Subject: [PATCH 04/13] avoid warning on catch --- lib/verilog_axi_bb_impl.cc | 18 +++++++++++------- lib/verilog_axi_cc_impl.cc | 14 +++++++------- lib/verilog_axi_ff_impl.cc | 14 +++++++------- lib/verilog_axi_ii_impl.cc | 14 +++++++------- lib/verilog_axi_ss_impl.cc | 14 +++++++------- 5 files changed, 39 insertions(+), 35 deletions(-) diff --git a/lib/verilog_axi_bb_impl.cc b/lib/verilog_axi_bb_impl.cc index 3372e4a..cb62172 100644 --- a/lib/verilog_axi_bb_impl.cc +++ b/lib/verilog_axi_bb_impl.cc @@ -143,7 +143,8 @@ namespace gr { gr::thread::scoped_lock lock(this->vl_mutex); this->generate_so(); - } catch (std::runtime_error) { + + } catch (std::runtime_error const&) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % this->verilog_module_path.c_str() @@ -165,7 +166,8 @@ namespace gr { gr::thread::scoped_lock lock(this->vl_mutex); this->load_lib(); - } catch (std::runtime_error) { + + } catch (std::runtime_error const&) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % this->verilog_module_path.c_str() @@ -251,7 +253,7 @@ namespace gr { try { status_code = this->sim(in[input_i], out[output_i], this->main_time); - } catch (std::runtime_error) { + } catch (std::runtime_error const&) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % this->verilog_module_path.c_str() @@ -399,11 +401,12 @@ namespace gr { } bool - verilog_axi_bb_impl::test_access(const char *filepath, const char *err_msg = "") + verilog_axi_bb_impl::test_access(const char *filepath, const char *err_msg = NULL) { if ( access(filepath, R_OK) == _EXIT_FAILURE ) { - if (err_msg != "") { + + if (err_msg != NULL) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % filepath @@ -418,13 +421,14 @@ namespace gr { } bool - verilog_axi_bb_impl::check_env(const char *package, const char *err_msg = "") + verilog_axi_bb_impl::check_env(const char *package, const char *err_msg) { Shell_cmd bash; bash.exec((std::string("which ") + package).c_str()); if (bash.get_msg(0) == "") { - if (err_msg != "") { + + if (err_msg != NULL) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % package diff --git a/lib/verilog_axi_cc_impl.cc b/lib/verilog_axi_cc_impl.cc index 48f445d..6b1e5f8 100644 --- a/lib/verilog_axi_cc_impl.cc +++ b/lib/verilog_axi_cc_impl.cc @@ -143,7 +143,7 @@ namespace gr { gr::thread::scoped_lock lock(this->vl_mutex); this->generate_so(); - } catch (std::runtime_error) { + } catch (std::runtime_error const&) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % this->verilog_module_path.c_str() @@ -165,7 +165,7 @@ namespace gr { gr::thread::scoped_lock lock(this->vl_mutex); this->load_lib(); - } catch (std::runtime_error) { + } catch (std::runtime_error const&) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % this->verilog_module_path.c_str() @@ -251,7 +251,7 @@ namespace gr { try { status_code = this->sim(in[input_i], out[output_i], this->main_time); - } catch (std::runtime_error) { + } catch (std::runtime_error const&) { 
GR_LOG_ERROR(d_logger, boost::format("%s: %s") % this->verilog_module_path.c_str() @@ -399,11 +399,11 @@ namespace gr { } bool - verilog_axi_cc_impl::test_access(const char *filepath, const char *err_msg = "") + verilog_axi_cc_impl::test_access(const char *filepath, const char *err_msg) { if ( access(filepath, R_OK) == _EXIT_FAILURE ) { - if (err_msg != "") { + if (err_msg != NULL) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % filepath @@ -418,13 +418,13 @@ namespace gr { } bool - verilog_axi_cc_impl::check_env(const char *package, const char *err_msg = "") + verilog_axi_cc_impl::check_env(const char *package, const char *err_msg) { Shell_cmd bash; bash.exec((std::string("which ") + package).c_str()); if (bash.get_msg(0) == "") { - if (err_msg != "") { + if (err_msg != NULL) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % package diff --git a/lib/verilog_axi_ff_impl.cc b/lib/verilog_axi_ff_impl.cc index bd5e8c8..0a12880 100644 --- a/lib/verilog_axi_ff_impl.cc +++ b/lib/verilog_axi_ff_impl.cc @@ -143,7 +143,7 @@ namespace gr { gr::thread::scoped_lock lock(this->vl_mutex); this->generate_so(); - } catch (std::runtime_error) { + } catch (std::runtime_error const&) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % this->verilog_module_path.c_str() @@ -165,7 +165,7 @@ namespace gr { gr::thread::scoped_lock lock(this->vl_mutex); this->load_lib(); - } catch (std::runtime_error) { + } catch (std::runtime_error const&) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % this->verilog_module_path.c_str() @@ -251,7 +251,7 @@ namespace gr { try { status_code = this->sim(in[input_i], out[output_i], this->main_time); - } catch (std::runtime_error) { + } catch (std::runtime_error const&) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % this->verilog_module_path.c_str() @@ -399,11 +399,11 @@ namespace gr { } bool - verilog_axi_ff_impl::test_access(const char *filepath, const char *err_msg = "") + verilog_axi_ff_impl::test_access(const char *filepath, const char *err_msg) { if ( access(filepath, R_OK) == _EXIT_FAILURE ) { - if (err_msg != "") { + if (err_msg != NULL) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % filepath @@ -418,13 +418,13 @@ namespace gr { } bool - verilog_axi_ff_impl::check_env(const char *package, const char *err_msg = "") + verilog_axi_ff_impl::check_env(const char *package, const char *err_msg) { Shell_cmd bash; bash.exec((std::string("which ") + package).c_str()); if (bash.get_msg(0) == "") { - if (err_msg != "") { + if (err_msg != NULL) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % package diff --git a/lib/verilog_axi_ii_impl.cc b/lib/verilog_axi_ii_impl.cc index d339640..f2de3ec 100644 --- a/lib/verilog_axi_ii_impl.cc +++ b/lib/verilog_axi_ii_impl.cc @@ -143,7 +143,7 @@ namespace gr { gr::thread::scoped_lock lock(this->vl_mutex); this->generate_so(); - } catch (std::runtime_error) { + } catch (std::runtime_error const&) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % this->verilog_module_path.c_str() @@ -165,7 +165,7 @@ namespace gr { gr::thread::scoped_lock lock(this->vl_mutex); this->load_lib(); - } catch (std::runtime_error) { + } catch (std::runtime_error const&) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % this->verilog_module_path.c_str() @@ -251,7 +251,7 @@ namespace gr { try { status_code = this->sim(in[input_i], out[output_i], this->main_time); - } catch (std::runtime_error) { + } catch (std::runtime_error const&) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % this->verilog_module_path.c_str() @@ -399,11 +399,11 @@ namespace gr { } bool - 
verilog_axi_ii_impl::test_access(const char *filepath, const char *err_msg = "") + verilog_axi_ii_impl::test_access(const char *filepath, const char *err_msg) { if ( access(filepath, R_OK) == _EXIT_FAILURE ) { - if (err_msg != "") { + if (err_msg != NULL) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % filepath @@ -418,13 +418,13 @@ namespace gr { } bool - verilog_axi_ii_impl::check_env(const char *package, const char *err_msg = "") + verilog_axi_ii_impl::check_env(const char *package, const char *err_msg) { Shell_cmd bash; bash.exec((std::string("which ") + package).c_str()); if (bash.get_msg(0) == "") { - if (err_msg != "") { + if (err_msg != NULL) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % package diff --git a/lib/verilog_axi_ss_impl.cc b/lib/verilog_axi_ss_impl.cc index 460d21a..2f91916 100644 --- a/lib/verilog_axi_ss_impl.cc +++ b/lib/verilog_axi_ss_impl.cc @@ -143,7 +143,7 @@ namespace gr { gr::thread::scoped_lock lock(this->vl_mutex); this->generate_so(); - } catch (std::runtime_error) { + } catch (std::runtime_error const&) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % this->verilog_module_path.c_str() @@ -165,7 +165,7 @@ namespace gr { gr::thread::scoped_lock lock(this->vl_mutex); this->load_lib(); - } catch (std::runtime_error) { + } catch (std::runtime_error const&) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % this->verilog_module_path.c_str() @@ -251,7 +251,7 @@ namespace gr { try { status_code = this->sim(in[input_i], out[output_i], this->main_time); - } catch (std::runtime_error) { + } catch (std::runtime_error const&) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % this->verilog_module_path.c_str() @@ -399,11 +399,11 @@ namespace gr { } bool - verilog_axi_ss_impl::test_access(const char *filepath, const char *err_msg = "") + verilog_axi_ss_impl::test_access(const char *filepath, const char *err_msg) { if ( access(filepath, R_OK) == _EXIT_FAILURE ) { - if (err_msg != "") { + if (err_msg != NULL) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % filepath @@ -418,13 +418,13 @@ namespace gr { } bool - verilog_axi_ss_impl::check_env(const char *package, const char *err_msg = "") + verilog_axi_ss_impl::check_env(const char *package, const char *err_msg) { Shell_cmd bash; bash.exec((std::string("which ") + package).c_str()); if (bash.get_msg(0) == "") { - if (err_msg != "") { + if (err_msg != NULL) { GR_LOG_ERROR(d_logger, boost::format("%s: %s") % package From 75b5192b2ddca0f9f3874ca43c3697543296d8f1 Mon Sep 17 00:00:00 2001 From: Ferdinand Stehle Date: Wed, 8 Mar 2023 16:19:59 +0100 Subject: [PATCH 05/13] add python binding --- python/verilog/bindings/CMakeLists.txt | 1 + .../verilog_axi_ii_pydoc_template.h | 27 +++++++++ python/verilog/bindings/python_bindings.cc | 2 + .../verilog/bindings/verilog_axi_ii_python.cc | 59 +++++++++++++++++++ 4 files changed, 89 insertions(+) create mode 100644 python/verilog/bindings/docstrings/verilog_axi_ii_pydoc_template.h create mode 100644 python/verilog/bindings/verilog_axi_ii_python.cc diff --git a/python/verilog/bindings/CMakeLists.txt b/python/verilog/bindings/CMakeLists.txt index 61c7d02..660eb83 100644 --- a/python/verilog/bindings/CMakeLists.txt +++ b/python/verilog/bindings/CMakeLists.txt @@ -29,6 +29,7 @@ include(GrPybind) ######################################################################## list(APPEND verilog_python_files + verilog_axi_ii_python.cc python_bindings.cc) GR_PYBIND_MAKE_OOT(verilog diff --git a/python/verilog/bindings/docstrings/verilog_axi_ii_pydoc_template.h 
b/python/verilog/bindings/docstrings/verilog_axi_ii_pydoc_template.h new file mode 100644 index 0000000..3be7a93 --- /dev/null +++ b/python/verilog/bindings/docstrings/verilog_axi_ii_pydoc_template.h @@ -0,0 +1,27 @@ +/* + * Copyright 2023 Free Software Foundation, Inc. + * + * This file is part of GNU Radio + * + * SPDX-License-Identifier: GPL-3.0-or-later + * + */ +#include "pydoc_macros.h" +#define D(...) DOC(gr,verilog, __VA_ARGS__ ) +/* + This file contains placeholders for docstrings for the Python bindings. + Do not edit! These were automatically extracted during the binding process + and will be overwritten during the build process + */ + + + + static const char *__doc_gr_verilog_axi_ii = R"doc()doc"; + + + static const char *__doc_gr_verilog_verilog_axi_ii = R"doc()doc"; + + + static const char *__doc_gr_verilog_verilog_axi_ii_make = R"doc()doc"; + + diff --git a/python/verilog/bindings/python_bindings.cc b/python/verilog/bindings/python_bindings.cc index df5b39a..f9a8028 100644 --- a/python/verilog/bindings/python_bindings.cc +++ b/python/verilog/bindings/python_bindings.cc @@ -21,6 +21,7 @@ namespace py = pybind11; // Please do not delete /**************************************/ // BINDING_FUNCTION_PROTOTYPES( + void bind_verilog_axi_ii(py::module& m); // ) END BINDING_FUNCTION_PROTOTYPES @@ -49,5 +50,6 @@ PYBIND11_MODULE(verilog_python, m) // Please do not delete /**************************************/ // BINDING_FUNCTION_CALLS( + bind_verilog_axi_ii(m); // ) END BINDING_FUNCTION_CALLS } diff --git a/python/verilog/bindings/verilog_axi_ii_python.cc b/python/verilog/bindings/verilog_axi_ii_python.cc new file mode 100644 index 0000000..0421f52 --- /dev/null +++ b/python/verilog/bindings/verilog_axi_ii_python.cc @@ -0,0 +1,59 @@ +/* + * Copyright 2023 Free Software Foundation, Inc. + * + * This file is part of GNU Radio + * + * SPDX-License-Identifier: GPL-3.0-or-later + * + */ + +/***********************************************************************************/ +/* This file is automatically generated using bindtool and can be manually edited */ +/* The following lines can be configured to regenerate this file during cmake */ +/* If manual edits are made, the following tags should be modified accordingly. 
*/ +/* BINDTOOL_GEN_AUTOMATIC(1) */ +/* BINDTOOL_USE_PYGCCXML(1) */ +/* BINDTOOL_HEADER_FILE(verilog_axi_ii.h) */ +/* BINDTOOL_HEADER_FILE_HASH(78d518a7da29e3f42307301fe783be14) */ +/***********************************************************************************/ + +#include +#include +#include + +namespace py = pybind11; + +#include +// pydoc.h is automatically generated in the build directory +#include + +void bind_verilog_axi_ii(py::module& m) +{ + + using verilog_axi_ii = gr::verilog::verilog_axi_ii; + + + py::class_>(m, "verilog_axi_ii", D(verilog_axi_ii)) + + .def(py::init(&verilog_axi_ii::make), + D(verilog_axi_ii,make) + ) + + + + + ; + + + + +} + + + + + + + + From 71aea688ea899d8b143d0075475bb85df04b8782 Mon Sep 17 00:00:00 2001 From: Ferdinand Stehle Date: Wed, 8 Mar 2023 18:15:21 +0100 Subject: [PATCH 06/13] add float python bindings --- python/verilog/bindings/CMakeLists.txt | 1 + .../verilog_axi_ff_pydoc_template.h | 27 +++++++++ python/verilog/bindings/python_bindings.cc | 2 + .../verilog/bindings/verilog_axi_ff_python.cc | 59 +++++++++++++++++++ 4 files changed, 89 insertions(+) create mode 100644 python/verilog/bindings/docstrings/verilog_axi_ff_pydoc_template.h create mode 100644 python/verilog/bindings/verilog_axi_ff_python.cc diff --git a/python/verilog/bindings/CMakeLists.txt b/python/verilog/bindings/CMakeLists.txt index 660eb83..8a0b9d9 100644 --- a/python/verilog/bindings/CMakeLists.txt +++ b/python/verilog/bindings/CMakeLists.txt @@ -30,6 +30,7 @@ include(GrPybind) list(APPEND verilog_python_files verilog_axi_ii_python.cc + verilog_axi_ff_python.cc python_bindings.cc) GR_PYBIND_MAKE_OOT(verilog diff --git a/python/verilog/bindings/docstrings/verilog_axi_ff_pydoc_template.h b/python/verilog/bindings/docstrings/verilog_axi_ff_pydoc_template.h new file mode 100644 index 0000000..e35d22c --- /dev/null +++ b/python/verilog/bindings/docstrings/verilog_axi_ff_pydoc_template.h @@ -0,0 +1,27 @@ +/* + * Copyright 2023 Free Software Foundation, Inc. + * + * This file is part of GNU Radio + * + * SPDX-License-Identifier: GPL-3.0-or-later + * + */ +#include "pydoc_macros.h" +#define D(...) DOC(gr,verilog, __VA_ARGS__ ) +/* + This file contains placeholders for docstrings for the Python bindings. + Do not edit! 
These were automatically extracted during the binding process + and will be overwritten during the build process + */ + + + + static const char *__doc_gr_verilog_axi_ff = R"doc()doc"; + + + static const char *__doc_gr_verilog_verilog_axi_ff = R"doc()doc"; + + + static const char *__doc_gr_verilog_verilog_axi_ff_make = R"doc()doc"; + + diff --git a/python/verilog/bindings/python_bindings.cc b/python/verilog/bindings/python_bindings.cc index f9a8028..3119667 100644 --- a/python/verilog/bindings/python_bindings.cc +++ b/python/verilog/bindings/python_bindings.cc @@ -22,6 +22,7 @@ namespace py = pybind11; /**************************************/ // BINDING_FUNCTION_PROTOTYPES( void bind_verilog_axi_ii(py::module& m); + void bind_verilog_axi_ff(py::module& m); // ) END BINDING_FUNCTION_PROTOTYPES @@ -51,5 +52,6 @@ PYBIND11_MODULE(verilog_python, m) /**************************************/ // BINDING_FUNCTION_CALLS( bind_verilog_axi_ii(m); + bind_verilog_axi_ff(m); // ) END BINDING_FUNCTION_CALLS } diff --git a/python/verilog/bindings/verilog_axi_ff_python.cc b/python/verilog/bindings/verilog_axi_ff_python.cc new file mode 100644 index 0000000..0201ead --- /dev/null +++ b/python/verilog/bindings/verilog_axi_ff_python.cc @@ -0,0 +1,59 @@ +/* + * Copyright 2023 Free Software Foundation, Inc. + * + * This file is part of GNU Radio + * + * SPDX-License-Identifier: GPL-3.0-or-later + * + */ + +/***********************************************************************************/ +/* This file is automatically generated using bindtool and can be manually edited */ +/* The following lines can be configured to regenerate this file during cmake */ +/* If manual edits are made, the following tags should be modified accordingly. */ +/* BINDTOOL_GEN_AUTOMATIC(1) */ +/* BINDTOOL_USE_PYGCCXML(1) */ +/* BINDTOOL_HEADER_FILE(verilog_axi_ff.h) */ +/* BINDTOOL_HEADER_FILE_HASH(78d518a7da29e3f42307301fe783be14) */ +/***********************************************************************************/ + +#include +#include +#include + +namespace py = pybind11; + +#include +// pydoc.h is automatically generated in the build directory +#include + +void bind_verilog_axi_ff(py::module& m) +{ + + using verilog_axi_ff = gr::verilog::verilog_axi_ff; + + + py::class_>(m, "verilog_axi_ff", D(verilog_axi_ff)) + + .def(py::init(&verilog_axi_ff::make), + D(verilog_axi_ff,make) + ) + + + + + ; + + + + +} + + + + + + + + From a09ff3f6183e2d2016719cbfdef8179850cc8d46 Mon Sep 17 00:00:00 2001 From: Ferdinand Stehle Date: Mon, 13 Mar 2023 16:04:51 +0100 Subject: [PATCH 07/13] re-enable python tests --- python/verilog/CMakeLists.txt | 2 + python/verilog/qa_verilog_axi_ff.py | 89 +++++++++++++++++++ python/verilog/qa_verilog_axi_ii.py | 80 +++++++++++++++++ .../{ => verilog}/testcases/double/double.v | 0 .../testcases/double/double_axi.v | 0 .../testcases/passthru/saxi_passthru.v | 0 6 files changed, 171 insertions(+) create mode 100755 python/verilog/qa_verilog_axi_ff.py create mode 100755 python/verilog/qa_verilog_axi_ii.py rename python/{ => verilog}/testcases/double/double.v (100%) rename python/{ => verilog}/testcases/double/double_axi.v (100%) rename python/{ => verilog}/testcases/passthru/saxi_passthru.v (100%) diff --git a/python/verilog/CMakeLists.txt b/python/verilog/CMakeLists.txt index ea29eb7..c00eca3 100644 --- a/python/verilog/CMakeLists.txt +++ b/python/verilog/CMakeLists.txt @@ -39,3 +39,5 @@ add_custom_target( COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR} 
${CMAKE_BINARY_DIR}/test_modules/gnuradio/verilog/ ) +GR_ADD_TEST(qa_verilog_axi_ii ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_ii.py) +GR_ADD_TEST(qa_verilog_axi_ff ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_ff.py) diff --git a/python/verilog/qa_verilog_axi_ff.py b/python/verilog/qa_verilog_axi_ff.py new file mode 100755 index 0000000..49fb08d --- /dev/null +++ b/python/verilog/qa_verilog_axi_ff.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2019 <+YOU OR YOUR COMPANY+>. +# +# This is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. +# + +from gnuradio import gr, gr_unittest +from gnuradio import blocks +import os + +try: + from gnuradio import verilog +except ImportError: + import sys + dirname, filename = os.path.split(os.path.abspath(__file__)) + sys.path.append(os.path.join(dirname, "bindings")) + from gnuradio import verilog + +class qa_verilog_axi_ff (gr_unittest.TestCase): + + def setUp (self): + self.tb = gr.top_block () + + def tearDown (self): + self.tb = None + + def test_001_t (self): + # set up fg + src_data = (1.2, 3.8, 5.7, 9.4, 10.2, 12.4, 17.5, 19.2, 21.4, 12.3, 45.5, 29.3) + expected_result = (1.2, 3.8, 5.7, 9.4, 10.2, 12.4, 17.5, 19.2, 21.4, 12.3, 45.5, 29.3) + src = blocks.vector_source_f(src_data) + path = os.path.dirname(__file__) if len(os.path.dirname(__file__)) != 0 else '.' + vl = verilog.verilog_axi_ff(path + "/testcases/passthru/saxi_passthru.v", True, 1.0, "", 0, 0) + dst = blocks.vector_sink_f() + + self.tb.connect(src, vl) + self.tb.connect(vl, dst) + self.tb.run() + # check data + + + result_data = dst.data() + print (expected_result) + print (result_data) + def round_f(x): + return round(x, 3) + round_result_data = tuple(map(round_f, result_data)) + print (round_result_data) + self.assertFloatTuplesAlmostEqual(expected_result, round_result_data, 12) + + def test_002_t (self): + # set up fg + src_data = (1.2, 3.8, 5.7, 9.4, 10.2, 12.4, 17.5, 19.2, 21.4) + expected_result = (2.4, 7.6, 11.4, 18.8, 20.4, 24.8, 35.0, 38.4, 42.8) + src = blocks.vector_source_f(src_data) + path = os.path.dirname(__file__) if len(os.path.dirname(__file__)) != 0 else '.' 
+ vl = verilog.verilog_axi_ff(path + "/testcases/double/double_axi.v", True, 1.0, "", 0, 0) + dst = blocks.vector_sink_f() + + self.tb.connect(src, vl) + self.tb.connect(vl, dst) + self.tb.run() + # check data + result_data = dst.data() + print (expected_result) + print (result_data) + def round_f(x): + return round(x, 3) + round_result_data = tuple(map(round_f, result_data)) + print (round_result_data) + self.assertFloatTuplesAlmostEqual(expected_result, round_result_data, 9) + +if __name__ == '__main__': + gr_unittest.run(qa_verilog_axi_ff, "qa_verilog_axi_ff.xml") diff --git a/python/verilog/qa_verilog_axi_ii.py b/python/verilog/qa_verilog_axi_ii.py new file mode 100755 index 0000000..0081ae3 --- /dev/null +++ b/python/verilog/qa_verilog_axi_ii.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2019 <+YOU OR YOUR COMPANY+>. +# +# This is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. +# + +from gnuradio import gr, gr_unittest +from gnuradio import blocks +import os + +try: + from gnuradio import verilog +except ImportError: + import sys + dirname, filename = os.path.split(os.path.abspath(__file__)) + sys.path.append(os.path.join(dirname, "bindings")) + from gnuradio import verilog + +class qa_verilog_axi_ii (gr_unittest.TestCase): + + def setUp (self): + self.tb = gr.top_block () + + def tearDown (self): + self.tb = None + + def test_001_t (self): + # set up fg + src_data = (1, 3, 5, 9, 10, 12, 17, 19, 21, 12, 45, 29) + expected_result = (1, 3, 5, 9, 10, 12, 17, 19, 21, 12, 45, 29) + src = blocks.vector_source_i(src_data) + path = os.path.dirname(__file__) if len(os.path.dirname(__file__)) != 0 else '.' + vl = verilog.verilog_axi_ii(path + "/testcases/passthru/saxi_passthru.v", True, 1.0, "", 0, 0) + dst = blocks.vector_sink_i() + + self.tb.connect(src, vl) + self.tb.connect(vl, dst) + self.tb.run() + # check data + result_data = dst.data() + print (expected_result) + print (result_data) + self.assertFloatTuplesAlmostEqual(expected_result, result_data, 12) + + def test_002_t (self): + # set up fg + src_data = (1, 3, 5, 9, 10, 12, 17, 19, 21) + expected_result = (2, 6, 10, 18, 20, 24, 34, 38, 42) + src = blocks.vector_source_i(src_data) + path = os.path.dirname(__file__) if len(os.path.dirname(__file__)) != 0 else '.' 
+ vl = verilog.verilog_axi_ii(path + "/testcases/double/double_axi.v", True, 1.0, "", 0, 0) + dst = blocks.vector_sink_i() + + self.tb.connect(src, vl) + self.tb.connect(vl, dst) + self.tb.run() + # check data + result_data = dst.data() + print (expected_result) + print (result_data) + self.assertFloatTuplesAlmostEqual(expected_result, result_data, 9) + + +if __name__ == '__main__': + gr_unittest.run(qa_verilog_axi_ii, "qa_verilog_axi_ii.xml") diff --git a/python/testcases/double/double.v b/python/verilog/testcases/double/double.v similarity index 100% rename from python/testcases/double/double.v rename to python/verilog/testcases/double/double.v diff --git a/python/testcases/double/double_axi.v b/python/verilog/testcases/double/double_axi.v similarity index 100% rename from python/testcases/double/double_axi.v rename to python/verilog/testcases/double/double_axi.v diff --git a/python/testcases/passthru/saxi_passthru.v b/python/verilog/testcases/passthru/saxi_passthru.v similarity index 100% rename from python/testcases/passthru/saxi_passthru.v rename to python/verilog/testcases/passthru/saxi_passthru.v From 0bfd79874e8a83d97f0d8ba84fa3c4112c14dcf6 Mon Sep 17 00:00:00 2001 From: Ferdinand Stehle Date: Mon, 13 Mar 2023 16:43:53 +0100 Subject: [PATCH 08/13] add byte python binding --- python/verilog/CMakeLists.txt | 2 + python/verilog/bindings/CMakeLists.txt | 1 + .../verilog_axi_bb_pydoc_template.h | 27 +++++++ python/verilog/bindings/python_bindings.cc | 2 + .../verilog/bindings/verilog_axi_bb_python.cc | 59 ++++++++++++++ python/verilog/qa_verilog_axi_bb.py | 80 +++++++++++++++++++ 6 files changed, 171 insertions(+) create mode 100644 python/verilog/bindings/docstrings/verilog_axi_bb_pydoc_template.h create mode 100644 python/verilog/bindings/verilog_axi_bb_python.cc create mode 100755 python/verilog/qa_verilog_axi_bb.py diff --git a/python/verilog/CMakeLists.txt b/python/verilog/CMakeLists.txt index c00eca3..e8a2312 100644 --- a/python/verilog/CMakeLists.txt +++ b/python/verilog/CMakeLists.txt @@ -39,5 +39,7 @@ add_custom_target( COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_BINARY_DIR}/test_modules/gnuradio/verilog/ ) + +GR_ADD_TEST(qa_verilog_axi_bb ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_bb.py) GR_ADD_TEST(qa_verilog_axi_ii ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_ii.py) GR_ADD_TEST(qa_verilog_axi_ff ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_ff.py) diff --git a/python/verilog/bindings/CMakeLists.txt b/python/verilog/bindings/CMakeLists.txt index 8a0b9d9..8c80f11 100644 --- a/python/verilog/bindings/CMakeLists.txt +++ b/python/verilog/bindings/CMakeLists.txt @@ -29,6 +29,7 @@ include(GrPybind) ######################################################################## list(APPEND verilog_python_files + verilog_axi_bb_python.cc verilog_axi_ii_python.cc verilog_axi_ff_python.cc python_bindings.cc) diff --git a/python/verilog/bindings/docstrings/verilog_axi_bb_pydoc_template.h b/python/verilog/bindings/docstrings/verilog_axi_bb_pydoc_template.h new file mode 100644 index 0000000..a0f29b8 --- /dev/null +++ b/python/verilog/bindings/docstrings/verilog_axi_bb_pydoc_template.h @@ -0,0 +1,27 @@ +/* + * Copyright 2023 Free Software Foundation, Inc. + * + * This file is part of GNU Radio + * + * SPDX-License-Identifier: GPL-3.0-or-later + * + */ +#include "pydoc_macros.h" +#define D(...) DOC(gr,verilog, __VA_ARGS__ ) +/* + This file contains placeholders for docstrings for the Python bindings. 
+ Do not edit! These were automatically extracted during the binding process + and will be overwritten during the build process + */ + + + + static const char *__doc_gr_verilog_axi_bb = R"doc()doc"; + + + static const char *__doc_gr_verilog_verilog_axi_bb = R"doc()doc"; + + + static const char *__doc_gr_verilog_verilog_axi_bb_make = R"doc()doc"; + + diff --git a/python/verilog/bindings/python_bindings.cc b/python/verilog/bindings/python_bindings.cc index 3119667..cda369c 100644 --- a/python/verilog/bindings/python_bindings.cc +++ b/python/verilog/bindings/python_bindings.cc @@ -21,6 +21,7 @@ namespace py = pybind11; // Please do not delete /**************************************/ // BINDING_FUNCTION_PROTOTYPES( + void bind_verilog_axi_bb(py::module& m); void bind_verilog_axi_ii(py::module& m); void bind_verilog_axi_ff(py::module& m); // ) END BINDING_FUNCTION_PROTOTYPES @@ -51,6 +52,7 @@ PYBIND11_MODULE(verilog_python, m) // Please do not delete /**************************************/ // BINDING_FUNCTION_CALLS( + bind_verilog_axi_bb(m); bind_verilog_axi_ii(m); bind_verilog_axi_ff(m); // ) END BINDING_FUNCTION_CALLS diff --git a/python/verilog/bindings/verilog_axi_bb_python.cc b/python/verilog/bindings/verilog_axi_bb_python.cc new file mode 100644 index 0000000..59edfd8 --- /dev/null +++ b/python/verilog/bindings/verilog_axi_bb_python.cc @@ -0,0 +1,59 @@ +/* + * Copyright 2023 Free Software Foundation, Inc. + * + * This file is part of GNU Radio + * + * SPDX-License-Identifier: GPL-3.0-or-later + * + */ + +/***********************************************************************************/ +/* This file is automatically generated using bindtool and can be manually edited */ +/* The following lines can be configured to regenerate this file during cmake */ +/* If manual edits are made, the following tags should be modified accordingly. */ +/* BINDTOOL_GEN_AUTOMATIC(1) */ +/* BINDTOOL_USE_PYGCCXML(1) */ +/* BINDTOOL_HEADER_FILE(verilog_axi_bb.h) */ +/* BINDTOOL_HEADER_FILE_HASH(78d518a7da29e3f42307301fe783be14) */ +/***********************************************************************************/ + +#include +#include +#include + +namespace py = pybind11; + +#include +// pydoc.h is automatically generated in the build directory +#include + +void bind_verilog_axi_bb(py::module& m) +{ + + using verilog_axi_bb = gr::verilog::verilog_axi_bb; + + + py::class_>(m, "verilog_axi_bb", D(verilog_axi_bb)) + + .def(py::init(&verilog_axi_bb::make), + D(verilog_axi_bb,make) + ) + + + + + ; + + + + +} + + + + + + + + diff --git a/python/verilog/qa_verilog_axi_bb.py b/python/verilog/qa_verilog_axi_bb.py new file mode 100755 index 0000000..9ed152f --- /dev/null +++ b/python/verilog/qa_verilog_axi_bb.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2019 <+YOU OR YOUR COMPANY+>. +# +# This is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software; see the file COPYING. 
If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. +# + +from gnuradio import gr, gr_unittest +from gnuradio import blocks +import os + +try: + from gnuradio import verilog +except ImportError: + import sys + dirname, filename = os.path.split(os.path.abspath(__file__)) + sys.path.append(os.path.join(dirname, "bindings")) + from gnuradio import verilog + +class qa_verilog_axi_bb (gr_unittest.TestCase): + + def setUp (self): + self.tb = gr.top_block () + + def tearDown (self): + self.tb = None + + def test_001_t (self): + # set up fg + src_data = (1, 3, 5, 9, 10, 12, 17, 19, 21, 12, 45, 29) + expected_result = (1, 3, 5, 9, 10, 12, 17, 19, 21, 12, 45, 29) + src = blocks.vector_source_b(src_data) + path = os.path.dirname(__file__) if len(os.path.dirname(__file__)) != 0 else '.' + vl = verilog.verilog_axi_bb(path + "/testcases/passthru/saxi_passthru.v", True, 1.0, "", 0, 0) + dst = blocks.vector_sink_b() + + self.tb.connect(src, vl) + self.tb.connect(vl, dst) + self.tb.run() + # check data + result_data = dst.data() + print (expected_result) + print (result_data) + self.assertFloatTuplesAlmostEqual(expected_result, result_data, 12) + + def test_002_t (self): + # set up fg + src_data = (1, 3, 5, 9, 10, 12, 17, 19, 21) + expected_result = (2, 6, 10, 18, 20, 24, 34, 38, 42) + src = blocks.vector_source_b(src_data) + path = os.path.dirname(__file__) if len(os.path.dirname(__file__)) != 0 else '.' + vl = verilog.verilog_axi_bb(path + "/testcases/double/double_axi.v", True, 1.0, "", 0, 0) + dst = blocks.vector_sink_b() + + self.tb.connect(src, vl) + self.tb.connect(vl, dst) + self.tb.run() + # check data + result_data = dst.data() + print (expected_result) + print (result_data) + self.assertFloatTuplesAlmostEqual(expected_result, result_data, 9) + + +if __name__ == '__main__': + gr_unittest.run(qa_verilog_axi_bb, "qa_verilog_axi_bb.xml") From d7b020fb8bab5b214537739c956c2a450c496022 Mon Sep 17 00:00:00 2001 From: Ferdinand Stehle Date: Mon, 13 Mar 2023 16:45:02 +0100 Subject: [PATCH 09/13] add short python binding --- python/verilog/CMakeLists.txt | 1 + python/verilog/bindings/CMakeLists.txt | 1 + .../verilog_axi_ss_pydoc_template.h | 27 +++++++ python/verilog/bindings/python_bindings.cc | 2 + .../verilog/bindings/verilog_axi_ss_python.cc | 59 ++++++++++++++ python/verilog/qa_verilog_axi_ss.py | 80 +++++++++++++++++++ 6 files changed, 170 insertions(+) create mode 100644 python/verilog/bindings/docstrings/verilog_axi_ss_pydoc_template.h create mode 100644 python/verilog/bindings/verilog_axi_ss_python.cc create mode 100755 python/verilog/qa_verilog_axi_ss.py diff --git a/python/verilog/CMakeLists.txt b/python/verilog/CMakeLists.txt index e8a2312..229dc19 100644 --- a/python/verilog/CMakeLists.txt +++ b/python/verilog/CMakeLists.txt @@ -41,5 +41,6 @@ add_custom_target( ) GR_ADD_TEST(qa_verilog_axi_bb ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_bb.py) +GR_ADD_TEST(qa_verilog_axi_ss ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_ss.py) GR_ADD_TEST(qa_verilog_axi_ii ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_ii.py) GR_ADD_TEST(qa_verilog_axi_ff ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_verilog_axi_ff.py) diff --git a/python/verilog/bindings/CMakeLists.txt b/python/verilog/bindings/CMakeLists.txt index 8c80f11..64b6092 100644 --- a/python/verilog/bindings/CMakeLists.txt +++ b/python/verilog/bindings/CMakeLists.txt @@ -30,6 +30,7 @@ include(GrPybind) list(APPEND verilog_python_files 
verilog_axi_bb_python.cc + verilog_axi_ss_python.cc verilog_axi_ii_python.cc verilog_axi_ff_python.cc python_bindings.cc) diff --git a/python/verilog/bindings/docstrings/verilog_axi_ss_pydoc_template.h b/python/verilog/bindings/docstrings/verilog_axi_ss_pydoc_template.h new file mode 100644 index 0000000..aec5663 --- /dev/null +++ b/python/verilog/bindings/docstrings/verilog_axi_ss_pydoc_template.h @@ -0,0 +1,27 @@ +/* + * Copyright 2023 Free Software Foundation, Inc. + * + * This file is part of GNU Radio + * + * SPDX-License-Identifier: GPL-3.0-or-later + * + */ +#include "pydoc_macros.h" +#define D(...) DOC(gr,verilog, __VA_ARGS__ ) +/* + This file contains placeholders for docstrings for the Python bindings. + Do not edit! These were automatically extracted during the binding process + and will be overwritten during the build process + */ + + + + static const char *__doc_gr_verilog_axi_ss = R"doc()doc"; + + + static const char *__doc_gr_verilog_verilog_axi_ss = R"doc()doc"; + + + static const char *__doc_gr_verilog_verilog_axi_ss_make = R"doc()doc"; + + diff --git a/python/verilog/bindings/python_bindings.cc b/python/verilog/bindings/python_bindings.cc index cda369c..174f0d5 100644 --- a/python/verilog/bindings/python_bindings.cc +++ b/python/verilog/bindings/python_bindings.cc @@ -22,6 +22,7 @@ namespace py = pybind11; /**************************************/ // BINDING_FUNCTION_PROTOTYPES( void bind_verilog_axi_bb(py::module& m); + void bind_verilog_axi_ss(py::module& m); void bind_verilog_axi_ii(py::module& m); void bind_verilog_axi_ff(py::module& m); // ) END BINDING_FUNCTION_PROTOTYPES @@ -53,6 +54,7 @@ PYBIND11_MODULE(verilog_python, m) /**************************************/ // BINDING_FUNCTION_CALLS( bind_verilog_axi_bb(m); + bind_verilog_axi_ss(m); bind_verilog_axi_ii(m); bind_verilog_axi_ff(m); // ) END BINDING_FUNCTION_CALLS diff --git a/python/verilog/bindings/verilog_axi_ss_python.cc b/python/verilog/bindings/verilog_axi_ss_python.cc new file mode 100644 index 0000000..69c0d3f --- /dev/null +++ b/python/verilog/bindings/verilog_axi_ss_python.cc @@ -0,0 +1,59 @@ +/* + * Copyright 2023 Free Software Foundation, Inc. + * + * This file is part of GNU Radio + * + * SPDX-License-Identifier: GPL-3.0-or-later + * + */ + +/***********************************************************************************/ +/* This file is automatically generated using bindtool and can be manually edited */ +/* The following lines can be configured to regenerate this file during cmake */ +/* If manual edits are made, the following tags should be modified accordingly. 
*/ +/* BINDTOOL_GEN_AUTOMATIC(1) */ +/* BINDTOOL_USE_PYGCCXML(1) */ +/* BINDTOOL_HEADER_FILE(verilog_axi_ss.h) */ +/* BINDTOOL_HEADER_FILE_HASH(78d518a7da29e3f42307301fe783be14) */ +/***********************************************************************************/ + +#include +#include +#include + +namespace py = pybind11; + +#include +// pydoc.h is automatically generated in the build directory +#include + +void bind_verilog_axi_ss(py::module& m) +{ + + using verilog_axi_ss = gr::verilog::verilog_axi_ss; + + + py::class_>(m, "verilog_axi_ss", D(verilog_axi_ss)) + + .def(py::init(&verilog_axi_ss::make), + D(verilog_axi_ss,make) + ) + + + + + ; + + + + +} + + + + + + + + diff --git a/python/verilog/qa_verilog_axi_ss.py b/python/verilog/qa_verilog_axi_ss.py new file mode 100755 index 0000000..f2ed821 --- /dev/null +++ b/python/verilog/qa_verilog_axi_ss.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2019 <+YOU OR YOUR COMPANY+>. +# +# This is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3, or (at your option) +# any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software; see the file COPYING. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, +# Boston, MA 02110-1301, USA. +# + +from gnuradio import gr, gr_unittest +from gnuradio import blocks +import os + +try: + from gnuradio import verilog +except ImportError: + import sys + dirname, filename = os.path.split(os.path.abspath(__file__)) + sys.path.append(os.path.join(dirname, "bindings")) + from gnuradio import verilog + +class qa_verilog_axi_ss (gr_unittest.TestCase): + + def setUp (self): + self.tb = gr.top_block () + + def tearDown (self): + self.tb = None + + def test_001_t (self): + # set up fg + src_data = (1, 3, 5, 9, 10, 12, 17, 19, 21, 12, 45, 29) + expected_result = (1, 3, 5, 9, 10, 12, 17, 19, 21, 12, 45, 29) + src = blocks.vector_source_s(src_data) + path = os.path.dirname(__file__) if len(os.path.dirname(__file__)) != 0 else '.' + vl = verilog.verilog_axi_ss(path + "/testcases/passthru/saxi_passthru.v", True, 1.0, "", 0, 0) + dst = blocks.vector_sink_s() + + self.tb.connect(src, vl) + self.tb.connect(vl, dst) + self.tb.run() + # check data + result_data = dst.data() + print (expected_result) + print (result_data) + self.assertFloatTuplesAlmostEqual(expected_result, result_data, 12) + + def test_002_t (self): + # set up fg + src_data = (1, 3, 5, 9, 10, 12, 17, 19, 21) + expected_result = (2, 6, 10, 18, 20, 24, 34, 38, 42) + src = blocks.vector_source_s(src_data) + path = os.path.dirname(__file__) if len(os.path.dirname(__file__)) != 0 else '.' 
+ vl = verilog.verilog_axi_ss(path + "/testcases/double/double_axi.v", True, 1.0, "", 0, 0) + dst = blocks.vector_sink_s() + + self.tb.connect(src, vl) + self.tb.connect(vl, dst) + self.tb.run() + # check data + result_data = dst.data() + print (expected_result) + print (result_data) + self.assertFloatTuplesAlmostEqual(expected_result, result_data, 9) + + +if __name__ == '__main__': + gr_unittest.run(qa_verilog_axi_ss, "qa_verilog_axi_ss.xml") From 211ad2dafe0a042fc14c8c1ef349d48a205790e4 Mon Sep 17 00:00:00 2001 From: Ferdinand Stehle Date: Wed, 8 Mar 2023 18:14:16 +0100 Subject: [PATCH 10/13] updated demo apps --- apps/verilog_axi_bb_demo.grc | 349 ++++++++ apps/verilog_axi_ff_demo.grc | 1000 +++++++--------------- apps/verilog_axi_ii_demo.grc | 1503 ++++++++-------------------------- apps/verilog_axi_ss_demo.grc | 252 ++++++ 4 files changed, 1261 insertions(+), 1843 deletions(-) create mode 100644 apps/verilog_axi_bb_demo.grc create mode 100644 apps/verilog_axi_ss_demo.grc diff --git a/apps/verilog_axi_bb_demo.grc b/apps/verilog_axi_bb_demo.grc new file mode 100644 index 0000000..64c0419 --- /dev/null +++ b/apps/verilog_axi_bb_demo.grc @@ -0,0 +1,349 @@ +options: + parameters: + author: Bowen Hu + catch_exceptions: 'True' + category: '[GRC Hier Blocks]' + cmake_opt: '' + comment: '' + copyright: '' + description: This is a demo of verilog_axi_ff block + gen_cmake: 'On' + gen_linking: dynamic + generate_options: qt_gui + hier_block_src_path: '.:' + id: verilog_axi_ff_demo + max_nouts: '0' + output_language: python + placement: (0,0) + qt_qss_theme: '' + realtime_scheduling: '' + run: 'True' + run_command: '{python} -u {filename}' + run_options: prompt + sizing_mode: fixed + thread_safe_setters: '' + title: verilog_axi_ii demo + window_size: '' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [8, 8] + rotation: 0 + state: enabled + +blocks: +- name: samp_rate + id: variable + parameters: + comment: '' + value: '32000' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [8, 132] + rotation: 0 + state: enabled +- name: analog_sig_source_x_0 + id: analog_sig_source_x + parameters: + affinity: '' + alias: '' + amp: '50' + comment: '' + freq: '1000' + maxoutbuf: '0' + minoutbuf: '0' + offset: '0' + phase: '0' + samp_rate: samp_rate + type: byte + waveform: analog.GR_COS_WAVE + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [152, 256.0] + rotation: 0 + state: enabled +- name: blocks_char_to_float_0 + id: blocks_char_to_float + parameters: + affinity: '' + alias: '' + comment: '' + maxoutbuf: '0' + minoutbuf: '0' + scale: '1' + vlen: '1' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [848, 292.0] + rotation: 0 + state: true +- name: blocks_char_to_float_0_0 + id: blocks_char_to_float + parameters: + affinity: '' + alias: '' + comment: '' + maxoutbuf: '0' + minoutbuf: '0' + scale: '1' + vlen: '1' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [848, 420.0] + rotation: 0 + state: true +- name: blocks_throttle_0 + id: blocks_throttle + parameters: + affinity: '' + alias: '' + comment: '' + ignoretag: 'True' + maxoutbuf: '0' + minoutbuf: '0' + samples_per_second: samp_rate + type: byte + vlen: '1' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [392, 292] + rotation: 0 + state: enabled +- name: qtgui_time_sink_x_0 + id: qtgui_time_sink_x + parameters: + affinity: '' + alias: '' + alpha1: 
'1.0' + alpha10: '1.0' + alpha2: '1.0' + alpha3: '1.0' + alpha4: '1.0' + alpha5: '1.0' + alpha6: '1.0' + alpha7: '1.0' + alpha8: '1.0' + alpha9: '1.0' + autoscale: 'True' + axislabels: 'True' + color1: blue + color10: dark blue + color2: red + color3: green + color4: black + color5: cyan + color6: magenta + color7: yellow + color8: dark red + color9: dark green + comment: '' + ctrlpanel: 'False' + entags: 'True' + grid: 'True' + gui_hint: '' + label1: Before Verilog AXI + label10: '' + label2: After Verilog AXI + label3: '' + label4: '' + label5: '' + label6: '' + label7: '' + label8: '' + label9: '' + legend: 'True' + marker1: '-1' + marker10: '-1' + marker2: '-1' + marker3: '-1' + marker4: '-1' + marker5: '-1' + marker6: '-1' + marker7: '-1' + marker8: '-1' + marker9: '-1' + name: '"Input"' + nconnections: '1' + size: '1024' + srate: samp_rate + stemplot: 'False' + style1: '1' + style10: '1' + style2: '1' + style3: '1' + style4: '1' + style5: '1' + style6: '1' + style7: '1' + style8: '1' + style9: '1' + tr_chan: '0' + tr_delay: '0' + tr_level: '0.0' + tr_mode: qtgui.TRIG_MODE_FREE + tr_slope: qtgui.TRIG_SLOPE_POS + tr_tag: '""' + type: float + update_time: '0.10' + width1: '1' + width10: '1' + width2: '1' + width3: '1' + width4: '1' + width5: '1' + width6: '1' + width7: '1' + width8: '1' + width9: '1' + ylabel: Amplitude + ymax: '4' + ymin: '-4' + yunit: '""' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [1024, 268.0] + rotation: 0 + state: enabled +- name: qtgui_time_sink_x_0_0 + id: qtgui_time_sink_x + parameters: + affinity: '' + alias: '' + alpha1: '1.0' + alpha10: '1.0' + alpha2: '1.0' + alpha3: '1.0' + alpha4: '1.0' + alpha5: '1.0' + alpha6: '1.0' + alpha7: '1.0' + alpha8: '1.0' + alpha9: '1.0' + autoscale: 'True' + axislabels: 'True' + color1: blue + color10: dark blue + color2: red + color3: green + color4: black + color5: cyan + color6: magenta + color7: yellow + color8: dark red + color9: dark green + comment: '' + ctrlpanel: 'False' + entags: 'True' + grid: 'True' + gui_hint: '' + label1: Before Verilog AXI + label10: '' + label2: After Verilog AXI + label3: '' + label4: '' + label5: '' + label6: '' + label7: '' + label8: '' + label9: '' + legend: 'True' + marker1: '-1' + marker10: '-1' + marker2: '-1' + marker3: '-1' + marker4: '-1' + marker5: '-1' + marker6: '-1' + marker7: '-1' + marker8: '-1' + marker9: '-1' + name: '"Output"' + nconnections: '1' + size: '1024' + srate: samp_rate + stemplot: 'False' + style1: '1' + style10: '1' + style2: '1' + style3: '1' + style4: '1' + style5: '1' + style6: '1' + style7: '1' + style8: '1' + style9: '1' + tr_chan: '0' + tr_delay: '0' + tr_level: '0.0' + tr_mode: qtgui.TRIG_MODE_FREE + tr_slope: qtgui.TRIG_SLOPE_POS + tr_tag: '""' + type: float + update_time: '0.10' + width1: '1' + width10: '1' + width2: '1' + width3: '1' + width4: '1' + width5: '1' + width6: '1' + width7: '1' + width8: '1' + width9: '1' + ylabel: Amplitude + ymax: '4' + ymin: '-4' + yunit: '""' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [1024, 396.0] + rotation: 0 + state: enabled +- name: verilog_axi_xx_0 + id: verilog_axi_xx + parameters: + IO_ratio: '1.0' + affinity: '' + alias: '' + comment: '' + file: examples/double/double_axi.v + maxoutbuf: '0' + minoutbuf: '0' + module_flag: '0' + overwrite: 'True' + skip_output_items: '0' + type: byte + verilator_options: '' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [608, 384.0] + rotation: 0 + state: enabled + 
+connections: +- [analog_sig_source_x_0, '0', blocks_throttle_0, '0'] +- [blocks_char_to_float_0, '0', qtgui_time_sink_x_0, '0'] +- [blocks_char_to_float_0_0, '0', qtgui_time_sink_x_0_0, '0'] +- [blocks_throttle_0, '0', blocks_char_to_float_0, '0'] +- [blocks_throttle_0, '0', verilog_axi_xx_0, '0'] +- [verilog_axi_xx_0, '0', blocks_char_to_float_0_0, '0'] + +metadata: + file_format: 1 diff --git a/apps/verilog_axi_ff_demo.grc b/apps/verilog_axi_ff_demo.grc index 4c7a6b3..45f367e 100644 --- a/apps/verilog_axi_ff_demo.grc +++ b/apps/verilog_axi_ff_demo.grc @@ -1,687 +1,313 @@ - - - - Thu Aug 22 16:50:22 2019 - - options - - author - Bowen Hu - - - window_size - - - - category - [GRC Hier Blocks] - - - comment - - - - description - This is a demo of verilog_axi_ff block - - - _enabled - True - - - _coordinate - (8, 8) - - - _rotation - 0 - - - generate_options - qt_gui - - - hier_block_src_path - .: - - - id - verilog_axi_ff_demo - - - max_nouts - 0 - - - qt_qss_theme - - - - realtime_scheduling - - - - run_command - {python} -u {filename} - - - run_options - prompt - - - run - True - - - thread_safe_setters - - - - title - verilog_axi_ii demo - - - - variable - - comment - - - - _enabled - True - - - _coordinate - (8, 132) - - - _rotation - 0 - - - id - samp_rate - - - value - 32000 - - - - analog_sig_source_x - - amp - 1 - - - alias - - - - comment - - - - affinity - - - - _enabled - True - - - freq - 1000 - - - _coordinate - (208, 260) - - - _rotation - 0 - - - id - analog_sig_source_x_0 - - - maxoutbuf - 0 - - - minoutbuf - 0 - - - offset - 0 - - - type - float - - - samp_rate - samp_rate - - - waveform - analog.GR_COS_WAVE - - - - blocks_throttle - - alias - - - - comment - - - - affinity - - - - _enabled - True - - - _coordinate - (392, 292) - - - _rotation - 0 - - - id - blocks_throttle_0 - - - ignoretag - True - - - maxoutbuf - 0 - - - minoutbuf - 0 - - - samples_per_second - samp_rate - - - type - float - - - vlen - 1 - - - - qtgui_time_sink_x - - autoscale - False - - - axislabels - True - - - alias - - - - comment - - - - ctrlpanel - False - - - affinity - - - - entags - True - - - _enabled - True - - - _coordinate - (920, 292) - - - gui_hint - - - - _rotation - 0 - - - grid - False - - - id - qtgui_time_sink_x_0 - - - legend - True - - - alpha1 - 1.0 - - - color1 - "blue" - - - label1 - Before Verilog AXI - - - marker1 - -1 - - - style1 - 1 - - - width1 - 1 - - - alpha10 - 1.0 - - - color10 - "blue" - - - label10 - - - - marker10 - -1 - - - style10 - 1 - - - width10 - 1 - - - alpha2 - 1.0 - - - color2 - "red" - - - label2 - After Verilog AXI - - - marker2 - -1 - - - style2 - 1 - - - width2 - 1 - - - alpha3 - 1.0 - - - color3 - "green" - - - label3 - - - - marker3 - -1 - - - style3 - 1 - - - width3 - 1 - - - alpha4 - 1.0 - - - color4 - "black" - - - label4 - - - - marker4 - -1 - - - style4 - 1 - - - width4 - 1 - - - alpha5 - 1.0 - - - color5 - "cyan" - - - label5 - - - - marker5 - -1 - - - style5 - 1 - - - width5 - 1 - - - alpha6 - 1.0 - - - color6 - "magenta" - - - label6 - - - - marker6 - -1 - - - style6 - 1 - - - width6 - 1 - - - alpha7 - 1.0 - - - color7 - "yellow" - - - label7 - - - - marker7 - -1 - - - style7 - 1 - - - width7 - 1 - - - alpha8 - 1.0 - - - color8 - "dark red" - - - label8 - - - - marker8 - -1 - - - style8 - 1 - - - width8 - 1 - - - alpha9 - 1.0 - - - color9 - "dark green" - - - label9 - - - - marker9 - -1 - - - style9 - 1 - - - width9 - 1 - - - name - "" - - - nconnections - 2 - - - size - 1024 - - - srate - samp_rate - - - stemplot - False - - - tr_chan - 0 - 
- - tr_delay - 0 - - - tr_level - 0.0 - - - tr_mode - qtgui.TRIG_MODE_FREE - - - tr_slope - qtgui.TRIG_SLOPE_POS - - - tr_tag - "" - - - type - float - - - update_time - 0.10 - - - ylabel - Amplitude - - - yunit - "" - - - ymax - 4 - - - ymin - -4 - - - - verilog_axi_xx - - alias - - - - comment - - - - affinity - - - - _enabled - True - - - _coordinate - (584, 444) - - - _rotation - 0 - - - id - verilog_axi_xx_0 - - - IO_ratio - 1.0 - - - type - float - - - maxoutbuf - 0 - - - minoutbuf - 0 - - - module_flag - 0 - - - overwrite - True - - - skip_output_items - 0 - - - verilator_options - - - - file - - - - - analog_sig_source_x_0 - blocks_throttle_0 - 0 - 0 - - - blocks_throttle_0 - qtgui_time_sink_x_0 - 0 - 0 - - - blocks_throttle_0 - verilog_axi_xx_0 - 0 - 0 - - - verilog_axi_xx_0 - qtgui_time_sink_x_0 - 0 - 1 - - +options: + parameters: + author: Bowen Hu + catch_exceptions: 'True' + category: '[GRC Hier Blocks]' + cmake_opt: '' + comment: '' + copyright: '' + description: This is a demo of verilog_axi_ff block + gen_cmake: 'On' + gen_linking: dynamic + generate_options: qt_gui + hier_block_src_path: '.:' + id: verilog_axi_ff_demo + max_nouts: '0' + output_language: python + placement: (0,0) + qt_qss_theme: '' + realtime_scheduling: '' + run: 'True' + run_command: '{python} -u {filename}' + run_options: prompt + sizing_mode: fixed + thread_safe_setters: '' + title: verilog_axi_ii demo + window_size: '' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [8, 8] + rotation: 0 + state: enabled + +blocks: +- name: samp_rate + id: variable + parameters: + comment: '' + value: '32000' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [8, 132] + rotation: 0 + state: enabled +- name: analog_sig_source_x_0 + id: analog_sig_source_x + parameters: + affinity: '' + alias: '' + amp: '1' + comment: '' + freq: '1000' + maxoutbuf: '0' + minoutbuf: '0' + offset: '0' + phase: '0' + samp_rate: samp_rate + type: float + waveform: analog.GR_COS_WAVE + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [216, 264.0] + rotation: 0 + state: enabled +- name: blocks_throttle_0 + id: blocks_throttle + parameters: + affinity: '' + alias: '' + comment: '' + ignoretag: 'True' + maxoutbuf: '0' + minoutbuf: '0' + samples_per_second: samp_rate + type: float + vlen: '1' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [432, 300.0] + rotation: 0 + state: enabled +- name: qtgui_time_sink_x_0 + id: qtgui_time_sink_x + parameters: + affinity: '' + alias: '' + alpha1: '1.0' + alpha10: '1.0' + alpha2: '1.0' + alpha3: '1.0' + alpha4: '1.0' + alpha5: '1.0' + alpha6: '1.0' + alpha7: '1.0' + alpha8: '1.0' + alpha9: '1.0' + autoscale: 'True' + axislabels: 'True' + color1: blue + color10: dark blue + color2: red + color3: green + color4: black + color5: cyan + color6: magenta + color7: yellow + color8: dark red + color9: dark green + comment: '' + ctrlpanel: 'False' + entags: 'True' + grid: 'True' + gui_hint: '' + label1: Before Verilog AXI + label10: '' + label2: After Verilog AXI + label3: '' + label4: '' + label5: '' + label6: '' + label7: '' + label8: '' + label9: '' + legend: 'True' + marker1: '-1' + marker10: '-1' + marker2: '-1' + marker3: '-1' + marker4: '-1' + marker5: '-1' + marker6: '-1' + marker7: '-1' + marker8: '-1' + marker9: '-1' + name: '"Input"' + nconnections: '1' + size: '1024' + srate: samp_rate + stemplot: 'False' + style1: '1' + style10: '1' + style2: '1' + style3: '1' + style4: 
'1' + style5: '1' + style6: '1' + style7: '1' + style8: '1' + style9: '1' + tr_chan: '0' + tr_delay: '0' + tr_level: '0.0' + tr_mode: qtgui.TRIG_MODE_FREE + tr_slope: qtgui.TRIG_SLOPE_POS + tr_tag: '""' + type: float + update_time: '0.10' + width1: '1' + width10: '1' + width2: '1' + width3: '1' + width4: '1' + width5: '1' + width6: '1' + width7: '1' + width8: '1' + width9: '1' + ylabel: Amplitude + ymax: '4' + ymin: '-4' + yunit: '""' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [872, 276.0] + rotation: 0 + state: enabled +- name: qtgui_time_sink_x_0_0 + id: qtgui_time_sink_x + parameters: + affinity: '' + alias: '' + alpha1: '1.0' + alpha10: '1.0' + alpha2: '1.0' + alpha3: '1.0' + alpha4: '1.0' + alpha5: '1.0' + alpha6: '1.0' + alpha7: '1.0' + alpha8: '1.0' + alpha9: '1.0' + autoscale: 'True' + axislabels: 'True' + color1: blue + color10: dark blue + color2: red + color3: green + color4: black + color5: cyan + color6: magenta + color7: yellow + color8: dark red + color9: dark green + comment: '' + ctrlpanel: 'False' + entags: 'True' + grid: 'True' + gui_hint: '' + label1: Before Verilog AXI + label10: '' + label2: After Verilog AXI + label3: '' + label4: '' + label5: '' + label6: '' + label7: '' + label8: '' + label9: '' + legend: 'True' + marker1: '-1' + marker10: '-1' + marker2: '-1' + marker3: '-1' + marker4: '-1' + marker5: '-1' + marker6: '-1' + marker7: '-1' + marker8: '-1' + marker9: '-1' + name: '"Ouput"' + nconnections: '1' + size: '1024' + srate: samp_rate + stemplot: 'False' + style1: '1' + style10: '1' + style2: '1' + style3: '1' + style4: '1' + style5: '1' + style6: '1' + style7: '1' + style8: '1' + style9: '1' + tr_chan: '0' + tr_delay: '0' + tr_level: '0.0' + tr_mode: qtgui.TRIG_MODE_FREE + tr_slope: qtgui.TRIG_SLOPE_POS + tr_tag: '""' + type: float + update_time: '0.10' + width1: '1' + width10: '1' + width2: '1' + width3: '1' + width4: '1' + width5: '1' + width6: '1' + width7: '1' + width8: '1' + width9: '1' + ylabel: Amplitude + ymax: '4' + ymin: '-4' + yunit: '""' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [872, 372.0] + rotation: 0 + state: enabled +- name: verilog_axi_xx_0 + id: verilog_axi_xx + parameters: + IO_ratio: '1.0' + affinity: '' + alias: '' + comment: '' + file: examples/double/double_axi.v + maxoutbuf: '0' + minoutbuf: '0' + module_flag: '0' + overwrite: 'True' + skip_output_items: '0' + type: float + verilator_options: '' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [664, 360.0] + rotation: 0 + state: enabled + +connections: +- [analog_sig_source_x_0, '0', blocks_throttle_0, '0'] +- [blocks_throttle_0, '0', qtgui_time_sink_x_0, '0'] +- [blocks_throttle_0, '0', verilog_axi_xx_0, '0'] +- [verilog_axi_xx_0, '0', qtgui_time_sink_x_0_0, '0'] + +metadata: + file_format: 1 diff --git a/apps/verilog_axi_ii_demo.grc b/apps/verilog_axi_ii_demo.grc index 715bc78..09a6d21 100644 --- a/apps/verilog_axi_ii_demo.grc +++ b/apps/verilog_axi_ii_demo.grc @@ -1,1156 +1,347 @@ - - - - Thu Aug 22 14:47:29 2019 - - options - - author - Bowen Hu - - - window_size - - - - category - [GRC Hier Blocks] - - - comment - - - - description - This is a demo of verilog_axi_ii block - - - _enabled - True - - - _coordinate - (8, 8) - - - _rotation - 0 - - - generate_options - qt_gui - - - hier_block_src_path - .: - - - id - verilog_axi_ii_demo - - - max_nouts - 0 - - - qt_qss_theme - - - - realtime_scheduling - - - - run_command - {python} -u {filename} - - - run_options - 
prompt - - - run - True - - - thread_safe_setters - - - - title - verilog_axi_ii demo - - - - variable - - comment - - - - _enabled - True - - - _coordinate - (8, 132) - - - _rotation - 0 - - - id - samp_rate - - - value - 32000 - - - - analog_fastnoise_source_x - - amp - 32 - - - alias - - - - comment - - - - affinity - - - - _enabled - True - - - _coordinate - (112, 268) - - - _rotation - 0 - - - id - analog_fastnoise_source_x_0 - - - maxoutbuf - 0 - - - minoutbuf - 0 - - - noise_type - analog.GR_GAUSSIAN - - - type - int - - - seed - 0 - - - samples - 8192 - - - - blocks_int_to_float - - alias - - - - comment - - - - affinity - - - - _enabled - True - - - _coordinate - (760, 292) - - - _rotation - 0 - - - id - blocks_int_to_float_0 - - - maxoutbuf - 0 - - - minoutbuf - 0 - - - scale - 1 - - - vlen - 1 - - - - blocks_int_to_float - - alias - - - - comment - - - - affinity - - - - _enabled - True - - - _coordinate - (760, 484) - - - _rotation - 0 - - - id - blocks_int_to_float_1 - - - maxoutbuf - 0 - - - minoutbuf - 0 - - - scale - 1 - - - vlen - 1 - - - - blocks_throttle - - alias - - - - comment - - - - affinity - - - - _enabled - True - - - _coordinate - (328, 292) - - - _rotation - 0 - - - id - blocks_throttle_0 - - - ignoretag - True - - - maxoutbuf - 0 - - - minoutbuf - 0 - - - samples_per_second - samp_rate - - - type - int - - - vlen - 1 - - - - qtgui_time_sink_x - - autoscale - False - - - axislabels - True - - - alias - - - - comment - - - - ctrlpanel - False - - - affinity - - - - entags - True - - - _enabled - True - - - _coordinate - (936, 460) - - - gui_hint - - - - _rotation - 0 - - - grid - False - - - id - qtgui_time_sink_x_0 - - - legend - True - - - alpha1 - 1.0 - - - color1 - "blue" - - - label1 - - - - marker1 - -1 - - - style1 - 1 - - - width1 - 1 - - - alpha10 - 1.0 - - - color10 - "blue" - - - label10 - - - - marker10 - -1 - - - style10 - 1 - - - width10 - 1 - - - alpha2 - 1.0 - - - color2 - "red" - - - label2 - - - - marker2 - -1 - - - style2 - 1 - - - width2 - 1 - - - alpha3 - 1.0 - - - color3 - "green" - - - label3 - - - - marker3 - -1 - - - style3 - 1 - - - width3 - 1 - - - alpha4 - 1.0 - - - color4 - "black" - - - label4 - - - - marker4 - -1 - - - style4 - 1 - - - width4 - 1 - - - alpha5 - 1.0 - - - color5 - "cyan" - - - label5 - - - - marker5 - -1 - - - style5 - 1 - - - width5 - 1 - - - alpha6 - 1.0 - - - color6 - "magenta" - - - label6 - - - - marker6 - -1 - - - style6 - 1 - - - width6 - 1 - - - alpha7 - 1.0 - - - color7 - "yellow" - - - label7 - - - - marker7 - -1 - - - style7 - 1 - - - width7 - 1 - - - alpha8 - 1.0 - - - color8 - "dark red" - - - label8 - - - - marker8 - -1 - - - style8 - 1 - - - width8 - 1 - - - alpha9 - 1.0 - - - color9 - "dark green" - - - label9 - - - - marker9 - -1 - - - style9 - 1 - - - width9 - 1 - - - name - "After Verilog AXI" - - - nconnections - 1 - - - size - 1024 - - - srate - samp_rate - - - stemplot - False - - - tr_chan - 0 - - - tr_delay - 0 - - - tr_level - 0.0 - - - tr_mode - qtgui.TRIG_MODE_FREE - - - tr_slope - qtgui.TRIG_SLOPE_POS - - - tr_tag - "" - - - type - float - - - update_time - 0.10 - - - ylabel - Amplitude - - - yunit - "" - - - ymax - 256 - - - ymin - -256 - - - - qtgui_time_sink_x - - autoscale - False - - - axislabels - True - - - alias - - - - comment - - - - ctrlpanel - False - - - affinity - - - - entags - True - - - _enabled - True - - - _coordinate - (936, 268) - - - gui_hint - - - - _rotation - 0 - - - grid - False - - - id - qtgui_time_sink_x_1 - - - legend - True - - - alpha1 - 1.0 - - - color1 - 
"blue" - - - label1 - - - - marker1 - -1 - - - style1 - 1 - - - width1 - 1 - - - alpha10 - 1.0 - - - color10 - "blue" - - - label10 - - - - marker10 - -1 - - - style10 - 1 - - - width10 - 1 - - - alpha2 - 1.0 - - - color2 - "red" - - - label2 - - - - marker2 - -1 - - - style2 - 1 - - - width2 - 1 - - - alpha3 - 1.0 - - - color3 - "green" - - - label3 - - - - marker3 - -1 - - - style3 - 1 - - - width3 - 1 - - - alpha4 - 1.0 - - - color4 - "black" - - - label4 - - - - marker4 - -1 - - - style4 - 1 - - - width4 - 1 - - - alpha5 - 1.0 - - - color5 - "cyan" - - - label5 - - - - marker5 - -1 - - - style5 - 1 - - - width5 - 1 - - - alpha6 - 1.0 - - - color6 - "magenta" - - - label6 - - - - marker6 - -1 - - - style6 - 1 - - - width6 - 1 - - - alpha7 - 1.0 - - - color7 - "yellow" - - - label7 - - - - marker7 - -1 - - - style7 - 1 - - - width7 - 1 - - - alpha8 - 1.0 - - - color8 - "dark red" - - - label8 - - - - marker8 - -1 - - - style8 - 1 - - - width8 - 1 - - - alpha9 - 1.0 - - - color9 - "dark green" - - - label9 - - - - marker9 - -1 - - - style9 - 1 - - - width9 - 1 - - - name - "Before Verilog AXI" - - - nconnections - 1 - - - size - 1024 - - - srate - samp_rate - - - stemplot - False - - - tr_chan - 0 - - - tr_delay - 0 - - - tr_level - 0.0 - - - tr_mode - qtgui.TRIG_MODE_FREE - - - tr_slope - qtgui.TRIG_SLOPE_POS - - - tr_tag - "" - - - type - float - - - update_time - 0.10 - - - ylabel - Amplitude - - - yunit - "" - - - ymax - 256 - - - ymin - -256 - - - - verilog_axi_xx - - alias - - - - comment - - - - affinity - - - - _enabled - True - - - _coordinate - (512, 444) - - - _rotation - 0 - - - id - verilog_axi_xx_0 - - - IO_ratio - 1.0 - - - type - int - - - maxoutbuf - 0 - - - minoutbuf - 0 - - - module_flag - 0 - - - overwrite - True - - - skip_output_items - 0 - - - verilator_options - - - - file - - - - - analog_fastnoise_source_x_0 - blocks_throttle_0 - 0 - 0 - - - blocks_int_to_float_0 - qtgui_time_sink_x_1 - 0 - 0 - - - blocks_int_to_float_1 - qtgui_time_sink_x_0 - 0 - 0 - - - blocks_throttle_0 - blocks_int_to_float_0 - 0 - 0 - - - blocks_throttle_0 - verilog_axi_xx_0 - 0 - 0 - - - verilog_axi_xx_0 - blocks_int_to_float_1 - 0 - 0 - - +options: + parameters: + author: Bowen Hu + catch_exceptions: 'True' + category: '[GRC Hier Blocks]' + cmake_opt: '' + comment: '' + copyright: '' + description: This is a demo of verilog_axi_ii block + gen_cmake: 'On' + gen_linking: dynamic + generate_options: qt_gui + hier_block_src_path: '.:' + id: verilog_axi_ii_demo + max_nouts: '0' + output_language: python + placement: (0,0) + qt_qss_theme: '' + realtime_scheduling: '' + run: 'True' + run_command: '{python} -u {filename}' + run_options: prompt + sizing_mode: fixed + thread_safe_setters: '' + title: verilog_axi_ii demo + window_size: '' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [8, 8] + rotation: 0 + state: enabled + +blocks: +- name: samp_rate + id: variable + parameters: + comment: '' + value: '32000' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [8, 132] + rotation: 0 + state: enabled +- name: analog_fastnoise_source_x_0 + id: analog_fastnoise_source_x + parameters: + affinity: '' + alias: '' + amp: '32' + comment: '' + maxoutbuf: '0' + minoutbuf: '0' + noise_type: analog.GR_GAUSSIAN + samples: '8192' + seed: '0' + type: int + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [112, 268] + rotation: 0 + state: enabled +- name: blocks_int_to_float_0 + id: blocks_int_to_float + 
parameters: + affinity: '' + alias: '' + comment: '' + maxoutbuf: '0' + minoutbuf: '0' + scale: '1' + vlen: '1' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [760, 292] + rotation: 0 + state: enabled +- name: blocks_int_to_float_1 + id: blocks_int_to_float + parameters: + affinity: '' + alias: '' + comment: '' + maxoutbuf: '0' + minoutbuf: '0' + scale: '1' + vlen: '1' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [760, 428.0] + rotation: 0 + state: enabled +- name: blocks_throttle_0 + id: blocks_throttle + parameters: + affinity: '' + alias: '' + comment: '' + ignoretag: 'True' + maxoutbuf: '0' + minoutbuf: '0' + samples_per_second: samp_rate + type: int + vlen: '1' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [328, 292] + rotation: 0 + state: enabled +- name: qtgui_time_sink_x_0 + id: qtgui_time_sink_x + parameters: + affinity: '' + alias: '' + alpha1: '1.0' + alpha10: '1.0' + alpha2: '1.0' + alpha3: '1.0' + alpha4: '1.0' + alpha5: '1.0' + alpha6: '1.0' + alpha7: '1.0' + alpha8: '1.0' + alpha9: '1.0' + autoscale: 'True' + axislabels: 'True' + color1: blue + color10: dark blue + color2: red + color3: green + color4: black + color5: cyan + color6: magenta + color7: yellow + color8: dark red + color9: dark green + comment: '' + ctrlpanel: 'False' + entags: 'True' + grid: 'True' + gui_hint: '' + label1: '' + label10: '' + label2: '' + label3: '' + label4: '' + label5: '' + label6: '' + label7: '' + label8: '' + label9: '' + legend: 'True' + marker1: '-1' + marker10: '-1' + marker2: '-1' + marker3: '-1' + marker4: '-1' + marker5: '-1' + marker6: '-1' + marker7: '-1' + marker8: '-1' + marker9: '-1' + name: '"After Verilog AXI"' + nconnections: '1' + size: '1024' + srate: samp_rate + stemplot: 'False' + style1: '1' + style10: '1' + style2: '1' + style3: '1' + style4: '1' + style5: '1' + style6: '1' + style7: '1' + style8: '1' + style9: '1' + tr_chan: '0' + tr_delay: '0' + tr_level: '0.0' + tr_mode: qtgui.TRIG_MODE_FREE + tr_slope: qtgui.TRIG_SLOPE_POS + tr_tag: '""' + type: float + update_time: '0.10' + width1: '1' + width10: '1' + width2: '1' + width3: '1' + width4: '1' + width5: '1' + width6: '1' + width7: '1' + width8: '1' + width9: '1' + ylabel: Amplitude + ymax: '256' + ymin: '-256' + yunit: '""' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [936, 404.0] + rotation: 0 + state: enabled +- name: qtgui_time_sink_x_1 + id: qtgui_time_sink_x + parameters: + affinity: '' + alias: '' + alpha1: '1.0' + alpha10: '1.0' + alpha2: '1.0' + alpha3: '1.0' + alpha4: '1.0' + alpha5: '1.0' + alpha6: '1.0' + alpha7: '1.0' + alpha8: '1.0' + alpha9: '1.0' + autoscale: 'True' + axislabels: 'True' + color1: blue + color10: dark blue + color2: red + color3: green + color4: black + color5: cyan + color6: magenta + color7: yellow + color8: dark red + color9: dark green + comment: '' + ctrlpanel: 'False' + entags: 'True' + grid: 'True' + gui_hint: '' + label1: '' + label10: '' + label2: '' + label3: '' + label4: '' + label5: '' + label6: '' + label7: '' + label8: '' + label9: '' + legend: 'True' + marker1: '-1' + marker10: '-1' + marker2: '-1' + marker3: '-1' + marker4: '-1' + marker5: '-1' + marker6: '-1' + marker7: '-1' + marker8: '-1' + marker9: '-1' + name: '"Before Verilog AXI"' + nconnections: '1' + size: '1024' + srate: samp_rate + stemplot: 'False' + style1: '1' + style10: '1' + style2: '1' + style3: '1' + style4: '1' + style5: '1' + style6: '1' + style7: '1' 
+ style8: '1' + style9: '1' + tr_chan: '0' + tr_delay: '0' + tr_level: '0.0' + tr_mode: qtgui.TRIG_MODE_FREE + tr_slope: qtgui.TRIG_SLOPE_POS + tr_tag: '""' + type: float + update_time: '0.10' + width1: '1' + width10: '1' + width2: '1' + width3: '1' + width4: '1' + width5: '1' + width6: '1' + width7: '1' + width8: '1' + width9: '1' + ylabel: Amplitude + ymax: '256' + ymin: '-256' + yunit: '""' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [936, 268] + rotation: 0 + state: enabled +- name: verilog_axi_xx_0 + id: verilog_axi_xx + parameters: + IO_ratio: '1.0' + affinity: '' + alias: '' + comment: '' + file: examples/double/double_axi.v + maxoutbuf: '0' + minoutbuf: '0' + module_flag: '0' + overwrite: 'True' + skip_output_items: '0' + type: int + verilator_options: '' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [528, 392.0] + rotation: 0 + state: enabled + +connections: +- [analog_fastnoise_source_x_0, '0', blocks_throttle_0, '0'] +- [blocks_int_to_float_0, '0', qtgui_time_sink_x_1, '0'] +- [blocks_int_to_float_1, '0', qtgui_time_sink_x_0, '0'] +- [blocks_throttle_0, '0', blocks_int_to_float_0, '0'] +- [blocks_throttle_0, '0', verilog_axi_xx_0, '0'] +- [verilog_axi_xx_0, '0', blocks_int_to_float_1, '0'] + +metadata: + file_format: 1 diff --git a/apps/verilog_axi_ss_demo.grc b/apps/verilog_axi_ss_demo.grc new file mode 100644 index 0000000..38bf7b8 --- /dev/null +++ b/apps/verilog_axi_ss_demo.grc @@ -0,0 +1,252 @@ +options: + parameters: + author: Bowen Hu + catch_exceptions: 'True' + category: '[GRC Hier Blocks]' + cmake_opt: '' + comment: '' + copyright: '' + description: This is a demo of verilog_axi_ff block + gen_cmake: 'On' + gen_linking: dynamic + generate_options: qt_gui + hier_block_src_path: '.:' + id: verilog_axi_ff_demo + max_nouts: '0' + output_language: python + placement: (0,0) + qt_qss_theme: '' + realtime_scheduling: '' + run: 'True' + run_command: '{python} -u {filename}' + run_options: prompt + sizing_mode: fixed + thread_safe_setters: '' + title: verilog_axi_ii demo + window_size: '' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [8, 8] + rotation: 0 + state: enabled + +blocks: +- name: samp_rate + id: variable + parameters: + comment: '' + value: '32000' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [8, 132] + rotation: 0 + state: enabled +- name: analog_sig_source_x_0 + id: analog_sig_source_x + parameters: + affinity: '' + alias: '' + amp: '100' + comment: '' + freq: '1000' + maxoutbuf: '0' + minoutbuf: '0' + offset: '0' + phase: '0' + samp_rate: samp_rate + type: short + waveform: analog.GR_COS_WAVE + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [152, 256.0] + rotation: 0 + state: enabled +- name: blocks_short_to_float_0 + id: blocks_short_to_float + parameters: + affinity: '' + alias: '' + comment: '' + maxoutbuf: '0' + minoutbuf: '0' + scale: '1' + vlen: '1' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [848, 292.0] + rotation: 0 + state: true +- name: blocks_short_to_float_0_0 + id: blocks_short_to_float + parameters: + affinity: '' + alias: '' + comment: '' + maxoutbuf: '0' + minoutbuf: '0' + scale: '1' + vlen: '1' + states: + bus_sink: false + bus_source: false + bus_structure: null + coordinate: [848, 340.0] + rotation: 0 + state: true +- name: blocks_throttle_0 + id: blocks_throttle + parameters: + affinity: '' + alias: '' + comment: '' + 
+    ignoretag: 'True'
+    maxoutbuf: '0'
+    minoutbuf: '0'
+    samples_per_second: samp_rate
+    type: short
+    vlen: '1'
+  states:
+    bus_sink: false
+    bus_source: false
+    bus_structure: null
+    coordinate: [392, 292]
+    rotation: 0
+    state: enabled
+- name: qtgui_time_sink_x_0
+  id: qtgui_time_sink_x
+  parameters:
+    affinity: ''
+    alias: ''
+    alpha1: '1.0'
+    alpha10: '1.0'
+    alpha2: '1.0'
+    alpha3: '1.0'
+    alpha4: '1.0'
+    alpha5: '1.0'
+    alpha6: '1.0'
+    alpha7: '1.0'
+    alpha8: '1.0'
+    alpha9: '1.0'
+    autoscale: 'True'
+    axislabels: 'True'
+    color1: blue
+    color10: dark blue
+    color2: red
+    color3: green
+    color4: black
+    color5: cyan
+    color6: magenta
+    color7: yellow
+    color8: dark red
+    color9: dark green
+    comment: ''
+    ctrlpanel: 'False'
+    entags: 'True'
+    grid: 'True'
+    gui_hint: ''
+    label1: Before Verilog AXI
+    label10: ''
+    label2: After Verilog AXI
+    label3: ''
+    label4: ''
+    label5: ''
+    label6: ''
+    label7: ''
+    label8: ''
+    label9: ''
+    legend: 'True'
+    marker1: '-1'
+    marker10: '-1'
+    marker2: '-1'
+    marker3: '-1'
+    marker4: '-1'
+    marker5: '-1'
+    marker6: '-1'
+    marker7: '-1'
+    marker8: '-1'
+    marker9: '-1'
+    name: '""'
+    nconnections: '2'
+    size: '1024'
+    srate: samp_rate
+    stemplot: 'False'
+    style1: '1'
+    style10: '1'
+    style2: '1'
+    style3: '1'
+    style4: '1'
+    style5: '1'
+    style6: '1'
+    style7: '1'
+    style8: '1'
+    style9: '1'
+    tr_chan: '0'
+    tr_delay: '0'
+    tr_level: '0.0'
+    tr_mode: qtgui.TRIG_MODE_FREE
+    tr_slope: qtgui.TRIG_SLOPE_POS
+    tr_tag: '""'
+    type: float
+    update_time: '0.10'
+    width1: '1'
+    width10: '1'
+    width2: '1'
+    width3: '1'
+    width4: '1'
+    width5: '1'
+    width6: '1'
+    width7: '1'
+    width8: '1'
+    width9: '1'
+    ylabel: Amplitude
+    ymax: '4'
+    ymin: '-4'
+    yunit: '""'
+  states:
+    bus_sink: false
+    bus_source: false
+    bus_structure: null
+    coordinate: [1040, 300.0]
+    rotation: 0
+    state: enabled
+- name: verilog_axi_xx_0
+  id: verilog_axi_xx
+  parameters:
+    IO_ratio: '1.0'
+    affinity: ''
+    alias: ''
+    comment: ''
+    file: examples/double/double_axi.v
+    maxoutbuf: '0'
+    minoutbuf: '0'
+    module_flag: '0'
+    overwrite: 'True'
+    skip_output_items: '0'
+    type: short
+    verilator_options: ''
+  states:
+    bus_sink: false
+    bus_source: false
+    bus_structure: null
+    coordinate: [584, 444]
+    rotation: 0
+    state: enabled
+
+connections:
+- [analog_sig_source_x_0, '0', blocks_throttle_0, '0']
+- [blocks_short_to_float_0, '0', qtgui_time_sink_x_0, '0']
+- [blocks_short_to_float_0_0, '0', qtgui_time_sink_x_0, '1']
+- [blocks_throttle_0, '0', blocks_short_to_float_0, '0']
+- [blocks_throttle_0, '0', verilog_axi_xx_0, '0']
+- [verilog_axi_xx_0, '0', blocks_short_to_float_0_0, '0']
+
+metadata:
+  file_format: 1

From f572a7fe1adafb3d61d76fdf8f961f11080d2ea1 Mon Sep 17 00:00:00 2001
From: Ferdinand Stehle
Date: Tue, 14 Mar 2023 10:47:09 +0100
Subject: [PATCH 11/13] put verilator options in double quotes to allow whitespace in them

---
 lib/verilog_axi_bb_impl.cc | 2 +-
 lib/verilog_axi_cc_impl.cc | 2 +-
 lib/verilog_axi_ff_impl.cc | 2 +-
 lib/verilog_axi_ii_impl.cc | 2 +-
 lib/verilog_axi_ss_impl.cc | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/lib/verilog_axi_bb_impl.cc b/lib/verilog_axi_bb_impl.cc
index cb62172..2c2d682 100644
--- a/lib/verilog_axi_bb_impl.cc
+++ b/lib/verilog_axi_bb_impl.cc
@@ -357,7 +357,7 @@ namespace gr {
       cmd += std::string(" ") + "USER_CPP_FILENAME=" + CPP_TEMPLATE_NAME;
       cmd += std::string(" ") + " M_DIR=" + M_dir;
       // cmd += verilator_options:
-      cmd += std::string(" ") + "VERILATOR_OPTIONS=" + this->verilator_options;
+      cmd += std::string(" ") + "VERILATOR_OPTIONS=\"" + this->verilator_options + "\"";

       cmd += ENTER;
       cmd += ENTER;
diff --git a/lib/verilog_axi_cc_impl.cc b/lib/verilog_axi_cc_impl.cc
index 6b1e5f8..c92df37 100644
--- a/lib/verilog_axi_cc_impl.cc
+++ b/lib/verilog_axi_cc_impl.cc
@@ -355,7 +355,7 @@ namespace gr {
       cmd += std::string(" ") + "USER_CPP_FILENAME=" + CPP_TEMPLATE_NAME;
       cmd += std::string(" ") + " M_DIR=" + M_dir;
       // cmd += verilator_options:
-      cmd += std::string(" ") + "VERILATOR_OPTIONS=" + this->verilator_options;
+      cmd += std::string(" ") + "VERILATOR_OPTIONS=\"" + this->verilator_options + "\"";

       cmd += ENTER;
       cmd += ENTER;
diff --git a/lib/verilog_axi_ff_impl.cc b/lib/verilog_axi_ff_impl.cc
index 0a12880..ce1925b 100644
--- a/lib/verilog_axi_ff_impl.cc
+++ b/lib/verilog_axi_ff_impl.cc
@@ -355,7 +355,7 @@ namespace gr {
       cmd += std::string(" ") + "USER_CPP_FILENAME=" + CPP_TEMPLATE_NAME;
       cmd += std::string(" ") + " M_DIR=" + M_dir;
       // cmd += verilator_options:
-      cmd += std::string(" ") + "VERILATOR_OPTIONS=" + this->verilator_options;
+      cmd += std::string(" ") + "VERILATOR_OPTIONS=\"" + this->verilator_options + "\"";

       cmd += ENTER;
       cmd += ENTER;
diff --git a/lib/verilog_axi_ii_impl.cc b/lib/verilog_axi_ii_impl.cc
index f2de3ec..e846977 100644
--- a/lib/verilog_axi_ii_impl.cc
+++ b/lib/verilog_axi_ii_impl.cc
@@ -355,7 +355,7 @@ namespace gr {
       cmd += std::string(" ") + "USER_CPP_FILENAME=" + CPP_TEMPLATE_NAME;
       cmd += std::string(" ") + " M_DIR=" + M_dir;
       // cmd += verilator_options:
-      cmd += std::string(" ") + "VERILATOR_OPTIONS=" + this->verilator_options;
+      cmd += std::string(" ") + "VERILATOR_OPTIONS=\"" + this->verilator_options + "\"";

       cmd += ENTER;
       cmd += ENTER;
diff --git a/lib/verilog_axi_ss_impl.cc b/lib/verilog_axi_ss_impl.cc
index 2f91916..9fda429 100644
--- a/lib/verilog_axi_ss_impl.cc
+++ b/lib/verilog_axi_ss_impl.cc
@@ -355,7 +355,7 @@ namespace gr {
       cmd += std::string(" ") + "USER_CPP_FILENAME=" + CPP_TEMPLATE_NAME;
       cmd += std::string(" ") + " M_DIR=" + M_dir;
       // cmd += verilator_options:
-      cmd += std::string(" ") + "VERILATOR_OPTIONS=" + this->verilator_options;
+      cmd += std::string(" ") + "VERILATOR_OPTIONS=\"" + this->verilator_options + "\"";

       cmd += ENTER;
       cmd += ENTER;

From 0b41b2167967cf8bf8f40bfc7ef9f101f9870937 Mon Sep 17 00:00:00 2001
From: Ferdinand Stehle
Date: Tue, 14 Mar 2023 10:50:16 +0100
Subject: [PATCH 12/13] avoid warnings due to different signedness

---
 lib/verilog_axi_bb_impl.cc | 4 ++--
 lib/verilog_axi_cc_impl.cc | 4 ++--
 lib/verilog_axi_ff_impl.cc | 4 ++--
 lib/verilog_axi_ii_impl.cc | 4 ++--
 lib/verilog_axi_ss_impl.cc | 4 ++--
 5 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/lib/verilog_axi_bb_impl.cc b/lib/verilog_axi_bb_impl.cc
index 2c2d682..fdd2a48 100644
--- a/lib/verilog_axi_bb_impl.cc
+++ b/lib/verilog_axi_bb_impl.cc
@@ -244,8 +244,8 @@ namespace gr {

       // Do <+signal processing+>

-      unsigned int input_i;
-      unsigned int output_i;
+      int input_i;
+      int output_i;
       for (input_i = 0, output_i = 0;
            output_i < noutput_items && input_i < ninput_items[0];) {
         unsigned char status_code;
diff --git a/lib/verilog_axi_cc_impl.cc b/lib/verilog_axi_cc_impl.cc
index c92df37..e7dc61e 100644
--- a/lib/verilog_axi_cc_impl.cc
+++ b/lib/verilog_axi_cc_impl.cc
@@ -242,8 +242,8 @@ namespace gr {

       // Do <+signal processing+>

-      unsigned int input_i;
-      unsigned int output_i;
+      int input_i;
+      int output_i;
       for (input_i = 0, output_i = 0;
            output_i < noutput_items && input_i < ninput_items[0];) {
         unsigned char status_code;
diff --git a/lib/verilog_axi_ff_impl.cc b/lib/verilog_axi_ff_impl.cc
index ce1925b..406b227 100644
--- a/lib/verilog_axi_ff_impl.cc
+++ b/lib/verilog_axi_ff_impl.cc
@@ -242,8 +242,8 @@ namespace gr {

       // Do <+signal processing+>

-      unsigned int input_i;
-      unsigned int output_i;
+      int input_i;
+      int output_i;
       for (input_i = 0, output_i = 0;
            output_i < noutput_items && input_i < ninput_items[0];) {
         unsigned char status_code;
diff --git a/lib/verilog_axi_ii_impl.cc b/lib/verilog_axi_ii_impl.cc
index e846977..e97aeb4 100644
--- a/lib/verilog_axi_ii_impl.cc
+++ b/lib/verilog_axi_ii_impl.cc
@@ -242,8 +242,8 @@ namespace gr {

       // Do <+signal processing+>

-      unsigned int input_i;
-      unsigned int output_i;
+      int input_i;
+      int output_i;
       for (input_i = 0, output_i = 0;
            output_i < noutput_items && input_i < ninput_items[0];) {
         unsigned char status_code;
diff --git a/lib/verilog_axi_ss_impl.cc b/lib/verilog_axi_ss_impl.cc
index 9fda429..7d161b6 100644
--- a/lib/verilog_axi_ss_impl.cc
+++ b/lib/verilog_axi_ss_impl.cc
@@ -242,8 +242,8 @@ namespace gr {

       // Do <+signal processing+>

-      unsigned int input_i;
-      unsigned int output_i;
+      int input_i;
+      int output_i;
       for (input_i = 0, output_i = 0;
            output_i < noutput_items && input_i < ninput_items[0];) {
         unsigned char status_code;

From 7109ad6b5c16afdac0c35f80ebe42019df1127c3 Mon Sep 17 00:00:00 2001
From: Ferdinand Stehle
Date: Tue, 14 Mar 2023 11:14:22 +0100
Subject: [PATCH 13/13] extend gitignore

---
 .gitignore                | 3 +++
 python/verilog/.gitignore | 5 -----
 2 files changed, 3 insertions(+), 5 deletions(-)
 delete mode 100644 python/verilog/.gitignore

diff --git a/.gitignore b/.gitignore
index 3d23046..7405b78 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,3 +2,6 @@
 build/
 *.pyc
 obj_dir
+examples/*/axi_module*
+python/verilog/testcases/*/axi_module*
+apps/*.py
diff --git a/python/verilog/.gitignore b/python/verilog/.gitignore
deleted file mode 100644
index 85c92e8..0000000
--- a/python/verilog/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-*~
-*.pyc
-*.pyo
-build*/
-examples/grc/*.py
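
A note on patch 11, for readers skimming the diffs above: each impl file assembles one long shell command string and passes the user-supplied Verilator flags to make through a VERILATOR_OPTIONS variable, so whitespace inside those flags would otherwise be split into separate words on the command line. The standalone C++ sketch below is not part of the patch series; the make prefix and the example option value are assumptions made purely for illustration, but it shows the same quoting idea in isolation.

// Illustrative sketch only: contrasts the pre- and post-patch-11 way of
// appending VERILATOR_OPTIONS to a command string. The option value is a
// hypothetical example containing whitespace.
#include <iostream>
#include <string>

int main()
{
    const std::string verilator_options = "-Wno-WIDTH -Wno-UNUSED"; // hypothetical flags

    // Pre-patch form: the shell splits the value into two words, so make sees
    // VERILATOR_OPTIONS=-Wno-WIDTH and the rest arrives as separate, unrelated arguments.
    const std::string unquoted =
        std::string("make") + std::string(" ") + "VERILATOR_OPTIONS=" + verilator_options;

    // Post-patch form: escaped double quotes keep the whole value as one shell word,
    // so make receives a single variable assignment.
    const std::string quoted =
        std::string("make") + std::string(" ") + "VERILATOR_OPTIONS=\"" + verilator_options + "\"";

    std::cout << unquoted << '\n' << quoted << '\n';
    return 0;
}

Printing both strings makes the difference visible; only the quoted form survives the shell's word splitting as a single VERILATOR_OPTIONS assignment.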